n-ou committed on
Commit
8d3a7b6
Β·
1 Parent(s): b094ec8
Dockerfile CHANGED
@@ -1,13 +1,30 @@
1
- FROM python:3.12
 
2
 
3
- WORKDIR /code
 
 
4
 
5
- COPY ./requirements.txt /code/requirements.txt
 
 
 
6
 
7
- RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
8
 
 
 
9
  COPY . .
10
 
 
 
 
 
 
 
11
  EXPOSE 7860
12
 
13
- CMD ["shiny", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
 
 
 
 
 
1
# Dockerfile for Weather Assistant Demo
# This file configures how the application will run on Hugging Face Spaces

# Start with Python 3.12 as our base image
# This gives us a clean Python environment to work with
FROM python:3.12

# Install the UV package manager
# UV is a fast, reliable Python package installer written in Rust
# It's much faster than pip for installing dependencies
RUN pip install uv

# Work out of a dedicated directory instead of the image root ("/"),
# so project files and the virtual environment don't pollute the filesystem root.
WORKDIR /code

# Copy all files from our project into the container
# This includes app.py, pyproject.toml, and other necessary files
COPY . .

# Use UV to install all dependencies specified in pyproject.toml
# This ensures we have the same packages as in local development
# (uv sync creates the virtual environment at ./.venv, i.e. /code/.venv)
RUN uv sync

# Configure the container to listen on port 7860
# This is the default port that Hugging Face Spaces expects
EXPOSE 7860

# Command to run when the container starts
# This launches the Shiny app with specific host and port settings
# - host 0.0.0.0 allows connections from outside the container
# - port 7860 matches our EXPOSE setting above
CMD ["/code/.venv/bin/shiny", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
README.md DELETED
@@ -1,21 +0,0 @@
1
- ---
2
- title: Group 13
3
- emoji: 🌍
4
- colorFrom: yellow
5
- colorTo: indigo
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- short_description: JOB
10
- ---
11
-
12
- This is a templated Space for [Shiny for Python](https://shiny.rstudio.com/py/).
13
-
14
-
15
- To get started with a new app do the following:
16
-
17
- 1) Install Shiny with `pip install shiny`
18
- 2) Create a new app with `shiny create`
19
- 3) Then run the app with `shiny run --reload`
20
-
21
- To learn more about this framework please see the [Documentation](https://shiny.rstudio.com/py/docs/overview.html).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -1,162 +1,89 @@
1
- import faicons as fa
2
- import plotly.express as px
3
-
4
- # Load data and compute static values
5
- from shared import app_dir, tips
6
- from shinywidgets import render_plotly
7
-
8
- from shiny import reactive, render
9
- from shiny.express import input, ui
10
-
11
- bill_rng = (min(tips.total_bill), max(tips.total_bill))
12
-
13
- # Add page title and sidebar
14
- ui.page_opts(title="Restaurant tipping", fillable=True)
15
-
16
- with ui.sidebar(open="desktop"):
17
- ui.input_slider(
18
- "total_bill",
19
- "Bill amount",
20
- min=bill_rng[0],
21
- max=bill_rng[1],
22
- value=bill_rng,
23
- pre="$",
24
- )
25
- ui.input_checkbox_group(
26
- "time",
27
- "Food service",
28
- ["Lunch", "Dinner"],
29
- selected=["Lunch", "Dinner"],
30
- inline=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  )
32
- ui.input_action_button("reset", "Reset filter")
33
-
34
- # Add main content
35
- ICONS = {
36
- "user": fa.icon_svg("user", "regular"),
37
- "wallet": fa.icon_svg("wallet"),
38
- "currency-dollar": fa.icon_svg("dollar-sign"),
39
- "ellipsis": fa.icon_svg("ellipsis"),
40
- }
41
-
42
- with ui.layout_columns(fill=False):
43
- with ui.value_box(showcase=ICONS["user"]):
44
- "Total tippers"
45
-
46
- @render.express
47
- def total_tippers():
48
- tips_data().shape[0]
49
-
50
- with ui.value_box(showcase=ICONS["wallet"]):
51
- "Average tip"
52
-
53
- @render.express
54
- def average_tip():
55
- d = tips_data()
56
- if d.shape[0] > 0:
57
- perc = d.tip / d.total_bill
58
- f"{perc.mean():.1%}"
59
-
60
- with ui.value_box(showcase=ICONS["currency-dollar"]):
61
- "Average bill"
62
-
63
- @render.express
64
- def average_bill():
65
- d = tips_data()
66
- if d.shape[0] > 0:
67
- bill = d.total_bill.mean()
68
- f"${bill:.2f}"
69
-
70
-
71
- with ui.layout_columns(col_widths=[6, 6, 12]):
72
- with ui.card(full_screen=True):
73
- ui.card_header("Tips data")
74
-
75
- @render.data_frame
76
- def table():
77
- return render.DataGrid(tips_data())
78
-
79
- with ui.card(full_screen=True):
80
- with ui.card_header(class_="d-flex justify-content-between align-items-center"):
81
- "Total bill vs tip"
82
- with ui.popover(title="Add a color variable", placement="top"):
83
- ICONS["ellipsis"]
84
- ui.input_radio_buttons(
85
- "scatter_color",
86
- None,
87
- ["none", "sex", "smoker", "day", "time"],
88
- inline=True,
89
- )
90
-
91
- @render_plotly
92
- def scatterplot():
93
- color = input.scatter_color()
94
- return px.scatter(
95
- tips_data(),
96
- x="total_bill",
97
- y="tip",
98
- color=None if color == "none" else color,
99
- trendline="lowess",
100
- )
101
-
102
- with ui.card(full_screen=True):
103
- with ui.card_header(class_="d-flex justify-content-between align-items-center"):
104
- "Tip percentages"
105
- with ui.popover(title="Add a color variable"):
106
- ICONS["ellipsis"]
107
- ui.input_radio_buttons(
108
- "tip_perc_y",
109
- "Split by:",
110
- ["sex", "smoker", "day", "time"],
111
- selected="day",
112
- inline=True,
113
- )
114
-
115
- @render_plotly
116
- def tip_perc():
117
- from ridgeplot import ridgeplot
118
-
119
- dat = tips_data()
120
- dat["percent"] = dat.tip / dat.total_bill
121
- yvar = input.tip_perc_y()
122
- uvals = dat[yvar].unique()
123
-
124
- samples = [[dat.percent[dat[yvar] == val]] for val in uvals]
125
-
126
- plt = ridgeplot(
127
- samples=samples,
128
- labels=uvals,
129
- bandwidth=0.01,
130
- colorscale="viridis",
131
- colormode="row-index",
132
- )
133
-
134
- plt.update_layout(
135
- legend=dict(
136
- orientation="h", yanchor="bottom", y=1.02, xanchor="center", x=0.5
137
- )
138
- )
139
-
140
- return plt
141
-
142
-
143
- ui.include_css(app_dir / "styles.css")
144
-
145
- # --------------------------------------------------------
146
- # Reactive calculations and effects
147
- # --------------------------------------------------------
148
-
149
 
150
- @reactive.calc
151
- def tips_data():
152
- bill = input.total_bill()
153
- idx1 = tips.total_bill.between(bill[0], bill[1])
154
- idx2 = tips.time.isin(input.time())
155
- return tips[idx1 & idx2]
156
 
 
 
 
 
 
 
 
 
157
 
158
- @reactive.effect
159
- @reactive.event(input.reset)
160
- def _():
161
- ui.update_slider("total_bill", value=bill_rng)
162
- ui.update_checkbox_group("time", selected=["Lunch", "Dinner"])
 
1
+ from shiny import App
2
+ from shiny.ui import page_navbar
3
+ from shiny import ui
4
+ import os
5
+
6
+ from ui import (
7
+ home,
8
+ interview_scheduler,
9
+ candidate_profile,
10
+ correlation_analysis,
11
+ chart_generation,
12
+ job_creation,
13
+ document_creation # πŸ‘ˆ NEW
14
+ )
15
+
16
+ from server import (
17
+ home as home_srv,
18
+ interview_scheduler as interview_scheduler_srv,
19
+ candidate_profile as candidate_profile_srv,
20
+ correlation_analysis as correlation_analysis_srv,
21
+ job_creation as job_creation_srv,
22
+ plot_generation as plot_generation_srv,
23
+ document_creation as document_creation_srv # πŸ‘ˆ NEW
24
+ )
25
+
26
# Top-level page layout for the whole application.
# NOTE(review): this assignment rebinds the name `ui`, shadowing the imported
# `shiny.ui` module. It works because the right-hand side is fully evaluated
# (with `ui` still the module) before the rebinding happens, but later code in
# this module can no longer reach shiny.ui through this name — consider
# renaming the page object (e.g. `app_ui`).
ui = ui.page_fluid(
    # HEAD TAGS (global styles/fonts/scripts)
    ui.tags.head(
        # Inter web font used by the custom stylesheet below.
        ui.tags.link(
            rel="stylesheet",
            href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap"
        ),
        # Served via the /static mount configured on the App object below.
        ui.tags.link(
            rel="stylesheet",
            href="/static/custom.css"
        ),
        # Client-side script: fades tab panes out/in when switching navbar tabs.
        ui.tags.script(
            """
            document.addEventListener("DOMContentLoaded", function () {
                const navLinks = document.querySelectorAll('.nav-link');
                navLinks.forEach(link => {
                    link.addEventListener('click', function (e) {
                        // Delay default behavior to allow fade-out
                        const targetTab = this.getAttribute('data-bs-target');
                        const current = document.querySelector('.tab-pane.active');
                        if (current && targetTab && targetTab !== `#${current.id}`) {
                            current.classList.remove('active');
                            current.style.opacity = 0;

                            setTimeout(() => {
                                const next = document.querySelector(targetTab);
                                if (next) {
                                    next.classList.add('active');
                                    next.style.opacity = 1;
                                }
                            }, 150); // Match fade-out time
                            e.preventDefault();
                        }
                    });
                });
            });
            """
        )
    ),

    # MAIN NAVBAR — one nav entry per feature module imported from `ui`.
    ui.page_navbar(
        home.ui,
        job_creation.app_ui,
        candidate_profile.ui,
        interview_scheduler.interview_scheduler_ui,
        correlation_analysis.ui,
        chart_generation.plot_ui,
        document_creation.document_creation_ui,
        title="AI Recruitment Hub"
    )
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
 
 
 
 
 
 
79
 
80
def server(input, output, session):
    """Root server function: register every feature module's server logic."""
    feature_servers = (
        home_srv,
        job_creation_srv,
        candidate_profile_srv,
        interview_scheduler_srv,
        correlation_analysis_srv,
        plot_generation_srv,
        document_creation_srv,
    )
    for module in feature_servers:
        module.server(input, output, session)


# Mount ./styles under the /static URL prefix so the page head can load custom.css.
app = App(
    ui,
    server,
    static_assets={"/static": os.path.join(os.path.dirname(__file__), "styles")},
)
 
 
 
 
code/context.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os

# All application state (jobs, candidates, employees, team summary, offers)
# is persisted in a single JSON file under ./data.
CONTEXT_PATH = "data/context.json"


def _empty_context():
    """Return a fresh, empty context structure."""
    return {"jobs": {}, "candidates": {}, "employees": {}}


def _write_context(context):
    """Serialize *context* to CONTEXT_PATH (pretty-printed)."""
    with open(CONTEXT_PATH, "w") as f:
        json.dump(context, f, indent=2)


def init_context():
    """Create the data directory and an empty context file if missing."""
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs("data", exist_ok=True)
    if not os.path.exists(CONTEXT_PATH):
        _write_context(_empty_context())


def load_context():
    """Load and return the whole context dict from disk."""
    with open(CONTEXT_PATH, "r") as f:
        return json.load(f)


def save_job_context(job_id, job_data):
    """Insert or replace the record for *job_id*."""
    init_context()
    context = load_context()
    context["jobs"][job_id] = job_data
    _write_context(context)


def save_candidate_context(candidate_id, candidate_data):
    """Insert or replace the record for *candidate_id*."""
    init_context()
    context = load_context()
    context["candidates"][candidate_id] = candidate_data
    _write_context(context)


def get_job_context(job_id):
    """Return the stored job record, or {} if unknown."""
    return load_context()["jobs"].get(job_id, {})


def get_candidate_context(candidate_id):
    """Return the stored candidate record, or {} if unknown."""
    return load_context()["candidates"].get(candidate_id, {})


def get_all_jobs():
    """Return the full job-id -> job-data mapping."""
    return load_context()["jobs"]


def get_all_candidates():
    """Return the full candidate-id -> candidate-data mapping."""
    return load_context()["candidates"]


def save_employee_context(employee_id, employee_data):
    """Insert or replace the record for *employee_id*."""
    init_context()
    context = load_context()
    context["employees"][employee_id] = employee_data
    _write_context(context)


def get_employee_context(employee_id):
    """Return the stored employee record, or {} if unknown."""
    return load_context()["employees"].get(employee_id, {})


def get_all_employees():
    """Return the full employee-id -> employee-data mapping."""
    return load_context()["employees"]


def clear_context():
    """Forcefully clears all saved job, candidate and employee data."""
    os.makedirs("data", exist_ok=True)
    _write_context(_empty_context())


def save_team_summary(summary_text):
    """Persist the free-text team summary."""
    init_context()
    context = load_context()
    context["team_summary"] = summary_text
    _write_context(context)


def get_team_summary():
    """Return the stored team summary, or "" if none saved yet."""
    return load_context().get("team_summary", "")


def save_candidate_offer(candidate_id, offer_text):
    """Attach an offer letter to a candidate's onboarding documents."""
    init_context()
    context = load_context()
    candidate = context["candidates"].get(candidate_id, {})
    candidate.setdefault("onboarding_docs", {})["offer_letter"] = offer_text
    context["candidates"][candidate_id] = candidate
    _write_context(context)


def get_candidate_offer(candidate_id):
    """Return the stored offer letter for *candidate_id*, or ""."""
    candidate = load_context()["candidates"].get(candidate_id, {})
    return candidate.get("onboarding_docs", {}).get("offer_letter", "")
91
+
code/llm_connect.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pyrsm as rsm
3
+ import google.generativeai as genai
4
+ import requests
5
+
6
+ # from google.genai import types
7
+ from typing import List
8
+
9
+
10
def query_llama(
    messages: List[dict],
    model: str = "llama-3",
    max_tokens: int = 4000,
    temperature: float = 0.4,  # annotation fixed: default is a float, not an int
    api_key: str = "",
) -> dict:
    """
    Send a chat-completion query to the Llama API.

    Args:
        messages (list): List of dictionaries containing message role and content pairs
        model (str): The model to use. Defaults to "llama-3"
        max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 4000
        temperature (float, optional): Controls randomness in the output. 0 is deterministic,
            higher values more random. Defaults to 0.4
        api_key (str, optional): Authentication token for API access. Required.

    Example:
        messages = [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": "Hello!"}
        ]
        response = query_llama(messages, api_key="your-api-key")

    Raises:
        ValueError: if no API key is supplied.
        requests.HTTPError: if the server returns a non-2xx status.

    Returns:
        dict: The parsed JSON response body.
    """
    url = "https://traip13.tgptinf.ucsd.edu/v1/chat/completions"
    if not api_key:
        raise ValueError("LLAMA: API key is required")

    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
    data = {
        "messages": messages,
        "model": model,
        "max_tokens": max_tokens,
        "temperature": temperature,  # 0 for deterministic output
        "stream": False,
        "n": 1,
    }

    # NOTE(review): no timeout is set, so a hung server blocks forever —
    # consider requests.post(..., timeout=...) as in test_llama_connection.
    response = requests.post(url, headers=headers, json=data)
    response.raise_for_status()  # Raise an exception for bad status codes

    return response.json()
55
+
56
+
57
def test_llama_connection(api_key: str, timeout: int = 20) -> bool:
    """Test connection to the Llama API with a minimal (1-token) request.

    Prints diagnostic information to stdout.

    Args:
        api_key: Bearer token to test.
        timeout: Seconds to wait for a response before giving up.

    Returns:
        True only on HTTP 200; False on auth failure (401), missing
        endpoint (404), timeout, connection error, or any other status.
    """
    url = "https://traip13.tgptinf.ucsd.edu/v1/chat/completions"
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
    data = {"messages": [], "model": "llama-3", "max_tokens": 1, "temperature": 0.4}

    try:
        # Bug fix: honor the `timeout` parameter instead of hard-coding 20 —
        # previously the reported "timed out after {timeout}" message could lie.
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
        print(f"Status code: {response.status_code}")
        print(f"Response headers: {response.headers}")
        try:
            print(f"Response body: {response.json()}")
        except Exception:
            print(f"Response text: {response.text}")
        if response.status_code == 401:
            print("Authentication failed - check your API key")
            return False
        elif response.status_code == 404:
            print("API endpoint not found")
            return False
        elif response.status_code == 200:
            return True
        else:
            print(f"Unexpected status code: {response.status_code}")
            return False
    except requests.exceptions.Timeout:
        print(f"Connection timed out after {timeout} seconds")
        return False
    except requests.exceptions.ConnectionError:
        print("Could not connect to server")
        return False
90
+
91
+
92
def query_gemini(
    messages: List[dict],
    model: str = "gemini-2.0-flash",
    max_tokens: int = 4000,
    temperature: float = 0.4,  # annotation fixed: default is a float, not an int
    api_key: str = "",
) -> str:
    """
    Send a query to the Gemini API.

    OpenAI-style messages are flattened into one prompt string because the
    Gemini SDK takes a single `contents` argument rather than a message list.

    Args:
        messages (list): List of dictionaries containing message role and content pairs
        model (str): The model to use. Defaults to "gemini-2.0-flash"
        max_tokens (int, optional): Maximum number of tokens to generate. Defaults to 4000
        temperature (float, optional): Controls randomness in the output. 0 is deterministic,
            higher values more random. Defaults to 0.4
        api_key (str, optional): Authentication token for API access. Required.

    Raises:
        ValueError: if no API key is supplied.

    Returns:
        str: The generated text (note: the text, not the raw response object —
        the return annotation previously claimed dict).
    """
    if not api_key:
        raise ValueError("Gemini: API key is required")

    # Convert OpenAI-style messages to Gemini format:
    # system prompt(s) first, then user turns, joined into one string.
    system_messages = [msg["content"] for msg in messages if msg["role"] == "system"]
    user_messages = [msg["content"] for msg in messages if msg["role"] == "user"]
    prompt = ". ".join(system_messages + user_messages)

    genai.configure(api_key=api_key)

    # Use a distinct name so the `model` parameter (the model-id string)
    # is no longer shadowed by the model object.
    gen_model = genai.GenerativeModel(model_name=model)

    # Generation settings via the dedicated config class.
    generation_config = genai.types.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_tokens,
    )

    response = gen_model.generate_content(
        contents=prompt, generation_config=generation_config
    )

    return response.text
140
+
141
+
142
def get_response(
    input: str | List[str],  # NOTE(review): shadows the builtin; name kept for caller compatibility
    template: callable,
    role: str = "You are a helpful assistant.",
    temperature: float = 0.4,
    max_tokens: int = 4000,
    md: bool = True,
    llm: str = "llama",
    model_name: str = None,
):
    """
    Build a chat prompt from *input* via *template* and query the selected LLM
    backend (the old docstring claimed Llama only; this also routes to Gemini).

    Args:
        input: Raw content passed through `template` to form the user message.
        template: Callable mapping `input` to the final user-prompt string.
        role: System prompt.
        temperature: Sampling temperature (0 = deterministic).
        max_tokens: Generation cap.
        md: If True, render the reply with pyrsm's markdown helper.
        llm: Backend selector: "llama" or "gemini".
        model_name: Optional Gemini model override; ignored for llama.

    Raises:
        ValueError: if `llm` is neither "llama" nor "gemini".

    Returns:
        The model reply (markdown-rendered when `md` is True).
    """
    messages = [
        {"role": "system", "content": role},
        {
            "role": "user",
            "content": template(input),
        },
    ]

    if llm == "llama":
        response = query_llama(
            messages=messages,
            api_key=os.getenv("LLAMA_API_KEY"),
            temperature=temperature,
            max_tokens=max_tokens,
        )["choices"][0]["message"]["content"]
    elif llm == "gemini":
        response = query_gemini(
            messages=messages,
            api_key=os.getenv("GEMINI_API_KEY"),
            temperature=temperature,
            max_tokens=max_tokens,
            model=model_name if model_name else 'gemini-2.0-flash'
        )
    else:
        raise ValueError("LLM: Invalid LLM specified")

    return rsm.md(response) if md else response
185
+
186
+
187
if __name__ == "__main__":
    # Manual smoke test: exercises each backend in turn and prints results.
    from dotenv import load_dotenv

    # Load environment variables (LLAMA_API_KEY / GEMINI_API_KEY from .env)
    load_dotenv()

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi, how are you? What is your name?"},
    ]

    # Testing Llama connection
    try:
        print("\nTesting Llama connection ...")
        test_llama_connection(api_key=os.getenv("LLAMA_API_KEY"))
    except Exception as e:
        print(f"Error: {e}")

    # Querying Llama (comment fixed: this sends a real query, not a connection test)
    try:
        print("\nQuerying Llama ...")
        response = query_llama(messages, api_key=os.getenv("LLAMA_API_KEY"))
        print(response)
    except Exception as e:
        print(f"Error: {e}")

    # Querying Gemini
    try:
        print("\nQuerying Gemini ...")
        response = query_gemini(messages, api_key=os.getenv("GEMINI_API_KEY"))
        print(response)
    except Exception as e:
        print(f"Error: {e}")

    # End-to-end check of get_response with a fact-check template
    try:
        print("\nTesting get_response ...")

        def template(input):
            return f"""Evaluate the following statement for factual accuracy. If it's incorrect, provide the correct information:
            Statement: {input}
            Evaluation:"""

        response = get_response(
            "The capital of the Netherlands is Utrecht.",
            template=template,
            md=False,
            llm="llama",
        )
        print(response)

    except Exception as e:
        print(f"Error: {e}")
server/__init__.py ADDED
File without changes
server/candidate_profile.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import fitz
3
+ import json
4
+ import re
5
+ from shiny import reactive, render, ui
6
+ from context import get_candidate_context, save_candidate_context, get_team_summary, get_job_context, get_all_jobs, get_all_candidates
7
+ from llm_connect import get_response
8
+ import html
9
+ import markdown
10
+
11
+
12
# Directory (relative to this module) where uploaded resume PDFs live.
RESUME_DIR = "../data/resumes"

def extract_text_from_pdf(filename):
    """Extract the full text of a resume PDF.

    Args:
        filename: Resume identifier WITHOUT the .pdf extension.

    Returns:
        (text, path) on success, or (None, None) when the file is missing
        or PyMuPDF fails to parse it.
    """
    path = os.path.join(RESUME_DIR, filename) + '.pdf'
    if not os.path.exists(path):
        print(f"❌ Resume not found: {path}")
        return None, None
    try:
        # Close the document deterministically instead of leaking the handle.
        with fitz.open(path) as doc:
            return "\n".join(page.get_text() for page in doc), path
    except Exception as e:
        print("❌ PDF error:", e)
        return None, None
25
+
26
def parse_resume_with_llm(resume_text, job_description_text, team_profiles, team_summary):
    """Extract structured candidate fields (name, email, experience, skills,
    1-10 fit score) from a resume by prompting the Llama backend for strict JSON."""
    prompt = (
        f"You are evaluating a candidate for the following job posting:\n\n"
        f"{job_description_text}\n\n"
        f"Here is the candidate's resume:\n\n"
        f"{resume_text}\n\n"
        f"Here are the profiles of the current team members:\n\n{team_profiles}\n\n"
        f"Here is the team summary:\n\n{team_summary}\n\n"
        "Extract the following fields into a valid JSON object:\n"
        "- Name\n"
        "- Email\n"
        "- Years of Experience\n"
        "- Key Skills (as a list)\n"
        "- Llama Score (judge the candidate's overall fit for the job on a scale of 1–10)\n\n"
        "⚠️ Return ONLY a single valid JSON object and nothing else.\n"
    )

    raw = get_response(
        input=prompt,
        template=lambda x: x,  # prompt is already fully formed
        llm="llama",
        md=False,
        temperature=0.0,
        max_tokens=700,
    )

    # Strip the markdown code fences the model sometimes wraps around JSON,
    # then pull out the first JSON-object-looking span.
    cleaned = raw.strip().replace("```json", "").replace("```", "").strip()
    match = re.search(r'\{\s*".+?"\s*:.+?\}', cleaned, re.DOTALL)
    if match is None:
        raise ValueError("No valid JSON object found in LLM response.")
    return json.loads(match.group(0))
57
+
58
def review_llama_score(resume_text, job_description_text, score, team_profiles, team_summary):
    """Second opinion: ask Gemini for its own 1-10 score, given Llama's score."""
    prompt = "".join([
        "You are evaluating a candidate for the following posting:\n\n",
        f"{job_description_text}\n\n",
        f"Resume:\n{resume_text}\n\n",
        f"Team Profiles:\n{team_profiles}\n\n",
        f"Team Summary:\n{team_summary}\n\n",
        f"Llama gave this candidate a score of {score}/10.\n",
        "What is your score (1–10)? Only return the number.",
    ])

    reply = get_response(
        input=prompt,
        template=lambda x: x,
        llm="gemini",
        md=False,
        temperature=0.0,      # deterministic scoring
        max_tokens=10,        # a bare number is expected back
        model_name='gemini-2.0-flash-lite'
    )
    return reply.strip()
78
+
79
def summarize_entire_resume(resume_text, job_description_text, score, team_profiles, team_summary):
    """Produce a detailed narrative summary of the candidate's fit (Llama)."""
    prompt = "".join([
        f"Job Description:\n{job_description_text}\n\n",
        f"Resume:\n{resume_text}\n\n",
        f"Team Profiles:\n{team_profiles}\n\n",
        f"Team Summary:\n{team_summary}\n\n",
        f"The candidate received a score of {score}/10.\n",
        "Write a detailed, honest summary of this candidate's qualifications and fit.",
    ])

    reply = get_response(
        input=prompt,
        template=lambda x: x,
        llm="llama",
        md=False,
        temperature=0.7,   # some creativity is fine for prose summaries
        max_tokens=500
    )
    return reply.strip()
97
+
98
def review_llama_summary(resume_text, job_description_text, score, llama_review, team_profiles, team_summary):
    """Ask Gemini to critique Llama's summary and agree/disagree with its score."""
    prompt = "".join([
        "You are reviewing this Llama summary for a candidate:\n\n",
        f"Job Description:\n{job_description_text}\n\n",
        f"Resume:\n{resume_text}\n\n",
        f"Llama Summary:\n{llama_review}\n\n",
        f"Team Profiles:\n{team_profiles}\n\n",
        f"Team Summary:\n{team_summary}\n\n",
        f"Llama scored this candidate {score}/10.\n",
        "Write your own short evaluation and state if you agree or disagree with Llama’s score.",
    ])

    reply = get_response(
        input=prompt,
        template=lambda x: x,
        llm="gemini",
        md=False,
        temperature=0.7,
        max_tokens=500
    )
    return reply.strip()
118
+
119
+ def server(input, output, session):
120
+
121
+
122
+ @reactive.effect
123
+ def _populate_job_dropdown():
124
+ jobs = get_all_jobs()
125
+ job_choices = {
126
+ k: f"{v.get('title', 'Untitled')} ({k[:8]})"
127
+ for k, v in jobs.items()
128
+ }
129
+ print(job_choices)
130
+ ui.update_select("job_dropdown_for_doc", choices=job_choices)
131
+
132
+
133
+ @reactive.effect
134
+ def _populate_candidate_dropdown():
135
+ job_id = input.job_dropdown_for_doc()
136
+ print("πŸ“Ž selected job_id:", job_id)
137
+
138
+ if not job_id:
139
+ ui.update_select("candidate_dropdown_for_doc", choices={"⬅️ Select a job first": ""})
140
+ return
141
+
142
+ candidates = get_all_candidates()
143
+
144
+ filtered = {
145
+ cid: f"{v.get('Name', cid)} ({v.get('Resume File', 'N/A')})"
146
+ for cid, v in candidates.items()
147
+ if v.get("job_id") == job_id and v.get("Resume File")
148
+ }
149
+
150
+ print(f"βœ… Found {len(filtered)} candidates for job {job_id}")
151
+
152
+ if filtered:
153
+ ui.update_select("candidate_dropdown_for_doc", choices=filtered)
154
+ else:
155
+ ui.update_select("candidate_dropdown_for_doc", choices={"❌ No matching resumes": ""})
156
+
157
+
158
+
159
+
160
+ @output
161
+ @render.ui
162
+ def summary():
163
+ input.show_gemini() # βœ… force reactive trigger
164
+ input.job_dropdown_doc()
165
+ input.candidate_dropdown_doc()
166
+
167
+ filename = input.candidate_dropdown_doc()
168
+ job_id = input.job_dropdown_doc() # πŸ”§ ADD THIS LINE
169
+ use_gemini = input.show_gemini()
170
+
171
+ if not filename or not job_id:
172
+ return "Please select both resume and job ID."
173
+
174
+ job_context = get_job_context(job_id) # βœ… This now works
175
+ job_description_text = job_context.get("job_description", "No job description available.")
176
+ team_profiles = job_context.get("team_profiles", "No team profile available.")
177
+ team_summary = get_team_summary()
178
+
179
+ candidate_id = os.path.splitext(filename)[0]
180
+ ctx = get_candidate_context(candidate_id)
181
+
182
+ # βœ… If already evaluated for this job, return cached summary
183
+ if ctx.get("job_id") == job_id and "Llama Summary" in ctx:
184
+ use_gemini = input.show_gemini()
185
+ print(f"πŸ§ͺ Cached summary found for {candidate_id} / job {job_id} | Gemini: {use_gemini}")
186
+
187
+ raw = ctx.get("Gemini Summary" if use_gemini else "Llama Summary", "No summary available")
188
+ rendered = markdown.markdown(raw.strip())
189
+
190
+ return ui.HTML(
191
+ f"""
192
+ <div style="
193
+ font-family: 'Inter', 'Segoe UI', 'Helvetica Neue', sans-serif;
194
+ font-size: 1rem;
195
+ line-height: 1.6;
196
+ white-space: normal;
197
+ word-wrap: break-word;
198
+ max-width: 900px;
199
+ ">
200
+ {rendered}
201
+ </div>
202
+ """
203
+ )
204
+
205
+ # βœ… Run full pipeline
206
+ resume_text, resume_path = extract_text_from_pdf(filename)
207
+ if not resume_text:
208
+ return "Failed to extract resume."
209
+
210
+ try:
211
+ parsed = parse_resume_with_llm(resume_text, job_description_text, team_profiles, team_summary)
212
+ except Exception as e:
213
+ return f"❌ LLM field extraction failed: {e}"
214
+
215
+ llama_score = parsed["Llama Score"]
216
+ gemini_score = review_llama_score(resume_text, job_description_text, llama_score, team_profiles, team_summary)
217
+ try:
218
+ gemini_score = int(gemini_score)
219
+ except:
220
+ gemini_score = None
221
+
222
+ avg_score = (
223
+ (llama_score + gemini_score) / 2
224
+ if isinstance(llama_score, int) and isinstance(gemini_score, int)
225
+ else "N/A"
226
+ )
227
+
228
+ llama_summary = summarize_entire_resume(resume_text, job_description_text, llama_score, team_profiles, team_summary)
229
+ gemini_review = review_llama_summary(resume_text, job_description_text, llama_score, llama_summary, team_profiles, team_summary)
230
+
231
+ # βœ… Save new result
232
+ ctx.update({
233
+ "job_id": job_id,
234
+ "Resume File": filename,
235
+ "Name": parsed.get("Name"),
236
+ "Email": parsed.get("Email"),
237
+ "Years of Experience": parsed.get("Years of Experience"),
238
+ "Key Skills": parsed.get("Key Skills", []),
239
+ "Llama Score": llama_score,
240
+ "Gemini Score": gemini_score,
241
+ "avg_score": avg_score,
242
+ "Llama Summary": llama_summary,
243
+ "Gemini Summary": gemini_review
244
+ })
245
+
246
+ save_candidate_context(candidate_id, ctx)
247
+
248
+ print(use_gemini)
249
+ summary_text = gemini_review if use_gemini else llama_summary
250
+ rendered = markdown.markdown(summary_text)
251
+
252
+
253
+ return ui.HTML(f"""
254
+ <div style="
255
+ font-family: 'Inter', 'Segoe UI', 'Helvetica Neue', sans-serif;
256
+ font-size: 1rem;
257
+ line-height: 1.6;
258
+ white-space: normal;
259
+ word-wrap: break-word;
260
+ max-width: 900px;
261
+ ">
262
+ {rendered}
263
+ </div>
264
+ """)
265
+
266
+
267
+ @output
268
+ @render.ui
269
+ def score():
270
+ filename = input.candidate_dropdown_doc()
271
+ job_id = input.job_dropdown_doc()
272
+
273
+ if not filename or not job_id:
274
+ return ui.HTML("<p style='color: #888;'>Select a resume and job to view score.</p>")
275
+
276
+ candidate_id = os.path.splitext(filename)[0]
277
+ ctx = get_candidate_context(candidate_id)
278
+
279
+ if ctx.get("job_id") == job_id and "avg_score" in ctx:
280
+ score = ctx["avg_score"]
281
+
282
+ # Choose a color based on the score
283
+ if isinstance(score, (int, float)):
284
+ color = (
285
+ "green" if score >= 8 else
286
+ "orange" if score >= 5 else
287
+ "red"
288
+ )
289
+ else:
290
+ color = "gray"
291
+
292
+ return ui.HTML(f"""
293
+ <div style="
294
+ background-color: {color};
295
+ color: white;
296
+ font-weight: bold;
297
+ font-size: 1.1rem;
298
+ padding: 0.6rem 1.2rem;
299
+ border-radius: 8px;
300
+ display: inline-block;
301
+ ">
302
+ Average Score: {score}
303
+ </div>
304
+ """)
305
+
306
+ return ui.HTML("<p style='color: #888;'>Score not available. Generate profile first.</p>")
307
+
308
+
309
+ @output
310
+ @render.text
311
+ def candidate_note_ui():
312
+ filename = input.candidate_dropdown_doc()
313
+ job_id = input.job_dropdown_doc()
314
+ if not filename or not job_id:
315
+ return ui.input_text_area("candidate_note", "Add a note:", rows=3)
316
+
317
+ candidate_id = os.path.splitext(filename)[0]
318
+ ctx = get_candidate_context(candidate_id)
319
+ note = ctx.get("Note", "") if ctx.get("job_id") == job_id else ""
320
+ return ui.input_text_area("candidate_note", "Add a note:", value=note, rows=3)
321
+
322
+ @output
323
+ @render.ui
324
+ def candidate_tags_ui():
325
+ filename = input.candidate_dropdown_doc()
326
+ job_id = input.job_dropdown_doc()
327
+ if not filename or not job_id:
328
+ return ui.input_text("candidate_tags", "Tags (comma-separated):")
329
+
330
+ candidate_id = os.path.splitext(filename)[0]
331
+ ctx = get_candidate_context(candidate_id)
332
+ tags = ", ".join(ctx.get("Tags", [])) if ctx.get("job_id") == job_id else ""
333
+ return ui.input_text("candidate_tags", "Tags (comma-separated):", value=tags)
334
+
335
+
336
+ @output
337
+ @render.text
338
+ def note_preview():
339
+ filename = input.candidate_dropdown_doc()
340
+ job_id = input.job_dropdown_doc()
341
+ if not filename or not job_id:
342
+ return ""
343
+
344
+ candidate_id = os.path.splitext(filename)[0]
345
+ ctx = get_candidate_context(candidate_id)
346
+
347
+ if ctx.get("job_id") != job_id:
348
+ return ""
349
+
350
+ note = ctx.get("Note", "[No note]")
351
+ tags = ctx.get("Tags", [])
352
+ return f"πŸ“ Note:\n{note}\n\n🏷️ Tags: {', '.join(tags)}"
353
+
354
+ @output
355
+ @render.text
356
+ @reactive.event(input.save_note_tags)
357
+ def note_tag_status():
358
+ filename = input.candidate_dropdown_doc()
359
+ job_id = input.job_dropdown_doc()
360
+ if not filename or not job_id:
361
+ return "❌ Please select both a resume and a job ID."
362
+
363
+ candidate_id = os.path.splitext(filename)[0]
364
+ ctx = get_candidate_context(candidate_id)
365
+
366
+ # Only update if job_id matches
367
+ if ctx.get("job_id") != job_id:
368
+ return "⚠️ Cannot save notes β€” no profile generated for this candidate/job combination."
369
+
370
+ # Get input
371
+ note = input.candidate_note().strip()
372
+ tags_raw = input.candidate_tags()
373
+ tags = [tag.strip() for tag in tags_raw.split(",") if tag.strip()]
374
+
375
+ # Save to context
376
+ ctx["Note"] = note
377
+ ctx["Tags"] = tags
378
+ save_candidate_context(candidate_id, ctx)
379
+
380
+ return "βœ… Note and tags saved."
381
+
server/correlation_analysis.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import pandas as pd
4
+ import numpy as np
5
+ import ast
6
+ from dotenv import load_dotenv
7
+ from shiny import reactive, render, ui
8
+ import google.generativeai as genai
9
+ from google.generativeai.types import FunctionDeclaration, Tool
10
+ from google.api_core.exceptions import ResourceExhausted
11
+ import markdown
12
+
13
+ from context import get_all_candidates, get_all_jobs
14
+
15
+ load_dotenv()
16
+
17
# === Tool Function ===
def correlate_columns(df: pd.DataFrame, col1: str, col2: str) -> dict:
    """Pearson-correlate two DataFrame columns, factorizing non-numeric ones.

    Args:
        df: Candidate data.
        col1, col2: Column names to correlate; must be distinct and present.

    Returns:
        The nested dict from ``DataFrame.corr().to_dict()`` on success
        (``result[col1][col2]`` is the coefficient), or ``{"error": ...}``
        describing the failure.
    """
    if col1 not in df.columns or col2 not in df.columns:
        return {"error": f"One or both columns not found: '{col1}', '{col2}'"}
    if col1 == col2:
        return {"error": "Cannot correlate a column with itself."}
    subset = df[[col1, col2]].dropna()
    for col in (col1, col2):
        # Factorize strings/categoricals into integer codes so .corr() works.
        # isinstance(..., pd.CategoricalDtype) replaces the deprecated
        # pd.api.types.is_categorical_dtype (removed in pandas 3.x).
        if subset[col].dtype == "object" or isinstance(subset[col].dtype, pd.CategoricalDtype):
            subset[col], _ = pd.factorize(subset[col])
    try:
        return subset.corr(method="pearson").to_dict()
    except Exception as e:
        return {"error": str(e)}
31
+
32
# === Gemini Tool Setup ===
# Function-calling schema advertised to Gemini so the model can request a
# correlation computation by name.
correlation_func_schema = FunctionDeclaration(
    name="correlate_columns",
    description="Calculate the Pearson correlation between two candidate variables.",
    parameters={
        "type": "object",
        "properties": {
            "col1": {"type": "string", "description": "First column"},
            "col2": {"type": "string", "description": "Second column"}
        },
        "required": ["col1", "col2"]
    }
)
correlation_tool = Tool(function_declarations=[correlation_func_schema])
# NOTE(review): os.getenv may return None here; unlike CALENDLY_API_KEY in
# interview_scheduler.py there is no startup check β€” confirm GEMINI_API_KEY
# is validated somewhere before first use.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash", tools=[correlation_tool])
48
+
49
# === Server ===
def server(input, output, session):
    """Shiny server section: per-job candidate table, pairwise column
    correlation, and Gemini-powered explanation / follow-up chat."""
    print("βœ… Loaded context-aware Gemini correlation server")

    # Last computed correlation and the column pair it belongs to, shared
    # between the correlation output and the follow-up chat handler.
    last_corr = reactive.Value(None)
    last_cols = reactive.Value(("", ""))
    chat_status = reactive.Value("")

    @reactive.effect
    def _populate_job_ids():
        # Only offer jobs that actually have at least one candidate attached.
        raw_candidates = get_all_candidates()
        job_ids_used = {c.get("job_id") for c in raw_candidates.values() if "job_id" in c}

        all_jobs = get_all_jobs()

        # Build label: value mapping
        job_choices = {
            job_id: f"{job_data.get('title', 'Untitled')} ({job_id[:8]})"
            for job_id, job_data in all_jobs.items()
            if job_id in job_ids_used
        }

        print(f"πŸ“Š Populating job_id dropdown with {len(job_choices)} items")
        ui.update_select("job_id", choices=job_choices)

    @reactive.Calc
    def candidates():
        """DataFrame of candidates for the selected job: numeric columns
        coerced, 'Key Skills' strings parsed back into Python lists."""
        raw = get_all_candidates()
        job_id = input.job_id()
        if not job_id:
            return pd.DataFrame()
        df = pd.DataFrame([c for c in raw.values() if c.get("job_id") == job_id])
        # NOTE(review): assumes every candidate dict carries these keys β€”
        # a KeyError here would break the whole reactive chain; confirm.
        df["Years of Experience"] = pd.to_numeric(df["Years of Experience"], errors="coerce")
        df["avg_score"] = pd.to_numeric(df["avg_score"], errors="coerce")
        df["Key Skills"] = df["Key Skills"].apply(lambda x: ast.literal_eval(x) if isinstance(x, str) else x)
        return df

    @reactive.effect
    def _populate_cols():
        # Offer only columns that make sense to correlate (drop IDs/free text).
        df = candidates()
        if df.empty:
            return
        exclude = {
            "Name", "Email", "Key Skills", "Candidate ID", "Application ID", "Resume File",
            "Llama Summary", "Gemini Summary", "Note", "candidate_id", "job_id",
            "application_date", "source"
        }
        cols = [col for col in df.columns if col not in exclude]
        ui.update_select("col1", choices=cols)
        ui.update_select("col2", choices=cols)

    @output
    @render.table
    def candidate_table():
        # Preview: first 10 candidates without the bulky text columns.
        df = candidates()
        return df.drop(columns=["Resume File", "Llama Summary", "Gemini Summary", "onboarding_docs", "job_id", "Candidate ID"], errors="ignore").head(10)

    @output
    @render.ui
    def correlation_output():
        """Compute the selected correlation, cache it, and ask Gemini for a
        recruiter-oriented explanation."""
        if input.calc_corr() == 0:
            return ui.p("⬇️ Select columns and click 'Calculate Correlation'.")
        df = candidates()
        col1 = input.col1()
        col2 = input.col2()
        if df.empty or not col1 or not col2:
            return ui.p("⚠️ Please select a job and valid columns.")
        result = correlate_columns(df, col1, col2)
        if "error" in result:
            return ui.p(f"❌ {result['error']}")
        try:
            corr_value = result[col1][col2]
        except:  # NOTE(review): bare except β€” should be narrowed to KeyError/TypeError.
            return ui.p("❌ Failed to extract correlation value.")

        # Remember the result so the follow-up chat can refer to it.
        last_corr.set(corr_value)
        last_cols.set((col1, col2))

        prompt = (
            f"The Pearson correlation between '{col1}' and '{col2}' is {corr_value:.4f}.\n\n"
            f"Explain this for a recruiter: include statistical meaning, hiring implications, and limitations."
        )
        try:
            chat = model.start_chat()
            response = chat.send_message(prompt)
            explanation = markdown.markdown(response.text.strip())
        except Exception as e:
            explanation = f"<b>⚠️ Gemini error:</b> {str(e)}"

        return ui.HTML(f"""
        <div><strong>{col1}</strong> vs <strong>{col2}</strong> correlation: <b>{corr_value:.4f}</b></div>
        <hr><div><strong>LLM Explanation:</strong><br>{explanation}</div>
        """)

    @output
    @render.text
    def chat_status_ui():
        # Surfaces "Thinking..." while a chat request is in flight.
        return chat_status.get()

    @output
    @render.ui
    @reactive.event(input.chat_send)
    def chat_response():
        """Answer a free-form follow-up question using the last correlation
        plus a 10-row JSON sample of the candidate data as context."""
        user_msg = input.chat_input().strip()
        col1, col2 = last_cols.get()
        corr_value = last_corr.get()
        df = candidates()

        if not user_msg:
            return ui.HTML("<i>⚠️ Please enter a follow-up question.</i>")
        if df.empty:
            return ui.HTML("<i>⚠️ No candidate data loaded.</i>")
        if not col1 or corr_value is None:
            return ui.HTML("<i>⚠️ Please run a correlation first.</i>")

        chat_status.set("πŸ’¬ Thinking...")

        # Provide full data context for Gemini: 10 rows, all columns
        cleaned_df = df.drop(columns=["Resume File", "Llama Summary", "Gemini Summary"], errors="ignore")
        sample_json = json.dumps(cleaned_df.head(10).to_dict(orient="records"), indent=2)

        prompt = (
            f"You are helping a recruiter analyze candidate data.\n\n"
            f"The last Pearson correlation was between '{col1}' and '{col2}' = {corr_value:.4f}.\n"
            f"The user asked: \"{user_msg}\"\n\n"
            f"Here is a preview of the first 10 rows of the dataset:\n{sample_json}\n\n"
            f"Use both the correlation and sample data to respond helpfully."
        )

        try:
            chat = model.start_chat()
            response = chat.send_message(prompt)
            explanation = markdown.markdown(response.text.strip())
        except ResourceExhausted:
            explanation = "<b>❌ Gemini quota exceeded. Try again soon.</b>"
        except Exception as e:
            explanation = f"<b>❌ Gemini error:</b> {str(e)}"

        chat_status.set("")
        return ui.HTML(explanation)
server/document_creation.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from shiny import reactive, render, ui
3
+ from context import (
4
+ get_candidate_context,
5
+ get_job_context,
6
+ get_team_summary,
7
+ save_candidate_context,
8
+ get_all_jobs,
9
+ get_all_candidates
10
+ )
11
+ from llm_connect import get_response
12
+
13
+ from fpdf import FPDF
14
+ import markdown
15
+ import io
16
+
17
+
18
def draft_offer_letter(candidate_name, job_title, compensation, start_date, team_summary, job_description, hiring_manager_notes):
    """Ask the LLM to draft a professional offer letter from job and candidate
    details, returning the stripped letter text."""
    sections = [
        f"Candidate Name: {candidate_name}\n",
        f"Job Title: {job_title}\n",
        f"Compensation: {compensation}\n",
        f"Start Date: {start_date}\n\n",
        f"Job Description:\n{job_description}\n\n",
        f"Team Summary:\n{team_summary}\n\n",
        f"Hiring Manager Notes:\n{hiring_manager_notes}\n\n",
        "Write a professional, clear, and positive offer letter for this candidate. ",
        "Include a summary of the role, compensation details, start date, and a warm welcome. ",
        "Avoid excessive legal language but maintain formality.",
    ]
    reply = get_response(
        input="".join(sections),
        template=lambda x: x,
        llm="llama",
        md=False,
        temperature=0.5,
        max_tokens=600,
    )
    return reply.strip()
40
+
41
+
42
def generate_full_contract(candidate_name, job_title, compensation, start_date, clauses, company_policies, legal_notes):
    """Ask the LLM to draft a complete employment contract from the supplied
    terms, returning the stripped contract text."""
    sections = [
        f"Candidate Name: {candidate_name}\n",
        f"Job Title: {job_title}\n",
        f"Compensation: {compensation}\n",
        f"Start Date: {start_date}\n\n",
        f"Clauses:\n{clauses}\n\n",
        f"Company Policies:\n{company_policies}\n\n",
        f"Legal Notes:\n{legal_notes}\n\n",
        "Draft a complete employment contract using the information above. ",
        "Structure it with proper headings, include all clauses, and align with common HR compliance standards. ",
        "Use formal legal language where appropriate.",
    ]
    reply = get_response(
        input="".join(sections),
        template=lambda x: x,
        llm="llama",
        md=False,
        temperature=0.4,
        max_tokens=1200,
    )
    return reply.strip()
64
+
65
+
66
def server(input, output, session):
    """Shiny server section: offer-letter / contract generation and PDF download."""

    # === Update job dropdown from context ===
    @reactive.effect
    def _populate_job_dropdown():
        jobs = get_all_jobs()
        # value = job_id (UUID), label = title
        job_choices = {
            k: f"{v.get('title', 'Untitled')} ({k[:8]})"
            for k, v in jobs.items()
        }
        ui.update_select("job_dropdown_doc", choices=job_choices)

    # === Update candidate dropdown based on selected job ===
    @reactive.effect
    def _populate_candidate_dropdown():
        job_id = input.job_dropdown_doc()
        print("πŸ“Ž selected job_id:", job_id)

        if not job_id:
            ui.update_select("candidate_dropdown_doc", choices={"⬅️ Select a job first": ""})
            return

        candidates = get_all_candidates()

        # Only candidates that applied to this job AND have a resume on file.
        filtered = {
            cid: f"{v.get('Name', cid)} ({v.get('Resume File', 'N/A')})"
            for cid, v in candidates.items()
            if v.get("job_id") == job_id and v.get("Resume File")
        }

        print(f"βœ… Found {len(filtered)} candidates for job {job_id}")

        if filtered:
            ui.update_select("candidate_dropdown_doc", choices=filtered)
        else:
            ui.update_select("candidate_dropdown_doc", choices={"❌ No matching resumes": ""})

    # === Offer letter generation ===
    @output
    @render.text
    @reactive.event(input.generate_offer)
    def offer_letter_text():
        """Generate, persist, and display the offer letter for the selection.

        NOTE(review): decorated @render.text but returns ui.HTML on success β€”
        confirm this renders as intended (render.ui would be the usual choice).
        """
        candidate_id = input.candidate_dropdown_doc()
        job_id = input.job_dropdown_doc()

        print("πŸ“¦ candidate_id:", candidate_id)
        print("πŸ“¦ job_id:", job_id)

        if not candidate_id or not job_id:
            return "❌ Select a resume and a job."

        ctx = get_candidate_context(candidate_id)
        job = get_job_context(job_id)

        print("πŸ“ ctx loaded:", bool(ctx))
        print("πŸ“ job loaded:", bool(job))

        if not ctx or not job:
            return "❌ Missing candidate or job context."

        # Manual form overrides take precedence over job defaults.
        comp_override = input.override_compensation().strip()
        start_override = input.override_start_date().strip()
        notes_override = input.override_notes().strip()

        offer = draft_offer_letter(
            candidate_name=ctx.get("Name", "Candidate"),
            job_title=job.get("title", "Unknown Role"),
            compensation=comp_override or job.get("compensation", "TBD"),
            start_date=start_override or job.get("start_date", "TBD"),
            team_summary=get_team_summary(),
            job_description=job.get("job_description", ""),
            hiring_manager_notes=notes_override or job.get("notes", "")
        )

        # Persist the generated letter so the download handler can find it.
        ctx.setdefault("onboarding_docs", {})["offer_letter"] = offer
        save_candidate_context(candidate_id, ctx)
        return ui.HTML(f"<pre style='font-family: Georgia; font-size: 1rem'>{offer}</pre>")

    # === Contract generation ===
    @output
    @render.text
    @reactive.event(input.generate_contract)
    def contract_text():
        """Generate, persist, and display the employment contract."""
        candidate_id = input.candidate_dropdown_doc()
        job_id = input.job_dropdown_doc()

        if not candidate_id or not job_id:
            return "❌ Select a resume and a job."

        ctx = get_candidate_context(candidate_id)
        job = get_job_context(job_id)

        if not ctx or not job:
            return "❌ Missing candidate or job context."

        comp_override = input.override_compensation().strip()
        start_override = input.override_start_date().strip()

        contract = generate_full_contract(
            candidate_name=ctx.get("Name", "Candidate"),
            job_title=job.get("title", "Unknown Role"),
            compensation=comp_override or job.get("compensation", "TBD"),
            start_date=start_override or job.get("start_date", "TBD"),
            clauses=job.get("clauses", "Standard IP, termination, arbitration clauses."),
            company_policies=job.get("policies", "All standard company HR policies apply."),
            legal_notes=job.get("legal_notes", "Subject to U.S. labor law.")
        )

        ctx.setdefault("onboarding_docs", {})["contract"] = contract
        save_candidate_context(candidate_id, ctx)
        return ui.HTML(f"<pre style='font-family: Georgia; font-size: 1rem'>{contract}</pre>")

    @output
    @render.download(filename="Offer_Letter.pdf")
    def download_offer():
        # Rebuild a simple PDF from the stored offer text on every download.
        candidate_id = input.candidate_dropdown_doc()
        ctx = get_candidate_context(candidate_id)
        offer = ctx.get("onboarding_docs", {}).get("offer_letter", "No offer letter found.")

        pdf = FPDF()
        pdf.add_page()
        pdf.set_auto_page_break(auto=True, margin=15)
        pdf.set_font("Arial", size=12)

        for line in offer.split("\n"):
            pdf.multi_cell(0, 10, line)

        # NOTE(review): dest="S" + latin1 encode is the classic-FPDF idiom;
        # under fpdf2 output() already returns bytes β€” confirm which library
        # is pinned, and that the letter text is latin1-encodable.
        pdf_bytes = io.BytesIO()
        pdf_bytes.write(pdf.output(dest="S").encode("latin1"))
        pdf_bytes.seek(0)
        return pdf_bytes

    @output
    @render.download(filename="Contract.pdf")
    def download_contract():
        # Same PDF rebuild as download_offer, but for the stored contract.
        candidate_id = input.candidate_dropdown_doc()
        ctx = get_candidate_context(candidate_id)
        contract = ctx.get("onboarding_docs", {}).get("contract", "No contract found.")

        pdf = FPDF()
        pdf.add_page()
        pdf.set_auto_page_break(auto=True, margin=15)
        pdf.set_font("Arial", size=12)

        for line in contract.split("\n"):
            pdf.multi_cell(0, 10, line)

        pdf_bytes = io.BytesIO()
        pdf_bytes.write(pdf.output(dest="S").encode("latin1"))
        pdf_bytes.seek(0)
        return pdf_bytes
223
+
224
+
server/home.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+ from pathlib import Path
3
+ from shiny import reactive, ui, render
4
+
5
+ import os
6
+ import sys
7
+ sys.path.append('code')
8
+
9
+ from context import (
10
+ get_all_jobs,
11
+ get_all_candidates,
12
+ save_candidate_context,
13
+ init_context
14
+ )
15
+
16
# Build/load the shared context store once at import time.
init_context()

# Resumes are stored under ../data relative to the process working directory.
# NOTE(review): a relative path depends on where the app is launched from β€”
# confirm this resolves to the intended data folder in deployment.
UPLOAD_DIR = Path(os.path.join("..", "data"))
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
20
+
21
def server(input, output, session):
    """Shiny server section: resume upload and job-ID dropdown population."""

    @output
    @render.text
    @reactive.event(input.upload_resume_btn)
    def upload_result():
        """Persist the uploaded resume under a fresh candidate UUID and link
        it to the chosen job in the shared context store."""
        print("🚨 upload_result triggered")

        fileinfo = input.resume_file()
        job_id = input.job_id_input()
        print(f"πŸ“₯ fileinfo = {fileinfo}")
        print(f"πŸ“Œ job_id = {job_id}")

        if not fileinfo or not job_id:
            return "❌ Missing file or job ID."

        # Shiny supplies a list of upload-metadata dicts; only the first is used.
        file_meta = fileinfo[0]
        resume_bytes = Path(file_meta["datapath"]).read_bytes()

        # NOTE(review): the upload is always saved with a .pdf extension
        # regardless of the original file type β€” confirm non-PDF uploads are
        # rejected upstream.
        candidate_id = str(uuid.uuid4())
        filename = f"{candidate_id}.pdf"
        target_path = UPLOAD_DIR / filename
        target_path.write_bytes(resume_bytes)

        candidate_data = {
            "candidate_id": candidate_id,
            "job_id": job_id,
            "Resume File": filename,
            "Application ID": str(uuid.uuid4())
        }
        save_candidate_context(candidate_id, candidate_data)

        print(f"βœ… Uploaded {file_meta['name']} β†’ job_id: {job_id}")
        return f"βœ… Resume uploaded and linked to `{job_id[:8]}`.\nCandidate ID: `{candidate_id}`"

    @reactive.effect
    def _populate_job_ids():
        # Keep the job dropdown in sync with the shared context store.
        all_jobs = get_all_jobs()

        chart_choices = {
            job_id: f"{job_data.get('title', 'Untitled')} ({job_id[:8]})"
            for job_id, job_data in all_jobs.items()
        }

        print(f"Job IDs: {len(chart_choices)} loaded")
        ui.update_select("job_id_input", choices=chart_choices, selected=None)
server/interview_scheduler.py ADDED
@@ -0,0 +1,408 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os, sys, json, io, zipfile
3
+ import json
4
+ import requests
5
+ from dotenv import load_dotenv
6
+ from shiny import reactive, render, ui
7
+
8
+ # Access ../code/context.py
9
+
10
+ from context import get_all_candidates
11
+ from llm_connect import get_response
12
+
13
+ from datetime import datetime
14
+ from fpdf import FPDF
15
+ from PyPDF2 import PdfReader
16
+ import markdown
17
+
18
# Load Calendly token
# The key must be present in the environment (.env) at import time; a missing
# key aborts startup immediately rather than failing on the first API call.
load_dotenv()
CALENDLY_API_KEY = os.getenv('CALENDLY_API_KEY')
if not CALENDLY_API_KEY:
    raise RuntimeError("❌ CALENDLY_API_KEY not set.")

# Common headers for every Calendly REST call.
HEADERS = {
    'Authorization': f"Bearer {CALENDLY_API_KEY}",
    'Content-Type': 'application/json'
}

# Cached scheduling URL so we only hit the Calendly API once per process.
_event_url_cache = None
30
+
31
def get_user_uri():
    """Fetch the current Calendly user's URI from the /users/me endpoint.

    Raises requests.HTTPError on a non-2xx response.
    """
    resp = requests.get("https://api.calendly.com/users/me", headers=HEADERS, timeout=5)
    resp.raise_for_status()
    payload = resp.json()
    return payload['resource']['uri']
35
+
36
def get_event_type_link(user_uri):
    """Return the scheduling URL of the user's first Calendly event type.

    Raises requests.HTTPError on a non-2xx response.
    """
    resp = requests.get(f"https://api.calendly.com/event_types?user={user_uri}", headers=HEADERS, timeout=5)
    resp.raise_for_status()
    payload = resp.json()
    first_event = payload['collection'][0]
    return first_event['scheduling_url']
40
+
41
def schedule_interview(name, email):
    """Return a prefilled Calendly scheduling link for the candidate.

    The event-type URL is fetched once per process and cached in
    ``_event_url_cache``.

    Args:
        name: Candidate display name (prefills the Calendly form).
        email: Candidate email address.
    """
    from urllib.parse import urlencode  # local: file-level imports not guaranteed

    global _event_url_cache
    if _event_url_cache is None:
        user_uri = get_user_uri()
        _event_url_cache = get_event_type_link(user_uri)
    # URL-encode BOTH fields. The previous code only replaced spaces in the
    # name, so names/emails containing '&', '+', '#' or non-ASCII characters
    # produced broken query strings.
    query = urlencode({"name": name, "email": email})
    return f"{_event_url_cache}?{query}"
47
+
48
+
49
def draft_invite_email_with_llm(name, email, link, job_data):
    """Draft the interview-invite email body via the LLM, embedding the
    candidate's Calendly scheduling link and the job's details."""
    prompt_parts = [
        f"You are a recruiter inviting a candidate to schedule an interview.\n\n",
        f"Candidate Name: {name}\n",
        f"Candidate Email: {email}\n\n",
        f"Job Title: {job_data.get('title', 'Unknown')}\n",
        f"Specialization: {job_data.get('specialization', '')}\n",
        f"Job Description:\n{job_data.get('job_description', '')}\n\n",
        f"Scheduling Link: {link}\n\n",
        f"Write a professional, warm, and concise email inviting the candidate to schedule an interview. ",
        f"Include the scheduling link. Return only the email body text. No formatting or extra explanation.\n",
    ]
    return get_response(
        input="".join(prompt_parts),
        template=lambda x: x,
        llm="llama",
        md=False,
        temperature=0.7,
        max_tokens=500,
    )
70
+
71
def export_email_as_pdf(name, email_text):
    """Render ``email_text`` to ``<this dir>/emails/<name>.pdf``.

    Returns the path of the written PDF file.
    """
    doc = FPDF()
    doc.add_page()
    doc.set_auto_page_break(auto=True, margin=15)
    doc.set_font("Arial", size=12)
    for text_line in email_text.strip().split("\n"):
        doc.multi_cell(0, 10, text_line)

    out_dir = os.path.join(os.path.dirname(__file__), "emails")
    os.makedirs(out_dir, exist_ok=True)
    pdf_path = os.path.join(out_dir, f"{name.replace(' ', '_')}.pdf")
    doc.output(pdf_path)
    return pdf_path
83
+
84
+
85
+
86
# === MAIN SHINY SERVER FUNCTION ===
def server(input, output, session):
    """Shiny server section: select candidates per job, generate Calendly
    invite emails as PDFs, preview/edit them, and download a ZIP bundle."""
    print("βœ… Entered server()")

    if not hasattr(session, "_memo"):
        session._memo = {}  # πŸ›  manually initialize in-memory store

    @reactive.Calc
    def job_options():
        """job_id -> label choices, limited to jobs that have candidates.
        Side effect: caches raw jobs/candidates dicts in session._memo."""
        raw_candidates = get_all_candidates()
        job_ids_used = {c.get("job_id") for c in raw_candidates.values() if "job_id" in c}

        # NOTE(review): reads context.json directly with a CWD-relative path,
        # bypassing the context helpers used elsewhere β€” confirm intended.
        path = "../data/context.json"
        try:
            with open(path, "r") as f:
                full = json.load(f)
            all_jobs = full.get("jobs", {})
            session._memo["all_jobs"] = all_jobs
            session._memo["all_candidates"] = full.get("candidates", {})

            # Build job_id: label mapping only for jobs with candidates
            job_choices = {
                job_id: f"{job_data.get('title', 'Untitled')} ({job_id[:8]})"
                for job_id, job_data in all_jobs.items()
                if job_id in job_ids_used
            }

            print(f"πŸ“Š Loaded {len(job_choices)} job IDs with candidates")
            return job_choices
        except Exception as e:
            print("❌ Failed to load job/candidate context:", e)
            return {}

    @output
    @render.ui
    def name_selector():
        # Job picker plus the dependent candidate checkbox group.
        job_ids = job_options()
        if not job_ids:
            return ui.p("No jobs available.")
        return ui.div(
            ui.input_select("selected_job", "Select Job ID", choices=job_ids),
            ui.output_ui("candidate_checkbox")
        )

    @output
    @render.ui
    def candidate_checkbox():
        # Candidates filtered to the selected job; cached for later handlers.
        job_id = input.selected_job()
        if not job_id:
            return ui.p("Select a job to view candidates.")

        candidates = session._memo.get("all_candidates", {})
        filtered = [
            {
                "label": f"{c['Name']} ({c['Email']})",
                "name": c["Name"],
                "email": c["Email"]
            }
            for c in candidates.values()
            if str(c.get("job_id", "")).strip() == str(job_id).strip()
        ]

        session._memo["filtered_candidates"] = filtered

        if not filtered:
            return ui.p("No candidates match this job.")

        return ui.input_checkbox_group(
            "selected_names",
            "Select candidates to schedule",
            choices=[c["label"] for c in filtered]
        )

    @output
    @render.ui
    @reactive.event(input.generate_links)
    def output_links_html():
        """For each selected candidate: build a Calendly link, draft an invite
        email with the LLM, and render it to a per-job PDF on disk."""
        selected = input.selected_names()
        if not selected:
            return ui.p("No candidates selected.")

        job_id = input.selected_job()
        session._memo["active_job_id"] = job_id

        job_data = session._memo.get("all_jobs", {}).get(job_id, {})
        candidates = {
            f"{c['name']} ({c['email']})": c
            for c in session._memo.get("filtered_candidates", [])
        }

        results = []
        pdf_paths = []

        for label in selected:
            c = candidates.get(label)
            if not c:
                results.append(ui.p(f"{label}: Not found"))
                continue
            try:
                link = schedule_interview(c['name'], c['email'])
                email_text = draft_invite_email_with_llm(c['name'], c['email'], link, job_data)

                # Sanitize name + timestamp
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                safe_name = f"{c['name'].replace(' ', '_')}_{timestamp}"
                filename = f"{safe_name}.pdf"

                # Correct folder: milestone2/data/emails/{job_id}/
                output_dir = f"../data/emails/{job_id}"

                os.makedirs(output_dir, exist_ok=True)

                # Full path to PDF file
                pdf_path = os.path.join(output_dir, filename)

                # Generate PDF
                pdf = FPDF()
                pdf.add_page()
                pdf.set_auto_page_break(auto=True, margin=15)
                pdf.set_font("Arial", size=12)
                for line in email_text.strip().split("\n"):
                    pdf.multi_cell(0, 10, line)
                pdf.output(pdf_path)

                # Store PDF
                pdf_paths.append(pdf_path)

                results.append(
                    ui.HTML(f"<p><b>{c['name']}</b>: <a href='{link}' target='_blank'>πŸ“… Schedule</a> β€” PDF ready</p>")
                )

            except Exception as e:
                results.append(ui.p(f"{c['name']}: ERROR - {e}"))

        session._memo["pdf_paths"] = pdf_paths
        return ui.div(*results)

    @output
    @render.ui
    @reactive.event(input.generate_links)  # πŸ” Trigger update after generate button
    def pdf_selector():
        # Dropdown over the PDFs produced by the last generate run.
        files = session._memo.get("pdf_paths", [])
        if not files:
            return ui.p("No PDFs to preview.")

        default_pdf = os.path.basename(files[0])
        return ui.input_select(
            "selected_pdf",
            "Preview PDF",
            choices=[os.path.basename(f) for f in files],
            selected=default_pdf
        )

    @output
    @render.ui
    @reactive.Calc
    def pdf_preview():
        """Extract the selected PDF's text and render it as HTML.

        NOTE(review): stacking @reactive.Calc under @render.ui is unusual β€”
        confirm it is needed. Also, this path points at
        ../../milestone2/data/emails/... while generation wrote to
        ../data/emails/... β€” verify both resolve to the same folder.
        """
        selected = input.selected_pdf()
        job_id = session._memo.get("active_job_id", "").strip()

        # βœ… DEBUG: show raw values
        print("πŸ“„ pdf_preview triggered")
        print("πŸ” selected_pdf:", selected)
        print("πŸ“ active_job_id:", job_id)

        if not selected:
            return ui.p("⚠️ No PDF selected.")
        if not job_id:
            return ui.p("⚠️ No active job selected.")

        file_path = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                "..", "..", "milestone2", "data", "emails", job_id,
                os.path.basename(selected)
            )
        )

        if not os.path.exists(file_path):
            print("❌ File not found on disk.")
            return ui.p(f"❌ PDF not found: {selected}")

        try:
            reader = PdfReader(file_path)
            text = "\n".join([page.extract_text() or "" for page in reader.pages])
            print("βœ… PDF text extracted.")
        except Exception as e:
            print("❌ Exception during PDF read:", e)
            return ui.p(f"❌ Failed to extract PDF text: {e}")

        html = markdown.markdown(text)
        return ui.HTML(f"""
        <div style='padding: 1em; font-family: Georgia, serif; font-size: 1rem; line-height: 1.6;'>
        {html}
        </div>
        """)

    @output
    @render.download(filename="Interview_Emails.zip")
    def download_emails():
        # Bundle every generated PDF for the active job into one ZIP.
        pdf_paths = session._memo.get("pdf_paths", [])
        if not pdf_paths:
            return None  # nothing to download

        job_id = session._memo.get("active_job_id", "").strip()
        zip_path = f"../data/emails/{job_id}/Interview_Emails.zip"

        if os.path.exists(zip_path):
            os.remove(zip_path)

        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
            for path in pdf_paths:
                zipf.write(path, arcname=os.path.basename(path))

        return zip_path

    @output
    @render.text
    @reactive.event(input.submit_chat)
    def refined_output():
        """Let the LLM revise the selected invite email per user instruction."""
        user_instruction = input.chat_prompt().strip()
        selected = input.selected_pdf()
        job_id = session._memo.get("active_job_id", "").strip()

        if not selected or not job_id:
            return "⚠️ Select a PDF to edit."

        # Load original text
        pdf_path = f"../data/emails/{job_id}/{selected}"

        if not os.path.exists(pdf_path):
            return "❌ Could not find the original PDF."

        reader = PdfReader(pdf_path)
        original_text = "\n".join([page.extract_text() or "" for page in reader.pages])

        # Call your LLM with edit prompt
        full_prompt = (
            f"The following is an email invitation:\n\n"
            f"{original_text}\n\n"
            f"User instruction: {user_instruction}\n\n"
            f"Please revise the email accordingly. Return only the revised email."
        )

        try:
            revised = get_response(
                input=full_prompt,
                template=lambda x: x,
                llm="llama",
                md=False,
                temperature=0.6,
                max_tokens=600
            )
            return revised.strip()
        except Exception as e:
            return f"❌ LLM failed: {e}"

    @reactive.effect
    @reactive.event(input.toggle_edit)
    def load_pdf_for_editing():
        # Preload the edit textarea with the selected PDF's extracted text.
        selected = input.selected_pdf()
        job_id = session._memo.get("active_job_id", "").strip()
        if not selected or not job_id:
            return

        file_path = f"../data/emails/{job_id}/{selected}"

        if not os.path.exists(file_path):
            return
        reader = PdfReader(file_path)
        text = "\n".join([page.extract_text() or "" for page in reader.pages])

        ui.update_text_area("edit_text", value=text)

    @reactive.effect
    @reactive.event(input.save_edit)
    def save_edited_pdf():
        # Re-render the edited text over the original PDF file in place.
        selected = input.selected_pdf()
        job_id = session._memo.get("active_job_id", "").strip()
        new_text = input.edit_text().strip()

        if not selected or not job_id or not new_text:
            return

        file_path = f"../data/emails/{job_id}/{selected}"

        pdf = FPDF()
        pdf.add_page()
        pdf.set_auto_page_break(auto=True, margin=15)
        pdf.set_font("Arial", size=12)
        for line in new_text.splitlines():
            pdf.multi_cell(0, 10, line)
        pdf.output(file_path)

        print(f"βœ… Overwrote PDF: {file_path}")

        session.send_input_message("selected_pdf", {"value": selected})

    @output
    @render.ui
    def edit_ui_block():
        # Odd click count = edit mode shown; even = hidden.
        if input.toggle_edit() % 2 == 1:
            return ui.div(
                ui.input_text_area("edit_text", "Edit Email Text:", rows=20),
                ui.input_action_button("save_edit", "πŸ’Ύ Overwrite PDF"),
                style="margin-top: 1em;"
            )
        else:
            return None
406
+
407
+
408
+
server/job_creation.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from shiny import reactive, render, ui
2
+ import uuid
3
+ import os
4
+ import markdown
5
+ from llm_connect import get_response
6
+ from context import save_job_context
7
+ import json
8
+
9
# βœ… Global reactive cache shared across handlers
# Holds the most recent LLM-generated job description so the save handler
# can persist exactly what the chat handler produced.
response_cache = reactive.Value("")
11
+
12
+
13
def call_chatbot(user_input: str, session_id: str) -> str:
    """Send the user's message to the Llama assistant and return its reply.

    The system preamble instructs the model to emit a structured job
    description when asked, and to answer normally otherwise. ``session_id``
    is kept for interface compatibility; it is not forwarded to the model.
    """
    system_preamble = (
        "You are an intelligent recruiting assistant.\n"
        "If the user asks to generate a job description, do so with sections:\n"
        "- About the Role\n- Responsibilities\n- Required Skills\n"
        "- Preferred Qualifications\n- Company Culture Highlights\n"
        "- Salary and Visa Requirements\n\n"
        "If the user asks anything else, just respond helpfully.\n\n"
    )
    reply = get_response(
        input=system_preamble + f"User: {user_input}",
        template=lambda x: x,
        llm="llama",
        md=False,
        temperature=0.9,
        max_tokens=1000,
    )
    return reply.strip()
24
+
25
+
26
def extract_job_metadata(job_description: str) -> dict:
    """Extract title, specialization and required years from a job description.

    Asks the LLM for a strict-JSON answer, then parses it defensively: models
    frequently wrap JSON in markdown fences or surround it with prose, so the
    first ``{...}`` object in the reply is pulled out before decoding.

    Returns a dict with ``job_title``, ``specialization`` and
    ``years_required`` keys; all values are ``None`` when parsing fails.
    """
    import re  # local import: only needed for the tolerant JSON extraction

    prompt = f"""
You are a structured data extraction assistant.
Given a job description, extract these 3 fields:

1. "job_title": (string) The job title.
2. "specialization": (string) The domain or technical area, like 'Data Science', 'Finance', or 'Healthcare'.
3. "years_required": (integer or null) Minimum years of experience mentioned. If not present, return null.

Respond in EXACTLY this JSON format:

{{
"job_title": "...",
"specialization": "...",
"years_required": ...
}}

Job Description:
\"\"\"{job_description}\"\"\"
"""
    response = get_response(
        input=prompt,
        template=lambda x: x,
        llm="llama",
        md=False,
        temperature=0.2,
        max_tokens=200,
    )

    try:
        # BUGFIX: the original json.loads(response) failed whenever the model
        # added ```json fences or commentary. Decode the first {...} blob.
        match = re.search(r"\{.*\}", response, re.DOTALL)
        return json.loads(match.group(0) if match else response)
    except Exception as e:
        print(f"⚠️ Failed to parse metadata response: {e}")
        return {
            "job_title": None,
            "specialization": None,
            "years_required": None
        }
64
+
65
+
66
def server(input, output, session):
    """Wire up the job-creation tab: chat generation, saving, status line."""
    session_id = str(uuid.uuid4())
    chat_status = reactive.Value("")
    save_status = reactive.Value("")

    @output
    @render.ui
    @reactive.event(input.submit_btn)
    def job_chat_response():
        """Send the user's prompt to the chatbot and render the reply."""
        prompt_text = input.user_input().strip()
        if not prompt_text:
            return ui.HTML("<i>⚠️ Please enter a prompt.</i>")

        chat_status.set("πŸ’¬ Thinking...")
        try:
            reply = call_chatbot(prompt_text, session_id)
            # Cache the raw reply so the save handler can persist it later.
            response_cache.set(reply)
            rendered = markdown.markdown(reply, extensions=["extra", "sane_lists"])
        except Exception as e:
            rendered = f"<b>❌ Error:</b> {str(e)}"
            response_cache.set("")
        chat_status.set("")
        return ui.HTML(rendered)

    @reactive.effect()
    @reactive.event(input.save_job_btn)
    def save_generated_job():
        """Persist the cached job description with LLM-extracted metadata."""
        print("πŸ’₯ Save button clicked")
        cached = response_cache.get().strip()
        if not cached:
            print("⚠️ No job response cached.")
            save_status.set("⚠️ No job to save.")
            return

        try:
            print("πŸ” Extracting metadata from response...")
            metadata = extract_job_metadata(cached)
            print("βœ… Metadata extracted:")
            print(json.dumps(metadata, indent=2))

            job_id = str(uuid.uuid4())
            record = {
                "job_id": job_id,
                "title": metadata.get("job_title") or "Untitled",
                "specialization": metadata.get("specialization") or "General",
                "years_required": metadata.get("years_required"),
                "job_description": cached,
            }

            save_job_context(job_id, record)
            save_status.set(f"βœ… Job saved: {record['title']}")
            print(f"βœ… Job saved to context: {job_id}")

        except Exception as e:
            error_msg = f"❌ Failed to save job: {e}"
            print(error_msg)
            save_status.set(error_msg)

    @output(id="save_status_ui")
    @render.text
    def render_save_status():
        """Expose the latest save status message to the UI."""
        return save_status.get()
server/plot_generation.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ sys.path.append('../code')
4
+
5
+ import pandas as pd
6
+ import numpy as np
7
+ from dotenv import load_dotenv
8
+ import json
9
+ from IPython.display import Markdown, display
10
+ import markdown
11
+ from shiny import reactive, render, ui, req
12
+ from shiny.express import render as render_express
13
+ from google.api_core.exceptions import ResourceExhausted
14
+ from pathlib import Path
15
+ import tempfile
16
+
17
+ load_dotenv()
18
+
19
+ import google.generativeai as genai
20
+ from google.generativeai.types import FunctionDeclaration, Tool
21
+
22
+ from llm_connect import get_response
23
+ from context import get_all_candidates, get_all_jobs
24
+ import uuid
25
+
26
+
27
+ # === TOOL FUNCTION === #
28
+
29
+ import plotly.express as px
30
+ import plotly.io as pio
31
+
32
def generate_plot(df: pd.DataFrame, chart_x: str, chart_y: str = None, chart_type: str = "scatter"):
    """Build a Plotly figure of the requested type from candidate data.

    Raises ValueError when a required column is missing from ``df`` or when
    ``chart_type`` is not one of scatter / bar / line / histogram.
    """
    width, height = 1000, 600

    if chart_x not in df.columns:
        raise ValueError(f"Column '{chart_x}' not in DataFrame.")
    # Histograms need only an x column; every other type needs a valid y too.
    if chart_type != "histogram" and (not chart_y or chart_y not in df.columns):
        raise ValueError(f"Column '{chart_y}' not in DataFrame.")

    builders = {
        "scatter": lambda: px.scatter(df, x=chart_x, y=chart_y, title=f"Scatter Plot: {chart_y} vs {chart_x}", width=width, height=height),
        "bar": lambda: px.bar(df, x=chart_x, y=chart_y, title=f"Bar Chart: {chart_y} vs {chart_x}", width=width, height=height),
        "line": lambda: px.line(df, x=chart_x, y=chart_y, title=f"Line Chart: {chart_y} vs {chart_x}", width=width, height=height),
        "histogram": lambda: px.histogram(df, x=chart_x, title=f"Histogram of {chart_x}", width=width, height=height),
    }
    try:
        make_figure = builders[chart_type]
    except KeyError:
        raise ValueError(f"Unsupported chart type: {chart_type}") from None
    return make_figure()
53
+
54
+
55
+
56
# === REGISTER TOOL === #
# JSON-schema declaration of generate_plot so Gemini can emit structured
# function-call requests for charts instead of free text.
plot_func_schema = FunctionDeclaration(
    name="generate_plot",
    description="Generate and return a chart from candidate data.",
    parameters={
        "type": "object",
        "properties": {
            "chart_x": {
                "type": "string",
                "description": "The x-axis column to plot."
            },
            "chart_y": {
                "type": "string",
                "description": "The y-axis column to plot (omit for histogram)."
            },
            "chart_type": {
                "type": "string",
                "enum": ["scatter", "bar", "line", "histogram"],
                "description": "Type of chart to render."
            }
        },
        "required": ["chart_x", "chart_type"]
    }
)

# Single shared Gemini model with the plotting tool attached.
# NOTE(review): assumes GEMINI_API_KEY is set (loaded via load_dotenv above) —
# genai.configure accepts None without complaint, failing only at call time.
plot_tool = Tool(function_declarations=[plot_func_schema])
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash", tools=[plot_tool])
84
+
85
+
86
+ # === MAIN SHINY SERVER FUNCTION ===
87
+
88
def server(input, output, session):
    """Server logic for the chart tab: job selection, plotting, Gemini chat."""
    print("βœ… Entered server()")

    # Last-rendered chart state shared by the explanation/follow-up handlers.
    last_cols = reactive.Value(('', ''))    # (x column, y column)
    last_type = reactive.Value('')          # chart type string
    last_chat = reactive.Value(None)        # Gemini ChatSession from explain step
    last_chart_spec = reactive.Value(None)  # Plotly figure serialized to JSON

    @reactive.effect
    def _populate_job_ids():
        """Offer only jobs that actually have candidates attached."""
        raw_candidates = get_all_candidates()
        job_ids_used = {c.get("job_id") for c in raw_candidates.values() if "job_id" in c}

        all_jobs = get_all_jobs()

        chart_choices = {
            job_id: f"{job_data.get('title', 'Untitled')} ({job_id[:8]})"
            for job_id, job_data in all_jobs.items()
            if job_id in job_ids_used
        }

        print(f"πŸ“Š Chart Job IDs: {len(chart_choices)} loaded")
        ui.update_select("chart_job_id", choices=chart_choices, selected="")

    @reactive.Calc
    def candidates():
        """Candidates for the currently selected job, as a DataFrame."""
        raw = get_all_candidates()
        filtered_job = input.chart_job_id()
        if not filtered_job:
            print("⚠️ No job selected.")
            return pd.DataFrame()
        return pd.DataFrame([c for c in raw.values() if c.get("job_id") == filtered_job])

    @reactive.Calc
    def plot_inputs():
        """Return (df, x, y, type) when the selections are complete, else None."""
        df = candidates()
        chart_x = input.chart_x()
        chart_y = input.chart_y()
        chart_type = input.chart_type()

        if df.empty or not chart_x or (chart_type != "histogram" and not chart_y):
            return None

        return (df, chart_x, chart_y, chart_type)

    @reactive.effect
    def column_dropdowns():
        """Keep the X/Y column dropdowns in sync with the selected job."""
        selected_job = input.chart_job_id()
        if not selected_job:
            # Clear dropdowns if no job selected
            ui.update_select("chart_x", choices=[], selected="")
            ui.update_select("chart_y", choices=[], selected="")
            return

        df = candidates()
        if df.empty:
            return
        # Free-text / identifier columns make no sense as plot axes.
        exclude = {"Name", "Email", "Resume File", "Key Skills", "Llama Summary", "Gemini Summary",
                   "Note", "candidate_id", "job_id", "application_date", "source"}
        valid = [c for c in df.columns if c not in exclude]
        default_x = valid[0] if valid else ""
        default_y = valid[1] if len(valid) > 1 else default_x
        ui.update_select("chart_x", choices=valid, selected=default_x)
        ui.update_select("chart_y", choices=valid, selected=default_y)

    @output
    @render.ui
    @reactive.event(input.generate_plot)
    def generate_display_plot():
        """Render the requested chart and stash its spec for the LLM handlers."""
        print("⚑ generate_display_plot triggered")

        inputs = plot_inputs()
        if inputs is None:
            print("❌ Inputs not ready")
            return ui.p("Please complete all selections before generating a chart.")

        df, chart_x, chart_y, chart_type = inputs

        try:
            fig = generate_plot(df, chart_x, chart_y if chart_type != "histogram" else None, chart_type)
            html = fig.to_html(full_html=False, include_plotlyjs="div")

            last_chart_spec.set(fig.to_json())
            last_cols.set((chart_x, chart_y))
            last_type.set(chart_type)

            # Hidden uuid makes the DOM node unique so Shiny always re-renders.
            return ui.HTML(html + f"<div style='display:none'>{uuid.uuid4()}</div>")

        except Exception as e:
            print(f"❌ Chart generation failed: {e}")
            return ui.p(f"Chart generation error: {e}")

    @output
    @render.ui
    @reactive.event(input.generate_plot)
    def llm_explain_plot():
        """Ask Gemini for a recruiter-facing explanation of the new chart."""
        df = candidates()
        chart_x = input.chart_x()
        chart_y = input.chart_y()
        chart_type = input.chart_type()
        spec_json = last_chart_spec.get()

        if df.empty or not chart_x or (chart_type != "histogram" and not chart_y):
            return ui.p("⚠️ Please select valid columns and chart type.")

        try:
            columns = [chart_x] + ([chart_y] if chart_y else [])
            summary = df[columns].describe().to_string()

            # BUGFIX: the original left `plot` unbound when spec_json was
            # falsy, turning every explanation into a caught NameError.
            plot = pio.from_json(spec_json) if spec_json else None

            prompt = (
                f"Here is the data, summary and plot of {chart_x} vs {chart_y} used to generate a {chart_type} plot"
                f"Plot: \n{plot}\n\n"
                f"Data: {df}\n\n"
                f"Summary: {summary}\n\n"
                "Explain the chart to a recruiter, focusing on insights, trends and implications for hiring.\n\n"
                "Do not call any tool or function. Respond in natural language only."
                "Be detailed and be clear of why the chart shapes up the way it did."
            )

            chat = model.start_chat()
            response = chat.send_message(prompt)
            explanation = markdown.markdown(response.text.strip())
            # Keep the chat session so follow-up questions share this context.
            last_chat.set(chat)
        except Exception as e:
            explanation = f"⚠️ Gemini error: {str(e)}"

        return ui.HTML(f"{explanation}")

    @output
    @render.ui
    @reactive.event(input.chart_chat_send)
    def chat_followup():
        """Answer a follow-up question in the context of the last chart."""
        user_msg = input.chart_chat_input().strip()
        chat = last_chat.get()
        df = candidates()

        chart_x, chart_y = last_cols.get()
        chart_type = last_type.get()
        spec_json = last_chart_spec.get()

        if not user_msg:
            return ui.HTML("⚠️ Please enter a follow-up question.")
        if not chat:
            return ui.HTML("⚠️ Please generate a chart first.")
        if not chart_x or not chart_type:
            return ui.HTML("⚠️ Please choose chart variables and/or type first.")

        # Provide full data context for Gemini: 10 rows, all columns
        clean_df = df.drop(columns=["Resume File", "Llama Summary", "Gemini Summary"], errors="ignore")
        sample_json = json.dumps(clean_df.head(10).to_dict(orient="records"), indent=2)

        plot = pio.from_json(spec_json) if spec_json else None

        followup = (
            f"Here is a sample of the first 10 rows of candidate data: \n{sample_json}\n\n"
            # BUGFIX: closing quote after {chart_y} was missing in the prompt.
            f"This is the generated {chart_type} chart between '{chart_x}' and '{chart_y}':"
            f"Plot: {plot}"
            f"The user asked: \"{user_msg}\"\n\n"
            f"Respond helpfully based on the chart context and question. Be detailed and clear in your explanation"
        )

        try:
            # BUGFIX: reuse the stored chat session. The original validated
            # last_chat but then called model.start_chat() again, discarding
            # the explanation context on every follow-up.
            response = chat.send_message(followup)
            if hasattr(response, "text") and response.text:
                explanation = markdown.markdown(response.text.strip())
            else:
                explanation = "⚠️ Gemini responded with a tool function call instead of natural language. Try adjusting the prompt."
        except ResourceExhausted:
            explanation = "<b>❌ Gemini quota exceeded. Try again soon.</b>"
        except Exception as e:
            explanation = f"<b>❌ Gemini error:</b> {str(e)}"

        return ui.HTML(explanation)

    @reactive.effect
    def log_generate_trigger():
        # Reading the button value registers the reactive dependency;
        # this effect exists purely to log each click.
        _ = input.generate_plot()
        print("πŸ‘† generate_plot button was clicked")
277
+
shared.py DELETED
@@ -1,6 +0,0 @@
1
- from pathlib import Path
2
-
3
- import pandas as pd
4
-
5
- app_dir = Path(__file__).parent
6
- tips = pd.read_csv(app_dir / "tips.csv")
 
 
 
 
 
 
 
styles.css DELETED
@@ -1,12 +0,0 @@
1
- :root {
2
- --bslib-sidebar-main-bg: #f8f8f8;
3
- }
4
-
5
- .popover {
6
- --bs-popover-header-bg: #222;
7
- --bs-popover-header-color: #fff;
8
- }
9
-
10
- .popover .btn-close {
11
- filter: var(--bs-btn-close-white-filter);
12
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
styles/custom.css ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* ----------------- */
2
+ /* 🌞 Base Light Theme */
3
+ /* ----------------- */
4
+ body {
5
+ background-color: #f4f6f8;
6
+ font-family: 'Inter', 'Segoe UI', sans-serif;
7
+ font-size: 16px;
8
+ color: #212529;
9
+ margin: 0;
10
+ padding: 0;
11
+ transition: background-color 0.3s ease, color 0.3s ease;
12
+ }
13
+
14
+ .container {
15
+ max-width: 1200px;
16
+ margin: auto;
17
+ padding: 2em;
18
+ }
19
+
20
+ /* Typography */
21
+ h1, h2, h3, h4, h5, h6,
22
+ p, li, label, span {
23
+ color: #1e2b3a;
24
+ margin-bottom: 1em;
25
+ font-weight: 500;
26
+ }
27
+
28
+ /* Cards */
29
+ .card {
30
+ background: linear-gradient(to bottom right, #ffffff, #f9fbfd);
31
+ border-radius: 14px;
32
+ padding: 2em;
33
+ box-shadow: 0 6px 16px rgba(0,0,0,0.05);
34
+ margin-bottom: 2em;
35
+ animation: fadeInUp 0.4s ease forwards;
36
+ }
37
+
38
+ /* Inputs */
39
+ input[type="text"], select, textarea {
40
+ width: 100%;
41
+ padding: 0.75em 1em;
42
+ border: 1px solid #ced4da;
43
+ border-radius: 10px;
44
+ background-color: #ffffff;
45
+ font-size: 1rem;
46
+ transition: background-color 0.3s, border-color 0.3s, color 0.3s, transform 0.2s;
47
+ }
48
+
49
+ input:focus, textarea:focus, select:focus {
50
+ border-color: #007bff;
51
+ outline: none;
52
+ box-shadow: 0 0 0 2px rgba(0, 123, 255, 0.2);
53
+ transform: scale(1.01);
54
+ }
55
+
56
+ /* Buttons */
57
+ button, .btn {
58
+ background: linear-gradient(to right, #0d6efd, #0b5ed7);
59
+ border: none;
60
+ color: #ffffff;
61
+ padding: 0.75em 1.5em;
62
+ border-radius: 10px;
63
+ font-size: 1rem;
64
+ font-weight: 600;
65
+ cursor: pointer;
66
+ transition: background 0.2s ease, transform 0.1s ease;
67
+ }
68
+
69
+ button:hover, .btn:hover {
70
+ background: linear-gradient(to right, #0b5ed7, #0a53be);
71
+ }
72
+
73
+ button:active, .btn:active {
74
+ transform: scale(0.96);
75
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.15) inset;
76
+ }
77
+
78
+ /* Navigation (Light Mode) */
79
+ .navbar,
80
+ .nav-tabs,
81
+ .nav-link,
82
+ .nav-item {
83
+ background-color: #ffffff;
84
+ color: #212529;
85
+ border-color: #dee2e6;
86
+ transition: background-color 0.3s, color 0.3s;
87
+ }
88
+
89
+ .nav-link.active {
90
+ background-color: #e9ecef;
91
+ color: #000;
92
+ }
93
+
94
+
95
+ /* ----------------- */
96
+ /* ✨ Animations */
97
+ /* ----------------- */
98
+
99
+ /* Global transition for everything */
100
+ body, body * {
101
+ transition:
102
+ background-color 0.3s ease,
103
+ color 0.3s ease,
104
+ border-color 0.3s ease,
105
+ box-shadow 0.3s ease,
106
+ transform 0.2s ease;
107
+ }
108
+
109
+ /* Fade-in for cards */
110
+ @keyframes fadeInUp {
111
+ from {
112
+ opacity: 0;
113
+ transform: translateY(20px);
114
+ }
115
+ to {
116
+ opacity: 1;
117
+ transform: translateY(0);
118
+ }
119
+ }
120
+
121
+ /* Nav hover underline */
122
+ .nav-link {
123
+ position: relative;
124
+ }
125
+
126
+ .nav-link::after {
127
+ content: "";
128
+ position: absolute;
129
+ bottom: 0;
130
+ left: 0;
131
+ height: 2px;
132
+ width: 0%;
133
+ background-color: currentColor;
134
+ transition: width 0.3s ease;
135
+ }
136
+
137
+ .nav-link:hover::after {
138
+ width: 100%;
139
+ }
140
+
141
+ /* Modal / panel zoom-in (if you use them) */
142
+ .modal, .panel, .popup {
143
+ opacity: 0;
144
+ transform: scale(0.96);
145
+ animation: zoomIn 0.3s ease forwards;
146
+ }
147
+
148
+ @keyframes zoomIn {
149
+ to {
150
+ opacity: 1;
151
+ transform: scale(1);
152
+ }
153
+ }
154
+
155
+ /* Tab transition */
156
+ .tab-pane {
157
+ opacity: 0;
158
+ transition: opacity 0.3s ease;
159
+ }
160
+
161
+ .tab-pane.active {
162
+ opacity: 1;
163
+ }
tips.csv DELETED
@@ -1,245 +0,0 @@
1
- total_bill,tip,sex,smoker,day,time,size
2
- 16.99,1.01,Female,No,Sun,Dinner,2
3
- 10.34,1.66,Male,No,Sun,Dinner,3
4
- 21.01,3.5,Male,No,Sun,Dinner,3
5
- 23.68,3.31,Male,No,Sun,Dinner,2
6
- 24.59,3.61,Female,No,Sun,Dinner,4
7
- 25.29,4.71,Male,No,Sun,Dinner,4
8
- 8.77,2.0,Male,No,Sun,Dinner,2
9
- 26.88,3.12,Male,No,Sun,Dinner,4
10
- 15.04,1.96,Male,No,Sun,Dinner,2
11
- 14.78,3.23,Male,No,Sun,Dinner,2
12
- 10.27,1.71,Male,No,Sun,Dinner,2
13
- 35.26,5.0,Female,No,Sun,Dinner,4
14
- 15.42,1.57,Male,No,Sun,Dinner,2
15
- 18.43,3.0,Male,No,Sun,Dinner,4
16
- 14.83,3.02,Female,No,Sun,Dinner,2
17
- 21.58,3.92,Male,No,Sun,Dinner,2
18
- 10.33,1.67,Female,No,Sun,Dinner,3
19
- 16.29,3.71,Male,No,Sun,Dinner,3
20
- 16.97,3.5,Female,No,Sun,Dinner,3
21
- 20.65,3.35,Male,No,Sat,Dinner,3
22
- 17.92,4.08,Male,No,Sat,Dinner,2
23
- 20.29,2.75,Female,No,Sat,Dinner,2
24
- 15.77,2.23,Female,No,Sat,Dinner,2
25
- 39.42,7.58,Male,No,Sat,Dinner,4
26
- 19.82,3.18,Male,No,Sat,Dinner,2
27
- 17.81,2.34,Male,No,Sat,Dinner,4
28
- 13.37,2.0,Male,No,Sat,Dinner,2
29
- 12.69,2.0,Male,No,Sat,Dinner,2
30
- 21.7,4.3,Male,No,Sat,Dinner,2
31
- 19.65,3.0,Female,No,Sat,Dinner,2
32
- 9.55,1.45,Male,No,Sat,Dinner,2
33
- 18.35,2.5,Male,No,Sat,Dinner,4
34
- 15.06,3.0,Female,No,Sat,Dinner,2
35
- 20.69,2.45,Female,No,Sat,Dinner,4
36
- 17.78,3.27,Male,No,Sat,Dinner,2
37
- 24.06,3.6,Male,No,Sat,Dinner,3
38
- 16.31,2.0,Male,No,Sat,Dinner,3
39
- 16.93,3.07,Female,No,Sat,Dinner,3
40
- 18.69,2.31,Male,No,Sat,Dinner,3
41
- 31.27,5.0,Male,No,Sat,Dinner,3
42
- 16.04,2.24,Male,No,Sat,Dinner,3
43
- 17.46,2.54,Male,No,Sun,Dinner,2
44
- 13.94,3.06,Male,No,Sun,Dinner,2
45
- 9.68,1.32,Male,No,Sun,Dinner,2
46
- 30.4,5.6,Male,No,Sun,Dinner,4
47
- 18.29,3.0,Male,No,Sun,Dinner,2
48
- 22.23,5.0,Male,No,Sun,Dinner,2
49
- 32.4,6.0,Male,No,Sun,Dinner,4
50
- 28.55,2.05,Male,No,Sun,Dinner,3
51
- 18.04,3.0,Male,No,Sun,Dinner,2
52
- 12.54,2.5,Male,No,Sun,Dinner,2
53
- 10.29,2.6,Female,No,Sun,Dinner,2
54
- 34.81,5.2,Female,No,Sun,Dinner,4
55
- 9.94,1.56,Male,No,Sun,Dinner,2
56
- 25.56,4.34,Male,No,Sun,Dinner,4
57
- 19.49,3.51,Male,No,Sun,Dinner,2
58
- 38.01,3.0,Male,Yes,Sat,Dinner,4
59
- 26.41,1.5,Female,No,Sat,Dinner,2
60
- 11.24,1.76,Male,Yes,Sat,Dinner,2
61
- 48.27,6.73,Male,No,Sat,Dinner,4
62
- 20.29,3.21,Male,Yes,Sat,Dinner,2
63
- 13.81,2.0,Male,Yes,Sat,Dinner,2
64
- 11.02,1.98,Male,Yes,Sat,Dinner,2
65
- 18.29,3.76,Male,Yes,Sat,Dinner,4
66
- 17.59,2.64,Male,No,Sat,Dinner,3
67
- 20.08,3.15,Male,No,Sat,Dinner,3
68
- 16.45,2.47,Female,No,Sat,Dinner,2
69
- 3.07,1.0,Female,Yes,Sat,Dinner,1
70
- 20.23,2.01,Male,No,Sat,Dinner,2
71
- 15.01,2.09,Male,Yes,Sat,Dinner,2
72
- 12.02,1.97,Male,No,Sat,Dinner,2
73
- 17.07,3.0,Female,No,Sat,Dinner,3
74
- 26.86,3.14,Female,Yes,Sat,Dinner,2
75
- 25.28,5.0,Female,Yes,Sat,Dinner,2
76
- 14.73,2.2,Female,No,Sat,Dinner,2
77
- 10.51,1.25,Male,No,Sat,Dinner,2
78
- 17.92,3.08,Male,Yes,Sat,Dinner,2
79
- 27.2,4.0,Male,No,Thur,Lunch,4
80
- 22.76,3.0,Male,No,Thur,Lunch,2
81
- 17.29,2.71,Male,No,Thur,Lunch,2
82
- 19.44,3.0,Male,Yes,Thur,Lunch,2
83
- 16.66,3.4,Male,No,Thur,Lunch,2
84
- 10.07,1.83,Female,No,Thur,Lunch,1
85
- 32.68,5.0,Male,Yes,Thur,Lunch,2
86
- 15.98,2.03,Male,No,Thur,Lunch,2
87
- 34.83,5.17,Female,No,Thur,Lunch,4
88
- 13.03,2.0,Male,No,Thur,Lunch,2
89
- 18.28,4.0,Male,No,Thur,Lunch,2
90
- 24.71,5.85,Male,No,Thur,Lunch,2
91
- 21.16,3.0,Male,No,Thur,Lunch,2
92
- 28.97,3.0,Male,Yes,Fri,Dinner,2
93
- 22.49,3.5,Male,No,Fri,Dinner,2
94
- 5.75,1.0,Female,Yes,Fri,Dinner,2
95
- 16.32,4.3,Female,Yes,Fri,Dinner,2
96
- 22.75,3.25,Female,No,Fri,Dinner,2
97
- 40.17,4.73,Male,Yes,Fri,Dinner,4
98
- 27.28,4.0,Male,Yes,Fri,Dinner,2
99
- 12.03,1.5,Male,Yes,Fri,Dinner,2
100
- 21.01,3.0,Male,Yes,Fri,Dinner,2
101
- 12.46,1.5,Male,No,Fri,Dinner,2
102
- 11.35,2.5,Female,Yes,Fri,Dinner,2
103
- 15.38,3.0,Female,Yes,Fri,Dinner,2
104
- 44.3,2.5,Female,Yes,Sat,Dinner,3
105
- 22.42,3.48,Female,Yes,Sat,Dinner,2
106
- 20.92,4.08,Female,No,Sat,Dinner,2
107
- 15.36,1.64,Male,Yes,Sat,Dinner,2
108
- 20.49,4.06,Male,Yes,Sat,Dinner,2
109
- 25.21,4.29,Male,Yes,Sat,Dinner,2
110
- 18.24,3.76,Male,No,Sat,Dinner,2
111
- 14.31,4.0,Female,Yes,Sat,Dinner,2
112
- 14.0,3.0,Male,No,Sat,Dinner,2
113
- 7.25,1.0,Female,No,Sat,Dinner,1
114
- 38.07,4.0,Male,No,Sun,Dinner,3
115
- 23.95,2.55,Male,No,Sun,Dinner,2
116
- 25.71,4.0,Female,No,Sun,Dinner,3
117
- 17.31,3.5,Female,No,Sun,Dinner,2
118
- 29.93,5.07,Male,No,Sun,Dinner,4
119
- 10.65,1.5,Female,No,Thur,Lunch,2
120
- 12.43,1.8,Female,No,Thur,Lunch,2
121
- 24.08,2.92,Female,No,Thur,Lunch,4
122
- 11.69,2.31,Male,No,Thur,Lunch,2
123
- 13.42,1.68,Female,No,Thur,Lunch,2
124
- 14.26,2.5,Male,No,Thur,Lunch,2
125
- 15.95,2.0,Male,No,Thur,Lunch,2
126
- 12.48,2.52,Female,No,Thur,Lunch,2
127
- 29.8,4.2,Female,No,Thur,Lunch,6
128
- 8.52,1.48,Male,No,Thur,Lunch,2
129
- 14.52,2.0,Female,No,Thur,Lunch,2
130
- 11.38,2.0,Female,No,Thur,Lunch,2
131
- 22.82,2.18,Male,No,Thur,Lunch,3
132
- 19.08,1.5,Male,No,Thur,Lunch,2
133
- 20.27,2.83,Female,No,Thur,Lunch,2
134
- 11.17,1.5,Female,No,Thur,Lunch,2
135
- 12.26,2.0,Female,No,Thur,Lunch,2
136
- 18.26,3.25,Female,No,Thur,Lunch,2
137
- 8.51,1.25,Female,No,Thur,Lunch,2
138
- 10.33,2.0,Female,No,Thur,Lunch,2
139
- 14.15,2.0,Female,No,Thur,Lunch,2
140
- 16.0,2.0,Male,Yes,Thur,Lunch,2
141
- 13.16,2.75,Female,No,Thur,Lunch,2
142
- 17.47,3.5,Female,No,Thur,Lunch,2
143
- 34.3,6.7,Male,No,Thur,Lunch,6
144
- 41.19,5.0,Male,No,Thur,Lunch,5
145
- 27.05,5.0,Female,No,Thur,Lunch,6
146
- 16.43,2.3,Female,No,Thur,Lunch,2
147
- 8.35,1.5,Female,No,Thur,Lunch,2
148
- 18.64,1.36,Female,No,Thur,Lunch,3
149
- 11.87,1.63,Female,No,Thur,Lunch,2
150
- 9.78,1.73,Male,No,Thur,Lunch,2
151
- 7.51,2.0,Male,No,Thur,Lunch,2
152
- 14.07,2.5,Male,No,Sun,Dinner,2
153
- 13.13,2.0,Male,No,Sun,Dinner,2
154
- 17.26,2.74,Male,No,Sun,Dinner,3
155
- 24.55,2.0,Male,No,Sun,Dinner,4
156
- 19.77,2.0,Male,No,Sun,Dinner,4
157
- 29.85,5.14,Female,No,Sun,Dinner,5
158
- 48.17,5.0,Male,No,Sun,Dinner,6
159
- 25.0,3.75,Female,No,Sun,Dinner,4
160
- 13.39,2.61,Female,No,Sun,Dinner,2
161
- 16.49,2.0,Male,No,Sun,Dinner,4
162
- 21.5,3.5,Male,No,Sun,Dinner,4
163
- 12.66,2.5,Male,No,Sun,Dinner,2
164
- 16.21,2.0,Female,No,Sun,Dinner,3
165
- 13.81,2.0,Male,No,Sun,Dinner,2
166
- 17.51,3.0,Female,Yes,Sun,Dinner,2
167
- 24.52,3.48,Male,No,Sun,Dinner,3
168
- 20.76,2.24,Male,No,Sun,Dinner,2
169
- 31.71,4.5,Male,No,Sun,Dinner,4
170
- 10.59,1.61,Female,Yes,Sat,Dinner,2
171
- 10.63,2.0,Female,Yes,Sat,Dinner,2
172
- 50.81,10.0,Male,Yes,Sat,Dinner,3
173
- 15.81,3.16,Male,Yes,Sat,Dinner,2
174
- 7.25,5.15,Male,Yes,Sun,Dinner,2
175
- 31.85,3.18,Male,Yes,Sun,Dinner,2
176
- 16.82,4.0,Male,Yes,Sun,Dinner,2
177
- 32.9,3.11,Male,Yes,Sun,Dinner,2
178
- 17.89,2.0,Male,Yes,Sun,Dinner,2
179
- 14.48,2.0,Male,Yes,Sun,Dinner,2
180
- 9.6,4.0,Female,Yes,Sun,Dinner,2
181
- 34.63,3.55,Male,Yes,Sun,Dinner,2
182
- 34.65,3.68,Male,Yes,Sun,Dinner,4
183
- 23.33,5.65,Male,Yes,Sun,Dinner,2
184
- 45.35,3.5,Male,Yes,Sun,Dinner,3
185
- 23.17,6.5,Male,Yes,Sun,Dinner,4
186
- 40.55,3.0,Male,Yes,Sun,Dinner,2
187
- 20.69,5.0,Male,No,Sun,Dinner,5
188
- 20.9,3.5,Female,Yes,Sun,Dinner,3
189
- 30.46,2.0,Male,Yes,Sun,Dinner,5
190
- 18.15,3.5,Female,Yes,Sun,Dinner,3
191
- 23.1,4.0,Male,Yes,Sun,Dinner,3
192
- 15.69,1.5,Male,Yes,Sun,Dinner,2
193
- 19.81,4.19,Female,Yes,Thur,Lunch,2
194
- 28.44,2.56,Male,Yes,Thur,Lunch,2
195
- 15.48,2.02,Male,Yes,Thur,Lunch,2
196
- 16.58,4.0,Male,Yes,Thur,Lunch,2
197
- 7.56,1.44,Male,No,Thur,Lunch,2
198
- 10.34,2.0,Male,Yes,Thur,Lunch,2
199
- 43.11,5.0,Female,Yes,Thur,Lunch,4
200
- 13.0,2.0,Female,Yes,Thur,Lunch,2
201
- 13.51,2.0,Male,Yes,Thur,Lunch,2
202
- 18.71,4.0,Male,Yes,Thur,Lunch,3
203
- 12.74,2.01,Female,Yes,Thur,Lunch,2
204
- 13.0,2.0,Female,Yes,Thur,Lunch,2
205
- 16.4,2.5,Female,Yes,Thur,Lunch,2
206
- 20.53,4.0,Male,Yes,Thur,Lunch,4
207
- 16.47,3.23,Female,Yes,Thur,Lunch,3
208
- 26.59,3.41,Male,Yes,Sat,Dinner,3
209
- 38.73,3.0,Male,Yes,Sat,Dinner,4
210
- 24.27,2.03,Male,Yes,Sat,Dinner,2
211
- 12.76,2.23,Female,Yes,Sat,Dinner,2
212
- 30.06,2.0,Male,Yes,Sat,Dinner,3
213
- 25.89,5.16,Male,Yes,Sat,Dinner,4
214
- 48.33,9.0,Male,No,Sat,Dinner,4
215
- 13.27,2.5,Female,Yes,Sat,Dinner,2
216
- 28.17,6.5,Female,Yes,Sat,Dinner,3
217
- 12.9,1.1,Female,Yes,Sat,Dinner,2
218
- 28.15,3.0,Male,Yes,Sat,Dinner,5
219
- 11.59,1.5,Male,Yes,Sat,Dinner,2
220
- 7.74,1.44,Male,Yes,Sat,Dinner,2
221
- 30.14,3.09,Female,Yes,Sat,Dinner,4
222
- 12.16,2.2,Male,Yes,Fri,Lunch,2
223
- 13.42,3.48,Female,Yes,Fri,Lunch,2
224
- 8.58,1.92,Male,Yes,Fri,Lunch,1
225
- 15.98,3.0,Female,No,Fri,Lunch,3
226
- 13.42,1.58,Male,Yes,Fri,Lunch,2
227
- 16.27,2.5,Female,Yes,Fri,Lunch,2
228
- 10.09,2.0,Female,Yes,Fri,Lunch,2
229
- 20.45,3.0,Male,No,Sat,Dinner,4
230
- 13.28,2.72,Male,No,Sat,Dinner,2
231
- 22.12,2.88,Female,Yes,Sat,Dinner,2
232
- 24.01,2.0,Male,Yes,Sat,Dinner,4
233
- 15.69,3.0,Male,Yes,Sat,Dinner,3
234
- 11.61,3.39,Male,No,Sat,Dinner,2
235
- 10.77,1.47,Male,No,Sat,Dinner,2
236
- 15.53,3.0,Male,Yes,Sat,Dinner,2
237
- 10.07,1.25,Male,No,Sat,Dinner,2
238
- 12.6,1.0,Male,Yes,Sat,Dinner,2
239
- 32.83,1.17,Male,Yes,Sat,Dinner,2
240
- 35.83,4.67,Female,No,Sat,Dinner,3
241
- 29.03,5.92,Male,No,Sat,Dinner,3
242
- 27.18,2.0,Female,Yes,Sat,Dinner,2
243
- 22.67,2.0,Male,Yes,Sat,Dinner,2
244
- 17.82,1.75,Male,No,Sat,Dinner,2
245
- 18.78,3.0,Female,No,Thur,Dinner,2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ui/__init__.py ADDED
File without changes
ui/candidate_profile.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from shiny import ui

# Candidate Profile tab. The module deliberately rebinds the name ``ui`` to
# the finished nav panel, so importers receive the panel object itself.
ui = ui.nav_panel(
    "Candidate Profile",

    ui.h2("πŸ§‘β€πŸ’Ό Candidate Dashboard"),

    ui.layout_columns(

        # LEFT PANEL: Resume + Job Selection, Scores, Notes
        ui.card(
            ui.h4("🎯 Evaluation Controls"),
            ui.input_select("job_dropdown_for_doc", "Select Job", choices=[]),
            ui.input_select("candidate_dropdown_for_doc", "Select Candidate", choices=[]),

            ui.tags.hr(),
            ui.h4("πŸ“Š Score Summary"),
            ui.output_ui("score"),

            ui.tags.hr(),
            ui.h4("πŸ—’οΈ Notes & Tags"),
            ui.output_ui("candidate_note_ui"),
            ui.output_ui("candidate_tags_ui"),
            ui.input_action_button("save_note_tags", "πŸ’Ύ Save Notes & Tags"),
            ui.output_text_verbatim("note_tag_status"),
            ui.output_text_verbatim("note_preview"),
        ),
        # RIGHT PANEL: LLM Summary
        ui.card(
            ui.h4("🧠 Candidate Summary"),
            ui.div(
                ui.input_switch('show_gemini', 'Show Gemini', value=False),
                ui.output_ui("summary"),
                class_="mt-2"
            ),
        ),
        # BUGFIX: column widths belong on layout_columns (col_widths), not on
        # ui.card, where the original ``col_width=4/8`` kwargs were silently
        # emitted as inert HTML attributes and had no layout effect.
        col_widths=(4, 8),
    )
)
ui/chart_generation.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from shiny import ui

# βœ… Define the chart tab panel ONCE
# Declarative layout only — every id here ("chart_job_id", "generate_plot",
# "chat_followup", ...) is wired up in server/plot_generation.py.
plot_ui = ui.nav_panel(
    "Chart Generation",  # tab label
    ui.h2("Visualise Trends Between Chosen Candidate Variables"),
    ui.layout_columns(
        # Column 1: chart controls plus the rendered Plotly figure.
        ui.card(
            ui.input_select("chart_job_id", "Select Job ID", choices=[], selected=None),
            ui.input_select("chart_x", "X-axis variable", choices=[], selected=None),
            ui.input_select("chart_y", "Y-axis variable", choices=[], selected=None),
            ui.input_radio_buttons('chart_type', 'Chart Type', choices=['scatter', 'bar', 'line', 'histogram'], selected='scatter'),


            ui.div(
                ui.output_ui("generate_display_plot"),
                id="plot_wrapper",
                class_="plot-wrapper",
                style="padding-top: 1rem; border: 1px solid #ddd; background-color: #fff; width: 100%; overflow-x: auto;"
            ),


            ui.input_action_button("generate_plot", "Generate Chart & Explanation"),
        ),
        # Column 2: automatic LLM explanation of the generated chart.
        ui.card(
            ui.h4("LLM Explanation"),
            ui.output_ui("llm_explain_plot")
        ),
        # Column 3: free-form follow-up chat with Gemini about the chart.
        ui.card(
            ui.h4("Ask a Follow-up Question"),
            ui.input_text_area("chart_chat_input", "Ask anything:", rows=2, width="100%"),
            ui.input_action_button("chart_chat_send", "Send"),
            ui.h4("Gemini Response"),
            ui.div(
                ui.output_ui("chat_followup"),
                style="""
min-height: 300px;
max-height: 700px;
overflow-y: auto;
resize: vertical;
padding: 1rem;
font-size: 1rem;
border: 1px solid #ccc;
background-color: #fefefe;
box-shadow: 0 0 4px rgba(0,0,0,0.1);
"""
            ),
            class_="response-box",
            style="""
min-height: 600px;
width: 100%;
"""
        ),
        # NOTE(review): widths sum to 15 (> 12 grid units), so the third card
        # wraps onto a new row — confirm this is intentional.
        col_widths=(5, 5, 5)
    )
)
ui/correlation_analysis.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""UI for the "Correlation Explorer" tab.

Bug fix: the original module did ``from shiny import ui`` and then rebound
the same name to the nav panel (``ui = ui.nav_panel(...)``), shadowing the
shiny ``ui`` module for the rest of the file and making
``from ui.correlation_analysis import ui`` return a panel rather than a
module-like object.  The panel now has a descriptive name, matching sibling
modules such as ``plot_ui`` and ``document_creation_ui``; ``ui`` is kept as
a backward-compatible alias so existing imports keep working.
"""
from shiny import ui as shiny_ui

correlation_ui = shiny_ui.nav_panel(
    "Correlation Explorer",

    # === Global CSS Styling ===
    shiny_ui.tags.style("""
    .chat-label {
        font-weight: bold;
        margin-top: 1em;
    }
    .response-box {
        background-color: #f9f9f9;
        border: 1px solid #ccc;
        padding: 1em;
        border-radius: 8px;
        margin-top: 1em;
        max-height: 400px;
        overflow-y: auto;
        white-space: normal;
    }
    .markdown p {
        margin: 0.5em 0;
    }
    """),

    # === Header ===
    shiny_ui.h2("Correlation Insights from Candidate Data"),

    # === Box 1: Correlation Controls ===
    shiny_ui.card(
        shiny_ui.input_select("job_id", "Select Job ID", choices=[]),
        shiny_ui.input_select("col1", "Column 1", choices=[]),
        shiny_ui.input_select("col2", "Column 2", choices=[]),
        shiny_ui.input_action_button("calc_corr", "Calculate Correlation"),
        style="margin-bottom: 2em;"
    ),

    # === Box 2: Gemini Correlation Output ===
    shiny_ui.card(
        shiny_ui.h4("Gemini Correlation Insight"),
        shiny_ui.output_ui("correlation_output"),
        style="margin-bottom: 2em;"
    ),

    # === Box 3: Follow-up Chat and LLM Response ===
    shiny_ui.card(
        shiny_ui.h4("Ask a Follow-up Question"),
        shiny_ui.input_text("chat_input", "Your question:", placeholder="e.g. Does this apply to junior candidates?"),
        shiny_ui.input_action_button("chat_send", "Send"),
        shiny_ui.output_text("chat_status_ui"),
        shiny_ui.div(
            shiny_ui.h4("Gemini Response"),
            shiny_ui.output_ui("chat_response"),
            class_="response-box"
        ),
        style="margin-bottom: 2em;"
    ),

    # === Box 4: Candidate Table Preview ===
    shiny_ui.card(
        shiny_ui.h4("Candidate Data Preview"),
        shiny_ui.output_table("candidate_table", width="100%"),
        style="margin-bottom: 2em;"
    )
)

# Backward-compatible alias: existing code imports this panel as ``ui``.
ui = correlation_ui
ui/document_creation.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from shiny import ui

# "Document Creation" tab: generate an offer letter or a contract for a
# selected job/resume pair, preview the text, and download it as a PDF.


def _preview_style(background: str) -> str:
    """Inline CSS for a scrollable, pre-wrapped document preview box."""
    return f"""
    border: 1px solid #ccc;
    padding: 1.5em;
    border-radius: 8px;
    background-color: {background};
    min-height: 200px;
    font-size: 1rem;
    line-height: 1.6;
    max-width: 900px;
    margin-top: 1em;
    white-space: pre-wrap;
    overflow-y: auto;
    """


# Left column: job/candidate selection plus optional overrides and triggers.
_form_card = ui.card(
    ui.input_select("job_dropdown_doc", "Select Job ID", choices=[]),
    ui.input_select("candidate_dropdown_doc", "Select Resume", choices=[]),
    ui.input_text("override_compensation", "Override Compensation (optional):", placeholder="$140,000 + equity"),
    ui.input_text("override_start_date", "Override Start Date (optional):", placeholder="2025-07-01"),
    ui.input_text_area("override_notes", "Hiring Manager Notes (optional):", rows=3),
    ui.input_action_button("generate_offer", "βœ‰οΈ Generate Offer Letter", class_="btn-primary"),
    ui.input_action_button("generate_contract", "πŸ“„ Generate Contract", class_="btn-secondary"),
)

# Right column: the two generated documents with their download buttons.
_results_card = ui.card(
    ui.h4("Generated Offer Letter"),
    ui.div(
        ui.output_text("offer_letter_text"),
        ui.download_button("download_offer", "πŸ“₯ Download Offer Letter as PDF"),
        style=_preview_style("#f9f9f9"),
    ),
    ui.hr(),
    ui.h4("Generated Contract"),
    ui.div(
        ui.output_text("contract_text"),
        ui.download_button("download_contract", "πŸ“₯ Download Contract as PDF"),
        style=_preview_style("#f1f1f1"),
    ),
)

# Tab panel exported to the app.
document_creation_ui = ui.nav_panel(
    "Document Creation",
    ui.h2("Generate Offer Letter or Contract"),
    ui.layout_columns(
        _form_card,
        _results_card,
        col_widths=(5, 7)
    )
)
ui/home.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""UI for the Home tab: welcome text, quick-access cards, resume upload.

Bug fix: the original module did ``from shiny import ui`` and then rebound
the same name to the nav panel (``ui = ui.nav_panel(...)``), shadowing the
shiny ``ui`` module and making ``from ui.home import ui`` return a panel.
The panel now has a descriptive name, consistent with sibling modules
(``plot_ui``, ``document_creation_ui``, ``interview_scheduler_ui``); ``ui``
is kept as a backward-compatible alias so existing imports keep working.
"""
from shiny import ui as shiny_ui

home_ui = shiny_ui.nav_panel(
    "🏠 Home",

    # Top welcome content
    shiny_ui.card(
        shiny_ui.h2("Welcome to the AI Recruitment Hub"),
        shiny_ui.p("This dashboard helps you manage candidate evaluations, visualize trends, and generate documentation with LLMs."),
        shiny_ui.tags.ul(
            shiny_ui.tags.li("πŸ“₯ Upload and parse resumes"),
            shiny_ui.tags.li("πŸ”Ž Evaluate candidate fit for specific roles"),
            shiny_ui.tags.li("πŸ“Š Analyze trends across candidates"),
            shiny_ui.tags.li("🧠 Compare LLM summaries and scores"),
            shiny_ui.tags.li("πŸ“ Generate offer letters and contracts")
        ),
        shiny_ui.hr(),
        shiny_ui.p("Use the navigation bar above to access each module."),
        width=12
    ),

    # Quick-access tool cards
    shiny_ui.layout_columns(
        shiny_ui.card(
            shiny_ui.h4("πŸš€ Candidate Profile Viewer"),
            shiny_ui.p("Explore individual candidate summaries and scores."),
            shiny_ui.input_action_button("go_to_candidate", "Go to Candidate Profile", class_="btn btn-primary")
        ),
        shiny_ui.card(
            shiny_ui.h4("πŸ“… Interview Scheduler"),
            shiny_ui.p("Generate Calendly links for interviews."),
            shiny_ui.input_action_button("go_to_scheduler", "Go to Scheduler", class_="btn btn-secondary")
        ),
        shiny_ui.card(
            shiny_ui.h4("πŸ“ˆ Chart Insights"),
            shiny_ui.p("Plot correlations and visualize trends."),
            shiny_ui.input_action_button("go_to_charts", "Go to Charting", class_="btn btn-info")
        ),
        col_widths=(4, 4, 4)
    ),

    # Resume Upload Section
    shiny_ui.card(
        shiny_ui.h4("πŸ“€ Upload Resume & Link to Job"),
        shiny_ui.layout_columns(
            shiny_ui.input_file("resume_file", "Upload Resume", accept=[".pdf", ".docx"]),
            shiny_ui.input_select("job_id_input", "Select Job", choices=[]),  # To be populated by server
            col_widths=(6, 6)
        ),
        shiny_ui.div(
            shiny_ui.input_action_button("upload_resume_btn", "Upload & Link", class_="btn btn-success"),
            shiny_ui.output_text_verbatim("upload_result"),
            class_="d-flex gap-3 align-items-center mt-3"
        ),
        width=12
    ),

    # JS nav handlers: clicking a quick-access button activates the matching
    # navbar tab by simulating a click on its nav link.
    shiny_ui.tags.script("""
    document.addEventListener('click', function(e) {
        if (e.target.id === 'go_to_candidate') {
            document.querySelector('a[data-value="Candidate Profile"]')?.click();
        }
        if (e.target.id === 'go_to_scheduler') {
            document.querySelector('a[data-value="Interview Scheduler"]')?.click();
        }
        if (e.target.id === 'go_to_charts') {
            document.querySelector('a[data-value="Chart Generation"]')?.click();
        }
    });
    """)
)

# Backward-compatible alias: existing code imports this panel as ``ui``.
ui = home_ui
ui/interview_scheduler.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from shiny import ui

# "Interview Scheduler" tab: pick candidates, generate interview invitation
# emails, preview them as PDFs, and refine/edit the text via chat.

# Left column: candidate selection and the generation trigger.
_selection_card = ui.card(
    ui.h4("Candidate Selection"),
    ui.output_ui("name_selector"),
    ui.input_action_button("generate_links", "βœ‰οΈ Generate Interview Emails", class_="btn-primary"),
)

# Output area for hyperlinks + status messages.
_links_box = ui.div(
    ui.output_ui("output_links_html"),
    style="""
    border: 1px solid #ccc;
    padding: 1.5em;
    border-radius: 8px;
    background-color: #f9f9f9;
    min-height: 200px;
    font-size: 1rem;
    line-height: 1.6;
    max-width: 900px;
    margin-top: 1em;
    white-space: normal;
    overflow-wrap: anywhere;
    """
)

# Scrollable PDF preview pane.
_pdf_box = ui.div(
    ui.output_ui("pdf_preview"),
    style="""
    border: 1px solid #ccc;
    padding: 1.5em;
    border-radius: 8px;
    background-color: #f1f1f1;
    min-height: 400px;
    max-width: 900px;
    margin-top: 1em;
    overflow-y: auto;
    font-family: Georgia, serif;
    font-size: 1rem;
    line-height: 1.6;
    white-space: normal;
    """
)

# Chat box for asking the LLM to rewrite the selected email.
_refine_box = ui.div(
    ui.input_text_area("chat_prompt", "Suggest an edit:", rows=3, placeholder="e.g. make it more concise or formal"),
    ui.input_action_button("submit_chat", "✏️ Apply Change"),
    style="margin-top: 1em;"
)

# Right column: generated emails, ZIP download, PDF preview, chat refinement,
# and a manual edit section rendered dynamically by the server.
_results_card = ui.card(
    ui.h4("Generated Interview Emails"),
    _links_box,
    # ZIP download button
    ui.download_button("download_emails", "πŸ“₯ Download All Emails as ZIP"),
    ui.hr(),
    ui.h4("PDF Preview"),
    ui.output_ui("pdf_selector"),
    _pdf_box,
    ui.hr(),
    ui.h4("Chat to Refine Email"),
    _refine_box,
    ui.output_text_verbatim("refined_output"),
    ui.hr(),
    ui.h4("Edit Email"),
    ui.input_action_button("toggle_edit", "✏️ Edit This Email"),
    # Output UI that the server fills in when editing is toggled on.
    ui.output_ui("edit_ui_block"),
)

# Tab panel exported to the app.
interview_scheduler_ui = ui.nav_panel(
    "Interview Scheduler",
    ui.h2("Generate Interview Invitations"),
    ui.layout_columns(
        _selection_card,
        _results_card,
        col_widths=(5, 7)
    )
)
ui/job_creation.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""UI for the "Job Creation" tab: prompt the LLM for a job description,
review the response, and save the job.

Fix: the panel was exported as ``app_ui``, which is inconsistent with the
sibling modules (``plot_ui``, ``document_creation_ui``,
``interview_scheduler_ui``) and easy to confuse with the application's real
top-level ``app_ui``.  The descriptive name is now primary; ``app_ui`` is
kept as a backward-compatible alias so existing imports keep working.
"""
from shiny import ui

job_creation_ui = ui.nav_panel(
    "Job Creation",
    ui.h2("Generate a Job Description"),

    # Box 1: Prompt Input
    ui.card(
        ui.input_text(
            "user_input",
            "Enter prompt",
            placeholder="e.g. Write a job post for a data analyst",
            width="100%"
        ),
        ui.input_action_button("submit_btn", "Submit"),
        style="margin-bottom: 2em;"
    ),

    # Box 2: LLM Response
    ui.card(
        ui.h4("LLM Response"),
        ui.output_ui("job_chat_response"),
        style="""
        padding: 1.5em;
        background-color: #f9f9f9;
        min-height: 400px;
        font-size: 1rem;
        line-height: 1.6;
        border-radius: 8px;
        border: 1px solid #ccc;
        white-space: normal;
        overflow-y: auto;
        max-width: 800px;
        """
    ),

    # Box 3: Save Job + Status
    ui.card(
        ui.input_action_button("save_job_btn", "Save Job"),
        ui.output_text("save_status_ui"),
        style="margin-top: 2em;"
    )
)

# Backward-compatible alias: existing code imports this panel as ``app_ui``.
app_ui = job_creation_ui