Update app.py
app.py CHANGED

@@ -111,7 +111,20 @@ def generate_response(message: str, system_prompt: str, temperature: float, max_
             max_output_tokens=max_tokens
         )
     )
-    print("
+    print("message", message)
+    print("system_prompt", system_prompt)
+    print("temp",temperature)
+    print("max_tokens", max_tokens)
+    print("response", response)
+    print("response-text", response.text)
+
+    response1 = client.models.generate_content(
+        model="gemini-2.5-flash",
+        contents=message
+    )
+
+    print("response1-text", response.text)
+
     return response.text
 
 def analyze_resume_with_job_description(resume_text, job_description, temperature, max_tokens):
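
For context, here is a minimal, self-contained sketch of what generate_response plausibly looks like after this commit. It is a reconstruction under stated assumptions, not the actual app.py: the diff only shows the closing lines of the first generate_content call, so the client setup (genai.Client()) and every GenerateContentConfig field other than max_output_tokens are guesses. One thing worth noting from the diff itself: the added line print("response1-text", response.text) logs the first response again rather than response1.text, so the output of the second, config-free call is never printed or used; the sketch flags and corrects that.

# Hedged sketch of app.py's generate_response after this commit.
# Assumptions (not visible in the diff): the google-genai client setup and
# the GenerateContentConfig fields other than max_output_tokens.
from google import genai
from google.genai import types

client = genai.Client()  # assumes GEMINI_API_KEY is set in the environment

def generate_response(message: str, system_prompt: str, temperature: float, max_tokens: int):
    response = client.models.generate_content(
        model="gemini-2.5-flash",  # model name taken from the diff
        contents=message,
        config=types.GenerateContentConfig(
            system_instruction=system_prompt,  # assumed; not shown in the diff
            temperature=temperature,           # assumed; not shown in the diff
            max_output_tokens=max_tokens,      # shown in the diff's context lines
        ),
    )

    # Debug prints added by this commit.
    print("message", message)
    print("system_prompt", system_prompt)
    print("temp", temperature)
    print("max_tokens", max_tokens)
    print("response", response)
    print("response-text", response.text)

    # Second call added by this commit: same model and contents, but no
    # config, so it ignores the system prompt, temperature, and token limit.
    response1 = client.models.generate_content(
        model="gemini-2.5-flash",
        contents=message
    )

    # The commit prints response.text here; response1.text is presumably
    # what was intended, since response1 is otherwise never used.
    print("response1-text", response1.text)

    return response.text

Because the second generate_content call's result is discarded and the prints go to stdout, this reads like a temporary debugging probe: it doubles latency and API usage per request, so it would normally be removed or put behind a debug flag once the issue is diagnosed.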