Danial7 commited on
Commit
f102421
·
verified ·
1 Parent(s): 17d688c

Update utils/suggestions.py

Browse files
Files changed (1) hide show
  1. utils/suggestions.py +23 -31
utils/suggestions.py CHANGED
@@ -1,42 +1,34 @@
1
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
import torch

# Weights and tokenizer are loaded once at import time
# (Hugging Face Spaces caches the download between restarts).
_MODEL_ID = "mistralai/Mistral-7B-Instruct-v0.1"

tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    _MODEL_ID,
    torch_dtype=torch.float32,
    device_map="auto",
)
 
11
 
12
def call_llm(prompt: str) -> str:
    """Run one sampled generation for *prompt* and return the reply text.

    The decoded output has any verbatim prompt echo removed and
    surrounding whitespace stripped.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    input_ids = encoded.input_ids.to(model.device)

    with torch.no_grad():
        generated = model.generate(
            input_ids,
            max_new_tokens=256,
            do_sample=True,
            top_p=0.95,
            temperature=0.7,
        )

    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Instruct models often repeat the prompt verbatim; drop it before returning.
    return decoded.replace(prompt, "").strip()
27
-
28
def get_certification_suggestions(text: str) -> str:
    """Ask the LLM for professional certifications suited to this CV text."""
    return call_llm(
        f"Suggest top professional certifications to help advance this person's career:\n{text}"
    )
31
 
32
def get_higher_education_suggestions(text: str) -> str:
    """Ask the LLM for higher-education programs matching this CV text."""
    return call_llm(
        f"Suggest suitable higher education programs or fields based on this CV:\n{text}"
    )
35
 
36
def get_visa_recommendations(text: str) -> str:
    """Ask the LLM for countries/visa pathways suggested by this CV text."""
    return call_llm(
        f"Based on this CV, suggest possible countries and visa pathways to work or study abroad:\n{text}"
    )
39
 
40
def get_career_advice(text: str) -> str:
    """Ask the LLM for personalized career advice based on this CV text."""
    return call_llm(
        f"You are a career counselor. Give detailed personalized career advice based on the following CV:\n{text}"
    )
 
 
1
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Phi-2 is ungated, so it downloads without an auth token.
model_name = "microsoft/phi-2"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
model.eval()  # inference only: disables dropout / training-mode layers
13
def call_llm(prompt: str) -> str:
    """Generate a sampled completion for *prompt* with the module-level model.

    Args:
        prompt: Full instruction text to feed the model. It is truncated
            to the tokenizer's ``model_max_length`` if too long.

    Returns:
        Only the newly generated continuation (the prompt tokens are
        sliced off before decoding), with special tokens removed and
        surrounding whitespace stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)

    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Pass the mask explicitly: without it, generate() warns and
            # sampling can misbehave when pad and eos tokens coincide.
            attention_mask=inputs["attention_mask"],
            max_new_tokens=250,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            # Phi-2 has no dedicated pad token; fall back to eos.
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the tokens after the prompt so the caller does not get
    # the entire prompt (including the CV) echoed back in the response.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
 
 
 
 
 
 
 
24
 
25
def get_certification_suggestions(cv_text):
    """Return LLM-suggested certifications for the given CV text."""
    instruction = f"""You are a helpful career assistant. Analyze the following CV content and suggest relevant certifications to improve the candidate's chances of getting international jobs.

CV:
{cv_text}

List the top 5 certifications with short descriptions.
"""
    return call_llm(instruction).strip()