Manikandan-Alagu committed on
Commit
064ad85
·
verified ·
1 Parent(s): a81d2a2

Update huggingfaceModel.py

Browse files
Files changed (1) hide show
  1. huggingfaceModel.py +71 -71
huggingfaceModel.py CHANGED
@@ -1,71 +1,71 @@
1
- import os
2
- from huggingface_hub import InferenceClient
3
-
4
def generate_dropout_insights(input_values, risk_factors):
    """
    Build a structured dropout-risk report for one student via the
    Hugging Face Inference API and return it as a markdown string.

    Parameters
    ----------
    input_values : dict
        Student attribute name -> value. Identifying fields
        (``roll_no``, ``semester``, ``degree``) are dropped before analysis.
    risk_factors : sequence
        Risk labels aligned positionally with the non-excluded entries
        of ``input_values``.

    Returns
    -------
    str
        The generated report text.
    """
    # Identity fields carry no predictive signal; keep everything else.
    excluded_fields = {"roll_no", "semester", "degree"}
    filtered_input_values = {
        key: value
        for key, value in input_values.items()
        if key not in excluded_fields
    }

    # Pair each retained attribute with its risk label.
    risk_mapping = dict(zip(filtered_input_values.keys(), risk_factors))

    # One "name: value (Risk: label)" line per attribute for the prompt.
    lines = []
    for (key, value), risk in zip(filtered_input_values.items(), risk_factors):
        lines.append(f"{key}: {value} (Risk: {risk})")
    formatted_input = "\n".join(lines)

    prompt = f"""
        Generate a **detailed dropout risk analysis** report based on the following data:

        {formatted_input}

        Format the response as:

        ## **Analysis of Student Dropout Risk**

        **Overall Risk Level: {{risk_level}}**

        ### **🟒 Strengths:**
        List areas where the student is performing well:
        {{strengths}}

        ### **πŸ”΄ Concerns:**
        Identify factors that increase dropout risk:
        {{concerns}}

        ### **πŸ“Œ Action Plan for Improvement:**
        Provide targeted **solutions** based on the student's **weak areas**:
        {{recommendations}}

        ## **πŸ“ Final Recommendation:**
        {{overall_recommendation}}
        """
    messages = [{"role": "user", "content": prompt}]

    client = InferenceClient(
        provider="hyperbolic",
        api_key=os.getenv("HUGGINGFACE_API_TOKEN")
    )

    stream = client.chat.completions.create(
        model="Qwen/QwQ-32B",
        messages=messages,
        temperature=0.5,
        max_tokens=2048,
        top_p=0.7,
        stream=True
    )

    # Accumulate the streamed completion into the final report.
    report = ""
    for chunk in stream:
        report += chunk.choices[0].delta.content

    return report
 
1
+ import os
2
+ from huggingface_hub import InferenceClient
3
+
def generate_dropout_insights(input_values, risk_factors):
    """
    Generate a structured dropout-risk analysis report for one student.

    Streams a chat completion from the Hugging Face Inference API
    (provider "nebius", model DeepSeek-R1) and returns the assembled
    markdown report.

    Parameters
    ----------
    input_values : dict
        Student attribute name -> value. Identifying fields
        (``roll_no``, ``semester``, ``degree``) are excluded from analysis.
    risk_factors : sequence
        Per-attribute risk labels, aligned with the iteration order of the
        *non-excluded* entries of ``input_values``.
        NOTE(review): this positional alignment is assumed from the caller —
        confirm both are produced in the same order.

    Returns
    -------
    str
        The generated markdown report.
    """
    # Identity fields carry no predictive signal for dropout risk.
    excluded_fields = {"roll_no", "semester", "degree"}

    filtered_input_values = {
        key: value
        for key, value in input_values.items()
        if key not in excluded_fields
    }

    # Pair each attribute with its risk label via items() — zip() stops at
    # the shorter sequence, so surplus risk factors are silently ignored.
    formatted_input = "\n".join(
        f"{key}: {value} (Risk: {risk})"
        for (key, value), risk in zip(filtered_input_values.items(), risk_factors)
    )

    messages = [
        {
            "role": "user",
            "content": f"""
        Generate a **detailed dropout risk analysis** report based on the following data:

        {formatted_input}

        Format the response as:

        ## **Analysis of Student Dropout Risk**

        **Overall Risk Level: {{risk_level}}**

        ### **🟒 Strengths:**
        List areas where the student is performing well:
        {{strengths}}

        ### **🔴 Concerns:**
        Identify factors that increase dropout risk:
        {{concerns}}

        ### **📌 Action Plan for Improvement:**
        Provide targeted **solutions** based on the student's **weak areas**:
        {{recommendations}}

        ## **📝 Final Recommendation:**
        {{overall_recommendation}}
        """
        }
    ]

    client = InferenceClient(
        provider="nebius",
        api_key=os.getenv("HUGGINGFACE_API_TOKEN")
    )

    stream = client.chat.completions.create(
        model="deepseek-ai/DeepSeek-R1",
        messages=messages,
        temperature=0.5,
        max_tokens=2048,
        top_p=0.7,
        stream=True
    )

    # Assemble the streamed chunks. delta.content can be None on
    # role/terminator chunks, so guard before collecting; join once at the
    # end instead of repeated string concatenation.
    parts = []
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            parts.append(content)

    return "".join(parts)