Update index.html
Browse files- index.html +200 -155
index.html
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
<!
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
<meta charset="UTF-8" />
|
|
@@ -15,7 +15,7 @@
|
|
| 15 |
<!-- Navigation -->
|
| 16 |
<nav>
|
| 17 |
<div class="nav-container">
|
| 18 |
-
<div class="logo"><i class="ri-
|
| 19 |
<button class="mobile-menu-btn" onclick="toggleMenu()">
|
| 20 |
<i class="ri-align-justify"></i>
|
| 21 |
</button>
|
|
@@ -43,15 +43,14 @@
|
|
| 43 |
class="profile-image"
|
| 44 |
/>
|
| 45 |
<h1>Truong-Phuc Nguyen</h1>
|
| 46 |
-
<p class="subtitle">NLP
|
| 47 |
<p class="description">
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
processing, question answering systems, and legal AI applications.
|
| 51 |
</p>
|
| 52 |
<div class="social-links">
|
| 53 |
<a href="#" class="social-link" id="scholarLink">
|
| 54 |
-
<i class="ri-graduation-cap-fill"></i>
|
| 55 |
</a>
|
| 56 |
<a href="#" class="social-link" id="githubLink">
|
| 57 |
<i class="ri-github-fill"></i> GitHub
|
|
@@ -69,20 +68,19 @@
|
|
| 69 |
<h2 class="section-title">About Me</h2>
|
| 70 |
<div class="glass-card">
|
| 71 |
<p style="text-align: justify; line-height: 1.8">
|
| 72 |
-
Hello! I'm Truong-Phuc Nguyen
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
language processing using both pre-trained language models and large
|
| 77 |
-
language models.
|
| 78 |
</p>
|
| 79 |
<p style="text-align: justify; line-height: 1.8; margin-top: 1rem">
|
| 80 |
-
Through
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
where I can leverage my
|
| 85 |
-
processing algorithms, and research
|
|
|
|
| 86 |
</p>
|
| 87 |
</div>
|
| 88 |
</section>
|
|
@@ -96,25 +94,33 @@
|
|
| 96 |
Education
|
| 97 |
</h3>
|
| 98 |
<p style="color: var(--accent); font-weight: 600">
|
| 99 |
-
Bachelor of Engineering in Computer Science (
|
| 100 |
-
|
| 101 |
</p>
|
| 102 |
<p style="color: var(--text-secondary)">September 2021 - June 2025</p>
|
| 103 |
|
| 104 |
<div class="education-stats">
|
| 105 |
<div class="stat-item">
|
| 106 |
<div class="stat-number">9.04</div>
|
| 107 |
-
<div class="stat-label">
|
| 108 |
</div>
|
| 109 |
<div class="stat-item">
|
| 110 |
<div class="stat-number">3.75</div>
|
| 111 |
-
<div class="stat-label">
|
| 112 |
</div>
|
| 113 |
<div class="stat-item">
|
| 114 |
-
<div class="stat-number">
|
| 115 |
-
<div class="stat-label">
|
| 116 |
</div>
|
| 117 |
</div>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
</div>
|
| 119 |
</section>
|
| 120 |
|
|
@@ -129,49 +135,38 @@
|
|
| 129 |
|
| 130 |
<div class="publication-item">
|
| 131 |
<h4>
|
| 132 |
-
|
| 133 |
-
|
|
|
|
| 134 |
</h4>
|
| 135 |
<p class="publication-meta">
|
| 136 |
-
<strong>Authors:</strong> Nhu Hai Phung, Chi Thanh Nguyen,
|
| 137 |
-
|
| 138 |
-
<strong>
|
| 139 |
-
<strong>Published in:</strong> Engineering Applications of
|
| 140 |
-
Artificial Intelligence, Volume 159, Page 111570. Elsevier,
|
| 141 |
-
2025<br />
|
| 142 |
-
<strong>Impact:</strong> WoS, Q1, IF: 8.0<br />
|
| 143 |
-
<a
|
| 144 |
-
href="https://doi.org/10.1016/j.engappai.2025.111570"
|
| 145 |
-
class="doi-link"
|
| 146 |
-
target="_blank"
|
| 147 |
-
>DOI: 10.1016/j.engappai.2025.111570</a
|
| 148 |
-
>
|
| 149 |
</p>
|
| 150 |
</div>
|
| 151 |
|
| 152 |
<div class="publication-item">
|
| 153 |
<h4>
|
| 154 |
-
|
| 155 |
-
|
|
|
|
| 156 |
</h4>
|
| 157 |
<p class="publication-meta">
|
| 158 |
-
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>,
|
| 159 |
-
|
| 160 |
-
<strong>Status:</strong> Submitted to Artificial Intelligence and
|
| 161 |
-
Law, Springer, 2025
|
| 162 |
</p>
|
| 163 |
</div>
|
| 164 |
|
| 165 |
<div class="publication-item">
|
| 166 |
<h4>
|
| 167 |
-
|
| 168 |
-
|
|
|
|
| 169 |
</h4>
|
| 170 |
<p class="publication-meta">
|
| 171 |
-
<strong>Authors:</strong> Tien-Dat Nguyen,
|
| 172 |
-
<strong>
|
| 173 |
-
<strong>Published in:</strong> UTEHY Journal of Applied Science
|
| 174 |
-
and Technology, Vol. 40, 2023, pp. 27-32
|
| 175 |
</p>
|
| 176 |
</div>
|
| 177 |
</div>
|
|
@@ -183,69 +178,50 @@
|
|
| 183 |
|
| 184 |
<div class="publication-item">
|
| 185 |
<h4>
|
| 186 |
-
|
|
|
|
|
|
|
| 187 |
</h4>
|
| 188 |
<p class="publication-meta">
|
| 189 |
-
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>,
|
| 190 |
-
Quy-Nhan Nguyen & Minh-Tien Nguyen<br />
|
| 191 |
<strong>Status:</strong> Submitted to ACL ARR 2026 January Cycle.
|
| 192 |
</p>
|
| 193 |
</div>
|
| 194 |
-
|
| 195 |
<div class="publication-item">
|
| 196 |
<h4>
|
| 197 |
-
|
| 198 |
-
|
|
|
|
| 199 |
</h4>
|
| 200 |
<p class="publication-meta">
|
| 201 |
-
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>,
|
| 202 |
-
|
| 203 |
-
Duong, Minh-Tien Nguyen<br />
|
| 204 |
-
<strong>Published in:</strong> Proceedings of 2025 17th International Conference on Knowledge and System Engineering (KSE), Da Lat, Vietnam, 2025, pp. 1-5<br />
|
| 205 |
-
<a
|
| 206 |
-
href="https://doi.org/10.1109/KSE68178.2025.11309584"
|
| 207 |
-
class="doi-link"
|
| 208 |
-
target="_blank"
|
| 209 |
-
>DOI: 10.1109/KSE68178.2025.11309584</a>
|
| 210 |
</p>
|
| 211 |
</div>
|
| 212 |
|
| 213 |
<div class="publication-item">
|
| 214 |
<h4>
|
| 215 |
-
|
| 216 |
-
|
|
|
|
|
|
|
| 217 |
</h4>
|
| 218 |
<p class="publication-meta">
|
| 219 |
-
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>,
|
| 220 |
-
|
| 221 |
-
Minh-Tien Nguyen<br />
|
| 222 |
-
<strong>Published in:</strong> Information and Communication
|
| 223 |
-
Technology. SOICT 2024. CCIS, vol. 2352, pp. 441-455. Springer,
|
| 224 |
-
Singapore, 2025<br />
|
| 225 |
-
<a
|
| 226 |
-
href="https://doi.org/10.1007/978-981-96-4288-5_34"
|
| 227 |
-
class="doi-link"
|
| 228 |
-
target="_blank"
|
| 229 |
-
>DOI: 10.1007/978-981-96-4288-5_34</a
|
| 230 |
-
>
|
| 231 |
</p>
|
| 232 |
</div>
|
| 233 |
|
| 234 |
<div class="publication-item">
|
| 235 |
<h4>
|
| 236 |
-
|
|
|
|
|
|
|
| 237 |
</h4>
|
| 238 |
<p class="publication-meta">
|
| 239 |
-
<strong>Authors:</strong> Thu-Ha Nguyen,
|
| 240 |
-
<strong>
|
| 241 |
-
Le Thi Viet Huong, Chi Thanh Nguyen, and Minh-Tien Nguyen<br />
|
| 242 |
-
<strong>Published in:</strong> Proceedings of 2024 16th International Conference on Knowledge and System Engineering (KSE), Kuala Lumpur, Malaysia, 2024, pp. 440-446.<br />
|
| 243 |
-
<a
|
| 244 |
-
href="https://doi.org/10.1109/KSE63888.2024.11063637"
|
| 245 |
-
class="doi-link"
|
| 246 |
-
target="_blank"
|
| 247 |
-
>DOI: 10.1109/KSE63888.2024.11063637</a
|
| 248 |
-
>
|
| 249 |
</p>
|
| 250 |
</div>
|
| 251 |
</div>
|
|
@@ -273,22 +249,33 @@
|
|
| 273 |
<div class="glass-card">
|
| 274 |
<div class="timeline">
|
| 275 |
<div class="timeline-item">
|
| 276 |
-
<div class="timeline-date">
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 277 |
<h3>
|
| 278 |
-
MedMAS
|
| 279 |
-
Generation
|
| 280 |
</h3>
|
| 281 |
<p style="color: var(--accent); font-weight: 600">
|
| 282 |
-
|
| 283 |
</p>
|
| 284 |
<p class="timeline-content">
|
| 285 |
-
Building a
|
| 286 |
-
extract patient information through conversations, generate
|
| 287 |
-
follow-up questions to collect more patient information and
|
| 288 |
-
create summary reports for the pre-medical examination stage
|
| 289 |
-
for patients, to save examination time for doctors and improve
|
| 290 |
-
the patient's experience during medical examination and
|
| 291 |
-
treatment.
|
| 292 |
</p>
|
| 293 |
<p
|
| 294 |
style="
|
|
@@ -303,26 +290,13 @@
|
|
| 303 |
|
| 304 |
<div class="timeline-item">
|
| 305 |
<div class="timeline-date">September 2024 - Present</div>
|
| 306 |
-
<h3>
|
| 307 |
-
ViLegalLM: Language Models for Vietnamese Legal Text
|
| 308 |
-
</h3>
|
| 309 |
<p style="color: var(--accent); font-weight: 600">
|
| 310 |
NLU Laboratory, Hung Yen University of Technology and
|
| 311 |
Education
|
| 312 |
</p>
|
| 313 |
<p class="timeline-content">
|
| 314 |
-
|
| 315 |
-
1.72B) models specifically for the legal domain in Vietnam
|
| 316 |
-
through continual pretraining of language models on large
|
| 317 |
-
datasets from four sources of authoritative legal documents
|
| 318 |
-
in Vietnam. Legal pretrained models are trained on 16GB high-quality
|
| 319 |
-
large-scale synthetic datasets, compared with 7 state-of-the-art
|
| 320 |
-
Vietnamese general and legal LMs of the same size across 10
|
| 321 |
-
benchmarks spanning 4 main tasks: Information Retrieval,
|
| 322 |
-
Question Answering, Natural Language Inference, and Syllogism Reasoning.
|
| 323 |
-
ViLegalLM achieves state-of-the-art performance on 10 benchmarks,
|
| 324 |
-
establishes the newest strong baselines for Vietnamese Legal text
|
| 325 |
-
processing.
|
| 326 |
</p>
|
| 327 |
<p
|
| 328 |
style="
|
|
@@ -339,16 +313,11 @@
|
|
| 339 |
<div class="timeline-date">August 2025 - September 2025</div>
|
| 340 |
<h3>Adaptive Weighted Ensemble for Legal Text Processing</h3>
|
| 341 |
<p style="color: var(--accent); font-weight: 600">
|
| 342 |
-
NLU Laboratory,
|
|
|
|
| 343 |
</p>
|
| 344 |
<p class="timeline-content">
|
| 345 |
-
Designed a framework combining multiple bi-encoders through
|
| 346 |
-
query-specific confidence calculation, advanced dynamic
|
| 347 |
-
weighting, and ensemble score fusion with cross-encoder
|
| 348 |
-
reranker. Achieved 3rd place in Legal Information Retrieval
|
| 349 |
-
task (F2-score: 0.8482, 7.51% improvement) and 2nd place in
|
| 350 |
-
Legal Question Answering (97.56% accuracy). Paper accepted at
|
| 351 |
-
ISAILD-KSE 2025.
|
| 352 |
</p>
|
| 353 |
<p
|
| 354 |
style="
|
|
@@ -364,17 +333,13 @@
|
|
| 364 |
<div class="timeline-item">
|
| 365 |
<div class="timeline-date">February 2024 - September 2025</div>
|
| 366 |
<h3>
|
| 367 |
-
IntelliChat -
|
| 368 |
</h3>
|
| 369 |
<p style="color: var(--accent); font-weight: 600">
|
| 370 |
-
NLU Laboratory,
|
| 371 |
</p>
|
| 372 |
<p class="timeline-content">
|
| 373 |
-
Built
|
| 374 |
-
integrating information retrieval with answer generation.
|
| 375 |
-
System processes legal queries, retrieves relevant legal
|
| 376 |
-
articles, and generates accurate answers using
|
| 377 |
-
state-of-the-art NLP techniques.
|
| 378 |
</p>
|
| 379 |
<p
|
| 380 |
style="
|
|
@@ -393,15 +358,16 @@
|
|
| 393 |
QACTune - Advanced Legal Information Retrieval Framework
|
| 394 |
</h3>
|
| 395 |
<p style="color: var(--accent); font-weight: 600">
|
| 396 |
-
NLU Laboratory,
|
|
|
|
| 397 |
</p>
|
| 398 |
<p class="timeline-content">
|
| 399 |
Developed a novel fine-tuning framework leveraging
|
| 400 |
Question-Context-Answer relationships for enhancing legal
|
| 401 |
information retrieval in low-resource settings. Average
|
| 402 |
-
improvements of 3.9% and 4.8% in MAP@
|
| 403 |
-
Engineering Applications of Artificial Intelligence (WoS,
|
| 404 |
-
IF: 8.0).
|
| 405 |
</p>
|
| 406 |
<p
|
| 407 |
style="
|
|
@@ -421,7 +387,8 @@
|
|
| 421 |
Education
|
| 422 |
</h3>
|
| 423 |
<p style="color: var(--accent); font-weight: 600">
|
| 424 |
-
NLU Laboratory,
|
|
|
|
| 425 |
</p>
|
| 426 |
<p class="timeline-content">
|
| 427 |
Pioneered Vietnamese Question-Answer Generation research in
|
|
@@ -621,29 +588,56 @@
|
|
| 621 |
<div class="timeline">
|
| 622 |
<div class="timeline-item">
|
| 623 |
<div class="timeline-date">November 2025</div>
|
| 624 |
-
<h3>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 625 |
</div>
|
| 626 |
<div class="timeline-item">
|
| 627 |
<div class="timeline-date">July 2025</div>
|
| 628 |
-
<h3>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 629 |
</div>
|
| 630 |
<div class="timeline-item">
|
| 631 |
<div class="timeline-date">February 2025</div>
|
| 632 |
-
<h3>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 633 |
</div>
|
| 634 |
<div class="timeline-item">
|
| 635 |
<div class="timeline-date">December 2024</div>
|
| 636 |
<h3>
|
| 637 |
-
|
|
|
|
|
|
|
| 638 |
</h3>
|
| 639 |
</div>
|
| 640 |
<div class="timeline-item">
|
| 641 |
<div class="timeline-date">February 2024</div>
|
| 642 |
-
<h3>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 643 |
</div>
|
| 644 |
<div class="timeline-item">
|
| 645 |
-
<div class="timeline-date">
|
| 646 |
-
<h3>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 647 |
</div>
|
| 648 |
</div>
|
| 649 |
</div>
|
|
@@ -656,25 +650,40 @@
|
|
| 656 |
<div class="award-item">
|
| 657 |
<div class="award-icon"><i class="ri-star-line"></i></div>
|
| 658 |
<div class="award-info">
|
| 659 |
-
<h4>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 660 |
<p class="award-date">November 2024</p>
|
| 661 |
</div>
|
| 662 |
</div>
|
| 663 |
<div class="award-item">
|
| 664 |
<div class="award-icon"><i class="ri-bard-line"></i></div>
|
| 665 |
<div class="award-info">
|
| 666 |
-
<h4>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 667 |
<p class="award-date">2021-2025</p>
|
| 668 |
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 669 |
4 Academic Excellence & 8 Talented Program Scholarships -
|
| 670 |
-
Consistently ranked #1
|
| 671 |
</p>
|
| 672 |
</div>
|
| 673 |
</div>
|
| 674 |
<div class="award-item">
|
| 675 |
<div class="award-icon"><i class="ri-star-line"></i></div>
|
| 676 |
<div class="award-info">
|
| 677 |
-
<h4>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 678 |
<p class="award-date">2021-2023</p>
|
| 679 |
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 680 |
Two consecutive academic years
|
|
@@ -692,40 +701,76 @@
|
|
| 692 |
<div class="award-item">
|
| 693 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 694 |
<div class="award-info">
|
| 695 |
-
<h4>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 696 |
<p class="award-date">June 2025</p>
|
|
|
|
|
|
|
|
|
|
| 697 |
</div>
|
| 698 |
</div>
|
| 699 |
<div class="award-item">
|
| 700 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 701 |
<div class="award-info">
|
| 702 |
-
<h4>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 703 |
<p class="award-date">June 2025</p>
|
|
|
|
|
|
|
|
|
|
| 704 |
</div>
|
| 705 |
</div>
|
| 706 |
<div class="award-item">
|
| 707 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 708 |
<div class="award-info">
|
| 709 |
<h4>
|
| 710 |
-
|
|
|
|
|
|
|
|
|
|
| 711 |
</h4>
|
| 712 |
<p class="award-date">May 2025</p>
|
|
|
|
|
|
|
|
|
|
| 713 |
</div>
|
| 714 |
</div>
|
| 715 |
<div class="award-item">
|
| 716 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 717 |
<div class="award-info">
|
| 718 |
-
<h4>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 719 |
<p class="award-date">March 2025</p>
|
|
|
|
|
|
|
|
|
|
| 720 |
</div>
|
| 721 |
</div>
|
| 722 |
<div class="award-item">
|
| 723 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 724 |
<div class="award-info">
|
| 725 |
<h4>
|
| 726 |
-
|
|
|
|
|
|
|
|
|
|
| 727 |
</h4>
|
| 728 |
<p class="award-date">June 2024</p>
|
|
|
|
|
|
|
|
|
|
| 729 |
</div>
|
| 730 |
</div>
|
| 731 |
<div class="award-item">
|
|
@@ -826,7 +871,7 @@
|
|
| 826 |
|
| 827 |
<!-- Footer -->
|
| 828 |
<footer>
|
| 829 |
-
<p>©
|
| 830 |
</footer>
|
| 831 |
|
| 832 |
<script src="script.js"></script>
|
|
|
|
| 1 |
+
<!doctype html>
|
| 2 |
<html lang="en">
|
| 3 |
<head>
|
| 4 |
<meta charset="UTF-8" />
|
|
|
|
| 15 |
<!-- Navigation -->
|
| 16 |
<nav>
|
| 17 |
<div class="nav-container">
|
| 18 |
+
<div class="logo"><i class="ri-star-fill"></i></i></div>
|
| 19 |
<button class="mobile-menu-btn" onclick="toggleMenu()">
|
| 20 |
<i class="ri-align-justify"></i>
|
| 21 |
</button>
|
|
|
|
| 43 |
class="profile-image"
|
| 44 |
/>
|
| 45 |
<h1>Truong-Phuc Nguyen</h1>
|
| 46 |
+
<!-- <p class="subtitle">NLP Researcher</p> -->
|
| 47 |
<p class="description">
|
| 48 |
+
<i class="ri-arrow-left-double-line"></i></i>To stand where others cannot, you must
|
| 49 |
+
endure what others will not.<i class="ri-arrow-right-double-line"></i>
|
|
|
|
| 50 |
</p>
|
| 51 |
<div class="social-links">
|
| 52 |
<a href="#" class="social-link" id="scholarLink">
|
| 53 |
+
<i class="ri-graduation-cap-fill"></i> GG Scholar
|
| 54 |
</a>
|
| 55 |
<a href="#" class="social-link" id="githubLink">
|
| 56 |
<i class="ri-github-fill"></i> GitHub
|
|
|
|
| 68 |
<h2 class="section-title">About Me</h2>
|
| 69 |
<div class="glass-card">
|
| 70 |
<p style="text-align: justify; line-height: 1.8">
|
| 71 |
+
Hello! I'm Truong-Phuc Nguyen, a CS student specializing in ML, DL,
|
| 72 |
+
and NLP. My expertise spans across NLP tasks, such as: Information
|
| 73 |
+
Retrieval, Question Answering, Text Generation, Summarization for
|
| 74 |
+
Vietnamese text processing using both PLMs and LLMs.
|
|
|
|
|
|
|
| 75 |
</p>
|
| 76 |
<p style="text-align: justify; line-height: 1.8; margin-top: 1rem">
|
| 77 |
+
Through research journey, I have successfully built some NLP demo systems
|
| 78 |
+
demonstrating feasibility, including legal question-answering
|
| 79 |
+
systems, clinical report summarization, and question generation
|
| 80 |
+
tools for education. Currently, I am seeking advanced learning
|
| 81 |
+
opportunities related to CS/NLP where I can leverage my knowledge of
|
| 82 |
+
language modeling, text processing algorithms, and research
|
| 83 |
+
experience.
|
| 84 |
</p>
|
| 85 |
</div>
|
| 86 |
</section>
|
|
|
|
| 94 |
Education
|
| 95 |
</h3>
|
| 96 |
<p style="color: var(--accent); font-weight: 600">
|
| 97 |
+
Bachelor of Engineering in Computer Science (Gifted and Talented
|
| 98 |
+
Programs)
|
| 99 |
</p>
|
| 100 |
<p style="color: var(--text-secondary)">September 2021 - June 2025</p>
|
| 101 |
|
| 102 |
<div class="education-stats">
|
| 103 |
<div class="stat-item">
|
| 104 |
<div class="stat-number">9.04</div>
|
| 105 |
+
<div class="stat-label">#1 in 10 scale</div>
|
| 106 |
</div>
|
| 107 |
<div class="stat-item">
|
| 108 |
<div class="stat-number">3.75</div>
|
| 109 |
+
<div class="stat-label">#2 in 4 scale</div>
|
| 110 |
</div>
|
| 111 |
<div class="stat-item">
|
| 112 |
+
<div class="stat-number">9.9</div>
|
| 113 |
+
<div class="stat-label">thesis score</div>
|
| 114 |
</div>
|
| 115 |
</div>
|
| 116 |
+
<div class="education-stats">
|
| 117 |
+
<p class="publication-meta">
|
| 118 |
+
<strong>Graduate Thesis:</strong> A Study of Vietnamese Legal
|
| 119 |
+
Question Answering with Pre-trained and Large Language Models<br />
|
| 120 |
+
<strong>Score:</strong> 9.9/10 (Awarded Excellence Graduate Thesis
|
| 121 |
+
Presentation)<br />
|
| 122 |
+
</p>
|
| 123 |
+
</div>
|
| 124 |
</div>
|
| 125 |
</section>
|
| 126 |
|
|
|
|
| 135 |
|
| 136 |
<div class="publication-item">
|
| 137 |
<h4>
|
| 138 |
+
<a href="https://www.sciencedirect.com/science/article/abs/pii/S0952197625015726" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 139 |
+
[1] A fine-tuning framework based on question, context, and answer relationships for enhancing legal information retrieval<i class="ri-link"></i>
|
| 140 |
+
</a>
|
| 141 |
</h4>
|
| 142 |
<p class="publication-meta">
|
| 143 |
+
<strong>Authors:</strong> Nhu Hai Phung, Chi Thanh Nguyen, Minh-Tien Nguyen, Thu Ha Nguyen, Huu Loi Le, and <strong>Truong-Phuc Nguyen</strong><br />
|
| 144 |
+
<strong>Published in:</strong> Engineering Applications of Artificial Intelligence, Volume 159, Page 111570. Elsevier, 2025<br />
|
| 145 |
+
<strong>Impact:</strong> WoS-SCIE, Q1, IF: 8.0<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
</p>
|
| 147 |
</div>
|
| 148 |
|
| 149 |
<div class="publication-item">
|
| 150 |
<h4>
|
| 151 |
+
<a href="https://link.springer.com/journal/10506" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 152 |
+
[2] Towards Vietnamese Legal Question Answering: An Empirical Study<i class="ri-link"></i>
|
| 153 |
+
</a>
|
| 154 |
</h4>
|
| 155 |
<p class="publication-meta">
|
| 156 |
+
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>, Van-Quyet Nguyen, and Minh-Tien Nguyen<br />
|
| 157 |
+
<strong>Status:</strong> Submitted to Artificial Intelligence and Law, Springer, 2025
|
|
|
|
|
|
|
| 158 |
</p>
|
| 159 |
</div>
|
| 160 |
|
| 161 |
<div class="publication-item">
|
| 162 |
<h4>
|
| 163 |
+
<a href="https://jst.utehy.edu.vn/index.php/jst/article/view/650" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 164 |
+
[3] Application of Machine Learning in Image Recognition to Detect Some Abnormalities in the Examination Rooms<i class="ri-link"></i>
|
| 165 |
+
</a>
|
| 166 |
</h4>
|
| 167 |
<p class="publication-meta">
|
| 168 |
+
<strong>Authors:</strong> Tien-Dat Nguyen, <strong>Truong-Phuc Nguyen</strong>, and Pham Minh Chuan<br />
|
| 169 |
+
<strong>Published in:</strong> UTEHY Journal of Applied Science and Technology (University Journal), Vol. 40, 2023, pp. 27-32
|
|
|
|
|
|
|
| 170 |
</p>
|
| 171 |
</div>
|
| 172 |
</div>
|
|
|
|
| 178 |
|
| 179 |
<div class="publication-item">
|
| 180 |
<h4>
|
| 181 |
+
<a href="https://2026.aclweb.org/" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 182 |
+
[1] ViLegalLM: Language Models for Vietnamese Legal Text<i class="ri-link"></i>
|
| 183 |
+
</a>
|
| 184 |
</h4>
|
| 185 |
<p class="publication-meta">
|
| 186 |
+
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>, Quy-Nhan Nguyen, and Minh-Tien Nguyen<br />
|
|
|
|
| 187 |
<strong>Status:</strong> Submitted to ACL ARR 2026 January Cycle.
|
| 188 |
</p>
|
| 189 |
</div>
|
| 190 |
+
|
| 191 |
<div class="publication-item">
|
| 192 |
<h4>
|
| 193 |
+
<a href="https://ieeexplore.ieee.org/document/11309584" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 194 |
+
[2] UTEHY-NLU@ALQAC 2025: Dynamic Weighted Ensemble and Adaptive Reasoning for Vietnamese Legal Text Processing<i class="ri-link"></i>
|
| 195 |
+
</a>
|
| 196 |
</h4>
|
| 197 |
<p class="publication-meta">
|
| 198 |
+
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>, Quy-Nhan Nguyen, Manh-Cuong Phan, Chi-Hai Cao, Trinh-Hoai-An Duong, and Minh-Tien Nguyen<br />
|
| 199 |
+
<strong>Published in:</strong> Proceedings of 2025 17th International Conference on Knowledge and System Engineering (KSE 2025), Da Lat, Vietnam, 2025, pp. 1-5<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 200 |
</p>
|
| 201 |
</div>
|
| 202 |
|
| 203 |
<div class="publication-item">
|
| 204 |
<h4>
|
| 205 |
+
<a href="https://link.springer.com/chapter/10.1007/978-981-96-4288-5_34" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 206 |
+
[3] ViEduQA: A New Vietnamese Dataset for Question Answer Generation in Education<i class="ri-link"></i>
|
| 207 |
+
</a>
|
| 208 |
+
|
| 209 |
</h4>
|
| 210 |
<p class="publication-meta">
|
| 211 |
+
<strong>Authors:</strong> <strong>Truong-Phuc Nguyen</strong>, Huu-Loi Le, Pham Quoc-Hung, Nong Quang Huy, Xuan-Hieu Phan, and Minh-Tien Nguyen<br />
|
| 212 |
+
<strong>Published in:</strong> Information and Communication Technology. SOICT 2024. CCIS, vol. 2352, pp. 441-455. Springer, Singapore, 2025<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 213 |
</p>
|
| 214 |
</div>
|
| 215 |
|
| 216 |
<div class="publication-item">
|
| 217 |
<h4>
|
| 218 |
+
<a href="https://ieeexplore.ieee.org/document/11063637" target="_blank" rel="noopener noreferrer" class="paper-title-link">
|
| 219 |
+
[4] Vietnamese Legal Question Answering: An Experimental Study<i class="ri-link"></i>
|
| 220 |
+
</a>
|
| 221 |
</h4>
|
| 222 |
<p class="publication-meta">
|
| 223 |
+
<strong>Authors:</strong> Thu-Ha Nguyen, <strong>Truong-Phuc Nguyen</strong>, Khang T. Trung, Huu-Loi Le, Le Thi Viet Huong, Chi Thanh Nguyen, and Minh-Tien Nguyen<br />
|
| 224 |
+
<strong>Published in:</strong> Proceedings of 2024 16th International Conference on Knowledge and System Engineering (KSE 2024), Kuala Lumpur, Malaysia, 2024, pp. 440-446.<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 225 |
</p>
|
| 226 |
</div>
|
| 227 |
</div>
|
|
|
|
| 249 |
<div class="glass-card">
|
| 250 |
<div class="timeline">
|
| 251 |
<div class="timeline-item">
|
| 252 |
+
<div class="timeline-date">Jan 2026 - present</div>
|
| 253 |
+
<h3>Emotion Recognition in Conversation (ERC)</h3>
|
| 254 |
+
<p style="color: var(--accent); font-weight: 600">
|
| 255 |
+
NLU Laboratory, Hung Yen University of Technology and Education, Vietnam
|
| 256 |
+
</p>
|
| 257 |
+
<p class="timeline-content">(to be updated ...)</p>
|
| 258 |
+
<p
|
| 259 |
+
style="
|
| 260 |
+
color: var(--text-secondary);
|
| 261 |
+
font-size: 0.9rem;
|
| 262 |
+
margin-top: 0.5rem;
|
| 263 |
+
"
|
| 264 |
+
>
|
| 265 |
+
<strong>Advisor:</strong> Assoc. Prof. Minh-Tien Nguyen
|
| 266 |
+
</p>
|
| 267 |
+
</div>
|
| 268 |
+
|
| 269 |
+
<div class="timeline-item">
|
| 270 |
+
<div class="timeline-date">August 2025 - Jan 2026</div>
|
| 271 |
<h3>
|
| 272 |
+
MedMAS: Multi-agent System for Pre-intake Clinical Note Generation in Conversation
|
|
|
|
| 273 |
</h3>
|
| 274 |
<p style="color: var(--accent); font-weight: 600">
|
| 275 |
+
BioInfomatic Laboratory, Feng Chia University, Taiwan
|
| 276 |
</p>
|
| 277 |
<p class="timeline-content">
|
| 278 |
+
Building a multi-agent system that extracts patient information through conversations, generates targeted follow-up questions to gather comprehensive patient data, and creates detailed pre-visit clinical reports. This streamlines the examination process, saving physician time and improving patient experience during medical consultations. The system is evaluated across three core tasks: Named Entity Recognition (NER), Question Generation (QG), and Summarization, achieving state-of-the-art results on both MTS-Dialog and CliniKnote benchmark datasets, demonstrating the superiority of multi-agent architectures over conventional approaches such as in-context learning and instruction-tuning in the medical domain.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 279 |
</p>
|
| 280 |
<p
|
| 281 |
style="
|
|
|
|
| 290 |
|
| 291 |
<div class="timeline-item">
|
| 292 |
<div class="timeline-date">September 2024 - Present</div>
|
| 293 |
+
<h3>ViLegalLM: Language Models for Vietnamese Legal Text</h3>
|
|
|
|
|
|
|
| 294 |
<p style="color: var(--accent); font-weight: 600">
|
| 295 |
NLU Laboratory, Hung Yen University of Technology and
|
| 296 |
Education
|
| 297 |
</p>
|
| 298 |
<p class="timeline-content">
|
| 299 |
+
ViLegalLM comprises one representation model (135M) and two generation models (1.54B, 1.72B) specifically for Vietnamese legal text through continual pretraining on newly 16GB of high-quality legal documents. ViLegalLM achieves state-of-the-art performance across 10 benchmarks spanning four main tasks: Information Retrieval (IR), Question Answering (QA), Natural Language Inference (NLI), and Syllogism Reasoning, outperforming 7 state-of-the-art Vietnamese models and establishing new strong baselines for Vietnamese legal text processing. The project also contributes three large-scale synthetic training datasets to address the shortage of high-quality legal training data in Vietnam.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
</p>
|
| 301 |
<p
|
| 302 |
style="
|
|
|
|
| 313 |
<div class="timeline-date">August 2025 - September 2025</div>
|
| 314 |
<h3>Adaptive Weighted Ensemble for Legal Text Processing</h3>
|
| 315 |
<p style="color: var(--accent); font-weight: 600">
|
| 316 |
+
NLU Laboratory, Hung Yen University of Technology and
|
| 317 |
+
Education, Vietnam
|
| 318 |
</p>
|
| 319 |
<p class="timeline-content">
|
| 320 |
+
Designed a framework combining multiple bi-encoders through query-specific confidence calculation, advanced dynamic weighting, and ensemble score fusion with cross-encoder reranker. Achieved 3rd place in Legal Information Retrieval task (F2-score: 0.8482, 7.51% improvement) and 2nd place in Legal Question Answering (97.56% accuracy) in ALQAC 2025 Competition. Paper accepted at 17th International Conference on Knowledge and System Engineering (KSE 2025).
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 321 |
</p>
|
| 322 |
<p
|
| 323 |
style="
|
|
|
|
| 333 |
<div class="timeline-item">
|
| 334 |
<div class="timeline-date">February 2024 - September 2025</div>
|
| 335 |
<h3>
|
| 336 |
+
IntelliChat - Question Answering System for Vietnam Legal Documents
|
| 337 |
</h3>
|
| 338 |
<p style="color: var(--accent); font-weight: 600">
|
| 339 |
+
NLU Laboratory, Hung Yen University of Technology and Education, Vietnam
|
| 340 |
</p>
|
| 341 |
<p class="timeline-content">
|
| 342 |
+
Built a demo legal question-answering system for Vietnamese, integrating information retrieval with answer extraction/generation optimized for the legal domain. IntelliChat outperforms GPT-3.5 and state-of-the-art open-source LLMs (~7B parameters) in both automatic and human evaluations, and is deployed online to enable Vietnamese citizens to independently access and understand legal documents.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 343 |
</p>
|
| 344 |
<p
|
| 345 |
style="
|
|
|
|
| 358 |
QACTune - Advanced Legal Information Retrieval Framework
|
| 359 |
</h3>
|
| 360 |
<p style="color: var(--accent); font-weight: 600">
|
| 361 |
+
NLU Laboratory, Hung Yen University of Technology and
|
| 362 |
+
Education, Vietnam
|
| 363 |
</p>
|
| 364 |
<p class="timeline-content">
|
| 365 |
Developed a novel fine-tuning framework leveraging
|
| 366 |
Question-Context-Answer relationships for enhancing legal
|
| 367 |
information retrieval in low-resource settings. Average
|
| 368 |
+
improvements of 3.9% and 4.8% in MAP@100. Published in
|
| 369 |
+
Engineering Applications of Artificial Intelligence (WoS-SCIE,
|
| 370 |
+
Q1, IF: 8.0).
|
| 371 |
</p>
|
| 372 |
<p
|
| 373 |
style="
|
|
|
|
| 387 |
Education
|
| 388 |
</h3>
|
| 389 |
<p style="color: var(--accent); font-weight: 600">
|
| 390 |
+
NLU Laboratory, Hung Yen University of Technology and
|
| 391 |
+
Education, Vietnam
|
| 392 |
</p>
|
| 393 |
<p class="timeline-content">
|
| 394 |
Pioneered Vietnamese Question-Answer Generation research in
|
|
|
|
| 588 |
<div class="timeline">
|
| 589 |
<div class="timeline-item">
|
| 590 |
<div class="timeline-date">November 2025</div>
|
| 591 |
+
<h3>
|
| 592 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 593 |
+
KSE 2025 Certification of Presentation
|
| 594 |
+
<i class="ri-link"></i>
|
| 595 |
+
</a>
|
| 596 |
+
</h3>
|
| 597 |
</div>
|
| 598 |
<div class="timeline-item">
|
| 599 |
<div class="timeline-date">July 2025</div>
|
| 600 |
+
<h3>
|
| 601 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 602 |
+
TOEIC Reading & Listening
|
| 603 |
+
<i class="ri-link"></i>
|
| 604 |
+
</a>
|
| 605 |
+
</h3>
|
| 606 |
</div>
|
| 607 |
<div class="timeline-item">
|
| 608 |
<div class="timeline-date">February 2025</div>
|
| 609 |
+
<h3>
|
| 610 |
+
<a href="https://drive.google.com/file/d/1hfjzTfOE7g5Z00r9lnCeiuCYvUAEQYqM" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 611 |
+
Hugging Face Agents Course
|
| 612 |
+
<i class="ri-link"></i>
|
| 613 |
+
</a>
|
| 614 |
+
</h3>
|
| 615 |
</div>
|
| 616 |
<div class="timeline-item">
|
| 617 |
<div class="timeline-date">December 2024</div>
|
| 618 |
<h3>
|
| 619 |
+
<a href="https://drive.google.com/file/d/1kLpV4UPiM5p35W19peId4IxBLU3ZOnNO" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 620 |
+
SOICT 2024 Presentation - Conference Presentation Certification<i class="ri-link"></i>
|
| 621 |
+
</a>
|
| 622 |
</h3>
|
| 623 |
</div>
|
| 624 |
<div class="timeline-item">
|
| 625 |
<div class="timeline-date">February 2024</div>
|
| 626 |
+
<h3>
|
| 627 |
+
<a href="https://www.coursera.org/account/accomplishments/verify/WLBFF5JSP4KR" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 628 |
+
Stanford Machine Learning Specialization
|
| 629 |
+
<i class="ri-link"></i>
|
| 630 |
+
</a>
|
| 631 |
+
</h3>
|
| 632 |
</div>
|
| 633 |
<div class="timeline-item">
|
| 634 |
+
<div class="timeline-date">November 2022</div>
|
| 635 |
+
<h3>
|
| 636 |
+
<a href="https://www.coursera.org/account/accomplishments/professional-cert/A6Z4LZPH9QWB" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 637 |
+
Google Data Analytics Professional Certificate
|
| 638 |
+
<i class="ri-link"></i>
|
| 639 |
+
</a>
|
| 640 |
+
</h3>
|
| 641 |
</div>
|
| 642 |
</div>
|
| 643 |
</div>
|
|
|
|
| 650 |
<div class="award-item">
|
| 651 |
<div class="award-icon"><i class="ri-star-line"></i></div>
|
| 652 |
<div class="award-info">
|
| 653 |
+
<h4>
|
| 654 |
+
<a href="https://drive.google.com/file/d/15Wkn5Zpp1qmXZShZHYFDYsWd9LmL8Q8J/view?usp=sharing" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 655 |
+
TOYOTA Excellence Academic Scholarship
|
| 656 |
+
<i class="ri-link"></i>
|
| 657 |
+
</a>
|
| 658 |
+
</h4>
|
| 659 |
<p class="award-date">November 2024</p>
|
| 660 |
</div>
|
| 661 |
</div>
|
| 662 |
<div class="award-item">
|
| 663 |
<div class="award-icon"><i class="ri-bard-line"></i></div>
|
| 664 |
<div class="award-info">
|
| 665 |
+
<h4>
|
| 666 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 667 |
+
Academic Excellence Scholarships
|
| 668 |
+
<i class="ri-link"></i>
|
| 669 |
+
</a>
|
| 670 |
+
</h4>
|
| 671 |
<p class="award-date">2021-2025</p>
|
| 672 |
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 673 |
4 Academic Excellence & 8 Talented Program Scholarships -
|
| 674 |
+
Consistently ranked #1 in CS program
|
| 675 |
</p>
|
| 676 |
</div>
|
| 677 |
</div>
|
| 678 |
<div class="award-item">
|
| 679 |
<div class="award-icon"><i class="ri-star-line"></i></div>
|
| 680 |
<div class="award-info">
|
| 681 |
+
<h4>
|
| 682 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 683 |
+
Full Tuition Waiver
|
| 684 |
+
<i class="ri-link"></i>
|
| 685 |
+
</a>
|
| 686 |
+
</h4>
|
| 687 |
<p class="award-date">2021-2023</p>
|
| 688 |
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 689 |
Two consecutive academic years
|
|
|
|
| 701 |
<div class="award-item">
|
| 702 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 703 |
<div class="award-info">
|
| 704 |
+
<h4>
|
| 705 |
+
<a href="https://drive.google.com/file/d/1OmMmKChNj_oQIpmG4wS6k2ZjJf1YywAL" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 706 |
+
Second Prize - MOE-level Student Scientific Research
|
| 707 |
+
<i class="ri-link"></i>
|
| 708 |
+
</a>
|
| 709 |
+
</h4>
|
| 710 |
<p class="award-date">June 2025</p>
|
| 711 |
+
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 712 |
+
<strong>Research project:</strong> Research on building question answering system for Vietnamese legal documents
|
| 713 |
+
</p>
|
| 714 |
</div>
|
| 715 |
</div>
|
| 716 |
<div class="award-item">
|
| 717 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 718 |
<div class="award-info">
|
| 719 |
+
<h4>
|
| 720 |
+
<a href="https://www.facebook.com/reel/1962974367572405" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 721 |
+
Excellence Graduate Thesis Presentation
|
| 722 |
+
<i class="ri-link"></i>
|
| 723 |
+
</a>
|
| 724 |
+
</h4>
|
| 725 |
<p class="award-date">June 2025</p>
|
| 726 |
+
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 727 |
+
<strong>Thesis title:</strong> A Study of Vietnamese Legal Question Answering with Pre-trained and Large Language Models
|
| 728 |
+
</p>
|
| 729 |
</div>
|
| 730 |
</div>
|
| 731 |
<div class="award-item">
|
| 732 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 733 |
<div class="award-info">
|
| 734 |
<h4>
|
| 735 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 736 |
+
First Prize - University-level Student Scientific Research
|
| 737 |
+
<i class="ri-link"></i>
|
| 738 |
+
</a>
|
| 739 |
</h4>
|
| 740 |
<p class="award-date">May 2025</p>
|
| 741 |
+
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 742 |
+
<strong>Research project:</strong> Research on building question answering system for Vietnamese legal documents
|
| 743 |
+
</p>
|
| 744 |
</div>
|
| 745 |
</div>
|
| 746 |
<div class="award-item">
|
| 747 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 748 |
<div class="award-info">
|
| 749 |
+
<h4>
|
| 750 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 751 |
+
First Prize - Faculty-level Student Scientific Research
|
| 752 |
+
<i class="ri-link"></i>
|
| 753 |
+
</a>
|
| 754 |
+
</h4>
|
| 755 |
<p class="award-date">March 2025</p>
|
| 756 |
+
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 757 |
+
<strong>Research project:</strong> Research on building question answering system for Vietnamese legal documents
|
| 758 |
+
</p>
|
| 759 |
</div>
|
| 760 |
</div>
|
| 761 |
<div class="award-item">
|
| 762 |
<div class="award-icon"><i class="ri-honour-line"></i></div>
|
| 763 |
<div class="award-info">
|
| 764 |
<h4>
|
| 765 |
+
<a href="BLANK" target="_blank" rel="noopener noreferrer" class="cert-link">
|
| 766 |
+
Second Prize - Faculty-level Student Scientific Research
|
| 767 |
+
<i class="ri-link"></i>
|
| 768 |
+
</a>
|
| 769 |
</h4>
|
| 770 |
<p class="award-date">June 2024</p>
|
| 771 |
+
<p style="color: var(--text-secondary); font-size: 0.9rem">
|
| 772 |
+
<strong>Research project:</strong> Research on developing a student attendance system using facial recognition and detecting unusual behavior in the classroom
|
| 773 |
+
</p>
|
| 774 |
</div>
|
| 775 |
</div>
|
| 776 |
<div class="award-item">
|
|
|
|
| 871 |
|
| 872 |
<!-- Footer -->
|
| 873 |
<footer>
|
| 874 |
+
<p>© 2025 Truong-Phuc Nguyen. All rights reserved.</p>
|
| 875 |
</footer>
|
| 876 |
|
| 877 |
<script src="script.js"></script>
|