amber committed
Commit · 4f7ed3b
Parent(s): 4dedd9a
Add application file

Files changed:
- .gitattributes copy +35 -0
- .gitignore +132 -0
- LICENSE +21 -0
- README copy.md +13 -0
- app.py +222 -0
- index.html +39 -0
- pages/1_🐱_Common_LLM_Demo.py +43 -0
- pages/2_🐣_File_Analyser_Demo.py +40 -0
- pages/3_🗯️_DocQA_Demo.py +260 -0
- pages/4_🌺_Cherokee_Model_with_SFT_Demo.py +449 -0
- pages/5_🦁_Cherokee_Model_with_RAG_Demo.py +252 -0
- pages/6_🌵_Cherokee_Converter.py +228 -0
- requirements.txt +110 -0
- static/cherokee_source.png +0 -0
- static/cherokee_tag.png +0 -0
- static/gif.gif +0 -0
- static/image.png +0 -0
- static/logo.png +0 -0
- static/logo0.png +0 -0
- static/logo1.png +0 -0
- static/lora.png +0 -0
- static/name.png +0 -0
- static/paper1.png +0 -0
- static/paper2-1.png +0 -0
- static/perfomance0.png +0 -0
- static/perfomance1.png +0 -0
- static/perfomance2.png +0 -0
- static/rag.png +0 -0
- static/stack.png +0 -0
.gitattributes copy
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,132 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+# *.html
+private/
+.vscode/
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+./venv
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Qiusheng Wu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README copy.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Myspace
+emoji: 📈
+colorFrom: indigo
+colorTo: green
+sdk: streamlit
+sdk_version: 1.39.0
+app_file: app.py
+pinned: false
+short_description: myspace
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,222 @@
+import streamlit as st
+import leafmap.foliumap as leafmap
+import base64
+# from faker import Faker
+import random
+
+st.set_page_config(layout="wide")
+
+# LOGO_URL_LARGE = "./static/lora.png"
+st.logo(
+    "./static/logo1.png",
+    link="https://nicedata.eu.org/"
+)
+
+st.sidebar.title("Contact")
+with st.sidebar:
+    st.info(
+        """
+        - Email: [sh.wang4067@gmail.com](mailto:sh.wang4067@gmail.com)
+        - Tel: +86 181-1615-2720
+        - Homepage: [nicedata.eu.org](https://nicedata.eu.org)
+        - Github: [wdzhwsh4076](https://github.com/wdzhwsh4076)
+        - Address: Boda Campus, Xinjiang University, Urumqi City, China
+        """
+    )
+
+p1, p2 = st.columns(2)
+with p2:
+    st.subheader("")
+    st.image("./static/self.jpeg", width=200)
+with p1:
+    st.title("Shaohuang Wang")
+    st.markdown(
+        """
+        I'm Shaohuang Wang, a Computer Science Master's student at Xinjiang University, focusing on Machine Learning and Data Structures.
+
+        My research interests include Large Language Models and Recommender Systems. I'm also a developer at NLPIR Lab, working on NLP and data processing technologies.
+
+        [Email](mailto:sh.wang4067@gmail.com) / [CV](https://nicedata.eu.org) / [Bio](https://nicedata.eu.org) / [Google Scholar](https://scholar.google.com) / [Twitter](https://twitter.com) / [Github](https://github.com/wdzhwsh4076)
+        """
+    )
+
+# st.info("Click on the left sidebar menu to navigate to the different apps.")
+
+# =========================================================================
+st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
+st.header("👨🏻‍🎓 Education", divider="rainbow")
+st.markdown("""
+### Xinjiang University, China
+- **M.Eng. in Computer Science** (2022.09 - Present)
+- GPA: 3.5/4.0
+- Core Courses: Machine Learning, Computer Networks, Data Structures
+- Thesis: Research on scientific and technological information recommendation via LLM
+- Research Interests: LLM, RAG, SFT, Fine-tuning, Recommender Systems
+
+### Shanghai University of Engineering Science, China
+- **B.Eng. in Vehicle Engineering** (2016.09 - 2020.06)
+- GPA: 3.1/4.0
+- Awards: National Scholarship (Top 1%), First-Class Scholarship (Top 3%)
+""")
+
+# =========================================================================
+st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
+st.header("🧑🏻‍💻 Publication", divider="rainbow")
+
+def get_file_url(path):
+    # Return the file at `path` as a base64 string, for use in inline data URLs.
+    with open(path, "rb") as file_:
+        contents = file_.read()
+    return base64.b64encode(contents).decode("utf-8")
+
+p1, p2 = st.columns(2)
+old_skeleton_url = get_file_url("./static/lora.png")
+new_skeleton_url = get_file_url("./static/rag.png")
+with p1:
+    st.subheader("Bypassing LLM Safeguards: The In-Context Tense Attack Approach.")
+    st.info("Wang, S et al. International Conference on Computer Engineering and Networks, 2406.12243 (2024). (Accepted)")
+    st.markdown("""
+    We explore the power of combining tense attacks with in-context examples in manipulating the security of LLMs and propose the In-Context Tense Attack (ITA) for jailbreaking purposes.
+    """)
+    f1, f2, f3 = st.columns(3)
+    with f1:
+        # The download buttons serve placeholder assets.
+        with open("./static/lora.png", "rb") as file1:
+            btn1 = st.download_button(
+                key='4',
+                label="Arxiv",
+                data=file1,
+                file_name="flower.png",
+                mime="image/png",
+                type="primary",
+                use_container_width=True,
+            )
+    with f2:
+        with open("./static/lora.png", "rb") as file2:
+            btn = st.download_button(
+                key='1',
+                label="PDF",
+                data=file2,
+                file_name="flower.png",
+                mime="image/png",
+                type="secondary",
+                use_container_width=True,
+            )
+    with f3:
+        st.link_button("Cite", "https://streamlit.io/gallery", use_container_width=True)
+
+with p2:
+    # st.markdown(
+    #     f'<img src="data:image/gif;base64,{new_skeleton_url}" width=450 alt="demo gif">',
+    #     unsafe_allow_html=True,
+    # )
+    st.image("./static/paper1.png", width=450)
+    st.caption("""
+    Tense Attack is a technique targeting Large Language Models (LLMs) that
+    exploits the models' potential vulnerability when processing requests phrased in
+    the past tense.
+    """)
+
+st.divider()
+p1, p2 = st.columns(2)
+old_skeleton_url = get_file_url("./static/lora.png")
+new_skeleton_url = get_file_url("./static/rag.png")
+with p2:
+    st.subheader("CherryRec: Enhancing News Recommendation Quality via LLM driven Framework.")
+    st.info("Wang, S et al. CherryRec: Enhancing News Recommendation Quality via LLM driven Framework. ICASSP (2025). (Under Review)")
+    st.markdown("""
+    Introduced CherryRec, a news recommendation framework using LLMs to filter low-value news and recommend high-quality news by understanding user preferences and integrating multi-dimensional scores.
+    """)
+    f1, f2, f3 = st.columns(3)
+    with f1:
+        with open("./static/lora.png", "rb") as file1:
+            btn = st.download_button(
+                key='2',
+                label="Arxiv",
+                data=file1,
+                file_name="flower.png",
+                mime="image/png",
+                type="primary",
+                use_container_width=True,
+            )
+    with f2:
+        with open("./static/lora.png", "rb") as file2:
+            btn = st.download_button(
+                key='3',
+                label="PDF",
+                data=file2,
+                file_name="flower.png",
+                mime="image/png",
+                type="secondary",
+                use_container_width=True,
+            )
+    with f3:
+        st.link_button("Cite", "https://streamlit.io/gallery", use_container_width=True)
+
+with p1:
+    st.image("./static/paper2-1.png", width=450)
+    st.caption("""
+    Knowledge-aware News Rapid Selector (KnRS) quickly identifies relevant
+    news candidates by assessing user interaction history and content attributes.
+    Content-aware News LLM Evaluator (CnLE) refines selections using a fine-tuned
+    LLM, deeply understanding user preferences to enhance personalized news recommendations.
+    """)
+
+with st.expander("A list of other publications"):
+    st.markdown(
+        """
+        1. **Wang, S** et al. "Bypassing LLM Safeguards: The In-Context Tense Attack Approach." International Conference on Computer Engineering and Networks, 2406.12243 (2024). (Accepted)
+        2. **Wang, S** et al. "CherryRec: Enhancing News Recommendation Quality via LLM driven Framework." ICASSP (2025). (Under Review)
+        3. Liang, Y & **Wang, S** et al. "LLaMA-MoT: A Cost-Effective Framework for Visual-Linguistic Instruction Tuning Based on Multi-Head Adapters and Chain-of-Thought." ESWA (2024). (Under Review)
+        4. **Wang, S** et al. "An agile construction method of instruction fine-tuning dataset based on semi-structured data." Patent (2024). (Submitted)
+        5. **Wang, S** et al. "Finite element analysis of modular automotive body based on Ansys." Guangxi Journal of Light Industry (2020). (Accepted)
+        6. **Wang, S** et al. "Buffer connecting device for vehicle." Patent (2020). (Accepted)
+
+        *Also holds 8 further patents, along with various other publications.*
+        """
+    )
+# =========================================================================
+
+# =========================================================================
+st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
+st.header("🧑🏻‍🏫 Research Experience", divider="rainbow")
+st.markdown("""
+### Domain Information Tracking and Processing Project
+- **Developer@NLPIR Lab** (2022.09 - Present)
+- Responsible for the development of the algorithm tool layer, including data collection, review and correction, dynamic selection, keyword extraction, and briefing generation algorithms.
+- Utilized Elasticsearch and MySQL databases for data storage and processing, optimizing data query and analysis processes.
+- Achieved rapid system deployment and front-end/back-end separation through Docker, simplifying operations and maintenance and enhancing system maintainability.
+- **Technology Stack**: Python, Elasticsearch, MySQL, Docker, Vue.js, FastAPI
+
+### Doc2QA Framework for Large Language Model SFT Datasets
+- **Developer@NLPIR Lab** (2023.04 - Present)
+- Designed and released a comprehensive dataset for QA instruction fine-tuning using semi-structured data, providing a valuable resource for future research.
+- Developed a novel framework, "Doc2QA", based on Large Language Models (LLMs) to generate question-answer pairs from semi-structured data such as HTML, DOC, and PDF.
+- **Technology Stack**: Python, LLaMA-Factory, vLLM, FastAPI, Docker, JavaScript
+""")
+
+st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
+st.header("🚵🏻‍♂️ Skills", divider="rainbow")
+st.markdown(
+    """
+    - **Proficient in Coding**: Python, FastAPI, Elasticsearch, Docker, Vue.js, Nginx
+    - **Model Training in AI/ML**: PyTorch, TensorFlow, LlamaIndex, vLLM, LLaMA-Factory
+    - **Simulation and Design**: AutoCAD, CATIA, SolidWorks, ANSYS, 3DMax, Adobe Photoshop/Illustrator
+    - **Languages**: Chinese (native), English (IELTS: 6.5, L: 6.5 R: 7.5 W: 6.0 S: 6.0), Japanese (JLPT-N2)
+    """
+)
+st.image(
+    "./static/stack.png",
+)
+# row1_col1, row1_col2 = st.columns(2)
+# with row1_col1:
+#     st.image("https://github.com/giswqs/data/raw/main/timelapse/spain.gif")
+#     st.image("https://github.com/giswqs/data/raw/main/timelapse/las_vegas.gif")
+# with row1_col2:
+#     st.image("https://github.com/giswqs/data/raw/main/timelapse/goes.gif")
+#     st.image("https://github.com/giswqs/data/raw/main/timelapse/fire.gif")
index.html
ADDED
@@ -0,0 +1,39 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Streamlit for Geospatial</title>
+    <style type="text/css">
+      html {
+        overflow: auto;
+      }
+      html,
+      body,
+      div,
+      iframe {
+        margin: 0px;
+        padding: 0px;
+        height: 100%;
+        border: none;
+      }
+      iframe {
+        display: block;
+        width: 100%;
+        border: none;
+        overflow-y: auto;
+        overflow-x: hidden;
+      }
+    </style>
+  </head>
+  <body>
+    <iframe
+      src="https://share.streamlit.io/giswqs/streamlit-geospatial/app.py"
+      frameborder="0"
+      marginheight="0"
+      marginwidth="0"
+      width="100%"
+      height="100%"
+      scrolling="auto"
+    >
+    </iframe>
+  </body>
+</html>
pages/1_🐱_Common_LLM_Demo.py
ADDED
@@ -0,0 +1,43 @@
+from openai import OpenAI
+import streamlit as st
+import dotenv
+import os
+
+# Load environment variables from .env file
+dotenv.load_dotenv()
+st.set_page_config(layout="wide")
+
+# LOGO_URL_LARGE = "./static/lora.png"
+st.logo(
+    "./static/logo1.png",
+    link="https://nicedata.eu.org/"
+)
+
+with st.sidebar:
+    openai_api_key = st.text_input("API Key", key="chatbot_api_key", type="password")
+    "[Get an API key](https://platform.openai.com/account/api-keys)"
+    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
+    "[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("💬 Chatbot")
+st.caption("🚀 A Streamlit chatbot powered by Wang")
+if "messages" not in st.session_state:
+    st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
+
+for msg in st.session_state.messages:
+    st.chat_message(msg["role"]).write(msg["content"])
+
+if prompt := st.chat_input():
+    # if not openai_api_key:
+    #     st.info("Please add your OpenAI API key to continue.")
+    #     st.stop()
+
+    # NOTE: credentials come from environment variables, not from the sidebar input above.
+    client = OpenAI(
+        api_key=os.environ.get("SOTA_API_KEY"),
+        base_url=os.environ.get("SOTA_API_BASE")
+    )
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    st.chat_message("user").write(prompt)
+    response = client.chat.completions.create(model=os.environ.get("SOTA_API_MODEL"), messages=st.session_state.messages)
+    msg = response.choices[0].message.content
+    st.session_state.messages.append({"role": "assistant", "content": msg})
+    st.chat_message("assistant").write(msg)
pages/2_🐣_File_Analyser_Demo.py
ADDED
@@ -0,0 +1,40 @@
+import streamlit as st
+import anthropic
+
+st.set_page_config(layout="wide")
+
+# LOGO_URL_LARGE = "./static/lora.png"
+st.logo(
+    "./static/logo1.png",
+    link="https://nicedata.eu.org/"
+)
+
+with st.sidebar:
+    anthropic_api_key = st.text_input("API Key", key="file_qa_api_key", type="password")
+    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
+    "[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("📝 File Analysis Bot")
+uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
+question = st.text_input(
+    "Ask something about the article",
+    placeholder="Can you give me a short summary?",
+    disabled=not uploaded_file,
+)
+
+if uploaded_file and question and not anthropic_api_key:
+    st.info("Please add your Anthropic API key to continue.")
+
+if uploaded_file and question and anthropic_api_key:
+    article = uploaded_file.read().decode()
+    prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n<article>
+{article}\n\n</article>\n\n{question}{anthropic.AI_PROMPT}"""
+
+    # Legacy Text Completions API (older versions of the anthropic SDK).
+    client = anthropic.Client(api_key=anthropic_api_key)
+    response = client.completions.create(
+        prompt=prompt,
+        stop_sequences=[anthropic.HUMAN_PROMPT],
+        model="claude-v1",  # "claude-2" for Claude 2 model
+        max_tokens_to_sample=100,
+    )
+    st.write("### Answer")
+    st.write(response.completion)
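Aside: the page above calls Anthropic's legacy Text Completions endpoint (`anthropic.Client(...).completions.create` with `HUMAN_PROMPT`/`AI_PROMPT`), which only works with older SDK versions. As a minimal sketch of the same question-answering step against the newer Messages API — the model name below is an assumption, and the Streamlit wiring is unchanged:

```python
# Sketch only: the same file-QA call via Anthropic's Messages API.
import anthropic

def answer_question(api_key: str, article: str, question: str) -> str:
    client = anthropic.Anthropic(api_key=api_key)
    response = client.messages.create(
        model="claude-3-haiku-20240307",  # assumption: any current Claude model works here
        max_tokens=300,
        messages=[
            {
                "role": "user",
                "content": f"Here's an article:\n\n<article>\n{article}\n</article>\n\n{question}",
            }
        ],
    )
    # The Messages API returns a list of content blocks; take the first text block.
    return response.content[0].text
```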
pages/3_🗯️_DocQA_Demo.py
ADDED
@@ -0,0 +1,260 @@
+import streamlit as st
+# import streamlit.components.v1 as components
+# components.iframe("https://cherokee.nicedata.eu.org/", height=500)
+import streamlit.components.v1 as components
+import base64
+from faker import Faker
+import random
+from datetime import datetime
+import pandas as pd
+import requests
+
+st.set_page_config(layout="wide")
+
+# LOGO_URL_LARGE = "./static/lora.png"
+st.logo(
+    "./static/logo1.png",
+    link="https://nicedata.eu.org/"
+)
+
+st.title("🔥 DocQA")
+st.markdown(
+    """
+    Currently, enhancing the instruction-following ability of Large Language Models (LLMs) largely relies on high-quality instruction-response pairs. However, existing methods for constructing Supervised Fine-Tuning (SFT) data have several shortcomings: 1) reliance on a single sample source, typically unstructured and unsupervised texts, which neglects the more prevalent semi-structured data and leads to excessively high training costs; 2) issues with data quality, such as QA questions lacking focus, short and vague responses, and severe hallucinations in the answers.
+    To tackle these challenges, we introduce the Doc2QA framework for QA data construction, aimed at efficiently leveraging the diverse and widely available semi-structured data (including HTML, DOC, PDF, etc.) to generate QA pairs. This approach reduces the high preparation costs and the inaccuracies caused by data hallucinations, such as short, unfocused, and hallucinatory QA pairs.
+    Experiments demonstrate that our method surpasses classical QA construction techniques on multiple test datasets and benchmarks, especially on AlpacaEval, where the Doc2QA model, using only xx\% of the training data, achieved an xx\% performance improvement. Additionally, manual review confirmed the exceptional quality of the generated dataset, significantly enhancing the practical value of SFT data and setting a new benchmark for the precision of LLMs' instruction following. This represents a significant step towards more efficient and accurate LLM training.
+    """
+)
+st.info("Click on the left sidebar menu to navigate to the different apps.")
+
+# Border Demo
+st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
+st.header("🔲 Detail", divider="rainbow")
+st.markdown("You can now customize `st.container` and `st.form` by adding or removing the border")
+
+# Initialize Faker to generate fake data
+fake = Faker()
+
+def random_date(start, end):
+    # Pick a uniformly random datetime between `start` and `end`.
+    return start + (end - start) * random.random()
+
+def create_sample_data(num_rows=10):
+    data = {
+        "Customer Name": [fake.name() for _ in range(num_rows)],
+        "Product": [random.choice(["Laptop", "Smartphone", "Tablet", "Headphones", "Charger"]) for _ in range(num_rows)],
+        "Quantity": [random.randint(1, 5) for _ in range(num_rows)],
+        "Order Date": [random_date(datetime(2021, 1, 1), datetime(2023, 1, 1)).strftime("%Y-%m-%d") for _ in range(num_rows)]
+    }
+    return pd.DataFrame(data)
+
+df = create_sample_data(10)
+
+after, before = st.columns(2)
+
+with after:
+    st.subheader("New customizable borders")
+    st.info("⬇️ :red[st.container] can now be configured to have a border")
+
+    with st.container(border=True):
+        # st.info("This text and table are inside a container with a border")
+        st.dataframe(data=df, use_container_width=True)
+
+    st.code(
+        """
+        with st.container(border=True):
+            st.dataframe(data=df, use_container_width=True)
+        """
+    )
+
+    st.info("⬇️ :red[st.form] can now be configured to appear without a border")
+
+    with st.form(key="my_form_2", border=False):
+        st.dataframe(data=df, use_container_width=True)
+        st.form_submit_button(label="Submit")
+
+    st.code(
+        """
+        with st.form(key="my_form_2", border=False):
+            st.dataframe(data=df, use_container_width=True)
+            st.form_submit_button(label="Submit")
+        """
+    )
+
+with before:
+    st.subheader("Old non-customizable borders")
+    st.info("⬇️ :red[st.container] does not have a border")
+    with st.container():
+        st.dataframe(data=df, use_container_width=True)
+
+    # Instead of st.empty(), use a markdown with empty space
+    st.markdown('<div style="height: 31px;"></div>', unsafe_allow_html=True)
+
+    st.code(
+        """
+        with st.container():
+            st.dataframe(data=df, use_container_width=True)
+        """
+    )
+
+    st.info("⬇️ :red[st.form] always has a border")
+    with st.form(key="my_form_1"):
+        st.dataframe(data=df, use_container_width=True)
+        st.form_submit_button(label="Submit")
+
+    st.code(
+        """
+        with st.form(key="my_form"):
+            st.info("This text and table are inside a form with a border")
+            st.dataframe(data=df, use_container_width=True)
+            st.form_submit_button(label="Submit")
+        """
+    )
+
+## -------------------------------------------------------------------- ##
+with st.expander("Here are some details about this training process."):
+    st.markdown(
+        """
+        ```shell
+        bf16: true
+        cutoff_len: 1024
+        dataset: dict_word_v4,dict_sentence_v4
+        dataset_dir: data
+        ddp_timeout: 180000000
+        do_train: true
+        finetuning_type: lora
+        flash_attn: auto
+        gradient_accumulation_steps: 8
+        include_num_input_tokens_seen: true
+        learning_rate: 0.0001
+        logging_steps: 5
+        lora_alpha: 16
+        lora_dropout: 0.1
+        lora_rank: 8
+        lora_target: all
+        lr_scheduler_type: cosine
+        max_grad_norm: 1.0
+        max_samples: 100000
+        model_name_or_path: /wsh/models/Meta-Llama-3-8B-Instruct
+        num_train_epochs: 40.0
+        optim: adamw_torch
+        output_dir: saves/Custom/lora/train_2024-09-15-17-54-11-v4-learn_rate_0001
+        packing: false
+        per_device_train_batch_size: 2
+        plot_loss: true
+        preprocessing_num_workers: 16
+        report_to: none
+        save_steps: 100
+        stage: sft
+        warmup_steps: 0
+        ```
+        """
+    )
+
+tab1, tab2 = st.tabs(
+    [
+        "Multi-Dimensional Scatter Analysis",
+        "Scatter Basics"
+    ]
+)
+
+with tab1:
+    st.subheader("Dynamic Scatter Chart", anchor=False)
+    st.caption("Choose the dimension for the x-axis, y-axis, color, and size to explore average house price, average rent, geographic region, and median income in the United States.")
+
+    st.divider()
+    st.code(
+        """
+        import streamlit as st
+        import pandas as pd
+
+        @st.cache_data
+        def load_data():
+            df = pd.read_csv('1.27/pages/data_simplified.csv')
+            df['Average House Price'] = df['Average House Price'].str.replace('$', '').str.replace(',', '').astype(int)
+            df['Median Income'] = df['Median Income'].str.replace('$', '').str.replace(',', '').astype(int)
+
+            sorted_regions = df.groupby('Region in the US')['Average House Price'].mean().sort_values().index.tolist()
+            df['Region in the US'] = pd.Categorical(df['Region in the US'], categories=sorted_regions, ordered=True)
+            df = df.sort_values('Region in the US')
+
+            # Create income buckets
+            income_bins = [0, 50000, 100000, 150000, 200000, float('inf')]
+            income_labels = ['<50k', '50k-100k', '100k-150k', '150k-200k', '200k+']
+            df['Income Bucket'] = pd.cut(df['Median Income'], bins=income_bins, labels=income_labels, right=False)
+
+            df['Income Bucket'] = pd.Categorical(df['Income Bucket'], categories=income_labels, ordered=True)
+            df = df.sort_values('Income Bucket')
+
+            return df
+
+        df = load_data()
+
+        col1, col2, col3, col4 = st.columns(4)
+
+        x_axis = col1.selectbox('X-axis:', df.columns, index=1, disabled=True)
+        y_axis = col2.selectbox('Y-axis:', df.columns, index=0)
+        color_dim = col3.selectbox('Color:', df.columns, index=3)
+        size_dim = col4.selectbox('Size:', df.columns, index=2)
+
+        st.scatter_chart(
+            df,
+            x=x_axis,
+            y=y_axis,
+            color=color_dim,
+            size=size_dim,
+            height=600,
+            use_container_width=True
+        )
+        """
+    )
+
+with tab2:
+    st.subheader("Simple Scatter Chart", anchor=False)
+    st.caption("The chart shows some positive correlation between Average Rent and Average House Price")
+
+    st.divider()
+    st.code(
+        """
+        import streamlit as st
+        import pandas as pd
+
+        @st.cache_data
+        def load_data():
+            df = pd.read_csv('1.27/pages/data_simplified.csv')
+            df['Average House Price'] = df['Average House Price'].str.replace('$', '').str.replace(',', '').astype(int)
+            df['Median Income'] = df['Median Income'].str.replace('$', '').str.replace(',', '').astype(int)
+
+            sorted_regions = df.groupby('Region in the US')['Average House Price'].mean().sort_values().index.tolist()
+            df['Region in the US'] = pd.Categorical(df['Region in the US'], categories=sorted_regions, ordered=True)
+            df = df.sort_values('Region in the US')
+
+            # Create income buckets
+            income_bins = [0, 50000, 100000, 150000, 200000, float('inf')]
+            income_labels = ['<50k', '50k-100k', '100k-150k', '150k-200k', '200k+']
+            df['Income Bucket'] = pd.cut(df['Median Income'], bins=income_bins, labels=income_labels, right=False)
+
+            df['Income Bucket'] = pd.Categorical(df['Income Bucket'], categories=income_labels, ordered=True)
+            df = df.sort_values('Income Bucket')
+
+            return df
+
+        df = load_data()
+
+        st.scatter_chart(
+            df,
+            x='Average Rent',
+            y='Average House Price',
+            height=600,
+            use_container_width=True
+        )
+        """
+    )
+
+# ============================================
+# ============================================
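Aside: the Doc2QA description above outlines a pipeline that turns semi-structured documents (HTML, DOC, PDF) into QA pairs for SFT, but the commit does not include that pipeline. The following is only a minimal sketch of the idea under stated assumptions: `llm_complete` is a hypothetical helper standing in for whatever LLM endpoint is used, and the prompt wording and quality filter are illustrative, not the paper's method.

```python
# Minimal sketch of a Doc2QA-style step; not the committed implementation.
import json

def llm_complete(prompt: str) -> str:
    # Hypothetical helper: wire this to your LLM endpoint of choice.
    raise NotImplementedError

def doc_to_qa_pairs(section_title: str, section_text: str) -> list[dict]:
    """Turn one semi-structured document section into SFT-ready QA pairs."""
    prompt = (
        "Given the document section below, write focused question-answer pairs "
        "that are fully supported by the text. Return a JSON list of "
        '{"question": ..., "answer": ...} objects.\n\n'
        f"# {section_title}\n{section_text}"
    )
    pairs = json.loads(llm_complete(prompt))
    # Crude quality gate against the short/vague answers the abstract criticizes.
    return [p for p in pairs if len(p.get("answer", "")) > 20]
```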
pages/4_🌺_Cherokee_Model_with_SFT_Demo.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
# import streamlit.components.v1 as components
|
| 3 |
+
# components.iframe("https://cherokee.nicedata.eu.org/", height=500)
|
| 4 |
+
import streamlit.components.v1 as components
|
| 5 |
+
import base64
|
| 6 |
+
# from faker import Faker
|
| 7 |
+
import random
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
import pandas as pd
|
| 10 |
+
import requests
|
| 11 |
+
import time
|
| 12 |
+
|
| 13 |
+
st.set_page_config(layout="wide")
|
| 14 |
+
|
| 15 |
+
# LOGO_URL_LARGE="./static/lora.png"
|
| 16 |
+
st.logo(
|
| 17 |
+
"./static/logo1.png",
|
| 18 |
+
link="https://nicedata.eu.org/"
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
with st.sidebar:
|
| 23 |
+
st.title('💬 Cherokee Model')
|
| 24 |
+
st.write('This chatbot is created using the open-source Llama 3 LLM model from Meta.')
|
| 25 |
+
|
| 26 |
+
st.markdown('📖 Learn how to build this app in this [blog](https://nicedata.eu.org/Cherokee)!')
|
| 27 |
+
|
| 28 |
+
st.info(
|
| 29 |
+
"""
|
| 30 |
+
- Email: [sh.wang4067@gmail.com](mailto:sh.wang4067@gmail.com)
|
| 31 |
+
- Tel: +86 181-1615-2720
|
| 32 |
+
- Homepage: [nicedata.eu.org](https://nicedata.eu.org)
|
| 33 |
+
- Github: [wdzhwsh4076](https://github.com/wdzhwsh4076)
|
| 34 |
+
- Address: Boda Campus, Xinjiang University, Urumqi City, China
|
| 35 |
+
"""
|
| 36 |
+
)
|
| 37 |
+
st.markdown(
|
| 38 |
+
"""
|
| 39 |
+
### Link
|
| 40 |
+
|
| 41 |
+
[1. cherokee dictionary](https://www.cherokeedictionary.net/)
|
| 42 |
+
|
| 43 |
+
[2. cherokee 500 word](https://www.cherokeedictionary.net/first500)
|
| 44 |
+
"""
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
st.title("Cherokee Language Model with SFT")
|
| 48 |
+
# st.markdown(
|
| 49 |
+
# """
|
| 50 |
+
# I am excited to present the latest language model, which has been fine-tuned using the state-of-the-art LoRA (Low-Rank Adaptation) technique on the robust foundation of the LLaMA3-8B model.
|
| 51 |
+
# This is an open-source project and you are very welcome to contribute your comments, questions, resources, and apps as [issues](https://github.com/giswqs/streamlit-geospatial/issues) or
|
| 52 |
+
# [pull requests](https://github.com/giswqs/streamlit-geospatial/pulls) to the [GitHub repository](https://github.com/giswqs/streamlit-geospatial).
|
| 53 |
+
|
| 54 |
+
# """
|
| 55 |
+
# )
|
| 56 |
+
st.info("Click on the left sidebar menu to navigate to the different apps.")
|
| 57 |
+
|
| 58 |
+
text="""
|
| 59 |
+
<script type="module" crossorigin src="https://gradio.s3-us-west-2.amazonaws.com/4.41.0/gradio.js"></script>
|
| 60 |
+
<gradio-app src="https://cherokee.nicedata.eu.org" eager="true" initial_height="500px"></gradio-app>
|
| 61 |
+
"""
|
| 62 |
+
components.html(text, width=700, height=1150, scrolling=True)
|
| 63 |
+
|
| 64 |
+
# perfomance
|
| 65 |
+
st.markdown('<a name="new-app-loading-animation"></a>', unsafe_allow_html=True)
|
| 66 |
+
st.header("⏳ Performance", divider="rainbow")
|
| 67 |
+
st.markdown("This model has demonstrated exceptional performance in Cherokee language translation tasks, surpassing mainstream models such as LLaMA3-8B, LLaMA3.1-8B, and PHI3. It has achieved state-of-the-art (SOTA) results without the common issue of catastrophic forgetting. Here are some figures about this model. ")
|
| 68 |
+
|
| 69 |
+
# col1 = st.columns(3)
|
| 70 |
+
# col1, col2, col3 = st.columns(3)
|
| 71 |
+
# col1.metric("Rouge-l", "78.2", "76.1")
|
| 72 |
+
# # col2.metric("Wind", "9 mph", "-8%")
|
| 73 |
+
# # col3.metric("Humidity", "86%", "4%")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def get_file_url(path):
|
| 77 |
+
file_ = open(path, "rb")
|
| 78 |
+
contents = file_.read()
|
| 79 |
+
data_url = base64.b64encode(contents).decode("utf-8")
|
| 80 |
+
file_.close()
|
| 81 |
+
return data_url
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
gif1, gif2 = st.columns(2)
|
| 86 |
+
old_skeleton_url = get_file_url("./static/perfomance2.png")
|
| 87 |
+
new_skeleton_url = get_file_url("./static/perfomance1.png")
|
| 88 |
+
with gif1:
|
| 89 |
+
# st.subheader("Detail")
|
| 90 |
+
|
| 91 |
+
st.markdown(
|
| 92 |
+
f'<img src="data:image/gif;base64,{old_skeleton_url}" width=450 alt="demo gif">',
|
| 93 |
+
unsafe_allow_html=True,
|
| 94 |
+
)
|
| 95 |
+
st.caption("Fig 1: Loss in the process of training")
|
| 96 |
+
|
| 97 |
+
with gif2:
|
| 98 |
+
# st.subheader("Detail")
|
| 99 |
+
|
| 100 |
+
st.markdown(
|
| 101 |
+
f'<img src="data:image/gif;base64,{new_skeleton_url}" width=450 alt="demo gif">',
|
| 102 |
+
unsafe_allow_html=True,
|
| 103 |
+
)
|
| 104 |
+
st.caption("Fig 2: Six Key Performance Indicators")
|
| 105 |
+
|
| 106 |
+
tab_url = get_file_url("./static/perfomance0.png")
|
| 107 |
+
# st.subheader("Detail")
|
| 108 |
+
st.markdown(
|
| 109 |
+
f'<img src="data:image/gif;base64,{tab_url}" width=700 alt="demo gif">',
|
| 110 |
+
unsafe_allow_html=True,
|
| 111 |
+
)
|
| 112 |
+
st.caption("Tab 1: Data about Six Key Performance Indicators in five case ")
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
st.markdown("""
|
| 116 |
+
Here are part data about evaluation processtion.
|
| 117 |
+
""")
|
| 118 |
+
df = pd.DataFrame(
|
| 119 |
+
{
|
| 120 |
+
"prompt": [
|
| 121 |
+
"translate: ᎧᏃᎮᏍᎩ",
|
| 122 |
+
"translate: ᏧᎩᏨᏅᏓ",
|
| 123 |
+
"translate: ᎤᏓᏅᎦᎸᏓ",
|
| 124 |
+
"translate: ᎤᏲᎢ",
|
| 125 |
+
"translate: ᏦᏪᏅᏒ ᏗᎦᏎᏍᏙᏗ ᏗᎪᏪᎵ",
|
| 126 |
+
"translate: ᎠᎵᏍᏕᎸᏙᏗᏱ",
|
| 127 |
+
"translate: ᎧᏁᎢᏍᏙᏗ",
|
| 128 |
+
"translate: ᎠᏲᏓᏅ",
|
| 129 |
+
"translate: ᎩᎵᏏ",
|
| 130 |
+
"translate: ᎤᎬᏫᏳ ᎠᏂᏁᎳ",
|
| 131 |
+
"translate: ᎠᏧᎲᏍᎦ",
|
| 132 |
+
"translate: ᎣᏏ ᎠᏰᎸᏅ",
|
| 133 |
+
"translate: ᎠᏍᏓᏱᏓᎩᏍᏙᏗ",
|
| 134 |
+
"translate: ᏦᎳᏂ",
|
| 135 |
+
"translate: ᏥᏔᎦ",
|
| 136 |
+
"translate: ᎤᎶᏘᎭ",
|
| 137 |
+
"translate: ᎤᏓᏅᏖᏗ",
|
| 138 |
+
"translate: ᎤᏓᏩᏛᎯᏙᎸᎩ",
|
| 139 |
+
"translate: ᏗᎵᎪᎲᏍᎩ",
|
| 140 |
+
"translate: ᏗᏂᏃᎨᏂ",
|
| 141 |
+
"translate: ᏧᎾᏓᎸᏉᏗ ᎢᎦ",
|
| 142 |
+
"translate: ᏃᏈᏏ ᏗᎾᎦᏎᏍᏗᏍᎩ",
|
| 143 |
+
"translate: ᎠᏋᏌ",
|
| 144 |
+
"translate: ᎠᏎᎯᎯ ᎠᏍᏆᏂᎪᏗᏓᏅ ᏗᎫᎪᏔᏂᏙᎸᎯ",
|
| 145 |
+
"translate: ᏗᏙᎳᎩ",
|
| 146 |
+
"translate: ᎧᎾᏍᏕᏢᏗ",
|
| 147 |
+
"translate: ᎦᏃᏍᎩᏍᏗ",
|
| 148 |
+
"translate: ᎬᏂᎦᎵᏍᏗ",
|
| 149 |
+
"translate: ᎦᎸᏙᏗ ᎪᏪᎳᎾᎥᎢ"
|
| 150 |
+
],
|
| 151 |
+
"label": [
|
| 152 |
+
"indicator",
|
| 153 |
+
"daily",
|
| 154 |
+
"pure",
|
| 155 |
+
"bad",
|
| 156 |
+
"homework",
|
| 157 |
+
"relieve",
|
| 158 |
+
"insinuate",
|
| 159 |
+
"grade",
|
| 160 |
+
"english",
|
| 161 |
+
"queen",
|
| 162 |
+
"He is fishing",
|
| 163 |
+
"satisfied",
|
| 164 |
+
"wrench",
|
| 165 |
+
"window",
|
| 166 |
+
"chicken",
|
| 167 |
+
"He is hatting it",
|
| 168 |
+
"worries",
|
| 169 |
+
"visited",
|
| 170 |
+
"interactive",
|
| 171 |
+
"arms",
|
| 172 |
+
"valentine's day",
|
| 173 |
+
"astronomers",
|
| 174 |
+
"self",
|
| 175 |
+
"metered data plan",
|
| 176 |
+
"practical",
|
| 177 |
+
"pick",
|
| 178 |
+
"larceny",
|
| 179 |
+
"jolly",
|
| 180 |
+
"context menu"
|
| 181 |
+
],
|
| 182 |
+
"predict": [
|
| 183 |
+
"indicator",
|
| 184 |
+
"daily",
|
| 185 |
+
"pure",
|
| 186 |
+
"bad",
|
| 187 |
+
"homework",
|
| 188 |
+
"relieve",
|
| 189 |
+
"insinuate",
|
| 190 |
+
"grade",
|
| 191 |
+
"english",
|
| 192 |
+
"queen",
|
| 193 |
+
"He is fishing",
|
| 194 |
+
"satisfied",
|
| 195 |
+
"wrench",
|
| 196 |
+
"window",
|
| 197 |
+
"chicken",
|
| 198 |
+
"He is hatting it",
|
| 199 |
+
"worries",
|
| 200 |
+
"visited",
|
| 201 |
+
"interactive",
|
| 202 |
+
"arms",
|
| 203 |
+
"valentine's day",
|
| 204 |
+
"astronomers",
|
| 205 |
+
"self",
|
| 206 |
+
"metered data plan",
|
| 207 |
+
"practical",
|
| 208 |
+
"pick",
|
| 209 |
+
"larceny",
|
| 210 |
+
"jolly",
|
| 211 |
+
"context menu"
|
| 212 |
+
]
|
| 213 |
+
}
|
| 214 |
+
)
|
| 215 |
+
st.dataframe(
|
| 216 |
+
df,
|
| 217 |
+
column_config={
|
| 218 |
+
"prompt": st.column_config.LinkColumn("prompt"),
|
| 219 |
+
"predict": st.column_config.LinkColumn("predict"),
|
| 220 |
+
"label": st.column_config.LinkColumn("label"),
|
| 221 |
+
},
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
with st.expander("Here are more details about performance."):
|
| 225 |
+
df = pd.DataFrame(
|
| 226 |
+
{
|
| 227 |
+
"name": ["Roadmap", "Extras", "Issues"],
|
| 228 |
+
"url": ["https://roadmap.streamlit.app", "https://extras.streamlit.app", "https://issues.streamlit.app"],
|
| 229 |
+
"stars": [random.randint(0, 1000) for _ in range(3)],
|
| 230 |
+
"views_history": [[random.randint(0, 5000) for _ in range(30)] for _ in range(3)],
|
| 231 |
+
}
|
| 232 |
+
)
|
| 233 |
+
st.dataframe(
|
| 234 |
+
df,
|
| 235 |
+
column_config={
|
| 236 |
+
"name": "App name",
|
| 237 |
+
"stars": st.column_config.NumberColumn(
|
| 238 |
+
"Github Stars",
|
| 239 |
+
help="Number of stars on GitHub",
|
| 240 |
+
format="%d ⭐",
|
| 241 |
+
),
|
| 242 |
+
"url": st.column_config.LinkColumn("App URL"),
|
| 243 |
+
"views_history": st.column_config.LineChartColumn(
|
| 244 |
+
"Views (past 30 days)", y_min=0, y_max=5000
|
| 245 |
+
),
|
| 246 |
+
},
|
| 247 |
+
hide_index=True,
|
| 248 |
+
)
|
| 249 |
+
st.markdown("""
|
| 250 |
+
Here are some details about performance.
|
| 251 |
+
```shell
|
| 252 |
+
{
|
| 253 |
+
"predict_bleu-4": 96.79794598214286,
|
| 254 |
+
"predict_rouge-1": 98.21964419642859,
|
| 255 |
+
"predict_rouge-2": 97.57667857142857,
|
| 256 |
+
"predict_rouge-l": 98.36520848214286,
|
| 257 |
+
"predict_runtime": 93.1528,
|
| 258 |
+
"predict_samples_per_second": 2.147,
|
| 259 |
+
"predict_steps_per_second": 0.075
|
| 260 |
+
}
|
| 261 |
+
```
|
| 262 |
+
""")
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# st.divider()
|
| 266 |
+
|
| 267 |
+
## -------------------------------------------------------------------- ##
|
| 268 |
+
|
| 269 |
+
# dataset
|
| 270 |
+
st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
|
| 271 |
+
st.header("🔲 Datasets", divider="rainbow")
|
| 272 |
+
st.markdown("Trained on **two datasets** build by myself to ensure its proficiency in **Cherokee-Englishtranslation**.")
|
| 273 |
+
|
| 274 |
+
st.markdown("""
|
| 275 |
+
#### Cherokee-English Word Dataset (10.2k)
|
| 276 |
+
|
| 277 |
+
This dataset focuses on vocabulary, ensuring that our model has a comprehensive grasp of Cherokee words and their English counterparts.
|
| 278 |
+
""")
|
| 279 |
+
text="""
|
| 280 |
+
<iframe
|
| 281 |
+
src="https://huggingface.co/datasets/wang4067/cherokee-english-word-10.2k/embed/viewer/default/train"
|
| 282 |
+
frameborder="0"
|
| 283 |
+
width="100%"
|
| 284 |
+
height="560px"
|
| 285 |
+
></iframe>
|
| 286 |
+
"""
|
| 287 |
+
components.html(text,width=700, height=560, scrolling=False)
|
| 288 |
+
|
| 289 |
+
st.markdown("""
|
| 290 |
+
#### Cherokee-English Bible Sentence Dataset (7.96k)
|
| 291 |
+
|
| 292 |
+
This dataset provides a rich source of bilingual text, enabling our model to understand and reproduce the nuances of the Cherokee language within a religious context.
|
| 293 |
+
""")
|
| 294 |
+
text="""
|
| 295 |
+
<iframe
|
| 296 |
+
src="https://huggingface.co/datasets/wang4067/cherokee-english-bible-7.96k/embed/viewer/default/train"
|
| 297 |
+
frameborder="0"
|
| 298 |
+
width="100%"
|
| 299 |
+
height="560px"
|
| 300 |
+
></iframe>
|
| 301 |
+
"""
|
| 302 |
+
components.html(text,width=700, height=560, scrolling=False)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
# App skeleton Demo
|
| 313 |
+
st.markdown('<a name="new-app-loading-animation"></a>', unsafe_allow_html=True)
|
| 314 |
+
st.header("⏳ Method", divider="rainbow")
|
| 315 |
+
st.markdown("""
|
| 316 |
+
|
| 317 |
+
#### LoRa (Low-Rank Adaptation)
|
| 318 |
+
|
| 319 |
+
LoRA reduces the number of trainable parameters by learning pairs of rank-decompostion matrices while freezing the original weights. This vastly reduces the storage requirement for large language models adapted to specific tasks and enables efficient task-switching during deployment all without introducing inference latency. LoRA also outperforms several other adaptation methods including adapter, prefix-tuning, and fine-tuning.
|
| 320 |
+
|
| 321 |
+
""")
|
| 322 |
+
|
| 323 |
+
st.markdown("""
|
| 324 |
+
In this paper, adopt a more parameter-efficient approach, where the task-specific parameter increment $\Delta\Phi = \Delta\Phi(\Theta)$ is further encoded by a much smaller-sized set of parameters $\Theta$ with $|\Theta| \ll |\Phi_0|$.
|
| 325 |
+
The task of finding $\Delta\Phi$ thus becomes optimizing over $\Theta$:
|
| 326 |
+
""")
|
| 327 |
+
|
| 328 |
+
st.latex(r'''
|
| 329 |
+
\begin{align}
|
| 330 |
+
\max_{\Theta} \sum_{(x,y)\in Z} \sum_{t=1}^{|y|} \log\left({p_{\Phi_0+\Delta\Phi(\Theta)}(y_{t} | x, y_{<t})}\right)
|
| 331 |
+
\end{align}
|
| 332 |
+
''')
|
| 333 |
+
|
| 334 |
+
st.markdown("""
|
| 335 |
+
#### RAG (Retrieval-Augmented Generation)
|
| 336 |
+
|
| 337 |
+
Retrieval-Augmented Generation (RAG) is the process of optimizing the output of a large language model, so it references an authoritative knowledge base outside of its training data sources before generating a response. Large Language Models (LLMs) are trained on vast volumes of data and use billions of parameters to generate original output for tasks like answering questions, translating languages, and completing sentences. RAG extends the already powerful capabilities of LLMs to specific domains or an organization's internal knowledge base, all without the need to retrain the model. It is a cost-effective approach to improving LLM output so it remains relevant, accurate, and useful in various contexts.
|
| 338 |
+
""")


def get_file_url(path):
    # Read a local file and return its bytes as a base64 string for a data URL.
    with open(path, "rb") as file_:
        return base64.b64encode(file_.read()).decode("utf-8")

old_skeleton_url = get_file_url("./static/lora.png")
new_skeleton_url = get_file_url("./static/rag.png")

gif1, gif2 = st.columns(2)
with gif1:
    # st.subheader("detail")

    st.markdown(
        f'<img src="data:image/gif;base64,{old_skeleton_url}" width=450 alt="demo gif">',
        unsafe_allow_html=True,
    )
    st.caption("Fig 3: As illustrated above, the decomposition of ΔW means that we represent the large matrix ΔW with two smaller LoRA matrices, A and B. If A has the same number of rows as ΔW and B has the same number of columns as ΔW, we can write the decomposition as ΔW = AB. (AB is the matrix product of A and B.)")

with gif2:
    # st.subheader("detail")

    st.markdown(
        f'<img src="data:image/gif;base64,{new_skeleton_url}" width=450 alt="demo gif">',
        unsafe_allow_html=True,
    )
    st.caption("""
Fig 4: RAG extends the power of LLMs by accessing relevant proprietary data without retraining. When using RAG with Elastic, you benefit from:
- Cutting-edge search techniques
- Easy model selection and the ability to swap models effortlessly
- Secure document and role-based access to ensure your data stays protected
    """)

st.divider()

## -------------------------------------------------------------------- ##

# Border Demo
st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
st.header("🔲 Future Work", divider="rainbow")
st.markdown("""
### 1. Data Work

**✅ Approach 1:** Inherit training data from the lab or colleagues without verifying the data quality before training.

**✅ Approach 2:** Download open-source data to construct a "system + query + answer" dataset.

**✅ Approach 3:** Utilize GPT-4 to generate data, mastering the prompts that GPT-4 prefers. Recognize the importance of prompt diversity and explore various methods to expand the diversity of tasks and expressions in prompts. Deliberately include noisy prompts to enhance noise resistance. Be meticulous in checking data quality and align annotation standards with colleagues.

**❓ Approach 4:** Drive the data construction process with user interaction logs, collecting real user prompts and using rules or GPT-4 to analyze user feedback to obtain high-quality answer data.

**❓ Approach 5:** Draw inspiration from concepts like chain-of-thought, retrieval-augmented generation, function calling, and agent-based approaches to break down complex tasks at the data level. For example, if the model can't write a long novel, then "the model writes an outline for the novel, and then the model writes the long novel based on the outline."

### 2. Training Work

**✅ Approach 1:** Inherit training code from the lab or colleagues, modify the data path, and run the training script.

**❓ Approach 2:** Inherit or download training code, study every parameter of the launch script, and understand why offloading is enabled, what sequence parallelism means, etc. Then examine how the dataloader handles data formats and whether the loss on multi-turn session data is computed only on the last turn or on every turn. Investigate which special tokens are used in the code.

**❓ Approach 3:** Not only understand each parameter but also form your own insights: Are 3 epochs too many? Are 100,000 training examples appropriate? Are there too many special tokens? Is the learning rate too high for a 7B model, and how many warm-up steps should be used, or can warm-up be omitted? With these questions in mind, consult ChatGPT or read articles from experts to gain further insights.
""")


## -------------------------------------------------------------------- ##
with st.expander("Here are some details about this training process."):
    st.markdown(
        """

```yaml
bf16: true
cutoff_len: 1024
dataset: dict_word_v4,dict_sentence_v4
dataset_dir: data
ddp_timeout: 180000000
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 0.0001
logging_steps: 5
lora_alpha: 16
lora_dropout: 0.1
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: /wsh/models/Meta-Llama-3-8B-Instruct
num_train_epochs: 40.0
optim: adamw_torch
output_dir: saves/Custom/lora/train_2024-09-15-17-54-11-v4-learn_rate_0001
packing: false
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
report_to: none
save_steps: 100
stage: sft
warmup_steps: 0
```
        """
    )
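
# --- Illustrative arithmetic on the config above (the single-GPU count is an
# --- assumption, not stated in the config): each optimizer step accumulates
# --- per_device_train_batch_size * gradient_accumulation_steps = 2 * 8 = 16
# --- sequences, each truncated to cutoff_len = 1024 tokens.
effective_batch_size = 2 * 8  # 16 sequences per optimizer step on one GPU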


# ============================================

# ============================================

pages/5_🦁_Cherokee_Model_with_RAG_Demo.py
ADDED
@@ -0,0 +1,252 @@
import streamlit as st
# import streamlit.components.v1 as components
# components.iframe("https://cherokee.nicedata.eu.org/", height=500)
import streamlit.components.v1 as components
import base64
# from faker import Faker
import random
from datetime import datetime
import pandas as pd
import requests
import time
st.set_page_config(layout="wide")

# LOGO_URL_LARGE="./static/lora.png"
st.logo(
    "./static/logo1.png",
    link="https://nicedata.eu.org/"
)


with st.sidebar:
    st.title('💬 Cherokee Model')
    st.write('This chatbot is created using the open-source Llama 3 LLM from Meta.')

    st.markdown('📖 Learn how to build this app in this [blog](https://nicedata.eu.org/Cherokee)!')

    st.info(
        """
        - Email: [sh.wang4067@gmail.com](mailto:sh.wang4067@gmail.com)
        - Tel: +86 181-1615-2720
        - Homepage: [nicedata.eu.org](https://nicedata.eu.org)
        - Github: [wdzhwsh4076](https://github.com/wdzhwsh4076)
        - Address: Boda Campus, Xinjiang University, Urumqi City, China
        """
    )
    st.markdown(
        """
        ### Link

        [1. Cherokee dictionary](https://www.cherokeedictionary.net/)

        [2. Cherokee first 500 words](https://www.cherokeedictionary.net/first500)
        """
    )

st.title("Cherokee Language Model with RAG")
# st.markdown(
#     """
#     I am excited to present the latest language model, which has been fine-tuned using the state-of-the-art LoRA (Low-Rank Adaptation) technique on the robust foundation of the LLaMA3-8B model.
#     This is an open-source project and you are very welcome to contribute your comments, questions, resources, and apps as [issues](https://github.com/giswqs/streamlit-geospatial/issues) or
#     [pull requests](https://github.com/giswqs/streamlit-geospatial/pulls) to the [GitHub repository](https://github.com/giswqs/streamlit-geospatial).
#     """
# )
st.info("Click on the left sidebar menu to navigate to the different apps.")

components.iframe("https://211.nicedata.eu.org/chatbot/Su0heMXH9oIXOd5C", height=700)


def get_file_url(path):
    # Read a local file and return its bytes as a base64 string for a data URL.
    with open(path, "rb") as file_:
        return base64.b64encode(file_.read()).decode("utf-8")


## -------------------------------------------------------------------- ##

# dataset
st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
st.header("🔲 Datasets", divider="rainbow")
st.markdown("Trained on **two datasets** built by myself to ensure proficiency in **Cherokee-English translation**.")

st.markdown("""
#### Cherokee-English Word Dataset (10.2k)

This dataset focuses on vocabulary, ensuring that our model has a comprehensive grasp of Cherokee words and their English counterparts.
""")
text = """
<iframe
    src="https://huggingface.co/datasets/wang4067/cherokee-english-word-10.2k/embed/viewer/default/train"
    frameborder="0"
    width="100%"
    height="560px"
></iframe>
"""
components.html(text, width=700, height=560, scrolling=False)

st.markdown("""
#### Cherokee-English Bible Sentence Dataset (7.96k)

This dataset provides a rich source of bilingual text, enabling our model to understand and reproduce the nuances of the Cherokee language within a religious context.
""")
text = """
<iframe
    src="https://huggingface.co/datasets/wang4067/cherokee-english-bible-7.96k/embed/viewer/default/train"
    frameborder="0"
    width="100%"
    height="560px"
></iframe>
"""
components.html(text, width=700, height=560, scrolling=False)


# App skeleton Demo
st.markdown('<a name="new-app-loading-animation"></a>', unsafe_allow_html=True)
st.header("⏳ Method", divider="rainbow")
st.markdown("""
#### LoRA (Low-Rank Adaptation)

LoRA reduces the number of trainable parameters by learning pairs of rank-decomposition matrices while freezing the original weights. This vastly reduces the storage requirement for large language models adapted to specific tasks and enables efficient task-switching during deployment, all without introducing inference latency. LoRA also outperforms several other adaptation methods, including adapters, prefix-tuning, and full fine-tuning.
""")

st.markdown("""
In this paper, we adopt a more parameter-efficient approach, where the task-specific parameter increment $\Delta\Phi = \Delta\Phi(\Theta)$ is further encoded by a much smaller set of parameters $\Theta$ with $|\Theta| \ll |\Phi_0|$.
The task of finding $\Delta\Phi$ thus becomes optimizing over $\Theta$:
""")

st.latex(r'''
\begin{align}
\max_{\Theta} \sum_{(x,y)\in Z} \sum_{t=1}^{|y|} \log\left({p_{\Phi_0+\Delta\Phi(\Theta)}(y_{t} | x, y_{<t})}\right)
\end{align}
''')
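
# --- Illustrative arithmetic only (the 4096 x 4096 weight shape is an
# --- assumption, not a measured value): with LoRA rank r = 8, the trainable
# --- increment is far smaller than the full matrix, which is the
# --- |Theta| << |Phi_0| claim above.
d, k, r = 4096, 4096, 8
full_increment_params = d * k        # 16,777,216 trainable values without LoRA
lora_increment_params = r * (d + k)  # 65,536 trainable values with LoRA (~256x fewer)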

st.markdown("""
#### RAG (Retrieval-Augmented Generation)

Retrieval-Augmented Generation (RAG) is the process of optimizing the output of a large language model so that it references an authoritative knowledge base outside of its training data before generating a response. Large Language Models (LLMs) are trained on vast volumes of data and use billions of parameters to generate original output for tasks like answering questions, translating languages, and completing sentences. RAG extends these already powerful capabilities to specific domains or an organization's internal knowledge base, all without the need to retrain the model. It is a cost-effective approach to keeping LLM output relevant, accurate, and useful in various contexts.
""")


def get_file_url(path):
    # Note: duplicates get_file_url defined earlier in this file.
    with open(path, "rb") as file_:
        return base64.b64encode(file_.read()).decode("utf-8")

old_skeleton_url = get_file_url("./static/lora.png")
new_skeleton_url = get_file_url("./static/rag.png")

gif1, gif2 = st.columns(2)
with gif1:
    # st.subheader("detail")

    st.markdown(
        f'<img src="data:image/gif;base64,{old_skeleton_url}" width=450 alt="demo gif">',
        unsafe_allow_html=True,
    )
    st.caption("Fig 3: As illustrated above, the decomposition of ΔW means that we represent the large matrix ΔW with two smaller LoRA matrices, A and B. If A has the same number of rows as ΔW and B has the same number of columns as ΔW, we can write the decomposition as ΔW = AB. (AB is the matrix product of A and B.)")

with gif2:
    # st.subheader("detail")

    st.markdown(
        f'<img src="data:image/gif;base64,{new_skeleton_url}" width=450 alt="demo gif">',
        unsafe_allow_html=True,
    )
    st.caption("""
Fig 4: RAG extends the power of LLMs by accessing relevant proprietary data without retraining. When using RAG with Elastic, you benefit from:
- Cutting-edge search techniques
- Easy model selection and the ability to swap models effortlessly
- Secure document and role-based access to ensure your data stays protected
    """)

st.divider()

## -------------------------------------------------------------------- ##

# Border Demo
st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
st.header("🔲 Future Work", divider="rainbow")
st.markdown("""
### 1. Data Work

**✅ Approach 1:** Inherit training data from the lab or colleagues without verifying the data quality before training.

**✅ Approach 2:** Download open-source data to construct a "system + query + answer" dataset.

**✅ Approach 3:** Utilize GPT-4 to generate data, mastering the prompts that GPT-4 prefers. Recognize the importance of prompt diversity and explore various methods to expand the diversity of tasks and expressions in prompts. Deliberately include noisy prompts to enhance noise resistance. Be meticulous in checking data quality and align annotation standards with colleagues.

**❓ Approach 4:** Drive the data construction process with user interaction logs, collecting real user prompts and using rules or GPT-4 to analyze user feedback to obtain high-quality answer data.

**❓ Approach 5:** Draw inspiration from concepts like chain-of-thought, retrieval-augmented generation, function calling, and agent-based approaches to break down complex tasks at the data level. For example, if the model can't write a long novel, then "the model writes an outline for the novel, and then the model writes the long novel based on the outline."

### 2. Training Work

**✅ Approach 1:** Inherit training code from the lab or colleagues, modify the data path, and run the training script.

**❓ Approach 2:** Inherit or download training code, study every parameter of the launch script, and understand why offloading is enabled, what sequence parallelism means, etc. Then examine how the dataloader handles data formats and whether the loss on multi-turn session data is computed only on the last turn or on every turn. Investigate which special tokens are used in the code.

**❓ Approach 3:** Not only understand each parameter but also form your own insights: Are 3 epochs too many? Are 100,000 training examples appropriate? Are there too many special tokens? Is the learning rate too high for a 7B model, and how many warm-up steps should be used, or can warm-up be omitted? With these questions in mind, consult ChatGPT or read articles from experts to gain further insights.
""")


## -------------------------------------------------------------------- ##
with st.expander("Here are some details about this training process."):
    st.markdown(
        """

```yaml
bf16: true
cutoff_len: 1024
dataset: dict_word_v4,dict_sentence_v4
dataset_dir: data
ddp_timeout: 180000000
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 0.0001
logging_steps: 5
lora_alpha: 16
lora_dropout: 0.1
lora_rank: 8
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: /wsh/models/Meta-Llama-3-8B-Instruct
num_train_epochs: 40.0
optim: adamw_torch
output_dir: saves/Custom/lora/train_2024-09-15-17-54-11-v4-learn_rate_0001
packing: false
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
report_to: none
save_steps: 100
stage: sft
warmup_steps: 0
```
        """
    )


# ============================================

# ============================================

pages/6_🌵_Cherokee_Converter.py
ADDED
@@ -0,0 +1,228 @@
import streamlit as st
# import streamlit.components.v1 as components
# components.iframe("https://cherokee.nicedata.eu.org/", height=500)
import streamlit.components.v1 as components
import base64
# from faker import Faker
import random
from datetime import datetime
import pandas as pd
import requests
import time
st.set_page_config(layout="wide")

# LOGO_URL_LARGE="./static/lora.png"
st.logo(
    "./static/logo1.png",
    link="https://nicedata.eu.org/"
)

with st.sidebar:
    st.title('🌵 Cherokee Syllabary and Phonetic Converter')
    st.write('This converter is part of a demo suite built around the open-source Llama 3 LLM from Meta.')

    st.markdown('📖 Learn how to build this app in this [blog](https://nicedata.eu.org/Cherokee)!')

    st.info(
        """
        - Email: [sh.wang4067@gmail.com](mailto:sh.wang4067@gmail.com)
        - Tel: +86 181-1615-2720
        - Homepage: [nicedata.eu.org](https://nicedata.eu.org)
        - Github: [wdzhwsh4076](https://github.com/wdzhwsh4076)
        - Address: Boda Campus, Xinjiang University, Urumqi City, China
        """
    )
    st.markdown(
        """
        ### Link

        [1. Cherokee dictionary](https://www.cherokeedictionary.net/)

        [2. Cherokee first 500 words](https://www.cherokeedictionary.net/first500)
        """
    )

st.title("🌵 Cherokee Converter")
# st.markdown(
#     """
#     I am excited to present the latest language model, which has been fine-tuned using the state-of-the-art LoRA (Low-Rank Adaptation) technique on the robust foundation of the LLaMA3-8B model.
#     This is an open-source project and you are very welcome to contribute your comments, questions, resources, and apps as [issues](https://github.com/giswqs/streamlit-geospatial/issues) or
#     [pull requests](https://github.com/giswqs/streamlit-geospatial/pulls) to the [GitHub repository](https://github.com/giswqs/streamlit-geospatial).
#     """
# )
st.info("Click on the left sidebar menu to navigate to the different apps.")


def get_file_url(path):
    # Read a local file and return its bytes as a base64 string for a data URL.
    with open(path, "rb") as file_:
        return base64.b64encode(file_.read()).decode("utf-8")


## -------------------------------------------------------------------- ##
def syllabary_to_phonetic(syllabary_sentence: str) -> str:
    # One-to-one map from each Cherokee syllabary character to its phonetic value.
    syllabary_to_phonetic_map = {
        'Ꭰ': 'a', 'Ꭱ': 'e', 'Ꭲ': 'i', 'Ꭳ': 'o', 'Ꭴ': 'u', 'Ꭵ': 'v',
        'Ꭶ': 'ga', 'Ꭷ': 'ka', 'Ꭸ': 'ge', 'Ꭹ': 'gi', 'Ꭺ': 'go', 'Ꭻ': 'gu', 'Ꭼ': 'gv',
        'Ꭽ': 'ha', 'Ꭾ': 'he', 'Ꭿ': 'hi', 'Ꮀ': 'ho', 'Ꮁ': 'hu', 'Ꮂ': 'hv',
        'Ꮃ': 'la', 'Ꮄ': 'le', 'Ꮅ': 'li', 'Ꮆ': 'lo', 'Ꮇ': 'lu', 'Ꮈ': 'lv',
        'Ꮉ': 'ma', 'Ꮊ': 'me', 'Ꮋ': 'mi', 'Ꮌ': 'mo', 'Ꮍ': 'mu', 'Ᏽ': 'mv',
        'Ꮎ': 'na', 'Ꮏ': 'hna', 'Ꮐ': 'nah', 'Ꮑ': 'ne', 'Ꮒ': 'ni', 'Ꮓ': 'no', 'Ꮔ': 'nu', 'Ꮕ': 'nv',
        'Ꮖ': 'qua', 'Ꮗ': 'que', 'Ꮘ': 'qui', 'Ꮙ': 'quo', 'Ꮚ': 'quu', 'Ꮛ': 'quv',
        'Ꮝ': 's', 'Ꮜ': 'sa', 'Ꮞ': 'se', 'Ꮟ': 'si', 'Ꮠ': 'so', 'Ꮡ': 'su', 'Ꮢ': 'sv',
        'Ꮣ': 'da', 'Ꮤ': 'ta', 'Ꮥ': 'de', 'Ꮦ': 'te', 'Ꮧ': 'di', 'Ꮨ': 'ti', 'Ꮩ': 'do', 'Ꮪ': 'du', 'Ꮫ': 'dv',
        'Ꮬ': 'dla', 'Ꮭ': 'tla', 'Ꮮ': 'tle', 'Ꮯ': 'tli', 'Ꮰ': 'tlo', 'Ꮱ': 'tlu', 'Ꮲ': 'tlv',
        'Ꮳ': 'tsa', 'Ꮴ': 'tse', 'Ꮵ': 'tsi', 'Ꮶ': 'tso', 'Ꮷ': 'tsu', 'Ꮸ': 'tsv',
        'Ꮹ': 'wa', 'Ꮺ': 'we', 'Ꮻ': 'wi', 'Ꮼ': 'wo', 'Ꮽ': 'wu', 'Ꮾ': 'wv',
        'Ꮿ': 'ya', 'Ᏸ': 'ye', 'Ᏹ': 'yi', 'Ᏺ': 'yo', 'Ᏻ': 'yu', 'Ᏼ': 'yv',
    }

    phonetic_sentence = ''
    for char in syllabary_sentence:
        if char in syllabary_to_phonetic_map:
            phonetic_sentence += syllabary_to_phonetic_map[char]
        else:
            phonetic_sentence += char

    return phonetic_sentence

def phonetic_to_syllabary(phonetic_sentence: str) -> str:
    # Inverse map of the table above.
    phonetic_to_syllabary_map = {
        'a': 'Ꭰ', 'e': 'Ꭱ', 'i': 'Ꭲ', 'o': 'Ꭳ', 'u': 'Ꭴ', 'v': 'Ꭵ',
        'ga': 'Ꭶ', 'ka': 'Ꭷ', 'ge': 'Ꭸ', 'gi': 'Ꭹ', 'go': 'Ꭺ', 'gu': 'Ꭻ', 'gv': 'Ꭼ',
        'ha': 'Ꭽ', 'he': 'Ꭾ', 'hi': 'Ꭿ', 'ho': 'Ꮀ', 'hu': 'Ꮁ', 'hv': 'Ꮂ',
        'la': 'Ꮃ', 'le': 'Ꮄ', 'li': 'Ꮅ', 'lo': 'Ꮆ', 'lu': 'Ꮇ', 'lv': 'Ꮈ',
        'ma': 'Ꮉ', 'me': 'Ꮊ', 'mi': 'Ꮋ', 'mo': 'Ꮌ', 'mu': 'Ꮍ', 'mv': 'Ᏽ',
        'na': 'Ꮎ', 'hna': 'Ꮏ', 'nah': 'Ꮐ', 'ne': 'Ꮑ', 'ni': 'Ꮒ', 'no': 'Ꮓ', 'nu': 'Ꮔ', 'nv': 'Ꮕ',
        'qua': 'Ꮖ', 'que': 'Ꮗ', 'qui': 'Ꮘ', 'quo': 'Ꮙ', 'quu': 'Ꮚ', 'quv': 'Ꮛ',
        's': 'Ꮝ', 'sa': 'Ꮜ', 'se': 'Ꮞ', 'si': 'Ꮟ', 'so': 'Ꮠ', 'su': 'Ꮡ', 'sv': 'Ꮢ',
        'da': 'Ꮣ', 'ta': 'Ꮤ', 'de': 'Ꮥ', 'te': 'Ꮦ', 'di': 'Ꮧ', 'ti': 'Ꮨ', 'do': 'Ꮩ', 'du': 'Ꮪ', 'dv': 'Ꮫ',
        'dla': 'Ꮬ', 'tla': 'Ꮭ', 'tle': 'Ꮮ', 'tli': 'Ꮯ', 'tlo': 'Ꮰ', 'tlu': 'Ꮱ', 'tlv': 'Ꮲ',
        'tsa': 'Ꮳ', 'tse': 'Ꮴ', 'tsi': 'Ꮵ', 'tso': 'Ꮶ', 'tsu': 'Ꮷ', 'tsv': 'Ꮸ',
        'wa': 'Ꮹ', 'we': 'Ꮺ', 'wi': 'Ꮻ', 'wo': 'Ꮼ', 'wu': 'Ꮽ', 'wv': 'Ꮾ',
        'ya': 'Ꮿ', 'ye': 'Ᏸ', 'yi': 'Ᏹ', 'yo': 'Ᏺ', 'yu': 'Ᏻ', 'yv': 'Ᏼ',
    }

    syllabary_sentence = ''
    i = 0
    while i < len(phonetic_sentence):
        # Greedy longest-match-first: try 3-letter syllables (e.g. "tsa", "hna",
        # "nah") before 2-letter ones, so "nah" becomes Ꮐ rather than Ꮎ + "h".
        if phonetic_sentence[i:i+3] in phonetic_to_syllabary_map:
            syllabary_sentence += phonetic_to_syllabary_map[phonetic_sentence[i:i+3]]
            i += 3
        elif phonetic_sentence[i:i+2] in phonetic_to_syllabary_map:
            syllabary_sentence += phonetic_to_syllabary_map[phonetic_sentence[i:i+2]]
            i += 2
        elif phonetic_sentence[i] in phonetic_to_syllabary_map:
            syllabary_sentence += phonetic_to_syllabary_map[phonetic_sentence[i]]
            i += 1
        else:
            syllabary_sentence += phonetic_sentence[i]
            i += 1

    return syllabary_sentence

# Example usage
syllabary_sentence = "ᎨᏍᏗ ᏯᏍᎦᎢᎮ ᏥᏄᏍᏕ ᎠᎬᏱ ᏣᎴᏂᏍᎨ ᎠᏂᎩᏍᎬ, ᎾᎥᏂ ᏭᎷᏤᎢ, ᏏᏲ, ᎤᏍᏗ ᎠᏣᏗ ᎬᏉᏎᎰ ᏃᎴ ᎨᏍᏗ ᎯᎸᎯᏳ ᏥᎪᎥ ᏂᎯ ᎢᏳᏍᏗ ᎠᏣᏗ."
phonetic_sentence = syllabary_to_phonetic(syllabary_sentence)
print("Phonetic:", phonetic_sentence)

reconstructed_syllabary = phonetic_to_syllabary(phonetic_sentence)
print("Reconstructed Syllabary:", reconstructed_syllabary)
print("Original and reconstructed match:", syllabary_sentence == reconstructed_syllabary)

# dataset
st.markdown('<a name="customizable-border"></a>', unsafe_allow_html=True)
st.header("🔲 Demo", divider="rainbow")
# st.markdown("Enter Cherokee Syllabary Text:")

# st.markdown("""
# #### Cherokee-English Word Dataset (10.2k)

# This dataset focuses on vocabulary, ensuring that our model has a comprehensive grasp of Cherokee words and their English counterparts.
# """)
# Input text area for syllabary
# Create two columns
# First row: syllabary to phonetic
st.subheader("Syllabary to Phonetic")
col1, col2 = st.columns(2)

with col1:
    syllabary_input = st.text_area("Enter Cherokee Syllabary Text:",
                                   "ᎨᏍᏗ ᏯᏍᎦᎢᎮ ᏥᏄᏍᏕ ᎠᎬᏱ ᏣᎴᏂᏍᎨ ᎠᏂᎩᏍᎬ",
                                   height=100, key="syllabary_input")

if st.button("Convert to Phonetic"):
    phonetic_output = syllabary_to_phonetic(syllabary_input)
    st.session_state.phonetic_output = phonetic_output

with col2:
    st.text_area("Phonetic Output:",
                 value=st.session_state.get('phonetic_output', ''),
                 height=100, key="phonetic_output")

# Second row: phonetic to syllabary
st.subheader("Phonetic to Syllabary")
col3, col4 = st.columns(2)

with col3:
    phonetic_input = st.text_area("Enter Phonetic Text:",
                                  "gesdi yasgaihe jinusde agvyi jalenisge anigigv",
                                  height=100, key="phonetic_input")

if st.button("Convert to Syllabary"):
    syllabary_output = phonetic_to_syllabary(phonetic_input)
    st.session_state.syllabary_output = syllabary_output

with col4:
    st.text_area("Syllabary Output:",
                 value=st.session_state.get('syllabary_output', ''),
                 height=100, key="syllabary_output")


# App skeleton Demo
st.markdown('<a name="new-app-loading-animation"></a>', unsafe_allow_html=True)
st.header("⏳ Method", divider="rainbow")
st.markdown("""
#### Cherokee syllabary

The Cherokee syllabary was invented by Sequoyah in the late 1810s and early 1820s to write the Cherokee language. His creation of the syllabary is particularly noteworthy because he was illiterate until its creation. He first experimented with logograms, but his system later developed into a syllabary. In his system, each symbol represents a syllable rather than a single phoneme; the 85 (originally 86) characters provide a suitable method for writing Cherokee. The letters resemble characters from other scripts, such as Latin, Greek, Cyrillic, and Glagolitic; however, they are not used to represent the same sounds.
""")


def get_file_url(path):
    # Note: duplicates get_file_url defined earlier in this file.
    with open(path, "rb") as file_:
        return base64.b64encode(file_.read()).decode("utf-8")

old_skeleton_url = get_file_url("./static/cherokee_tag.png")
new_skeleton_url = get_file_url("./static/cherokee_source.png")

gif1, gif2 = st.columns(2)
with gif1:
    # st.subheader("detail")

    st.markdown(
        f'<img src="data:image/gif;base64,{old_skeleton_url}" width=450 alt="demo gif">',
        unsafe_allow_html=True,
    )
    st.caption("Fig: https://en.wikipedia.org/wiki/Cherokee_syllabary")

with gif2:
    # st.subheader("detail")

    st.markdown(
        f'<img src="data:image/gif;base64,{new_skeleton_url}" width=450 alt="demo gif">',
        unsafe_allow_html=True,
    )
    st.caption("""Fig: https://en.wikipedia.org/wiki/Cherokee_syllabary""")

st.divider()

## -------------------------------------------------------------------- ##

requirements.txt
ADDED
@@ -0,0 +1,110 @@
altair==5.4.1
annotated-types==0.7.0
anthropic==0.34.2
anyio==4.6.0
anywidget==0.9.13
asttokens==2.4.1
attrs==24.2.0
beautifulsoup4==4.12.3
blinker==1.8.2
bqplot==0.12.43
branca==0.7.2
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.3.2
click==8.1.7
colour==0.1.5
comm==0.2.2
contourpy==1.3.0
cycler==0.12.1
decorator==5.1.1
distro==1.9.0
duckdb==1.1.0
exceptiongroup==1.2.2
executing==2.1.0
filelock==3.16.1
fonttools==4.54.0
fsspec==2024.9.0
gdown==5.2.0
geojson==3.1.0
gitdb==4.0.11
GitPython==3.1.43
h11==0.14.0
httpcore==1.0.5
httpx==0.27.2
huggingface-hub==0.25.1
idna==3.10
ipyevents==2.0.2
ipyfilechooser==0.6.0
ipyleaflet==0.19.2
ipython==8.27.0
ipytree==0.2.2
ipyvue==1.11.1
ipyvuetify==1.10.0
ipywidgets==8.1.5
jedi==0.19.1
Jinja2==3.1.4
jiter==0.5.0
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
jupyter-leaflet==0.19.2
jupyterlab_widgets==3.0.13
kiwisolver==1.4.7
markdown-it-py==3.0.0
MarkupSafe==2.1.5
matplotlib==3.9.2
matplotlib-inline==0.1.7
mdurl==0.1.2
narwhals==1.8.2
numpy==2.1.1
openai==1.47.1
packaging==24.1
pandas==2.2.3
parso==0.8.4
pexpect==4.9.0
pillow==10.4.0
prompt_toolkit==3.0.47
protobuf==5.28.2
psygnal==0.11.1
ptyprocess==0.7.0
pure_eval==0.2.3
pyarrow==17.0.0
pydantic==2.9.2
pydantic_core==2.23.4
pydeck==0.9.1
Pygments==2.18.0
pyparsing==3.1.4
pyshp==2.3.1
PySocks==1.7.1
pystac==1.10.1
pystac-client==0.8.3
python-box==7.2.0
python-dateutil==2.9.0.post0
pytz==2024.2
PyYAML==6.0.2
referencing==0.35.1
requests==2.32.3
rich==13.8.1
rpds-py==0.20.0
scooby==0.10.0
six==1.16.0
smmap==5.0.1
sniffio==1.3.1
soupsieve==2.6
stack-data==0.6.3
tenacity==8.5.0
tokenizers==0.20.0
toml==0.10.2
tornado==6.4.1
tqdm==4.66.5
traitlets==5.14.3
traittypes==0.2.1
typing_extensions==4.12.2
tzdata==2024.1
urllib3==2.2.3
watchdog==4.0.2
wcwidth==0.2.13
whitebox==2.3.5
whiteboxgui==2.3.0
widgetsnbextension==4.0.13
xyzservices==2024.9.0

static/cherokee_source.png
ADDED
static/cherokee_tag.png
ADDED
static/gif.gif
ADDED
static/image.png
ADDED
static/logo.png
ADDED
static/logo0.png
ADDED
static/logo1.png
ADDED
static/lora.png
ADDED
static/name.png
ADDED
static/paper1.png
ADDED
static/paper2-1.png
ADDED
static/perfomance0.png
ADDED
static/perfomance1.png
ADDED
static/perfomance2.png
ADDED
static/rag.png
ADDED
static/stack.png
ADDED