Spaces:
Paused
Paused
Upload 82 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- .gitignore +134 -0
- Dockerfile +1 -1
- LICENSE +201 -0
- Procfile +1 -0
- README.en.md +480 -0
- README.md +506 -8
- Screenshots/BT_Linux_Panel_Deploy_1.png +0 -0
- Screenshots/BT_Linux_Panel_Deploy_2.png +0 -0
- Screenshots/api_image_result.png +0 -0
- Screenshots/api_video_result.png +0 -0
- Screenshots/benchmarks/Douyin_API.png +0 -0
- Screenshots/benchmarks/Douyin_API_Douyin_wtf.png +0 -0
- Screenshots/benchmarks/TikTok_API.png +0 -0
- Screenshots/benchmarks/TikTok_API_Douyin_wtf.png +0 -0
- Screenshots/benchmarks/info +1 -0
- Screenshots/home.png +0 -0
- Screenshots/home_en.png +0 -0
- Screenshots/multi_results.png +3 -0
- Screenshots/multi_results_en.png +0 -0
- Screenshots/single_result.png +0 -0
- Screenshots/single_result_en.png +0 -0
- Screenshots/tiktok_API.png +0 -0
- Screenshots/v3_screenshots/Home.png +0 -0
- Screenshots/v3_screenshots/Home_en.png +0 -0
- Screenshots/v3_screenshots/info +1 -0
- app/api/endpoints/bilibili_web.py +697 -0
- app/api/endpoints/douyin_web.py +1070 -0
- app/api/endpoints/download.py +156 -0
- app/api/endpoints/hybrid_parsing.py +53 -0
- app/api/endpoints/ios_shortcut.py +24 -0
- app/api/endpoints/tiktok_app.py +49 -0
- app/api/endpoints/tiktok_web.py +951 -0
- app/api/models/APIResponseModel.py +41 -0
- app/api/router.py +29 -0
- app/main.py +147 -0
- app/web/app.py +92 -0
- app/web/views/About.py +23 -0
- app/web/views/Document.py +65 -0
- app/web/views/Downloader.py +18 -0
- app/web/views/EasterEgg.py +60 -0
- app/web/views/ParseVideo.py +238 -0
- app/web/views/Shortcuts.py +48 -0
- app/web/views/ViewsUtils.py +24 -0
- bash/install.sh +61 -0
- bash/update.sh +31 -0
- config.yaml +52 -0
- crawlers/base_crawler.py +349 -0
- crawlers/bilibili/web/config.yaml +13 -0
- crawlers/bilibili/web/endpoints.py +62 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
Screenshots/multi_results.png filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
pip-wheel-metadata/
|
| 24 |
+
share/python-wheels/
|
| 25 |
+
*.egg-info/
|
| 26 |
+
.installed.cfg
|
| 27 |
+
*.egg
|
| 28 |
+
MANIFEST
|
| 29 |
+
|
| 30 |
+
# PyInstaller
|
| 31 |
+
# Usually these files are written by a python script from a template
|
| 32 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 33 |
+
*.manifest
|
| 34 |
+
*.spec
|
| 35 |
+
|
| 36 |
+
# Installer logs
|
| 37 |
+
pip-log.txt
|
| 38 |
+
pip-delete-this-directory.txt
|
| 39 |
+
|
| 40 |
+
# Unit test / coverage reports
|
| 41 |
+
htmlcov/
|
| 42 |
+
.tox/
|
| 43 |
+
.nox/
|
| 44 |
+
.coverage
|
| 45 |
+
.coverage.*
|
| 46 |
+
.cache
|
| 47 |
+
nosetests.xml
|
| 48 |
+
coverage.xml
|
| 49 |
+
*.cover
|
| 50 |
+
*.py,cover
|
| 51 |
+
.hypothesis/
|
| 52 |
+
.pytest_cache/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
target/
|
| 76 |
+
|
| 77 |
+
# Jupyter Notebook
|
| 78 |
+
.ipynb_checkpoints
|
| 79 |
+
|
| 80 |
+
# IPython
|
| 81 |
+
profile_default/
|
| 82 |
+
ipython_config.py
|
| 83 |
+
|
| 84 |
+
# pyenv
|
| 85 |
+
.python-version
|
| 86 |
+
|
| 87 |
+
# pipenv
|
| 88 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 89 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 90 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 91 |
+
# install all needed dependencies.
|
| 92 |
+
#Pipfile.lock
|
| 93 |
+
|
| 94 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 95 |
+
__pypackages__/
|
| 96 |
+
|
| 97 |
+
# Celery stuff
|
| 98 |
+
celerybeat-schedule
|
| 99 |
+
celerybeat.pid
|
| 100 |
+
|
| 101 |
+
# SageMath parsed files
|
| 102 |
+
*.sage.py
|
| 103 |
+
|
| 104 |
+
# Environments
|
| 105 |
+
.env
|
| 106 |
+
.venv
|
| 107 |
+
env/
|
| 108 |
+
venv/
|
| 109 |
+
ENV/
|
| 110 |
+
env.bak/
|
| 111 |
+
venv.bak/
|
| 112 |
+
|
| 113 |
+
# Spyder project settings
|
| 114 |
+
.spyderproject
|
| 115 |
+
.spyproject
|
| 116 |
+
|
| 117 |
+
# Rope project settings
|
| 118 |
+
.ropeproject
|
| 119 |
+
|
| 120 |
+
# mkdocs documentation
|
| 121 |
+
/site
|
| 122 |
+
|
| 123 |
+
# mypy
|
| 124 |
+
.mypy_cache/
|
| 125 |
+
.dmypy.json
|
| 126 |
+
dmypy.json
|
| 127 |
+
|
| 128 |
+
# Pyre type checker
|
| 129 |
+
.pyre/
|
| 130 |
+
|
| 131 |
+
# pycharm
|
| 132 |
+
.idea
|
| 133 |
+
/app/api/endpoints/download/
|
| 134 |
+
/download/
|
Dockerfile
CHANGED
|
@@ -30,4 +30,4 @@ RUN pip3 install --no-cache-dir -r requirements.txt
|
|
| 30 |
RUN chmod +x start.sh
|
| 31 |
|
| 32 |
# Command to run on container start
|
| 33 |
-
CMD ["./start.sh"]
|
|
|
|
| 30 |
RUN chmod +x start.sh
|
| 31 |
|
| 32 |
# Command to run on container start
|
| 33 |
+
CMD ["./start.sh"]
|
LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
Procfile
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
web: python3 start.py
|
README.en.md
ADDED
|
@@ -0,0 +1,480 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<a href="https://douyin.wtf/" alt="logo" ><img src="https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/logo/logo192.png" width="120"/></a>
|
| 3 |
+
</div>
|
| 4 |
+
<h1 align="center">Douyin_TikTok_Download_API(抖音/TikTok API)</h1>
|
| 5 |
+
|
| 6 |
+
<div align="center">
|
| 7 |
+
|
| 8 |
+
[English](./README.en.md)\|[Simplified Chinese](./README.md)
|
| 9 |
+
|
| 10 |
+
🚀"Douyin_TikTok_Download_API" is a high-performance asynchronous API that can be used out of the box[Tik Tok](https://www.douyin.com)\|[TikTok](https://www.tiktok.com)\|[Bilibili](https://www.bilibili.com)Data crawling tool supports API calling, online batch analysis and downloading.
|
| 11 |
+
|
| 12 |
+
[](LICENSE)[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/releases/latest)[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/stargazers)[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/network/members)[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues?q=is%3Aissue+is%3Aclosed)<br>[](https://pypi.org/project/douyin-tiktok-scraper/)[](https://pypi.org/project/douyin-tiktok-scraper/#files)[](https://pypi.org/project/douyin-tiktok-scraper/)[](https://pypi.org/project/douyin-tiktok-scraper/)<br>[](https://api.douyin.wtf/docs)[](https://api.tikhub.io/docs)<br>[](https://afdian.net/@evil0ctal)[](https://ko-fi.com/evil0ctal)[](https://www.patreon.com/evil0ctal)
|
| 13 |
+
|
| 14 |
+
</div>
|
| 15 |
+
|
| 16 |
+
## Sponsor
|
| 17 |
+
|
| 18 |
+
These sponsors have paid to be placed here,**Douyin_TikTok_Download_API**The project will always be free and open source. If you would like to become a sponsor of this project, please check out my[GitHub Sponsor Page](https://github.com/sponsors/evil0ctal)。
|
| 19 |
+
|
| 20 |
+
<div align="center">
|
| 21 |
+
<a href="https://www.tikhub.io/" target="_blank">
|
| 22 |
+
<img src="https://tikhub.io/wp-content/uploads/2024/11/Main-Logo.webp" width="100" alt="TikHub.io - Global Social Data & API Marketplace">
|
| 23 |
+
</a>
|
| 24 |
+
<div>
|
| 25 |
+
<h2><b>TikHub.io</b></h2>
|
| 26 |
+
<p>Your Ultimate Social Media Data & API Marketplace</p>
|
| 27 |
+
<p>
|
| 28 |
+
Professional data solutions for Douyin, Xiaohongshu, TikTok, Instagram, YouTube,
|
| 29 |
+
Twitter, and more.<br>
|
| 30 |
+
Real-time Data | Flexible APIs | Seamless Integration | Competitive Pricing with Discounts
|
| 31 |
+
</p>
|
| 32 |
+
<p>
|
| 33 |
+
<b>Discover TikHub.io Marketplace</b><br>
|
| 34 |
+
Buy and sell custom APIs, services, and social media solutions.<br>
|
| 35 |
+
Join a thriving ecosystem of developers, businesses, and content creators.
|
| 36 |
+
</p>
|
| 37 |
+
<p><em>Trusted by leading global influencer marketing and social media intelligence platforms</em></p>
|
| 38 |
+
</div>
|
| 39 |
+
</div>
|
| 40 |
+
|
| 41 |
+
## 👻Introduction
|
| 42 |
+
|
| 43 |
+
> 🚨If you need to use a private server to run this project, please refer to:[Deployment preparations](./README.md#%EF%B8%8F%E9%83%A8%E7%BD%B2%E5%89%8D%E7%9A%84%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C%E8%AF%B7%E4%BB%94%E7%BB%86%E9%98%85%E8%AF%BB),[Docker deployment](./README.md#%E9%83%A8%E7%BD%B2%E6%96%B9%E5%BC%8F%E4%BA%8C-docker),[One-click deployment](./README.md#%E9%83%A8%E7%BD%B2%E6%96%B9%E5%BC%8F%E4%B8%80-linux)
|
| 44 |
+
|
| 45 |
+
This project is based on[PyWebIO](https://github.com/pywebio/PyWebIO),[speedy](https://fastapi.tiangolo.com/),[HTTPX](https://www.python-httpx.org/), fast and asynchronous[Tik Tok](https://www.douyin.com/)/[TikTok](https://www.tiktok.com/)Data crawling tool, and realizes online batch analysis and downloading of videos or photo albums without watermarks through the Web, data crawling API, iOS shortcut command without watermark downloads and other functions. You can deploy or modify this project yourself to achieve more functions, or you can call it directly in your project[scraper.py](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/Stable/scraper.py)or install an existing[pip package](https://pypi.org/project/douyin-tiktok-scraper/)As a parsing library, it is easy to crawl data, etc.....
|
| 46 |
+
|
| 47 |
+
_Some simple application scenarios:_
|
| 48 |
+
|
| 49 |
+
_Download prohibited videos, perform data analysis, download without watermark on iOS (with[Shortcut command APP that comes with iOS](https://apps.apple.com/cn/app/%E5%BF%AB%E6%8D%B7%E6%8C%87%E4%BB%A4/id915249334)Cooperate with the API of this project to achieve in-app downloads or read clipboard downloads), etc....._
|
| 50 |
+
|
| 51 |
+
## 🔊 V4 version notes
|
| 52 |
+
|
| 53 |
+
- If you are interested in writing this project together, please add us on WeChat`Evil0ctal`Note: Github project reconstruction, everyone can communicate and learn from each other in the group. Advertising and illegal things are not allowed. It is purely for making friends and technical exchanges.
|
| 54 |
+
- This project uses`X-Bogus`Algorithms and`A_Bogus`The algorithm requests the Web API of Douyin and TikTok.
|
| 55 |
+
- Due to Douyin's risk control, after deploying this project, please**Obtain the cookie of Douyin website in the browser and replace it in config.yaml.**
|
| 56 |
+
- Please read the document below before raising an issue. Solutions to most problems will be included in the document.
|
| 57 |
+
- This project is completely free, but when using it, please comply with:[Apache-2.0 license](https://github.com/Evil0ctal/Douyin_TikTok_Download_API?tab=Apache-2.0-1-ov-file#readme)
|
| 58 |
+
|
| 59 |
+
## 🔖TikHub.io API
|
| 60 |
+
|
| 61 |
+
[TikHub.io](https://api.tikhub.io/)It is an API platform that provides various public data interfaces including Douyin and TikTok. If you want to support[Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API)For project development, we strongly recommend that you choose[TikHub.io](https://api.tikhub.io/)。
|
| 62 |
+
|
| 63 |
+
#### Features:
|
| 64 |
+
|
| 65 |
+
> 📦 Ready to use right out of the box
|
| 66 |
+
|
| 67 |
+
Simplify the use process and use the packaged SDK to quickly carry out development work. All API interfaces are designed based on RESTful architecture and are described and documented using OpenAPI specifications, with sample parameters included to ensure easier calling.
|
| 68 |
+
|
| 69 |
+
> 💰 Cost advantage
|
| 70 |
+
|
| 71 |
+
There are no preset package restrictions and no monthly usage thresholds. All consumption is billed immediately based on actual usage, and tiered billing is performed based on the user's daily requests. At the same time, free quota can be obtained through daily sign-in in the user backend. , and these free credits will not expire.
|
| 72 |
+
|
| 73 |
+
> ⚡️ Fast support
|
| 74 |
+
|
| 75 |
+
We have a large Discord community server, where administrators and other users will quickly reply to you and help you quickly solve current problems.
|
| 76 |
+
|
| 77 |
+
> 🎉Embrace open source
|
| 78 |
+
|
| 79 |
+
Part of TikHub's source code will be open sourced on Github, and it will sponsor authors of some open source projects.
|
| 80 |
+
|
| 81 |
+
#### Link:
|
| 82 |
+
|
| 83 |
+
- Github:[TikHub Github](https://github.com/TikHubIO)
|
| 84 |
+
- Discord:[Tikhub discord](https://discord.com/invite/aMEAS8Xsvz)
|
| 85 |
+
- Register:[TikTok signup](https://beta-web.tikhub.io/en-us/users/signup)
|
| 86 |
+
- API Docs:[TikHub API Docs](https://api.tikhub.io/)
|
| 87 |
+
|
| 88 |
+
## 🖥Demo site: I am very vulnerable...please do not stress test (·•᷄ࡇ•᷅ )
|
| 89 |
+
|
| 90 |
+
> 😾The online download function of the demo site has been turned off, and due to cookie reasons, Douyin's parsing and API services cannot guarantee availability on the Demo site.
|
| 91 |
+
|
| 92 |
+
🍔Web APP:<https://douyin.wtf/>
|
| 93 |
+
|
| 94 |
+
🍟API Document:<https://douyin.wtf/docs>
|
| 95 |
+
|
| 96 |
+
🌭TikHub API Document:<https://api.tikhub.io/docs>
|
| 97 |
+
|
| 98 |
+
💾iOS Shortcut (shortcut command):[Shortcut release](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/discussions/104?sort=top)
|
| 99 |
+
|
| 100 |
+
📦️Desktop downloader (recommended by warehouse):
|
| 101 |
+
|
| 102 |
+
- [Johnserf-Seed/TikTokDownload](https://github.com/Johnserf-Seed/TikTokDownload)
|
| 103 |
+
- [HFrost0/bilix](https://github.com/HFrost0/bilix)
|
| 104 |
+
- [Tairraos/TikDown - \[needs update\]](https://github.com/Tairraos/TikDown/)
|
| 105 |
+
|
| 106 |
+
## ⚗️Technology stack
|
| 107 |
+
|
| 108 |
+
- [/app/web](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/main/app/web)-[PyWebIO](https://www.pyweb.io/)
|
| 109 |
+
- [/app/api](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/main/app/api) - [FastAPI](https://fastapi.tiangolo.com/)
|
| 110 |
+
- [/crawlers](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/main/crawlers)-[HTTPX](https://www.python-httpx.org/)
|
| 111 |
+
|
| 112 |
+
> **_/crawlers_**
|
| 113 |
+
|
| 114 |
+
- Submits requests to the APIs of different platforms and retrieves data. After processing, a dictionary (dict) is returned; asynchronous operation is supported.
|
| 115 |
+
|
| 116 |
+
> **_/app/api_**
|
| 117 |
+
|
| 118 |
+
- Receives request parameters, processes the data with the related `Crawlers` classes, and returns it in JSON form; also handles video downloads and works with iOS Shortcuts for quick in-app calls. Asynchronous operation is supported.
|
| 119 |
+
|
| 120 |
+
> **_/app/web_**
|
| 121 |
+
|
| 122 |
+
- A simple web application built with `PyWebIO` that processes values entered on the web page using the related `Crawlers` classes and displays the resulting data on the page.
|
| 123 |
+
|
| 124 |
+
**_Most parameters of the above files can be modified in the corresponding `config.yaml`_**
|
| 125 |
+
|
| 126 |
+
## 💡Project file structure
|
| 127 |
+
|
| 128 |
+
./Douyin_TikTok_Download_API
|
| 129 |
+
├─app
|
| 130 |
+
│ ├─api
|
| 131 |
+
│ │ ├─endpoints
|
| 132 |
+
│ │ └─models
|
| 133 |
+
│ ├─download
|
| 134 |
+
│ └─web
|
| 135 |
+
│ └─views
|
| 136 |
+
└─crawlers
|
| 137 |
+
├─bilibili
|
| 138 |
+
│ └─web
|
| 139 |
+
├─douyin
|
| 140 |
+
│ └─web
|
| 141 |
+
├─hybrid
|
| 142 |
+
├─tiktok
|
| 143 |
+
│ ├─app
|
| 144 |
+
│ └─web
|
| 145 |
+
└─utils
|
| 146 |
+
|
| 147 |
+
## ✨Supported functions:
|
| 148 |
+
|
| 149 |
+
- Batch parsing on the web page (supports Douyin/TikTok mixed parsing)
|
| 150 |
+
- Download videos or photo albums online.
|
| 151 |
+
- A [pip package](https://pypi.org/project/douyin-tiktok-scraper/) is provided for convenient and quick import into your projects
|
| 152 |
+
- [iOS shortcut commands to quickly call API](https://apps.apple.com/cn/app/%E5%BF%AB%E6%8D%B7%E6%8C%87%E4%BB%A4/id915249334)Achieve in-app download of watermark-free videos/photo albums
|
| 153 |
+
- Complete API documentation ([Demo/Demonstration](https://api.douyin.wtf/docs))
|
| 154 |
+
- Rich API interface:
|
| 155 |
+
- Douyin web version API
|
| 156 |
+
|
| 157 |
+
- [x] Video data analysis
|
| 158 |
+
- [x] Get user homepage work data
|
| 159 |
+
- [x] Obtain the data of works liked by the user's homepage
|
| 160 |
+
- [x] Obtain the data of collected works on the user's homepage
|
| 161 |
+
- [x] Get user homepage information
|
| 162 |
+
- [x] Get user collection work data
|
| 163 |
+
- [x] Get user live stream data
|
| 164 |
+
- [x] Get the live streaming data of a specified user
|
| 165 |
+
- [x] Get the ranking of users who give gifts in the live broadcast room
|
| 166 |
+
- [x] Get single video comment data
|
| 167 |
+
- [x] Get the comment reply data of the specified video
|
| 168 |
+
- [x] Generate msToken
|
| 169 |
+
- [x] Generate verify_fp
|
| 170 |
+
- [x] Generate s_v_web_id
|
| 171 |
+
- [x] Generate X-Bogus parameters using interface URL
|
| 172 |
+
- [x] Generate A_Bogus parameters using interface URL
|
| 173 |
+
- [x] Extract a single user id
|
| 174 |
+
- [x] Extract list user id
|
| 175 |
+
- [x] Extract a single work id
|
| 176 |
+
- [x] Extract list work id
|
| 177 |
+
- [x] Extract live broadcast room number from list
|
| 178 |
+
- [x] Extract live broadcast room number from list
|
| 179 |
+
- TikTok web version API
|
| 180 |
+
|
| 181 |
+
- [x] Video data analysis
|
| 182 |
+
- [x] Get user homepage work data
|
| 183 |
+
- [x] Obtain the data of works liked by the user's homepage
|
| 184 |
+
- [x] Get user homepage information
|
| 185 |
+
- [x] Get fan data on user homepage
|
| 186 |
+
- [x] Get user homepage follow data
|
| 187 |
+
- [x] Get user homepage collection work data
|
| 188 |
+
- [x] Get user homepage collection data
|
| 189 |
+
- [x] Get user homepage playlist data
|
| 190 |
+
- [x] Get single video comment data
|
| 191 |
+
- [x] Get the comment reply data of the specified video
|
| 192 |
+
- [x] Generate msToken
|
| 193 |
+
- [x] Generate ttwid
|
| 194 |
+
- [x] Generate X-Bogus parameters using interface URL
|
| 195 |
+
- [x] Extract a single user sec_user_id
|
| 196 |
+
- [x] Extract list user sec_user_id
|
| 197 |
+
- [x] Extract a single work id
|
| 198 |
+
- [x] Extract list work id
|
| 199 |
+
- [x] Get user unique_id
|
| 200 |
+
- [x] Get list unique_id
|
| 201 |
+
- Bilibili web version API
|
| 202 |
+
- [x] Get individual video details
|
| 203 |
+
- [x] Get video stream address
|
| 204 |
+
- [x] Obtain user-published video work data
|
| 205 |
+
- [x] Get all the user's favorites information
|
| 206 |
+
- [x] Get video data in specified favorites
|
| 207 |
+
- [x] Get information about a specified user
|
| 208 |
+
- [x] Get comprehensive popular video information
|
| 209 |
+
- [x] Get comments for specified video
|
| 210 |
+
- [x] Get the reply to the specified comment under the video
|
| 211 |
+
- [x] Get the specified user's news
|
| 212 |
+
- [x] Get real-time video barrages
|
| 213 |
+
- [x] Get specified live broadcast room information
|
| 214 |
+
- [x] Get live room video stream
|
| 215 |
+
- [x] Get the anchors who are live broadcasting in the specified partition
|
| 216 |
+
- [x] Get a list of all live broadcast partitions
|
| 217 |
+
- [x] Obtain video sub-p information through bv number
|
| 218 |
+
|
| 219 |
+
* * *
|
| 220 |
+
|
| 221 |
+
## 📦Call the parsing library (obsolete and needs to be updated):
|
| 222 |
+
|
| 223 |
+
> 💡PyPi:<https://pypi.org/project/douyin-tiktok-scraper/>
|
| 224 |
+
|
| 225 |
+
Install the parsing library:`pip install douyin-tiktok-scraper`
|
| 226 |
+
|
| 227 |
+
```python
|
| 228 |
+
import asyncio
|
| 229 |
+
from douyin_tiktok_scraper.scraper import Scraper
|
| 230 |
+
|
| 231 |
+
api = Scraper()
|
| 232 |
+
|
| 233 |
+
async def hybrid_parsing(url: str) -> dict:
|
| 234 |
+
# Hybrid parsing(Douyin/TikTok URL)
|
| 235 |
+
result = await api.hybrid_parsing(url)
|
| 236 |
+
print(f"The hybrid parsing result:\n {result}")
|
| 237 |
+
return result
|
| 238 |
+
|
| 239 |
+
asyncio.run(hybrid_parsing(url=input("Paste Douyin/TikTok/Bilibili share URL here: ")))
|
| 240 |
+
```
|
| 241 |
+
|
| 242 |
+
## 🗺️Supported submission formats:
|
| 243 |
+
|
| 244 |
+
> 💡Tip: Including but not limited to the following examples. If you encounter link parsing failure, please open a new one.[issue](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)
|
| 245 |
+
|
| 246 |
+
- Douyin sharing password (copy in APP)
|
| 247 |
+
|
| 248 |
+
```text
|
| 249 |
+
7.43 pda:/ 让你在几秒钟之内记住我 https://v.douyin.com/L5pbfdP/ 复制此链接,打开Dou音搜索,直接观看视频!
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
- Douyin short URL (copy within APP)
|
| 253 |
+
|
| 254 |
+
```text
|
| 255 |
+
https://v.douyin.com/L4FJNR3/
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
- Douyin normal URL (copy from web version)
|
| 259 |
+
|
| 260 |
+
```text
|
| 261 |
+
https://www.douyin.com/video/6914948781100338440
|
| 262 |
+
```
|
| 263 |
+
|
| 264 |
+
- Douyin discovery page URL (APP copy)
|
| 265 |
+
|
| 266 |
+
```text
|
| 267 |
+
https://www.douyin.com/discover?modal_id=7069543727328398622
|
| 268 |
+
```
|
| 269 |
+
|
| 270 |
+
- TikTok short URL (copy within APP)
|
| 271 |
+
|
| 272 |
+
```text
|
| 273 |
+
https://www.tiktok.com/t/ZTR9nDNWq/
|
| 274 |
+
```
|
| 275 |
+
|
| 276 |
+
- TikTok normal URL (copy from web version)
|
| 277 |
+
|
| 278 |
+
```text
|
| 279 |
+
https://www.tiktok.com/@evil0ctal/video/7156033831819037994
|
| 280 |
+
```
|
| 281 |
+
|
| 282 |
+
- Douyin/TikTok batch URLs (no need to separate them with symbols)
|
| 283 |
+
|
| 284 |
+
```text
|
| 285 |
+
https://v.douyin.com/L4NpDJ6/
|
| 286 |
+
https://www.douyin.com/video/7126745726494821640
|
| 287 |
+
2.84 nqe:/ 骑白马的也可以是公主%%百万转场变身https://v.douyin.com/L4FJNR3/ 复制此链接,打开Dou音搜索,直接观看视频!
|
| 288 |
+
https://www.tiktok.com/t/ZTR9nkkmL/
|
| 289 |
+
https://www.tiktok.com/t/ZTR9nDNWq/
|
| 290 |
+
https://www.tiktok.com/@evil0ctal/video/7156033831819037994
|
| 291 |
+
```
|
| 292 |
+
|
| 293 |
+
## 🛰️API documentation
|
| 294 |
+
|
| 295 |
+
**_API documentation:_**
|
| 296 |
+
|
| 297 |
+
local:<http://localhost/docs>
|
| 298 |
+
|
| 299 |
+
Online:<https://api.douyin.wtf/docs>
|
| 300 |
+
|
| 301 |
+
**_API demo:_**
|
| 302 |
+
|
| 303 |
+
- Crawl video data (TikTok or Douyin hybrid analysis)`https://api.douyin.wtf/api/hybrid/video_data?url=[视频链接/Video URL]&minimal=false`
|
| 304 |
+
- Download videos/photo albums (TikTok or Douyin hybrid analysis)`https://api.douyin.wtf/api/download?url=[视频链接/Video URL]&prefix=true&with_watermark=false`
|
| 305 |
+
|
| 306 |
+
**_For more demonstrations, please see the documentation..._**
|
| 307 |
+
|
| 308 |
+
## ⚠️Preparation work before deployment (please read carefully):
|
| 309 |
+
|
| 310 |
+
- You need to solve the problem of crawler cookie risk control by yourself, otherwise the interface may become unusable. After modifying the configuration file, you need to restart the service for it to take effect, and it is best to use cookies from accounts that you have already logged in to.
|
| 311 |
+
- Douyin web cookie (obtain and replace the cookie in the configuration file below):
|
| 312 |
+
- <https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/30e56e5a7f97f87d60b1045befb1f6db147f8590/crawlers/douyin/web/config.yaml#L7>
|
| 313 |
+
- TikTok web-side cookies (obtain and replace the cookies in the configuration file below):
|
| 314 |
+
- <https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/30e56e5a7f97f87d60b1045befb1f6db147f8590/crawlers/tiktok/web/config.yaml#L6>
|
| 315 |
+
- I turned off the online download function of the demo site. The video someone downloaded was so huge that it crashed the server. You can right-click on the web page parsing results page to save the video...
|
| 316 |
+
- The cookies of the demo site are my own and are not guaranteed to be valid for a long time. They only serve as a demonstration. If you deploy it yourself, please obtain the cookies yourself.
|
| 317 |
+
- If you directly access the video link returned by the TikTok Web API, an HTTP 403 error will occur. Please use this project's `/api/download` endpoint to download TikTok videos. This endpoint has been manually disabled on the demo site, so you need to deploy this project yourself.
|
| 318 |
+
- Here is a **video tutorial** you can refer to: **_<https://www.bilibili.com/video/BV1vE421j7NR/>_**
|
| 319 |
+
|
| 320 |
+
## 💻Deployment (Method 1 Linux)
|
| 321 |
+
|
| 322 |
+
> 💡Tips: It is best to deploy this project to a server in the United States, otherwise strange BUGs may occur.
|
| 323 |
+
|
| 324 |
+
I recommend using a [Digitalocean](https://www.digitalocean.com/) server, because you can get free credits.
|
| 325 |
+
|
| 326 |
+
Use my invitation link to sign up and you can get a $200 credit, and when you spend $25 on it, I can also get a $25 reward.
|
| 327 |
+
|
| 328 |
+
My invitation link:
|
| 329 |
+
|
| 330 |
+
<https://m.do.co/c/9f72a27dec35>
|
| 331 |
+
|
| 332 |
+
> Use script to deploy this project with one click
|
| 333 |
+
|
| 334 |
+
- This project provides a one-click deployment script that can quickly deploy this project on the server.
|
| 335 |
+
- The script was tested on Ubuntu 20.04 LTS. Other systems may have problems. If there are any problems, please solve them yourself.
|
| 336 |
+
- Download using wget command[install.sh](https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/bash/install.sh)to the server and run
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
wget -O install.sh https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/bash/install.sh && sudo bash install.sh
|
| 340 |
+
|
| 341 |
+
> Start/stop service
|
| 342 |
+
|
| 343 |
+
- Use the following commands to control running or stopping the service:
|
| 344 |
+
- `sudo systemctl start Douyin_TikTok_Download_API.service`
|
| 345 |
+
- `sudo systemctl stop Douyin_TikTok_Download_API.service`
|
| 346 |
+
|
| 347 |
+
> Turn on/off automatic operation at startup
|
| 348 |
+
|
| 349 |
+
- Use the following commands to set the service to run automatically at boot or cancel automatic run at boot:
|
| 350 |
+
- `sudo systemctl enable Douyin_TikTok_Download_API.service`
|
| 351 |
+
- `sudo systemctl disable Douyin_TikTok_Download_API.service`
|
| 352 |
+
|
| 353 |
+
> Update project
|
| 354 |
+
|
| 355 |
+
- When the project is updated, ensure that the update script is executed in the virtual environment and all dependencies are updated. Enter the project bash directory and run update.sh:
|
| 356 |
+
- `cd /www/wwwroot/Douyin_TikTok_Download_API/bash && sudo bash update.sh`
|
| 357 |
+
|
| 358 |
+
## 💽Deployment (Method 2 Docker)
|
| 359 |
+
|
| 360 |
+
> 💡Tip: Docker deployment is the simplest deployment method and is suitable for users who are not familiar with Linux. This method is suitable for ensuring environment consistency, isolation and quick setup.
|
| 361 |
+
> Please use a server that can normally access Douyin or TikTok, otherwise strange BUG may occur.
|
| 362 |
+
|
| 363 |
+
### Preparation
|
| 364 |
+
|
| 365 |
+
Before you begin, make sure Docker is installed on your system. If you haven't installed Docker yet, you can install it from[Docker official website](https://www.docker.com/products/docker-desktop/)Download and install.
|
| 366 |
+
|
| 367 |
+
### Step 1: Pull the Docker image
|
| 368 |
+
|
| 369 |
+
First, pull the latest Douyin_TikTok_Download_API image from Docker Hub.
|
| 370 |
+
|
| 371 |
+
```bash
|
| 372 |
+
docker pull evil0ctal/douyin_tiktok_download_api:latest
|
| 373 |
+
```
|
| 374 |
+
|
| 375 |
+
If needed, you can replace `latest` with the tag of the specific version you want to deploy.
|
| 376 |
+
|
| 377 |
+
### Step 2: Run the Docker container
|
| 378 |
+
|
| 379 |
+
After pulling the image, you can start a container from this image. Here are the commands to run the container, including basic configuration:
|
| 380 |
+
|
| 381 |
+
```bash
|
| 382 |
+
docker run -d --name douyin_tiktok_api -p 80:80 evil0ctal/douyin_tiktok_download_api
|
| 383 |
+
```
|
| 384 |
+
|
| 385 |
+
Each part of this command does the following:
|
| 386 |
+
|
| 387 |
+
- `-d`: Run the container in the background (detached mode).
|
| 388 |
+
- `--name douyin_tiktok_api`: Name the container `douyin_tiktok_api`.
|
| 389 |
+
- `-p 80:80`: Map port 80 on the host to port 80 of the container. Adjust the port number based on your configuration or port availability.
|
| 390 |
+
- `evil0ctal/douyin_tiktok_download_api`: The name of the Docker image to use.
|
| 391 |
+
|
| 392 |
+
### Step 3: Verify the container is running
|
| 393 |
+
|
| 394 |
+
Check if your container is running using the following command:
|
| 395 |
+
|
| 396 |
+
```bash
|
| 397 |
+
docker ps
|
| 398 |
+
```
|
| 399 |
+
|
| 400 |
+
This will list all active containers. Find `douyin_tiktok_api` to confirm that it is functioning properly.
|
| 401 |
+
|
| 402 |
+
### Step 4: Access the App
|
| 403 |
+
|
| 404 |
+
Once the container is running, you should be able to access Douyin_TikTok_Download_API via `http://localhost` or an API client. Adjust the URL if a different port is configured or if you are accessing it from a remote location.
|
| 405 |
+
|
| 406 |
+
### Optional: Custom Docker commands
|
| 407 |
+
|
| 408 |
+
For more advanced deployments, you may wish to customize Docker commands to include environment variables, volume mounts for persistent data, or other Docker parameters. Here is an example:
|
| 409 |
+
|
| 410 |
+
```bash
|
| 411 |
+
docker run -d --name douyin_tiktok_api -p 80:80 \
|
| 412 |
+
-v /path/to/your/data:/data \
|
| 413 |
+
-e MY_ENV_VAR=my_value \
|
| 414 |
+
evil0ctal/douyin_tiktok_download_api
|
| 415 |
+
```
|
| 416 |
+
|
| 417 |
+
- `-v /path/to/your/data:/data`: Mount the host directory `/path/to/your/data` to the container's `/data` directory for persisting or sharing data.
|
| 418 |
+
- `-e MY_ENV_VAR=my_value`: Set the environment variable `MY_ENV_VAR` inside the container, with the value `my_value`.
|
| 419 |
+
|
| 420 |
+
### Configuration file modification
|
| 421 |
+
|
| 422 |
+
Most of the project's configuration can be modified in the `config.yaml` files in the following directories:
|
| 423 |
+
|
| 424 |
+
- `/crawlers/douyin/web/config.yaml`
|
| 425 |
+
- `/crawlers/tiktok/web/config.yaml`
|
| 426 |
+
- `/crawlers/tiktok/app/config.yaml`
|
| 427 |
+
|
| 428 |
+
### Step 5: Stop and remove the container
|
| 429 |
+
|
| 430 |
+
When you need to stop and remove a container, use the following commands:
|
| 431 |
+
|
| 432 |
+
```bash
|
| 433 |
+
# Stop
|
| 434 |
+
docker stop douyin_tiktok_api
|
| 435 |
+
|
| 436 |
+
# Remove
|
| 437 |
+
docker rm douyin_tiktok_api
|
| 438 |
+
```
|
| 439 |
+
|
| 440 |
+
## 📸Screenshot
|
| 441 |
+
|
| 442 |
+
**_API speed test (compared to official API)_**
|
| 443 |
+
|
| 444 |
+
<details><summary>🔎点击展开截图</summary>
|
| 445 |
+
|
| 446 |
+
Douyin official API:
|
| 447 |
+
|
| 448 |
+
API of this project:
|
| 449 |
+
|
| 450 |
+
TikTok official API:
|
| 451 |
+
|
| 452 |
+
API of this project:
|
| 453 |
+
|
| 454 |
+
</details>
|
| 455 |
+
<hr>
|
| 456 |
+
|
| 457 |
+
**_Project interface_**
|
| 458 |
+
|
| 459 |
+
<details><summary>🔎点击展开截图</summary>
|
| 460 |
+
|
| 461 |
+
Web main interface:
|
| 462 |
+
|
| 463 |
+

|
| 464 |
+
|
| 465 |
+
Web main interface:
|
| 466 |
+
|
| 467 |
+

|
| 468 |
+
|
| 469 |
+
</details>
|
| 470 |
+
<hr>
|
| 471 |
+
|
| 472 |
+
## 📜 Star History
|
| 473 |
+
|
| 474 |
+
[](https://star-history.com/#Evil0ctal/Douyin_TikTok_Download_API&Timeline)
|
| 475 |
+
|
| 476 |
+
[Apache-2.0 license](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/Stable/LICENSE)
|
| 477 |
+
|
| 478 |
+
> Start: 2021/11/06
|
| 479 |
+
|
| 480 |
+
> GitHub:[@Evil0ctal](https://github.com/Evil0ctal)
|
README.md
CHANGED
|
@@ -1,10 +1,508 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
-
title: Douyindl
|
| 3 |
-
emoji: 🦀
|
| 4 |
-
colorFrom: green
|
| 5 |
-
colorTo: gray
|
| 6 |
-
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
-
---
|
| 9 |
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
<a href="https://douyin.wtf/" alt="logo" ><img src="https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/logo/logo192.png" width="120"/></a>
|
| 3 |
+
</div>
|
| 4 |
+
<h1 align="center">Douyin_TikTok_Download_API(抖音/TikTok API)</h1>
|
| 5 |
+
|
| 6 |
+
<div align="center">
|
| 7 |
+
|
| 8 |
+
[English](./README.en.md) | [简体中文](./README.md)
|
| 9 |
+
|
| 10 |
+
🚀「Douyin_TikTok_Download_API」是一个开箱即用的高性能异步[抖音](https://www.douyin.com)|[TikTok](https://www.tiktok.com)|[Bilibili](https://www.bilibili.com)数据爬取工具,支持API调用,在线批量解析及下载。
|
| 11 |
+
|
| 12 |
+
[](LICENSE)
|
| 13 |
+
[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/releases/latest)
|
| 14 |
+
[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/stargazers)
|
| 15 |
+
[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/network/members)
|
| 16 |
+
[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)
|
| 17 |
+
[](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues?q=is%3Aissue+is%3Aclosed)
|
| 18 |
+

|
| 19 |
+
<br>
|
| 20 |
+
[](https://pypi.org/project/douyin-tiktok-scraper/)
|
| 21 |
+
[](https://pypi.org/project/douyin-tiktok-scraper/#files)
|
| 22 |
+
[](https://pypi.org/project/douyin-tiktok-scraper/)
|
| 23 |
+
[](https://pypi.org/project/douyin-tiktok-scraper/)
|
| 24 |
+
<br>
|
| 25 |
+
[](https://api.douyin.wtf/docs)
|
| 26 |
+
[](https://api.tikhub.io/docs)
|
| 27 |
+
<br>
|
| 28 |
+
[](https://afdian.net/@evil0ctal)
|
| 29 |
+
[](https://ko-fi.com/evil0ctal)
|
| 30 |
+
[](https://www.patreon.com/evil0ctal)
|
| 31 |
+
|
| 32 |
+
</div>
|
| 33 |
+
|
| 34 |
+
## 赞助商
|
| 35 |
+
|
| 36 |
+
这些赞助商已付费放置在这里,**Douyin_TikTok_Download_API** 项目将永远免费且开源。如果您希望成为该项目的赞助商,请查看我的 [GitHub 赞助商页面](https://github.com/sponsors/evil0ctal)。
|
| 37 |
+
|
| 38 |
+
<div align="center">
|
| 39 |
+
<a href="https://www.tikhub.io/" target="_blank">
|
| 40 |
+
<img src="https://tikhub.io/wp-content/uploads/2024/11/Main-Logo.webp" width="100" alt="TikHub.io - Global Social Data & API Marketplace">
|
| 41 |
+
</a>
|
| 42 |
+
<div>
|
| 43 |
+
<h2><b>TikHub.io</b></h2>
|
| 44 |
+
<p>Your Ultimate Social Media Data & API Marketplace</p>
|
| 45 |
+
<p>
|
| 46 |
+
Professional data solutions for Douyin, Xiaohongshu, TikTok, Instagram, YouTube,
|
| 47 |
+
Twitter, and more.<br>
|
| 48 |
+
Real-time Data | Flexible APIs | Seamless Integration | Competitive Pricing with Discounts
|
| 49 |
+
</p>
|
| 50 |
+
<p>
|
| 51 |
+
<b>Discover TikHub.io Marketplace</b><br>
|
| 52 |
+
Buy and sell custom APIs, services, and social media solutions.<br>
|
| 53 |
+
Join a thriving ecosystem of developers, businesses, and content creators.
|
| 54 |
+
</p>
|
| 55 |
+
<p><em>Trusted by leading global influencer marketing and social media intelligence platforms</em></p>
|
| 56 |
+
</div>
|
| 57 |
+
</div>
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
## 👻介绍
|
| 61 |
+
|
| 62 |
+
> 🚨如需使用私有服务器运行本项目,请参考:[部署准备工作](./README.md#%EF%B8%8F%E9%83%A8%E7%BD%B2%E5%89%8D%E7%9A%84%E5%87%86%E5%A4%87%E5%B7%A5%E4%BD%9C%E8%AF%B7%E4%BB%94%E7%BB%86%E9%98%85%E8%AF%BB), [Docker部署](./README.md#%E9%83%A8%E7%BD%B2%E6%96%B9%E5%BC%8F%E4%BA%8C-docker), [一键部署](./README.md#%E9%83%A8%E7%BD%B2%E6%96%B9%E5%BC%8F%E4%B8%80-linux)
|
| 63 |
+
|
| 64 |
+
本项目是基于 [PyWebIO](https://github.com/pywebio/PyWebIO),[FastAPI](https://fastapi.tiangolo.com/),[HTTPX](https://www.python-httpx.org/),快速异步的[抖音](https://www.douyin.com/)/[TikTok](https://www.tiktok.com/)数据爬取工具,并通过Web端实现在线批量解析以及下载无水印视频或图集,数据爬取API,iOS快捷指令无水印下载等功能。你可以自己部署或改造本项目实现更多功能,也可以在你的项目中直接调用[scraper.py](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/Stable/scraper.py)或安装现有的[pip包](https://pypi.org/project/douyin-tiktok-scraper/)作为解析库轻松爬取数据等.....
|
| 65 |
+
|
| 66 |
+
*一些简单的运用场景:*
|
| 67 |
+
|
| 68 |
+
*下载禁止下载的视频,进行数据分析,iOS无水印下载(搭配[iOS自带的快捷指令APP](https://apps.apple.com/cn/app/%E5%BF%AB%E6%8D%B7%E6%8C%87%E4%BB%A4/id915249334)
|
| 69 |
+
配合本项目API实现应用内下载或读取剪贴板下载)等.....*
|
| 70 |
+
|
| 71 |
+
## 🔊 V4 版本备注
|
| 72 |
+
|
| 73 |
+
- 感兴趣一起写这个项目的给请加微信`Evil0ctal`备注github项目重构,大家可以在群里互相交流学习,不允许发广告以及违法的东西,纯粹交朋友和技术交流。
|
| 74 |
+
- 本项目使用`X-Bogus`算法以及`A_Bogus`算法请求抖音和TikTok的Web API。
|
| 75 |
+
- 由于Douyin的风控,部署完本项目后请在**浏览器中获取Douyin网站的Cookie然后在config.yaml中进行替换。**
|
| 76 |
+
- 请在提出issue之前先阅读下方的文档,大多数问题的解决方法都会包含在文档中。
|
| 77 |
+
- 本项目是完全免费的,但使用时请遵守:[Apache-2.0 license](https://github.com/Evil0ctal/Douyin_TikTok_Download_API?tab=Apache-2.0-1-ov-file#readme)
|
| 78 |
+
|
| 79 |
+
## 🔖TikHub.io API
|
| 80 |
+
|
| 81 |
+
[TikHub.io](https://api.tikhub.io/)是一个API平台,提供包括Douyin、TikTok在内的各种公开数据接口,如果您想支持 [Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API) 项目的开发,我们强烈建议您选择[TikHub.io](https://api.tikhub.io/)。
|
| 82 |
+
|
| 83 |
+
#### 特点:
|
| 84 |
+
|
| 85 |
+
> 📦 开箱即用
|
| 86 |
+
|
| 87 |
+
简化使用流程,利用封装好的SDK迅速开展开发工作。所有API接口均依据RESTful架构设计,并使用OpenAPI规范进行描述和文档化,附带示例参数,确保调用更加简便。
|
| 88 |
+
|
| 89 |
+
> 💰 成本优势
|
| 90 |
+
|
| 91 |
+
不预设套餐限制,没有月度使用门槛,所有消费按实际使用量即时计费,并且根据用户每日的请求量进行阶梯式计费,同时可以通过每日签到在用户后台进行签到获取免费的额度,并且这些免费额度不会过期。
|
| 92 |
+
|
| 93 |
+
> ⚡️ 快速支持
|
| 94 |
+
|
| 95 |
+
我们有一个庞大的Discord社区服务器,管理员和其他用户会在服务器中快速的回复你,帮助你快速解决当前的问题。
|
| 96 |
+
|
| 97 |
+
> 🎉 拥抱开源
|
| 98 |
+
|
| 99 |
+
TikHub的部分源代码会开源在Github上,并且会赞助一些开源项目的作者。
|
| 100 |
+
|
| 101 |
+
#### 链接:
|
| 102 |
+
|
| 103 |
+
- Github: [TikHub Github](https://github.com/TikHubIO)
|
| 104 |
+
- Discord: [TikHub Discord](https://discord.com/invite/aMEAS8Xsvz)
|
| 105 |
+
- Register: [TikHub signup](https://beta-web.tikhub.io/en-us/users/signup)
|
| 106 |
+
- API Docs: [TikHub API Docs](https://api.tikhub.io/)
|
| 107 |
+
|
| 108 |
+
## 🖥演示站点: 我很脆弱...请勿压测(·•᷄ࡇ•᷅ )
|
| 109 |
+
|
| 110 |
+
> 😾演示站点的在线下载功能已关闭,并且由于Cookie原因,Douyin的解析以及API服务在Demo站点无法保证可用性。
|
| 111 |
+
|
| 112 |
+
🍔Web APP: [https://douyin.wtf/](https://douyin.wtf/)
|
| 113 |
+
|
| 114 |
+
🍟API Document: [https://douyin.wtf/docs](https://douyin.wtf/docs)
|
| 115 |
+
|
| 116 |
+
🌭TikHub API Document: [https://api.tikhub.io/docs](https://api.tikhub.io/docs)
|
| 117 |
+
|
| 118 |
+
💾iOS Shortcut(快捷指令): [Shortcut release](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/discussions/104?sort=top)
|
| 119 |
+
|
| 120 |
+
📦️桌面端下载器(仓库推荐):
|
| 121 |
+
|
| 122 |
+
- [Johnserf-Seed/TikTokDownload](https://github.com/Johnserf-Seed/TikTokDownload)
|
| 123 |
+
- [HFrost0/bilix](https://github.com/HFrost0/bilix)
|
| 124 |
+
- [Tairraos/TikDown - [需更新]](https://github.com/Tairraos/TikDown/)
|
| 125 |
+
|
| 126 |
+
## ⚗️技术栈
|
| 127 |
+
|
| 128 |
+
* [/app/web](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/main/app/web) - [PyWebIO](https://www.pyweb.io/)
|
| 129 |
+
* [/app/api](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/main/app/api) - [FastAPI](https://fastapi.tiangolo.com/)
|
| 130 |
+
* [/crawlers](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/main/crawlers) - [HTTPX](https://www.python-httpx.org/)
|
| 131 |
+
|
| 132 |
+
> ***/crawlers***
|
| 133 |
+
|
| 134 |
+
- 向不同平台的API提交请求并取回数据,处理后返回字典(dict),支持异步。
|
| 135 |
+
|
| 136 |
+
> ***/app/api***
|
| 137 |
+
|
| 138 |
+
- 获得请求参数并使用`Crawlers`相关类处理数据后以JSON形式返回,视频下载,配合iOS快捷指令实现快速调用,支持异步。
|
| 139 |
+
|
| 140 |
+
> ***/app/web***
|
| 141 |
+
|
| 142 |
+
- 使用`PyWebIO`制作的简易Web程序,将网页输入的值进行处理后使用`Crawlers`相关类处理接口输出相关数据在网页上。
|
| 143 |
+
|
| 144 |
+
***以上文件的参数大多可在对应的`config.yaml`中进行修改***
|
| 145 |
+
|
| 146 |
+
## 💡项目文件结构
|
| 147 |
+
|
| 148 |
+
```
|
| 149 |
+
./Douyin_TikTok_Download_API
|
| 150 |
+
├─app
|
| 151 |
+
│ ├─api
|
| 152 |
+
│ │ ├─endpoints
|
| 153 |
+
│ │ └─models
|
| 154 |
+
│ ├─download
|
| 155 |
+
│ └─web
|
| 156 |
+
│ └─views
|
| 157 |
+
└─crawlers
|
| 158 |
+
├─bilibili
|
| 159 |
+
│ └─web
|
| 160 |
+
├─douyin
|
| 161 |
+
│ └─web
|
| 162 |
+
├─hybrid
|
| 163 |
+
├─tiktok
|
| 164 |
+
│ ├─app
|
| 165 |
+
│ └─web
|
| 166 |
+
└─utils
|
| 167 |
+
```
|
| 168 |
+
|
| 169 |
+
## ✨支持功能:
|
| 170 |
+
|
| 171 |
+
- 网页端批量解析(支持抖音/TikTok混合解析)
|
| 172 |
+
- 在线下载视频或图集。
|
| 173 |
+
- 制作[pip包](https://pypi.org/project/douyin-tiktok-scraper/)方便快速导入你的项目
|
| 174 |
+
- [iOS快捷指令快速调用API](https://apps.apple.com/cn/app/%E5%BF%AB%E6%8D%B7%E6%8C%87%E4%BB%A4/id915249334)实现应用内下载无水印视频/图集
|
| 175 |
+
- 完善的API文档([Demo/演示](https://api.douyin.wtf/docs))
|
| 176 |
+
- 丰富的API接口:
|
| 177 |
+
- 抖音网页版API
|
| 178 |
+
|
| 179 |
+
- [x] 视频数据解析
|
| 180 |
+
- [x] 获取用户主页作品数据
|
| 181 |
+
- [x] 获取用户主页喜欢作品数据
|
| 182 |
+
- [x] 获取用户主页收藏作品数据
|
| 183 |
+
- [x] 获取用户主页信息
|
| 184 |
+
- [x] 获取用户合辑作品数据
|
| 185 |
+
- [x] 获取用户直播流数据
|
| 186 |
+
- [x] 获取指定用户的直播流数据
|
| 187 |
+
- [x] 获取直播间送礼用户排行榜
|
| 188 |
+
- [x] 获取单个视频评论数据
|
| 189 |
+
- [x] 获取指定视频的评论回复数据
|
| 190 |
+
- [x] 生成msToken
|
| 191 |
+
- [x] 生成verify_fp
|
| 192 |
+
- [x] 生成s_v_web_id
|
| 193 |
+
- [x] 使用接口网址生成X-Bogus参数
|
| 194 |
+
- [x] 使用接口网址生成A_Bogus参数
|
| 195 |
+
- [x] 提取单个用户id
|
| 196 |
+
- [x] 提取列表用户id
|
| 197 |
+
- [x] 提取单个作品id
|
| 198 |
+
- [x] 提取列表作品id
|
| 199 |
+
- [x] 提取列表直播间号
|
| 200 |
+
- [x] 提取列表直播间号
|
| 201 |
+
- TikTok网页版API
|
| 202 |
+
|
| 203 |
+
- [x] 视频数据解析
|
| 204 |
+
- [x] 获取用户主页作品数据
|
| 205 |
+
- [x] 获取用户主页喜欢作品数据
|
| 206 |
+
- [x] 获取用户主页信息
|
| 207 |
+
- [x] 获取用户主页粉丝数据
|
| 208 |
+
- [x] 获取用户主页关注数据
|
| 209 |
+
- [x] 获取用户主页合辑作品数据
|
| 210 |
+
      - [x] 获取用户主页收藏数据
|
| 211 |
+
- [x] 获取用户主页播放列表数据
|
| 212 |
+
- [x] 获取单个视频评论数据
|
| 213 |
+
- [x] 获取指定视频的评论回复数据
|
| 214 |
+
- [x] 生成msToken
|
| 215 |
+
- [x] 生成ttwid
|
| 216 |
+
- [x] 使用接口网址生成X-Bogus参数
|
| 217 |
+
- [x] 提取单个用户sec_user_id
|
| 218 |
+
- [x] 提取列表用户sec_user_id
|
| 219 |
+
- [x] 提取单个作品id
|
| 220 |
+
- [x] 提取列表作品id
|
| 221 |
+
- [x] 获取用户unique_id
|
| 222 |
+
- [x] 获取列表unique_id
|
| 223 |
+
- 哔哩哔哩网页版API
|
| 224 |
+
- [x] 获取单个视频详情信息
|
| 225 |
+
- [x] 获取视频流地址
|
| 226 |
+
- [x] 获取用户发布视频作品数据
|
| 227 |
+
- [x] 获取用户所有收藏夹信息
|
| 228 |
+
- [x] 获取指定收藏夹内视频数据
|
| 229 |
+
- [x] 获取指定用户的信息
|
| 230 |
+
- [x] 获取综合热门视频信息
|
| 231 |
+
- [x] 获取指定视频的评论
|
| 232 |
+
- [x] 获取视频下指定评论的回复
|
| 233 |
+
- [x] 获取指定用户动态
|
| 234 |
+
- [x] 获取视频实时弹幕
|
| 235 |
+
- [x] 获取指定直播间信息
|
| 236 |
+
- [x] 获取直播间视频流
|
| 237 |
+
- [x] 获取指定分区正在直播的主播
|
| 238 |
+
- [x] 获取所有直播分区列表
|
| 239 |
+
- [x] 通过bv号获得视频分p信息
|
| 240 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
|
| 242 |
+
## 📦调用解析库(已废弃需要更新):
|
| 243 |
+
|
| 244 |
+
> 💡PyPi:[https://pypi.org/project/douyin-tiktok-scraper/](https://pypi.org/project/douyin-tiktok-scraper/)
|
| 245 |
+
|
| 246 |
+
安装解析库:`pip install douyin-tiktok-scraper`
|
| 247 |
+
|
| 248 |
+
```python
|
| 249 |
+
import asyncio
|
| 250 |
+
from douyin_tiktok_scraper.scraper import Scraper
|
| 251 |
+
|
| 252 |
+
api = Scraper()
|
| 253 |
+
|
| 254 |
+
async def hybrid_parsing(url: str) -> dict:
|
| 255 |
+
# Hybrid parsing(Douyin/TikTok URL)
|
| 256 |
+
result = await api.hybrid_parsing(url)
|
| 257 |
+
print(f"The hybrid parsing result:\n {result}")
|
| 258 |
+
return result
|
| 259 |
+
|
| 260 |
+
asyncio.run(hybrid_parsing(url=input("Paste Douyin/TikTok/Bilibili share URL here: ")))
|
| 261 |
+
```
|
| 262 |
+
|
| 263 |
+
## 🗺️支持的提交格式:
|
| 264 |
+
|
| 265 |
+
> 💡提示:包含但不仅限于以下例子,如果遇到链接解析失败请开启一个新 [issue](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)
|
| 266 |
+
|
| 267 |
+
- 抖音分享口令 (APP内复制)
|
| 268 |
+
|
| 269 |
+
```text
|
| 270 |
+
7.43 pda:/ 让你在几秒钟之内记住我 https://v.douyin.com/L5pbfdP/ 复制此链接,打开Dou音搜索,直接观看视频!
|
| 271 |
+
```
|
| 272 |
+
|
| 273 |
+
- 抖音短网址 (APP内复制)
|
| 274 |
+
|
| 275 |
+
```text
|
| 276 |
+
https://v.douyin.com/L4FJNR3/
|
| 277 |
+
```
|
| 278 |
+
|
| 279 |
+
- 抖音正常网址 (网页版复制)
|
| 280 |
+
|
| 281 |
+
```text
|
| 282 |
+
https://www.douyin.com/video/6914948781100338440
|
| 283 |
+
```
|
| 284 |
+
|
| 285 |
+
- 抖音发现页网址 (APP复制)
|
| 286 |
+
|
| 287 |
+
```text
|
| 288 |
+
https://www.douyin.com/discover?modal_id=7069543727328398622
|
| 289 |
+
```
|
| 290 |
+
|
| 291 |
+
- TikTok短网址 (APP内复制)
|
| 292 |
+
|
| 293 |
+
```text
|
| 294 |
+
https://www.tiktok.com/t/ZTR9nDNWq/
|
| 295 |
+
```
|
| 296 |
+
|
| 297 |
+
- TikTok正常网址 (网页版复制)
|
| 298 |
+
|
| 299 |
+
```text
|
| 300 |
+
https://www.tiktok.com/@evil0ctal/video/7156033831819037994
|
| 301 |
+
```
|
| 302 |
+
|
| 303 |
+
- 抖音/TikTok批量网址(无需使用符号隔开)
|
| 304 |
+
|
| 305 |
+
```text
|
| 306 |
+
https://v.douyin.com/L4NpDJ6/
|
| 307 |
+
https://www.douyin.com/video/7126745726494821640
|
| 308 |
+
2.84 nqe:/ 骑白马的也可以是公主%%百万转场变身https://v.douyin.com/L4FJNR3/ 复制此链接,打开Dou音搜索,直接观看视频!
|
| 309 |
+
https://www.tiktok.com/t/ZTR9nkkmL/
|
| 310 |
+
https://www.tiktok.com/t/ZTR9nDNWq/
|
| 311 |
+
https://www.tiktok.com/@evil0ctal/video/7156033831819037994
|
| 312 |
+
```
|
| 313 |
+
|
| 314 |
+
## 🛰️API文档
|
| 315 |
+
|
| 316 |
+
***API文档:***
|
| 317 |
+
|
| 318 |
+
本地:[http://localhost/docs](http://localhost/docs)
|
| 319 |
+
|
| 320 |
+
在线:[https://api.douyin.wtf/docs](https://api.douyin.wtf/docs)
|
| 321 |
+
|
| 322 |
+
***API演示:***
|
| 323 |
+
|
| 324 |
+
- 爬取视频数据(TikTok或Douyin混合解析)
|
| 325 |
+
`https://api.douyin.wtf/api/hybrid/video_data?url=[视频链接/Video URL]&minimal=false`
|
| 326 |
+
- 下载视频/图集(TikTok或Douyin混合解析)
|
| 327 |
+
`https://api.douyin.wtf/api/download?url=[视频链接/Video URL]&prefix=true&with_watermark=false`
|
| 328 |
+
|
| 329 |
+
***更多演示请查看文档内容......***
|
| 330 |
+
|
| 331 |
+
## ⚠️部署前的准备工作(请仔细阅读):
|
| 332 |
+
|
| 333 |
+
- 你需要自行解决爬虫Cookie风控问题,否则可能会导致接口无法使用,修改完配置文件后需要重启服务才能生效,并且最好使用已经登录过的账号的Cookie。
|
| 334 |
+
- 抖音网页端Cookie(自行获取并替换下面配置文件中的Cookie):
|
| 335 |
+
- https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/30e56e5a7f97f87d60b1045befb1f6db147f8590/crawlers/douyin/web/config.yaml#L7
|
| 336 |
+
- TikTok网页端Cookie(自行获取并替换下面配置文件中的Cookie):
|
| 337 |
+
- https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/30e56e5a7f97f87d60b1045befb1f6db147f8590/crawlers/tiktok/web/config.yaml#L6
|
| 338 |
+
- 演示站点的在线下载功能被我关掉了,有人下的视频巨大无比直接给我服务器干崩了,你可以在网页解析结果页面右键保存视频...
|
| 339 |
+
- 演示站点的Cookie是我自己的,不保证长期有效,只起到演示作用,自己部署的话请自行获取Cookie。
|
| 340 |
+
- 需要TikTok Web API返回的视频链接直接访问会发生HTTP 403错误,请使用本项目API中的`/api/download`接口对TikTok 视频进行下载,这个接口在演示站点中已经被手动关闭了,需要你自行部署本项目。
|
| 341 |
+
- 这里有一个**视频教程**可以参考:***[https://www.bilibili.com/video/BV1vE421j7NR/](https://www.bilibili.com/video/BV1vE421j7NR/)***
|
| 342 |
+
|
| 343 |
+
## 💻部署(方式一 Linux)
|
| 344 |
+
|
| 345 |
+
> 💡提示:最好将本项目部署至美国地区的服务器,否则可能会出现奇怪的BUG。
|
| 346 |
+
|
| 347 |
+
推荐大家使用[Digitalocean](https://www.digitalocean.com/)的服务器,因为可以白嫖。
|
| 348 |
+
|
| 349 |
+
使用我的邀请链接注册,你可以获得$200的credit,当你在上面消费$25时,我也可以获得$25的奖励。
|
| 350 |
+
|
| 351 |
+
我的邀请链接:
|
| 352 |
+
|
| 353 |
+
[https://m.do.co/c/9f72a27dec35](https://m.do.co/c/9f72a27dec35)
|
| 354 |
+
|
| 355 |
+
> 使用脚本一键部署本项目
|
| 356 |
+
|
| 357 |
+
- 本项目提供了一键部署脚本,可以在服务器上快速部署本项目。
|
| 358 |
+
- 脚本是在Ubuntu 20.04 LTS上测试的,其他系统可能会有问题,如果有问题请自行解决。
|
| 359 |
+
- 使用wget命令下载[install.sh](https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/bash/install.sh)至服务器并运行
|
| 360 |
+
|
| 361 |
+
```
|
| 362 |
+
wget -O install.sh https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/bash/install.sh && sudo bash install.sh
|
| 363 |
+
```
|
| 364 |
+
|
| 365 |
+
> 开启/停止服务
|
| 366 |
+
|
| 367 |
+
- 使用以下命令来控制服务的运行或停止:
|
| 368 |
+
- `sudo systemctl start Douyin_TikTok_Download_API.service`
|
| 369 |
+
- `sudo systemctl stop Douyin_TikTok_Download_API.service`
|
| 370 |
+
|
| 371 |
+
> 开启/关闭开机自动运行
|
| 372 |
+
|
| 373 |
+
- 使用以下命令来设置服务开机自动运行或取消开机自动运行:
|
| 374 |
+
- `sudo systemctl enable Douyin_TikTok_Download_API.service`
|
| 375 |
+
- `sudo systemctl disable Douyin_TikTok_Download_API.service`
|
| 376 |
+
|
| 377 |
+
> 更新项目
|
| 378 |
+
|
| 379 |
+
- 项目更新时,确保更新脚本在虚拟环境中执行,更新所有依赖。进入项目bash目录并运行update.sh:
|
| 380 |
+
- `cd /www/wwwroot/Douyin_TikTok_Download_API/bash && sudo bash update.sh`
|
| 381 |
+
|
| 382 |
+
## 💽部署(方式二 Docker)
|
| 383 |
+
|
| 384 |
+
> 💡提示:Docker部署是最简单的部署方式,适合不熟悉Linux的用户,这种方法适合保证环境一致性、隔离性和快速设置。
|
| 385 |
+
> 请使用能正常访问Douyin或TikTok的服务器,否则可能会出现奇怪的BUG。
|
| 386 |
+
|
| 387 |
+
### 准备工作
|
| 388 |
+
|
| 389 |
+
开始之前,请确保您的系统已安装Docker。如果还未安装Docker,可以从[Docker官方网站](https://www.docker.com/products/docker-desktop/)下载并安装。
|
| 390 |
+
|
| 391 |
+
### 步骤1:拉取Docker镜像
|
| 392 |
+
|
| 393 |
+
首先,从Docker Hub拉取最新的Douyin_TikTok_Download_API镜像。
|
| 394 |
+
|
| 395 |
+
```bash
|
| 396 |
+
docker pull evil0ctal/douyin_tiktok_download_api:latest
|
| 397 |
+
```
|
| 398 |
+
|
| 399 |
+
如果需要,可以替换`latest`为你需要部署的具体版本标签。
|
| 400 |
+
|
| 401 |
+
### 步骤2:运行Docker容器
|
| 402 |
+
|
| 403 |
+
拉取镜像后,您可以从此镜像启动一个容器。以下是运行容器的命令,包括基本配置:
|
| 404 |
+
|
| 405 |
+
```bash
|
| 406 |
+
docker run -d --name douyin_tiktok_api -p 80:80 evil0ctal/douyin_tiktok_download_api
|
| 407 |
+
```
|
| 408 |
+
|
| 409 |
+
这个命令的每个部分作用如下:
|
| 410 |
+
|
| 411 |
+
* `-d`:在后台运行容器(分离模式)。
|
| 412 |
+
* `--name douyin_tiktok_api `:将容器命名为`douyin_tiktok_api `。
|
| 413 |
+
* `-p 80:80`:将主机上的80端口映射到容器的80端口。根据您的配置或端口可用性调整端口号。
|
| 414 |
+
* `evil0ctal/douyin_tiktok_download_api`:要使用的Docker镜像名称。
|
| 415 |
+
|
| 416 |
+
### 步骤3:验证容器是否运行
|
| 417 |
+
|
| 418 |
+
使用以下命令检查您的容器是否正在运行:
|
| 419 |
+
|
| 420 |
+
```bash
|
| 421 |
+
docker ps
|
| 422 |
+
```
|
| 423 |
+
|
| 424 |
+
这将列出所有活动容器。查找`douyin_tiktok_api `以确认其正常运行。
|
| 425 |
+
|
| 426 |
+
### 步骤4:访问应用程序
|
| 427 |
+
|
| 428 |
+
容器运行后,您应该能够通过`http://localhost`或API客户端访问Douyin_TikTok_Download_API。如果配置了不同的端口或从远程位置访问,请调整URL。
|
| 429 |
+
|
| 430 |
+
### 可选:自定义Docker命令
|
| 431 |
+
|
| 432 |
+
对于更高级的部署,您可能希望自定义Docker命令,包括环境变量、持久数据的卷挂载或其他Docker参数。这是一个示例:
|
| 433 |
+
|
| 434 |
+
```bash
|
| 435 |
+
docker run -d --name douyin_tiktok_api -p 80:80 \
|
| 436 |
+
-v /path/to/your/data:/data \
|
| 437 |
+
-e MY_ENV_VAR=my_value \
|
| 438 |
+
evil0ctal/douyin_tiktok_download_api
|
| 439 |
+
```
|
| 440 |
+
|
| 441 |
+
* `-v /path/to/your/data:/data`:将主机上的`/path/to/your/data`目录挂载到容器的`/data`目录,用于持久化或共享数据。
|
| 442 |
+
* `-e MY_ENV_VAR=my_value`:在容器内设置环境变量`MY_ENV_VAR`,其值为`my_value`。
|
| 443 |
+
|
| 444 |
+
### 配置文件修改
|
| 445 |
+
|
| 446 |
+
项目的大部分配置可以在以下几个目录中的`config.yaml`文件进行修改:
|
| 447 |
+
|
| 448 |
+
* `/crawlers/douyin/web/config.yaml`
|
| 449 |
+
* `/crawlers/tiktok/web/config.yaml`
|
| 450 |
+
* `/crawlers/tiktok/app/config.yaml`
|
| 451 |
+
|
| 452 |
+
### 步骤5:停止并移除容器
|
| 453 |
+
|
| 454 |
+
需要停止和移除容器时,使用以下命令:
|
| 455 |
+
|
| 456 |
+
```bash
|
| 457 |
+
# Stop
|
| 458 |
+
docker stop douyin_tiktok_api
|
| 459 |
+
|
| 460 |
+
# Remove
|
| 461 |
+
docker rm douyin_tiktok_api
|
| 462 |
+
```
|
| 463 |
+
|
| 464 |
+
## 📸截图
|
| 465 |
+
|
| 466 |
+
***API速度测试(对比官方API)***
|
| 467 |
+
|
| 468 |
+
<details><summary>🔎点击展开截图</summary>
|
| 469 |
+
|
| 470 |
+
抖音官方API:
|
| 471 |
+

|
| 472 |
+
|
| 473 |
+
本项目API:
|
| 474 |
+

|
| 475 |
+
|
| 476 |
+
TikTok官方API:
|
| 477 |
+

|
| 478 |
+
|
| 479 |
+
本项目API:
|
| 480 |
+

|
| 481 |
+
|
| 482 |
+
</details>
|
| 483 |
+
<hr>
|
| 484 |
+
|
| 485 |
+
***项目界面***
|
| 486 |
+
|
| 487 |
+
<details><summary>🔎点击展开截图</summary>
|
| 488 |
+
|
| 489 |
+
Web主界面:
|
| 490 |
+
|
| 491 |
+

|
| 492 |
+
|
| 493 |
+
Web main interface:
|
| 494 |
+
|
| 495 |
+

|
| 496 |
+
|
| 497 |
+
</details>
|
| 498 |
+
<hr>
|
| 499 |
+
|
| 500 |
+
## 📜 Star历史
|
| 501 |
+
|
| 502 |
+
[](https://star-history.com/#Evil0ctal/Douyin_TikTok_Download_API&Timeline)
|
| 503 |
+
|
| 504 |
+
[Apache-2.0 license](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/blob/Stable/LICENSE)
|
| 505 |
+
|
| 506 |
+
> Start: 2021/11/06
|
| 507 |
+
|
| 508 |
+
> GitHub: [@Evil0ctal](https://github.com/Evil0ctal)
|
Screenshots/BT_Linux_Panel_Deploy_1.png
ADDED
|
Screenshots/BT_Linux_Panel_Deploy_2.png
ADDED
|
Screenshots/api_image_result.png
ADDED
|
Screenshots/api_video_result.png
ADDED
|
Screenshots/benchmarks/Douyin_API.png
ADDED
|
Screenshots/benchmarks/Douyin_API_Douyin_wtf.png
ADDED
|
Screenshots/benchmarks/TikTok_API.png
ADDED
|
Screenshots/benchmarks/TikTok_API_Douyin_wtf.png
ADDED
|
Screenshots/benchmarks/info
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
API benchmarks screenshots
|
Screenshots/home.png
ADDED
|
Screenshots/home_en.png
ADDED
|
Screenshots/multi_results.png
ADDED
|
Git LFS Details
|
Screenshots/multi_results_en.png
ADDED
|
Screenshots/single_result.png
ADDED
|
Screenshots/single_result_en.png
ADDED
|
Screenshots/tiktok_API.png
ADDED
|
Screenshots/v3_screenshots/Home.png
ADDED
|
Screenshots/v3_screenshots/Home_en.png
ADDED
|
Screenshots/v3_screenshots/info
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
V3.0 Screenshots
|
app/api/endpoints/bilibili_web.py
ADDED
|
@@ -0,0 +1,697 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Body, Query, Request, HTTPException # 导入FastAPI组件
|
| 2 |
+
from app.api.models.APIResponseModel import ResponseModel, ErrorResponseModel # 导入响应模型
|
| 3 |
+
|
| 4 |
+
from crawlers.bilibili.web.web_crawler import BilibiliWebCrawler # 导入哔哩哔哩web爬虫
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
router = APIRouter()
|
| 8 |
+
BilibiliWebCrawler = BilibiliWebCrawler()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# 获取单个视频详情信息
|
| 12 |
+
@router.get("/fetch_one_video", response_model=ResponseModel, summary="获取单个视频详情信息/Get single video data")
|
| 13 |
+
async def fetch_one_video(request: Request,
|
| 14 |
+
bv_id: str = Query(example="BV1M1421t7hT", description="作品id/Video id")):
|
| 15 |
+
"""
|
| 16 |
+
# [中文]
|
| 17 |
+
### 用途:
|
| 18 |
+
- 获取单个视频详情信息
|
| 19 |
+
### 参数:
|
| 20 |
+
- bv_id: 作品id
|
| 21 |
+
### 返回:
|
| 22 |
+
- 视频详情信息
|
| 23 |
+
|
| 24 |
+
# [English]
|
| 25 |
+
### Purpose:
|
| 26 |
+
- Get single video data
|
| 27 |
+
### Parameters:
|
| 28 |
+
- bv_id: Video id
|
| 29 |
+
### Return:
|
| 30 |
+
- Video data
|
| 31 |
+
|
| 32 |
+
# [示例/Example]
|
| 33 |
+
bv_id = "BV1M1421t7hT"
|
| 34 |
+
"""
|
| 35 |
+
try:
|
| 36 |
+
data = await BilibiliWebCrawler.fetch_one_video(bv_id)
|
| 37 |
+
return ResponseModel(code=200,
|
| 38 |
+
router=request.url.path,
|
| 39 |
+
data=data)
|
| 40 |
+
except Exception as e:
|
| 41 |
+
status_code = 400
|
| 42 |
+
detail = ErrorResponseModel(code=status_code,
|
| 43 |
+
router=request.url.path,
|
| 44 |
+
params=dict(request.query_params),
|
| 45 |
+
)
|
| 46 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# 获取视频流地址
|
| 50 |
+
@router.get("/fetch_video_playurl", response_model=ResponseModel, summary="获取视频流地址/Get video playurl")
|
| 51 |
+
async def fetch_one_video(request: Request,
|
| 52 |
+
bv_id: str = Query(example="BV1y7411Q7Eq", description="作品id/Video id"),
|
| 53 |
+
cid:str = Query(example="171776208", description="作品cid/Video cid")):
|
| 54 |
+
"""
|
| 55 |
+
# [中文]
|
| 56 |
+
### 用途:
|
| 57 |
+
- 获取视频流地址
|
| 58 |
+
### 参数:
|
| 59 |
+
- bv_id: 作品id
|
| 60 |
+
- cid: 作品cid
|
| 61 |
+
### 返回:
|
| 62 |
+
- 视频流地址
|
| 63 |
+
|
| 64 |
+
# [English]
|
| 65 |
+
### Purpose:
|
| 66 |
+
- Get video playurl
|
| 67 |
+
### Parameters:
|
| 68 |
+
- bv_id: Video id
|
| 69 |
+
- cid: Video cid
|
| 70 |
+
### Return:
|
| 71 |
+
- Video playurl
|
| 72 |
+
|
| 73 |
+
# [示例/Example]
|
| 74 |
+
bv_id = "BV1y7411Q7Eq"
|
| 75 |
+
cid = "171776208"
|
| 76 |
+
"""
|
| 77 |
+
try:
|
| 78 |
+
data = await BilibiliWebCrawler.fetch_video_playurl(bv_id, cid)
|
| 79 |
+
return ResponseModel(code=200,
|
| 80 |
+
router=request.url.path,
|
| 81 |
+
data=data)
|
| 82 |
+
except Exception as e:
|
| 83 |
+
status_code = 400
|
| 84 |
+
detail = ErrorResponseModel(code=status_code,
|
| 85 |
+
router=request.url.path,
|
| 86 |
+
params=dict(request.query_params),
|
| 87 |
+
)
|
| 88 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# 获取用户发布视频作品数据
|
| 92 |
+
@router.get("/fetch_user_post_videos", response_model=ResponseModel,
|
| 93 |
+
summary="获取用户主页作品数据/Get user homepage video data")
|
| 94 |
+
async def fetch_user_post_videos(request: Request,
|
| 95 |
+
uid: str = Query(example="178360345", description="用户UID"),
|
| 96 |
+
pn: int = Query(default=1, description="页码/Page number"),):
|
| 97 |
+
"""
|
| 98 |
+
# [中文]
|
| 99 |
+
### 用途:
|
| 100 |
+
- 获取用户发布的视频数据
|
| 101 |
+
### 参数:
|
| 102 |
+
- uid: 用户UID
|
| 103 |
+
- pn: 页码
|
| 104 |
+
### 返回:
|
| 105 |
+
- 用户发布的视频数据
|
| 106 |
+
|
| 107 |
+
# [English]
|
| 108 |
+
### Purpose:
|
| 109 |
+
- Get user post video data
|
| 110 |
+
### Parameters:
|
| 111 |
+
- uid: User UID
|
| 112 |
+
- pn: Page number
|
| 113 |
+
### Return:
|
| 114 |
+
- User posted video data
|
| 115 |
+
|
| 116 |
+
# [示例/Example]
|
| 117 |
+
uid = "178360345"
|
| 118 |
+
pn = 1
|
| 119 |
+
"""
|
| 120 |
+
try:
|
| 121 |
+
data = await BilibiliWebCrawler.fetch_user_post_videos(uid, pn)
|
| 122 |
+
return ResponseModel(code=200,
|
| 123 |
+
router=request.url.path,
|
| 124 |
+
data=data)
|
| 125 |
+
except Exception as e:
|
| 126 |
+
status_code = 400
|
| 127 |
+
detail = ErrorResponseModel(code=status_code,
|
| 128 |
+
router=request.url.path,
|
| 129 |
+
params=dict(request.query_params),
|
| 130 |
+
)
|
| 131 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# 获取用户所有收藏夹信息
|
| 135 |
+
@router.get("/fetch_collect_folders", response_model=ResponseModel,
|
| 136 |
+
summary="获取用户所有收藏夹信息/Get user collection folders")
|
| 137 |
+
async def fetch_collect_folders(request: Request,
|
| 138 |
+
uid: str = Query(example="178360345", description="用户UID")):
|
| 139 |
+
"""
|
| 140 |
+
# [中文]
|
| 141 |
+
### 用途:
|
| 142 |
+
- 获取用户收藏作品数据
|
| 143 |
+
### 参数:
|
| 144 |
+
- uid: 用户UID
|
| 145 |
+
### 返回:
|
| 146 |
+
- 用户收藏夹信息
|
| 147 |
+
|
| 148 |
+
# [English]
|
| 149 |
+
### Purpose:
|
| 150 |
+
- Get user collection folders
|
| 151 |
+
### Parameters:
|
| 152 |
+
- uid: User UID
|
| 153 |
+
### Return:
|
| 154 |
+
- user collection folders
|
| 155 |
+
|
| 156 |
+
# [示例/Example]
|
| 157 |
+
uid = "178360345"
|
| 158 |
+
"""
|
| 159 |
+
try:
|
| 160 |
+
data = await BilibiliWebCrawler.fetch_collect_folders(uid)
|
| 161 |
+
return ResponseModel(code=200,
|
| 162 |
+
router=request.url.path,
|
| 163 |
+
data=data)
|
| 164 |
+
except Exception as e:
|
| 165 |
+
status_code = 400
|
| 166 |
+
detail = ErrorResponseModel(code=status_code,
|
| 167 |
+
router=request.url.path,
|
| 168 |
+
params=dict(request.query_params),
|
| 169 |
+
)
|
| 170 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# 获取指定收藏夹内视频数据
|
| 174 |
+
@router.get("/fetch_user_collection_videos", response_model=ResponseModel,
|
| 175 |
+
summary="获取指定收藏夹内视频数据/Gets video data from a collection folder")
|
| 176 |
+
async def fetch_user_collection_videos(request: Request,
|
| 177 |
+
folder_id: str = Query(example="1756059545",
|
| 178 |
+
description="收藏夹id/collection folder id"),
|
| 179 |
+
pn: int = Query(default=1, description="页码/Page number")
|
| 180 |
+
):
|
| 181 |
+
"""
|
| 182 |
+
# [中文]
|
| 183 |
+
### 用途:
|
| 184 |
+
- 获取指定收藏夹内视频数据
|
| 185 |
+
### 参数:
|
| 186 |
+
- folder_id: 用户UID
|
| 187 |
+
- pn: 页码
|
| 188 |
+
### 返回:
|
| 189 |
+
- 指定收藏夹内视频数据
|
| 190 |
+
|
| 191 |
+
# [English]
|
| 192 |
+
### Purpose:
|
| 193 |
+
- Gets video data from a collection folder
|
| 194 |
+
### Parameters:
|
| 195 |
+
- folder_id: collection folder id
|
| 196 |
+
- pn: Page number
|
| 197 |
+
### Return:
|
| 198 |
+
- video data from collection folder
|
| 199 |
+
|
| 200 |
+
# [示例/Example]
|
| 201 |
+
folder_id = "1756059545"
|
| 202 |
+
pn = 1
|
| 203 |
+
"""
|
| 204 |
+
try:
|
| 205 |
+
data = await BilibiliWebCrawler.fetch_folder_videos(folder_id, pn)
|
| 206 |
+
return ResponseModel(code=200,
|
| 207 |
+
router=request.url.path,
|
| 208 |
+
data=data)
|
| 209 |
+
except Exception as e:
|
| 210 |
+
status_code = 400
|
| 211 |
+
detail = ErrorResponseModel(code=status_code,
|
| 212 |
+
router=request.url.path,
|
| 213 |
+
params=dict(request.query_params),
|
| 214 |
+
)
|
| 215 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
# 获取指定用户的信息
|
| 219 |
+
@router.get("/fetch_user_profile", response_model=ResponseModel,
|
| 220 |
+
summary="获取指定用户的信息/Get information of specified user")
|
| 221 |
+
async def fetch_collect_folders(request: Request,
|
| 222 |
+
uid: str = Query(example="178360345", description="用户UID")):
|
| 223 |
+
"""
|
| 224 |
+
# [中文]
|
| 225 |
+
### 用途:
|
| 226 |
+
- 获取指定用户的信息
|
| 227 |
+
### 参数:
|
| 228 |
+
- uid: 用户UID
|
| 229 |
+
### 返回:
|
| 230 |
+
- 指定用户的个人信息
|
| 231 |
+
|
| 232 |
+
# [English]
|
| 233 |
+
### Purpose:
|
| 234 |
+
- Get information of specified user
|
| 235 |
+
### Parameters:
|
| 236 |
+
- uid: User UID
|
| 237 |
+
### Return:
|
| 238 |
+
- information of specified user
|
| 239 |
+
|
| 240 |
+
# [示例/Example]
|
| 241 |
+
uid = "178360345"
|
| 242 |
+
"""
|
| 243 |
+
try:
|
| 244 |
+
data = await BilibiliWebCrawler.fetch_user_profile(uid)
|
| 245 |
+
return ResponseModel(code=200,
|
| 246 |
+
router=request.url.path,
|
| 247 |
+
data=data)
|
| 248 |
+
except Exception as e:
|
| 249 |
+
status_code = 400
|
| 250 |
+
detail = ErrorResponseModel(code=status_code,
|
| 251 |
+
router=request.url.path,
|
| 252 |
+
params=dict(request.query_params),
|
| 253 |
+
)
|
| 254 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
# 获取综合热门视频信息
|
| 258 |
+
@router.get("/fetch_com_popular", response_model=ResponseModel,
|
| 259 |
+
summary="获取综合热门视频信息/Get comprehensive popular video information")
|
| 260 |
+
async def fetch_collect_folders(request: Request,
|
| 261 |
+
pn: int = Query(default=1, description="页码/Page number")):
|
| 262 |
+
"""
|
| 263 |
+
# [中文]
|
| 264 |
+
### 用途:
|
| 265 |
+
- 获取综合热门视频信息
|
| 266 |
+
### 参数:
|
| 267 |
+
- pn: 页码
|
| 268 |
+
### 返回:
|
| 269 |
+
- 综合热门视频信息
|
| 270 |
+
|
| 271 |
+
# [English]
|
| 272 |
+
### Purpose:
|
| 273 |
+
- Get comprehensive popular video information
|
| 274 |
+
### Parameters:
|
| 275 |
+
- pn: Page number
|
| 276 |
+
### Return:
|
| 277 |
+
- comprehensive popular video information
|
| 278 |
+
|
| 279 |
+
# [示例/Example]
|
| 280 |
+
pn = 1
|
| 281 |
+
"""
|
| 282 |
+
try:
|
| 283 |
+
data = await BilibiliWebCrawler.fetch_com_popular(pn)
|
| 284 |
+
return ResponseModel(code=200,
|
| 285 |
+
router=request.url.path,
|
| 286 |
+
data=data)
|
| 287 |
+
except Exception as e:
|
| 288 |
+
status_code = 400
|
| 289 |
+
detail = ErrorResponseModel(code=status_code,
|
| 290 |
+
router=request.url.path,
|
| 291 |
+
params=dict(request.query_params),
|
| 292 |
+
)
|
| 293 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
# 获取指定视频的评论
|
| 297 |
+
@router.get("/fetch_video_comments", response_model=ResponseModel,
|
| 298 |
+
summary="获取指定视频的评论/Get comments on the specified video")
|
| 299 |
+
async def fetch_collect_folders(request: Request,
|
| 300 |
+
bv_id: str = Query(example="BV1M1421t7hT", description="作品id/Video id"),
|
| 301 |
+
pn: int = Query(default=1, description="页码/Page number")):
|
| 302 |
+
"""
|
| 303 |
+
# [中文]
|
| 304 |
+
### 用途:
|
| 305 |
+
- 获取指定视频的评论
|
| 306 |
+
### 参数:
|
| 307 |
+
- bv_id: 作品id
|
| 308 |
+
- pn: 页码
|
| 309 |
+
### 返回:
|
| 310 |
+
- 指定视频的评论数据
|
| 311 |
+
|
| 312 |
+
# [English]
|
| 313 |
+
### Purpose:
|
| 314 |
+
- Get comments on the specified video
|
| 315 |
+
### Parameters:
|
| 316 |
+
- bv_id: Video id
|
| 317 |
+
- pn: Page number
|
| 318 |
+
### Return:
|
| 319 |
+
- comments of the specified video
|
| 320 |
+
|
| 321 |
+
# [示例/Example]
|
| 322 |
+
bv_id = "BV1M1421t7hT"
|
| 323 |
+
pn = 1
|
| 324 |
+
"""
|
| 325 |
+
try:
|
| 326 |
+
data = await BilibiliWebCrawler.fetch_video_comments(bv_id, pn)
|
| 327 |
+
return ResponseModel(code=200,
|
| 328 |
+
router=request.url.path,
|
| 329 |
+
data=data)
|
| 330 |
+
except Exception as e:
|
| 331 |
+
status_code = 400
|
| 332 |
+
detail = ErrorResponseModel(code=status_code,
|
| 333 |
+
router=request.url.path,
|
| 334 |
+
params=dict(request.query_params),
|
| 335 |
+
)
|
| 336 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
# 获取视频下指定评论的回复
|
| 340 |
+
@router.get("/fetch_comment_reply", response_model=ResponseModel,
|
| 341 |
+
summary="获取视频下指定评论的回复/Get reply to the specified comment")
|
| 342 |
+
async def fetch_collect_folders(request: Request,
|
| 343 |
+
bv_id: str = Query(example="BV1M1421t7hT", description="作品id/Video id"),
|
| 344 |
+
pn: int = Query(default=1, description="页码/Page number"),
|
| 345 |
+
rpid: str = Query(example="237109455120", description="回复id/Reply id")):
|
| 346 |
+
"""
|
| 347 |
+
# [中文]
|
| 348 |
+
### 用途:
|
| 349 |
+
- 获取视频下指定评论的回复
|
| 350 |
+
### 参数:
|
| 351 |
+
- bv_id: 作品id
|
| 352 |
+
- pn: 页码
|
| 353 |
+
- rpid: 回复id
|
| 354 |
+
### 返回:
|
| 355 |
+
- 指定评论的回复数据
|
| 356 |
+
|
| 357 |
+
# [English]
|
| 358 |
+
### Purpose:
|
| 359 |
+
- Get reply to the specified comment
|
| 360 |
+
### Parameters:
|
| 361 |
+
- bv_id: Video id
|
| 362 |
+
- pn: Page number
|
| 363 |
+
- rpid: Reply id
|
| 364 |
+
### Return:
|
| 365 |
+
- Reply of the specified comment
|
| 366 |
+
|
| 367 |
+
# [示例/Example]
|
| 368 |
+
bv_id = "BV1M1421t7hT"
|
| 369 |
+
pn = 1
|
| 370 |
+
rpid = "237109455120"
|
| 371 |
+
"""
|
| 372 |
+
try:
|
| 373 |
+
data = await BilibiliWebCrawler.fetch_comment_reply(bv_id, pn, rpid)
|
| 374 |
+
return ResponseModel(code=200,
|
| 375 |
+
router=request.url.path,
|
| 376 |
+
data=data)
|
| 377 |
+
except Exception as e:
|
| 378 |
+
status_code = 400
|
| 379 |
+
detail = ErrorResponseModel(code=status_code,
|
| 380 |
+
router=request.url.path,
|
| 381 |
+
params=dict(request.query_params),
|
| 382 |
+
)
|
| 383 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
# 获取指定用户动态
|
| 387 |
+
@router.get("/fetch_user_dynamic", response_model=ResponseModel,
|
| 388 |
+
summary="获取指定用户动态/Get dynamic information of specified user")
|
| 389 |
+
async def fetch_collect_folders(request: Request,
|
| 390 |
+
uid: str = Query(example="16015678", description="用户UID"),
|
| 391 |
+
offset: str = Query(default="", example="953154282154098691",
|
| 392 |
+
description="开始索引/offset")):
|
| 393 |
+
"""
|
| 394 |
+
# [中文]
|
| 395 |
+
### 用途:
|
| 396 |
+
- 获取指定用户动态
|
| 397 |
+
### 参数:
|
| 398 |
+
- uid: 用户UID
|
| 399 |
+
- offset: 开始索引
|
| 400 |
+
### 返回:
|
| 401 |
+
- 指定用户动态数据
|
| 402 |
+
|
| 403 |
+
# [English]
|
| 404 |
+
### Purpose:
|
| 405 |
+
- Get dynamic information of specified user
|
| 406 |
+
### Parameters:
|
| 407 |
+
- uid: User UID
|
| 408 |
+
- offset: offset
|
| 409 |
+
### Return:
|
| 410 |
+
- dynamic information of specified user
|
| 411 |
+
|
| 412 |
+
# [示例/Example]
|
| 413 |
+
uid = "178360345"
|
| 414 |
+
offset = "953154282154098691"
|
| 415 |
+
"""
|
| 416 |
+
try:
|
| 417 |
+
data = await BilibiliWebCrawler.fetch_user_dynamic(uid, offset)
|
| 418 |
+
return ResponseModel(code=200,
|
| 419 |
+
router=request.url.path,
|
| 420 |
+
data=data)
|
| 421 |
+
except Exception as e:
|
| 422 |
+
status_code = 400
|
| 423 |
+
detail = ErrorResponseModel(code=status_code,
|
| 424 |
+
router=request.url.path,
|
| 425 |
+
params=dict(request.query_params),
|
| 426 |
+
)
|
| 427 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
# 获取视频实时弹幕
|
| 431 |
+
@router.get("/fetch_video_danmaku", response_model=ResponseModel, summary="获取视频实时弹幕/Get Video Danmaku")
|
| 432 |
+
async def fetch_one_video(request: Request,
|
| 433 |
+
cid: str = Query(example="1639235405", description="作品cid/Video cid")):
|
| 434 |
+
"""
|
| 435 |
+
# [中文]
|
| 436 |
+
### 用途:
|
| 437 |
+
- 获取视频实时弹幕
|
| 438 |
+
### 参数:
|
| 439 |
+
- cid: 作品cid
|
| 440 |
+
### 返回:
|
| 441 |
+
- 视频实时弹幕
|
| 442 |
+
|
| 443 |
+
# [English]
|
| 444 |
+
### Purpose:
|
| 445 |
+
- Get Video Danmaku
|
| 446 |
+
### Parameters:
|
| 447 |
+
- cid: Video cid
|
| 448 |
+
### Return:
|
| 449 |
+
- Video Danmaku
|
| 450 |
+
|
| 451 |
+
# [示例/Example]
|
| 452 |
+
cid = "1639235405"
|
| 453 |
+
"""
|
| 454 |
+
try:
|
| 455 |
+
data = await BilibiliWebCrawler.fetch_video_danmaku(cid)
|
| 456 |
+
return ResponseModel(code=200,
|
| 457 |
+
router=request.url.path,
|
| 458 |
+
data=data)
|
| 459 |
+
except Exception as e:
|
| 460 |
+
status_code = 400
|
| 461 |
+
detail = ErrorResponseModel(code=status_code,
|
| 462 |
+
router=request.url.path,
|
| 463 |
+
params=dict(request.query_params),
|
| 464 |
+
)
|
| 465 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
# 获取指定直播间信息
|
| 469 |
+
@router.get("/fetch_live_room_detail", response_model=ResponseModel,
|
| 470 |
+
summary="获取指定直播间信息/Get information of specified live room")
|
| 471 |
+
async def fetch_collect_folders(request: Request,
|
| 472 |
+
room_id: str = Query(example="22816111", description="直播间ID/Live room ID")):
|
| 473 |
+
"""
|
| 474 |
+
# [中文]
|
| 475 |
+
### 用途:
|
| 476 |
+
- 获取指定直播间信息
|
| 477 |
+
### 参数:
|
| 478 |
+
- room_id: 直播间ID
|
| 479 |
+
### 返回:
|
| 480 |
+
- 指定直播间信息
|
| 481 |
+
|
| 482 |
+
# [English]
|
| 483 |
+
### Purpose:
|
| 484 |
+
- Get information of specified live room
|
| 485 |
+
### Parameters:
|
| 486 |
+
- room_id: Live room ID
|
| 487 |
+
### Return:
|
| 488 |
+
- information of specified live room
|
| 489 |
+
|
| 490 |
+
# [示例/Example]
|
| 491 |
+
room_id = "22816111"
|
| 492 |
+
"""
|
| 493 |
+
try:
|
| 494 |
+
data = await BilibiliWebCrawler.fetch_live_room_detail(room_id)
|
| 495 |
+
return ResponseModel(code=200,
|
| 496 |
+
router=request.url.path,
|
| 497 |
+
data=data)
|
| 498 |
+
except Exception as e:
|
| 499 |
+
status_code = 400
|
| 500 |
+
detail = ErrorResponseModel(code=status_code,
|
| 501 |
+
router=request.url.path,
|
| 502 |
+
params=dict(request.query_params),
|
| 503 |
+
)
|
| 504 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
# 获取指定直播间视频流
|
| 508 |
+
@router.get("/fetch_live_videos", response_model=ResponseModel,
|
| 509 |
+
summary="获取直播间视频流/Get live video data of specified room")
|
| 510 |
+
async def fetch_collect_folders(request: Request,
|
| 511 |
+
room_id: str = Query(example="1815229528", description="直播间ID/Live room ID")):
|
| 512 |
+
"""
|
| 513 |
+
# [中文]
|
| 514 |
+
### 用途:
|
| 515 |
+
- 获取指定直播间视频流
|
| 516 |
+
### 参数:
|
| 517 |
+
- room_id: 直播间ID
|
| 518 |
+
### 返回:
|
| 519 |
+
- 指定直播间视频流
|
| 520 |
+
|
| 521 |
+
# [English]
|
| 522 |
+
### Purpose:
|
| 523 |
+
- Get live video data of specified room
|
| 524 |
+
### Parameters:
|
| 525 |
+
- room_id: Live room ID
|
| 526 |
+
### Return:
|
| 527 |
+
- live video data of specified room
|
| 528 |
+
|
| 529 |
+
# [示例/Example]
|
| 530 |
+
room_id = "1815229528"
|
| 531 |
+
"""
|
| 532 |
+
try:
|
| 533 |
+
data = await BilibiliWebCrawler.fetch_live_videos(room_id)
|
| 534 |
+
return ResponseModel(code=200,
|
| 535 |
+
router=request.url.path,
|
| 536 |
+
data=data)
|
| 537 |
+
except Exception as e:
|
| 538 |
+
status_code = 400
|
| 539 |
+
detail = ErrorResponseModel(code=status_code,
|
| 540 |
+
router=request.url.path,
|
| 541 |
+
params=dict(request.query_params),
|
| 542 |
+
)
|
| 543 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
# 获取指定分区正在直播的主播
|
| 547 |
+
@router.get("/fetch_live_streamers", response_model=ResponseModel,
|
| 548 |
+
summary="获取指定分区正在直播的主播/Get live streamers of specified live area")
|
| 549 |
+
async def fetch_collect_folders(request: Request,
|
| 550 |
+
area_id: str = Query(example="9", description="直播分区id/Live area ID"),
|
| 551 |
+
pn: int = Query(default=1, description="页码/Page number")):
|
| 552 |
+
"""
|
| 553 |
+
# [中文]
|
| 554 |
+
### 用途:
|
| 555 |
+
- 获取指定分区正在直播的主播
|
| 556 |
+
### 参数:
|
| 557 |
+
- area_id: 直播分区id
|
| 558 |
+
- pn: 页码
|
| 559 |
+
### 返回:
|
| 560 |
+
- 指定分区正在直播的主播
|
| 561 |
+
|
| 562 |
+
# [English]
|
| 563 |
+
### Purpose:
|
| 564 |
+
- Get live streamers of specified live area
|
| 565 |
+
### Parameters:
|
| 566 |
+
- area_id: Live area ID
|
| 567 |
+
- pn: Page number
|
| 568 |
+
### Return:
|
| 569 |
+
- live streamers of specified live area
|
| 570 |
+
|
| 571 |
+
# [示例/Example]
|
| 572 |
+
area_id = "9"
|
| 573 |
+
pn = 1
|
| 574 |
+
"""
|
| 575 |
+
try:
|
| 576 |
+
data = await BilibiliWebCrawler.fetch_live_streamers(area_id, pn)
|
| 577 |
+
return ResponseModel(code=200,
|
| 578 |
+
router=request.url.path,
|
| 579 |
+
data=data)
|
| 580 |
+
except Exception as e:
|
| 581 |
+
status_code = 400
|
| 582 |
+
detail = ErrorResponseModel(code=status_code,
|
| 583 |
+
router=request.url.path,
|
| 584 |
+
params=dict(request.query_params),
|
| 585 |
+
)
|
| 586 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
# 获取所有直播分区列表
|
| 590 |
+
@router.get("/fetch_all_live_areas", response_model=ResponseModel,
|
| 591 |
+
summary="获取所有直播分区列表/Get a list of all live areas")
|
| 592 |
+
async def fetch_collect_folders(request: Request,):
|
| 593 |
+
"""
|
| 594 |
+
# [中文]
|
| 595 |
+
### 用途:
|
| 596 |
+
- 获取所有直播分区列表
|
| 597 |
+
### 参数:
|
| 598 |
+
### 返回:
|
| 599 |
+
- 所有直播分区列表
|
| 600 |
+
|
| 601 |
+
# [English]
|
| 602 |
+
### Purpose:
|
| 603 |
+
- Get a list of all live areas
|
| 604 |
+
### Parameters:
|
| 605 |
+
### Return:
|
| 606 |
+
- list of all live areas
|
| 607 |
+
|
| 608 |
+
# [示例/Example]
|
| 609 |
+
"""
|
| 610 |
+
try:
|
| 611 |
+
data = await BilibiliWebCrawler.fetch_all_live_areas()
|
| 612 |
+
return ResponseModel(code=200,
|
| 613 |
+
router=request.url.path,
|
| 614 |
+
data=data)
|
| 615 |
+
except Exception as e:
|
| 616 |
+
status_code = 400
|
| 617 |
+
detail = ErrorResponseModel(code=status_code,
|
| 618 |
+
router=request.url.path,
|
| 619 |
+
params=dict(request.query_params),
|
| 620 |
+
)
|
| 621 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
# 通过bv号获得视频aid号
|
| 625 |
+
@router.get("/bv_to_aid", response_model=ResponseModel, summary="通过bv号获得视频aid号/Generate aid by bvid")
|
| 626 |
+
async def fetch_one_video(request: Request,
|
| 627 |
+
bv_id: str = Query(example="BV1M1421t7hT", description="作品id/Video id")):
|
| 628 |
+
"""
|
| 629 |
+
# [中文]
|
| 630 |
+
### 用途:
|
| 631 |
+
- 通过bv号获得视频aid号
|
| 632 |
+
### 参数:
|
| 633 |
+
- bv_id: 作品id
|
| 634 |
+
### 返回:
|
| 635 |
+
- 视频aid号
|
| 636 |
+
|
| 637 |
+
# [English]
|
| 638 |
+
### Purpose:
|
| 639 |
+
- Generate aid by bvid
|
| 640 |
+
### Parameters:
|
| 641 |
+
- bv_id: Video id
|
| 642 |
+
### Return:
|
| 643 |
+
- Video aid
|
| 644 |
+
|
| 645 |
+
# [示例/Example]
|
| 646 |
+
bv_id = "BV1M1421t7hT"
|
| 647 |
+
"""
|
| 648 |
+
try:
|
| 649 |
+
data = await BilibiliWebCrawler.bv_to_aid(bv_id)
|
| 650 |
+
return ResponseModel(code=200,
|
| 651 |
+
router=request.url.path,
|
| 652 |
+
data=data)
|
| 653 |
+
except Exception as e:
|
| 654 |
+
status_code = 400
|
| 655 |
+
detail = ErrorResponseModel(code=status_code,
|
| 656 |
+
router=request.url.path,
|
| 657 |
+
params=dict(request.query_params),
|
| 658 |
+
)
|
| 659 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
# 通过bv号获得视频分p信息
|
| 663 |
+
@router.get("/fetch_video_parts", response_model=ResponseModel, summary="通过bv号获得视频分p信息/Get Video Parts By bvid")
|
| 664 |
+
async def fetch_one_video(request: Request,
|
| 665 |
+
bv_id: str = Query(example="BV1vf421i7hV", description="作品id/Video id")):
|
| 666 |
+
"""
|
| 667 |
+
# [中文]
|
| 668 |
+
### 用途:
|
| 669 |
+
- 通过bv号获得视频分p信息
|
| 670 |
+
### 参数:
|
| 671 |
+
- bv_id: 作品id
|
| 672 |
+
### 返回:
|
| 673 |
+
- 视频分p信息
|
| 674 |
+
|
| 675 |
+
# [English]
|
| 676 |
+
### Purpose:
|
| 677 |
+
- Get Video Parts By bvid
|
| 678 |
+
### Parameters:
|
| 679 |
+
- bv_id: Video id
|
| 680 |
+
### Return:
|
| 681 |
+
- Video Parts
|
| 682 |
+
|
| 683 |
+
# [示例/Example]
|
| 684 |
+
bv_id = "BV1vf421i7hV"
|
| 685 |
+
"""
|
| 686 |
+
try:
|
| 687 |
+
data = await BilibiliWebCrawler.fetch_video_parts(bv_id)
|
| 688 |
+
return ResponseModel(code=200,
|
| 689 |
+
router=request.url.path,
|
| 690 |
+
data=data)
|
| 691 |
+
except Exception as e:
|
| 692 |
+
status_code = 400
|
| 693 |
+
detail = ErrorResponseModel(code=status_code,
|
| 694 |
+
router=request.url.path,
|
| 695 |
+
params=dict(request.query_params),
|
| 696 |
+
)
|
| 697 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
app/api/endpoints/douyin_web.py
ADDED
|
@@ -0,0 +1,1070 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
from fastapi import APIRouter, Body, Query, Request, HTTPException # 导入FastAPI组件
|
| 4 |
+
from app.api.models.APIResponseModel import ResponseModel, ErrorResponseModel # 导入响应模型
|
| 5 |
+
|
| 6 |
+
from crawlers.douyin.web.web_crawler import DouyinWebCrawler # 导入抖音Web爬虫
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
router = APIRouter()
|
| 10 |
+
DouyinWebCrawler = DouyinWebCrawler()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# 获取单个作品数据
|
| 14 |
+
@router.get("/fetch_one_video", response_model=ResponseModel, summary="获取单个作品数据/Get single video data")
|
| 15 |
+
async def fetch_one_video(request: Request,
|
| 16 |
+
aweme_id: str = Query(example="7372484719365098803", description="作品id/Video id")):
|
| 17 |
+
"""
|
| 18 |
+
# [中文]
|
| 19 |
+
### 用途:
|
| 20 |
+
- 获取单个作品数据
|
| 21 |
+
### 参数:
|
| 22 |
+
- aweme_id: 作品id
|
| 23 |
+
### 返回:
|
| 24 |
+
- 作品数据
|
| 25 |
+
|
| 26 |
+
# [English]
|
| 27 |
+
### Purpose:
|
| 28 |
+
- Get single video data
|
| 29 |
+
### Parameters:
|
| 30 |
+
- aweme_id: Video id
|
| 31 |
+
### Return:
|
| 32 |
+
- Video data
|
| 33 |
+
|
| 34 |
+
# [示例/Example]
|
| 35 |
+
aweme_id = "7372484719365098803"
|
| 36 |
+
"""
|
| 37 |
+
try:
|
| 38 |
+
data = await DouyinWebCrawler.fetch_one_video(aweme_id)
|
| 39 |
+
return ResponseModel(code=200,
|
| 40 |
+
router=request.url.path,
|
| 41 |
+
data=data)
|
| 42 |
+
except Exception as e:
|
| 43 |
+
status_code = 400
|
| 44 |
+
detail = ErrorResponseModel(code=status_code,
|
| 45 |
+
router=request.url.path,
|
| 46 |
+
params=dict(request.query_params),
|
| 47 |
+
)
|
| 48 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# 获取用户作品集合数据
|
| 52 |
+
@router.get("/fetch_user_post_videos", response_model=ResponseModel,
|
| 53 |
+
summary="获取用户主页作品数据/Get user homepage video data")
|
| 54 |
+
async def fetch_user_post_videos(request: Request,
|
| 55 |
+
sec_user_id: str = Query(
|
| 56 |
+
example="MS4wLjABAAAANXSltcLCzDGmdNFI2Q_QixVTr67NiYzjKOIP5s03CAE",
|
| 57 |
+
description="用户sec_user_id/User sec_user_id"),
|
| 58 |
+
max_cursor: int = Query(default=0, description="最大游标/Maximum cursor"),
|
| 59 |
+
count: int = Query(default=20, description="每页数量/Number per page")):
|
| 60 |
+
"""
|
| 61 |
+
# [中文]
|
| 62 |
+
### 用途:
|
| 63 |
+
- 获取用户主页作品数据
|
| 64 |
+
### 参数:
|
| 65 |
+
- sec_user_id: 用户sec_user_id
|
| 66 |
+
- max_cursor: 最大游标
|
| 67 |
+
- count: 最大数量
|
| 68 |
+
### 返回:
|
| 69 |
+
- 用户作品数据
|
| 70 |
+
|
| 71 |
+
# [English]
|
| 72 |
+
### Purpose:
|
| 73 |
+
- Get user homepage video data
|
| 74 |
+
### Parameters:
|
| 75 |
+
- sec_user_id: User sec_user_id
|
| 76 |
+
- max_cursor: Maximum cursor
|
| 77 |
+
- count: Maximum count number
|
| 78 |
+
### Return:
|
| 79 |
+
- User video data
|
| 80 |
+
|
| 81 |
+
# [示例/Example]
|
| 82 |
+
sec_user_id = "MS4wLjABAAAANXSltcLCzDGmdNFI2Q_QixVTr67NiYzjKOIP5s03CAE"
|
| 83 |
+
max_cursor = 0
|
| 84 |
+
counts = 20
|
| 85 |
+
"""
|
| 86 |
+
try:
|
| 87 |
+
data = await DouyinWebCrawler.fetch_user_post_videos(sec_user_id, max_cursor, count)
|
| 88 |
+
return ResponseModel(code=200,
|
| 89 |
+
router=request.url.path,
|
| 90 |
+
data=data)
|
| 91 |
+
except Exception as e:
|
| 92 |
+
status_code = 400
|
| 93 |
+
detail = ErrorResponseModel(code=status_code,
|
| 94 |
+
router=request.url.path,
|
| 95 |
+
params=dict(request.query_params),
|
| 96 |
+
)
|
| 97 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
# 获取用户喜欢作品数据
|
| 101 |
+
@router.get("/fetch_user_like_videos", response_model=ResponseModel,
|
| 102 |
+
summary="获取用户喜欢作品数据/Get user like video data")
|
| 103 |
+
async def fetch_user_like_videos(request: Request,
|
| 104 |
+
sec_user_id: str = Query(
|
| 105 |
+
example="MS4wLjABAAAAW9FWcqS7RdQAWPd2AA5fL_ilmqsIFUCQ_Iym6Yh9_cUa6ZRqVLjVQSUjlHrfXY1Y",
|
| 106 |
+
description="用户sec_user_id/User sec_user_id"),
|
| 107 |
+
max_cursor: int = Query(default=0, description="最大游标/Maximum cursor"),
|
| 108 |
+
counts: int = Query(default=20, description="每页数量/Number per page")):
|
| 109 |
+
"""
|
| 110 |
+
# [中文]
|
| 111 |
+
### 用途:
|
| 112 |
+
- 获取用户喜欢作品数据
|
| 113 |
+
### 参数:
|
| 114 |
+
- sec_user_id: 用户sec_user_id
|
| 115 |
+
- max_cursor: 最大游标
|
| 116 |
+
- count: 最大数量
|
| 117 |
+
### 返回:
|
| 118 |
+
- 用户作品数据
|
| 119 |
+
|
| 120 |
+
# [English]
|
| 121 |
+
### Purpose:
|
| 122 |
+
- Get user like video data
|
| 123 |
+
### Parameters:
|
| 124 |
+
- sec_user_id: User sec_user_id
|
| 125 |
+
- max_cursor: Maximum cursor
|
| 126 |
+
- count: Maximum count number
|
| 127 |
+
### Return:
|
| 128 |
+
- User video data
|
| 129 |
+
|
| 130 |
+
# [示例/Example]
|
| 131 |
+
sec_user_id = "MS4wLjABAAAAW9FWcqS7RdQAWPd2AA5fL_ilmqsIFUCQ_Iym6Yh9_cUa6ZRqVLjVQSUjlHrfXY1Y"
|
| 132 |
+
max_cursor = 0
|
| 133 |
+
counts = 20
|
| 134 |
+
"""
|
| 135 |
+
try:
|
| 136 |
+
data = await DouyinWebCrawler.fetch_user_like_videos(sec_user_id, max_cursor, counts)
|
| 137 |
+
return ResponseModel(code=200,
|
| 138 |
+
router=request.url.path,
|
| 139 |
+
data=data)
|
| 140 |
+
except Exception as e:
|
| 141 |
+
status_code = 400
|
| 142 |
+
detail = ErrorResponseModel(code=status_code,
|
| 143 |
+
router=request.url.path,
|
| 144 |
+
params=dict(request.query_params),
|
| 145 |
+
)
|
| 146 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# 获取用户收藏作品数据(用户提供自己的Cookie)
|
| 150 |
+
@router.get("/fetch_user_collection_videos", response_model=ResponseModel,
|
| 151 |
+
summary="获取用户收藏作品数据/Get user collection video data")
|
| 152 |
+
async def fetch_user_collection_videos(request: Request,
|
| 153 |
+
cookie: str = Query(example="YOUR_COOKIE",
|
| 154 |
+
description="用户网页版抖音Cookie/Your web version of Douyin Cookie"),
|
| 155 |
+
max_cursor: int = Query(default=0, description="最大游标/Maximum cursor"),
|
| 156 |
+
counts: int = Query(default=20, description="每页数量/Number per page")):
|
| 157 |
+
"""
|
| 158 |
+
# [中文]
|
| 159 |
+
### 用途:
|
| 160 |
+
- 获取用户收藏作品数据
|
| 161 |
+
### 参数:
|
| 162 |
+
- cookie: 用户网页版抖音Cookie(此接口需要用户提供自己的Cookie)
|
| 163 |
+
- max_cursor: 最大游标
|
| 164 |
+
- count: 最大数量
|
| 165 |
+
### 返回:
|
| 166 |
+
- 用户作品数据
|
| 167 |
+
|
| 168 |
+
# [English]
|
| 169 |
+
### Purpose:
|
| 170 |
+
- Get user collection video data
|
| 171 |
+
### Parameters:
|
| 172 |
+
- cookie: User's web version of Douyin Cookie (This interface requires users to provide their own Cookie)
|
| 173 |
+
- max_cursor: Maximum cursor
|
| 174 |
+
- count: Maximum number
|
| 175 |
+
### Return:
|
| 176 |
+
- User video data
|
| 177 |
+
|
| 178 |
+
# [示例/Example]
|
| 179 |
+
cookie = "YOUR_COOKIE"
|
| 180 |
+
max_cursor = 0
|
| 181 |
+
counts = 20
|
| 182 |
+
"""
|
| 183 |
+
try:
|
| 184 |
+
data = await DouyinWebCrawler.fetch_user_collection_videos(cookie, max_cursor, counts)
|
| 185 |
+
return ResponseModel(code=200,
|
| 186 |
+
router=request.url.path,
|
| 187 |
+
data=data)
|
| 188 |
+
except Exception as e:
|
| 189 |
+
status_code = 400
|
| 190 |
+
detail = ErrorResponseModel(code=status_code,
|
| 191 |
+
router=request.url.path,
|
| 192 |
+
params=dict(request.query_params),
|
| 193 |
+
)
|
| 194 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
# 获取用户合辑作品数据
|
| 198 |
+
@router.get("/fetch_user_mix_videos", response_model=ResponseModel,
|
| 199 |
+
summary="获取用户合辑作品数据/Get user mix video data")
|
| 200 |
+
async def fetch_user_mix_videos(request: Request,
|
| 201 |
+
mix_id: str = Query(example="7348687990509553679", description="合辑id/Mix id"),
|
| 202 |
+
max_cursor: int = Query(default=0, description="最大游标/Maximum cursor"),
|
| 203 |
+
counts: int = Query(default=20, description="每页数量/Number per page")):
|
| 204 |
+
"""
|
| 205 |
+
# [中文]
|
| 206 |
+
### 用途:
|
| 207 |
+
- 获取用户合辑作品数据
|
| 208 |
+
### 参数:
|
| 209 |
+
- mix_id: 合辑id
|
| 210 |
+
- max_cursor: 最大游标
|
| 211 |
+
- count: 最大数量
|
| 212 |
+
### 返回:
|
| 213 |
+
- 用户作品数据
|
| 214 |
+
|
| 215 |
+
# [English]
|
| 216 |
+
### Purpose:
|
| 217 |
+
- Get user mix video data
|
| 218 |
+
### Parameters:
|
| 219 |
+
- mix_id: Mix id
|
| 220 |
+
- max_cursor: Maximum cursor
|
| 221 |
+
- count: Maximum number
|
| 222 |
+
### Return:
|
| 223 |
+
- User video data
|
| 224 |
+
|
| 225 |
+
# [示例/Example]
|
| 226 |
+
url = https://www.douyin.com/collection/7348687990509553679
|
| 227 |
+
mix_id = "7348687990509553679"
|
| 228 |
+
max_cursor = 0
|
| 229 |
+
counts = 20
|
| 230 |
+
"""
|
| 231 |
+
try:
|
| 232 |
+
data = await DouyinWebCrawler.fetch_user_mix_videos(mix_id, max_cursor, counts)
|
| 233 |
+
return ResponseModel(code=200,
|
| 234 |
+
router=request.url.path,
|
| 235 |
+
data=data)
|
| 236 |
+
except Exception as e:
|
| 237 |
+
status_code = 400
|
| 238 |
+
detail = ErrorResponseModel(code=status_code,
|
| 239 |
+
router=request.url.path,
|
| 240 |
+
params=dict(request.query_params),
|
| 241 |
+
)
|
| 242 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
# 获取用户直播流数据
|
| 246 |
+
@router.get("/fetch_user_live_videos", response_model=ResponseModel,
|
| 247 |
+
summary="获取用户直播流数据/Get user live video data")
|
| 248 |
+
async def fetch_user_live_videos(request: Request,
|
| 249 |
+
webcast_id: str = Query(example="285520721194",
|
| 250 |
+
description="直播间webcast_id/Room webcast_id")):
|
| 251 |
+
"""
|
| 252 |
+
# [中文]
|
| 253 |
+
### 用途:
|
| 254 |
+
- 获取用户直播流数据
|
| 255 |
+
### 参数:
|
| 256 |
+
- webcast_id: 直播间webcast_id
|
| 257 |
+
### 返回:
|
| 258 |
+
- 直播流数据
|
| 259 |
+
|
| 260 |
+
# [English]
|
| 261 |
+
### Purpose:
|
| 262 |
+
- Get user live video data
|
| 263 |
+
### Parameters:
|
| 264 |
+
- webcast_id: Room webcast_id
|
| 265 |
+
### Return:
|
| 266 |
+
- Live stream data
|
| 267 |
+
|
| 268 |
+
# [示例/Example]
|
| 269 |
+
webcast_id = "285520721194"
|
| 270 |
+
"""
|
| 271 |
+
try:
|
| 272 |
+
data = await DouyinWebCrawler.fetch_user_live_videos(webcast_id)
|
| 273 |
+
return ResponseModel(code=200,
|
| 274 |
+
router=request.url.path,
|
| 275 |
+
data=data)
|
| 276 |
+
except Exception as e:
|
| 277 |
+
status_code = 400
|
| 278 |
+
detail = ErrorResponseModel(code=status_code,
|
| 279 |
+
router=request.url.path,
|
| 280 |
+
params=dict(request.query_params),
|
| 281 |
+
)
|
| 282 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
# 获取指定用户的直播流数据
|
| 286 |
+
@router.get("/fetch_user_live_videos_by_room_id",
|
| 287 |
+
response_model=ResponseModel,
|
| 288 |
+
summary="获取指定用户的直播流数据/Get live video data of specified user")
|
| 289 |
+
async def fetch_user_live_videos_by_room_id(request: Request,
|
| 290 |
+
room_id: str = Query(example="7318296342189919011",
|
| 291 |
+
description="直播间room_id/Room room_id")):
|
| 292 |
+
"""
|
| 293 |
+
# [中文]
|
| 294 |
+
### 用途:
|
| 295 |
+
- 获取指定用户的直播流数据
|
| 296 |
+
### 参数:
|
| 297 |
+
- room_id: 直播间room_id
|
| 298 |
+
### 返回:
|
| 299 |
+
- 直播流数据
|
| 300 |
+
|
| 301 |
+
# [English]
|
| 302 |
+
### Purpose:
|
| 303 |
+
- Get live video data of specified user
|
| 304 |
+
### Parameters:
|
| 305 |
+
- room_id: Room room_id
|
| 306 |
+
### Return:
|
| 307 |
+
- Live stream data
|
| 308 |
+
|
| 309 |
+
# [示例/Example]
|
| 310 |
+
room_id = "7318296342189919011"
|
| 311 |
+
"""
|
| 312 |
+
try:
|
| 313 |
+
data = await DouyinWebCrawler.fetch_user_live_videos_by_room_id(room_id)
|
| 314 |
+
return ResponseModel(code=200,
|
| 315 |
+
router=request.url.path,
|
| 316 |
+
data=data)
|
| 317 |
+
except Exception as e:
|
| 318 |
+
status_code = 400
|
| 319 |
+
detail = ErrorResponseModel(code=status_code,
|
| 320 |
+
router=request.url.path,
|
| 321 |
+
params=dict(request.query_params),
|
| 322 |
+
)
|
| 323 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
# 获取直播间送礼用户排行榜
|
| 327 |
+
@router.get("/fetch_live_gift_ranking",
|
| 328 |
+
response_model=ResponseModel,
|
| 329 |
+
summary="获取直播间送礼用户排行榜/Get live room gift user ranking")
|
| 330 |
+
async def fetch_live_gift_ranking(request: Request,
|
| 331 |
+
room_id: str = Query(example="7356585666190461731",
|
| 332 |
+
description="直播间room_id/Room room_id"),
|
| 333 |
+
rank_type: int = Query(default=30, description="排行类型/Leaderboard type")):
|
| 334 |
+
"""
|
| 335 |
+
# [中文]
|
| 336 |
+
### 用途:
|
| 337 |
+
- 获取直播间送礼用户排行榜
|
| 338 |
+
### 参数:
|
| 339 |
+
- room_id: 直播间room_id
|
| 340 |
+
- rank_type: 排行类型,默认为30不用修改。
|
| 341 |
+
### 返回:
|
| 342 |
+
- 排行榜数据
|
| 343 |
+
|
| 344 |
+
# [English]
|
| 345 |
+
### Purpose:
|
| 346 |
+
- Get live room gift user ranking
|
| 347 |
+
### Parameters:
|
| 348 |
+
- room_id: Room room_id
|
| 349 |
+
- rank_type: Leaderboard type, default is 30, no need to modify.
|
| 350 |
+
### Return:
|
| 351 |
+
- Leaderboard data
|
| 352 |
+
|
| 353 |
+
# [示例/Example]
|
| 354 |
+
room_id = "7356585666190461731"
|
| 355 |
+
rank_type = 30
|
| 356 |
+
"""
|
| 357 |
+
try:
|
| 358 |
+
data = await DouyinWebCrawler.fetch_live_gift_ranking(room_id, rank_type)
|
| 359 |
+
return ResponseModel(code=200,
|
| 360 |
+
router=request.url.path,
|
| 361 |
+
data=data)
|
| 362 |
+
except Exception as e:
|
| 363 |
+
status_code = 400
|
| 364 |
+
detail = ErrorResponseModel(code=status_code,
|
| 365 |
+
router=request.url.path,
|
| 366 |
+
params=dict(request.query_params),
|
| 367 |
+
)
|
| 368 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# 抖音直播间商品信息
|
| 372 |
+
@router.get("/fetch_live_room_product_result",
|
| 373 |
+
response_model=ResponseModel,
|
| 374 |
+
summary="抖音直播间商品信息/Douyin live room product information")
|
| 375 |
+
async def fetch_live_room_product_result(request: Request,
|
| 376 |
+
cookie: str = Query(example="YOUR_COOKIE",
|
| 377 |
+
description="用户网页版抖音Cookie/Your web version of Douyin Cookie"),
|
| 378 |
+
room_id: str = Query(example="7356742011975715619",
|
| 379 |
+
description="直播间room_id/Room room_id"),
|
| 380 |
+
author_id: str = Query(example="2207432981615527",
|
| 381 |
+
description="作者id/Author id"),
|
| 382 |
+
limit: int = Query(default=20, description="数量/Number")):
|
| 383 |
+
"""
|
| 384 |
+
# [中文]
|
| 385 |
+
### 用途:
|
| 386 |
+
- 抖音直播间商品信息
|
| 387 |
+
### 参数:
|
| 388 |
+
- cookie: 用户网页版抖音Cookie(此接口需要用户提供自己的Cookie,如获取失败请手动过一次验证码)
|
| 389 |
+
- room_id: 直播间room_id
|
| 390 |
+
- author_id: 作者id
|
| 391 |
+
- limit: 数量
|
| 392 |
+
### 返回:
|
| 393 |
+
- 商品信息
|
| 394 |
+
|
| 395 |
+
# [English]
|
| 396 |
+
### Purpose:
|
| 397 |
+
- Douyin live room product information
|
| 398 |
+
### Parameters:
|
| 399 |
+
- cookie: User's web version of Douyin Cookie (This interface requires users to provide their own Cookie, if the acquisition fails, please manually pass the captcha code once)
|
| 400 |
+
- room_id: Room room_id
|
| 401 |
+
- author_id: Author id
|
| 402 |
+
- limit: Number
|
| 403 |
+
### Return:
|
| 404 |
+
- Product information
|
| 405 |
+
|
| 406 |
+
# [示例/Example]
|
| 407 |
+
cookie = "YOUR_COOKIE"
|
| 408 |
+
room_id = "7356742011975715619"
|
| 409 |
+
author_id = "2207432981615527"
|
| 410 |
+
limit = 20
|
| 411 |
+
"""
|
| 412 |
+
try:
|
| 413 |
+
data = await DouyinWebCrawler.fetch_live_room_product_result(cookie, room_id, author_id, limit)
|
| 414 |
+
return ResponseModel(code=200,
|
| 415 |
+
router=request.url.path,
|
| 416 |
+
data=data)
|
| 417 |
+
except Exception as e:
|
| 418 |
+
status_code = 400
|
| 419 |
+
detail = ErrorResponseModel(code=status_code,
|
| 420 |
+
router=request.url.path,
|
| 421 |
+
params=dict(request.query_params),
|
| 422 |
+
)
|
| 423 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
# 获取指定用户的信息
|
| 427 |
+
@router.get("/handler_user_profile",
|
| 428 |
+
response_model=ResponseModel,
|
| 429 |
+
summary="获取指定用户的信息/Get information of specified user")
|
| 430 |
+
async def handler_user_profile(request: Request,
|
| 431 |
+
sec_user_id: str = Query(
|
| 432 |
+
example="MS4wLjABAAAAW9FWcqS7RdQAWPd2AA5fL_ilmqsIFUCQ_Iym6Yh9_cUa6ZRqVLjVQSUjlHrfXY1Y",
|
| 433 |
+
description="用户sec_user_id/User sec_user_id")):
|
| 434 |
+
"""
|
| 435 |
+
# [中文]
|
| 436 |
+
### 用途:
|
| 437 |
+
- 获取指定用户的信息
|
| 438 |
+
### 参数:
|
| 439 |
+
- sec_user_id: 用户sec_user_id
|
| 440 |
+
### 返回:
|
| 441 |
+
- 用户信息
|
| 442 |
+
|
| 443 |
+
# [English]
|
| 444 |
+
### Purpose:
|
| 445 |
+
- Get information of specified user
|
| 446 |
+
### Parameters:
|
| 447 |
+
- sec_user_id: User sec_user_id
|
| 448 |
+
### Return:
|
| 449 |
+
- User information
|
| 450 |
+
|
| 451 |
+
# [示例/Example]
|
| 452 |
+
sec_user_id = "MS4wLjABAAAAW9FWcqS7RdQAWPd2AA5fL_ilmqsIFUCQ_Iym6Yh9_cUa6ZRqVLjVQSUjlHrfXY1Y"
|
| 453 |
+
"""
|
| 454 |
+
try:
|
| 455 |
+
data = await DouyinWebCrawler.handler_user_profile(sec_user_id)
|
| 456 |
+
return ResponseModel(code=200,
|
| 457 |
+
router=request.url.path,
|
| 458 |
+
data=data)
|
| 459 |
+
except Exception as e:
|
| 460 |
+
status_code = 400
|
| 461 |
+
detail = ErrorResponseModel(code=status_code,
|
| 462 |
+
router=request.url.path,
|
| 463 |
+
params=dict(request.query_params),
|
| 464 |
+
)
|
| 465 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
# 获取单个视频评论数据
|
| 469 |
+
@router.get("/fetch_video_comments",
|
| 470 |
+
response_model=ResponseModel,
|
| 471 |
+
summary="获取单个视频评论数据/Get single video comments data")
|
| 472 |
+
async def fetch_video_comments(request: Request,
|
| 473 |
+
aweme_id: str = Query(example="7372484719365098803", description="作品id/Video id"),
|
| 474 |
+
cursor: int = Query(default=0, description="游标/Cursor"),
|
| 475 |
+
count: int = Query(default=20, description="数量/Number")):
|
| 476 |
+
"""
|
| 477 |
+
# [中文]
|
| 478 |
+
### 用途:
|
| 479 |
+
- 获取单个视频评论数据
|
| 480 |
+
### 参数:
|
| 481 |
+
- aweme_id: 作品id
|
| 482 |
+
- cursor: 游标
|
| 483 |
+
- count: 数量
|
| 484 |
+
### 返回:
|
| 485 |
+
- 评论数据
|
| 486 |
+
|
| 487 |
+
# [English]
|
| 488 |
+
### Purpose:
|
| 489 |
+
- Get single video comments data
|
| 490 |
+
### Parameters:
|
| 491 |
+
- aweme_id: Video id
|
| 492 |
+
- cursor: Cursor
|
| 493 |
+
- count: Number
|
| 494 |
+
### Return:
|
| 495 |
+
- Comments data
|
| 496 |
+
|
| 497 |
+
# [示例/Example]
|
| 498 |
+
aweme_id = "7372484719365098803"
|
| 499 |
+
cursor = 0
|
| 500 |
+
count = 20
|
| 501 |
+
"""
|
| 502 |
+
try:
|
| 503 |
+
data = await DouyinWebCrawler.fetch_video_comments(aweme_id, cursor, count)
|
| 504 |
+
return ResponseModel(code=200,
|
| 505 |
+
router=request.url.path,
|
| 506 |
+
data=data)
|
| 507 |
+
except Exception as e:
|
| 508 |
+
status_code = 400
|
| 509 |
+
detail = ErrorResponseModel(code=status_code,
|
| 510 |
+
router=request.url.path,
|
| 511 |
+
params=dict(request.query_params),
|
| 512 |
+
)
|
| 513 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
# 获取指定视频的评论回复数据
|
| 517 |
+
@router.get("/fetch_video_comment_replies",
|
| 518 |
+
response_model=ResponseModel,
|
| 519 |
+
summary="获取指定视频的评论回复数据/Get comment replies data of specified video")
|
| 520 |
+
async def fetch_video_comments_reply(request: Request,
|
| 521 |
+
item_id: str = Query(example="7354666303006723354", description="作品id/Video id"),
|
| 522 |
+
comment_id: str = Query(example="7354669356632638218",
|
| 523 |
+
description="评论id/Comment id"),
|
| 524 |
+
cursor: int = Query(default=0, description="游标/Cursor"),
|
| 525 |
+
count: int = Query(default=20, description="数量/Number")):
|
| 526 |
+
"""
|
| 527 |
+
# [中文]
|
| 528 |
+
### 用途:
|
| 529 |
+
- 获取指定视频的评论回复数据
|
| 530 |
+
### 参数:
|
| 531 |
+
- item_id: 作品id
|
| 532 |
+
- comment_id: 评论id
|
| 533 |
+
- cursor: 游标
|
| 534 |
+
- count: 数量
|
| 535 |
+
### 返回:
|
| 536 |
+
- 评论回复数据
|
| 537 |
+
|
| 538 |
+
# [English]
|
| 539 |
+
### Purpose:
|
| 540 |
+
- Get comment replies data of specified video
|
| 541 |
+
### Parameters:
|
| 542 |
+
- item_id: Video id
|
| 543 |
+
- comment_id: Comment id
|
| 544 |
+
- cursor: Cursor
|
| 545 |
+
- count: Number
|
| 546 |
+
### Return:
|
| 547 |
+
- Comment replies data
|
| 548 |
+
|
| 549 |
+
# [示例/Example]
|
| 550 |
+
aweme_id = "7354666303006723354"
|
| 551 |
+
comment_id = "7354669356632638218"
|
| 552 |
+
cursor = 0
|
| 553 |
+
count = 20
|
| 554 |
+
"""
|
| 555 |
+
try:
|
| 556 |
+
data = await DouyinWebCrawler.fetch_video_comments_reply(item_id, comment_id, cursor, count)
|
| 557 |
+
return ResponseModel(code=200,
|
| 558 |
+
router=request.url.path,
|
| 559 |
+
data=data)
|
| 560 |
+
except Exception as e:
|
| 561 |
+
status_code = 400
|
| 562 |
+
detail = ErrorResponseModel(code=status_code,
|
| 563 |
+
router=request.url.path,
|
| 564 |
+
params=dict(request.query_params),
|
| 565 |
+
)
|
| 566 |
+
raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
# Generate a real msToken cookie value
@router.get("/generate_real_msToken",
            response_model=ResponseModel,
            summary="生成真实msToken/Generate real msToken")
async def generate_real_msToken(request: Request):
    """
    # [中文]
    ### 用途:
    - 生成真实msToken
    ### 返回:
    - msToken

    # [English]
    ### Purpose:
    - Generate real msToken
    ### Return:
    - msToken
    """
    try:
        data = await DouyinWebCrawler.gen_real_msToken()
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
# Generate a ttwid cookie value
@router.get("/generate_ttwid",
            response_model=ResponseModel,
            summary="生成ttwid/Generate ttwid")
async def generate_ttwid(request: Request):
    """
    # [中文]
    ### 用途:
    - 生成ttwid
    ### 返回:
    - ttwid

    # [English]
    ### Purpose:
    - Generate ttwid
    ### Return:
    - ttwid
    """
    try:
        data = await DouyinWebCrawler.gen_ttwid()
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
# Generate a verify_fp fingerprint value
@router.get("/generate_verify_fp",
            response_model=ResponseModel,
            summary="生成verify_fp/Generate verify_fp")
async def generate_verify_fp(request: Request):
    """
    # [中文]
    ### 用途:
    - 生成verify_fp
    ### 返回:
    - verify_fp

    # [English]
    ### Purpose:
    - Generate verify_fp
    ### Return:
    - verify_fp
    """
    try:
        data = await DouyinWebCrawler.gen_verify_fp()
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
# Generate an s_v_web_id cookie value
@router.get("/generate_s_v_web_id",
            response_model=ResponseModel,
            summary="生成s_v_web_id/Generate s_v_web_id")
async def generate_s_v_web_id(request: Request):
    """
    # [中文]
    ### 用途:
    - 生成s_v_web_id
    ### 返回:
    - s_v_web_id

    # [English]
    ### Purpose:
    - Generate s_v_web_id
    ### Return:
    - s_v_web_id
    """
    try:
        data = await DouyinWebCrawler.gen_s_v_web_id()
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
# Generate the X-Bogus signature parameter for a given API URL
@router.get("/generate_x_bogus",
            response_model=ResponseModel,
            summary="使用接口网址生成X-Bogus参数/Generate X-Bogus parameter using API URL")
async def generate_x_bogus(request: Request,
                           url: str = Query(
                               example="https://www.douyin.com/aweme/v1/web/aweme/detail/?aweme_id=7148736076176215311&device_platform=webapp&aid=6383&channel=channel_pc_web&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1920&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=117.0.2045.47&browser_online=true&engine_name=Blink&engine_version="),
                           user_agent: str = Query(
                               example="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36")):
    """
    # [中文]
    ### 用途:
    - 使用接口网址生成X-Bogus参数
    ### 参数:
    - url: 接口网址

    # [English]
    ### Purpose:
    - Generate X-Bogus parameter using API URL
    ### Parameters:
    - url: API URL

    # [示例/Example]
    url = "https://www.douyin.com/aweme/v1/web/aweme/detail/?aweme_id=7148736076176215311&device_platform=webapp&aid=6383&channel=channel_pc_web&pc_client_type=1&version_code=170400&version_name=17.4.0&cookie_enabled=true&screen_width=1920&screen_height=1080&browser_language=zh-CN&browser_platform=Win32&browser_name=Edge&browser_version=117.0.2045.47&browser_online=true&engine_name=Blink&engine_version=117.0.0.0&os_name=Windows&os_version=10&cpu_core_num=128&device_memory=10240&platform=PC&downlink=10&effective_type=4g&round_trip_time=100"
    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"
    """
    try:
        x_bogus = await DouyinWebCrawler.get_x_bogus(url, user_agent)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=x_bogus)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
# Generate the A-Bogus signature parameter for a given API URL
@router.get("/generate_a_bogus",
            response_model=ResponseModel,
            summary="使用接口网址生成A-Bogus参数/Generate A-Bogus parameter using API URL")
async def generate_a_bogus(request: Request,
                           url: str = Query(
                               example="https://www.douyin.com/aweme/v1/web/aweme/detail/?device_platform=webapp&aid=6383&channel=channel_pc_web&pc_client_type=1&version_code=190500&version_name=19.5.0&cookie_enabled=true&browser_language=zh-CN&browser_platform=Win32&browser_name=Firefox&browser_online=true&engine_name=Gecko&os_name=Windows&os_version=10&platform=PC&screen_width=1920&screen_height=1080&browser_version=124.0&engine_version=122.0.0.0&cpu_core_num=12&device_memory=8&aweme_id=7372484719365098803"),
                           user_agent: str = Query(
                               example="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36")):
    """
    # [中文]
    ### 用途:
    - 使用接口网址生成A-Bogus参数
    ### 参数:
    - url: 接口网址
    - user_agent: 用户代理,暂时不支持自定义,直接使用默认值即可。

    # [English]
    ### Purpose:
    - Generate A-Bogus parameter using API URL
    ### Parameters:
    - url: API URL
    - user_agent: User agent, temporarily does not support customization, just use the default value.

    # [示例/Example]
    url = "https://www.douyin.com/aweme/v1/web/aweme/detail/?device_platform=webapp&aid=6383&channel=channel_pc_web&pc_client_type=1&version_code=190500&version_name=19.5.0&cookie_enabled=true&browser_language=zh-CN&browser_platform=Win32&browser_name=Firefox&browser_online=true&engine_name=Gecko&os_name=Windows&os_version=10&platform=PC&screen_width=1920&screen_height=1080&browser_version=124.0&engine_version=122.0.0.0&cpu_core_num=12&device_memory=8&aweme_id=7372484719365098803"
    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"
    """
    try:
        a_bogus = await DouyinWebCrawler.get_a_bogus(url, user_agent)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=a_bogus)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
# Extract the sec_user_id from a single user homepage URL
@router.get("/get_sec_user_id",
            response_model=ResponseModel,
            summary="提取单个用户id/Extract single user id")
async def get_sec_user_id(request: Request,
                          url: str = Query(
                              example="https://www.douyin.com/user/MS4wLjABAAAANXSltcLCzDGmdNFI2Q_QixVTr67NiYzjKOIP5s03CAE")):
    """
    # [中文]
    ### 用途:
    - 提取单个用户id
    ### 参数:
    - url: 用户主页链接
    ### 返回:
    - 用户sec_user_id

    # [English]
    ### Purpose:
    - Extract single user id
    ### Parameters:
    - url: User homepage link
    ### Return:
    - User sec_user_id

    # [示例/Example]
    url = "https://www.douyin.com/user/MS4wLjABAAAANXSltcLCzDGmdNFI2Q_QixVTr67NiYzjKOIP5s03CAE"
    """
    try:
        data = await DouyinWebCrawler.get_sec_user_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 818 |
+
|
| 819 |
+
|
| 820 |
+
# Extract sec_user_ids from a list of user homepage URLs / share texts
@router.post("/get_all_sec_user_id",
             response_model=ResponseModel,
             summary="提取列表用户id/Extract list user id")
async def get_all_sec_user_id(request: Request,
                              url: List[str] = Body(
                                  example=[
                                      "https://www.douyin.com/user/MS4wLjABAAAANXSltcLCzDGmdNFI2Q_QixVTr67NiYzjKOIP5s03CAE?vid=7285950278132616463",
                                      "https://www.douyin.com/user/MS4wLjABAAAAVsneOf144eGDFf8Xp9QNb1VW6ovXnNT5SqJBhJfe8KQBKWKDTWK5Hh-_i9mJzb8C",
                                      "长按复制此条消息,打开抖音搜索,查看TA的更多作品。 https://v.douyin.com/idFqvUms/",
                                      "https://v.douyin.com/idFqvUms/",
                                  ],
                                  description="用户主页链接列表/User homepage link list"
                              )):
    """
    # [中文]
    ### 用途:
    - 提取列表用户id
    ### 参数:
    - url: 用户主页链接列表
    ### 返回:
    - 用户sec_user_id列表

    # [English]
    ### Purpose:
    - Extract list user id
    ### Parameters:
    - url: User homepage link list
    ### Return:
    - User sec_user_id list

    # [示例/Example]
    ```json
    {
      "urls":[
          "https://www.douyin.com/user/MS4wLjABAAAANXSltcLCzDGmdNFI2Q_QixVTr67NiYzjKOIP5s03CAE?vid=7285950278132616463",
          "https://www.douyin.com/user/MS4wLjABAAAAVsneOf144eGDFf8Xp9QNb1VW6ovXnNT5SqJBhJfe8KQBKWKDTWK5Hh-_i9mJzb8C",
          "长按复制此条消息,打开抖音搜索,查看TA的更多作品。 https://v.douyin.com/idFqvUms/",
          "https://v.douyin.com/idFqvUms/"
          ]
    }
    ```
    """
    try:
        data = await DouyinWebCrawler.get_all_sec_user_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
# Extract the aweme (video) id from a single video URL
@router.get("/get_aweme_id",
            response_model=ResponseModel,
            summary="提取单个作品id/Extract single video id")
async def get_aweme_id(request: Request,
                       url: str = Query(example="https://www.douyin.com/video/7298145681699622182")):
    """
    # [中文]
    ### 用途:
    - 提取单个作品id
    ### 参数:
    - url: 作品链接
    ### 返回:
    - 作品id

    # [English]
    ### Purpose:
    - Extract single video id
    ### Parameters:
    - url: Video link
    ### Return:
    - Video id

    # [示例/Example]
    url = "https://www.douyin.com/video/7298145681699622182"
    """
    try:
        data = await DouyinWebCrawler.get_aweme_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
# Extract aweme (video) ids from a list of video URLs / share texts
@router.post("/get_all_aweme_id",
             response_model=ResponseModel,
             summary="提取列表作品id/Extract list video id")
async def get_all_aweme_id(request: Request,
                           url: List[str] = Body(
                               example=[
                                   "0.53 02/26 I@v.sE Fus:/ 你别太帅了郑润泽# 现场版live # 音乐节 # 郑润泽 https://v.douyin.com/iRNBho6u/ 复制此链接,打开Dou音搜索,直接观看视频!",
                                   "https://v.douyin.com/iRNBho6u/",
                                   "https://www.iesdouyin.com/share/video/7298145681699622182/?region=CN&mid=7298145762238565171&u_code=l1j9bkbd&did=MS4wLjABAAAAtqpCx0hpOERbdSzQdjRZw-wFPxaqdbAzsKDmbJMUI3KWlMGQHC-n6dXAqa-dM2EP&iid=MS4wLjABAAAANwkJuWIRFOzg5uCpDRpMj4OX-QryoDgn-yYlXQnRwQQ&with_sec_did=1&titleType=title&share_sign=05kGlqGmR4_IwCX.ZGk6xuL0osNA..5ur7b0jbOx6cc-&share_version=170400&ts=1699262937&from_aid=6383&from_ssr=1&from=web_code_link",
                                   "https://www.douyin.com/video/7298145681699622182?previous_page=web_code_link",
                                   "https://www.douyin.com/video/7298145681699622182",
                               ],
                               description="作品链接列表/Video link list")):
    """
    # [中文]
    ### 用途:
    - 提取列表作品id
    ### 参数:
    - url: 作品链接列表
    ### 返回:
    - 作品id列表

    # [English]
    ### Purpose:
    - Extract list video id
    ### Parameters:
    - url: Video link list
    ### Return:
    - Video id list

    # [示例/Example]
    ```json
    {
      "urls":[
          "0.53 02/26 I@v.sE Fus:/ 你别太帅了郑润泽# 现场版live # 音乐节 # 郑润泽 https://v.douyin.com/iRNBho6u/ 复制此链接,打开Dou音搜索,直接观看视频!",
          "https://v.douyin.com/iRNBho6u/",
          "https://www.iesdouyin.com/share/video/7298145681699622182/?region=CN&mid=7298145762238565171&u_code=l1j9bkbd&did=MS4wLjABAAAAtqpCx0hpOERbdSzQdjRZw-wFPxaqdbAzsKDmbJMUI3KWlMGQHC-n6dXAqa-dM2EP&iid=MS4wLjABAAAANwkJuWIRFOzg5uCpDRpMj4OX-QryoDgn-yYlXQnRwQQ&with_sec_did=1&titleType=title&share_sign=05kGlqGmR4_IwCX.ZGk6xuL0osNA..5ur7b0jbOx6cc-&share_version=170400&ts=1699262937&from_aid=6383&from_ssr=1&from=web_code_link",
          "https://www.douyin.com/video/7298145681699622182?previous_page=web_code_link",
          "https://www.douyin.com/video/7298145681699622182",
          ]
    }
    ```
    """
    try:
        data = await DouyinWebCrawler.get_all_aweme_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 973 |
+
|
| 974 |
+
|
| 975 |
+
# Extract the webcast (live room) id from a single live room URL
@router.get("/get_webcast_id",
            response_model=ResponseModel,
            summary="提取列表直播间号/Extract list webcast id")
async def get_webcast_id(request: Request,
                         url: str = Query(example="https://live.douyin.com/775841227732")):
    """
    # [中文]
    ### 用途:
    - 提取列表直播间号
    ### 参数:
    - url: 直播间链接
    ### 返回:
    - 直播间号

    # [English]
    ### Purpose:
    - Extract list webcast id
    ### Parameters:
    - url: Room link
    ### Return:
    - Room id

    # [示例/Example]
    url = "https://live.douyin.com/775841227732"
    """
    try:
        data = await DouyinWebCrawler.get_webcast_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 1013 |
+
|
| 1014 |
+
|
| 1015 |
+
# Extract webcast (live room) ids from a list of live room URLs / share texts
@router.post("/get_all_webcast_id",
             response_model=ResponseModel,
             summary="提取列表直播间号/Extract list webcast id")
async def get_all_webcast_id(request: Request,
                             url: List[str] = Body(
                                 example=[
                                     "https://live.douyin.com/775841227732",
                                     "https://live.douyin.com/775841227732?room_id=7318296342189919011&enter_from_merge=web_share_link&enter_method=web_share_link&previous_page=app_code_link",
                                     'https://webcast.amemv.com/douyin/webcast/reflow/7318296342189919011?u_code=l1j9bkbd&did=MS4wLjABAAAAEs86TBQPNwAo-RGrcxWyCdwKhI66AK3Pqf3ieo6HaxI&iid=MS4wLjABAAAA0ptpM-zzoliLEeyvWOCUt-_dQza4uSjlIvbtIazXnCY&with_sec_did=1&use_link_command=1&ecom_share_track_params=&extra_params={"from_request_id":"20231230162057EC005772A8EAA0199906","im_channel_invite_id":"0"}&user_id=3644207898042206&liveId=7318296342189919011&from=share&style=share&enter_method=click_share&roomId=7318296342189919011&activity_info={}',
                                     "6i- Q@x.Sl 03/23 【醒子8ke的直播间】 点击打开👉https://v.douyin.com/i8tBR7hX/ 或长按复制此条消息,打开抖音,看TA直播",
                                     "https://v.douyin.com/i8tBR7hX/",
                                 ],
                                 description="直播间链接列表/Room link list")):
    """
    # [中文]
    ### 用途:
    - 提取列表直播间号
    ### 参数:
    - url: 直播间链接列表
    ### 返回:
    - 直播间号列表

    # [English]
    ### Purpose:
    - Extract list webcast id
    ### Parameters:
    - url: Room link list
    ### Return:
    - Room id list

    # [示例/Example]
    ```json
    {
      "urls": [
          "https://live.douyin.com/775841227732",
          "https://live.douyin.com/775841227732?room_id=7318296342189919011&enter_from_merge=web_share_link&enter_method=web_share_link&previous_page=app_code_link",
          'https://webcast.amemv.com/douyin/webcast/reflow/7318296342189919011?u_code=l1j9bkbd&did=MS4wLjABAAAAEs86TBQPNwAo-RGrcxWyCdwKhI66AK3Pqf3ieo6HaxI&iid=MS4wLjABAAAA0ptpM-zzoliLEeyvWOCUt-_dQza4uSjlIvbtIazXnCY&with_sec_did=1&use_link_command=1&ecom_share_track_params=&extra_params={"from_request_id":"20231230162057EC005772A8EAA0199906","im_channel_invite_id":"0"}&user_id=3644207898042206&liveId=7318296342189919011&from=share&style=share&enter_method=click_share&roomId=7318296342189919011&activity_info={}',
          "6i- Q@x.Sl 03/23 【醒子8ke的直播间】 点击打开👉https://v.douyin.com/i8tBR7hX/ 或长按复制此条消息,打开抖音,看TA直播",
          "https://v.douyin.com/i8tBR7hX/",
          ]
    }
    ```
    """
    try:
        data = await DouyinWebCrawler.get_all_webcast_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
app/api/endpoints/download.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import zipfile
|
| 3 |
+
|
| 4 |
+
import aiofiles
|
| 5 |
+
import httpx
|
| 6 |
+
import yaml
|
| 7 |
+
from fastapi import APIRouter, Request, Query # 导入FastAPI组件
|
| 8 |
+
from starlette.responses import FileResponse
|
| 9 |
+
|
| 10 |
+
from app.api.models.APIResponseModel import ErrorResponseModel # 导入响应模型
|
| 11 |
+
from crawlers.hybrid.hybrid_crawler import HybridCrawler # 导入混合数据爬虫
|
| 12 |
+
|
| 13 |
+
router = APIRouter()
|
| 14 |
+
HybridCrawler = HybridCrawler()
|
| 15 |
+
|
| 16 |
+
# 读取上级再上级目录的配置文件
|
| 17 |
+
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'config.yaml')
|
| 18 |
+
with open(config_path, 'r', encoding='utf-8') as file:
|
| 19 |
+
config = yaml.safe_load(file)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
async def fetch_data(url: str, headers: dict = None):
    """Fetch *url* asynchronously and return the successful response.

    When *headers* is omitted, a default desktop browser User-Agent is sent.
    When *headers* is provided, the actual header mapping is taken from its
    'headers' key.
    NOTE(review): callers appear to pass a wrapper dict shaped like
    {'headers': {...}} — confirm against the crawler helpers that produce it.

    Raises:
        httpx.HTTPStatusError: if the server responds with an error status.
    """
    if headers is None:
        request_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
    else:
        request_headers = headers.get('headers')
    async with httpx.AsyncClient() as client:
        response = await client.get(url, headers=request_headers)
        # Raise on non-2xx so callers only ever see successful responses.
        response.raise_for_status()
        return response
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@router.get("/download", summary="在线下载抖音|TikTok视频/图片/Online download Douyin|TikTok video/image")
async def download_file_hybrid(request: Request,
                               url: str = Query(
                                   example="https://www.douyin.com/video/7372484719365098803",
                                   description="视频或图片的URL地址,也支持抖音|TikTok的分享链接,例如:https://v.douyin.com/e4J8Q7A/"),
                               prefix: bool = True,
                               with_watermark: bool = False):
    """
    # [中文]
    ### 用途:
    - 在线下载抖音|TikTok 无水印或有水印的视频/图片
    - 通过传入的视频URL参数,获取对应的视频或图片数据,然后下载到本地。
    - 如果你在尝试直接访问TikTok单一视频接口的JSON数据中的视频播放地址时遇到HTTP403错误,那么你可以使用此接口来下载视频。
    - 这个接口会占用一定的服务器资源,所以在Demo站点是默认关闭的,你可以在本地部署后调用此接口。
    ### 参数:
    - url: 视频或图片的URL地址,也支持抖音|TikTok的分享链接,例如:https://v.douyin.com/e4J8Q7A/。
    - prefix: 下载文件的前缀,默认为True,可以在配置文件中修改。
    - with_watermark: 是否下载带水印的视频或图片,默认为False。
    ### 返回:
    - 返回下载的视频或图片文件响应。

    # [English]
    ### Purpose:
    - Download Douyin|TikTok video/image with or without watermark online.
    - By passing the video URL parameter, get the corresponding video or image data, and then download it to the local.
    - If you encounter an HTTP403 error when trying to access the video playback address in the JSON data of the TikTok single video interface directly, you can use this interface to download the video.
    - This interface will occupy a certain amount of server resources, so it is disabled by default on the Demo site, you can call this interface after deploying it locally.
    ### Parameters:
    - url: The URL address of the video or image, also supports Douyin|TikTok sharing links, for example: https://v.douyin.com/e4J8Q7A/.
    - prefix: The prefix of the downloaded file, the default is True, and can be modified in the configuration file.
    - with_watermark: Whether to download videos or images with watermarks, the default is False.
    ### Returns:
    - Return the response of the downloaded video or image file.

    # [示例/Example]
    url: https://www.douyin.com/video/7372484719365098803
    """
    # Whether this endpoint is enabled in config.yaml
    if not config["API"]["Download_Switch"]:
        code = 400
        message = "Download endpoint is disabled in the configuration file. | 配置文件中已禁用下载端点。"
        return ErrorResponseModel(code=code, message=message, router=request.url.path,
                                  params=dict(request.query_params))

    # Parse the share link / URL into structured video or image data.
    try:
        data = await HybridCrawler.hybrid_parsing_single_video(url, minimal=True)
    except Exception as e:
        code = 400
        return ErrorResponseModel(code=code, message=str(e), router=request.url.path, params=dict(request.query_params))

    # Download the media described by the parsed data.
    try:
        data_type = data.get('type')
        platform = data.get('platform')
        aweme_id = data.get('aweme_id')
        file_prefix = config.get("API").get("Download_File_Prefix") if prefix else ''
        download_path = os.path.join(config.get("API").get("Download_Path"), f"{platform}_{data_type}")

        # Ensure the target directory exists.
        os.makedirs(download_path, exist_ok=True)

        if data_type == 'video':
            file_name = f"{file_prefix}{platform}_{aweme_id}.mp4" if not with_watermark else f"{file_prefix}{platform}_{aweme_id}_watermark.mp4"
            url = data.get('video_data').get('nwm_video_url_HQ') if not with_watermark else data.get('video_data').get(
                'wm_video_url_HQ')
            file_path = os.path.join(download_path, file_name)

            # Serve the cached file when it has already been downloaded.
            if os.path.exists(file_path):
                return FileResponse(path=file_path, media_type='video/mp4', filename=file_name)

            # Platform-specific headers are required or the CDN returns 403.
            __headers = await HybridCrawler.TikTokWebCrawler.get_tiktok_headers() if platform == 'tiktok' else await HybridCrawler.DouyinWebCrawler.get_douyin_headers()
            response = await fetch_data(url, headers=__headers)

            async with aiofiles.open(file_path, 'wb') as out_file:
                await out_file.write(response.content)

            return FileResponse(path=file_path, filename=file_name, media_type="video/mp4")

        elif data_type == 'image':
            # Image posts are bundled into a single zip archive.
            zip_file_name = f"{file_prefix}{platform}_{aweme_id}_images.zip" if not with_watermark else f"{file_prefix}{platform}_{aweme_id}_images_watermark.zip"
            zip_file_path = os.path.join(download_path, zip_file_name)

            # Serve the cached archive when it has already been built.
            if os.path.exists(zip_file_path):
                return FileResponse(path=zip_file_path, filename=zip_file_name, media_type="application/zip")

            urls = data.get('image_data').get('no_watermark_image_list') if not with_watermark else data.get(
                'image_data').get('watermark_image_list')
            image_file_list = []
            # enumerate (instead of urls.index(url)) keeps O(n) total and gives
            # each image a distinct index even when the same URL appears twice.
            for index, image_url in enumerate(urls):
                response = await fetch_data(image_url)
                content_type = response.headers.get('content-type')
                file_format = content_type.split('/')[1]
                file_name = f"{file_prefix}{platform}_{aweme_id}_{index + 1}.{file_format}" if not with_watermark else f"{file_prefix}{platform}_{aweme_id}_{index + 1}_watermark.{file_format}"
                file_path = os.path.join(download_path, file_name)
                image_file_list.append(file_path)

                async with aiofiles.open(file_path, 'wb') as out_file:
                    await out_file.write(response.content)

            # Pack the downloaded images into the archive.
            with zipfile.ZipFile(zip_file_path, 'w') as zip_file:
                for image_file in image_file_list:
                    zip_file.write(image_file, os.path.basename(image_file))

            return FileResponse(path=zip_file_path, filename=zip_file_name, media_type="application/zip")

        # NOTE(review): any other data_type currently falls through and returns
        # None (serialized as null) — confirm whether an explicit error is wanted.

    except Exception as e:
        print(e)
        code = 400
        return ErrorResponseModel(code=code, message=str(e), router=request.url.path, params=dict(request.query_params))
|
app/api/endpoints/hybrid_parsing.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
|
| 3 |
+
from fastapi import APIRouter, Body, Query, Request, HTTPException # 导入FastAPI组件
|
| 4 |
+
|
| 5 |
+
from app.api.models.APIResponseModel import ResponseModel, ErrorResponseModel # 导入响应模型
|
| 6 |
+
|
| 7 |
+
# 爬虫/Crawler
|
| 8 |
+
from crawlers.hybrid.hybrid_crawler import HybridCrawler # 导入混合爬虫
|
| 9 |
+
|
| 10 |
+
HybridCrawler = HybridCrawler() # 实例化混合爬虫
|
| 11 |
+
|
| 12 |
+
router = APIRouter()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@router.get("/video_data", response_model=ResponseModel, tags=["Hybrid-API"],
            summary="混合解析单一视频接口/Hybrid parsing single video endpoint")
async def hybrid_parsing_single_video(request: Request,
                                      url: str = Query(example="https://v.douyin.com/L4FJNR3/"),
                                      minimal: bool = Query(default=False)):
    """
    # [中文]
    ### 用途:
    - 该接口用于解析抖音/TikTok单一视频的数据。
    ### 参数:
    - `url`: 视频链接、分享链接、分享文本。
    ### 返回:
    - `data`: 视频数据。

    # [English]
    ### Purpose:
    - This endpoint is used to parse data of a single Douyin/TikTok video.
    ### Parameters:
    - `url`: Video link, share link, or share text.
    ### Returns:
    - `data`: Video data.

    # [Example]
    url = "https://v.douyin.com/L4FJNR3/"
    """
    try:
        # Parse the video via the hybrid (Douyin + TikTok) crawler.
        data = await HybridCrawler.hybrid_parsing_single_video(url=url, minimal=minimal)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the failure reason instead of discarding the caught exception.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
app/api/endpoints/ios_shortcut.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import yaml
|
| 3 |
+
from fastapi import APIRouter
|
| 4 |
+
from app.api.models.APIResponseModel import iOS_Shortcut
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Load the project-level config.yaml that sits four directory levels above
# this file (the repository root). Parsed once at import time into the
# module-level `config` dict used by the /shortcut endpoint.
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'config.yaml')
with open(config_path, 'r', encoding='utf-8') as file:
    config = yaml.safe_load(file)
|
| 11 |
+
|
| 12 |
+
router = APIRouter()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@router.get("/shortcut", response_model=iOS_Shortcut, summary="用于iOS快捷指令的版本更新信息/Version update information for iOS shortcuts")
async def get_shortcut():
    """Return the iOS shortcut release info read from config.yaml."""
    # Pull the shortcut section once, then build the response directly
    # from its keys (version is coerced to str as the model expects).
    shortcut_cfg = config["iOS_Shortcut"]
    return iOS_Shortcut(
        version=str(shortcut_cfg["iOS_Shortcut_Version"]),
        update=shortcut_cfg["iOS_Shortcut_Update_Time"],
        link=shortcut_cfg["iOS_Shortcut_Link"],
        link_en=shortcut_cfg["iOS_Shortcut_Link_EN"],
        note=shortcut_cfg["iOS_Shortcut_Update_Note"],
        note_en=shortcut_cfg["iOS_Shortcut_Update_Note_EN"],
    )
|
app/api/endpoints/tiktok_app.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Query, Request, HTTPException # 导入FastAPI组件
|
| 2 |
+
from app.api.models.APIResponseModel import ResponseModel, ErrorResponseModel # 导入响应模型
|
| 3 |
+
|
| 4 |
+
from crawlers.tiktok.app.app_crawler import TikTokAPPCrawler # 导入APP爬虫
|
| 5 |
+
|
| 6 |
+
# Router collecting the TikTok APP-API endpoints; mounted by the app-level router.
router = APIRouter()
# Shared crawler instance; deliberately shadows the imported class name.
TikTokAPPCrawler = TikTokAPPCrawler()
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Get single video data (TikTok APP API)
@router.get("/fetch_one_video",
            response_model=ResponseModel,
            summary="获取单个作品数据/Get single video data")
async def fetch_one_video(request: Request,
                          aweme_id: str = Query(example="7350810998023949599", description="作品id/Video id")):
    """
    # [中文]
    ### 用途:
    - 获取单个作品数据
    ### 参数:
    - aweme_id: 作品id
    ### 返回:
    - 作品数据

    # [English]
    ### Purpose:
    - Get single video data
    ### Parameters:
    - aweme_id: Video id
    ### Return:
    - Video data

    # [示例/Example]
    aweme_id = "7350810998023949599"
    """
    try:
        data = await TikTokAPPCrawler.fetch_one_video(aweme_id)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 49 |
+
|
app/api/endpoints/tiktok_web.py
ADDED
|
@@ -0,0 +1,951 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
from fastapi import APIRouter, Query, Body, Request, HTTPException # 导入FastAPI组件
|
| 4 |
+
|
| 5 |
+
from app.api.models.APIResponseModel import ResponseModel, ErrorResponseModel # 导入响应模型
|
| 6 |
+
|
| 7 |
+
from crawlers.tiktok.web.web_crawler import TikTokWebCrawler # 导入TikTokWebCrawler类
|
| 8 |
+
|
| 9 |
+
# Router collecting the TikTok Web-API endpoints; mounted by the app-level router.
router = APIRouter()
# Shared crawler instance; deliberately shadows the imported class name.
TikTokWebCrawler = TikTokWebCrawler()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Get single video data (TikTok Web API)
@router.get("/fetch_one_video",
            response_model=ResponseModel,
            summary="获取单个作品数据/Get single video data")
async def fetch_one_video(request: Request,
                          itemId: str = Query(example="7339393672959757570", description="作品id/Video id")):
    """
    # [中文]
    ### 用途:
    - 获取单个作品数据
    ### 参数:
    - itemId: 作品id
    ### 返回:
    - 作品数据

    # [English]
    ### Purpose:
    - Get single video data
    ### Parameters:
    - itemId: Video id
    ### Return:
    - Video data

    # [示例/Example]
    itemId = "7339393672959757570"
    """
    try:
        data = await TikTokWebCrawler.fetch_one_video(itemId)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# Get a user's profile information
@router.get("/fetch_user_profile",
            response_model=ResponseModel,
            summary="获取用户的个人信息/Get user profile")
async def fetch_user_profile(request: Request,
                             uniqueId: str = Query(default="tiktok", description="用户uniqueId/User uniqueId"),
                             secUid: str = Query(default="", description="用户secUid/User secUid")):
    """
    # [中文]
    ### 用途:
    - 获取用户的个人信息
    ### 参数:
    - secUid: 用户secUid
    - uniqueId: 用户uniqueId
    - secUid和uniqueId至少提供一个, 优先使用uniqueId, 也就是用户主页的链接中的用户名。
    ### 返回:
    - 用户的个人信息

    # [English]
    ### Purpose:
    - Get user profile
    ### Parameters:
    - secUid: User secUid
    - uniqueId: User uniqueId
    - At least one of secUid and uniqueId is provided, and uniqueId is preferred, that is, the username in the user's homepage link.
    ### Return:
    - User profile

    # [示例/Example]
    secUid = "MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM"
    uniqueId = "tiktok"
    """
    try:
        # Crawler expects (secUid, uniqueId) in this order.
        data = await TikTokWebCrawler.fetch_user_profile(secUid, uniqueId)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
# Get a user's posted videos
@router.get("/fetch_user_post",
            response_model=ResponseModel,
            summary="获取用户的作品列表/Get user posts")
async def fetch_user_post(request: Request,
                          secUid: str = Query(example="MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM",
                                              description="用户secUid/User secUid"),
                          cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                          count: int = Query(default=35, description="每页数量/Number per page"),
                          coverFormat: int = Query(default=2, description="封面格式/Cover format")):
    """
    # [中文]
    ### 用途:
    - 获取用户的作品列表
    ### 参数:
    - secUid: 用户secUid
    - cursor: 翻页游标
    - count: 每页数量
    - coverFormat: 封面格式
    ### 返回:
    - 用户的作品列表

    # [English]
    ### Purpose:
    - Get user posts
    ### Parameters:
    - secUid: User secUid
    - cursor: Page cursor
    - count: Number per page
    - coverFormat: Cover format
    ### Return:
    - User posts

    # [示例/Example]
    secUid = "MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM"
    cursor = 0
    count = 35
    coverFormat = 2
    """
    try:
        data = await TikTokWebCrawler.fetch_user_post(secUid, cursor, count, coverFormat)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
# Get a user's liked videos (requires the like list to be public)
@router.get("/fetch_user_like",
            response_model=ResponseModel,
            summary="获取用户的点赞列表/Get user likes")
async def fetch_user_like(request: Request,
                          secUid: str = Query(
                              example="MS4wLjABAAAAq1iRXNduFZpY301UkVpJ1eQT60_NiWS9QQSeNqmNQEDJp0pOF8cpleNEdiJx5_IU",
                              description="用户secUid/User secUid"),
                          cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                          count: int = Query(default=35, description="每页数量/Number per page"),
                          coverFormat: int = Query(default=2, description="封面格式/Cover format")):
    """
    # [中文]
    ### 用途:
    - 获取用户的点赞列表
    - 注意: 该接口需要用户点赞列表为公开状态
    ### 参数:
    - secUid: 用户secUid
    - cursor: 翻页游标
    - count: 每页数量
    - coverFormat: 封面格式
    ### 返回:
    - 用户的点赞列表

    # [English]
    ### Purpose:
    - Get user likes
    - Note: This interface requires that the user's like list be public
    ### Parameters:
    - secUid: User secUid
    - cursor: Page cursor
    - count: Number per page
    - coverFormat: Cover format
    ### Return:
    - User likes

    # [示例/Example]
    secUid = "MS4wLjABAAAAq1iRXNduFZpY301UkVpJ1eQT60_NiWS9QQSeNqmNQEDJp0pOF8cpleNEdiJx5_IU"
    cursor = 0
    count = 35
    coverFormat = 2
    """
    try:
        data = await TikTokWebCrawler.fetch_user_like(secUid, cursor, count, coverFormat)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
# Get a user's favorites (only the caller's own list; needs their cookie)
@router.get("/fetch_user_collect",
            response_model=ResponseModel,
            summary="获取用户的收藏列表/Get user favorites")
async def fetch_user_collect(request: Request,
                             cookie: str = Query(example="Your_Cookie", description="用户cookie/User cookie"),
                             secUid: str = Query(example="Your_SecUid", description="用户secUid/User secUid"),
                             cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                             count: int = Query(default=30, description="每页数量/Number per page"),
                             coverFormat: int = Query(default=2, description="封面格式/Cover format")):
    """
    # [中文]
    ### 用途:
    - 获取用户的收藏列表
    - 注意: 该接口目前只能获取自己的收藏列表,需要提供自己账号的cookie。
    ### 参数:
    - cookie: 用户cookie
    - secUid: 用户secUid
    - cursor: 翻页游标
    - count: 每页数量
    - coverFormat: 封面格式
    ### 返回:
    - 用户的收藏列表

    # [English]
    ### Purpose:
    - Get user favorites
    - Note: This interface can currently only get your own favorites list, you need to provide your account cookie.
    ### Parameters:
    - cookie: User cookie
    - secUid: User secUid
    - cursor: Page cursor
    - count: Number per page
    - coverFormat: Cover format
    ### Return:
    - User favorites

    # [示例/Example]
    cookie = "Your_Cookie"
    secUid = "Your_SecUid"
    cursor = 0
    count = 30
    coverFormat = 2
    """
    try:
        data = await TikTokWebCrawler.fetch_user_collect(cookie, secUid, cursor, count, coverFormat)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
# Get a user's playlists
@router.get("/fetch_user_play_list",
            response_model=ResponseModel,
            summary="获取用户的播放列表/Get user play list")
async def fetch_user_play_list(request: Request,
                               secUid: str = Query(example="MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM",
                                                   description="用户secUid/User secUid"),
                               cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                               count: int = Query(default=30, description="每页数量/Number per page")):
    """
    # [中文]
    ### 用途:
    - 获取用户的播放列表
    ### 参数:
    - secUid: 用户secUid
    - cursor: 翻页游标
    - count: 每页数量
    ### 返回:
    - 用户的播放列表

    # [English]
    ### Purpose:
    - Get user play list
    ### Parameters:
    - secUid: User secUid
    - cursor: Page cursor
    - count: Number per page
    ### Return:
    - User play list

    # [示例/Example]
    secUid = "MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM"
    cursor = 0
    count = 30
    """
    try:
        data = await TikTokWebCrawler.fetch_user_play_list(secUid, cursor, count)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
# Get the videos of a user's mix (collection)
@router.get("/fetch_user_mix",
            response_model=ResponseModel,
            summary="获取用户的合辑列表/Get user mix list")
async def fetch_user_mix(request: Request,
                         mixId: str = Query(example="7101538765474106158",
                                            description="合辑id/Mix id"),
                         cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                         count: int = Query(default=30, description="每页数量/Number per page")):
    """
    # [中文]
    ### 用途:
    - 获取用户的合辑列表
    ### 参数:
    - mixId: 合辑id
    - cursor: 翻页游标
    - count: 每页数量
    ### 返回:
    - 用户的合辑列表

    # [English]
    ### Purpose:
    - Get user mix list
    ### Parameters:
    - mixId: Mix id
    - cursor: Page cursor
    - count: Number per page
    ### Return:
    - User mix list

    # [示例/Example]
    mixId = "7101538765474106158"
    cursor = 0
    count = 30
    """
    try:
        data = await TikTokWebCrawler.fetch_user_mix(mixId, cursor, count)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
# Get the comments of a video
@router.get("/fetch_post_comment",
            response_model=ResponseModel,
            summary="获取作品的评论列表/Get video comments")
async def fetch_post_comment(request: Request,
                             aweme_id: str = Query(example="7304809083817774382", description="作品id/Video id"),
                             cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                             count: int = Query(default=20, description="每页数量/Number per page"),
                             current_region: str = Query(default="", description="当前地区/Current region")):
    """
    # [中文]
    ### 用途:
    - 获取作品的评论列表
    ### 参数:
    - aweme_id: 作品id
    - cursor: 翻页游标
    - count: 每页数量
    - current_region: 当前地区,默认为空。
    ### 返回:
    - 作品的评论列表

    # [English]
    ### Purpose:
    - Get video comments
    ### Parameters:
    - aweme_id: Video id
    - cursor: Page cursor
    - count: Number per page
    - current_region: Current region, default is empty.
    ### Return:
    - Video comments

    # [示例/Example]
    aweme_id = "7304809083817774382"
    cursor = 0
    count = 20
    current_region = ""
    """
    try:
        data = await TikTokWebCrawler.fetch_post_comment(aweme_id, cursor, count, current_region)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
# Get the replies to a video comment (original comment had mojibake: 获取作品的评论回复列表)
@router.get("/fetch_post_comment_reply",
            response_model=ResponseModel,
            summary="获取作品的评论回复列表/Get video comment replies")
async def fetch_post_comment_reply(request: Request,
                                   item_id: str = Query(example="7304809083817774382", description="作品id/Video id"),
                                   comment_id: str = Query(example="7304877760886588191",
                                                           description="评论id/Comment id"),
                                   cursor: int = Query(default=0, description="翻页游标/Page cursor"),
                                   count: int = Query(default=20, description="每页数量/Number per page"),
                                   current_region: str = Query(default="", description="当前地区/Current region")):
    """
    # [中文]
    ### 用途:
    - 获取作品的评论回复列表
    ### 参数:
    - item_id: 作品id
    - comment_id: 评论id
    - cursor: 翻页游标
    - count: 每页数量
    - current_region: 当前地区,默认为空。
    ### 返回:
    - 作品的评论回复列表

    # [English]
    ### Purpose:
    - Get video comment replies
    ### Parameters:
    - item_id: Video id
    - comment_id: Comment id
    - cursor: Page cursor
    - count: Number per page
    - current_region: Current region, default is empty.
    ### Return:
    - Video comment replies

    # [示例/Example]
    item_id = "7304809083817774382"
    comment_id = "7304877760886588191"
    cursor = 0
    count = 20
    current_region = ""
    """
    try:
        data = await TikTokWebCrawler.fetch_post_comment_reply(item_id, comment_id, cursor, count, current_region)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
# Get a user's followers
@router.get("/fetch_user_fans",
            response_model=ResponseModel,
            summary="获取用户的粉丝列表/Get user followers")
async def fetch_user_fans(request: Request,
                          secUid: str = Query(example="MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM",
                                              description="用户secUid/User secUid"),
                          count: int = Query(default=30, description="每页数量/Number per page"),
                          maxCursor: int = Query(default=0, description="最大游标/Max cursor"),
                          minCursor: int = Query(default=0, description="最小游标/Min cursor")):
    """
    # [中文]
    ### 用途:
    - 获取用户的粉丝列表
    ### 参数:
    - secUid: 用户secUid
    - count: 每页数量
    - maxCursor: 最大游标
    - minCursor: 最小游标
    ### 返回:
    - 用户的粉丝列表

    # [English]
    ### Purpose:
    - Get user followers
    ### Parameters:
    - secUid: User secUid
    - count: Number per page
    - maxCursor: Max cursor
    - minCursor: Min cursor
    ### Return:
    - User followers

    # [示例/Example]
    secUid = "MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM"
    count = 30
    maxCursor = 0
    minCursor = 0
    """
    try:
        data = await TikTokWebCrawler.fetch_user_fans(secUid, count, maxCursor, minCursor)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
# Get the accounts a user follows
@router.get("/fetch_user_follow",
            response_model=ResponseModel,
            summary="获取用户的关注列表/Get user followings")
async def fetch_user_follow(request: Request,
                            secUid: str = Query(example="MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM",
                                                description="用户secUid/User secUid"),
                            count: int = Query(default=30, description="每页数量/Number per page"),
                            maxCursor: int = Query(default=0, description="最大游标/Max cursor"),
                            minCursor: int = Query(default=0, description="最小游标/Min cursor")):
    """
    # [中文]
    ### 用途:
    - 获取用户的关注列表
    ### 参数:
    - secUid: 用户secUid
    - count: 每页数量
    - maxCursor: 最大游标
    - minCursor: 最小游标
    ### 返回:
    - 用户的关注列表

    # [English]
    ### Purpose:
    - Get user followings
    ### Parameters:
    - secUid: User secUid
    - count: Number per page
    - maxCursor: Max cursor
    - minCursor: Min cursor
    ### Return:
    - User followings

    # [示例/Example]
    secUid = "MS4wLjABAAAAv7iSuuXDJGDvJkmH_vz1qkDZYo1apxgzaxdBSeIuPiM"
    count = 30
    maxCursor = 0
    minCursor = 0
    """
    try:
        data = await TikTokWebCrawler.fetch_user_follow(secUid, count, maxCursor, minCursor)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
"""-------------------------------------------------------utils接口列表-------------------------------------------------------"""
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
# Generate a real msToken (utility endpoint)
@router.get("/generate_real_msToken",
            response_model=ResponseModel,
            summary="生成真实msToken/Generate real msToken")
async def generate_real_msToken(request: Request):
    """
    # [中文]
    ### 用途:
    - 生成真实msToken
    ### 返回:
    - 真实msToken

    # [English]
    ### Purpose:
    - Generate real msToken
    ### Return:
    - Real msToken
    """
    try:
        data = await TikTokWebCrawler.fetch_real_msToken()
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        # Map any crawler failure to HTTP 400; chain the original exception
        # so the root cause is preserved (previously `e` was unused).
        status_code = 400
        detail = ErrorResponseModel(code=status_code,
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict()) from e
|
| 612 |
+
|
| 613 |
+
|
| 614 |
+
# Generate a ttwid cookie value
@router.get("/generate_ttwid",
            response_model=ResponseModel,
            summary="生成ttwid/Generate ttwid")
async def generate_ttwid(request: Request,
                         cookie: str = Query(example="Your_Cookie", description="用户cookie/User cookie")):
    """
    # [中文]
    ### 用途:
    - 生成ttwid
    ### 参数:
    - cookie: 用户cookie
    ### 返回:
    - ttwid

    # [English]
    ### Purpose:
    - Generate ttwid
    ### Parameters:
    - cookie: User cookie
    ### Return:
    - ttwid

    # [示例/Example]
    cookie = "Your_Cookie"
    """
    try:
        data = await TikTokWebCrawler.fetch_ttwid(cookie)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Include the actual exception message; the original dropped it and
        # always returned the model's default "An error occurred.".
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
# Generate an X-Bogus signature for an unsigned API URL
@router.get("/generate_xbogus",
            response_model=ResponseModel,
            summary="生成xbogus/Generate xbogus")
async def generate_xbogus(request: Request,
                          url: str = Query(
                              example="https://www.tiktok.com/api/item/detail/?WebIdLastTime=1712665533&aid=1988&app_language=en&app_name=tiktok_web&browser_language=en-US&browser_name=Mozilla&browser_online=true&browser_platform=Win32&browser_version=5.0%20%28Windows%29&channel=tiktok_web&cookie_enabled=true&device_id=7349090360347690538&device_platform=web_pc&focus_state=true&from_page=user&history_len=4&is_fullscreen=false&is_page_visible=true&language=en&os=windows&priority_region=US&referer=&region=US&root_referer=https%3A%2F%2Fwww.tiktok.com%2F&screen_height=1080&screen_width=1920&webcast_language=en&tz_name=America%2FTijuana&msToken=AYFCEapCLbMrS8uTLBoYdUMeeVLbCdFQ_QF_-OcjzJw1CPr4JQhWUtagy0k4a9IITAqi5Qxr2Vdh9mgCbyGxTnvWLa4ZVY6IiSf6lcST-tr0IXfl-r_ZTpzvWDoQfqOVsWCTlSNkhAwB-tap5g==&itemId=7339393672959757570",
                              description="未签名的API URL/Unsigned API URL"),
                          user_agent: str = Query(
                              example="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
                              description="用户浏览器User-Agent/User browser User-Agent")):
    """
    # [中文]
    ### 用途:
    - 生成xbogus
    ### 参数:
    - url: 未签名的API URL
    - user_agent: 用户浏览器User-Agent
    ### 返回:
    - xbogus

    # [English]
    ### Purpose:
    - Generate xbogus
    ### Parameters:
    - url: Unsigned API URL
    - user_agent: User browser User-Agent
    ### Return:
    - xbogus

    # [示例/Example]
    url = "https://www.tiktok.com/api/item/detail/?WebIdLastTime=1712665533&aid=1988&app_language=en&app_name=tiktok_web&browser_language=en-US&browser_name=Mozilla&browser_online=true&browser_platform=Win32&browser_version=5.0%20%28Windows%29&channel=tiktok_web&cookie_enabled=true&device_id=7349090360347690538&device_platform=web_pc&focus_state=true&from_page=user&history_len=4&is_fullscreen=false&is_page_visible=true&language=en&os=windows&priority_region=US&referer=&region=US&root_referer=https%3A%2F%2Fwww.tiktok.com%2F&screen_height=1080&screen_width=1920&webcast_language=en&tz_name=America%2FTijuana&msToken=AYFCEapCLbMrS8uTLBoYdUMeeVLbCdFQ_QF_-OcjzJw1CPr4JQhWUtagy0k4a9IITAqi5Qxr2Vdh9mgCbyGxTnvWLa4ZVY6IiSf6lcST-tr0IXfl-r_ZTpzvWDoQfqOVsWCTlSNkhAwB-tap5g==&itemId=7339393672959757570"
    user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    """
    try:
        data = await TikTokWebCrawler.gen_xbogus(url, user_agent)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Propagate the real failure reason; `e` was previously unused.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
# Extract a single user's secUid from a profile URL
@router.get("/get_sec_user_id",
            response_model=ResponseModel,
            summary="提取列表用户id/Extract list user id")
async def get_sec_user_id(request: Request,
                          url: str = Query(
                              example="https://www.tiktok.com/@tiktok",
                              description="用户主页链接/User homepage link")):
    """
    # [中文]
    ### 用途:
    - 提取列表用户id
    ### 参数:
    - url: 用户主页链接
    ### 返回:
    - 用户id

    # [English]
    ### Purpose:
    - Extract list user id
    ### Parameters:
    - url: User homepage link
    ### Return:
    - User id

    # [示例/Example]
    url = "https://www.tiktok.com/@tiktok"
    """
    try:
        data = await TikTokWebCrawler.get_sec_user_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Report why the extraction failed; the exception was silently dropped before.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 742 |
+
|
| 743 |
+
|
| 744 |
+
# Extract secUids for a list of profile URLs
@router.post("/get_all_sec_user_id",
             response_model=ResponseModel,
             summary="提取列表用户id/Extract list user id")
async def get_all_sec_user_id(request: Request,
                              url: List[str] = Body(
                                  example=["https://www.tiktok.com/@tiktok"],
                                  description="用户主页链接/User homepage link")):
    """
    # [中文]
    ### 用途:
    - 提取列表用户id
    ### 参数:
    - url: 用户主页链接
    ### 返回:
    - 用户id

    # [English]
    ### Purpose:
    - Extract list user id
    ### Parameters:
    - url: User homepage link
    ### Return:
    - User id

    # [示例/Example]
    url = ["https://www.tiktok.com/@tiktok"]
    """
    try:
        data = await TikTokWebCrawler.get_all_sec_user_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Forward the underlying error text instead of the generic default.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 784 |
+
|
| 785 |
+
|
| 786 |
+
# Extract the video id (aweme_id) from a single video URL
@router.get("/get_aweme_id",
            response_model=ResponseModel,
            summary="提取单个作品id/Extract single video id")
async def get_aweme_id(request: Request,
                       url: str = Query(
                           example="https://www.tiktok.com/@owlcitymusic/video/7218694761253735723",
                           description="作品链接/Video link")):
    """
    # [中文]
    ### 用途:
    - 提取单个作品id
    ### 参数:
    - url: 作品链接
    ### 返回:
    - 作品id

    # [English]
    ### Purpose:
    - Extract single video id
    ### Parameters:
    - url: Video link
    ### Return:
    - Video id

    # [示例/Example]
    url = "https://www.tiktok.com/@owlcitymusic/video/7218694761253735723"
    """
    try:
        data = await TikTokWebCrawler.get_aweme_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Include the real failure message; `e` was previously ignored.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 826 |
+
|
| 827 |
+
|
| 828 |
+
# Extract video ids for a list of video URLs
@router.post("/get_all_aweme_id",
             response_model=ResponseModel,
             summary="提取列表作品id/Extract list video id")
async def get_all_aweme_id(request: Request,
                           url: List[str] = Body(
                               example=["https://www.tiktok.com/@owlcitymusic/video/7218694761253735723"],
                               description="作品链接/Video link")):
    """
    # [中文]
    ### 用途:
    - 提取列表作品id
    ### 参数:
    - url: 作品链接
    ### 返回:
    - 作品id

    # [English]
    ### Purpose:
    - Extract list video id
    ### Parameters:
    - url: Video link
    ### Return:
    - Video id

    # [示例/Example]
    url = ["https://www.tiktok.com/@owlcitymusic/video/7218694761253735723"]
    """
    try:
        data = await TikTokWebCrawler.get_all_aweme_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Surface the underlying error; the caught exception was unused before.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 868 |
+
|
| 869 |
+
|
| 870 |
+
# Resolve a user's unique_id (handle) from a profile URL
@router.get("/get_unique_id",
            response_model=ResponseModel,
            summary="获取用户unique_id/Get user unique_id")
async def get_unique_id(request: Request,
                        url: str = Query(
                            example="https://www.tiktok.com/@tiktok",
                            description="用户主页链接/User homepage link")):
    """
    # [中文]
    ### 用途:
    - 获取用户unique_id
    ### 参数:
    - url: 用户主页链接
    ### 返回:
    - unique_id

    # [English]
    ### Purpose:
    - Get user unique_id
    ### Parameters:
    - url: User homepage link
    ### Return:
    - unique_id

    # [示例/Example]
    url = "https://www.tiktok.com/@tiktok"
    """
    try:
        data = await TikTokWebCrawler.get_unique_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Report the actual failure reason to the client.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
| 910 |
+
|
| 911 |
+
|
| 912 |
+
# Resolve unique_ids for a list of profile URLs
@router.post("/get_all_unique_id",
             response_model=ResponseModel,
             summary="获取列表unique_id/Get list unique_id")
async def get_all_unique_id(request: Request,
                            url: List[str] = Body(
                                example=["https://www.tiktok.com/@tiktok"],
                                description="用户主页链接/User homepage link")):
    """
    # [中文]
    ### 用途:
    - 获取列表unique_id
    ### 参数:
    - url: 用户主页链接
    ### 返回:
    - unique_id

    # [English]
    ### Purpose:
    - Get list unique_id
    ### Parameters:
    - url: User homepage link
    ### Return:
    - unique_id

    # [示例/Example]
    url = ["https://www.tiktok.com/@tiktok"]
    """
    try:
        data = await TikTokWebCrawler.get_all_unique_id(url)
        return ResponseModel(code=200,
                             router=request.url.path,
                             data=data)
    except Exception as e:
        status_code = 400
        # Include the real error message instead of the model's generic default.
        detail = ErrorResponseModel(code=status_code,
                                    message=str(e),
                                    router=request.url.path,
                                    params=dict(request.query_params))
        raise HTTPException(status_code=status_code, detail=detail.dict())
|
app/api/models/APIResponseModel.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime
from functools import wraps
from typing import Any, Callable, Type, Optional, Dict

from fastapi import Body, FastAPI, Query, Request, HTTPException
from pydantic import BaseModel, Field
|
| 6 |
+
|
| 7 |
+
app = FastAPI()
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Generic success response model shared by all API endpoints.
class ResponseModel(BaseModel):
    code: int = 200  # HTTP-style status code
    router: str = "Endpoint path"  # path of the endpoint that produced the response
    data: Optional[Any] = {}  # payload returned by the crawler


# Error response model embedded in HTTPException detail.
class ErrorResponseModel(BaseModel):
    code: int = 400
    message: str = "An error occurred."
    support: str = "Please contact us on Github: https://github.com/Evil0ctal/Douyin_TikTok_Download_API"
    # default_factory evaluates per-instance, so the timestamp reflects when the
    # error actually happened. The original default was computed once at import
    # time, stamping every error with the server's start-up time.
    time: str = Field(default_factory=lambda: datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    router: str  # path of the failing endpoint (required)
    params: dict = {}  # query/body parameters of the failing request


# Response model for the hybrid (Douyin/TikTok) single-video parser.
class HybridResponseModel(BaseModel):
    code: int = 200
    router: str = "Hybrid parsing single video endpoint"
    data: Optional[Any] = {}


# Metadata payload returned to the iOS Shortcut updater.
class iOS_Shortcut(BaseModel):
    version: str
    update: str
    link: str  # shortcut download link (Chinese version)
    link_en: str  # shortcut download link (English version)
    note: str
    note_en: str
|
app/api/router.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter
from app.api.endpoints import (
    tiktok_web,
    tiktok_app,
    douyin_web,
    bilibili_web,
    hybrid_parsing, ios_shortcut, download,
)

# Top-level API router: aggregates every platform-specific sub-router under
# a common prefix; mounted by app.main with the "/api" prefix.
router = APIRouter()

# TikTok routers
router.include_router(tiktok_web.router, prefix="/tiktok/web", tags=["TikTok-Web-API"])
router.include_router(tiktok_app.router, prefix="/tiktok/app", tags=["TikTok-App-API"])

# Douyin routers
router.include_router(douyin_web.router, prefix="/douyin/web", tags=["Douyin-Web-API"])

# Bilibili routers
router.include_router(bilibili_web.router, prefix="/bilibili/web", tags=["Bilibili-Web-API"])

# Hybrid routers (auto-detect platform from the given URL)
router.include_router(hybrid_parsing.router, prefix="/hybrid", tags=["Hybrid-API"])

# iOS_Shortcut routers
router.include_router(ios_shortcut.router, prefix="/ios", tags=["iOS-Shortcut"])

# Download routers (no extra prefix: paths are defined inside the module)
router.include_router(download.router, tags=["Download"])
|
app/main.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==============================================================================
# Copyright (C) 2021 Evil0ctal
#
# This file is part of the Douyin_TikTok_Download_API project.
#
# This project is licensed under the Apache License 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#                  __
#                 /> フ
#                 | _ _ l
#                 /` ミ_xノ
#                 /      |     Feed me Stars ⭐ ️
#                /   ヽ  ノ
#                │  | | |
#            / ̄|   | | |
#            | ( ̄ヽ__ヽ_)__)
#            \二つ
# ==============================================================================
#
# Contributor Link:
# - https://github.com/Evil0ctal
# - https://github.com/Johnserf-Seed
#
# ==============================================================================


# FastAPI APP
import uvicorn
from fastapi import FastAPI
from app.api.router import router as api_router

# PyWebIO APP (browser front-end mounted on the same ASGI server)
from app.web.app import MainView
from pywebio.platform.fastapi import asgi_app

# OS
import os

# YAML
import yaml

# Load Config

# Read config.yaml from the project root (two directory levels above this file).
config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config.yaml')
with open(config_path, 'r', encoding='utf-8') as file:
    config = yaml.safe_load(file)


# Bind address/port for the development server below.
Host_IP = config['API']['Host_IP']
Host_Port = config['API']['Host_Port']

# API Tags — ordering here controls section ordering in the OpenAPI docs page.
tags_metadata = [
    {
        "name": "Hybrid-API",
        "description": "**(混合数据接口/Hybrid-API data endpoints)**",
    },
    {
        "name": "Douyin-Web-API",
        "description": "**(抖音Web数据接口/Douyin-Web-API data endpoints)**",
    },
    {
        "name": "TikTok-Web-API",
        "description": "**(TikTok-Web-API数据接口/TikTok-Web-API data endpoints)**",
    },
    {
        "name": "TikTok-App-API",
        "description": "**(TikTok-App-API数据接口/TikTok-App-API data endpoints)**",
    },
    {
        "name": "Bilibili-Web-API",
        "description": "**(Bilibili-Web-API数据接口/Bilibili-Web-API data endpoints)**",
    },
    {
        "name": "iOS-Shortcut",
        "description": "**(iOS快捷指令数据接口/iOS-Shortcut data endpoints)**",
    },
    {
        "name": "Download",
        "description": "**(下载数据接口/Download data endpoints)**",
    },
]

# Metadata shown on the OpenAPI docs landing page.
version = config['API']['Version']
update_time = config['API']['Update_Time']
environment = config['API']['Environment']

description = f"""
### [中文]

#### 关于
- **Github**: [Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API)
- **版本**: `{version}`
- **更新时间**: `{update_time}`
- **环境**: `{environment}`
- **文档**: [API Documentation](https://douyin.wtf/docs)
#### 备注
- 本项目仅供学习交流使用,不得用于违法用途,否则后果自负。
- 如果你不想自己部署,可以直接使用我们的在线API服务:[Douyin_TikTok_Download_API](https://douyin.wtf/docs)
- 如果你需要更稳定以及更多功能的API服务,可以使用付费API服务:[TikHub API](https://api.tikhub.io/)

### [English]

#### About
- **Github**: [Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API)
- **Version**: `{version}`
- **Last Updated**: `{update_time}`
- **Environment**: `{environment}`
- **Documentation**: [API Documentation](https://douyin.wtf/docs)
#### Note
- This project is for learning and communication only, and shall not be used for illegal purposes, otherwise the consequences shall be borne by yourself.
- If you do not want to deploy it yourself, you can directly use our online API service: [Douyin_TikTok_Download_API](https://douyin.wtf/docs)
- If you need a more stable and feature-rich API service, you can use the paid API service: [TikHub API](https://api.tikhub.io)
"""

docs_url = config['API']['Docs_URL']
redoc_url = config['API']['Redoc_URL']

app = FastAPI(
    title="Douyin TikTok Download API",
    description=description,
    version=version,
    openapi_tags=tags_metadata,
    docs_url=docs_url,  # Swagger UI path
    redoc_url=redoc_url,  # ReDoc path
)

# API router — all REST endpoints live under /api.
app.include_router(api_router, prefix="/api")

# PyWebIO APP — mounted at "/" LAST so it does not shadow /api or /docs.
if config['Web']['PyWebIO_Enable']:
    webapp = asgi_app(lambda: MainView().main_view())
    app.mount("/", webapp)

if __name__ == '__main__':
    uvicorn.run(app, host=Host_IP, port=Host_Port)
|
app/web/app.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PyWebIO组件/PyWebIO components
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import yaml
|
| 5 |
+
from pywebio import session, config as pywebio_config
|
| 6 |
+
from pywebio.input import *
|
| 7 |
+
from pywebio.output import *
|
| 8 |
+
|
| 9 |
+
from app.web.views.About import about_pop_window
|
| 10 |
+
from app.web.views.Document import api_document_pop_window
|
| 11 |
+
from app.web.views.Downloader import downloader_pop_window
|
| 12 |
+
from app.web.views.EasterEgg import a
|
| 13 |
+
from app.web.views.ParseVideo import parse_video
|
| 14 |
+
from app.web.views.Shortcuts import ios_pop_window
|
| 15 |
+
# PyWebIO的各个视图/Views of PyWebIO
|
| 16 |
+
from app.web.views.ViewsUtils import ViewsUtils
|
| 17 |
+
|
| 18 |
+
# 读取上级再上级目录的配置文件
|
| 19 |
+
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'config.yaml')
|
| 20 |
+
with open(config_path, 'r', encoding='utf-8') as file:
|
| 21 |
+
_config = yaml.safe_load(file)
|
| 22 |
+
|
| 23 |
+
pywebio_config(theme=_config['Web']['PyWebIO_Theme'],
|
| 24 |
+
title=_config['Web']['Tab_Title'],
|
| 25 |
+
description=_config['Web']['Description'],
|
| 26 |
+
js_file=[
|
| 27 |
+
# 整一个看板娘,二次元浓度++
|
| 28 |
+
_config['Web']['Live2D_JS'] if _config['Web']['Live2D_Enable'] else None,
|
| 29 |
+
])
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class MainView:
    """Root PyWebIO page: renders the header, navbar and function selector."""

    def __init__(self):
        # Shared helpers (notably .t() which picks Chinese/English text
        # based on the visitor's locale).
        self.utils = ViewsUtils()

    # Main view — entry point wrapped by asgi_app() in app.main.
    def main_view(self):
        # Left navbar / page chrome
        with use_scope('main'):
            # Set favicon
            favicon_url = _config['Web']['Favicon']
            session.run_js(f"""
            $('head').append('<link rel="icon" type="image/png" href="{favicon_url}">')
            """)
            # Remove PyWebIO's default footer
            session.run_js("""$('footer').remove()""")
            # Disallow sending a Referrer header (keeps source page private)
            session.run_js("""$('head').append('<meta name=referrer content=no-referrer>');""")
            # Page title (localized)
            title = self.utils.t("TikTok/抖音无水印在线解析下载",
                                 "Douyin/TikTok online parsing and download without watermark")
            put_html(f"""
            <div align="center">
            <a href="/" alt="logo" ><img src="{favicon_url}" width="100"/></a>
            <h1 align="center">{title}</h1>
            </div>
            """)
            # Navbar: each button opens a popup window
            put_row(
                [
                    put_button(self.utils.t("快捷指令", 'iOS Shortcut'),
                               onclick=lambda: ios_pop_window(), link_style=True, small=True),
                    put_button(self.utils.t("开放接口", 'Open API'),
                               onclick=lambda: api_document_pop_window(), link_style=True, small=True),
                    put_button(self.utils.t("下载器", "Downloader"),
                               onclick=lambda: downloader_pop_window(), link_style=True, small=True),
                    put_button(self.utils.t("关于", 'About'),
                               onclick=lambda: about_pop_window(), link_style=True, small=True),
                ])

            # Function selection — option text doubles as the dispatch key below.
            options = [
                # Index: 0
                self.utils.t('🔍批量解析视频', '🔍Batch Parse Video'),
                # Index: 1
                self.utils.t('🔍解析用户主页视频', '🔍Parse User Homepage Video'),
                # Index: 2
                self.utils.t('🥚小彩蛋', '🥚Easter Egg'),
            ]
            # Blocks until the user submits a choice.
            select_options = select(
                self.utils.t('请在这里选择一个你想要的功能吧 ~', 'Please select a function you want here ~'),
                required=True,
                options=options,
                help_text=self.utils.t('📎选上面的选项然后点击提交', '📎Select the options above and click Submit')
            )
            # Dispatch on the selected option text.
            if select_options == options[0]:
                parse_video()
            elif select_options == options[1]:
                put_markdown(self.utils.t('暂未开放,敬请期待~', 'Not yet open, please look forward to it~'))
            elif select_options == options[2]:
                # Easter egg can be disabled via config.
                a() if _config['Web']['Easter_Egg'] else put_markdown(self.utils.t('没有小彩蛋哦~', 'No Easter Egg~'))
|
app/web/views/About.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pywebio.output import popup, put_markdown, put_html, put_text, put_link, put_image
from app.web.views.ViewsUtils import ViewsUtils

# Localization helper: t(chinese, english) picks the visitor's language.
t = ViewsUtils().t


# About pop-up window
def about_pop_window():
    """Render the "More Information" popup: visit counter, repo, feedback and contact links."""
    with popup(t('更多信息', 'More Information')):
        # Visit counter badge (external SVG service)
        put_html('<h3>👀{}</h3>'.format(t('访问记录', 'Visit Record')))
        put_image('https://views.whatilearened.today/views/github/evil0ctal/TikTokDownload_PyWebIO.svg',
                  title='访问记录')
        put_html('<hr>')
        put_html('<h3>⭐Github</h3>')
        put_markdown('[Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API)')
        put_html('<hr>')
        put_html('<h3>🎯{}</h3>'.format(t('反馈', 'Feedback')))
        put_markdown('{}:[issues](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)'.format(
            t('Bug反馈', 'Bug Feedback')))
        put_html('<hr>')
        put_html('<h3>💖WeChat</h3>')
        put_markdown('WeChat:[Evil0ctal](https://mycyberpunk.com/)')
        put_html('<hr>')
|
app/web/views/Document.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pywebio.output import popup, put_markdown, put_html, put_text, put_link
|
| 2 |
+
from app.web.views.ViewsUtils import ViewsUtils
|
| 3 |
+
|
| 4 |
+
t = ViewsUtils().t
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# API文档弹窗/API documentation pop-up
|
| 8 |
+
def api_document_pop_window():
    """Render the API documentation popup: a link to the local /docs page followed by the TikHub.io section."""
    with popup(t("📑API文档", "📑API Document")):
        # Local API documentation.
        put_markdown(t("> 介绍",
                       "> Introduction"))
        put_markdown(t("你可以利用本项目提供的API接口来获取抖音/TikTok的数据,具体接口文档请参考下方链接。",
                       "You can use the API provided by this project to obtain Douyin/TikTok data. For specific API documentation, please refer to the link below."))
        put_markdown(t("如果API不可用,请尝试自己部署本项目,然后再配置文件中修改cookie的值。",
                       "If the API is not available, please try to deploy this project by yourself, and then modify the value of the cookie in the configuration file."))
        put_link('[API Docs]', '/docs', new_window=True)
        put_markdown("----")
        # TikHub.io section: a fixed sequence of (zh, en) markdown paragraphs,
        # rendered in order — same output as the original call-per-paragraph form.
        sections = [
            ("> 更多接口",
             "> More APIs"),
            ("[TikHub.io](https://beta-web.tikhub.io/en-us/users/signin)是一个API平台,提供包括Douyin、TikTok在内的各种公开数据接口,如果您想支持 [Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API) 项目的开发,我们强烈建议您选择[TikHub.io](https://beta-web.tikhub.io/en-us/users/signin)。",
             "[TikHub.io](https://beta-web.tikhub.io/en-us/users/signin) is an API platform that provides various public data interfaces including Douyin and TikTok. If you want to support the development of the [Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API) project, we strongly recommend that you choose [TikHub.io](https://beta-web.tikhub.io/en-us/users/signin)."),
            ("#### 特点:",
             "#### Features:"),
            ("> 📦 开箱即用",
             "> 📦 Ready to use"),
            ("简化使用流程,利用封装好的SDK迅速开展开发工作。所有API接口均依据RESTful架构设计,并使用OpenAPI规范进行描述和文档化,附带示例参数,确保调用更加简便。",
             "Simplify the use process and quickly carry out development work using the encapsulated SDK. All API interfaces are designed based on the RESTful architecture and described and documented using the OpenAPI specification, with example parameters attached to ensure easier calls."),
            ("> 💰 成本优势",
             "> 💰 Cost advantage"),
            ("不预设套餐限制,没有月度使用门槛,所有消费按实际使用量即时计费,并且根据用户每日的请求量进行阶梯式计费,同时可以通过每日签到在用户后台进行签到获取免费的额度,并且这些免费额度不会过期。",
             "There is no preset package limit, no monthly usage threshold, all consumption is billed in real time according to the actual usage, and billed in a step-by-step manner according to the user's daily request volume. At the same time, you can sign in daily in the user background to get free quotas, and these free quotas will not expire."),
            ("> ⚡️ 快速支持",
             "> ⚡️ Quick support"),
            ("我们有一个庞大的Discord社区服务器,管理员和其他用户会在服务器中快速的回复你,帮助你快速解决当前的问题。",
             "We have a huge Discord community server, where administrators and other users will quickly reply to you in the server and help you quickly solve the current problem."),
            ("> 🎉 拥抱开源",
             "> 🎉 Embrace open source"),
            ("TikHub的部分源代码会开源在Github上,并且会赞助一些开源项目的作者。",
             "Some of TikHub's source code will be open sourced on Github, and will sponsor some open source project authors."),
            ("#### 链接:",
             "#### Links:"),
            ("- Github: [TikHub Github](https://github.com/TikHubIO)",
             "- Github: [TikHub Github](https://github.com/TikHubIO)"),
            ("- Discord: [TikHub Discord](https://discord.com/invite/aMEAS8Xsvz)",
             "- Discord: [TikHub Discord](https://discord.com/invite/aMEAS8Xsvz)"),
            ("- Register: [TikHub signup](https://beta-web.tikhub.io/en-us/users/signup)",
             "- Register: [TikHub signup](https://beta-web.tikhub.io/en-us/users/signup)"),
            ("- API Docs: [TikHub API Docs](https://api.tikhub.io/)",
             "- API Docs: [TikHub API Docs](https://api.tikhub.io/)"),
        ]
        for zh, en in sections:
            put_markdown(t(zh, en))
        put_markdown("----")
|
app/web/views/Downloader.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pywebio.output import popup, put_markdown, put_html, put_text, put_link
|
| 2 |
+
from app.web.views.ViewsUtils import ViewsUtils
|
| 3 |
+
|
| 4 |
+
t = ViewsUtils().t
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# 下载器弹窗/Downloader pop-up
|
| 8 |
+
def downloader_pop_window():
    """Render the "Downloader" popup: desktop downloader recommendation plus usage notes."""
    with popup(t("💾 下载器", "💾 Downloader")):
        put_markdown(t("> 桌面端下载器", "> Desktop Downloader"))
        put_markdown(t("你可以使用下面的开源项目在桌面端下载视频:",
                       "You can use the following open source projects to download videos on the desktop:"))
        put_markdown("1. [TikTokDownload](https://github.com/Johnserf-Seed/TikTokDownload)")
        put_markdown(t("> 备注", "> Note"))
        # Usage disclaimers, rendered as a numbered list.
        notes = [
            ("1. 请注意下载器的使用规范,不要用于违法用途。",
             "1. Please pay attention to the use specifications of the downloader and do not use it for illegal purposes."),
            ("2. 下载器相关问题请咨询对应项目的开发者。",
             "2. For issues related to the downloader, please consult the developer of the corresponding project."),
        ]
        for zh, en in notes:
            put_markdown(t(zh, en))
|
app/web/views/EasterEgg.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
import pyfiglet
|
| 5 |
+
from pywebio import start_server
|
| 6 |
+
from pywebio.output import put_text, clear, put_html
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def a():
    """Easter egg: render Conway's Game of Life in the browser for ~2 minutes.

    Deliberately obfuscated (single-letter names). Pipeline: ASCII-art splash
    -> 3-2-1 countdown -> initial grid as an HTML table -> one generation per
    0.1 s, patched in place via injected <script> snippets.
    """
    # Grid dimensions and a random 0/1 initial population.
    H, W = 60, 80
    g = np.random.choice([0, 1], size=(H, W))

    def u():
        # Compute one Game-of-Life generation on a toroidal (wrap-around) grid.
        n = g.copy()
        for i in range(H):
            for j in range(W):
                # t = live-neighbour count over the 8-neighbourhood, wrapped with %.
                t = sum([g[i, (j - 1) % W], g[i, (j + 1) % W], g[(i - 1) % H, j], g[(i + 1) % H, j],
                         g[(i - 1) % H, (j - 1) % W], g[(i - 1) % H, (j + 1) % W], g[(i + 1) % H, (j - 1) % W],
                         g[(i + 1) % H, (j + 1) % W]])
                # Birth on exactly 3 neighbours; death on <2 or >3; otherwise unchanged.
                n[i, j] = 1 if g[i, j] == 0 and t == 3 else 0 if g[i, j] == 1 and (t < 2 or t > 3) else g[i, j]
        return n

    def m(s):
        # Print s as large ASCII art.
        put_text(pyfiglet.figlet_format(s, font="slant"))

    def c():
        # Splash screen; the binary literals decode to "EvilOctal\nGAME\nOF\nLIFE\n2024".
        m(''.join([chr(int(c, 2)) for c in
                   ['01000101', '01110110', '01101001', '01101100', '01001111', '01100011', '01110100', '01100001',
                    '01101100', '00001010', '01000111', '01000001', '01001101', '01000101', '00001010', '01001111',
                    '01000110', '00001010', '01001100', '01001001', '01000110', '01000101', '00001010', '00110010',
                    '00110000', '00110010', '00110100']]));
        time.sleep(3)
        # 3-2-1 countdown, clearing the screen each second.
        for i in range(3, 0, -1): clear(); m(str(i)); time.sleep(1)
        clear()

    def h(g):
        # Render the whole grid as a static HTML table (one fixed-size <td> per cell).
        return '<table id="life-grid" style="table-layout: fixed; border-spacing:0;">' + ''.join('<tr>' + ''.join(
            f'<td style="width:10px; height:10px; background:{"black" if c else "white"};"></td>' for c in r) + '</tr>'
            for r in
            g) + '</table>'

    c();
    put_html(h(g))

    def r(g):
        # Build a <script> that recolours every cell of the existing table in place
        # (cheaper than re-emitting the whole table each frame).
        return f"<script>" + ''.join(
            f'document.getElementById("life-grid").rows[{i}].cells[{j}].style.background = "{"black" if g[i, j] else "white"}";'
            for i in range(H) for j in range(W)) + "</script>"

    # Animate for two minutes, one generation every 0.1 s.
    e = time.time() + 120
    while time.time() < e:
        time.sleep(0.1);
        g = u();
        put_html(r(g))
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Standalone entry point for running the easter egg outside the main app.
if __name__ == '__main__':
    # A boring code is ready to run!
    # "Genshin, launch!" (translated joke comment)
    start_server(a, port=80)
|
app/web/views/ParseVideo.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import os
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
import yaml
|
| 6 |
+
from pywebio.input import *
|
| 7 |
+
from pywebio.output import *
|
| 8 |
+
from pywebio_battery import put_video
|
| 9 |
+
|
| 10 |
+
from app.web.views.ViewsUtils import ViewsUtils
|
| 11 |
+
|
| 12 |
+
from crawlers.hybrid.hybrid_crawler import HybridCrawler
|
| 13 |
+
|
| 14 |
+
# NOTE: the instance deliberately shadows the imported HybridCrawler class,
# so the rest of this module shares a single crawler instance.
HybridCrawler = HybridCrawler()

# Load config.yaml from the project root (four directory levels above this file).
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'config.yaml')
with open(config_path, 'r', encoding='utf-8') as file:
    config = yaml.safe_load(file)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# 校验输入值/Validate input value
|
| 23 |
+
def valid_check(input_data: str):
    """pywebio ``validate`` callback for the input textarea.

    Returns a warning string when the input contains no URL or too many URLs
    (which pywebio shows as a validation error); returns None implicitly when
    the input is acceptable.
    """
    # Extract every URL from the pasted share text.
    found = ViewsUtils.find_url(input_data)
    if not found:
        return ViewsUtils.t('没有检测到有效的链接,请检查输入的内容是否正确。',
                            'No valid link detected, please check if the input content is correct.')
    # Cap on how many URLs one submission may contain (from config.yaml).
    max_urls = config['Web']['Max_Take_URLs']
    if len(found) > int(max_urls):
        return ViewsUtils.t(f'输入的链接太多啦,当前只会处理输入的前{max_urls}个链接!',
                            f'Too many links input, only the first {max_urls} links will be processed!')
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# 错误处理/Error handling
|
| 42 |
+
def error_do(reason: str, value: str) -> None:
    """Render an error panel for one failed input value.

    Shows the failure reason and the offending input in a table, followed by
    likely causes and where to get help, so the batch loop can continue with
    the next value.

    Args:
        reason: Human-readable failure reason (usually the exception text).
        value:  The input value (URL / share text) that failed to parse.
    """
    put_html("<hr>")
    put_error(
        ViewsUtils.t("发生了一个错误,程序将跳过这个输入值,继续处理下一个输入值。",
                     "An error occurred, the program will skip this input value and continue to process the next input value."))
    put_html(f"<h3>⚠{ViewsUtils.t('详情', 'Details')}</h3>")
    put_table([
        [
            ViewsUtils.t('原因', 'reason'),
            ViewsUtils.t('输入值', 'input value')
        ],
        [
            reason,
            value
        ]
    ])
    put_markdown(ViewsUtils.t('> 可能的原因:', '> Possible reasons:'))
    put_markdown(ViewsUtils.t("- 视频已被删除或者链接不正确。",
                              "- The video has been deleted or the link is incorrect."))
    # Fix: the original statement ended with a stray trailing comma, turning it
    # into a throwaway 1-tuple expression; same side effect, but it was a typo.
    put_markdown(ViewsUtils.t("- 接口风控,请求过于频繁。",
                              "- Interface risk control, request too frequent."))
    put_markdown(ViewsUtils.t("- 没有使用有效的Cookie,如果你部署后没有替换相应的Cookie,可能会导致解析失败。",
                              "- No valid Cookie is used. If you do not replace the corresponding Cookie after deployment, it may cause parsing failure."))
    put_markdown(ViewsUtils.t("> 寻求帮助:", "> Seek help:"))
    put_markdown(ViewsUtils.t(
        "- 你可以尝试再次解析,或者尝试自行部署项目,然后替换`./app/crawlers/平台文件夹/config.yaml`中的`cookie`值。",
        "- You can try to parse again, or try to deploy the project by yourself, and then replace the `cookie` value in `./app/crawlers/platform folder/config.yaml`."))

    put_markdown(
        "- GitHub Issue: [Evil0ctal/Douyin_TikTok_Download_API](https://github.com/Evil0ctal/Douyin_TikTok_Download_API/issues)")
    put_html("<hr>")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def parse_video():
    """Main PyWebIO view: read pasted share text, parse each URL via HybridCrawler, and render results.

    Flow: prompt for input -> extract URLs -> parse each one (a failure renders
    an error panel and the loop continues) -> render a per-URL result table ->
    finish with summary statistics and timing.
    """
    placeholder = ViewsUtils.t(
        "批量解析请直接粘贴多个口令或链接,无需使用符号分开,支持抖音和TikTok链接混合,暂时不支持作者主页链接批量解析。",
        "Batch parsing, please paste multiple passwords or links directly, no need to use symbols to separate, support for mixing Douyin and TikTok links, temporarily not support for author home page link batch parsing.")
    input_data = textarea(
        ViewsUtils.t('请将抖音或TikTok的分享口令或网址粘贴于此',
                     "Please paste the share code or URL of [Douyin|TikTok] here"),
        type=TEXT,
        validate=valid_check,
        required=True,
        placeholder=placeholder,
        position=0)
    url_lists = ViewsUtils.find_url(input_data)
    # Parse start time
    start = time.time()
    # Success / failure counters
    success_count = 0
    failed_count = 0
    # Total number of links found
    url_count = len(url_lists)
    # URLs parsed successfully
    success_list = []
    # URLs that failed to parse
    failed_list = []
    # Show a progress banner while parsing
    with use_scope('loading_text'):
        # Output a line break
        put_row([put_html('<br>')])
        put_warning(ViewsUtils.t('Server酱正收到你输入的链接啦!(◍•ᴗ•◍)\n正在努力处理中,请稍等片刻...',
                                 'ServerChan is receiving your input link! (◍•ᴗ•◍)\nEfforts are being made, please wait a moment...'))
        # Result page title (empty scope, filled after the loop finishes)
        put_scope('result_title')
    # Iterate over the link list
    for url in url_lists:
        # 1-based link index (also used as the pywebio scope name)
        url_index = url_lists.index(url) + 1
        # Parse this link
        try:
            data = asyncio.run(HybridCrawler.hybrid_parsing_single_video(url, minimal=True))
        except Exception as e:
            error_msg = str(e)
            with use_scope(str(url_index)):
                error_do(reason=error_msg, value=url)
            failed_count += 1
            failed_list.append(url)
            continue

        # Fields shared by both video and image results
        url_type = ViewsUtils.t('视频', 'Video') if data.get('type') == 'video' else ViewsUtils.t('图片', 'Image')
        platform = data.get('platform')
        table_list = [
            [ViewsUtils.t('类型', 'type'), ViewsUtils.t('内容', 'content')],
            [ViewsUtils.t('解析类型', 'Type'), url_type],
            [ViewsUtils.t('平台', 'Platform'), platform],
            [f'{url_type} ID', data.get('aweme_id')],
            [ViewsUtils.t(f'{url_type}描述', 'Description'), data.get('desc')],
            [ViewsUtils.t('作者昵称', 'Author nickname'), data.get('author').get('nickname')],
            [ViewsUtils.t('作者ID', 'Author ID'), data.get('author').get('unique_id')],
            [ViewsUtils.t('API链接', 'API URL'),
             put_link(
                 ViewsUtils.t('点击查看', 'Click to view'),
                 f"/api/hybrid/video_data?url={url}&minimal=false",
                 new_window=True)],
            [ViewsUtils.t('API链接-精简', 'API URL-Minimal'),
             put_link(ViewsUtils.t('点击查看', 'Click to view'),
                      f"/api/hybrid/video_data?url={url}&minimal=true",
                      new_window=True)]

        ]
        # If it's a video
        if url_type == ViewsUtils.t('视频', 'Video'):
            # Insert view/download rows (only when both HQ URLs are present)
            wm_video_url_HQ = data.get('video_data').get('wm_video_url_HQ')
            nwm_video_url_HQ = data.get('video_data').get('nwm_video_url_HQ')
            if wm_video_url_HQ and nwm_video_url_HQ:
                table_list.insert(4, [ViewsUtils.t('视频链接-水印', 'Video URL-Watermark'),
                                      put_link(ViewsUtils.t('点击查看', 'Click to view'),
                                               wm_video_url_HQ, new_window=True)])
                table_list.insert(5, [ViewsUtils.t('视频链接-无水印', 'Video URL-No Watermark'),
                                      put_link(ViewsUtils.t('点击查看', 'Click to view'),
                                               nwm_video_url_HQ, new_window=True)])
                table_list.insert(6, [ViewsUtils.t('视频下载-水印', 'Video Download-Watermark'),
                                      put_link(ViewsUtils.t('点击下载', 'Click to download'),
                                               f"/api/download?url={url}&prefix=true&with_watermark=true",
                                               new_window=True)])
                table_list.insert(7, [ViewsUtils.t('视频下载-无水印', 'Video Download-No-Watermark'),
                                      put_link(ViewsUtils.t('点击下载', 'Click to download'),
                                               f"/api/download?url={url}&prefix=true&with_watermark=false",
                                               new_window=True)])
            # Embed the playable video at the top of the table
            table_list.insert(0, [
                put_video(data.get('video_data').get('nwm_video_url_HQ'), poster=None, loop=True, width='50%')])
        # If it's an image set
        elif url_type == ViewsUtils.t('图片', 'Image'):
            # Insert ZIP download rows for the image set
            table_list.insert(4, [ViewsUtils.t('图片打包下载-水印', 'Download images ZIP-Watermark'),
                                  put_link(ViewsUtils.t('点击下载', 'Click to download'),
                                           f"/api/download?url={url}&prefix=true&with_watermark=true",
                                           new_window=True)])
            table_list.insert(5, [ViewsUtils.t('图片打包下载-无水印', 'Download images ZIP-No-Watermark'),
                                  put_link(ViewsUtils.t('点击下载', 'Click to download'),
                                           f"/api/download?url={url}&prefix=true&with_watermark=false",
                                           new_window=True)])
            # Append a preview row + direct-link row for every image
            no_watermark_image_list = data.get('image_data').get('no_watermark_image_list')
            for image in no_watermark_image_list:
                table_list.append(
                    [ViewsUtils.t('图片预览(如格式可显示): ', 'Image preview (if the format can be displayed):'),
                     put_image(image, width='50%')])
                table_list.append([ViewsUtils.t('图片直链: ', 'Image URL:'),
                                   put_link(ViewsUtils.t('⬆️点击打开图片⬆️', '⬆️Click to open image⬆️'), image,
                                            new_window=True)])
        # Put the result table on the web page
        with use_scope(str(url_index)):
            # Show progress for this link
            put_info(
                ViewsUtils.t(f'正在解析第{url_index}/{url_count}个链接: ',
                             f'Parsing the {url_index}/{url_count}th link: '),
                put_link(url, url, new_window=True), closable=True)
            put_table(table_list)
            put_html('<hr>')
        scroll_to(str(url_index))
        success_count += 1
        success_list.append(url)
        # print(success_count: {success_count}, success_list: {success_list}')
    # All parsing completed — fill in the result page title
    with use_scope('result_title'):
        put_row([put_html('<br>')])
        put_markdown(ViewsUtils.t('## 📝解析结果:', '## 📝Parsing results:'))
        put_row([put_html('<br>')])
    with use_scope('result'):
        # Remove the progress banner
        clear('loading_text')
        # Scroll to the result summary
        scroll_to('result')
        # Success banner at the end of the loop
        put_success(ViewsUtils.t('解析完成啦 ♪(・ω・)ノ\n请查看以下统计信息,如果觉得有用的话请在GitHub上帮我点一个Star吧!',
                                 'Parsing completed ♪(・ω・)ノ\nPlease check the following statistics, and if you think it\'s useful, please help me click a Star on GitHub!'))
        # Success / failure / total counts
        put_markdown(
            f'**{ViewsUtils.t("成功", "Success")}:** {success_count} **{ViewsUtils.t("失败", "Failed")}:** {failed_count} **{ViewsUtils.t("总数量", "Total")}:** {success_count + failed_count}')
        # Success list (shown only when some links failed)
        if success_count != url_count:
            put_markdown(f'**{ViewsUtils.t("成功列表", "Success list")}:**')
            put_code('\n'.join(success_list))
        # Failed list
        if failed_count > 0:
            put_markdown(f'**{ViewsUtils.t("失败列表", "Failed list")}:**')
            put_code('\n'.join(failed_list))
        # Show all entered links as copyable code
        put_markdown(ViewsUtils.t('**以下是您输入的所有链接:**', '**The following are all the links you entered:**'))
        put_code('\n'.join(url_lists))
        # Parse end time
        end = time.time()
        # Elapsed time, rounded to two decimals
        time_consuming = round(end - start, 2)
        # Show elapsed time
        put_markdown(f"**{ViewsUtils.t('耗时', 'Time consuming')}:** {time_consuming}s")
        # Button that scrolls back to the first result
        put_button(ViewsUtils.t('回到顶部', 'Back to top'), onclick=lambda: scroll_to('1'), color='success',
                   outline=True)
        # Link back to the home page
        put_link(ViewsUtils.t('再来一波 (つ´ω`)つ', 'Another wave (つ´ω`)つ'), '/')
|
app/web/views/Shortcuts.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import yaml
|
| 3 |
+
from pywebio.output import popup, put_markdown, put_html, put_text, put_link
|
| 4 |
+
from app.web.views.ViewsUtils import ViewsUtils
|
| 5 |
+
|
| 6 |
+
t = ViewsUtils().t
|
| 7 |
+
|
| 8 |
+
# Load config.yaml from the project root (four directory levels above this
# file), then keep only the iOS shortcut section.
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'config.yaml')
with open(config_path, 'r', encoding='utf-8') as file:
    config = yaml.safe_load(file)
config = config['iOS_Shortcut']
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# iOS快捷指令弹窗/IOS shortcut pop-up
|
| 16 |
+
def ios_pop_window():
    """Render the iOS Shortcut popup: introduction, two usage methods, and download links."""
    with popup(t("iOS快捷指令", "iOS Shortcut")):
        # Shortcut metadata loaded from config.yaml at module import time.
        version = config["iOS_Shortcut_Version"]
        update = config['iOS_Shortcut_Update_Time']
        link = config['iOS_Shortcut_Link']
        link_en = config['iOS_Shortcut_Link_EN']
        note = config['iOS_Shortcut_Update_Note']
        note_en = config['iOS_Shortcut_Update_Note_EN']
        # Intro + usage instructions as an ordered list of (zh, en) paragraphs.
        paragraphs = [
            ('#### 📢 快捷指令介绍:', '#### 📢 Shortcut Introduction:'),
            ('快捷指令运行在iOS平台,本快捷指令可以快速调用本项目的公共API将抖音或TikTok的视频或图集下载到你的手机相册中,暂时只支持单个链接进行下载。',
             'The shortcut runs on the iOS platform, and this shortcut can quickly call the public API of this project to download the video or album of Douyin or TikTok to your phone album. It only supports single link download for now.'),
            ('#### 📲 使用方法 ①:', '#### 📲 Operation method ①:'),
            ('在抖音或TikTok的APP内,浏览你想要无水印保存的视频或图集。',
             'The shortcut needs to be used in the Douyin or TikTok app, browse the video or album you want to save without watermark.'),
            ('然后点击右下角分享按钮,选择更多,然后下拉找到 "抖音TikTok无水印下载" 这个选项。',
             'Then click the share button in the lower right corner, select more, and then scroll down to find the "Douyin TikTok No Watermark Download" option.'),
            ('如遇到通知询问是否允许快捷指令访问xxxx (域名或服务器),需要点击允许才可以正常使用。',
             'If you are asked whether to allow the shortcut to access xxxx (domain name or server), you need to click Allow to use it normally.'),
            ('该快捷指令会在你相册创建一个新的相薄方便你浏览保存的内容。',
             'The shortcut will create a new album in your photo album to help you browse the saved content.'),
            ('#### 📲 使用方法 ②:', '#### 📲 Operation method ②:'),
            ('在抖音或TikTok的视频下方点击分享,然后点击复制链接,然后去快捷指令APP中运行该快捷指令。',
             'Click share below the video of Douyin or TikTok, then click to copy the link, then go to the shortcut command APP to run the shortcut command.'),
            ('如果弹窗询问是否允许读取剪切板请同意,随后快捷指令将链接内容保存至相册中。',
             'if the pop-up window asks whether to allow reading the clipboard, please agree, and then the shortcut command will save the link content to the album middle.'),
        ]
        for zh, en in paragraphs:
            put_markdown(t(zh, en))
        put_html('<hr>')
        # Version / changelog footer and the download links.
        put_text(t(f"最新快捷指令版本: {version}", f"Latest shortcut version: {version}"))
        put_text(t(f"快捷指令更新时间: {update}", f"Shortcut update time: {update}"))
        put_text(t(f"快捷指令更新内容: {note}", f"Shortcut update content: {note_en}"))
        put_link("[点击获取快捷指令 - 中文]", link, new_window=True)
        put_html("<br>")
        put_link("[Click get Shortcut - English]", link_en, new_window=True)
|
app/web/views/ViewsUtils.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
from pywebio.output import get_scope, clear
|
| 4 |
+
from pywebio.session import info as session_info
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ViewsUtils:
    """Small helpers shared by the PyWebIO views."""

    # Auto-detect the browser language and return the matching translation.
    @staticmethod
    def t(zh: str, en: str) -> str:
        """Return *zh* for Chinese browsers, *en* otherwise."""
        return zh if 'zh' in session_info.user_language else en

    # Clear the most recently created output scope.
    @staticmethod
    def clear_previous_scope():
        """Clear the previous (innermost) pywebio output scope."""
        clear(get_scope(-1))

    # Extract the links from a Douyin/TikTok share blurb.
    @staticmethod
    def find_url(string: str) -> list:
        """Return every http(s) URL found in *string*, in order of appearance."""
        pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        return re.findall(pattern, string)
|
bash/install.sh
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Installer for Douyin_TikTok_Download_API on Debian/Ubuntu:
# installs system packages, clones the repo into /www/wwwroot, builds a
# virtualenv, installs Python dependencies, then registers and starts the
# systemd service shipped in daemon/.

# Set script to exit on any errors.
set -e

echo 'Updating package lists... | 正在更新软件包列表...'
sudo apt-get update

echo 'Installing Git... | 正在安装Git...'
sudo apt-get install -y git

echo 'Installing Python3... | 正在安装Python3...'
# Fix: use apt-get consistently — `apt` warns that its CLI is not stable
# for use in scripts.
sudo apt-get install -y python3

echo 'Installing PIP3... | 正在安装PIP3...'
sudo apt-get install -y python3-pip

echo 'Installing python3-venv... | 正在安装python3-venv...'
sudo apt-get install -y python3-venv

echo 'Creating path: /www/wwwroot | 正在创建路径: /www/wwwroot'
sudo mkdir -p /www/wwwroot

cd /www/wwwroot || { echo "Failed to change directory to /www/wwwroot | 无法切换到目录 /www/wwwroot"; exit 1; }

echo 'Cloning Douyin_TikTok_Download_API.git from Github! | 正在从Github克隆Douyin_TikTok_Download_API.git!'
sudo git clone https://github.com/Evil0ctal/Douyin_TikTok_Download_API.git

cd Douyin_TikTok_Download_API/ || { echo "Failed to change directory to Douyin_TikTok_Download_API | 无法切换到目录 Douyin_TikTok_Download_API"; exit 1; }

echo 'Creating a virtual environment | 正在创建虚拟环境'
python3 -m venv venv

echo 'Activating the virtual environment | 正在激活虚拟环境'
source venv/bin/activate

echo 'Setting pip to use the default PyPI index | 设置pip使用默认PyPI索引'
pip config set global.index-url https://pypi.org/simple/

echo 'Installing pip setuptools | 安装pip setuptools'
pip install setuptools

echo 'Installing dependencies from requirements.txt | 从requirements.txt安装依赖'
pip install -r requirements.txt

echo 'Deactivating the virtual environment | 正在停用虚拟环境'
deactivate

echo 'Adding Douyin_TikTok_Download_API to system service | 将Douyin_TikTok_Download_API添加到系统服务'
sudo cp daemon/* /etc/systemd/system/

echo 'Enabling Douyin_TikTok_Download_API service | 启用Douyin_TikTok_Download_API服务'
sudo systemctl enable Douyin_TikTok_Download_API.service

echo 'Starting Douyin_TikTok_Download_API service | 启动Douyin_TikTok_Download_API服务'
sudo systemctl start Douyin_TikTok_Download_API.service

echo 'Douyin_TikTok_Download_API installation complete! | Douyin_TikTok_Download_API安装完成!'
echo 'You can access the API at http://localhost:80 | 您可以在http://localhost:80访问API'
echo 'You can change the port in config.yaml under the /www/wwwroot/Douyin_TikTok_Download_API directory | 您可以在/www/wwwroot/Douyin_TikTok_Download_API目录下的config.yaml中更改端口'
echo 'If the API is not working, please change the cookie in config.yaml under the /www/wwwroot/Douyin_TikTok_Download_API/crawler/[Douyin/TikTok]/[APP/Web]/config.yaml directory | 如果API无法工作,请更改/www/wwwroot/Douyin_TikTok_Download_API/crawler/[Douyin/TikTok]/[APP/Web]/config.yaml目录下的cookie'
|
bash/update.sh
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Updater for Douyin_TikTok_Download_API: pull the latest code, refresh the
# Python dependencies inside the virtualenv, and restart the systemd service.

# Ask for confirmation to proceed with the update
read -r -p "Do you want to update Douyin_TikTok_Download_API? [y/n] " input
case $input in
    [yY])
        # Navigate to the project directory or exit if it fails
        cd /www/wwwroot/Douyin_TikTok_Download_API || { echo "The directory does not exist."; exit 1; }

        # Pull the latest changes from the repository.
        # Fix: install.sh clones with sudo, so the checkout is root-owned; a
        # plain `git pull` fails with a permission error for normal users.
        sudo git pull

        # Activate the virtual environment
        source venv/bin/activate

        # Update Python dependencies
        pip install -r requirements.txt

        # Deactivate the virtual environment
        deactivate

        # Restart the service to apply changes
        echo "Restarting Douyin_TikTok_Download_API service"
        sudo systemctl restart Douyin_TikTok_Download_API.service
        echo "Successfully restarted all services!"
        ;;
    [nN]|*)
        echo "Exiting..."
        exit 1
        ;;
esac
|
config.yaml
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Web
|
| 2 |
+
Web:
|
| 3 |
+
# APP Switch
|
| 4 |
+
PyWebIO_Enable: true # Enable APP | 启用APP
|
| 5 |
+
|
| 6 |
+
# APP Information
|
| 7 |
+
Domain: https://douyin.wtf # Web domain | Web域名
|
| 8 |
+
|
| 9 |
+
# APP Configuration
|
| 10 |
+
PyWebIO_Theme: minty # PyWebIO theme | PyWebIO主题
|
| 11 |
+
Max_Take_URLs: 30 # Maximum number of URLs that can be taken at a time | 一次最多可以取得的URL数量
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Web Information
|
| 15 |
+
Tab_Title: Douyin_TikTok_Download_API # Web title | Web标题
|
| 16 |
+
Description: Douyin_TikTok_Download_API is a free open-source API service for Douyin/TikTok. It provides a simple, fast, and stable API for developers to develop applications based on Douyin/TikTok. # Web description | Web描述
|
| 17 |
+
Favicon: https://raw.githubusercontent.com/Evil0ctal/Douyin_TikTok_Download_API/main/logo/logo192.png # Web favicon | Web图标
|
| 18 |
+
|
| 19 |
+
# Fun Configuration
|
| 20 |
+
Easter_Egg: true # Enable Easter Egg | 启用彩蛋
|
| 21 |
+
Live2D_Enable: true
|
| 22 |
+
Live2D_JS: https://fastly.jsdelivr.net/gh/TikHubIO/TikHub_live2d@latest/autoload.js
|
| 23 |
+
|
| 24 |
+
# API
|
| 25 |
+
API:
|
| 26 |
+
# Network Configuration
|
| 27 |
+
Host_IP: 0.0.0.0 # default IP | 默认IP
|
| 28 |
+
Host_Port: 80 # default port is 80 | 默认端口为80
|
| 29 |
+
Docs_URL: /docs # API documentation URL | API文档URL
|
| 30 |
+
Redoc_URL: /redoc # API documentation URL | API文档URL
|
| 31 |
+
|
| 32 |
+
# API Information
|
| 33 |
+
Version: V4.0.9 # API version | API版本
|
| 34 |
+
Update_Time: 2024/09/25 # API update time | API更新时间
|
| 35 |
+
Environment: Demo # API environment | API环境
|
| 36 |
+
|
| 37 |
+
# Download Configuration
|
| 38 |
+
Download_Switch: true # Enable download function | 启用下载功能
|
| 39 |
+
|
| 40 |
+
# File Configuration
|
| 41 |
+
Download_Path: "./download" # Default download directory | 默认下载目录
|
| 42 |
+
Download_File_Prefix: "douyin.wtf_" # Default download file prefix | 默认下载文件前缀
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# iOS Shortcut
|
| 46 |
+
iOS_Shortcut:
|
| 47 |
+
iOS_Shortcut_Version: 7.0
|
| 48 |
+
iOS_Shortcut_Update_Time: 2024/07/05
|
| 49 |
+
iOS_Shortcut_Link: https://www.icloud.com/shortcuts/06f891a026df40cfa967a907feaea632
|
| 50 |
+
iOS_Shortcut_Link_EN: https://www.icloud.com/shortcuts/06f891a026df40cfa967a907feaea632
|
| 51 |
+
iOS_Shortcut_Update_Note: 重构了快捷指令以兼容TikHub API。
|
| 52 |
+
iOS_Shortcut_Update_Note_EN: Refactored the shortcut to be compatible with the TikHub API.
|
crawlers/base_crawler.py
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ==============================================================================
|
| 2 |
+
# Copyright (C) 2021 Evil0ctal
|
| 3 |
+
#
|
| 4 |
+
# This file is part of the Douyin_TikTok_Download_API project.
|
| 5 |
+
#
|
| 6 |
+
# This project is licensed under the Apache License 2.0 (the "License");
|
| 7 |
+
# you may not use this file except in compliance with the License.
|
| 8 |
+
# You may obtain a copy of the License at:
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
# ==============================================================================
|
| 17 |
+
# __
|
| 18 |
+
# /> フ
|
| 19 |
+
# | _ _ l
|
| 20 |
+
# /` ミ_xノ
|
| 21 |
+
# / | Feed me Stars ⭐ ️
|
| 22 |
+
# / ヽ ノ
|
| 23 |
+
# │ | | |
|
| 24 |
+
# / ̄| | | |
|
| 25 |
+
# | ( ̄ヽ__ヽ_)__)
|
| 26 |
+
# \二つ
|
| 27 |
+
# ==============================================================================
|
| 28 |
+
#
|
| 29 |
+
# Contributor Link:
|
| 30 |
+
# - https://github.com/Evil0ctal
|
| 31 |
+
# - https://github.com/Johnserf-Seed
|
| 32 |
+
#
|
| 33 |
+
# ==============================================================================
|
| 34 |
+
|
| 35 |
+
import httpx
|
| 36 |
+
import json
|
| 37 |
+
import asyncio
|
| 38 |
+
import re
|
| 39 |
+
|
| 40 |
+
from httpx import Response
|
| 41 |
+
|
| 42 |
+
from crawlers.utils.logger import logger
|
| 43 |
+
from crawlers.utils.api_exceptions import (
|
| 44 |
+
APIError,
|
| 45 |
+
APIConnectionError,
|
| 46 |
+
APIResponseError,
|
| 47 |
+
APITimeoutError,
|
| 48 |
+
APIUnavailableError,
|
| 49 |
+
APIUnauthorizedError,
|
| 50 |
+
APINotFoundError,
|
| 51 |
+
APIRateLimitError,
|
| 52 |
+
APIRetryExhaustedError,
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class BaseCrawler:
    """Base asynchronous crawler client built on httpx.

    Provides shared GET/POST/HEAD helpers with a business-level retry loop,
    JSON parsing (including a regex fallback for JSON embedded in HTML/text),
    and unified mapping of HTTP status codes to the project's API exceptions.
    Concrete platform crawlers (Douyin/TikTok/Bilibili/...) subclass this.
    """

    def __init__(
        self,
        proxies: dict = None,
        max_retries: int = 3,
        max_connections: int = 50,
        timeout: int = 10,
        max_tasks: int = 50,
        crawler_headers: dict = None,
    ):
        """Create the shared async HTTP client.

        Args:
            proxies: Optional scheme->URL proxy mapping passed to httpx;
                any non-dict value is treated as "no proxy".
            max_retries: Business-logic retry count AND transport-level
                connection retry count.
            max_connections: Connection-pool size limit.
            timeout: Request timeout in seconds; also reused as the sleep
                between empty-response retries.
            max_tasks: Size of the semaphore limiting concurrent tasks.
            crawler_headers: Default request headers.
                BUGFIX: default changed from the shared mutable ``{}`` to
                ``None`` (normalized below) to avoid cross-instance sharing.
        """
        if isinstance(proxies, dict):
            self.proxies = proxies
            # [f"{k}://{v}" for k, v in proxies.items()]
        else:
            self.proxies = None

        # Crawler request headers (never share a mutable default).
        self.crawler_headers = crawler_headers or {}

        # Number of concurrent asynchronous tasks.
        self._max_tasks = max_tasks
        self.semaphore = asyncio.Semaphore(max_tasks)

        # Limit the maximum number of pooled connections.
        self._max_connections = max_connections
        self.limits = httpx.Limits(max_connections=max_connections)

        # Business-logic retry count (drives the loops below).
        self._max_retries = max_retries
        # Underlying transport connection retry count.
        self.atransport = httpx.AsyncHTTPTransport(retries=max_retries)

        # Timeout waiting time (seconds).
        self._timeout = timeout
        self.timeout = httpx.Timeout(timeout)
        # Shared asynchronous client for all requests of this crawler.
        self.aclient = httpx.AsyncClient(
            headers=self.crawler_headers,
            proxies=self.proxies,
            timeout=self.timeout,
            limits=self.limits,
            transport=self.atransport,
        )

    async def fetch_response(self, endpoint: str) -> Response:
        """Fetch an endpoint and return the raw ``httpx.Response``.

        Args:
            endpoint: Endpoint URL.

        Returns:
            Response: Raw response object.
        """
        return await self.get_fetch_data(endpoint)

    async def fetch_get_json(self, endpoint: str) -> dict:
        """GET an endpoint and return its parsed JSON payload.

        Args:
            endpoint: Endpoint URL.

        Returns:
            dict: Parsed JSON data.
        """
        response = await self.get_fetch_data(endpoint)
        return self.parse_json(response)

    async def fetch_post_json(self, endpoint: str, params: dict = None, data=None) -> dict:
        """POST to an endpoint and return its parsed JSON payload.

        Args:
            endpoint: Endpoint URL.
            params: JSON body parameters (sent as the request's ``json=``).
                BUGFIX: default changed from mutable ``{}`` to ``None``;
                ``post_fetch_data`` already treats any falsy value as "none".
            data: Optional form/body data (sent as the request's ``data=``).

        Returns:
            dict: Parsed JSON data.
        """
        response = await self.post_fetch_data(endpoint, params, data)
        return self.parse_json(response)

    def parse_json(self, response: Response) -> dict:
        """Parse a JSON response object.

        Falls back to extracting the first ``{...}`` span from the body when
        ``response.json()`` fails (some endpoints wrap JSON in other text).

        Args:
            response: Raw response object.

        Returns:
            dict: Parsed JSON data.

        Raises:
            APIResponseError: If the response is missing, non-200, or its
                body cannot be parsed as JSON.
        """
        if (
                response is not None
                and isinstance(response, Response)
                and response.status_code == 200
        ):
            try:
                return response.json()
            except json.JSONDecodeError:
                # Try to locate a JSON object embedded in response.text.
                match = re.search(r"\{.*\}", response.text)
                # BUGFIX: re.search returns None on no match; previously
                # match.group() raised AttributeError instead of APIResponseError.
                if match is None:
                    logger.error("解析 {0} 接口 JSON 失败: {1}".format(
                        response.url, "未找到JSON数据"))
                    raise APIResponseError("解析JSON数据失败")
                try:
                    return json.loads(match.group())
                except json.JSONDecodeError as e:
                    logger.error("解析 {0} 接口 JSON 失败: {1}".format(response.url, e))
                    raise APIResponseError("解析JSON数据失败")

        else:
            if isinstance(response, Response):
                logger.error(
                    "获取数据失败。状态码: {0}".format(response.status_code)
                )
            else:
                logger.error("无效响应类型。响应类型: {0}".format(type(response)))

            raise APIResponseError("获取数据失败")

    async def get_fetch_data(self, url: str):
        """Fetch a GET endpoint with retries on empty responses.

        Args:
            url: Endpoint URL.

        Returns:
            response: Response content.

        Raises:
            APIConnectionError: On network/transport failures.
            APIRetryExhaustedError: When every attempt returned an empty body.

        NOTE(review): a handled APIError (via display_error) lets the loop
        fall through and implicitly return None; callers go through
        parse_json which rejects None — preserved as-is.
        """
        for attempt in range(self._max_retries):
            try:
                response = await self.aclient.get(url, follow_redirects=True)
                if not response.text.strip() or not response.content:
                    error_message = "第 {0} 次响应内容为空, 状态码: {1}, URL:{2}".format(
                        attempt + 1, response.status_code, response.url)

                    logger.warning(error_message)

                    if attempt == self._max_retries - 1:
                        raise APIRetryExhaustedError(
                            "获取端点数据失败, 次数达到上限"
                        )

                    # Back off before retrying an empty response.
                    await asyncio.sleep(self._timeout)
                    continue

                response.raise_for_status()
                return response

            except httpx.RequestError:
                raise APIConnectionError("连接端点失败,检查网络环境或代理:{0} 代理:{1} 类名:{2}"
                                         .format(url, self.proxies, self.__class__.__name__)
                                         )

            except httpx.HTTPStatusError as http_error:
                self.handle_http_status_error(http_error, url, attempt + 1)

            except APIError as e:
                e.display_error()

    async def post_fetch_data(self, url: str, params: dict = None, data=None):
        """Fetch a POST endpoint with retries on empty responses.

        Args:
            url: Endpoint URL.
            params: POST request parameters sent as JSON body (falsy -> no body).
                BUGFIX: default changed from mutable ``{}`` to ``None``.
            data: Optional raw/form data (falsy -> no data).

        Returns:
            response: Response content.

        Raises:
            APIConnectionError: On network/transport failures.
            APIRetryExhaustedError: When every attempt returned an empty body.
        """
        for attempt in range(self._max_retries):
            try:
                response = await self.aclient.post(
                    url,
                    json=None if not params else dict(params),
                    data=None if not data else data,
                    follow_redirects=True
                )
                if not response.text.strip() or not response.content:
                    error_message = "第 {0} 次响应内容为空, 状态码: {1}, URL:{2}".format(
                        attempt + 1, response.status_code, response.url)

                    logger.warning(error_message)

                    if attempt == self._max_retries - 1:
                        raise APIRetryExhaustedError(
                            "获取端点数据失败, 次数达到上限"
                        )

                    await asyncio.sleep(self._timeout)
                    continue

                response.raise_for_status()
                return response

            except httpx.RequestError:
                raise APIConnectionError(
                    "连接端点失败,检查网络环境或代理:{0} 代理:{1} 类名:{2}".format(url, self.proxies,
                                                                                self.__class__.__name__)
                )

            except httpx.HTTPStatusError as http_error:
                self.handle_http_status_error(http_error, url, attempt + 1)

            except APIError as e:
                e.display_error()

    async def head_fetch_data(self, url: str):
        """Fetch a HEAD endpoint (single attempt, no retry loop).

        Args:
            url: Endpoint URL.

        Returns:
            response: Response content.

        Raises:
            APIConnectionError: On network/transport failures.
        """
        try:
            response = await self.aclient.head(url)
            response.raise_for_status()
            return response

        except httpx.RequestError:
            raise APIConnectionError("连接端点失败,检查网络环境或代理:{0} 代理:{1} 类名:{2}".format(
                url, self.proxies, self.__class__.__name__
            )
            )

        except httpx.HTTPStatusError as http_error:
            self.handle_http_status_error(http_error, url, 1)

        except APIError as e:
            e.display_error()

    def handle_http_status_error(self, http_error, url: str, attempt):
        """Map an ``httpx.HTTPStatusError`` to a project API exception.

        Args:
            http_error: HTTP status error.
            url: Endpoint URL.
            attempt: Number of attempts made so far (for logging).

        Raises:
            APIResponseError: Malformed error object or unmapped status code.
            APIUnavailableError: 503 Service unavailable.
            APINotFoundError: 404 Endpoint does not exist.
            APITimeoutError: 408 Connection timeout.
            APIUnauthorizedError: 401 Unauthorized.
            APIRateLimitError: 429 Request frequency too high.
        """
        response = getattr(http_error, "response", None)
        status_code = getattr(response, "status_code", None)

        if response is None or status_code is None:
            logger.error("HTTP状态错误: {0}, URL: {1}, 尝试次数: {2}".format(
                http_error, url, attempt
            )
            )
            raise APIResponseError(f"处理HTTP错误时遇到意外情况: {http_error}")

        if status_code == 302:
            # Redirects are tolerated (follow_redirects handles them upstream).
            pass
        elif status_code == 404:
            raise APINotFoundError(f"HTTP Status Code {status_code}")
        elif status_code == 503:
            raise APIUnavailableError(f"HTTP Status Code {status_code}")
        elif status_code == 408:
            raise APITimeoutError(f"HTTP Status Code {status_code}")
        elif status_code == 401:
            raise APIUnauthorizedError(f"HTTP Status Code {status_code}")
        elif status_code == 429:
            raise APIRateLimitError(f"HTTP Status Code {status_code}")
        else:
            logger.error("HTTP状态错误: {0}, URL: {1}, 尝试次数: {2}".format(
                status_code, url, attempt
            )
            )
            raise APIResponseError(f"HTTP状态错误: {status_code}")

    async def close(self):
        """Close the underlying async client and its connection pool."""
        await self.aclient.aclose()

    async def __aenter__(self):
        """Enter the async context manager, returning self."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Exit the async context manager, closing the client."""
        await self.aclient.aclose()
|
crawlers/bilibili/web/config.yaml
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
TokenManager:
|
| 2 |
+
bilibili:
|
| 3 |
+
headers:
|
| 4 |
+
'accept-language': zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6
|
| 5 |
+
'origin': https://www.bilibili.com
|
| 6 |
+
'referer': https://space.bilibili.com/
|
| 7 |
+
'origin_2': https://space.bilibili.com
|
| 8 |
+
'cookie': buvid4=748EC8F0-82E2-1672-A286-8445DDB2A80C06110-023112304-; buvid3=73EF1E2E-B7A9-78DD-F2AE-9AB2B476E27638524infoc; b_nut=1727075638; _uuid=77AA4910F-5C8F-9647-7DA3-F583C8108BD7942063infoc; buvid_fp=75b22e5d0c3dbc642b1c80956c62c7da; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjczNDI1NTYsImlhdCI6MTcyNzA4MzI5NiwicGx0IjotMX0.G3pvk6OC4FDWBL7GNgKkkVtUMl29UtNdgok_cANoKsw; bili_ticket_expires=1727342496; header_theme_version=CLOSE; enable_web_push=DISABLE; home_feed_column=5; browser_resolution=1488-712; b_lsid=5B4EDF8A_1921EAA1BDA
|
| 9 |
+
'user-agent': Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36
|
| 10 |
+
|
| 11 |
+
proxies:
|
| 12 |
+
http:
|
| 13 |
+
https:
|
crawlers/bilibili/web/endpoints.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class BilibiliAPIEndpoints:
    """URL constants for the Bilibili web API endpoints used by the crawler."""

    # ----------------------------- Domains -----------------------------
    # Main Bilibili API domain
    BILIAPI_DOMAIN = "https://api.bilibili.com"

    # Bilibili live-streaming API domain
    LIVE_DOMAIN = "https://api.live.bilibili.com"

    # ----------------------------- Endpoints -----------------------------
    # Post (video) detail
    POST_DETAIL = f"{BILIAPI_DOMAIN}/x/web-interface/view"

    # Video stream / play URL
    VIDEO_PLAYURL = f"{BILIAPI_DOMAIN}/x/player/wbi/playurl"

    # Videos published by a user
    USER_POST = f"{BILIAPI_DOMAIN}/x/space/wbi/arc/search"

    # List of a user's favorite folders
    COLLECT_FOLDERS = f"{BILIAPI_DOMAIN}/x/v3/fav/folder/created/list-all"

    # Videos inside a favorite folder
    COLLECT_VIDEOS = f"{BILIAPI_DOMAIN}/x/v3/fav/resource/list"

    # User profile information
    USER_DETAIL = f"{BILIAPI_DOMAIN}/x/space/wbi/acc/info"

    # General popular feed
    COM_POPULAR = f"{BILIAPI_DOMAIN}/x/web-interface/popular"

    # Weekly must-watch list
    WEEKLY_POPULAR = f"{BILIAPI_DOMAIN}/x/web-interface/popular/series/one"

    # All-time classics ("precious") list
    PRECIOUS_POPULAR = f"{BILIAPI_DOMAIN}/x/web-interface/popular/precious"

    # Comments on a video
    VIDEO_COMMENTS = f"{BILIAPI_DOMAIN}/x/v2/reply"

    # A user's dynamic (activity) feed
    USER_DYNAMIC = f"{BILIAPI_DOMAIN}/x/polymer/web-dynamic/v1/feed/space"

    # Replies to a comment
    COMMENT_REPLY = f"{BILIAPI_DOMAIN}/x/v2/reply/reply"

    # Multi-part (分P) video page list
    VIDEO_PARTS = f"{BILIAPI_DOMAIN}/x/player/pagelist"

    # Live room information
    LIVEROOM_DETAIL = f"{LIVE_DOMAIN}/room/v1/Room/get_info"

    # Live area (category) list
    LIVE_AREAS = f"{LIVE_DOMAIN}/room/v1/Area/getList"

    # Live room video stream
    LIVE_VIDEOS = f"{LIVE_DOMAIN}/room/v1/Room/playUrl"

    # Streamers currently live
    LIVE_STREAMER = f"{LIVE_DOMAIN}/xlive/web-interface/v1/second/getList"
|
| 61 |
+
|
| 62 |
+
|