Spaces:
Paused
Paused
taslim19 commited on
Commit ·
4051191
0
Parent(s):
drag
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .github/workflows/codeflix +1 -0
- .github/workflows/dev_deploy.yml +35 -0
- .github/workflows/prod_deploy.yml +39 -0
- .gitignore +16 -0
- Dockerfile +21 -0
- LICENSE +21 -0
- Procfile +1 -0
- alembic.ini +110 -0
- alembic/README +1 -0
- alembic/codeflix +1 -0
- alembic/env.py +83 -0
- alembic/script.py.mako +25 -0
- alembic/versions/1ad8012fafa0_first_migration.py +74 -0
- alembic/versions/71bd610aaa43_make_chapterfile_ids_optional.py +84 -0
- alembic/versions/c +1 -0
- app.json +50 -0
- bot.py +712 -0
- config.py +14 -0
- heroku.yml +3 -0
- img2cbz/__init__.py +0 -0
- img2cbz/codeflix +1 -0
- img2cbz/core.py +21 -0
- img2pdf/__init__.py +0 -0
- img2pdf/codeflix +1 -0
- img2pdf/core.py +103 -0
- img2tph/__init__.py +0 -0
- img2tph/codeflix +1 -0
- img2tph/core.py +18 -0
- logger.py +6 -0
- main.py +19 -0
- models/__init__.py +1 -0
- models/codeflix +1 -0
- models/db.py +100 -0
- pagination.py +16 -0
- plugins/__init__.py +18 -0
- plugins/asurascans.py +122 -0
- plugins/client.py +158 -0
- plugins/codeflix +1 -0
- plugins/kissmanga.py +125 -0
- plugins/mangabuddy.py +145 -0
- plugins/mangadex.py +167 -0
- plugins/mangahasu.py +125 -0
- plugins/mangakakalot.py +141 -0
- plugins/manganato.py +144 -0
- plugins/manganelo.py +128 -0
- plugins/mangasee.py +185 -0
- plugins/mangasin.py +145 -0
- plugins/mangatigre.py +150 -0
- plugins/manhuako.py +128 -0
- plugins/manhuaplus.py +125 -0
.github/workflows/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
.github/workflows/dev_deploy.yml
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: DevDeploy
|
| 2 |
+
|
| 3 |
+
# Controls when the workflow will run
|
| 4 |
+
on:
|
| 5 |
+
# Allows you to run this workflow manually from the Actions tab
|
| 6 |
+
workflow_dispatch:
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
|
| 10 |
+
jobs:
|
| 11 |
+
DevDeploy:
|
| 12 |
+
runs-on: ubuntu-latest
|
| 13 |
+
steps:
|
| 14 |
+
- uses: actions/checkout@v3
|
| 15 |
+
|
| 16 |
+
- name: Set up ssh-agent
|
| 17 |
+
uses: appleboy/ssh-action@v0.1.6
|
| 18 |
+
with:
|
| 19 |
+
host: ${{ secrets.HOST }}
|
| 20 |
+
username: ${{ secrets.USERNAME }}
|
| 21 |
+
key: ${{ secrets.PRIVATE_KEY }}
|
| 22 |
+
passphrase: ${{ secrets.PASSPHRASE }}
|
| 23 |
+
script_stop: true
|
| 24 |
+
script: |
|
| 25 |
+
cd manga-bot/tg-manga-bot-dev
|
| 26 |
+
git pull origin master
|
| 27 |
+
docker container rm -f manga-bot-dev 2> /dev/null || true
|
| 28 |
+
docker build . -t manga-bot-dev
|
| 29 |
+
docker run --name manga-bot-dev --restart=always -dti \
|
| 30 |
+
--env BOT_TOKEN=${{ secrets.DEV_BOT_TOKEN }} \
|
| 31 |
+
--env API_ID=${{ secrets.API_ID }} \
|
| 32 |
+
--env API_HASH=${{ secrets.API_HASH }} \
|
| 33 |
+
--env DATABASE_URL_PRIMARY=${{ secrets.DEV_DATABASE_URI }} \
|
| 34 |
+
--env LOG_LEVEL=${{ secrets.DEV_LOG_LEVEL }} \
|
| 35 |
+
manga-bot-dev
|
.github/workflows/prod_deploy.yml
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: ProdDeploy
|
| 2 |
+
|
| 3 |
+
# Controls when the workflow will run
|
| 4 |
+
on:
|
| 5 |
+
# Allows you to run this workflow manually from the Actions tab
|
| 6 |
+
push:
|
| 7 |
+
branches: [ "prod" ]
|
| 8 |
+
workflow_dispatch:
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
|
| 12 |
+
jobs:
|
| 13 |
+
ProdDeploy:
|
| 14 |
+
runs-on: ubuntu-latest
|
| 15 |
+
steps:
|
| 16 |
+
- uses: actions/checkout@v3
|
| 17 |
+
|
| 18 |
+
- name: Set up ssh-agent
|
| 19 |
+
uses: appleboy/ssh-action@v0.1.6
|
| 20 |
+
with:
|
| 21 |
+
host: ${{ secrets.HOST }}
|
| 22 |
+
username: ${{ secrets.USERNAME }}
|
| 23 |
+
key: ${{ secrets.PRIVATE_KEY }}
|
| 24 |
+
passphrase: ${{ secrets.PASSPHRASE }}
|
| 25 |
+
script_stop: true
|
| 26 |
+
script: |
|
| 27 |
+
cd manga-bot/tg-manga-bot-prod
|
| 28 |
+
git pull origin prod
|
| 29 |
+
docker container rm -f manga-bot-prod 2> /dev/null || true
|
| 30 |
+
docker build . -t manga-bot-prod
|
| 31 |
+
docker run --name manga-bot-prod --restart=always -dti \
|
| 32 |
+
--env BOT_TOKEN=${{ secrets.PROD_BOT_TOKEN }} \
|
| 33 |
+
--env API_ID=${{ secrets.API_ID }} \
|
| 34 |
+
--env API_HASH=${{ secrets.API_HASH }} \
|
| 35 |
+
--env CHANNEL=${{ secrets.PROD_CHANNEL }} \
|
| 36 |
+
--env CACHE_CHANNEL=${{ secrets.CACHE_CHANNEL }} \
|
| 37 |
+
--env DATABASE_URL_PRIMARY=${{ secrets.PROD_DATABASE_URI }} \
|
| 38 |
+
--env LOG_LEVEL=${{ secrets.PROD_LOG_LEVEL }} \
|
| 39 |
+
manga-bot-prod
|
.gitignore
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Cache
|
| 2 |
+
__pycache__/
|
| 3 |
+
|
| 4 |
+
##PyCharm
|
| 5 |
+
.idea/
|
| 6 |
+
.vscode
|
| 7 |
+
|
| 8 |
+
.env
|
| 9 |
+
cache
|
| 10 |
+
bot.session
|
| 11 |
+
bot.session-journal
|
| 12 |
+
/test.db
|
| 13 |
+
|
| 14 |
+
venv
|
| 15 |
+
.venv
|
| 16 |
+
env.json
|
Dockerfile
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# For more information, please refer to https://aka.ms/vscode-docker-python
|
| 2 |
+
FROM python
|
| 3 |
+
|
| 4 |
+
# Keeps Python from generating .pyc files in the container
|
| 5 |
+
ENV PYTHONDONTWRITEBYTECODE=1
|
| 6 |
+
|
| 7 |
+
# Turns off buffering for easier container logging
|
| 8 |
+
ENV PYTHONUNBUFFERED=1
|
| 9 |
+
|
| 10 |
+
WORKDIR /app
|
| 11 |
+
|
| 12 |
+
# Install pip requirements
|
| 13 |
+
COPY requirements.txt .
|
| 14 |
+
RUN python -m pip install --no-cache-dir -r requirements.txt
|
| 15 |
+
|
| 16 |
+
COPY . /app
|
| 17 |
+
|
| 18 |
+
RUN alembic upgrade head
|
| 19 |
+
|
| 20 |
+
# During debugging, this entry point will be overridden. For more information, please refer to https://aka.ms/vscode-docker-python-debug
|
| 21 |
+
CMD ["bash", "start.sh"]
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2024 𝖢𝗈𝖽𝖾𝖿𝗅𝗂𝗑 𝖡𝗈𝗍𝗌
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
Procfile
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
worker: python main.py
|
alembic.ini
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A generic, single database configuration.
|
| 2 |
+
|
| 3 |
+
[alembic]
|
| 4 |
+
# path to migration scripts
|
| 5 |
+
script_location = alembic
|
| 6 |
+
|
| 7 |
+
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
|
| 8 |
+
# Uncomment the line below if you want the files to be prepended with date and time
|
| 9 |
+
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
|
| 10 |
+
# for all available tokens
|
| 11 |
+
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
|
| 12 |
+
|
| 13 |
+
# sys.path path, will be prepended to sys.path if present.
|
| 14 |
+
# defaults to the current working directory.
|
| 15 |
+
prepend_sys_path = .
|
| 16 |
+
|
| 17 |
+
# timezone to use when rendering the date within the migration file
|
| 18 |
+
# as well as the filename.
|
| 19 |
+
# If specified, requires the python-dateutil library that can be
|
| 20 |
+
# installed by adding `alembic[tz]` to the pip requirements
|
| 21 |
+
# string value is passed to dateutil.tz.gettz()
|
| 22 |
+
# leave blank for localtime
|
| 23 |
+
# timezone =
|
| 24 |
+
|
| 25 |
+
# max length of characters to apply to the
|
| 26 |
+
# "slug" field
|
| 27 |
+
# truncate_slug_length = 40
|
| 28 |
+
|
| 29 |
+
# set to 'true' to run the environment during
|
| 30 |
+
# the 'revision' command, regardless of autogenerate
|
| 31 |
+
# revision_environment = false
|
| 32 |
+
|
| 33 |
+
# set to 'true' to allow .pyc and .pyo files without
|
| 34 |
+
# a source .py file to be detected as revisions in the
|
| 35 |
+
# versions/ directory
|
| 36 |
+
# sourceless = false
|
| 37 |
+
|
| 38 |
+
# version location specification; This defaults
|
| 39 |
+
# to alembic/versions. When using multiple version
|
| 40 |
+
# directories, initial revisions must be specified with --version-path.
|
| 41 |
+
# The path separator used here should be the separator specified by "version_path_separator" below.
|
| 42 |
+
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
|
| 43 |
+
|
| 44 |
+
# version path separator; As mentioned above, this is the character used to split
|
| 45 |
+
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
|
| 46 |
+
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
|
| 47 |
+
# Valid values for version_path_separator are:
|
| 48 |
+
#
|
| 49 |
+
# version_path_separator = :
|
| 50 |
+
# version_path_separator = ;
|
| 51 |
+
# version_path_separator = space
|
| 52 |
+
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
|
| 53 |
+
|
| 54 |
+
# set to 'true' to search source files recursively
|
| 55 |
+
# in each "version_locations" directory
|
| 56 |
+
# new in Alembic version 1.10
|
| 57 |
+
# recursive_version_locations = false
|
| 58 |
+
|
| 59 |
+
# the output encoding used when revision files
|
| 60 |
+
# are written from script.py.mako
|
| 61 |
+
# output_encoding = utf-8
|
| 62 |
+
|
| 63 |
+
sqlalchemy.url = driver://user:pass@localhost/dbname
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
[post_write_hooks]
|
| 67 |
+
# post_write_hooks defines scripts or Python functions that are run
|
| 68 |
+
# on newly generated revision scripts. See the documentation for further
|
| 69 |
+
# detail and examples
|
| 70 |
+
|
| 71 |
+
# format using "black" - use the console_scripts runner, against the "black" entrypoint
|
| 72 |
+
# hooks = black
|
| 73 |
+
# black.type = console_scripts
|
| 74 |
+
# black.entrypoint = black
|
| 75 |
+
# black.options = -l 79 REVISION_SCRIPT_FILENAME
|
| 76 |
+
|
| 77 |
+
# Logging configuration
|
| 78 |
+
[loggers]
|
| 79 |
+
keys = root,sqlalchemy,alembic
|
| 80 |
+
|
| 81 |
+
[handlers]
|
| 82 |
+
keys = console
|
| 83 |
+
|
| 84 |
+
[formatters]
|
| 85 |
+
keys = generic
|
| 86 |
+
|
| 87 |
+
[logger_root]
|
| 88 |
+
level = WARN
|
| 89 |
+
handlers = console
|
| 90 |
+
qualname =
|
| 91 |
+
|
| 92 |
+
[logger_sqlalchemy]
|
| 93 |
+
level = WARN
|
| 94 |
+
handlers =
|
| 95 |
+
qualname = sqlalchemy.engine
|
| 96 |
+
|
| 97 |
+
[logger_alembic]
|
| 98 |
+
level = INFO
|
| 99 |
+
handlers =
|
| 100 |
+
qualname = alembic
|
| 101 |
+
|
| 102 |
+
[handler_console]
|
| 103 |
+
class = StreamHandler
|
| 104 |
+
args = (sys.stderr,)
|
| 105 |
+
level = NOTSET
|
| 106 |
+
formatter = generic
|
| 107 |
+
|
| 108 |
+
[formatter_generic]
|
| 109 |
+
format = %(levelname)-5.5s [%(name)s] %(message)s
|
| 110 |
+
datefmt = %H:%M:%S
|
alembic/README
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Generic single-database configuration.
|
alembic/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
alembic/env.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from logging.config import fileConfig
|
| 2 |
+
|
| 3 |
+
from sqlalchemy import engine_from_config
|
| 4 |
+
from sqlalchemy import pool
|
| 5 |
+
|
| 6 |
+
from alembic import context
|
| 7 |
+
from sqlmodel import SQLModel
|
| 8 |
+
|
| 9 |
+
from config import dbname
|
| 10 |
+
|
| 11 |
+
# this is the Alembic Config object, which provides
|
| 12 |
+
# access to the values within the .ini file in use.
|
| 13 |
+
config = context.config
|
| 14 |
+
config.set_main_option('sqlalchemy.url', dbname)
|
| 15 |
+
|
| 16 |
+
# Interpret the config file for Python logging.
|
| 17 |
+
# This line sets up loggers basically.
|
| 18 |
+
if config.config_file_name is not None:
|
| 19 |
+
fileConfig(config.config_file_name)
|
| 20 |
+
|
| 21 |
+
# add your model's MetaData object here
|
| 22 |
+
# for 'autogenerate' support
|
| 23 |
+
# from myapp import mymodel
|
| 24 |
+
# target_metadata = mymodel.Base.metadata
|
| 25 |
+
from models.db import *
|
| 26 |
+
target_metadata = SQLModel.metadata
|
| 27 |
+
|
| 28 |
+
# other values from the config, defined by the needs of env.py,
|
| 29 |
+
# can be acquired:
|
| 30 |
+
# my_important_option = config.get_main_option("my_important_option")
|
| 31 |
+
# ... etc.
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def run_migrations_offline() -> None:
|
| 35 |
+
"""Run migrations in 'offline' mode.
|
| 36 |
+
|
| 37 |
+
This configures the context with just a URL
|
| 38 |
+
and not an Engine, though an Engine is acceptable
|
| 39 |
+
here as well. By skipping the Engine creation
|
| 40 |
+
we don't even need a DBAPI to be available.
|
| 41 |
+
|
| 42 |
+
Calls to context.execute() here emit the given string to the
|
| 43 |
+
script output.
|
| 44 |
+
|
| 45 |
+
"""
|
| 46 |
+
url = config.get_main_option("sqlalchemy.url")
|
| 47 |
+
context.configure(
|
| 48 |
+
url=url,
|
| 49 |
+
target_metadata=target_metadata,
|
| 50 |
+
literal_binds=True,
|
| 51 |
+
dialect_opts={"paramstyle": "named"},
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
with context.begin_transaction():
|
| 55 |
+
context.run_migrations()
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def run_migrations_online() -> None:
|
| 59 |
+
"""Run migrations in 'online' mode.
|
| 60 |
+
|
| 61 |
+
In this scenario we need to create an Engine
|
| 62 |
+
and associate a connection with the context.
|
| 63 |
+
|
| 64 |
+
"""
|
| 65 |
+
connectable = engine_from_config(
|
| 66 |
+
config.get_section(config.config_ini_section, {}),
|
| 67 |
+
prefix="sqlalchemy.",
|
| 68 |
+
poolclass=pool.NullPool,
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
with connectable.connect() as connection:
|
| 72 |
+
context.configure(
|
| 73 |
+
connection=connection, target_metadata=target_metadata
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
with context.begin_transaction():
|
| 77 |
+
context.run_migrations()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
if context.is_offline_mode():
|
| 81 |
+
run_migrations_offline()
|
| 82 |
+
else:
|
| 83 |
+
run_migrations_online()
|
alembic/script.py.mako
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""${message}
|
| 2 |
+
|
| 3 |
+
Revision ID: ${up_revision}
|
| 4 |
+
Revises: ${down_revision | comma,n}
|
| 5 |
+
Create Date: ${create_date}
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
from alembic import op
|
| 9 |
+
import sqlalchemy as sa
|
| 10 |
+
import sqlmodel
|
| 11 |
+
${imports if imports else ""}
|
| 12 |
+
|
| 13 |
+
# revision identifiers, used by Alembic.
|
| 14 |
+
revision = ${repr(up_revision)}
|
| 15 |
+
down_revision = ${repr(down_revision)}
|
| 16 |
+
branch_labels = ${repr(branch_labels)}
|
| 17 |
+
depends_on = ${repr(depends_on)}
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def upgrade() -> None:
|
| 21 |
+
${upgrades if upgrades else "pass"}
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def downgrade() -> None:
|
| 25 |
+
${downgrades if downgrades else "pass"}
|
alembic/versions/1ad8012fafa0_first_migration.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""first migration
|
| 2 |
+
|
| 3 |
+
Revision ID: 1ad8012fafa0
|
| 4 |
+
Revises:
|
| 5 |
+
Create Date: 2023-05-21 14:48:52.387894
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
from alembic import op
|
| 9 |
+
import sqlalchemy as sa
|
| 10 |
+
import sqlmodel
|
| 11 |
+
from sqlalchemy.engine import Inspector
|
| 12 |
+
|
| 13 |
+
# revision identifiers, used by Alembic.
|
| 14 |
+
revision = '1ad8012fafa0'
|
| 15 |
+
down_revision = None
|
| 16 |
+
branch_labels = None
|
| 17 |
+
depends_on = None
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def upgrade() -> None:
|
| 21 |
+
# ### commands auto generated by Alembic - please adjust! ###
|
| 22 |
+
conn = op.get_bind()
|
| 23 |
+
inspector = Inspector.from_engine(conn)
|
| 24 |
+
tables = inspector.get_table_names()
|
| 25 |
+
|
| 26 |
+
if 'chapterfile' not in tables:
|
| 27 |
+
op.create_table('chapterfile',
|
| 28 |
+
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 29 |
+
sa.Column('file_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 30 |
+
sa.Column('file_unique_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 31 |
+
sa.Column('cbz_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 32 |
+
sa.Column('cbz_unique_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 33 |
+
sa.Column('telegraph_url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 34 |
+
sa.PrimaryKeyConstraint('url')
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
if 'lastchapter' not in tables:
|
| 38 |
+
op.create_table('lastchapter',
|
| 39 |
+
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 40 |
+
sa.Column('chapter_url', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 41 |
+
sa.PrimaryKeyConstraint('url')
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
if 'manganame' not in tables:
|
| 45 |
+
op.create_table('manganame',
|
| 46 |
+
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 47 |
+
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 48 |
+
sa.PrimaryKeyConstraint('url')
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
if 'mangaoutput' not in tables:
|
| 52 |
+
op.create_table('mangaoutput',
|
| 53 |
+
sa.Column('user_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 54 |
+
sa.Column('output', sa.Integer(), nullable=True),
|
| 55 |
+
sa.PrimaryKeyConstraint('user_id')
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
if 'subscription' not in tables:
|
| 59 |
+
op.create_table('subscription',
|
| 60 |
+
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 61 |
+
sa.Column('user_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 62 |
+
sa.PrimaryKeyConstraint('url', 'user_id')
|
| 63 |
+
)
|
| 64 |
+
# ### end Alembic commands ###
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def downgrade() -> None:
|
| 68 |
+
# ### commands auto generated by Alembic - please adjust! ###
|
| 69 |
+
op.drop_table('subscription')
|
| 70 |
+
op.drop_table('mangaoutput')
|
| 71 |
+
op.drop_table('manganame')
|
| 72 |
+
op.drop_table('lastchapter')
|
| 73 |
+
op.drop_table('chapterfile')
|
| 74 |
+
# ### end Alembic commands ###
|
alembic/versions/71bd610aaa43_make_chapterfile_ids_optional.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Make chapterFile ids optional
|
| 2 |
+
|
| 3 |
+
Revision ID: 71bd610aaa43
|
| 4 |
+
Revises: 1ad8012fafa0
|
| 5 |
+
Create Date: 2023-05-28 14:30:33.114680
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
from alembic import op
|
| 9 |
+
import sqlalchemy as sa
|
| 10 |
+
import sqlmodel
|
| 11 |
+
|
| 12 |
+
# revision identifiers, used by Alembic.
|
| 13 |
+
revision = '71bd610aaa43'
|
| 14 |
+
down_revision = '1ad8012fafa0'
|
| 15 |
+
branch_labels = None
|
| 16 |
+
depends_on = None
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def upgrade() -> None:
|
| 20 |
+
# ### commands auto generated by Alembic - please adjust! ###
|
| 21 |
+
if op.get_bind().dialect.name == 'sqlite':
|
| 22 |
+
op.create_table('chapterfile_temp',
|
| 23 |
+
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 24 |
+
sa.Column('file_id', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 25 |
+
sa.Column('file_unique_id', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 26 |
+
sa.Column('cbz_id', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 27 |
+
sa.Column('cbz_unique_id', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 28 |
+
sa.Column('telegraph_url', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
|
| 29 |
+
sa.PrimaryKeyConstraint('url')
|
| 30 |
+
)
|
| 31 |
+
op.execute('INSERT INTO chapterfile_temp SELECT * FROM chapterfile')
|
| 32 |
+
op.drop_table('chapterfile')
|
| 33 |
+
op.rename_table('chapterfile_temp', 'chapterfile')
|
| 34 |
+
else:
|
| 35 |
+
op.alter_column('chapterfile', 'file_id',
|
| 36 |
+
existing_type=sa.VARCHAR(),
|
| 37 |
+
nullable=True)
|
| 38 |
+
op.alter_column('chapterfile', 'file_unique_id',
|
| 39 |
+
existing_type=sa.VARCHAR(),
|
| 40 |
+
nullable=True)
|
| 41 |
+
op.alter_column('chapterfile', 'cbz_id',
|
| 42 |
+
existing_type=sa.VARCHAR(),
|
| 43 |
+
nullable=True)
|
| 44 |
+
op.alter_column('chapterfile', 'cbz_unique_id',
|
| 45 |
+
existing_type=sa.VARCHAR(),
|
| 46 |
+
nullable=True)
|
| 47 |
+
op.alter_column('chapterfile', 'telegraph_url',
|
| 48 |
+
existing_type=sa.VARCHAR(),
|
| 49 |
+
nullable=True)
|
| 50 |
+
# ### end Alembic commands ###
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def downgrade() -> None:
|
| 54 |
+
# ### commands auto generated by Alembic - please adjust! ###
|
| 55 |
+
if op.get_bind().dialect.name == 'sqlite':
|
| 56 |
+
op.create_table('chapterfile_temp',
|
| 57 |
+
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 58 |
+
sa.Column('file_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 59 |
+
sa.Column('file_unique_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 60 |
+
sa.Column('cbz_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 61 |
+
sa.Column('cbz_unique_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 62 |
+
sa.Column('telegraph_url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
|
| 63 |
+
sa.PrimaryKeyConstraint('url')
|
| 64 |
+
)
|
| 65 |
+
op.execute('INSERT INTO chapterfile_temp SELECT * FROM chapterfile')
|
| 66 |
+
op.drop_table('chapterfile')
|
| 67 |
+
op.rename_table('chapterfile_temp', 'chapterfile')
|
| 68 |
+
else:
|
| 69 |
+
op.alter_column('chapterfile', 'telegraph_url',
|
| 70 |
+
existing_type=sa.VARCHAR(),
|
| 71 |
+
nullable=False)
|
| 72 |
+
op.alter_column('chapterfile', 'cbz_unique_id',
|
| 73 |
+
existing_type=sa.VARCHAR(),
|
| 74 |
+
nullable=False)
|
| 75 |
+
op.alter_column('chapterfile', 'cbz_id',
|
| 76 |
+
existing_type=sa.VARCHAR(),
|
| 77 |
+
nullable=False)
|
| 78 |
+
op.alter_column('chapterfile', 'file_unique_id',
|
| 79 |
+
existing_type=sa.VARCHAR(),
|
| 80 |
+
nullable=False)
|
| 81 |
+
op.alter_column('chapterfile', 'file_id',
|
| 82 |
+
existing_type=sa.VARCHAR(),
|
| 83 |
+
nullable=False)
|
| 84 |
+
# ### end Alembic commands ###
|
alembic/versions/c
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
app.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "TG-MANGA-BOT",
|
| 3 |
+
"description": "Read manga from your favourites websites on telegram.",
|
| 4 |
+
"keywords": [
|
| 5 |
+
"manga",
|
| 6 |
+
"pyrogram",
|
| 7 |
+
"pdf",
|
| 8 |
+
"telegram",
|
| 9 |
+
"bot"
|
| 10 |
+
],
|
| 11 |
+
"stack": "container",
|
| 12 |
+
"repository": "https://github.com/driverog/tg-manga-bot",
|
| 13 |
+
"success_url": "/welcome",
|
| 14 |
+
"env": {
|
| 15 |
+
"API_ID": {
|
| 16 |
+
"description": "Get this value from https://my.telegram.org/apps",
|
| 17 |
+
"value": "",
|
| 18 |
+
"required": true
|
| 19 |
+
},
|
| 20 |
+
"API_HASH": {
|
| 21 |
+
"description": "Get this value from https://my.telegram.org/apps",
|
| 22 |
+
"value": "",
|
| 23 |
+
"required": true
|
| 24 |
+
},
|
| 25 |
+
"BOT_TOKEN": {
|
| 26 |
+
"description": "Get this value from @BotFather in telegram",
|
| 27 |
+
"required": true
|
| 28 |
+
},
|
| 29 |
+
"CHANNEL": {
|
| 30 |
+
"description": "Channel that users must be subscribed in order to use the bot.",
|
| 31 |
+
"required": false
|
| 32 |
+
},
|
| 33 |
+
"DATABASE_URL_PRIMARY": {
|
| 34 |
+
"description": "Will be used as database url. If not given heroku database will be used instead.",
|
| 35 |
+
"required": false
|
| 36 |
+
}
|
| 37 |
+
},
|
| 38 |
+
"formation": {
|
| 39 |
+
"worker": {
|
| 40 |
+
"quantity": 1,
|
| 41 |
+
"size": "free"
|
| 42 |
+
}
|
| 43 |
+
},
|
| 44 |
+
"image": "heroku/python",
|
| 45 |
+
"addons": [
|
| 46 |
+
{
|
| 47 |
+
"plan": "heroku-postgresql"
|
| 48 |
+
}
|
| 49 |
+
]
|
| 50 |
+
}
|
bot.py
ADDED
|
@@ -0,0 +1,712 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import shutil
|
| 3 |
+
from ast import arg
|
| 4 |
+
import asyncio
|
| 5 |
+
import re
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
import datetime as dt
|
| 8 |
+
import json
|
| 9 |
+
|
| 10 |
+
import pyrogram.errors
|
| 11 |
+
from pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery, InputMediaDocument
|
| 12 |
+
|
| 13 |
+
from config import env_vars, dbname
|
| 14 |
+
from img2cbz.core import fld2cbz
|
| 15 |
+
from img2pdf.core import fld2pdf, fld2thumb
|
| 16 |
+
from img2tph.core import img2tph
|
| 17 |
+
from plugins import MangaClient, ManhuaKoClient, MangaCard, MangaChapter, ManhuaPlusClient, TMOClient, MangaDexClient, \
|
| 18 |
+
MangaSeeClient, MangasInClient, McReaderClient, MangaKakalotClient, ManganeloClient, ManganatoClient, \
|
| 19 |
+
KissMangaClient, MangatigreClient, MangaHasuClient, MangaBuddyClient, AsuraScansClient, NineMangaClient, Manhwa18Client
|
| 20 |
+
import os
|
| 21 |
+
|
| 22 |
+
from pyrogram import Client, filters
|
| 23 |
+
from typing import Dict, Tuple, List, TypedDict
|
| 24 |
+
from loguru import logger
|
| 25 |
+
|
| 26 |
+
from models.db import DB, ChapterFile, Subscription, LastChapter, MangaName, MangaOutput
|
| 27 |
+
from pagination import Pagination
|
| 28 |
+
from plugins.client import clean
|
| 29 |
+
from tools.aqueue import AQueue
|
| 30 |
+
from tools.flood import retry_on_flood
|
| 31 |
+
|
| 32 |
+
# In-memory caches keyed by callback-data strings: they map button payloads
# back to rich objects. All of them are lost on restart; stale buttons are
# answered with an "old button" alert in on_callback_query.
mangas: Dict[str, MangaCard] = dict()  # search-result buttons -> MangaCard
chapters: Dict[str, MangaChapter] = dict()  # chapter buttons -> MangaChapter
pdfs: Dict[str, str] = dict()  # NOTE(review): written nowhere in this file — possibly unused
paginations: Dict[int, Pagination] = dict()  # live chapter-list paginations by id
queries: Dict[str, Tuple[MangaClient, str]] = dict()  # plugin buttons -> (client, query)
full_pages: Dict[str, List[str]] = dict()  # "Full Page" button -> chapter unique ids
favourites: Dict[str, MangaCard] = dict()  # (un)subscribe buttons -> MangaCard
language_query: Dict[str, Tuple[str, str]] = dict()  # language buttons -> (lang, query)
users_in_channel: Dict[int, dt.datetime] = dict()  # channel-membership check cache
locks: Dict[int, asyncio.Lock] = dict()  # per-chat locks, created via get_user_lock
| 42 |
+
|
| 43 |
+
# Registry of scraper plugins grouped by UI language. The outer key is the
# label shown on the language-selection keyboard; the inner key is the label
# shown on the plugin-selection keyboard.
# NOTE(review): "MagaKakalot" looks like a typo for "MangaKakalot", but the
# key is user-visible and used to build plugin identifiers — renaming it
# would change behavior, so it is left as-is here.
plugin_dicts: Dict[str, Dict[str, MangaClient]] = {
    "🇬🇧 EN": {
        "MangaDex": MangaDexClient(),
        "Manhuaplus": ManhuaPlusClient(),
        "Mangasee": MangaSeeClient(),
        "McReader": McReaderClient(),
        "MagaKakalot": MangaKakalotClient(),
        "Manganelo": ManganeloClient(),
        "Manganato": ManganatoClient(),
        "KissManga": KissMangaClient(),
        "MangaHasu": MangaHasuClient(),
        "MangaBuddy": MangaBuddyClient(),
        "AsuraScans": AsuraScansClient(),
        "NineManga": NineMangaClient(),
        "Manhwa18": Manhwa18Client(),
    },
    "🇪🇸 ES": {
        "MangaDex": MangaDexClient(language=("es-la", "es")),
        "ManhuaKo": ManhuaKoClient(),
        "TMO": TMOClient(),
        "Mangatigre": MangatigreClient(),
        "NineManga": NineMangaClient(language='es'),
        "MangasIn": MangasInClient(),
    }
}
| 68 |
+
|
| 69 |
+
# Start from a clean slate: drop any chapter images cached by a previous run.
cache_dir = "cache"
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)
# Static /help text, loaded once at startup.
with open("tools/help_message.txt", "r") as f:
    help_msg = f.read()
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class OutputOptions(enum.IntEnum):
    """Bit flags selecting which artifacts a chapter is delivered as.

    A user's selection is persisted as a plain int bitmask in
    ``MangaOutput.output``; the operator overloads below let a flag be
    tested/combined directly against that int (``option & mask`` etc.).
    """
    PDF = 1
    CBZ = 2
    Telegraph = 4

    def __and__(self, other):
        # Bitwise AND against a plain int mask.
        return self.value & other

    def __xor__(self, other):
        # Toggle this flag inside a plain int mask.
        return self.value ^ other

    def __or__(self, other):
        # Merge this flag into a plain int mask.
        return self.value | other
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# Plugin identifiers listed here are hidden from the search keyboards and
# (via subsPaused below) skipped during subscription updates.
disabled = ["[🇬🇧 EN] McReader", "[🇬🇧 EN] Manhuaplus", "[🇪🇸 ES] MangasIn"]

# Flatten plugin_dicts into a single "[<flag> LANG] Name" -> client mapping,
# skipping the disabled entries.
plugins = dict()
for lang, plugin_dict in plugin_dicts.items():
    for name, plugin in plugin_dict.items():
        identifier = f'[{lang}] {name}'
        if identifier in disabled:
            continue
        plugins[identifier] = plugin

# subsPaused = ["[🇪🇸 ES] TMO"]
subsPaused = disabled + []
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def split_list(li):
    """Group *li* into consecutive pairs (a trailing odd element forms a singleton).

    Used to lay out inline-keyboard buttons two per row.
    """
    rows = []
    index = 0
    while index < len(li):
        rows.append(li[index:index + 2])
        index += 2
    return rows
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_buttons_for_options(user_options: int):
    """Build the /options toggle keyboard for the given bitmask.

    Each OutputOptions flag gets one row, prefixed ✅ when enabled in
    *user_options* and ❌ otherwise; the callback data is "options_<value>".
    """
    rows = []
    for option in OutputOptions:
        mark = "✅" if option & user_options else "❌"
        rows.append([InlineKeyboardButton(f'{mark} {option.name}', f"options_{option.value}")])
    return InlineKeyboardMarkup(rows)
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# Pyrogram client; credentials come from env.json / the process environment
# (see config.py).
bot = Client('bot',
             api_id=int(env_vars.get('API_ID')),
             api_hash=env_vars.get('API_HASH'),
             bot_token=env_vars.get('BOT_TOKEN'),
             max_concurrent_transmissions=3)

# Work queue of (chapter, chat_id) pairs consumed by chapter_creation workers.
pdf_queue = AQueue()

# Initialize the DB singleton with the configured DSN (or its default).
if dbname:
    DB(dbname)
else:
    DB()
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# Swallow everything that is NOT an incoming private message (groups,
# channels, outgoing copies): the bot only serves direct messages.
@bot.on_message(filters=~(filters.private & filters.incoming))
async def on_chat_or_channel_message(client: Client, message: Message):
    pass
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@bot.on_message()
async def on_private_message(client: Client, message: Message):
    """Force-subscribe gate: every private message passes through here first.

    If the CHANNEL env var is set, require the sender to be a member of that
    channel before letting the update continue to the other handlers.
    Membership checks are cached for one day in users_in_channel.
    """
    channel = env_vars.get('CHANNEL')
    if not channel:
        # No gate configured — let every handler see the message.
        return message.continue_propagation()
    if in_channel_cached := users_in_channel.get(message.from_user.id):
        if dt.datetime.now() - in_channel_cached < dt.timedelta(days=1):
            return message.continue_propagation()
    try:
        if await client.get_chat_member(channel, message.from_user.id):
            users_in_channel[message.from_user.id] = dt.datetime.now()
            return message.continue_propagation()
    except pyrogram.errors.UsernameNotOccupied:
        # Misconfigured channel name: fail open rather than locking users out.
        logger.debug("Channel does not exist, therefore bot will continue to operate normally")
        return message.continue_propagation()
    except pyrogram.errors.ChatAdminRequired:
        # Bot can't inspect membership: fail open as well.
        logger.debug("Bot is not admin of the channel, therefore bot will continue to operate normally")
        return message.continue_propagation()
    except pyrogram.errors.UserNotParticipant:
        # Not a member: block the update and show a join button.
        await message.reply("In order to use the bot you must join it's update channel.",
                            reply_markup=InlineKeyboardMarkup(
                                [[InlineKeyboardButton('ᴊᴏɪɴ ᴄʜᴀɴɴᴇʟ!', url=f't.me/{channel}')]]
                            ))
    except pyrogram.ContinuePropagation:
        # Pyrogram control-flow exceptions must not be swallowed below.
        raise
    except pyrogram.StopPropagation:
        raise
    except BaseException as e:
        logger.exception(e)
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@bot.on_message(filters=filters.command(['start']))
async def on_start(client: Client, message: Message):
    """Handle /start: greet the user and point them at /help."""
    user_id = message.from_user.id
    logger.info(f"User {user_id} started the bot")
    welcome = ("›› **__Welcome to the best manga pdf bot in telegram!!\n"
               "\n"
               "How to use? Just type the name of some manga you want to keep up to date.\n"
               "\n"
               "For example:\n"
               "`Fire Force`\n"
               "\n"
               "Check /help for more information.__**")
    await message.reply(welcome)
    logger.info(f"User {user_id} finished the start command")
| 180 |
+
|
| 181 |
+
|
| 182 |
+
@bot.on_message(filters=filters.command(['help']))
async def on_help(client: Client, message: Message):
    # /help: reply with the static help text loaded at startup.
    await message.reply(help_msg)
| 185 |
+
|
| 186 |
+
|
| 187 |
+
@bot.on_message(filters=filters.command(['queue']))
async def on_queue(client: Client, message: Message):
    """Handle /queue: report how many chapters are waiting in the PDF queue.

    Renamed from ``on_help`` — the original duplicated the /help handler's
    function name, rebinding the module attribute (registration still worked
    via the decorator, but the duplicate name broke introspection/debugging).
    """
    await message.reply(f'Queue size: {pdf_queue.qsize()}')
| 190 |
+
|
| 191 |
+
|
| 192 |
+
@bot.on_message(filters=filters.command(['refresh']))
async def on_refresh(client: Client, message: Message):
    """Handle /refresh (in reply to a bot-sent chapter): forget its cached file ids.

    Works when replying either to a .pdf/.cbz document or to a message whose
    caption carries a "[Read on telegraph](...)" link; the next request for
    that chapter will be re-downloaded and re-uploaded.
    """
    # NOTE(review): if the command is not a reply, reply_to_message is None and
    # the first attribute access raises — the guard below runs too late.
    text = message.reply_to_message.text or message.reply_to_message.caption
    if text:
        regex = re.compile(r'\[Read on telegraph]\((.*)\)')
        match = regex.search(text.markdown)
    else:
        match = None
    document = message.reply_to_message.document
    if not (message.reply_to_message and message.reply_to_message.outgoing and
            ((document and document.file_name[-4:].lower() in ['.pdf', '.cbz']) or match)):
        return await message.reply("This command only works when it replies to a manga file that bot sent to you")
    db = DB()
    # Look the ChapterFile up either by the document's unique id or by the
    # telegraph URL captured from the caption.
    if document:
        chapter = await db.get_chapter_file_by_id(document.file_unique_id)
    else:
        chapter = await db.get_chapter_file_by_id(match.group(1))
    if not chapter:
        return await message.reply("This file was already refreshed")
    await db.erase(chapter)
    return await message.reply("File refreshed successfully!")
| 213 |
+
|
| 214 |
+
|
| 215 |
+
@bot.on_message(filters=filters.command(['subs']))
async def on_subs(client: Client, message: Message):
    """Handle /subs: list up to 10 of the user's subscriptions.

    Usage: ``/subs [word ...]`` — the optional words are passed to
    ``DB.get_subs`` as a filter list. Fix over the original: the list
    comprehension reused ``filter_`` as its loop variable, shadowing the
    outer string of the same name; the loop variable is now ``token``.
    """
    db = DB()

    # Everything after the command name, or '' when /subs is given alone.
    filter_ = message.text.split(maxsplit=1)[1] if message.text.split(maxsplit=1)[1:] else ''
    filter_list = [token.strip() for token in filter_.split(' ') if token.strip()]

    subs = await db.get_subs(str(message.from_user.id), filter_list)

    lines = []
    for sub in subs[:10]:  # cap the reply at 10 entries to keep it readable
        lines.append(f'<a href="{sub.url}">{sub.name}</a>')
        lines.append(f'`/cancel {sub.url}`')
        lines.append('')

    if not lines:
        if filter_:
            return await message.reply("You have no subscriptions with that filter.")
        return await message.reply("You have no subscriptions yet.")

    text = "\n".join(lines)
    await message.reply(f'Your subscriptions:\n\n{text}\nTo see more subscriptions use `/subs filter`', disable_web_page_preview=True)
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@bot.on_message(filters=filters.regex(r'^/cancel ([^ ]+)$'))
async def on_cancel_command(client: Client, message: Message):
    """Handle ``/cancel <manga-url>``: remove that subscription for the sender."""
    db = DB()
    # Subscription primary key is (manga url, user id).
    sub = await db.get(Subscription, (message.matches[0].group(1), str(message.from_user.id)))
    if not sub:
        return await message.reply("You were not subscribed to that manga.")
    await db.erase(sub)
    return await message.reply("You will no longer receive updates for that manga.")
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@bot.on_message(filters=filters.command(['options']))
async def on_options_command(client: Client, message: Message):
    """Handle /options: show the output-format toggle keyboard."""
    db = DB()
    user_options = await db.get(MangaOutput, str(message.from_user.id))
    # No stored row means "everything enabled" (all bits set).
    # NOTE(review): options_click uses (2 << 30) - 1 as its default while this
    # uses (1 << 30) - 1 — both have all option bits set, but the values differ;
    # confirm which is intended.
    user_options = user_options.output if user_options else (1 << 30) - 1
    buttons = get_buttons_for_options(user_options)
    return await message.reply("Select the desired output format.", reply_markup=buttons)
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# Fallback for any slash-command no earlier handler claimed.
@bot.on_message(filters=filters.regex(r'^/'))
async def on_unknown_command(client: Client, message: Message):
    await message.reply("Unknown command")
| 261 |
+
|
| 262 |
+
|
| 263 |
+
@bot.on_message(filters=filters.text)
async def on_message(client, message: Message):
    """Treat any plain text as a search query: ask for the search language.

    Callback payloads are "lang_<language>_<hash(query)>"; the "lang_None_"
    entry backs the ◀️ Back button on the plugin-selection screen.
    """
    language_query[f"lang_None_{hash(message.text)}"] = (None, message.text)
    for language in plugin_dicts.keys():
        language_query[f"lang_{language}_{hash(message.text)}"] = (language, message.text)
    await bot.send_message(message.chat.id, "Select search languages.", reply_markup=InlineKeyboardMarkup(
        split_list([InlineKeyboardButton(language, callback_data=f"lang_{language}_{hash(message.text)}")
                    for language in plugin_dicts.keys()])
    ))
| 272 |
+
|
| 273 |
+
|
| 274 |
+
async def options_click(client, callback: CallbackQuery):
    """Toggle one OutputOptions bit ("options_<value>") and redraw the keyboard."""
    db = DB()
    user_options = await db.get(MangaOutput, str(callback.from_user.id))
    if not user_options:
        # First toggle for this user: start from "everything enabled".
        # NOTE(review): on_options_command defaults to (1 << 30) - 1 instead —
        # confirm which default is intended.
        user_options = MangaOutput(user_id=str(callback.from_user.id), output=(2 << 30) - 1)
    option = int(callback.data.split('_')[-1])
    user_options.output ^= option  # flip just the clicked flag
    buttons = get_buttons_for_options(user_options.output)
    await db.add(user_options)
    return await callback.message.edit_reply_markup(reply_markup=buttons)
| 284 |
+
|
| 285 |
+
|
| 286 |
+
async def language_click(client, callback: CallbackQuery):
    """Handle a language button: show that language's plugin keyboard.

    A "lang_None_<hash>" payload (the Back button) re-renders the language
    selection instead.
    """
    lang, query = language_query[callback.data]
    if not lang:
        return await callback.message.edit("Select search languages.", reply_markup=InlineKeyboardMarkup(
            split_list([InlineKeyboardButton(language, callback_data=f"lang_{language}_{hash(query)}")
                        for language in plugin_dicts.keys()])
        ))
    # Register one callback payload per plugin so plugin_click can recover
    # both the client and the original query text.
    for identifier, manga_client in plugin_dicts[lang].items():
        queries[f"query_{lang}_{identifier}_{hash(query)}"] = (manga_client, query)
    await callback.message.edit(f"Language: {lang}\n\nSelect search plugin.", reply_markup=InlineKeyboardMarkup(
        split_list([InlineKeyboardButton(identifier, callback_data=f"query_{lang}_{identifier}_{hash(query)}")
                    for identifier in plugin_dicts[lang].keys() if f'[{lang}] {identifier}' not in disabled]) + [
            [InlineKeyboardButton("◀️ Back", callback_data=f"lang_None_{hash(query)}")]]
    ))
| 300 |
+
|
| 301 |
+
|
| 302 |
+
async def plugin_click(client, callback: CallbackQuery):
    """Run the chosen plugin's search and present the results as buttons."""
    manga_client, query = queries[callback.data]
    results = await manga_client.search(query)
    if not results:
        await bot.send_message(callback.from_user.id, "No manga found for given query.")
        return
    # Cache each result so its button can later be resolved by unique id.
    for result in results:
        mangas[result.unique()] = result
    await bot.send_message(callback.from_user.id,
                           "This is the result of your search",
                           reply_markup=InlineKeyboardMarkup([
                               [InlineKeyboardButton(result.name, callback_data=result.unique())] for result in results
                           ]))
| 315 |
+
|
| 316 |
+
|
| 317 |
+
async def manga_click(client, callback: CallbackQuery, pagination: Pagination = None):
    """Show one page of a manga's chapter list with navigation/subscribe buttons.

    Called both for a fresh manga button (pagination=None: a new Pagination is
    created and a photo message sent) and from pagination_click (the existing
    message's keyboard is edited in place).
    """
    if pagination is None:
        pagination = Pagination()
        paginations[pagination.id] = pagination

    if pagination.manga is None:
        manga = mangas[callback.data]
        pagination.manga = manga

    results = await pagination.manga.client.get_chapters(pagination.manga, pagination.page)

    if not results:
        await callback.answer("Ups, no chapters there.", show_alert=True)
        return

    # "Full Page" button payload: remembers every chapter id on this page.
    full_page_key = f'full_page_{hash("".join([result.unique() for result in results]))}'
    full_pages[full_page_key] = []
    for result in results:
        chapters[result.unique()] = result
        full_pages[full_page_key].append(result.unique())

    db = DB()
    subs = await db.get(Subscription, (pagination.manga.url, str(callback.from_user.id)))

    # << only makes sense past the first page.
    prev = [InlineKeyboardButton('<<', f'{pagination.id}_{pagination.page - 1}')]
    next_ = [InlineKeyboardButton('>>', f'{pagination.id}_{pagination.page + 1}')]
    footer = [prev + next_] if pagination.page > 1 else [next_]

    fav = [[InlineKeyboardButton(
        "Unsubscribe" if subs else "Subscribe",
        f"{'unfav' if subs else 'fav'}_{pagination.manga.unique()}"
    )]]
    # Register both payloads so favourite_click can resolve either action.
    favourites[f"fav_{pagination.manga.unique()}"] = pagination.manga
    favourites[f"unfav_{pagination.manga.unique()}"] = pagination.manga

    full_page = [[InlineKeyboardButton('Full Page', full_page_key)]]

    buttons = InlineKeyboardMarkup(fav + footer + [
        [InlineKeyboardButton(result.name, result.unique())] for result in results
    ] + full_page + footer)

    if pagination.message is None:
        try:
            message = await bot.send_photo(callback.from_user.id,
                                           pagination.manga.picture_url,
                                           f'{pagination.manga.name}\n'
                                           f'{pagination.manga.get_url()}', reply_markup=buttons)
            pagination.message = message
        except pyrogram.errors.BadRequest as e:
            # Telegram rejected the remote cover URL — download it through the
            # plugin's cache and send the local file instead.
            file_name = f'pictures/{pagination.manga.unique()}.jpg'
            await pagination.manga.client.get_cover(pagination.manga, cache=True, file_name=file_name)
            message = await bot.send_photo(callback.from_user.id,
                                           f'./cache/{pagination.manga.client.name}/{file_name}',
                                           f'{pagination.manga.name}\n'
                                           f'{pagination.manga.get_url()}', reply_markup=buttons)
            pagination.message = message
    else:
        # Page change: just swap the keyboard on the existing message.
        await bot.edit_message_reply_markup(
            callback.from_user.id,
            pagination.message.id,
            reply_markup=buttons
        )
| 379 |
+
|
| 380 |
+
# Serializes creation of the per-chat locks stored in `locks` (see get_user_lock).
users_lock = asyncio.Lock()
| 381 |
+
|
| 382 |
+
|
| 383 |
+
async def get_user_lock(chat_id: int):
    """Return the asyncio.Lock for *chat_id*, creating it on first use.

    Creation is serialized through the module-level ``users_lock`` so two
    coroutines cannot race to install different locks for the same chat.
    """
    async with users_lock:
        if chat_id not in locks:
            locks[chat_id] = asyncio.Lock()
        return locks[chat_id]
| 389 |
+
|
| 390 |
+
|
| 391 |
+
async def chapter_click(client, data, chat_id):
    """Queue the chapter behind callback payload *data* for delivery to *chat_id*."""
    await pdf_queue.put(chapters[data], int(chat_id))
    logger.debug(f"Put chapter {chapters[data].name} to queue for user {chat_id} - queue size: {pdf_queue.qsize()}")
| 394 |
+
|
| 395 |
+
|
| 396 |
+
async def send_manga_chapter(client: Client, chapter, chat_id):
    """Deliver one chapter to *chat_id* in the formats the user enabled.

    Downloads/converts only what the ChapterFile cache does not already hold
    (PDF/CBZ Telegram file ids, telegraph URL); re-sends cached file ids
    otherwise, then persists any newly obtained ids.
    """
    db = DB()

    chapter_file = await db.get(ChapterFile, chapter.url)
    options = await db.get(MangaOutput, str(chat_id))
    options = options.output if options else (1 << 30) - 1  # default: everything on

    error_caption = '\n'.join([
        f'{chapter.manga.name} - {chapter.name}',
        f'{chapter.get_url()}'
    ])

    success_caption = f'{chapter.manga.name} - {chapter.name}\n'

    # Download the pictures only if some enabled format is missing its cache
    # entry; the last line short-circuits when no output format is enabled.
    download = not chapter_file
    download = download or options & OutputOptions.PDF and not chapter_file.file_id
    download = download or options & OutputOptions.CBZ and not chapter_file.cbz_id
    download = download or options & OutputOptions.Telegraph and not chapter_file.telegraph_url
    download = download and options & ((1 << len(OutputOptions)) - 1) != 0

    if download:
        pictures_folder = await chapter.client.download_pictures(chapter)
        if not chapter.pictures:
            return await client.send_message(chat_id,
                                             f'There was an error parsing this chapter or chapter is missing' +
                                             f', please check the chapter at the web\n\n{error_caption}')
        thumb_path = fld2thumb(pictures_folder)

    chapter_file = chapter_file or ChapterFile(url=chapter.url)

    if download and not chapter_file.telegraph_url:
        chapter_file.telegraph_url = await img2tph(chapter, clean(f'{chapter.manga.name} {chapter.name}'))

    if options & OutputOptions.Telegraph:
        success_caption += f'[Read on telegraph]({chapter_file.telegraph_url})\n'
    success_caption += f'[Read on website]({chapter.get_url()})'

    # File-name-safe caption, truncated to Telegram-friendly lengths.
    ch_name = clean(f'{clean(chapter.manga.name, 25)} - {chapter.name}', 45)

    media_docs = []

    if options & OutputOptions.PDF:
        if chapter_file.file_id:
            media_docs.append(InputMediaDocument(chapter_file.file_id))
        else:
            # fld2pdf is CPU-bound; run it off the event loop.
            try:
                pdf = await asyncio.get_running_loop().run_in_executor(None, fld2pdf, pictures_folder, ch_name)
            except Exception as e:
                logger.exception(f'Error creating pdf for {chapter.name} - {chapter.manga.name}\n{e}')
                return await client.send_message(chat_id, f'There was an error making the pdf for this chapter. '
                                                          f'Forward this message to the bot group to report the '
                                                          f'error.\n\n{error_caption}')
            media_docs.append(InputMediaDocument(pdf, thumb=thumb_path))

    if options & OutputOptions.CBZ:
        if chapter_file.cbz_id:
            media_docs.append(InputMediaDocument(chapter_file.cbz_id))
        else:
            try:
                cbz = await asyncio.get_running_loop().run_in_executor(None, fld2cbz, pictures_folder, ch_name)
            except Exception as e:
                logger.exception(f'Error creating cbz for {chapter.name} - {chapter.manga.name}\n{e}')
                return await client.send_message(chat_id, f'There was an error making the cbz for this chapter. '
                                                          f'Forward this message to the bot group to report the '
                                                          f'error.\n\n{error_caption}')
            media_docs.append(InputMediaDocument(cbz, thumb=thumb_path))

    if len(media_docs) == 0:
        # Telegraph-only (or nothing enabled): send just the caption text.
        messages: list[Message] = await retry_on_flood(client.send_message)(chat_id, success_caption)
    else:
        media_docs[-1].caption = success_caption
        messages: list[Message] = await retry_on_flood(client.send_media_group)(chat_id, media_docs)

    # Save file ids returned by Telegram so future sends reuse the upload.
    if download and media_docs:
        for message in [x for x in messages if x.document]:
            if message.document.file_name.endswith('.pdf'):
                chapter_file.file_id = message.document.file_id
                chapter_file.file_unique_id = message.document.file_unique_id
            elif message.document.file_name.endswith('.cbz'):
                chapter_file.cbz_id = message.document.file_id
                chapter_file.cbz_unique_id = message.document.file_unique_id

    if download:
        shutil.rmtree(pictures_folder, ignore_errors=True)
        await db.add(chapter_file)
| 482 |
+
|
| 483 |
+
|
| 484 |
+
async def pagination_click(client: Client, callback: CallbackQuery):
    """Handle a '<pagination_id>_<page>' button: jump the pagination to that page."""
    raw_id, raw_page = callback.data.split('_')
    pagination = paginations[int(raw_id)]
    pagination.page = int(raw_page)
    await manga_click(client, callback, pagination)
| 489 |
+
|
| 490 |
+
|
| 491 |
+
async def full_page_click(client: Client, callback: CallbackQuery):
    """Queue every chapter of the currently shown page, oldest first."""
    chapters_data = full_pages[callback.data]
    # Pages list newest-first; reverse so delivery order is chronological.
    for chapter_data in reversed(chapters_data):
        try:
            await chapter_click(client, chapter_data, callback.from_user.id)
        except Exception as e:
            logger.exception(e)
| 498 |
+
|
| 499 |
+
|
| 500 |
+
async def favourite_click(client: Client, callback: CallbackQuery):
    """Handle the Subscribe/Unsubscribe button ("fav_<id>" / "unfav_<id>").

    Updates the Subscription row, flips the button label in place, and makes
    sure a MangaName row exists so the updater can resolve the manga's name.
    """
    action, data = callback.data.split('_')
    fav = action == 'fav'
    manga = favourites[callback.data]
    db = DB()
    subs = await db.get(Subscription, (manga.url, str(callback.from_user.id)))
    if not subs and fav:
        await db.add(Subscription(url=manga.url, user_id=str(callback.from_user.id)))
    if subs and not fav:
        await db.erase(subs)
    # Button state was stale (double-click or old keyboard): just inform.
    if subs and fav:
        await callback.answer("You are already subscribed", show_alert=True)
    if not subs and not fav:
        await callback.answer("You are not subscribed", show_alert=True)
    # Row 0 of the keyboard is always the subscribe/unsubscribe button.
    reply_markup = callback.message.reply_markup
    keyboard = reply_markup.inline_keyboard
    keyboard[0] = [InlineKeyboardButton(
        "Unsubscribe" if fav else "Subscribe",
        f"{'unfav' if fav else 'fav'}_{data}"
    )]
    await bot.edit_message_reply_markup(callback.from_user.id, callback.message.id,
                                        InlineKeyboardMarkup(keyboard))
    db_manga = await db.get(MangaName, manga.url)
    if not db_manga:
        await db.add(MangaName(url=manga.url, name=manga.name))
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def is_pagination_data(callback: CallbackQuery):
    """Return True iff callback.data is '<pagination_id>_<page>' for a live
    pagination that belongs to this user's own pagination message."""
    data = callback.data
    if not re.match(r'\d+_\d+', data):
        return False
    pagination = paginations.get(int(data.split('_')[0]))
    if pagination is None or not pagination.message:
        return False
    owner_message = pagination.message
    if owner_message.chat.id != callback.from_user.id:
        return False
    return owner_message.id == callback.message.id
| 543 |
+
|
| 544 |
+
|
| 545 |
+
@bot.on_callback_query()
async def on_callback_query(client, callback: CallbackQuery):
    """Central dispatcher: route each button press by which in-memory cache
    recognizes its payload; unrecognized payloads are stale buttons from a
    previous process and get an alert instead."""
    if callback.data in queries:
        await plugin_click(client, callback)
    elif callback.data in mangas:
        await manga_click(client, callback)
    elif callback.data in chapters:
        await chapter_click(client, callback.data, callback.from_user.id)
    elif callback.data in full_pages:
        await full_page_click(client, callback)
    elif callback.data in favourites:
        await favourite_click(client, callback)
    elif is_pagination_data(callback):
        await pagination_click(client, callback)
    elif callback.data in language_query:
        await language_click(client, callback)
    elif callback.data.startswith('options'):
        await options_click(client, callback)
    else:
        await bot.answer_callback_query(callback.id, 'This is an old button, please redo the search', show_alert=True)
        return
    # Acknowledge the press so the client stops showing a spinner; the
    # handlers above may have already answered, hence the broad guard.
    try:
        await callback.answer()
    except BaseException as e:
        logger.warning(e)
| 570 |
+
|
| 571 |
+
|
| 572 |
+
async def remove_subscriptions(sub: str):
    """Drop every subscription row of user *sub* (used when the user blocks the bot)."""
    database = DB()
    await database.erase_subs(sub)
| 576 |
+
|
| 577 |
+
|
| 578 |
+
async def update_mangas():
    """One full subscription-update pass.

    1. Load all subscriptions, last-seen chapters and manga names.
    2. Match each subscribed URL to the plugin that owns it.
    3. Per plugin, ask which URLs changed since the stored last chapter.
    4. For changed URLs, collect the new chapters (capped at 20) and advance
       the LastChapter marker.
    5. Queue every new chapter for every subscriber, dropping users that
       blocked the bot.
    """
    logger.debug("Updating mangas")
    db = DB()
    subscriptions = await db.get_all(Subscription)
    last_chapters = await db.get_all(LastChapter)
    manga_names = await db.get_all(MangaName)

    subs_dictionary = dict()       # manga url -> [user ids]
    chapters_dictionary = dict()   # manga url -> LastChapter row
    url_client_dictionary = dict()  # manga url -> owning plugin client
    client_url_dictionary = {client: set() for client in plugins.values()}
    manga_dict = dict()            # manga url -> MangaName row

    for subscription in subscriptions:
        if subscription.url not in subs_dictionary:
            subs_dictionary[subscription.url] = []
        subs_dictionary[subscription.url].append(subscription.user_id)

    for last_chapter in last_chapters:
        chapters_dictionary[last_chapter.url] = last_chapter

    for manga in manga_names:
        manga_dict[manga.url] = manga

    # Route each subscribed URL to its plugin (paused plugins are skipped).
    for url in subs_dictionary:
        for ident, client in plugins.items():
            if ident in subsPaused:
                continue
            if await client.contains_url(url):
                url_client_dictionary[url] = client
                client_url_dictionary[client].add(url)

    # Ask each plugin, in bulk, which tracked URLs actually changed; URLs
    # that did not change are dropped from the per-URL pass below.
    for client, urls in client_url_dictionary.items():
        logger.debug(f'Updating {client.name}')
        logger.debug(f'Urls:\t{list(urls)}')
        new_urls = [url for url in urls if not chapters_dictionary.get(url)]
        logger.debug(f'New Urls:\t{new_urls}')
        to_check = [chapters_dictionary[url] for url in urls if chapters_dictionary.get(url)]
        if len(to_check) == 0:
            continue
        try:
            updated, not_updated = await client.check_updated_urls(to_check)
        except BaseException as e:
            # On a site failure, treat everything as not-updated this round.
            logger.exception(f"Error while checking updates for site: {client.name}, err: {e}")
            updated = []
            not_updated = list(urls)
        for url in not_updated:
            del url_client_dictionary[url]
        logger.debug(f'Updated:\t{list(updated)}')
        logger.debug(f'Not Updated:\t{list(not_updated)}')

    updated = dict()  # manga url -> new chapters, oldest first

    for url, client in url_client_dictionary.items():
        try:
            if url not in manga_dict:
                continue
            manga_name = manga_dict[url].name
            if url not in chapters_dictionary:
                # First time we see this manga: just record the newest chapter
                # as the baseline without sending anything.
                agen = client.iter_chapters(url, manga_name)
                last_chapter = await anext(agen)
                await db.add(LastChapter(url=url, chapter_url=last_chapter.url))
                await asyncio.sleep(10)
            else:
                last_chapter = chapters_dictionary[url]
                new_chapters: List[MangaChapter] = []
                counter = 0
                # Walk newest-to-oldest until the stored marker (max 20).
                async for chapter in client.iter_chapters(url, manga_name):
                    if chapter.url == last_chapter.chapter_url:
                        break
                    new_chapters.append(chapter)
                    counter += 1
                    if counter == 20:
                        break
                if new_chapters:
                    last_chapter.chapter_url = new_chapters[0].url
                    await db.add(last_chapter)
                    updated[url] = list(reversed(new_chapters))
                    for chapter in new_chapters:
                        if chapter.unique() not in chapters:
                            chapters[chapter.unique()] = chapter
                await asyncio.sleep(1)  # be polite to the scraped sites
        except BaseException as e:
            logger.exception(f'An exception occurred getting new chapters for url {url}: {e}')

    # Fan the new chapters out to every subscriber.
    blocked = set()
    for url, chapter_list in updated.items():
        for chapter in chapter_list:
            logger.debug(f'Updating {chapter.manga.name} - {chapter.name}')
            for sub in subs_dictionary[url]:
                if sub in blocked:
                    continue
                try:
                    await pdf_queue.put(chapter, int(sub))
                    logger.debug(f"Put chapter {chapter} to queue for user {sub} - queue size: {pdf_queue.qsize()}")
                except pyrogram.errors.UserIsBlocked:
                    logger.info(f'User {sub} blocked the bot')
                    await remove_subscriptions(sub)
                    blocked.add(sub)
                except BaseException as e:
                    logger.exception(f'An exception occurred sending new chapter: {e}')
| 679 |
+
|
| 680 |
+
|
| 681 |
+
async def manga_updater():
    """Background loop: run update_mangas roughly every 5 minutes.

    The time the update itself took is subtracted from the next sleep so the
    cycle cadence stays near-constant.
    """
    minutes = 5
    while True:
        wait_time = minutes * 60
        try:
            start = dt.datetime.now()
            await update_mangas()
            elapsed = dt.datetime.now() - start
            wait_time = max((dt.timedelta(seconds=wait_time) - elapsed).total_seconds(), 0)
            logger.debug(f'Time elapsed updating mangas: {elapsed}, waiting for {wait_time}')
        except BaseException as e:
            logger.exception(f'An exception occurred during chapters update: {e}')
        if wait_time:
            await asyncio.sleep(wait_time)
| 695 |
+
|
| 696 |
+
|
| 697 |
+
async def chapter_creation(worker_id: int = 0):
    """
    Background worker loop: consume (chapter, chat_id) pairs from pdf_queue
    and deliver them with send_manga_chapter.

    :param worker_id: numeric id, used only in log messages.
    :return: never returns normally; runs until the task is cancelled.
    """
    logger.debug(f"Worker {worker_id}: Starting worker")
    while True:
        chapter, chat_id = await pdf_queue.get(worker_id)
        logger.debug(f"Worker {worker_id}: Got chapter '{chapter.name}' from queue for user '{chat_id}'")
        try:
            await send_manga_chapter(bot, chapter, chat_id)
        except Exception:
            # The original bare `except:` also swallowed asyncio.CancelledError,
            # preventing clean worker shutdown; catch only real errors and let
            # cancellation propagate (the finally still releases the slot).
            logger.exception(f"Error sending chapter {chapter.name} to user {chat_id}")
        finally:
            pdf_queue.release(chat_id)
config.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import json

# Prefer a local env.json file for configuration; otherwise fall back to the
# process environment (the usual case on Heroku/Docker deployments).
env_file = "env.json"
if os.path.exists(env_file):
    with open(env_file) as f:
        env_vars = json.load(f)
else:
    env_vars = dict(os.environ)

# Database URL resolution order: primary URL, then the standard one, then a
# local SQLite file for development.
dbname = env_vars.get('DATABASE_URL_PRIMARY') or env_vars.get('DATABASE_URL') or 'sqlite:///test.db'

# SQLAlchemy no longer accepts the legacy `postgres://` scheme that Heroku
# still hands out; rewrite only the scheme portion.
if dbname.startswith('postgres://'):
    dbname = dbname.replace('postgres://', 'postgresql://', 1)
|
heroku.yml
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
build:
|
| 2 |
+
docker:
|
| 3 |
+
worker: Dockerfile
|
img2cbz/__init__.py
ADDED
|
File without changes
|
img2cbz/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
img2cbz/core.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import zipfile
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import List
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def fld2cbz(folder: Path, name: str):
    """Pack every image in *folder* into `<name>.cbz` inside it; return the path."""
    archive = folder / f'{name}.cbz'
    pages = sorted(
        (f for f in folder.glob(r'*') if re.match(r'.*\.(jpg|png|jpeg|webp)', f.name)),
        key=lambda f: f.name,
    )
    img2cbz(pages, archive)
    return archive
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def img2cbz(files: List[Path], out: Path):
    """Write *files*, in the given order, into the zip archive *out*.

    A .cbz is just a renamed .zip, so ZipFile handles it directly.

    :param files: image paths to include (order is preserved).
    :param out: destination archive path.
    """
    # Fix: the archive handle was opened and closed manually and leaked if a
    # write raised partway through; the context manager guarantees the file
    # is flushed and closed on every path.
    with zipfile.ZipFile(out, 'w') as zip_file:
        for image_file in files:
            zip_file.write(image_file, compress_type=zipfile.ZIP_DEFLATED)
|
img2pdf/__init__.py
ADDED
|
File without changes
|
img2pdf/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
img2pdf/core.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from io import BytesIO
|
| 3 |
+
from typing import List, BinaryIO
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from fpdf import FPDF
|
| 6 |
+
import re
|
| 7 |
+
|
| 8 |
+
from PIL import Image
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def fld2pdf(folder: Path, out: str):
    """Bundle all images found in *folder* into `<out>.pdf`; return the pdf path."""
    pages = sorted(
        (f for f in folder.glob(r'*') if re.match(r'.*\.(jpg|png|jpeg|webp)', f.name)),
        key=lambda f: f.name,
    )
    pdf_path = folder / f'{out}.pdf'
    img2pdf(pages, pdf_path)
    return pdf_path
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def new_img(path: Path) -> Image.Image:
    """Open *path* with Pillow, converting to RGB when needed (e.g. RGBA/P modes)."""
    image = Image.open(path)
    return image if image.mode == 'RGB' else image.convert('RGB')
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def old_img2pdf(files: List[Path], out: Path):
    """Legacy PDF writer: let Pillow append all pages into a single file.

    :param files: ordered page images.
    :param out: destination pdf path.
    """
    images = [new_img(p) for p in files]
    try:
        images[0].save(out, resolution=100.0, save_all=True, append_images=images[1:])
    finally:
        # Fix: images were leaked when save() raised; always release them.
        for image in images:
            image.close()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def pil_image(path: Path) -> (BytesIO, int, int):
    """Load *path*, re-encode it as JPEG in memory, and return (buffer, width, height)."""
    image = new_img(path)
    width, height = image.width, image.height
    try:
        buffer = BytesIO()
        image.save(buffer, format='JPEG')
    finally:
        # The source image is always released, even if re-encoding fails.
        image.close()
    return buffer, width, height
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def unicode_to_latin1(s):
    """Coerce *s* into a latin-1-safe string for FPDF metadata.

    Common typographic punctuation is mapped onto the Windows-1252 byte
    slots first; every other non-latin-1 character becomes '?'.
    """
    punctuation = str.maketrans({
        '\u2019': '\x92',  # right single quotation mark
        '\u201d': '\x94',  # right double quotation mark
        '\u2013': '\x96',  # en dash
    })
    s = s.translate(punctuation)
    return s.encode('latin1', 'replace').decode('latin1')
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def img2pdf(files: List[Path], out: Path):
    """Render each image in *files* as one full-size PDF page and save to *out*."""
    document = FPDF('P', 'pt')
    for image_path in files:
        buffer, width, height = pil_image(image_path)
        # One page per picture, sized exactly to the picture itself.
        document.add_page(format=(width, height))
        document.image(buffer, 0, 0, width, height)
        buffer.close()
    # PDF metadata only accepts latin-1, so the title must be coerced.
    document.set_title(unicode_to_latin1(out.stem))
    document.output(out, "F")
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def fld2thumb(folder: Path):
    """Create a Telegram-ready thumbnail from the images found in *folder*."""
    pages = sorted(
        (f for f in folder.glob(r'*') if re.match(r'.*\.(jpg|png|jpeg|webp)', f.name)),
        key=lambda f: f.name,
    )
    return make_thumb(folder, pages)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def make_thumb(folder, files):
    """Build a Telegram-sized JPEG thumbnail from the first page image.

    When a second page exists, its aspect ratio is used as the target for
    cropping very tall (webtoon-style) first pages.

    :param folder: chapter folder; thumbnail is written to `<folder>/thumbnail/thumbnail.jpg`.
    :param files: sorted page images; assumes at least one entry — TODO confirm callers guarantee this.
    """
    aspect_ratio = 0.7
    if len(files) > 1:
        with Image.open(files[1]) as img:
            aspect_ratio = img.width / img.height

    # Fix: Image.open(...)  was never closed — convert() returns a *new*
    # image, so the original file handle leaked. The context manager
    # releases it once the converted copy is materialised.
    with Image.open(files[0]) as first_page:
        thumbnail = first_page.convert('RGB')
    thumbnail = crop_thumb(thumbnail, aspect_ratio)
    # Telegram requires thumbnails no larger than 300x300.
    tg_max_size = (300, 300)
    thumbnail.thumbnail(tg_max_size)
    thumb_path = folder / 'thumbnail' / 'thumbnail.jpg'  # was a pointless f-string
    os.makedirs(thumb_path.parent, exist_ok=True)
    thumbnail.save(thumb_path)
    thumbnail.close()
    return thumb_path
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def crop_thumb(thumb: Image.Image, aspect_ratio):
    """Trim very tall pages down to roughly *aspect_ratio*.

    Pages at most twice as tall as they are wide are returned untouched.
    """
    width, height = thumb.width, thumb.height
    if width * 2 <= height:
        bottom = int(height - (width / aspect_ratio))
        if bottom <= 0:
            # Degenerate ratio: fall back to a square-ish crop.
            bottom = width
        thumb = thumb.crop((0, 0, width, bottom))
    return thumb
|
img2tph/__init__.py
ADDED
|
File without changes
|
img2tph/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
img2tph/core.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
from telegraph.aio import Telegraph
|
| 4 |
+
|
| 5 |
+
from plugins import MangaChapter
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
async def img2tph(manga_chapter: MangaChapter, name: str):
    """Publish the chapter's page images as a Telegraph article; return its URL."""
    # One <img> tag per page, newline-separated, as the article body.
    content = '\n'.join(f'<img src="{img}"/>' for img in manga_chapter.pictures)

    client = Telegraph()
    await client.create_account('Alpha')
    page = await client.create_page(name, author_name='AlphaBot', author_url='https://t.me/idkpythonbot', html_content=content)
    return page['url']
|
logger.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from loguru import logger
from config import env_vars
import sys

# Drop loguru's default stderr sink (handler id 0) so output is fully controlled here.
logger.remove(0)
# Re-emit to stdout at the level configured via the LOG_LEVEL env var (default INFO).
logger.add(sys.stdout, level=env_vars.get("LOG_LEVEL", "INFO"))
|
main.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio as aio
import os

from logger import logger
from bot import bot, manga_updater, chapter_creation
from models import DB


async def async_main():
    """Initialise the (singleton) database wrapper and create missing tables."""
    db = DB()
    await db.connect()

if __name__ == '__main__':
    # Pyrogram's bot.run() drives this same loop, so the tasks scheduled
    # below only start executing once the bot begins processing updates.
    loop = aio.get_event_loop_policy().get_event_loop()
    loop.run_until_complete(async_main())
    # Periodic scan for new chapters across all subscriptions.
    loop.create_task(manga_updater())
    # Pool of 10 workers that convert and deliver queued chapters.
    for i in range(10):
        loop.create_task(chapter_creation(i + 1))
    bot.run()
|
models/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
from .db import DB, ChapterFile, Subscription, LastChapter, MangaName
|
models/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
models/db.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Type, List, TypeVar, Optional
|
| 3 |
+
|
| 4 |
+
from sqlalchemy.ext.asyncio import create_async_engine
|
| 5 |
+
from sqlmodel import SQLModel, Field, Session, select, delete
|
| 6 |
+
from sqlmodel.ext.asyncio.session import AsyncSession
|
| 7 |
+
|
| 8 |
+
from tools import LanguageSingleton
|
| 9 |
+
|
| 10 |
+
T = TypeVar("T")
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ChapterFile(SQLModel, table=True):
    """Cache of Telegram uploads / Telegraph page already generated for a chapter URL."""
    url: str = Field(primary_key=True)
    # PDF upload: file_id is reusable for re-sending, unique_id for lookups.
    file_id: Optional[str]
    file_unique_id: Optional[str]
    # CBZ upload of the same chapter.
    cbz_id: Optional[str]
    cbz_unique_id: Optional[str]
    # Telegraph mirror page, when one was created.
    telegraph_url: Optional[str]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MangaOutput(SQLModel, table=True):
    """Output-format selection stored per user (value semantics defined by the bot layer)."""
    user_id: str = Field(primary_key=True, regex=r'\d+')
    # Fix: `Field` was referenced without being called, making the *function
    # object* the column default. `Field()` declares a plain required column,
    # matching the obvious intent.
    output: int = Field()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Subscription(SQLModel, table=True):
    """A user's subscription to updates for one manga URL (composite primary key)."""
    url: str = Field(primary_key=True)
    user_id: str = Field(primary_key=True, regex=r'\d+')
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class LastChapter(SQLModel, table=True):
    """Most recent chapter URL seen for a tracked manga URL."""
    url: str = Field(primary_key=True)
    # Fix: `Field` was referenced without being called, making the function
    # object the column default; `Field()` declares a required column.
    chapter_url: str = Field()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class MangaName(SQLModel, table=True):
    """Human-readable name cached for a manga URL."""
    url: str = Field(primary_key=True)
    # Fix: `Field` was referenced without being called, making the function
    # object the column default; `Field()` declares a required column.
    name: str = Field()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class DB(metaclass=LanguageSingleton):
    """Thin async persistence layer over SQLModel/SQLAlchemy.

    LanguageSingleton makes repeated `DB()` constructions share one
    instance (and therefore one engine) per process.
    """

    def __init__(self, dbname: str = 'sqlite+aiosqlite:///test.db'):
        # Normalise the URL scheme so SQLAlchemy selects an *async* driver:
        # Heroku-style postgres URLs -> asyncpg, plain sqlite -> aiosqlite.
        if dbname.startswith('postgres://'):
            dbname = dbname.replace('postgres://', 'postgresql+asyncpg://', 1)
        elif dbname.startswith('postgresql://'):
            dbname = dbname.replace('postgresql://', 'postgresql+asyncpg://', 1)
        elif dbname.startswith('sqlite'):
            dbname = dbname.replace('sqlite', 'sqlite+aiosqlite', 1)

        self.engine = create_async_engine(dbname)

    async def connect(self):
        """Create all declared tables that do not exist yet."""
        async with self.engine.begin() as conn:
            await conn.run_sync(SQLModel.metadata.create_all, checkfirst=True)

    async def add(self, other: SQLModel):
        """Insert a single model instance inside its own transaction."""
        async with AsyncSession(self.engine) as session:  # type: AsyncSession
            async with session.begin():
                session.add(other)

    async def get(self, table: Type[T], id) -> T:
        """Fetch one row by primary key; returns None when absent."""
        async with AsyncSession(self.engine) as session:  # type: AsyncSession
            return await session.get(table, id)

    async def get_all(self, table: Type[T]) -> List[T]:
        """Select every row of *table*.

        NOTE(review): despite the List annotation this returns the raw
        result of `session.exec`, not a list — confirm callers iterate it
        or call `.all()`.
        """
        async with AsyncSession(self.engine) as session:  # type: AsyncSession
            statement = select(table)
            return await session.exec(statement=statement)

    async def erase(self, other: SQLModel):
        """Delete a previously-fetched model instance inside its own transaction."""
        async with AsyncSession(self.engine) as session:  # type: AsyncSession
            async with session.begin():
                await session.delete(other)

    async def get_chapter_file_by_id(self, id: str):
        """Look up a cached chapter by any of its Telegram/Telegraph identifiers."""
        async with AsyncSession(self.engine) as session:  # type: AsyncSession
            statement = select(ChapterFile).where((ChapterFile.file_unique_id == id) |
                                                  (ChapterFile.cbz_unique_id == id) |
                                                  (ChapterFile.telegraph_url == id))
            return (await session.exec(statement=statement)).first()

    async def get_subs(self, user_id: str, filters=None) -> List[MangaName]:
        """Return the mangas a user is subscribed to, optionally filtered.

        Each entry in *filters* must match the manga name or its URL
        (case-insensitive substring); multiple filters are ANDed.
        """
        async with AsyncSession(self.engine) as session:
            statement = (
                select(MangaName)
                .join(Subscription, Subscription.url == MangaName.url)
                .where(Subscription.user_id == user_id)
            )
            for filter_ in filters or []:
                statement = statement.where(MangaName.name.ilike(f'%{filter_}%') | MangaName.url.ilike(f'%{filter_}%'))
            return (await session.exec(statement=statement)).all()

    async def erase_subs(self, user_id: str):
        """Remove every subscription belonging to *user_id*."""
        async with AsyncSession(self.engine) as session:
            async with session.begin():
                statement = delete(Subscription).where(Subscription.user_id == user_id)
                await session.exec(statement=statement)
|
| 100 |
+
|
pagination.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
|
| 3 |
+
from pyrogram.types import Message
|
| 4 |
+
|
| 5 |
+
from plugins import MangaCard
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Pagination:
    """Mutable state of one paginated chapter-list message."""

    # Class-wide counter used to hand out unique pagination ids.
    pagination_id: int = 0

    def __init__(self):
        # Take the next id and bump the shared counter.
        self.id = Pagination.pagination_id
        Pagination.pagination_id += 1
        self.page = 1
        self.message: Message = None
        self.manga: MangaCard = None
|
plugins/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .client import MangaClient, MangaCard, MangaChapter
|
| 2 |
+
from .manhuako import ManhuaKoClient
|
| 3 |
+
from .manhuaplus import ManhuaPlusClient
|
| 4 |
+
from .tmo import TMOClient
|
| 5 |
+
from .mangadex import MangaDexClient
|
| 6 |
+
from .mangasee import MangaSeeClient
|
| 7 |
+
from .mangasin import MangasInClient
|
| 8 |
+
from .mcreader import McReaderClient
|
| 9 |
+
from .mangakakalot import MangaKakalotClient
|
| 10 |
+
from .manganelo import ManganeloClient
|
| 11 |
+
from .manganato import ManganatoClient
|
| 12 |
+
from .kissmanga import KissMangaClient
|
| 13 |
+
from .mangatigre import MangatigreClient
|
| 14 |
+
from .mangahasu import MangaHasuClient
|
| 15 |
+
from .mangabuddy import MangaBuddyClient
|
| 16 |
+
from .asurascans import AsuraScansClient
|
| 17 |
+
from .ninemanga import NineMangaClient
|
| 18 |
+
from .manhwa18 import Manhwa18Client
|
plugins/asurascans.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, AsyncIterable
|
| 2 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
|
| 6 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class AsuraScansClient(MangaClient):
    """Scraper client for asuracomic.net."""

    base_url = urlparse("https://asuracomic.net/")
    search_url = base_url.geturl()
    search_param = 's'
    updates_url = base_url.geturl()

    # Desktop-browser UA; the site serves different/blocked markup otherwise — TODO confirm.
    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="AsuraScans", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse a search-results page into MangaCard objects.

        NOTE(review): the name/url comprehensions iterate over `container`
        (the grid div's children) while `images` iterates over `cards`; if
        those sequences differ in length, zip() silently truncates — verify
        against the live markup.
        """
        bs = BeautifulSoup(page, "html.parser")

        container = bs.find("div", {"class": "grid grid-cols-2 sm:grid-cols-2 md:grid-cols-5 gap-3 p-4"})

        cards = container.find_all("div", {"class": "flex h-[250px] md:h-[200px] overflow-hidden relative hover:opacity-60"})

        names = [containers.findChild('span', {'class': 'block text-[13.3px] font-bold'}).string.strip() for containers in container]
        l = "https://asuracomic.net/"
        url = [l + containers.get("href") for containers in container]
        images = [card.findNext("img").get("src") for card in cards]

        mangas = [MangaCard(self, *tup) for tup in zip(names, url, images)]

        return mangas

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse a series page into MangaChapter objects (pictures filled later).

        Chapter labels are built from the 7th path segment of each chapter
        link — assumes /series/<slug>/chapter/<n>-style URLs, TODO confirm.
        """
        bs = BeautifulSoup(page, "html.parser")

        li = bs.findAll("a", {"class": "block visited:text-themecolor"})

        a = "https://asuracomic.net/series/"
        links = [a + containers.get("href") for containers in li]
        b = "Ch : "
        texts = [b + (sub.split('/')[6]) for sub in links]

        return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))

    def updates_from_page(self, content):
        """Map manga URL -> latest chapter URL scraped from the home page."""
        bs = BeautifulSoup(content, "html.parser")

        manga_items = bs.find_all("span", {"class": "text-[15px] font-medium hover:text-themecolor hover:cursor-pointer"})

        urls = dict()

        for manga_item in manga_items:
            manga_url = urljoin(self.base_url.geturl(), manga_item.findNext("a").get("href"))

            # Only the first (newest) chapter entry per manga is kept.
            if manga_url in urls:
                continue

            chapter_url = urljoin(self.base_url.geturl(), manga_item.findNext("span").findNext("a").get("href"))

            urls[manga_url] = chapter_url

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Extract page-image URLs from a chapter page (percent-encoded for safety)."""
        bs = BeautifulSoup(content, "html.parser")

        container = bs.find("div", {"class": "py-8 -mx-5 md:mx-0 flex flex-col items-center justify-center"})

        images_url = [quote(containers.findNext("img").get("src"), safe=':/%') for containers in container]

        return images_url

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search the catalogue; an empty query fetches the site root."""
        query = quote_plus(query)

        request_url = self.search_url

        if query:
            request_url += f'series?page=1&name={query}'

        content = await self.get_url(request_url)

        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of the chapter list."""
        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter of a manga in the order the site lists them."""
        manga_card = MangaCard(self, manga_name, manga_url, '')

        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        for chapter in self.chapters_from_page(content, manga_card):
            yield chapter

    async def contains_url(self, url: str):
        """True when *url* belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked mangas into (updated, not_updated) using the home page."""
        content = await self.get_url(self.updates_url)

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters if
                       not updates.get(lc.url) or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated
|
plugins/client.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from abc import abstractmethod, ABC
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import List, AsyncIterable
|
| 5 |
+
|
| 6 |
+
from aiohttp import ClientSession
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
from models import LastChapter
|
| 10 |
+
from tools import LanguageSingleton
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@dataclass
class MangaCard:
    """A manga series as presented by one scraper client."""
    client: "MangaClient"
    name: str
    url: str
    picture_url: str

    def get_url(self):
        """Series page URL."""
        return self.url

    def unique(self):
        """Stable per-process identifier derived from the series URL."""
        url_hash = hash(self.url)
        return str(url_hash)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@dataclass
class MangaChapter:
    """A single chapter belonging to a MangaCard."""
    client: "MangaClient"
    name: str
    url: str
    manga: MangaCard
    pictures: List[str]  # page image URLs; may start empty and be filled lazily

    def get_url(self):
        """Chapter page URL."""
        return self.url

    def unique(self):
        """Stable per-process identifier derived from the chapter URL."""
        return str(hash(self.url))
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def clean(name, length=-1):
    """Sanitise a manga/chapter name for use as a path component.

    Collapses runs of spaces to a single space, strips ':' (illegal in
    Windows paths), and truncates to *length* characters when a
    non-negative length is given.
    """
    # Fix: the loop must target the two-character double-space string;
    # replacing a single space with itself never changes the string, so
    # `while ' ' in name` looped forever for any name containing a space.
    while '  ' in name:
        name = name.replace('  ', ' ')
    name = name.replace(':', '')
    if length != -1:
        name = name[:length]
    return name
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class MangaClient(ClientSession, metaclass=LanguageSingleton):
    """Abstract base for site scrapers; each instance *is* the aiohttp session it fetches with."""

    def __init__(self, *args, name="client", **kwargs):
        # Subclasses must supply their own name; instantiating the base directly is an error.
        if name == "client":
            raise NotImplementedError
        super().__init__(*args, **kwargs)
        self.name = name

    async def get_url(self, url, *args, file_name=None, cache=False, req_content=True, method='get', data=None,
                      **kwargs):
        """Fetch *url*, optionally caching the body under cache/<client>/<file_name>.

        Returns the body bytes when req_content is true, otherwise the
        response object (or the cache-hit placeholder below).
        """
        # Placeholder standing in for a real response on a cache hit; only
        # its .status attribute is ever inspected by callers.
        def response():
            pass

        response.status = "200"
        if cache:
            path = Path(f'cache/{self.name}/{file_name}')
            os.makedirs(path.parent, exist_ok=True)
            try:
                with open(path, 'rb') as f:
                    content = f.read()
            except FileNotFoundError:
                # Cache miss: fetch and persist the body on a 2xx status.
                if method == 'get':
                    response = await self.get(url, *args, **kwargs)
                elif method == 'post':
                    response = await self.post(url, data=data or {}, **kwargs)
                else:
                    raise ValueError
                if str(response.status).startswith('2'):
                    content = await response.read()
                    with open(path, 'wb') as f:
                        f.write(content)
                # NOTE(review): on a non-2xx cache miss `content` is left
                # unbound and the final `return content` would raise
                # UnboundLocalError — confirm whether callers rely on
                # req_content=False in that path.
        else:
            if method == 'get':
                response = await self.get(url, *args, **kwargs)
            elif method == 'post':
                response = await self.post(url, data=data or {}, **kwargs)
            else:
                raise ValueError
            content = await response.read()
        if req_content:
            return content
        else:
            return response

    async def set_pictures(self, manga_chapter: MangaChapter):
        """Populate manga_chapter.pictures by fetching and parsing its page."""
        requests_url = manga_chapter.url

        # Set manga url as the referer if there is one (some sites require it).
        headers = {**self.headers}
        if manga_chapter.manga:
            headers['referer'] = manga_chapter.manga.url

        response = await self.get(requests_url, headers=headers)

        content = await response.read()

        manga_chapter.pictures = await self.pictures_from_chapters(content, response)

        return manga_chapter

    async def download_pictures(self, manga_chapter: MangaChapter):
        """Download every page image into the cache; return the chapter folder path."""
        if not manga_chapter.pictures:
            await self.set_pictures(manga_chapter)

        folder_name = f'{clean(manga_chapter.manga.name)}/{clean(manga_chapter.name)}'
        i = 0
        for picture in manga_chapter.pictures:
            # Extension from the URL path, ignoring any query string.
            ext = picture.split('.')[-1].split('?')[0].lower()
            file_name = f'{folder_name}/{format(i, "05d")}.{ext}'
            # Up to three attempts per image; for/else raises when all fail.
            for _ in range(3):
                req = await self.get_picture(manga_chapter, picture, file_name=file_name, cache=True,
                                             req_content=False)
                if str(req.status).startswith('2'):
                    break
            else:
                raise ValueError
            i += 1

        return Path(f'cache/{manga_chapter.client.name}') / folder_name

    async def get_picture(self, manga_chapter: MangaChapter, url, *args, **kwargs):
        """Fetch one page image; subclasses may override to add per-site headers."""
        return await self.get_url(url, *args, **kwargs)

    async def get_cover(self, manga_card: MangaCard, *args, **kwargs):
        """Fetch the series cover image."""
        return await self.get_url(manga_card.picture_url, *args, **kwargs)

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Default update check: treat every tracked manga as updated."""
        return [lc.url for lc in last_chapters], []

    @abstractmethod
    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        raise NotImplementedError

    @abstractmethod
    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        raise NotImplementedError

    @abstractmethod
    async def contains_url(self, url: str):
        raise NotImplementedError

    @abstractmethod
    async def iter_chapters(self, manga_url: str, manga_name: str) -> AsyncIterable[MangaChapter]:
        raise NotImplementedError

    @abstractmethod
    async def pictures_from_chapters(self, content: bytes, response=None):
        raise NotImplementedError
|
plugins/codeflix
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
plugins/kissmanga.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, AsyncIterable
|
| 2 |
+
from urllib.parse import urlparse, urljoin, quote_plus
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
|
| 6 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class KissMangaClient(MangaClient):
|
| 10 |
+
|
| 11 |
+
base_url = urlparse("http://kissmanga.nl/")
|
| 12 |
+
search_url = urljoin(base_url.geturl(), "search")
|
| 13 |
+
search_param = 'q'
|
| 14 |
+
|
| 15 |
+
pre_headers = {
|
| 16 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
def __init__(self, *args, name="KissManga", **kwargs):
|
| 20 |
+
super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)
|
| 21 |
+
|
| 22 |
+
def mangas_from_page(self, page: bytes):
|
| 23 |
+
bs = BeautifulSoup(page, "html.parser")
|
| 24 |
+
|
| 25 |
+
cards = bs.findAll("div", {"class": "mainpage-manga"})
|
| 26 |
+
|
| 27 |
+
mangas = [card.findNext('div', {"class": "media-body"}).findNext("a") for card in cards]
|
| 28 |
+
names = [manga.get('title') for manga in mangas]
|
| 29 |
+
url = [manga.get('href') for manga in mangas]
|
| 30 |
+
|
| 31 |
+
images = [card.findNext('img').get('src') for card in cards]
|
| 32 |
+
|
| 33 |
+
mangas = [MangaCard(self, *tup) for tup in zip(names, url, images)]
|
| 34 |
+
|
| 35 |
+
return mangas
|
| 36 |
+
|
| 37 |
+
def chapters_from_page(self, page: bytes, manga: MangaCard = None):
|
| 38 |
+
bs = BeautifulSoup(page, "html.parser")
|
| 39 |
+
|
| 40 |
+
ul = bs.findAll("div", {"class": "chapter-list"})[1]
|
| 41 |
+
|
| 42 |
+
lis = ul.findAll("h4")
|
| 43 |
+
|
| 44 |
+
items = [li.findNext('a') for li in lis]
|
| 45 |
+
|
| 46 |
+
links = [item.get('href') for item in items]
|
| 47 |
+
texts: List[str] = [item.get('title').strip() for item in items]
|
| 48 |
+
|
| 49 |
+
texts = [(text if not text.startswith(manga.name) else text[len(manga.name):].strip()) for text in texts]
|
| 50 |
+
|
| 51 |
+
return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))
|
| 52 |
+
|
| 53 |
+
@staticmethod
|
| 54 |
+
def updates_from_page(content):
|
| 55 |
+
bs = BeautifulSoup(content, "html.parser")
|
| 56 |
+
|
| 57 |
+
manga_items = bs.find_all("div", {"class": "media-body"})
|
| 58 |
+
|
| 59 |
+
urls = dict()
|
| 60 |
+
|
| 61 |
+
for manga_item in manga_items:
|
| 62 |
+
manga_url = manga_item.findNext('a').get('href')
|
| 63 |
+
|
| 64 |
+
if manga_url in urls:
|
| 65 |
+
continue
|
| 66 |
+
|
| 67 |
+
chapter_url = manga_item.findNext('a', {'class': 'xanh'}).get('href')
|
| 68 |
+
|
| 69 |
+
urls[manga_url] = chapter_url
|
| 70 |
+
|
| 71 |
+
return urls
|
| 72 |
+
|
| 73 |
+
async def pictures_from_chapters(self, content: bytes, response=None):
|
| 74 |
+
bs = BeautifulSoup(content, "html.parser")
|
| 75 |
+
|
| 76 |
+
ul = bs.find("p", {"id": "arraydata"})
|
| 77 |
+
|
| 78 |
+
images_url = ul.text.split(',')
|
| 79 |
+
|
| 80 |
+
return images_url
|
| 81 |
+
|
| 82 |
+
async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
|
| 83 |
+
query = quote_plus(query)
|
| 84 |
+
|
| 85 |
+
request_url = f'{self.search_url}'
|
| 86 |
+
|
| 87 |
+
if query:
|
| 88 |
+
request_url += f'?{self.search_param}={query}'
|
| 89 |
+
|
| 90 |
+
content = await self.get_url(request_url)
|
| 91 |
+
|
| 92 |
+
return self.mangas_from_page(content)
|
| 93 |
+
|
| 94 |
+
async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
|
| 95 |
+
|
| 96 |
+
request_url = f'{manga_card.url}'
|
| 97 |
+
|
| 98 |
+
content = await self.get_url(request_url)
|
| 99 |
+
|
| 100 |
+
return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]
|
| 101 |
+
|
| 102 |
+
async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
    """Yield every chapter listed on the manga's page, in page order."""
    card = MangaCard(self, manga_name, manga_url, '')

    page_content = await self.get_url(card.url)

    for chapter in self.chapters_from_page(page_content, card):
        yield chapter
|
| 111 |
+
|
| 112 |
+
async def contains_url(self, url: str):
    """Return True when *url* belongs to this site (shares its base URL prefix)."""
    site_root = self.base_url.geturl()
    return url.startswith(site_root)
|
| 114 |
+
|
| 115 |
+
async def check_updated_urls(self, last_chapters: List[LastChapter]):
    """Split tracked mangas into (updated, not_updated) URL lists.

    Fetches the site's front page, extracts the latest chapter per manga,
    and flags a manga as updated when the page shows a chapter URL
    different from the one recorded in its ``LastChapter``.
    """
    front_page = await self.get_url(self.base_url.geturl())

    latest = self.updates_from_page(front_page)

    updated = []
    not_updated = []
    for lc in last_chapters:
        newest = latest.get(lc.url)
        # Unknown manga or unchanged chapter URL -> not updated.
        if newest and newest != lc.chapter_url:
            updated.append(lc.url)
        else:
            not_updated.append(lc.url)

    return updated, not_updated
|
plugins/mangabuddy.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import List, AsyncIterable
|
| 4 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 5 |
+
|
| 6 |
+
from bs4 import BeautifulSoup
|
| 7 |
+
|
| 8 |
+
from models import LastChapter
|
| 9 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass
class MangaBuddyCard(MangaCard):
    """MangaCard variant for MangaBuddy.

    The inherited ``url`` field holds the JSON chapter-list API endpoint
    (see ``MangaBuddyClient.mangas_from_page``), while ``read_url`` is the
    human-facing reader page, so ``get_url`` returns the latter.
    """

    # Browsable reader-page URL (the API endpoint lives in ``url``).
    read_url: str

    def get_url(self):
        """Return the reader-page URL rather than the API endpoint."""
        return self.read_url
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class MangaBuddyClient(MangaClient):
    """Scraper client for mangabuddy.com.

    Manga cards produced by this client store the chapter-list JSON API
    endpoint in ``url`` and the human-facing page in ``read_url`` (see
    ``MangaBuddyCard``).  Image downloads require a Referer header pointing
    at the site, hence the ``get_cover``/``get_picture`` overrides.
    """

    base_url = urlparse("https://mangabuddy.com/")
    search_url = urljoin(base_url.geturl(), "search")
    search_param = 'q'
    # Front page used by check_updated_urls to detect new chapters.
    home_page = urljoin(base_url.geturl(), "home-page")
    # CDN root for manga images; not referenced in this class's visible code.
    img_server = "https://s1.mbbcdnv1.xyz/file/img-mbuddy/manga/"

    # Desktop-browser User-Agent; the site may reject default HTTP clients.
    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="MangaBuddy", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse a search/listing page into ``MangaBuddyCard`` objects."""
        bs = BeautifulSoup(page, "html.parser")

        cards = bs.find_all("div", {"class": "book-item"})

        # Some book-items may lack an anchor; skip them.
        mangas = [card.a for card in cards if card.a is not None]
        names = [manga.get("title").strip() for manga in mangas]
        # Human-facing manga page.
        read_url = [urljoin(self.base_url.geturl(), manga.get('href').strip()) for manga in mangas]
        # JSON API endpoint listing the manga's chapters; stored as the card's main url.
        url = [f'https://mangabuddy.com/api/manga{manga.get("href").strip()}/chapters?source=detail' for manga in mangas]
        # Covers are lazy-loaded, so the real source is in data-src.
        images = [manga.find("img").get('data-src').strip() for manga in mangas]

        mangas = [MangaBuddyCard(self, *tup) for tup in zip(names, url, images, read_url)]

        return mangas

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse the chapter-list markup (from the API endpoint) into MangaChapter objects."""
        bs = BeautifulSoup(page, "html.parser")

        ul = bs.find('ul', {'id': 'chapter-list'})

        lis = ul.findAll('li')
        a_elems = [li.find('a') for li in lis]

        links = [urljoin(self.base_url.geturl(), a.get('href')) for a in a_elems]
        texts = [a.findNext('strong', {'class': 'chapter-title'}).text.strip() for a in a_elems]

        return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))

    def updates_from_page(self, page: bytes):
        """Parse the home page into a {manga_api_url: latest_chapter_url} map.

        Keys use the same API-endpoint form as ``mangas_from_page`` so they
        can be matched against stored ``LastChapter.url`` values.
        """
        bs = BeautifulSoup(page, "html.parser")

        div = bs.find('div', {'class': 'container__left'})

        manga_items = div.findAll('div', {'class': 'book-item'})

        urls = dict()

        for manga_item in manga_items:

            manga_url_part = manga_item.findNext('a').get('href')
            manga_url = f'https://mangabuddy.com/api/manga{manga_url_part}/chapters?source=detail'

            chapter_item = manga_item.findNext("div", {"class": "chap-item"})
            # Skip items with no chapter link at all.
            if not chapter_item or not chapter_item.a:
                continue
            chapter_url = urljoin(self.base_url.geturl(), chapter_item.a.get('href'))

            # Keep only the first (top-most) chapter seen per manga.
            if manga_url not in urls:
                urls[manga_url] = chapter_url

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Extract page-image URLs from the chapter page's inline JS.

        The page defines ``var chapImages = '<url>,<url>,...'``; pull that
        string out with a regex and split on commas.
        """
        regex = rb"var chapImages = '(.*)'"

        imgs = re.findall(regex, content)[0].decode().split(',')

        images_url = [img for img in imgs]

        return images_url

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search the site; results are paginated client-side, 20 per page."""
        request_url = self.search_url

        if query:
            request_url = f'{request_url}?{self.search_param}={quote_plus(query)}'

        content = await self.get_url(request_url)

        return self.mangas_from_page(content)[(page - 1) * 20:page * 20]

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of chapters from the manga's API endpoint."""
        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter listed at *manga_url* (the API endpoint)."""
        manga_card = MangaCard(self, manga_name, manga_url, '')

        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        for chapter in self.chapters_from_page(content, manga_card):
            yield chapter

    async def contains_url(self, url: str):
        """True when *url* belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked mangas into (updated, not_updated) URL lists using the home page."""
        content = await self.get_url(self.home_page)

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url)
                       or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated

    async def get_cover(self, manga_card: MangaCard, *args, **kwargs):
        # Cover CDN requires a site Referer or it refuses the request.
        headers = {**self.pre_headers, 'Referer': self.base_url.geturl()}
        return await super(MangaBuddyClient, self).get_cover(manga_card, *args, headers=headers, **kwargs)

    async def get_picture(self, manga_chapter: MangaChapter, url, *args, **kwargs):
        # Page-image CDN requires a site Referer or it refuses the request.
        headers = {**self.pre_headers, 'Referer': self.base_url.geturl()}
        return await super(MangaBuddyClient, self).get_picture(manga_chapter, url, *args, headers=headers, **kwargs)
|
plugins/mangadex.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import List, AsyncIterable
|
| 4 |
+
from urllib.parse import urlparse, urljoin, quote
|
| 5 |
+
|
| 6 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@dataclass
class MangaDexMangaCard(MangaCard):
    """MangaCard variant that carries the MangaDex manga UUID.

    The inherited ``url`` field holds the API feed URL (see
    ``MangaDexClient.mangas_from_page``); ``get_url`` builds the public
    title page from the UUID instead.
    """

    # MangaDex manga UUID.
    id: str

    def get_url(self):
        """Return the public mangadex.org title page for this manga."""
        return f"https://mangadex.org/title/{self.id}"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
class MangaDexMangaChapter(MangaChapter):
    """MangaChapter variant that carries the MangaDex chapter UUID.

    The inherited ``url`` points at the at-home server API endpoint;
    ``get_url`` builds the public reader page from the UUID instead.
    """

    # MangaDex chapter UUID.
    id: str

    def get_url(self):
        """Return the public mangadex.org reader page for this chapter."""
        return f"https://mangadex.org/chapter/{self.id}"
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class MangaDexClient(MangaClient):
    """Client for the MangaDex JSON API (api.mangadex.org).

    Unlike the HTML-scraping clients, every request here returns JSON.
    One client instance serves one language set; the instance name is
    suffixed with the primary language code (e.g. ``MangaDex-en``).
    """

    base_url = urlparse("https://api.mangadex.org/")
    search_url = urljoin(base_url.geturl(), "manga")
    search_param = 'q'
    # Feed of the most recently readable chapters, newest first; used by
    # check_updated_urls.  Language filter is appended per-instance.
    latest_uploads = 'https://api.mangadex.org/chapter?limit=32&offset=0&includes[]=manga&contentRating[]=safe&contentRating[]=suggestive&contentRating[]=erotica&order[readableAt]=desc'

    covers_url = urlparse("https://uploads.mangadex.org/covers")

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="MangaDex", language=None, **kwargs):
        """Create a client for the given translated-language codes.

        ``language`` is a tuple of MangaDex language codes; defaults to
        ``("en",)``.  The first code also becomes part of the client name.
        """
        if language is None:
            language = ("en",)
        super().__init__(*args, name=f'{name}-{language[0]}', headers=self.pre_headers, **kwargs)
        self.languages = language
        # Query-string fragment selecting the wanted translations.
        self.language_param = '&'.join(f'translatedLanguage[]={lang}' for lang in self.languages)

    def mangas_from_page(self, page: bytes):
        """Parse a /manga search response into ``MangaDexMangaCard`` objects."""
        dt = json.loads(page.decode())

        cards = dt['data']

        # Titles are keyed by language; take the first available one.
        names = [list(card['attributes']['title'].values())[0] for card in cards]
        ids = [card["id"] for card in cards]

        # The card's main url is the chapter feed, pre-filtered by language.
        url = [f'https://api.mangadex.org/manga/{card["id"]}/feed?{self.language_param}' for card in cards]

        def cover_filename(relationships):
            # First cover_art relationship; raises IndexError if absent,
            # matching the original behavior.
            return [rel for rel in relationships if rel['type'] == 'cover_art'][0]['attributes']['fileName']

        images = [f'https://uploads.mangadex.org/covers/{card["id"]}/{cover_filename(card["relationships"])}.512.jpg'
                  for card in cards]

        return [MangaDexMangaCard(self, *tup) for tup in zip(names, url, images, ids)]

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse a manga feed response into ``MangaDexMangaChapter`` objects.

        Multiple scanlations of the same chapter number are collapsed to the
        first one returned by the API.
        """
        dt = json.loads(page.decode())

        dt_chapters = dt['data']

        # Deduplicate by chapter number, keeping API order.
        visited = set()
        chapters = []
        for chapter in dt_chapters:
            if chapter["attributes"]["chapter"] not in visited:
                visited.add(chapter["attributes"]["chapter"])
                chapters.append(chapter)

        def chapter_name(c):
            """'<number> - <title>' when a title exists, else just the number."""
            if c["attributes"]["title"]:
                return f'{c["attributes"]["chapter"]} - {c["attributes"]["title"]}'
            return f'{c["attributes"]["chapter"]}'

        ids = [chapter.get("id") for chapter in chapters]
        # The chapter url is the at-home server endpoint that resolves to image URLs.
        links = [f'https://api.mangadex.org/at-home/server/{chapter.get("id")}?forcePort443=false' for chapter in
                 chapters]
        texts = [chapter_name(chapter) for chapter in chapters]

        return list(map(lambda x: MangaDexMangaChapter(self, x[0], x[1], manga, [], x[2]), zip(texts, links, ids)))

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Build full-quality page-image URLs from an at-home server response."""
        dt = json.loads(content)

        # The API signals failures with result == 'error'; treat as no pages.
        if dt.get('result') == 'error':
            return []

        base_url = dt['baseUrl']
        chapter_hash = dt['chapter']['hash']
        file_names = dt['chapter']['data']

        return [f"{base_url}/data/{chapter_hash}/{file}" for file in file_names]

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search MangaDex by title, 20 results per page (API-side paging)."""
        query = quote(query)

        request_url = f'{self.search_url}?limit=20&offset={(page - 1) * 20}&includes[]=cover_art&includes[]=author&includes[' \
                      f']=artist&contentRating[]=safe&contentRating[]=suggestive&contentRating[' \
                      f']=erotica&title={query}&order[relevance]=desc'

        content = await self.get_url(request_url)

        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1, count: int = 10) -> List[MangaChapter]:
        """Return one API-side page of the manga's chapter feed, newest first."""
        request_url = f'{manga_card.url}' \
                      f'&limit={count}&offset={(page - 1) * count}&includes[' \
                      f']=scanlation_group&includes[]=user&order[volume]=desc&order[' \
                      f'chapter]=desc&contentRating[]=safe&contentRating[]=suggestive&contentRating[' \
                      f']=erotica&contentRating[]=pornographic'

        content = await self.get_url(request_url)

        return self.chapters_from_page(content, manga_card)

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield all chapters of the manga, paging through the feed 500 at a time."""
        manga = MangaCard(self, manga_name, manga_url, '')
        page = 1
        while True:
            chapters = await self.get_chapters(manga_card=manga, page=page, count=500)
            if not chapters:
                break
            for chapter in chapters:
                yield chapter
            page += 1

    async def contains_url(self, url: str):
        """True for API URLs produced by THIS client (matching its language filter)."""
        return url.startswith(self.base_url.geturl()) and url.endswith(self.language_param)

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked mangas into (updated, not_updated) URL lists.

        Fetches the latest-uploads feed, maps each manga UUID to its newest
        chapter UUID, then matches UUIDs by substring against the stored
        feed/chapter URLs.
        """
        content = await self.get_url(f'{self.latest_uploads}&{self.language_param}')

        data = json.loads(content)['data']

        updates = {}
        for item in data:
            ch_id = item['id']
            manga_id = None
            for rel in item['relationships']:
                if rel['type'] == 'manga':
                    manga_id = rel['id']
                    break  # a chapter has one parent manga relationship
            # Feed is newest-first, so keep only the first chapter per manga.
            if manga_id and manga_id not in updates:
                updates[manga_id] = ch_id

        updated = []
        not_updated = []

        for lc in last_chapters:
            upd = False
            for manga_id, ch_id in updates.items():
                # The manga UUID appears in the stored feed URL; a new
                # chapter UUID absent from the stored chapter URL means
                # there is something new to fetch.
                if manga_id in lc.url and ch_id not in lc.chapter_url:
                    upd = True
                    updated.append(lc.url)
                    break
            if not upd:
                not_updated.append(lc.url)

        return updated, not_updated
|
plugins/mangahasu.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, AsyncIterable
|
| 2 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
|
| 6 |
+
from models import LastChapter
|
| 7 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MangaHasuClient(MangaClient):
    """Scraper client for mangahasu.se.

    Search uses a POST autosearch endpoint that returns an HTML fragment;
    chapter and image extraction scrape the regular site pages.
    """

    base_url = urlparse("https://mangahasu.se/")
    search_url = urljoin(base_url.geturl(), "search/autosearch")
    search_param = 'key'

    # Desktop-browser User-Agent; the site may reject default HTTP clients.
    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="MangaHasu", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse the autosearch HTML fragment into MangaCard objects."""
        bs = BeautifulSoup(page, "html.parser")

        # Last <li> is not a result item (site-specific trailing element).
        cards = bs.find_all("li")[:-1]

        mangas = [card.a for card in cards]
        names = [manga.findNext('p', {'class': 'name'}).text.strip() for manga in mangas]
        url = [manga.get('href').strip() for manga in mangas]
        images = [manga.find("img").get('src').strip() for manga in mangas]

        mangas = [MangaCard(self, *tup) for tup in zip(names, url, images)]

        return mangas

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse a manga page's chapter table into MangaChapter objects."""
        bs = BeautifulSoup(page, "html.parser")

        div = bs.find("div", {"class": "list-chapter"})

        # First <tr> is the table header row.
        lis = div.findAll('tr')[1:]
        a_elems = [li.find('a') for li in lis]

        links = [a.get('href') for a in a_elems]
        # Chapter titles often repeat the manga name as a prefix; strip it.
        texts = [(a.text if not a.text.startswith(manga.name) else a.text[len(manga.name):]).strip() for a in a_elems]

        return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))

    def updates_from_page(self, page: bytes):
        """Parse the front page into a {manga_url: latest_chapter_url} map."""
        bs = BeautifulSoup(page, "html.parser")

        div = bs.find('div', {'class': 'st_content'})

        manga_items = div.find_all('div', {'class': 'info-manga'})

        urls = dict()

        for manga_item in manga_items:

            manga_url = manga_item.findNext('a', {"class": "name-manga"}).get('href')

            chapter_item = manga_item.findNext("a", {"class": "name-chapter"})
            # Skip entries with no chapter link.
            if not chapter_item:
                continue
            chapter_url = chapter_item.get('href')

            # Keep only the first (top-most, presumably newest) chapter per manga.
            if manga_url not in urls:
                urls[manga_url] = chapter_url

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Extract page-image URLs from a chapter page's image container."""
        bs = BeautifulSoup(content, "html.parser")

        div = bs.find('div', {'class': 'img'})

        imgs = div.findAll('img')

        # Percent-encode anything unsafe while keeping URL structure intact.
        images_url = [quote(img.get('src'), safe=':/%') for img in imgs]

        return images_url

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search via the site's POST autosearch endpoint.

        ``page`` is accepted for interface compatibility; the endpoint
        returns a single result set.
        """
        request_url = self.search_url

        data = {
            self.search_param: query
        }

        content = await self.get_url(request_url, data=data, method='post')

        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of the manga's chapter list."""
        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter listed on the manga's page."""
        manga_card = MangaCard(self, manga_name, manga_url, '')

        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        for chapter in self.chapters_from_page(content, manga_card):
            yield chapter

    async def contains_url(self, url: str):
        """True when *url* belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked mangas into (updated, not_updated) URL lists using the front page."""
        content = await self.get_url(self.base_url.geturl())

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url)
                       or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated
|
plugins/mangakakalot.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import List, AsyncIterable
|
| 3 |
+
from urllib.parse import urlparse, urljoin, quote
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
from bs4 import BeautifulSoup
|
| 7 |
+
from bs4.element import PageElement
|
| 8 |
+
|
| 9 |
+
from plugins.manganato import ManganatoClient
|
| 10 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MangaKakalotClient(MangaClient):
    """Scraper client for mangakakalot.com.

    Search hits a JSON endpoint; results hosted on the sister site are
    handed off to ``ManganatoClient`` (see ``mangas_from_page``).
    Image downloads need a site Referer, hence the ``get_picture`` override.
    """

    base_url = urlparse("https://mangakakalot.com/")
    search_url = urljoin(base_url.geturl(), 'home_json_search')
    search_param = 'searchword'

    # Desktop-browser User-Agent; the site may reject default HTTP clients.
    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="MangaKakalot", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse the JSON search response into MangaCard objects.

        Names come back with <span> highlight markup around the matched
        text; it is stripped iteratively.  Results whose story link is not
        on this site are assigned a ManganatoClient instead of ``self``.
        """
        li = json.loads(page)

        pattern = re.compile(r'<span .*?>(.+?)</span>')

        names = []
        for item in li:
            name = item['name']
            # Spans can be nested; keep unwrapping until none remain.
            while '</span>' in name:
                name = re.sub(pattern, r'\1', name)
            names.append(name.title())

        url = [item['story_link'] for item in li]
        images = [item['image'] for item in li]

        # Off-site results (manganato-hosted) get a ManganatoClient.
        mangas = [MangaCard(self if tup[1].startswith(self.base_url.geturl()) else ManganatoClient(), *tup)
                  for tup in zip(names, url, images)]

        return mangas

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse a manga page's chapter list into MangaChapter objects."""
        bs = BeautifulSoup(page, "html.parser")

        ul = bs.find("div", {"class": "chapter-list"})

        lis = ul.findAll("div", {"class": "row"})

        items = [li.findNext('a') for li in lis]

        links = [item.get('href') for item in items]
        texts = [item.string.strip() for item in items]

        return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))

    def updates_from_page(self, page: bytes):
        """Parse the front page into a {manga_url: latest_chapter_url} map."""
        bs = BeautifulSoup(page, "html.parser")

        manga_items: List[PageElement] = bs.find_all("div", {"class": "itemupdate first"})

        urls = dict()

        for manga_item in manga_items:

            manga_url = manga_item.findNext('a').get('href')

            # Site-specific: 'sts sts_1' anchors mark the latest chapter.
            chapter_item = manga_item.findNext("a", {"class": "sts sts_1"})
            if not chapter_item:
                continue
            chapter_url = chapter_item.get('href')

            urls[manga_url] = chapter_url

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Extract page-image URLs from a chapter reader page."""
        bs = BeautifulSoup(content, "html.parser")

        ul = bs.find("div", {"class": "container-chapter-reader"})

        images = ul.find_all('img')

        # Percent-encode anything unsafe while keeping URL structure intact.
        images_url = [quote(img.get('src'), safe=':/%') for img in images]

        return images_url

    async def get_picture(self, manga_chapter: MangaChapter, url, *args, **kwargs):
        # The image CDN requires a site Referer or it refuses the request.
        headers = dict(self.headers)
        headers['Referer'] = self.base_url.geturl()

        return await super(MangaKakalotClient, self).get_picture(manga_chapter, url, headers=headers, *args, **kwargs)

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search via the site's JSON POST endpoint.

        The endpoint expects lowercase, underscore-separated search words.
        ``page`` is accepted for interface compatibility; the endpoint
        returns a single result set.
        """
        query = query.lower().replace(' ', '_')

        request_url = self.search_url

        data = {
            self.search_param: query
        }

        content = await self.get_url(request_url, data=data, method='post')

        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of the manga's chapter list."""
        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter listed on the manga's page."""
        manga_card = MangaCard(self, manga_name, manga_url, '')

        request_url = f'{manga_card.url}'

        content = await self.get_url(request_url)

        for chapter in self.chapters_from_page(content, manga_card):
            yield chapter

    async def contains_url(self, url: str):
        """True when *url* belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked mangas into (updated, not_updated) URL lists using the front page."""
        content = await self.get_url(self.base_url.geturl())

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url)
                       or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated
|
plugins/manganato.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import List, AsyncIterable
|
| 3 |
+
from urllib.parse import urlparse, urljoin, quote
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
from bs4 import BeautifulSoup
|
| 7 |
+
from bs4.element import PageElement
|
| 8 |
+
|
| 9 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ManganatoClient(MangaClient):
|
| 13 |
+
|
| 14 |
+
base_url = urlparse("https://manganato.com/")
|
| 15 |
+
search_url = urljoin(base_url.geturl(), 'getstorysearchjson')
|
| 16 |
+
search_param = 'searchword'
|
| 17 |
+
read_url = 'https://readmanganato.com/'
|
| 18 |
+
chap_url = 'https://chapmanganato.com/'
|
| 19 |
+
|
| 20 |
+
pre_headers = {
|
| 21 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
def __init__(self, *args, name="Manganato", **kwargs):
|
| 25 |
+
super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)
|
| 26 |
+
|
| 27 |
+
def mangas_from_page(self, page: bytes):
|
| 28 |
+
li = json.loads(page)
|
| 29 |
+
|
| 30 |
+
pattern = re.compile(r'<span .*?>(.+?)</span>')
|
| 31 |
+
|
| 32 |
+
items = li['searchlist']
|
| 33 |
+
names = []
|
| 34 |
+
for item in items:
|
| 35 |
+
name = item['name']
|
| 36 |
+
while '</span>' in name:
|
| 37 |
+
name = re.sub(pattern, r'\1', name)
|
| 38 |
+
names.append(name.title())
|
| 39 |
+
|
| 40 |
+
url = [item['url_story'] for item in items]
|
| 41 |
+
images = [item['image'] for item in items]
|
| 42 |
+
|
| 43 |
+
mangas = [MangaCard(self, *tup) for tup in zip(names, url, images)]
|
| 44 |
+
|
| 45 |
+
return mangas
|
| 46 |
+
|
| 47 |
+
def chapters_from_page(self, page: bytes, manga: MangaCard = None):
|
| 48 |
+
bs = BeautifulSoup(page, "html.parser")
|
| 49 |
+
|
| 50 |
+
lis = bs.find_all("li", {"class": "a-h"})
|
| 51 |
+
|
| 52 |
+
items = [li.findNext('a') for li in lis]
|
| 53 |
+
|
| 54 |
+
links = [item.get('href') for item in items]
|
| 55 |
+
texts = [item.string.strip() for item in items]
|
| 56 |
+
|
| 57 |
+
return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))
|
| 58 |
+
|
| 59 |
+
def updates_from_page(self, page: bytes):
|
| 60 |
+
bs = BeautifulSoup(page, "html.parser")
|
| 61 |
+
|
| 62 |
+
manga_items: List[PageElement] = bs.find_all("div", {"class": "content-homepage-item"})
|
| 63 |
+
|
| 64 |
+
urls = dict()
|
| 65 |
+
|
| 66 |
+
for manga_item in manga_items:
|
| 67 |
+
|
| 68 |
+
manga_url = manga_item.findNext('a').get('href')
|
| 69 |
+
|
| 70 |
+
chapter_item = manga_item.findNext("p", {"class": "a-h item-chapter"})
|
| 71 |
+
if not chapter_item:
|
| 72 |
+
continue
|
| 73 |
+
chapter_url = chapter_item.findNext("a").get('href')
|
| 74 |
+
|
| 75 |
+
urls[manga_url] = chapter_url
|
| 76 |
+
|
| 77 |
+
return urls
|
| 78 |
+
|
| 79 |
+
async def pictures_from_chapters(self, content: bytes, response=None):
|
| 80 |
+
bs = BeautifulSoup(content, "html.parser")
|
| 81 |
+
|
| 82 |
+
ul = bs.find("div", {"class": "container-chapter-reader"})
|
| 83 |
+
|
| 84 |
+
images = ul.find_all('img')
|
| 85 |
+
|
| 86 |
+
images_url = [quote(img.get('src'), safe=':/%') for img in images]
|
| 87 |
+
|
| 88 |
+
return images_url
|
| 89 |
+
|
| 90 |
+
async def get_picture(self, manga_chapter: MangaChapter, url, *args, **kwargs):
|
| 91 |
+
pattern = re.compile(r'(.*\.com/)')
|
| 92 |
+
match = re.match(pattern, manga_chapter.url)
|
| 93 |
+
referer = match.group(1)
|
| 94 |
+
|
| 95 |
+
headers = dict(self.headers)
|
| 96 |
+
headers['Referer'] = referer
|
| 97 |
+
|
| 98 |
+
return await super(ManganatoClient, self).get_picture(manga_chapter, url, headers=headers, *args, **kwargs)
|
| 99 |
+
|
| 100 |
+
async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
|
| 101 |
+
query = query.lower().replace(' ', '_')
|
| 102 |
+
|
| 103 |
+
request_url = self.search_url
|
| 104 |
+
|
| 105 |
+
data = {
|
| 106 |
+
self.search_param: query
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
content = await self.get_url(request_url, data=data, method='post')
|
| 110 |
+
|
| 111 |
+
return self.mangas_from_page(content)
|
| 112 |
+
|
| 113 |
+
async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
|
| 114 |
+
|
| 115 |
+
request_url = f'{manga_card.url}'
|
| 116 |
+
|
| 117 |
+
content = await self.get_url(request_url)
|
| 118 |
+
|
| 119 |
+
return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]
|
| 120 |
+
|
| 121 |
+
async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
|
| 122 |
+
manga_card = MangaCard(self, manga_name, manga_url, '')
|
| 123 |
+
|
| 124 |
+
request_url = f'{manga_card.url}'
|
| 125 |
+
|
| 126 |
+
content = await self.get_url(request_url)
|
| 127 |
+
|
| 128 |
+
for chapter in self.chapters_from_page(content, manga_card):
|
| 129 |
+
yield chapter
|
| 130 |
+
|
| 131 |
+
async def contains_url(self, url: str):
|
| 132 |
+
return url.startswith(self.read_url) or url.startswith(self.base_url.geturl()) or url.startswith(self.chap_url)
|
| 133 |
+
|
| 134 |
+
async def check_updated_urls(self, last_chapters: List[LastChapter]):
|
| 135 |
+
|
| 136 |
+
content = await self.get_url(self.base_url.geturl())
|
| 137 |
+
|
| 138 |
+
updates = self.updates_from_page(content)
|
| 139 |
+
|
| 140 |
+
updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
|
| 141 |
+
not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url)
|
| 142 |
+
or updates.get(lc.url) == lc.chapter_url]
|
| 143 |
+
|
| 144 |
+
return updated, not_updated
|
plugins/manganelo.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, AsyncIterable
|
| 2 |
+
from urllib.parse import urlparse, urljoin, quote
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
|
| 6 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ManganeloClient(MangaClient):
    """Scraper client for m.manganelo.com."""

    base_url = urlparse("https://m.manganelo.com/")
    search_url = urljoin(base_url.geturl(), "search/story/")
    updates_url = urljoin(base_url.geturl(), "genre-all-update-latest")
    chapter_url = "https://chapmanganelo.com/"

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="Manganelo", **kwargs):
        """Initialize with Manganelo's default name and browser-like headers."""
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse a search results page into MangaCard objects."""
        soup = BeautifulSoup(page, "html.parser")

        anchors = [card.findNext('a')
                   for card in soup.find_all("div", {"class": "search-story-item"})]

        titles = [anchor.get('title') for anchor in anchors]
        hrefs = [anchor.get("href") for anchor in anchors]
        covers = [anchor.findNext("img").get("src") for anchor in anchors]

        return [MangaCard(self, *tup) for tup in zip(titles, hrefs, covers)]

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Extract the chapter list (title + link) from a manga page."""
        soup = BeautifulSoup(page, "html.parser")

        anchors = [li.findNext('a') for li in soup.find_all("li", {"class": "a-h"})]

        hrefs = [anchor.get("href") for anchor in anchors]
        titles = [anchor.string for anchor in anchors]

        return [MangaChapter(self, title, href, manga, [])
                for title, href in zip(titles, hrefs)]

    def updates_from_page(self, content):
        """Map each manga URL on the latest-updates page to its newest chapter URL."""
        soup = BeautifulSoup(content, "html.parser")

        urls = {}
        for item in soup.find_all("div", {"class": "content-genres-item"}):
            manga_url = item.findNext("a", {"class": "genres-item-img"}).get("href")
            if manga_url in urls:
                # Keep only the first (most recent) chapter per manga.
                continue
            urls[manga_url] = item.findNext('a', {'class': 'genres-item-chap'}).get('href')

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Return the (percent-encoded) page image URLs of a chapter."""
        soup = BeautifulSoup(content, "html.parser")
        reader = soup.find("div", {"class": "container-chapter-reader"})
        return [quote(img.get('src'), safe=':/%') for img in reader.find_all('img')]

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search by title; spaces become underscores in the URL path.

        NOTE(review): ``page`` is accepted but unused, matching the original.
        """
        slug = quote(query.replace(' ', '_').lower())

        request_url = f'{self.search_url}'
        if slug:
            request_url += f'{slug}'

        content = await self.get_url(request_url)
        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of the manga's chapter list."""
        content = await self.get_url(manga_card.url)
        start, stop = (page - 1) * 20, page * 20
        return self.chapters_from_page(content, manga_card)[start:stop]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter of the manga at ``manga_url``."""
        card = MangaCard(self, manga_name, manga_url, '')
        content = await self.get_url(card.url)
        for chapter in self.chapters_from_page(content, card):
            yield chapter

    def get_picture(self, manga_chapter: MangaChapter, url, *args, **kwargs):
        """Fetch a page image with the chapter host as Referer (awaitable)."""
        headers = dict(self.headers)
        headers['Referer'] = self.chapter_url
        return self.get_url(url, headers=headers, *args, **kwargs)

    async def contains_url(self, url: str):
        """Tell whether ``url`` belongs to either of this site's hosts."""
        return url.startswith((self.base_url.geturl(), self.chapter_url))

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked entries into (updated, not_updated) via the updates page."""
        content = await self.get_url(self.updates_url)
        updates = self.updates_from_page(content)

        updated, not_updated = [], []
        for lc in last_chapters:
            latest = updates.get(lc.url)
            if latest and latest != lc.chapter_url:
                updated.append(lc.url)
            else:
                not_updated.append(lc.url)

        return updated, not_updated
|
plugins/mangasee.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import re
|
| 3 |
+
from typing import List, AsyncIterable
|
| 4 |
+
from urllib.parse import urlparse, urljoin, quote_plus
|
| 5 |
+
|
| 6 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 7 |
+
from .search_engine import search
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MangaSeeClient(MangaClient):
    """Scraper client for mangasee123.com.

    The site embeds its data as JavaScript variable assignments
    (``vm.Chapters``, ``vm.IndexName``, ``vm.CurChapter`` …) inside the
    page; they are extracted with regexes and decoded as JSON.
    """

    base_url = urlparse("https://mangasee123.com/")
    search_url = urljoin(base_url.geturl(), "_search.php")
    manga_url = urljoin(base_url.geturl(), "manga")
    chapter_url = urljoin(base_url.geturl(), "read-online")
    cover_url = "https://cover.nep.li/cover"

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="Mangasee", **kwargs):
        """Initialize with Mangasee's default name and browser-like headers."""
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, documents: List):
        """Build MangaCard objects from search-index docs ('s' = title, 'i' = slug)."""
        names = [doc['s'] for doc in documents]
        url = [f"{self.manga_url}/{doc['i']}" for doc in documents]
        images = [f"{self.cover_url}/{doc['i']}.jpg" for doc in documents]

        return [MangaCard(self, *tup) for tup in zip(names, url, images)]

    def chapter_url_encode(self, chapter):
        """Encode a chapter record into the site's chapter URL suffix.

        The raw 'Chapter' value is "<index digit><4-digit number><decimal digit>".
        """
        chapter = chapter['Chapter']
        Index = ""
        t = chapter[0:1]
        if t != '1':
            Index = "-index-" + t
        n = int(chapter[1:-1])
        m = ""
        a = chapter[-1]
        if a != '0':
            m = "." + a
        return "-chapter-" + str(n) + m + Index + ".html"

    def chapter_display(self, chapter):
        """Human-readable chapter number (int, or "<n>.<decimal>" when fractional)."""
        chapter = chapter['Chapter']
        t = int(chapter[1:-1])
        n = chapter[-1]
        return t if n == '0' else str(t) + "." + n

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Extract the chapter list from the vm.Chapters / vm.IndexName JS blobs."""
        text = page.decode()  # decode once instead of per pattern

        # Raw-string patterns with escaped dots: '\s' inside a plain string
        # is an invalid escape (SyntaxWarning on modern CPython) and the
        # unescaped '.' matched any character.
        chap_pat = re.compile(r'vm\.Chapters = ([\s\S]*?);')
        chapters_str_list = chap_pat.findall(text)
        if not chapters_str_list:
            return []

        chapter_list = json.loads(chapters_str_list[0])

        index_pat = re.compile(r'vm\.IndexName = ([\s\S]*?);')
        index_str_list = index_pat.findall(text)
        if not index_str_list:
            return []

        index_str = json.loads(index_str_list[0])

        for ch in chapter_list:
            if not ch.get('Type'):
                ch['Type'] = 'Chapter'

        links = [f"{self.chapter_url}/{index_str}{self.chapter_url_encode(ch)}" for ch in chapter_list]
        texts = [f"{ch.get('Type')} {self.chapter_display(ch)}" for ch in chapter_list]

        return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))

    def updates_from_page(self, page: bytes):
        """Map the 32 most recent manga URLs to their newest chapter URL.

        Returns an empty dict (not a list, as before) on parse failure so
        callers can safely call ``.get()`` on the result.
        """
        chap_pat = re.compile(r'vm\.LatestJSON = (\[[\s\S]*?]);')
        chapters_str_list = chap_pat.findall(page.decode())
        if not chapters_str_list:
            return {}

        chapter_list = json.loads(chapters_str_list[0])

        urls = [f"{self.manga_url}/{ch['IndexName']}" for ch in chapter_list]
        chapter_urls = [f"{self.chapter_url}/{ch['IndexName']}{self.chapter_url_encode(ch)}"
                        for ch in chapter_list]

        return dict(zip(urls[:32], chapter_urls[:32]))

    def chapterImage(self, ChapterString):
        """Chapter number as it appears in page-image file names."""
        Chapter = ChapterString[1:-1]
        Odd = ChapterString[-1]
        if Odd == '0':
            return Chapter
        else:
            return Chapter + "." + Odd

    def pageImage(self, PageString):
        """Zero-pad a page number to three digits."""
        s = "000" + str(PageString)
        return s[-3:]

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Build every page image URL from the vm.CurChapter / vm.CurPathName blobs."""
        text = content.decode()  # decode once instead of per pattern

        chap_pat = re.compile(r'vm\.CurChapter = ([\s\S]*?);')
        chap_str_list = chap_pat.findall(text)
        if not chap_str_list:
            return []

        curChapter = json.loads(chap_str_list[0])

        path_pat = re.compile(r'vm\.CurPathName = ([\s\S]*?);')
        path_str_list = path_pat.findall(text)
        if not path_str_list:
            return []

        curPath = json.loads(path_str_list[0])

        index_pat = re.compile(r'vm\.IndexName = ([\s\S]*?);')
        index_str_list = index_pat.findall(text)
        if not index_str_list:
            return []

        index_str = json.loads(index_str_list[0])

        pages = list(range(1, int(curChapter['Page']) + 1))

        images_url = [
            f"https://{curPath}/manga/{index_str}/{'' if curChapter['Directory'] == '' else curChapter['Directory'] + '/'}{self.chapterImage(curChapter['Chapter'])}-{self.pageImage(page)}.png"
            for page in pages]

        return images_url

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Rank the full search index against ``query``; 20 results per page."""
        def text_from_document(doc) -> str:
            return doc['s'] + ' ' + ' '.join(doc['a'])

        def title_from_document(doc) -> str:
            return doc['i']

        content = await self.get_url(self.search_url, method="post")

        documents = json.loads(content)

        results = search(query, documents, title_from_document, text_from_document)[(page - 1) * 20:page * 20]

        return self.mangas_from_page(results)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of the manga's chapter list."""
        content = await self.get_url(manga_card.url)
        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter of the manga at ``manga_url``."""
        manga_card = MangaCard(self, manga_name, manga_url, '')
        content = await self.get_url(manga_card.url)
        for ch in self.chapters_from_page(content, manga_card):
            yield ch

    async def contains_url(self, url: str):
        """Tell whether ``url`` belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked entries into (updated, not_updated) using the homepage."""
        content = await self.get_url(self.base_url.geturl())

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters
                   if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters
                       if not updates.get(lc.url) or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated
|
plugins/mangasin.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from typing import List, AsyncIterable
|
| 3 |
+
import json
|
| 4 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
|
| 7 |
+
from bs4 import BeautifulSoup
|
| 8 |
+
from bs4.element import PageElement
|
| 9 |
+
|
| 10 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter
|
| 11 |
+
from models import LastChapter
|
| 12 |
+
|
| 13 |
+
@dataclass
class MangaSinMangaCard(MangaCard):
    """MangaCard extended with the site's internal slug used to build URLs."""
    data: str
| 17 |
+
class MangasInClient(MangaClient):
    """Scraper client for mangas.in."""

    base_url = urlparse("https://mangas.in/")
    search_url = urljoin(base_url.geturl(), "search")
    search_param = 'q'

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="MangasIn", **kwargs):
        """Initialize with MangasIn's default name and browser-like headers."""
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def build_cover_url(self, data: str):
        """Cover image URL for a manga slug."""
        return f"{self.base_url.geturl()}uploads/manga/{data}/cover/cover_250x350.jpg"

    def build_manga_url(self, data: str):
        """Manga page URL for a slug."""
        return f"{self.base_url.geturl()}manga/{data}"

    def build_chapter_url(self, manga: MangaSinMangaCard, chapter: str):
        """Chapter page URL for a manga slug plus chapter slug."""
        return f"{self.base_url.geturl()}manga/{manga.data}/{chapter}"

    def build_chapter_name(self, li_tag: PageElement):
        """Compose "<number> - <name>" from a chapter <li> element."""
        # The site wraps the chapter title in custom <eee>/<fff> tags;
        # fall back to <fff> when the <eee> found belongs to a later <li>.
        name_div = li_tag.findNext('eee')
        if not name_div or name_div.findPrevious('li') != li_tag:
            name_div = li_tag.findNext('fff')
        name = name_div.a.text

        number = li_tag.findNext('a').get('data-number')

        return f"{number} - {name}"

    def mangas_from_page(self, page: bytes):
        """Parse the JSON autocomplete payload into MangaSinMangaCard objects."""
        entries = json.loads(page)

        names = [entry['value'] for entry in entries]
        slugs = [entry['data'] for entry in entries]
        urls = [self.build_manga_url(slug) for slug in slugs]
        covers = [self.build_cover_url(slug) for slug in slugs]

        return [MangaSinMangaCard(self, *tup) for tup in zip(names, urls, covers, slugs)]

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Extract chapters from the inline JS variable holding the chapter JSON."""
        regex = rb"var .*? = (.*?);"

        # The chapter list is the longest JS assignment mentioning 'manga_id'.
        candidates = [var for var in re.findall(regex, page, re.DOTALL) if b'manga_id' in var]
        chapters = json.loads(max(candidates, key=len))

        numbers = [ch['number'] for ch in chapters]
        links = [f"{manga.url}/{ch['slug']}" for ch in chapters]

        return [MangaChapter(self, number, link, manga, [])
                for number, link in zip(numbers, links)]

    def updates_from_page(self, page: bytes):
        """Map each manga URL on the homepage to its newest chapter URL."""
        soup = BeautifulSoup(page, "html.parser")

        urls = {}
        for item in soup.find_all("div", {"class": "manga-item"}):
            manga_url = item.findNext('a').findNextSibling('a').get('href')
            chapter_block = item.findNext("div", {"class": "manga-chapter"})
            urls[manga_url] = chapter_block.findNext("a").get('href')

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Return the (percent-encoded) lazily-loaded page image URLs."""
        soup = BeautifulSoup(content, "html.parser")
        container = soup.find("div", {"id": "all"})
        return [quote(img.get('data-src'), safe=':/%') for img in container.find_all('img')]

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search via the autocomplete endpoint; 10 results per page."""
        encoded = quote_plus(query)

        request_url = f'{self.search_url}'
        if encoded:
            request_url += f'?{self.search_param}={encoded}'

        content = await self.get_url(request_url)
        return self.mangas_from_page(content)[(page - 1) * 10:page * 10]

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 10-item page of the manga's chapter list."""
        content = await self.get_url(manga_card.url)
        return self.chapters_from_page(content, manga_card)[(page - 1) * 10:page * 10]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter of the manga at ``manga_url``."""
        card = MangaCard(self, manga_name, manga_url, '')
        content = await self.get_url(card.url)
        for chapter in self.chapters_from_page(content, card):
            yield chapter

    async def contains_url(self, url: str):
        """Tell whether ``url`` belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked entries into (updated, not_updated) using the homepage."""
        content = await self.get_url(self.base_url.geturl())
        updates = self.updates_from_page(content)

        updated, not_updated = [], []
        for lc in last_chapters:
            latest = updates.get(lc.url)
            if latest and latest != lc.chapter_url:
                updated.append(lc.url)
            else:
                not_updated.append(lc.url)

        return updated, not_updated
|
plugins/mangatigre.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import List, AsyncIterable
|
| 3 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 4 |
+
|
| 5 |
+
from aiohttp import ClientResponse
|
| 6 |
+
from bs4 import BeautifulSoup
|
| 7 |
+
from bs4.element import PageElement
|
| 8 |
+
|
| 9 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class MangatigreClient(MangaClient):
    """Scraper client for mangatigre.net (CSRF-token based POST endpoints)."""

    base_url = urlparse("https://www.mangatigre.net/")
    search_url = urljoin(base_url.geturl(), 'mangas/search')
    manga_url = urljoin(base_url.geturl(), 'manga')
    img_url = urlparse("https://i2.mtcdn.xyz/")
    cover_url = urljoin(img_url.geturl(), "mangas")
    search_param = 'query'

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="Mangatigre", **kwargs):
        """Initialize with Mangatigre's default name and browser-like headers."""
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse the JSON search response ('result' list) into MangaCard objects."""
        payload = json.loads(page)
        entries = payload['result']

        names = [entry.get('name') for entry in entries]
        urls = [f"{self.manga_url}/{entry.get('slug')}" for entry in entries]
        covers = [f"{self.cover_url}/{entry.get('image')}" for entry in entries]

        return [MangaCard(self, *tup) for tup in zip(names, urls, covers)]

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Extract chapters from the expanded chapter-list markup."""
        soup = BeautifulSoup(page, "html.parser")

        chapter_list = soup.find('ul', {'class': 'list-unstyled'})
        anchors = [li.findNext('a') for li in chapter_list.find_all("li")]

        links = [anchor.get('href') for anchor in anchors]
        # The title attribute reads "<chapter>: <name>"; keep the chapter part.
        titles = [anchor.get('title').split(':')[0] for anchor in anchors]

        return [MangaChapter(self, title, link, manga, [])
                for title, link in zip(titles, links)]

    def updates_from_page(self, page: bytes):
        """Map each manga URL on the homepage to its newest chapter URL."""
        soup = BeautifulSoup(page, "html.parser")

        urls = {}
        for item in soup.find_all("article", {"class": "chapter-block"}):
            manga_url = item.findNext('a').get('href')
            chapter_block = item.findNext("div", {"class": "chapter"})
            urls[manga_url] = chapter_block.findNext("a").get('href')

        return urls

    async def pictures_from_chapters(self, content: bytes, response: ClientResponse = None):
        """Return page image URLs, switching the reader to long-strip mode first."""
        soup = BeautifulSoup(content, "html.parser")

        # When the "read type 2" toggle is present, POST it so every page
        # image is served in a single document.
        switch_btn = soup.find('button', {'data-read-type': 2})
        if switch_btn:
            payload = {
                '_method': 'patch',
                '_token': switch_btn.get('data-token'),
                'read_type': 2
            }
            content = await self.get_url(f'{response.url}/read-type', data=payload, method='post')
            soup = BeautifulSoup(content, "html.parser")

        zone = soup.find("div", {"class": "display-zone"})

        raw_urls = [f"https:{img.get('data-src') or img.get('src')}" for img in zone.find_all('img')]

        return [quote(raw, safe=':/%') for raw in raw_urls]

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """POST the query with a CSRF token scraped from the home page."""
        main_page = await self.get_url(self.base_url.geturl())

        soup = BeautifulSoup(main_page, "html.parser")
        token = soup.find('div', {'class': 'input-group'}).find('input').get('data-csrf')

        payload = {
            self.search_param: query,
            '_token': token
        }

        content = await self.get_url(self.search_url, data=payload, method='post')

        return self.mangas_from_page(content)[(page - 1) * 20:page * 20]

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Return one 20-item page of the manga's chapter list."""
        chapters = [ch async for ch in self.iter_chapters(manga_card.url, manga_card.name)]
        return chapters[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter; expanding the full list needs a token POST."""
        card = MangaCard(self, manga_name, manga_url, '')

        chapter_page = await self.get_url(card.url)

        soup = BeautifulSoup(chapter_page, "html.parser")
        token = soup.find('button', {'class': 'btn-load-more-chapters'}).get('data-token')

        content = await self.get_url(card.url, data={'_token': token}, method='post')

        for chapter in self.chapters_from_page(content, card):
            yield chapter

    async def contains_url(self, url: str):
        """Tell whether ``url`` belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Split tracked entries into (updated, not_updated) using the homepage."""
        content = await self.get_url(self.base_url.geturl())
        updates = self.updates_from_page(content)

        updated, not_updated = [], []
        for lc in last_chapters:
            latest = updates.get(lc.url)
            if latest and latest != lc.chapter_url:
                updated.append(lc.url)
            else:
                not_updated.append(lc.url)

        return updated, not_updated
|
plugins/manhuako.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, AsyncIterable
|
| 2 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
from bs4.element import PageElement
|
| 6 |
+
|
| 7 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ManhuaKoClient(MangaClient):
    """Scraper client for https://manhuako.com/.

    Implements the MangaClient plugin interface: searching, listing chapters
    (paginated), extracting chapter page images, and detecting updates from
    the site's front page.
    """

    base_url = urlparse("https://manhuako.com/")
    search_url = urljoin(base_url.geturl(), "home/search")
    search_param = 'mq'

    # Desktop UA — presumably needed so the site serves the full HTML; TODO confirm.
    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="Manhuako", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse a search/listing page into a list of MangaCard.

        Entries whose type label is "Novela" (light novels) are filtered out,
        since only comics can be rendered as image chapters.
        """
        bs = BeautifulSoup(page, "html.parser")

        cards = bs.find_all("div", {"class": "card"})
        cards = [card for card in cards if card.findNext('p', {'class': 'type'}).text != "Novela"]

        mangas = [card.findNext('a', {'class': 'white-text'}) for card in cards]
        names = [manga.string for manga in mangas]
        url = [manga.get('href') for manga in mangas]
        images = [card.findNext('img').get('src') for card in cards]

        return [MangaCard(self, *tup) for tup in zip(names, url, images)]

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse a manga page into MangaChapter objects (newest-first order as served)."""
        bs = BeautifulSoup(page, "html.parser")

        table = bs.find("table", {"class": "table-chapters"})
        # Robustness fix: a page past the last chapter page has no chapter
        # table, so `bs.find` returns None and the original code raised
        # AttributeError on `None.find_all`. Returning [] instead lets
        # iter_chapters terminate its pagination loop cleanly.
        if table is None:
            return []

        trs = table.find_all('tr')
        items = [tr.findNext('a') for tr in trs]

        links = [item.get('href') for item in items]
        texts = [item.string for item in items]

        return list(map(lambda x: MangaChapter(self, x[0], x[1], manga, []), zip(texts, links)))

    @staticmethod
    def updates_from_page(content):
        """Map manga URL -> newest chapter URL from the front page.

        Only the first occurrence of a manga is kept (the front page may list
        the same manga more than once).
        """
        bs = BeautifulSoup(content, "html.parser")

        manga_items = bs.find_all("div", {"class": "card"})

        urls = dict()

        for manga_item in manga_items:
            manga_url = manga_item.findNext('a', {'class': 'white-text'}).get('href')

            if manga_url in urls:
                continue

            chapter_url = manga_item.findNext('a', {'class': 'chip'}).get('href')
            urls[manga_url] = chapter_url

        return urls

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Extract the chapter's page-image URLs (percent-quoted, ':/%' kept)."""
        bs = BeautifulSoup(content, "html.parser")

        ul = bs.find("div", {"id": "pantallaCompleta"})
        images = ul.find_all('img')

        # Quote so URLs with spaces/unicode are fetchable; keep scheme chars.
        images_url = [quote(img.get('src'), safe=':/%') for img in images]

        return images_url

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search the site; with an empty query this returns the generic listing page."""
        query = quote_plus(query)

        request_url = f'{self.search_url}/page/{page}'

        if query:
            request_url += f'?{self.search_param}={query}'

        content = await self.get_url(request_url)

        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Fetch one page of the manga's chapter list."""
        request_url = f'{manga_card.url}/page/{page}'

        content = await self.get_url(request_url)

        return self.chapters_from_page(content, manga_card)

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter of a manga, walking pages until an empty one."""
        manga = MangaCard(self, manga_name, manga_url, '')
        page = 1
        while page > 0:
            chapters = await self.get_chapters(manga_card=manga, page=page)
            if not chapters:
                break
            for chapter in chapters:
                yield chapter
            page += 1

    async def contains_url(self, url: str):
        """Report whether *url* belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Partition tracked manga URLs into (updated, not_updated) lists.

        A manga is updated when the front page lists a chapter URL for it that
        differs from the last one stored.
        """
        content = await self.get_url(self.base_url.geturl())

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url) or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated
|
plugins/manhuaplus.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, AsyncIterable
|
| 2 |
+
from urllib.parse import urlparse, urljoin, quote, quote_plus
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
from bs4.element import PageElement
|
| 6 |
+
|
| 7 |
+
from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class ManhuaPlusClient(MangaClient):
    """Scraper client for https://manhuaplus.com/.

    Implements the MangaClient plugin interface: search, chapter listing via
    the site's POST ``ajax/chapters/`` endpoint, image extraction, and
    front-page update detection.
    """

    base_url = urlparse("https://manhuaplus.com/")
    search_url = base_url.geturl()
    search_param = 's'
    chapters = 'ajax/chapters/'

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="Manhuaplus", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
        """Parse a search-results page into a list of MangaCard."""
        soup = BeautifulSoup(page, "html.parser")

        container = soup.find("div", {"class": "c-tabs-item"})
        if not container:
            # No results container on the page -> empty result set.
            return []

        results = []
        for thumb in container.find_all('div', {'class': 'tab-thumb'}):
            title = thumb.a.get('title')
            link = thumb.a.get('href')
            # Cover images are lazy-loaded; the real URL sits in data-src.
            cover = thumb.findNext('img').get('data-src')
            results.append(MangaCard(self, title, link, cover))

        return results

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        """Parse the chapter-list markup into MangaChapter objects."""
        soup = BeautifulSoup(page, "html.parser")

        anchors = [row.findNext('a')
                   for row in soup.find_all("li", {"class": "wp-manga-chapter"})]

        return [MangaChapter(self, a.string.strip(), a.get('href'), manga, [])
                for a in anchors]

    def updates_from_page(self, page: bytes):
        """Map manga URL -> newest chapter URL from the front page."""
        soup = BeautifulSoup(page, "html.parser")

        latest = dict()
        for detail in soup.find_all("div", {"class": "page-item-detail"}):
            manga_link = detail.findNext('a').get('href')
            chapter_box = detail.findNext("div", {"class": "chapter-item"})
            latest[manga_link] = chapter_box.findNext("a").get('href')

        return latest

    async def pictures_from_chapters(self, content: bytes, response=None):
        """Extract the chapter's page-image URLs (percent-quoted, ':/%' kept)."""
        soup = BeautifulSoup(content, "html.parser")

        reader = soup.find("div", {"class": "reading-content"})

        return [quote(img.get('src'), safe=':/%') for img in reader.find_all('img')]

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        """Search the site; with an empty query the bare front page is fetched."""
        target = self.search_url
        if query:
            target = f'{target}?{self.search_param}={quote_plus(query)}&post_type=wp-manga'

        body = await self.get_url(target)

        return self.mangas_from_page(body)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        """Fetch the full chapter list and return the requested 20-item slice."""
        listing = await self.get_url(f'{manga_card.url}{self.chapters}', method='post')

        all_chapters = self.chapters_from_page(listing, manga_card)
        start = (page - 1) * 20
        return all_chapters[start:start + 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        """Yield every chapter of a manga; the ajax endpoint returns them all at once."""
        card = MangaCard(self, manga_name, manga_url, '')

        listing = await self.get_url(f'{card.url}{self.chapters}', method='post')

        for chapter in self.chapters_from_page(listing, card):
            yield chapter

    async def contains_url(self, url: str):
        """Report whether *url* belongs to this site."""
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        """Partition tracked manga URLs into (updated, not_updated) lists."""
        front_page = await self.get_url(self.base_url.geturl())
        latest = self.updates_from_page(front_page)

        updated, not_updated = [], []
        for tracked in last_chapters:
            newest = latest.get(tracked.url)
            if newest and newest != tracked.chapter_url:
                updated.append(tracked.url)
            else:
                not_updated.append(tracked.url)

        return updated, not_updated
|