id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
161,169 | from __future__ import annotations
import math
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
from typing import Literal
import cv2
import numpy as np
from onnxruntime import InferenceSession
from .utils import nms, xywh2xyxy, draw_detections, sigmoid, multiclass_nms
def finetune_keypoint(name: str, point: List[int]) -> List[int]:
    """
    Apply a small hand-tuned pixel offset to a detected keypoint.

    :param name: class name of the detected digit (e.g. "nine" / "9").
    :param point: [x, y] pixel coordinate; the input list is not mutated.
    :return: a new [x, y] list with the per-class offset applied
        (unchanged copy for classes without a tuning rule).
    """
    # Work on a copy so the caller's list is left untouched.
    point = point.copy()
    if name in ["nine", "9"]:
        point[-1] = point[-1] + 8
        point[0] = point[0] + 2
    if name in ["two", "2"]:
        point[-1] = point[-1] + 7
        point[0] = point[0] + 4
    return point
161,170 | from __future__ import annotations
from dataclasses import dataclass
from dataclasses import field
from pathlib import Path
from typing import List, Literal, Iterable, Tuple
import onnxruntime
from PIL.Image import Image
from hcaptcha_challenger.components.prompt_handler import handle
from hcaptcha_challenger.onnx.clip import MossCLIP
from hcaptcha_challenger.onnx.modelhub import ModelHub, DataLake
from hcaptcha_challenger.onnx.utils import is_cuda_pipline_available
@dataclass
class MossCLIP:
    """Zero-shot image classifier backed by a pair of CLIP ONNX sessions."""

    # ONNX runtime sessions for the CLIP image and text towers.
    visual_session: InferenceSession
    textual_session: InferenceSession

    # Built in __post_init__ (assumes Tokenizer/Preprocessor are importable
    # in this module -- TODO confirm against the original imports).
    _tokenizer = None
    _preprocessor = None

    def __post_init__(self):
        self._tokenizer = Tokenizer()
        self._preprocessor = Preprocessor()

    @classmethod
    def from_pluggable_model(cls, visual_model: InferenceSession, textual_model: InferenceSession):
        """Alternate constructor wiring externally created ONNX sessions."""
        return cls(visual_session=visual_model, textual_session=textual_model)

    def encode_image(self, images: Iterable[Image.Image | np.ndarray]) -> np.ndarray:
        """
        Compute the embeddings for a list of images.

        :param images:
            A list of images to run on. Each image must be a 3-channel(RGB) image.
            Can be any size, as the preprocessing step will resize each image to size (224, 224).
        :return:
            An array of embeddings of shape (len(images), embedding_size).
        """
        images = [self._preprocessor(image) for image in images]
        batch = np.concatenate(images)
        input_name = self.visual_session.get_inputs()[0].name
        return self.visual_session.run(None, {input_name: batch})[0]

    def encode_text(self, texts: Iterable[str]) -> np.ndarray:
        """
        Compute the embeddings for a list of texts.

        :param texts:
            A list of texts to run on. Each entry can be at most 77 characters.
        :return:
            An array of embeddings of shape (len(texts), embedding_size).
        """
        text = self._tokenizer(texts)
        input_name = self.textual_session.get_inputs()[0].name
        return self.textual_session.run(None, {input_name: text})[0]

    def __call__(
        self, images: Iterable[Image.Image | np.ndarray], candidate_labels, *args, **kwargs
    ):
        """
        :param images:
        :param candidate_labels:
        :param args:
        :param kwargs:
        :return:
            A list of dictionaries containing result, one dictionary per proposed label. The dictionaries contain the
            following keys:
            - **label** (`str`) -- The label identified by the model. It is one of the suggested `candidate_label`.
            - **score** (`float`) -- The score attributed by the model for that label (between 0 and 1).
        """

        def softmax(x):
            return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)

        image_features = self.encode_image(images)
        text_features = self.encode_text(candidate_labels)
        # L2-normalize both towers so the dot product is a cosine similarity.
        image_features /= np.linalg.norm(image_features, axis=1, keepdims=True)
        text_features /= np.linalg.norm(text_features, axis=1, keepdims=True)
        # 100x is the conventional CLIP logit scale before the softmax.
        text_probs = 100 * image_features @ text_features.T
        text_probs = softmax(text_probs)
        # NOTE: only the first image's scores are reported (text_probs[0]).
        result = [
            {"score": score, "label": label}
            for score, label in sorted(zip(text_probs[0], candidate_labels), key=lambda x: -x[0])
        ]
        return result
@dataclass
class ModelHub:
    """
    Manage pluggable models. Provides high-level interfaces
    such as model download, model cache, and model scheduling.
    """

    # On-disk layout shared by all instances (class attributes, not fields).
    models_dir = Path(__file__).parent.joinpath("models")
    assets_dir = models_dir.joinpath("_assets")
    objects_path = models_dir.joinpath("objects.yaml")

    # Language used when matching localized prompts from objects.yaml.
    lang: str = "en"

    # Image classification (the most basic function): maps a challenge
    # prompt to the name of the model that handles it.
    label_alias: Dict[str, str] = field(default_factory=dict)

    # Object detection: ashes_of_war maps a YOLO model name to the class
    # names it covers; yolo_names flattens every covered class name.
    yolo_names: List[str] = field(default_factory=list)
    ashes_of_war: Dict[str, List[str]] = field(default_factory=dict)

    # Model Rank.Strategy: small model clusters per prompt, realizing
    # "find the {z} pictures most similar to {y} in the {x_i} pictures".
    nested_categories: Dict[str, List[str]] = field(default_factory=dict)

    # Image segmentation model separating background from foreground.
    # NOTE: upstream wrote `field(default=str)`, which stored the *type*
    # `str` as the default value; an empty string is the intended
    # placeholder (parse_objects() always overwrites it).
    circle_segment_model: str = ""

    # ViT zero-shot image classification: prompt -> DataLake templates used
    # to intensify the inserted CLIP model and improve accuracy.
    datalake: Dict[str, DataLake] = field(default_factory=dict)

    # Default CLIP towers (RN50 pair, ~658.3 MiB). Known alternatives:
    #   visual/textual_CLIP_ViT-B-32.openai.onnx               -- 1180+ MiB
    #   visual/textual_CLIP-ViT-L-14-DataComp.XL-s13B-b90K.onnx -- 3300+ MiB
    DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_RN50.openai.onnx"
    DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_RN50.openai.onnx"

    # CLIP self-supervised candidates.
    clip_candidates: Dict[str, List[str]] = field(default_factory=dict)

    release_url: str = ""
    objects_url: str = ""
    assets: Assets | None = None

    # Registered model objects, keyed by file name:
    #   { model_name1.onnx: cv2.dnn.Net }
    #   { model_name2.onnx: onnxruntime.InferenceSession }
    _name2net: Dict[str, Net | InferenceSession] = field(default_factory=dict)

    def __post_init__(self):
        self.assets_dir.mkdir(mode=0o777, parents=True, exist_ok=True)

    @classmethod
    def from_github_repo(cls, username: str = "QIN2DIM", lang: str = "en", **kwargs):
        """Alternate constructor pointing the hub at a GitHub repo's releases."""
        release_url = f"https://api.github.com/repos/{username}/hcaptcha-challenger/releases"
        objects_url = f"https://raw.githubusercontent.com/{username}/hcaptcha-challenger/main/src/objects.yaml"
        instance = cls(release_url=release_url, objects_url=objects_url, lang=lang)
        instance.assets = Assets.from_release_url(release_url)
        return instance

    def pull_objects(self, upgrade: bool = False):
        """Network request"""
        # Refresh when forced, missing, empty, or older than one hour.
        if (
            upgrade
            or not self.objects_path.exists()
            or not self.objects_path.stat().st_size
            or time.time() - self.objects_path.stat().st_mtime > 3600
        ):
            request_resource(self.objects_url, self.objects_path)

    def parse_objects(self):
        """Try to load label_alias from local database"""
        if not self.objects_path.exists():
            return
        data = yaml.safe_load(self.objects_path.read_text(encoding="utf8"))
        if not data:
            # Corrupt/empty cache: remove it so the next pull re-downloads.
            os.remove(self.objects_path)
            return
        label_to_i18n_mapping: dict = data.get("label_alias", {})
        if label_to_i18n_mapping:
            for model_name, lang_to_prompts in label_to_i18n_mapping.items():
                for lang, prompts in lang_to_prompts.items():
                    # Only register prompts matching the hub's language.
                    if lang != self.lang:
                        continue
                    self.label_alias.update({prompt.strip(): model_name for prompt in prompts})
        yolo2names: Dict[str, List[str]] = data.get("ashes_of_war", {})
        if yolo2names:
            self.yolo_names = [cl for cc in yolo2names.values() for cl in cc]
            self.ashes_of_war = yolo2names
        nested_categories = data.get("nested_categories", {})
        self.nested_categories = nested_categories or {}
        self.circle_segment_model = data.get(
            "circle_seg", "appears_only_once_2309_yolov8s-seg.onnx"
        )
        datalake = data.get("datalake", {})
        if datalake:
            for prompt, serialized_binary in datalake.items():
                datalake[prompt] = DataLake.from_serialized(serialized_binary)
        self.datalake = datalake or {}
        clip_candidates = data.get("clip_candidates", {})
        self.clip_candidates = clip_candidates or {}

    def pull_model(self, focus_name: str):
        """
        1. node_id: Record the insertion point
        and indirectly judge the changes of the file with the same name

        2. assets.List: Record the item list of the release attachment,
        and directly determine whether there are undownloaded files

        3. assets.size: Record the amount of bytes inserted into the file,
        and directly determine whether the file is downloaded completely

        :param focus_name: model_name.onnx Such as `mobile.onnx`
        :return:
        """
        focus_asset = self.assets.get_focus_asset(focus_name)
        if not focus_asset:
            return
        # Matching conditions to trigger download tasks
        model_path = self.models_dir.joinpath(focus_name)
        if (
            not model_path.exists()
            or model_path.stat().st_size != focus_asset.size
            or self.assets.is_outdated(focus_name)
        ):
            try:
                request_resource(focus_asset.browser_download_url, model_path.absolute())
            except httpx.ConnectTimeout as err:
                logger.error("Failed to download resource, try again", err=err)
            else:
                # Only record the node_id after a successful download.
                self.assets.archive_memory(focus_name, focus_asset.node_id)

    def active_net(self, focus_name: str) -> Net | InferenceSession | None:
        """Load and register an existing model"""
        model_path = self.models_dir.joinpath(focus_name)
        if (
            model_path.exists()
            and model_path.stat().st_size
            and not self.assets.is_outdated(focus_name)
        ):
            # YOLO and CLIP models run under onnxruntime; everything else
            # is loaded through OpenCV's DNN module.
            if "yolo" in focus_name.lower() or "clip" in focus_name.lower():
                net = onnxruntime.InferenceSession(
                    model_path, providers=onnxruntime.get_available_providers()
                )
            else:
                net = cv2.dnn.readNetFromONNX(str(model_path))
            self._name2net[focus_name] = net
            return net

    def match_net(
        self, focus_name: str, *, install_only: bool = False
    ) -> Net | InferenceSession | None:
        """
        When a PluggableONNXModel object is instantiated:
        ---
        - It automatically reads and registers model objects specified in objects.yaml
        that already exist in the designated directory.
        - However, the model files corresponding to the label groups expressed in objects.yaml
        do not necessarily all exist yet.
        - No new network requests are made during initialization,
        i.e. missing models are not downloaded during the initialization phase.

        match_net models are passively pulled:
        ---
        - Missing ONNX models used for handling specific binary classification tasks are
        passively downloaded during the challenge.
        - Matching models are automatically downloaded, registered, and returned.
        - Models not on the objects.yaml list will not be downloaded.

        [!] The newly inserted model can be used directly.

        :param install_only:
        :param focus_name: model_name with .onnx suffix
        :return:
        """
        net = self._name2net.get(focus_name)
        if not net:
            self.pull_model(focus_name)
            if not install_only:
                net = self.active_net(focus_name)
        return net

    def unplug(self):
        """Drop every registered YOLO and CLIP model and reclaim memory."""
        for ash in self.ashes_of_war:
            if ash not in self._name2net:
                continue
            del self._name2net[ash]
            gc.collect()
        for m in [self.DEFAULT_CLIP_TEXTUAL_MODEL, self.DEFAULT_CLIP_VISUAL_MODEL]:
            if m in self._name2net:
                del self._name2net[m]
                gc.collect()

    def apply_ash_of_war(self, ash: str) -> Tuple[str, List[str]]:
        """Pick the (model_name, covered_classes) pair matching the prompt *ash*."""
        # Prelude - pending DensePose
        if "head of " in ash and "animal" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "head" not in model_name:
                    continue
                for class_name in covered_class:
                    if class_name.replace("-head", "") in ash:
                        return model_name, covered_class
        # Prelude - Ordered dictionary
        for model_name, covered_class in self.ashes_of_war.items():
            for class_name in covered_class:
                if class_name in ash:
                    return model_name, covered_class
        # catch-all rules (DEFAULT_KEYPOINT_MODEL is a module-level constant)
        return DEFAULT_KEYPOINT_MODEL, self.ashes_of_war[DEFAULT_KEYPOINT_MODEL]

    def lookup_ash_of_war(self, ash: str):  # fixme
        """catch-all default cases"""
        if "can be eaten" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "can_be_eaten" in model_name:
                    yield model_name, covered_class
        if "not an animal" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "notanimal" in model_name:
                    yield model_name, covered_class
        if "head of " in ash and "animal" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "head" in model_name:
                    yield model_name, covered_class
        if "animal" in ash and "not belong to the sea" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if (
                    "notseaanimal" in model_name
                    or "fantasia_elephant" in model_name
                    or "fantasia_cat" in model_name
                ):
                    yield model_name, covered_class
        # Generic match: strip a trailing "_<digits>_<variant>" suffix from
        # the model name and compare the remaining words against the prompt.
        for model_name, covered_class in self.ashes_of_war.items():
            binder = model_name.split("_")
            if len(binder) > 2 and binder[-2].isdigit():
                binder = " ".join(model_name.split("_")[:-2])
                if binder in ash:
                    yield model_name, covered_class
            else:
                for class_name in covered_class:
                    if class_name in ash:
                        yield model_name, covered_class
# True only when both torch and transformers are importable; gates the
# "transformers" pipeline path in register_pipline. ("pipline" spelling is
# part of the established public name -- do not "fix" it here.)
is_cuda_pipline_available = is_torch_available() and is_transformers_available()
The provided code snippet includes necessary dependencies for implementing the `register_pipline` function. Write a Python function `def register_pipline( modelhub: ModelHub, *, fmt: Literal["onnx", "transformers"] = None, install_only: bool = False, **kwargs, )` to solve the following problem:
Ace Model: - laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K --> ONNX 1.7GB - QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336 --> ONNX :param install_only: :param modelhub: :param fmt: :param kwargs: :return:
Here is the function:
def register_pipline(
    modelhub: ModelHub,
    *,
    fmt: Literal["onnx", "transformers"] = None,
    install_only: bool = False,
    **kwargs,
):
    """
    Build (or just install) a zero-shot image-classification pipeline.

    Ace Model:
    - laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K --> ONNX 1.7GB
    - QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336 --> ONNX

    :param install_only: when True, download/register models but do not build a pipeline.
    :param modelhub: hub used to resolve and cache the default CLIP towers.
    :param fmt: "onnx" | "transformers"; None auto-selects "transformers" only
        when torch + transformers are importable (is_cuda_pipline_available).
    :param kwargs: visual_path/textual_path (pathlib.Path to local ONNX files),
        visual_model/textual_model (names resolved via modelhub), or
        checkpoint/task/batch_size for the transformers branch.
    :return: MossCLIP (onnx), a transformers pipeline, or None when install_only.
    """
    if fmt in ["transformers", None]:
        fmt = "transformers" if is_cuda_pipline_available else "onnx"

    if fmt in ["onnx"]:
        v_net, t_net = None, None
        if not modelhub.label_alias:
            modelhub.parse_objects()
        # Explicit local model paths take precedence over hub lookups.
        if visual_path := kwargs.get("visual_path"):
            if not isinstance(visual_path, Path):
                raise ValueError("visual_path should be a pathlib.Path")
            if not visual_path.is_file():
                raise FileNotFoundError(
                    "Select to use visual ONNX model, but the specified model does not exist -"
                    f" {visual_path=}"
                )
            v_net = onnxruntime.InferenceSession(
                visual_path, providers=onnxruntime.get_available_providers()
            )
        if textual_path := kwargs.get("textual_path"):
            if not isinstance(textual_path, Path):
                raise ValueError("textual_path should be a pathlib.Path")
            if not textual_path.is_file():
                raise FileNotFoundError(
                    "Select to use textual ONNX model, but the specified model does not exist -"
                    f" {textual_path=}"
                )
            t_net = onnxruntime.InferenceSession(
                textual_path, providers=onnxruntime.get_available_providers()
            )
        # Fall back to hub-managed models (downloaded on demand).
        if not v_net:
            visual_model = kwargs.get("visual_model", modelhub.DEFAULT_CLIP_VISUAL_MODEL)
            v_net = modelhub.match_net(visual_model, install_only=install_only)
        if not t_net:
            textual_model = kwargs.get("textual_model", modelhub.DEFAULT_CLIP_TEXTUAL_MODEL)
            t_net = modelhub.match_net(textual_model, install_only=install_only)
        if not install_only:
            _pipeline = MossCLIP.from_pluggable_model(v_net, t_net)
            return _pipeline

    if fmt in ["transformers"]:
        from transformers import pipeline  # type:ignore
        import torch  # type:ignore

        device = "cuda" if torch.cuda.is_available() else "cpu"
        checkpoint = kwargs.get("checkpoint", "laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K")
        task = kwargs.get("task", "zero-shot-image-classification")
        batch_size = kwargs.get("batch_size", 16)
        _pipeline = pipeline(task=task, device=device, model=checkpoint, batch_size=batch_size)
        return _pipeline
161,171 | from __future__ import annotations
from dataclasses import dataclass
from dataclasses import field
from pathlib import Path
from typing import List, Literal, Iterable, Tuple
import onnxruntime
from PIL.Image import Image
from hcaptcha_challenger.components.prompt_handler import handle
from hcaptcha_challenger.onnx.clip import MossCLIP
from hcaptcha_challenger.onnx.modelhub import ModelHub, DataLake
from hcaptcha_challenger.onnx.utils import is_cuda_pipline_available
def handle(x):
    """Normalize a raw challenge prompt: clean the label, then split it (English)."""
    return split_prompt_message(label_cleaning(x), "en")
@dataclass
class DataLake:
    """Binary label sets (plus optional raw prompt) driving CLIP self-supervision."""

    # Labels meaning "True" -- preferably independent nouns or clauses.
    positive_labels: List[str] = field(default_factory=list)

    # Labels meaning "False" -- preferably independent nouns or clauses.
    negative_labels: List[str] = field(default_factory=list)

    # Reserved for AutoLabeling: dataset directory parts, used as
    # input_dir = db_dir.joinpath(*joined_dirs).absolute()
    joined_dirs: List[str] | Path | None = None

    # Challenge prompt (or keywords after being divided).
    # !! IMPORT !! Only for unsupervised challenges;
    # do not read in during the initialization phase.
    raw_prompt: str = ""

    # Self-supervised prompt templates inserted around each label.
    PREMISED_YES: str = "This is a picture that looks like {}."
    PREMISED_BAD: str = "This is a picture that don't look like {}."

    @classmethod
    def from_challenge_prompt(cls, raw_prompt: str):
        """Build an unsupervised DataLake carrying only the raw challenge prompt."""
        return cls(raw_prompt=raw_prompt)

    @classmethod
    def from_serialized(cls, fields: Dict[str, List[str]]):
        """Rehydrate from objects.yaml: keys matching pos/t* or neg/f* select the side."""
        positive_labels = []
        negative_labels = []
        for kb, labels in fields.items():
            kb = kb.lower()
            if "pos" in kb or kb.startswith("t"):
                positive_labels = labels
            elif "neg" in kb or kb.startswith("f"):
                negative_labels = labels
        return cls(positive_labels=positive_labels, negative_labels=negative_labels)

    @classmethod
    def from_binary_labels(cls, positive_labels: List[str], negative_labels: List[str]):
        """Build directly from explicit positive/negative label lists."""
        return cls(positive_labels=positive_labels, negative_labels=negative_labels)
def format_datalake(dl: DataLake) -> Tuple[List[str], List[str]]:
    """
    Expand a DataLake into CLIP-ready label lists.

    :param dl: source labels; its lists are copied, never mutated.
    :return: (positive_labels, candidate_labels) where candidate_labels is
        positives followed by negatives, every entry wrapped in the
        hypothesis template unless it already contains "This is a".
    """
    positive_labels = dl.positive_labels.copy()
    negative_labels = dl.negative_labels.copy()

    # When the input is a challenge prompt, cut it into phrases
    if dl.raw_prompt:
        prompt = dl.raw_prompt
        prompt = prompt.replace("_", " ")
        true_label = handle(prompt)
        if true_label not in positive_labels:
            positive_labels.append(true_label)
        # Derive a negation from the prompt only when no negatives were given.
        if not negative_labels:
            false_label = dl.PREMISED_BAD.format(true_label)
            negative_labels.append(false_label)

    # Insert hypothesis_template
    for labels in [positive_labels, negative_labels]:
        for i, label in enumerate(labels):
            if "This is a" in label:
                continue
            labels[i] = dl.PREMISED_YES.format(label)

    # Formatting model input
    candidate_labels = positive_labels.copy()
    if isinstance(negative_labels, list) and len(negative_labels) != 0:
        candidate_labels.extend(negative_labels)

    return positive_labels, candidate_labels
161,172 | import re
def split_prompt_message(prompt_message: str, lang: str) -> str:
def label_cleaning(raw_label: str) -> str:
def diagnose_task(words: str) -> str:
def prompt2task(prompt: str, lang: str = "en") -> str:
    """
    Normalize a raw challenge prompt into a canonical task keyword.

    Runs the three cleaning stages in order: split the prompt message,
    clean the label text, then diagnose the task name.

    :param prompt: raw challenge prompt text.
    :param lang: prompt language passed to split_prompt_message.
    :return: the normalized task keyword.
    """
    prompt = split_prompt_message(prompt, lang)
    prompt = label_cleaning(prompt)
    prompt = diagnose_task(prompt)
    return prompt
161,173 | from __future__ import annotations
import asyncio
import sys
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Tuple, List
import httpx
from httpx import AsyncClient
from tenacity import *
# Work list for the downloaders: (destination file path, source URL) pairs.
DownloadList = List[Tuple[Path, str]]
class ImageDownloader(AshFramework):
    # Coroutine-based downloader; each work item is one (path, url) pair.
    async def control_driver(self, context: Any, client: AsyncClient):
        """Fetch one challenge image and write the response body to disk."""
        (img_path, url) = context
        resp = await client.get(url)
        # NOTE(review): no status check -- a non-2xx response body would be
        # written to the file as-is; confirm upstream tolerates that.
        img_path.write_bytes(resp.content)
The provided code snippet includes necessary dependencies for implementing the `download_images` function. Write a Python function `def download_images(container: DownloadList)` to solve the following problem:
Download Challenge Image ### hcaptcha has a challenge duration limit If the page element is not manipulated for a period of time, the <iframe> box will disappear and the previously acquired Element Locator will be out of date. Need to use some modern methods to shorten the time of `getting the dataset` as much as possible. ### Solution 1. Coroutine Downloader Use the coroutine-based method to _pull the image to the local, the best practice (this method). In the case of poor network, _pull efficiency is at least 10 times faster than traversal download. 2. Screen cut There is some difficulty in coding. Directly intercept nine pictures of the target area, and use the tool function to cut and identify them. Need to weave the locator index yourself. :param container: :return:
Here is the function:
def download_images(container: DownloadList):
    """
    Download Challenge Image

    ### hcaptcha has a challenge duration limit

    If the page element is not manipulated for a period of time,
    the <iframe> box will disappear and the previously acquired Element Locator will be out of date.
    Need to use some modern methods to shorten the time of `getting the dataset` as much as possible.

    ### Solution

    1. Coroutine Downloader
       Use the coroutine-based method to pull the images to the local machine -- the best
       practice (this method). In the case of a poor network, pull efficiency is at least
       10 times faster than traversal download.

    2. Screen cut
       There is some difficulty in coding. Directly intercept nine pictures of the target
       area, and use the tool function to cut and identify them.
       Need to weave the locator index yourself.

    :param container: (destination path, url) pairs to fetch concurrently.
    :return: None; images are written to their destination paths as a side effect.
    """
    ImageDownloader(container).execute()
161,174 | from __future__ import annotations
import asyncio
import sys
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Tuple, List
import httpx
from httpx import AsyncClient
from tenacity import *
DownloadList = List[Tuple[Path, str]]
def common_download(container: DownloadList):
    """
    Sequentially download every (path, url) pair in *container*.

    Blocking fallback to the coroutine downloader; each response body is
    written to its destination path as-is.

    :param container: (destination path, url) pairs.
    :return: None; files are written as a side effect.
    """
    for img_path, url in container:
        resp = httpx.get(url)
        # NOTE(review): no status check or retry -- error bodies are written verbatim.
        img_path.write_bytes(resp.content)
161,175 | from pathlib import Path
from typing import List
import cv2
from sklearn.cluster import SpectralClustering
def get_2d_image(path: Path):
    """Load the image at *path* and return it as a single-channel grayscale array."""
    image = cv2.imread(str(path))
    # cv2.imread yields BGR; collapse to one channel for feature extraction.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray
def extract_features(img):
    """Compute the HOG descriptor vector for a grayscale image."""
    # Default HOGDescriptor parameters; output length depends on image size,
    # so inputs are expected to share one size -- TODO confirm at call sites.
    hog = cv2.HOGDescriptor()
    h = hog.compute(img)
    return h
def find_similar_objects(example_paths: List[Path], challenge_paths: List[Path]):
    """
    Mark which challenge images fall in the same visual cluster as the examples.

    Examples and challenge images are pooled, HOG features are extracted, and
    spectral clustering splits them into two groups; a challenge image is True
    when it shares the first example's cluster.

    :param example_paths: reference images (assumed to share one height).
    :param challenge_paths: candidate images to classify.
    :return: list aligned with challenge_paths -- False for height-mismatched
        images, True for same-cluster images, None for other-cluster images.
    """
    example_num = len(example_paths)
    results: List[bool | None] = [None] * len(challenge_paths)

    images_example = [get_2d_image(path) for path in example_paths]
    example_shape = images_example[0].shape[0]

    images_challenge = []
    for i, path in enumerate(challenge_paths):
        image = get_2d_image(path)
        if image.shape[0] != example_shape:
            # Height mismatch: pre-mark False, but the image still joins the
            # clustering batch. NOTE(review): HOG vector lengths may then
            # differ and break fit_predict -- confirm inputs are uniform.
            results[i] = False
        images_challenge.append(image)

    images_merged = images_example + images_challenge
    X = [extract_features(img) for img in images_merged]

    clf = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", n_neighbors=5)
    y = clf.fit_predict(X)

    # The first example image defines the "similar" cluster label.
    ref_img_idx = 0
    ref_label = y[ref_img_idx]
    sim_idx_sequence = [i for i, rl in enumerate(y) if rl == ref_label]

    # Skip indices belonging to the example images themselves.
    for idx in sim_idx_sequence[example_num:]:
        fit_idx = idx - example_num
        if results[fit_idx] is None:
            results[fit_idx] = True

    return results
161,176 | from __future__ import annotations
import asyncio
import hashlib
import shutil
import time
from contextlib import suppress
from pathlib import Path
from typing import Literal, List, Tuple
from hcaptcha_challenger.components.image_downloader import Cirilla
from hcaptcha_challenger.components.middleware import QuestionResp
from hcaptcha_challenger.onnx.modelhub import ModelHub, DataLake
from hcaptcha_challenger.onnx.resnet import ResNetControl
from hcaptcha_challenger.onnx.yolo import YOLOv8
@dataclass
class ModelHub:
    """
    Manage pluggable models. Provides high-level interfaces
    such as model download, model cache, and model scheduling.
    """

    # On-disk layout shared by all instances (class attributes, not fields).
    models_dir = Path(__file__).parent.joinpath("models")
    assets_dir = models_dir.joinpath("_assets")
    objects_path = models_dir.joinpath("objects.yaml")

    # Language used when matching localized prompts from objects.yaml.
    lang: str = "en"

    # Image classification (the most basic function): maps a challenge
    # prompt to the name of the model that handles it.
    label_alias: Dict[str, str] = field(default_factory=dict)

    # Object detection: ashes_of_war maps a YOLO model name to the class
    # names it covers; yolo_names flattens every covered class name.
    yolo_names: List[str] = field(default_factory=list)
    ashes_of_war: Dict[str, List[str]] = field(default_factory=dict)

    # Model Rank.Strategy: small model clusters per prompt, realizing
    # "find the {z} pictures most similar to {y} in the {x_i} pictures".
    nested_categories: Dict[str, List[str]] = field(default_factory=dict)

    # Image segmentation model separating background from foreground.
    # NOTE: upstream wrote `field(default=str)`, which stored the *type*
    # `str` as the default value; an empty string is the intended
    # placeholder (parse_objects() always overwrites it).
    circle_segment_model: str = ""

    # ViT zero-shot image classification: prompt -> DataLake templates used
    # to intensify the inserted CLIP model and improve accuracy.
    datalake: Dict[str, DataLake] = field(default_factory=dict)

    # Default CLIP towers (RN50 pair, ~658.3 MiB). Known alternatives:
    #   visual/textual_CLIP_ViT-B-32.openai.onnx               -- 1180+ MiB
    #   visual/textual_CLIP-ViT-L-14-DataComp.XL-s13B-b90K.onnx -- 3300+ MiB
    DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_RN50.openai.onnx"
    DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_RN50.openai.onnx"

    # CLIP self-supervised candidates.
    clip_candidates: Dict[str, List[str]] = field(default_factory=dict)

    release_url: str = ""
    objects_url: str = ""
    assets: Assets | None = None

    # Registered model objects, keyed by file name:
    #   { model_name1.onnx: cv2.dnn.Net }
    #   { model_name2.onnx: onnxruntime.InferenceSession }
    _name2net: Dict[str, Net | InferenceSession] = field(default_factory=dict)

    def __post_init__(self):
        self.assets_dir.mkdir(mode=0o777, parents=True, exist_ok=True)

    @classmethod
    def from_github_repo(cls, username: str = "QIN2DIM", lang: str = "en", **kwargs):
        """Alternate constructor pointing the hub at a GitHub repo's releases."""
        release_url = f"https://api.github.com/repos/{username}/hcaptcha-challenger/releases"
        objects_url = f"https://raw.githubusercontent.com/{username}/hcaptcha-challenger/main/src/objects.yaml"
        instance = cls(release_url=release_url, objects_url=objects_url, lang=lang)
        instance.assets = Assets.from_release_url(release_url)
        return instance

    def pull_objects(self, upgrade: bool = False):
        """Network request"""
        # Refresh when forced, missing, empty, or older than one hour.
        if (
            upgrade
            or not self.objects_path.exists()
            or not self.objects_path.stat().st_size
            or time.time() - self.objects_path.stat().st_mtime > 3600
        ):
            request_resource(self.objects_url, self.objects_path)

    def parse_objects(self):
        """Try to load label_alias from local database"""
        if not self.objects_path.exists():
            return
        data = yaml.safe_load(self.objects_path.read_text(encoding="utf8"))
        if not data:
            # Corrupt/empty cache: remove it so the next pull re-downloads.
            os.remove(self.objects_path)
            return
        label_to_i18n_mapping: dict = data.get("label_alias", {})
        if label_to_i18n_mapping:
            for model_name, lang_to_prompts in label_to_i18n_mapping.items():
                for lang, prompts in lang_to_prompts.items():
                    # Only register prompts matching the hub's language.
                    if lang != self.lang:
                        continue
                    self.label_alias.update({prompt.strip(): model_name for prompt in prompts})
        yolo2names: Dict[str, List[str]] = data.get("ashes_of_war", {})
        if yolo2names:
            self.yolo_names = [cl for cc in yolo2names.values() for cl in cc]
            self.ashes_of_war = yolo2names
        nested_categories = data.get("nested_categories", {})
        self.nested_categories = nested_categories or {}
        self.circle_segment_model = data.get(
            "circle_seg", "appears_only_once_2309_yolov8s-seg.onnx"
        )
        datalake = data.get("datalake", {})
        if datalake:
            for prompt, serialized_binary in datalake.items():
                datalake[prompt] = DataLake.from_serialized(serialized_binary)
        self.datalake = datalake or {}
        clip_candidates = data.get("clip_candidates", {})
        self.clip_candidates = clip_candidates or {}

    def pull_model(self, focus_name: str):
        """
        1. node_id: Record the insertion point
        and indirectly judge the changes of the file with the same name

        2. assets.List: Record the item list of the release attachment,
        and directly determine whether there are undownloaded files

        3. assets.size: Record the amount of bytes inserted into the file,
        and directly determine whether the file is downloaded completely

        :param focus_name: model_name.onnx Such as `mobile.onnx`
        :return:
        """
        focus_asset = self.assets.get_focus_asset(focus_name)
        if not focus_asset:
            return
        # Matching conditions to trigger download tasks
        model_path = self.models_dir.joinpath(focus_name)
        if (
            not model_path.exists()
            or model_path.stat().st_size != focus_asset.size
            or self.assets.is_outdated(focus_name)
        ):
            try:
                request_resource(focus_asset.browser_download_url, model_path.absolute())
            except httpx.ConnectTimeout as err:
                logger.error("Failed to download resource, try again", err=err)
            else:
                # Only record the node_id after a successful download.
                self.assets.archive_memory(focus_name, focus_asset.node_id)

    def active_net(self, focus_name: str) -> Net | InferenceSession | None:
        """Load and register an existing model"""
        model_path = self.models_dir.joinpath(focus_name)
        if (
            model_path.exists()
            and model_path.stat().st_size
            and not self.assets.is_outdated(focus_name)
        ):
            # YOLO and CLIP models run under onnxruntime; everything else
            # is loaded through OpenCV's DNN module.
            if "yolo" in focus_name.lower() or "clip" in focus_name.lower():
                net = onnxruntime.InferenceSession(
                    model_path, providers=onnxruntime.get_available_providers()
                )
            else:
                net = cv2.dnn.readNetFromONNX(str(model_path))
            self._name2net[focus_name] = net
            return net

    def match_net(
        self, focus_name: str, *, install_only: bool = False
    ) -> Net | InferenceSession | None:
        """
        When a PluggableONNXModel object is instantiated:
        ---
        - It automatically reads and registers model objects specified in objects.yaml
        that already exist in the designated directory.
        - However, the model files corresponding to the label groups expressed in objects.yaml
        do not necessarily all exist yet.
        - No new network requests are made during initialization,
        i.e. missing models are not downloaded during the initialization phase.

        match_net models are passively pulled:
        ---
        - Missing ONNX models used for handling specific binary classification tasks are
        passively downloaded during the challenge.
        - Matching models are automatically downloaded, registered, and returned.
        - Models not on the objects.yaml list will not be downloaded.

        [!] The newly inserted model can be used directly.

        :param install_only:
        :param focus_name: model_name with .onnx suffix
        :return:
        """
        net = self._name2net.get(focus_name)
        if not net:
            self.pull_model(focus_name)
            if not install_only:
                net = self.active_net(focus_name)
        return net

    def unplug(self):
        """Drop every registered YOLO and CLIP model and reclaim memory."""
        for ash in self.ashes_of_war:
            if ash not in self._name2net:
                continue
            del self._name2net[ash]
            gc.collect()
        for m in [self.DEFAULT_CLIP_TEXTUAL_MODEL, self.DEFAULT_CLIP_VISUAL_MODEL]:
            if m in self._name2net:
                del self._name2net[m]
                gc.collect()

    def apply_ash_of_war(self, ash: str) -> Tuple[str, List[str]]:
        """Pick the (model_name, covered_classes) pair matching the prompt *ash*."""
        # Prelude - pending DensePose
        if "head of " in ash and "animal" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "head" not in model_name:
                    continue
                for class_name in covered_class:
                    if class_name.replace("-head", "") in ash:
                        return model_name, covered_class
        # Prelude - Ordered dictionary
        for model_name, covered_class in self.ashes_of_war.items():
            for class_name in covered_class:
                if class_name in ash:
                    return model_name, covered_class
        # catch-all rules (DEFAULT_KEYPOINT_MODEL is a module-level constant)
        return DEFAULT_KEYPOINT_MODEL, self.ashes_of_war[DEFAULT_KEYPOINT_MODEL]

    def lookup_ash_of_war(self, ash: str):  # fixme
        """catch-all default cases"""
        if "can be eaten" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "can_be_eaten" in model_name:
                    yield model_name, covered_class
        if "not an animal" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "notanimal" in model_name:
                    yield model_name, covered_class
        if "head of " in ash and "animal" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if "head" in model_name:
                    yield model_name, covered_class
        if "animal" in ash and "not belong to the sea" in ash:
            for model_name, covered_class in self.ashes_of_war.items():
                if (
                    "notseaanimal" in model_name
                    or "fantasia_elephant" in model_name
                    or "fantasia_cat" in model_name
                ):
                    yield model_name, covered_class
        # Generic match: strip a trailing "_<digits>_<variant>" suffix from
        # the model name and compare the remaining words against the prompt.
        for model_name, covered_class in self.ashes_of_war.items():
            binder = model_name.split("_")
            if len(binder) > 2 and binder[-2].isdigit():
                binder = " ".join(model_name.split("_")[:-2])
                if binder in ash:
                    yield model_name, covered_class
            else:
                for class_name in covered_class:
                    if class_name in ash:
                        yield model_name, covered_class
class ResNetControl:
net: Net
def from_pluggable_model(cls, net: Net):
return cls(net=net)
def binary_classify(self, img_stream: Any) -> bool | Tuple[bool, np.ndarray]:
img_arr = np.frombuffer(img_stream, np.uint8)
img = cv2.imdecode(img_arr, flags=1)
if img.shape[0] == ChallengeStyle.WATERMARK:
img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
img = cv2.resize(img, (64, 64))
blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (64, 64), (0, 0, 0), swapRB=True, crop=False)
# Use the delayed reflection mechanism
# to ensure the consistency of both ends of the distributed network
if self.net is None:
logger.debug("The remote network does not exist or the local cache has expired.")
return False
self.net.setInput(blob)
out = self.net.forward()
logit = out[0]
proba = expit(logit)
if not np.argmax(out, axis=1)[0]:
return True, proba
return False, proba
def execute(self, img_stream: bytes, **kwargs) -> bool | Tuple[bool, np.ndarray]:
"""Implementation process of solution"""
get_proba = kwargs.get("proba", False)
result = self.binary_classify(img_stream)
if result is None or isinstance(result, bool):
return result
if isinstance(result, tuple):
prediction, confidence = result
if get_proba is True:
return prediction, confidence
return prediction
def rank_models(
nested_models: List[str], example_paths: List[Path], modelhub: ModelHub
) -> Tuple[ResNetControl, str] | None:
# {{< Rank ResNet Models >}}
rank_ladder = []
for example_path in example_paths:
img_stream = example_path.read_bytes()
for model_name in reversed(nested_models):
if (net := modelhub.match_net(focus_name=model_name)) is None:
return
control = ResNetControl.from_pluggable_model(net)
result_, proba = control.execute(img_stream, proba=True)
if result_ and proba[0] > 0.68:
rank_ladder.append([control, model_name, proba])
if proba[0] > 0.87:
break
# {{< Catch-all Rules >}}
if rank_ladder:
alts = sorted(rank_ladder, key=lambda x: x[-1][0], reverse=True)
best_model, model_name = alts[0][0], alts[0][1]
return best_model, model_name | null |
161,177 | from __future__ import annotations
import asyncio
import hashlib
import shutil
import time
from contextlib import suppress
from pathlib import Path
from typing import Literal, List, Tuple
from hcaptcha_challenger.components.image_downloader import Cirilla
from hcaptcha_challenger.components.middleware import QuestionResp
from hcaptcha_challenger.onnx.modelhub import ModelHub, DataLake
from hcaptcha_challenger.onnx.resnet import ResNetControl
from hcaptcha_challenger.onnx.yolo import YOLOv8
)
class ModelHub:
"""
Manage pluggable models. Provides high-level interfaces
such as model download, model cache, and model scheduling.
"""
models_dir = Path(__file__).parent.joinpath("models")
assets_dir = models_dir.joinpath("_assets")
objects_path = models_dir.joinpath("objects.yaml")
lang: str = "en"
label_alias: Dict[str, str] = field(default_factory=dict)
"""
Image classification
---
The most basic function
Storing a series of mappings from model names to short prompts,
I .e., what model to use to handle what challenge is determined by this dictionary.
"""
yolo_names: List[str] = field(default_factory=list)
ashes_of_war: Dict[str, List[str]] = field(default_factory=dict)
"""
Object Detection
---
Provide a series of object detection models applied to special tasks.
The yolo_names stores the label names of all task objects that the model can process.
"""
nested_categories: Dict[str, List[str]] = field(default_factory=dict)
"""
Model Rank.Strategy
---
Provide a string of small model clusters for a prompt to realize
"find the {z} pictures most similar to {y} in the {x_i} pictures"
"""
circle_segment_model: str = field(default=str)
"""
Image Segmentation
---
A model trained specifically for image segmentation tasks
that can separate background and foreground with close to 100 percent accuracy
"""
datalake: Dict[str, DataLake] = field(default_factory=dict)
"""
ViT zero-shot image classification
---
Used to generate prompt templates to intensify inserted CLIP model and improve accuracy.
"""
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_RN50.openai.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_RN50.openai.onnx"
"""
Available Model
--- 1180+ MiB
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_ViT-B-32.openai.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_ViT-B-32.openai.onnx"
--- 658.3 MiB
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_RN50.openai.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_RN50.openai.onnx"
--- 3300+ MiB
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP-ViT-L-14-DataComp.XL-s13B-b90K.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP-ViT-L-14-DataComp.XL-s13B-b90K.onnx"
"""
clip_candidates: Dict[str, List[str]] = field(default_factory=dict)
"""
CLIP self-supervised candidates
"""
release_url: str = ""
objects_url: str = ""
assets: Assets = None
_name2net: Dict[str, Net | InferenceSession] = field(default_factory=dict)
"""
{ model_name1.onnx: cv2.dnn.Net }
{ model_name2.onnx: onnxruntime.InferenceSession }
"""
def __post_init__(self):
self.assets_dir.mkdir(mode=0o777, parents=True, exist_ok=True)
def from_github_repo(cls, username: str = "QIN2DIM", lang: str = "en", **kwargs):
release_url = f"https://api.github.com/repos/{username}/hcaptcha-challenger/releases"
objects_url = f"https://raw.githubusercontent.com/{username}/hcaptcha-challenger/main/src/objects.yaml"
instance = cls(release_url=release_url, objects_url=objects_url, lang=lang)
instance.assets = Assets.from_release_url(release_url)
return instance
def pull_objects(self, upgrade: bool = False):
"""Network request"""
if (
upgrade
or not self.objects_path.exists()
or not self.objects_path.stat().st_size
or time.time() - self.objects_path.stat().st_mtime > 3600
):
request_resource(self.objects_url, self.objects_path)
def parse_objects(self):
"""Try to load label_alias from local database"""
if not self.objects_path.exists():
return
data = yaml.safe_load(self.objects_path.read_text(encoding="utf8"))
if not data:
os.remove(self.objects_path)
return
label_to_i18n_mapping: dict = data.get("label_alias", {})
if label_to_i18n_mapping:
for model_name, lang_to_prompts in label_to_i18n_mapping.items():
for lang, prompts in lang_to_prompts.items():
if lang != self.lang:
continue
self.label_alias.update({prompt.strip(): model_name for prompt in prompts})
yolo2names: Dict[str, List[str]] = data.get("ashes_of_war", {})
if yolo2names:
self.yolo_names = [cl for cc in yolo2names.values() for cl in cc]
self.ashes_of_war = yolo2names
nested_categories = data.get("nested_categories", {})
self.nested_categories = nested_categories or {}
self.circle_segment_model = data.get(
"circle_seg", "appears_only_once_2309_yolov8s-seg.onnx"
)
datalake = data.get("datalake", {})
if datalake:
for prompt, serialized_binary in datalake.items():
datalake[prompt] = DataLake.from_serialized(serialized_binary)
self.datalake = datalake or {}
clip_candidates = data.get("clip_candidates", {})
self.clip_candidates = clip_candidates or {}
def pull_model(self, focus_name: str):
"""
1. node_id: Record the insertion point
and indirectly judge the changes of the file with the same name
2. assets.List: Record the item list of the release attachment,
and directly determine whether there are undownloaded files
3. assets.size: Record the amount of bytes inserted into the file,
and directly determine whether the file is downloaded completely
:param focus_name: model_name.onnx Such as `mobile.onnx`
:return:
"""
focus_asset = self.assets.get_focus_asset(focus_name)
if not focus_asset:
return
# Matching conditions to trigger download tasks
model_path = self.models_dir.joinpath(focus_name)
if (
not model_path.exists()
or model_path.stat().st_size != focus_asset.size
or self.assets.is_outdated(focus_name)
):
try:
request_resource(focus_asset.browser_download_url, model_path.absolute())
except httpx.ConnectTimeout as err:
logger.error("Failed to download resource, try again", err=err)
else:
self.assets.archive_memory(focus_name, focus_asset.node_id)
def active_net(self, focus_name: str) -> Net | InferenceSession | None:
"""Load and register an existing model"""
model_path = self.models_dir.joinpath(focus_name)
if (
model_path.exists()
and model_path.stat().st_size
and not self.assets.is_outdated(focus_name)
):
if "yolo" in focus_name.lower() or "clip" in focus_name.lower():
net = onnxruntime.InferenceSession(
model_path, providers=onnxruntime.get_available_providers()
)
else:
net = cv2.dnn.readNetFromONNX(str(model_path))
self._name2net[focus_name] = net
return net
def match_net(
self, focus_name: str, *, install_only: bool = False
) -> Net | InferenceSession | None:
"""
When a PluggableONNXModel object is instantiated:
---
- It automatically reads and registers model objects specified in objects.yaml
that already exist in the designated directory.
- However, the model files corresponding to the label groups expressed in objects.yaml
do not necessarily all exist yet.
- No new network requests are made during initialization,
i.e. missing models are not downloaded during the initialization phase.
match_net models are passively pulled:
---
- Missing ONNX models used for handling specific binary classification tasks are
passively downloaded during the challenge.
- Matching models are automatically downloaded, registered, and returned.
- Models not on the objects.yaml list will not be downloaded.
[!] The newly inserted model can be used directly.
:param install_only:
:param focus_name: model_name with .onnx suffix
:return:
"""
net = self._name2net.get(focus_name)
if not net:
self.pull_model(focus_name)
if not install_only:
net = self.active_net(focus_name)
return net
def unplug(self):
for ash in self.ashes_of_war:
if ash not in self._name2net:
continue
del self._name2net[ash]
gc.collect()
for m in [self.DEFAULT_CLIP_TEXTUAL_MODEL, self.DEFAULT_CLIP_VISUAL_MODEL]:
if m in self._name2net:
del self._name2net[m]
gc.collect()
def apply_ash_of_war(self, ash: str) -> Tuple[str, List[str]]:
# Prelude - pending DensePose
if "head of " in ash and "animal" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "head" not in model_name:
continue
for class_name in covered_class:
if class_name.replace("-head", "") in ash:
return model_name, covered_class
# Prelude - Ordered dictionary
for model_name, covered_class in self.ashes_of_war.items():
for class_name in covered_class:
if class_name in ash:
return model_name, covered_class
# catch-all rules
return DEFAULT_KEYPOINT_MODEL, self.ashes_of_war[DEFAULT_KEYPOINT_MODEL]
def lookup_ash_of_war(self, ash: str): # fixme
"""catch-all default cases"""
if "can be eaten" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "can_be_eaten" in model_name:
yield model_name, covered_class
if "not an animal" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "notanimal" in model_name:
yield model_name, covered_class
if "head of " in ash and "animal" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "head" in model_name:
yield model_name, covered_class
if "animal" in ash and "not belong to the sea" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if (
"notseaanimal" in model_name
or "fantasia_elephant" in model_name
or "fantasia_cat" in model_name
):
yield model_name, covered_class
for model_name, covered_class in self.ashes_of_war.items():
binder = model_name.split("_")
if len(binder) > 2 and binder[-2].isdigit():
binder = " ".join(model_name.split("_")[:-2])
if binder in ash:
yield model_name, covered_class
else:
for class_name in covered_class:
if class_name in ash:
yield model_name, covered_class
class DataLake:
positive_labels: List[str] = field(default_factory=list)
"""
Indicate the label with the meaning "True",
preferably an independent noun or clause
"""
negative_labels: List[str] = field(default_factory=list)
"""
Indicate the label with the meaning "False",
preferably an independent noun or clause
"""
joined_dirs: List[str] | Path | None = None
"""
Attributes reserved for AutoLabeling
Used to indicate the directory where the dataset is located
input_dir = db_dir.joinpath(*joined_dirs).absolute()
"""
raw_prompt: str = ""
"""
Challenge prompt or keywords after being divided
!! IMPORT !!
Only for unsupervised challenges.
Please do not read in during the initialization phase.
"""
PREMISED_YES: str = "This is a picture that looks like {}."
PREMISED_BAD: str = "This is a picture that don't look like {}."
"""
Insert self-supervised prompt
"""
def from_challenge_prompt(cls, raw_prompt: str):
return cls(raw_prompt=raw_prompt)
def from_serialized(cls, fields: Dict[str, List[str]]):
positive_labels = []
negative_labels = []
for kb, labels in fields.items():
kb = kb.lower()
if "pos" in kb or kb.startswith("t"):
positive_labels = labels
elif "neg" in kb or kb.startswith("f"):
negative_labels = labels
return cls(positive_labels=positive_labels, negative_labels=negative_labels)
def from_binary_labels(cls, positive_labels: List[str], negative_labels: List[str]):
return cls(positive_labels=positive_labels, negative_labels=negative_labels)
def match_datalake(modelhub: ModelHub, label: str) -> DataLake:
# prelude datalake
if dl := modelhub.datalake.get(label):
return dl
# prelude clip_candidates
for ket in reversed(modelhub.clip_candidates.keys()):
if ket in label:
candidates = modelhub.clip_candidates[ket]
if candidates and len(candidates) > 2:
dl = DataLake.from_binary_labels(candidates[:1], candidates[1:])
return dl
# catch-all
dl = DataLake.from_challenge_prompt(raw_prompt=label)
return dl | null |
161,178 | from __future__ import annotations
import asyncio
import hashlib
import shutil
import time
from contextlib import suppress
from pathlib import Path
from typing import Literal, List, Tuple
from hcaptcha_challenger.components.image_downloader import Cirilla
from hcaptcha_challenger.components.middleware import QuestionResp
from hcaptcha_challenger.onnx.modelhub import ModelHub, DataLake
from hcaptcha_challenger.onnx.resnet import ResNetControl
from hcaptcha_challenger.onnx.yolo import YOLOv8
)
class ModelHub:
"""
Manage pluggable models. Provides high-level interfaces
such as model download, model cache, and model scheduling.
"""
models_dir = Path(__file__).parent.joinpath("models")
assets_dir = models_dir.joinpath("_assets")
objects_path = models_dir.joinpath("objects.yaml")
lang: str = "en"
label_alias: Dict[str, str] = field(default_factory=dict)
"""
Image classification
---
The most basic function
Storing a series of mappings from model names to short prompts,
I .e., what model to use to handle what challenge is determined by this dictionary.
"""
yolo_names: List[str] = field(default_factory=list)
ashes_of_war: Dict[str, List[str]] = field(default_factory=dict)
"""
Object Detection
---
Provide a series of object detection models applied to special tasks.
The yolo_names stores the label names of all task objects that the model can process.
"""
nested_categories: Dict[str, List[str]] = field(default_factory=dict)
"""
Model Rank.Strategy
---
Provide a string of small model clusters for a prompt to realize
"find the {z} pictures most similar to {y} in the {x_i} pictures"
"""
circle_segment_model: str = field(default=str)
"""
Image Segmentation
---
A model trained specifically for image segmentation tasks
that can separate background and foreground with close to 100 percent accuracy
"""
datalake: Dict[str, DataLake] = field(default_factory=dict)
"""
ViT zero-shot image classification
---
Used to generate prompt templates to intensify inserted CLIP model and improve accuracy.
"""
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_RN50.openai.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_RN50.openai.onnx"
"""
Available Model
--- 1180+ MiB
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_ViT-B-32.openai.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_ViT-B-32.openai.onnx"
--- 658.3 MiB
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP_RN50.openai.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP_RN50.openai.onnx"
--- 3300+ MiB
DEFAULT_CLIP_VISUAL_MODEL: str = "visual_CLIP-ViT-L-14-DataComp.XL-s13B-b90K.onnx"
DEFAULT_CLIP_TEXTUAL_MODEL: str = "textual_CLIP-ViT-L-14-DataComp.XL-s13B-b90K.onnx"
"""
clip_candidates: Dict[str, List[str]] = field(default_factory=dict)
"""
CLIP self-supervised candidates
"""
release_url: str = ""
objects_url: str = ""
assets: Assets = None
_name2net: Dict[str, Net | InferenceSession] = field(default_factory=dict)
"""
{ model_name1.onnx: cv2.dnn.Net }
{ model_name2.onnx: onnxruntime.InferenceSession }
"""
def __post_init__(self):
self.assets_dir.mkdir(mode=0o777, parents=True, exist_ok=True)
def from_github_repo(cls, username: str = "QIN2DIM", lang: str = "en", **kwargs):
release_url = f"https://api.github.com/repos/{username}/hcaptcha-challenger/releases"
objects_url = f"https://raw.githubusercontent.com/{username}/hcaptcha-challenger/main/src/objects.yaml"
instance = cls(release_url=release_url, objects_url=objects_url, lang=lang)
instance.assets = Assets.from_release_url(release_url)
return instance
def pull_objects(self, upgrade: bool = False):
"""Network request"""
if (
upgrade
or not self.objects_path.exists()
or not self.objects_path.stat().st_size
or time.time() - self.objects_path.stat().st_mtime > 3600
):
request_resource(self.objects_url, self.objects_path)
def parse_objects(self):
"""Try to load label_alias from local database"""
if not self.objects_path.exists():
return
data = yaml.safe_load(self.objects_path.read_text(encoding="utf8"))
if not data:
os.remove(self.objects_path)
return
label_to_i18n_mapping: dict = data.get("label_alias", {})
if label_to_i18n_mapping:
for model_name, lang_to_prompts in label_to_i18n_mapping.items():
for lang, prompts in lang_to_prompts.items():
if lang != self.lang:
continue
self.label_alias.update({prompt.strip(): model_name for prompt in prompts})
yolo2names: Dict[str, List[str]] = data.get("ashes_of_war", {})
if yolo2names:
self.yolo_names = [cl for cc in yolo2names.values() for cl in cc]
self.ashes_of_war = yolo2names
nested_categories = data.get("nested_categories", {})
self.nested_categories = nested_categories or {}
self.circle_segment_model = data.get(
"circle_seg", "appears_only_once_2309_yolov8s-seg.onnx"
)
datalake = data.get("datalake", {})
if datalake:
for prompt, serialized_binary in datalake.items():
datalake[prompt] = DataLake.from_serialized(serialized_binary)
self.datalake = datalake or {}
clip_candidates = data.get("clip_candidates", {})
self.clip_candidates = clip_candidates or {}
def pull_model(self, focus_name: str):
"""
1. node_id: Record the insertion point
and indirectly judge the changes of the file with the same name
2. assets.List: Record the item list of the release attachment,
and directly determine whether there are undownloaded files
3. assets.size: Record the amount of bytes inserted into the file,
and directly determine whether the file is downloaded completely
:param focus_name: model_name.onnx Such as `mobile.onnx`
:return:
"""
focus_asset = self.assets.get_focus_asset(focus_name)
if not focus_asset:
return
# Matching conditions to trigger download tasks
model_path = self.models_dir.joinpath(focus_name)
if (
not model_path.exists()
or model_path.stat().st_size != focus_asset.size
or self.assets.is_outdated(focus_name)
):
try:
request_resource(focus_asset.browser_download_url, model_path.absolute())
except httpx.ConnectTimeout as err:
logger.error("Failed to download resource, try again", err=err)
else:
self.assets.archive_memory(focus_name, focus_asset.node_id)
def active_net(self, focus_name: str) -> Net | InferenceSession | None:
"""Load and register an existing model"""
model_path = self.models_dir.joinpath(focus_name)
if (
model_path.exists()
and model_path.stat().st_size
and not self.assets.is_outdated(focus_name)
):
if "yolo" in focus_name.lower() or "clip" in focus_name.lower():
net = onnxruntime.InferenceSession(
model_path, providers=onnxruntime.get_available_providers()
)
else:
net = cv2.dnn.readNetFromONNX(str(model_path))
self._name2net[focus_name] = net
return net
def match_net(
self, focus_name: str, *, install_only: bool = False
) -> Net | InferenceSession | None:
"""
When a PluggableONNXModel object is instantiated:
---
- It automatically reads and registers model objects specified in objects.yaml
that already exist in the designated directory.
- However, the model files corresponding to the label groups expressed in objects.yaml
do not necessarily all exist yet.
- No new network requests are made during initialization,
i.e. missing models are not downloaded during the initialization phase.
match_net models are passively pulled:
---
- Missing ONNX models used for handling specific binary classification tasks are
passively downloaded during the challenge.
- Matching models are automatically downloaded, registered, and returned.
- Models not on the objects.yaml list will not be downloaded.
[!] The newly inserted model can be used directly.
:param install_only:
:param focus_name: model_name with .onnx suffix
:return:
"""
net = self._name2net.get(focus_name)
if not net:
self.pull_model(focus_name)
if not install_only:
net = self.active_net(focus_name)
return net
def unplug(self):
for ash in self.ashes_of_war:
if ash not in self._name2net:
continue
del self._name2net[ash]
gc.collect()
for m in [self.DEFAULT_CLIP_TEXTUAL_MODEL, self.DEFAULT_CLIP_VISUAL_MODEL]:
if m in self._name2net:
del self._name2net[m]
gc.collect()
def apply_ash_of_war(self, ash: str) -> Tuple[str, List[str]]:
# Prelude - pending DensePose
if "head of " in ash and "animal" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "head" not in model_name:
continue
for class_name in covered_class:
if class_name.replace("-head", "") in ash:
return model_name, covered_class
# Prelude - Ordered dictionary
for model_name, covered_class in self.ashes_of_war.items():
for class_name in covered_class:
if class_name in ash:
return model_name, covered_class
# catch-all rules
return DEFAULT_KEYPOINT_MODEL, self.ashes_of_war[DEFAULT_KEYPOINT_MODEL]
def lookup_ash_of_war(self, ash: str): # fixme
"""catch-all default cases"""
if "can be eaten" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "can_be_eaten" in model_name:
yield model_name, covered_class
if "not an animal" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "notanimal" in model_name:
yield model_name, covered_class
if "head of " in ash and "animal" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if "head" in model_name:
yield model_name, covered_class
if "animal" in ash and "not belong to the sea" in ash:
for model_name, covered_class in self.ashes_of_war.items():
if (
"notseaanimal" in model_name
or "fantasia_elephant" in model_name
or "fantasia_cat" in model_name
):
yield model_name, covered_class
for model_name, covered_class in self.ashes_of_war.items():
binder = model_name.split("_")
if len(binder) > 2 and binder[-2].isdigit():
binder = " ".join(model_name.split("_")[:-2])
if binder in ash:
yield model_name, covered_class
else:
for class_name in covered_class:
if class_name in ash:
yield model_name, covered_class
class ResNetControl:
net: Net
def from_pluggable_model(cls, net: Net):
return cls(net=net)
def binary_classify(self, img_stream: Any) -> bool | Tuple[bool, np.ndarray]:
img_arr = np.frombuffer(img_stream, np.uint8)
img = cv2.imdecode(img_arr, flags=1)
if img.shape[0] == ChallengeStyle.WATERMARK:
img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
img = cv2.resize(img, (64, 64))
blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (64, 64), (0, 0, 0), swapRB=True, crop=False)
# Use the delayed reflection mechanism
# to ensure the consistency of both ends of the distributed network
if self.net is None:
logger.debug("The remote network does not exist or the local cache has expired.")
return False
self.net.setInput(blob)
out = self.net.forward()
logit = out[0]
proba = expit(logit)
if not np.argmax(out, axis=1)[0]:
return True, proba
return False, proba
def execute(self, img_stream: bytes, **kwargs) -> bool | Tuple[bool, np.ndarray]:
"""Implementation process of solution"""
get_proba = kwargs.get("proba", False)
result = self.binary_classify(img_stream)
if result is None or isinstance(result, bool):
return result
if isinstance(result, tuple):
prediction, confidence = result
if get_proba is True:
return prediction, confidence
return prediction
class YOLOv8:
conf_threshold: float = 0.5
iou_threshold: float = 0.5
classes: List[str] = field(default_factory=list)
session: InferenceSession = None
input_names = None
input_shape = None
input_height = None
input_width = None
output_names = None
img_height = None
img_width = None
def __post_init__(self):
model_inputs = self.session.get_inputs()
self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
self.input_shape = model_inputs[0].shape
self.input_height = self.input_shape[2]
self.input_width = self.input_shape[3]
model_outputs = self.session.get_outputs()
self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]
def from_pluggable_model(cls, session: InferenceSession, classes: List[str]):
return cls(session=session, classes=classes)
def __call__(self, image: Path | bytes, shape_type: Literal["point", "bounding_box"] = "point"):
if isinstance(image, Path):
image = image.read_bytes()
np_array = np.frombuffer(image, np.uint8)
image = cv2.imdecode(np_array, flags=1)
boxes, scores, class_ids = self.detect_objects(image)
response = []
if shape_type == "point":
for i, class_id in enumerate(class_ids):
x1, y1, x2, y2 = boxes[i]
center_x, center_y = int((x2 - x1) / 2 + x1), int((y2 - y1) / 2 + y1)
response.append((self.classes[class_id], (center_x, center_y), scores[i]))
elif shape_type == "bounding_box":
for i, class_id in enumerate(class_ids):
x1, y1, x2, y2 = boxes[i]
point_start, point_end = (x1, y1), (x2, y2)
response.append((self.classes[class_id], point_start, point_end, scores[i]))
return response
def detect_objects(self, image: np.ndarray):
self.img_height, self.img_width = image.shape[:2]
input_tensor = self._prepare_input(image)
# Perform inference on the image
outputs = self.session.run(self.output_names, {self.input_names[0]: input_tensor})
boxes, scores, class_ids = self._process_output(outputs)
return boxes, scores, class_ids
def _prepare_input(self, image):
input_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Resize input image
input_img = cv2.resize(input_img, (self.input_width, self.input_height))
# Scale input pixel values to 0 to 1
input_img = input_img / 255.0
input_img = input_img.transpose(2, 0, 1)
input_tensor = input_img[np.newaxis, :, :, :].astype(np.float32)
return input_tensor
def _process_output(self, output):
predictions = np.squeeze(output[0]).T
# Filter out object confidence scores below threshold
scores = np.max(predictions[:, 4:], axis=1)
predictions = predictions[scores > self.conf_threshold, :]
scores = scores[scores > self.conf_threshold]
if len(scores) == 0:
return [], [], []
# Get the class with the highest confidence
class_ids = np.argmax(predictions[:, 4:], axis=1)
# Get bounding boxes for each object
# Extract boxes from predictions
boxes = predictions[:, :4]
# Scale boxes to original image dimensions
# Rescale boxes to original image dimensions
input_shape = np.array(
[self.input_width, self.input_height, self.input_width, self.input_height]
)
boxes = np.divide(boxes, input_shape, dtype=np.float32)
boxes *= np.array([self.img_width, self.img_height, self.img_width, self.img_height])
# Convert boxes to xyxy format
boxes = xywh2xyxy(boxes)
# Apply non-maxima suppression to suppress weak, overlapping bounding boxes
# indices = nms(boxes, scores, self.iou_threshold)
indices = multiclass_nms(boxes, scores, class_ids, self.iou_threshold)
return boxes[indices], scores[indices], class_ids[indices]
The provided code snippet includes necessary dependencies for implementing the `match_model` function. Write a Python function `def match_model( label: str, ash: str, modelhub: ModelHub, select: Literal["yolo", "resnet"] = None ) -> ResNetControl | YOLOv8` to solve the following problem:
match solution after `tactical_retreat`
Here is the function:
def match_model(
label: str, ash: str, modelhub: ModelHub, select: Literal["yolo", "resnet"] = None
) -> ResNetControl | YOLOv8:
"""match solution after `tactical_retreat`"""
focus_label = modelhub.label_alias.get(label, "")
# Match YOLOv8 model
if not focus_label or select == "yolo":
focus_name, classes = modelhub.apply_ash_of_war(ash=ash)
session = modelhub.match_net(focus_name=focus_name)
detector = YOLOv8.from_pluggable_model(session, classes)
return detector
# Match ResNet model
focus_name = focus_label
if not focus_name.endswith(".onnx"):
focus_name = f"{focus_name}.onnx"
net = modelhub.match_net(focus_name=focus_name)
control = ResNetControl.from_pluggable_model(net)
return control | match solution after `tactical_retreat` |
161,179 | from __future__ import annotations
import asyncio
import hashlib
import shutil
import time
from contextlib import suppress
from pathlib import Path
from typing import Literal, List, Tuple
from hcaptcha_challenger.components.image_downloader import Cirilla
from hcaptcha_challenger.components.middleware import QuestionResp
from hcaptcha_challenger.onnx.modelhub import ModelHub, DataLake
from hcaptcha_challenger.onnx.resnet import ResNetControl
from hcaptcha_challenger.onnx.yolo import YOLOv8
class Cirilla:
def __init__(self):
async def elder_blood(self, context):
class QuestionResp(BaseModel):
def check_requester_question_example(cls, v: str | List[str]):
def cache(self, tmp_dir: Path):
async def download_challenge_images(
qr: QuestionResp, label: str, tmp_dir: Path, ignore_examples: bool = False
):
request_type = qr.request_type
ks = list(qr.requester_restricted_answer_set.keys())
inv = {"\\", "/", ":", "*", "?", "<", ">", "|"}
for c in inv:
label = label.replace(c, "")
label = label.strip()
if len(ks) > 0:
typed_dir = tmp_dir.joinpath(request_type, label, ks[0])
else:
typed_dir = tmp_dir.joinpath(request_type, label)
typed_dir.mkdir(parents=True, exist_ok=True)
ciri = Cirilla()
container = []
tasks = []
for i, tk in enumerate(qr.tasklist):
challenge_img_path = typed_dir.joinpath(f"{time.time()}.{i}.png")
context = (challenge_img_path, tk.datapoint_uri)
container.append(context)
tasks.append(asyncio.create_task(ciri.elder_blood(context)))
examples = []
if not ignore_examples:
with suppress(Exception):
for i, uri in enumerate(qr.requester_question_example):
example_img_path = typed_dir.joinpath(f"{time.time()}.exp.{i}.png")
context = (example_img_path, uri)
examples.append(context)
tasks.append(asyncio.create_task(ciri.elder_blood(context)))
await asyncio.gather(*tasks)
# Optional deduplication
_img_paths = []
for src, _ in container:
cache = src.read_bytes()
dst = typed_dir.joinpath(f"{hashlib.md5(cache).hexdigest()}.png")
shutil.move(src, dst)
_img_paths.append(dst)
# Optional deduplication
_example_paths = []
if examples:
for src, _ in examples:
cache = src.read_bytes()
dst = typed_dir.joinpath(f"{hashlib.md5(cache).hexdigest()}.png")
shutil.move(src, dst)
_example_paths.append(dst)
return _img_paths, _example_paths | null |
161,180 | from __future__ import annotations
import asyncio
from pathlib import Path
from typing import Tuple
from istockphoto import Istock
# Mined images are stored next to this script.
tmp_dir = Path(__file__).parent
def select_phrase(phrase: str):
    """Mine up to 4 pages of iStock images matching *phrase* into tmp_dir."""
    collector = Istock.from_phrase(phrase, tmp_dir)
    collector.pages = 4
    asyncio.run(collector.mining())
161,181 | from __future__ import annotations
import asyncio
from pathlib import Path
from typing import Tuple
from istockphoto import Istock
# Mined images are stored next to this script.
tmp_dir = Path(__file__).parent
def similar_phrase(phrase_with_id: Tuple[str, str] | None):
    """Mine 2 pages of images similar to a specific iStock asset.

    :param phrase_with_id: ``(phrase, istock_asset_id)`` pair; a falsy value
        is a no-op.
    """
    if not phrase_with_id:
        return
    phrase, asset_id = phrase_with_id
    collector = Istock.from_phrase(phrase, tmp_dir)
    collector.pages = 2
    collector.more_like_this(asset_id)
    asyncio.run(collector.mining())
161,182 | from __future__ import annotations
import asyncio
import hashlib
import os
import shutil
import time
import zipfile
from contextlib import suppress
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any
from urllib.parse import quote
import cv2
import numpy as np
from github import Auth, Github
from github.GitRelease import GitRelease
from github.GitReleaseAsset import GitReleaseAsset
from github.Issue import Issue
from loguru import logger
from playwright.async_api import BrowserContext as ASyncContext, async_playwright
from hcaptcha_challenger import split_prompt_message, label_cleaning
from hcaptcha_challenger.agents import AgentT, Malenia
# Markdown template for the dataset summary comment posted on collector issues.
# Placeholders are filled via str.format in create_comment().
TEMPLATE_BINARY_DATASETS = """
> Automated deployment @ utc {now}
| Attributes | Details |
| ---------- | ---------------------------- |
| prompt | {prompt} |
| type | `{type}` |
| cases | {cases_num} |
| statistics | [#asset]({statistics}) |
| assets | [{zip_name}]({download_url}) |

"""
# Label added to an issue once its dataset collection is finished.
_post_label = "☄️ci: collector"
class Gravitas:
    """One collectable challenge, parsed from a sentinel GitHub issue.

    Holds the prompt/type/sitelink payload extracted from the issue body and
    the on-disk directory where samples are collected.
    """

    issue: Issue
    # NOTE(review): field(default=str) makes the default *value* the builtin
    # ``str`` type, not "" — __post_init__ always overwrites these, but
    # field(default="") looks intended; confirm upstream (the @dataclass
    # decorator is not visible in this chunk).
    challenge_prompt: str = field(default=str)
    request_type: str = field(default=str)
    sitelink: str = field(default=str)
    mixed_label: str = field(default=str)
    """
    binary --> challenge_prompt
    area_select --> model_name
    """
    parent_prompt: str = field(default=str)
    typed_dir: Path = None
    """
    init by collector
    ./automation/tmp_dir/image_label_binary/{mixed_label}/
    ./automation/tmp_dir/image_label_area_select/{question}/{mixed_label}
    """
    cases_num: int = 0

    def __post_init__(self):
        # Issue bodies are templated; the non-empty lines at fixed offsets
        # carry prompt (2), request type (4) and sitelink (6).
        body = [i for i in self.issue.body.split("\n") if i]
        self.challenge_prompt = body[2]
        self.request_type = body[4]
        self.sitelink = body[6]
        # Titles shaped "<prefix> <mixed_label> ... @ <parent_prompt>" mark
        # area-select tasks; everything else is binary classification.
        if "@" in self.issue.title:
            self.mixed_label = self.issue.title.split(" ")[1].strip()
            self.parent_prompt = self.issue.title.split("@")[-1].strip()
        else:
            self.mixed_label = split_prompt_message(self.challenge_prompt, lang="en")
            self.parent_prompt = "image_label_binary"

    def from_issue(cls, issue: Issue):
        # NOTE(review): takes `cls` — reads like a @classmethod alternate
        # constructor; the decorator is not visible in this chunk.
        return cls(issue=issue)

    def montage(self):
        """Build a horizontal thumbnail strip from the first 9 collected
        images (each resized to 256x256) and return its path."""
        asset_name = f"{self.typed_dir.parent.name}.{self.typed_dir.name}"
        label_diagnose_name = label_cleaning(asset_name)
        # Reformat "now" into a compact timestamp for the filename.
        __formats = ("%Y-%m-%d %H:%M:%S.%f", "%Y%m%d%H%M%f")
        now = datetime.strptime(str(datetime.now()), __formats[0]).strftime(__formats[1])
        thumbnail_path = self.typed_dir.parent.joinpath(
            f"thumbnail.{label_diagnose_name}.{now}.png"
        )
        images = []
        for img_name in os.listdir(self.typed_dir):
            img_path = self.typed_dir.joinpath(img_name)
            image = cv2.imread(str(img_path))
            image = cv2.resize(image, (256, 256))
            images.append(image)
            if len(images) == 9:
                break
        thumbnail = np.hstack(images)
        cv2.imwrite(str(thumbnail_path), thumbnail)
        return thumbnail_path

    def zip(self):
        """Pack the typed dataset directory into a flat timestamped zip and
        return the archive path (files are stored without subdirectories)."""
        asset_name = f"{self.typed_dir.parent.name}.{self.typed_dir.name}"
        label_diagnose_name = label_cleaning(asset_name)
        __formats = ("%Y-%m-%d %H:%M:%S.%f", "%Y%m%d%H%M%f")
        now = datetime.strptime(str(datetime.now()), __formats[0]).strftime(__formats[1])
        zip_path = self.typed_dir.parent.joinpath(f"{label_diagnose_name}.{now}.zip")
        logger.info("pack datasets", mixed=zip_path.name)
        with zipfile.ZipFile(zip_path, "w") as zip_file:
            for root, dirs, files in os.walk(self.typed_dir):
                for file in files:
                    # arcname=file flattens the archive.
                    zip_file.write(os.path.join(root, file), file)
        return zip_path

    def to_asset(archive_release: GitRelease, zip_path: Path) -> GitReleaseAsset:
        # NOTE(review): no `self` parameter — reads like a @staticmethod;
        # the decorator is not visible in this chunk.
        logger.info("upload datasets", mixed=zip_path.name)
        res = archive_release.upload_asset(path=str(zip_path))
        return res
class GravitasState:
    """Progress snapshot for one Gravitas collection run."""

    # True when the issue's dataset is considered fully collected.
    done: bool
    # Number of samples gathered so far.
    cases_num: int
    # Directory the samples were written to, if any were collected.
    typed_dir: Path | None = None
def upload_thumbnail(canvas_path: Path, mixed_label: str) -> str | None:
    """Push a thumbnail PNG to the CDN relay repo and return its raw URL.

    :param canvas_path: local path of the thumbnail image
    :param mixed_label: label embedded in the remote filename
    :return: ``?raw=true`` GitHub URL of the uploaded asset
    """
    gh = Github(auth=Auth.Token(os.getenv("GITHUB_TOKEN")))
    asset_repo = gh.get_repo("QIN2DIM/cdn-relay")
    branch = "main"
    asset_path = f"challenge-thumbnail/{time.time()}.{mixed_label}.png"
    content = canvas_path.read_bytes()
    # NOTE(review): PyGithub's update_file expects the blob SHA of the file
    # being replaced; a sha256 of the *new* content is passed here — confirm
    # this is intended.
    asset_repo.update_file(
        path=asset_path,
        message="Automated deployment @ 2023-09-03 01:30:15 Asia/Shanghai",
        content=content,
        sha=hashlib.sha256(content).hexdigest(),
        branch=branch,
    )
    asset_url = (
        f"https://github.com/{asset_repo.full_name}/blob/{branch}/{quote(asset_path)}?raw=true"
    )
    logger.info(f"upload challenge-thumbnail", asset_url=asset_url)
    return asset_url
def create_comment(asset: GitReleaseAsset, gravitas: Gravitas, gs: GravitasState):
    """Post a dataset summary comment on the collector issue.

    Builds a montage thumbnail, uploads it, renders the summary template and
    comments on the issue. When the run is complete, the collector label is
    added so the issue is not picked up again.

    :param asset: the uploaded dataset zip asset
    :param gravitas: the collection task (provides issue and metadata)
    :param gs: run state; ``gs.done`` gates the label update
    """
    montage_path = gravitas.montage()
    montage_url = upload_thumbnail(canvas_path=montage_path, mixed_label=gravitas.mixed_label)
    body = TEMPLATE_BINARY_DATASETS.format(
        now=str(datetime.now()),
        prompt=gravitas.challenge_prompt,
        type=gravitas.request_type,
        cases_num=gravitas.cases_num,
        zip_name=asset.name,
        download_url=asset.browser_download_url,
        statistics=asset.url,
        # The template has no {montage_url} slot; str.format ignores extras.
        montage_url=montage_url,
    )
    comment = gravitas.issue.create_comment(body=body)
    logger.success(f"create comment", html_url=comment.html_url)
    if gs.done:
        # BUG FIX: `issue_post_label` was undefined (NameError at runtime);
        # the collector label constant defined in this module is `_post_label`.
        gravitas.issue.add_to_labels(_post_label)
161,183 | from __future__ import annotations
import asyncio
import hashlib
import os
import shutil
import time
import zipfile
from contextlib import suppress
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any
from urllib.parse import quote
import cv2
import numpy as np
from github import Auth, Github
from github.GitRelease import GitRelease
from github.GitReleaseAsset import GitReleaseAsset
from github.Issue import Issue
from loguru import logger
from playwright.async_api import BrowserContext as ASyncContext, async_playwright
from hcaptcha_challenger import split_prompt_message, label_cleaning
from hcaptcha_challenger.agents import AgentT, Malenia
# Labels that mark collectable sentinel issues.
issue_labels = ["🔥 challenge", "🏹 ci: sentinel"]


# BUG FIX: the constant above and the class header were fused into one
# garbled line (`...]lass Gravitas:`); reconstructed as two statements.
class Gravitas:
    """One collectable challenge, parsed from a sentinel GitHub issue."""

    issue: Issue
    # NOTE(review): field(default=str) makes the default *value* the builtin
    # ``str`` type; __post_init__ overwrites these, but field(default="")
    # looks intended — confirm upstream.
    challenge_prompt: str = field(default=str)
    request_type: str = field(default=str)
    sitelink: str = field(default=str)
    mixed_label: str = field(default=str)
    """
    binary --> challenge_prompt
    area_select --> model_name
    """
    parent_prompt: str = field(default=str)
    typed_dir: Path = None
    """
    init by collector
    ./automation/tmp_dir/image_label_binary/{mixed_label}/
    ./automation/tmp_dir/image_label_area_select/{question}/{mixed_label}
    """
    cases_num: int = 0

    def __post_init__(self):
        # Issue bodies are templated; non-empty lines 2/4/6 carry the payload.
        body = [i for i in self.issue.body.split("\n") if i]
        self.challenge_prompt = body[2]
        self.request_type = body[4]
        self.sitelink = body[6]
        # "@" in the title marks area-select tasks; otherwise binary.
        if "@" in self.issue.title:
            self.mixed_label = self.issue.title.split(" ")[1].strip()
            self.parent_prompt = self.issue.title.split("@")[-1].strip()
        else:
            self.mixed_label = split_prompt_message(self.challenge_prompt, lang="en")
            self.parent_prompt = "image_label_binary"

    def from_issue(cls, issue: Issue):
        # NOTE(review): reads like a @classmethod alternate constructor;
        # the decorator is not visible in this chunk.
        return cls(issue=issue)

    def montage(self):
        """Build a horizontal strip of the first 9 images (256x256 each)."""
        asset_name = f"{self.typed_dir.parent.name}.{self.typed_dir.name}"
        label_diagnose_name = label_cleaning(asset_name)
        __formats = ("%Y-%m-%d %H:%M:%S.%f", "%Y%m%d%H%M%f")
        now = datetime.strptime(str(datetime.now()), __formats[0]).strftime(__formats[1])
        thumbnail_path = self.typed_dir.parent.joinpath(
            f"thumbnail.{label_diagnose_name}.{now}.png"
        )
        images = []
        for img_name in os.listdir(self.typed_dir):
            img_path = self.typed_dir.joinpath(img_name)
            image = cv2.imread(str(img_path))
            image = cv2.resize(image, (256, 256))
            images.append(image)
            if len(images) == 9:
                break
        thumbnail = np.hstack(images)
        cv2.imwrite(str(thumbnail_path), thumbnail)
        return thumbnail_path

    def zip(self):
        """Pack the typed dataset directory into a flat timestamped zip."""
        asset_name = f"{self.typed_dir.parent.name}.{self.typed_dir.name}"
        label_diagnose_name = label_cleaning(asset_name)
        __formats = ("%Y-%m-%d %H:%M:%S.%f", "%Y%m%d%H%M%f")
        now = datetime.strptime(str(datetime.now()), __formats[0]).strftime(__formats[1])
        zip_path = self.typed_dir.parent.joinpath(f"{label_diagnose_name}.{now}.zip")
        logger.info("pack datasets", mixed=zip_path.name)
        with zipfile.ZipFile(zip_path, "w") as zip_file:
            for root, dirs, files in os.walk(self.typed_dir):
                for file in files:
                    # arcname=file flattens the archive.
                    zip_file.write(os.path.join(root, file), file)
        return zip_path

    def to_asset(archive_release: GitRelease, zip_path: Path) -> GitReleaseAsset:
        # NOTE(review): no `self` — reads like a @staticmethod; decorator
        # not visible in this chunk.
        logger.info("upload datasets", mixed=zip_path.name)
        res = archive_release.upload_asset(path=str(zip_path))
        return res
return res
def load_gravitas_from_issues() -> List[Gravitas]:
    """Collect Gravitas tasks from recently updated sentinel issues.

    Only issues whose body contains the automated-deployment marker are
    accepted; everything else (e.g. manually created issues) is skipped.
    """
    gh = Github(auth=Auth.Token(os.getenv("GITHUB_TOKEN")))
    issue_repo = gh.get_repo("QIN2DIM/hcaptcha-challenger")
    issues = issue_repo.get_issues(
        labels=issue_labels,
        state="all",  # fixme `open`
        since=datetime.now() - timedelta(days=90),  # fixme `24hours`
    )
    tasks = []
    for issue in issues:
        body = issue.body
        if isinstance(body, str) and "Automated deployment @" in body:
            tasks.append(Gravitas.from_issue(issue))
    return tasks
161,184 | from __future__ import annotations
import asyncio
import hashlib
import os
import shutil
import time
import zipfile
from contextlib import suppress
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any
from urllib.parse import quote
import cv2
import numpy as np
from github import Auth, Github
from github.GitRelease import GitRelease
from github.GitReleaseAsset import GitReleaseAsset
from github.Issue import Issue
from loguru import logger
from playwright.async_api import BrowserContext as ASyncContext, async_playwright
from hcaptcha_challenger import split_prompt_message, label_cleaning
from hcaptcha_challenger.agents import AgentT, Malenia
def get_archive_release() -> GitRelease:
    """Return the fixed whistleblower-repo release used to archive datasets."""
    token = Auth.Token(os.getenv("GITHUB_TOKEN"))
    repo = Github(auth=token).get_repo("CaptchaAgent/hcaptcha-whistleblower")
    return repo.get_release(120534711)
161,185 | from typing import Union
from fastapi import FastAPI
async def read_root():
    """Root endpoint handler returning a static greeting payload."""
    payload = {"Hello": "World"}
    return payload
161,186 | from typing import Union
from fastapi import FastAPI
async def read_item(item_id: int, q: Union[str, None] = None):
    """Item endpoint handler echoing the path id and optional query string."""
    return dict(item_id=item_id, q=q)
161,187 | import requests
import urllib3
import yaml
import os
from typing import Any, Dict, List, Optional
def fetch_html(url: str):
    """Fetch *url* and return its body text, or None on any failure.

    Best-effort by design: connection errors and non-200 responses are
    printed and swallowed so one dead subscription does not abort the run.
    """
    try:
        # NOTE: verify=False deliberately skips TLS certificate checks.
        resp: requests.Response = requests.get(url, verify=False, timeout=10)
        resp.encoding = 'utf-8'
        if resp.status_code == 200:
            return resp.text
        print(f'[!] Got HTTP Status Code {resp.status_code}')
        return None
    except Exception as expt:
        print(url)
        print(expt)
        return None
161,188 | import requests
import urllib3
import yaml
import os
from typing import Any, Dict, List, Optional
# Local Clash config template that merged proxies are injected into.
clash_output_tpl: str = './clash.config.template.yaml'
# Blacklisted (server, port) pairs, parsed from "host:port" lines.
blacklist: List[str] = list(map(lambda l: l.strip().split(':'), open('./blacklists.txt').readlines()))
def merge_clash(configs: List[str]) -> str:
    """Merge multiple Clash YAML configs into the local template.

    Proxies are filtered against the blacklist, deduplicated by
    (server, port), renamed to stay unique (``name_{i}@{j}``), and the
    resulting node names are appended to every proxy-group containing the
    ``xxx`` placeholder.

    :param configs: raw YAML documents, one per upstream subscription
    :return: the merged config dumped as YAML
    """
    config_template: Dict[str, Any] = yaml.safe_load(open(clash_output_tpl).read())
    proxies: List[Dict[str, Any]] = []
    for i, raw in enumerate(configs):
        try:
            tmp_config: Dict[str, Any] = yaml.safe_load(raw)
        except Exception:
            print(f'[!] Failed to Load a YAML')
            continue
        # BUG FIX: yaml.safe_load("") returns None, and the old membership
        # test (`'proxies' not in None`) raised TypeError outside the except.
        if not tmp_config or 'proxies' not in tmp_config:
            continue
        for j, proxy in enumerate(tmp_config['proxies']):
            # Skip blacklisted endpoints (port compared as string).
            if any(b[0] == proxy['server'] and str(b[1]) == str(proxy['port']) for b in blacklist):
                continue
            # Skip endpoints already collected from an earlier config.
            if any(p['server'] == proxy['server'] and p['port'] == proxy['port'] for p in proxies):
                continue
            # Suffix with source indices so names stay unique after merging.
            proxy['name'] = proxy['name'] + f'_{i}@{j}'
            proxies.append(proxy)
    node_names: List[str] = [p['name'] for p in proxies]
    config_template['proxies'] = proxies
    for grp in config_template['proxy-groups']:
        if 'xxx' in grp['proxies']:
            grp['proxies'].remove('xxx')
            grp['proxies'].extend(node_names)
    return yaml.safe_dump(config_template, indent=1, allow_unicode=True)
161,189 | import requests
import urllib3
import yaml
import os
from typing import Any, Dict, List, Optional
def merge_v2ray(configs: List[Optional[str]]) -> str:
    """Concatenate the non-empty v2ray subscription bodies, one per line."""
    valid_bodies = [body for body in configs if body]
    return '\n'.join(valid_bodies)
161,190 | import logging
import structlog
def configure_logging(pretty=True):
    """Configure stdlib logging and structlog at DEBUG level.

    :param pretty: when True, install a colored console renderer with log
        level and exception info; when False, install an empty processor
        chain.
    """
    logging.basicConfig(
        level=logging.DEBUG,
    )
    structlog.configure(
        wrapper_class=structlog.make_filtering_bound_logger(logging.DEBUG),
    )
    if pretty:
        processors = [
            structlog.stdlib.add_log_level,  # add log level
            structlog.dev.set_exc_info,  # add exception info
            structlog.dev.ConsoleRenderer(colors=True),
        ]
    else:
        # NOTE(review): an empty processor chain installs no renderer —
        # confirm the non-pretty path is actually usable.
        processors = []
    # Second configure call; presumably only the keys passed here are
    # overridden so the wrapper_class above survives — confirm against the
    # structlog.configure documentation.
    structlog.configure(
        processors=processors,
        cache_logger_on_first_use=True,
    )
161,191 | import logging
import structlog
def get_logger(*args, **kwargs):
    """Thin pass-through to :func:`structlog.get_logger`."""
    return structlog.get_logger(*args, **kwargs)
161,192 | import glob
import os
import pydantic
import yaml
from autopr.models.config.entrypoints import TopLevelTriggerConfig
class TopLevelTriggerConfig(StrictModel):
    """Schema of a triggers YAML file: a top-level list of triggers."""

    triggers: list[Trigger] = Field(
        default_factory=list
    )  # pyright: ignore[reportGeneralTypeIssues]
def get_all_triggers(
    config_dir: str = ".autopr",
    repo_path: str = ".",
):
    """Load and concatenate all triggers defined under the config directory.

    Accepts both a single ``triggers.yaml``/``triggers.yml`` file and any
    YAML files in a ``triggers/`` subfolder.
    """
    candidate_globs = [
        os.path.join(repo_path, config_dir, "triggers.yaml"),
        os.path.join(repo_path, config_dir, "triggers.yml"),
        os.path.join(repo_path, config_dir, "triggers", "*.yaml"),
        os.path.join(repo_path, config_dir, "triggers", "*.yml"),
    ]
    trigger_paths = [p for pattern in candidate_globs for p in glob.glob(pattern)]

    triggers = []
    for path in trigger_paths:
        with open(path) as f:
            contents = yaml.safe_load(f)
        if contents is None:
            # Empty file — nothing to parse.
            continue
        parsed = pydantic.parse_obj_as(TopLevelTriggerConfig, contents)
        triggers.extend(parsed.triggers)
    return triggers
161,193 | import copy
import itertools
import math
from typing import Union, ForwardRef
import openai.error
import pydantic
import tenacity
import tiktoken
from jinja2 import Template
from tenacity import wait_exponential_jitter, retry_if_exception_type
from autopr.models.config.transform import (
TransformsInto,
ImplementsTransformsInContext,
RealType,
TransformsFrom,
)
from autopr.models.config.value_declarations import (
VarDeclaration,
TemplateDeclaration,
LambdaDeclaration,
ConstDeclaration,
)
from autopr.models.executable import TemplateString, ContextDict, ContextVarPath
def get_string_token_length(string: str, model: str):
    """Count the tokens *string* encodes to under *model*'s tiktoken encoding."""
    encoding = tiktoken.encoding_for_model(model)
    tokens = encoding.encode(string)
    return len(tokens)
161,194 | import copy
import itertools
import math
from typing import Union, ForwardRef
import openai.error
import pydantic
import tenacity
import tiktoken
from jinja2 import Template
from tenacity import wait_exponential_jitter, retry_if_exception_type
from autopr.models.config.transform import (
TransformsInto,
ImplementsTransformsInContext,
RealType,
TransformsFrom,
)
from autopr.models.config.value_declarations import (
VarDeclaration,
TemplateDeclaration,
LambdaDeclaration,
ConstDeclaration,
)
from autopr.models.executable import TemplateString, ContextDict, ContextVarPath
class PromptContext(pydantic.BaseModel, TransformsFrom):
    """
    A dictionary mapping heading strings to context variable values.
    Overrides `__str__` to format the context in a prompt-friendly way.
    """

    def _get_config_type(cls):
        # NOTE(review): takes `cls` — reads like a @classmethod whose
        # decorator is not visible in this chunk; confirm upstream.
        return PromptContextInConfig

    # Pydantic v1 custom-root model: the payload is a list of entries.
    __root__: list[PromptContextEntry]

    def get_token_length(self, model: str) -> int:
        """Return the token count of the rendered context for *model*."""
        return get_string_token_length(self.as_string(), model)

    def _resolve_template_string(self, template_string: TemplateString, context: ContextDict):
        # Render a Jinja2 template string against the given context dict.
        return Template(template_string).render(context)

    def as_string(
        self,
        enclosure_mark: str = "```",
    ):
        """
        Format the context as a string: each entry becomes its heading
        followed by its value fenced by *enclosure_mark*, entries joined by
        blank lines.

        Parameters
        ----------
        enclosure_mark
            The string to use to enclose each variable.
        """
        if len(self.__root__) == 0:
            return ""
        context_strings = []
        for heading_entry in self.__root__:
            value = heading_entry.value
            heading = heading_entry.heading
            # Format the value as a string
            if isinstance(value, list):
                valstr = "\n".join(str(item) for item in value)
            else:
                valstr = str(value)
            # Add the variable to the context string
            context_strings.append(
                f"""{heading}:
{enclosure_mark}
{valstr}
{enclosure_mark}"""
            )
        return "\n\n".join(context_strings)

    def __str__(self):
        return self.as_string()
def trim_context(
    prompt_context: PromptContext, max_token_length: int, strategy: str, model: str
) -> PromptContext:
    """Trim *prompt_context* down to at most *max_token_length* tokens.

    Only the "middle out" strategy is supported: entries are processed in
    ascending priority; whole low-priority groups are dropped when they fit
    the trim budget, otherwise entries have their middles replaced by a
    "(trimmed)" marker (or are dropped if the middle would be everything).

    :raises ValueError: for any unknown *strategy*
    """
    # Count tokens in context entries
    token_length = prompt_context.get_token_length(model)
    # If context is short enough, return it
    if token_length <= max_token_length:
        return prompt_context
    if strategy == "middle out":
        trimmed_text = "\n\n\n... (trimmed) ...\n\n\n"
        trimmed_text_char_length = len(trimmed_text)
        # Create a copy of context entries
        context_entries_copy = copy.deepcopy(prompt_context.__root__)
        correct_order_context = PromptContext(
            __root__=context_entries_copy,
        )
        # Sort context_entries by priority
        sorted_entries = sorted(context_entries_copy, key=lambda x: x.priority)
        # Group entries by priority
        grouped_entries = itertools.groupby(sorted_entries, key=lambda x: x.priority)
        # Calculate total length of context entries
        total_token_length = correct_order_context.get_token_length(model)
        total_char_length = len(prompt_context.as_string())

        # Estimate characters needed to trim based on token and char length, rounded up
        def get_chars_left_to_trim():
            tokens_left_to_trim = correct_order_context.get_token_length(model) - max_token_length
            chars_per_token = math.ceil(total_char_length / total_token_length)
            return tokens_left_to_trim * chars_per_token

        chars_to_trim = get_chars_left_to_trim()
        # From each priority group, trim their middle in equal amounts
        # Try to trim the necessary amount of characters from the lowest priority group first
        # If you would trim the whole content of an entry, drop it from the context instead
        for _, entries in grouped_entries:
            if chars_to_trim <= 0:
                break
            entries = list(entries)
            if len(PromptContext(__root__=entries).as_string()) <= chars_to_trim:
                # Whole group fits the budget — drop it entirely.
                correct_order_context.__root__ = [
                    entry for entry in correct_order_context.__root__ if entry not in entries
                ]
                chars_to_trim = get_chars_left_to_trim()
                continue
            # iteratively halve the amount of characters to trim until it fits
            while chars_to_trim > 0:
                if not entries:
                    # Safety: nothing left to trim in this group; move on
                    # instead of spinning forever.
                    break
                for entry in entries[:]:
                    if chars_to_trim <= 0:
                        break
                    entry_char_length = len(PromptContext(__root__=[entry]).as_string())
                    truncate_char_amount = min(
                        entry_char_length // 2 + trimmed_text_char_length,
                        chars_to_trim + trimmed_text_char_length,
                        entry_char_length,
                    )
                    if truncate_char_amount >= entry_char_length - trimmed_text_char_length:
                        # Drop the entry
                        entries.remove(entry)
                        # BUG FIX: the old comprehension variable shadowed
                        # `entry` (`if entry != entry` is always False),
                        # which wiped the entire context instead of removing
                        # only this entry.
                        correct_order_context.__root__ = [
                            e for e in correct_order_context.__root__ if e is not entry
                        ]
                        chars_to_trim = get_chars_left_to_trim()
                        continue
                    # Keep the start and end, drop the middle
                    entry_value_char_length = len(entry.value)
                    start = entry.value[: entry_value_char_length // 2 - truncate_char_amount // 2]
                    end = entry.value[entry_value_char_length // 2 + truncate_char_amount // 2 :]
                    entry.value = start + "\n\n\n... (trimmed) ...\n\n\n" + end
                    chars_to_trim = get_chars_left_to_trim()
        return correct_order_context
    raise ValueError(f"Invalid strategy: {strategy}")
161,195 | import copy
import itertools
import math
from typing import Union, ForwardRef
import openai.error
import pydantic
import tenacity
import tiktoken
from jinja2 import Template
from tenacity import wait_exponential_jitter, retry_if_exception_type
from autopr.models.config.transform import (
TransformsInto,
ImplementsTransformsInContext,
RealType,
TransformsFrom,
)
from autopr.models.config.value_declarations import (
VarDeclaration,
TemplateDeclaration,
LambdaDeclaration,
ConstDeclaration,
)
from autopr.models.executable import TemplateString, ContextDict, ContextVarPath
async def invoke_openai(
    prompt: str, instructions: str, model: str, temperature: float, max_response_tokens: int
) -> str:
    """Run one chat completion and return the assistant message text.

    :param prompt: user message content
    :param instructions: system message content
    :param model: OpenAI chat model name
    :param temperature: sampling temperature
    :param max_response_tokens: completion token cap
    """
    messages = [
        {"role": "system", "content": instructions},
        {"role": "user", "content": prompt},
    ]
    result = await openai.ChatCompletion.acreate(
        messages=messages,
        model=model,
        temperature=temperature,
        max_tokens=max_response_tokens,
    )
    return result["choices"][0]["message"]["content"]  # type: ignore[reportGeneralTypeIssues]
161,196 | import json
from typing import Any
import pydantic
def truncate_strings(obj: Any, length: int = 100) -> Any:
def nested_to_dict(obj: Any) -> Any:
def format_for_publishing(obj: Any) -> str:
    """Serialize *obj* as pretty JSON for publishing.

    The object is first converted to plain dicts, dunder keys are dropped
    from the top level, and long strings are truncated.
    """
    dict_obj = nested_to_dict(obj)
    if isinstance(dict_obj, dict):
        # Drop top-level dunder keys (e.g. internal bookkeeping fields).
        dict_obj = {
            k: v
            for k, v in dict_obj.items()
            if not (k.startswith("__") and k.endswith("__"))
        }
    return json.dumps(truncate_strings(dict_obj), indent=2)
161,197 | import datetime
import json
import os
import random
import re
import sys
import time
import typing
from typing import Any, Union, Optional, Literal
import pydantic
from pydantic import Field
import yaml
from autopr.actions.base import get_actions_dict
from autopr.models.config.common import StrictModel, ExtraModel
from autopr.models.config.transform import TransformsFrom
from autopr.models.config.value_declarations import ValueDeclaration, EVAL_CONTEXT
from autopr.models.executable import (
LambdaString,
ContextVarPath,
ExecutableId,
Executable,
TemplateObject,
ContextVarName,
ContextDict,
StrictExecutable,
TemplateString,
)
class ActionConfig(ExecModel):
    """Invocation of a single action with optional input/output mappings."""

    # Id of the action to run.
    action: ExecutableId
    inputs: Optional[ExtraModel] = None  # [str, ValueDeclaration]
    outputs: Optional[ExtraModel] = None  # [str, ContextVarName]
class IterableActionConfig(IterableExecModel):
    """Invocation of an action over an iterable, collecting list outputs."""

    # Id of the action to run.
    action: ExecutableId
    inputs: Optional[ExtraModel] = None  # [str, ValueDeclaration]
    list_outputs: Optional[ExtraModel] = None  # [str, ContextVarName]
# Resolve pydantic forward references now that every model above is declared.
StrictModel.update_forward_refs()
for action in ExecModel.__subclasses__():
    action.update_forward_refs()
for action in IOSpecModel.__subclasses__():
    action.update_forward_refs()
for action in IOValuesModel.__subclasses__():
    action.update_forward_refs()
for action in ContextModel.__subclasses__():
    action.update_forward_refs()
ActionConfig.update_forward_refs()
for action in ActionConfig.__subclasses__():
    action.update_forward_refs()
def get_actions_dict() -> dict[ExecutableId, Type[Action[Any, Any]]]:
    """Return every registered Action subclass, keyed by executable id."""
    # initialize all Action subclass declarations in the folder
    import autopr.actions  # pyright: ignore[reportUnusedImport]

    # return all subclasses of Action as registered in the metaclass
    return ActionMeta.actions_registry
class StrictModel(pydantic.BaseModel):
    """Base model that rejects unknown fields."""

    class Config:
        extra = pydantic.Extra.forbid
        # Does not coerce when not necessary
        smart_union = True
class TransformsFrom:  # (Generic[ConfigType]):
    """
    In the config, some IO types will have a different representation.
    """

    def _get_config_type(cls):  # -> type[ConfigType]:
        # NOTE(review): takes `cls` — likely a @classmethod hook that
        # subclasses override; the decorator is not visible in this chunk.
        raise NotImplementedError
# The ways a value may be declared in config.
ValueDeclaration = Union[
    TemplateDeclaration, VarDeclaration, ConstDeclaration, LambdaDeclaration, ParamDeclaration
]
# Name of a variable in the execution context.
ContextVarName = str
def build_actions():
    """Dynamically create one (model, iterable-model) pair of pydantic
    invocation models per registered action, so the generated jsonschema has
    precise typehints and autocompletion.

    Returns the list of created model classes.
    """
    # Dynamically build action models from currently defined actions
    # for best typehints and autocompletion possible in the jsonschema

    def _templatify_model(
        model: type[pydantic.BaseModel],
        field_type: Optional[type] = None,
        add_union: Optional[type] = None,
        all_optional: bool = False,
    ) -> tuple[type[pydantic.BaseModel], Any]:
        # NOTE: this closure reads `action` from the enclosing loop below —
        # late binding is intentional; it is only ever called inside the loop.
        # Create a new model, put in a field of "field_type" for each input
        template_fields = {}
        for name_, field_ in model.__fields__.items():
            # Get the type of the field, which may be different in context than in the action
            type_ = field_.outer_type_
            # Transform it if so annotated
            if isinstance(type_, type) and issubclass(type_, TransformsFrom):
                type_ = type_._get_config_type()
            # Annotate optional fields with a default of None
            if all_optional or not field_.required:
                default = None
            else:
                default = ...
            template_field = Field(
                default=default,
                alias=field_.alias,
            )
            if field_type is not None:
                type_ = field_type
            if add_union is not None:
                # check that union does not collide with existing type
                if (
                    isinstance(type_, type)
                    and typing.get_origin(type_) is None
                    and issubclass(type_, pydantic.BaseModel)
                ):
                    for field_name in type_.__fields__.keys():
                        if any(field_name in m.__fields__ for m in typing.get_args(add_union)):
                            raise ValueError(f"{field_name} is a restricted field name.")
                # TODO if it's a template, enforce dict structure on the template
                type_ = Union[type_, add_union]
            template_fields[name_] = (type_, template_field)
        inputs_template = pydantic.create_model(
            action.id + model.__name__ + "ActionFieldTemplate",
            __base__=StrictModel,
            __module__=__name__,
            **template_fields,
        )
        inputs_template.update_forward_refs()
        # Annotate with a good default for the inputs themselves,
        # given if any of the inputs are required
        if not all_optional and any(f.required for f in model.__fields__.values()):
            default = ...
        else:
            default = {}
        return inputs_template, default

    actions = get_actions_dict()
    action_models = []
    for action in actions.values():
        # build input fields
        fields = {"action": (Literal[action.id], ...)}  # type: ignore
        inputs = action._get_inputs_type()
        outputs = action._get_outputs_type()
        # `isinstance(None, T)` is True only when T is NoneType, i.e. the
        # action declared no inputs/outputs.
        if not isinstance(None, inputs):
            input_fields = _templatify_model(inputs, add_union=ValueDeclaration)
        else:
            input_fields = (type(None), None)
        fields |= {"inputs": input_fields}
        # build output fields
        if not isinstance(None, outputs):
            output_fields = _templatify_model(outputs, field_type=ContextVarName, all_optional=True)
        else:
            output_fields = (type(None), None)
        invocation_fields = fields | {"outputs": output_fields}
        iterable_invocation_fields = fields | {"list_outputs": output_fields}
        # build action invocation model
        action_basemodel = pydantic.create_model(
            action.id + "ActionModel",
            __base__=ActionConfig,
            __module__=__name__,
            **invocation_fields,  # pyright: ignore[reportGeneralTypeIssues]
        )
        action_models.append(action_basemodel)
        # build iterable action invocation model
        iterable_action_basemodel = pydantic.create_model(
            action.id + "IterableActionModel",
            __base__=IterableActionConfig,
            __module__=__name__,
            **iterable_invocation_fields,  # pyright: ignore[reportGeneralTypeIssues]
        )
        action_models.append(iterable_action_basemodel)
    return action_models
161,198 | import copy
import json
import typing
from typing import Any, Union, Optional, Literal, ForwardRef
import pydantic
from pydantic import Field
from autopr.actions.base import get_actions_dict, Action as ActionBase
from autopr.models.config.elements import (
ExecModel,
ActionConfig,
TopLevelWorkflowConfig,
StrictModel,
WorkflowInvocation,
IterableWorkflowInvocation,
IOSpecModel,
WorkflowDefinition,
IfLambda,
IfContextNotExists,
IfExistsContext,
SetVars,
ContextModel,
IOValuesModel,
ActionConfigs,
ContextActions,
ValueDeclaration,
IterableActionConfig,
Conditional,
)
from autopr.models.config.value_declarations import ParamDeclaration
from autopr.models.events import EventUnion, LabelEvent, CommentEvent, PushEvent, CronEvent
from autopr.models.executable import (
LambdaString,
ContextVarPath,
ExecutableId,
Executable,
TemplateObject,
ContextVarName,
ContextDict,
StrictExecutable,
)
from autopr.workflows import get_all_workflows
def get_params(
    executable: Executable,
    all_workflows: TopLevelWorkflowConfig,
    inspected_workflows: Optional[set[ExecutableId]] = None,
) -> dict[str, Any]:
    """Recursively collect ParamDeclaration defaults reachable from *executable*.

    Walks nested workflows, tracking visited workflow ids in
    *inspected_workflows* to avoid infinite recursion on cycles, and returns
    a mapping of parameter name -> default value.
    """
    if inspected_workflows is None:
        inspected_workflows = set()
    # A bare id that is not a known workflow contributes nothing.
    if isinstance(executable, str) and executable not in all_workflows:
        return {}
    value_defs = []
    if isinstance(
        executable,
        (
            ActionConfig,
            IterableActionConfig,
            WorkflowInvocation,
            IterableWorkflowInvocation,
        ),
    ):
        if executable.inputs:
            # the values of the model are the default values
            for _, val in executable.inputs:
                value_defs.append(val)
            # value_defs.extend(executable.inputs.dict().values())
    elif isinstance(executable, SetVars):
        value_defs.extend(executable.set_vars.values())
    params = {}
    for value_def in value_defs:
        if isinstance(value_def, ParamDeclaration):
            params[value_def.param.name] = value_def.param.default
    if isinstance(executable, list):
        for substep in executable:
            params |= get_params(substep, all_workflows, inspected_workflows)
    elif isinstance(executable, str):
        if executable in inspected_workflows:
            return {}
        inspected_workflows.add(executable)
        target_workflow = all_workflows[ExecutableId(executable)]
        # NOTE: the loop variable shadows the `executable` parameter here;
        # the parameter is no longer needed at this point.
        for executable in target_workflow.steps:
            params |= get_params(executable, all_workflows, inspected_workflows)
    elif isinstance(executable, (WorkflowInvocation, IterableWorkflowInvocation)):
        if executable.workflow in inspected_workflows:
            return {}
        inspected_workflows.add(executable.workflow)
        target_workflow = all_workflows[executable.workflow]
        for executable in target_workflow.steps:
            params |= get_params(executable, all_workflows, inspected_workflows)
    elif isinstance(executable, Conditional):
        params |= get_params(executable.then, all_workflows, inspected_workflows)
        if executable.else_:
            params |= get_params(executable.else_, all_workflows, inspected_workflows)
    return params
class WorkflowInvocation(IOValuesModel):
    """Invoke a named workflow once, with optional parameter overrides."""

    # Id of the workflow to run.
    workflow: ExecutableId
    parameters: Optional[ExtraModel] = Field(default=None)
class IterableWorkflowInvocation(IterableIOValuesModel):
    """Invoke a named workflow once per element of an iterable."""

    # Id of the workflow to run.
    workflow: ExecutableId
    parameters: Optional[ExtraModel] = Field(default=None)
# Resolve pydantic forward references now that the invocation models exist.
WorkflowInvocation.update_forward_refs()
StrictModel.update_forward_refs()
# Mapping of workflow id -> its definition.
TopLevelWorkflowConfig = dict[ExecutableId, WorkflowDefinition]
# Name of a variable in the execution context.
ContextVarName = str
# A template may be a string, a mapping, or a list of nested objects.
TemplateObject = Union[TemplateString, dict[str, Any], list[Any]]
def get_all_workflows() -> TopLevelWorkflowConfig:
    """Load the default workflows shipped next to this module, then overlay
    any test workflow files registered in `_test_workflow_paths`.

    NOTE(review): `os`, `_load_workflows_in_folder` and `_test_workflow_paths`
    are not among this chunk's visible imports/definitions — confirm they are
    provided at the real file top.
    """
    # load default workflows
    default_workflows_folder = os.path.dirname(__file__)
    existing_actions = list(get_actions_dict())
    workflows = _load_workflows_in_folder(
        default_workflows_folder,
        existing_actions=existing_actions,
    )
    # load test workflows (if any)
    for path in _test_workflow_paths:
        workflows = _collect_workflows(
            path,
            existing_actions=existing_actions,
            existing_workflows=workflows,
        )
    return workflows
def build_workflows():
    """Dynamically create one (model, iterable-model) pair of pydantic
    invocation models per defined workflow, so the generated jsonschema has
    precise typehints and autocompletion.

    Returns the list of created model classes.
    """
    # Dynamically build workflow models from currently defined workflows
    # for best typehints and autocompletion possible in the jsonschema
    workflows: TopLevelWorkflowConfig = get_all_workflows()
    workflow_models = []
    for workflow_id, workflow in workflows.items():
        fields = {"workflow": (Literal[workflow_id], ...)}  # type: ignore
        # Build the params model for each workflow, depending on all nested workflows
        params = get_params(workflow_id, workflows)
        params_model = pydantic.create_model(
            workflow_id + "Params",
            __base__=StrictModel,
            __module__=__name__,
            **{
                name: (type(default_value), Field(default=default_value))
                for name, default_value in params.items()
            },  # pyright: ignore[reportGeneralTypeIssues]
        )
        fields |= {"parameters": (params_model, None)}
        if workflow.inputs is not None:
            # Every declared input is required on the invocation model.
            input_fields_model = pydantic.create_model(
                workflow_id + "Inputs",
                __base__=StrictModel,
                __module__=__name__,
                **{
                    name: (Union[TemplateObject, ValueDeclaration], Field(default=...))
                    for name in workflow.inputs
                },  # pyright: ignore[reportGeneralTypeIssues]
            )
            input_fields = (input_fields_model, ...)
        else:
            input_fields = (type(None), None)
        fields |= {"inputs": input_fields}
        if workflow.outputs is not None:
            # Outputs are optional context-variable bindings.
            output_fields_model = pydantic.create_model(
                workflow_id + "Outputs",
                __base__=StrictModel,
                __module__=__name__,
                **{
                    name: (Optional[ContextVarName], Field(default=None))
                    for name in workflow.outputs
                },  # pyright: ignore[reportGeneralTypeIssues]
            )
            output_fields = (output_fields_model, None)
        else:
            output_fields = (type(None), None)
        invocation_fields = fields | {"outputs": output_fields}
        iterable_invocation_fields = fields | {"list_outputs": output_fields}
        # build workflow invocation model
        workflow_basemodel = pydantic.create_model(
            workflow_id + "WorkflowModel",
            __base__=WorkflowInvocation,
            __module__=__name__,
            **invocation_fields,  # pyright: ignore[reportGeneralTypeIssues]
        )
        workflow_models.append(workflow_basemodel)
        # build iterable workflow invocation model
        iterable_workflow_basemodel = pydantic.create_model(
            workflow_id + "IterableWorkflowModel",
            __base__=IterableWorkflowInvocation,
            __module__=__name__,
            **iterable_invocation_fields,  # pyright: ignore[reportGeneralTypeIssues]
        )
        workflow_models.append(iterable_workflow_basemodel)
    return workflow_models
161,199 | import copy
import json
import typing
from typing import Any, Union, Optional, Literal, ForwardRef
import pydantic
from pydantic import Field
from autopr.actions.base import get_actions_dict, Action as ActionBase
from autopr.models.config.elements import (
ExecModel,
ActionConfig,
TopLevelWorkflowConfig,
StrictModel,
WorkflowInvocation,
IterableWorkflowInvocation,
IOSpecModel,
WorkflowDefinition,
IfLambda,
IfContextNotExists,
IfExistsContext,
SetVars,
ContextModel,
IOValuesModel,
ActionConfigs,
ContextActions,
ValueDeclaration,
IterableActionConfig,
Conditional,
)
from autopr.models.config.value_declarations import ParamDeclaration
from autopr.models.events import EventUnion, LabelEvent, CommentEvent, PushEvent, CronEvent
from autopr.models.executable import (
LambdaString,
ContextVarPath,
ExecutableId,
Executable,
TemplateObject,
ContextVarName,
ContextDict,
StrictExecutable,
)
from autopr.workflows import get_all_workflows
def get_actions_dict() -> dict[ExecutableId, Type[Action[Any, Any]]]:
    """Return every registered Action subclass, keyed by its executable id."""
    # initialize all Action subclass declarations in the folder; importing
    # the package triggers the ActionMeta metaclass, which fills the registry
    import autopr.actions  # pyright: ignore[reportUnusedImport]
    # return all subclasses of Action as registered in the metaclass
    return ActionMeta.actions_registry
def get_all_workflows() -> TopLevelWorkflowConfig:
    """Load the default workflows shipped with this package, then overlay
    any test workflows registered in ``_test_workflow_paths``."""
    # load default workflows from the folder containing this module
    default_workflows_folder = os.path.dirname(__file__)
    existing_actions = list(get_actions_dict())
    workflows = _load_workflows_in_folder(
        default_workflows_folder,
        existing_actions=existing_actions,
    )
    # load test workflows (if any), merging each into the accumulated set
    for path in _test_workflow_paths:
        workflows = _collect_workflows(
            path,
            existing_actions=existing_actions,
            existing_workflows=workflows,
        )
    return workflows
def get_all_executable_ids():
    """Return the ids of every known action followed by every known workflow."""
    action_ids = list(get_actions_dict().keys())
    workflow_ids = list(get_all_workflows().keys())
    return action_ids + workflow_ids
161,200 | import time
import requests
import logging
import json
import threading
import socket
import sys
import os
from ..client_launcher import ShadowsocksClient as SSClient
from ..client_launcher import ShadowsocksRClient as SSRClient
from ..client_launcher import V2RayClient
from ..config_parser import UniversalParser
from ..result import ExportResult
from ..result import importResult
from ..result import Sorter
from ..speed_test import SpeedTest
from ..utils import check_platform
from ..utils.port_checker import check_port
from config import config
# Affine-cipher multiplier schedules used by decrypt(): letter multipliers
# (mod 26) and digit multipliers (mod 10).  All values are coprime to their
# modulus, so ModReverse() can always find an inverse.
lsa = [19, 5, 23, 1, 11, 25, 15, 21, 3, 17, 9, 7]
lsn = [7, 3, 1, 9]
# URL fragments that mark where the obfuscated tail of a subscription link
# begins — everything after the first matching fragment gets deciphered.
domainls = ['/link/', '/sub/', '/1759/', '/v2/', 'token=']
def ModReverse(a, n):
    """Return the modular inverse of *a* modulo *n*, or -1 if gcd(a, n) != 1."""
    coeffs = [0, 1, ]
    if EX_GCD(a, n, coeffs) != 1:
        # no inverse exists when a and n are not coprime
        return -1
    # normalise the Bezout coefficient into the range [0, n)
    return (coeffs[0] % n + n) % n
def decrypt(sublink):
    """Decode the obfuscated tail of a subscription URL.

    Splits *sublink* at the first marker from ``domainls``, then applies a
    positional affine decipher (multiplicative inverses from ``lsa``/``lsn``)
    to the tail.  Deciphering stops at the first non-alphanumeric character
    (``flag`` drops to 0) and the remainder is copied verbatim.

    NOTE(review): implicitly returns None when no marker matches — confirm
    callers handle that.
    """
    for i in domainls:
        if i in sublink:
            # untouched prefix up to and including the marker
            origin = sublink[:sublink.find(i) + len(i)]
            key1 = sublink[sublink.find(i) + len(i):]
            key2 = ""
            ka = 0   # rotating index into the letter-key schedule lsa
            kn = 0   # rotating index into the digit-key schedule lsn
            flag = 1  # 1 while still deciphering; cleared at first non-alnum
            for j in range(0, len(key1)):
                o = key1[j]
                if (o.isupper() and flag):
                    key2 += chr(ord("A") + ((ord(key1[j]) - ord("A")) * ModReverse(lsa[ka], 26)) % 26)
                    ka = (ka + 1) % 12
                if (o.islower() and flag):
                    key2 += chr(ord("a") + ((ord(key1[j]) - ord("a")) * ModReverse(lsa[ka], 26)) % 26)
                    ka = (ka + 1) % 12
                if (o.isdigit() and flag):
                    key2 += chr(ord("0") + ((ord(key1[j]) - ord("0")) * ModReverse(lsn[kn], 10)) % 10)
                    kn = (kn + 1) % 4
                if (not o.isalnum()) or (not flag):
                    flag = 0
                    key2 += o
            return origin + key2
161,201 | import logging
import sys
from optparse import OptionParser
def setOpts(parser):
    """Register all command-line options for the interactive CLI entry point.

    Mutates *parser* in place.  Option semantics are documented in each
    option's help string; the list-valued include/exclude filters share the
    greedy-consuming ``setArgsListCallback`` callback.
    """
    parser.add_option(
        "-c","--config",
        action="store",
        dest="guiConfig",
        default="",
        help="Load configurations from file."
    )
    parser.add_option(
        "-u","--url",
        action="store",
        dest="url",
        default="",
        help="Load ssr config from subscription url."
    )
    parser.add_option(
        "-m","--method",
        action="store",
        dest="test_method",
        default="socket",
        help="Select test method in [speedtestnet, fast, socket, stasync]."
    )
    parser.add_option(
        "-M","--mode",
        action="store",
        dest="test_mode",
        default="all",
        help="Select test mode in [all,wps,pingonly]."
    )
    # List-valued node filters (consumed greedily until the next flag).
    parser.add_option(
        "--include",
        action="callback",
        callback = setArgsListCallback,
        dest="filter",
        default = [],
        help="Filter nodes by group and remarks using keyword."
    )
    parser.add_option(
        "--include-remark",
        action="callback",
        callback = setArgsListCallback,
        dest="remarks",
        default=[],
        help="Filter nodes by remarks using keyword."
    )
    parser.add_option(
        "--include-group",
        action="callback",
        callback = setArgsListCallback,
        dest="group",
        default=[],
        help="Filter nodes by group name using keyword."
    )
    parser.add_option(
        "--exclude",
        action="callback",
        callback = setArgsListCallback,
        dest="efliter",
        default = [],
        help="Exclude nodes by group and remarks using keyword."
    )
    parser.add_option(
        "--exclude-group",
        action="callback",
        callback = setArgsListCallback,
        dest="egfilter",
        default=[],
        help="Exclude nodes by group using keyword."
    )
    parser.add_option(
        "--exclude-remark",
        action="callback",
        callback = setArgsListCallback,
        dest="erfilter",
        default = [],
        help="Exclude nodes by remarks using keyword."
    )
    parser.add_option(
        "--use-ssr-cs",
        action="store_true",
        dest="use_ssr_cs",
        default = False,
        help="Replace the ShadowsocksR-libev with the ShadowsocksR-C# (Only Windows)."
    )
    parser.add_option(
        "-g",
        action="store",
        dest="group_override",
        default="",
        help="Manually set group."
    )
    '''
    parser.add_option(
        "-t","--type",
        action="store",
        dest="proxy_type",
        default = "ssr",
        help="Select proxy type in [ssr,ssr-cs,ss,v2ray],default ssr."
    )
    '''
    parser.add_option(
        "-y","--yes",
        action="store_true",
        dest="confirmation",
        default=False,
        help="Skip node list confirmation before test."
    )
    parser.add_option(
        "-C","--color",
        action="store",
        dest="result_color",
        default="",
        help="Set the colors when exporting images.."
    )
    '''
    parser.add_option(
        "-s","--split",
        action="store",
        dest="split_count",
        default="-1",
        help="Set the number of nodes displayed in a single image when exporting images."
    '''
    parser.add_option(
        "-s","--sort",
        action="store",
        dest="sort_method",
        default="",
        help="Select sort method in [speed,rspeed,ping,rping],default not sorted."
    )
    parser.add_option(
        "-i","--import",
        action="store",
        dest="import_file",
        default="",
        help="Import test result from json file and export it."
    )
    parser.add_option(
        "--skip-requirements-check",
        action="store_true",
        dest="skip_requirements_check",
        default=False,
        help="Skip requirements check."
    )
    parser.add_option(
        "--debug",
        action="store_true",
        dest="debug",
        default=False,
        help="Run program in debug mode."
    )
    parser.add_option(
        "--paolu",
        action="store_true",
        dest="paolu",
        default=False,
        help="如题"
    )
def init(VERSION):
    """Build the CLI option parser, parse sys.argv, and return (options, args).

    Prints help and exits when invoked with no arguments at all.
    """
    parser = OptionParser(
        usage="Usage: %prog [options] arg1 arg2...",
        version="SSR Speed Tool " + VERSION,
    )
    setOpts(parser)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    options, args = parser.parse_args()
    return (options, args)
161,202 | import logging
import sys
from optparse import OptionParser
from config import config
def setArgsListCallback(option,opt_str,value,parser):
    """optparse callback that greedily consumes following args into a list.

    Collects args until the next option-like token: "--xyz", or "-x" that
    does not parse as a (possibly negative) number.  Whitespace-only args
    are consumed but not kept.

    Fix: consumed args are now counted separately from kept args.  The old
    code deleted only ``len(value)`` items from ``parser.rargs``, so after a
    skipped whitespace-only arg the trailing args were left in ``rargs`` and
    processed a second time by optparse.
    """
    assert value is None
    value = []

    def floatable(arg):
        # True when arg parses as a float, e.g. "-1.5" is a value, not a flag
        try:
            float(arg)
            return True
        except ValueError:
            return False

    consumed = 0
    for arg in parser.rargs:
        if (arg[:2] == "--" and len(arg) > 2):
            break
        if (arg[:1] == "-" and len(arg) > 1 and not floatable(arg)):
            break
        consumed += 1
        if (arg.replace(" ","") == ""):
            continue  # swallow blank args without keeping them
        value.append(arg)
    del parser.rargs[:consumed]
    setattr(parser.values,option.dest,value)
161,203 | import logging
import sys
from optparse import OptionParser
from config import config
def setOpts(parser):
    """Register web-API-mode command line options on *parser*.

    NOTE(review): as committed this function had no body at all (a bare
    ``def`` header, which is a SyntaxError); restored with ``pass`` on the
    assumption the web API takes no extra options — confirm against upstream.
    """
    pass


def init(VERSION):
    """Create the option parser for the web API entry point and parse argv.

    :param VERSION: version string shown by ``--version``.
    :return: the ``(options, args)`` pair from ``OptionParser.parse_args()``.
    """
    parser = OptionParser(usage="Usage: %prog [options] arg1 arg2...",version="SSR Speed Web Api " + VERSION)
    setOpts(parser)
    (options,args) = parser.parse_args()
    return (options,args)
161,204 | import requests
import os
import sys
import logging
import time
# Module-level logger for the result-upload helpers.
logger = logging.getLogger("Sub")
from config import config
# Narrow the imported config down to the upload-result section.
config = config["uploadResult"]
# NOTE(review): this second rebinding clobbers the uploadResult dict above,
# and __version__/__web_api_version__ are not defined in this view — the two
# assignments look like they belong to different modules; confirm.
config = {
    "VERSION": __version__,
    "WEB_API_VERSION": __web_api_version__
}
def pushToServer(filename):
    """Upload a result file to the configured server.

    :param filename: path of the file to upload.
    :return: dict ``{"status": http_status_or_-1, "code": 0_on_ok_else_-1}``.

    Fixes: the uploaded file handle was opened and never closed (leak);
    the bare ``except:`` (which also swallowed KeyboardInterrupt/SystemExit)
    is narrowed to ``except Exception``.
    """
    result = {
        "status":-1,
        "code":-1
    }
    try:
        logger.info("Pushing %s to server." % filename)
        with open(filename,"rb") as fobj:
            files = {
                "file":fobj
            }
            param = {
                "token":config["apiToken"],
                "remark":config["remark"]
            }
            rep = requests.post(config["server"],files=files,data=param,timeout=10)
        result["status"] = rep.status_code
        if (rep.status_code == 200):
            if (rep.text == "ok"):
                result["code"] = 0
        return result
    except requests.exceptions.Timeout:
        logger.error("Connect to server timeout.")
        return result
    except Exception:
        logger.exception("Pushing result to server error.")
        return result
161,205 | import json
def importResult(filename):
    """Load a previously exported test result from a JSON file.

    :param filename: path to a UTF-8 encoded JSON file.
    :return: the deserialized JSON value.
    :raises OSError: if the file cannot be opened.
    :raises json.JSONDecodeError: if the content is not valid JSON.
    """
    # json.load streams straight from the file object; the dead `fi = None`
    # pre-assignment and the read-then-loads round trip are gone.
    with open(filename,"r",encoding="utf-8") as f:
        return json.load(f)
161,206 | import copy
_ShadowsocksConfig = {
"server":"",
"server_port":-1,
"method":"",
"protocol":"",
"obfs":"",
"plugin":"",
"password":"",
"protocol_param":"",
"obfsparam":"",
"plugin_opts":"",
"plugin_args":"",
"remarks":"",
"group":"N/A",
"timeout":0,
"local_port":0,
"local_address":"",
"fastopen":False
}
def getConfig(
local_address: str = "127.0.0.1",
local_port: int = 1087,
timeout: int = 10
):
res = copy.deepcopy(_ShadowsocksConfig)
res["local_port"] = local_port
res["local_address"] = local_address
res["timeout"] = timeout
return res | null |
161,207 | import socket
def check_port(port: int):
    """Probe 127.0.0.1:*port* with a TCP connect (3 second timeout).

    Returns None when something is listening; raises OSError (e.g.
    ConnectionRefusedError or socket.timeout) otherwise.

    Fix: the socket was never closed (and leaked on a failed connect);
    the context manager now closes it deterministically on every path.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(3)
        s.connect(("127.0.0.1", port))
        s.shutdown(2)
161,208 | import base64
def encode(s):
    """UTF-8 encode *s* and return its URL-safe base64 representation (bytes)."""
    return base64.urlsafe_b64encode(s.encode("utf-8"))
161,209 | import base64
def _url_safe_decode(s: str):
    """Pad *s* (via fillb64), map URL-safe chars back to the standard base64
    alphabet, and strictly decode to bytes."""
    padded = fillb64(s)
    standard = padded.replace("-", "+").replace("_", "/")
    return base64.b64decode(standard, validate=True)


def decode(s):
    """Decode a URL-safe base64 string; thin wrapper over _url_safe_decode."""
    return _url_safe_decode(s)
161,210 | import json
import urllib.parse
from flask import request
from .parseqsplus import parse_qs_plus
def parse_qs_plus(_dict):
    """Flatten urllib.parse.parse_qs output.

    Single-element lists become their element; empty lists stay empty;
    longer lists keep their length with each item recursively flattened.
    Non-dict input is returned unchanged.

    Improvement: ``type(x) != dict`` / ``type(v) == list`` replaced with
    isinstance, so dict/list subclasses are handled too (backward
    compatible for plain dicts and lists).
    """
    if not isinstance(_dict, dict):
        return _dict
    data = {}
    for k, v in _dict.items():
        if isinstance(v, list):
            if not v:
                data[k] = []
            elif len(v) == 1:
                data[k] = v[0]
            else:
                data[k] = [parse_qs_plus(item) for item in v]
        else:
            data[k] = v
    return data
def getPostData():
    """Parse the body of the current Flask request into a plain dict.

    Handles three content types: JSON, urlencoded (flattened through
    parse_qs_plus), and anything else via request.form, where PHP-style
    "name[]" keys are collapsed into lists.
    """
    #print(request.content_type)
    data = {}
    if (request.content_type.startswith('application/json')):
        data = request.get_data()
        return json.loads(data.decode("utf-8"))
    elif(request.content_type.startswith("application/x-www-form-urlencoded")):
        #print(1)
        #print(urllib.parse.parse_qs(request.get_data().decode("utf-8")))
        return parse_qs_plus(urllib.parse.parse_qs(request.get_data().decode("utf-8")))
    else:
        # multipart/form-data etc.: collapse "key[]" fields into lists
        for key, value in request.form.items():
            if key.endswith('[]'):
                data[key[:-2]] = request.form.getlist(key)
            else:
                data[key] = value
        return data
161,211 | import platform
import logging
logger = logging.getLogger("Sub")


def check_platform():
    """Classify the host OS as "Windows", "Linux", "MacOS" or "Unknown"."""
    info = platform.platform()
    logger.info("Platform Info : {}".format(str(info)))
    if "Windows" in info:
        return "Windows"
    if "Linux" in info:
        return "Linux"
    if "Darwin" in info or "mac" in info:
        return "MacOS"
    return "Unknown"
161,212 | import os
import sys
import time
import re
import socket
import requests
import logging
from bs4 import BeautifulSoup
# Shared logger for the subscription/speed-test helpers.
logger = logging.getLogger("Sub")
from config import config
# Local SOCKS5 proxy port that the client under test listens on.
LOCAL_PORT = config["localPort"]
def parseLocation():
    """Geo-locate the proxy exit by querying api.ip.sb through the local
    SOCKS5 proxy.

    :return: ``(ok, country_code, continent_code, isp)``; the last three are
        "DEFAULT" when the lookup fails.

    Fix: the two bare ``except:`` clauses are narrowed to
    ``except Exception:`` so Ctrl-C / SystemExit are no longer swallowed.
    """
    try:
        logger.info("Starting parse location.")
        rep = requests.get("https://api.ip.sb/geoip",proxies = {
            "http":"socks5h://127.0.0.1:%d" % LOCAL_PORT,
            "https":"socks5h://127.0.0.1:%d" % LOCAL_PORT
        },timeout=5)
        tmp = rep.json()
        logger.info("Server Country Code : %s,Continent Code : %s,ISP : %s" % (tmp["country_code"],tmp["continent_code"],tmp["organization"]))
        return (True,tmp["country_code"],tmp["continent_code"],tmp["organization"])
    except requests.exceptions.ReadTimeout:
        logger.error("Parse location timeout.")
    except Exception:
        logger.exception("Parse location failed.")
    # best-effort dump of the raw response; `rep` may be unbound here
    # (e.g. after a timeout), which the inner except absorbs
    try:
        logger.error(rep.content)
    except Exception:
        pass
    return(False,"DEFAULT","DEFAULT","DEFAULT")
161,213 | import os
import sys
import time
import re
import socket
import requests
import logging
from bs4 import BeautifulSoup
# Shared logger for the subscription/speed-test helpers.
logger = logging.getLogger("Sub")
from config import config
def checkIPv4(ip):
    """Return True when *ip* is exactly a dotted-quad IPv4 address."""
    pattern = re.compile(r"\b((?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:(?<!\.)\b|\.)){4}")
    match = pattern.match(ip)
    # the regex can match a valid prefix; require it to cover the whole input
    return bool(match and match.group(0) == ip)
def domain2ip(domain):
    """Resolve *domain* to an IPv4 address string, or "N/A" on failure.

    Already-dotted-quad input is returned unchanged.

    Fixes: bare ``except:`` narrowed to ``except Exception:`` (keeps the
    logged traceback, stops swallowing KeyboardInterrupt); removed the dead
    ``ip = "N/A"`` local that was shadowed by every path.
    """
    logger.info("Translating {} to ipv4.".format(domain))
    if checkIPv4(domain):
        return domain
    try:
        return socket.gethostbyname(domain)
    except Exception:
        logger.exception("Translate {} to ipv4 failed.".format(domain))
    return "N/A"
161,214 | import os
import sys
import time
import re
import socket
import requests
import logging
from bs4 import BeautifulSoup
# Shared logger for the subscription/speed-test helpers.
logger = logging.getLogger("Sub")
from config import config
# Local SOCKS5 proxy port used to route the Geo-IP lookup.
LOCAL_PORT = config["localPort"]
def checkIPv4(ip):
    """Return True when *ip* is exactly a dotted-quad IPv4 address."""
    pattern = re.compile(r"\b((?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:(?<!\.)\b|\.)){4}")
    match = pattern.match(ip)
    # the regex can match a valid prefix; require it to cover the whole input
    return bool(match and match.group(0) == ip)
def IPLoc(ip = ""):
    """Geo-locate *ip* (or the proxy exit when empty) via api.ip.sb, routed
    through the local SOCKS5 proxy.

    :return: the Geo-IP JSON dict, or {} on any failure.

    Fixes: the 'Accept - Encoding' header name contained spaces (invalid
    HTTP header, typo for 'Accept-Encoding'); the two bare ``except:``
    clauses are narrowed to ``except Exception:``.
    """
    try:
        if (ip != "" and not checkIPv4(ip)):
            logger.error("Invalid IP : {}".format(ip))
            return {}
        logger.info("Starting Geo IP.")
        if (ip == "N/A"):
            ip = ""
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding':'gzip, deflate, br',
            'Accept-Language':'en-US,en;q=0.9,zh;q=0.8,zh-CN;q=0.7',
            'Connection':'Keep-Alive',
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36'}
        rep = requests.get("https://api.ip.sb/geoip/{}".format(ip),proxies = {
            "http":"socks5h://127.0.0.1:%d" % LOCAL_PORT,
            "https":"socks5h://127.0.0.1:%d" % LOCAL_PORT
        }, timeout=5, headers=headers)
        tmp = rep.json()
        return tmp
    except requests.exceptions.ReadTimeout:
        logger.error("Geo IP Timeout.")
        return {}
    except Exception:
        logger.exception("Geo IP Failed.")
    # best-effort dump of the raw response; `rep` may be unbound here
    try:
        logger.error(rep.content)
    except Exception:
        pass
    return {}
161,215 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
def index():
    """Redirect the web root to the hosted web UI with a permanent (301) code.

    NOTE(review): this module's flask import reads ``redirec`` (missing the
    trailing 't'); ``redirect`` used here only works once that typo is
    fixed — confirm.
    """
    return redirect("https://web1.ospf.in/", 301)
    #return render_template(
    #    "index.html"
    #    )
161,216 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Version numbers reported by the /version endpoint.
# NOTE(review): __version__ / __web_api_version__ are not defined in this
# view — presumably imported at module top; confirm.
config = {
    "VERSION": __version__,
    "WEB_API_VERSION": __web_api_version__
}
def getVersion():
    """Return the main and web-api version numbers as a JSON string."""
    return json.dumps(
        {
            "main":config["VERSION"],
            "webapi":config["WEB_API_VERSION"]
        }
    )
161,217 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Shared SSRSpeedCore instance; assigned by the launcher before requests arrive.
sc = None
def status():
    """Return the core's current status string (e.g. "running")."""
    return sc.web_get_status()
161,218 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Shared SSRSpeedCore instance; assigned by the launcher before requests arrive.
sc = None
def readSubscriptions():
    """POST handler: fetch and parse a node subscription URL via the core.

    Returns 'running' while a test is in progress, an error string for a
    missing URL, or the parsed node list as JSON.
    NOTE(review): implicitly returns None for non-POST methods — confirm the
    route only accepts POST.
    """
    if (request.method == "POST"):
        data = getPostData()
        if (sc.web_get_status() == "running"):
            return 'running'
        subscriptionUrl = data.get("url","")
        #proxyType = data.get("proxyType","SSR")
        if (not subscriptionUrl):
            return "invalid url."
        return json.dumps(sc.web_read_subscription(subscriptionUrl))
161,219 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Ensure the working directories for logs and exported results exist.
if (not os.path.exists("./logs/")):
    os.mkdir("./logs/")
if (not os.path.exists("./results/")):
    os.mkdir("./results/")
logger = logging.getLogger(__name__)
# Flask application serving the web API.
# NOTE(review): TEMPLATE_FOLDER / STATIC_FOLDER / UPLOAD_FOLDER are not
# defined in this view — presumably module constants; confirm.
app = Flask(__name__,
    template_folder=TEMPLATE_FOLDER,
    static_folder=STATIC_FOLDER,
    static_url_path=""
)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
# Shared SSRSpeedCore instance; assigned by the launcher at startup.
sc = None
def check_file_allowed(filename):
    """True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
# Version numbers reported by the web API.
# NOTE(review): __version__ / __web_api_version__ not defined in this view —
# presumably imported at module top; confirm.
config = {
    "VERSION": __version__,
    "WEB_API_VERSION": __web_api_version__
}
class FileNotAllowed(WebErrorBase):
    """Web API error: an uploaded file's extension is not allowed."""

    errMsg = "File type not allowed"
    errTag = "FILE_NOT_ALLOWED"

    def __init__(self):
        super().__init__()
class WebFileCommonError(WebErrorBase):
    """Web API error: a file upload failed for an unspecified reason."""

    errMsg = "Upload failed."
    errTag = "FILE_COMMON_ERROR"

    def __init__(self):
        super().__init__()
def readFileConfig():
    """POST handler: accept an uploaded node-config file and parse it.

    Saves the upload (sanitized via secure_filename) into UPLOAD_FOLDER and
    hands it to the core; rejects disallowed extensions.  Returns 'running'
    while a test is in progress, the parsed config as JSON on success, or an
    error message string otherwise.
    """
    if request.method == "POST":
        if (sc.web_get_status() == "running"):
            return 'running'
        ufile = request.files["file"]
        #data = getPostData()
        if ufile:
            if check_file_allowed(ufile.filename):
                filename = secure_filename(ufile.filename)
                tmpFilename = os.path.join(app.config["UPLOAD_FOLDER"], filename)
                ufile.save(tmpFilename)
                logger.info("Tmp config file saved as {}".format(tmpFilename))
                return json.dumps(sc.web_read_config_file(tmpFilename))
            else:
                logger.error("Disallowed file {}".format(ufile.filename))
                return FileNotAllowed.errMsg
        else:
            logger.error("File upload failed or unknown error.")
            return WebFileCommonError.errMsg
161,220 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Shared SSRSpeedCore instance; assigned by the launcher before requests arrive.
sc = None
def getColors():
    """Return the available export color schemes as a JSON string."""
    return json.dumps(sc.web_get_colors())
161,221 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Shared SSRSpeedCore instance; assigned by the launcher before requests arrive.
sc = None
def startTest():
    """POST handler: configure the core from the request body and run a test.

    Expects a JSON/form body with "configs" (required) plus optional
    testMethod/colors/sortMethod/testMode/useSsrCSharp/group fields.
    Returns 'running' if a test is already active, "No configs" when the
    node list is empty, 'done' after the test, or 'invalid method'.
    """
    if (request.method == "POST"):
        data = getPostData()
        # return "SUCCESS"
        if (sc.web_get_status() == "running"):
            return 'running'
        configs = data.get("configs",[])
        if (not configs):
            return "No configs"
        #proxyType =data.get("proxyType","SSR")
        testMethod =data.get("testMethod", "ST_ASYNC")
        colors =data.get("colors", "origin")
        sortMethod =data.get("sortMethod", "")
        testMode = data.get("testMode", "")
        use_ssr_cs = data.get("useSsrCSharp", False)
        group = data.get("group", "")
        sc.web_setup(
            testMode = testMode,
            testMethod = testMethod,
            colors = colors,
            sortMethod = sortMethod
        )
        sc.clean_result()
        sc.web_set_configs(configs)
        if group:
            sc.set_group(group)
        # blocking call: runs the full speed test before responding
        sc.start_test(use_ssr_cs)
        return 'done'
    return 'invalid method'
161,222 | import time
import sys
import os
import json
import threading
import urllib.parse
import logging
from config import config
from flask import Flask,request,redirec
from flask_cors import CORS
from werkzeug.utils import secure_filename
from ssrspeed.utils import RequirementsCheck, check_platform
from ssrspeed.utils.web import getPostData
from ssrspeed.core.ssrspeed_core import SSRSpeedCore
from ssrspeed.shell import web_cli as console_cfg
from ssrspeed.result import ExportResult
from ssrspeed.result import importResult
from ssrspeed.types.errors.webapi.error_file_not_allowed import FileNotAllowed
from ssrspeed.types.errors.webapi.error_file_common import WebFileCommonError
# Shared SSRSpeedCore instance; assigned by the launcher before requests arrive.
sc = None
def getResults():
    """Return the accumulated test results as a JSON string."""
    return json.dumps(sc.web_get_results())
161,223 |
def getKeys(key_list):
    """Return the key/column list unchanged.

    Hook point: extend the returned list here to add extra export columns,
    e.g. ``key_list + ['plan']``.
    """
    return key_list
161,224 |
def isTurnOn(row):
    """Accept every row.

    Hook point: add a per-row filter here, e.g. ``row['plan'] == 'B'``.
    """
    return True
161,225 | import traceback
from shadowsocks import shell, common
from configloader import load_config, get_config
import random
import getopt
import sys
import json
import base64
def print_server_help():
    """Print the usage text for the mujson user-management CLI."""
    print('''usage: python mujson_manage.py -a|-d|-e|-c|-l [OPTION]...
Actions:
-a add/edit a user
-d delete a user
-e edit a user
-c set u&d to zero
-l display a user infomation or all users infomation
Options:
-u USER the user name
-p PORT server port (only this option must be set if add a user)
-k PASSWORD password
-m METHOD encryption method, default: aes-128-ctr
-O PROTOCOL protocol plugin, default: auth_aes128_md5
-o OBFS obfs plugin, default: tls1.2_ticket_auth_compatible
-G PROTOCOL_PARAM protocol plugin param
-g OBFS_PARAM obfs plugin param
-t TRANSFER max transfer for G bytes, default: 8388608 (8 PB or 8192 TB)
-f FORBID set forbidden ports. Example (ban 1~79 and 81~100): -f "1-79,81-100"
-i MUID set sub id to display (only work with -l)
-s SPEED set speed_limit_per_con
-S SPEED set speed_limit_per_user
General options:
-h, --help show this help message and exit
''')
161,226 | #!port importloader
# Parsed API configuration; populated by load_config(), read via get_config().
g_config = None
# NOTE(review): the line above this block reads "#!port importloader" — it
# looks like a mangled "import importloader"; confirm.
def load_config():
    """(Re)load the user API configuration into the module-global g_config.

    Tries 'userapiconfig' first, then falls back to 'apiconfig'.
    """
    global g_config
    g_config = importloader.loads(['userapiconfig', 'apiconfig'])
161,227 | #!port importloader
# Parsed API configuration; None until load_config() has been called.
g_config = None
def get_config():
    """Return the module-global configuration object (may be None)."""
    return g_config
161,228 | from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import logging
import struct
import errno
import random
import binascii
import traceback
import threading
from shadowsocks import encrypt, obfs, eventloop, lru_cache, common, shell
from shadowsocks.common import pre_parse_header, parse_header, pack_addr
def client_key(source_addr, server_af):
    """Build the lookup key for a client: "<src-ip>:<src-port>:<af>".

    Note: *server_af* is the server-side address family, not the dest af.
    """
    source_host = source_addr[0]
    source_port = source_addr[1]
    return '%s:%s:%d' % (source_host, source_port, server_af)
161,229 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
libcrypto = None   # CDLL handle for OpenSSL's libcrypto; set by load_openssl()
loaded = False     # True once load_openssl() has completed successfully
buf_size = 2048    # initial size of the shared EVP output buffer `buf`
def load_openssl():
    """Locate libcrypto via ctypes, declare the EVP function signatures and
    allocate the shared output buffer.

    Sets the module globals ``libcrypto``, ``buf`` and ``loaded``.
    :raises Exception: when no libcrypto variant can be found.
    """
    global loaded, libcrypto, buf
    from ctypes.util import find_library
    # try the usual library names: POSIX 'crypto', then Windows eay32 builds
    for p in ('crypto', 'eay32', 'libeay32'):
        libcrypto_path = find_library(p)
        if libcrypto_path:
            break
    else:
        raise Exception('libcrypto(OpenSSL) not found')
    logging.info('loading libcrypto from %s', libcrypto_path)
    libcrypto = CDLL(libcrypto_path)
    # declare return/argument types so ctypes marshals pointers correctly
    libcrypto.EVP_get_cipherbyname.restype = c_void_p
    libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
    libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
                                            c_char_p, c_char_p, c_int)
    libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
                                           c_char_p, c_int)
    libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
    libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
    if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
        # required on older OpenSSL so lookups by cipher name succeed
        libcrypto.OpenSSL_add_all_ciphers()
    buf = create_string_buffer(buf_size)
    loaded = True
161,230 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
libcrypto = None  # CDLL handle for OpenSSL's libcrypto; set by load_openssl()
def load_cipher(cipher_name):
    """Resolve an EVP cipher object from libcrypto by name.

    :param cipher_name: bytes like b'aes-128-cfb'.
    :return: the EVP_CIPHER pointer, or None when libcrypto lacks it.
    """
    # map e.g. b'aes-128-cfb' to the constructor name 'EVP_aes_128_cfb'
    func_name = b'EVP_' + cipher_name.replace(b'-', b'_')
    if bytes != str:
        # Python 3: ctypes attribute names must be str, not bytes
        func_name = str(func_name, 'utf-8')
    cipher = getattr(libcrypto, func_name, None)
    if cipher:
        cipher.restype = c_void_p
        return cipher()
    return None
161,231 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is not visible here — presumably defined
    earlier in this module; confirm.
    """
    from shadowsocks.crypto import util
    # fixed 32-byte key / 16-byte IV filler; op=1 encrypts, op=0 decrypts
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
    """Exercise the aes-128-cfb round trip."""
    run_method(b'aes-128-cfb')
161,232 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is presumably defined earlier in this module.
    """
    from shadowsocks.crypto import util
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_256_cfb():
    """Exercise the aes-256-cfb round trip."""
    run_method(b'aes-256-cfb')
161,233 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is presumably defined earlier in this module.
    """
    from shadowsocks.crypto import util
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_128_cfb8():
    """Exercise the aes-128-cfb8 round trip."""
    run_method(b'aes-128-cfb8')
161,234 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is presumably defined earlier in this module.
    """
    from shadowsocks.crypto import util
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_256_ofb():
    """Exercise the aes-256-ofb round trip."""
    run_method(b'aes-256-ofb')
161,235 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is presumably defined earlier in this module.
    """
    from shadowsocks.crypto import util
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_256_ctr():
    """Exercise the aes-256-ctr round trip."""
    run_method(b'aes-256-ctr')
161,236 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is presumably defined earlier in this module.
    """
    from shadowsocks.crypto import util
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_bf_cfb():
    """Exercise the Blowfish-CFB round trip."""
    run_method(b'bf-cfb')
161,237 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
def run_method(method):
    """Round-trip sample data through a cipher/decipher pair for *method*.

    NOTE(review): CtypesCrypto is presumably defined earlier in this module.
    """
    from shadowsocks.crypto import util
    cipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = CtypesCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_rc4():
    """Exercise the RC4 round trip."""
    run_method(b'rc4')
161,238 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library(possible_lib_names, search_symbol, library_name):
    """Locate and load a shared library that exports *search_symbol*.

    :param possible_lib_names: a name or list/tuple of names to try; each is
        also tried with a 'lib' prefix.
    :param search_symbol: symbol that must exist in the loaded library.
    :param library_name: human-readable name used for logging.
    :return: a ctypes.CDLL handle, or None when nothing suitable is found.

    Fixes: deprecated ``logging.warn`` replaced with ``logging.warning``;
    ``type(...) not in (list, tuple)`` replaced with isinstance.
    """
    import ctypes.util
    from ctypes import CDLL
    paths = []
    if not isinstance(possible_lib_names, (list, tuple)):
        possible_lib_names = [possible_lib_names]
    lib_names = []
    for lib_name in possible_lib_names:
        lib_names.append(lib_name)
        lib_names.append('lib' + lib_name)
    for name in lib_names:
        if os.name == "nt":
            # NOTE(review): find_library_nt is defined elsewhere — confirm
            paths.extend(find_library_nt(name))
        else:
            path = ctypes.util.find_library(name)
            if path:
                paths.append(path)
    if not paths:
        # We may get here when find_library fails because, for example,
        # the user does not have sufficient privileges to access those
        # tools underlying find_library on linux.
        import glob
        for name in lib_names:
            patterns = [
                '/usr/local/lib*/lib%s.*' % name,
                '/usr/lib*/lib%s.*' % name,
                'lib%s.*' % name,
                '%s.dll' % name]
            for pat in patterns:
                files = glob.glob(pat)
                if files:
                    paths.extend(files)
    for path in paths:
        try:
            lib = CDLL(path)
            if hasattr(lib, search_symbol):
                logging.info('loading %s from %s', library_name, path)
                return lib
            else:
                logging.warning('can\'t find symbol %s in %s', search_symbol,
                                path)
        except Exception:
            # keep trying other candidates; re-raise only on the last one
            if path == paths[-1]:
                raise
    return None
def test_find_library():
    """Smoke-test find_library against libc and libcrypto.

    NOTE(review): environment-dependent — requires a discoverable libc and
    OpenSSL on the host; not suitable for minimal containers.
    """
    assert find_library('c', 'strcpy', 'libc') is not None
    assert find_library(['c'], 'strcpy', 'libc') is not None
    assert find_library(('c',), 'strcpy', 'libc') is not None
    assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
                        'libcrypto') is not None
    assert find_library('notexist', 'strcpy', 'libnotexist') is None
    assert find_library('c', 'symbol_not_exist', 'c') is None
    assert find_library(('notexist', 'c', 'crypto', 'eay32'),
                        'EVP_CipherUpdate', 'libc') is not None
161,239 | from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
                  i=1, padding=1):
    """Build an rc4-md5 cipher: RC4 keyed with MD5(key || iv).

    The extra parameters exist only to match the common cipher-factory
    signature and are ignored.
    """
    rc4_key = hashlib.md5(key + iv).digest()
    return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
161,240 | from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
cached_tables = {}  # key -> [encrypt_table, decrypt_table]; filled by init_table()
def get_table(key):
    """Derive the classic 256-entry "table" cipher permutation from MD5(key).

    NOTE(review): relies on a module-level ``maketrans`` shim and on ``ord``
    accepting 1-byte strings (Python-2 style) — confirm the compat helpers
    are defined elsewhere in this module.
    """
    m = hashlib.md5()
    m.update(key)
    s = m.digest()
    # a seeds the comparator below; b is unpacked but unused (kept as-is)
    a, b = struct.unpack('<QQ', s)
    table = maketrans(b'', b'')
    table = [table[i: i + 1] for i in range(len(table))]
    for i in range(1, 1024):
        # repeated key-dependent re-sorts converge on the final permutation
        table.sort(key=lambda x: int(a % (ord(x) + i)))
    return table
def init_table(key):
    """Return the memoized [encrypt_table, decrypt_table] pair for *key*."""
    tables = cached_tables.get(key)
    if tables is None:
        encrypt_table = b''.join(get_table(key))
        decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
        tables = [encrypt_table, decrypt_table]
        cached_tables[key] = tables
    return tables
161,241 | from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
def get_table(key):
    """Derive a deterministic 256-entry byte substitution table from *key*.

    Starts from the identity permutation and shuffles it with 1023 stable
    sorts keyed on the MD5 digest of the password.
    """
    m = hashlib.md5()
    m.update(key)
    s = m.digest()
    # only `a` (low 8 digest bytes) drives the shuffle; `b` is unused
    a, b = struct.unpack('<QQ', s)
    # identity byte table; maketrans is presumably the bytes-compatible
    # helper from shadowsocks.common -- confirm
    table = maketrans(b'', b'')
    table = [table[i: i + 1] for i in range(len(table))]
    for i in range(1, 1024):
        # list.sort is stable, so the repeated sorts accumulate into one
        # reproducible permutation
        table.sort(key=lambda x: int(a % (ord(x) + i)))
    return table
# Shadow the builtin ord() with a py2/py3-compatible version that accepts
# both a single-byte bytes object and an int (presumably
# shadowsocks.common.compat_ord, imported above -- confirm).
ord = compat_ord
def test_table_result():
    """Golden-value test: the derived encrypt/decrypt tables for two known
    passwords must match these precomputed permutations byte for byte."""
    from shadowsocks.common import ord
    # expected [encrypt_table, decrypt_table] for key b'foobar!'
    target1 = [
        [60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
         255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
         245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
         173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
         56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
         213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
         10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
         243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
         130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
         163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
         0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
         74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
         169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
         191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
         202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
         148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
         237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
        [151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
         180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
         22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
         230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
         215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
         164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
         220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
         236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
         234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
         155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
         171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
         36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
         15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
         117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
         160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
         79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
         48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
    # expected [encrypt_table, decrypt_table] for key b'barfoo!'
    target2 = [
        [124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
         209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
         248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
         10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
         91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
         162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
         148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
         142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
         214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
         18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
         155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
         254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
         138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
         156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
         171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
         52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
         129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
        [117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
         174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
         14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
         190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
         82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
         92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
         170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
         153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
         210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
         101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
         13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
         77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
         160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
         231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
         122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
         184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
         51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
    encrypt_table = b''.join(get_table(b'foobar!'))
    decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
    for i in range(0, 256):
        assert (target1[0][i] == ord(encrypt_table[i]))
        assert (target1[1][i] == ord(decrypt_table[i]))
    encrypt_table = b''.join(get_table(b'barfoo!'))
    decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
    for i in range(0, 256):
        assert (target2[0][i] == ord(encrypt_table[i]))
        assert (target2[1][i] == ord(decrypt_table[i]))
161,242 | from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
class TableCipher(object):
    """Fixed byte-substitution "cipher" driven by a key-derived table.

    op is truthy for the encrypting direction, falsy for decrypting;
    cipher_name and iv are accepted only for interface compatibility.
    """

    def __init__(self, cipher_name, key, iv, op):
        self._encrypt_table, self._decrypt_table = init_table(key)
        self._op = op

    def update(self, data):
        # pick the direction's table and translate the whole buffer at once
        table = self._encrypt_table if self._op else self._decrypt_table
        return translate(data, table)
def test_encryption():
    """Round-trip the table cipher: encrypt then decrypt via run_cipher."""
    from shadowsocks.crypto import util
    cipher = TableCipher('table', b'test', b'', 1)
    decipher = TableCipher('table', b'test', b'', 0)
    util.run_cipher(cipher, decipher)
161,243 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
# Handle to the OpenSSL shared library; populated elsewhere at load time.
libcrypto = None


def load_cipher(cipher_name):
    """Resolve and invoke the EVP_* constructor for *cipher_name*.

    Returns the EVP_CIPHER pointer, or None when libcrypto is not loaded
    or does not export the requested cipher.
    """
    symbol = 'EVP_' + cipher_name.replace('-', '_')
    ctor = getattr(libcrypto, symbol, None)
    if not ctor:
        return None
    ctor.restype = c_void_p
    return ctor()
161,244 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* (e.g. 'aes-128-cfb') through OpenSSLCrypto,
    encrypting with one instance and decrypting with another."""
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
    # smoke-test the aes-128-cfb round trip
    run_method('aes-128-cfb')
161,245 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* through OpenSSLCrypto: encrypt with one
    instance, decrypt with another, and compare via util.run_cipher."""
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_256_cfb():
    # smoke-test the aes-256-cfb round trip
    run_method('aes-256-cfb')
161,246 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* through OpenSSLCrypto: encrypt with one
    instance, decrypt with another, and compare via util.run_cipher."""
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_128_cfb8():
    # smoke-test the aes-128-cfb8 round trip
    run_method('aes-128-cfb8')
161,247 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* through OpenSSLCrypto: encrypt with one
    instance, decrypt with another, and compare via util.run_cipher."""
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_aes_256_ofb():
    # smoke-test the aes-256-ofb round trip
    run_method('aes-256-ofb')
161,248 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* through OpenSSLCrypto via util.run_cipher.

    NOTE(review): the function body was missing here (a bare ``def`` line,
    which is a syntax error); restored to match the identical helper used
    by the sibling cipher test modules.
    """
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)


def test_aes_256_ctr():
    # smoke-test the aes-256-ctr round trip
    run_method('aes-256-ctr')
161,249 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* through OpenSSLCrypto: encrypt with one
    instance, decrypt with another, and compare via util.run_cipher."""
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_bf_cfb():
    # smoke-test the Blowfish-CFB round trip
    run_method('bf-cfb')
161,250 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
def run_method(method):
    """Round-trip *method* through OpenSSLCrypto via util.run_cipher.

    NOTE(review): the function body was missing here (a bare ``def`` line,
    which is a syntax error); restored to match the identical helper used
    by the sibling cipher test modules.
    """
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)


def test_rc4():
    # smoke-test the RC4 round trip
    run_method('rc4')
161,251 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
# Lazily-initialized libsodium state: the ctypes handle, a loaded flag, and
# the size of the shared output buffer allocated by load_libsodium().
libsodium = None
loaded = False
buf_size = 2048
def load_libsodium():
    """Locate libsodium via ctypes.util.find_library, declare the xor_ic
    function signatures, call sodium_init(), and allocate the shared
    output buffer.  Raises if libsodium cannot be found."""
    global loaded, libsodium, buf
    from ctypes.util import find_library
    for p in ('sodium',):
        libsodium_path = find_library(p)
        if libsodium_path:
            break
    else:
        raise Exception('libsodium not found')
    logging.info('loading libsodium from %s', libsodium_path)
    libsodium = CDLL(libsodium_path)
    libsodium.sodium_init.restype = c_int
    # (out, msg, msglen, nonce, initial_counter, key)
    libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
    libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                       c_ulonglong,
                                                       c_char_p, c_ulonglong,
                                                       c_char_p)
    libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
    libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                        c_ulonglong,
                                                        c_char_p, c_ulonglong,
                                                        c_char_p)
    libsodium.sodium_init()
    buf = create_string_buffer(buf_size)
    loaded = True
161,252 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
class Salsa20Crypto(object):
    """Stream cipher backed by libsodium's salsa20/chacha20 *_xor_ic
    primitives; the initial-counter variant lets update() resume the
    keystream mid-stream."""

    def __init__(self, cipher_name, key, iv, op):
        # libsodium is loaded lazily on first construction
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == b'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == b'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        # Encrypt/decrypt *data*; BLOCK_SIZE and the shared buf/buf_size
        # come from module globals defined elsewhere -- presumably the
        # cipher's 64-byte block, confirm.
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
def test_salsa20():
    """Round-trip salsa20: encrypt then decrypt via util.run_cipher."""
    from shadowsocks.crypto import util
    cipher = Salsa20Crypto(b'salsa20', b'k' * 32, b'i' * 16, 1)
    decipher = Salsa20Crypto(b'salsa20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
161,253 | from __future__ import absolute_import, division, print_function, \
with_statement
import logging
from ctypes import CDLL, c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
class Salsa20Crypto(object):
    """Stream cipher backed by libsodium's salsa20/chacha20 *_xor_ic
    primitives; the initial-counter variant lets update() resume the
    keystream mid-stream."""

    def __init__(self, cipher_name, key, iv, op):
        # libsodium is loaded lazily on first construction
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == b'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == b'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        # Encrypt/decrypt *data*; BLOCK_SIZE and the shared buf/buf_size
        # come from module globals defined elsewhere -- presumably the
        # cipher's 64-byte block, confirm.
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
def test_chacha20():
    """Round-trip chacha20: encrypt then decrypt via util.run_cipher."""
    from shadowsocks.crypto import util
    cipher = Salsa20Crypto(b'chacha20', b'k' * 32, b'i' * 16, 1)
    decipher = Salsa20Crypto(b'chacha20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
161,254 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulong, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
# Lazily-initialized libsodium state: the ctypes handle, a loaded flag, and
# the size of the shared output buffer allocated by load_libsodium().
libsodium = None
loaded = False
buf_size = 2048
def load_libsodium():
    """Locate libsodium via util.find_library, declare the stream-cipher
    signatures, and allocate the shared output buffer.

    Populates the module globals ``libsodium``, ``buf`` and ``loaded``.
    Raises if libsodium cannot be found.
    """
    global loaded, libsodium, buf
    libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
                                  'libsodium')
    if libsodium is None:
        raise Exception('libsodium not found')
    # (out, msg, msglen, nonce, initial_counter, key)
    libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
    libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                       c_ulonglong,
                                                       c_char_p, c_ulonglong,
                                                       c_char_p)
    libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
    libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                        c_ulonglong,
                                                        c_char_p, c_ulonglong,
                                                        c_char_p)
    # chacha20-ietf only exists in newer libsodium releases; looking up a
    # missing symbol on a CDLL raises AttributeError, which is the only
    # error we intend to tolerate here.  The previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        libsodium.crypto_stream_chacha20_ietf_xor_ic.restype = c_int
        libsodium.crypto_stream_chacha20_ietf_xor_ic.argtypes = (c_void_p,
                                                                 c_char_p,
                                                                 c_ulonglong,
                                                                 c_char_p,
                                                                 c_ulong,
                                                                 c_char_p)
    except AttributeError:
        pass
    buf = create_string_buffer(buf_size)
    loaded = True
161,255 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulong, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
class SodiumCrypto(object):
    """Stream cipher backed by libsodium (salsa20 / chacha20 /
    chacha20-ietf); the *_xor_ic initial-counter primitives let update()
    resume the keystream mid-stream."""

    def __init__(self, cipher_name, key, iv, op):
        # libsodium is loaded lazily on first construction
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        elif cipher_name == 'chacha20-ietf':
            self.cipher = libsodium.crypto_stream_chacha20_ietf_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        # Encrypt/decrypt *data*; BLOCK_SIZE and the shared buf/buf_size
        # come from module globals defined elsewhere -- presumably the
        # cipher's 64-byte block, confirm.
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
def test_salsa20():
    # round-trip salsa20: encrypt then decrypt via util.run_cipher
    cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
161,256 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulong, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
class SodiumCrypto(object):
    """Stream cipher backed by libsodium (salsa20 / chacha20 /
    chacha20-ietf); the *_xor_ic initial-counter primitives let update()
    resume the keystream mid-stream."""

    def __init__(self, cipher_name, key, iv, op):
        # libsodium is loaded lazily on first construction
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        elif cipher_name == 'chacha20-ietf':
            self.cipher = libsodium.crypto_stream_chacha20_ietf_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        # Encrypt/decrypt *data*; BLOCK_SIZE and the shared buf/buf_size
        # come from module globals defined elsewhere -- presumably the
        # cipher's 64-byte block, confirm.
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
def test_chacha20():
    # round-trip chacha20: encrypt then decrypt via util.run_cipher
    cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
161,257 | from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulong, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
class SodiumCrypto(object):
    """Stream cipher backed by libsodium (salsa20 / chacha20 /
    chacha20-ietf); the *_xor_ic initial-counter primitives let update()
    resume the keystream mid-stream."""

    def __init__(self, cipher_name, key, iv, op):
        # libsodium is loaded lazily on first construction
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        elif cipher_name == 'chacha20-ietf':
            self.cipher = libsodium.crypto_stream_chacha20_ietf_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0

    def update(self, data):
        # Encrypt/decrypt *data*; BLOCK_SIZE and the shared buf/buf_size
        # come from module globals defined elsewhere -- presumably the
        # cipher's 64-byte block, confirm.
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
def test_chacha20_ietf():
    # round-trip chacha20-ietf: encrypt then decrypt via util.run_cipher
    cipher = SodiumCrypto('chacha20-ietf', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('chacha20-ietf', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
161,258 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork, PortRange
from shadowsocks import encrypt
def check_python():
    """Exit with a message unless running on Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2:
        if minor < 6:
            print('Python 2.6+ required')
            sys.exit(1)
    elif major == 3:
        if minor < 3:
            print('Python 3.3+ required')
            sys.exit(1)
    else:
        print('Python version not supported')
        sys.exit(1)
161,259 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork, PortRange
from shadowsocks import encrypt
def __version():
    """Best-effort version lookup: installed distribution metadata first,
    then the bundled version module, else the empty string."""
    try:
        import pkg_resources
        return pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        pass
    try:
        from shadowsocks import version
        return version.version()
    except Exception:
        return ''


def log_shadowsocks_version():
    """Log the running ShadowsocksR version at INFO level."""
    logging.info('ShadowsocksR %s' % __version())
161,260 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork, PortRange
from shadowsocks import encrypt
# Custom log level below DEBUG, plus the module-wide verbosity counter set
# by get_config() from the -v/-q flags.
VERBOSE_LEVEL = 5
verbose = 0
def print_shadowsocks():
    # print the version banner (__version is defined elsewhere in this module)
    print('ShadowsocksR %s' % __version())
def find_config():
    """Locate the config file, preferring user-config.json over config.json.

    Each name is looked up first in the current directory, then in the
    parent directory.  Returns the first existing path, or None.
    """
    for name in ('user-config.json', 'config.json'):
        if os.path.exists(name):
            return name
        parent_path = os.path.join(os.path.abspath('..'), name)
        if os.path.exists(parent_path):
            return parent_path
    return None
def check_config(config, is_local):
    """Sanity-check the merged configuration: warn about risky values and
    exit the process on fatal problems (missing password, default
    password, unsupported platform options, unknown cipher)."""
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return
    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)
    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)
    # normalize ports to int (server_port may also be a list of ports)
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])
    if 'server_port' in config and type(config['server_port']) != list:
        config['server_port'] = int(config['server_port'])
    if config.get('local_address', '') in [b'0.0.0.0']:
        logging.warning('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warning('warning: server set to listen on %s:%s, are you sure?' %
                        (to_str(config['server']), config['server_port']))
    if config.get('timeout', 300) < 100:
        logging.warning('warning: your timeout %d seems too short' %
                        int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warning('warning: your timeout %d seems too long' %
                        int(config.get('timeout')))
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)
    # raises/exits if the configured cipher method is unusable
    encrypt.try_cipher(config['password'], config['method'])
def print_help(is_local):
    # dispatch to the sslocal or ssserver usage text
    # (print_server_help is defined elsewhere in this module)
    if is_local:
        print_local_help()
    else:
        print_server_help()
def print_local_help():
    """Print the sslocal usage/help text to stdout."""
    print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -o OBFS                obfsplugin, default: http_simple
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def remove_comment(json):
    # Strip //-style comments from a JSON document, one character at a time,
    # using the JSFormat state machine (defined elsewhere in this module).
    # NOTE: the parameter shadows the stdlib json module inside this function.
    fmt = JSFormat()
    return "".join([fmt.push(c) for c in json])
def parse_json_in_str(data):
    # parse json and convert everything from unicode to str
    # (_decode_dict is the object_hook defined elsewhere in this module)
    return json.loads(data, object_hook=_decode_dict)
def to_bytes(s):
    """Return *s* as bytes: on Python 3, UTF-8 encode exact ``str`` values;
    everything else passes through unchanged."""
    needs_encoding = bytes != str and type(s) == str
    return s.encode('utf-8') if needs_encoding else s
def to_str(s):
    """Return *s* as text: on Python 3, UTF-8 decode exact ``bytes`` values;
    everything else passes through unchanged."""
    needs_decoding = bytes != str and type(s) == bytes
    return s.decode('utf-8') if needs_decoding else s
class IPNetwork(object):
    """A set of CIDR networks supporting fast ``addr in network`` tests.

    Accepts a comma-separated string or an iterable of CIDR strings; v4 and
    v6 networks are stored separately as (prefix-bits, suffix-length) pairs.
    """

    # bit length of an address per family; False maps non-IP input to 0
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        self.addrs_str = addrs
        self._network_list_v4 = []
        self._network_list_v6 = []
        if type(addrs) == str:
            addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Parse one CIDR string and add it to the appropriate family list.

        Raises on invalid notation.  Fix: the original compared literals
        with ``is``/``is not`` (``addr is ""``, ``len(block) is 1``,
        ``ip is not 0``), which relies on CPython object interning and is
        not guaranteed to work; replaced with value comparisons.
        """
        if addr == "":
            return
        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family is socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family is socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # no prefix given: infer it from the trailing zero bits
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warning("You did't specify CIDR routing prefix size for %s, "
                            "implicit treated as %s/%d" % (addr, addr, addr_len))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            # store only the network prefix bits
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family is socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        """True when *addr* (a textual IP) falls inside any stored network."""
        addr_family = is_ip(addr)
        if addr_family is socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family is socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False

    def __cmp__(self, other):
        # Python 2 only: cmp() does not exist on Python 3
        return cmp(self.addrs_str, other.addrs_str)

    def __eq__(self, other):
        return self.addrs_str == other.addrs_str

    def __ne__(self, other):
        return self.addrs_str != other.addrs_str
class PortRange(object):
    """Parse a spec like "80,8000-8100" into a port set for fast lookup.

    Items outside 0..65535 are clamped; malformed items are logged and
    skipped rather than raising.
    """

    def __init__(self, range_str):
        self.range_str = to_str(range_str)
        self.range = set()
        for item in to_str(range_str).split(','):
            try:
                bounds = item.split('-')
                if len(bounds) == 1:
                    if item:
                        self.range.add(int(item))
                elif len(bounds) == 2:
                    low = int(bounds[0])
                    high = int(bounds[1])
                    if low < 0:
                        low = 0
                    if high > 65535:
                        high = 65535
                    self.range.update(range(low, high + 1))
            except Exception as e:
                logging.error(e)

    def __contains__(self, val):
        return val in self.range

    def __cmp__(self, other):
        # Python 2 only: cmp() does not exist on Python 3
        return cmp(self.range_str, other.range_str)

    def __eq__(self, other):
        return self.range_str == other.range_str

    def __ne__(self, other):
        return self.range_str != other.range_str
def get_config(is_local):
    """Build the effective configuration from the command line and an
    optional JSON config file, configure logging from the verbosity flags,
    validate the result, and return it.  Exits the process on usage errors.
    is_local selects the sslocal vs ssserver option set."""
    global verbose
    config = {}
    config_path = None
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:O:o:G:g:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
                    'version']
    else:
        shortopts = 'hd:s:p:k:m:O:o:G:g:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
                    'forbidden-ip=', 'user=', 'manager-address=', 'version']
    try:
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        # first pass: only -c/-h/--version, so the config file is known
        # before the remaining flags override its contents
        for key, value in optlist:
            if key == '-c':
                config_path = value
            elif key in ('-h', '--help'):
                print_help(is_local)
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            else:
                continue
        if config_path is None:
            config_path = find_config()
        if config_path:
            logging.debug('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(remove_comment(f.read().decode('utf8')))
                except ValueError as e:
                    logging.error('found an error in config.json: %s', str(e))
                    sys.exit(1)
        v_count = 0
        # second pass: command-line flags take precedence over the file
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-O':
                config['protocol'] = to_str(value)
            elif key == '-o':
                config['obfs'] = to_str(value)
            elif key == '-G':
                config['protocol_param'] = to_str(value)
            elif key == '-g':
                config['obfs_param'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value)
            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                v_count -= 1
                config['verbose'] = v_count
            else:
                continue
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)
    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)
    # fill in defaults for anything not supplied
    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['protocol'] = to_str(config.get('protocol', 'origin'))
    config['protocol_param'] = to_str(config.get('protocol_param', ''))
    config['obfs'] = to_str(config.get('obfs', 'plain'))
    config['obfs_param'] = to_str(config.get('obfs_param', ''))
    config['port_password'] = config.get('port_password', None)
    config['additional_ports'] = config.get('additional_ports', {})
    config['additional_ports_only'] = config.get('additional_ports_only', False)
    config['timeout'] = int(config.get('timeout', 300))
    config['udp_timeout'] = int(config.get('udp_timeout', 120))
    config['udp_cache'] = int(config.get('udp_cache', 64))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocksr.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocksr.log')
    config['verbose'] = config.get('verbose', False)
    config['connect_verbose_info'] = config.get('connect_verbose_info', 0)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        config['server'] = to_str(config.get('server', '0.0.0.0'))
    try:
        config['forbidden_ip'] = \
            IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
    except Exception as e:
        logging.error(e)
        sys.exit(2)
    try:
        config['forbidden_port'] = PortRange(config.get('forbidden_port', ''))
    except Exception as e:
        logging.error(e)
        sys.exit(2)
    try:
        config['ignore_bind'] = \
            IPNetwork(config.get('ignore_bind', '127.0.0.0/8,::1/128,10.0.0.0/8,192.168.0.0/16'))
    except Exception as e:
        logging.error(e)
        sys.exit(2)
    config['server_port'] = config.get('server_port', 8388)
    # reconfigure logging to the level implied by the final verbosity count
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(filename)s:%(lineno)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    check_config(config, is_local)
    return config
161,261 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
def detect_ipv6_supprot():
    """Probe whether the local network stack supports IPv6.

    Tries a UDP connect to the IPv6 loopback address, prints the result,
    and returns True on success, False otherwise.  (The misspelled name
    is kept because external callers reference it as-is.)
    """
    if 'has_ipv6' in dir(socket):
        s = None
        try:
            s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            s.connect(('::1', 0))
            print('IPv6 support')
            return True
        except (OSError, IOError):
            # narrowed from a bare except: only socket/OS failures mean
            # "no IPv6 here"; anything else should propagate
            pass
        finally:
            # bugfix: the original leaked the probe socket's fd
            if s is not None:
                s.close()
    print('IPv6 not support')
    return False
161,262 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
QCLASS_IN = 1
def build_address(address):
def build_request(address, qtype):
    """Assemble a raw DNS query packet for *address* of record type *qtype*."""
    # 16-bit random transaction id, then the fixed header:
    # RD flag set (0x01), 1 question, 0 answer/authority/additional records.
    parts = [
        os.urandom(2),
        struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0),
        build_address(address),
        struct.pack('!HH', qtype, QCLASS_IN),
    ]
    return b''.join(parts)
161,263 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
def parse_record(data, offset, question=False):
    """Parse one DNS record (or question entry) at *offset* in *data*.

    Returns (consumed_length, record_tuple).  Question entries carry no
    TTL/RDATA, so their tuple holds None placeholders for those fields.
    """
    nlen, name = parse_name(data, offset)
    body = offset + nlen
    if question:
        # question section: only TYPE and CLASS follow the name
        record_type, record_class = struct.unpack(
            '!HH', data[body:body + 4]
        )
        return nlen + 4, (name, None, record_type, record_class, None, None)
    # answer/authority/additional: TYPE, CLASS, TTL, RDLENGTH, then RDATA
    record_type, record_class, record_ttl, record_rdlength = struct.unpack(
        '!HHiH', data[body:body + 10]
    )
    ip = parse_ip(record_type, data, record_rdlength, body + 10)
    return (nlen + 10 + record_rdlength,
            (name, ip, record_type, record_class, record_ttl))
def parse_header(data):
    """Decode the fixed 12-byte DNS header; return None if too short.

    Returns (id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount).
    The flag fields are the raw masked bits, not shifted down.
    """
    if len(data) < 12:
        return None
    (res_id, flags_hi, flags_lo, res_qdcount,
     res_ancount, res_nscount, res_arcount) = struct.unpack('!HBBHHHH',
                                                            data[:12])
    res_qr = flags_hi & 128    # QR: response bit
    res_tc = flags_hi & 2      # TC: truncated bit
    res_ra = flags_lo & 128    # RA: recursion available
    res_rcode = flags_lo & 15  # RCODE: response code
    # assert res_tc == 0
    # assert res_rcode in [0, 3]
    return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
            res_ancount, res_nscount, res_arcount)
class DNSResponse(object):
    """Parsed DNS reply: the queried hostname plus question/answer tuples."""

    def __init__(self):
        self.hostname = None
        # both lists hold (addr, type, class) tuples
        self.questions = []
        self.answers = []

    def __str__(self):
        return '%s: %s' % (self.hostname, str(self.answers))
def parse_response(data):
    """Parse a raw DNS reply into a DNSResponse, or None on any failure.

    Walks the question, answer, authority and additional sections in
    order.  Authority and additional records are parsed only to advance
    the offset and are discarded.  Any parse error is logged via
    shell.print_exception and swallowed (returns None).
    """
    try:
        if len(data) >= 12:
            header = parse_header(data)
            if not header:
                return None
            res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
                res_ancount, res_nscount, res_arcount = header
            qds = []
            ans = []
            # record sections start right after the 12-byte header
            offset = 12
            # question section
            for i in range(0, res_qdcount):
                l, r = parse_record(data, offset, True)
                offset += l
                if r:
                    qds.append(r)
            # answer section
            for i in range(0, res_ancount):
                l, r = parse_record(data, offset)
                offset += l
                if r:
                    ans.append(r)
            # authority + additional sections: skip past, keep nothing
            for i in range(0, res_nscount):
                l, r = parse_record(data, offset)
                offset += l
            for i in range(0, res_arcount):
                l, r = parse_record(data, offset)
                offset += l
            response = DNSResponse()
            if qds:
                # record tuples are (name, ip, type, class, ttl)
                response.hostname = qds[0][0]
            for an in qds:
                response.questions.append((an[1], an[2], an[3]))
            for an in ans:
                response.answers.append((an[1], an[2], an[3]))
            return response
        # data shorter than a DNS header: falls through, returns None
    except Exception as e:
        shell.print_exception(e)
        return None
161,264 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
# one DNS label: 1-63 chars of [A-Za-z0-9_-], not starting/ending with '-'
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d_-]{1,63}(?<!-)$", re.IGNORECASE)


def is_valid_hostname(hostname):
    """Return True if *hostname* (bytes) is a syntactically valid DNS name.

    Accepts an optional single trailing dot.  Total length must be at
    most 255 bytes and every label must match VALID_HOSTNAME.
    """
    if len(hostname) > 255:
        return False
    # bugfix: on Python 3 indexing bytes yields an int, so the original
    # `hostname[-1] == b'.'` was always False -- trailing-dot names were
    # rejected and b'' raised IndexError.  A slice compares correctly on
    # both Python 2 and 3 and is safe for empty input.
    if hostname[-1:] == b'.':
        hostname = hostname[:-1]
    return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
161,265 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
def daemon_start(pid_file, log_file):
    """Daemonize the current process.

    Forks once: the parent waits up to 5s for the child to signal
    readiness (SIGTERM -> exit 0) or failure (SIGINT -> exit 1).  The
    child writes *pid_file*, detaches into a new session, ignores
    SIGHUP, and redirects stdout/stderr to *log_file*.
    """

    def handle_exit(signum, _):
        # parent-side handler: child reports success via SIGTERM
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)
    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1
    if pid > 0:
        # parent waits for its child
        time.sleep(5)
        sys.exit(0)
    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)
    os.setsid()
    # bugfix: the arguments were swapped (signal.signal(signal.SIG_IGN,
    # signal.SIGHUP)); it only "worked" because SIG_IGN and SIGHUP are
    # both 1 on POSIX.  The intent is to ignore SIGHUP so the daemon
    # survives losing its controlling terminal.
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    print('started')
    os.kill(ppid, signal.SIGTERM)
    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1)
def daemon_stop(pid_file):
    """Stop the daemon whose pid is recorded in *pid_file*.

    Returns normally when the daemon is confirmed stopped or was not
    running; exits with status 1 on I/O errors or if the process does
    not terminate within ~10 seconds.
    """
    import errno
    try:
        with open(pid_file) as f:
            buf = f.read()
            pid = common.to_str(buf)
            if not buf:
                logging.error('not running')
                # bugfix: the original fell through to int('') and
                # crashed with ValueError when the pid file was empty
                return
    except IOError as e:
        shell.print_exception(e)
        if e.errno == errno.ENOENT:
            # always exit 0 if we are sure daemon is not running
            logging.error('not running')
            return
        sys.exit(1)
    pid = int(pid)
    if pid > 0:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            if e.errno == errno.ESRCH:
                logging.error('not running')
                # always exit 0 if we are sure daemon is not running
                return
            shell.print_exception(e)
            sys.exit(1)
    else:
        logging.error('pid is not positive: %d', pid)

    # poll for process exit; sleep for maximum 10s
    for i in range(0, 200):
        try:
            # signal 0 only checks whether the pid still exists
            os.kill(pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                break
        time.sleep(0.05)
    else:
        logging.error('timed out when stopping pid %d', pid)
        sys.exit(1)
    print('stopped')
    os.unlink(pid_file)
def daemon_exec(config):
    """Dispatch the optional 'daemon' command: start, stop, or restart.

    Does nothing when config carries no 'daemon' key.  An empty command
    defaults to 'start'.  'stop' always exits the process afterwards.
    """
    if 'daemon' not in config:
        return
    if os.name != 'posix':
        raise Exception('daemon mode is only supported on Unix')
    command = config['daemon'] or 'start'
    pid_file = config['pid-file']
    log_file = config['log-file']
    if command == 'start':
        daemon_start(pid_file, log_file)
    elif command == 'stop':
        daemon_stop(pid_file)
        # always exit after daemon_stop
        sys.exit(0)
    elif command == 'restart':
        daemon_stop(pid_file)
        daemon_start(pid_file, log_file)
    else:
        raise Exception('unsupported daemon command %s' % command)
161,266 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
def set_user(username):
    """Drop privileges to *username*: supplementary groups, gid, then uid.

    No-op when username is None or already the current user.  Raises
    KeyError for unknown users; the setgid/setuid calls raise when run
    without sufficient privileges.
    """
    if username is None:
        return

    import pwd
    import grp

    try:
        pwrec = pwd.getpwnam(username)
    except KeyError:
        logging.error('user not found: %s' % username)
        raise
    user, uid, gid = pwrec[0], pwrec[2], pwrec[3]

    cur_uid = os.getuid()
    if uid == cur_uid:
        return
    if cur_uid != 0:
        logging.error('can not set user as nonroot user')
        # will raise later

    # inspired by supervisor: join the user's supplementary groups first
    if hasattr(os, 'setgroups'):
        supplemental = [g[2] for g in grp.getgrall() if user in g[3]]
        supplemental.insert(0, gid)
        os.setgroups(supplemental)
    os.setgid(gid)
    os.setuid(uid)
161,267 | from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.obfsplugin import plain, http_simple, obfs_tls, verify, auth, auth_chain
def mu_protocol():
    """Return the protocol names that support multi-user (mu) mode."""
    return [
        "auth_aes128_md5",
        "auth_aes128_sha1",
        "auth_chain_a",
    ]
161,268 | from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
class Manager(object):
    """Multi-port shadowsocks server manager.

    Listens on a control socket (UDP "host:port" or a Unix datagram
    path) for 'add'/'remove'/'ping' commands and runs one
    (TCPRelay, UDPRelay) pair per server port on a shared event loop.
    Transfer statistics are periodically pushed back to the most recent
    controller address seen.
    """

    def __init__(self, config):
        # config must provide 'manager_address' and 'port_password'
        # (a mapping of port -> password); exits the whole process on a
        # bad or unbindable manager address.
        self._config = config
        self._relays = {}  # (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)
        # per-port byte counters, flushed by handle_periodic
        self._statistics = collections.defaultdict(int)
        self._control_client_addr = None
        try:
            manager_address = common.to_str(config['manager_address'])
            if ':' in manager_address:
                # "host:port" form: resolve once to pick the socket family
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # otherwise the address is a Unix domain socket path
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)
        # spin up one relay pair per configured port/password
        port_password = config['port_password']
        del config['port_password']
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)

    def add_port(self, config):
        # Create and register a TCP+UDP relay pair for
        # config['server_port']; logs an error and returns when the
        # port is already being served.
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              stat_callback=self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              stat_callback=self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)

    def remove_port(self, config):
        # Tear down and forget the relay pair on config['server_port'].
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))

    def handle_event(self, sock, fd, event):
        # Event-loop callback: consume one control datagram, remember
        # the sender so replies/stats can go back to it, and dispatch
        # the parsed command.  NOTE(review): BUF_SIZE is a module-level
        # constant defined outside this chunk -- confirm its value.
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if config:
                    # let the command override the configuration file
                    a_config.update(config)
                if 'server_port' not in a_config:
                    logging.error('can not find server_port in config')
                else:
                    if command == 'add':
                        self.add_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'remove':
                        self.remove_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'ping':
                        self._send_control_data(b'pong')
                    else:
                        logging.error('unknown command %s', command)

    def _parse_command(self, data):
        # commands:
        # add: {"server_port": 8000, "password": "foobar"}
        # remove: {"server_port": 8000"}
        # Returns (command, config_dict); bare commands like 'ping'
        # come back as (data, None); malformed JSON yields None.
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None

    def stat_callback(self, port, data_len):
        # Called by the relays to accumulate per-port traffic counters.
        self._statistics[port] += data_len

    def handle_periodic(self):
        # Periodically push accumulated statistics to the controller,
        # chunked so each JSON payload fits in a UDP datagram.
        # NOTE(review): STAT_SEND_LIMIT is a module-level constant
        # defined outside this chunk -- confirm its value.
        r = {}
        i = 0

        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(b'stat: ' + data)

        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
                i = 0
        if len(r) > 0 :
            send_data(r)
        self._statistics.clear()

    def _send_control_data(self, data):
        # Best-effort datagram back to the last controller seen;
        # would-block errors are silently dropped, others are logged.
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()

    def run(self):
        """Run the shared event loop (blocks until the loop stops)."""
        self._loop.run()
def run(config):
    """Construct a Manager from *config* and run its event loop."""
    manager = Manager(config)
    manager.run()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.