id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
3377563 | <filename>data_processing/processing_core.py<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@module : processing_core.py
@author : ayaya
@contact : <EMAIL>
@time : 2022/3/25 9:11 下午
"""
import copy
import json
import os
import random
import time
import concurrent.futures
from typing import List
import pymysql
import requests
from atri_bot.twitter.tw import start_observe_tweets, get_users, escape_regular_text
from atri_bot.weibo import WeiboAPI
from data_processing.common.Riko import Riko
from data_processing.common.connect import Connect
from data_processing.common.setting import (
PROFILE_IMAGE_PATH,
TWITTER_URL,
HEADERS,
MEDIA_IMAGE_PATH,
MEDIA_VIDEO_PATH,
WEIBO_COOKIES_PATH,
WEIBO_COOKIES,
)
# Body template for mirrored posts, filled via str.format_map in
# ProcessingCore.send_message.  NOTE(review): "(a)" presumably stands in for
# "@" so Weibo does not treat the Twitter handle as a mention -- confirm.
WEIBO_TEMPLATE = """{name}
(a){username}
{created_at}
{text}
{url}"""
class ProcessingCore(object):
    """Watch a list of Twitter users and mirror their new tweets to Weibo.

    The observed user list is seeded from ``data_processing/spider_user.txt``;
    users, tweets and send status are persisted through ``Connect``.
    """

    def __init__(self):
        # DB config path is relative to the process working directory.
        with open("data_processing/atri_bot_db.json", "r") as file:
            config = json.loads(file.read())
        config["cursorclass"] = pymysql.cursors.DictCursor
        Riko.db_config = config
        self.connect = Connect()
        self._create_folder()
        self.spider_user_list = list()  # usernames currently being observed
        self.need_update_spider_user_list = list()  # new names not yet in DB
        self.error_user_list = list()  # reserved, see error_user()
        self._init_start_user_list()
        WeiboAPI.load_from_cookies_str(WEIBO_COOKIES).save_cookies_object(
            WEIBO_COOKIES_PATH
        )
        self.weibo_api = WeiboAPI.load_from_cookies_object(WEIBO_COOKIES_PATH)
        # WeiboAPI is not thread safe -- do not raise the worker count.
        self.executor = concurrent.futures.ThreadPoolExecutor(1)

    @staticmethod
    def _now() -> str:
        """Return the current local time as ``YYYY-mm-dd HH:MM:SS`` (DB format)."""
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))

    def bot_star(self):
        """Entry point: start observing tweets (name kept for compatibility)."""
        start_observe_tweets(
            usernames=self.spider_user_list,
            callback=lambda twitters: self._bot_controller(twitters),
        )

    def _init_start_user_list(self) -> None:
        """Seed the observed-user list from the txt file and the DB delta."""
        need_update_list = self._get_need_update_spider()
        text_list = self._read_user_list_in_txt()
        if len(need_update_list) == 0:
            self.spider_user_list = text_list
        else:
            self.need_update_spider_user_list = need_update_list
            self.spider_user_list = copy.deepcopy(need_update_list)

    def _create_folder(self) -> None:
        """Create the profile-image and media folders if they do not exist."""
        for path in (PROFILE_IMAGE_PATH, MEDIA_IMAGE_PATH, MEDIA_VIDEO_PATH):
            if not os.path.exists(path):
                os.mkdir(path)

    def _read_user_list_in_txt(self) -> list:
        """Read usernames (one per line, optional leading '@') from the txt file."""
        text_spider_user_list = []
        with open("data_processing/spider_user.txt") as file:
            for text in file.readlines():
                user_name = text.replace("@", "").replace("\n", "")
                text_spider_user_list.append(user_name)
        return text_spider_user_list

    def _check_user_info_change(self, user_info_list: List[dict]):
        """Compare cached user rows against fresh API data and persist diffs."""
        for user_info in user_info_list:
            change_dict = dict()
            check_user_info = get_users(user_info["uid"])[0]
            # BUG FIX: was ``.iterm()`` (AttributeError) -- dicts iterate with
            # ``.items()``.
            for key, value in check_user_info.items():
                if user_info[key] == value:
                    continue
                if key == "username":
                    self.connect.update_spider_user_info(
                        username=user_info[key],
                        info_dict={"username": check_user_info.get(key)},
                    )
                if key == "profile_image_url":
                    # BUG FIX: the freshly saved local path used to be stored
                    # under the same key and was immediately overwritten by the
                    # remote URL below; keep it under its own column (matches
                    # insert_user_info's ``profile_image_path`` argument).
                    change_dict["profile_image_path"] = self._save_profile_image(
                        check_user_info[key]
                    )
                change_dict[key] = check_user_info.get(key)
            if change_dict:
                self.connect.update_user_info(
                    uid=user_info["uid"], info_dict=change_dict
                )

    def _get_need_update_spider(self) -> list:
        """Touch last_check_time on known rows; return txt users missing from DB."""
        need_update_spider_user_list = []
        spider_user_info_in_db = [
            db_info["username"] for db_info in self.connect.get_spider_user_info()
        ]
        user_info_in_db = [db_info["uid"] for db_info in self.connect.get_user_info()]
        for username in spider_user_info_in_db:
            self.connect.update_spider_user_info(
                username=username, info_dict={"last_check_time": self._now()}
            )
        for uid in user_info_in_db:
            self.connect.update_user_info(
                uid=uid, info_dict={"last_check_time": self._now()}
            )
        for spider_user_info in self._read_user_list_in_txt():
            if spider_user_info not in spider_user_info_in_db:
                need_update_spider_user_list.append(spider_user_info)
        return need_update_spider_user_list

    def _save_profile_image(self, image_url: str) -> str:
        """Download a profile image; return its local path, or None on failure."""
        response = requests.get(url=image_url, headers=HEADERS)
        image_path = f"{PROFILE_IMAGE_PATH}/{image_url.split('/')[-1]}"
        if response.status_code == 200:
            with open(image_path, "wb") as file:
                file.write(response.content)
        else:
            image_path = None
        return image_path

    def _save_media_file(self, media_url_list: list, media_type_list: list) -> list:
        """Download each media item; return one local path per non-empty URL.

        Unsupported types (video) and failed downloads yield "" so the result
        stays aligned with the URL list and can be ','-joined safely.
        """
        image_path_list = list()
        for media_url, media_type in zip(media_url_list, media_type_list):
            # BUG FIX: skip empty URLs *before* issuing the HTTP request (the
            # original fetched first and never used the response).
            if len(media_url) == 0:
                continue
            # BUG FIX: "" placeholder instead of None -- None entries crashed
            # the later ``",".join(...)`` in update_new_text_info.
            image_path = ""
            if media_type == "photo":
                response = requests.get(url=media_url, headers=HEADERS)
                if response.status_code == 200:
                    image_path = os.path.join(
                        MEDIA_IMAGE_PATH, media_url.split("/")[-1]
                    )
                    with open(image_path, "wb") as file:
                        file.write(response.content)
                # BUG FIX: on failure the original appended "" *and* the path,
                # misaligning the list; a single "" entry is appended below.
            elif media_type == "video":
                pass  # video download not implemented yet (_download_video)
            image_path_list.append(image_path)
        return image_path_list

    def _download_video(self):
        """Placeholder: video downloading is not implemented yet."""
        pass

    def _get_media_url_info(self, media_data: List[dict], get_key: str) -> list:
        """Collect the non-empty ``get_key`` values from a list of media dicts.

        BUG FIX: tolerates ``media_data is None`` (tweets without a "media"
        key), which previously raised ``TypeError: len(None)``.
        """
        if not media_data:
            return []
        media_data_list = list()
        for data in media_data:
            media_value = data.get(get_key)
            if media_value is None or media_value == "":
                continue
            media_data_list.append(media_value)
        return media_data_list

    def _check_hashtag(self, hash_tag: List[dict]) -> object:
        """Render the hashtag list for storage; None when there are no tags.

        NOTE(review): this stores ``str(list)`` minus the outer brackets, so
        dict entries keep their braces -- presumably intended; confirm.
        """
        if hash_tag is None:
            return None
        return str(hash_tag)[1:-1]

    def update_new_spider_user_info(self, users_list: list) -> None:
        """Insert newly observed users into both user tables (idempotent)."""
        users_info_list = get_users(users_list)
        for user in users_info_list:
            try:
                self.connect.insert_spider_user_info(
                    uid=user.get("id"),
                    username=user.get("username"),
                    add_time=self._now(),
                )
            except pymysql.err.IntegrityError:
                pass  # row already present
            try:
                self.connect.insert_user_info(
                    uid=user.get("id"),
                    name=user.get("name"),
                    username=user.get("username"),
                    description=user.get("description"),
                    profile_image_url=user.get("profile_image_url"),
                    profile_image_path=self._save_profile_image(
                        user.get("profile_image_url")
                    ),
                    add_time=self._now(),
                )
            except pymysql.err.IntegrityError:
                pass  # row already present

    def update_new_text_info(self, need_update_info: List[dict]) -> None:
        """Persist freshly observed tweets; duplicates (same tid) are skipped."""
        for text_info in need_update_info:
            user = text_info.get("user")
            twitter_url = (
                f"{TWITTER_URL}/{user.get('username')}/status/{text_info.get('tid')}"
            )
            # Hoisted: the original recomputed these lists three times.
            media_urls = self._get_media_url_info(text_info.get("media"), "url")
            media_types = self._get_media_url_info(text_info.get("media"), "type")
            try:
                self.connect.insert_message_info(
                    tid=text_info.get("tid"),
                    uid=text_info.get("uid"),
                    name=user.get("name"),
                    username=user.get("username"),
                    text=text_info.get("text"),
                    time=text_info.get("created_at"),
                    twi_url=twitter_url,
                    tag=self._check_hashtag(text_info.get("hashtags")),
                    media_url=",".join(media_urls),
                    media_key=",".join(media_types),
                    media_path=",".join(
                        self._save_media_file(media_urls, media_types)
                    ),
                    status=0,
                    enter_time=self._now(),
                )
            except pymysql.err.IntegrityError:
                continue  # tweet already stored

    def _update_send_message_status(self, message_status: dict) -> None:
        """Record the send result (status 1 = sent, -1 = failed) for a tweet."""
        info_dict = {
            "status": message_status.get("status"),
            "send_time": self._now(),
        }
        if message_status.get("status") == -1:
            info_dict["error_message"] = message_status.get("error_message")
        self.connect.update_message_info_by_tid(
            tid=message_status.get("tid"), info_dict=info_dict
        )

    def send_message(self):
        """Queue every unsent tweet (status 0) for posting to Weibo."""
        message_list = self.connect.get_message_info_by_status(status=0)
        for m in message_list:
            # BUG FIX: bind the loop variable as a default argument -- the
            # closure captured ``m`` by reference, so tasks that ran after the
            # loop advanced all posted the *last* message.
            def run(m=m):
                try:
                    self.weibo_api.send_weibo(
                        WEIBO_TEMPLATE.format_map(
                            {
                                "name": m.get("name"),
                                "username": escape_regular_text(m.get("username")),
                                "created_at": m.get("time"),
                                "text": m.get("text"),
                                "url": m.get("twi_url"),
                            }
                        ),
                        # TODO: videos unsupported; needs an extra check
                        m.get('media_path').split(',') if m.get('media_path') else None,
                    )
                    self._update_send_message_status({"tid": m["tid"], "status": 1})
                except Exception as err:
                    self._update_send_message_status(
                        {"tid": m["tid"], "status": -1, "error_message": err}
                    )

            self.executor.submit(run)

    def _bot_controller(self, twitters: List[dict]):
        """Per-cycle callback: sync users, store tweets, send, then re-observe."""
        update_spider_user_list = self._get_need_update_spider()
        if len(update_spider_user_list) != 0:
            self.update_new_spider_user_info(self.spider_user_list)
            self.need_update_spider_user_list = copy.deepcopy(update_spider_user_list)
            # One-off catch-up pass over the newly added users.
            start_observe_tweets(
                usernames=update_spider_user_list,
                max_results=20,
                callback=lambda twitter: self._bot_controller(twitter),
            )
        if len(self.need_update_spider_user_list) != 0:
            self.spider_user_list.extend(self.need_update_spider_user_list)
            self.need_update_spider_user_list.clear()
        self.update_new_text_info(twitters)
        self.send_message()
        start_observe_tweets(
            usernames=self.spider_user_list,
            interval=60,
            max_results=10,
            callback=lambda twitter: self._bot_controller(twitter),
        )

    def error_user(self, error_user_list: list):
        """Placeholder for handling users that repeatedly fail; not implemented."""
        pass
# Run the relay when executed as a script (blocks inside start_observe_tweets).
if __name__ == "__main__":
    ProcessingCore().bot_star()
| StarcoderdataPython |
6452151 | import os
import spacy
import streamlit.components.v1 as components
# Development toggle: when _RELEASE is False the component frontend is served
# from a local dev server instead of the prebuilt bundle.
_RELEASE = True
if not _RELEASE:
    _component_func = components.declare_component(
        "st_ner_annotate", url="http://localhost:5000",
    )
else:
    # Serve the compiled frontend shipped next to this file.
    parent_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(parent_dir, "frontend/public")
    _component_func = components.declare_component(
        "st_ner_annotate", path=build_dir)
def st_ner_annotate(label, text, ents, key=None):
    """Render the NER annotation component and return the selected entities.

    Parameters
    ----------
    label: str
        Entity label forwarded to the frontend (the demo feeds the currently
        selected entity type)
    text: str
        Text to render
    ents: object
        Entities found in text
    key: str or None
        An optional key that uniquely identifies this component. If this is
        None, and the component's arguments are changed, the component will
        be re-mounted in the Streamlit frontend and lose its current state.

    Returns
    -------
    object
        Entities that have been selected
    """
    # ``default=ents`` makes the component echo the input until the user edits.
    component_value = _component_func(
        label=label, text=text, ents=ents, key=key, default=ents)
    return component_value
# app: `$ streamlit run my_component/__init__.py`
# Demo app: executes only in development mode (_RELEASE is False).
if not _RELEASE:
    import streamlit as st
    st.title("Named entity recognition demo")
    text = """Manhattan traces its origins to a trading post founded by colonists
from the Dutch Republic in 1624 on Lower Manhattan; the post was named New
Amsterdam in 1626. Manhattan is historically documented to have been purchased
by Dutch colonists from Native Americans in 1626 for 60 guilders, which equals
roughly $1059 in current terms. The territory and its surroundings came under
English control in 1664 and were renamed New York after King Charles II of
England granted the lands to his brother, the Duke of York. New York, based
in present-day Manhattan, served as the capital of the United States from 1785
until 1790. The Statue of Liberty greeted millions of immigrants as they came
to America by ship in the late 19th century and is a world symbol of the United
States and its ideals of liberty and peace. Manhattan became a borough during
the consolidation of New York City in 1898.
    """
    # Seed the annotator with spaCy's own predictions over the demo text.
    nlp = spacy.load("en_core_web_sm")
    entity_labels = nlp.get_pipe('ner').labels
    doc = nlp(text)
    ents = doc.to_json()['ents']
    current_entity_type = st.selectbox("Mark for Entity Type", entity_labels)
    entities = st_ner_annotate(current_entity_type, text, ents, key=42)
    st.json(entities)
| StarcoderdataPython |
9611260 | <filename>cogs/misc.py<gh_stars>0
from discord.ext import commands
import discord
import tools
class Misc(commands.Cog):
    """Random commands for the bot"""

    def __init__(self, bot):
        self.bot = bot
        # Shared database handle owned by the bot instance.
        self.database = bot.database

    @commands.command(name='invite', aliases=["getinvite", "botinvite"],
                      usage="invite", description="Invite the bot to your own server")
    @tools.has_perm()
    async def invite(self, ctx):
        # OAuth2 URL carrying the permission/scope set the bot requires.
        await ctx.send(
            "You can invite me here: https://discord.com/oauth2/authorize?client_id=809122042573357106&permissions=808840439&scope=bot%20applications.commands")

    @commands.command(name="credit", description="Get the names of the people who developed the bot", usage="credit")
    async def credit(self, ctx):
        # Embed listing the project's contributors.
        embed = discord.Embed(colour=0x36a39f, title="The list of contributors", description="FluxedScript")
        embed.set_footer(text="Ploxy | Contributor list")
        await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point used by discord.py's ``load_extension``."""
    bot.add_cog(Misc(bot))
| StarcoderdataPython |
193741 | <reponame>vovawed/fastapi-cloudauth
import pytest
from fastapi_cloudauth.messages import (NO_PUBLICKEY, NOT_AUTHENTICATED,
NOT_VALIDATED_CLAIMS, NOT_VERIFIED,
SCOPE_NOT_MATCHED)
from tests.helpers import assert_get_response
from tests.test_auth0 import Auth0Client
from tests.test_cognito import CognitoClient
from tests.test_firebase import FirebaseClient
class BaseTestCloudAuth:
    """Shared scaffolding: provisions a test user and tokens once per class."""

    # Concrete subclasses set this to a provider-specific client factory.
    cloud_auth = None

    @classmethod
    def setup_class(cls):
        """set credentials and create test user"""
        cls.cloud_auth = cls.cloud_auth()
        cls.cloud_auth.setup()
        # get access token and id token
        cls.ACCESS_TOKEN = cls.cloud_auth.ACCESS_TOKEN
        cls.SCOPE_ACCESS_TOKEN = cls.cloud_auth.SCOPE_ACCESS_TOKEN
        cls.ID_TOKEN = cls.cloud_auth.ID_TOKEN
        # set application for testing
        cls.client = cls.cloud_auth.TESTCLIENT

    @classmethod
    def teardown_class(cls):
        """delete test user"""
        cls.cloud_auth.teardown()

    def test_decode_token(self):
        # Smoke test: decoding the provisioned tokens must not raise.
        self.cloud_auth.decode()
class AccessTokenTestCase(BaseTestCloudAuth):
    """Access-token scenarios: valid, missing, tampered, and scoped tokens."""

    def success_case(self, path: str, token: str = None):
        """GET *path* with *token* and require HTTP 200."""
        return assert_get_response(
            client=self.client, endpoint=path, token=token, status_code=200
        )

    def userinfo_success_case(self, path: str, token: str = None):
        """Require 200 and that every returned user-info field is non-empty."""
        response = self.success_case(path, token)
        for value in response.json().values():
            assert value, f"{response.content} failed to parse"
        return response

    def failure_case(self, path: str, token: str = None, detail=""):
        """GET *path* with *token* and require HTTP 403 carrying *detail*."""
        return assert_get_response(
            client=self.client,
            endpoint=path,
            token=token,
            status_code=403,
            detail=detail,
        )

    def test_valid_token(self):
        self.success_case("/", self.ACCESS_TOKEN)

    def test_no_token(self):
        self.failure_case("/")
        # not auto_error: this endpoint tolerates a missing token
        self.success_case("no-error")

    def test_incompatible_kid_token(self):
        # manipulate header: swap in a JOSE header whose kid matches no JWKS key.
        # NOTE(review): the literal was redacted to "<KEY> by dataset
        # anonymization; originally a base64url-encoded JOSE header.
        token = self.ACCESS_TOKEN.split(".", 1)[-1]
        token = (
            "<KEY>
            + token
        )
        self.failure_case("/", token, detail=NO_PUBLICKEY)
        # not auto_error
        self.success_case("no-error", token)

    def test_no_kid_token(self):
        # manipulate header: JOSE header that carries no kid claim at all
        token = self.ACCESS_TOKEN.split(".", 1)[-1]
        token = "<KEY>." + token
        self.failure_case("/", token, detail=NOT_AUTHENTICATED)
        # not auto_error
        self.success_case("no-error", token)

    def test_not_verified_token(self):
        # manipulate public_key: corrupt the signature tail
        token = self.ACCESS_TOKEN[:-3] + "aaa"
        self.failure_case("/", token, detail=NOT_VERIFIED)
        # not auto_error
        self.success_case("no-error", token)

    def test_valid_scope(self):
        self.success_case("/scope/", self.SCOPE_ACCESS_TOKEN)

    def test_invalid_scope(self):
        self.failure_case("/scope/", self.ACCESS_TOKEN, detail=SCOPE_NOT_MATCHED)
        self.success_case("/scope/no-error/", self.ACCESS_TOKEN)

    def test_valid_token_extraction(self):
        self.userinfo_success_case("/access/user", self.ACCESS_TOKEN)

    def test_no_token_extraction(self):
        self.failure_case("/access/user")
        # not auto_error
        self.success_case("/access/user/no-error")

    def test_insufficient_user_info_from_access_token(self):
        # verified but the token does not contain the claims the endpoint maps
        self.failure_case(
            "/access/user/invalid/", self.ACCESS_TOKEN, detail=NOT_VALIDATED_CLAIMS
        )
        # not auto_error
        self.success_case("/access/user/invalid/no-error", self.ACCESS_TOKEN)
class IdTokenTestCase(BaseTestCloudAuth):
    """ID-token scenarios mirroring AccessTokenTestCase for /user endpoints."""

    def success_case(self, path: str, token: str = None):
        """GET *path* with *token* and require HTTP 200."""
        return assert_get_response(
            client=self.client, endpoint=path, token=token, status_code=200
        )

    def user_success_case(self, path: str, token: str = None):
        """Require 200 and that every current-user field is non-empty."""
        response = self.success_case(path, token)
        for value in response.json().values():
            assert value, f"{response.content} failed to parse"
        return response

    def failure_case(self, path: str, token: str = None, detail=""):
        """GET *path* with *token* and require HTTP 403 carrying *detail*."""
        return assert_get_response(
            client=self.client,
            endpoint=path,
            token=token,
            status_code=403,
            detail=detail,
        )

    def test_valid_id_token(self):
        self.user_success_case("/user/", self.ID_TOKEN)

    def test_no_id_token(self):
        # handle in fastapi.security.HTTPBearer
        self.failure_case("/user/")
        # not auto_error
        self.success_case("/user/no-error")

    def test_incompatible_kid_id_token(self):
        # manipulate header: swap in a JOSE header whose kid matches no JWKS key.
        # NOTE(review): the literal was redacted to "<KEY> by dataset anonymization.
        token = self.ID_TOKEN.split(".", 1)[-1]
        token = (
            "<KEY>
            + token
        )
        self.failure_case("/user/", token, detail=NO_PUBLICKEY)
        # not auto_error
        self.success_case("/user/no-error/", token)

    def test_no_kid_id_token(self):
        # manipulate header: JOSE header without a kid claim
        token = self.ID_TOKEN.split(".", 1)[-1]
        token = "<KEY>." + token
        self.failure_case("/user/", token, detail=NOT_AUTHENTICATED)
        # not auto_error
        self.success_case("/user/no-error", token)

    def test_not_verified_id_token(self):
        # manipulate public_key: corrupt the signature tail
        token = f"{self.ID_TOKEN}"[:-3] + "aaa"
        self.failure_case("/user/", token, detail=NOT_VERIFIED)
        # not auto_error
        self.success_case("/user/no-error", token)

    def test_insufficient_current_user_info(self):
        # verified but the token does not contain the claims the model needs
        self.failure_case("/user/invalid/", self.ID_TOKEN, detail=NOT_VALIDATED_CLAIMS)
        # not auto_error
        self.success_case("/user/invalid/no-error", self.ID_TOKEN)
# Provider-specific suites; each pytest marker lets one backend be selected.
@pytest.mark.auth0
class TestAuth0(AccessTokenTestCase, IdTokenTestCase):
    cloud_auth = Auth0Client


@pytest.mark.cognito
class TestCognito(AccessTokenTestCase, IdTokenTestCase):
    cloud_auth = CognitoClient


@pytest.mark.firebase
class TestFirebase(IdTokenTestCase):
    # Firebase issues only ID tokens, so the access-token cases do not apply.
    cloud_auth = FirebaseClient
| StarcoderdataPython |
6553000 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
def main():
    """Prompt for a file name, read the file, and print its contents.

    Exits the process with an error message when the file cannot be read.
    """
    user_file_name = input("Please enter file name: ")
    try:
        # Context manager guarantees the handle is closed (the original
        # leaked it).
        with open(user_file_name, 'r') as read_file:
            nums = read_file.read()
    except OSError:
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only I/O failures should trigger this message.
        print(f'Error! I could not find/read "{user_file_name}"')
        sys.exit()
    print(nums)


if __name__=='__main__':
    main()
| StarcoderdataPython |
216881 | <filename>src/xeda/xedaproject.py
from __future__ import annotations
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Union
import yaml
from .dataclass import XedaBaseModel
from .design import Design
from .utils import WorkingDirectory, toml_load
class XedaProject(XedaBaseModel):
    """In-memory representation of a ``xedaproject`` file (designs + flows)."""

    # validate to concrete Designs to verify the whole xedaproject
    designs: List[Design]
    # keep raw dict as flows are dynamically discovered
    flows: Dict[str, dict] = {}

    @classmethod
    def from_file(cls, file: Union[str, os.PathLike, Path]):
        """Load xedaproject from a TOML, JSON, or YAML file and validate it."""
        if not isinstance(file, Path):
            file = Path(file)
        ext = file.suffix.lower()
        if ext == ".toml":
            data = toml_load(file)
        else:
            with open(file) as f:
                if ext == ".json":
                    data = json.load(f)
                elif ext == ".yaml":
                    data = yaml.safe_load(f)
                else:
                    raise ValueError(
                        f"File {file} has unknown extension {ext}. Supported formats are TOML, JSON, and YAML."
                    )
        if not isinstance(data, dict) or not data:
            raise ValueError("Invalid xedaproject!")
        designs = data.get("design") or data.get("designs")
        if not designs:
            raise ValueError("No designs found in the xedaproject file!")
        # BUG FIX: a project with no "flow(s)" key used to pass ``flows=None``,
        # which fails Dict validation; fall back to the declared default.
        flows = data.get("flow") or data.get("flows") or {}
        # Relative paths inside the project resolve against the file's folder.
        with WorkingDirectory(file.parent):
            return cls(designs=designs, flows=flows)

    @property
    def design_names(self) -> List[str]:
        """Names of all designs, in file order."""
        return [d.name for d in self.designs]

    def get_design(self, name: Optional[str] = None) -> Optional[Design]:
        """Return the design called *name*; with no name, the sole design.

        Returns None when the name is absent, or when no name was given and
        the project holds more than one design.
        """
        if name is None:
            return self.designs[0] if len(self.designs) == 1 else None
        try:
            return self.designs[self.design_names.index(name)]
        except ValueError:
            return None
| StarcoderdataPython |
class Solution:
    """LeetCode 14: longest common prefix of a list of strings."""

    def longestCommonPrefix(self, strs):
        """Return the longest common prefix of *strs* ("" when none exists).

        Shrinks a candidate prefix against each string in turn, which is
        O(total characters) instead of the original per-character rescans.
        """
        if not strs:
            # BUG FIX: the original indexed strs[0] and crashed on [].
            return ""
        prefix = strs[0]
        for s in strs[1:]:
            while not s.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ""
        return prefix
# Quick manual check of Solution.longestCommonPrefix (runs on import).
s = Solution()
strs = ["flower","flow","flight"]
result = s.longestCommonPrefix(strs) # fl
print(result)
strs = ["dog","racecar","car"]
result = s.longestCommonPrefix(strs) # ""
print(result) | StarcoderdataPython |
3494338 | # Generated by Django 3.0.8 on 2020-07-17 21:06
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: give hardware/software date fields a timezone.now default."""

    dependencies = [
        ('manager', '0002_auto_20200717_2204'),
    ]

    operations = [
        # Each AlterField adds default=timezone.now to an existing DateTimeField.
        migrations.AlterField(
            model_name='hardware',
            name='expiration_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='hardware',
            name='installation_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='software',
            name='expiration_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='software',
            name='installation_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| StarcoderdataPython |
3599840 | <reponame>kigensky/awwards<filename>awwards/migrations/0003_auto_20210531_2202.py
# Generated by Django 3.1.7 on 2021-05-31 19:02
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: replace Project.image with a Cloudinary-backed field."""

    dependencies = [
        ('awwards', '0002_auto_20210531_2049'),
    ]

    operations = [
        # Drop the old local image field...
        migrations.RemoveField(
            model_name='project',
            name='image',
        ),
        # ...and store uploads in Cloudinary instead (nullable for old rows).
        migrations.AddField(
            model_name='project',
            name='cloudinary_image',
            field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='image'),
        ),
    ]
| StarcoderdataPython |
285408 | import unittest
from sphinxcontrib.autohttp.bottle import get_routes
from bottle import Bottle, Route
def create_app():
    """Build a minimal Bottle app with one GET and one POST route."""
    app = Bottle()

    @app.route("/bottle")
    def bottle_bottle():
        return 12

    @app.post("/bottle/post/")
    def bottle_bottle_post():
        return 23

    return app
def create_app_mount():
    """Build create_app()'s app with a second Bottle app mounted at /mount/."""
    app = create_app()
    another_app = Bottle()

    @another_app.route("/mount/")
    def another_mount():
        pass

    app.mount("/mount/", another_app)
    return app
def create_app_filter():
    """Build a Bottle app with a wildcard (dynamic segment) route."""
    app = Bottle()

    @app.route("/hello/<name>")
    def bottle_hello_name(name):
        return name

    return app
class BottleTest(unittest.TestCase):
    """Tests for sphinxcontrib.autohttp.bottle.get_routes route discovery."""

    def test_get_routes(self):
        routes = list(get_routes(create_app()))
        # order is not deterministic:
        routes = sorted(routes, key=lambda x: x[1])
        self.assertEqual(len(routes), 2)
        # Each entry is a (method, path, Route) triple.
        self.assertEqual(len(routes[0]), 3)
        self.assertEqual(routes[0][0], "GET")
        self.assertEqual(routes[0][1], "/bottle")
        self.assertEqual(routes[0][2].callback(), 12)
        self.assertEqual(type(routes[0][2]), Route)
        self.assertEqual(len(routes[1]), 3)
        self.assertEqual(routes[1][0], "POST")
        self.assertEqual(routes[1][1], "/bottle/post/")
        self.assertEqual(routes[1][2].callback(), 23)
        self.assertEqual(type(routes[1][2]), Route)

    def test_get_routes_mount(self):
        routes = list(get_routes(create_app_mount()))
        routes = sorted(routes, key=lambda x: x[1])
        self.assertEqual(len(routes), 3)
        # not sure about this: mounted prefixes surface as a regex wildcard
        self.assertEqual(routes[2][1], "/mount/(:re:.*)")

    def test_get_routes_filter(self):
        routes = list(get_routes(create_app_filter()))
        routes = sorted(routes, key=lambda x: x[1])
        self.assertEqual(len(routes), 1)
        # Dynamic segments are rendered with parentheses.
        self.assertEqual(routes[0][1], "/hello/(name)")
| StarcoderdataPython |
5086367 | import numpy as np
import torch
from a2c_ppo_acktr import utils
from a2c_ppo_acktr.envs import make_vec_envs
def evaluate(actor_critic, ob_rms, env_name, seed, num_processes, eval_log_dir,
             device):
    """Run 10 evaluation episodes with a deterministic policy; print mean reward.

    Parameters: the policy network (must expose ``act`` and
    ``recurrent_hidden_state_size``), training-time observation statistics to
    reuse for normalization, and the vec-env construction arguments.
    """
    eval_envs = make_vec_envs(env_name, seed + num_processes, num_processes,
                              None, eval_log_dir, device, True)

    vec_norm = utils.get_vec_normalize(eval_envs)
    if vec_norm is not None:
        # Freeze normalization and reuse the statistics learned in training.
        vec_norm.eval()
        vec_norm.obs_rms = ob_rms

    eval_episode_rewards = []

    obs = eval_envs.reset()
    eval_recurrent_hidden_states = torch.zeros(
        num_processes, actor_critic.recurrent_hidden_state_size, device=device)
    eval_masks = torch.zeros(num_processes, 1, device=device)

    while len(eval_episode_rewards) < 10:
        with torch.no_grad():
            _, action, _, eval_recurrent_hidden_states = actor_critic.act(
                obs,
                eval_recurrent_hidden_states,
                eval_masks,
                deterministic=True)

        # Observe reward and next obs
        obs, _, done, infos = eval_envs.step(action)

        # Masks zero out the recurrent state at episode boundaries.
        eval_masks = torch.tensor(
            [[0.0] if done_ else [1.0] for done_ in done],
            dtype=torch.float32,
            device=device)

        for info in infos:
            if 'episode' in info.keys():
                # The Monitor wrapper reports a finished episode's return here.
                eval_episode_rewards.append(info['episode']['r'])

    eval_envs.close()

    print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
        len(eval_episode_rewards), np.mean(eval_episode_rewards)))
| StarcoderdataPython |
6403788 | <filename>server/services/user.py
from flask import Blueprint, jsonify, request, current_app
from datetime import datetime, timedelta
from server.utils.view_utils import wrapped_response, serialize_list
from server.models.key import Key
from server.models.user import User
from server.utils.core_utils import logger
from server import db
import jwt
user = Blueprint("/users/v1", __name__)
@user.route('/register', methods=['POST'])
def register():
    """Create a new user from the JSON body and return it with HTTP 201.

    NOTE(review): the payload is passed straight to ``User(**data)`` without
    validation -- unexpected keys raise TypeError; confirm upstream checks.
    """
    data = request.get_json()
    new_user = User(**data)
    db.session.add(new_user)
    db.session.commit()
    return jsonify(new_user.to_dict()), 201
@user.route('/login', methods=['POST'])
def login():
    """Authenticate credentials and return a 30-minute JWT, or 401 on failure."""
    data = request.get_json()
    current_user = User.authenticate(**data)

    if not current_user:
        return jsonify({'message': 'Invalid credentials', 'authenticated': False}), 401

    token = jwt.encode({
        'sub': current_user.email,
        'iat': datetime.utcnow(),
        'exp': datetime.utcnow() + timedelta(minutes=30)},
        current_app.config['SECRET_KEY'])
    # NOTE(review): ``.decode()`` assumes PyJWT < 2.0 where encode() returns
    # bytes; PyJWT >= 2.0 returns str and this line raises AttributeError --
    # confirm the pinned PyJWT version.
    return jsonify({'token': token.decode('UTF-8')})
| StarcoderdataPython |
4803707 | import random as r
import math as m
def aproximation_pi(n_points):
    """Throw n_points random darts at the unit square; count unit-circle hits.

    Note: the return value is the raw hit count (as a float); the caller is
    expected to scale by 4 / n_points to obtain the pi estimate.
    """
    hits = 0
    thrown = 0
    # While-loop (not range) so non-integer n_points behaves as before.
    while thrown < n_points:
        dx = r.random() ** 2
        dy = r.random() ** 2
        # A dart lands inside the quarter circle when its radius is < 1.
        if m.sqrt(dx + dy) < 1.0:
            hits += 1
        thrown += 1
    return float(hits)
def integration_exp(n, a, b):
    """Monte Carlo estimate of the integral of x*e^x over [a, b].

    Draws n uniform samples on [a, b], averages the integrand, and scales by
    the interval length.  BUG FIX: the original sampled on [-a, b] and never
    multiplied by (b - a), so it did not estimate the integral at all.
    """
    total = 0.0
    for _ in range(n):
        x = a + (b - a) * r.random()
        total += x * m.exp(x)
    return (b - a) * total / float(n)
6448855 | # BB Keyboard Driver
#
# Released under The MIT License (MIT)
#
# Copyright (c) 2021 <NAME>
from arambadge import badge
# I2C address of the keyboard controller.
KBD_ADDRESS = 0x42
# Controller commands (OR-able bits written to the device).
CMD_BACKLIGHT_ON = 0x03
CMD_RESET = 1 << 7
# Response status markers and flag bits carried in the event byte.
RESP_RESET = 0xfe
RESP_EOF = 0xff
RESP_FLAG_KEYDOWN = 1 << 6
RESP_FLAG_KEYUP = 1 << 7
# Physical layout: kbd_matrix[row][col] maps a scan position to its key label.
# Strings hold one literal key per character; tuples hold named/special keys.
kbd_matrix = [
    'QERUO',
    'WSGHL',
    ('sym', 'D', 'T', 'Y', 'I'),
    ('A', 'P', 'RShift', 'Enter', 'Backspace'),
    ('Alt', 'X', 'V', 'B', '$'),
    ' ZCNM',
    ('Microphone', 'LShift', 'F', 'J', 'K'),
]

# Same positions with the sym/alt layer active.
kbd_matrix_alt = [
    '#23_+',
    '14/:"',
    ('sym', '5', '(', ')', '-'),
    ('*', '@', 'RShift', 'Enter', 'Backspace'),
    ('Alt', '8', '?', '!', 'Speaker'),
    ' 79,.',
    ('0', 'LShift', '6', ';', '\''),
]
def kbd_init():
    """Reset the keyboard controller and switch the backlight on over I2C."""
    badge.i2c.try_lock()
    try:
        badge.i2c.writeto(KBD_ADDRESS, bytes([CMD_RESET | CMD_BACKLIGHT_ON]))
    finally:
        badge.i2c.unlock()
def kbd_read():
    """Poll the controller; return a raw event byte, or None when idle."""
    buf = bytearray(1)
    badge.i2c.try_lock()
    try:
        badge.i2c.readfrom_into(KBD_ADDRESS, buf)
        # RESP_RESET / RESP_EOF are status markers, not key events.
        if buf[0] != RESP_RESET and buf[0] != RESP_EOF:
            return buf[0]
        return None
    finally:
        badge.i2c.unlock()
def kbd_decode_key(code, alt = False):
    """Map a raw matrix scan code to its key label (alt layer when alt=True)."""
    layer = kbd_matrix_alt if alt else kbd_matrix
    # Low three bits select the column, the next three the row; both 1-based.
    column = code & 7
    row = (code >> 3) & 7
    return layer[row - 1][column - 1]
def kbd_decode_event(event, alt = False):
    """Translate a raw event byte into an (event_type, key) tuple."""
    key = kbd_decode_key(event, alt)
    if event & RESP_FLAG_KEYDOWN:
        kind = "keydown"
    elif event & RESP_FLAG_KEYUP:
        kind = "keyup"
    else:
        # Neither flag set: the key is being held and auto-repeating.
        kind = "repeat"
    return (kind, key)
class BBKeyboard:
    """Stateful wrapper turning raw key events into typed characters.

    Shift and Alt are one-shot modifiers: each applies only to the next
    keypress and is cleared immediately afterwards.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Re-initialise the controller and clear all modifier state."""
        kbd_init()
        self.shift = False  # one-shot: upper-cases the next character only
        self.alt = False  # one-shot: selects the alt layer for the next key
        self.last_key = None  # label of the most recent keydown

    def read(self):
        """Return the next typed character ('\\n', '\\b', letters, ...) or None."""
        event = kbd_read()
        if not event:
            return None
        alt = self.alt
        (event_type, key) = kbd_decode_event(event, alt)
        # Only keydown produces output; keyup/repeat are ignored.
        if event_type != 'keydown':
            return None
        self.last_key = key
        # Alt is consumed by this keypress regardless of which key it was.
        self.alt = False
        if len(key) == 1:
            shift = self.shift
            self.shift = False
            # Matrix labels are upper case; unshifted typing lower-cases them.
            return key if shift else key.lower()
        elif key == 'Enter':
            return '\n'
        elif key == 'Backspace':
            return '\b'
        elif key == 'LShift' or key == 'RShift':
            self.shift = True
        elif key == 'Alt':
            self.alt = True
        return None
| StarcoderdataPython |
1754920 | <filename>tests/test_dsm.py
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from mne_rsa import searchlight, dsm_array, compute_dsm, compute_dsm_cv
from mne_rsa.dsm import _ensure_condensed, _n_items_from_dsm
class TestDsm:
    """Test computing a DSM"""

    def test_basic(self):
        """Test basic invocation of compute_dsm."""
        data = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        dsm = compute_dsm(data)
        assert dsm.shape == (1,)
        # Identical rows -> zero dissimilarity (up to float noise).
        assert_allclose(dsm, 0, atol=1E-15)

    def test_invalid_input(self):
        """Test giving invalid input to compute_dsm."""
        # Correlation distance is undefined for a single feature.
        data = np.array([[1], [1]])
        with pytest.raises(ValueError, match='single feature'):
            compute_dsm(data, metric='correlation')

    def test_set_metric(self):
        """Test setting distance metric for computing DSMs."""
        data = np.array([[1, 2, 3, 4], [2, 4, 6, 8]])
        dsm = compute_dsm(data, metric='euclidean')
        assert dsm.shape == (1,)
        # sqrt(1 + 4 + 9 + 16) = 5.477226...
        assert_allclose(dsm, 5.477226)
class TestDsmCV:
    """Test computing a DSM with cross-validation."""

    def test_basic(self):
        """Test basic invocation of compute_dsm_cv."""
        # First axis holds the cross-validation folds.
        data = np.array([[[1, 2, 3, 4], [1, 2, 3, 4]],
                         [[1, 2, 3, 4], [1, 2, 3, 4]]])
        dsm = compute_dsm_cv(data)
        assert dsm.shape == (1,)
        assert_allclose(dsm, 0, atol=1E-15)

    def test_invalid_input(self):
        """Test giving invalid input to compute_dsm."""
        # Correlation distance is undefined for a single feature.
        data = np.array([[[1], [1]]])
        with pytest.raises(ValueError, match='single feature'):
            compute_dsm_cv(data, metric='correlation')

    def test_set_metric(self):
        """Test setting distance metric for computing DSMs."""
        data = np.array([[[1, 2, 3, 4], [2, 4, 6, 8]],
                         [[1, 2, 3, 4], [2, 4, 6, 8]]])
        dsm = compute_dsm_cv(data, metric='euclidean')
        assert dsm.shape == (1,)
        # Same distance as the non-CV case since both folds are identical.
        assert_allclose(dsm, 5.477226)
class TestEnsureCondensed:
    """Test the _ensure_condensed function."""

    def test_basic(self):
        """Test basic invocation of _ensure_condensed."""
        # A 3x3 square DSM condenses to its 3 upper-triangle values.
        dsm = _ensure_condensed(np.array([[0, 1, 2],
                                          [1, 0, 3],
                                          [2, 3, 0]]),
                                var_name='test')
        assert dsm.shape == (3,)
        assert_equal(dsm, [1, 2, 3])

    def test_list(self):
        """Test invocation of _ensure_condensed on a list."""
        full = [np.array([[0, 1, 2],
                          [1, 0, 3],
                          [2, 3, 0]]),
                np.array([[0, 1, 2],
                          [1, 0, 3],
                          [2, 3, 0]])]
        dsm = _ensure_condensed(full, var_name='full')
        # Each matrix in the list is condensed independently.
        assert len(dsm) == 2
        assert dsm[0].shape == (3,)
        assert dsm[1].shape == (3,)
        assert_equal(dsm, [[1, 2, 3], [1, 2, 3]])

    def test_condensed(self):
        """Test invocation of _ensure_condensed on already condensed DSM."""
        dsm = _ensure_condensed(np.array([1, 2, 3]), var_name='test')
        assert dsm.shape == (3,)
        assert_equal(dsm, [1, 2, 3])

    def test_invalid(self):
        """Test _ensure_condensed with invalid inputs."""
        # Not a square matrix
        with pytest.raises(ValueError, match='square matrix'):
            _ensure_condensed(np.array([[0, 1],
                                        [1, 0],
                                        [2, 3]]),
                              var_name='test')
        # Too many dimensions
        with pytest.raises(ValueError, match='Invalid dimensions'):
            _ensure_condensed(np.array([[[[[0, 1, 2, 3]]]]]), var_name='test')
        # Invalid type
        with pytest.raises(TypeError, match='NumPy array'):
            _ensure_condensed([1, 2, 3], var_name='test')
class TestNItemsFromDSM:
    """Test the _n_items_from_dsm function."""

    def test_basic(self):
        """Test basic invocation of _n_items_from_dsm."""
        # 3 condensed pairwise distances correspond to 3 items...
        assert _n_items_from_dsm(np.array([1, 2, 3])) == 3
        # ...and so does a 3x3 square DSM.
        assert _n_items_from_dsm(np.array([[0, 1, 2],
                                           [1, 0, 3],
                                           [2, 3, 0]])) == 3
class TestDsmsSearchlight:
    """Test computing DSMs with searchlight patches."""

    def test_temporal(self):
        """Test computing DSMs using a temporal searchlight."""
        data = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        patches = searchlight(data.shape, temporal_radius=1)
        dsms = dsm_array(data, patches, dist_metric='euclidean')
        # One DSM per searchlight patch.
        assert len(dsms) == len(patches)
        assert dsms.shape == (2, 1)
        # Identical rows -> zero distance in every patch.
        assert_equal(list(dsms), [0, 0])

    def test_spatial(self):
        """Test computing DSMs using a spatial searchlight."""
        # Pairwise sensor distances on a 4-point line.
        dist = np.array([[0, 1, 2, 3],
                         [1, 0, 1, 2],
                         [2, 1, 0, 1],
                         [3, 2, 1, 0]])
        data = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        patches = searchlight(data.shape, dist, spatial_radius=1)
        dsms = dsm_array(data, patches, dist_metric='euclidean')
        assert len(dsms) == len(patches)
        assert dsms.shape == (4, 1)
        assert_equal(list(dsms), [0, 0, 0, 0])

    def test_spatio_temporal(self):
        """Test computing DSMs using a spatio-temporal searchlight."""
        data = np.array([[[1, 2, 3], [2, 3, 4]],
                         [[2, 3, 4], [3, 4, 5]],
                         [[3, 4, 5], [4, 5, 6]]])
        dist = np.array([[0, 1, 2],
                         [1, 0, 1],
                         [2, 1, 0]])
        patches = searchlight(data.shape, dist, spatial_radius=1,
                              temporal_radius=1)
        dsms = dsm_array(data, patches, dist_metric='correlation')
        assert len(dsms) == len(patches)
        assert dsms.shape == (2, 1, 3)
        # Perfectly correlated items -> zero correlation distance everywhere.
        assert_allclose(list(dsms), [[0, 0, 0], [0, 0, 0]], atol=1E-15)

    def test_single_patch(self):
        """Test computing DSMs using a single searchlight patch."""
        data = np.array([[[1, 2, 3], [2, 3, 4]],
                         [[2, 3, 4], [3, 4, 5]],
                         [[3, 4, 5], [4, 5, 6]]])
        # No patches argument: the whole array is one patch.
        dsms = dsm_array(data, dist_metric='correlation')
        assert len(dsms) == 1
        assert dsms.shape == (3,)
        assert_allclose(list(dsms), [[0, 0, 0]], atol=1E-15)

    def test_crossvalidation(self):
        """Test computing DSMs using a searchlight and cross-validation."""
        # Two repetitions of three conditions, folded via y and n_folds.
        data = np.array([[[1, 2, 3], [2, 3, 4]],
                         [[2, 3, 4], [3, 4, 5]],
                         [[3, 4, 5], [4, 5, 6]],
                         [[1, 2, 3], [2, 3, 4]],
                         [[2, 3, 4], [3, 4, 5]],
                         [[3, 4, 5], [4, 5, 6]]])
        dist = np.array([[0, 1, 2],
                         [1, 0, 1],
                         [2, 1, 0]])
        patches = searchlight(data.shape, dist, spatial_radius=1,
                              temporal_radius=1)
        dsms = dsm_array(data, patches, y=[1, 2, 3, 1, 2, 3], n_folds=2)
        assert len(dsms) == len(patches)
        assert dsms.shape == (2, 1, 3)
        assert_allclose(list(dsms), [[0, 0, 0], [0, 0, 0]], atol=1E-15)
| StarcoderdataPython |
3458378 | <reponame>sadimer/nfv_tosca_translator<filename>translator/translator.py
import os
import yaml
import logging
import sys
import translator.utils as utils
from toscaparser.tosca_template import ToscaTemplate
from translator.template import ToscaNormativeTemplate
# Paths below are relative to the project root (see utils.get_project_root_path()).
VNF_DEF_PATH = '/definitions/VNF_types/'
# NOTE(review): "definintion" is misspelled but must match the actual file
# name on disk — do not "fix" without renaming the file.
NFV_DEF_PATH = '/definitions/NFV_definintion_1_0.yaml'
TOSCA_DEF_PATH = '/definitions/TOSCA_definition_1_0.yaml'
# Prefix only; translate() builds the full per-orchestrator mapping file name.
MAP_PATH = '/definitions/TOSCA_NFV_mapping'
PROJECT_NAME = 'nfv_tosca_translator'
def translate(template_file, validate_only, orchestrator, provider, log_level='info'):
    """Translate a TOSCA NFV template into normative TOSCA.

    Args:
        template_file: path to the source template in TOSCA NFV notation.
        validate_only: if True, only validate the template and return early.
        orchestrator: orchestrator name; selects the NFV-to-TOSCA mapping file.
        provider: provider name, forwarded to ToscaNormativeTemplate.
        log_level: logging level for nfv_tosca_translator.log (default 'info').

    Returns:
        The tuple produced by ToscaNormativeTemplate.get_result():
        (yaml dict of the resulting template, dict of lists of Ansible
        script lines for configuring compute nodes).  When validate_only
        is True, returns a status dict and an empty dict instead.

    Exits the process with status 1 on parse or translation errors.

    Processing steps:
        1) read template_file into a yaml dict
        2) append imports of NFV_definition_1_0.yaml and
           TOSCA_definition_1_0.yaml (node/relationship type definitions)
        3) run the OpenStack tosca-parser for validation; if validate_only,
           report success and return
        4) read the orchestrator-specific mapping file into a yaml dict
        5) construct the translator and return its result
    """
    log_map = dict(
        debug=logging.DEBUG,
        info=logging.INFO,
        warning=logging.WARNING,
        error=logging.ERROR,
        critical=logging.ERROR
    )
    # Per-orchestrator mapping file.  Use a local name instead of shadowing
    # the module-level MAP_PATH constant.
    map_path = '/definitions/TOSCA_NFV_mapping_' + orchestrator + '.yaml'
    logging_format = "%(asctime)s %(levelname)s %(message)s"
    logging.basicConfig(filename=os.path.join(utils.get_project_root_path() + "/", 'nfv_tosca_translator.log'),
                        filemode='a', level=log_map[log_level],
                        format=logging_format, datefmt='%Y-%m-%d %H:%M:%S')
    with open(template_file, "r") as f:
        try:
            tpl = yaml.load(f, Loader=yaml.SafeLoader)
            logging.info("Template successfully loaded from file.")
        except yaml.YAMLError as e:
            # YAMLError also covers ParserError, which the previous
            # ScannerError-only handler let propagate uncaught.
            logging.error("Error parsing TOSCA template: %s." % e)
            sys.exit(1)
    tosca_def = utils.get_project_root_path() + TOSCA_DEF_PATH
    nfv_def = utils.get_project_root_path() + NFV_DEF_PATH
    if 'imports' in tpl:
        if isinstance(tpl['imports'], list):
            tpl['imports'].append(tosca_def)
            tpl['imports'].append(nfv_def)
            logging.info("Imports added to template.")
        else:
            logging.error("Error parsing imports in TOSCA template.")
            sys.exit(1)
    else:
        tpl['imports'] = [tosca_def, nfv_def]
        logging.info("Imports added to template.")
    try:
        tosca_parser_tpl = ToscaTemplate(yaml_dict_tpl=tpl)
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt.
        logging.exception("Got exception from OpenStack tosca-parser.")
        sys.exit(1)
    if validate_only:
        logging.info("Template successfully passed validation.")
        tpl = {"template successfully passed validation": template_file}
        return tpl, {}
    map_file = utils.get_project_root_path() + map_path
    with open(map_file, "r") as f:
        try:
            mapping = yaml.load(f, Loader=yaml.SafeLoader)
            logging.info("Mapping successfully loaded from file.")
        except yaml.YAMLError as e:
            logging.error("Error parsing NFV mapping: %s." % e)
            sys.exit(1)
    try:
        tosca_normative_tpl = ToscaNormativeTemplate(tosca_parser_tpl=tosca_parser_tpl, yaml_dict_mapping=mapping,
                                                     orchestrator=orchestrator, provider=provider)
        logging.info("Template successfully passed translation to normative TOSCA.")
    except Exception:
        logging.exception("Got exception on translating NFV to TOSCA.")
        sys.exit(1)
    return tosca_normative_tpl.get_result()
| StarcoderdataPython |
1682694 | <gh_stars>1-10
# coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from citeproc.py2compat import *
import unicodedata
from . import (parse_argument, eat_whitespace, parse_macro_name,
OPEN_SCOPE, CLOSE_SCOPE, START_MACRO)
__all__ = ['MACROS', 'NewCommand', 'Macro']
class MacroBase(object):
    """Abstract interface for LaTeX macro handlers.

    Concrete subclasses consume the macro's arguments from a token stream
    and produce the macro's textual expansion.
    """

    def parse_arguments(self, tokens):
        """Consume this macro's arguments from *tokens* (abstract)."""
        raise NotImplementedError

    def parse_arguments_and_expand(self, tokens):
        """Consume arguments from *tokens* and return the expansion (abstract)."""
        raise NotImplementedError
class NewCommand(MacroBase):
    r"""Handler for \newcommand{cmd}[args]{def}: registers a new Macro."""
    def __init__(self, macros):
        # Registry (name -> Macro) into which new definitions are stored.
        self.macros = macros
    @staticmethod
    def _parse_macro_name(tokens):
        # Parses the {cmd} part; the braces around \cmd are optional.
        eat_whitespace(tokens)
        token = next(tokens)
        in_group = token.type == OPEN_SCOPE
        if in_group:
            eat_whitespace(tokens)
            token = next(tokens)
        assert token.type == START_MACRO
        name = parse_macro_name(tokens)
        if in_group:
            eat_whitespace(tokens)
            assert next(tokens).type == CLOSE_SCOPE
        return name
    @staticmethod
    def _parse_optional_arguments(tokens, macros):
        # Parses the optional [args] argument count; defaults to 0 when
        # the bracket group is absent.
        eat_whitespace(tokens)
        if tokens.peek().value == '[':
            next(tokens)
            num_args = ''
            for token in tokens:
                if token.value == ']':
                    break
                num_args += token.value
        else:
            num_args = 0
        # NOTE(review): int('') raises ValueError for an empty "[]";
        # presumably well-formed input always supplies a digit — confirm.
        return int(num_args)
    def parse_arguments_and_expand(self, tokens, macros):
        """Parse a \newcommand invocation and register the defined macro.

        Rewrites TeX-style placeholders #1..#10 in the definition to
        Python str.format placeholders {0}..{9}, then stores the result.
        Expands to the empty string (the definition itself produces no
        output).
        """
        name = self._parse_macro_name(tokens)
        num_args = self._parse_optional_arguments(tokens, macros)
        definition = parse_argument(tokens, macros)
        for i in range(10):
            definition = definition.replace('#{}'.format(i + 1),
                                            '{' + str(i) + '}')
        self.macros[name] = Macro(num_args, definition)
        return ''
class Macro(object):
    """A LaTeX macro with a fixed argument count and a format template."""

    def __init__(self, num_args, format_string):
        # Attribute names are part of the subclass contract (Combining
        # reads num_args; Symbol/SymbolByName pass through format_string).
        self.num_args = num_args
        self.format_string = format_string

    def parse_arguments_and_expand(self, tokens, macros):
        """Read exactly ``num_args`` arguments from *tokens* and expand them."""
        parsed = [parse_argument(tokens, macros) for _ in range(self.num_args)]
        return self.expand(parsed)

    def expand(self, arguments):
        """Substitute *arguments* into the format template."""
        assert len(arguments) == self.num_args
        return self.format_string.format(*arguments)
class Symbol(Macro):
    """A zero-argument macro that expands to a fixed literal string."""
    def __init__(self, symbol):
        # No arguments; the expansion is the symbol itself.
        super(Symbol, self).__init__(0, symbol)
class SymbolByName(Macro):
    """A zero-argument macro expanding to a Unicode character given its name."""
    def __init__(self, unicode_symbol_name):
        # Resolve the official Unicode character name at construction time;
        # raises KeyError for an unknown name.
        unicode_symbol = unicodedata.lookup(unicode_symbol_name)
        super(SymbolByName, self).__init__(0, unicode_symbol)
class Combining(Macro):
    """A one-argument macro that applies a Unicode combining accent."""
    def __init__(self, unicode_accent_name):
        # Template places the combining mark after the base character.
        unicode_accent = unicodedata.lookup('COMBINING ' + unicode_accent_name)
        super(Combining, self).__init__(1, '{0}' + unicode_accent)
    # Dotless i/j must be replaced by their dotted forms before a
    # combining accent is applied.
    DOTTED_CHARS = {'ı': 'i',
                    'ȷ': 'j'}
    def expand(self, arguments):
        """Accent the first character of the argument; pass the rest through.

        The result is NFC-normalized so base + combining mark collapses to
        a precomposed character where one exists.
        """
        assert len(arguments) == self.num_args
        accented, rest = arguments[0][0], arguments[0][1:]
        accented = self.DOTTED_CHARS.get(accented, accented)
        expanded = super(Combining, self).expand([accented])
        return unicodedata.normalize('NFC', expanded) + rest
# Mapping of LaTeX macro names (without the backslash) to their handlers.
# Combining(...) entries apply a Unicode combining accent to one argument;
# Symbol/SymbolByName entries are zero-argument replacements.
MACROS = {
    # accents
    '`': Combining('GRAVE ACCENT'),
    "'": Combining('ACUTE ACCENT'),
    '^': Combining('CIRCUMFLEX ACCENT'),
    '"': Combining('DIAERESIS'),
    'H': Combining('DOUBLE ACUTE ACCENT'),
    '~': Combining('TILDE'),
    'c': Combining('CEDILLA'),
    'k': Combining('OGONEK'),
    '=': Combining('MACRON'),
    'b': Combining('MACRON BELOW'),
    '.': Combining('DOT ABOVE'),
    'd': Combining('DOT BELOW'),
    'r': Combining('RING ABOVE'),
    'u': Combining('BREVE'),
    'v': Combining('CARON'),
    # '|': Combining('VERTICAL LINE ABOVE'),
    # 'h': Combining('HOOK ABOVE'),
    'G': Combining('DOUBLE GRAVE ACCENT'),
    'U': Combining('DOUBLE VERTICAL LINE ABOVE'),
    't': Combining('DOUBLE INVERTED BREVE'),
    'textcircled': Combining('ENCLOSING CIRCLE'),

    # symbols
    'o': SymbolByName('LATIN SMALL LETTER O WITH STROKE'),
    'O': SymbolByName('LATIN CAPITAL LETTER O WITH STROKE'),
    'i': SymbolByName('LATIN SMALL LETTER DOTLESS I'),
    'l': SymbolByName('LATIN SMALL LETTER L WITH STROKE'),
    'L': SymbolByName('LATIN CAPITAL LETTER L WITH STROKE'),
    'oe': SymbolByName('LATIN SMALL LIGATURE OE'),
    'OE': SymbolByName('LATIN CAPITAL LIGATURE OE'),
    'ae': SymbolByName('LATIN SMALL LETTER AE'),
    'AE': SymbolByName('LATIN CAPITAL LETTER AE'),
    'aa': SymbolByName('LATIN SMALL LETTER A WITH RING ABOVE'),
    'AA': SymbolByName('LATIN CAPITAL LETTER A WITH RING ABOVE'),
    'ss': SymbolByName('LATIN SMALL LETTER SHARP S'),
    'dh': SymbolByName('LATIN SMALL LETTER ETH'),
    'DH': SymbolByName('LATIN CAPITAL LETTER ETH'),
    'dj': SymbolByName('LATIN SMALL LETTER D WITH STROKE'),
    'DJ': SymbolByName('LATIN CAPITAL LETTER D WITH STROKE'),
    'ng': SymbolByName('LATIN SMALL LETTER ENG'),
    'NG': SymbolByName('LATIN CAPITAL LETTER ENG'),
    'th': SymbolByName('LATIN SMALL LETTER THORN'),
    'TH': SymbolByName('LATIN CAPITAL LETTER THORN'),
    'dag': SymbolByName('DAGGER'),
    'ddag': SymbolByName('DOUBLE DAGGER'),
    'dots': SymbolByName('HORIZONTAL ELLIPSIS'),
    'P': SymbolByName('PILCROW SIGN'),
    'S': SymbolByName('SECTION SIGN'),
    'copyright': SymbolByName('COPYRIGHT SIGN'),
    'pounds': SymbolByName('POUND SIGN'),
    'guillemotleft': SymbolByName('LEFT-POINTING DOUBLE ANGLE QUOTATION MARK'),
    'guillemotright': SymbolByName('RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK'),
    'guilsinglleft': SymbolByName('SINGLE LEFT-POINTING ANGLE QUOTATION MARK'),
    'guilsinglright': SymbolByName('SINGLE RIGHT-POINTING ANGLE QUOTATION MARK'),
    'quotedblbase': SymbolByName('DOUBLE LOW-9 QUOTATION MARK'),
    'quotesinglbase': SymbolByName('SINGLE LOW-9 QUOTATION MARK'),
    'textasciicircum': Symbol('^'),
    'textasciitilde': Symbol('~'),
    'textasteriskcentered': Symbol('*'),
    'textbackslash': Symbol('\\'),
    'textbar': Symbol('|'),
    'textbraceleft': Symbol('{'),
    'textbraceright': Symbol('}'),
    'textbullet': SymbolByName('BULLET'),
    'textcopyright': SymbolByName('COPYRIGHT SIGN'),
    'textdagger': SymbolByName('DAGGER'),
    'textdaggerdbl': SymbolByName('DOUBLE DAGGER'),
    'textdollar': Symbol('$'),
    'textellipsis': SymbolByName('HORIZONTAL ELLIPSIS'),
    'textemdash': SymbolByName('EM DASH'),
    'textendash': SymbolByName('EN DASH'),
    'textexclamdown': SymbolByName('INVERTED EXCLAMATION MARK'),
    'textgreater': Symbol('>'),
    'textless': Symbol('<'),
    'textordfeminine': SymbolByName('FEMININE ORDINAL INDICATOR'),
    'textordmasculine': SymbolByName('MASCULINE ORDINAL INDICATOR'),
    'textparagraph': SymbolByName('PILCROW SIGN'),
    'textperiodcentered': SymbolByName('MIDDLE DOT'),
    'textquestiondown': SymbolByName('INVERTED QUESTION MARK'),
    'textquotedbl': Symbol('"'),
    'textquotedblleft': SymbolByName('LEFT DOUBLE QUOTATION MARK'),
    'textquotedblright': SymbolByName('RIGHT DOUBLE QUOTATION MARK'),
    'textquoteleft': SymbolByName('LEFT SINGLE QUOTATION MARK'),
    'textquoteright': SymbolByName('RIGHT SINGLE QUOTATION MARK'),
    'textregistered': SymbolByName('REGISTERED SIGN'),
    'textsection': SymbolByName('SECTION SIGN'),
    'textsterling': SymbolByName('POUND SIGN'),
    'texttrademark': SymbolByName('TRADE MARK SIGN'),
    'textunderscore': Symbol('_'),
    'textvisiblespace': SymbolByName('OPEN BOX'),
    'TeX': Macro(0, 'TeX'),

    # escaped characters
    ' ': Symbol(' '),
    '&': Symbol('&'),
    '$': Symbol('$'),
    '{': Symbol('{'),
    '}': Symbol('}'),
    '%': Symbol('%'),
    '#': Symbol('#'),
    '_': Symbol('_'),
}
| StarcoderdataPython |
4805353 | <filename>bot_scheduler/actions.py<gh_stars>0
from typing import Callable
class Action:
    """Encapsulates the calling of a callback with an object."""

    def __init__(self, actor: Callable):
        """Initialise the Action with a callback.

        Args:
            actor: Takes one argument and acts on it.

        Raises:
            ValueError: If *actor* is not callable.
        """
        if not callable(actor):
            # The message previously referred to a non-existent
            # "interactor" argument; the parameter is named "actor".
            raise ValueError('The actor argument should be callable.')
        self._actor = actor

    def act(self, obj: object):
        """Invoke the stored callback with *obj*."""
        self._actor(obj)
class EvaluatingAction(Action):
    """An Action that only fires when a predicate approves the object."""

    def __init__(self, actor: Callable, evaluator: Callable):
        """Initialise the EvaluatingAction with two callbacks.

        Args:
            actor: Takes one argument and acts on it.
            evaluator: Takes one argument and decides whether to act on it.

        Raises:
            ValueError: If *evaluator* (or *actor*, via Action) is not callable.
        """
        if not callable(evaluator):
            raise ValueError('The evaluator argument should be callable.')
        self._evaluator = evaluator
        super().__init__(actor)

    def act(self, obj: object):
        """Invoke the callback with *obj* only if the predicate accepts it."""
        if not self._evaluator(obj):
            return
        super().act(obj)
| StarcoderdataPython |
5039642 | <gh_stars>1-10
from random import randint
from scribbler.document.document import Document
from scribbler.document.parser import parse_document
from scribbler.resources.resources_helper import list_resources
from scribbler.dataset import Dataset
from scribbler.document.document_handwriting_line import DocumentHandwrittingLine
from scribbler.document.document_text_line import DocumentTextLine
from scribbler.utils.image.image import image_pillow_to_numpy
class LineGenerator(Dataset):
    """Dataset yielding rendered text-line images (printed or handwritten)."""
    def __init__(self, height=64):
        # Handwritten line wrapped in its own single-line document.
        # NOTE(review): "documnt" is a typo but the attribute name is part
        # of this class's internal state — renaming is a code change.
        self.hand_text_line = DocumentHandwrittingLine(height=height)
        self.hand_text_documnt = Document(height=height)
        self.hand_text_documnt.append_document(self.hand_text_line)
        # Printed (typeset) line wrapped in its own single-line document.
        self.printed_text_line = DocumentTextLine(height=height)
        self.printed_text_document = Document(height=height)
        self.printed_text_document.append_document(self.printed_text_line)
    def count(self):
        # Total items: printed resources first, then handwritten resources.
        return self.printed_text_line.count_ressource() + self.hand_text_line.count_ressource()
    def get(self, index):
        # Indices below the printed count map to printed lines; the rest
        # map (offset) into the handwritten lines.
        if index >= self.printed_text_line.count_ressource():
            index = index - self.printed_text_line.count_ressource()
            text_line = self.hand_text_line
            text_document = self.hand_text_documnt
        else:
            text_line = self.printed_text_line
            text_document = self.printed_text_document
        text_document.generate_random(index)
        # Returns (rendered image, ground-truth text) — presumably a PIL
        # image; confirm against Document.to_image().
        return text_document.to_image(), text_line.get_text()
class DocumentGenerator(Dataset):
    """Dataset yielding randomly generated full-document images with baselines."""
    def __init__(self):
        # Pre-parse every structure template shipped with the package.
        self.documents = [parse_document(path) for path in list_resources("structures")]
    def count(self):
        # Fixed epoch length; items are random, so any index yields a
        # fresh random document.
        return 1024
    def get(self, index):
        # index is ignored: a random template is chosen and re-randomized.
        document = self.documents[randint(0, len(self.documents) - 1)]
        document.generate_random()
        image = document.to_image()
        # image = image_pillow_to_numpy(image)
        return image, document.get_baselines()
| StarcoderdataPython |
1990502 | <gh_stars>0
# from rdkit import Chem
# from rdkit.Chem import Draw, AllChem
# from rdkit.Chem.Draw import IPythonConsole #Needed to show molecules
# from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions #Only needed if modifying defaults
# for i in range(1,2):
# opts = DrawingOptions()
# opts.includeAtomNumbers=True
# opts.bondLineWidth=1.0
# with open('train278.txt','r') as f:
# t1 = f.readlines()
# j = 0
# for x in range(len(t1)):
# j = j + 1
# print("Index " + str(j))
# rxn_s=str(t1[x]).replace('\n','')
# reaction = AllChem.ReactionFromSmarts(rxn_s.split(' ')[0])
# rimage = Draw.ReactionToImage(reaction, subImgSize=(600, 600))
# rimage.save('reaction'+ str(j) + '.png')
#
# f.close()
# oldf = open('train278.txt', 'r')
# train = open('train.txt', 'w')
# test = open('test.txt', 'w')
# val = open('val.txt', 'w')
#
# valList = [n for n in range(3, 278, 7)]
# testList = [n for n in range(1, 278, 7)]
# trainList = [n for n in range(0, 278)]
#
# for n in (testList):
# if n in trainList:
# trainList.remove(n)
#
# for n in (valList):
# if n in trainList:
# trainList.remove(n)
#
# print(len(trainList))
# print(len(valList))
# print(len(testList))
# print(trainList)
# print(valList)
# print(testList)
#
# lines = oldf.readlines()
# for i in trainList:
# train.write(lines[i])
# for i in testList:
# test.write(lines[i])
# for i in valList:
# val.write(lines[i])
# oldf.close()
# train.close()
# test.close()
# val.close()
# it = 1
# with open("train.txt", 'r') as f, open("src-train.txt", 'w') as fin1, open("tgt-train.txt", "w") as fin2:
# for line in f.readlines():
# print(it)
# r = line.split('>>')[0]
# p = line.split('>>')[1]
# fin1.write(r + '\n')
# fin2.write(p)
# it = it + 1
#
# f.close()
# fin1.close()
# fin2.close()
#
# it = 1
# with open("val.txt", 'r') as f, open("src-val.txt", 'w') as fin1, open("tgt-val.txt", "w") as fin2:
# for line in f.readlines():
# print(it)
# r = line.split('>>')[0]
# p = line.split('>>')[1]
# fin1.write(r + '\n')
# fin2.write(p)
# it = it + 1
#
# f.close()
# fin1.close()
# fin2.close()
#
# it = 1
# with open("test.txt", 'r') as f, open("src-test.txt", 'w') as fin1, open("tgt-test.txt", "w") as fin2:
# for line in f.readlines():
# print(it)
# r = line.split('>>')[0]
# p = line.split('>>')[1]
# fin1.write(r + '\n')
# fin2.write(p)
# it = it + 1
#
# f.close()
# fin1.close()
# fin2.close()
# from rdkit import Chem
# smi = 'CC2=CC=C(C(C)=C)C=C2'
# random_equivalent_smiles = Chem.MolToSmiles(Chem.MolFromSmiles(smi), doRandom=True)
# print(random_equivalent_smiles)
#------------augm----------------------------------
from rdkit import Chem
# Augment the training set: for each "reactants>>product" line, keep the
# original pair and append one randomized-SMILES variant of both sides.
it = 1  # running line counter, printed for progress across all three files
out1 = []  # augmented source (reactant) lines
out2 = []  # augmented target (product) lines
with open("train.txt", 'r') as f, open("src-train0.txt", 'w') as fin1, open("tgt-train0.txt", "w") as fin2:
    for line in f.readlines():
        print(it)
        r = line.split('>>')[0]
        # assumes exactly two dot-separated reactants per line — TODO confirm
        r1 = r.split('.')[0]
        # doRandom=True produces a randomized (non-canonical) SMILES string
        r1 = Chem.MolToSmiles(Chem.MolFromSmiles(r1), doRandom=True)
        r2 = r.split('.')[1]
        r2 = Chem.MolToSmiles(Chem.MolFromSmiles(r2), doRandom=True)
        r12 = r1 + '.' + r2 + '\n'
        out1.append(r12)
        p = line.split('>>')[1]
        out2.append(Chem.MolToSmiles(Chem.MolFromSmiles(p), doRandom=True) + '\n')
        # Write the original pair now; randomized variants are appended
        # after the loop, doubling the training set.
        fin1.write(r + '\n')
        fin2.write(p)
        it = it + 1
    fin1.writelines(out1)
    fin2.writelines(out2)
# The close() calls below are redundant: the "with" blocks above/below
# already close all three files.
f.close()
fin1.close()
fin2.close()
out1.clear()
out2.clear()
# Validation and test sets are split without augmentation.
with open("val.txt", 'r') as f, open("src-val0.txt", 'w') as fin1, open("tgt-val0.txt", "w") as fin2:
    for line in f.readlines():
        print(it)
        r = line.split('>>')[0]
        p = line.split('>>')[1]
        fin1.write(r + '\n')
        fin2.write(p)
        it = it + 1
f.close()
fin1.close()
fin2.close()
with open("test.txt", 'r') as f, open("src-test0.txt", 'w') as fin1, open("tgt-test0.txt", "w") as fin2:
    for line in f.readlines():
        print(it)
        r = line.split('>>')[0]
        p = line.split('>>')[1]
        fin1.write(r + '\n')
        fin2.write(p)
        it = it + 1
f.close()
fin1.close()
fin2.close()
| StarcoderdataPython |
172874 | <filename>tests/test_main_redirects.py
# -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2022 <NAME>
# stats.wwdt.me is released under the terms of the Apache License 2.0
"""Testing Main Redirects Module and Blueprint Views"""
import pytest
def test_favicon(client):
    """main_redirects.favicon: /favicon.ico should issue a redirect."""
    resp = client.get("/favicon.ico")
    assert resp.status_code == 302
    assert resp.location
def test_guest(client):
    """main_redirects.guest: /guest should issue a redirect."""
    resp = client.get("/guest")
    assert resp.status_code == 302
    assert resp.location
def test_help(client):
    """main_redirects.help: /help should issue a redirect."""
    resp = client.get("/help")
    assert resp.status_code == 302
    assert resp.location
def test_host(client):
    """main_redirects.host: /host should issue a redirect."""
    resp = client.get("/host")
    assert resp.status_code == 302
    assert resp.location
def test_location(client):
    """Testing main_redirects.location"""
    # Was requesting "/guest" (copy-paste from test_guest); this test
    # exercises the /location redirect.
    response = client.get("/location")
    assert response.status_code == 302
    assert response.location
def test_scorekeeper(client):
    """Testing main_redirects.scorekeeper"""
    # Was requesting "/guest" (copy-paste from test_guest); this test
    # exercises the /scorekeeper redirect.
    response = client.get("/scorekeeper")
    assert response.status_code == 302
    assert response.location
def test_search(client):
    """main_redirects.search: /search should issue a redirect."""
    resp = client.get("/search")
    assert resp.status_code == 302
    assert resp.location
def test_show(client):
    """main_redirects.show: /show should issue a redirect."""
    resp = client.get("/show")
    assert resp.status_code == 302
    assert resp.location
@pytest.mark.parametrize("show_date", ["2018-10-27"])
def test_npr_show_redirect(client, show_date: str):
    """Testing main_redirects.npr_show_redirect"""
    # /s/<date> should redirect to the corresponding NPR show page.
    response = client.get(f"/s/{show_date}")
    assert response.status_code == 302
    assert response.location
| StarcoderdataPython |
3312195 | <reponame>stckwok/cashInterop
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test emergent consensus scenarios
import time
import random
import pdb
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.join(dir_path, "..")
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
logging.basicConfig(format='%(asctime)s.%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.script import CScript, OP_TRUE, OP_CHECKSIG, OP_DROP, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG
from interopUtils import *
# Service bit checked against getnetworkinfo()["localservices"] to detect a
# Bitcoin Cash node.
NODE_BITCOIN_CASH = (1 << 5)
ONE_MB = 1000000
TWO_MB = 2000000
FIVE_MB = 5000000
# Filler text repeated to pad transaction scripts (see wastefulOutput).
SCRIPT_WORDS = b"this is junk data. this is junk data. this is junk data. this is junk data. this is junk data."
ITERATIONS = 4  # repeats of SCRIPT_WORDS (~94 bytes each, ~376 bytes total) of useless script data
def wastefulOutput(btcAddress):
    """
    Create useless data for CScript to generate many transactions used for Large Block size of 1MB to 32MB
    Input:
        btcAddress : BTC address - public key of the receiver
    Return:
        CScript with useless data
    Warning:
        Creates outputs that can't be spent by bitcoind
    """
    data = b""
    # Concatenate SCRIPT_WORDS ITERATIONS (4) times -- roughly 376 bytes of
    # junk pushed onto the script and immediately dropped.
    for _ in range(ITERATIONS):
        data += SCRIPT_WORDS
    # Junk push + OP_DROP followed by a standard P2PKH spend condition.
    ret = CScript([data, OP_DROP, OP_DUP, OP_HASH160, bitcoinAddress2bin(btcAddress), OP_EQUALVERIFY, OP_CHECKSIG])
    return ret
def p2pkh(btcAddress):
    """Create a standard pay-to-public-key-hash output script for btcAddress."""
    ret = CScript([OP_DUP, OP_HASH160, bitcoinAddress2bin(btcAddress), OP_EQUALVERIFY, OP_CHECKSIG])
    return ret
def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):
    """
    Create a transaction with the exact input and output syntax as the bitcoin-cli "createrawtransaction" command.
    If you use the default outScriptGenerator, this function will return a hex string that exactly matches the
    output of bitcoin-cli createrawtransaction.

    inputs: dict or list of dicts with "txid" and "vout" keys.
    outputs: dict mapping address -> amount (BTC); the special key "data"
             creates an OP_RETURN output carrying the given hex payload.
    outScriptGenerator: callable building the output script for an address
                        (e.g. p2pkh or wastefulOutput).
    Returns the serialized transaction as a hex string.
    """
    if not type(inputs) is list:
        inputs = [inputs]
    tx = CTransaction()
    for i in inputs:
        tx.vin.append(CTxIn(COutPoint(i["txid"], i["vout"]), b"", 0xffffffff))
    for addr, amount in outputs.items():
        if addr == "data":
            # Zero-value OP_RETURN data carrier output.
            tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))
        else:
            tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))
    tx.rehash()
    return hexlify(tx.serialize()).decode("utf-8")
def generateTx(node, txBytes, addrs, data=None):
    """Generate and submit signed transactions totalling ~txBytes bytes.

    Args:
        node: RPC connection whose wallet funds the transactions.
        txBytes: target cumulative size in bytes of the signed transactions.
        addrs: destination addresses, cycled through for the 8 outputs
               of each transaction.
        data: optional hex string placed in an extra OP_RETURN output.

    Returns:
        (count, size): number of transactions sent and their total byte size.
    """
    wallet = node.listunspent()
    wallet.sort(key=lambda x: x["amount"], reverse=False)
    logging.info("Wallet length is %d" % len(wallet))
    size = 0
    count = 0
    # Widen decimal precision: 8 digits to get to 21 million, and each
    # bitcoin is 100 million satoshis.  localcontext() restores the prior
    # precision even if an RPC call below raises (the old save/restore
    # pattern leaked the widened precision on error).
    with decimal.localcontext() as ctx:
        ctx.prec = 8 + 8
        while size < txBytes:
            count += 1
            utxo = wallet.pop()
            outp = {}
            # Make the tx bigger by adding addtl outputs so it validates faster
            payment = satoshi_round(utxo["amount"] / decimal.Decimal(8.0))
            for x in range(0, 8):
                outp[addrs[(count + x) % len(addrs)]] = payment
            if data:
                outp["data"] = data
            txn = createrawtransaction([utxo], outp, wastefulOutput)
            signedtxn = node.signrawtransaction(txn)
            size += len(binascii.unhexlify(signedtxn["hex"]))
            node.sendrawtransaction(signedtxn["hex"])
    logging.info("%d tx %d length" % (count, size))
    return (count, size)
def mostly_sync_mempools(rpc_connections, difference=50, wait=1, verbose=1):
    """
    Wait until everybody has most of the same transactions in their memory
    pools. There is no guarantee that mempools will ever fully sync due to the
    filterInventoryKnown bloom filter, so after 10 iterations nodes whose
    mempools differ by fewer than `difference` transactions count as synced.

    rpc_connections: list of node RPC connections to compare.
    difference: max symmetric-difference size tolerated after 10 iterations.
    wait: seconds to sleep between polling rounds.
    verbose: if truthy, log the mempool sizes each round.
    """
    iterations = 0
    while True:
        iterations += 1
        # Node 0's mempool is the reference set.
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        poolLen = [len(pool)]
        for i in range(1, len(rpc_connections)):
            tmp = set(rpc_connections[i].getrawmempool())
            if tmp == pool:
                num_match = num_match + 1
            # After 10 rounds, accept "close enough" mempools.
            if iterations > 10 and len(tmp.symmetric_difference(pool)) < difference:
                num_match = num_match + 1
            poolLen.append(len(tmp))
        if verbose:
            logging.info("sync mempool: " + str(poolLen))
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)
def print_bestblockhash(node, nodeId):
    """
    Helper to print bestblockhash and block size
    Input:
        node   : node to get the block information
        nodeId : Id of the node (used only for logging)
    Return:
        block_size      : block size of the best block
        best_blockhash  : hash of the best block (tip of the chain),
                          converted to an int
    """
    best_blockhash = node.getbestblockhash()
    block_size = node.getblock(best_blockhash, True)['size']
    # Reuse of the variable: from here on best_blockhash is an int, not the
    # hex string returned by the RPC.
    best_blockhash = int(best_blockhash, 16)
    logging.info("> Node%d block_size = %d" %(nodeId, block_size))
    logging.info("> Blockhash = %s" %best_blockhash)
    return block_size, best_blockhash
@assert_capture()
def test_default_values(self):
    """
    Test system default values of MG and EB
    Criteria:
        BUIP-HF Technical Specification:
        MB = 2000000
        EB = 8000000
        # Bitcoin Cash node
        forkTime = 1501590000 #corresponding to Tue 1 Aug 2017 12:20:00 UTC
    Input:
        self : test object
    Assertions:
        fail when not match with default
    """
    logging.info(">>> Entered : test_default_values \n")
    try:
        for index, n in enumerate(self.nodes):
            nodeInfo = n.getnetworkinfo()
            t = n.get("mining.fork*")
            assert(t['mining.forkBlockSize'] == 2000000) # REQ-4-2
            assert(t['mining.forkExcessiveBlock'] == 8000000) # REQ-4-1
            # Cash nodes advertise the NODE_BITCOIN_CASH service bit and
            # carry the Aug 1 2017 fork time; other builds default to 0.
            if int(nodeInfo["localservices"],16)&NODE_BITCOIN_CASH:
                assert(t['mining.forkTime'] == 1501590000) # Bitcoin Cash release REQ-2
            else:
                assert(t['mining.forkTime'] == 0) # main release default
    except (Exception, JSONRPCException) as e1:
        # Repackage any failure with the client binary that produced it.
        logging.info(e1)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        raise TestAssertionError({"file_name": fname, "line_num": exc_tb.tb_lineno, \
                "error_type": exc_type.__name__, "error_msg": str( e1 ), \
                "n1" : self.bins[index], "n2" : "N/A", "amount" : "N/A", "numsig" : "N/A"})
@assert_capture()
def test_setting_values(self, nodeId=0):
    """
    Test setting and getting values of MG and EB. Assumes the default
    excessive at 8MB and mining at 2MB
    Criteria:
        MB must be less than or equal to EB
    Input:
        self   : test object
        nodeId : index of the node to exercise (default 0)
    Assertions:
        fail when not match with defaults
    """
    logging.info(">>> Entered : test_setting_values \n")
    node = self.nodes[nodeId]
    # excessive block size is smaller than your proposed mined block size
    try:
        node.setminingmaxblock(32000000)
    except JSONRPCException as e:
        logging.info(">> PASS : %s " %e.error["message"])
        pass
    else:
        # NOTE(review): assert(0) is stripped when Python runs with -O.
        assert(0) # was able to set the mining size > the excessive size
    # max generated block size must be greater than 100 byte
    try:
        node.setminingmaxblock(99)
    except JSONRPCException as e:
        logging.info(">> PASS : %s " %e.error["message"])
        pass
    else:
        assert(0) # was able to set the mining size below our arbitrary minimum
    # maximum mined block is larger than your proposed excessive size
    try:
        node.setexcessiveblock(1000, 10)
    except JSONRPCException as e:
        logging.info(">> PASS : %s " %e.error["message"])
        pass
    else:
        assert(0) # was able to set the excessive size < the mining size
@assert_capture()
def test_sync_clear_mempool(self):
    """
    Test mempool synchronization and clearance
    Input:
        self : test object
    Raise:
        Encounter RPC error
    """
    logging.info(">>> Entered : test_sync_clear_mempool \n")
    try:
        # clear out the mempool
        mostly_sync_mempools(self.nodes)
        for index1, n in enumerate(self.nodes):
            n.generate(2)
            sync_blocks(self.nodes)
        # Keep mining on each node until its mempool is empty.
        for index2, n in enumerate(self.nodes):
            while len(n.getrawmempool()):
                n.generate(1)
                sync_blocks(self.nodes)
        logging.info("cleared mempool: %s" % str([len(x) for x in [y.getrawmempool() for y in self.nodes]]))
        # All four nodes must now agree on the (empty) mempool contents.
        base = [x.getrawmempool() for x in self.nodes]
        assert_equal(base, [base[0]] * 4)
    except (Exception, JSONRPCException) as e1:
        logging.info(e1)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        raise TestAssertionError({"file_name": fname, "line_num": exc_tb.tb_lineno, \
                "error_type": exc_type.__name__, "error_msg": str( e1 ), \
                "n1" : self.bins[index1], "n2" : self.bins[index2], "amount" : "N/A", "numsig" : "N/A"})
@assert_capture()
def test_accept_depth(self, nodeOneId, nodeTwoId):
    """
    Test accept depth case: node two treats node one's blocks as excessive
    until the chain built on them reaches its configured accept depth (AD).
    Input:
        self : test object
        nodeOneId : first node ID (mines the excessive chain)
        nodeTwoId : second node ID (small max block, AD under test)
    Raise:
        Encounter RPC error or General Exception
    """
    logging.info(">>> Entered : test_accept_depth \n")
    try:
        self.nodes[nodeTwoId].setminingmaxblock(1000)
        self.nodes[nodeTwoId].setexcessiveblock(1010, 4)
        # Mine an excessive block. Node One should not accept it
        addr = self.nodes[nodeTwoId].getnewaddress()
        for i in range(0,10):
            self.nodes[nodeOneId].sendtoaddress(addr, 1.0)
        self.nodes[nodeOneId].generate(1)
        time.sleep(2) #give blocks a chance to fully propagate
        counts = [ x.getblockcount() for x in self.nodes[0:2] ]
        logging.info("Counts: Node1 = %d and Node2 = %d " %(counts[0], counts[1]))
        assert_equal(counts[0]-counts[1], 1)
        # Mine a block on top. Node 1 should still not accept it
        self.nodes[nodeOneId].generate(1)
        time.sleep(2) #give blocks a chance to fully propagate
        counts = [ x.getblockcount() for x in self.nodes[0:2] ]
        logging.info("Counts: Node1 = %d and Node2 = %d " %(counts[0], counts[1]))
        assert_equal(counts[0]-counts[1], 2)
        # Change node 1 to AD=2. The assertion will fail if it doesn't accept the chain now
        self.nodes[nodeTwoId].setexcessiveblock(1010, 2)
        self.nodes[nodeOneId].generate(1)
        time.sleep(2) #give blocks a chance to fully propagate !!!!
        counts = [ x.getblockcount() for x in self.nodes[0:2] ]
        logging.info("Counts: Node1 = %d and Node2 = %d " %(counts[0], counts[1]))
        assert_equal(counts[0]-counts[1], 0)
    except (Exception, JSONRPCException) as e1:
        logging.info(e1)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        raise TestAssertionError({"file_name": fname, "line_num": exc_tb.tb_lineno, \
                "error_type": exc_type.__name__, "error_msg": str( e1 ), \
                "n1" : self.bins[nodeOneId], "n2" : self.bins[nodeTwoId], "amount" : "N/A", "numsig" : "N/A"})
@assert_capture()
def test_excessive_Sigops(self):
    """
    Test Excessive Sig Ops
    Note: Re-use existing test from BU/qa/rpc-tests/excessive.py, wrapped
    so failures are reported through TestAssertionError like the other
    cases in this module.
    Input:
        self : test object
    """
    logging.info("Entered : test_excessive_Sigops \n")
    try:
        testExcessiveSigops(self)
    except (Exception, JSONRPCException) as e1:
        logging.info(e1)
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        raise TestAssertionError({"file_name": fname, "line_num": exc_tb.tb_lineno, \
                "error_type": exc_type.__name__, "error_msg": str( e1 ), \
                "n1" : "N/A", "n2" : "N/A", "amount" : "N/A", "numsig" : "N/A"})
def testExcessiveSigops(self):
    """This test checks the behavior of the nodes in the presence of transactions that take a long time to validate.

    Expects four running nodes in self.nodes with spendable coins; restores
    the default block-size/sigops settings before returning.
    """
    NUM_ADDRS = 50
    logging.info("testExcessiveSigops: Cleaning up node state")
    # We are not testing excessively sized blocks so make these large
    self.nodes[0].set("net.excessiveBlock=5000000")
    self.nodes[1].set("net.excessiveBlock=5000000")
    self.nodes[2].set("net.excessiveBlock=5000000")
    self.nodes[3].set("net.excessiveBlock=5000000")
    self.nodes[0].setminingmaxblock(FIVE_MB)
    self.nodes[1].setminingmaxblock(FIVE_MB)
    self.nodes[2].setminingmaxblock(FIVE_MB)
    self.nodes[3].setminingmaxblock(FIVE_MB)
    # Stagger the accept depths so we can see the block accepted stepwise
    self.nodes[0].set("net.excessiveAcceptDepth=0")
    self.nodes[1].set("net.excessiveAcceptDepth=1")
    self.nodes[2].set("net.excessiveAcceptDepth=2")
    self.nodes[3].set("net.excessiveAcceptDepth=3")
    for n in self.nodes:
        n.generate(10)
        self.sync_blocks()
    self.nodes[0].generate(100) # create a lot of BTC for spending
    self.sync_all()
    # Different per-node sigops limits, so sigops-heavy txns are treated differently.
    self.nodes[0].set("net.excessiveSigopsPerMb=100") # Set low so txns will fail if its used
    self.nodes[1].set("net.excessiveSigopsPerMb=5000")
    self.nodes[2].set("net.excessiveSigopsPerMb=1000")
    self.nodes[3].set("net.excessiveSigopsPerMb=100")
    logging.info("Creating addresses...")
    self.nodes[0].keypoolrefill(NUM_ADDRS)
    addrs = [self.nodes[0].getnewaddress() for _ in range(NUM_ADDRS)]
    # test that a < 1MB block ignores the sigops parameter
    self.nodes[0].setminingmaxblock(ONE_MB)
    # if excessive Sigops was heeded, this txn would not make it into the block
    self.createUtxos(self.nodes[0], addrs, NUM_ADDRS)
    mpool = self.nodes[0].getmempoolinfo()
    assert_equal(mpool["size"], 0)
    # test that a < 1MB block ignores the sigops parameter, even if the max block size is less
    self.nodes[0].setminingmaxblock(FIVE_MB)
    # if excessive Sigops was heeded, this txn would not make it into the block
    self.createUtxos(self.nodes[0], addrs, NUM_ADDRS)
    mpool = self.nodes[0].getmempoolinfo()
    assert_equal(mpool["size"], 0)
    if self.extended: # creating 1MB+ blocks is too slow for travis due to the signing cost
        self.createUtxos(self.nodes[0], addrs, 10000) # we need a lot to generate 1MB+ blocks
        wallet = self.nodes[0].listunspent()
        wallet.sort(key=lambda x: x["amount"], reverse=True)
        self.nodes[0].set("net.excessiveSigopsPerMb=100000") # Set this huge so all txns are accepted by this node
        logging.info("Generate > 1MB block with excessive sigops")
        generateTx(self.nodes[0], 1100000, addrs)
        counts = [x.getblockcount() for x in self.nodes]
        base = counts[0]
        self.nodes[0].generate(1)
        # Only the miner (AD=0) accepts the excessive block right away...
        assert_equal(True, self.expectHeights([base + 1, base, base, base], 30))
        logging.info("Test excessive block propagation to nodes with different AD")
        self.nodes[0].generate(1)
        # it takes a while to sync all the txns
        assert_equal(True, self.expectHeights([base + 2, base + 2, base, base], 500))
        self.nodes[0].generate(1)
        # ...then the remaining nodes follow one by one as their accept depth is reached.
        assert_equal(True, self.expectHeights([base + 3, base + 3, base + 3, base], 90))
        self.nodes[0].generate(1)
        assert_equal(True, self.expectHeights([base + 4, base + 4, base + 4, base + 4], 90))
        logging.info("Excessive sigops test completed")
    # set it all back to defaults
    for n in self.nodes:
        n.generate(150)
        self.sync_blocks()
    self.nodes[0].set("net.excessiveSigopsPerMb=20000") # restore default sigops limit
    self.nodes[1].set("net.excessiveSigopsPerMb=20000")
    self.nodes[2].set("net.excessiveSigopsPerMb=20000")
    self.nodes[3].set("net.excessiveSigopsPerMb=20000")
    self.nodes[0].setminingmaxblock(ONE_MB)
    self.nodes[1].setminingmaxblock(ONE_MB)
    self.nodes[2].setminingmaxblock(ONE_MB)
    self.nodes[3].setminingmaxblock(ONE_MB)
    self.nodes[0].set("net.excessiveBlock=1000000")
    self.nodes[1].set("net.excessiveBlock=1000000")
    self.nodes[2].set("net.excessiveBlock=1000000")
    self.nodes[3].set("net.excessiveBlock=1000000")
class TestInterOpExcessive(BitcoinTestFramework):
    """Interop test driver: runs the excessive-block/sigops tests against
    several bitcoind builds connected in a full mesh."""
    def __init__(self, build_variant, client_dirs, extended=False):
        """
        build_variant -- build folder name (e.g. "debug") under each client dir.
        client_dirs   -- list of client source directories (under base_dir).
        extended      -- when True, also run the slow extended test variants.
        """
        BitcoinTestFramework.__init__(self)
        self.buildVariant = build_variant
        self.clientDirs = client_dirs
        self.extended = extended
        # BUG FIX: derive the binary paths from the 'client_dirs' parameter;
        # the original iterated the module-level global 'clientDirs', which
        # only worked by coincidence when both happened to be the same list.
        self.bins = [os.path.join(base_dir, x, self.buildVariant, "src", "bitcoind")
                     for x in client_dirs]
        logging.info(self.bins)
    def setup_network(self, split=False):
        """Start one node per client binary and connect them in a full mesh."""
        self.nodes = start_nodes(len(self.clientDirs), self.options.tmpdir,
                                 binary=self.bins, timewait=60 * 60)
        # Connect each node to every other node.
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 1, 3)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        """Run the whole excessive-block test sequence and print the report."""
        BitcoinTestFramework.run_test(self)
        test_default_values(self)
        test_setting_values(self, nodeId=0)
        test_setting_values(self, nodeId=1)
        test_setting_values(self, nodeId=2)
        test_setting_values(self, nodeId=3)
        test_sync_clear_mempool(self)
        # Fixed-6: Insufficient funds -- mine enough blocks so coinbase matures.
        self.nodes[0].generate(101)
        sync_blocks(self.nodes)
        test_accept_depth(self, nodeOneId=0, nodeTwoId=1)
        test_excessive_Sigops(self)
        reporter.display_report()
    def createUtxos(self, node, addrs, amt):
        """Split the largest wallet coins into at least 'amt' UTXOs spread
        over 'addrs', then mine one block and sync all nodes."""
        wallet = node.listunspent()
        wallet.sort(key=lambda x: x["amount"], reverse=True)
        # Create a LOT of UTXOs
        logging.info("Create lots of UTXOs...")
        n = 0
        group = min(100, amt)
        count = 0
        for w in wallet:
            count += group
            split_transaction(node, [w], addrs[n:group + n])
            n += group
            if n >= len(addrs):
                n = 0  # wrap around and reuse the address list
            if count > amt:
                break
        self.sync_all()
        logging.info("mine blocks")
        node.generate(1)  # mine all the created transactions
        logging.info("sync all blocks and mempools")
        self.sync_all()
    def expectHeights(self, blockHeights, waittime=10):
        """Poll once per second (up to 'waittime' seconds) until every node's
        block count matches 'blockHeights'; True on match, False on timeout."""
        loop = 0
        # (removed unused 'count = []' local from the original)
        while loop < waittime:
            counts = [x.getblockcount() for x in self.nodes]
            if counts == blockHeights:
                return True  # success!
            time.sleep(1)
            loop += 1
            if (loop % 30) == 0:
                logging.info("...waiting %s" % loop)
        return False
    def generateAndPrintBlock(self, node):
        """Mine one block on 'node', log its height and size, return the hash list."""
        hsh = node.generate(1)
        inf = node.getblock(hsh[0])
        logging.info("block %d size %d" % (inf["height"], inf["size"]))
        return hsh
def main(longTest):
    """Build and run the interop excessive-block test suite.

    longTest -- when truthy, run the slow (extended) test variants.
    """
    test = TestInterOpExcessive("debug", clientDirs, longTest)
    test.drop_to_pdb = True
    conf = {
        "debug": ["net", "blk", "thin", "mempool", "req", "bench", "evict"],
        "blockprioritysize": 2000000000
    }
    # Default runtime/log folder; the last --tmpdir=... CLI argument wins.
    tmpdir = "--tmpdir=/tmp/cashInterop"
    for arg in sys.argv[1:]:
        if "--tmpdir=" in arg:
            tmpdir = str(arg)
    logging.info("# User input : %s" % tmpdir)
    test.main([tmpdir], conf, None)
def Test(longTest=False):
    """Entry point used by the test runner; accepts a bool or the string 'true'."""
    run_extended = str(longTest).lower() == 'true'
    main(run_extended)
if __name__ == "__main__":
    # Run the extensive (slow) test variants when --extensive is passed.
    if "--extensive" in sys.argv:
        longTest = True
        # we must remove duplicate 'extensive' arg here so the framework's
        # own argument parsing does not see it
        while True:
            try:
                sys.argv.remove('--extensive')
            except ValueError:
                # BUG FIX: the original used a bare 'except:', which would
                # also swallow KeyboardInterrupt/SystemExit; only "value not
                # found" is expected here.
                break
        logging.info("Running extensive tests")
    else:
        longTest = False
    main(longTest)
| StarcoderdataPython |
68092 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import json
import logging
import os
import yaml
from tests import common
from testtools import testcase
log = logging.getLogger('test_hook_salt')
slsok = """
testit:
environ.setenv:
- name: does_not_matter
- value:
foo: {{ opts['fooval'] }}
bar: {{ opts['barval'] }}
"""
slsfail = """
failure:
test.echo:
- text: I don't work
"""
slsnotallowed = """
default to accept:
iptables.set_policy:
- chain: INPUT
- policy: REJECT
"""
class HookSaltTest(common.RunScriptTest):
    """Exercise the heat-config salt hook against passing, failing and
    retcode-producing SLS documents."""
    # Template deployment payload; each test fills in 'config' before running.
    # NOTE: this is a class-level dict, so assignments via self.data mutate
    # shared state -- safe here only because every test sets data['config']
    # itself before invoking the hook.
    data = {
        'id': 'fake_stack',
        'name': 'fake_resource_name',
        'group': 'salt',
        'inputs': [
            {'name': 'fooval', 'value': 'bar'},
            {'name': 'barval', 'value': 'foo'}
        ],
        'outputs': [
            {'name': 'first_output'},
            {'name': 'second_output'}
        ],
        'config': None
    }
    def setUp(self):
        """Create temp dirs plus a minion config, and point the hook at them
        through environment variables."""
        super(HookSaltTest, self).setUp()
        self.hook_path = self.relative_path(
            __file__,
            '..',
            'heat-config-salt/install.d/hook-salt.py')
        self.working_dir = self.useFixture(fixtures.TempDir())
        self.minion_config_dir = self.useFixture(fixtures.TempDir())
        # FIX: renamed attribute 'minion_cach_dir' -> 'minion_cache_dir'
        # (typo; only used within this class).
        self.minion_cache_dir = self.useFixture(fixtures.TempDir())
        self.minion_conf = self.minion_config_dir.join("minion")
        self.env = os.environ.copy()
        self.env.update({
            'HEAT_SALT_WORKING': self.working_dir.join(),
            'SALT_MINION_CONFIG': self.minion_conf
        })
        with open(self.minion_conf, "w+") as conf_file:
            conf_file.write("cachedir: %s\n" % self.minion_cache_dir.join())
            conf_file.write("log_level: DEBUG\n")
    @testcase.skip("Skip until salt issue resolved")  # FIX: "untill" typo
    def test_hook(self):
        """Happy path: the ok SLS renders the opts and reports the changes."""
        self.data['config'] = slsok
        returncode, stdout, stderr = self.run_cmd(
            [self.hook_path], self.env, json.dumps(self.data))
        self.assertEqual(0, returncode, stderr)
        ret = yaml.safe_load(stdout)
        self.assertEqual(0, ret['deploy_status_code'])
        self.assertIsNone(ret['deploy_stderr'])
        self.assertIsNotNone(ret['deploy_stdout'])
        resp = yaml.safe_load(ret['deploy_stdout'])
        self.assertTrue(list(resp.values())[0]['result'])
        self.assertEqual({'bar': 'foo', 'foo': 'bar'},
                         list(resp.values())[0]['changes'])
    @testcase.skip("Skip until salt issue resolved")  # FIX: "untill" typo
    def test_hook_salt_failed(self):
        """The failing SLS yields status code 2 and a 'not found' stderr."""
        self.data['config'] = slsfail
        returncode, stdout, stderr = self.run_cmd(
            [self.hook_path], self.env, json.dumps(self.data))
        self.assertEqual(0, returncode)
        self.assertIsNotNone(stderr)
        self.assertIsNotNone(stdout)
        jsonout = json.loads(stdout.decode('utf-8'))
        self.assertIsNone(jsonout.get("deploy_stdout"),
                          jsonout.get("deploy_stdout"))
        self.assertEqual(2, jsonout.get("deploy_status_code"))
        self.assertIsNotNone(jsonout.get("deploy_stderr"))
        self.assertIn("was not found in SLS", jsonout.get("deploy_stderr"))
    @testcase.skip("Skip until salt issue resolved")  # FIX: "untill" typo
    def test_hook_salt_retcode(self):
        """An SLS whose state fails reports result=False via deploy_stderr."""
        self.data['config'] = slsnotallowed
        returncode, stdout, stderr = self.run_cmd(
            [self.hook_path], self.env, json.dumps(self.data))
        self.assertEqual(0, returncode, stderr)
        self.assertIsNotNone(stdout)
        self.assertIsNotNone(stderr)
        ret = json.loads(stdout.decode('utf-8'))
        self.assertIsNone(ret['deploy_stdout'])
        self.assertIsNotNone(ret['deploy_stderr'])
        resp = list(yaml.safe_load(ret['deploy_stderr']).values())[0]
        self.assertFalse(resp['result'])
| StarcoderdataPython |
4986617 | import time
import binascii
import json
import requests
# Fetch the list of available Bible translations and keep a raw copy on disk.
res = requests.get('https://biblia.sk/api/preklady')
with open('preklady.json', 'wb') as f:
    f.write(res.content)
preklady = res.json()['data']
preklad = preklady[2] # index 2: the 'roh' translation
identifier = preklad['identifier']
books = preklad['books']
# Query parameters for the text endpoint: a millisecond timestamp plus an
# API key (redacted here as '<KEY>' -- must be filled in to run).
params = {
    'timestamp': int(time.time()*1000),
    'key': '<KEY>',
}
# Download every chapter of every book, replacing each book's chapter list
# with a {chapter: [verse contents]} mapping.
for i, book in enumerate(books):
    chapters = {}
    abbr = book['abbreviation']
    for chapter in book['chapters']:
        params['timestamp'] = int(time.time()*1000)
        # Chapter address is 'identifier|book-abbrev|chapter', base64-encoded.
        code = '|'.join([identifier, abbr, str(chapter)])
        code = binascii.b2a_base64(code.encode()).strip().decode()
        while True:
            res = requests.get('https://biblia.sk/api/text/%s' % (code, ), params=params)
            print(res.status_code, res.url)
            if res.status_code == 429:
                # Rate limited -- back off, then retry the same chapter.
                print('Too Many Requests, sleeping')
                time.sleep(40)
                continue
            break
        try:
            text = res.json()
        except Exception as e:
            # Dump the raw body for debugging before re-raising.
            print(res.content)
            raise e
        chapters[chapter] = [v['content'] for v in text['data']]
    book['chapters'] = chapters
    print(chapters)
# Persist the fully populated translation as pretty-printed JSON.
with open(identifier+'.json', 'w') as f:
    json.dump(preklad, f, indent=2)
| StarcoderdataPython |
1977177 | import collections
import sys
from abc import ABCMeta
from typing import *
import functools
import types
import operator
# Public API of this backport module.
__all__ = ['TypedDict', 'Final', 'Literal']
# NOTE(review): this binds an alias *instance* (typing.List[int]), not a
# class; it is later used as an isinstance() target, which raises TypeError
# when evaluated -- presumably types.GenericAlias was intended. Verify.
GenericAlias = List[int]
class _Final:
    """Mixin that forbids subclassing unless ``_root=True`` is passed."""
    __slots__ = ('__weakref__',)
    def __init_subclass__(cls, *args, **kwds):
        # Only internal "root" classes may inherit from this mixin;
        # any other subclass attempt is rejected at class-creation time.
        if '_root' not in kwds:
            raise TypeError("Cannot subclass special typing classes")
def _is_dunder(attr):
    """Return True for names of the ``__name__`` (double-underscore) form."""
    return attr[:2] == '__' and attr[-2:] == '__'
def _collect_type_vars(types):
    """Collect all type variables contained in *types*, deduplicated, in
    order of first appearance (lexicographic order). For example::

        _collect_type_vars((T, List[S, T])) == (T, S)
    """
    found = []
    for tp in types:
        if isinstance(tp, TypeVar) and tp not in found:
            found.append(tp)
    return tuple(found)
class _TypingEmpty:
    """Internal placeholder for () or []. Used by TupleMeta and CallableMeta
    to allow empty list/tuple in specific places, without allowing them
    to sneak in where prohibited.
    """
    # Sentinel only: never carries state; it is mapped back to () when
    # stored in _GenericAlias.__args__.
class _TypingEllipsis:
    """Internal placeholder for ... (ellipsis)."""
    # Sentinel only: mapped back to `...` when stored in _GenericAlias.__args__.
# Cache-clear callbacks registered by _tp_cache, so global caches can be reset.
_cleanups = []
def _tp_cache(func):
    """Internal wrapper caching __getitem__ of generic types with a fallback to
    original function for non-hashable arguments.
    """
    memoized = functools.lru_cache()(func)
    _cleanups.append(memoized.cache_clear)
    @functools.wraps(func)
    def wrapper(*args, **kwds):
        try:
            return memoized(*args, **kwds)
        except TypeError:
            pass  # unhashable arguments: skip the cache entirely
        # Real errors from func (not unhashable-arg errors) are raised here.
        return func(*args, **kwds)
    return wrapper
def _check_generic(cls, parameters, elen):
    """Check correct count for parameters of a generic cls (internal helper).
    This gives a nice error message in case of count mismatch.
    """
    if not elen:
        raise TypeError(f"{cls} is not a generic class")
    alen = len(parameters)
    if alen != elen:
        many_or_few = 'many' if alen > elen else 'few'
        raise TypeError(f"Too {many_or_few} parameters for {cls};"
                        f" actual {alen}, expected {elen}")
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper).

    Types get a short ``module.qualname`` spelling (just the qualname for
    builtins), which is typically enough to uniquely identify a type;
    ``...`` and plain functions also get compact forms. Everything else
    falls back to repr(obj).
    """
    if isinstance(obj, type):
        module, qualname = obj.__module__, obj.__qualname__
        return qualname if module == 'builtins' else f'{module}.{qualname}'
    if obj is ...:
        return '...'
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    return repr(obj)
class Generic:
    """Abstract base class for generic types.
    A generic type is typically declared by inheriting from
    this class parameterized with one or more type variables.
    For example, a generic mapping type might be defined as::
        class Mapping(Generic[KT, VT]):
            def __getitem__(self, key: KT) -> VT:
                ...
            # Etc.
    This class can then be used as follows::
        def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
            try:
                return mapping[key]
            except KeyError:
                return default
    """
    __slots__ = ()
    _is_protocol = False
    @_tp_cache
    def __class_getitem__(cls, params):
        # Validate the subscription arguments and build the _GenericAlias
        # that represents cls[params].
        if not isinstance(params, tuple):
            params = (params,)
        if not params and cls is not Tuple:
            raise TypeError(
                f"Parameter list to {cls.__qualname__}[...] cannot be empty")
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        if cls in (Generic, Protocol):
            # Generic and Protocol can only be subscripted with unique type variables.
            if not all(isinstance(p, TypeVar) for p in params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be type variables")
            if len(set(params)) != len(params):
                raise TypeError(
                    f"Parameters to {cls.__name__}[...] must all be unique")
        else:
            # Subscripting a regular Generic subclass.
            _check_generic(cls, params, len(cls.__parameters__))
        return _GenericAlias(cls, params)
    def __init_subclass__(cls, *args, **kwargs):
        # Compute cls.__parameters__ (the ordered tuple of free type
        # variables) for every new subclass, and reject inheriting from a
        # plain, unsubscripted Generic.
        super().__init_subclass__(*args, **kwargs)
        tvars = []
        if '__orig_bases__' in cls.__dict__:
            error = Generic in cls.__orig_bases__
        else:
            error = Generic in cls.__bases__ and cls.__name__ != 'Protocol'
        if error:
            raise TypeError("Cannot inherit from plain Generic")
        if '__orig_bases__' in cls.__dict__:
            tvars = _collect_type_vars(cls.__orig_bases__)
            # Look for Generic[T1, ..., Tn].
            # If found, tvars must be a subset of it.
            # If not found, tvars is it.
            # Also check for and reject plain Generic,
            # and reject multiple Generic[...].
            gvars = None
            for base in cls.__orig_bases__:
                if (isinstance(base, _GenericAlias) and
                        base.__origin__ is Generic):
                    if gvars is not None:
                        raise TypeError(
                            "Cannot inherit from Generic[...] multiple types.")
                    gvars = base.__parameters__
            if gvars is not None:
                tvarset = set(tvars)
                gvarset = set(gvars)
                if not tvarset <= gvarset:
                    s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
                    s_args = ', '.join(str(g) for g in gvars)
                    raise TypeError(f"Some type variables ({s_vars}) are"
                                    f" not listed in Generic[{s_args}]")
                tvars = gvars
        cls.__parameters__ = tuple(tvars)
class _SpecialForm(_Final, _root=True):
    # Internal type for special forms (Final, Literal, ...): a named,
    # non-instantiable singleton whose subscription delegates to the wrapped
    # getitem function. No class docstring on purpose: '__doc__' appears in
    # __slots__, and a docstring would conflict with that slot.
    __slots__ = ('_name', '__doc__', '_getitem')
    def __init__(self, getitem):
        # Used as a decorator: the decorated function supplies the form's
        # name, docstring, and subscription behavior.
        self._getitem = getitem
        self._name = getitem.__name__
        self.__doc__ = getitem.__doc__
    def __mro_entries__(self, bases):
        # Using a special form as a base class is always an error.
        raise TypeError(f"Cannot subclass {self!r}")
    def __repr__(self):
        return 'typing.' + self._name
    def __reduce__(self):
        # Pickle by name; unpickling resolves the module-level singleton.
        return self._name
    def __call__(self, *args, **kwds):
        raise TypeError(f"Cannot instantiate {self!r}")
    def __instancecheck__(self, obj):
        raise TypeError(f"{self} cannot be used with isinstance()")
    def __subclasscheck__(self, cls):
        raise TypeError(f"{self} cannot be used with issubclass()")
    @_tp_cache
    def __getitem__(self, parameters):
        # Subscription (e.g. Final[int]) is delegated to the wrapped function.
        return self._getitem(self, parameters)
class _BaseGenericAlias(_Final, _root=True):
    """The central part of internal API.
    This represents a generic version of type 'origin' with type arguments 'params'.
    There are two kind of these aliases: user defined and special. The special ones
    are wrappers around builtin collections and ABCs in collections.abc. These must
    have 'name' always set. If 'inst' is False, then the alias can't be instantiated,
    this is used by e.g. typing.List and typing.Dict.
    """
    def __init__(self, origin, *, inst=True, name=None):
        # inst: whether calling the alias may instantiate the origin class.
        # name: public name for "special" aliases; None for user subscriptions.
        self._inst = inst
        self._name = name
        self.__origin__ = origin
        self.__slots__ = None  # This is not documented.
    def __call__(self, *args, **kwargs):
        # Calling the alias instantiates the origin, remembering which alias
        # produced the instance (best effort -- some types reject attributes).
        if not self._inst:
            raise TypeError(f"Type {self._name} cannot be instantiated; "
                            f"use {self.__origin__.__name__}() instead")
        result = self.__origin__(*args, **kwargs)
        try:
            result.__orig_class__ = self
        except AttributeError:
            pass
        return result
    def __mro_entries__(self, bases):
        # When used as a base class, substitute the real origin class, plus
        # Generic if no other generic base follows in the bases tuple.
        res = []
        if self.__origin__ not in bases:
            res.append(self.__origin__)
        i = bases.index(self)
        for b in bases[i+1:]:
            if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
                break
        else:
            res.append(Generic)
        return tuple(res)
    def __getattr__(self, attr):
        # We are careful for copy and pickle.
        # Also for simplicity we just don't relay all dunder names
        if '__origin__' in self.__dict__ and not _is_dunder(attr):
            return getattr(self.__origin__, attr)
        raise AttributeError(attr)
    def __setattr__(self, attr, val):
        # Internal attributes live on the alias itself; everything else is
        # forwarded to the origin class.
        if _is_dunder(attr) or attr in ('_name', '_inst', '_nparams'):
            super().__setattr__(attr, val)
        else:
            setattr(self.__origin__, attr, val)
    def __instancecheck__(self, obj):
        return self.__subclasscheck__(type(obj))
    def __subclasscheck__(self, cls):
        raise TypeError("Subscripted generics cannot be used with"
                        " class and instance checks")
class _GenericAlias(_BaseGenericAlias, _root=True):
    # Alias produced by subscripting a generic class or special form,
    # e.g. Mapping[str, int] or Final[int].
    def __init__(self, origin, params, *, inst=True, name=None):
        super().__init__(origin, inst=inst, name=name)
        if not isinstance(params, tuple):
            params = (params,)
        # Map the internal sentinels back to their public spellings.
        self.__args__ = tuple(... if a is _TypingEllipsis else
                              () if a is _TypingEmpty else
                              a for a in params)
        self.__parameters__ = _collect_type_vars(params)
        if not name:
            self.__module__ = origin.__module__
    def __eq__(self, other):
        # Aliases are equal when both origin and arguments match.
        if not isinstance(other, _GenericAlias):
            return NotImplemented
        return (self.__origin__ == other.__origin__
                and self.__args__ == other.__args__)
    def __hash__(self):
        return hash((self.__origin__, self.__args__))
    @_tp_cache
    def __getitem__(self, params):
        # Substitute this alias's type variables with concrete parameters,
        # e.g. Mapping[KT, VT][str, int] -> Mapping[str, int].
        if self.__origin__ in (Generic, Protocol):
            # Can't subscript Generic[...] or Protocol[...].
            raise TypeError(f"Cannot subscript already-subscripted {self}")
        if not isinstance(params, tuple):
            params = (params,)
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        _check_generic(self, params, len(self.__parameters__))
        subst = dict(zip(self.__parameters__, params))
        new_args = []
        for arg in self.__args__:
            if isinstance(arg, TypeVar):
                arg = subst[arg]
            # NOTE(review): module-level GenericAlias is List[int] -- an alias
            # *instance*, not a class -- so this isinstance raises TypeError
            # if evaluated for a non-_GenericAlias arg; verify intent.
            elif isinstance(arg, (_GenericAlias, GenericAlias)):
                subparams = arg.__parameters__
                if subparams:
                    # Recursively substitute into nested aliases.
                    subargs = tuple(subst[x] for x in subparams)
                    arg = arg[subargs]
            new_args.append(arg)
        return self.copy_with(tuple(new_args))
    def copy_with(self, params):
        # Same origin and flags, new arguments.
        return self.__class__(self.__origin__, params, name=self._name, inst=self._inst)
    def __repr__(self):
        if self._name:
            name = 'typing.' + self._name
        else:
            name = _type_repr(self.__origin__)
        args = ", ".join([_type_repr(a) for a in self.__args__])
        return f'{name}[{args}]'
    def __reduce__(self):
        # Pickle as getitem(origin, args) so unpickling re-creates the alias.
        if self._name:
            origin = globals()[self._name]
        else:
            origin = self.__origin__
        args = tuple(self.__args__)
        if len(args) == 1 and not isinstance(args[0], tuple):
            args, = args
        return operator.getitem, (origin, args)
    def __mro_entries__(self, bases):
        if self._name:  # generic version of an ABC or built-in class
            return super().__mro_entries__(bases)
        if self.__origin__ is Generic:
            if Protocol in bases:
                return ()
            i = bases.index(self)
            for b in bases[i+1:]:
                if isinstance(b, _BaseGenericAlias) and b is not self:
                    return ()
        return (self.__origin__,)
def _type_check(arg, msg, is_argument=True):
    """Check that the argument is a type, and return it (internal helper).
    As a special case, accept None and return type(None) instead. Also wrap strings
    into ForwardRef instances. Consider several corner cases, for example plain
    special forms like Union are not valid, while Union[int, str] is OK, etc.
    The msg argument is a human-readable error message, e.g::
        "Union[arg, ...]: arg should be a type."
    We append the repr() of the actual value (truncated to 100 chars).
    """
    invalid_generic_forms = (Generic, Protocol)
    if is_argument:
        # ClassVar/Final make no sense in argument position.
        invalid_generic_forms = invalid_generic_forms + (ClassVar, Final)
    if arg is None:
        return type(None)
    if isinstance(arg, str):
        # Strings become forward references, resolved later.
        return ForwardRef(arg)
    if (isinstance(arg, _GenericAlias) and
            arg.__origin__ in invalid_generic_forms):
        raise TypeError(f"{arg} is not valid as type argument")
    if arg in (Any, NoReturn):
        return arg
    if isinstance(arg, _SpecialForm) or arg in (Generic, Protocol):
        raise TypeError(f"Plain {arg} is not valid as type argument")
    if isinstance(arg, (type, TypeVar, ForwardRef)):
        return arg
    if not callable(arg):
        raise TypeError(f"{msg} Got {arg!r:.100}.")
    return arg
class _TypedDictMeta(type):
    # Metaclass backing TypedDict; the classes it produces inherit from
    # dict, so their instances are plain dictionaries at runtime.
    def __new__(cls, name, bases, ns, total=True):
        """Create new typed dict class object.
        This method is called when TypedDict is subclassed,
        or when TypedDict is instantiated. This way
        TypedDict supports all three syntax forms described in its docstring.
        Subclasses and instances of TypedDict return actual dictionaries.
        """
        for base in bases:
            if type(base) is not _TypedDictMeta:
                raise TypeError('cannot inherit from both a TypedDict type '
                                'and a non-TypedDict base class')
        # Note: bases are replaced by (dict,), so instances are real dicts.
        tp_dict = type.__new__(_TypedDictMeta, name, (dict,), ns)
        annotations = {}
        own_annotations = ns.get('__annotations__', {})
        own_annotation_keys = set(own_annotations.keys())
        msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
        own_annotations = {
            n: _type_check(tp, msg) for n, tp in own_annotations.items()
        }
        required_keys = set()
        optional_keys = set()
        # Merge annotations and required/optional key sets from the bases.
        for base in bases:
            annotations.update(base.__dict__.get('__annotations__', {}))
            required_keys.update(base.__dict__.get('__required_keys__', ()))
            optional_keys.update(base.__dict__.get('__optional_keys__', ()))
        annotations.update(own_annotations)
        # Totality decides whether this class's own keys are required.
        if total:
            required_keys.update(own_annotation_keys)
        else:
            optional_keys.update(own_annotation_keys)
        tp_dict.__annotations__ = annotations
        tp_dict.__required_keys__ = frozenset(required_keys)
        tp_dict.__optional_keys__ = frozenset(optional_keys)
        if not hasattr(tp_dict, '__total__'):
            tp_dict.__total__ = total
        return tp_dict
    __call__ = dict  # static method
    def __subclasscheck__(cls, other):
        # Typed dicts are only for static structural subtyping.
        raise TypeError('TypedDict does not support instance and class checks')
    __instancecheck__ = __subclasscheck__
def TypedDict(typename, fields=None, *, total=True, **kwargs):
    """A simple typed namespace. At runtime it is equivalent to a plain dict.
    TypedDict creates a dictionary type that expects all of its
    instances to have a certain set of keys, where each key is
    associated with a value of a consistent type. This expectation
    is not checked at runtime but is only enforced by type checkers.
    Usage::
        class Point2D(TypedDict):
            x: int
            y: int
            label: str
        a: Point2D = {'x': 1, 'y': 2, 'label': 'good'}  # OK
        b: Point2D = {'z': 3, 'label': 'bad'}           # Fails type check
        assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
    The type info can be accessed via the Point2D.__annotations__ dict, and
    the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
    TypedDict supports two additional equivalent forms::
        Point2D = TypedDict('Point2D', x=int, y=int, label=str)
        Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
    By default, all keys must be present in a TypedDict. It is possible
    to override this by specifying totality.
    Usage::
        class point2D(TypedDict, total=False):
            x: int
            y: int
    This means that a point2D TypedDict can have any of the keys omitted. A type
    checker is only expected to support a literal False or True as the value of
    the total argument. True is the default, and makes all items defined in the
    class body be required.
    The class syntax is only supported in Python 3.6+, while two other
    syntax forms work for Python 2.7 and 3.2+
    """
    if fields is None:
        fields = kwargs
    elif kwargs:
        raise TypeError("TypedDict takes either a dict or keyword arguments,"
                        " but not both")
    ns = {'__annotations__': dict(fields), '__total__': total}
    try:
        # Setting correct module is necessary to make typed dict classes pickleable.
        ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    # Delegate actual class construction to the metaclass.
    return _TypedDictMeta(typename, (), ns)
# Calling TypedDict(...) goes through the factory function above; *inheriting*
# from TypedDict resolves (via PEP 560 __mro_entries__) to this hidden class,
# which triggers _TypedDictMeta.__new__ for the subclass.
_TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
TypedDict.__mro_entries__ = lambda bases: (_TypedDict,)
_TYPING_INTERNALS = ['__parameters__', '__orig_bases__', '__orig_class__',
                     '_is_protocol', '_is_runtime_protocol']
_SPECIAL_NAMES = ['__abstractmethods__', '__annotations__', '__dict__', '__doc__',
                  '__init__', '__module__', '__new__', '__slots__',
                  '__subclasshook__', '__weakref__', '__class_getitem__']
# These special attributes will be not collected as protocol members.
EXCLUDED_ATTRIBUTES = _TYPING_INTERNALS + _SPECIAL_NAMES + ['_MutableMapping__marker']
def _get_protocol_attrs(cls):
    """Collect protocol members from a protocol class object.

    Gathers names defined in each class dict along the MRO as well as names
    appearing only in annotations; special/internal names are skipped.
    """
    members = set()
    for base in cls.__mro__[:-1]:  # the trailing object contributes nothing
        if base.__name__ in ('Protocol', 'Generic'):
            continue
        candidates = list(base.__dict__) + list(getattr(base, '__annotations__', {}))
        for name in candidates:
            if name.startswith('_abc_') or name in EXCLUDED_ATTRIBUTES:
                continue
            members.add(name)
    return members
def _is_callable_members_only(cls):
    # PEP 544 prohibits using issubclass() with protocols that have
    # non-method members, so every collected member must be callable.
    for attr in _get_protocol_attrs(cls):
        if not callable(getattr(cls, attr, None)):
            return False
    return True
def _no_init(self, *args, **kwargs):
    """Replacement ``__init__`` that blocks direct instantiation of protocols."""
    cls = type(self)
    if not cls._is_protocol:
        return
    raise TypeError('Protocols cannot be instantiated')
def _allow_reckless_class_cheks():  # (sic: "cheks" -- name kept for internal callers)
    """Allow instance and class checks for special stdlib modules.
    The abc and functools modules indiscriminately call isinstance() and
    issubclass() on the whole MRO of a user class, which may contain protocols.
    """
    # NOTE(review): the frame depth (3) is load-bearing -- it assumes this is
    # called from exactly the Protocol subclass-hook call chain; confirm if
    # the call sites change.
    try:
        return sys._getframe(3).f_globals['__name__'] in ['abc', 'functools']
    except (AttributeError, ValueError):  # For platforms without _getframe().
        return True
_PROTO_WHITELIST = {
    # Stdlib classes (by module) that are allowed as non-Protocol bases of
    # protocol classes; checked in Protocol.__init_subclass__.
    'collections.abc': [
        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
        'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
    ],
    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
}
class _ProtocolMeta(ABCMeta):
    # This metaclass is really unfortunate and exists only because of
    # the lack of __instancehook__.
    def __instancecheck__(cls, instance):
        # We need this method for situations where attributes are
        # assigned in __init__.
        # Fast path: a genuine (nominal) subclass instance passes directly,
        # as does any instance when the protocol has only callable members.
        if ((not getattr(cls, '_is_protocol', False) or
                _is_callable_members_only(cls)) and
                issubclass(instance.__class__, cls)):
            return True
        if cls._is_protocol:
            # Structural check: every protocol member must exist on the
            # instance (methods may be disabled by setting them to None).
            if all(hasattr(instance, attr) and
                    # All *methods* can be blocked by setting them to None.
                    (not callable(getattr(cls, attr, None)) or
                     getattr(instance, attr) is not None)
                    for attr in _get_protocol_attrs(cls)):
                return True
        return super().__instancecheck__(instance)
class Protocol(Generic, metaclass=_ProtocolMeta):
    """Base class for protocol classes.
    Protocol classes are defined as::
        class Proto(Protocol):
            def meth(self) -> int:
                ...
    Such classes are primarily used with static type checkers that recognize
    structural subtyping (static duck-typing), for example::
        class C:
            def meth(self) -> int:
                return 0
        def func(x: Proto) -> int:
            return x.meth()
        func(C())  # Passes static type check
    See PEP 544 for details. Protocol classes decorated with
    @typing.runtime_checkable act as simple-minded runtime protocols that check
    only the presence of given attributes, ignoring their type signatures.
    Protocol classes can be generic, they are defined as::
        class GenProto(Protocol[T]):
            def meth(self) -> T:
                ...
    """
    __slots__ = ()
    _is_protocol = True
    _is_runtime_protocol = False
    def __init_subclass__(cls, *args, **kwargs):
        super().__init_subclass__(*args, **kwargs)
        # Determine if this is a protocol or a concrete subclass.
        if not cls.__dict__.get('_is_protocol', False):
            cls._is_protocol = any(b is Protocol for b in cls.__bases__)
        # Set (or override) the protocol subclass hook.
        def _proto_hook(other):
            # Structural issubclass() check used for runtime-checkable protocols.
            if not cls.__dict__.get('_is_protocol', False):
                return NotImplemented
            # First, perform various sanity checks.
            if not getattr(cls, '_is_runtime_protocol', False):
                if _allow_reckless_class_cheks():
                    return NotImplemented
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime_checkable protocols")
            if not _is_callable_members_only(cls):
                if _allow_reckless_class_cheks():
                    return NotImplemented
                raise TypeError("Protocols with non-method members"
                                " don't support issubclass()")
            if not isinstance(other, type):
                # Same error message as for issubclass(1, int).
                raise TypeError('issubclass() arg 1 must be a class')
            # Second, perform the actual structural compatibility check.
            for attr in _get_protocol_attrs(cls):
                for base in other.__mro__:
                    # Check if the members appears in the class dictionary...
                    if attr in base.__dict__:
                        if base.__dict__[attr] is None:
                            return NotImplemented
                        break
                    # ...or in annotations, if it is a sub-protocol.
                    annotations = getattr(base, '__annotations__', {})
                    if (isinstance(annotations, collections.abc.Mapping) and
                            attr in annotations and
                            issubclass(other, Generic) and other._is_protocol):
                        break
                else:
                    return NotImplemented
            return True
        if '__subclasshook__' not in cls.__dict__:
            cls.__subclasshook__ = _proto_hook
        # We have nothing more to do for non-protocols...
        if not cls._is_protocol:
            return
        # ... otherwise check consistency of bases, and prohibit instantiation.
        for base in cls.__bases__:
            if not (base in (object, Generic) or
                    base.__module__ in _PROTO_WHITELIST and
                    base.__name__ in _PROTO_WHITELIST[base.__module__] or
                    issubclass(base, Generic) and base._is_protocol):
                raise TypeError('Protocols can only inherit from other'
                                ' protocols, got %r' % base)
        cls.__init__ = _no_init
def final(f):
    """A decorator to indicate final methods and final classes.
    Use this decorator to indicate to type checkers that the decorated
    method cannot be overridden, and decorated class cannot be subclassed.
    For example:
        class Base:
            @final
            def done(self) -> None:
                ...
        class Sub(Base):
            def done(self) -> None:  # Error reported by type checker
                ...
        @final
        class Leaf:
            ...
        class Other(Leaf):  # Error reported by type checker
            ...
    There is no runtime checking of these properties.
    """
    # Runtime no-op: the decorator only serves as a marker for type checkers.
    return f
@_SpecialForm
def Final(self, parameters):
    """Special typing construct to indicate final names to type checkers.

    A final name cannot be re-assigned or overridden in a subclass, e.g.:

        MAX_SIZE: Final = 9000
        MAX_SIZE += 1  # Error reported by type checker

        class Connection:
            TIMEOUT: Final[int] = 10
        class FastConnector(Connection):
            TIMEOUT = 1  # Error reported by type checker

    There is no runtime checking of these properties.
    """
    checked_item = _type_check(parameters, f'{self} accepts only single type.')
    return _GenericAlias(self, (checked_item,))
@_SpecialForm
def Literal(self, parameters):
    """Special typing form to define literal types (a.k.a. value types).

    Indicates to type checkers that a variable or parameter holds a value
    equivalent to one of the provided literals:

        def validate_simple(data: Any) -> Literal[True]:  # always returns True
            ...

        MODE = Literal['r', 'rb', 'w', 'wb']
        def open_helper(file: str, mode: MODE) -> str:
            ...

        open_helper('/some/path', 'r')      # Passes type check
        open_helper('/other/path', 'typo')  # Error in type checker

    Literal[...] cannot be subclassed. At runtime an arbitrary value is
    allowed as type argument to Literal[...], but type checkers may
    impose restrictions.
    """
    # No '_type_check' call here: arguments to Literal[...] are values,
    # not types, so they are passed through unchecked.
    return _GenericAlias(self, parameters)
| StarcoderdataPython |
6404435 | <gh_stars>0
from authx.authenticate import authenticator
from authx.exception import (InvalidUsername, NotLoggedInError,
NotPermittedError, PermissionError)
class Authorizer:
    """Grants and checks named permissions for users managed by an
    authenticator.

    The authenticator must expose a ``users`` collection and an
    ``is_logged_in(username)`` predicate.
    """

    def __init__(self, authenticator):
        self.authenticator = authenticator
        # Maps permission name -> set of usernames granted that permission.
        self.permissions = {}

    def add_permission(self, perm_name):
        """
        Create a new permission that users can be added to.

        Raises PermissionError if the permission already exists.
        """
        if perm_name in self.permissions:
            raise PermissionError("Permission Exists")
        self.permissions[perm_name] = set()

    def permit_user(self, perm_name, username):
        """
        Grant the given permission to the user.

        Raises PermissionError for an unknown permission and
        InvalidUsername for a user the authenticator does not know.
        """
        try:
            perm_set = self.permissions[perm_name]
        except KeyError:
            raise PermissionError("Permission does not exists!")
        if username not in self.authenticator.users:
            raise InvalidUsername(username)
        perm_set.add(username)

    def check_permission(self, perm_name, username):
        """
        Checks whether a user has a specific permission or not.
        In order to evaluate if a permission is granted a user
        - needs to be logged in
        - be in the set of users tied to a `perm_name`
        else an exception is raised.
        """
        if not self.authenticator.is_logged_in(username):
            raise NotLoggedInError(username)
        try:
            perm_set = self.permissions[perm_name]
        except KeyError:
            raise PermissionError("Permission does not exist")
        if username not in perm_set:
            # Bug fix: the original *returned* the exception instance
            # (a truthy value!) instead of raising it, so callers doing
            # `if auth.check_permission(...)` granted access to
            # non-permitted users.
            raise NotPermittedError(username)
        return True
authorizer = Authorizer(authenticator)
| StarcoderdataPython |
6550620 | #!/usr/bin/env python
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import math
from functools import reduce
import pyquil.quil as pq
from pyquil import api
from pyquil.gates import H
from six.moves import range
def qubits_needed(n):
    """
    The number of qubits needed for a die of n faces.

    Computed as ceil(log2(n)) with exact integer arithmetic; the
    original math.ceil(math.log(n, 2)) suffers floating-point rounding
    for large n (e.g. n = 2**50 + 1 rounds down to 50 instead of 51).
    Raises ValueError for n < 1, matching math.log's domain error.
    """
    if n < 1:
        raise ValueError("a die must have at least one face")
    return (n - 1).bit_length()
def die_program(n):
    """
    Generate a quantum program to roll a die of n faces.

    Every qubit is put into an even superposition with a Hadamard gate
    and then measured into the classical register of the same index.
    """
    program = pq.Program()
    num_qubits = qubits_needed(n)
    # Hadamard initialize.
    for qubit in range(num_qubits):
        program.inst(H(qubit))
    # Measure everything.
    for qubit in range(num_qubits):
        program.measure(qubit, [qubit])
    return program
def process_result(r):
    """
    Convert a list of measurements to a die value.

    The bits in r are read as a big-endian binary number; an empty
    list yields 0.
    """
    value = 0
    for bit in r:
        value = 2 * value + bit
    return value
# Number of shots requested from the QVM per rejection-sampling batch.
BATCH_SIZE = 10
# Cache of compiled die programs, keyed by the number of faces.
dice = {}
# NOTE(review): the QVM connection is opened at import time — confirm this
# is intended if this module is ever imported rather than run as a script.
qvm = api.QVMConnection()
def roll_die(n):
    """
    Roll an n-sided quantum die.

    Measured bit strings are interpreted as integers; values outside
    1..n are discarded (rejection sampling), so every face is returned
    with equal probability. Programs are cached in `dice` per n.
    """
    addresses = list(range(qubits_needed(n)))
    if n not in dice:  # idiom fix: was `if not n in dice`
        dice[n] = die_program(n)
    die = dice[n]
    # Generate results and do rejection sampling.
    while True:
        results = qvm.run(die, addresses, BATCH_SIZE)
        for r in results:
            x = process_result(r)
            if 0 < x <= n:
                return x
if __name__ == '__main__':
    # Interactive demo: keep rolling the requested die until interrupted.
    number_of_sides = int(input("Please enter number of sides: "))
    while True:
        print(roll_die(number_of_sides))
| StarcoderdataPython |
5057104 | # -*- coding: utf-8 -*-
import numpy
import numpy.random
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def Plot_ColorMap(Data):
    """Render Data['ColorMap'] as a field (x, Oe) vs frequency (y, GHz)
    image in the persistent 'PNA-FMR' figure.

    The figure's single axes object is reused across calls so the
    window updates in place during a running measurement.
    """
    f = plt.figure('PNA-FMR', (5, 4))
    # Physical axis limits for imshow: [h_min, h_max, f_min_GHz, f_max_GHz].
    extent = numpy.array([Data['h'].min(),
                          Data['h'].max(),
                          Data['f'].min()/1E9,
                          Data['f'].max()/1E9])
    # Create the axes on the first call only.
    if not(f.axes):
        plt.subplot()
    ax = f.axes[0]
    ax.clear()
    # ColorMap is stored as [h_i, f_i]; transpose so field runs along x.
    ax.imshow(Data['ColorMap'].T,
              aspect='auto',
              origin='lower',
              extent=extent)
    ax.set_xlabel('Field (Oe)')
    ax.set_ylabel('Freq (GHz)')
    f.tight_layout()
    f.canvas.draw()
@ThD.gui_safe
def Plot_dPdH(Data):
    """Plot the dP/dH trace in the persistent 'VNA-FMR dP/dH' figure.

    The x axis is field (Oe) in 'Fixed frequency' mode and frequency
    (GHz) otherwise. The y limits only ever grow between calls so the
    displayed range is never lost while data streams in.
    """
    f = plt.figure('VNA-FMR dP/dH', (5, 4))
    if not(f.axes):
        plt.subplot()
    ax = f.axes[0]
    # First call: create the (empty) line and set initial axis limits.
    if not(ax.lines):
        ax.plot([],[],'b.-')
        if 'Fixed frequency' in Data['Mode']:
            ax.set_xlim([Data['h'].min(), Data['h'].max()])
        else:
            ax.set_xlim([Data['f'].min()/1E9, Data['f'].max()/1E9])
        ax.set_ylim([-1E-10, 1E-10])
    line = ax.lines[-1]
    if 'Fixed frequency' in Data['Mode']:
        line.set_data(Data['h'], Data['dP/dH']*1000)
        ax.set_xlabel('Field (Oe)')
    else:
        line.set_data(Data['f']/1E9, Data['dP/dH']*1000)
        ax.set_xlabel('Freq (GHz)')
    ax.set_ylabel('dP/dH')
    ax.grid(True)
    #check Y scale
    # Grow the y-limits to contain the data (with ~10% margin) without
    # shrinking what is already displayed; NaNs are treated as 0.
    ymax = numpy.nan_to_num(Data['dP/dH']).max()*1000
    ymin = numpy.nan_to_num(Data['dP/dH']).min()*1000
    dy = ymax - ymin
    yc = (ymax + ymin)/2
    ymin, ymax = ax.get_ylim()
    ymax = numpy.max([yc + dy*1.1/2, ymax])
    ymin = numpy.min([yc - dy*1.1/2, ymin])
    ax.set_ylim([ymin, ymax])
    f.tight_layout()
    f.canvas.draw()
class PNA_FMR_1P(object):
    """Single-port VNA ferromagnetic-resonance (FMR) experiment driver.

    Couples a Kepco BOP power supply (DC magnetic field) with a Keysight
    PNA (S11 measurement) and provides threaded sweep routines that fill
    ``self.Data`` / ``self.Data_dPdH`` and live-update the module's
    matplotlib plots.

    Fixes applied in this revision:
    - string comparisons of ``dH_source`` used ``is``/identity, which
      only works by accident of CPython string interning; replaced with
      ``==``.
    - sign of ``i_neg`` in ``Improve_dPdH_freq`` corrected to match
      ``Measure_dPdH_freq`` (see comment there).
    """

    def __init__(self, ResouceNames={}):
        # NOTE: the mutable default dict is only read (merged into
        # defaultRN below), never mutated, so it is safe here.
        logFile = os.path.expanduser('~/MagDynLab.log')
        defaultRN = dict(RN_Kepco = 'GPIB0::6::INSTR',
                         RN_PNA = 'TCPIP::192.168.13.10::INSTR')
        defaultRN.update(ResouceNames)
        RN_Kepco = defaultRN['RN_Kepco']
        RN_PNA = defaultRN['RN_PNA']
        PowerSource = magdynlab.instruments.KEPCO_BOP(ResourceName=RN_Kepco,
                                                      logFile=logFile)
        VNA = magdynlab.instruments.KEYSIGHT_PNA(ResourceName=RN_PNA,
                                                 logFile=logFile)
        self.VNAC = magdynlab.controllers.VNA_Controller(VNA)
        self.FC = magdynlab.controllers.FieldController(PowerSource)
        self.FC.Kepco.Voltage = 15
        ## Data containers ##
        #Data for S11 vs hs vs fs
        self.Data = magdynlab.data_types.DataContainer()
        self.Data.file_id = '.VNA_1P_Raw'
        #Key:
        #'S11': S11 vs hs vs fs [h_i, f_i]
        #'h': Fields [h_i]
        #'f': Frequencies [f_i]
        #'S11_Ref': S11 reference [f_i]
        #*'Colormap': Colormap data [h_i, f_i]
        #'Info'
        #Data for S11 variation vs (hs of fs) at fixed (f or h)
        self.Data_dPdH = magdynlab.data_types.DataContainer()
        self.Data_dPdH.file_id = '.VNA_dPdH_Raw'
        #Key:
        #'S11': [S11+, S11-] vs (hs or fs) vs reps [(h_i or f_i), 2, rep_i]
        #'h': Fields [h_i] or fixed field [1]
        #'f': Fixed frequencyy [1] or Frequencies [f_i]
        #'dH': Variation amplitude of the field stimulus
        #'Mode': Stimulus mode
        #'dP/dH': Power variation agaist the stimulus [(h_i or f_i)]
        #'Info'
        self.Info = ''
        self._stop = False

    def SetTraces(self):
        """Configure the VNA for single-port S-parameter traces."""
        self.VNAC.set_traces_SParameters_1P()

    def MeasureRef(self):
        """Record the S11 reference trace used to normalize the colormap."""
        self.Data['S11_Ref'] = self.VNAC.getSData(0, True)

    def PlotColorMap(self, i=None):
        """Recompute (column *i* only, if given) and redraw the
        absorbed-power colormap from Data['S11'] and the reference."""
        Pabs_ref = 1 - numpy.abs(self.Data['S11_Ref'])**2
        if i is not None:
            # Update only i column
            Pabs = 1 - numpy.abs(self.Data['S11'][i])**2
            # Fields swept high-to-low are stored mirrored in the map.
            if self.Data['h'][0] > self.Data['h'][-1]:
                i = -1 - i
            self.Data['ColorMap'][i] = Pabs - Pabs_ref
        else:
            Pabs = 1 - numpy.abs(self.Data['S11'])**2
            self.Data['ColorMap'] = Pabs - Pabs_ref[None,:]
            if self.Data['h'][0] > self.Data['h'][-1]:
                self.Data['ColorMap'] = Pabs[::-1] - Pabs_ref[None,:]
        Plot_ColorMap(self.Data)

    @ThD.as_thread
    def Measure(self, fields, file_name, hold_time=0.0):
        """Sweep the DC field, recording one S11 trace per field value
        and updating the colormap live. Saves to *file_name* if given."""
        Data = self.Data
        Data['h'] = fields
        Data['f'] = self.VNAC.frequencies
        data_shape = (len(Data['h']), len(Data['f']))
        Data['S11'] = numpy.zeros(data_shape, dtype=complex)
        Data['ColorMap'] = numpy.zeros(data_shape, dtype=float) + numpy.nan
        Data.info = self.Info
        # Loop for each field
        for i, h in enumerate(fields):
            self.FC.setField(h)
            time.sleep(hold_time)
            Data['S11'][i] = self.VNAC.getSData(0, True)
            self.PlotColorMap(i)
            ThD.check_stop()
        if file_name is not None:
            Data.save(file_name)
        self.FC.TurnOff()
        self.FC.Kepco.BEEP()

    @ThD.as_thread
    def Measure_dPdH(self, fields, freq, file_name,
                     dH=2, average_factor=10, repetitions=2,
                     hold_time=0.0, dH_hold_time=0.01):
        """Measure dP/dH vs DC field at a fixed frequency by toggling
        the field between +dH and -dH around each set point."""
        Data = self.Data_dPdH
        Data['Mode'] = 'Fixed frequency'
        self.VNAC.backup_sweep()
        # Single-point sweep at the requested frequency.
        self.VNAC.VNA.Ch1.SetSweep(start=freq, stop=freq, np=1)
        Data['h'] = fields
        Data['f'] = freq
        Data['dH'] = dH
        data_shape = (len(Data['h']), 2, average_factor*repetitions)
        Data['S11'] = numpy.zeros(data_shape, dtype=complex) + numpy.nan
        Data['dP/dH'] = numpy.zeros_like(fields) + numpy.nan
        extra_info = ['',
                      'Mode : Fixed frequency',
                      'Frequency : %(f)0.6f GHz' % {'f':freq/1E9},
                      'Average factor : %(av)d Oe' % {'av':average_factor},
                      'Repetitions : %(rep)d' % {'rep':repetitions},
                      '']
        Data.info = self.Info + '\n'.join(extra_info)
        # Loop for each DC field
        for hi, h in enumerate(fields):
            self.FC.setField(h)
            time.sleep(hold_time)
            i0 = self.FC.Kepco.current
            i_pos = i0 + dH/self.FC.HperOut
            i_neg = i0 - dH/self.FC.HperOut
            for r_i in range(repetitions):
                self.FC.Kepco.current = i_pos
                time.sleep(dH_hold_time)
                for av_i in range(average_factor):
                    n_i = r_i*average_factor + av_i
                    Data['S11'][hi,0,n_i] = self.VNAC.getSData(0, True)
                self.FC.Kepco.current = i_neg
                time.sleep(dH_hold_time)
                for av_i in range(average_factor):
                    n_i = r_i*average_factor + av_i
                    Data['S11'][hi,1,n_i] = self.VNAC.getSData(0, True)
                ThD.check_stop()
            # Mean absorbed power at +dH and -dH -> central difference.
            PAbs_hi = 1 - numpy.abs(Data['S11'][hi])**2
            P_pos = numpy.nanmean(PAbs_hi[0,:])
            P_neg = numpy.nanmean(PAbs_hi[1,:])
            Data['dP/dH'][hi] = (P_pos - P_neg)/(2*dH)
            Plot_dPdH(Data)
            ThD.check_stop()
        if file_name is not None:
            Data.save(file_name)
            Data.savetxt(file_name + '.dPxH', keys=['h', 'dP/dH'])
        self.VNAC.restore_sweep()
        self.FC.TurnOff()
        self.FC.Kepco.BEEP()

    @ThD.as_thread
    def Measure_dPdH_freq(self, field, file_name, dH_source='Solenoid',
                          dH=2, average_factor=20, repetitions=20,
                          hold_time=0.0, dH_hold_time=0.01):
        """Measure dP/dH vs frequency at a fixed DC field.

        The field modulation comes either from the solenoid supply
        ('Solenoid') or the VNA auxiliary voltage output ('Auxiliar').
        """
        Data = self.Data_dPdH
        Data['Mode'] = 'Fixed field ' + dH_source
        Data['h'] = field
        Data['f'] = self.VNAC.frequencies
        Data['dH'] = numpy.atleast_1d(dH)
        data_shape = (len(Data['f']), 2, average_factor*repetitions)
        Data['S11'] = numpy.zeros(data_shape, dtype=complex) + numpy.nan
        Data['dP/dH'] = numpy.zeros_like(Data['f']) + numpy.nan
        extra_info = ['',
                      'Mode : Fixed field %s' % dH_source,
                      'Field : %0.2f Oe' % field,
                      'Freq : %0.2f - %0.2f GHz' % (Data['f'].min()/1E9, Data['f'].max()/1E9),
                      'dH : %s' % dH,
                      'Average factor : %d' % average_factor,
                      'Repetitions : %d' % repetitions,
                      '']
        Data.info = self.Info + '\n'.join(extra_info)
        print('*** Measuring dP/dH fixed field***')
        print('\n'.join(extra_info))
        self.FC.setField(field)
        i0 = self.FC.Kepco.current
        # A scalar dH becomes a symmetric [+dH, -dH] excursion pair.
        if len(Data['dH']) == 1:
            Data['dH'] = [dH, -dH]
        i_pos = i0 + Data['dH'][0]/self.FC.HperOut
        i_neg = i0 + Data['dH'][-1]/self.FC.HperOut
        for r_i in range(repetitions):
            # Bug fix: `dH_source is 'Solenoid'` compared identity, not
            # equality; it only worked through CPython string interning.
            if dH_source == 'Solenoid':
                self.FC.Kepco.current = i_pos
            elif dH_source == 'Auxiliar':
                self.VNAC.VNA.auxiliar_voltage_output1 = Data['dH'][0]
            time.sleep(dH_hold_time)
            for av_i in range(average_factor):
                n_i = r_i*average_factor + av_i
                Data['S11'][:,0,n_i] = self.VNAC.getSData(0, True)
            if dH_source == 'Solenoid':
                self.FC.Kepco.current = i_neg
            elif dH_source == 'Auxiliar':
                self.VNAC.VNA.auxiliar_voltage_output1 = Data['dH'][-1]
            time.sleep(dH_hold_time)
            for av_i in range(average_factor):
                n_i = r_i*average_factor + av_i
                Data['S11'][:,1,n_i] = self.VNAC.getSData(0, True)
            ThD.check_stop()
            PAbs = 1 - numpy.abs(Data['S11'])**2
            P_pos = numpy.nanmean(PAbs[:,0,:], axis=-1)
            P_neg = numpy.nanmean(PAbs[:,1,:], axis=-1)
            Data['dP/dH'] = (P_pos - P_neg)/(2*(Data['dH'][0]-Data['dH'][-1]))
            Plot_dPdH(Data)
            ThD.check_stop()
        print('Done!')
        if dH_source == 'Solenoid':
            self.FC.Kepco.current = i0
        elif dH_source == 'Auxiliar':
            self.VNAC.VNA.auxiliar_voltage_output1 = 0
        if file_name is not None:
            Data.save(file_name)
            Data.savetxt(file_name + '.dPxf', keys=['f', 'dP/dH'])

    @ThD.as_thread
    def Improve_dPdH_freq(self, file_name=None,
                          average_factor=20, repetitions=1,
                          hold_time=0.0, dH_hold_time=0.01):
        """Accumulate extra averages onto the previous Measure_dPdH_freq
        run (reuses Data_dPdH['h'], ['f'] and ['dH'])."""
        Data = self.Data_dPdH
        if 'Solenoid' in Data['Mode']:
            dH_source = 'Solenoid'
        elif 'Auxiliar' in Data['Mode']:
            dH_source = 'Auxiliar'
        field = Data['h']
        dH = Data['dH']
        data_shape = (len(Data['f']), 2, average_factor*repetitions)
        extraS11 = numpy.zeros(data_shape, dtype=complex) + numpy.nan
        offset = Data['S11'].shape[2]-1
        Data['S11'] = numpy.dstack((Data['S11'], extraS11))
        extra_info = ['Extra Average factor : %d' % average_factor,
                      'Extra Repetitions : %d' % repetitions,
                      '']
        Data.info = self.Info + '\n'.join(extra_info)
        print('*** Improving dP/dH fixed field***')
        print('\n'.join(extra_info))
        self.FC.setField(field)
        i0 = self.FC.Kepco.current
        i_pos = i0 + Data['dH'][0]/self.FC.HperOut
        # Bug fix: Data['dH'][-1] already holds the *negative* excursion
        # (Measure_dPdH_freq stores [dH, -dH]), so it must be added, not
        # subtracted; the original `i0 - Data['dH'][-1]/...` drove the
        # field to the positive excursion on both half-cycles.
        i_neg = i0 + Data['dH'][-1]/self.FC.HperOut
        for r_i in range(repetitions):
            if dH_source == 'Solenoid':
                self.FC.Kepco.current = i_pos
            elif dH_source == 'Auxiliar':
                self.VNAC.VNA.auxiliar_voltage_output1 = Data['dH'][0]
            time.sleep(dH_hold_time)
            for av_i in range(average_factor):
                n_i = r_i*average_factor + av_i + offset
                Data['S11'][:,0,n_i] = self.VNAC.getSData(0, True)
            if dH_source == 'Solenoid':
                self.FC.Kepco.current = i_neg
            elif dH_source == 'Auxiliar':
                self.VNAC.VNA.auxiliar_voltage_output1 = Data['dH'][-1]
            time.sleep(dH_hold_time)
            for av_i in range(average_factor):
                n_i = r_i*average_factor + av_i + offset
                Data['S11'][:,1,n_i] = self.VNAC.getSData(0, True)
            ThD.check_stop()
            PAbs = 1 - numpy.abs(Data['S11'])**2
            P_pos = numpy.nanmean(PAbs[:,0,:], axis=-1)
            P_neg = numpy.nanmean(PAbs[:,1,:], axis=-1)
            Data['dP/dH'] = (P_pos - P_neg)/(2*(Data['dH'][0]-Data['dH'][-1]))
            Plot_dPdH(Data)
            ThD.check_stop()
        print('Done!')
        if dH_source == 'Solenoid':
            self.FC.Kepco.current = i0
        elif dH_source == 'Auxiliar':
            self.VNAC.VNA.auxiliar_voltage_output1 = 0
        if file_name is not None:
            Data.save(file_name)
            Data.savetxt(file_name + '.dPxf', keys=['f', 'dP/dH'])

    def Stop(self, TurnOff=True):
        """Signal the running Measure thread to stop and wait for it.

        NOTE(review): only self.Measure is stopped/joined — the dPdH
        measurement threads are not; confirm whether that is intended.
        NOTE(review): other methods beep via self.FC.Kepco.BEEP();
        confirm FieldController exposes BEEP() directly as used here.
        """
        print('Stoping...')
        self.FC.BEEP()
        self.Measure.stop()
        if self.Measure.thread is not None:
            self.Measure.thread.join()
        time.sleep(1)
        self.FC.BEEP()
        time.sleep(0.1)
        self.FC.BEEP()
        print('DONE')
        if TurnOff:
            print('Turning field OFF')
            self.FC.TurnOff()
            print('DONE')
def field_span(center, span, n_pts, hmin=0, hmax=20000):
    """Return n_pts equally spaced field values centered on *center*
    and covering *span*, keeping only values inside [hmin, hmax]
    (out-of-range points are dropped, not clamped)."""
    candidates = numpy.linspace(center - span/2, center + span/2, n_pts)
    in_range = (candidates >= hmin) & (candidates <= hmax)
    return candidates[in_range]
| StarcoderdataPython |
11323290 | <gh_stars>1-10
# Read the space-separated license numbers (Advent of Code 2018, day 8)
# into a flat list of string tokens consumed by parseChild below.
with open("../input/day8.txt", 'r') as inputFile:
    data = inputFile.read()
data = data.split(" ")
class Node():
    """One node of the license tree: an ordered list of child nodes
    plus a list of integer metadata entries, both initially empty."""

    def __init__(self):
        self.children = []
        self.metadata = []
def parseChild(parent):
    """Recursively parse one node from the front of the global `data`
    token stream (destructive: tokens are popped as they are read).

    Stream layout per node: child count, metadata count, then the
    children (recursively), then the metadata values. The new node is
    appended to *parent*'s children (pass None for the root) and
    returned.
    """
    newNode = Node()
    if parent is not None:
        parent.children.append(newNode)
    # Header: number of children followed by number of metadata entries.
    numChildren = int(data.pop(0))
    numMetadata = int(data.pop(0))
    if numChildren > 0:
        for i in range(numChildren):
            parseChild(newNode)
    # Metadata entries come after all of this node's children.
    for i in range(numMetadata):
        value = int(data.pop(0))
        newNode.metadata.append(value)
    return newNode
# Parse the whole token stream; the first node is the root (no parent).
root = parseChild(None)
# Part 1
def traversePart1(node):
    """Return the sum of every metadata entry in *node*'s subtree."""
    subtotal = sum(traversePart1(child) for child in node.children)
    return subtotal + sum(node.metadata)
# Part 1 answer: sum of all metadata entries in the whole tree.
print(traversePart1(root))
# Part 2
def traversePart2(node):
    """Return the node's "value" (AoC 2018 day 8, part 2).

    A node without children is worth the sum of its metadata. A node
    with children treats each metadata entry as a 1-based child index:
    entry i refers to child i-1, and entries of 0 or beyond the number
    of children contribute nothing.
    """
    mySum = 0
    if len(node.children) > 0:
        for entry in node.metadata:
            childIdx = entry - 1
            # Bug fix: a metadata entry of 0 gave childIdx == -1, which
            # wrongly indexed children[-1] (the last child); per the
            # puzzle rules it must contribute nothing.
            if 0 <= childIdx < len(node.children):
                mySum += traversePart2(node.children[childIdx])
    else:
        mySum += sum(node.metadata)
    return mySum
print(traversePart2(root)) | StarcoderdataPython |
5082834 | from .core.models import Squad, Group, Project, Build, Environment, Test, Metric, TestRun, SquadObjectException
from .utils import split_build_url, first, split_group_project_slug
squad = Squad()
def compare_builds(baseline_id, build_id):
    """Compare two builds by id, delegating to Project.compare_builds."""
    return Project.compare_builds(baseline_id, build_id)
def retrieve_latest_builds(project_full_name, count=10):
    """Fetch the *count* most recent builds of the given project
    ('group/project' full name)."""
    return squad.builds(project__full_name=project_full_name, count=count)
def retrieve_build_results(build_url):
    """Return per-environment test and metric results for the build at
    *build_url*, or None when the build cannot be found.

    Result shape: {env_slug: {'tests': {suite: {test: status}},
                              'metrics': {suite: {metric: value}}}}.
    """
    group_slug, project_slug, build_version = split_build_url(build_url)
    project_full_name = '%s/%s' % (group_slug, project_slug)
    builds = squad.builds(count=1, project__full_name=project_full_name, version=build_version)
    build = first(builds)
    if not build:
        return None
    results = {}
    # bucket_suites=True groups each testrun's tests/metrics by suite.
    testruns = build.testruns(bucket_suites=True)
    if len(testruns):
        for _id in testruns.keys():
            testrun = testruns[_id]
            test_suites = {}
            for suite in testrun.test_suites:
                test_suites[suite.name] = {t.short_name: t.status for t in suite.tests().values()}
            metric_suites = {}
            for suite in testrun.metric_suites:
                metric_suites[suite.name] = {m.name: m.result for m in suite.metrics.values()}
            results[testrun.environment.slug] = {'tests': test_suites, 'metrics': metric_suites}
    return results
def submit_results(group_project_slug=None, build_version=None, env_slug=None, tests=None, metrics=None, log=None, metadata=None, attachments=None):
    """Build a TestRun for *build_version* of *group_project_slug* in
    environment *env_slug* and submit the given results.

    *tests* maps a test name either to a plain status value or to a dict
    with 'result' and optional 'log' entries; *metrics* maps metric
    names to measured values. Returns TestRun.submit_results()'s result.
    """
    # Bug fix: the original used mutable default arguments
    # (tests={}, metrics={}); normalize None to fresh dicts instead.
    if tests is None:
        tests = {}
    if metrics is None:
        metrics = {}
    group_slug, project_slug = split_group_project_slug(group_project_slug)
    # TODO: validate input
    group = Group()
    project = Project()
    build = Build()
    testrun = TestRun()
    environment = Environment()
    group.slug = group_slug
    project.group = group
    project.slug = project_slug
    environment.slug = env_slug
    environment.project = project
    build.project = project
    build.version = build_version
    testrun.log = log
    testrun.build = build
    testrun.metadata = metadata
    testrun.attachments = attachments
    testrun.environment = environment
    for test_name in tests.keys():
        test = Test()
        test.name = test_name
        testrun.add_test(test)
        # Dict form carries an explicit result and an optional log.
        if isinstance(tests[test_name], dict):
            test.status = tests[test_name].get('result')  # raise error if result is invalid
            test.log = tests[test_name].get('log')
        else:
            test.status = tests[test_name]
    for metric_name in metrics.keys():
        metric = Metric()
        metric.name = metric_name
        metric.result = metrics[metric_name]
        testrun.add_metric(metric)
    return testrun.submit_results()
def create_or_update_project(group_slug=None, slug=None, name=None, description=None, settings=None,
                             is_public=None, html_mail=None, moderate_notifications=None, is_archived=None,
                             email_template=None, plugins=None, important_metadata_keys=None,
                             wait_before_notification_timeout=None, notification_timeout=None,
                             data_retention=None, overwrite=False):
    """Create project *slug* in group *group_slug*, or update it when it
    exists and *overwrite* is True.

    Only keyword arguments that are provided (not None/empty) are
    applied. Returns (project, []) on success, or (None, errors) with a
    list of human-readable messages on failure.

    NOTE(review): email_template is accepted but never applied to the
    project — confirm whether that is intentional.
    """
    errors = []
    group = None
    project = None
    if group_slug is None:
        errors.append('Group slug is required')
        return None, errors
    group = first(squad.groups(slug=group_slug))
    if group is None:
        errors.append('Group "%s" not found' % group_slug)
        return None, errors
    if slug is None:
        errors.append('Project slug is required')
        return None, errors
    project = group.project(slug)
    if project is not None:
        if not overwrite:
            errors.append('Project exists already')
            return None, errors
    else:
        project = Project()
        project.group = group
        project.slug = slug
    if name:
        project.name = name
    if plugins:
        project.enabled_plugins_list = plugins
    if settings:
        project.project_settings = settings
    if html_mail is not None:
        project.html_mail = html_mail
    if is_public is not None:
        project.is_public = is_public
    if is_archived is not None:
        project.is_archived = is_archived
    if description:
        project.description = description
    if data_retention is not None:
        project.data_retention_days = data_retention
    if notification_timeout is not None:
        project.notification_timeout = notification_timeout
    if moderate_notifications is not None:
        project.moderate_notifications = moderate_notifications
    if important_metadata_keys:
        # Accept either a ready newline-separated string or a list of keys.
        # (Idiom fix: was `type(...) == list`; isinstance is the correct check.)
        project.important_metadata_keys = '\n'.join(important_metadata_keys) if isinstance(important_metadata_keys, list) else important_metadata_keys
    if wait_before_notification_timeout is not None:
        project.wait_before_notification = wait_before_notification_timeout
    try:
        project.save()
    except SquadObjectException as e:
        errors.append(str(e))
    if len(errors):
        return None, errors
    return project, []
| StarcoderdataPython |
6615505 | import psutil
import re
from pypresence import Presence
from time import time, sleep
from win32gui import GetForegroundWindow, GetWindowText
from win32process import GetWindowThreadProcessId
# Discord Rich Presence client for this application id; connects at
# import time.
client = Presence(659889732939022366)
client.connect()
# Last activity string pushed to Discord; '' forces the first update.
last_activity = ''
def get_current_process():
    """Return (window title, process executable name) for the window
    that currently has keyboard focus."""
    focused = GetForegroundWindow()
    # GetWindowThreadProcessId returns (thread id, ..., process id);
    # the last element is the process id.
    ids = GetWindowThreadProcessId(focused)
    title = GetWindowText(focused)
    executable = psutil.Process(ids[-1]).name()
    return title, executable
# Main loop: poll the focused window and push it to Discord Rich
# Presence. Updates are rate-limited to one per 15 s when the activity
# changes; otherwise re-poll every 2 s.
while True:
    w_t, p_n = get_current_process()
    activity = w_t or 'desktop'
    # cleaned_pn = p_n.split('.')[0]
    # if re.match(r'^[a-z]', cleaned_pn):
    # cleaned_pn = cleaned_pn.title()
    if activity != last_activity:
        last_activity = activity
        # Discord caps the 'details' field at 128 characters.
        client.update(details=activity[:128], state=p_n, start=time(), instance=False)
        sleep(15)
    else:
        sleep(2)
| StarcoderdataPython |
9661812 | <gh_stars>10-100
"""
Core logic
"""
from maya_mock.base.node import MockedNode
from maya_mock.base.port import MockedPort
from maya_mock.base.connection import MockedConnection
from maya_mock.base.session import MockedSession
from maya_mock.base.schema import MockedSessionSchema
| StarcoderdataPython |
1794252 | <reponame>Joan95/TFM
"""
<Name>
tuf/encoding/snapshot_asn1_coder.py
<Purpose>
This module contains conversion functions (get_asn_signed and get_json_signed)
for converting Snapshot metadata from TUF's standard Python dictionary
metadata format (usually serialized as JSON) to an ASN.1 format that conforms
to pyasn1 specifications and TUF's new ASN.1 specification.
<Functions>
get_asn_signed(pydict_signed)
get_json_signed(asn_signed) # TODO: Rename to get_pydict_signed in all mods
"""
from __future__ import unicode_literals
from pyasn1.type import tag
from tuf.encoding.metadata_asn1_definitions import *
from tuf.encoding import hex_from_octetstring
import tuf.conf
import calendar
from datetime import datetime #import datetime
def get_asn_signed(pydict_signed):
  """
  Given a Python dictionary conformant to TUF's standard data specification for
  Snapshot metadata (tuf.formats.SNAPSHOT_SCHEMA), convert to the new ASN.1
  format for Snapshot metadata, which derives from Snapshot*.asn1.
  """
  json_fileinfos = pydict_signed['meta']
  target_role_fileinfos = TargetRoleFileInfos().subtype(
      implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
  number_of_target_role_files = 0
  root_fileinfo = None
  # Sort filenames so the order of fileinfo entries in the DER encoding
  # (and therefore any hash/signature over it) is deterministic.
  sorted_filenames = sorted(json_fileinfos)
  for filename in sorted_filenames:
    pydict_fileinfo = json_fileinfos[filename]
    # TODO: Consider checking the file itself to determine format... but have
    # to make sure we only mess with the real root metadata role file. (Don't
    # accidentally hit other metadata files?)
    if filename == 'root.' + tuf.conf.METADATA_FORMAT:
      # If we're dealing with the root metadata file, we expect hashes and
      # length in addition to just filename and version.
      # TODO: Check if we've already added a root file. Raise error.
      # TODO: Add ASN1_Conversion
      root_fileinfo = RootRoleFileInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))
      root_fileinfo['filename'] = filename
      root_fileinfo['version'] = pydict_fileinfo['version']
      if 'length' not in pydict_fileinfo or 'hashes' not in pydict_fileinfo:
        # TODO: Better error
        raise tuf.Error('ASN1 Conversion failure for Snapshot role: given '
            'fileinfo for assumed root metadata file (filename: ' +
            repr(filename) + '), found either hashes or length missing.')
      root_fileinfo['length'] = pydict_fileinfo['length']
      hashes = Hashes().subtype(
          implicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatSimple, 4))
      number_of_hashes = 0
      # We're going to generate a list of hashes from the dictionary of hashes.
      # The DER will contain this list, and the order of items in this list will
      # affect hashing of the DER, and therefore signature verification.
      # We have to make the order deterministic.
      sorted_hashtypes = sorted(pydict_fileinfo['hashes'])
      for hashtype in sorted_hashtypes:
        hashval = pydict_fileinfo['hashes'][hashtype]
        hash = Hash()
        hash['function'] = int(HashFunction(hashtype))
        hash['digest'] = OctetString(hexValue=hashval)
        hashes[number_of_hashes] = hash
        number_of_hashes += 1
      root_fileinfo['hashes'] = hashes
      root_fileinfo['numberOfHashes'] = number_of_hashes
    else:
      # Otherwise (if we're not dealing with the fileinfo for the root metadata
      # file), we're dealing with a target role file (the main Targets role
      # file or a delegated Targets role file), so we only expect filename and
      # version.
      if 'length' in pydict_fileinfo or 'hashes' in pydict_fileinfo:
        # TODO: Better error
        raise tuf.Error('ASN1 Conversion failure for Snapshot role: given '
            'fileinfo for assumed Targets or delegated metadata file '
            '(filename: ' +repr(filename) + '), found either hashes or length, '
            'which are not expected in Snapshot for a Targets role file.')
      fileinfo = TargetRoleFileInfo()
      fileinfo['filename'] = filename
      fileinfo['version'] = pydict_fileinfo['version']
      target_role_fileinfos[number_of_target_role_files] = fileinfo
      number_of_target_role_files += 1
  # Loop complete, all fileinfo (root, targets, any delegated targets)
  # loaded into target_role_fileinfos and root_fileinfo.
  if len(target_role_fileinfos) < 1:
    raise tuf.Error('ASN1 Conversion failure for Snapshot role: Found no '
        'Targets role file info entries or conversion failed for all fileinfo '
        'for Targets role files.')
  if root_fileinfo is None:
    raise tuf.Error('ASN1 Conversion failure for Snapshot role: Found no '
        'Root role file info entry or conversion failed for Root fileinfo.')
  snapshot_metadata = SnapshotMetadata().subtype(implicitTag=tag.Tag(
      tag.tagClassContext, tag.tagFormatConstructed, 2))
  snapshot_metadata['numberOfTargetRoleFiles'] = number_of_target_role_files
  snapshot_metadata['targetRoleFileInfos'] = target_role_fileinfos
  snapshot_metadata['rootRoleFileInfo'] = root_fileinfo
  # Construct the 'signed' entry in the Snapshot metadata file, in ASN.1.
  asn_signed = Signed().subtype(implicitTag=tag.Tag(
      tag.tagClassContext, tag.tagFormatConstructed, 0))
  asn_signed['type'] = int(RoleType('snapshot'))
  # Expiry is converted from TUF's ISO 8601 UTC string into a Unix
  # timestamp for the ASN.1 encoding.
  asn_signed['expires'] = calendar.timegm(datetime.strptime(
      pydict_signed['expires'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
  asn_signed['version'] = pydict_signed['version']
  asn_signed['body'] = SignedBody().subtype(explicitTag=tag.Tag(
      tag.tagClassContext, tag.tagFormatConstructed, 3))
  asn_signed['body']['snapshotMetadata'] = snapshot_metadata
  return asn_signed
def get_json_signed(asn_metadata):
  """
  Given an ASN.1 object conforming to the new ASN.1 metadata definitions
  derived from Snapshot*.asn1, return a Python dictionary containing the same
  information, conformant to TUF's standard data specification for Snapshot
  metadata (tuf.formats.SNAPSHOT_SCHEMA).
  TUF internally does not use the ASN.1, converting it in and out of the
  standard Python dictionary formats defined in tuf.formats.
  """
  pydict_signed = {}
  # TODO: Normalize this function's interface: the asn_metadata given is
  # actually both 'signed' and 'signatures', which is strange since the
  # get_asn_signed function takes only the contents of the 'signed' entry, and
  # this function only returns the contents of a corresponding 'signed' entry.
  # (It is confusingly inconsistent to take the full object, return a converted
  # partial object, and have parallel naming and placement with a function that
  # takes and returns a partial object.)
  # This change has to percolate across all modules, however.
  asn_signed = asn_metadata['signed'] # This should be the argument instead of asn_metadata.
  # Should check this from the ASN, but... the ASN definitions don't actually
  # USE a type, so I'm entirely basing the type encoded on the filename. This
  # is bad, I think. Could it be a security issue to not sign the metadata type
  # in there? The metadata types are pretty distinct, but... it's still best to
  # fix this at some point.
  pydict_signed['_type'] = 'Snapshot'
  # Convert the Unix timestamp stored in ASN.1 back to the ISO 8601
  # 'Z'-suffixed UTC string that TUF's JSON format uses.
  pydict_signed['expires'] = datetime.utcfromtimestamp(
      asn_signed['expires']).isoformat()+'Z'
  pydict_signed['version'] = int(asn_signed['version'])
  # Next, extract the fileinfo for each role file described in the ASN.1
  # Snapshot metadata.
  snapshot_metadata = asn_signed['body']['snapshotMetadata']
  number_of_target_role_files = int(
      snapshot_metadata['numberOfTargetRoleFiles'])
  asn_target_fileinfos = snapshot_metadata['targetRoleFileInfos']
  pydict_fileinfos = {}
  # Copy the Targets and delegated roles fileinfos:
  for i in range(number_of_target_role_files):
    asn_role_fileinfo = asn_target_fileinfos[i]
    filename = str(asn_role_fileinfo['filename'])
    pydict_fileinfos[filename] = {'version': int(asn_role_fileinfo['version'])}
  # Add in the Root role fileinfo:
  # In the Python dictionary format for Snapshot metadata, these all exist in
  # one dictionary.
  filename = str(snapshot_metadata['rootRoleFileInfo']['filename'])
  version = int(snapshot_metadata['rootRoleFileInfo']['version'])
  length = int(snapshot_metadata['rootRoleFileInfo']['length'])
  if filename in pydict_fileinfos:
    raise tuf.Error('ASN1 Conversion failure for Snapshot role: duplicate '
        'fileinfo entries detected: filename ' + str(filename) + ' identified '
        'both as Root role and Targets role in Snapshot metadata.')
  # Populate the hashes in the fileinfo describing the Root role.
  hashes = {}
  for i in range(snapshot_metadata['rootRoleFileInfo']['numberOfHashes']):
    asn_hash_info = snapshot_metadata['rootRoleFileInfo']['hashes'][i]
    # This is how we'd extract the name of the hash function from the
    # enumeration (namedValues) that is in the class (HashFunction), indexed by
    # the underlying "value" of asn_hash_info. The [0] at the end selects
    # the string description from a 2-tuple of e.g. ('sha256', 1), where 1 is
    # the value in the enum.
    # TODO: Should probably make this its own function. The following should
    # work:
    # def translate_pyasn_enum_to_value(asn_enum_value):
    #   return asn_enum_value.namedValues[asn_enum_value][0]
    #
    hashtype = asn_hash_info['function'].namedValues[asn_hash_info['function']]
    hashval = hex_from_octetstring(asn_hash_info['digest'])
    hashes[hashtype] = hashval
  # Finally, add all the information gathered about the Root role.
  pydict_fileinfos[filename] = {
      'version': version,
      'length': length,
      'hashes': hashes}
  pydict_signed['meta'] = pydict_fileinfos
  return pydict_signed
| StarcoderdataPython |
5197672 | <gh_stars>1-10
from django.conf.urls import url
from django.contrib.auth.views.login import login, logout_then_login
from usuarios.views import RegistrarUsuarioView
# NOTE(review): the import `django.contrib.auth.views.login` at the top
# of this file looks wrong — the login/logout views live in
# `django.contrib.auth.views`; verify the import path.
urlpatterns = [
    url(r'^registrar/$', RegistrarUsuarioView.as_view(), name='registrar'),
    url(r'^login/$', login,
        {'template_name': 'login.html'}, name='login'),
    # Bug fix: this pattern was also named 'login', shadowing the entry
    # above in URL reversing; the logout view gets its own name.
    url(r'^logout/$', logout_then_login,
        {'login_url': '/login/'}, name='logout'),
]
| StarcoderdataPython |
320328 | <gh_stars>0
import requests
import json
db = "data/"
def test_face_match():
    """POST two local images to the face_match endpoint and print the
    JSON response (requires the service on port 5000)."""
    url = 'http://0.0.0.0:5000/face_match'
    # Bug fix: the original opened both files and never closed them;
    # context managers guarantee the handles are released even when the
    # request raises.
    with open(db + '1.jpg', 'rb') as file1, open(db + '2.jpeg', 'rb') as file2:
        files = {'file1': file1, 'file2': file2}
        resp = requests.post(url, files=files)
    print(json.dumps(resp.json()))
if __name__ == '__main__':
test_face_match() | StarcoderdataPython |
5072962 | <reponame>MatthewScholefield/autodo
from collections import namedtuple
import pandas
from os.path import join
from prettyparse import Usage
from autodo.scripts.base_script import BaseScript
from autodo.stage_three_predictor import StageThreePredictor
# One output row of stage three: predicted 3-D position (x, y, z) of a
# single detected box in an image.
StageThreeRow = namedtuple('StageThreeRow', 'image_id box_id x y z')


class RunStageThreeScript(BaseScript):
    """Run stage-three prediction over the boxes produced by stage two.

    Reads the stage-two CSV of box centers, runs the stage-three predictor
    image by image and optionally writes the predicted positions to a CSV.
    """

    # Bug fix: the usage text was copy-pasted from the training script with a
    # mangled template substitution ("Train the stage three nethreerk").
    usage = Usage('''
        Run the stage three network

        :model_file str
            Model file to load from
        :stage_two_file str
            Csv file with output from stage two
        :image_folder str
            Folder with source images
        :-g --gpu
            Run with GPU
        :-o --output-file str -
            Output csv to write to
    ''')

    def run(self):
        args = self.args
        boxes = pandas.read_csv(args.stage_two_file)
        predictor = StageThreePredictor(args.model_file, args.gpu)
        rows = []
        try:
            # One prediction call per image, over all of that image's boxes.
            for image_id, image_rows in boxes.groupby('image_id'):
                image_filename = join(args.image_folder, '{}.jpg'.format(image_id))
                # Transpose so the two columns become two row vectors.
                xcenters, ycenters = image_rows[['center_x', 'center_y']].values.T
                label = predictor.predict([image_filename], [xcenters], [ycenters])[0]
                for box_id, pos in enumerate(label):
                    rows.append(StageThreeRow(image_id, box_id, *pos))
                print(label)
        except KeyboardInterrupt:
            print('Stopping...')
        finally:
            # Persist whatever was predicted so far, even after an interrupt.
            if args.output_file:
                pandas.DataFrame(data=rows).to_csv(args.output_file)


main = RunStageThreeScript.run_main
| StarcoderdataPython |
351374 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class GetVaultResult(object):
    """
    A collection of values returned by getVault.

    Attributes:
        location: The Azure location where the resource resides.
        sku: The vault's current SKU.
        tags: A mapping of tags assigned to the resource.
        id: The provider-assigned unique ID for this managed resource.
    """
    def __init__(__self__, location=None, sku=None, tags=None, id=None):
        # Validate and store each field in one pass.  A truthy-but-wrong-type
        # value raises; falsy values (None, "", 0, {}) are stored unchecked,
        # matching the behaviour of the generated code.
        for field_name, field_value, expected_type in (
                ('location', location, str),
                ('sku', sku, str),
                ('tags', tags, dict),
                ('id', id, str)):
            if field_value and not isinstance(field_value, expected_type):
                raise TypeError('Expected argument {} to be a {}'.format(
                    field_name, expected_type.__name__))
            setattr(__self__, field_name, field_value)
async def get_vault(name=None, resource_group_name=None):
    """
    Use this data source to access information about an existing Recovery Services Vault.
    """
    # Marshal the arguments into the wire format expected by the provider.
    invoke_args = {
        'name': name,
        'resourceGroupName': resource_group_name,
    }
    raw = await pulumi.runtime.invoke('azure:recoveryservices/getVault:getVault', invoke_args)
    # Unpack the raw invoke result into the typed wrapper; missing keys
    # simply become None.
    return GetVaultResult(
        location=raw.get('location'),
        sku=raw.get('sku'),
        tags=raw.get('tags'),
        id=raw.get('id'))
| StarcoderdataPython |
4928816 |
from abc import ABCMeta, abstractmethod
class Strategy(metaclass=ABCMeta):
    """Abstract interface for a key-value storage backend.

    Concrete backends must implement all four operations below; the class
    cannot be instantiated directly.
    """

    @abstractmethod
    def key_exists(self, key):
        """Check for the presence of *key* in the store."""

    @abstractmethod
    def delete_key(self, key):
        """Remove *key* from the store."""

    @abstractmethod
    def get_data(self, key):
        """Fetch the data stored under *key*."""

    @abstractmethod
    def update_data(self, key, value):
        """Store *value* under *key*."""
class Memory(Strategy):
    # In-memory storage backend stub.  NOTE(review): none of the abstract
    # Strategy methods are overridden, so instantiating Memory still raises
    # TypeError — presumably unfinished; confirm before use.
    def __init__(self):
        pass
class File(Strategy):
    # File-based storage backend stub; nothing implemented yet, so the class
    # remains abstract and cannot be instantiated.
    pass
| StarcoderdataPython |
3436544 | <gh_stars>0
class Solution:
    """LeetCode 935 — Knight Dialer (the original stub had no body at all)."""

    # Keys reachable from each digit by one chess-knight move on a standard
    # phone keypad; 5 reaches nothing and is reached by nothing.
    _MOVES = (
        (4, 6), (6, 8), (7, 9), (4, 8), (0, 3, 9),
        (), (0, 1, 7), (2, 6), (1, 3), (2, 4),
    )
    _MOD = 10 ** 9 + 7

    def knightDialer(self, N: int) -> int:
        """Count distinct length-N numbers dialable by knight moves, mod 1e9+7.

        Dynamic programming: counts[d] is the number of length-k sequences
        ending on digit d; each step redistributes counts along knight moves.
        Runs in O(N) time with O(1) extra space.
        """
        counts = [1] * 10  # every digit is a valid length-1 number
        for _ in range(N - 1):
            nxt = [0] * 10
            for digit, ways in enumerate(counts):
                for dest in self._MOVES[digit]:
                    nxt[dest] = (nxt[dest] + ways) % self._MOD
            counts = nxt
        return sum(counts) % self._MOD
11261699 | <reponame>troyready/runway
"""Split lookup."""
# pylint: disable=arguments-differ,unused-argument
from runway.lookups.handlers.base import LookupHandler
# Registration name under which this lookup handler is exposed.
TYPE_NAME = "split"


class SplitLookup(LookupHandler):
    """Split lookup."""

    @classmethod
    def handle(cls, value, context=None, provider=None, **kwargs):
        """Split the supplied string on the given delimiter, providing a list.

        Args:
            value (str): Parameter(s) given to this lookup, in the form
                ``<delimiter>::<value>``.
            context (:class:`runway.cfngin.context.Context`): Context instance.
            provider (:class:`runway.cfngin.providers.base.BaseProvider`):
                Provider instance.

        Returns:
            str: Looked up value.

        Example:
            ::

                Subnets: ${split ,::subnet-1,subnet-2,subnet-3}

            Would result in the variable `Subnets` getting a list consisting
            of::

                ["subnet-1", "subnet-2", "subnet-3"]

            This is particularly useful when getting an output from another
            stack that contains a list, e.g. the comma-separated subnet
            outputs of a vpc blueprint::

                Subnets: ${split ,::${output vpc::PrivateSubnets}}

        """
        # Everything before the first "::" is the delimiter; everything
        # after it is the text to split.
        delimiter, separator, text = value.partition("::")
        if not separator:
            raise ValueError(
                "Invalid value for split: %s. Must be in "
                "<delimiter>::<text> format." % value
            )
        return text.split(delimiter)
| StarcoderdataPython |
9670517 | import discord
from discord.ext.commands import Greedy
from redbot.core import commands, checks
from redbot.core.utils.chat_formatting import box
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
from .constants import ACTION_CONFIRMATION
from .rules.config.models import BlackOrWhiteList
from .utils import (
error_message,
check_success,
chunks,
thumbs_up_success,
docstring_parameter,
transform_bool,
get_option_reaction,
yes_or_no,
)
from .converters import ToggleBool
from tabulate import tabulate
# Maps each rule command-group name (a GroupCommands attribute defined below)
# to the human-friendly rule name used in help/embed text; the module-level
# loop at the bottom of the file attaches the shared sub-commands to every
# group listed here.
groups = {
    "mentionspamrule": "mention spam",
    "wallspamrule": "wall spam",
    "inviterule": "discord invites",
    "spamrule": "general spam",
    "maxwordsrule": "maximum words",
    "maxcharsrule": "maximum characters",
    "wordfilterrule": "word filter",
    "imagedetectionrule": "image detection",
    "allowedextensionsrule": "Allowed extensions",
}
# Named newline constant, usable inside f-string expressions (which cannot
# contain backslashes on the Python versions this targets).
NEW_LINE = "\n"
# thanks Jackenmen#6607 <3
class GroupCommands:
    """Mixin holding one Red command group per auto-mod rule.

    Each ``*rule`` coroutine below is a command group named after the rule
    attribute it configures (see the module-level ``groups`` mapping); the
    shared sub-commands (toggle/action/delete/whitelistrole/announce/...)
    are attached to these groups by the wrapper factories at the bottom of
    the file.
    """

    def __init__(self, *args, **kwargs):
        # Bot instance, used to resolve user IDs for display in embeds.
        self.bot = kwargs.get("bot")

    """
    Commands specific to allowedextensions
    """

    async def _handle_adding_extension(
        self,
        guild: discord.Guild,
        channels: [discord.TextChannel],
        white_or_blacklist: BlackOrWhiteList,
        extensions: [str],
    ):
        # Extensions are normalised to lower case, then stored via the
        # rule's black- or white-list setter.
        extensions = [ex.lower() for ex in extensions]
        if white_or_blacklist == BlackOrWhiteList.Blacklist:
            await self.allowedextensionsrule.set_blacklist_extensions(guild, extensions, channels)
        else:
            await self.allowedextensionsrule.set_whitelist_extensions(guild, extensions, channels)

    @staticmethod
    async def clean_and_validate_extensions(extensions: str) -> []:
        """
        Rejects extensions with periods, splits list on csv

        Parameters
        ----------
        extensions
            csv string

        Returns
        -------
        list of extensions
        """
        extensions = extensions.split(",")
        has_periods = [ext.startswith(".") for ext in extensions]
        if any(has_periods):
            raise ValueError("Extensions must not contain periods.")
        return extensions

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def allowedextensionsrule(self, ctx):
        """Blacklist or whitelist extensions"""
        pass

    @allowedextensionsrule.group(name="allowlist")
    @checks.mod_or_permissions(manage_messages=True)
    async def _allowlist_extension_group(self, ctx):
        """Add extensions to an allow list"""
        pass

    @_allowlist_extension_group.command(name="channel")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_ext_channel_whitelist(
        self, ctx, channels: Greedy[discord.TextChannel], extensions
    ):
        """Allow list extensions in channels

        Extensions must be a comma separated list, without the period.
        Example: jpeg,png,pfd"""
        try:
            extensions = await self.clean_and_validate_extensions(extensions)
            await self._handle_adding_extension(
                ctx.guild, channels, BlackOrWhiteList.Whitelist, extensions
            )
            return await ctx.send(
                embed=await self.allowedextensionsrule.extension_added_embed(extensions, channels)
            )
        except ValueError as e:
            await ctx.send(error_message(e.args[0]))

    @_allowlist_extension_group.command(name="group")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_ext_group_whitelist(self, ctx, group_name: str, extensions):
        """Allow list extensions in a group

        Extensions must be a comma separated list, without the period.
        Example: jpeg,png,pfd

        Groups must be pre-configured.
        """
        try:
            extensions = await self.clean_and_validate_extensions(extensions)
            channel_groups = await self.get_channel_groups(ctx.guild)
            if group_name not in channel_groups:
                return await ctx.send(error_message(f"`{group_name}` Could not find group."))
            channels = [ctx.guild.get_channel(ch) for ch in channel_groups[group_name]]
            await self._handle_adding_extension(
                ctx.guild, channels, BlackOrWhiteList.Whitelist, extensions
            )
            return await ctx.send(
                embed=await self.allowedextensionsrule.extension_added_embed(extensions, channels)
            )
        except ValueError as e:
            await ctx.send(error_message(e.args[0]))

    @_allowlist_extension_group.command(name="show")
    @checks.mod_or_permissions(manage_messages=True)
    async def _show_ext_whitelist_command(self, ctx):
        """Display currently enabled allow list extensions"""
        current_extensions = await self.allowedextensionsrule.get_whitelist_extensions(ctx.guild)
        embeds = []
        # NOTE(review): `values` is never used below — dead local.
        values = []
        if not current_extensions:
            return await ctx.send("No extensions have been whitelisted.")
        # One embed per stored extension entry; paginate when several exist.
        for index, extension in enumerate(current_extensions):
            embed = discord.Embed(
                title="Whitelisted extensions",
                description=f"To delete run: `{ctx.prefix}allowedextensionsrule allowlist delete {index}`",
            )
            value = box(
                NEW_LINE.join(
                    "+ {0}".format(str(ctx.guild.get_channel(ch))) for ch in extension.channels
                )
                if extension.channels
                else "+ Global",
                "diff",
            )
            extensions = ", ".join(extension.extensions)
            embed.add_field(name=extensions, value=value)
            embeds.append(embed)
        if len(embeds) > 1:
            await menu(ctx, embeds, DEFAULT_CONTROLS)
        else:
            await ctx.send(embed=embeds[0])

    @_allowlist_extension_group.command(name="delete", aliases=["rem"])
    @checks.mod_or_permissions(manage_messages=True)
    async def _delete_whitelist_ext_command(self, ctx, index: int):
        """Delete extension settings"""
        try:
            deleted = await self.allowedextensionsrule.delete_whitelist_extensions(
                ctx.guild, index
            )
            embed = await self.allowedextensionsrule.deleted_extensions_embed(ctx.guild, deleted)
            return await ctx.send(embed=embed)
        except IndexError:
            return await ctx.send(error_message("Invalid index to delete."))
        except AttributeError:
            return await ctx.send(error_message("There is no extensions currently whitelist."))

    @allowedextensionsrule.group(name="denylist")
    @checks.mod_or_permissions(manage_messages=True)
    async def _denylist_extension_group(self, ctx):
        """Add extensions to deny list"""
        pass

    @_denylist_extension_group.command(name="show")
    @checks.mod_or_permissions(manage_messages=True)
    async def _show_ext_blacklist_command(self, ctx):
        """Display currently enabled deny list extensions"""
        current_extensions = await self.allowedextensionsrule.get_blacklist_extensions(ctx.guild)
        embeds = []
        if not current_extensions:
            return await ctx.send("No extensions have been blacklisted.")
        for index, extension in enumerate(current_extensions):
            embed = discord.Embed(
                title="Blacklisted extensions",
                # NOTE(review): help text says "blacklist delete" but the
                # group is registered as "denylist" — confirm the intended
                # command path.
                description=f"To delete run: `{ctx.prefix}allowedextensionsrule blacklist delete {index}`",
            )
            value = box(
                NEW_LINE.join(
                    "+ {0}".format(str(ctx.guild.get_channel(ch))) for ch in extension.channels
                )
                if extension.channels
                else "+ Global",
                "diff",
            )
            embed.add_field(name=", ".join(extension.extensions), value=value)
            embeds.append(embed)
        if len(embeds) > 1:
            await menu(ctx, embeds, DEFAULT_CONTROLS)
        else:
            await ctx.send(embed=embeds[0])

    @_denylist_extension_group.command(name="delete", aliases=["rem"])
    @checks.mod_or_permissions(manage_messages=True)
    async def _delete_blacklist_ext_command(self, ctx, index: int):
        """Delete extension settings"""
        try:
            deleted = await self.allowedextensionsrule.delete_blacklist_extensions(
                ctx.guild, index
            )
            return await ctx.send(
                embed=await self.allowedextensionsrule.deleted_extensions_embed(ctx.guild, deleted)
            )
        except IndexError:
            return await ctx.send(error_message("Invalid index to delete."))
        except AttributeError:
            return await ctx.send(error_message("There is no extensions currently blacklisted."))

    @_denylist_extension_group.command(name="channel")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_ext_channel_blacklist(
        self, ctx, channels: Greedy[discord.TextChannel], extensions
    ):
        """Denylist extensions in channels

        Extensions must be a comma seperated list, without the period.
        Example: jpeg,png,pfd"""
        try:
            extensions = await self.clean_and_validate_extensions(extensions)
            await self._handle_adding_extension(
                ctx.guild, channels, BlackOrWhiteList.Blacklist, extensions
            )
            return await ctx.send(
                embed=await self.allowedextensionsrule.extension_added_embed(extensions, channels)
            )
        except ValueError as e:
            await ctx.send(error_message(e.args[0]))

    @_denylist_extension_group.command(name="group")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_ext_group_blacklist(self, ctx, group_name: str, extensions: str):
        """Denylist extensions in a group

        Extensions must be a comma seperated list, without the period.
        Example: jpeg,png,pfd
        """
        try:
            extensions = await self.clean_and_validate_extensions(extensions)
            channel_groups = await self.get_channel_groups(ctx.guild)
            if group_name not in channel_groups:
                return await ctx.send(error_message(f"`{group_name}` Could not find group."))
            channels = [ctx.guild.get_channel(ch) for ch in channel_groups[group_name]]
            await self._handle_adding_extension(
                ctx.guild, channels, BlackOrWhiteList.Blacklist, extensions
            )
            # NOTE(review): sibling commands call
            # self.allowedextensionsrule.extension_added_embed; calling it on
            # self here likely raises AttributeError — confirm.
            return await ctx.send(embed=await self.extension_added_embed(extensions, channels))
        except ValueError as e:
            await ctx.send(error_message(e.args[0]))

    # commands specific to filterword

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def wordfilterrule(self, ctx):
        """
        Detects if a word matches list of forbidden words.

        This has an optional attribute of `is_cleaned` which will attempt to remove all punctuation from the word
        in sentence. This can aid against people attempting to evade, example: `f.ilte.red`
        """
        pass

    @wordfilterrule.command(name="remove", aliases=["del"])
    @checks.mod_or_permissions(manage_messages=True)
    async def _remove_filter(self, ctx, word: str):
        """Remove a word from the list of filtered words"""
        current_filtered = await self.wordfilterrule.get_filtered_words(ctx.guild)
        current_filtered = [x["word"] for x in current_filtered]
        if word not in current_filtered:
            return await ctx.send(error_message(f"`{word}` is not being filtered."))
        await self.wordfilterrule.remove_filter(ctx.guild, word)
        return await ctx.send(
            check_success(f"`{word}` has been removed from the list of filtered words.")
        )

    @wordfilterrule.command(name="list")
    async def _show_all_filtered_words(self, ctx, word: str = None):
        """
        Show all the filtered words

        `word` adding a word parameter will show information about a single word."""
        current_filtered = await self.wordfilterrule.get_filtered_words(ctx.guild)
        if not word:
            amount_filtered = len(current_filtered)
            channels_filtering = [x["channel"] for x in current_filtered]
            channels_filtering = len(channels_filtering)
            # Four filtered-word entries per embed page.
            chunked = chunks(current_filtered, 4)
            embeds = []
            for chunk in chunked:
                embed = discord.Embed(
                    title="Filtered words",
                    description=f"To show information about a single rule: `{ctx.prefix}{ctx.command} <word>`",
                )
                embed.set_footer(
                    text=f"Filtering {amount_filtered} words across {channels_filtering} channels"
                )
                # NOTE(review): the loop variable `word` shadows the `word`
                # parameter (already falsy in this branch) — harmless here,
                # but fragile.
                for word in chunk:
                    channels = word["channel"]
                    chans = (
                        "\n".join("#{0}".format(ctx.guild.get_channel(w)) for w in channels)
                        if channels
                        else "[Global]"
                    )
                    table = [
                        [
                            (
                                f"Word : [{word['word']}]\n"
                                f"Added by : [{self.bot.get_user(word['author'])}]\n"
                                f"Cleaned : [{word['is_cleaned']}]\n"
                            ),
                            chans,
                        ],
                    ]
                    tab = box(tabulate(table, ["Meta", "Channels"], tablefmt="presto"), "ini")
                    embed.add_field(name=f"`{word['word']}`", value=tab, inline=False)
                embeds.append(embed)
            if embeds:
                return await menu(ctx, embeds, DEFAULT_CONTROLS)
            else:
                return await ctx.send("There is currently no words being filtered.")
        else:
            try:
                current_word = [x for x in current_filtered if x["word"] == word.lower()][0]
                channels = current_word["channel"]
                chans = (
                    "\n".join("#{0}".format(ctx.guild.get_channel(w)) for w in channels)
                    if channels
                    else "[Global]"
                )
                author = self.bot.get_user(current_word["author"]) or "Not found user."
                embed = discord.Embed(
                    title="Word filtering",
                    description=box(
                        f"Word : [{current_word['word']}]\n"
                        f"Cleaned : [{current_word['is_cleaned']}]\n"
                        f"Added by: [{author}]\n"
                        f"--------\n"
                        f"Channels\n"
                        f"--------\n"
                        f"{chans}",
                        "ini",
                    ),
                )
                return await ctx.send(embed=embed)
            except IndexError:
                # NOTE(review): error_message() is used *without* await
                # elsewhere in this class; awaiting its result here would
                # raise TypeError — confirm and align.
                return await ctx.send(await error_message(f"`{word}` is not being filtered."))

    @wordfilterrule.group(name="add")
    @checks.mod_or_permissions(manage_messages=True)
    async def add_word_to_filter(self, ctx):
        pass

    @add_word_to_filter.command(name="channel")
    async def _add_to_channels(
        self,
        ctx,
        word: str,
        channels: Greedy[discord.TextChannel] = None,
        is_cleaned: bool = False,
    ):
        """Add a word to the list of forbidden words

        `word`: the word to add to the filter
        `channels`: a list of channels to add this word two
        `is_cleaned`: an optional True/False argument that will remove punctuation from the word
        """
        await self.handle_adding_to_filter(ctx, word, channels, is_cleaned)

    @add_word_to_filter.command(name="group")
    async def _add_to_group(self, ctx, word: str, group_name: str, is_cleaned: bool = False):
        """Add a word to a predefined group of channels

        `word`: the word to add to the filter
        `group`: the key name of the group of channels
        `is_cleaned`: an optional True/False argument that will remove punctuation from the message
        """
        channel_groups = await self.get_channel_groups(ctx.guild)
        if group_name not in channel_groups:
            # NOTE(review): same suspect `await error_message(...)` as above.
            return await ctx.send(await error_message(f"`{group_name}` Could not find group."))
        channels = [ctx.guild.get_channel(ch) for ch in channel_groups[group_name]]
        await self.handle_adding_to_filter(ctx, word, channels, is_cleaned)

    async def handle_adding_to_filter(
        self, ctx, word: str, channels: [discord.TextChannel] = None, is_cleaned: bool = False
    ):
        # Shared implementation for both `add channel` and `add group`:
        # reject duplicates, store the word, then confirm with an embed.
        word = word.lower()
        current_filtered = await self.wordfilterrule.get_filtered_words(ctx.guild)
        for values in current_filtered:
            if word in values["word"]:
                return await ctx.send(error_message(f"`{word}` is already being filtered."))
        await self.wordfilterrule.add_to_filter(
            guild=ctx.guild, word=word, author=ctx.author, channels=channels, is_cleaned=is_cleaned
        )
        nl = "\n"
        chans = nl.join("+ {0}".format(w) for w in channels) if channels else "+ Global"
        fmt_box = box(f"Word : [{word}]\nCleaned : [{is_cleaned}]\n", "ini")
        embed = discord.Embed(
            title=f"Word added",
            description=f"You can remove this word from the filter by running the command: `{ctx.prefix}wordfilterrule remove {word}`",
        )
        embed.add_field(name="Word details", value=fmt_box)
        embed.add_field(name="Channels", value=box(chans, "diff"))
        return await ctx.send(embed=embed)

    # commands specific to maxwords

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def maxwordsrule(self, ctx):
        """
        Detects the maximum allowed length of individual words in a single message
        """
        pass

    @maxwordsrule.command(name="threshold")
    @checks.mod_or_permissions(manage_messages=True)
    async def _maxwords_threshold(self, ctx, max_length: int):
        """Set the threshold for the amount of individual words allowed

        For example, if the threshold is set to 4 this sentence would be caught:
        `The quick brown fox`
        """
        await self.maxwordsrule.set_max_words_length(ctx.guild, max_length)
        await ctx.send(f"`💬` The maximum number of words in one message is set to `{max_length}`")

    # commands specific to maxchars

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def maxcharsrule(self, ctx):
        """Detects the maximum allowed individual characters in a single message"""
        pass

    @maxcharsrule.command(name="threshold")
    @checks.mod_or_permissions(manage_messages=True)
    async def _max_chars_threshold(self, ctx, max_length: int):
        """Set the threshold for the amount of individual characters allowed

        For example, if the threshold is set to 10 this sentence would be caught:
        `This is too long`
        """
        await self.maxcharsrule.set_max_chars_length(ctx.guild, max_length)
        await ctx.send(
            f"`💬` The maximum number of characters in one message is set to `{max_length}`"
        )

    # commands specific to spamrule

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def spamrule(self, ctx):
        """
        Mass spamming by user or content

        1) It checks if a user has spammed more than 10 times in 12 seconds
        2) It checks if the content has been spammed 15 times in 17 seconds.
        """
        pass

    # commands specific to mention spam rule

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def mentionspamrule(self, ctx):
        """Individual mentions spam settings"""
        pass

    @mentionspamrule.command()
    @checks.mod_or_permissions(manage_messages=True)
    async def threshold(self, ctx, threshold: int):
        """Set the max amount of individual mentions allowed

        This overrides the default number of 4 individual mentions on the Mention Spam rule
        """
        before, after = await self.mentionspamrule.set_threshold(ctx, threshold)
        await ctx.send(f"`🎯` Mention threshold changed from `{before}` to `{after}`")

    """
    Commands specific to wall spam rule
    """

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def wallspamrule(self, ctx):
        """Walls of text/emojis settings"""
        pass

    @wallspamrule.group(name="emptyline")
    async def _emptyline_group(self, ctx):
        """Emptyline wallspam settings.

        Emptyline wallspam is defined as a message that has multiple empty lines.

        **Example:**
        ```
        start
        \n\n\n\n\n
        end
        ```
        """
        pass

    @_emptyline_group.command(name="enable")
    async def _emptyline_enable_command(self, ctx, enable: bool):
        """Toggle whether to treat emptyline spam as wallspam"""
        await self.wallspamrule.set_is_emptyline_offensive(ctx.guild, enable)
        return await ctx.send(
            thumbs_up_success(f"Set treating emptyline as wallspam to `{enable}`.")
        )

    @_emptyline_group.command(name="threshold")
    async def _emptyline_threshold_command(self, ctx, threshold: int):
        """Set the amount of new lines to consider a emptyline spam message offensive at.

        Defaults to 5.
        """
        if threshold <= 1:
            return await ctx.send("Emptyline threshold must be above 1.")
        await self.wallspamrule.set_emptyline_threshold(ctx.guild, threshold)
        return await ctx.send(thumbs_up_success(f"Set the emptyline threshold to `{threshold}`"))

    """
    Commands specific to discord invite rule
    """

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def inviterule(self, ctx):
        """Filters discord invites

        Supported type of discord links:

        `discord.gg/inviteCode`
        `discordapp.com/invite/inviteCode`
        """
        pass

    @inviterule.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def whitelistlink(self, ctx):
        """Add/remove/show links allowed

        Adding a link to the whitelist will allow it to be immune from automod actions"""
        pass

    @whitelistlink.command(name="add")
    @checks.mod_or_permissions(manage_messages=True)
    async def add_link(self, ctx, link: str):
        """
        Add a link to not be filtered.

        This must be the full link, supported types:

        discord.gg/inviteCode
        discordapp.com/invite/inviteCode
        """
        try:
            await self.inviterule.add_allowed_link(ctx.guild, link)
        except ValueError:
            return await ctx.send("`👆` That link already exists.")
        return await ctx.send(thumbs_up_success(f"Added `{link}` to the allowed links list."))

    @whitelistlink.command(name="delete")
    @checks.mod_or_permissions(manage_messages=True)
    async def delete_link(self, ctx, link: str):
        """
        Deletes a link from the ignore list

        This must be the full exact match of a link in the list.
        """
        try:
            await self.inviterule.delete_allowed_link(ctx.guild, link)
        except ValueError as e:
            await ctx.send(error_message(f"{e.args[0]}"))

    @whitelistlink.command(name="show")
    @checks.mod_or_permissions(manage_messages=True)
    async def show_links(self, ctx):
        """
        Show a list of links that are not filtered.
        """
        allowed_links = await self.inviterule.get_allowed_links(ctx.guild)
        if allowed_links is not None:
            embed = discord.Embed(
                title="Links that are not filtered by the rule",
                description=", ".join("`{0}`".format(w) for w in allowed_links),
            )
            await ctx.send(embed=embed)
        else:
            await ctx.send(error_message("No links currently allowed."))

    """
    Commands specific to ImageDetection
    """

    @commands.group()
    @checks.mod_or_permissions(manage_messages=True)
    async def imagedetectionrule(self, ctx):
        """
        Detects gore/racy/porn images
        """
        pass

    @imagedetectionrule.command(name="setendpoint")
    async def _set_endpoint(self, ctx, guild_id: int, endpoint: str):
        """Set the endpoint displayed in your Azure Portal"""
        # DM-only: the endpoint is a secret and must not be posted in a guild.
        if ctx.guild:
            return await ctx.send("Please run this command in DMs.")
        try:
            guild = self.bot.get_guild(guild_id)
            if not guild:
                return await ctx.send("That is not a guild.")
            await self.imagedetectionrule.set_endpoint(guild, endpoint)
            await ctx.send("Your endpoint has been set.")
        except ValueError as e:
            await ctx.send(e.args[0])

    @imagedetectionrule.command(name="setkey")
    # NOTE(review): the second parameter is named `endpoint` but carries the
    # API key — rename for clarity when touching this code.
    async def _set_key(self, ctx, guild_id: int, endpoint: str):
        """Set the key 1 displayed in your Azure Portal"""
        if ctx.guild:
            return await ctx.send("Please run this command in DMs.")
        try:
            guild = self.bot.get_guild(guild_id)
            if not guild:
                return await ctx.send("That is not a guild.")
            await self.imagedetectionrule.set_key(guild, endpoint)
            await ctx.send("Your key has been set.")
        except ValueError as e:
            await ctx.send(e.args[0])
def enable_rule_wrapper(group, name, friendly_name):
    """Build the shared `toggle` sub-command for *group* / rule attribute *name*."""

    @group.command(name="toggle")
    @checks.mod_or_permissions(manage_messages=True)
    @docstring_parameter(ToggleBool.fmt_box)
    async def _enable_rule_command(self, ctx, toggle: ToggleBool):
        """
        Enable or disable this rule

        {0}
        """
        rule = getattr(self, name)
        is_enabled = await rule.is_enabled(ctx.guild)
        # No argument (converter yielded None): just report the current state.
        if toggle is None:
            return await ctx.send(f"{name} is `{transform_bool(is_enabled)}`.")
        if is_enabled == toggle:
            return await ctx.send(f"{name} is already `{transform_bool(is_enabled)}`")
        before, after = await rule.toggle_enabled(ctx.guild, toggle)
        await ctx.send(
            f"**{friendly_name.title()}** set from `{transform_bool(before)}` to `{transform_bool(after)}`"
        )

    return _enable_rule_command
def action_to_take__wrapper(group, name, friendly_name):
    """Build the shared `action` sub-command for *group* / rule attribute *name*."""

    @group.command(name="action")
    @checks.mod_or_permissions(manage_messages=True)
    async def _action_to_take_command(self, ctx):
        """
        Choose which action to take on an offensive message

        1) Nothing (still fires event for third-party integration)
        2) DM a role\n
        3) Add a role to offender (Mute role for example)
        4) Kick offender
        5) Ban offender
        """
        rule = getattr(self, name)
        embed = discord.Embed(
            title=f"What action should be taken against {friendly_name}?",
            description=f":one: Nothing (still fires event for third-party integration)\n"
            f":two: DM a role\n"
            f":three: Add a role to offender (Mute role for example)\n"
            f":four: Kick offender\n"
            f":five: Ban offender",
        )
        # get_option_reaction presumably maps the chosen reaction to an
        # action key (e.g. "add_role"); falsy means cancelled/timed out.
        action = await get_option_reaction(ctx, embed=embed)
        if not action:
            return await ctx.send("Okay. Nothings changed.")
        await ctx.send(thumbs_up_success(ACTION_CONFIRMATION[action]))
        if action == "add_role":
            mute_role = await rule.get_mute_role(ctx.guild)
            if mute_role is None:
                # Warn only — the action is still stored below, so the rule
                # will be misconfigured until a role is set.
                await ctx.send(
                    error_message(
                        f"There is no role set. Add one with: `{ctx.prefix}{name} role <role>`"
                    )
                )
        await rule.set_action_to_take(action, ctx.guild)

    return _action_to_take_command
def delete_message_wrapper(group, name, friendly_name):
    """Build the shared `delete` sub-command for *group* / rule attribute *name*."""

    @group.command(name="delete")
    @checks.mod_or_permissions(manage_messages=True)
    async def _delete_message_command(self, ctx):
        """
        Toggles whether message should be deleted on offence

        `manage_messages` perms are needed for this to run.
        """
        rule = getattr(self, name)
        before, after = await rule.toggle_to_delete_message(ctx.guild)
        await ctx.send(
            f"Deleting messages set from `{transform_bool(before)}` to `{transform_bool(after)}`"
        )

    return _delete_message_command
def whitelist_wrapper(group, name, friendly_name):
    """Build the shared `whitelistrole` sub-group for *group*; the add/delete/show
    sub-commands are attached to the returned group by the sibling wrappers."""

    @group.group(name="whitelistrole")
    @checks.mod_or_permissions(manage_messages=True)
    async def whitelistrole(self, ctx):
        """Whitelisting roles settings

        Adding a role to the whitelist means that this role will be immune to automod actions
        """
        pass

    return whitelistrole
def whitelistrole_add_wrapper(group, name, friendly_name):
    """Build the shared `whitelistrole add` sub-command for *group* / rule *name*."""

    @group.command(name="add")
    @checks.mod_or_permissions(manage_messages=True)
    async def _whitelistrole_add_command(self, ctx, role: discord.Role):
        """
        Add a role to be ignored by automod actions

        Passing a role already whitelisted will prompt for deletion
        """
        rule = getattr(self, name)
        try:
            await rule.append_whitelist_role(ctx.guild, role)
        except ValueError:
            # Role already present: offer to remove it instead.
            await ctx.send(f"`{role}` is already whitelisted.", delete_after=30)
            result = await yes_or_no(ctx, f"Would you like to remove `{role}` from the whitelist?")
            if result:
                await rule.remove_whitelist_role(ctx.guild, role)
                await ctx.send(f"`{role}` removed from the whitelist.")
        else:
            # Bug fix: the confirmation used to run unconditionally, so even
            # the duplicate/removal path reported "added to the whitelist."
            await ctx.send(f"`{role}` added to the whitelist.")

    return _whitelistrole_add_command
def whitelistrole_delete_wrapper(group, name, friendly_name):
    """Build the shared `whitelistrole delete` sub-command for *group* / rule *name*."""

    @group.command(name="delete")
    @checks.mod_or_permissions(manage_messages=True)
    async def _whitelistrole_delete_command(self, ctx, role: discord.Role):
        """Delete a role from being ignored by automod actions"""
        rule = getattr(self, name)
        try:
            await rule.remove_whitelist_role(ctx.guild, role)
            return await ctx.send(f"Removed `{role}` from the whitelist.")
        except ValueError:
            # The rule raises ValueError when the role was never whitelisted.
            return await ctx.send(f"`{role}` is not whitelisted.")

    return _whitelistrole_delete_command
def whitelistrole_show_wrapper(group, name, friendly_name):
    """Build the shared `whitelistrole show` sub-command for *group* / rule *name*."""

    @group.command(name="show")
    @checks.mod_or_permissions(manage_messages=True)
    async def _whitelistrole_show_command(self, ctx):
        """Show all whitelisted roles"""
        rule = getattr(self, name)
        all_roles = await rule.get_all_whitelisted_roles(ctx.guild)
        if all_roles:
            desc = ", ".join("`{0}`".format(role) for role in all_roles)
            em = discord.Embed(
                title="Whitelisted roles", description=desc, color=discord.Color.greyple(),
            )
            await ctx.send(embed=em)
        else:
            await ctx.send("`❌` No roles currently whitelisted.")

    return _whitelistrole_show_command
def add_role_wrapper(group, name, friendly_name):
    """Build the shared `role` sub-command (mute-role setter) for *group* / rule *name*."""

    @group.command(name="role")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_role_command(self, ctx, role: discord.Role):
        """
        Set the role to add to offender

        When a rule offence is found and action to take is set to "Add Role", this role is the one that will be added.
        """
        rule = getattr(self, name)
        before, after = await rule.set_mute_role(ctx.guild, role)
        await ctx.send(f"Role to add set from `{before}` to `{after}`")

    return _add_role_command
def add_channel_wrapper(group, name, friendly_name):
    """Build the shared `channels` sub-command for *group* / rule attribute *name*."""

    @group.command(name="channels")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_channel_command(self, ctx, channels: commands.Greedy[discord.TextChannel]):
        """
        Set the channels to enforce this rule on.

        The default setting is global, passing nothing will reset to global.
        """
        rule = getattr(self, name)
        # No channels given: confirm before resetting the rule to global.
        if not channels:
            should_clear = await yes_or_no(ctx, "Would you like to clear the channels?")
            if should_clear:
                channels = []
            else:
                return await ctx.send("Okay, no channels changed.")
        enforcing = await rule.set_enforced_channels(ctx.guild, channels)
        # NOTE(review): get_channel returns None for deleted channels, which
        # would make `.mention` raise here — confirm stored IDs stay valid.
        enforcing_string = "\n".join(
            "• {0}".format(ctx.guild.get_channel(channel).mention) for channel in enforcing
        )
        if channels:
            await ctx.send(f"Okay, done. Enforcing these channels:\n{enforcing_string}")
        else:
            await ctx.send("Channels cleared.")

    return _add_channel_command
def announce_wrapper(group, name, friendly_name):
    """Build the shared `announce` sub-group for *group*; channel/clear/show
    sub-commands are attached to the returned group by the sibling wrappers."""

    @group.group(name="announce")
    @checks.mod_or_permissions(manage_messages=True)
    async def _announce_wrapper(self, ctx):
        """Announce settings"""
        pass

    return _announce_wrapper
def announce_channel_wrapper(group, name, friendly_name):
    """Build the shared `announce channel` sub-command for *group* / rule *name*."""

    @group.command(name="channel")
    @checks.mod_or_permissions(manage_messages=True)
    async def _add_announce_channel_command(self, ctx, channel: discord.TextChannel):
        """
        Choose the channel where announcements should go.

        Selecting a channel enables the rule specific channel announcements.
        """
        rule = getattr(self, name)
        await rule.set_specific_announce_channel(ctx.guild, channel)
        return await ctx.send(
            thumbs_up_success(f"{name} announcements will now be sent to {channel}")
        )

    return _add_announce_channel_command
def announce_clear_wrapper(group, name, friendly_name):
    """Build the shared `announce clear` sub-command for *group* / rule *name*."""

    @group.command(name="clear")
    @checks.mod_or_permissions(manage_messages=True)
    async def _clear_announce_channel_command(self, ctx):
        """
        Clear and disable rule announcing
        """
        rule = getattr(self, name)
        # Bug fix: the guild *instance* must be passed here — the original
        # passed the discord.Guild class object, so the per-guild lookup
        # could never find a configured channel.
        announce_channel = await rule.get_specific_announce_channel(ctx.guild)
        if announce_channel is None:
            return await ctx.send("No channel has been set.")
        await rule.clear_specific_announce_channel(ctx.guild)
        return await ctx.send(thumbs_up_success("Cleared the announcement channel."))

    return _clear_announce_channel_command
def announce_show_wrapper(group, name, friendly_name):
    """Factory: build the `show` subcommand of a rule's announce group."""

    @group.command(name="show", aliases=["settings", "info"])
    @checks.mod_or_permissions(manage_messages=True)
    async def _show_announce_channel_command(self, ctx):
        """
        Show current settings for rule specific announcing
        """
        # FIX: renamed from the copy-pasted `_clear_announce_channel_command`
        # so tracebacks and logs identify the correct command. The registration
        # loop overwrites __name__ anyway, so callers are unaffected.
        rule = getattr(self, name)
        announce_channel = await rule.get_specific_announce_channel(ctx.guild)
        if announce_channel is None:
            return await ctx.send("No channel has been set.")
        return await ctx.send(f"{name} will announce in {announce_channel.mention}")

    return _show_announce_channel_command
def settings_wrapper(group, name, friendly_name):
    """Factory: build the `settings` subcommand, delegating to `automodset show`."""

    @group.command(name="settings")
    @checks.mod_or_permissions(manage_messages=True)
    async def _invoke_settings(self, ctx):
        """
        Show settings for this rule
        """
        show_command = self.bot.get_command("automodset show")
        await ctx.invoke(show_command, name)

    return _invoke_settings
# Stamp out the full command tree for every automod rule: for each
# (attribute name, friendly name) pair, invoke the wrapper factories to build
# the rule's sub-commands, give each closure a unique __name__, and attach it
# to GroupCommands so the commands framework discovers it.
for name, friendly_name in groups.items():
    group = getattr(GroupCommands, name)
    settings = settings_wrapper(group, name, friendly_name)
    settings.__name__ = f"settings_{name}"
    setattr(GroupCommands, f"settings_{name}", settings)
    enable_rule = enable_rule_wrapper(group, name, friendly_name)
    enable_rule.__name__ = f"enable_{name}"
    setattr(GroupCommands, f"enable_{name}", enable_rule)
    action_to_take = action_to_take__wrapper(group, name, friendly_name)
    action_to_take.__name__ = f"action_{name}"
    setattr(GroupCommands, f"action_{name}", action_to_take)
    delete_message = delete_message_wrapper(group, name, friendly_name)
    delete_message.__name__ = f"delete_{name}"
    setattr(GroupCommands, f"delete_{name}", delete_message)
    # whitelist settings
    # whitelist commands inherit whitelist role group
    whitelistrole = whitelist_wrapper(group, name, friendly_name)
    whitelistrole.__name__ = f"whitelistrole_{name}"
    setattr(GroupCommands, f"whitelistrole_{name}", whitelistrole)
    # whitelist group: delete / add / show hang off the whitelistrole group
    whitelistrole_delete = whitelistrole_delete_wrapper(whitelistrole, name, friendly_name)
    whitelistrole_delete.__name__ = f"whitelistrole_delete_{name}"
    setattr(GroupCommands, f"whitelistrole_delete_{name}", whitelistrole_delete)
    # whitelist group
    whitelistrole_add = whitelistrole_add_wrapper(whitelistrole, name, friendly_name)
    whitelistrole_add.__name__ = f"whitelistrole_add_{name}"
    setattr(GroupCommands, f"whitelistrole_add_{name}", whitelistrole_add)
    # whitelist group
    whitelistrole_show = whitelistrole_show_wrapper(whitelistrole, name, friendly_name)
    whitelistrole_show.__name__ = f"whitelistrole_show_{name}"
    setattr(GroupCommands, f"whitelistrole_show_{name}", whitelistrole_show)
    """
    Rule specific announce Settings
    """
    # announce group and its channel/clear/show subcommands
    announce_group = announce_wrapper(group, name, friendly_name)
    announce_group.__name__ = f"announce_group_{name}"
    setattr(GroupCommands, f"announce_group_{name}", announce_group)
    announce_add_channel = announce_channel_wrapper(announce_group, name, friendly_name)
    announce_add_channel.__name__ = f"announce_add_channel_{name}"
    setattr(GroupCommands, f"announce_add_channel_{name}", announce_add_channel)
    announce_clear_channel = announce_clear_wrapper(announce_group, name, friendly_name)
    announce_clear_channel.__name__ = f"announce_clear_channel_{name}"
    setattr(GroupCommands, f"announce_clear_channel_{name}", announce_clear_channel)
    announce_show_channel = announce_show_wrapper(announce_group, name, friendly_name)
    announce_show_channel.__name__ = f"announce_show_channel_{name}"
    setattr(GroupCommands, f"announce_show_channel_{name}", announce_show_channel)
    add_role = add_role_wrapper(group, name, friendly_name)
    add_role.__name__ = f"add_role_{name}"
    setattr(GroupCommands, f"add_role_{name}", add_role)
    add_channel = add_channel_wrapper(group, name, friendly_name)
    add_channel.__name__ = f"add_channel_{name}"
    setattr(GroupCommands, f"add_channel_{name}", add_channel)
| StarcoderdataPython |
8091916 | import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from efficient_frontier import EfficientFrontier
class Stock(object):
    """Container for a single stock's price data."""

    def __init__(self, data):
        # Raw price data; presumably a pandas DataFrame with one column per
        # price series — confirm against callers.
        self.data = data
class Portfolio(object):
# Object that contains information about a investment portfolio.
def __init__(self):
self.portfolio = pd.DataFrame()
self.stocks = {}
self.data = pd.DataFrame()
self.risk_free_rate = 0.03
self.freq = 252
self.ef = None
@property
def freq(self):
return self.__freq
@freq.setter
def freq(self, val):
self.__freq = val
# self._update()
@property
def risk_free_rate(self):
return self.__risk_free_rate
@risk_free_rate.setter
def risk_free_rate(self, val):
self.__risk_free_rate = val
def add_stock(self, stock):
# also add stock data of stock to the dataframe
self._add_stock_data(stock.data)
def _add_stock_data(self, df):
# loop over columns in given dataframe
for datacol in df.columns:
cols = len(self.data.columns)
self.data.insert(loc=cols, column=datacol, value=df[datacol].values)
# set index correctly
self.data.set_index(df.index.values, inplace=True)
# set index name:
self.data.index.rename("Date", inplace=True)
def comp_daily_returns(self):
# Computes the daily returns (percentage change) of all stocks in the portfolio.
# return daily_returns(self.data)
results = self.data.pct_change().dropna(how="all").replace([np.inf, -np.inf], np.nan)
return results
def comp_mean_returns(self, freq=252):
# Computes the mean returns based on historical stock price data.
# return historical_mean_return(self.data, freq=freq)
return self.comp_daily_returns().mean() * freq
def comp_stock_volatility(self, freq=252):
# Computes the Volatilities of all the stocks individually
return self.comp_daily_returns().std() * np.sqrt(freq)
def comp_cov(self):
# Compute the covariance matrix of the portfolio.
daily_returns = self.comp_daily_returns()
return daily_returns.cov()
# optimising the investments with the efficient frontier class
def _get_ef(self):
"""If self.ef does not exist, create and return an instance of
efficient_frontier.EfficientFrontier, else, return the existing instance.
"""
if self.ef is None:
# create instance of EfficientFrontier
self.ef = EfficientFrontier(
self.comp_mean_returns(freq=1),
self.comp_cov(),
risk_free_rate=self.risk_free_rate,
freq=self.freq,
)
return self.ef
def ef_maximum_sharpe_ratio(self, verbose=False):
# Finds the portfolio with the maximum Sharpe Ratio
ef = self._get_ef()
# perform optimisation
opt_weights = ef.maximum_sharpe_ratio()
return opt_weights
def ef_efficient_frontier(self, targets=None):
ef = self._get_ef()
# perform optimisation
efrontier = ef.efficient_frontier(targets)
return efrontier
def ef_plot_efrontier(self):
# Plots the Efficient Frontier
ef = self._get_ef()
# plot efficient frontier
ef.plot_efrontier()
def ef_plot_optimal_portfolios(self):
ef = self._get_ef()
# plot the optimal_portfolios
ef.plot_optimal_portfolios()
def plot_stocks(self, freq=252):
# Plots the Expected annual Returns over annual Volatility of
# the stocks of the portfolio.
# annual mean returns of all stocks
stock_returns = self.comp_mean_returns(freq=freq)
stock_volatility = self.comp_stock_volatility(freq=freq)
# adding stocks of the portfolio to the plot
# plot stocks individually:
plt.scatter(stock_volatility, stock_returns, marker="o", s=50, label="Stocks")
# adding text to stocks in plot:
for i, txt in enumerate(stock_returns.index):
plt.annotate(
txt,
(stock_volatility[i], stock_returns[i]),
xytext=(10, 0),
textcoords="offset points",
label=i,
)
plt.legend()
def _generate_pf_allocation(names=None, data=None):
names = data.columns
weights = [1.0 / len(names) for i in range(len(names))]
return pd.DataFrame({"Allocation": weights, "Name": names})
def build_portfolio(data, pf_allocation=None, datacolumns=["Adj. Close"]):
if pf_allocation is None:
pf_allocation = _generate_pf_allocation(data=data)
# building portfolio:
pf = Portfolio()
for i in range(len(pf_allocation)):
# get name of stock
name = pf_allocation.loc[i].Name
# extract data column(s) of said stock
stock_data = data.loc[:, [name]].copy(deep=True)
# create Stock instance and add it to portfolio
pf.add_stock(Stock(data=stock_data))
return pf
| StarcoderdataPython |
6668096 | <filename>07_list.py
def average_temps(temps):
sum_of_temps = 0
for temp in temps:
sum_of_temps += temp
return sum_of_temps / len(temps)
if __name__ == '__main__':
temps = [21,24,24,22,20,36,59]
average = average_temps(temps)
print('La temperatura promedio es: {}'.format(average)) | StarcoderdataPython |
11273770 | <reponame>jschavesr/LibCST
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from typing import Dict, Mapping, Optional, Set, Union
import libcst as cst
from libcst.helpers.common import ensure_type
TEMPLATE_PREFIX: str = "__LIBCST_MANGLED_NAME_"
TEMPLATE_SUFFIX: str = "_EMAN_DELGNAM_TSCBIL__"
ValidReplacementType = Union[
cst.BaseExpression,
cst.Annotation,
cst.AssignTarget,
cst.Param,
cst.Parameters,
cst.Arg,
cst.BaseStatement,
cst.BaseSmallStatement,
cst.BaseSuite,
cst.BaseSlice,
cst.SubscriptElement,
cst.Decorator,
]
def mangled_name(var: str) -> str:
return f"{TEMPLATE_PREFIX}{var}{TEMPLATE_SUFFIX}"
def unmangled_name(var: str) -> Optional[str]:
if TEMPLATE_PREFIX in var and TEMPLATE_SUFFIX in var:
prefix, name_and_suffix = var.split(TEMPLATE_PREFIX, 1)
name, suffix = name_and_suffix.split(TEMPLATE_SUFFIX, 1)
if not prefix and not suffix:
return name
# This is not a valid mangled name
return None
def mangle_template(template: str, template_vars: Set[str]) -> str:
if TEMPLATE_PREFIX in template or TEMPLATE_SUFFIX in template:
raise Exception("Cannot parse a template containing reserved strings")
for var in template_vars:
original = f"{{{var}}}"
if original not in template:
raise Exception(
f'Template string is missing a reference to "{var}" referred to in kwargs'
)
template = template.replace(original, mangled_name(var))
return template
class TemplateTransformer(cst.CSTTransformer):
def __init__(
self, template_replacements: Mapping[str, ValidReplacementType]
) -> None:
self.simple_replacements: Dict[str, cst.BaseExpression] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.BaseExpression)
}
self.annotation_replacements: Dict[str, cst.Annotation] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.Annotation)
}
self.assignment_replacements: Dict[str, cst.AssignTarget] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.AssignTarget)
}
self.param_replacements: Dict[str, cst.Param] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.Param)
}
self.parameters_replacements: Dict[str, cst.Parameters] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.Parameters)
}
self.arg_replacements: Dict[str, cst.Arg] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.Arg)
}
self.small_statement_replacements: Dict[str, cst.BaseSmallStatement] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.BaseSmallStatement)
}
self.statement_replacements: Dict[str, cst.BaseStatement] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.BaseStatement)
}
self.suite_replacements: Dict[str, cst.BaseSuite] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.BaseSuite)
}
self.subscript_element_replacements: Dict[str, cst.SubscriptElement] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.SubscriptElement)
}
self.subscript_index_replacements: Dict[str, cst.BaseSlice] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.BaseSlice)
}
self.decorator_replacements: Dict[str, cst.Decorator] = {
name: value
for name, value in template_replacements.items()
if isinstance(value, cst.Decorator)
}
# Figure out if there are any variables that we can't support
# inserting into templates.
supported_vars = {
*[name for name in self.simple_replacements],
*[name for name in self.annotation_replacements],
*[name for name in self.assignment_replacements],
*[name for name in self.param_replacements],
*[name for name in self.parameters_replacements],
*[name for name in self.arg_replacements],
*[name for name in self.small_statement_replacements],
*[name for name in self.statement_replacements],
*[name for name in self.suite_replacements],
*[name for name in self.subscript_element_replacements],
*[name for name in self.subscript_index_replacements],
*[name for name in self.decorator_replacements],
}
unsupported_vars = {
name for name in template_replacements if name not in supported_vars
}
if unsupported_vars:
raise Exception(
f'Template replacement for "{next(iter(unsupported_vars))}" is unsupported'
)
def leave_Name(
self, original_node: cst.Name, updated_node: cst.Name
) -> cst.BaseExpression:
var_name = unmangled_name(updated_node.value)
if var_name is None or var_name not in self.simple_replacements:
# This is not a valid name, don't modify it
return updated_node
return self.simple_replacements[var_name].deep_clone()
def leave_Annotation(
self,
original_node: cst.Annotation,
updated_node: cst.Annotation,
) -> cst.Annotation:
# We can't use matchers here due to circular imports
annotation = updated_node.annotation
if isinstance(annotation, cst.Name):
var_name = unmangled_name(annotation.value)
if var_name in self.annotation_replacements:
return self.annotation_replacements[var_name].deep_clone()
return updated_node
def leave_AssignTarget(
self,
original_node: cst.AssignTarget,
updated_node: cst.AssignTarget,
) -> cst.AssignTarget:
# We can't use matchers here due to circular imports
target = updated_node.target
if isinstance(target, cst.Name):
var_name = unmangled_name(target.value)
if var_name in self.assignment_replacements:
return self.assignment_replacements[var_name].deep_clone()
return updated_node
def leave_Param(
self,
original_node: cst.Param,
updated_node: cst.Param,
) -> cst.Param:
var_name = unmangled_name(updated_node.name.value)
if var_name in self.param_replacements:
return self.param_replacements[var_name].deep_clone()
return updated_node
def leave_Parameters(
self,
original_node: cst.Parameters,
updated_node: cst.Parameters,
) -> cst.Parameters:
# A very special case for when we use a template variable for all
# function parameters.
if (
len(updated_node.params) == 1
and updated_node.star_arg == cst.MaybeSentinel.DEFAULT
and len(updated_node.kwonly_params) == 0
and updated_node.star_kwarg is None
and len(updated_node.posonly_params) == 0
and updated_node.posonly_ind == cst.MaybeSentinel.DEFAULT
):
# This parameters node has only one argument, which is possibly
# a replacement.
var_name = unmangled_name(updated_node.params[0].name.value)
if var_name in self.parameters_replacements:
return self.parameters_replacements[var_name].deep_clone()
return updated_node
def leave_Arg(self, original_node: cst.Arg, updated_node: cst.Arg) -> cst.Arg:
# We can't use matchers here due to circular imports
arg = updated_node.value
if isinstance(arg, cst.Name):
var_name = unmangled_name(arg.value)
if var_name in self.arg_replacements:
return self.arg_replacements[var_name].deep_clone()
return updated_node
def leave_SimpleStatementLine(
self,
original_node: cst.SimpleStatementLine,
updated_node: cst.SimpleStatementLine,
) -> cst.BaseStatement:
# We can't use matchers here due to circular imports. We take advantage of
# the fact that a name on a single line will be parsed as an Expr node
# contained in a SimpleStatementLine, so we check for these and see if they
# should be expanded template-wise to a statement of some type.
if len(updated_node.body) == 1:
body_node = updated_node.body[0]
if isinstance(body_node, cst.Expr):
name_node = body_node.value
if isinstance(name_node, cst.Name):
var_name = unmangled_name(name_node.value)
if var_name in self.statement_replacements:
return self.statement_replacements[var_name].deep_clone()
return updated_node
def leave_Expr(
self,
original_node: cst.Expr,
updated_node: cst.Expr,
) -> cst.BaseSmallStatement:
# We can't use matchers here due to circular imports. We do a similar trick
# to the above stanza handling SimpleStatementLine to support templates
# which are trying to substitute a BaseSmallStatement.
name_node = updated_node.value
if isinstance(name_node, cst.Name):
var_name = unmangled_name(name_node.value)
if var_name in self.small_statement_replacements:
return self.small_statement_replacements[var_name].deep_clone()
return updated_node
def leave_SimpleStatementSuite(
self,
original_node: cst.SimpleStatementSuite,
updated_node: cst.SimpleStatementSuite,
) -> cst.BaseSuite:
# We can't use matchers here due to circular imports. We take advantage of
# the fact that a name in a simple suite will be parsed as an Expr node
# contained in a SimpleStatementSuite, so we check for these and see if they
# should be expanded template-wise to a base suite of some type.
if len(updated_node.body) == 1:
body_node = updated_node.body[0]
if isinstance(body_node, cst.Expr):
name_node = body_node.value
if isinstance(name_node, cst.Name):
var_name = unmangled_name(name_node.value)
if var_name in self.suite_replacements:
return self.suite_replacements[var_name].deep_clone()
return updated_node
def leave_IndentedBlock(
self,
original_node: cst.IndentedBlock,
updated_node: cst.IndentedBlock,
) -> cst.BaseSuite:
# We can't use matchers here due to circular imports. We take advantage of
# the fact that a name in an indented block will be parsed as an Expr node
# contained in a SimpleStatementLine, so we check for these and see if they
# should be expanded template-wise to a base suite of some type.
if len(updated_node.body) == 1:
statement_node = updated_node.body[0]
if (
isinstance(statement_node, cst.SimpleStatementLine)
and len(statement_node.body) == 1
):
body_node = statement_node.body[0]
if isinstance(body_node, cst.Expr):
name_node = body_node.value
if isinstance(name_node, cst.Name):
var_name = unmangled_name(name_node.value)
if var_name in self.suite_replacements:
return self.suite_replacements[var_name].deep_clone()
return updated_node
def leave_Index(
self,
original_node: cst.Index,
updated_node: cst.Index,
) -> cst.BaseSlice:
# We can't use matchers here due to circular imports
expr = updated_node.value
if isinstance(expr, cst.Name):
var_name = unmangled_name(expr.value)
if var_name in self.subscript_index_replacements:
return self.subscript_index_replacements[var_name].deep_clone()
return updated_node
def leave_SubscriptElement(
self,
original_node: cst.SubscriptElement,
updated_node: cst.SubscriptElement,
) -> cst.SubscriptElement:
# We can't use matchers here due to circular imports. We use the trick
# similar to above stanzas where a template replacement variable will
# always show up as a certain type (in this case an Index inside of a
# SubscriptElement) in order to successfully replace subscript elements
# in templates.
index = updated_node.slice
if isinstance(index, cst.Index):
expr = index.value
if isinstance(expr, cst.Name):
var_name = unmangled_name(expr.value)
if var_name in self.subscript_element_replacements:
return self.subscript_element_replacements[var_name].deep_clone()
return updated_node
def leave_Decorator(
self, original_node: cst.Decorator, updated_node: cst.Decorator
) -> cst.Decorator:
# We can't use matchers here due to circular imports
decorator = updated_node.decorator
if isinstance(decorator, cst.Name):
var_name = unmangled_name(decorator.value)
if var_name in self.decorator_replacements:
return self.decorator_replacements[var_name].deep_clone()
return updated_node
class TemplateChecker(cst.CSTVisitor):
def __init__(self, template_vars: Set[str]) -> None:
self.template_vars = template_vars
def visit_Name(self, node: cst.Name) -> None:
for var in self.template_vars:
if node.value == mangled_name(var):
raise Exception(f'Template variable "{var}" was not replaced properly')
def unmangle_nodes(
tree: cst.CSTNode,
template_replacements: Mapping[str, ValidReplacementType],
) -> cst.CSTNode:
unmangler = TemplateTransformer(template_replacements)
return ensure_type(tree.visit(unmangler), cst.CSTNode)
_DEFAULT_PARTIAL_PARSER_CONFIG: cst.PartialParserConfig = cst.PartialParserConfig()
def parse_template_module(
template: str,
config: cst.PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG,
**template_replacements: ValidReplacementType,
) -> cst.Module:
"""
Accepts an entire python module template, including all leading and trailing
whitespace. Any :class:`~libcst.CSTNode` provided as a keyword argument to
this function will be inserted into the template at the appropriate location
similar to an f-string expansion. For example::
module = parse_template_module("from {mod} import Foo\\n", mod=Name("bar"))
The above code will parse to a module containing a single
:class:`~libcst.FromImport` statement, referencing module ``bar`` and importing
object ``Foo`` from it. Remember that if you are parsing a template as part
of a substitution inside a transform, its considered
:ref:`best practice <libcst-config_best_practice>` to pass in a ``config``
from the current module under transformation.
Note that unlike :func:`~libcst.parse_module`, this function does not support
bytes as an input. This is due to the fact that it is processed as a template
before parsing as a module.
"""
source = mangle_template(template, {name for name in template_replacements})
module = cst.parse_module(source, config)
new_module = ensure_type(unmangle_nodes(module, template_replacements), cst.Module)
new_module.visit(TemplateChecker({name for name in template_replacements}))
return new_module
def parse_template_statement(
template: str,
config: cst.PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG,
**template_replacements: ValidReplacementType,
) -> Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]:
"""
Accepts a statement template followed by a trailing newline. If a trailing
newline is not provided, one will be added. Any :class:`~libcst.CSTNode`
provided as a keyword argument to this function will be inserted into the
template at the appropriate location similar to an f-string expansion. For
example::
statement = parse_template_statement("assert x > 0, {msg}", msg=SimpleString('"Uh oh!"'))
The above code will parse to an assert statement checking that some variable
``x`` is greater than zero, or providing the assert message ``"Uh oh!"``.
Remember that if you are parsing a template as part of a substitution inside
a transform, its considered :ref:`best practice <libcst-config_best_practice>`
to pass in a ``config`` from the current module under transformation.
"""
source = mangle_template(template, {name for name in template_replacements})
statement = cst.parse_statement(source, config)
new_statement = unmangle_nodes(statement, template_replacements)
if not isinstance(
new_statement, (cst.SimpleStatementLine, cst.BaseCompoundStatement)
):
raise Exception(
f"Expected a statement but got a {new_statement.__class__.__name__}!"
)
new_statement.visit(TemplateChecker({name for name in template_replacements}))
return new_statement
def parse_template_expression(
template: str,
config: cst.PartialParserConfig = _DEFAULT_PARTIAL_PARSER_CONFIG,
**template_replacements: ValidReplacementType,
) -> cst.BaseExpression:
"""
Accepts an expression template on a single line. Leading and trailing whitespace
is not valid (there’s nowhere to store it on the expression node). Any
:class:`~libcst.CSTNode` provided as a keyword argument to this function will
be inserted into the template at the appropriate location similar to an
f-string expansion. For example::
expression = parse_template_expression("x + {foo}", foo=Name("y")))
The above code will parse to a :class:`~libcst.BinaryOperation` expression
adding two names (``x`` and ``y``) together.
Remember that if you are parsing a template as part of a substitution inside
a transform, its considered :ref:`best practice <libcst-config_best_practice>`
to pass in a ``config`` from the current module under transformation.
"""
source = mangle_template(template, {name for name in template_replacements})
expression = cst.parse_expression(source, config)
new_expression = ensure_type(
unmangle_nodes(expression, template_replacements), cst.BaseExpression
)
new_expression.visit(TemplateChecker({name for name in template_replacements}))
return new_expression
| StarcoderdataPython |
4926398 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Class Recommendation System.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cErX1ARVB1TLtVZVXof1HIzsuGl3K8Rw
"""
import pandas as pd
from rake_nltk import Rake
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
import json
# fake json string
data = '{"":{"row1":1,"row2":2,"row3":3},"col2":{"row1":"x","row2":"y","row3":"z"}}'
# read json to data frame
df = pd.read_json(data)
print(df)
pd.set_option('display.max_columns', 10)
# df = pd.read_csv('/Recommendation/data_file.csv') <- probably won't need this
print(df.head())
print(df.shape)
df = df[['Class-Title', 'Class-Type', 'Tutor', 'Desc']]
print(df.head())
# discarding the commas between the tutor's full names and getting only the first three names
df['Tutor'] = df['Tutor'].map(lambda x: x.split(','))
# merging together first and last name for each tutor, so it's considered as one word
for index, row in df.iterrows():
row['Tutor'] = [x.lower().replace(' ', '') for x in row['Tutor']]
print(df.head(10))
# initializing the new column
df['Key_words'] = ""
for index, row in df.iterrows():
description = row['Desc']
# instantiating Rake, uses english stopwords from NLTK and discard all puntuation characters
x = Rake()
# extracting the words by passing the text
x.extract_keywords_from_text(description)
# getting the dictionary whith key words and their scores
key_words_dict_scores = x.get_word_degrees()
# assigning the key words to the new column
row['Key_words'] = list(key_words_dict_scores.keys())
# dropping the description column
df.drop(columns=['Desc'], inplace=True)
df.set_index('Class-Title', inplace=True)
print(df.head())
df['combined_words'] = ''
columns = df.columns
for index, row in df.iterrows():
words = ''
for col in columns:
words = words + ' '.join(row[col]) + ' '
row['combined_words'] = words
df.drop(columns=[col for col in df.columns if col !=
'combined_words'], inplace=True)
print(df.head())
# instantiating and generating the count matrix
count = CountVectorizer()
count_matrix = count.fit_transform(df['combined_words'])
# creating a Series for the class titles so they are associated to an ordered numerical
indices = pd.Series(df.index)
print(indices[:5])
# generating the cosine similarity matrix
cosine_sim = cosine_similarity(count_matrix, count_matrix)
print(cosine_sim)
# creating a Series for the class titles so they are associated to an ordered numerical
def recommendations(title, cosine_sim=cosine_sim):
recommended_classes = []
# getting the index of the class that matches the title
idx = indices[indices == title].index[0]
# creating a Series with the similarity scores in descending order
score_series = pd.Series(cosine_sim[idx]).sort_values(ascending=False)
# getting the indexes of the 5 most similar classes
top_indexes = list(score_series.iloc[1:6].index)
for i in top_indexes:
recommended_classes.append(list(df.index)[i])
return recommended_classes
| StarcoderdataPython |
1844311 | import os, time, sys
pipe_name = 'pipe_test'
def child():
pipeout = os.open(pipe_name, os.O_WRONLY)
counter = 0
while True:
time.sleep(1)
os.write(pipeout, 'Number %03d\n' % counter)
counter = (counter + 1 ) % 5
def parent():
pipein = open(pipe_name, 'r')
while True:
line = pipein.readine()[:-1]
print ('Parent %d got "%s" at %s', (os.getpid(), line, time.time()))
if not (os.path.exists(pipe_name)):
os.mkfifo(pipe_name)
pid = os.fork()
if pid != 0:
parent()
else:
child()
| StarcoderdataPython |
2031 | <filename>build/scripts-3.6/fit_background_model.py<gh_stars>0
#!python
import numpy as np
from numpy import inf
from numpy import nan
from scipy.optimize import fmin
from scipy.stats import beta
from scipy.special import beta as B
from scipy.special import comb
import argparse
import sys
def parseArgs():
'''Function for parsing arguments'''
parser = argparse.ArgumentParser(description="Pipeline for analyzing barcoded amplicon \
sequencing data with Unique molecular \
identifiers (UMI)")
parser.add_argument('-cons', '--cons_file', dest='cons_file', help='Path to cons file, for fitting parameters of the bgmodel')
parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile',
help='Path to file with non-background positions')
parser.add_argument('-out', '--out_file',dest='out_file',help="name of output file, default = %(default)s]",default="bgmodel.params")
parser.add_argument('-f','--fsize',dest='fsize', help='Family size cutoff (consensus cutoff) for variant calling. [default = %(default)s]', default=3)
args = parser.parse_args(sys.argv[1:])
return(args)
def parse_cons_file(filename,fsize=3):
n1=[]
f1=[]
c1=[]
posx=[]
data=[]
with open(filename) as f:
for line in f:
if not line.startswith('Sample Name'):
line=line.rstrip('\n')
parts=line.split('\t')
pos=parts[1]+':'+parts[2]
name=parts[3]
#print(name)
if name not in "":
famsize=parts[-4]
if int(famsize)==fsize:
frac=float(parts[-2])
alt=parts[-1]
count=parts[-3]
if frac > 0 and alt not in 'N':
cov=int(parts[-5])
f1.append(float(frac))
n1.append(int(cov))
c1.append(int(count))
posx.append(pos)
data.append(line)
#print(name)
#print(famsize)
return(f1,n1,c1,posx,data)
def betaNLL(params,*args):
a,b = params
data = np.array(args[0])
pdf=beta.pdf(data,a,b,loc=0,scale=1)
lg=np.log(pdf)
#lg=np.where(lg==-np.inf,0,lg)
mask = np.isfinite(lg)
nll = -lg[mask].sum()
nll=-1*np.sum(lg)
return(nll)
def get_beta_parameters(data):
m=np.mean(data)
v=np.var(data)
a0=m*(m * (1-m) / v-1 )
b0=(1-m)*(m * (1-m) / v-1 )
result=fmin(betaNLL,[a0,b0],args=(data,))
return(result)
def run_fit_bgmodel(args):
spikepositions=[178952085,55599321,7577558,7577547,7577538,7577120]
if args.nonbgposfile:
nonbgpos=[]
with open(args.nonbgposfile) as f:
for line in f:
line=line.rstrip()
nonbgpos.append(line)
else:
nonbgpos=spikepositions
if not args.cons_file:
args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0]
args.fsize=int(args.fsize)
f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize)
f1 = np.array(f1)
n1 = np.array(n1)
a1 = np.array(a1)
pos = np.array(pos)
data = np.array(data)
result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True])
#a=prob_bb(n1,a1,result[0],result[1])
print(pos,nonbgpos,np.isin(pos,nonbgpos))
with open(args.out_file,'w') as g:
g.write('{}\n'.format(result[0]))
g.write('{}\n'.format(result[1]))
#a[a==inf]=1e-10
#a[np.isnan(a)]=1e-10
#Q = -10*np.log10(a)
#data=np.array(data)
#plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png')
#if args.vc_method.lower()=='bbmodel':
# rout=data[Q >= float(args.qvalue_threshold)]
# Qsig=Q[Q >= float(args.qvalue_threshold)]
#else:
# rout=data[a1 >= float(args.count_cutoff)]
# Qsig=Q[a1 >= float(args.count_cutoff)]
#outfilename=args.output_path+'/'+args.sample_name+'2.vcf'
#write_vcf(outfilename,rout,Qsig,args.reference_file)
if __name__=='__main__':
args=parseArgs()
run_fit_bgmodel(args)
| StarcoderdataPython |
234772 | <reponame>ChaoticMarauder/Project_Rosalind
def dict_words(string):
    """Return a dict mapping each whitespace-separated word in *string* to
    its number of occurrences."""
    dict_word = {}
    for word in string.split():
        # dict.get with a default replaces the original's explicit
        # `get(word) != None` check-and-branch.
        dict_word[word] = dict_word.get(word, 0) + 1
    return dict_word
def main():
    """Count word frequencies in the Rosalind input and report them."""
    with open('datasets/rosalind_ini6.txt') as input_file:
        text = input_file.read().strip()
    counts = dict_words(text)
    # Echo every word/count pair to stdout ...
    for word, count in counts.items():
        print(str(word) + ' ' + str(count))
    # ... and persist the same pairs to the solutions file.
    with open('solutions/rosalind_ini6.txt', 'w') as output_file:
        for word, count in counts.items():
            output_file.write(str(word) + ' ' + str(count) + '\n')
if __name__ == '__main__':
    main()
231606 | <filename>src/minimax.py
from typing import Tuple, List
from sys import maxsize as MAX_INT
from board import Board, Direction
def maximize(state: Board, a: int, b: int, d: int) -> Tuple[Board, int]:
    """Alpha-beta MAX step: choose the player's move with the best utility.

    Returns a (child board, utility) pair; the child is None at the depth
    limit or when the player has no moves left.
    """
    # Terminal node: no lookahead budget, or the player is stuck.
    if d == 0 or state.player_cannot_move_anymore():
        return None, state.utility()
    best_board, best_score = None, -1
    for move in state.available_moves_for_player():
        child = Board()
        child.board = state.get_copy()
        child.move(move)
        _, score = minimize(child, a, b, d - 1)
        if score > best_score:
            best_board, best_score = child, score
        # Beta cutoff: the MIN node above will never allow a value this big.
        if best_score >= b:
            break
        # Tighten alpha for the remaining siblings.
        a = max(a, best_score)
    return best_board, best_score
def minimize(state: Board, a: int, b: int, d: int) -> Tuple[Board, int]:
    """Alpha-beta MIN step: the game places a tile to hurt the player most."""
    # Terminal node: no lookahead budget, or no empty cell remains.
    if d == 0 or state.board_is_full():
        return None, state.utility()
    worst_board, worst_score = None, MAX_INT
    for row, col, value in state.available_moves_for_game():
        child = Board()
        child.board = state.get_copy()
        child.place(row, col, value)
        _, score = maximize(child, a, b, d - 1)
        if score < worst_score:
            worst_board, worst_score = child, score
        # Alpha cutoff: the MAX node above already has a better option.
        if worst_score <= a:
            break
        # Tighten beta for the remaining siblings.
        b = min(b, worst_score)
    return worst_board, worst_score
def get_best_move(board: Board, depth: int = 5):
    # Search from a copy of the current grid so the caller's board is untouched.
    # NOTE(review): Board appears to accept an initial grid here, while
    # maximize/minimize build boards via Board() + .board assignment — confirm
    # both construction paths are supported by Board.__init__.
    child, util = maximize(Board(board.get_copy()), -1, MAX_INT, depth)
    # Translate the chosen child state back into the move that produces it.
    return board.get_move_to_grid(child), util
| StarcoderdataPython |
3305304 | <gh_stars>1-10
"Test mansel module"
from collections import Counter
import os
from pathlib import Path
import sys
import tempfile
try:
from PySide2 import QtCore, QtWidgets
except ImportError:
try:
from PyQt5 import QtCore, QtWidgets
except ImportError:
raise ImportError("PySide2 or other Qt binding required!") from None
import pytest
# pylint: disable=wrong-import-position
import mansel # NOQA
# pylint: enable=wrong-import-position
# Fixture layout: relative file paths and the size of every created file.
FILES = ("f0", "d0/f1", "d1/d2/d3/f2", "d1/d2/d3/f3", "d1/d4/d5/f4")
FILESIZE = 10000
# Unique parent directories of the nested files (top-level files excluded).
DIRS = tuple({str(Path(p).parent) for p in FILES if len(Path(p).parts) > 1})
def set_path(model: mansel.CheckableFileSystemModel, path: Path, state: int):
    """Apply a check *state* to *path* inside the given filesystem model."""
    model.setData(model.index(str(path)), state, QtCore.Qt.CheckStateRole)
# pylint: disable=redefined-outer-name
@pytest.fixture
def tmp_root_dir():
    """Temporary file hierarchy for CheckableFileSystemModel.

    Creates DIRS and sparse FILES of FILESIZE bytes under a fresh temporary
    directory, yields its path, and removes the tree afterwards.
    """
    import shutil  # local import: only needed for fixture teardown

    # BUG FIX: `Path(tempfile.TemporaryDirectory().name)` keeps no reference
    # to the TemporaryDirectory object, so CPython may garbage-collect it —
    # deleting the directory — at any point while the test still uses it.
    # mkdtemp creates a directory that persists until we remove it ourselves.
    base = Path(tempfile.mkdtemp())
    for dir_ in DIRS:
        (base / dir_).mkdir(parents=True)
    for file_ in FILES:
        # Seek-and-write one byte so the file reports FILESIZE bytes without
        # allocating real content.
        with open(base / file_, "wb") as fhandle:
            fhandle.seek(FILESIZE - 1)
            fhandle.write(b"\0")
    yield base
    # Teardown: do not leak the fixture tree between test runs.
    shutil.rmtree(base, ignore_errors=True)
def test_dirtree():
    """Exercise DirTree insert/remove/check behaviour."""
    raw_paths = ("a", "b/c", "b/d")
    tree = mansel.DirTree(raw_paths)
    paths = [Path(raw) for raw in raw_paths]
    # A directory containing selected entries reports as "parent".
    assert tree.check(Path("b")) == "parent"
    # Every explicitly selected path is a "leaf".
    assert all(tree.check(p) == "leaf" for p in paths)
    # Removing each path downgrades it to "unselected" ...
    for p in paths:
        tree.remove(p)
        assert tree.check(p) == "unselected"
    # ... and re-inserting makes it selected again.
    for p in paths:
        tree.insert(p)
        assert tree.check(p)
    # Inserting below an existing leaf must raise a conflict.
    with pytest.raises(mansel.PathConflict):
        tree.insert(Path("b/c/d"))
# This test fails in horrible ways, and I am not sure why
#
# def test_model_qtmodeltester(tmp_root_dir, qtbot, qtmodeltester):
# 'Basic checks on CheckableFileSystemModel'
# model = mansel.CheckableFileSystemModel(preselection=DIRS[:-1],
# track_selection_size=False)
# model.setRootPath(str(tmp_root_dir))
# with qtbot.waitSignal(model.preselectionProcessed, timeout=None):
# qtmodeltester.check(model)
def test_preselection(tmp_root_dir, qtbot):
    """Preselected paths end up selected once the root path is processed."""
    dialog = QtWidgets.QDialog()
    qtbot.addWidget(dialog)
    # Preselect all but the last fixture directory.
    preselection = DIRS[:-1]
    model = mansel.CheckableFileSystemModel(
        parent=dialog, preselection=preselection, track_selection_size=False
    )
    # Before the root path is set, every preselected path is still pending.
    assert all(model.preselection.check(Path(p)) for p in preselection)
    # Wait for preselection to be processed
    with qtbot.waitSignal(model.preselectionProcessed, timeout=None):
        model.setRootPath(str(tmp_root_dir))
    # Once processed, the pending-preselection tree must be empty.
    assert not model.preselection.root
    # Absolute paths
    selected_paths = [
        model.filePath(QtCore.QModelIndex(i)) for i in model.selected
    ]
    # Relative paths as strings
    selected_paths = [
        str(Path(p).relative_to(model.rootPath())) for p in selected_paths
    ]
    assert set(selected_paths) == set(preselection)
def test_selection(tmp_root_dir, qtbot):
    """Checking/unchecking files updates both item state and model.selected."""
    dialog = QtWidgets.QDialog()
    qtbot.addWidget(dialog)
    model = mansel.CheckableFileSystemModel(
        parent=dialog, track_selection_size=False
    )
    model.setRootPath(str(tmp_root_dir))
    # Select files
    for file_ in FILES:
        set_path(model, tmp_root_dir / file_, QtCore.Qt.Checked)
    for file_ in FILES:
        index = model.index(str(tmp_root_dir / file_))
        assert model.data(index, QtCore.Qt.CheckStateRole) == QtCore.Qt.Checked
    assert len(model.selected) == len(FILES)
    # Unselect files
    for file_ in FILES:
        set_path(model, tmp_root_dir / file_, QtCore.Qt.Unchecked)
    for file_ in FILES:
        index = model.index(str(tmp_root_dir / file_))
        assert (
            model.data(index, QtCore.Qt.CheckStateRole) == QtCore.Qt.Unchecked
        )
    assert not model.selected
    # Selecting the same path twice must be idempotent.
    for _ in range(2):
        set_path(model, tmp_root_dir / FILES[0], QtCore.Qt.Checked)
    index = model.index(str(tmp_root_dir / FILES[0]))
    assert model.data(index, QtCore.Qt.CheckStateRole) == QtCore.Qt.Checked
def test_partial_selection(tmp_root_dir, qtbot):
    """Ancestors/descendants of a checked path are partially selected."""
    dialog = QtWidgets.QDialog()
    qtbot.addWidget(dialog)
    model = mansel.CheckableFileSystemModel(
        parent=dialog, track_selection_size=False
    )
    model.setRootPath(str(tmp_root_dir))
    # Use the deepest fixture file so there are several ancestor levels.
    deep_file = Path(max(FILES, key=lambda x: len(Path(x).parts)))
    assert len(deep_file.parts) >= 3
    # Every prefix of the deep path: d1, d1/d2, d1/d2/d3, ...
    paths = [
        Path(".").joinpath(*deep_file.parts[:depth])
        for depth, _ in enumerate(deep_file.parts, 1)
    ]
    # Check each path part and make sure all parents/children are
    # partially checked
    for depth, part in enumerate(paths):
        set_path(model, str(tmp_root_dir / part), QtCore.Qt.Checked)
        for depth_, part_ in enumerate(paths):
            index = model.index(str(tmp_root_dir / part_))
            if depth == depth_:
                assert (
                    model.data(index, QtCore.Qt.CheckStateRole)
                    == QtCore.Qt.Checked
                )
            else:
                assert (
                    model.data(index, QtCore.Qt.CheckStateRole)
                    == QtCore.Qt.PartiallyChecked
                )
    # Check and uncheck each path part and make sure all
    # parents/children are unchecked
    for depth, part in enumerate(paths):
        set_path(model, str(tmp_root_dir / part), QtCore.Qt.Checked)
        set_path(model, str(tmp_root_dir / part), QtCore.Qt.Unchecked)
        for depth_, part_ in enumerate(paths):
            index = model.index(str(tmp_root_dir / part_))
            assert (
                model.data(index, QtCore.Qt.CheckStateRole)
                == QtCore.Qt.Unchecked
            )
)
def test_main_dialog(tmp_root_dir, qtbot):
    """Smoke-test main_dialog: preselection file is honoured, UI calls don't crash."""
    # Selection file containing one preselected path; seek(0) flushes the
    # buffered write so main_dialog can read it back.
    selection = tempfile.NamedTemporaryFile("w")
    selection.write(FILES[0])
    selection.seek(0)
    dialog = mansel.main_dialog(
        args_in=["-p", str(tmp_root_dir), "-s", selection.name]
    )
    qtbot.addWidget(dialog)
    with qtbot.waitSignal(dialog.model.preselectionProcessed, timeout=None):
        dialog.model.setRootPath(str(tmp_root_dir))
    # Absolute paths
    selected_paths = [
        dialog.model.filePath(QtCore.QModelIndex(i))
        for i in dialog.model.selected
    ]
    # Relative paths as strings
    selected_paths = [
        str(Path(p).relative_to(dialog.model.rootPath()))
        for p in selected_paths
    ]
    assert set(selected_paths) == set([FILES[0]])
    with qtbot.waitSignal(dialog.model.tracker_thread.finished, timeout=None):
        # Run through main methods to make sure they don't crash
        # (i.e. this doesn't validate that they do the right thing!)
        dialog.indicate_calculating()
        dialog.update_size(0)
        dialog.update_view()
        dialog.print_selection_and_close()
def test_model_no_parent(tmp_root_dir, qtbot):
    """A parentless model must shut down cleanly when its thread is quit."""
    model = mansel.CheckableFileSystemModel()
    model.setRootPath(str(tmp_root_dir))
    # Ask the tracker thread to quit and block until it reports finished.
    tracker = model.tracker_thread
    with qtbot.waitSignal(tracker.finished, timeout=None):
        tracker.quit()
def test_track_size(tmp_root_dir, qtbot):
    """Selection-size tracking reports the summed size of checked files.

    (The original docstring was copy-pasted from test_model_no_parent.)
    """
    model = mansel.CheckableFileSystemModel()
    model.setRootPath(str(tmp_root_dir))
    # Check every fixture file individually.
    for file_ in FILES:
        set_path(model, tmp_root_dir / file_, QtCore.Qt.Checked)
    with qtbot.waitSignal(model.newSelectionSize, timeout=None) as blocker:
        model.calculate_selection_size()
    files_size = blocker.args[0]
    assert files_size == FILESIZE * len(FILES)
    # Additionally checking the top-level entries must not double-count the
    # files already selected below them.
    for path in tmp_root_dir.iterdir():
        set_path(model, path, QtCore.Qt.Checked)
    with qtbot.waitSignal(model.newSelectionSize, timeout=None) as blocker:
        model.calculate_selection_size()
    total_size = blocker.args[0]
    assert total_size == files_size
    # Shut the tracker thread down cleanly before the test ends.
    with qtbot.waitSignal(model.tracker_thread.finished, timeout=None):
        model.tracker_thread.quit()
def test_dir_size_fetcher(tmp_root_dir, qtbot):
    """DirSizeFetcher reports correct directory sizes, first and cached lookup."""
    # Find top level directory with most files below it
    dirs = Counter(Path(f).parts[0] for f in FILES)
    dir_, count = dirs.most_common(1)[0]
    # Find an intermediate dir to select first, to test cached lookup
    # during higher level lookup
    for path in FILES:
        path = Path(path)
        if path.parts[0] == dir_ and len(path.parts) >= 3:
            inter_dir = path.parent
            break
    # Count fixture files living under the chosen intermediate directory.
    inter_dir_count = 0
    for path in FILES:
        try:
            Path(path).relative_to(inter_dir)
        except ValueError:
            continue
        inter_dir_count += 1
    model = mansel.CheckableFileSystemModel(track_selection_size=False)
    model.setRootPath(str(tmp_root_dir))
    # NOTE(review): the model is not a QWidget; confirm qtbot.addWidget is
    # the intended way to tie its lifetime to the test.
    qtbot.addWidget(model)
    set_path(model, tmp_root_dir / dir_, QtCore.Qt.Checked)
    fetcher = mansel.DirSizeFetcher(model)
    qtbot.addWidget(fetcher)
    # Test intermediate dir
    with qtbot.waitSignal(fetcher.resultReady, timeout=None) as blocker:
        fetcher.fetch_size(str(tmp_root_dir / inter_dir))
    assert blocker.args[1] == inter_dir_count * FILESIZE
    # Test top level dir
    # Test twice to test initial lookup and cached lookup
    for _ in range(2):
        with qtbot.waitSignal(fetcher.resultReady, timeout=None) as blocker:
            fetcher.fetch_size(str(tmp_root_dir / dir_))
        assert blocker.args[1] == count * FILESIZE
def test_output(tmp_root_dir, qtbot, capsys):
    """The dialog prints exactly the selected paths when it closes."""
    dialog = mansel.main_dialog(args_in=["-p", str(tmp_root_dir)])
    qtbot.addWidget(dialog)
    # Select everything, then close — the selection is printed to stdout.
    with qtbot.waitSignal(dialog.model.tracker_thread.finished, timeout=None):
        for file_ in FILES:
            set_path(dialog.model, tmp_root_dir / file_, QtCore.Qt.Checked)
        dialog.print_selection_and_close()
    captured = capsys.readouterr()
    # Order is not specified, so compare as sets of lines.
    assert set(captured.out.splitlines()) == set(FILES)
| StarcoderdataPython |
1927212 | #!/usr/bin/env python3
import os
FILE_DIR = os.path.dirname(os.path.relpath(__file__))
SNIPPETS_DIR = "snippets"
base_template = """
import os
from base import TestBase
class {cls}Test(TestBase):
snippet_dir = "{dir}"
"""
test_template = """
def test_{name}(self):
self.validate_snippet(self.get_snippet_path("{name}"))
"""
def create_test_case(name):
    """Generate ``<name>_test.py`` with one test per snippet in snippets/<name>."""
    test_name = name + "_test.py"
    # CamelCase the snake_case directory name for the generated class name.
    capitalized = "".join(part.title() for part in name.split("_"))
    template = base_template.format(cls=capitalized, dir=name)
    snippet_root = os.path.join(FILE_DIR, SNIPPETS_DIR, name)
    # BUG FIX (readability): the original reused `name` as the loop variable,
    # shadowing the parameter; use a distinct name.
    for snippet in os.listdir(snippet_root):
        # os.listdir never yields "." or ".."; guard kept for parity with
        # the original behaviour.
        if snippet == "." or snippet == "..":
            continue
        template += test_template.format(name=snippet)
    with open(os.path.join(FILE_DIR, test_name), "w+") as f:
        f.write(template)
# Generate one test module for every snippet directory.
for name in os.listdir(os.path.join(FILE_DIR, SNIPPETS_DIR)):
    # os.listdir never returns "." or ".."; kept as a defensive guard.
    if name == "." or name == "..":
        continue
    create_test_case(name)
4906407 | <reponame>alviproject/alvi<filename>alvi/client/data_generators/random.py
import random
from alvi.client.data_generators.base import DataGenerator
class RandomDataGenerator(DataGenerator):
    """Data generator yielding ``quantity()`` random ints in [1, quantity()]."""

    def _values(self):
        qty = self.quantity()
        # A generator expression is already its own iterator, so the
        # original's explicit .__iter__() call was redundant.
        return (random.randint(1, qty) for _ in range(qty))
3380769 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 <NAME>.
#
# Invenio-Utilities-TUW is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Some utilities for InvenioRDM."""
from invenio_rdm_records.proxies import current_rdm_records
UTILITIES_TUW_BASE_TEMPLATE = "invenio_utilities_tuw/base.html"
"""Default base template for the demo page."""

# The factories below are stored as zero-argument callables so the Invenio
# service proxies are resolved lazily, only once the application context is
# available at call time.
UTILITIES_TUW_RECORD_SERVICE_FACTORY = lambda: current_rdm_records.records_service
"""Factory function for creating a RecordService."""

UTILITIES_TUW_RECORD_FILES_SERVICE_FACTORY = (
    lambda: current_rdm_records.record_files_service
)
"""Factory function for creating a RecordFileService."""

UTILITIES_TUW_DRAFT_FILES_SERVICE_FACTORY = (
    lambda: current_rdm_records.draft_files_service
)
"""Factory function for creating a DraftFileService."""
| StarcoderdataPython |
11244521 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""AFN for 8-bit time series from the Rössler oscillator.
Since each point in the time series is an 8-bit integer (i.e., it's in
the range [-127, 127]), the reconstructed phase space is essentially a
grid with zero dimension. To actually measure the dimension of this
data set, we have to "kick" points off the grid a little bit by adding
an insignificant amount of noise. See Example 6.4 in Kantz & Schreiber
(2004).
But the quality of reconstruction depends on the noise level. Adding
an insignificant amount of noise does not help at all! This is
probably one of the rare case where a higher level of additive noise
improves the results.
"""
from nolitsa import data, dimension, utils
import matplotlib.pyplot as plt
import numpy as np
# Generate data.
x = data.roessler(length=5000)[1][:, 0]

# Convert to 8-bit.
x = np.int8(utils.rescale(x, (-127, 127)))

# Add uniform noise of two different noise levels.
y1 = x + (-0.001 + 0.002 * np.random.random(len(x)))
y2 = x + (-0.5 + 1.0 * np.random.random(len(x)))

# AFN algorithm.
dim = np.arange(1, 10 + 2)
F, Fs = dimension.afn(y1, tau=14, dim=dim, window=40)
# Ratios of consecutive-dimension AFN values (Cao's E1/E2 statistics).
F1, F2 = F[1:] / F[:-1], Fs[1:] / Fs[:-1]

E, Es = dimension.afn(y2, tau=14, dim=dim, window=40)
E1, E2 = E[1:] / E[:-1], Es[1:] / Es[:-1]

# Tiny noise: embedding statistics stay grid-dominated (see module docstring).
plt.figure(1)
plt.title(r'AFN after corrupting with uniform noise in $[-0.001, 0.001]$')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], F1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], F2, 'go-', label=r'$E_2(d)$')
plt.legend()

# Larger noise: points are kicked off the 8-bit grid, improving the estimate.
plt.figure(2)
plt.title(r'AFN after corrupting with uniform noise in $[-0.5, 0.5]$')
plt.xlabel(r'Embedding dimension $d$')
plt.ylabel(r'$E_1(d)$ and $E_2(d)$')
plt.plot(dim[:-1], E1, 'bo-', label=r'$E_1(d)$')
plt.plot(dim[:-1], E2, 'go-', label=r'$E_2(d)$')
plt.legend()

plt.show()
| StarcoderdataPython |
4808558 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 23:07:30 2020
@author: virati
DO_Phase portrait and dynamics work
"""
import sys
sys.path.append('/home/virati/Dropbox/projects/Research/MDD-DBS/Ephys/DBSpace/')
import DBSpace as dbo
from DBSpace import nestdict
import DBSpace.control.dyn_osc as DO
from matplotlib import cm
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.signal as sig
from matplotlib.gridspec import GridSpec
#%%
Ephys = DO.Ephys
# Recording selection: experiment phase, patient ID, and stim condition.
Phase = 'TurnOn'
pt = '906'
condit = 'OffTarget'
#%%
def scatter_phase(pt,condit):
    """Plot a time-colored Left/Right phase portrait for one patient/condition.

    Loads the recording, low-pass filters both channels at 10 Hz (fs assumed
    422 Hz throughout this file), and scatters Left vs Right samples colored
    by normalized time over a hand-picked analysis window.
    """
    timeseries = dbo.load_BR_dict(Ephys[pt][condit]['Filename'],sec_offset=0)
    # Recording length in seconds at the 422 Hz sampling rate.
    end_time = timeseries['Left'].shape[0]/422
    # Hand-picked analysis windows (sample indices) per patient.
    if pt == '903':
        tidxs = np.arange(231200,329300) #DBS903
    if pt == '906':
        tidxs = np.arange(256000,330200) #DBS903
    sos_lpf = sig.butter(10,10,output='sos',fs = 422)
    filt_L = sig.sosfilt(sos_lpf,timeseries['Left'])
    #filt_L = sig.decimate(filt_L,2)[tidxs] #-211*60*8:
    filt_R = sig.sosfilt(sos_lpf,timeseries['Right'])
    #filt_R = sig.decimate(filt_R,2)[tidxs]
    #pdb.set_trace()
    plt.figure()
    plt.plot(filt_L[tidxs],filt_R[tidxs],alpha=0.1)
    # Color every 50th point by normalized time to show the flow direction.
    t = np.linspace(0,1,filt_L[tidxs[0::50]].shape[0])
    plt.scatter(filt_L[tidxs[0::50]],filt_R[tidxs[0::50]],c=t,cmap='plasma',alpha=0.5,rasterized=True)
    plt.xlabel('Left')
    plt.ylabel('Right')
    plt.colorbar()
    plt.figure()
    plt.plot(filt_L[tidxs],rasterized=True)
#fundamentals('903','Left','OffTarget')
#scatter_phase('906','OffTarget')
# Now we're going to do a simple-minded 'grid' map of average change vector in each grid cell
# (Module-level duplicate of scatter_phase's body using the globals pt/condit.)
timeseries = dbo.load_BR_dict(Ephys[pt][condit]['Filename'],sec_offset=0)
# Recording length in seconds at the 422 Hz sampling rate.
end_time = timeseries['Left'].shape[0]/422
# Patient-specific analysis windows (sample indices).
if pt == '903':
    tidxs = np.arange(231200,329300) #DBS903
if pt == '906':
    tidxs = np.arange(256000,330200) #DBS903
# 10th-order 10 Hz low-pass on both channels.
sos_lpf = sig.butter(10,10,output='sos',fs = 422)
filt_L = sig.sosfilt(sos_lpf,timeseries['Left'])
#filt_L = sig.decimate(filt_L,2)[tidxs] #-211*60*8:
filt_R = sig.sosfilt(sos_lpf,timeseries['Right'])
#filt_R = sig.decimate(filt_R,2)[tidxs]
plt.figure()
plt.plot(filt_L[tidxs],filt_R[tidxs],alpha=0.1)
# Color every 50th point by normalized time to show the flow direction.
t = np.linspace(0,1,filt_L[tidxs[0::50]].shape[0])
plt.scatter(filt_L[tidxs[0::50]],filt_R[tidxs[0::50]],c=t,cmap='plasma',alpha=0.5,rasterized=True)
plt.xlabel('Left')
plt.ylabel('Right')
plt.colorbar()
#%%
# Raw vs filtered traces for both channels.
plt.figure()
plt.subplot(211)
plt.plot(timeseries['Left'])
plt.plot(timeseries['Right'])
plt.subplot(212)
plt.plot(filt_L[tidxs],rasterized=True)
plt.plot(filt_R[tidxs],rasterized=True)

# State = (L, R) rows; sd = forward-difference estimate of its derivative.
state = np.vstack((filt_L,filt_R))
sd = np.diff(state,axis=1,append=0)
#%%
# Disabled (if 0): grid-binned average change-vector field over the portrait.
if 0:
    # A vector field approach
    min_x = np.min(state[0,:])
    max_x = np.max(state[0,:])
    min_y = np.min(state[1,:])
    max_y = np.max(state[1,:])
    # 10x10 grid spanning the portrait's bounding box.
    xg = np.linspace(min_x,max_x,num=10)
    yg = np.linspace(min_y,max_y,num=10)
    #xg,yg = np.meshgrid(xg,yg)
    diffgrid = np.zeros(shape=(10,10,2))
    for ii in range(xg.shape[0]-1):
        for jj in range(yg.shape[0]-1):
            # Indices of samples whose state falls inside cell (ii, jj).
            pts_in_cell = np.where(np.logical_and(np.logical_and(state[0,:] < xg[ii+1],state[0,:] > xg[ii]),np.logical_and(state[1,:] < yg[jj+1],state[1,:] > yg[jj])))
            if len(pts_in_cell[0]) != 0:
                # NOTE(review): ipdb is never imported in this file; this
                # debugger fallback would itself raise NameError if hit.
                try: changes = np.median(sd[:,pts_in_cell].squeeze(),axis=1)
                except: ipdb.set_trace()
                #pdb.set_trace()
                diffgrid[ii,jj,:] = changes
    #ipdb.set_trace()
    plt.figure()
    xg,yg = np.meshgrid(xg,yg)
    plt.quiver(xg,yg,diffgrid[:,:,0],diffgrid[:,:,1])
#%%
import pysindy as ps
import scipy.stats as stats
plt.close('all')
## Now we get into subwindows
# Hand-labelled analysis window and dynamical-regime boundaries (sample
# offsets within that window) for each patient.
pt_windows = {'906':np.arange(255550,296095), '903':np.arange(231200,329300)}
#pt_regimes= {'906':np.array([0,800,3500,6350,9200,12300,30000]),'903':np.array([0,1470,6260,27020,80000,97940])}
pt_regimes= {'906':np.array([0,800,3500,6350,9200,12300,30000]),'903':np.array([0,1470,6260,27020,80000,97940])}
window = pt_windows[pt]
subwindow_e = pt_regimes[pt]
#Let's take out the BL stim first from the raw timeseries
# q=1 decimation is a no-op pass-through of the windowed state.
chirp = sig.decimate(state[:,window],q=1)
#plt.figure()
#plt.plot(chirp.T)
#chirp[0,:] = stats.zscore(chirp[0,:])
#chirp[1,:] = stats.zscore(chirp[1,:])
coeffs = []
# if you want to plot for documents
# Fit a SINDy model per regime sub-window and overlay its simulation.
for ii in range(subwindow_e.shape[0]-1):
    print(ii)
    #sliding window linewidth
    chirplet = chirp[:,subwindow_e[ii]:subwindow_e[ii+1]]
    fig,ax = plt.subplots()
    plt.plot(chirp.T)
    plt.ylim((-0.5,1.0))
    # Inset zoom onto the current sub-window.
    axins = ax.inset_axes([0.5,0.5,0.5,0.5])
    axins.plot(chirp.T)
    x1,x2,y1,y2 = subwindow_e[ii],subwindow_e[ii+1],-0.4,0.4
    axins.set_xlim(x1,x2)
    axins.set_ylim(y1,y2)
    ax.indicate_inset_zoom(axins)
    #plt.plot(chirplet.T)
    plt.figure()
    #fig, ax = plt.subplot(2,2)
    plt.scatter(chirplet.T[:,0],chirplet.T[:,1],c=np.arange(0,chirplet.shape[1]))
    plt.plot(chirplet.T[:,0],chirplet.T[:,1],alpha=0.2)
    #plt.xlim((-0.4,0.4))
    #plt.ylim((-0.4,0.4))
    # Sample period at the 422 Hz rate assumed throughout this file.
    dt = 1/422
    model = ps.SINDy()
    model.fit(chirplet.T, t=dt)
    #model.print()
    t_test = np.arange(0, 50, dt)
    # test the prediction now
    x_sim = model.simulate(chirplet.T[0,:],t_test)
    #ax[1,1].subplot(2,2,2)
    #plt.scatter(x_sim[:,0],x_sim[:,1])
    plt.plot(x_sim[:,0],x_sim[:,1],linewidth=2,alpha=1)
    # NOTE(review): model.print() returns None (it prints); this annotates
    # the figure with empty text — confirm intended.
    plt.text(0.1,0.1,model.print())
    coeffs.append(model.coefficients().reshape(-1,1))
#%%
# Heatmap of the per-regime SINDy coefficient vectors.
plt.figure()
plt.imshow(np.array(coeffs).squeeze().T,clim=(-5,5))
plt.colorbar(cmap='jet')
plt.title(pt + ' ' + condit)
#plt.figure()
#plt.plot(t_test,x_sim)
#%%
#Audio fun
from scipy.io.wavfile import write
import pandas as pd
#mono
#data = chirp[0,:]
#both
# Scale both channels to the int16 full range and resample for WAV export.
data = np.copy(chirp.T)
scaled = np.int16(data/np.max(np.abs(data))*32767)
updata = sig.resample(scaled,160000).astype('int16')
write('906_both.wav', 44100, updata)
1751547 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas.geometry import length_vector_sqrd
from compas.utilities import Colormap
from compas_rhino.artists import MeshArtist
__all__ = ['CablenetArtist']
class CablenetArtist(MeshArtist):
    """The :class:`CablenetArtist` provides functionality for visualisation of cablenet components in Rhino.

    Examples
    --------
    .. code-block:: python

        artist = CablenetArtist(cablenet, layer="Cablenet")
        artist.clear_layer()
        artist.draw_vertices()
        artist.draw_edges()
        artist.draw_faces()
        artist.draw_forces()
        artist.draw_reactions()
        artist.redraw()

    """

    def __init__(self, cablenet, layer=None):
        super(CablenetArtist, self).__init__(cablenet, layer=layer)
        # Default colors (RGB), scale factors, and drawing tolerances for
        # every quantity this artist can visualise.
        self.settings.update({
            'color.forces:compression': (0, 0, 255),
            'color.forces:tension': (255, 0, 0),
            'color.reactions': (0, 255, 0),
            'color.residuals': (0, 255, 255),
            'color.loads': (0, 0, 255),
            'color.selfweight': (255, 255, 255),
            'scale.forces': 0.1,
            'scale.reactions': 1.0,
            'scale.residuals': 1.0,
            'scale.loads': 1.0,
            'scale.selfweight': 1.0,
            # BUG FIX: draw_stress reads 'scale.stress' but no default was
            # ever registered, so calling it without an explicit scale raised
            # a KeyError. Register a default consistent with 'scale.forces'.
            'scale.stress': 0.1,
            'tol.reactions': 1e-3,
            'tol.residuals': 1e-3,
            'tol.forces': 1e-3})

    def clear(self):
        """Clear all components previously drawn by the artist."""
        super(CablenetArtist, self).clear()
        self.clear_forces()
        self.clear_reactions()
        self.clear_residuals()
        self.clear_loads()
        self.clear_selfweight()
        self.clear_stress()

    def clear_(self, name):
        """Delete all Rhino objects named ``<mesh name>.<name>.*``."""
        guids = compas_rhino.get_objects(name="{}.{}.*".format(self.mesh.name, name))
        compas_rhino.delete_objects(guids)

    def clear_forces(self):
        """Clear all internal forces drawn by the artist."""
        self.clear_('force')

    def clear_reactions(self):
        """Clear all reaction forces drawn by the artist."""
        self.clear_('reaction')

    def clear_residuals(self):
        """Clear all residual forces drawn by the artist."""
        self.clear_('residual')

    def clear_loads(self):
        """Clear all loads drawn by the artist."""
        self.clear_('load')

    def clear_selfweight(self):
        """Clear all selfweight drawn by the artist."""
        self.clear_('selfweight')

    def clear_stress(self):
        """Clear all stress drawn by the artist."""
        self.clear_('stress')

    def _draw_lines(self, lines):
        """Draw line descriptors on this artist's layer without clearing it."""
        compas_rhino.draw_lines(lines, layer=self.layer, clear=False, redraw=True)

    def _draw_cylinders(self, cylinders):
        """Draw capped cylinder descriptors on this artist's layer."""
        compas_rhino.draw_cylinders(cylinders, layer=self.layer, clear=False, redraw=True, cap=True)

    def _draw_spheres(self, spheres):
        """Draw sphere descriptors on this artist's layer."""
        compas_rhino.draw_spheres(spheres, layer=self.layer, clear=False, redraw=True)

    # ==========================================================================
    # geometry
    # ==========================================================================

    # ==========================================================================
    # statics
    # ==========================================================================

    def draw_forces(self, compression=None, tension=None, scale=None):
        """Draw the internal forces in the cablenet.

        Parameters
        ----------
        compression : color specification, optional
            The color of the compression forces.
            This defaults to the color set in the attributes of the cablenet.
            `self.settings['color.forces:compression']`
        tension : color specification, optional
            The color of the tension forces.
            This defaults to the color set in the attributes of the cablenet.
            `self.settings['color.forces:tension']`
        scale : float, optional
            The scale of the forces.
            This defaults to the scale set in the attributes of the cablenet.
            `self.settings['scale.forces']`

        Notes
        -----
        The forces are drawn as cylinders around the edges of the cablenet datastructure.
        The radius of the cylinder is proportional to the size of the force and scaled by
        the specified factor.
        If the radius is smaller than a specified tolerance, the drawing of the specific
        cylinder is skipped.
        """
        self.clear_forces()
        compression = compression or self.settings['color.forces:compression']
        tension = tension or self.settings['color.forces:tension']
        scale = scale or self.settings['scale.forces']
        tol = self.settings['tol.forces']
        tol2 = tol**2
        lines = []
        for u, v in self.mesh.edges_where({'is_edge': True}):
            f = self.mesh.edge_attribute((u, v), 'f')
            sp, ep = self.mesh.edge_coordinates(u, v)
            # Skip forces below the drawing tolerance.
            if f ** 2 < tol2:
                continue
            if f > 0.0:
                color = tension
            if f < 0.0:
                color = compression
                f = -f
            radius = scale * f
            lines.append({
                'start': sp,
                'end': ep,
                'radius': radius,
                'color': color,
                'name': "{}.force.{}-{}".format(self.mesh.name, u, v)
            })
        self._draw_cylinders(lines)

    def draw_reactions(self, color=None, scale=None):
        """Draw reaction-force arrows at the anchored vertices.

        Reactions below ``self.settings['tol.reactions']`` are skipped; the
        arrows point opposite the stored residual (rx, ry, rz).
        """
        self.clear_reactions()
        color = color or self.settings['color.reactions']
        scale = scale or self.settings['scale.reactions']
        tol = self.settings['tol.reactions']
        tol2 = tol**2
        lines = []
        for key, attr in self.mesh.vertices(True):
            if not attr['is_anchor']:
                continue
            r = rx, ry, rz = self.mesh.vertex_attributes(key, ('rx', 'ry', 'rz'))
            if length_vector_sqrd(r) <= tol2:
                continue
            sp = x, y, z = self.mesh.vertex_coordinates(key)
            ep = x - scale * rx, y - scale * ry, z - scale * rz
            lines.append({
                'start': sp,
                'end': ep,
                'name': "{}.reaction.{}".format(self.mesh.name, key),
                'color': color,
                'arrow': 'end'
            })
        self._draw_lines(lines)

    def draw_residuals(self, color=None, scale=None, tol=None):
        """Draw residual-force arrows at the non-anchored vertices."""
        self.clear_residuals()
        color = color or self.settings['color.residuals']
        scale = scale or self.settings['scale.residuals']
        tol = tol or self.settings['tol.residuals']
        tol2 = tol**2
        lines = []
        for key, attr in self.mesh.vertices(True):
            if attr['is_anchor']:
                continue
            r = rx, ry, rz = self.mesh.vertex_attributes(key, ('rx', 'ry', 'rz'))
            if length_vector_sqrd(r) <= tol2:
                continue
            sp = x, y, z = self.mesh.vertex_coordinates(key)
            ep = x + scale * rx, y + scale * ry, z + scale * rz
            lines.append({
                'start': sp,
                'end': ep,
                'name': "{}.residual.{}".format(self.mesh.name, key),
                'color': color,
                'arrow': 'end'
            })
        self._draw_lines(lines)

    def draw_loads(self, color=None, scale=None):
        """Draw applied point-load arrows (px, py, pz) at free vertices."""
        self.clear_loads()
        color = color or self.settings['color.loads']
        scale = scale or self.settings['scale.loads']
        lines = []
        for key, attr in self.mesh.vertices(True):
            if attr['is_anchor']:
                continue
            px, py, pz = self.mesh.vertex_attributes(key, ('px', 'py', 'pz'))
            sp = x, y, z = self.mesh.vertex_coordinates(key)
            ep = x + scale * px, y + scale * py, z + scale * pz
            lines.append({
                'start': sp,
                'end': ep,
                'name': "{}.load.{}".format(self.mesh.name, key),
                'color': color,
                'arrow': 'end'
            })
        self._draw_lines(lines)

    def draw_selfweight(self, color=None, scale=None):
        """Draw downward selfweight arrows computed from tributary area,
        thickness 't', and the cablenet's 'density' attribute.

        Does nothing if no density is set.
        """
        self.clear_selfweight()
        color = color or self.settings['color.selfweight']
        scale = scale or self.settings['scale.selfweight']
        rho = self.mesh.attributes['density']
        if not rho:
            return
        lines = []
        for key, attr in self.mesh.vertices(True):
            if attr['is_anchor']:
                continue
            thickness = self.mesh.vertex_attribute(key, 't')
            area = self.mesh.vertex_area(key)
            volume = area * thickness
            weight = rho * volume
            sp = x, y, z = self.mesh.vertex_coordinates(key)
            # Selfweight acts straight down (negative z).
            ep = x, y, z - scale * weight
            lines.append({
                'start': sp,
                'end': ep,
                'name': "{}.selfweight.{}".format(self.mesh.name, key),
                'color': color,
                'arrow': 'end'
            })
        self._draw_lines(lines)

    def draw_stress(self, scale=None):
        """Draw edges as constant-radius cylinders colored by stress level."""
        self.clear_stress()
        scale = scale or self.settings['scale.stress']
        stress = [self.mesh.stress(key) for key in self.mesh.edges_where({'is_edge': True})]
        # Map the stress range onto an RGB colormap.
        cmap = Colormap(stress, 'rgb')
        lines = []
        for index, (u, v) in enumerate(self.mesh.edges_where({'is_edge': True})):
            sp, ep = self.mesh.edge_coordinates(u, v)
            lines.append({
                'start': sp,
                'end': ep,
                'radius': scale,
                'color': cmap(stress[index]),
                'name': "{}.stress.{}-{}".format(self.mesh.name, u, v)
            })
        self._draw_cylinders(lines)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
    # Module is import-only; nothing to run directly.
    pass
| StarcoderdataPython |
4958397 | <gh_stars>0
from django.apps import AppConfig
class AcheveMgtConfig(AppConfig):
    """Django app configuration for the achievement/grade-management app."""
    # Dotted module path Django uses to locate the app.
    name = 'acheve_mgt'
    # Display name in the Django admin (Chinese: "grade management").
    verbose_name = '成绩管理'
| StarcoderdataPython |
6610539 | <gh_stars>0
from urllib.request import urlopen
from bs4 import BeautifulSoup
import bs4
import datetime
from datetime import date
import calendar
import ssl
import re
import openpyxl
from algorithms import checkResult
import getpass
# TLS context with certificate validation disabled so urlopen accepts the
# target site's certificate chain as-is.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Today's date rendered like "7-Mar-2021" (no zero padding on the day),
# matching the date format used on the scraped page.
cdate = date.today()
day = "{}-{}-{}".format(cdate.day, calendar.month_name[cdate.month][:3], cdate.year)
def scripList(internetTime):
    """Scrape moneycontrol's board-meetings page for companies meeting today.

    Parameters
    ----------
    internetTime : float
        Running total of seconds spent on network I/O; returned increased by
        the time this call spent waiting on the request.

    Returns
    -------
    (scrip, internetTime)
        ``scrip`` is a list of ``[display name, scrip code]`` pairs for rows
        whose meeting date equals the module-level ``day`` string.
    """
    url = 'http://www.moneycontrol.com/stocks/marketinfo/meetings.php?opttopic=brdmeeting'
    tstart = datetime.datetime.now().timestamp()
    html = urlopen(url, context=ctx).read()
    tend = datetime.datetime.now().timestamp()
    internetTime = internetTime + (tend - tstart)
    soup = BeautifulSoup(html, 'html.parser')
    tags = soup('tr')
    scrip = []
    pattern = '/stockpricequote/'
    # Last company link seen; initialised so a malformed page (date cell
    # before any company cell) cannot raise NameError.
    temp_scrip = None
    for tag in tags:
        for each in tag:
            if type(each) == bs4.element.Tag:
                if type(each.contents[0]) == bs4.element.Tag:
                    url = each.contents[0].get('href', None)
                    if url is None or pattern not in url:
                        break
                    # Scrip code is the last path component of the quote URL.
                    temp = re.findall('/stockpricequote/[a-z 0-9]*/[a-z 0-9]*/([^/]+)', str(url))
                    if len(temp) == 0:
                        break
                    temp_scrip = [each.contents[0].contents[0].contents[0], temp[0]]
            else:
                # Stop scanning this row at the first non-tag child.
                break
            # BUG FIX: the original wrote `type(x == y)` — i.e. type(bool),
            # which is always truthy — so the isinstance-style guard never
            # actually filtered anything. Detect plain-text (date) cells
            # explicitly.
            if isinstance(each.contents[0], bs4.element.NavigableString):
                if each.contents[0] == day and temp_scrip is not None:
                    scrip.append(temp_scrip)
    return scrip, internetTime
def getNameInSmall(name):
    """Return *name* lower-cased with all spaces removed (URL slug form)."""
    return name.lower().replace(' ', '')
def findMonth():
    """Label of the most recently completed quarter, e.g. "Mar '21".

    Reads the module-level ``cdate``. Jan-Mar maps to December of the
    previous year; the other quarters map to their closing month
    (Mar/Jun/Sep) of the current year.
    """
    if cdate.month <= 3:
        # Previous quarter ended in December of last year.
        return (calendar.month_name[12])[:3] + " '" + str(cdate.year - 2001)
    if cdate.month <= 6:
        return (calendar.month_name[3])[:3] + " '" + str(cdate.year - 2000)
    if cdate.month <= 9:
        return (calendar.month_name[6])[:3] + " '" + str(cdate.year - 2000)
    return (calendar.month_name[9])[:3] + " '" + str(cdate.year - 2000)
def findingResult(scrip, fCount, internetTime):
    """For each scraped company, fetch its quarterly-results page and, when the
    current quarter's data is found, append checkResult's verdict to its entry.

    Returns the (mutated) scrip list, the updated found-count, and the
    accumulated network time in seconds.
    """
    fix_url_start = 'http://www.moneycontrol.com/financials/'
    fix_url_end = '/results/quarterly-results/'
    ind = 0
    month = findMonth()
    for val in scrip:
        ind = ind + 1
        print(str(ind) + '. Finding result for : ', val[0])
        small_name = getNameInSmall(val[0])
        code = val[1]
        url = fix_url_start + small_name + fix_url_end + code + '#' + code
        tstart = datetime.datetime.now().timestamp()
        # NOTE(review): bare except silently skips any company whose page
        # fails to load — consider logging the failure.
        try:
            html = urlopen(url, context=ctx).read()
        except:
            continue
        tend = datetime.datetime.now().timestamp()
        internetTime = internetTime + (tend - tstart)
        soup = BeautifulSoup(html, 'html.parser')
        tags = soup('tr')
        # State machine over table cells:
        #   st4 - current quarter's column header seen
        #   st1 - collecting the next 3 "Net Sales" cells
        #   st2 - inside the EPS section / collecting 3 "Basic EPS" cells
        #   st3 - both series complete, row data usable
        netSales = []
        st1 = False
        st2 = False
        st3 = False
        st4 = False
        eps = []
        for tag in tags:
            for each in tag:
                if(type(each) == bs4.element.Tag):
                    # NOTE(review): bare except hides parsing errors for odd
                    # cells; it also masks NameError if `i` is read before a
                    # header cell initialised it.
                    try:
                        if(len(each.contents) > 0):
                            if(each.contents[0] == month):
                                st4 = True
                            if(st4):
                                if(each.contents[0] == 'Net Sales/Income from operations'):
                                    st1 = True
                                    i = 0
                                elif(st1 and i < 3):
                                    netSales.append(each.contents[0])
                                    i = i+1
                                    if(i == 3):
                                        st1 = False
                                elif(each.contents[0] == 'EPS After Extra Ordinary'):
                                    st2 = True
                                elif(st2 and each.contents[0] == 'Basic EPS'):
                                    i = 0
                                elif(st2 and i < 3):
                                    eps.append(each.contents[0])
                                    i = i + 1
                                    if(i == 3):
                                        st2 = False
                                        st3 = True
                                        break
                    except:
                        pass
            # Once both series are complete, score them and move to the
            # next company.
            if(st3):
                result = checkResult(netSales, eps)
                val.append(result)
                fCount = fCount + 1
                break
    return scrip, fCount, internetTime
def main():
    """Entry point: find today's announcing scrips, scrape each quarterly
    result, and export the Yes/No verdicts to an Excel file on the desktop.

    Relies on module globals: scripList, findingResult, openpyxl, getpass,
    datetime and ``day`` (today's date string used in file/sheet names).
    """
    internetTime = 0.0  # cumulative seconds spent on network I/O
    fCount = 0          # number of scrips whose result could be parsed
    startTime = datetime.datetime.now().timestamp()
    print('Hey!')
    print('Finding list of scrips announced today.')
    scrip, internetTime = scripList(internetTime)
    length = len(scrip)
    if(length > 0):
        print('Total ', length, ' companies announced results today.')
        print('Finding Results')
        result, fCount, internetTime = findingResult(scrip, fCount, internetTime)
        print('Results found.')
        # Typo fix: 'fiile' -> 'file' in the user-facing message.
        print('Exporting results to excel file.....')
        wb = openpyxl.Workbook()
        sheet = wb.active
        sheet.title = 'Result ' + day
        # Column A: company name; column B: verdict (only present when
        # findingResult appended one, i.e. len(val) > 2).
        for i, val in enumerate(result, start=1):
            sheet['A' + str(i)] = val[0]
            if(len(val) > 2):
                sheet['B' + str(i)] = 'Yes' if val[2] else 'No'
        wb.save('/home/'+getpass.getuser()+'/Desktop/Result ' + day + '.xlsx')
        print('Result exported. File saved as Result ' + day + '.xlsx at desktop.')
    endTime = datetime.datetime.now().timestamp()
    totalTime = ((endTime - startTime)/60)
    internetTime = internetTime / 60
    print('Total time taken by program to find result of ', fCount, 'is : ', totalTime, ' minutes.')
    print('Total time taken for getting data from internet : ', internetTime, ' minutes.')
    print('Shutting Down.....!!')
if __name__ == "__main__":
main() | StarcoderdataPython |
3378666 | # coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.api_client import ApiClient, Endpoint
from datadog_api_client.v2.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from datadog_api_client.v2.model.api_error_response import APIErrorResponse
from datadog_api_client.v2.model.service_create_request import ServiceCreateRequest
from datadog_api_client.v2.model.service_response import ServiceResponse
from datadog_api_client.v2.model.service_update_request import ServiceUpdateRequest
from datadog_api_client.v2.model.services_response import ServicesResponse
class ServicesApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # NOTE(review): indentation restored from a whitespace-mangled source.
        # In the upstream generated client every ``__<operation>`` closure and
        # its matching ``self.<operation> = Endpoint(...)`` assignment live
        # inside __init__ — confirm against the generator output.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

        def __create_service(self, body, **kwargs):
            """Create a new service  # noqa: E501

            Creates a new service.  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.create_service(body, async_req=True)
            >>> result = thread.get()

            Args:
                body (ServiceCreateRequest): Service Payload.

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                ServiceResponse
                    If the method is called asynchronously, returns the request
                    thread.
            """
            # Fill in client-behavior defaults, then delegate to the shared
            # HTTP machinery. NOTE(review): ``self`` here is whatever the
            # Endpoint passes when invoking this callable (presumably the
            # Endpoint itself, which provides call_with_http_info) — confirm
            # in the Endpoint implementation.
            kwargs['async_req'] = kwargs.get('async_req', False)
            kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
            kwargs['_preload_content'] = kwargs.get('_preload_content', True)
            kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
            kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
            kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['body'] = body
            return self.call_with_http_info(**kwargs)

        self.create_service = Endpoint(
            settings={
                'response_type': (ServiceResponse,),
                'auth': ['apiKeyAuth', 'appKeyAuth'],
                'endpoint_path': '/api/v2/services',
                'operation_id': 'create_service',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': ['body',],
                'required': ['body',],
                'nullable': [],
                'enum': [],
                'validation': []
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {'body': (ServiceCreateRequest,),},
                'attribute_map': {},
                'location_map': {'body': 'body',},
                'collection_format_map': {}
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': ['application/json']
            },
            api_client=api_client,
            callable=__create_service
        )

        def __delete_service(self, service_id, **kwargs):
            """Delete an existing service  # noqa: E501

            Deletes an existing service.  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.delete_service(service_id, async_req=True)
            >>> result = thread.get()

            Args:
                service_id (str): The ID of the service.

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                None
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get('async_req', False)
            kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
            kwargs['_preload_content'] = kwargs.get('_preload_content', True)
            kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
            kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
            kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['service_id'] = service_id
            return self.call_with_http_info(**kwargs)

        self.delete_service = Endpoint(
            settings={
                'response_type': None,
                'auth': ['apiKeyAuth', 'appKeyAuth'],
                'endpoint_path': '/api/v2/services/{service_id}',
                'operation_id': 'delete_service',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': ['service_id',],
                'required': ['service_id',],
                'nullable': [],
                'enum': [],
                'validation': []
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {'service_id': (str,),},
                'attribute_map': {'service_id': 'service_id',},
                'location_map': {'service_id': 'path',},
                'collection_format_map': {}
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': [],
            },
            api_client=api_client,
            callable=__delete_service
        )

        def __get_service(self, service_id, **kwargs):
            """Get details of a service  # noqa: E501

            Get details of a service. If the `include[users]` query parameter is provided, the included attribute will contain the users related to these services  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.get_service(service_id, async_req=True)
            >>> result = thread.get()

            Args:
                service_id (str): The ID of the service.

            Keyword Args:
                include (str): Specifies which types of related objects should be included in the response.. [optional] if omitted the server will use the default value of "users"
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                ServiceResponse
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get('async_req', False)
            kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
            kwargs['_preload_content'] = kwargs.get('_preload_content', True)
            kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
            kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
            kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['service_id'] = service_id
            return self.call_with_http_info(**kwargs)

        self.get_service = Endpoint(
            settings={
                'response_type': (ServiceResponse,),
                'auth': ['apiKeyAuth', 'appKeyAuth'],
                'endpoint_path': '/api/v2/services/{service_id}',
                'operation_id': 'get_service',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': ['service_id', 'include',],
                'required': ['service_id',],
                'nullable': [],
                'enum': ['include',],
                'validation': []
            },
            root_map={
                'validations': {},
                'allowed_values': {
                    ('include',): {"USERS": "users"},
                },
                'openapi_types': {
                    'service_id': (str,),
                    'include': (str,),
                },
                'attribute_map': {
                    'service_id': 'service_id',
                    'include': 'include',
                },
                'location_map': {
                    'service_id': 'path',
                    'include': 'query',
                },
                'collection_format_map': {}
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': [],
            },
            api_client=api_client,
            callable=__get_service
        )

        def __get_services(self, **kwargs):
            """Get a list of all services  # noqa: E501

            Get all services for the requesting user's organization. If the `include[users]` query parameter is provided, the included attribute will contain the users related to these services.  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.get_services(async_req=True)
            >>> result = thread.get()

            Keyword Args:
                include (str): Specifies which types of related objects should be included in the response.. [optional] if omitted the server will use the default value of "users"
                page_size (int): Size for a given page.. [optional] if omitted the server will use the default value of 10
                page_offset (int): Specific offset to use as the beginning of the returned page.. [optional] if omitted the server will use the default value of 0
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                ServicesResponse
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get('async_req', False)
            kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
            kwargs['_preload_content'] = kwargs.get('_preload_content', True)
            kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
            kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
            kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
            kwargs['_host_index'] = kwargs.get('_host_index')
            return self.call_with_http_info(**kwargs)

        self.get_services = Endpoint(
            settings={
                'response_type': (ServicesResponse,),
                'auth': ['apiKeyAuth', 'appKeyAuth'],
                'endpoint_path': '/api/v2/services',
                'operation_id': 'get_services',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': ['include', 'page_size', 'page_offset',],
                'required': [],
                'nullable': [],
                'enum': ['include',],
                'validation': []
            },
            root_map={
                'validations': {},
                'allowed_values': {
                    ('include',): {"USERS": "users"},
                },
                'openapi_types': {
                    'include': (str,),
                    'page_size': (int,),
                    'page_offset': (int,),
                },
                'attribute_map': {
                    'include': 'include',
                    'page_size': 'page[size]',
                    'page_offset': 'page[offset]',
                },
                'location_map': {
                    'include': 'query',
                    'page_size': 'query',
                    'page_offset': 'query',
                },
                'collection_format_map': {}
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': [],
            },
            api_client=api_client,
            callable=__get_services
        )

        def __update_service(self, service_id, body, **kwargs):
            """Update an existing service  # noqa: E501

            Updates an existing service. Only provide the attributes which should be updated as this request is a partial update.  # noqa: E501
            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.update_service(service_id, body, async_req=True)
            >>> result = thread.get()

            Args:
                service_id (str): The ID of the service.
                body (ServiceUpdateRequest): Service Payload.

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (float/tuple): timeout setting for this request. If one
                    number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done one the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done one the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                ServiceResponse
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get('async_req', False)
            kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
            kwargs['_preload_content'] = kwargs.get('_preload_content', True)
            kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
            kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
            kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['service_id'] = service_id
            kwargs['body'] = body
            return self.call_with_http_info(**kwargs)

        self.update_service = Endpoint(
            settings={
                'response_type': (ServiceResponse,),
                'auth': ['apiKeyAuth', 'appKeyAuth'],
                'endpoint_path': '/api/v2/services/{service_id}',
                'operation_id': 'update_service',
                'http_method': 'PATCH',
                'servers': None,
            },
            params_map={
                'all': ['service_id', 'body',],
                'required': ['service_id', 'body',],
                'nullable': [],
                'enum': [],
                'validation': []
            },
            root_map={
                'validations': {},
                'allowed_values': {},
                'openapi_types': {
                    'service_id': (str,),
                    'body': (ServiceUpdateRequest,),
                },
                'attribute_map': {
                    'service_id': 'service_id',
                },
                'location_map': {
                    'service_id': 'path',
                    'body': 'body',
                },
                'collection_format_map': {}
            },
            headers_map={
                'accept': ['application/json'],
                'content_type': ['application/json']
            },
            api_client=api_client,
            callable=__update_service
        )
| StarcoderdataPython |
11250253 | <gh_stars>1-10
import os
import sys
import argparse
from shutil import copyfile, copytree, rmtree
GODOT_PATH = os.environ["GODOT_PATH"]
def patch_script(godot_root, filename):
    """Apply a git patch to the godot checkout with ``git am``.

    Args:
        godot_root: path to the godot source tree (a git checkout).
        filename: patch file path, relative to the current working directory.
    """
    import subprocess  # local import: leaves the module import block untouched
    patch_path = os.path.join(os.getcwd(), filename)
    # subprocess with an argument list avoids the shell entirely, so paths
    # containing spaces or shell metacharacters work — the original
    # os.system("git am %s" % patch_path) did not. cwd= replaces the
    # chdir/restore dance.
    subprocess.run(["git", "am", patch_path], cwd=godot_root)
def compile_godot(godot_root, platform='x11', target='release_debug', bits=64):
    """Build Godot twice with scons inside *godot_root*: once with the editor
    tools (tools=yes) and once as an export template (tools=no). The caller's
    working directory is restored afterwards."""
    previous_dir = os.getcwd()
    os.chdir(godot_root)
    for tools in ('yes', 'no'):
        os.system("scons platform=%s tools=%s target=%s bits=%d" % (platform, tools, target, bits))
    os.chdir(previous_dir)
def install_module(godot_root, compile=True, rewrite=False):
    """Copy the GodotSharedMemory module into the engine tree and optionally build.

    Args:
        godot_root: path to the godot source tree.
        compile: when True, rebuild the engine after installing.
                 (Parameter name shadows the ``compile`` builtin, but it is part
                 of the public interface and therefore kept.)
        rewrite: when True, replace any previously installed copy.
    """
    module_dir = os.path.join(godot_root, 'modules/GodotSharedMemory')
    if rewrite and os.path.exists(module_dir):
        rmtree(module_dir)
    if not os.path.exists(module_dir):
        copytree('GodotModule', module_dir)
    if compile:
        compile_godot(godot_root, platform='x11', target='release_debug', bits=64)
def install_python_module():
    """Run ``python setup.py install`` inside PythonModule/, restoring the CWD."""
    saved_dir = os.getcwd()
    os.chdir('PythonModule')
    os.system('python setup.py install')
    os.chdir(saved_dir)
if __name__=='__main__':
    # patch_script(godot_root=GODOT_PATH, filename="PhysicsPatch/patch")
    # Install (and rebuild) the GodotSharedMemory engine module, then install
    # the accompanying Python bindings.
    install_module(godot_root=GODOT_PATH, compile=True, rewrite=True)
    install_python_module()
| StarcoderdataPython |
5125358 | import pandas as pd
import numpy as np
import igraph as ig
from scipy.spatial import distance_matrix
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
import scipy
from copy import deepcopy
import pprint
# Mutable module-level state recording every candidate evaluated during the
# optimization runs (reset by create_complete_plot / find_stad_optimum).
history_x = []  # number of extra (non-MST) links per evaluation
history_y = []  # correlation achieved per evaluation
history_graph = []  # edge list of the proposal graph per evaluation
debug = False  # set True for verbose tracing
class MyTakeStep(object):
    """Custom basinhopping step: a Gaussian jump around the current value,
    clamped to the closed interval [min_links, max_links]."""

    def __init__(self, stepsize=1000, min_links=0, max_links=99999):
        self.stepsize = stepsize
        self.min_links = min_links
        self.max_links = max_links

    def __call__(self, x):
        # Draw the proposal, truncate to int, then clamp into the valid range.
        proposal = int(np.random.normal(x, self.stepsize))
        return max(self.min_links, min(self.max_links, proposal))
def normalize(x, domain_min, domain_max, range_min, range_max):
    '''Linearly map *x* from [domain_min, domain_max] onto
    [range_min, range_max] (like Processing/P5's map()).'''
    scale = (range_max - range_min) / (domain_max - domain_min)
    return scale * (x - domain_min) + range_min
def normalize_matrix(matrix):
    """Scale *matrix* into [0, 1] by dividing by its largest entry."""
    return matrix / np.max(matrix)
def get_bin(x, bin_size):
    """Return *x* scaled by *bin_size* (true division — not an integer index)."""
    return x / bin_size
def assign_bins(data, nr_bins):
    """Add a 'bin' column to *data* by cutting the 'lens' column into
    *nr_bins* equal-width bins labelled 0..nr_bins-1. Mutates and returns
    the same DataFrame."""
    data['bin'] = list(pd.cut(data['lens'], nr_bins, labels=range(nr_bins)))
    return data
def create_dist_matrix(values):
    # Pairwise Euclidean distance matrix (n x n) over the row vectors of
    # *values* (sklearn.metrics.pairwise.euclidean_distances).
    return euclidean_distances(values)
def create_network(x, mst, non_mst):
    '''create_network
    Generate a network with a given number of non-mst links

    Args:
        x: number of extra links to add on top of the MST.
        mst: link rows (from, to, weight, ...) that form the MST.
        non_mst: remaining link rows, sorted by ascending weight, so the
            first x rows are the "best" candidates.
    Returns:
        an igraph Graph with 'distance', 'similarity' and 'color' edge attrs.
    '''
    extras = non_mst.take(range(0,x), axis=0)
    network = np.vstack((mst, extras))
    links = network[:,[0,1]].astype(int)
    nodes = np.unique(np.hstack((links[:,0], links[:,1])))
    link_tuples = list(map(tuple, links))
    network_graph = ig.Graph()
    network_graph.add_vertices(nodes)
    network_graph.add_edges(link_tuples)
    # NOTE(review): column 2 is treated as a *similarity* here, while
    # create_mst treats the same column as a *distance* — confirm which is
    # intended.
    similarities = network[:,2]
    distances = np.max(similarities) - similarities
    network_graph.es["distance"] = distances
    network_graph.es["similarity"] = similarities
    network_graph.es['color'] = np.repeat('black',len(distances))
    # For debugging => remove afterwards
    # Colour non-MST edges red and MST edges blue.
    for e in network_graph.es:
        if len(non_mst[(non_mst[:,0] == e.tuple[0])
                       & (non_mst[:,1] == e.tuple[1])]) == 1:
            e['color'] = 'red'
        else:
            e['color'] = 'blue'
    return network_graph
def dist2sim(dist_matrix):
    """Convert distances to similarities by reflecting about the maximum value."""
    return np.max(dist_matrix) - dist_matrix
def alter_dist_matrix_1step(dist_matrix, data):
    '''alter_dist_matrix_1step
    Takes the full distance matrix (numpy array), and:
      * leaves distances between points within the same bin and adjacent bins
        alone
      * pushes distances between points in _nonadjacent_ bins to a sentinel of
        100x the maximum distance (the original docstring said "NA", but the
        code has always written 100*max — kept for behavioral compatibility)

    Args:
        dist_matrix: n x n numpy distance matrix (left untouched; a copy is
            returned).
        data: DataFrame with one row per point, positionally aligned with
            dist_matrix, carrying an integer-castable 'bin' column.
    Returns:
        a modified copy of dist_matrix.
    '''
    result = deepcopy(dist_matrix)
    max_value = np.max(dist_matrix)
    # Vectorized replacement of the original O(n^2) pandas .iloc row loop:
    # compute all pairwise bin gaps at once and mask the far-apart pairs.
    bins = data['bin'].astype(int).to_numpy()
    far_apart = np.abs(bins[:, None] - bins[None, :]) > 1
    result[far_apart] = 100 * max_value
    return result
def create_mst(dist_matrix):
    '''Build the minimum spanning tree over the full distance matrix.

    Returns [mst_graph, mst, non_mst, cmdm, dm_distances]:
        mst_graph     igraph Graph of the MST
        mst           link rows flagged as MST members
        non_mst       remaining link rows, sorted by ascending weight
        cmdm          all link rows: (from, to, weight, mst-flag, order)
        dm_distances  the weight column of cmdm, ordered by (from, to)
    '''
    pd_dist_matrix = pd.DataFrame(dist_matrix)
    # Transform distance matrix
    # input: nxn distance matrix
    # output: numpy array
    # columns: from to weight mst order
    # 22 118 0.817978 0 0
    # 14 118 0.831283 0 1
    # 22 122 0.832234 0 2
    # 33 118 0.836029 0 3
    # 22 108 0.836048 0 4
    # 'mst' all 0, but will become 1 for all links in the MST
    # 'order' = order in which to add links (because df is sorted by value)
    pd_melted_dist_matrix = pd_dist_matrix.stack()
    pd_melted_dist_matrix = pd_melted_dist_matrix.reset_index()
    mdm = pd_melted_dist_matrix.values  # mdm = melted_dist_matrix
    # Only keep those distances where from_node < to_node (symmetrical distance matrix)
    cmdm = mdm[mdm[:,0] < mdm[:,1]]
    # Add column of zeros, indicating that none of the links are part of the MST
    # (will be corrected further down)
    z = np.zeros((len(cmdm),4))
    z[:,:-1] = cmdm
    cmdm = z
    # Add column representing the order in which links are added in addition to the MST
    # Depends on the weight
    cmdm = cmdm[cmdm[:,2].argsort()[::-1]]  # order by weight
    # cmdm = cmdm[cmdm[:,2].argsort()[::]] # order by weight
    a = np.array(range(len(cmdm))).reshape(len(cmdm),1)
    cmdm = np.hstack((cmdm, a))
    # Order by from, then by to (columns 0 and 1), so that we can easily compare with the unit-graph
    # see http://numpy-discussion.10968.n7.nabble.com/how-to-do-a-proper-2-column-sort-on-a-2-dimensional-array-td19927.html
    i = np.lexsort((cmdm[:,1], cmdm[:,0]))
    cmdm = cmdm[i]
    # Mark the MST links
    # e.g. link 33->118 is in MST
    ## columns: from to weight mst order
    # 22 118 0.817978 0 0
    # 14 118 0.831283 1 1
    # 22 122 0.832234 1 2
    # 33 118 0.836029 0 3
    # 22 108 0.836048 1 4
    if ( debug ):
        print("Working on the network...")
    links = cmdm[:,[0,1,2]]
    edges = links[:,[0,1]].astype(int)
    vertices = np.unique(np.hstack((edges[:,0], edges[:,1])))
    edge_tuples = list(map(tuple, edges))
    full_graph = ig.Graph()
    full_graph.add_vertices(vertices)
    full_graph.add_edges(edge_tuples)
    distances = links[:,2]
    similarities = np.max(distances) - distances
    full_graph.es["distance"] = distances
    full_graph.es["similarity"] = similarities
    mst_graph = full_graph.spanning_tree(weights = distances)
    if ( debug ):
        print("Updating cmdm with mst information")
        print("Number of edits to make: ", str(len(mst_graph.es)))
    for e in mst_graph.es:
        # Flag this MST edge's row in cmdm (O(n) lookup per edge).
        record_to_change = np.where((cmdm[:,0] == e.tuple[0]) & (cmdm[:,1] == e.tuple[1]))[0][0]
        cmdm[record_to_change][3] = 1
    non_mst = cmdm[np.where(cmdm[:,3] == 0)]
    non_mst = non_mst[non_mst[:,2].argsort()]
    mst = cmdm[np.where(cmdm[:,3] == 1)]
    # dm_distances array = distance matrix distances = what to compare each proposal to
    dm_distances = cmdm[:,2]
    return [mst_graph, mst, non_mst, cmdm, dm_distances]
# def remove_mst_links_between_communities(mst_graph):
# a = mst_graph.community_walktrap(weights='distance',steps=20)
# def create_dist_matrix_for_connected_components(dist_matrix, data):
# return false
def objective_function(x, args):
    '''objective_function
    Function to be minimized: distance between original distance matrix and graph-based distance matrix

    Args:
        x: candidate number of extra (non-MST) links; int()-coerced.
        args: dict with 'mst', 'non_mst', 'cmdm', 'dm_distances' as produced
            by create_mst ('cmdm' is accepted but not used here).
    Returns:
        1 - |correlation|, so a perfect correlation minimizes to 0.
    Side effects: appends to the module-level history_x/history_y/history_graph.
    '''
    global history_x
    global history_y
    global history_graph
    # global mst
    # global non_mst
    # global cmdm
    global debug
    mst=args['mst']
    non_mst=args['non_mst']
    cmdm=args['cmdm']
    dm_distances=args['dm_distances']
    i = int(x)
    if ( debug ):
        print("Running objective function for: ", str(i))
    proposal_graph = create_network(i, mst, non_mst)
    # NOTE(review): shortest_paths() is called without weights, so these are
    # hop counts, not weighted path lengths — confirm that is intended.
    proposal_dist = proposal_graph.shortest_paths()
    l = len(proposal_dist)
    # Flatten the upper triangle into (from, to, graph_distance) rows.
    graph_distances = []
    for from_node in range(l):
        for to_node in range(from_node + 1, l):
            graph_distances.append([from_node, to_node, proposal_dist[from_node][to_node]])
    graph_distances = np.asarray(graph_distances)
    # Order by 'from' and 'to'
    s = np.lexsort((graph_distances[:,1], graph_distances[:,0]))
    graph_distances = graph_distances[s]
    corr = abs(np.corrcoef(graph_distances[:,2], dm_distances)[0][1])
    if ( debug ):
        print("nr edges = ", str(i), " -> correlation = ", str(corr))
    history_graph.append(list(map(lambda e:e.tuple, proposal_graph.es)))
    history_x.append(i)
    history_y.append(corr)
    return (1-corr) # Because basinhopping MINIMIZES
def create_complete_plot(dist_matrix, res = 50):
    '''create_complete_plot
    Creates overview picture of effect of nr of non-mst links on objective function

    Evaluates the objective function at *res* evenly spaced link counts and
    returns the recorded traces.

    Args:
        dist_matrix: full pairwise distance matrix.
        res: number of sample points across the search space.
    Returns:
        [history_x, history_y, history_graph] — link counts, correlations,
        and proposal-graph edge lists for every evaluation.
    '''
    global history_x
    global history_y
    global history_graph
    global debug
    history_x = []
    history_y = []
    history_graph = []
    resolution = res # How many points do we want to compute?
    mst_graph, mst, non_mst, cmdm, dm_distances = create_mst(dist_matrix)
    # Search space: how many extra (non-MST) links can possibly be added.
    search_space = list(range(len(cmdm) - len(mst)))
    stepsize = int((search_space[-1]-search_space[0])/resolution)
    if ( debug ):
        print(int(search_space[0]))
        print(int(search_space[-1]))
        print(stepsize)
    args = {'mst':mst, 'non_mst':non_mst, 'cmdm':cmdm, 'dm_distances':dm_distances}
    for i in range(0,stepsize*resolution,stepsize):
        # NOTE(review): objective_function immediately int()s its argument,
        # so this str() round-trip looks redundant — confirm before removing.
        i = str(i)
        # if ( debug ):
        #     print("Running objective function for: ", str(i))
        objective_function(i, args)
    return [history_x, history_y, history_graph]
def find_stad_optimum(dist_matrix, T=0.5, interval=100):
    '''find_stad_optimum
    Finds the optimum nr of links

    Runs scipy basinhopping (with MyTakeStep) over the number of extra
    non-MST links, maximizing the correlation computed by objective_function.

    Args:
        dist_matrix: full pairwise distance matrix.
        T: basinhopping "temperature" for accepting worse hops.
        interval: basinhopping stepsize-adjustment interval.
    Returns:
        [best_nr_links, best_objective_value].
    Side effects: resets the module-level history_x/history_y/history_graph.
    '''
    global history_x
    global history_y
    global history_graph
    history_x = []
    history_y = []
    history_graph = []
    global debug
    mst_graph, mst, non_mst, cmdm, dm_distances = create_mst(dist_matrix)
    search_space = list(range(len(cmdm) - len(mst)))
    # NOTE(review): this stepsize is a float (int(...)/100), unlike
    # create_complete_plot's int((...)/resolution) — confirm intended.
    stepsize = int(search_space[-1]-search_space[0])/100
    print("Stepsize: ", str(stepsize))
    print("Search space min: ", str(search_space[0]))
    print("Search space max: ", str(search_space[-1]))
    print("history_y: ", history_y)
    # 'args' is forwarded by the local minimizer to objective_function(x, args).
    minimizer_kwargs = {'args':{'mst':mst, 'non_mst':non_mst, 'cmdm':cmdm, 'dm_distances':dm_distances}}
    result = scipy.optimize.basinhopping(
        objective_function, 0,
        minimizer_kwargs=minimizer_kwargs, #disp=True,
        T=T, interval=interval,
        take_step=MyTakeStep(stepsize=stepsize, min_links=search_space[0], max_links=search_space[-1]))
    print([int(result.x[0]), result.fun])
    return [int(result.x[0]), result.fun]
# def alter_dist_matrix_phase1(dist_matrix, data):
# '''create_dist_matrix_for_bins
# Takes the full distance matrix, and:
# * leaves distances between points within the same bin alone
# * increases distances between points in _adjacent_ bins with max value
# * sets distances between points in _nonadjacent_ bins to NA
# '''
# dist_matrix_for_bins = deepcopy(dist_matrix)
# max_idx = len(dist_matrix)
# # Should max_value be the maximal distance within the bins, or overall?
# max_value = np.max(dist_matrix)
# # max_value = 0
# # bins = range(0, np.max(data['bin']+1))
# # for b in bins:
# # elements = list(data.loc[data['bin'] == b]['id'])
# # for i in range(len(elements)):
# # for j in range(i+1,len(elements)):
# # if dist_matrix[elements[i]][elements[j]] > max_value:
# # max_value = dist_matrix[elements[i]][elements[j]]
# #### end max_value
# for i in range(max_idx):
# record_i = data.iloc[[i]]
# for j in range(i+1, max_idx):
# record_j = data.iloc[[j]]
# dist = abs(int(record_i['bin']) - int(record_j['bin']))
# if dist == 1:
# dist_matrix_for_bins[i][j] += 2*max_value
# dist_matrix_for_bins[j][i] += 2*max_value
# elif dist > 1:
# dist_matrix_for_bins[i][j] = None
# dist_matrix_for_bins[j][i] = None
# return dist_matrix_for_bins
# def add_communities_to_data(communities, data):
# data['community'] = [None] * len(data)
# for i, comm in enumerate(communities):
# for node in comm:
# data['community'][node] = i
# return data
# def split_connected_components(graph, bins, dist_matrix, communities, data):
# #### Remove bad links
# # Make communities a hash so that I can search quicker
# community2node_hash = {}
# node2community_hash = {}
# links_to_remove = []
# for i,comm in enumerate(communities):
# for node in comm:
# node2community_hash[node] = i
# community2node_hash[i] = node
# for e in graph.es:
# if not node2community_hash[e.tuple[0]] == node2community_hash[e.tuple[1]]:
# links_to_remove.append((e.tuple[0], e.tuple[1]))
# graph.delete_edges(links_to_remove)
# return graph
# def connect_connected_components(graph, bins, dist_matrix, communities, data):
# #### Remove bad links
# # Make communities a hash so that I can search quicker
# community2node_hash = {}
# node2community_hash = {}
# #### Link the connected components together again
# # To make searching easier, check which bin each connected comp is in
# bin2communities_hash = {}
# community2bin_hash = {}
# for b in bins:
# bin2communities_hash[b] = []
# for i, comm in enumerate(communities):
# b = data['bin'][comm[0]]
# bin2communities_hash[b].append(i)
# community2bin_hash[i] = b
# # print("community2bin_hash")
# # pprint.pprint(community2bin_hash)
# # print('---')
# # print("bin2communities_hash:")
# # pprint.pprint(bin2communities_hash)
# # For each community
# links_to_add = []
# for i, comm in enumerate(communities):
# # print('----------------')
# # which are the communities in the same and the adjacent bins
# # print("DEBUG: looking at community ", i)
# communities_to_search = []
# b = community2bin_hash[i]
# # print(" in bin:", b)
# communities_to_search = deepcopy(bin2communities_hash[b])
# # print(" communities initial: ")
# # print(sorted(set(communities_to_search)))
# if b > 0:
# # print(" bin-1:", b-1)
# communities_to_search += bin2communities_hash[b-1]
# # print(sorted(set(bin2communities_hash[b-1])))
# if b < max(bins):
# # print(" bin+1:", b+1)
# communities_to_search += bin2communities_hash[b+1]
# # print(sorted(set(bin2communities_hash[b+1])))
# communities_to_search = set(communities_to_search)
# nodes_to_search = []
# for c in communities_to_search:
# for n in communities[c]:
# nodes_to_search.append(n)
# # print(nodes_to_search)
# # print(len(nodes_to_search))
# # remove nodes from the comm we're looking at itself
# nodes_to_search = [x for x in nodes_to_search if x not in comm]
# # print("nr nodes_to_search: ", len(nodes_to_search))
# # print("DEBUG: new nodes to search: ")
# # print(nodes_to_search)
# # print(len(nodes_to_search))
# if not len(nodes_to_search) == 0:
# min_distance = 999999
# node_with_link = None
# for node in comm:
# distances = list(map(lambda x:dist_matrix[node][x], nodes_to_search))
# min_distance_for_node = np.min(distances)
# if min_distance_for_node < min_distance:
# min_distance_idx = np.argmin(distances)
# min_distance = min_distance_for_node
# node_with_link = node
# links_to_add.append((node_with_link, nodes_to_search[min_distance_idx]))
# # print("DEBUG: links to add")
# # print(links_to_add)
# graph.add_edges(links_to_add)
# return graph
# def alter_dist_matrix_phase2(dist_matrix, communities):
# '''
# alter_dist_matrix_phase2
# * takes original (!) distance matrix
# * adds max_value to all distances between points that are not in the same community
# '''
# dist_matrix_for_communities = deepcopy(dist_matrix)
# max_idx = len(dist_matrix)
# print(max_idx)
# max_value = np.max(dist_matrix[~np.isnan(dist_matrix)]) # need to filter out the Nan...
# print(max_value)
# for i in range(max_idx):
# for j in range(i+1, max_idx):
# found_in_same_community = False
# for comm in communities:
# if not found_in_same_community:
# # if all(elem in comm for elem in [i,j]): # check this!!!
# if set([i,j]) <= set(comm):
# found_in_same_community = True
# if not found_in_same_community:
# dist_matrix_for_communities[i][j] += 2*max_value
# dist_matrix_for_communities[j][i] += 2*max_value
# return dist_matrix_for_communities
# def alter_dist_matrix_phase2(dist_matrix, communities):
# dist_matrix_for_communities = deepcopy(dist_matrix)
# max_idx = len(dist_matrix)
# for i in range(max_idx):
# for j in range(i+1, max_idx):
# found_in_same_community = False
# for comm in communities:
# if not found_in_same_community:
# if all(elem in comm for elem in [i,j]): # check this!!!
# found_in_same_community = True
# if not found_in_same_community:
# dist_matrix_for_communities[i][j] *= 10
# dist_matrix_for_communities[j][i] *= 10
# return dist_matrix_for_communities
| StarcoderdataPython |
8045152 | from bs4 import BeautifulSoup
from bs4.dammit import EntitySubstitution
from bs4.element import Comment
from bs4.element import Doctype
from bs4.element import NavigableString
from bs4.element import ProcessingInstruction
from bs4.element import Tag
from zpretty.attributes import PrettyAttributes
from zpretty.text import endswith_whitespace
from zpretty.text import lstrip_first_line
from zpretty.text import rstrip_last_line
from zpretty.text import startswith_whitespace
class OpenTagException(Exception):
    """Raised for a known self-closing tag that was left open.

    The offending element is kept on ``self.el`` so callers can repair it.
    """

    def __init__(self, el):
        """el is a PrettyElement instance"""
        self.el = el

    def __str__(self):
        offending = self.el.context
        return "Known self closing tag %r is not closed" % (offending,)
def memo(f):
    """Memoize a one-argument method by stashing its result on the instance.

    The cached value lives in a per-function attribute, so different
    decorated methods on the same object do not collide.
    """
    key = "__zpretty_memo__" + f.__name__

    def wrapped(obj):
        # EAFP: the attribute only exists after the first call.
        try:
            return getattr(obj, key)
        except AttributeError:
            value = f(obj)
            setattr(obj, key, value)
            return value

    return wrapped
class PrettyElement(object):
    """A pretty element class that can render prettified html"""

    null_tag_name = "null_tag_name"
    # HTML void elements: these may never have content and are rendered
    # self closing.
    knownself_closing_elements = [
        "area",
        "base",
        "basefont",
        "br",
        "col",
        "embed",
        "frame",
        "hr",
        "img",
        "input",
        "link",
        "meta",
        "param",
        "source",
        "track",
        "wbr",
    ]
    indent = "  "
    attribute_klass = PrettyAttributes
    first_attribute_on_new_line = False
    before_closing_multiline = ""
    # Templates for the various open/self-closing tag layouts.
    self_closing_singleline_attributeless_template = "{prefix}<{tag} />"
    self_closing_singleline_attributefull_template = "{prefix}<{tag} {attributes} />"
    self_closing_multiline_template = "\n".join(
        ("{prefix}<{tag} {attributes}", "{prefix}{before_closing_multiline}/>")
    )
    start_tag_singleline_attributeless_template = "{prefix}<{tag}>"
    start_tag_singleline_attributefull_template = "{prefix}<{tag} {attributes}>"
    start_tag_multiline_template = "\n".join(
        ("{prefix}<{tag} {attributes}", "{prefix}{before_closing_multiline}>")
    )
    escaper = EntitySubstitution()

    def __init__(self, context, level=0):
        """Take something a (bs4) element and an indentation level"""
        self.context = context
        self.level = level

    def __str__(self):
        """Reuse the context method"""
        return str(self.context)

    def __repr__(self):
        """Try to make evident:
        - the element type
        - the level
        """
        if self.is_comment():
            tag = "!--"
        # BUGFIX: this must be an elif; a plain if/else always overwrote the
        # "!--" assigned for comments with self.tag, making that branch dead.
        elif self.is_text():
            tag = '""'
        else:
            tag = self.tag
        return "<pretty:{level}:{tag} />".format(tag=tag, level=self.level)

    def is_comment(self):
        """Check if this element is a comment"""
        return isinstance(self.context, Comment)

    def is_doctype(self):
        """Check if this element is a doctype"""
        return isinstance(self.context, Doctype)

    def is_text(self):
        """Check if this element is a text
        Also comments and processing instructions
        are instances of NavigableString,
        so we have to make additional checks
        """
        if not isinstance(self.context, NavigableString):
            return False
        if self.is_comment() or self.is_doctype() or self.is_processing_instruction():
            return False
        return True

    def is_tag(self):
        """Check if this element is a normal tag"""
        return isinstance(self.context, Tag)

    def is_self_closing(self):
        """Is this element self closing?"""
        if not self.is_tag():
            raise ValueError("This is not a tag")
        # First check if element has some content.
        # If it has it cannot be self closing
        tag_name = self.tag
        if self.getchildren():
            if tag_name in self.knownself_closing_elements:
                raise OpenTagException(self)
            return False
        # Then we have some know elements that we want to be self closing
        if tag_name in self.knownself_closing_elements:
            return True
        # Also elements in the tal namespace may be prettified as self closing
        # if needed, e.g.: <tal:name replace="${here/title}" />
        if tag_name.startswith("tal:"):
            return True
        if tag_name.startswith("metal:"):
            return True
        # All the other elements will have an open an close tag
        return False

    def is_null(self):
        """We define a special tag null_tag_name to wrap text"""
        return self.context.name == self.null_tag_name

    def is_processing_instruction(self):
        """Check if this element is a processing instruction like <?xml...>"""
        return isinstance(self.context, ProcessingInstruction)

    @memo
    def getparent(self):
        """Return the element parent as an instance of this class"""
        parent = self.context.parent
        if not parent or parent.name == BeautifulSoup.ROOT_TAG_NAME:
            return None
        return self.__class__(parent)

    @memo
    def getchildren(self):
        """Return this element children as instances of this class"""
        children = []
        next_level = self.level + 1
        for child in getattr(self.context, "children", []):
            child = self.__class__(child, next_level)
            try:
                child.is_tag() and child.is_self_closing()
            except OpenTagException:
                # Fix open tags and repeat
                nephews = reversed(tuple(child.context.children))
                for nephew in nephews:
                    child.context.insert_after(nephew)
                return self.getchildren()
            children.append(child)
        return children

    @property
    def tag(self):
        """Return the tag name"""
        return self.context.name

    @property
    def text(self):
        """Return the text contained in this element (if any)
        Convert the text characters to html entities
        """
        if not isinstance(self.context, NavigableString):
            return ""
        if self.is_comment():
            return self.context
        return self.escaper.substitute_html(self.context.string)

    @property
    @memo
    def attributes(self):
        """Return the wrapped attributes"""
        attributes = getattr(self.context, "attrs", {})
        return self.attribute_klass(attributes, self)

    @memo
    def render_content(self):
        """Render, properly indented, the contents of this element"""
        parts = []
        previous_part = ""
        for idx, child in enumerate(self.getchildren()):
            part = child()
            if child.is_text():
                part = lstrip_first_line(part)
            elif not endswith_whitespace(previous_part):
                part = lstrip_first_line(part)
            else:
                parts[-1] = rstrip_last_line(parts[-1])
            parts.append(part)
            previous_part = child()
        content = "".join(parts)
        if endswith_whitespace(content):
            content = rstrip_last_line(content)
        return content

    @property
    def prefix(self):
        return self.indent * self.level

    def render_comment(self):
        """Render a properly indented comment"""
        return f"{self.prefix}<!--{self.text}-->"

    def render_doctype(self):
        """Render a properly indented doctype"""
        return f"{self.prefix}<!DOCTYPE {self.text}>"

    def render_processing_instruction(self):
        """Render a properly indented processing instruction"""
        return f"{self.prefix}<?{self.text}?>"

    def render_text(self):
        """Render a properly indented text
        If the text starts with spaces, strip them and add a newline.
        If the text end with spaces, strip them.
        """
        text = self.text
        lines = text.split("\n")
        if not lines:
            return ""
        prefix = self.prefix
        if len(lines) == 1:
            line = lines[0]
            if not line.strip():
                return "\n"
            if startswith_whitespace(line):
                line = f"\n{prefix}{line.lstrip()}"
            if endswith_whitespace(line):
                line = f"{line.rstrip()}\n"
            return line
        if not lines[0].strip():
            rendered_lines = ["\n"]
        elif startswith_whitespace(lines[0]):
            rendered_lines = [f"\n{prefix}{lines[0].rstrip()}\n"]
        else:
            rendered_lines = [f"{lines[0]}\n"]
        for line in lines[1:-1]:
            if not line.strip():
                rendered_lines.append("\n")
            else:
                rendered_lines.append(f"{line.rstrip()}\n")
        if lines[-1].strip():
            if lines[-1].rstrip() == lines[-1]:
                if lines[-1].lstrip() == lines[-1]:
                    rendered_lines.append(lines[-1])
                else:
                    rendered_lines.append(prefix + lines[-1].lstrip())
            else:
                rendered_lines.append("%s\n" % lines[-1].rstrip())
        else:
            rendered_lines.append("")
        text = "".join(rendered_lines)
        return text

    def _render_template(self, template):
        return template.format(
            before_closing_multiline=self.before_closing_multiline,
            attributes=self.attributes.lstrip(),
            prefix=self.prefix,
            tag=self.tag,
        )

    def render_self_closing(self):
        """Render a properly indented a self closing tag"""
        attributes_len = len(self.attributes)
        if attributes_len == 0:
            template = self.self_closing_singleline_attributeless_template
        elif attributes_len == 1:
            template = self.self_closing_singleline_attributefull_template
        else:
            template = self.self_closing_multiline_template
        return self._render_template(template)

    def render_not_self_closing(self):
        """Render a properly indented not self closing tag"""
        attributes_len = len(self.attributes)
        if attributes_len == 0:
            open_tag_template = self.start_tag_singleline_attributeless_template
        elif attributes_len == 1:
            open_tag_template = self.start_tag_singleline_attributefull_template
        else:
            open_tag_template = self.start_tag_multiline_template
        text = self.text and self.render_text() or self.render_content()
        if endswith_whitespace(text):
            if text[-1] != "\n":
                text = f"{rstrip_last_line(text)}\n"
            close_tag_template = "{prefix}</{tag}>"
        else:
            close_tag_template = "</{tag}>"
        open_tag = self._render_template(open_tag_template)
        close_tag = self._render_template(close_tag_template)
        return "{open_tag}{text}{close_tag}".format(
            close_tag=close_tag, open_tag=open_tag, text=text
        )

    @memo
    def __call__(self):
        """Render the element and its contents properly indented"""
        if self.is_null():
            return self.render_content()
        if self.is_comment():
            return self.render_comment()
        if self.is_tag():
            if self.is_self_closing():
                return self.render_self_closing()
            else:
                return self.render_not_self_closing()
        if self.is_processing_instruction():
            return self.render_processing_instruction()
        if self.is_doctype():
            return self.render_doctype()
        return self.render_text()
| StarcoderdataPython |
192096 | #!/usr/bin/env python
from setuptools import setup
# Version is derived from the most recent git tag.
# NOTE(review): `version_format` is not core setuptools -- it appears to rely
# on the setuptools-git-version plugin being installed; confirm.
setup(version_format='{tag}.dev{commits}')
| StarcoderdataPython |
6627808 | <reponame>jasonwee/asus-rt-n14uhp-mrtg
import argparse
# Demonstration parser showing three option styles: a boolean flag (-a),
# a plain string option (-b), and a typed integer option (-c).
parser = argparse.ArgumentParser(description='Short sample app')
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
# Parse a hard-coded argv to show the resulting Namespace;
# '-bval' demonstrates the glued short-option value form.
print(parser.parse_args(['-a', '-bval', '-c', '3']))
| StarcoderdataPython |
1708468 | <filename>ntc_rosetta_conf/usr_datastore.py
from jetconf.data import JsonDatastore
class UserDatastore(JsonDatastore):
    """JSON-backed jetconf datastore; no behaviour is added beyond JsonDatastore."""
    pass
| StarcoderdataPython |
6503473 | """
Modeling: Mass Total + Source Parametric
========================================
This script gives a profile of a `DynestyStatic` model-fit to an `Imaging` dataset where the lens model is initialized,
where:
- The lens galaxy's light is omitted (and is not present in the simulated data).
- The lens galaxy's total mass distribution is an `EllIsothermal` and `ExternalShear`.
- The source galaxy's light is a parametric `EllSersic`.
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
import os
from os import path
cwd = os.getcwd()
from autoconf import conf
conf.instance.push(new_path=path.join(cwd, "config", "parallel"))
import autofit as af
"""
__Paths__
"""
dataset_name = "gaussian_x1"
path_prefix = path.join("parallel")
"""
__Search__
"""
search = af.DynestyStatic(
path_prefix=path_prefix,
name="DynestyStatic_x8",
unique_tag=dataset_name,
nlive=250,
walks=10,
iterations_per_update=100000,
number_of_cores=8,
)
"""
__Data__
This example fits a single 1D Gaussian, we therefore load and plot data containing one Gaussian.
"""
dataset_path = path.join("dataset", "example_1d", "gaussian_x1")
data = af.util.numpy_array_from_json(file_path=path.join(dataset_path, "data.json"))
noise_map = af.util.numpy_array_from_json(
file_path=path.join(dataset_path, "noise_map.json")
)
"""
__Model + Analysis__
We create the model and analysis, which in this example is a single `Gaussian` and therefore has dimensionality N=3.
"""
model = af.Model(af.ex.Gaussian)
model.centre = af.UniformPrior(lower_limit=0.0, upper_limit=100.0)
model.normalization = af.UniformPrior(lower_limit=1e-2, upper_limit=1e2)
model.sigma = af.UniformPrior(lower_limit=0.0, upper_limit=30.0)
analysis = af.ex.Analysis(data=data, noise_map=noise_map)
result = search.fit(model=model, analysis=analysis)
"""
Finished.
"""
| StarcoderdataPython |
3470939 | <reponame>XiaopeiZhang/CS450
# This program was written under Python 2.7. Please test with Python 2.7 if Python 3 does not work well.
# It will ask for 3 variables. If you just press enter, it will use default values 20 stack size, 5 discs per bucket and 3 folfers.
__author__ = 'Xiaopei'
from threading import Thread, Semaphore
from time import sleep
import random
rng = random.Random()
rng.seed(100)
class DiscGolfRange:
    """Shared state for the disc-golf range simulation.

    Frolfer threads fetch buckets of discs from a stash and throw them onto
    the field; a single cart thread refills the stash from the field whenever
    a frolfer finds the stash too small to fill a bucket.
    """
    def __init__(self, sizeStash, numDisc, numFrolfer):
        # Discs available in the stash at start.
        self.size_stash = sizeStash
        # Discs handed out per bucket request.
        self.num_disc = numDisc
        # Number of frolfer threads sharing the range.
        self.num_frolfer = numFrolfer
        # Discs currently lying on the field, waiting for the cart.
        self.discs_on_field = 0
        self.multiplex = Semaphore(self.num_frolfer) # multiplex for frolfers
        # Signalled by a frolfer when the stash cannot fill a bucket.
        self.empty_stash = Semaphore(0)
        # Signalled by the cart once the stash has been refilled.
        self.full_stash = Semaphore(0)
    def frolfer(self, id):
        """Run one frolfer thread: fetch a bucket when empty, then throw discs forever."""
        discs = 0
        while True:
            self.multiplex.acquire()
            if discs == 0:
                print('Frolfer {:>1d} calling for bucket'.format(id))
                if self.size_stash < self.num_disc:
                    # Wake the cart and wait (still holding a multiplex permit)
                    # until it has gathered the field discs back into the stash.
                    self.empty_stash.release()
                    self.full_stash.acquire()
                discs = self.num_disc
                self.size_stash -= self.num_disc
                print('Frolfer {:1d} got {:>1d} discs; Stash = {:>1d}'.format(id, self.num_disc, self.size_stash))
            self.multiplex.release()
            sleep(1) # frolfers do not have to throw right after get discs
            self.multiplex.acquire()
            print('Frolfer {:1d} threw disc {:>1d}'.format(id, self.num_disc-discs))
            self.discs_on_field += 1
            discs -= 1
            self.multiplex.release()
            sleep(rng.random())
    def cart(self):
        """Run the cart thread: when signalled, freeze the other frolfers and gather field discs."""
        while True:
            self.empty_stash.acquire()
            # The signalling frolfer already holds one permit, so taking the
            # remaining num_frolfer-1 stops every other frolfer.
            for i in range(self.num_frolfer-1): # make sure no other frolfers can do anything
                self.multiplex.acquire()
            print('################################################################################')
            print('stash = {:>1d}; Cart entering field'.format(self.size_stash))
            self.size_stash += self.discs_on_field
            print('Cart done, gathered {:>1d} discs; Stash = {:>1d}'.format(self.discs_on_field, self.size_stash))
            print('################################################################################')
            self.discs_on_field = 0
            for i in range(self.num_frolfer-1):
                self.multiplex.release()
            self.full_stash.release()
def _prompt_int(prompt, default):
    """Ask the user for an integer; keep *default* on blank or non-numeric input."""
    # The file header invites running under Python 3 as well, where
    # raw_input() no longer exists -- fall back to input() there.
    try:
        reader = raw_input  # Python 2
    except NameError:
        reader = input  # Python 3
    text = reader(prompt)
    if text.isdigit():
        return int(text)
    return default
def main():
    """Collect run parameters from the user and launch frolfer and cart threads."""
    stash = _prompt_int('Enter the size of the stash: ', 20)
    print('The size of the stash is ' + str(stash))
    discs = _prompt_int('Enter the number of discs per bucket: ', 5)
    print('The number of discs per bucket is ' + str(discs))
    num = _prompt_int('Enter the number of frolfer threads: ', 3)
    print('The number of frolfer threads is ' + str(num))
    disc_golf_range = DiscGolfRange(stash, discs, num)
    # One thread per frolfer plus a single cart thread.
    ts = [Thread(target=disc_golf_range.frolfer, args=[i]) for i in range(num)]
    ts.append(Thread(target=disc_golf_range.cart))
    for t in ts:
        t.start()
    for t in ts:
        t.join()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
252754 | import gzip
from pathlib import Path
import pandas as pd
from src.data.paths import DataDirs
def read_compressed_file(file_path: Path) -> bytes:
    """Read in .tsv.gz file from disk and return the decompressed bytes.

    Raises:
        FileNotFoundError: with a friendlier message when the file is missing.
    """
    try:
        with gzip.open(file_path, "rb") as f:
            return f.read()
    except FileNotFoundError as err:
        # Chain the original error (consistent with read_from_tsv below) so
        # the real path and errno remain visible in the traceback.
        raise FileNotFoundError(f"{file_path!r} not found on disk") from err
def write_decompressed_file(tsv_content: bytes, output_path: Path) -> None:
    """Persist the extracted tsv payload to *output_path* as raw bytes."""
    with open(output_path, "wb") as out_handle:
        out_handle.write(tsv_content)
def read_from_tsv(file_path: Path):
    """Read in tsv file from external data.

    Everything is loaded as strings (dtype object) and the literal
    marker \\N is interpreted as a missing value.
    """
    try:
        return pd.read_csv(file_path, dtype="object", sep="\t", na_values=[r"\N"])
    except FileNotFoundError as err:
        raise FileNotFoundError(f"Could not find file {file_path.name!r}") from err
def write_to_tsv(df: pd.DataFrame, file_path: Path):
    """Dump *df* to disk as tab-separated values, without the index column."""
    export_options = {"sep": "\t", "index": False}
    df.to_csv(file_path, **export_options)
def read_processed_data():
    """Read in data that was preprocessed, specifying data types."""
    file_path = DataDirs.processed / "processed.tsv"
    try:
        frame = pd.read_csv(file_path, sep="\t")
    except FileNotFoundError as exc:
        raise FileNotFoundError(f"Could not find file {file_path.name!r}") from exc
    return frame
12853757 | <filename>Python/index_finder.py
#!/usr/bin/env python3
# Author: <NAME>, Dec 2018
# Script for checking index clashes
# Input one or several nucleotide sequences and print any matches found in
# the index reference file. This version is only good for checking for
# full matches.
# It is pretty useful though to list overlapping indexes in the reference file.
# Usage:
# index_finder --ref <reference_list> <index_seq>...
# TODO: Show sequences matching the first six bases not just complete matches
# TODO: Specify cache dir
import sys
import argparse
import re
import hashlib
import json
import os
import errno
COMPL_MAP = {"A": "T", "T": "A", "C": "G", "G": "C"}
def file_hash(path):
    """Return the hex MD5 digest of the file at *path*, read in 64 KiB chunks."""
    chunk_size = 65536
    digest = hashlib.md5()
    with open(path, "rb") as stream:
        # iter() with a sentinel stops cleanly at EOF (empty bytes).
        for chunk in iter(lambda: stream.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def rev(seq):
    """Return the sequence in reverse orientation."""
    backwards = seq[::-1]
    return backwards
def compl(seq):
    """Complement each nucleotide of *seq* without reversing it."""
    return "".join(COMPL_MAP[base] for base in seq)
def rev_compl(seq):
    """Return the reverse complement of *seq*."""
    return "".join(COMPL_MAP[base] for base in reversed(seq))
# Build a dict of know index sequences from a text file:
def build_index_dict(path, length):
    """Map every index sequence found in the reference file to its source lines.

    With length=None any run of 4+ bases counts; otherwise only runs of
    exactly *length* bases are considered.
    """
    ref_dict = {}
    if length is None:
        seq_pattern = re.compile(r"(?<![ATCG])[ATCGN]{4,}")
    else:
        seq_pattern = re.compile(r"(?<![ATCG])[ATCGN]{{{}}}".format(length))
    with open(path, "r") as ref:
        for line in ref:
            # A set collapses repeated hits of the same sequence on one line.
            for seq in set(seq_pattern.findall(line)):
                ref_dict.setdefault(seq, []).append(line.strip())
    return ref_dict
def load_index_dict(path):
    """Deserialise a cached index dict from a JSON file."""
    with open(path, "r") as handle:
        return json.load(handle)
def save_index_dict(obj, path):
    """Serialise *obj* to *path* as JSON."""
    with open(path, "w") as handle:
        json.dump(obj, handle)
def print_index_dict(ref_dict):
    """Print every sequence that maps to more than one reference line."""
    for seq, matches in ref_dict.items():
        if len(matches) < 2:
            continue
        print(seq)
        for match in matches:
            print("\t{}".format(match))
def main(args):
    """Check query sequences against the reference, caching the parsed index.

    The parsed reference is cached as <md5><length>.json in the current
    working directory, keyed by the reference file's content hash so the
    cache invalidates automatically when the reference changes.
    """
    if not args.ref or not os.path.isfile(args.ref):
        # File not found
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), args.ref)
    md5 = file_hash(args.ref)
    cache = "{}{}.json".format(md5, args.length or "")
    if not args.rebuild and os.path.isfile(cache):
        # Reuse the parsed reference; progress goes to stderr so stdout
        # stays clean for the match report.
        print("Loading cached index dict ({})".format(cache), file=sys.stderr)
        ref_dict = load_index_dict(cache)
    else:
        ref_dict = build_index_dict(args.ref, args.length)
        print("Caching index dict ({})".format(cache), file=sys.stderr)
        save_index_dict(ref_dict, cache)
    if args.list:
        # --list mode: report overlapping indexes and summary counts.
        print_index_dict(ref_dict)
        n = 0
        for x in ref_dict.values():
            n += len(x)
        print("\nTotal barcodes parsed in reference dict: {}".format(n))
        print("Unique barcodes in reference dict: {}".format(len(ref_dict)))
    else:
        # --seqs mode: full-match lookup of each query (optionally truncated
        # to --length characters to mirror how the reference was built).
        for arg in args.seqs:
            if args.length:
                seq = arg[:args.length]
            else:
                seq = arg
            if seq in ref_dict:
                matches = ref_dict[seq]
                print("{} found in:".format(seq))
                for m in matches:
                    print("\t{}".format(m))
            else:
                print("{}: No matches found".format(seq))
if __name__ == "__main__":
    p = argparse.ArgumentParser(description="Find index clashes")
    # --seqs and --list are mutually exclusive entry modes.
    g = p.add_mutually_exclusive_group(required=True)
    g.add_argument("--seqs", nargs="+", help="All sequences to search for")
    g.add_argument("--list", action="store_true", default=False,
                   help="Print non-unique indexes in the reference list")
    p.add_argument("--ref", required=True, help="Reference text file containing"
                   " known index sequences")
    p.add_argument("--rebuild", action="store_true", help="Don't use any cached"
                   " reference object")
    # range(4,8) limits --length to 4..7 inclusive.
    p.add_argument("--length", type=int, choices=range(4,8), help="Set the "
                   "number of letters to consider, both in the query strings and "
                   "when building the reference")
    main(p.parse_args())
| StarcoderdataPython |
11346167 | <filename>scripts/us_bjs/nps/import_data_test.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
from preprocess_data import preprocess_df
from import_data import save_csv
class TestPreprocess(unittest.TestCase):
    def test_preprocess(self):
        """End-to-end check: preprocess test_data.tsv and compare with the golden CSV."""
        input_path = "./test/test_data.tsv"
        output_path = "./test/test_data_processed"
        expected_path = "./test/expected_data.csv"
        input_df = pd.read_csv(input_path, delimiter='\t')
        save_csv(preprocess_df(input_df), output_path)
        # Open read-only: these files are only compared, never written.
        # ('r+' needlessly demanded write access and fails on read-only checkouts.)
        # Get the content from the processed file.
        with open(output_path + ".csv", 'r') as actual_f:
            actual: str = actual_f.read()
        # Get the content of the expected output.
        with open(expected_path, 'r') as expected_f:
            expected: str = expected_f.read()
        self.assertEqual(actual, expected)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3581638 | #Functions implementing potential parameter space symmetries
#<NAME> (2019) NASA-GSFC
#
#These functions are of a standard form needed for specifying (potential)
#symmetries of the parameter state space, and can be exploited as
#specialized MCMC proposals.
#Implementing potential parameter space symmetries
#These class definitions are of a standard form needed for specifying (potential)
#symmetries of the parameter state space, and can be exploited as
#specialized MCMC proposals.
#TDI A/E symmetric (in stationary/low-freq limit) half rotation of constellation or quarter rotation with polarization flip
#uses 1 random var
import math
import ptmcmc
PI=3.1415926535897932384626433832795029
halfpi=PI/2;
# Indices into the parameter vector handed to every symmetry transform below:
# [distance, phase, inclination, ecliptic longitude, ecliptic latitude, polarization]
idist=0
iphi=1
iinc=2
ilamb=3
ibeta=4
ipsi=5;
# Sign-convention toggle for the phase corrections applied in
# transmit_receive_inc_swap_symmetry_transf and dist_alt_pol_symmetry_transf.
reverse_phi_sign=False
####just for debugging:
# Imaginary unit and numpy are used by the debugging likelihood helpers
# (funcsa/funcse/simpleCalculateLogLCAmpPhase) defined next.
I = complex(0.0, 1.0)
import numpy as np
def funcsa(d, phi, inc, lambd, beta, psi):
    """TDI-A response combining the (2,2) and (2,-2) modes (debugging helper)."""
    plus_pattern = I * ( 3./4 * (3 - np.cos(2*beta)) * np.cos(2*lambd - PI/3) )
    cross_pattern = I * (3.0*np.sin(beta) * np.sin(2*lambd - PI/3))
    # Common amplitude prefactor shared by both modes.
    amp = 0.5/d * np.sqrt(5/PI)
    mode_22 = amp * pow(np.cos(inc/2), 4) * np.exp(2.*I*(-phi-psi)) * 0.5*(plus_pattern + I*cross_pattern)
    mode_2m2 = amp * pow(np.sin(inc/2), 4) * np.exp(2.*I*(-phi+psi)) * 0.5*(plus_pattern - I*cross_pattern)
    return mode_22 + mode_2m2
def funcse(d, phi, inc, lambd, beta, psi):
    """TDI-E response combining the (2,2) and (2,-2) modes (debugging helper)."""
    plus_pattern = -I*(3./4 * (3 - np.cos(2*beta)) * np.sin(2*lambd - PI/3))
    cross_pattern = I*(3*np.sin(beta) * np.cos(2*lambd - PI/3))
    # Common amplitude prefactor shared by both modes.
    amp = 0.5/d * np.sqrt(5/PI)
    mode_22 = amp * pow(np.cos(inc/2), 4) * np.exp(2.*I*(-phi-psi)) * 0.5*(plus_pattern + I*cross_pattern)
    mode_2m2 = amp * pow(np.sin(inc/2), 4) * np.exp(2.*I*(-phi+psi)) * 0.5*(plus_pattern - I*cross_pattern)
    return mode_22 + mode_2m2
def simpleCalculateLogLCAmpPhase(d, phiL, inc, lambdL, betaL, psiL):
    """Gaussian log-likelihood of the A/E responses against fixed injections.

    Simple likelihood for runcan 22 mode, frozen LISA, lowf, snr 200;
    normalization factor and injection values sainj, seinj are hardcoded
    (read from Mathematica).
    """
    factor = 216147.866077
    sainj = 0.33687296665053773 + I*0.087978055005482114
    seinj = -0.12737105239204741 + I*0.21820079314765678
    sa = funcsa(d, phiL, inc, lambdL, betaL, psiL)
    se = funcse(d, phiL, inc, lambdL, betaL, psiL)
    residual = pow(abs(sa - sainj), 2) + pow(abs(se - seinj), 2)
    return -1./2 * factor * residual
######
def LISA_quarter_rotation_symmetry_transf(s, randoms):
    """Rotate the constellation by a random, nonzero number of quarter turns.

    Odd quarter turns also shift the polarization by pi/2.  Uses one
    random variable; the turn count is derived from its magnitude and sign.
    """
    params = s.get_params()
    # Map the random draw to a signed, nonzero number of quarter turns.
    turns = int(abs(randoms[0]) * 2) + 1
    if randoms[0] < 0:
        turns = -turns
    params[ilamb] = params[ilamb] + turns * halfpi
    if abs(turns) % 2 == 1:
        params[ipsi] = params[ipsi] + halfpi
    return ptmcmc.state(s, params)
def source_quarter_rotation_symmetry_transf(s, randoms):
    """Rotate the source phase by a random, nonzero number of quarter turns.

    Odd quarter turns also shift the polarization by pi/2.  Uses one
    random variable, interpreted as in the LISA quarter-rotation transform.
    """
    params = s.get_params()
    turns = int(abs(randoms[0]) * 2) + 1
    if randoms[0] < 0:
        turns = -turns
    params[iphi] = params[iphi] + turns * halfpi
    if abs(turns) % 2 == 1:
        params[ipsi] = params[ipsi] + halfpi
    return ptmcmc.state(s, params)
#TDI A/E symmetric (in stationary/low-freq limit) relection through constellation plane, simultaneous with source plane reflection and polarization flip
#Uses 0 random vars
def LISA_plane_reflection_symmetry_transf(s, randoms):
    """Reflect through the constellation plane.

    Simultaneously reflects the source plane (inc -> pi - inc,
    beta -> -beta) and flips the polarization (psi -> pi - psi).
    Uses no random variables.
    """
    params = s.get_params()
    params[iinc] = PI - params[iinc]
    params[ibeta] = -params[ibeta]
    params[ipsi] = PI - params[ipsi]
    return ptmcmc.state(s, params)
def transmit_receive_inc_swap_symmetry_transf(s, randoms):
    """Swap the source inclination with the line-of-sight inclination.

    Sets inc -> theta = pi/2 - beta and beta -> pi/2 - inc, then applies a
    compensating rotation Phi to both phi and lambda.  Uses no random
    variables (randoms is ignored).
    """
    param=s.get_params()
    phi=param[iphi]
    inc=param[iinc]
    lamb=param[ilamb]
    beta=param[ibeta]
    psi=param[ipsi]
    # theta is the line-of-sight inclination relative to the LISA plane.
    theta=halfpi-beta
    twopsi=2*psi
    ti4=math.tan(inc/2)**4;
    tt4=math.tan(theta/2)**4;
    # Compensating rotation angle built from the two half-angle tangents.
    Phi=math.atan2(math.sin(twopsi)*(ti4-tt4),math.cos(twopsi)*(ti4+tt4))/2;
    param[iinc]=theta
    param[ibeta]=halfpi-inc
    if reverse_phi_sign:
        param[iphi]=phi-Phi#sign differs from that in notes
    else:
        param[iphi]=phi+Phi#sign differs from that in simplelikelihood
    param[ilamb]=lamb-Phi
    return ptmcmc.state(s,param);
#Approximate distance inclination symmetry
#uses 2 random var
# Scale of the uniform step taken in the reparameterized inclination f(x).
dist_inc_jump_size=0.1;
def dist_inc_scale_symmetry_transf(s, randoms):
    """Approximate distance-inclination scaling symmetry (uses 2 random vars).

    randoms[1] selects which inclination variable is transformed; randoms[0]
    sets the size of the step taken in the reparameterized inclination.
    """
    #We apply a symmetry to preserve d'*F(x')=d*F(x) where F(x)=1/cos(x)^2
    #Depending on the sign of the second random number, x is either the source inclination, or the
    #line-of-sight inclination relative to the LISA plane, theta=pi/2-beta;
    #To avoid issues at the edges we make sure that the transformation of the inclination
    #never crosses its limits.
    #Note that f:x->ln(pi/x-1) has range (inf,-inf) on domain (0,pi) with f(pi-x)=-f(x)
    #and inverse pi/(exp(f(x))+1)=x
    #We then step uniformly in f(x). So, for random number y,
    # x'=finv(f(x)+y)
    #print(s.show())
    param=s.get_params()
    use_theta=False;
    if(abs(randoms[1]*2)<1): #Half of the time we apply the transformation to theta (LISA includination) rather than source inclination
        use_theta=True
        oldalt=halfpi-param[ibeta]
    else:
        oldalt=param[iinc]
    dist=param[idist]
    df=randoms[0]*dist_inc_jump_size #Uniformly transform reparameterized inc
    #print("oldalt=",oldalt)
    oldf=math.log(PI/oldalt-1);
    newf=oldf+df;
    # Inverse of the reparameterization: new altitude stays inside (0, pi).
    newalt=PI/(math.exp(newf)+1);
    cosold=math.cos(oldalt)
    cosnew=math.cos(newalt)
    # Rescale the distance so d/cos(x) is (approximately) preserved.
    fac=cosnew/cosold;
    #double fac=(cosnew*cosnew+1)/(cosold*cosold+1);
    dist=dist*fac;
    param[idist]=dist
    if(use_theta):
        param[ibeta]=halfpi-newalt #convert back to beta
    else:
        param[iinc]=newalt
    return ptmcmc.state(s,param);
#Approximate distance inclination symmetry jacobian
#uses 1 random var
def dist_inc_scale_symmetry_jac(s, randoms):
    """Jacobian of dist_inc_scale_symmetry_transf (uses the same random vars).

    Must be called with the same draws as the transform so that the same
    inclination variable and step are reproduced here.
    """
    #The transformation has the form:
    # d' = d F(x)/F(x')
    # x' = finv( f(x) + y )
    # y' = -y
    #where x is the selected inclination variable and y is the random number.
    #The Jacobian is then -F(x)f'(x) / (F(x')f'(x'))
    #Because the random step is performed on the rescaled inclination f(x)=ln(pi/x-1)
    #we have 1/f'(x) = x(1-x/pi)
    param=s.get_params()
    use_theta=False
    if(abs(randoms[1]*2)<1): #Half of the time we apply the transformation to theta (LISA includination) rather than source inclination
        use_theta=True
        oldalt=halfpi-param[ibeta]
    else:
        oldalt=param[iinc]
    dist=param[idist]
    df=randoms[0]*dist_inc_jump_size #Uniformly transform reparameterized inc
    oldf=math.log(PI/oldalt-1);
    newf=oldf+df;
    newalt=PI/(math.exp(newf)+1);
    cosold=math.cos(oldalt)
    cosnew=math.cos(newalt)
    # Distance-rescaling factor, then the 1/f'(x) terms of the Jacobian.
    fac=cosnew/cosold;
    fac*=(PI-newalt)*newalt/(PI-oldalt)/oldalt;
    #if fac<=0:print("fac=",fac)
    return abs(fac);
#Exact-in-limit distance-altitude-polarization 2-D symmetry
# Step scales for the two random moves (in psi and in the w coordinate).
dist_alt_pol_psi_size=0.5;
dist_alt_pol_w_size=0.5;
# lncut is multiplied by 0, so the xy-cut below is currently disabled.
lncut=math.log(100)*0
# Threshold beyond which the near-pole limiting formulas are used.
Tlimit=1e8
# Global counters tracking how often proposals are cut (debug bookkeeping).
xxcount=0
xxcut=0
def dist_alt_pol_symmetry_transf(s, randoms):
    '''
    This transform exercises an exact version of the distance-altitude-polarization symmetry
    which exists exactly for signals from a quadrupolar rotating source detected by a full
    polarization non-accelerating detector that is small compared to the wavelength. It uses
    2 random vars.
    The symmetry is realized by a random step in two variables in a transformed coordinate system.
    '''
    param=s.get_params()
    #params
    iota=param[iinc]
    theta=halfpi-param[ibeta]
    psi=param[ipsi]
    dist=param[idist]
    lamb=param[ilamb]
    phi=param[iphi]
    #intermediates: x,y are quartic half-angle tangents of the two inclinations
    x=math.tan(theta/2)**4
    y=math.tan(iota/2)**4
    x2=x*x
    y2=y*y
    twoc4psi=2*math.cos(4*psi)
    ztwoc4psi=x*y*twoc4psi
    # R and Delta are the invariants preserved by the symmetry step.
    R=(x2+y2+ztwoc4psi)/(1+x2*y2+ztwoc4psi)
    Delta=(1-x2)*(1-y2)/(dist**2*((1+math.sqrt(x))*(1+math.sqrt(y)))**4)
    #The notes (currently) apply when R<=1, we extend to R>1 noting that
    #R->1/R when z<->w:
    #Thus in the case R>1 we need to swap the definitions of z and w, meaning we change y->1/y in these exprs
    ypow=1
    if R>1: ypow=-1
    z=x*y**ypow
    #z2=z*z
    if z<1:sgn=-1
    else: sgn=1
    s2psi=math.sin(2*psi)
    c2psi=math.cos(2*psi)
    Psip=math.atan2((y-x)*s2psi,(x+y)*c2psi)
    Psim=math.atan2((x*y-1)*s2psi,(x*y+1)*c2psi)
    #forward reparameterization
    w=x/y**ypow
    #transform: random steps in psi and in the signed w coordinate
    psit=psi+dist_alt_pol_psi_size*randoms[0]
    c4psit=math.cos(4*psit)
    wt=sgn*w+dist_alt_pol_w_size*randoms[1]
    if wt>0:sgn=1
    else:sgn=-1
    wt=abs(wt)
    #reverse reparameterization and intermediates
    T = ( (wt+1/wt)/2 + c4psit*(1-R**ypow) )/R**ypow
    if True and T>Tlimit: #limiting case near poles
        rootr=math.sqrt(R)
        if ypow>0:
            if wt>0:
                xt=1/rootr
                yt=2*T*rootr
            else:
                xt=2*T*rootr
                yt=1/rootr
        else:
            if wt<0:
                xt=rootr/2/T
                yt=rootr
            else:
                xt=rootr
                yt=rootr/2/T
        zt=xt*yt**ypow
    else:
        zt = T+sgn*math.sqrt(T*T-1)
        xt = math.sqrt(zt*wt)
        if xt<wt*1e-60: yt=R #handle pathological case
        else: yt = (xt/wt)**ypow
    # Debug trap: a negative radicand here would make distt undefined.
    if (1-xt*xt)*(1-yt*yt)/Delta<0:
        print('dist',T,math.sqrt(R),(1-xt*xt),(1-yt*yt),Delta,(1-xt*xt)*(1-yt*yt)/Delta,sgn,ypow)
        print('old',w,z,x,y)
        print('new',wt,zt,xt,yt)
    # New distance chosen to hold the invariant Delta fixed.
    distt = math.sqrt((1-xt*xt)*(1-yt*yt)/Delta) / ((1+math.sqrt(xt))*(1+math.sqrt(yt)))**2
    s2psi=math.sin(2*psit)
    c2psi=math.cos(2*psit)
    Psipt=math.atan2((yt-xt)*s2psi,(xt+yt)*c2psi)
    Psimt=math.atan2((xt*yt-1)*s2psi,(xt*yt+1)*c2psi)
    #test
    #Rt=(xt**2+yt**2+xt*yt*2*c4psit)/(1+xt**2*yt**2+xt*yt*2*c4psit)
    #Deltat=(1-xt**2)*(1-yt**2)/(distt**2*((1+math.sqrt(xt))*(1+math.sqrt(yt)))**4)
    #print("check: ",x,y,z,w,dist,T,R,Delta)
    #print("checkt:",xt,yt,zt,wt,distt,T,Rt,Deltat)
    #complete angle params (invert the quartic half-angle tangents)
    thetat=math.atan(xt**0.25)*2
    iotat=math.atan(yt**0.25)*2
    lambt=lamb+0.25*(Psim-Psimt-Psip+Psipt)
    global reverse_phi_sign
    if reverse_phi_sign:
        phit = phi-0.25*(Psim-Psimt+Psip-Psipt)
    else:
        phit = phi+0.25*(Psim-Psimt+Psip-Psipt)
    #hacks
    eps=1e-60
    #Next line restricts to just cases where xy<<1 or xy>>1
    # (with lncut currently 0 this condition never fires).
    global xxcount,xxcut
    xxcount+=1
    if abs(math.log(x*y+eps))<lncut or abs(math.log(xt*yt+eps))<lncut:
        xxcut+=1
        return ptmcmc.state(s,param);
    else:
        """
        sa=funcsa(dist, phi, iota, lamb, halfpi-theta, psi)
        se=funcse(dist, phi, iota, lamb, halfpi-theta, psi)
        sat=funcsa(distt, phit, iotat, lambt, halfpi-thetat, psit)
        set=funcse(distt, phit, iotat, lambt, halfpi-thetat, psit)
        sigp=sa+I*se
        sigm=sa-I*se
        sigpt=sat+I*set
        sigmt=sat-I*set
        Rt=(xt**2+yt**2+xt*yt*2*c4psit)/(1+xt**2*yt**2+xt*yt*2*c4psit)
        Deltat=(1-xt**2)*(1-yt**2)/(distt**2*((1+math.sqrt(xt))*(1+math.sqrt(yt)))**4)
        L= simpleCalculateLogLCAmpPhase(dist, phi, iota, lamb, halfpi-theta, psi);
        Lt=simpleCalculateLogLCAmpPhase(distt, phit, iotat, lambt, halfpi-thetat, psit);
        if abs(1-Lt/L)>1e-2:
            print("check: ",x,y,z,w,dist,Psim,Psip,T,math.sqrt(R),Delta,L)
            print("checkt:",xt,yt,zt,wt,distt,Psimt,Psipt,T,math.sqrt(Rt),Deltat,Lt)
            print(sa,se,sigp,sigm,abs(sa),abs(se),abs(sigp),abs(sigm),abs(sigm/sigp)**2,16*(abs(sigp)**2-abs(sigm)**2))
            print(sat,set,sigpt,sigmt,abs(sat),abs(set),abs(sigpt),abs(sigmt),abs(sigmt/sigpt)**2,16*(abs(sigpt)**2-abs(sigmt)**2))
        """
        pass
    #if xxcount%2000==0:print("cutfrac=",xxcut/xxcount)
    #store new params
    param[ipsi]=psit
    param[idist]=distt
    param[ibeta]=halfpi-thetat
    param[iinc]=iotat
    param[ilamb]=lambt
    param[iphi]=phit
    return ptmcmc.state(s,param);
#Exact-in-limit distance-altitude-polarization 2-D symmetry
# NOTE(review): this constant appears unreferenced by the transform above
# (which uses dist_alt_pol_psi_size / dist_alt_pol_w_size) -- verify usage
# in the jacobian below before removing.
dist_alt_pol_size=0.1;
def dist_alt_pol_symmetry_jac(s, randoms):
    '''
    This transform exercises an exact version of the distance-altitude-polarization symmetry
    which exists exactly for signals from a quadrupolar rotating source detected by a full
    polarization non-accelerating detector that is small compared to the wavelength. It uses
    2 random vars.
    The symmetry is realized by a random step in two variables in a transformed coordinate system.

    Arguments:
        s       -- sampler state object providing get_params(); the indices
                   iinc/ibeta/ipsi/idist and the constants halfpi, Tlimit,
                   lncut, dist_alt_pol_psi_size, dist_alt_pol_w_size are
                   module-level globals defined elsewhere in this file.
        randoms -- sequence of (at least) two random draws for the step.

    Returns:
        abs(jac/jact), the absolute ratio of forward/backward Jacobian factors
        for the proposed step, or 1 when the log(x*y) cut below applies.
    '''
    param=s.get_params()
    #params
    iota=param[iinc]
    theta=halfpi-param[ibeta]
    psi=param[ipsi]
    dist=param[idist]
    #intermediates
    # x, y are tan^4 of the half-angles of the sky altitude and inclination.
    x=math.tan(theta/2)**4
    y=math.tan(iota/2)**4
    x2=x*x
    y2=y*y
    twoc4psi=2*math.cos(4*psi)
    ztwoc4psi=x*y*twoc4psi
    R=(x2+y2+ztwoc4psi)/(1+x2*y2+ztwoc4psi)
    Delta=(1-x2)*(1-y2)/(dist**2*((1+math.sqrt(x))*(1+math.sqrt(y)))**4)
    #The notes (currently) apply when R<=1, we extend to R>1 noting that
    #R->1/R when z<->w:
    #Thus in the case R>1 we need to swap the definitions of z and w, meaning wee change y->1/y in these exprs
    ypow=1
    if R>1: ypow=-1
    z=x*y**ypow
    #z2=z*z
    if z<1:sgn=-1
    else: sgn=1
    # NOTE(review): s2psi/c2psi are computed but never used in this function
    # (the companion transform above does use them) — confirm before removing.
    s2psi=math.sin(2*psi)
    c2psi=math.cos(2*psi)
    #forward reparameterization
    w=x/y**ypow
    #transform
    # NOTE(review): the trailing *0 disables the psi step entirely, so only
    # randoms[1] is actually used — presumably deliberate; verify.
    psit=psi+dist_alt_pol_psi_size*randoms[0]*0
    c4psit=math.cos(4*psit)
    wt=sgn*w+dist_alt_pol_w_size*randoms[1]
    # Record the sign of the stepped value, then fold it to positive w.
    if wt>0:sgn=1
    else:sgn=-1
    wt=abs(wt)
    #reverse reparameterization and intermediates
    T = ( (wt+1/wt)/2 + c4psit*(1-R**ypow) )/R**ypow
    if True and T>Tlimit: #limiting case near poles
        rootr=math.sqrt(R)
        if ypow>0:
            zt=1/(2*T)
            if wt>1:
                xt=1/rootr
                yt=2*T*rootr
            else:
                xt=2*T*rootr
                yt=1/rootr
        else:
            zt=2*T
            # NOTE(review): wt=abs(wt) above makes wt<0 impossible, so this
            # branch is dead; was the test meant to be wt<1 as in the ypow>0
            # case? Verify against the derivation notes.
            if wt<0:
                xt=rootr/2/T
                yt=rootr
            else:
                xt=rootr
                yt=rootr/2/T
        # This overwrites the zt values assigned in the branches above.
        zt=xt*yt**ypow
    else:
        zt = T+sgn*math.sqrt(T*T-1)
        xt = math.sqrt(zt*wt)
        if xt<wt*1e-60: yt=R #handle pathological case
        else: yt = (xt/wt)**ypow
    # New distance chosen so that Delta is conserved by the transform.
    distt = math.sqrt((1-xt*xt)*(1-yt*yt)/Delta) / ((1+math.sqrt(xt))*(1+math.sqrt(yt)))**2
    #test
    #Rt=(xt**2+yt**2+xt*yt*2*c4psit)/(1+xt**2*yt**2+xt*yt*2*c4psit)
    #Deltat=(1-xt**2)*(1-yt**2)/(distt**2*((1+math.sqrt(xt))*(1+math.sqrt(yt)))**4)
    #print("check: ",x,y,z,w,dist,T,R,Delta)
    #print("checkt:",xt,yt,xt*yt,wt,distt,T)
    #Jacobian
    #Ratio of the forward transformation to the directly transformed coords at old point over new point
    #theta->x: dx/dtheta ~ x*(x^1/4+x^-1/4)
    #iota->y: dy/diota ~ y*(y^1/4+y^-1/4)
    #(x,y,d)->(w,R,Delta): ~ w*(1-z2)/(z*d*(1+z2+2zcos4psi))
    #combined: w/d*(x^1/4+x^-1/4)*(y^1/4+y^-1/4)*(1-z2)/(1+z2+2zcos4psi)
    # eps regularizes divisions when any coordinate underflows to zero.
    eps=1e-60
    x4th=x**0.25+eps
    y4th=y**0.25+eps
    xt4th=xt**0.25+eps
    yt4th=yt**0.25+eps
    zt=zt+eps
    jac=w/(dist+eps)*(x4th+1/x4th)*(y4th+1/y4th)*(z-1/z)/(z+1/z+twoc4psi)
    jact=wt/(distt+eps)*(xt4th+1/xt4th)*(yt4th+1/yt4th)*(zt-1/zt)/(zt+1/zt+2*c4psit)
    #hacks
    eps=1e-60
    #Next line restricts to just cases where xy<<1 or xy>>1
    if abs(math.log(x*y+eps))<lncut or abs(math.log(xt*yt+eps))<lncut:
        return 1
    return abs(jac/jact)
| StarcoderdataPython |
11260751 | <reponame>danuluma/dannstore
import os
import sys
import unittest
import json
# local imports
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../')
from run import create_app
from app.api.v1.auth import create_admin, clear_users
from app.api.v1.products_view import clear_books
from app.api.v1.sales_view import clear_records
class SalesTest(unittest.TestCase):
    """Tests for the api /sales (records) endpoints.

    Each test builds its fixtures through the public API: create the admin,
    log in, optionally register an attendant and add a book, then exercise
    the sales endpoints. The previously duplicated login/register/add-book
    sequences are factored into private helpers below.
    """

    def setUp(self):
        self.app = create_app("testing")
        self.client = self.app.test_client
        self.test_user = {"username": "dann", "password": "<PASSWORD>", "role": 2}
        self.test_admin = {"username": "owner", "password": "<PASSWORD>"}
        # NOTE(review): the key 'minimun' looks like a typo for 'minimum',
        # but it is what the products endpoint currently receives — confirm
        # against the API schema before renaming it.
        self.test_book = {
            'id': 1,
            'title': "Coming soon",
            'description': "LOrem ipsum",
            'price': 100,
            'quantity': 20,
            'minimun': 5,
            'image_url': 'coming_soon'
        }

    def tearDown(self):
        # Reset all persisted state so tests stay independent.
        clear_records()
        clear_users()
        clear_books()

    # ---- private helpers (factored from the duplicated request flows) ----

    def _login(self, credentials):
        """POST credentials to /login and return the access token (or None)."""
        response = self.client().post('/api/v1/login', json=credentials)
        return json.loads(response.data).get('access_token')

    def _auth(self, access_token):
        """Build the Authorization header dict for a bearer token."""
        return {"Authorization": "Bearer " + access_token}

    def _register_attendant(self, admin_token):
        """Register self.test_user (a store attendant) using admin rights."""
        self.client().post('/api/v1/reg',
                           headers=self._auth(admin_token), json=self.test_user)

    def _add_book(self, admin_token):
        """Create self.test_book via the products endpoint."""
        self.client().post('/api/v1/products',
                           headers=self._auth(admin_token), json=self.test_book)

    def test_get_sales_record_without_admin_rights(self):
        """Tests /sales endpoint. One has to be an admin to access"""
        create_admin()
        admin_token = self._login(self.test_admin)
        self._register_attendant(admin_token)
        user_token = self._login(self.test_user)
        response = self.client().get('/api/v1/sales',
                                     headers=self._auth(user_token))
        json_data = json.loads(response.data)
        self.assertTrue(json_data.get('Error'))
        self.assertEqual(json_data.get('Error'),
                         "Only admins are allowed to view all sales records")
        self.assertEqual(response.status_code, 403)

    def test_get_empty_sales_record(self):
        """Tests /sales endpoint. There are no sales records yet"""
        create_admin()
        admin_token = self._login(self.test_admin)
        response = self.client().get('/api/v1/sales',
                                     headers=self._auth(admin_token))
        json_data = json.loads(response.data)
        self.assertTrue(json_data.get('Error'))
        self.assertEqual(json_data.get('Error'), "There are no sale records")
        self.assertEqual(response.status_code, 404)

    def test_try_add_a_sales_record_as_admin(self):
        """Tests POST /sales endpoint. Only attendants can access this"""
        create_admin()
        admin_token = self._login(self.test_admin)
        self._add_book(admin_token)
        response = self.client().post('/api/v1/sales',
                                      headers=self._auth(admin_token), json={'book_id': 1})
        json_data = json.loads(response.data)
        self.assertTrue(json_data.get('Error'))
        self.assertEqual(json_data.get('Error'),
                         "Only store attendants can create sale records")
        self.assertEqual(response.status_code, 403)

    def test_add_a_sales_record_as_attendant(self):
        """Tests POST /sales endpoint. Only attendants can access this"""
        create_admin()
        admin_token = self._login(self.test_admin)
        self._add_book(admin_token)
        self._register_attendant(admin_token)
        user_token = self._login(self.test_user)
        response = self.client().post('/api/v1/sales',
                                      headers=self._auth(user_token), json={'book_id': 1})
        json_data = json.loads(response.data)
        self.assertTrue(json_data.get('message'))
        self.assertEqual(json_data.get('message'), "Success! Sale recorded")
        self.assertEqual(response.status_code, 201)

    def test_get_sales_record(self):
        """Tests /sales endpoint."""
        create_admin()
        admin_token = self._login(self.test_admin)
        self._add_book(admin_token)
        self._register_attendant(admin_token)
        # The sale is created by the attendant, then read back as the admin.
        user_token = self._login(self.test_user)
        self.client().post('/api/v1/sales',
                           headers=self._auth(user_token), json={'book_id': 1})
        admin_token = self._login(self.test_admin)
        response = self.client().get('/api/v1/sales',
                                     headers=self._auth(admin_token))
        json_data = json.loads(response.data)
        self.assertTrue(json_data.get('Sales'))
        self.assertEqual(response.status_code, 200)

    def test_get_non_existent_sale(self):
        """Tests /sales/<saleId> endpoint. There are no sales records yet"""
        create_admin()
        admin_token = self._login(self.test_admin)
        response = self.client().get('/api/v1/sales/0',
                                     headers=self._auth(admin_token))
        json_data = json.loads(response.data)
        self.assertTrue(json_data.get('Error'))
        self.assertEqual(json_data.get('Error'),
                         "That sale record does not exist")
        self.assertEqual(response.status_code, 404)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
12836733 | <filename>8_2.py
from numpy import zeros, sign
# Define bisection function
def bisection(f, a, b, n):
    """Run n iterations of the bisection method for f on [a, b].

    Parameters
    ----------
    f : callable
        Scalar function; f(a) and f(b) must have opposite signs so the
        interval brackets a sign change.
    a, b : float
        Bracket endpoints.
    n : int
        Number of bisection iterations to perform.

    Returns
    -------
    numpy.ndarray
        The n successive midpoints; the last entry is the best root
        estimate (the bracket width halves every iteration).

    Raises
    ------
    ValueError
        If f(a) and f(b) have the same nonzero sign, i.e. the interval
        does not bracket a root.
    """
    # Fail fast on an invalid bracket instead of silently "converging"
    # toward one endpoint.
    if sign(f(a)) == sign(f(b)) != 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    c = zeros(n)
    for i in range(n):
        c[i] = (a + b) / 2.0
        # Keep the half-interval on which the sign change persists.
        if sign(f(c[i])) == sign(f(a)):
            a = c[i]
        else:
            b = c[i]
    return c
# Define function
def f(x):
    """Quadratic test function with roots at x = 1 and x = 5."""
    # Same polynomial as -x**2 + 6x - 5 with the linear term written
    # first; IEEE addition is commutative, so the value is identical.
    return 6.0 * x - x ** 2 - 5.0
# Execute bisection function
# The bracket [-2, 3] satisfies f(-2) < 0 < f(3), so it contains the
# root x = 1 of f.
a = -2.0
b = 3.0
n = 7
xb = bisection(f,a,b,n)
# Print results
# One row per iteration: index k (1-based) and midpoint estimate c.
print("%5s %8s" % ('k','c'))
for k in range(n):
    print("%5d %9.4f" % (k+1,xb[k]))
| StarcoderdataPython |
3456326 | # Exercise 1: Create VLANs and Assign IP using SSH
from netmiko import ConnectHandler
from getpass import getpass
# user input
# Prompt for both credentials without echoing them to the terminal.
# (Original line `password = <PASSWORD>()` was invalid syntax left by
# credential scrubbing.)
password = getpass("Enter password: ")
secret = getpass("Enter secret: ")
# Connection parameters for the target device, as a netmiko device dict.
CoreSW = {
    'device_type': 'cisco_ios',
    'ip': '192.168.100.20',
    'username': 'admin',
    'password': password,
    'secret': secret
}
'''calling the ConnectHandler Library [**iosv_l2 means telling
python to consider the contents of the dictionary as key value pairs
instead of single elements.'''
net_connect = ConnectHandler(**CoreSW)
net_connect.enable()
# Verify connectivity with a read-only show command --->
output = net_connect.send_command("show ip int br")
print(output)
# Create the SVI for VLAN 5 and assign it an IP address.
config_commands = ['int vlan 5', 'ip add 5.5.5.1 255.255.255.0']
output = net_connect.send_config_set(config_commands)
print(output)
# Bulk-create VLANs 10-19, naming each one after its VLAN id.
for n in range(10, 20):
    print("Creating VLAN " + str(n))
    config_commands = ['vlan ' + str(n), 'name DevOps_VLAN ' + str(n)]
    output = net_connect.send_config_set(config_commands)
    print(output)
# Close the SSH session so the connection is not leaked.
net_connect.disconnect()
1698759 | <reponame>hhh123123123/ESP32-Webserver<filename>py/Webclient.py
import socket
import gc
class HttpRequest:
    """Minimal HTTP client for posting JSON, written for MicroPython
    (e.g. ESP32): it uses socket.write()/readline(), which exist on
    MicroPython sockets but not on CPython's socket objects.
    """
    # NOTE(review): an HTTP header block must terminate with a blank line
    # before the body; verify this template ends with "\r\n\r\n" in the
    # original source (blank lines may have been lost in transcription).
    RequestHeader_template = '''{0} {1} HTTP/1.1
host: {2}
Content-Type: application/json
cache-control: no-cache
content-length: {3}
'''
    # url = 'https://www.example.com/info/sendair'
    # Default request settings; overridden per instance by __init__.
    method = 'POST'
    proto = 'http'
    host = 'www.example.com'
    port = 80
    sockinfotmp = ()  # cache of the last DNS lookup, reused if a later lookup fails
    requestCount = 0   # total requests attempted
    requestSuccess = 0  # requests that returned HTTP 200
    def __init__(self, config):
        """Initialize from a config dict with keys method/proto/host/port."""
        self.method = config['method']
        self.proto = config['proto']
        self.host = config['host']
        self.port = config['port']
    def send_json(self, data, path):
        """Send `data` (a pre-serialized JSON string/bytes) to `path` on the
        configured host. Best-effort: all errors are printed, not raised.
        """
        # You must use getaddrinfo() even for numeric addresses.
        # [(2, 1, 0, 'www.example.com', ('12.34.56.78', 80))](family, type, proto, canonname, sockaddr)
        try:
            self.requestCount += 1
            try:
                sockinfo = socket.getaddrinfo(self.host, self.port)[0]
                self.sockinfotmp = sockinfo
            except IndexError:
                # DNS gave no results; fall back to the cached lookup.
                sockinfo = self.sockinfotmp
            s = socket.socket(sockinfo[0], sockinfo[1], sockinfo[2])
            RequestHeader = self.RequestHeader_template.format(self.method, path, self.host, len(data))
            s.settimeout(10)
            s.connect(sockinfo[-1])
            # HTTPS is very memory-hungry on MicroPython; use sparingly.
            if self.proto == 'https':
                import ssl
                s = ssl.wrap_socket(s)
            s.write(RequestHeader)
            s.write(data)
            # Read response headers line by line; count a success when the
            # status line reports 200, and stop at the end of the headers.
            while True:
                line = s.readline()
                if line == b"" or line == b"\r\n":
                    break
                elif line == b'HTTP/1.1 200 OK\r\n':
                    self.requestSuccess += 1
                # print(line)
            s.close()
        except Exception as e:
            # Deliberately swallow errors: a failed upload must not crash
            # the device's main loop.
            print('WebClient:', e)
        finally:
            # Reclaim memory promptly on the constrained device.
            gc.collect()
| StarcoderdataPython |
6638319 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2015-09-17 11:41:18
# @Email: <EMAIL>
# @Last modified by: etrott
# @Last Modified time: 2015-09-17 11:41:37
| StarcoderdataPython |
149854 | <reponame>tzeikob/walle
#!/usr/bin/env python3
# An executable script resolving system data
import argparse
import signal
import json
import time
from common import globals
from util.logger import Router
from resolvers import static
from resolvers import uptime
from resolvers import monitor
from resolvers import network
from listeners import keyboard, mouse
# Parse command line arguments schema
# Parse command line arguments schema
# --debug / --no-debug toggle verbose logging; the default is off.
parser = argparse.ArgumentParser(prog='resolver')
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--no-debug', dest='debug', action='store_false')
parser.set_defaults(debug=False)
opts = parser.parse_args()
# Initialize logging router
logger = Router('resolver', globals.LOG_FILE_PATH)
if opts.debug:
    logger.set_level('DEBUG')
# Terminates the main and child threads
def shutdown(*args):
    """Signal handler: stop every worker thread and end the main loop."""
    # Stop the workers in the same order the daemon started them.
    for worker in (uptime, monitor, network, keyboard, mouse):
        worker.stop()
    # The main loop polls this flag; clearing it lets the process exit.
    state['up'] = False
# Attach shutdown handlers
# SIGINT and SIGTERM both trigger a graceful stop of all worker threads.
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
# Shared flag polled by the main loop below; shutdown() clears it.
state = {
    'up': True
}
# Resolve once the system's static information
static.resolve()
# Start monitoring system uptime, loads and network
uptime.start()
monitor.start()
network.start()
# Start listening for keyboard and mouse events
keyboard.start()
mouse.start()
# Main loop: once per second, snapshot every resolver's state into one
# JSON document and write it to the shared data file.
while state['up']:
    data = {}
    # Read the static resolver state
    data['static'] = static.state['data']
    # Read monitoring data
    data['uptime'] = uptime.state['data']
    data['monitor'] = monitor.state['data']
    data['network'] = network.state['data']
    # Read keyboard and mouse actions
    actions = {}
    keyboard_data = keyboard.state['data']
    mouse_data = mouse.state['data']
    # Calculate keyboard and mouse action rates
    actions['strokes'] = keyboard_data['strokes']
    actions['clicks'] = mouse_data['clicks']
    actions['scrolls'] = mouse_data['scrolls']
    actions['moves'] = mouse_data['moves']
    actions['total'] = actions['strokes'] + actions['clicks'] + actions['scrolls'] + actions['moves']
    # Rates default to 0 so a zero total never divides by zero.
    actions['strokes_rate'] = 0
    actions['clicks_rate'] = 0
    actions['scrolls_rate'] = 0
    actions['moves_rate'] = 0
    if actions['total'] > 0:
        actions['strokes_rate'] = actions['strokes'] / actions['total']
        actions['clicks_rate'] = actions['clicks'] / actions['total']
        actions['scrolls_rate'] = actions['scrolls'] / actions['total']
        actions['moves_rate'] = actions['moves'] / actions['total']
    data['actions'] = actions
    # Overwrite the data file with the latest snapshot.
    with open(globals.DATA_FILE_PATH, 'w') as data_file:
        data_file.write(json.dumps(data))
    logger.disk.debug('turning into the next resolve cycle...')
    # Wait before start the next cycle
    time.sleep(1)
logger.disk.info('shutting down gracefully...')
3364163 | # from chatterbot import ChatBot
# from chatterbot.trainers import ListTrainer
# from chatterbot.ext.django_chatterbot import settings
# from chatterbot.trainers import ChatterBotCorpusTrainer
# chatterbot = ChatBot(**settings.CHATTERBOT)
# trainer = ChatterBotCorpusTrainer(chatterbot)
# trainer.train(
# "chatterbot.corpus.english"
# )
| StarcoderdataPython |
1766587 | # Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import unittest
import os
import matplotlib
from serialization.scenario_set_serializer import ScenarioSetSerializer
from modules.runtime.commons.parameters import ParameterServer
class ScenarioSetSerializerTests(unittest.TestCase):
    """Round-trip test for ScenarioSetSerializer: dump, load, replay."""
    def test_highway_merging(self):
        """Serialize the highway-merging scenario set, reload it, and replay
        it both without and with visualization."""
        scenario_param_file =os.path.join("database","scenario_sets", "highway_merging", "test_1.json")
        param_server = ParameterServer(filename = scenario_param_file)
        param_server["Scenario"]["Generation"]["NumScenarios"] = 1 # set this down to reduce test runtime
        scenario_set_serializer = ScenarioSetSerializer(params=param_server)
        # Dump the generated scenarios next to the parameter file, then load
        # them back before replaying.
        scenario_set_serializer.dump(os.path.join("database", "scenario_sets", "highway_merging"))
        scenario_set_serializer.load()
        test_result = scenario_set_serializer.test(num_scenarios=1, num_steps=5, visualize_test=False)
        self.assertTrue(test_result)
        test_result = scenario_set_serializer.test(num_scenarios=1, num_steps=5, visualize_test=True)
        self.assertTrue(test_result)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
315759 | <reponame>kedz/cuttsum<filename>trec2015/cuttsum/l2s/_simple.py
import pyvw
from cuttsum.l2s._base import _SearchBase
import pandas as pd
class SelectBasicNextBias(_SearchBase):
    """L2S search task using the BASIC sentence features for the "select"
    action and a single bias feature for the "next" action. No cache is
    maintained across predictions.
    """
    def setup_cache(self):
        """This task keeps no cross-prediction state."""
        return None
    def basic_cols(self):
        """Column names of the BASIC sentence features used as VW features."""
        return [
            "BASIC length", "BASIC char length", "BASIC doc position",
            "BASIC all caps ratio", "BASIC upper ratio", "BASIC lower ratio",
            "BASIC punc ratio", "BASIC person ratio", "BASIC organization ratio",
            "BASIC date ratio", "BASIC time ratio", "BASIC duration ratio",
            "BASIC number ratio", "BASIC ordinal ratio", "BASIC percent ratio",
            "BASIC money ratio", "BASIC set ratio", "BASIC misc ratio"]
    def update_cache(self, pred, sents, df, cache):
        """No-op: this task does not accumulate selected-sentence state."""
        return cache
    def make_select_example(self, sent, sents, df, cache):
        """Build a cost-sensitive VW example for selecting sentence `sent`,
        with its BASIC features in namespace "b".
        """
        # NOTE(review): Series.iteritems() was removed in pandas 2.0
        # (use .items()); this code targets an older pandas.
        return self.example(lambda: {
            "b": [x for x in df.iloc[sent][self.basic_cols()].iteritems()],},
            labelType=self.vw.lCostSensitive)
    def make_next_example(self, sents, df, cache, is_oracle):
        """Build the "advance to next document" example: a lone bias
        feature in namespace "n".
        """
        return self.example(lambda: {"n": ["bias"],},
            labelType=self.vw.lCostSensitive)
    def get_feature_weights(self, dataframes):
        """Return (feature-name, weight) pairs for all model features,
        sorted by weight ascending.
        """
        # A dummy example is built only to recover each feature's hash slot.
        ex = self.vw.example(
            {"b": self.basic_cols(),
             "n": ["bias"],
            },
            labelType=self.vw.lCostSensitive)
        fw = []
        for i, feat in enumerate(self.basic_cols()):
            w = self.vw.get_weight(ex.feature("b", i))
            fw.append(("b:" + feat, w))
        fw.append(("n:bias", self.vw.get_weight(ex.feature("n", 0))))
        fw.sort(key=lambda x: x[1])
        return fw
class SelectBasicNextBiasDocAvg(_SearchBase):
    """L2S search task extending the BASIC-features model with document
    averages: namespace "a"/"b"/"c" carry the candidate sentence, the
    current document average, and the average over previously selected
    sentences (the cache); "d"/"e"/"f" are the "next" action analogues.
    """
    def __init__(self, vw, sch, num_actions):
        pyvw.SearchTask.__init__(self, vw, sch, num_actions)
        # LDF = label-dependent features mode of VW search.
        sch.set_options( sch.IS_LDF )
        self._with_scores = False
    def setup_cache(self):
        """The cache accumulates the feature rows of selected sentences."""
        return pd.DataFrame(columns=self.basic_cols())
    def basic_cols(self):
        """BASIC sentence feature columns plus two language-model scores."""
        return [
            "BASIC length", "BASIC char length", "BASIC doc position",
            "BASIC all caps ratio", "BASIC upper ratio", "BASIC lower ratio",
            "BASIC punc ratio", "BASIC person ratio", "BASIC organization ratio",
            "BASIC date ratio", "BASIC time ratio", "BASIC duration ratio",
            "BASIC number ratio", "BASIC ordinal ratio", "BASIC percent ratio",
            "BASIC money ratio", "BASIC set ratio", "BASIC misc ratio",
            "LM domain avg lp", "LM gw avg lp"]
    def update_cache(self, pred, sents, df, cache):
        """Append the selected sentence's feature row to the cache."""
        series = df.iloc[pred][self.basic_cols()]
        # NOTE(review): DataFrame.append was removed in pandas 2.0; the
        # modern equivalent is pd.concat([cache, series.to_frame().T],
        # ignore_index=True). This code targets an older pandas.
        cache = cache.append(series, ignore_index=True)
        return cache
    def make_select_example(self, sent, sents, df, cache):
        """Build the select-action example: candidate features ("a"),
        document average ("b"), and selected-sentence average ("c") when
        the cache is non-empty.
        """
        # NOTE(review): Series.iteritems() was removed in pandas 2.0.
        if len(cache) > 0:
            return self.example(lambda: {
                "a": [x for x in df.iloc[sent][self.basic_cols()].iteritems()],
                "b": [x for x in df.iloc[sents][self.basic_cols()].mean().iteritems()],
                "c": [x for x in cache.mean().iteritems()]
                },
                labelType=self.vw.lCostSensitive)
        else:
            return self.example(lambda: {
                "a": [x for x in df.iloc[sent][self.basic_cols()].iteritems()],
                "b": [x for x in df.iloc[sents][self.basic_cols()].mean().iteritems()],
                },
                labelType=self.vw.lCostSensitive)
    def make_next_example(self, sents, df, cache, is_oracle):
        """Build the next-action example: bias ("d"), document average
        ("e") when sentences remain, and cache average ("f") when any
        sentence has been selected. Four branches cover the empty cases.
        """
        if len(sents) > 0 and len(cache) > 0:
            return self.example(lambda: {
                "d": ["bias"],
                "e": [x for x in df.iloc[sents][
                    self.basic_cols()].mean().iteritems()],
                "f": [x for x in cache.mean().iteritems()]
                },
                labelType=self.vw.lCostSensitive)
        elif len(sents) > 0 and len(cache) == 0:
            return self.example(lambda: {
                "d": ["bias"],
                "e": [x for x in df.iloc[sents][
                    self.basic_cols()].mean().iteritems()],
                },
                labelType=self.vw.lCostSensitive)
        elif len(sents) == 0 and len(cache) > 0:
            return self.example(lambda: {
                "d": ["bias"],
                "f": [x for x in cache.mean().iteritems()]
                },
                labelType=self.vw.lCostSensitive)
        else:
            return self.example(lambda: {
                "d": ["bias"],
                },
                labelType=self.vw.lCostSensitive)
    def get_feature_weights(self, dataframes):
        """Return (feature-name, weight) pairs for every namespace,
        sorted by weight ascending.
        """
        # A dummy example is built only to recover each feature's hash slot.
        ex = self.vw.example(
            {"a": self.basic_cols(),
             "b": self.basic_cols(),
             "c": self.basic_cols(),
             "d": ["bias"],
             "e": self.basic_cols(),
             "f": self.basic_cols(),
            },
            labelType=self.vw.lCostSensitive)
        fw = []
        for i, feat in enumerate(self.basic_cols()):
            w = self.vw.get_weight(ex.feature("a", i))
            fw.append(("a:" + feat, w))
        for i, feat in enumerate(self.basic_cols()):
            w = self.vw.get_weight(ex.feature("b", i))
            fw.append(("b:" + feat, w))
        for i, feat in enumerate(self.basic_cols()):
            w = self.vw.get_weight(ex.feature("c", i))
            fw.append(("c:" + feat, w))
        for i, feat in enumerate(self.basic_cols()):
            w = self.vw.get_weight(ex.feature("e", i))
            fw.append(("e:" + feat, w))
        for i, feat in enumerate(self.basic_cols()):
            w = self.vw.get_weight(ex.feature("f", i))
            fw.append(("f:" + feat, w))
        fw.append(("d:bias", self.vw.get_weight(ex.feature("d", 0))))
        fw.sort(key=lambda x: x[1])
        return fw
| StarcoderdataPython |
12803828 | <gh_stars>1-10
#!/usr/bin/python
#
# Copyright (c) 2018 <NAME>, <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_webappslot
version_added: "2.8"
short_description: Manage Azure Web App slot.
description:
- Create, update and delete Azure Web App slot.
options:
resource_group:
description:
- Name of the resource group to which the resource belongs.
required: True
name:
description:
- Unique name of the deployment slot to create or update.
required: True
webapp_name:
description:
- Web app name which this deployment slot belongs to.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
configuration_source:
description:
- Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot.
auto_swap_slot_name:
description:
- Used to configure target slot name to auto swap, or disable auto swap.
            - Set it to the target slot name to enable auto swap.
- Set it to False to disable auto slot swap.
swap:
description:
- Swap deployment slots of a web app.
suboptions:
action:
description:
- Swap types.
- preview is to apply target slot settings on source slot first.
- swap is to complete swapping.
- reset is to reset the swap.
choices:
- preview
- swap
- reset
default: preview
target_slot:
description:
- Name of target slot to swap. If set to None, then swap with production slot.
preserve_vnet:
description:
- True to preserve virtual network to the slot during swap. Otherwise False.
type: bool
default: True
frameworks:
description:
- Set of run time framework settings. Each setting is a dictionary.
- See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
suboptions:
name:
description:
- Name of the framework.
- Supported framework list for Windows web app and Linux web app is different.
- For Windows web app, supported names(June 2018) java, net_framework, php, python, node. Multiple framework can be set at same time.
- For Linux web app, supported names(June 2018) java, ruby, php, dotnetcore, node. Only one framework can be set.
- Java framework is mutually exclusive with others.
choices:
- java
- net_framework
- php
- python
- ruby
- dotnetcore
- node
version:
description:
- Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
- net_framework supported value sample, 'v4.0' for .NET 4.6 and 'v3.0' for .NET 3.5.
- php supported value sample, 5.5, 5.6, 7.0.
- python supported value sample, e.g., 5.5, 5.6, 7.0.
- node supported value sample, 6.6, 6.9.
- dotnetcore supported value sample, 1.0, 1,1, 1.2.
- ruby supported value sample, 2.3.
- java supported value sample, 1.8, 1.9 for windows web app. 8 for linux web app.
settings:
description:
- List of settings of the framework.
suboptions:
java_container:
description: Name of Java container. This is supported by specific framework C(java) only. e.g. Tomcat, Jetty.
java_container_version:
description:
- Version of Java container. This is supported by specific framework C(java) only.
- For Tomcat, e.g. 8.0, 8.5, 9.0. For Jetty, e.g. 9.1, 9.3.
container_settings:
description: Web app slot container settings.
suboptions:
name:
description: Name of container. eg. "imagename:tag"
registry_server_url:
description: Container registry server url. eg. mydockerregistry.io
registry_server_user:
description: The container registry server user name.
registry_server_password:
description:
- The container registry server password.
startup_file:
description:
- The slot startup file.
- This only applies for linux web app slot.
app_settings:
description:
- Configure web app slot application settings. Suboptions are in key value pair format.
purge_app_settings:
description:
- Purge any existing application settings. Replace slot application settings with app_settings.
type: bool
deployment_source:
description:
- Deployment source for git
suboptions:
url:
description:
- Repository url of deployment source.
branch:
description:
- The branch name of the repository.
app_state:
description:
- Start/Stop/Restart the slot.
type: str
choices:
- started
- stopped
- restarted
default: started
state:
description:
- Assert the state of the Web App deployment slot.
- Use C(present) to create or update a slot and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "<NAME>(@yungezz)"
'''
EXAMPLES = '''
- name: Create a webapp slot
azure_rm_webapp_slot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
configuration_source: myJavaWebApp
app_settings:
testkey: testvalue
- name: swap the slot with production slot
azure_rm_webapp_slot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
swap:
action: swap
- name: stop the slot
azure_rm_webapp_slot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
app_state: stopped
- name: update a webapp slot app settings
azure_rm_webapp_slot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
app_settings:
testkey: testvalue2
- name: update a webapp slot frameworks
azure_rm_webapp_slot:
resource_group: myResourceGroup
webapp_name: myJavaWebApp
name: stage
frameworks:
- name: "node"
version: "10.1"
'''
RETURN = '''
id:
description: Id of current slot.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrest.serialization import Model
from azure.mgmt.web.models import (
site_config, app_service_plan, Site,
AppServicePlan, SkuDescription, NameValuePair
)
except ImportError:
# This is handled in azure_rm_common
pass
# Sub-option schema for the `swap` module parameter (slot swap behaviour).
swap_spec = dict(
    action=dict(
        type='str',
        choices=[
            'preview',
            'swap',
            'reset'
        ],
        default='preview'
    ),
    target_slot=dict(
        type='str'
    ),
    preserve_vnet=dict(
        type='bool',
        default=True
    )
)
# Sub-option schema for `container_settings` (custom container deployment).
container_settings_spec = dict(
    name=dict(type='str', required=True),
    registry_server_url=dict(type='str'),
    registry_server_user=dict(type='str'),
    registry_server_password=dict(type='str', no_log=True)
)
# Sub-option schema for `deployment_source` (git-based deployment).
deployment_source_spec = dict(
    url=dict(type='str'),
    branch=dict(type='str')
)
# Java-specific settings nested inside a framework entry.
framework_settings_spec = dict(
    java_container=dict(type='str', required=True),
    java_container_version=dict(type='str', required=True)
)
# Sub-option schema for each entry of the `frameworks` list parameter.
framework_spec = dict(
    name=dict(
        type='str',
        required=True,
        choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
    version=dict(type='str', required=True),
    settings=dict(type='dict', options=framework_settings_spec)
)
def webapp_to_dict(webapp):
    """Flatten an Azure web app SDK object into a plain dict of facts."""
    facts = {
        'id': webapp.id,
        'name': webapp.name,
        'location': webapp.location,
        'client_cert_enabled': webapp.client_cert_enabled,
        'enabled': webapp.enabled,
        'reserved': webapp.reserved,
        'client_affinity_enabled': webapp.client_affinity_enabled,
        'server_farm_id': webapp.server_farm_id,
        'host_names_disabled': webapp.host_names_disabled,
        # Not every SDK version exposes these attributes; default to None.
        'https_only': getattr(webapp, 'https_only', None),
        'skip_custom_domain_verification': getattr(webapp, 'skip_custom_domain_verification', None),
        'ttl_in_seconds': getattr(webapp, 'ttl_in_seconds', None),
        'state': webapp.state,
        # An empty tags mapping is reported as None.
        'tags': webapp.tags if webapp.tags else None,
    }
    return facts
def slot_to_dict(slot):
    """Flatten an Azure deployment-slot SDK object into a plain dict."""
    # Copy the straightforward attributes verbatim.
    plain = {attr: getattr(slot, attr) for attr in (
        'id', 'resource_group', 'server_farm_id', 'target_swap_slot',
        'enabled_host_names', 'slot_swap_status', 'name', 'location',
        'enabled', 'reserved', 'host_names_disabled', 'state',
        'repository_site_name', 'default_host_name', 'kind', 'site_config',
    )}
    # An empty tags mapping is reported as None, matching webapp_to_dict.
    plain['tags'] = slot.tags if slot.tags else None
    return plain
class Actions:
    """Enumeration of the module's pending actions as consecutive ints."""
    NoAction = 0
    CreateOrUpdate = 1
    UpdateAppSettings = 2
    Delete = 3
class AzureRMWebAppSlots(AzureRMModuleBase):
"""Configuration class for an Azure RM Web App slot resource"""
    def __init__(self):
        """Declare the Ansible argument spec and default instance state,
        then delegate to AzureRMModuleBase for parameter handling.
        """
        # Argument schema mirrored from the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            webapp_name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            configuration_source=dict(
                type='str'
            ),
            # 'raw' because this accepts either a slot name or False.
            auto_swap_slot_name=dict(
                type='raw'
            ),
            swap=dict(
                type='dict',
                options=swap_spec
            ),
            frameworks=dict(
                type='list',
                elements='dict',
                options=framework_spec
            ),
            container_settings=dict(
                type='dict',
                options=container_settings_spec
            ),
            deployment_source=dict(
                type='dict',
                options=deployment_source_spec
            ),
            startup_file=dict(
                type='str'
            ),
            app_settings=dict(
                type='dict'
            ),
            purge_app_settings=dict(
                type='bool',
                default=False
            ),
            app_state=dict(
                type='str',
                choices=['started', 'stopped', 'restarted'],
                default='started'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        # A slot is configured either from frameworks or from a container.
        mutually_exclusive = [['container_settings', 'frameworks']]
        # Instance attributes populated from module params in exec_module.
        self.resource_group = None
        self.name = None
        self.webapp_name = None
        self.location = None
        self.auto_swap_slot_name = None
        self.swap = None
        self.tags = None
        self.startup_file = None
        self.configuration_source = None
        self.clone = False
        # site config, e.g app settings, ssl
        self.site_config = dict()
        self.app_settings = dict()
        self.app_settings_strDic = None
        # siteSourceControl
        self.deployment_source = dict()
        # site, used at level creation, or update.
        self.site = None
        # property for internal usage, not used for sdk
        self.container_settings = None
        self.purge_app_settings = False
        self.app_state = 'started'
        self.results = dict(
            changed=False,
            id=None,
        )
        self.state = None
        self.to_do = Actions.NoAction
        self.frameworks = None
        # set site_config value from kwargs
        self.site_config_updatable_frameworks = ["net_framework_version",
                                                 "java_version",
                                                 "php_version",
                                                 "python_version",
                                                 "linux_fx_version"]
        self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
        self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
        super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                 mutually_exclusive=mutually_exclusive,
                                                 supports_check_mode=True,
                                                 supports_tags=True)
def exec_module(self, **kwargs):
    """Main module execution method.

    Drives create/update/delete of a web app deployment slot based on
    ``state``, then applies app-state (start/stop/restart) and slot-swap
    actions.  Returns ``self.results`` (``changed`` flag plus slot ``id``).
    """
    # Copy module parameters onto the instance; non-attribute keys that map
    # into site_config (currently only scm_type) go straight into site_config.
    for key in list(self.module_arg_spec.keys()) + ['tags']:
        if hasattr(self, key):
            setattr(self, key, kwargs[key])
        elif kwargs[key] is not None:
            if key == "scm_type":
                self.site_config[key] = kwargs[key]
    old_response = None
    response = None
    to_be_updated = False
    # set location: default to the resource group's location when not given
    resource_group = self.get_resource_group(self.resource_group)
    if not self.location:
        self.location = resource_group.location
    # get web app: the parent web app must already exist
    webapp_response = self.get_webapp()
    if not webapp_response:
        self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))
    # get slot (False/None when the slot does not exist yet)
    old_response = self.get_slot()
    # set is_linux: 'reserved' is how the API flags a Linux app service plan
    is_linux = True if webapp_response['reserved'] else False
    if self.state == 'present':
        if self.frameworks:
            # java is mutually exclusive with other frameworks
            if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
                self.fail('Java is mutually exclusive with other frameworks.')
            if is_linux:
                # Linux apps accept exactly one framework, expressed as linux_fx_version
                if len(self.frameworks) != 1:
                    self.fail('Can specify one framework only for Linux web app.')
                if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
                    self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
                self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
                if self.frameworks[0]['name'] == 'java':
                    if self.frameworks[0]['version'] != '8':
                        self.fail("Linux web app only supports java 8.")
                    if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
                            self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
                        self.fail("Linux web app only supports tomcat container.")
                    if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
                            self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
                        self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
                    else:
                        self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
            else:
                # Windows apps: each framework becomes a <name>_version setting
                for fx in self.frameworks:
                    if fx.get('name') not in self.supported_windows_frameworks:
                        self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
                    else:
                        self.site_config[fx.get('name') + '_version'] = fx.get('version')
                    if 'settings' in fx and fx['settings'] is not None:
                        for key, value in fx['settings'].items():
                            self.site_config[key] = value
        if not self.app_settings:
            self.app_settings = dict()
        if self.container_settings:
            # Container deployments override linux_fx_version with DOCKER|<image>
            linux_fx_version = 'DOCKER|'
            if self.container_settings.get('registry_server_url'):
                self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
                linux_fx_version += self.container_settings['registry_server_url'] + '/'
            linux_fx_version += self.container_settings['name']
            self.site_config['linux_fx_version'] = linux_fx_version
            if self.container_settings.get('registry_server_user'):
                self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
            if self.container_settings.get('registry_server_password'):
                self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
        # set auto_swap_slot_name: a string sets it, False explicitly clears it
        if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str):
            self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name
        if self.auto_swap_slot_name is False:
            self.site_config['auto_swap_slot_name'] = None
        # init site envelope used for both create and update
        self.site = Site(location=self.location, site_config=self.site_config)
        # check if the slot already present in the webapp
        if not old_response:
            self.log("Web App slot doesn't exist")
            to_be_updated = True
            self.to_do = Actions.CreateOrUpdate
            self.site.tags = self.tags
            # if linux, setup startup_file
            if self.startup_file:
                self.site_config['app_command_line'] = self.startup_file
            # set app setting
            if self.app_settings:
                app_settings = []
                for key in self.app_settings.keys():
                    app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
                self.site_config['app_settings'] = app_settings
            # clone slot configuration from configuration_source after creation
            if self.configuration_source:
                self.clone = True
        else:
            # existing slot, do update
            self.log("Web App slot already exists")
            self.log('Result: {0}'.format(old_response))
            update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
            if update_tags:
                to_be_updated = True
            # check if site_config changed
            old_config = self.get_configuration_slot(self.name)
            if self.is_site_config_changed(old_config):
                to_be_updated = True
                self.to_do = Actions.CreateOrUpdate
            self.app_settings_strDic = self.list_app_settings_slot(self.name)
            # purge existing app_settings:
            if self.purge_app_settings:
                to_be_updated = True
                self.to_do = Actions.UpdateAppSettings
                self.app_settings_strDic = dict()
            # check if app settings changed
            if self.purge_app_settings or self.is_app_settings_changed():
                to_be_updated = True
                self.to_do = Actions.UpdateAppSettings
                if self.app_settings:
                    for key in self.app_settings.keys():
                        self.app_settings_strDic[key] = self.app_settings[key]
    elif self.state == 'absent':
        if old_response:
            self.log("Delete Web App slot")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_slot()
            self.log('Web App slot deleted')
        else:
            self.log("Web app slot {0} not exists.".format(self.name))
    if to_be_updated:
        self.log('Need to Create/Update web app')
        self.results['changed'] = True
        if self.check_mode:
            return self.results
        if self.to_do == Actions.CreateOrUpdate:
            response = self.create_update_slot()
            self.results['id'] = response['id']
            if self.clone:
                self.clone_slot()
        if self.to_do == Actions.UpdateAppSettings:
            self.update_app_settings_slot()
    slot = None
    if response:
        slot = response
    # NOTE(review): old_response deliberately overrides the fresh response
    # here, so state checks below use the pre-update slot state - confirm.
    if old_response:
        slot = old_response
    if slot:
        # Reconcile running state with the requested app_state
        if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \
                (slot['state'] != 'Running' and self.app_state == 'started') or \
                self.app_state == 'restarted':
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.set_state_slot(self.app_state)
        if self.swap:
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.swap_slot()
    return self.results
# compare desired site config against the deployed configuration
def is_site_config_changed(self, existing_config):
    """Return True when any updatable framework version or the
    auto_swap_slot_name differs from the deployed configuration."""
    for framework in self.site_config_updatable_frameworks:
        wanted = self.site_config.get(framework)
        if not wanted:
            continue
        deployed = getattr(existing_config, framework)
        if not deployed or deployed.upper() != wanted.upper():
            return True
    # auto_swap_slot_name False means "disable"; a truthy value means "set".
    if self.auto_swap_slot_name is False:
        if existing_config.auto_swap_slot_name is not None:
            return True
    elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None):
        return True
    return False
# comparing existing app settings with the requested ones
def is_app_settings_changed(self):
    """Return True when the requested app settings differ from the
    settings currently deployed on the slot."""
    if not self.app_settings:
        return False
    existing = self.app_settings_strDic
    if len(existing) != len(self.app_settings):
        return True
    return True if existing != self.app_settings else False
# comparing deployment source with input, determine whether it's changed
def is_deployment_source_changed(self, existing_webapp):
    """Return True when the requested deployment source url/branch differs
    from the web app's deployed site_source_control."""
    if not self.deployment_source:
        return False
    deployed = existing_webapp.get('site_source_control')
    for field in ('url', 'branch'):
        wanted = self.deployment_source.get(field)
        if wanted and wanted != deployed[field]:
            return True
    return False
def create_update_slot(self):
    '''
    Creates or updates Web App slot with the specified configuration.
    :return: deserialized Web App instance state dictionary
    '''
    self.log(
        "Creating / Updating the Web App slot {0}".format(self.name))
    try:
        response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group,
                                                                  slot=self.name,
                                                                  name=self.webapp_name,
                                                                  site_envelope=self.site)
        # Long-running operation: block until the poller completes
        if isinstance(response, LROPoller):
            response = self.get_poller_result(response)
    except CloudError as exc:
        self.log('Error attempting to create the Web App slot instance.')
        self.fail("Error creating the Web App slot: {0}".format(str(exc)))
    # self.fail raises, so reaching here means the call succeeded
    return slot_to_dict(response)
def delete_slot(self):
    '''
    Deletes specified Web App slot in the specified subscription and resource group.
    :return: True
    '''
    self.log("Deleting the Web App slot {0}".format(self.name))
    try:
        response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group,
                                                        name=self.webapp_name,
                                                        slot=self.name)
    except CloudError as e:
        self.log('Error attempting to delete the Web App slot.')
        self.fail(
            "Error deleting the Web App slots: {0}".format(str(e)))
    # self.fail raises, so True is only returned on success
    return True
def get_webapp(self):
    '''
    Gets the properties of the specified Web App.
    :return: deserialized Web App instance state dictionary, or False if
        the web app does not exist
    '''
    self.log(
        "Checking if the Web App instance {0} is present".format(self.webapp_name))
    response = None
    try:
        response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
                                                name=self.webapp_name)
        # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
        if response is not None:
            self.log("Response : {0}".format(response))
            self.log("Web App instance : {0} found".format(response.name))
            return webapp_to_dict(response)
    except CloudError as ex:
        # "not found" surfaces as CloudError on older SDKs; treat as absence
        pass
    self.log("Didn't find web app {0} in resource group {1}".format(
        self.webapp_name, self.resource_group))
    return False
def get_slot(self):
    '''
    Gets the properties of the specified Web App slot.
    :return: deserialized Web App slot state dictionary, or False if the
        slot does not exist
    '''
    self.log(
        "Checking if the Web App slot {0} is present".format(self.name))
    response = None
    try:
        response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
                                                     name=self.webapp_name,
                                                     slot=self.name)
        # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
        if response is not None:
            self.log("Response : {0}".format(response))
            self.log("Web App slot: {0} found".format(response.name))
            return slot_to_dict(response)
    except CloudError as ex:
        # "not found" surfaces as CloudError on older SDKs; treat as absence
        pass
    self.log("Does not find web app slot {0} in resource group {1}".format(self.name, self.resource_group))
    return False
def list_app_settings(self):
    '''
    List webapp application settings
    :return: deserialized list response (name/value mapping of settings)
    '''
    self.log("List webapp application setting")
    try:
        response = self.web_client.web_apps.list_application_settings(
            resource_group_name=self.resource_group, name=self.webapp_name)
        self.log("Response : {0}".format(response))
        return response.properties
    except CloudError as ex:
        # Fixed: this is an app-level call, so report the web app name
        # (the original message interpolated self.name, the slot name).
        self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
            self.webapp_name, self.resource_group, str(ex)))
def list_app_settings_slot(self, slot_name):
    '''
    List application settings
    :param slot_name: name of the slot whose settings are listed
    :return: deserialized list response (name/value mapping of settings)
    '''
    self.log("List application setting")
    try:
        response = self.web_client.web_apps.list_application_settings_slot(
            resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
        self.log("Response : {0}".format(response))
        return response.properties
    except CloudError as ex:
        self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
def update_app_settings_slot(self, slot_name=None, app_settings=None):
    '''
    Update application settings
    :param slot_name: slot to update; defaults to self.name
    :param app_settings: settings dict to apply; defaults to
        self.app_settings_strDic
    :return: deserialized updating response
    '''
    self.log("Update application setting")
    if slot_name is None:
        slot_name = self.name
    if app_settings is None:
        app_settings = self.app_settings_strDic
    try:
        response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
                                                                             name=self.webapp_name,
                                                                             slot=slot_name,
                                                                             kind=None,
                                                                             properties=app_settings)
        self.log("Response : {0}".format(response))
        return response.as_dict()
    except CloudError as ex:
        self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
    # Fixed: removed the original unreachable trailing `return response`
    # (self.fail raises, and `response` could be unbound at that point).
def create_or_update_source_control_slot(self):
    '''
    Update site source control
    :return: deserialized updating response, or False when no deployment
        source is configured
    '''
    self.log("Update site source control")
    if self.deployment_source is None:
        return False
    # Only hosted (non-manual) git integration is configured here.
    self.deployment_source['is_manual_integration'] = False
    self.deployment_source['is_mercurial'] = False
    try:
        # Fixed: source-control operations live on the `web_apps` operation
        # group; the original called self.web_client.web_client..., which
        # does not exist and would raise AttributeError.
        response = self.web_client.web_apps.create_or_update_source_control_slot(
            resource_group_name=self.resource_group,
            name=self.webapp_name,
            site_source_control=self.deployment_source,
            slot=self.name)
        self.log("Response : {0}".format(response))
        return response.as_dict()
    except CloudError as ex:
        self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
def get_configuration(self):
    '''
    Get web app configuration
    :return: deserialized web app configuration response
    '''
    self.log("Get web app configuration")
    try:
        response = self.web_client.web_apps.get_configuration(
            resource_group_name=self.resource_group, name=self.webapp_name)
        self.log("Response : {0}".format(response))
        return response
    except CloudError as ex:
        self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
            self.webapp_name, self.resource_group, str(ex)))
def get_configuration_slot(self, slot_name):
    '''
    Get slot configuration
    :param slot_name: name of the slot whose configuration is fetched
    :return: deserialized slot configuration response
    '''
    self.log("Get web app slot configuration")
    try:
        response = self.web_client.web_apps.get_configuration_slot(
            resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
        self.log("Response : {0}".format(response))
        return response
    except CloudError as ex:
        self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format(
            slot_name, self.resource_group, str(ex)))
def update_configuration_slot(self, slot_name=None, site_config=None):
    '''
    Update slot configuration
    :param slot_name: slot to update; defaults to self.name
    :param site_config: configuration to apply; defaults to self.site_config
    :return: deserialized slot configuration response
    '''
    self.log("Update web app slot configuration")
    if slot_name is None:
        slot_name = self.name
    if site_config is None:
        site_config = self.site_config
    try:
        response = self.web_client.web_apps.update_configuration_slot(
            resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config)
        self.log("Response : {0}".format(response))
        return response
    except CloudError as ex:
        self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format(
            slot_name, self.resource_group, str(ex)))
def set_state_slot(self, appstate):
    '''
    Start/stop/restart web app slot
    :param appstate: one of 'started', 'stopped' or 'restarted'
    :return: deserialized updating response
    '''
    try:
        if appstate == 'started':
            response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
        elif appstate == 'stopped':
            response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
        elif appstate == 'restarted':
            response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
        else:
            self.fail("Invalid web app slot state {0}".format(appstate))
        self.log("Response : {0}".format(response))
        return response
    except CloudError as ex:
        # Include the service request id to help correlate with Azure logs
        request_id = ex.request_id if ex.request_id else ''
        self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format(
            appstate, self.name, self.resource_group, request_id, str(ex)))
def swap_slot(self):
    '''
    Swap slot
    Performs self.swap['action']: 'swap' (exchange with target or
    production), 'preview' (apply target config without swapping) or
    'reset' (discard previewed configuration).
    :return: deserialized response
    '''
    self.log("Swap slot")
    try:
        if self.swap['action'] == 'swap':
            # target_slot None means swap this slot with production
            if self.swap['target_slot'] is None:
                response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group,
                                                                              name=self.webapp_name,
                                                                              target_slot=self.name,
                                                                              preserve_vnet=self.swap['preserve_vnet'])
            else:
                response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group,
                                                                   name=self.webapp_name,
                                                                   slot=self.name,
                                                                   target_slot=self.swap['target_slot'],
                                                                   preserve_vnet=self.swap['preserve_vnet'])
        elif self.swap['action'] == 'preview':
            if self.swap['target_slot'] is None:
                response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group,
                                                                                    name=self.webapp_name,
                                                                                    target_slot=self.name,
                                                                                    preserve_vnet=self.swap['preserve_vnet'])
            else:
                response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group,
                                                                                  name=self.webapp_name,
                                                                                  slot=self.name,
                                                                                  target_slot=self.swap['target_slot'],
                                                                                  preserve_vnet=self.swap['preserve_vnet'])
        elif self.swap['action'] == 'reset':
            if self.swap['target_slot'] is None:
                response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group,
                                                                                 name=self.webapp_name)
            else:
                response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
                                                                                  name=self.webapp_name,
                                                                                  slot=self.swap['target_slot'])
            # reset also always clears this slot's own configuration
            response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
                                                                              name=self.webapp_name,
                                                                              slot=self.name)
        self.log("Response : {0}".format(response))
        return response
    except CloudError as ex:
        self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
def clone_slot(self):
    """Copy site configuration and app settings into this slot from
    ``configuration_source`` (either the production app itself, or another
    slot of the same web app)."""
    if self.configuration_source:
        # Cloning from the production app is signalled by src_slot=None
        src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source
        if src_slot is None:
            site_config_clone_from = self.get_configuration()
        else:
            site_config_clone_from = self.get_configuration_slot(slot_name=src_slot)
        self.update_configuration_slot(site_config=site_config_clone_from)
        if src_slot is None:
            app_setting_clone_from = self.list_app_settings()
        else:
            app_setting_clone_from = self.list_app_settings_slot(src_slot)
        # Explicitly requested app settings take precedence over cloned ones
        if self.app_settings:
            app_setting_clone_from.update(self.app_settings)
        self.update_app_settings_slot(app_settings=app_setting_clone_from)
def main():
    """Main execution: instantiating the module runs exec_module."""
    AzureRMWebAppSlots()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5052927 | <filename>blend.py<gh_stars>0
from glob import *
def blend_subtract(col1, col2):
    """Per-channel subtraction of col2 from col1, clamped to 0..255.

    When the global 'grey' option is set, subtract the channel averages
    instead and return a grey (c, c, c) triple.
    """
    if var['grey']['var']:
        c = max(min(255, ((sum(col1) / 3) - (sum(col2) / 3))), 0)
        return (c, c, c)
    return tuple(max(min(255, (a - b)), 0) for a, b in zip(col1[:3], col2[:3]))
def blend_add(col1, col2):
    """Per-channel addition of col1 and col2, clamped to 0..255.

    When the global 'grey' option is set, add the channel averages
    instead and return a grey (c, c, c) triple.
    """
    if var['grey']['var']:
        c = max(min(255, ((sum(col1) / 3) + (sum(col2) / 3))), 0)
        return (c, c, c)
    return tuple(max(min(255, (a + b)), 0) for a, b in zip(col1[:3], col2[:3]))
def blend_combine(col1, col2):
    """Per-channel average of col1 and col2, clamped to 0..255.

    When the global 'grey' option is set, average the channel averages
    instead and return a grey (c, c, c) triple.
    """
    if var['grey']['var']:
        c = max(min(255, (((sum(col1) / 3) + (sum(col2) / 3)) / 2)), 0)
        return (c, c, c)
    return tuple(max(min(255, ((a + b) / 2)), 0) for a, b in zip(col1[:3], col2[:3]))
| StarcoderdataPython |
9778787 | <filename>opensfm/actions/detect_features.py
import logging
from timeit import default_timer as timer
from opensfm import features_processing, io
from opensfm.dataset_base import DataSetBase
logger = logging.getLogger(__name__)
def run_dataset(data: DataSetBase):
    """Compute features for all images and write a timing report."""
    t_start = timer()
    features_processing.run_features_processing(data, data.images(), False)
    elapsed = timer() - t_start
    write_report(data, elapsed)
def write_report(data: DataSetBase, wall_time: float):
    """Aggregate per-image feature reports into features.json."""
    image_reports = []
    for image in data.images():
        try:
            raw = data.load_report("features/{}.json".format(image))
            image_reports.append(io.json_loads(raw))
        except IOError:
            # Missing per-image report is non-fatal; just note it.
            logger.warning("No feature report image {}".format(image))
    data.save_report(
        io.json_dumps({"wall_time": wall_time, "image_reports": image_reports}),
        "features.json",
    )
| StarcoderdataPython |
1808943 | <filename>examples/controller.py
"""
This example:
1. Connects to current controller.
2. Creates a new model.
3. Deploys an application on the new model.
4. Disconnects from the model
5. Destroys the model
"""
import asyncio
import logging
from juju.controller import Controller
from juju import loop
async def main():
    """Connect to the current controller, create a model, deploy an
    application, then tear everything down again."""
    ctrl = Controller()
    await ctrl.connect_current()
    new_model = await ctrl.add_model(
        'my-test-model',
        'aws',
        'aws-tim',
    )
    await new_model.deploy(
        'ubuntu-0',
        application_name='ubuntu',
        series='trusty',
        channel='stable',
    )
    await new_model.disconnect()
    await ctrl.destroy_model(new_model.info.uuid)
    await ctrl.disconnect()
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # Websocket frame logs are very noisy at DEBUG; keep them at INFO.
    ws_logger = logging.getLogger('websockets.protocol')
    ws_logger.setLevel(logging.INFO)
    loop.run(main())
| StarcoderdataPython |
287674 | <filename>utils/metric.py
import numpy as np
import math
from skimage.measure import compare_ssim
"""
img1, img2 should be in numpy format with type uint8.
"""
def psnr(img1, img2):
    """Peak signal-to-noise ratio in dB between two uint8 images.

    Returns +inf for identical images.
    """
    assert (img1.dtype == img2.dtype == np.uint8)
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))
def ssim(img1, img2):
    # Structural similarity; inputs must be 2-D (greyscale) uint8 images.
    assert (img1.dtype == img2.dtype == np.uint8)
    assert (img1.ndim == img2.ndim == 2)
    return compare_ssim(img1,img2)
| StarcoderdataPython |
1902814 | <gh_stars>0
import pytest
from collections import defaultdict
from breaking_changes import collector
# TODO: add a fixture to keep clean up the result every time
@pytest.fixture(autouse=True)
def reset_result():
    """Autouse fixture: clear the collector's trace store before each test."""
    collector.result = defaultdict(list)
def test_collector_decorator(reset_result):
    """collector.collect should record one Trace (return value + call args)
    under the decorated function's module-qualified name."""
    @collector.collect
    def func():
        return 100
    # The decorator must be transparent to the wrapped function's result.
    assert func() == 100
    assert dict(collector.result) == {'tests.test_collector.func':
                                      [collector.Trace(ret=100, args=tuple(), kwargs={})]}
| StarcoderdataPython |
8133073 | <reponame>Stanford-PERTS/triton
import sys
import os.path

# Extra places to look for module files, for easy importing.
subdirs = [
    ('app',),         # python server code
    ('lib',),         # python libraries
    ('gae_server',),
    # nested subdirectories, e.g. dir1/dir2, are written as ('dir1', 'dir2')
]
for parts in subdirs:
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), *parts))
| StarcoderdataPython |
4907492 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 20:43:18 2019
@author: blose
"""
#%%
import numpy as np
from tqdm import tqdm
import time
def read_data(filename):
    """Read a comma-separated data file into an object ndarray.

    Each non-empty line becomes one row; fields are split on ', '.
    The trailing element produced by the final newline is dropped.
    """
    # Context manager ensures the handle is closed (the original leaked
    # the file object returned by open()).
    with open(filename) as f:
        lines = f.read().split('\n')
    data = [line.split(', ') for line in lines[:-1]]
    return np.array(data, dtype='object')
def inner_prod(x, w):
    """Bias plus dot product: w[0] + x . w[1:]."""
    total = w[0]
    for xi, wi in zip(x, w[1:]):
        total += xi * wi
    return total
def predict(x, w):
    """Sign classifier: +1 when the weighted score is positive, else -1."""
    score = inner_prod(x, w)
    if score > 0:
        return 1
    return -1
def train_perceptron(X_train, y_train, X_dev, y_dev, epochs):
    """Train a vanilla perceptron; y labels are expected in {-1, +1}.

    Prints per-epoch update counts and dev error; returns the final
    weight vector w, where w[0] is the bias.
    """
    m, n = X_train.shape
    w = np.array([0 for i in range(n+1)])
    for epoch in range(epochs):
        updates = 0
        for i in range(m):
            pred = inner_prod(X_train[i], w)
            # Mistake (or exactly on the boundary): perceptron update
            if y_train[i]*pred <= 0:
                updates += 1
                w[0] = w[0] + y_train[i]
                w[1:] = w[1:] + y_train[i]*X_train[i]
        # Evaluate on the dev set after each epoch
        y_pred = np.zeros(X_dev.shape[0])
        for i in range(X_dev.shape[0]):
            y_pred[i] = predict(X_dev[i], w)
        print('epoch', epoch, 'updates', updates, \
              '('+str(np.round(updates/m*100,2))+'%)', 'dev_err',
              np.round(np.mean(y_pred != y_dev)*100,2), '(+:'+str(np.round(100*(y_pred > 0).mean(),2))+'%)')
    return w
def train_perceptron_average(X_train, y_train, X_dev, y_dev, epochs):
    """Averaged perceptron; y labels are expected in {-1, +1}.

    Returns ws, the (unnormalized) sum of the weight vector over every
    training step; predictions with ws are equivalent to using the average.
    """
    m, n = X_train.shape
    w = np.array([0 for i in range(n+1)])
    ws = np.array([0 for i in range(n+1)])
    for epoch in range(epochs):
        updates = 0
        for i in range(m):
            pred = inner_prod(X_train[i], w)
            if y_train[i]*pred <= 0:
                updates += 1
                w[0] = w[0] + y_train[i]
                w[1:] = w[1:] + y_train[i]*X_train[i]
            # Accumulate after every example (not just on mistakes)
            ws = ws + w
        # Dev evaluation uses the running sum ws, not the latest w
        y_pred = np.zeros(X_dev.shape[0])
        for i in range(X_dev.shape[0]):
            y_pred[i] = predict(X_dev[i], ws)
        print('epoch', epoch, 'updates', updates, \
              '('+str(np.round(updates/m*100,2))+'%)', 'dev_err',
              np.round(np.mean(y_pred != y_dev)*100,2), '(+:'+str(np.round(100*(y_pred > 0).mean(),2))+'%)')
    return ws
def knn(X_train, y_train, X_test, n_neighbors = 3, metric='euclidian'):
    """k-nearest-neighbour classification of X_test against (X_train, y_train).

    :param metric: 'euclidian' or 'manhatan' (spellings kept for
        compatibility with existing callers)
    :return: array of predicted labels in {0, 1}; ties go to 0
    NOTE(review): the vote counts labels 0/1, but callers in this script
    pass y_train in {-1, +1} - confirm the intended label convention.
    """
    y_pred = []
    if metric =='euclidian':
        dist = lambda A,b: np.sqrt(((A - b)**2).sum(axis=1))
    elif metric =='manhatan':
        dist = lambda A,b: np.abs(A - b).sum(axis=1)
    else:
        # Fixed: an unknown metric previously crashed later with
        # NameError on `dist`; fail fast with a clear message instead.
        raise ValueError('unknown metric: {}'.format(metric))
    for row in tqdm(range(X_test.shape[0])):
        dists = dist(X_train, X_test[row,:])
        indx = np.argsort(dists)
        most = y_train[indx[:n_neighbors]]
        target0 = (most == 0).sum()
        target1 = (most == 1).sum()
        if target0 >= target1:
            y_pred.append(0)
        else:
            y_pred.append(1)
    return np.array(y_pred)
#%%
train = read_data('hw1-data/income.train.txt.5k')
dev = read_data('hw1-data/income.dev.txt')

# One-hot encode every column; `mapping` remembers which (value, column)
# pair each encoded feature index k corresponds to.
mapping = {}
encoded = []
k = 0
for col in range(train.shape[1]):
    items = np.unique(train[:,col])
    thiscol = np.zeros((train.shape[0], items.shape[0]))
    for i, item in enumerate(items):
        mapping[k] = (item, col)
        k += 1
        thiscol[train[:,col] == item, i] = 1
    encoded.append(thiscol)
encoded = np.concatenate(encoded, axis=1)
# Last two columns one-hot the target; (-1)**indicator maps it to {-1, +1}
X_train = encoded[:, :-2]
y_train = (-1)**encoded[:, -2]
dev_encoded = np.zeros((dev.shape[0], encoded.shape[1]))
# NOTE(review): this matches val[0] against *every* column i instead of
# the recorded column val[1] (compare the test-set encoding below, which
# uses dev-style `test[:,val[1]]`) - values shared across columns would
# set spurious features; confirm intended behaviour.
for key, val in mapping.items():
    for i in range(dev.shape[1]):
        dev_encoded[dev[:,i] == val[0], key] = 1
X_dev = dev_encoded[:, :-2]
y_dev = (-1)**dev_encoded[:, -2]

w = train_perceptron(X_train, y_train, X_dev, y_dev, 5)
ws = train_perceptron_average(X_train, y_train, X_dev, y_dev, 5)
# Most negative / most positive averaged weights and their features
indx = np.argsort(ws[1:])
for i in indx[:5]:
    print(ws[i+1], mapping[i])
indx = np.argsort(ws[1:])
for i in indx[-5:]:
    print(ws[i+1], mapping[i])
print('Bias:', ws[0])
start = time.time()
y_pred = y_pred = knn(X_train, y_train, X_dev, k)
print('KNN Runtime:', time.time()-start)
start = time.time()
ws= train_perceptron_average(X_train, y_train, X_dev, y_dev, 5)
print('Perceptron Runtime:', time.time()-start)
# 4.1  Order sensitivity: train on data sorted so positives come first
sorted_index = np.argsort(-y_train)
w = train_perceptron(X_train[sorted_index], y_train[sorted_index], X_dev, y_dev, 5)
ws = train_perceptron_average(X_train[sorted_index], y_train[sorted_index], X_dev, y_dev, 5)
# 4.2 (a)  Append raw numeric columns (age, hours) unscaled
X_train2 = np.concatenate((X_train, train[:,[0,7]].astype(int)), axis=1)
X_dev2 = np.concatenate((X_dev, dev[:,[0,7]].astype(int)), axis=1)
ws = train_perceptron_average(X_train2, y_train, X_dev2, y_dev, 5)
# 4.2 (b)  Centered numeric features (each split centered on its own mean)
num = train[:,[0,7]].astype(int)
num = num - num.mean(axis=0)
X_train3 = np.concatenate((X_train, num), axis=1)
num = dev[:,[0,7]].astype(int)
num = num - num.mean(axis=0)
X_dev3 = np.concatenate((X_dev, num), axis=1)
ws = train_perceptron_average(X_train3, y_train, X_dev3, y_dev, 5)
# 4.2 (c)  Standardized numeric features (z-scores)
num = train[:,[0,7]].astype(int)
num = (num - num.mean(axis=0))/num.std(axis=0)
X_train4 = np.concatenate((X_train, num), axis=1)
num = dev[:,[0,7]].astype(int)
num = (num - num.mean(axis=0))/num.std(axis=0)
X_dev4 = np.concatenate((X_dev, num), axis=1)
ws = train_perceptron_average(X_train4, y_train, X_dev4, y_dev, 5)
# 4.2 (d)  sex x race interaction features (columns 6 and 5)
combs_train = np.zeros((train.shape[0], 2*5))
combs_dev = np.zeros((dev.shape[0], 2*5))
k = 0
for sex in np.unique(train[:,6]):
    for race in np.unique(train[:,5]):
        combs_train[(train[:,6] == sex) & (train[:,5] == race), k] = 1
        combs_dev[(dev[:,6] == sex) & (dev[:,5] == race), k] = 1
        k += 1
X_train5 = np.concatenate((X_train, combs_train), axis=1)
X_dev5 = np.concatenate((X_dev, combs_dev), axis=1)
ws = train_perceptron_average(X_train5, y_train, X_dev5, y_dev, 5)

# Encode the blind test set with the training-time mapping and predict.
test = read_data('hw1-data/income.test.blind')
X_test = np.zeros((test.shape[0], X_train.shape[1]))
for key, val in list(mapping.items())[:-2]:
    X_test[test[:,val[1]] == val[0], key] = 1
# NOTE(review): this trains on X_train4 (with 2 extra numeric features)
# but predicts X_test which only has X_train's width; inner_prod's zip
# silently drops the extra weights - confirm this is intended.
ws = train_perceptron_average(X_train4, y_train, X_dev4, y_dev, 1)
y_test = []
for i in range(X_test.shape[0]):
    y_test.append(predict(X_test[i], ws))
y_test = np.array(y_test)
print('Test set positive rate:', (y_test > 0).mean())
# Map {-1, +1} predictions back to the dataset's label strings
target = np.array(['<=50K' if x == -1 else '>50K' for x in y_test])
test = np.concatenate((test, target.reshape(-1,1)), axis=1)
with open('income.test.predicted', 'w') as myfile:
    for i in range(test.shape[0]):
myfile.write(', '.join(test[i,:])+'\n') | StarcoderdataPython |
5101080 | # -*- coding: utf-8 -*-
import sys, getopt
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import re
import nltk
import regex
from nltk.corpus import stopwords
import json
import cPickle as pickle
from sklearn.feature_extraction.text import CountVectorizer
#from pprint import pprint
from math import *
from gensim.models import Word2Vec
import string
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import PCA
import nltk
from nltk.corpus import brown
import re
import regex
import operator
import json
import cPickle as pickle
import sys, getopt
import numpy as np
from nltk.corpus import stopwords
import string
import time
nltk.data.path.append("/media/nltk")
brown_train = brown.tagged_sents(categories='news')
regexp_tagger = nltk.RegexpTagger(
[(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
(r'(-|:|;)$', ':'),
(r'\'*$', 'MD'),
(r'(The|the|A|a|An|an)$', 'AT'),
(r'.*able$', 'JJ'),
(r'^[A-Z].*$', 'NNP'),
(r'.*ness$', 'NN'),
(r'.*ly$', 'RB'),
(r'.*s$', 'NNS'),
(r'.*ing$', 'VBG'),
(r'.*ed$', 'VBD'),
(r'.*', 'NN')
])
unigram_tagger = nltk.UnigramTagger(brown_train, backoff=regexp_tagger)
bigram_tagger = nltk.BigramTagger(brown_train, backoff=unigram_tagger)
cfg = {}
cfg["NNP+NNP"] = "NNP"
cfg["NN+NN"] = "NNI"
cfg["NNI+NN"] = "NNI"
cfg["JJ+JJ"] = "JJ"
cfg["JJ+NN"] = "NNI"
def clean(raw_comment):
    '''
    Function to convert a raw comment to a string of words
    The input is a single string and
    the output is a single string
    This function filters out non-Latin characters,
    HTML formatting, stop words from English language,
    (Python 2 only: relies on py2 filter()/ur'' semantics)
    '''
    if raw_comment=="deleted": return ""
    else:
        # remove non-ascii characters (py2 filter on a str returns a str)
        raw_comment = filter(lambda x: x in string.printable, raw_comment)
        # remove urls (a heavier URL regex was previously used here)
        raw_comment = re.sub(r"http\S+", "", raw_comment)
        # Remove non-letters (keep any Latin-script characters)
        latin_only = regex.sub(ur'[^\p{Latin}]', u' ', raw_comment)
        # Collapse runs of whitespace to single spaces
        letters_only = re.sub(r'\s+', ' ', latin_only)
        # letters_only = re.sub("[^a-zA-Z]", " ", raw_comment)
        # Convert to lower case, split into individual words
        words = letters_only.lower().split()
        # In Python, searching a set is much faster than searching
        # a list, so convert the stop words to a set
        stops = set(stopwords.words("english"))
        # Remove stop words
        meaningful_words = [w for w in words if not w in stops]
        # Join the words back into one string separated by space,
        # and return the result.
        return ( " ".join( meaningful_words ))
def create_ts(input_file):
    '''
    Creates a training set from input file
    (one JSON object per line; returns the parsed list of dicts)
    '''
    print "Reading json file..."
    dicts = []
    f = open(input_file)
    count = 0
    for line in iter(f):
        # Progress indicator for very large dumps
        if count%100000==0:
            print "line #",count
        dicts.append(json.loads(line))
        count = count + 1
    print "loading json dicts..."
    # NOTE(review): dumps-then-loads is a round-trip copy of `dicts`;
    # it appears redundant - confirm it can be dropped.
    train = json.loads(json.dumps(dicts))
    f.close()
    print ('Size of the dataset was {:d} samples'.format(len(train)))
    return train
class NPExtractor(object):
    """Extract noun-phrase topics from a sentence using the module-level
    Brown-trained taggers and the `cfg` tag-merge grammar."""

    def __init__(self, sentence):
        self.sentence = sentence

    # Split the sentence into single words/tokens
    def tokenize_sentence(self, sentence):
        tokens = nltk.word_tokenize(sentence)
        return tokens

    # Normalize brown corpus' tags ("NN", "NN-PL", "NNS" > "NN")
    def normalize_tags(self, tagged):
        n_tagged = []
        for t in tagged:
            if t[1] == "NP-TL" or t[1] == "NP":
                n_tagged.append((t[0], "NNP"))
                continue
            if t[1].endswith("-TL"):
                n_tagged.append((t[0], t[1][:-3]))
                continue
            if t[1].endswith("S"):
                n_tagged.append((t[0], t[1][:-1]))
                continue
            n_tagged.append((t[0], t[1]))
        return n_tagged

    # Extract the main topics from the sentence
    def extract(self):
        tokens = self.tokenize_sentence(self.sentence)
        tags = self.normalize_tags(bigram_tagger.tag(tokens))
        # Repeatedly merge the first adjacent pair whose tags match a
        # cfg rule, until no more merges apply.
        merge = True
        while merge:
            merge = False
            for x in range(0, len(tags) - 1):
                t1 = tags[x]
                t2 = tags[x + 1]
                key = "%s+%s" % (t1[1], t2[1])
                value = cfg.get(key, '')
                if value:
                    merge = True
                    tags.pop(x)
                    tags.pop(x)
                    match = "%s %s" % (t1[0], t2[0])
                    pos = value
                    tags.insert(x, (match, pos))
                    break
        # Keep only proper nouns and merged noun phrases
        matches = []
        for t in tags:
            if t[1] == "NNP" or t[1] == "NNI":
                #if t[1] == "NNP" or t[1] == "NNI" or t[1] == "NN":
                matches.append(t[0])
        return matches
# Name of the input shard to process; in the execnet deployment this was
# received from the master over the channel instead (see commented code).
input_file = "xaa"# pickle.loads(channel.receive())
# info_channel = channel.receive()
train = create_ts("/media/jan2015/tmp/" + input_file)
comments = []
clean_comments = []
# load the list of top subreddits
# subreddits = [k for k,v in pickle.load(open("/media/fin/pickled_top_subreddits","r+"))]
from collections import Counter
# dictionary with key=subreddit and value = list of clean comments.
# Group in a single pass over ``train``; the original re-scanned the whole
# dataset once per subreddit, which was O(#subreddits * #comments).
collection = {}
subreddits = []
count = 0
for entry in train:
    sub = entry["subreddit"]
    if sub not in collection:
        # Preserve first-seen order of subreddits, as before.
        subreddits.append(sub)
        collection[sub] = []
    collection[sub].append(clean(entry["body"]))
    count += 1
# in topics: key=subreddit, value = list of topic phrases extracted from
# every cleaned comment of that subreddit
topics = {}
for (subreddit, comments) in collection.items():
    tmp = []
    for sentence in comments:
        np_extractor = NPExtractor(sentence)
        tmp.extend(np_extractor.extract())
    topics[subreddit] = tmp
# changed collection: key=subreddit, value=dictionary of words and their frequencies
result = []
for (subreddit, topic) in topics.items():
    words = []
    for phrase in topic:
        words.extend(phrase.split())
    # Counter computes all frequencies in one O(n) pass; the original
    # called tmp.count(word) once per word, which was quadratic.
    collection[subreddit] = dict(Counter(words))
if __name__ == '__main__':
    # Entry point when this module runs as an execnet remote: the implicit
    # ``channel`` global is the link back to the master process.
    # fileid = pickle.loads(channel.receive())
    # tagger = pickle.loads(channel.receive())
    # corpus_name = channel.receive()
    # corpus = getattr(nltk.corpus, corpus_name)
    while 1:
        fileid = channel.receive()
        new_channel = channel.receive()
        # NOTE(review): ``input_file`` is computed but never used below --
        # the processing pipeline it fed is currently commented out.
        input_file = "/media/tiny/" + pickle.loads(fileid)
        # train = create_ts(input_file)
        # comments = []
        # clean_comments = []
        # for entry in train:
        #     comments.append(entry["body"])
        #     #channel.send(len(comments))
        # for comment in comments:
        #     clean_comments.append(clean(comment))
        # voc = fit_data().get_feature_names()
        # channel.send(pickle.dumps(voc))
        # voc = pickle.loads(new_channel.receive())
        # new_voc = {k for k,v in pickle.loads(new_voc).iteritems()}
        # Echo whatever the secondary channel sends straight back (keeps
        # the master's protocol alive while the real work is disabled).
        new_channel.send(new_channel.receive())
| StarcoderdataPython |
3433245 | <filename>cogs/helper_files/crossword_cog_helper.py<gh_stars>0
import datetime
from firebase_config import ibaelia_db
from dateutil.parser import parse
import discord
def get_scores_by_id(user_id, guild, time, limit=7):
    """Map the *limit* dates ending at *time* to the user's scores.

    Each date key maps to the matching score record, or None for days
    without a recorded score.
    NOTE(review): the *guild* parameter is currently unused here.
    """
    # Newest date first after the reversal.
    dates = get_past_num_days(time, limit)[::-1]
    final_scores = {key:None for key in dates}
    all_scores = ibaelia_db.child("scores").order_by_child("time").get().val()
    if all_scores is None:
        # No scores recorded at all: every date stays None.
        return final_scores
    # This user's scores, newest first, capped at the last *limit* entries.
    list_scores = [score for score in list(all_scores.values()) if score["user_id"] == user_id][::-1][:limit]
    # Check if a score's date is in the date array
    # If so, add score to score list
    # If not, then add None to score list
    count = 0
    for score_idx in range(count, len(list_scores)):
        for date in dates:
            # Scores store "YYYY-MM-DD ..." strings; compare the date part.
            if list_scores[score_idx]["time"].split(" ")[0] == str(date):
                final_scores[date] = list_scores[score_idx]
                # NOTE(review): ``count`` no longer affects anything; the
                # range above was evaluated once with count == 0.
                count += 1
                break
    return final_scores
def get_scores_by_time(time, guild):
    """Return all scores recorded on date *time* for members of *guild*.

    *time* is a "YYYY-MM-DD" date string; the backing query is ordered by
    score (Firebase ``order_by_child("score")``).
    """
    final_scores = []
    all_scores = ibaelia_db.child("scores").order_by_child("score").get().val()
    if all_scores is None:
        return []
    # Keep only scores whose date component matches the requested day.
    list_scores = [score for score in list(all_scores.values()) if score['time'].split(" ")[0] == time]
    for score in list_scores:
        # Look up the score's owner to check guild membership.
        user = ibaelia_db.child("users").order_by_child("id").equal_to(score["user_id"]).get().val()
        user_vals = list(user.values())[0]
        if guild in user_vals['guilds']:
            final_scores.append(score)
    return final_scores
def check_date(time):
    """Return True when *time* is a valid "YYYY-MM-DD" calendar date."""
    try:
        y, m, d = (int(part) for part in time.split("-"))
        datetime.datetime(y, m, d)
    except ValueError:
        # Wrong number of fields, a non-numeric field, or a date that is
        # out of range (e.g. Feb 30) all land here.
        return False
    return True
def get_past_num_days(time, num):
    """Return the last *num* calendar dates ending at *time*, oldest first."""
    end = parse(time)
    dates = []
    for offset in range(1 - num, 1):
        dates.append((end + datetime.timedelta(days=offset)).date())
    return dates
def push_score(user_id, username, score, time, guild):
    """Record a crossword score, creating user/guild links as needed.

    Returns ``[valid, prev_score]``: *valid* says whether the score was
    stored (one score per user per day), and *prev_score* is the score
    already recorded for that day, if any.
    """
    # get users
    # check time
    # if user not in users list, create new user
    # add to scores
    ids = add_user_to_database(user_id, username, guild)
    add_to_server(user_id, guild, ids)
    valid, prev_score = is_valid_score(user_id, username, score, time, guild)
    if valid:
        new_score = {
            "user_id": user_id,
            "name": username,
            "score": score,
            "time": time,
            "guild": guild
        }
        ibaelia_db.child("scores").push(new_score)
    return [valid, prev_score]
def add_user_to_database(user_id, username, guild):
    """Create a user record if *user_id* is unknown.

    Returns the list of user ids that existed *before* any insertion
    (callers use it to tell new users from pre-existing ones).
    """
    all_users = ibaelia_db.child("users").get()
    all_users_vals = all_users.val()
    ids = []
    if all_users_vals:
        for user in all_users.each():
            ids.append(user.val()['id'])
    if user_id not in ids:
        # Brand-new user: seed the record with the current guild.
        new_user = {
            "id": user_id,
            "name": username,
            "guilds": [guild]
        }
        ibaelia_db.child("users").push(new_user)
    return ids
def add_to_server(user_id, guild, ids):
    """Attach *guild* to an existing user's guild list if missing.

    *ids* is the pre-existing id list from add_user_to_database; users
    created in that call are skipped because they already received the
    guild at creation time.
    """
    if user_id in ids:
        curr_user = ibaelia_db.child("users").order_by_child("id").equal_to(user_id).get()
        curr_user_vals = list(curr_user.val().values())[0]
        curr_user_key = curr_user.each()[0].key()
        if guild not in curr_user_vals['guilds']:
            guilds = curr_user_vals['guilds']
            guilds.append(guild)
            ibaelia_db.child("users").child(curr_user_key).update({'guilds': guilds})
def is_valid_score(user_id, username, score, time, guild):
    """Check whether the user may record a score for the given day.

    Returns ``[True, None]`` when no score exists yet for that date, or
    ``[False, previous_score]`` when one was already recorded.
    """
    all_scores = ibaelia_db.child("scores").order_by_child("time").get().val()
    if all_scores is None:
        return [True, None]
    # This user's scores only.  The loop variable is named ``entry`` so it
    # no longer shadows the ``score`` parameter (a latent bug magnet).
    user_scores = [entry for entry in list(all_scores.values()) if entry["user_id"] == user_id]
    # Hoist the date extraction out of the loop.
    day = time.split(" ")[0]
    for entry in user_scores:
        if entry['time'].split(" ")[0] == day:
            return [False, entry['score']]
    return [True, None]
def format_scoreboard_embed(embed, scores):
    """Render a ranked scoreboard into a discord Embed.

    *scores* is a list of score records sorted best-first.  Returns
    ``(files, embed)`` where *files* are the discord.File attachments the
    embed references via "attachment://" URLs.
    """
    files = []
    # Emoji names for placements 4..9 (1-3 get medal emoji instead).
    digits = {4: "four", 5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine"}
    # I don't know if creating the discord.File object is necessary to get the icon url?
    crossword_icon = "crossword_images/crossword_icon.jpg"
    file = discord.File(crossword_icon, filename="crossword_icon.jpg")
    files.append(file)
    embed = embed.set_thumbnail(url="attachment://crossword_icon.jpg")
    # The first field will be the current leader shout out
    current_leader_name = "-------------------:small_blue_diamond: CURRENT LEADER :small_blue_diamond:-------------------"
    current_leader_value = "\n"
    if len(scores) != 0:
        # Discord names look like "name#discriminator"; show the short form.
        leader = scores[0]["name"].split("#")[0]
        leader_fullname = scores[0]["name"]
    else:
        leader = "no_one"
        leader_fullname = "no_<PASSWORD>#rip"
    # Compute the number of tabs to offset the name by based on name length
    name_length = len(leader)
    num_tabs = round((38 - (name_length + 2)) / 4.)
    current_leader_value += "```ini\n>" + " " * num_tabs
    current_leader_value += f"[{leader}]" + " " * num_tabs + "<```"
    # Add bottom border
    current_leader_value += "\n" + ("=" * 41)
    embed.add_field(name=current_leader_name, value=current_leader_value, inline=False)
    # Add blank field for spacing
    embed.add_field(name="\u200b", value="\u200b", inline=False)
    # First place gets a special trophy as well
    if len(scores) != 0:
        time = f"```fix\n{scores[0]['score']}```\n"
    else:
        time = "```fix\ninfinity```\n"
    embed.add_field(name=f":first_place: \u2003{leader_fullname}", value=time,
                    inline=True)
    embed.add_field(name=":trophy:", value="\u200b", inline=True)
    embed.add_field(name="\u200b", value="\u200b", inline=True)
    for idx in range(len(scores)):
        if idx == 0:
            # Already did first place
            continue
        elif idx == 1:
            placement = ":second_place:"
            time = f"```python\n{scores[idx]['score']}```\n"
        elif idx == 2:
            placement = ":third_place:"
            time = f"```python\n{scores[idx]['score']}```\n"
        elif idx < 9:
            placement = f":{digits[idx + 1]}:"
            time = f"```{scores[idx]['score']}```\n"
        else:
            # Beyond ninth place there is no emoji; use a plain number.
            placement = f"{idx + 1}."
            time = f"```{scores[idx]['score']}```\n"
        # We use inline to shorten the code block width
        embed.add_field(name=f"{placement} \u2003{scores[idx]['name']}", value=time,
                        inline=True)
        # Add blank field for other columns
        embed.add_field(name="\u200b", value="\u200b", inline=True)
        embed.add_field(name="\u200b", value="\u200b", inline=True)
    embed.timestamp = datetime.datetime.now()
    embed = embed.set_footer(text=f"uwu")
    # Footer image
    crossword_icon = "ibaelia_images/wow_irelia.jpg"
    file = discord.File(crossword_icon, filename="wow_irelia.jpg")
    files.append(file)
    embed = embed.set_image(url="attachment://wow_irelia.jpg")
    return files, embed
| StarcoderdataPython |
316192 | from argparse import ArgumentParser
from snapchat_bots import SnapchatBot, Snap
import random
class RandoBot(SnapchatBot):
    """Snapchat bot that forwards each incoming snap to a random friend."""
    def initialize(self):
        # Seed the forwarding pool with the account's current friends.
        self.connections = self.get_friends()
        #If your bot ever gets blocked, uncomment these lines.
        #Of course, make sure you have your old users backed up
        #to the users.txt file! So you must uncomment the first
        #three lines, while logged into the blocked bot, then
        #uncomment the rest to re-add all users from the old bot.
        #with open('users.txt', 'w') as file:
        #    for item in self.connections:
        #        print>>file, item
        #f = open('users.txt', 'r')
        #for line in f:
        #   self.add_friend(line)
        #   print(line)
        print(self.connections)
    def connect(self,user):
        # Register *user* as an eligible forwarding target.
        self.log("Added user: %s to the array!" % (user))
        self.connections.append(user)
    def on_friend_add(self,friend):
        # Auto-accept and add the new friend to the pool.
        self.add_friend(friend)
        self.connect(friend)
    def on_friend_delete(self,friend):
        self.delete_friend(friend)
        self.connections.remove(friend)
    def find_random_user(self,username):
        """Pick a random connection other than *username*; None when the
        sender is the only connection."""
        if len(self.connections) <= 1:
            return None
        newuser = random.choice(self.connections)
        # NOTE(review): this loops forever if every entry equals
        # ``username`` (only possible with duplicate entries).
        while(newuser == username):
            newuser = random.choice(self.connections)
        return newuser
    def on_snap(self,sender,snap):
        connection = self.find_random_user(sender)
        if sender not in self.connections:
            # Unknown sender: prompt them to add the bot first.
            self.send_snap([sender], Snap.from_file("../resources/rando_addme.png"))
        if connection:
            # Forward the snap to the randomly chosen recipient.
            self.send_snap([connection],snap)
            print("%s sent snap to %s" % (sender,[connection]))
        else:
            # No other connections yet: send a welcome instead.
            self.send_snap([sender], Snap.from_file("../resources/rando_welcome.png"))
if __name__ == '__main__':
    # Command-line entry point: account credentials come in as flags.
    parser = ArgumentParser("RandoBot Bot")
    parser.add_argument('-u', '--username', required=True, type=str, help="Username of the account to run the bot on")
    parser.add_argument('-p', '--password', required=True, type=str, help="Password of the account to run the bot on")
    args = parser.parse_args()
    bot = RandoBot(args.username, args.password)
    # timeout=33 is passed straight to SnapchatBot.listen -- presumably the
    # polling interval in seconds; TODO confirm units.
    bot.listen(timeout=33)
| StarcoderdataPython |
9716458 | import typing
import torch
from .base import *
from .prim import *
from .aten import *
from .quantized import *
# Maps TorchScript operator names to their converter classes.
#
# NOTE: the original table listed "aten::leaky_relu" twice (once in the
# activation group and again next to "aten::matmul").  Dict literals keep
# only the last duplicate silently, so the redundant entry -- which mapped
# to the same converter -- has been removed.
OPERATOR_CONVERTER_DICT: typing.Dict[str, OperatorConverter] = {
    "prim::Constant": PrimConstantConverter,
    "prim::TupleConstruct": PrimTupleConstructConverter,
    "prim::ListConstruct": PrimListConstructConverter,
    "prim::ListUnpack": PrimListUnpackConverter,
    "prim::GetAttr": PrimGetAttrConverter,
    "prim::ConstantChunk": PrimConstantChunkConverter,
    "prim::NumToTensor": PrimNumToTensorConverter,
    "prim::If": PrimIfConverter,
    "aten::__getitem__": PrimGetItemConverter,
    # aten
    "aten::t": ATenTOperator,
    "aten::view": ATenViewOperator,
    "aten::reshape": ATenReshapeOperator,
    "aten::relu": ATenReluOperator,
    "aten::relu_": ATenReluOperator,
    "aten::relu6": ATenRelu6Operator,
    "aten::relu6_": ATenRelu6Operator,
    "aten::prelu": ATenPreluOperator,
    "aten::leaky_relu": ATenLeakyReluOperator,
    "aten::leaky_relu_": ATenLeakyReluOperator,
    "aten::elu": ATenEluOperator,
    "aten::elu_": ATenEluOperator,
    "aten::conv2d": ATenConv2dOperator,
    "aten::_convolution": ATenConvolutionOperator,
    "aten::batch_norm": ATenBatchNormOperator,
    "aten::avg_pool2d": ATenAvgPool2dOperator,
    "aten::max_pool2d": ATenMaxPool2dOperator,
    "aten::adaptive_avg_pool2d": ATenAdaptiveAvgPool2dOperator,
    "aten::mean": ATenMeanOperator,
    "aten::softmax": ATenSoftmaxOperator,
    "aten::log_softmax": ATenLogSoftmaxOperator,
    "aten::addmm": ATenAddmmOperator,
    "aten::dropout": ATenDropoutOperator,
    "aten::dropout_": ATenDropoutOperator,
    "aten::contiguous": ATenContiguousOperator,
    "aten::permute": ATenPermuteOperator,
    "aten::sin": ATenSinOperator,
    "aten::cos": ATenCosOperator,
    "aten::tanh": ATenTanhOperator,
    "aten::pow": ATenPowOperator,
    "aten::sqrt": ATenSqrtOperator,
    "aten::sigmoid": ATenSigmoidOperator,
    "aten::sigmoid_": ATenSigmoidOperator,
    "aten::add": ATenAddOperator,
    "aten::add_": ATenAddOperator,
    "aten::sub": ATenSubOperator,
    "aten::sub_": ATenSubOperator,
    "aten::mul": ATenMulOperator,
    "aten::mul_": ATenMulOperator,
    "aten::div": ATenDivOperator,
    "aten::div_": ATenDivOperator,
    "aten::reciprocal": ATenReciprocalOperator,
    "aten::reciprocal_": ATenReciprocalOperator,
    "aten::rsqrt": ATenRsqrtOperator,
    "aten::rsqrt_": ATenRsqrtOperator,
    "aten::atan2": ATenAtan2Operator,
    "aten::constant_pad_nd": ATenConstantPadNdOperator,
    "aten::reflection_pad1d": ATenReflectionPad1dOperator,
    "aten::reflection_pad2d": ATenReflectionPad2dOperator,
    "aten::select": ATenSelectOperator,
    "aten::unsqueeze": ATenUnsqueezeOperator,
    "aten::squeeze": ATenSqueezeOperator,
    "aten::slice": ATenSliceOperator,
    "aten::stack": ATenStackOperator,
    "aten::cat": ATenCatOperator,
    "aten::chunk": ATenChunkOperator,
    "aten::embedding": ATenEmbeddingOperator,
    "aten::linear": ATenLinearOperator,
    "aten::lstm": ATenLstmOperator,
    "aten::transpose": ATenTransposeOperator,
    "aten::hardtanh": ATenHardtanhOperator,
    "aten::hardtanh_": ATenHardtanhOperator,
    "aten::flip": ATenFlipOperator,
    "aten::floor": ATenFloorOperator,
    "aten::floor_divide": ATenFloorDivideOperator,
    "aten::matmul": ATenMatmulOperator,
    "aten::mm": ATenMmOperator,
    "aten::flatten": ATenFlattenOperator,
    "aten::upsample_bilinear2d": ATenUpsampleBilinear2dOperator,
    "aten::upsample_nearest2d": ATenUpsampleNearest2dOperator,
    "aten::clamp": ATenClampOperator,
    "aten::exp": ATenExpOperator,
    "aten::log": ATenLogOperator,
    "aten::to": ATenToOperator,
    "aten::ne": ATenNeOperator,
    "aten::softplus": ATenSoftplusOperator,
    "aten::layer_norm": ATenLayerNormOperator,
    "aten::instance_norm": ATenInstanceNormOperator,
    "aten::index": ATenIndexOperator,
    "aten::clone": ATenCloneOperator,
    "aten::repeat": ATenRepeatOperator,
    "aten::hardswish": ATenHardswishOperator,
    "aten::hardswish_": ATenHardswishOperator,
    "aten::hardsigmoid": ATenHardsigmoidOperator,
    "aten::hardsigmoid_": ATenHardsigmoidOperator,
    "aten::silu": ATenSiluOperator,
    "aten::silu_": ATenSiluOperator,
    "aten::std": ATenStdOperator,
    "aten::var": ATenVarOperator,
    "aten::split": ATenSplitOperator,
    "aten::split_with_sizes": ATenSplitWithSizesOperator,
    "aten::pixel_shuffle": ATenPixelShuffleOperator,
    "aten::pixel_unshuffle": ATenPixelUnshuffleOperator,
    "aten::argmax": ATenArgmaxOperator,
    "aten::argmin": ATenArgminOperator,
    "aten::expand": ATenExpandOperator,
    "aten::gather": ATenGatherOperator,
    "aten::gelu": ATenGeluOperator,
    "aten::gelu_": ATenGeluOperator,
    "aten::copy_": ATenCopyOperator,
    "aten::bmm": ATenBmmOperator,
    "aten::eq": ATenEqOperator,
    "aten::neg": ATenNegOperator,
    "aten::bitwise_not": ATenBitwiseNotOperator,
    "aten::bitwise_and": ATenBitwiseAndOperator,
    "aten::bitwise_or": ATenBitwiseOrOperator,
    "aten::__and__": ATenAndOperator,
    "aten::__or__": ATenOrOperator,
    "aten::sum": ATenSumOperator,
    "aten::prod": ATenProdOperator,
    "aten::min": ATenMinOperator,
    "aten::max": ATenMaxOperator,
    "aten::amin": ATenAminOperator,
    "aten::amax": ATenAmaxOperator,
    "aten::glu": ATenGluOperator,
    "aten::glu_": ATenGluOperator,
    "aten::masked_fill": ATenMaskedFillOperator,
    "aten::masked_fill_": ATenMaskedFillOperator,
    "aten::gt": ATenGtOperator,
    "aten::lt": ATenLtOperator,
    "aten::ge": ATenGeOperator,
    "aten::le": ATenLeOperator,
    "aten::remainder": ATenRemainderOperator,
    "aten::where": ATenWhereOperator,
    "aten::type_as": ATenTypeAsOperator,
    "aten::topk": ATenTopkOperator,
    "aten::cumsum": ATenCumsumOperator,
    # quantized
    "aten::quantize_per_tensor": ATenQuantizePerTensorOperator,
    "aten::fake_quantize_per_tensor_affine": ATenFakeQuantizePerTensorAffineOperator,
    "aten::fake_quantize_per_channel_affine": ATenFakeQuantizePerChannelAffineOperator,
    "aten::quantized_lstm": ATenQuantizedLstmOperator,
    "aten::dequantize": ATenDequantizeOperator,
    "quantized::conv1d": QuantizedConv1dOperator,
    "quantized::conv1d_relu": QuantizedConv1dReluOperator,
    "quantized::conv2d": QuantizedConv2dOperator,
    "quantized::conv2d_relu": QuantizedConv2dReluOperator,
    "quantized::linear": QuantizedLinearOperator,
    "quantized::linear_relu": QuantizedLinearReluOperator,
    "quantized::relu6": QuantizedRelu6Operator,
    "quantized::mul": QuantizedMulOperator,
    "quantized::mul_scalar": QuantizedMulScalarOperator,
    "quantized::cat": QuantizedCatOperator,
    "quantized::batch_norm1d": QuantizedBatchNorm1dOperator,
    "quantized::batch_norm2d": QuantizedBatchNorm2dOperator,
    "quantized::batch_norm2d_relu": QuantizedBatchNorm2dReluOperator,
    "quantized::add": QuantizedAddOperator,
    "quantized::add_relu": QuantizedAddReluOperator,
    "quantized::add_scalar": QuantizedAddScalarOperator,
    "quantized::conv_transpose1d": QuantizedConvTranspose1dOperator,
    "quantized::conv_transpose2d": QuantizedConvTranspose2dOperator,
    "quantized::hardswish": QuantizedHardswishOperator,
    "quantized::leaky_relu": QuantizedLeakyReluOperator,
    "quantized::linear_dynamic": QuantizedLinearDynamicOperator,
    "quantized::linear_relu_dynamic": QuantizedLinearReluDynamicOperator,
    "quantized::elu": QuantizedEluOperator,
    # non tracking
    "aten::Int": NoTrackOperator,
    "aten::zeros": NoTrackOperator,
    "aten::detach": NoTrackOperator,
    "aten::size": NoTrackOperator,
    "aten::arange": NoTrackOperator,
    "aten::ones": NoTrackOperator,
    "aten::ones_like": NoTrackOperator,
    "aten::zeros_like": NoTrackOperator,
    "aten::empty": NoTrackOperator,
}
| StarcoderdataPython |
# vestlus:settings
import os
# django-crispy-forms template pack used for form rendering.
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Haystack search backend: Elasticsearch, with the endpoint overridable
# through the ELASTICSEARCH_URL environment variable.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': os.environ.get('ELASTICSEARCH_URL', 'http://127.0.0.1:9200/'),
        'INDEX_NAME': 'haystack',
    },
}
| StarcoderdataPython |
4804254 | <reponame>rafa-evangelista/PYTHON<gh_stars>0
# Read the product price and show it alongside the 5%-off sale price.
preco = float(input('Qual o preço do produto a ser adquirido: R$ '))
preco_com_desconto = preco * 0.95
print('O preço original do produto era de R$ {} mas na liquidação o seu novo preço é de R$ {}.'.format(preco, preco_com_desconto))
1678134 | from .env_wrapper import *
from .utils import *
from .ddpg import *
from .networks import *
# Public API re-exported from the wildcard imports above.
__all__ = [ 'EnvWrapper', 'RLTrainingLogger',
            'DDPGAgent', 'TrainDDPG']
| StarcoderdataPython |
3429307 | <filename>2017-09-15/github_bot/git_bot.py
#!/usr/bin/env python
import argparse
from decouple import config
from github import Github
# Load credentials from the environment/.env file and build the GitHub
# API client.
github_username = config('github_username')
github_password = config('github_password')
github_api = Github(github_username, github_password)
escopo_do_usuario = github_api.get_user()
todos_repos = escopo_do_usuario.get_repos()
parser = argparse.ArgumentParser(description='Git bode')
parser.add_argument('--create-repo', dest='createrepo',
                    required=False, help='create repository')
args = parser.parse_args()
if args.createrepo:
    # Create the requested repository; the returned object is not needed
    # (the original bound it to an unused variable).
    escopo_do_usuario.create_repo(args.createrepo)
# Always list the authenticated user's repositories.
for nome_do_repositorio in todos_repos:
    print(nome_do_repositorio)
3549183 | #!/usr/bin/python
# A daemon which serializes create-slice.sh and delete-slice.sh requests, to
# avoid multiple simultaneous requests to the Ansible scripts
from pymongo import MongoClient
import subprocess
import time
import sys
import os
import json
import datetime
#
# Connect to the db server on the mongo container. This needs to be reset here
# if it changes. Really should read from config.json
#
# Put in error-checking code when we read the docs
#
client = MongoClient('mongodb://mongodb:27017/')
db = client.gee_master
# Work queue of pending create/delete requests (drained by this daemon).
request_collection = db.slice_requests
# Authoritative record of currently provisioned slices.
slice_collection = db.slices
# Append-only audit logs of creations and deletions.
create_log_collection = db.slice_create_records
delete_log_collection = db.slice_delete_records
#
# Open the logfile
#
import logging
# logging.basicConfig(filename='slice_daemon.log',level=logging.DEBUG)
# supervisorctl expects logger output on stderr, so don't redirect if we're running
# under supervisorctl
logging.basicConfig(level=logging.DEBUG)
#
# Pull a slice request
#
def getNextOutstandingRequest():
    # A pending slice request document, or None when the queue is empty.
    return request_collection.find_one()
#
# Get all of the host ports being used by all of the slices
#
def getAllPorts():
    """Return every host port currently claimed by any slice."""
    ports = []
    for slice_record in slice_collection.find({}):
        # Slices created without port mappings may lack the 'ports' field
        # entirely (createSlice only writes it when non-empty), so use
        # .get() instead of indexing; the loop variable also no longer
        # shadows the builtin ``slice``.
        port_maps = slice_record.get('ports')
        if port_maps:
            ports.extend(portMap['host'] for portMap in port_maps)
    return ports
#
# the tarfile for the slice
#
def makeTarfile(sliceName):
    """Return the path of the slice-files tarball for *sliceName*."""
    return "/root/slice_files/{0}.tgz".format(sliceName)
#
# get directory of this script
def getScriptPath():
    """Return the directory containing the currently running script."""
    script = os.path.realpath(sys.argv[0])
    return os.path.dirname(script)
#
# get port string: from a port specification, get the ports and put them into a
# form for create-slice.sh
#
def getPortString(ports=None):
    """Render port mappings as the literal expected by create-slice.sh.

    ``ports`` is a list of {'host': ..., 'container': ...} dicts; the
    result looks like "['80:8080','443:8443']", or "[]" when *ports* is
    None or empty.
    """
    if not ports:
        # Covers both None and an empty list (the original tested each
        # separately, with a non-idiomatic ``== None`` comparison).
        return "[]"
    entries = ["'%s:%s'" % (port['host'], port['container']) for port in ports]
    return '[' + ','.join(entries) + ']'
#
# create a slice
#
def createSlice(user, sliceName, imageName, date, ports):
    """Invoke create-slice.sh for *sliceName* and record the outcome.

    On success the slice's status becomes "Running" and a creation event
    is written to the audit log; on script failure the status is set to
    "Error" instead.
    """
    try:
        scriptdir = getScriptPath()
        if (ports == None): ports = []
        portString = getPortString(ports)
        # Debug: echo the exact command line about to be run.
        print (scriptdir + '/create-slice.sh %s %s %s %s' % (sliceName, makeTarfile(sliceName), imageName, portString))
        # check_output raises CalledProcessError on a non-zero exit status.
        error_string = subprocess.check_output([scriptdir + '/create-slice.sh', sliceName, makeTarfile(sliceName), imageName, portString],
                                               stderr=subprocess.STDOUT)
        slice_collection.update({"user": user}, {"$set": {"status":"Running"}})
        sliceRecord = slice_collection.find_one({"sliceName": sliceName})
        # log the creation event
        create_log_collection.insert_one({"user" : user, "sliceName": sliceName, "imageName":imageName, "ports": ports,
                                          "sliceNum": sliceRecord['sliceNum'], "expires": sliceRecord['expires'], "date": date})
        if (ports and len(ports) > 0):
            # Persist the port mappings only when some were requested.
            slice_collection.update({"user": user}, {"$set": {"ports": ports}})
        logging.info('slice ' + sliceName + ' created for user ' + user)
    except subprocess.CalledProcessError, e:
        logging.error('Error in creating slice: ' + sliceName + ': ' + e.output)
        slice_collection.update({"user": user}, {"$set": {"status":"Error"}})
#
# delete a slice
#
def deleteSlice(sliceName, date):
    """Invoke delete-slice.sh for *sliceName* and drop its records.

    Logs a deletion event and removes the slice document on success; when
    the script fails the slice document is left in place.
    """
    try:
        scriptdir = getScriptPath()
        # check_output raises CalledProcessError on a non-zero exit, which
        # is all we need; the captured output was never used, and the
        # original also performed an unused find_one() lookup here.
        subprocess.check_output([scriptdir + '/delete-slice.sh', sliceName], stderr=subprocess.STDOUT)
        delete_log_collection.insert_one({'date': date, 'sliceName': sliceName})
        logging.info('slice ' + sliceName + ' deleted')
        slice_collection.delete_one({"sliceName": sliceName})
    except subprocess.CalledProcessError, e:
        logging.error('Error in deleting slice: ' + sliceName + ': ' + e.output)
#
# service a request
#
def doRequest(aRequest):
    """Execute one create/delete request and remove it from the queue."""
    logString = "Performing request %s for user %s and slice %s" % (aRequest['action'], aRequest['user'], aRequest['sliceName'])
    if 'imageName' in aRequest.keys():
        logString += ' with image: ' + aRequest['imageName']
    if 'ports' in aRequest.keys():
        logString += ' wth port request: ' + getPortString(aRequest['ports'])
    logging.info(logString)
    date = datetime.datetime.now()
    if aRequest['action'] == 'create':
        createSlice(aRequest['user'], aRequest['sliceName'], aRequest['imageName'], date, aRequest['ports'])
    else:
        # Anything that is not 'create' is treated as a delete.
        deleteSlice(aRequest['sliceName'], date)
    # The request has been handled (success or failure): drop it so it is
    # not retried.
    request_collection.remove({'action':aRequest['action'], 'sliceName': aRequest['sliceName']})
#
# check a request
#
def checkRequest(aRequest):
    """Return True when the request carries every mandatory field."""
    # 'action', 'user' and 'sliceName' are all required by doRequest();
    # all() replaces the original's convoluted ``not (False in [...])``.
    return all(field in aRequest for field in ('action', 'user', 'sliceName'))
#
# main loop
#
if __name__ == '__main__':
    # Poll the request queue forever; sleep briefly when it is empty so
    # the daemon does not hammer the database.
    while True:
        request = getNextOutstandingRequest()
        if request:
            if checkRequest(request):
                doRequest(request)
            else:
                # Malformed request: log every field for debugging, then
                # discard it so it cannot wedge the queue.
                fieldsAsStrings = ["%s:%s" % (field, repr(request[field])) for field in request.keys()]
                logging.error("Bad slice request found: " + ', '.join(fieldsAsStrings))
                request_collection.remove({"_id": request["_id"]})
        else:
            time.sleep(15)
| StarcoderdataPython |
4986237 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import shlex
import unittest
from dockermap.shortcuts import (str_arg, get_user_group, rm, chown, chmod, curl, wget, tar, untar, targz, untargz,
mkdir, mkdir_chown, addgroup, CmdArgMappings, assignuser, adduser, addgroupuser)
def _split_cmd_args(cmd):
    """Crudely parse a shell command line for order-insensitive comparison.

    Returns ``(first, last, misc_args)`` where *first* is the command
    name, *last* is the trailing positional argument (None when the line
    ends in an option), and *misc_args* holds the remaining tokens with
    each value-taking option joined to its re-quoted value.
    """
    current = None
    split_items = shlex.split(cmd)
    if split_items:
        first = split_items.pop(0)
    else:
        first = None
    # A final token that is not an option is the positional argument.
    if split_items and not split_items[-1][0] == '-':
        last = split_items.pop()
    else:
        last = None
    misc_args = []
    for ci in split_items:
        if ci.startswith('-') or not current:
            # A new option (or a stray bare token): flush any pending
            # option that turned out to take no value.
            if current:
                misc_args.append(current)
            current = ci
        else:
            # Non-option token following an option: treat it as that
            # option's value and re-quote it for stable comparison.
            misc_args.append('{0} {1}'.format(current, shlex.quote(ci)))
            current = None
    if current:
        misc_args.append(current)
    return first, last, misc_args
class ShortcutTest(unittest.TestCase):
    """Unit tests for the dockermap.shortcuts command builders."""
    def assertContainsAllArgs(self, cmd, start=None, end=None, *seq):
        """Assert *cmd* begins with *start*, ends with *end*, and contains
        exactly the option tokens in *seq* (in any order)."""
        first, last, misc_args = _split_cmd_args(cmd)
        if start:
            self.assertEqual(start, first)
        if end:
            self.assertEqual(end, last)
        self.assertSetEqual(set(seq), set(misc_args))
    def test_str_arg(self):
        self.assertEqual(r"'abc def'", str_arg("abc def"))
        self.assertEqual("123", str_arg(123))
        self.assertEqual(r"' '", str_arg(" "))
        self.assertEqual(r"' '", str_arg(" "))
    def test_get_user_group(self):
        self.assertEqual('user:group', get_user_group(('user', 'group')))
        self.assertEqual('1000:1001', get_user_group((1000, 1001)))
        self.assertEqual('user:user', get_user_group('user'))
        self.assertEqual('1000:1000', get_user_group(1000))
        self.assertEqual('user:group', get_user_group('user:group'))
    def test_rm(self):
        self.assertEqual('rm path/to/rm', rm('path/to/rm'))
        self.assertEqual('rm -R path/to/rm', rm('path/to/rm', recursive=True))
        self.assertEqual('rm -f path/to/rm', rm('path/to/rm', force=True))
        self.assertEqual('rm -R -f path/to/rm', rm('path/to/rm', recursive=1, force=1))
    def test_chown(self):
        # recursive defaults to True for chown.
        self.assertEqual('chown user:group path/to/chown', chown('user:group', 'path/to/chown', recursive=False))
        self.assertEqual('chown user:group path/to/chown', chown('user:group', 'path/to/chown', recursive=0))
        self.assertEqual('chown -R user:group path/to/chown', chown('user:group', 'path/to/chown'))
        self.assertEqual('chown -R user:group path/to/chown', chown('user:group', 'path/to/chown', recursive=True))
        self.assertEqual('chown -R user:group path/to/chown', chown('user:group', 'path/to/chown', recursive='true'))
    def test_chmod(self):
        self.assertEqual('chmod -R 0700 path/to/chmod', chmod('0700', 'path/to/chmod'))
        self.assertEqual('chmod u+x path/to/chmod', chmod('u+x', 'path/to/chmod', recursive=False))
    def test_curl(self):
        self.assertEqual('curl https://example.com', curl('https://example.com'))
        self.assertEqual('curl -o out-filename https://example.com', curl('https://example.com', 'out-filename'))
    def test_wget(self):
        self.assertEqual('wget https://example.com', wget('https://example.com'))
        self.assertEqual('wget -o out-filename https://example.com', wget('https://example.com', 'out-filename'))
    def test_adduser(self):
        # Flag spelling differs per distribution (CmdArgMappings).
        self.assertContainsAllArgs(
            adduser('user1'),
            'adduser', 'user1', '--no-create-home', '--disabled-login', '--disabled-password', "--gecos ''")
        self.assertContainsAllArgs(
            adduser('user1', arg_mapping=CmdArgMappings.CENTOS),
            'adduser', 'user1', '--no-create-home', '-s /sbin/nologin')
        self.assertContainsAllArgs(
            adduser('user1', gecos="User 1", no_login=False, arg_mapping=CmdArgMappings.CENTOS),
            'adduser', 'user1', "--comment 'User 1'")
        self.assertContainsAllArgs(
            adduser('user1', arg_mapping=CmdArgMappings.BUSYBOX),
            'adduser', 'user1', '-D', '-H', '-s /sbin/nologin')
        self.assertEqual("adduser --gecos '' user1",
                         adduser('user1', no_login=False))
        self.assertContainsAllArgs(
            adduser('user1', no_login=False, no_password=True),
            'adduser', 'user1', "--gecos ''", '--disabled-password')
        # CentOS adduser cannot express no_login=False with no_password.
        self.assertRaises(ValueError, adduser,
                          'user1', no_login=False, no_password=True, arg_mapping=CmdArgMappings.CENTOS)
        self.assertContainsAllArgs(
            adduser('user1', no_login=False, no_password=True, arg_mapping=CmdArgMappings.BUSYBOX),
            'adduser', 'user1', '-D')
    def test_addgroup(self):
        self.assertEqual('addgroup groupname', addgroup('groupname'))
        self.assertEqual('addgroup -g 2000 groupname', addgroup('groupname', gid=2000))
        self.assertContainsAllArgs(
            addgroup('groupname', gid=2000, system='x', arg_mapping=CmdArgMappings.DEBIAN),
            'addgroup', 'groupname', '-g 2000', '--system')
        self.assertContainsAllArgs(
            addgroup('groupname', gid=2000, system=True, arg_mapping=CmdArgMappings.CENTOS),
            'addgroup', 'groupname', '-g 2000', '--system')
        self.assertContainsAllArgs(
            addgroup('groupname', gid=2000, system=True, arg_mapping=CmdArgMappings.BUSYBOX),
            'addgroup', 'groupname', '-g 2000', '-S')
    def test_assignuser(self):
        self.assertEqual('usermod -aG group1 username', assignuser('username', ['group1']))
        self.assertEqual('usermod -aG group1,group2 username', assignuser('username', ['group1', 'group2']))
        self.assertEqual('usermod -aG group1,group2 username',
                         assignuser('username', ['group1', 'group2'], arg_mapping=CmdArgMappings.CENTOS))
        # BusyBox has no usermod; one adduser call per group instead.
        self.assertEqual('adduser username group1 && adduser username group2',
                         assignuser('username', ['group1', 'group2'], arg_mapping=CmdArgMappings.BUSYBOX))
    def test_addgroupuser(self):
        cmds_debian = addgroupuser('user1', ['a', 'b'], return_list=True)
        self.assertEqual(1, len(cmds_debian))
        self.assertContainsAllArgs(
            cmds_debian[0],
            'adduser', 'user1', '-G a,b', '--no-create-home', '--disabled-login', '--disabled-password', "--gecos ''")
        cmds_centos = addgroupuser('user1', ['a', 'b'], arg_mapping=CmdArgMappings.CENTOS, return_list=True)
        self.assertEqual(1, len(cmds_centos))
        self.assertContainsAllArgs(
            cmds_centos[0],
            'adduser', 'user1', '-G a,b', '--no-create-home', '-s /sbin/nologin')
        cmds_busybox = addgroupuser('user1', ['a', 'b'], arg_mapping=CmdArgMappings.BUSYBOX, return_list=True)
        self.assertEqual(3, len(cmds_busybox))
        self.assertContainsAllArgs(
            cmds_busybox[0],
            'adduser', 'user1', '-H', '-D', '-s /sbin/nologin')
        self.assertEqual(cmds_busybox[1], 'adduser user1 a')
        self.assertEqual(cmds_busybox[2], 'adduser user1 b')
    def test_mkdir(self):
        self.assertEqual('mkdir -p path/to/mk', mkdir('path/to/mk'))
        self.assertEqual('mkdir path/to/mk', mkdir('path/to/mk', create_parent=False))
        self.assertEqual('if [[ ! -d path/to/mk ]]; then mkdir -p path/to/mk; fi',
                         mkdir('path/to/mk', check_if_exists=True))
        self.assertEqual('if [[ ! -d path/to/mk ]]; then mkdir path/to/mk; fi',
                         mkdir('path/to/mk', create_parent=False, check_if_exists=True))
    def test_mkdir_chown(self):
        self.assertEqual('mkdir -p path/a && chown user:user path/a && chmod ug=rwX,o=rX path/a',
                         mkdir_chown('path/a', 'user'))
        self.assertEqual('mkdir path/a && chown 1000:1001 path/a && chmod ug=rwX,o=rX path/a',
                         mkdir_chown('path/a', (1000, 1001), create_parent=False))
        self.assertEqual('mkdir -p path/a && chown -R user:group path/a && chmod -R ug=rwX,o=rX path/a',
                         mkdir_chown('path/a', 'user:group', recursive=True))
        self.assertEqual('mkdir -p path/a && chmod 0700 path/a',
                         mkdir_chown('path/a', None, permissions='0700'))
        self.assertEqual('mkdir -p path/a && chown -R user:group path/a; '
                         'mkdir -p path/b && chown -R user:group path/b',
                         mkdir_chown(('path/a', 'path/b'), 'user:group', permissions=None, recursive=True))
        self.assertEqual('mkdir -p path/a && chown user:group path/a && chmod ug=rwX,o=rX path/a; '
                         'mkdir -p path/b && chown user:group path/b && chmod ug=rwX,o=rX path/b',
                         mkdir_chown(['path/a', 'path/b'], 'user:group'))
        self.assertEqual([['mkdir -p path/a', 'chown user:group path/a', 'chmod ug=rwX,o=rX path/a'],
                          ['mkdir -p path/b', 'chown user:group path/b', 'chmod ug=rwX,o=rX path/b']],
                         mkdir_chown(['path/a', 'path/b'], 'user:group', return_list=True))
    def test_tar(self):
        self.assertEqual('tar -cf archive.tar src/path', tar('archive.tar', 'src/path'))
        self.assertContainsAllArgs(tar('archive.tar', 'src/path', _v=True),
                                   'tar', 'src/path', '-cf archive.tar', '-v')
    def test_untar(self):
        self.assertContainsAllArgs(untar('archive.tar', 'src/path'),
                                   'tar', 'src/path', '-xf archive.tar', '-C')
        self.assertContainsAllArgs(untar('archive.tar', 'src/path', _v=True),
                                   'tar', None, '-xf archive.tar', '-C src/path', '-v')
    def test_targz(self):
        self.assertContainsAllArgs(targz('archive.tar', 'src/path'),
                                   'tar', 'src/path', '-cf archive.tar', '-z')
        self.assertContainsAllArgs(targz('archive.tar', 'src/path', _v=True),
                                   'tar', 'src/path', '-cf archive.tar', '-z', '-v')
    def test_untargz(self):
        self.assertContainsAllArgs(untargz('archive.tar', 'src/path'),
                                   'tar', None, '-xf archive.tar', '-C src/path', '-z')
        self.assertContainsAllArgs(untargz('archive.tar', 'src/path', _v=True),
                                   'tar', None, '-xf archive.tar', '-C src/path', '-z', '-v')
| StarcoderdataPython |
# Inherit everything from the ActNN ResNet-18 ImageNet config and switch
# activation compression off for this baseline run.
_base_ = ['../actnn/resnet18_b64x4_imagenet.py']
actnn = False
| StarcoderdataPython |
4985950 |
__author__ = '<NAME>'
from setuptools import setup

# Runtime dependencies (currently none).
requires = [
]

# Read the long description up front with a context manager so the file
# handle is closed deterministically (the original open(...).read() inside
# the setup() call leaked the handle).
with open('README.rst') as readme:
    long_description = readme.read()

setup( name='sarch2',
       version="1.1.0",
       description='Simple archiving solution',
       scripts=['bin/sarch2'],
       packages=['sarch2'],
       long_description=long_description,
       url='https://github.com/susundberg/python-sarch2',
       author='<NAME>',
       author_email='<EMAIL>',
       license='MIT',
       install_requires=requires,
       test_suite="test",
       classifiers=[
         'Development Status :: 3 - Alpha',
         'Intended Audience :: Developers',
         'License :: OSI Approved :: MIT License',
         'Operating System :: POSIX',
         'Programming Language :: Python :: 3.6',
         'Topic :: System :: Archiving',
         'Topic :: System :: Filesystems'
       ],
       zip_safe=True )
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.