id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
230195 | <filename>packages/jet_bridge_base/jet_bridge_base/settings.py
import os
import sys
# Absolute path of the directory that contains this settings module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# The settings module object itself; set_settings() overrides attributes on it.
CURRENT_MODULE = sys.modules[__name__]
# --- General flags ---
DEBUG = False
READ_ONLY = False
AUTO_OPEN_REGISTER = True
# --- Project / auth identity (populated at runtime via set_settings) ---
PROJECT = None
TOKEN = None
ENVIRONMENT = None
CORS_HEADERS = True
BASE_URL = None
# NOTE(review): '-----<KEY>' looks like a redacted placeholder — confirm the
# real verification key is injected via set_settings() at startup.
JWT_VERIFY_KEY = '-----<KEY>'
ENVIRONMENT_TYPE = None
WEB_BASE_URL = None
API_BASE_URL = None
# --- Database connection settings ---
DATABASE_ENGINE = None
DATABASE_HOST = None
DATABASE_PORT = None
DATABASE_USER = None
DATABASE_PASSWORD = None
DATABASE_NAME = None
DATABASE_EXTRA = None
DATABASE_CONNECTIONS = None
DATABASE_ONLY = None
DATABASE_EXCEPT = None
DATABASE_SCHEMA = None
# --- Cookie behaviour ---
COOKIE_SAMESITE = None
COOKIE_SECURE = None
COOKIE_DOMAIN = None
COOKIE_COMPRESS = None
# --- SSO / CORS ---
SSO_APPLICATIONS = {}
ALLOW_ORIGIN = '*'
def set_settings(settings):
    """Copy every non-None entry of *settings* onto this module's attributes.

    Entries whose value is None are skipped so that unset options keep the
    module-level defaults defined above.
    """
    for name, new_value in settings.items():
        if new_value is not None:
            setattr(CURRENT_MODULE, name, new_value)
| StarcoderdataPython |
1994049 | #!/usr/bin/env python3
import math
def solution(x, y, jump_size):
    """Return the minimum number of jumps of length *jump_size* needed
    to go from position *x* to (at least) position *y*.

    O(1) solution for the classic "Frog Jump" problem.
    """
    distance = y - x
    return math.ceil(distance / jump_size)
| StarcoderdataPython |
1635429 | """
Script for saving the info of your members in a json file
so that the identification process is carrying out smoothly
Format for running this script
there must be a folder - Database
Inside it
each identity photo must be there and the photo named as <that_person_name>.{any_format /png/jpeg/jpg}
For example - shahrukh khan face photo saved with name shahrukh.jpg
Returns:
a json file named as database.json
"""
import os
from facenet_pytorch import MTCNN, InceptionResnetV1
from MTCNN import FaceDetector
import cv2
import json
database_folder = "database"  # folder holding one identity photo per member

if __name__ == "__main__":
    mtcnn = MTCNN(select_largest=True, device='cuda')
    # Create an inception resnet (in eval mode):
    reco = InceptionResnetV1(pretrained='vggface2').eval()

    # BUG FIX: load any previously stored embeddings once, instead of
    # re-opening and re-parsing Database.json for every person.
    if os.path.exists("Database.json"):
        with open("Database.json") as f:
            database = json.load(f)
    else:
        database = {}

    for person in os.listdir(database_folder):
        # BUG FIX: person.split(".") raised ValueError for file names that
        # contain more than one dot (e.g. "shahrukh.khan.jpg").
        name, _extension = os.path.splitext(person)
        image = cv2.imread(os.path.join(database_folder, person))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        fcd = FaceDetector(mtcnn, reco, is_image=True, img=image)
        embedding = fcd.run()
        database[name] = embedding.numpy().tolist()
        # Persist after every person so a crash loses at most one entry.
        with open("Database.json", "w") as f:
            print(f"{name} - Data stored successfully")
            json.dump(database, f)
| StarcoderdataPython |
3410852 | from __future__ import absolute_import
from lit36.formats.base import TestFormat, FileBasedTest, OneCommandPerFileTest
from lit36.formats.googletest import GoogleTest
from lit36.formats.shtest import ShTest
| StarcoderdataPython |
6597269 | <gh_stars>1-10
from unittest import TestCase
import requests
import json
#curl -H "Content-Type: application/json;charset=UTF-8 " -X POST -d '{"id":90,"asciimath":"", "mathml":"", "latex":"x \\lt y"}' http://host:port/latex_to_asciimath
class TestBatch(TestCase):
def setUp(self):
self.host = '172.16.58.3:8089'
def test_mathml2latex(self):
cuda1_host = 'http://' + self.host + '/latex_to_asciimath'
payload = {"id":90,"asciimath":"", "mathml":"", "latex":"x \\lt y"}
headers = {'content-type': 'application/json'}
r = requests.post(cuda1_host, data=json.dumps(payload), headers=headers)
print '\n' + r.text
print '\n' + str(r.headers) | StarcoderdataPython |
from random import randint

# Guess-the-number game: the secret is drawn once, the player guesses until
# correct.  (The original prompted twice before the first check and discarded
# the first answer — removed.)
num = randint(0, 10)
while True:
    escolha = input('estou pensando em um numero entre 0 e 10 \nConsegue adivinhar? ')
    # BUG FIX: input() returns a string while num is an int, so the original
    # comparison was always False and the loop never terminated.
    if escolha.isdigit() and int(escolha) == num:
        print('é esse mesmo, você é bom msm')
        break
    else:
        print('não é esse... \nQuer tentar de novo?')
| StarcoderdataPython |
3339619 | import random
import tensorflow as tf
from config.model_config import ModelConfig
def color_augment(x: tf.Tensor, y: tf.Tensor):
    """Randomly jitter hue, saturation, brightness and contrast of *x*.

    Args:
        x: Images (result is clamped back into [0, 1]).
        y: Labels, returned unchanged.
    """
    augmented = tf.image.random_hue(x, 0.08)
    augmented = tf.image.random_saturation(augmented, 0.6, 1.6)
    augmented = tf.image.random_brightness(augmented, 0.1)
    augmented = tf.image.random_contrast(augmented, 0.6, 1.4)
    # Jitter can push values outside the valid range; clamp to [0, 1].
    augmented = tf.maximum(tf.minimum(augmented, 1.0), 0.0)
    return augmented, y
def rot_flip_augment(x: tf.Tensor, y: tf.Tensor):
    """Apply a random 90-degree rotation, a crop/resize "zoom" and random flips.

    Args:
        x: Images
        y: Labels, returned unchanged.
    """
    # Random quarter-turn rotation (0, 90, 180 or 270 degrees).
    quarter_turns = random.randint(0, 3)
    out = tf.image.rot90(x, k=quarter_turns)
    # "Zoom": crop a random window of the model size, then resize back to it.
    size = (ModelConfig.IMG_SIZE, ModelConfig.IMG_SIZE)
    out = tf.image.random_crop(out, size + (3,))
    out = tf.image.resize(out, size)
    # Random horizontal and vertical flips.
    out = tf.image.random_flip_left_right(out)
    out = tf.image.random_flip_up_down(out)
    return out, y
| StarcoderdataPython |
1852870 | <reponame>Clonexy700/Shiro-DiscordBot
import discord
import asyncio
import json
from collections import OrderedDict
import random
import numpy as np
from functools import partial
from discord.utils import get
from collections import namedtuple
from discord.ext import commands
class Marriage(commands.Cog):
def __init__(self, client):
    """Load persisted marriage profiles and start the periodic save task."""
    self.client = client
    self.client.loop.create_task(self.save_users())
    # BUG FIX: the original crashed with FileNotFoundError when marry.json
    # did not exist yet (first run); fall back to an empty store.
    try:
        with open('marry.json', 'r') as f:
            self.users = json.load(f)
    except FileNotFoundError:
        self.users = {}
async def save_users(self):
    """Flush the in-memory profile store to marry.json every 3 seconds."""
    await self.client.wait_until_ready()
    while not self.client.is_closed():
        # BUG FIX: the original opened the file in "w" mode *before* checking
        # self.users, truncating marry.json to an empty (invalid JSON) file
        # whenever the store was empty.  Only touch the file when there is data.
        if self.users:
            with open('marry.json', 'w') as f:
                json.dump(self.users, f, indent=4)
        await asyncio.sleep(3)
@commands.Cog.listener()
async def on_member_join(self, member):
    """Create a fresh (unmarried, giftless) profile for a new guild member."""
    author_id = str(member.id)
    self.users[author_id] = {
        'couple_id': 'None',      # partner's user id, 'None' sentinel when single
        'price': 1,               # waifu price; 1 marks a never-gifted profile
        # 21 gift slots, all initially empty ('None' sentinel string).
        **{f'gift{i}': 'None' for i in range(1, 22)},
        'divorce_num': 0,
    }
@commands.Cog.listener()
async def on_message(self, message):
    """Lazily create a default profile for any member the bot sees talking."""
    author_id = str(message.author.id)
    if author_id not in self.users:
        self.users[author_id] = {
            'couple_id': 'None',
            'price': 1,
            # 21 gift slots, all initially empty ('None' sentinel string).
            **{f'gift{i}': 'None' for i in range(1, 22)},
            'divorce_num': 0,
        }
@commands.command(name='marry', pass_context=True)
async def marry(self, ctx, member: discord.Member):
    """Propose marriage to *member*; both get the 💍 role on a "да" reply.

    Guards: cannot marry yourself, cannot marry while either party is
    already married.  The proposal waits for a да/нет answer in chat.
    """
    author = ctx.message.author
    author_id = str(author.id)
    member_id = str(member.id)
    # Lazily create default profiles for both parties.
    # BUG FIX: the original reset the *author's* divorce counter when it
    # initialised the member's profile and never gave the member one,
    # causing a KeyError in commands reading the member's 'divorce_num'.
    for uid in (author_id, member_id):
        if uid not in self.users:
            self.users[uid] = {
                'couple_id': 'None',
                'price': 1,
                **{f'gift{i}': 'None' for i in range(1, 22)},
                'divorce_num': 0,
            }
    role = discord.utils.get(member.guild.roles, name="💍")
    embedmarry = discord.Embed(color=discord.Colour.dark_purple())
    embedmarry.add_field(name='Свадьба :heart_exclamation: ',
                         value=f'{author.mention} предлагает вам свою руку и сердце! {member.mention}',
                         inline=False)
    embedmarry.add_field(name='Каков будет ваш ответ? :question: :thinking: ',
                         value='Вы можете ответить с помощью **да** или **нет**, у вас есть целая минута, чтобы принять решение!',
                         inline=False)
    embedmarry.set_image(
        url='https://media1.tenor.com/images/69dbcb02b724d26644228a38e367d017/tenor.gif?itemid=14444888')
    embedyee = discord.Embed(color=discord.Colour.dark_purple())
    embedyee.add_field(name='Удачная свадьба :heart:',
                       value=f'{author.mention} женился на {member.mention} ! Мои поздравления! ;) ')
    embedyee.set_image(
        url='https://cdn.discordapp.com/attachments/624296774747553808/642210564571004929/2c4259204e631b3e70cbd248331ac1e2.gif')
    embednii = discord.Embed(color=discord.Colour.dark_purple())
    embednii.add_field(name='<NAME> :broken_heart: ',
                       value=f"{member.mention} отказалась(ся). Не плачь {author.mention} , найдешь кого-нибудь другого")
    embednii.set_image(
        url='https://cdn.discordapp.com/attachments/624296774747553808/642209594130694163/0caba0318aa667572c0ae30f34ecf8b62896aee5_hq.gif')
    embedalreadymarried = discord.Embed(color=discord.Colour.dark_purple())
    embedalreadymarried.add_field(name='Вы уже женаты!',
                                  value=f'{author.mention} Вы уже женаты!')
    embedselfmarry = discord.Embed(color=discord.Colour.dark_purple())
    embedselfmarry.add_field(name='Свадьба неудачна :thinking: :thinking: ',
                             value=f"Хмм?..На себе нельзя жениться, ты чего?")
    embedselfmarry.set_image(url='https://media1.giphy.com/media/GstlqgmrVgpuE/source.gif')
    if member == author:
        await ctx.send(embed=embedselfmarry)
    if self.users[author_id]['couple_id'] == member_id:
        await ctx.send(embed=embedalreadymarried)
    if (self.users[member_id]['couple_id'] != 'None' or self.users[author_id]['couple_id'] != 'None') and \
            self.users[author_id]['couple_id'] != member_id:
        await ctx.send('Сперва сделайте развод со своими партнерами')
    if role not in author.roles and role not in member.roles:
        if member != author:
            if self.users[author_id]['couple_id'] != member_id:
                await ctx.send(embed=embedmarry)

                def check(message):
                    # Accept any capitalisation of "да" / "нет" (the original
                    # enumerated all 12 case variants by hand).
                    return message.content.casefold() in ('да', 'нет')

                # NOTE(review): the prompt promises a full minute but the
                # original waited only 30 s — kept at 30 to preserve behaviour.
                reply = await self.client.wait_for('message', check=check, timeout=30)
                if not reply or reply.content.casefold() == 'нет':
                    await ctx.send(embed=embednii)
                if not reply or reply.content.casefold() == 'да':
                    await ctx.send(embed=embedyee)
                    await discord.Member.add_roles(member, role)
                    await discord.Member.add_roles(author, role)
                    self.users[author_id]['couple_id'] = member_id
                    self.users[member_id]['couple_id'] = author_id
@marry.error
async def marry_timeout(self, ctx, error):
    """Error handler for .marry: reply timeouts and missing arguments."""
    if isinstance(error, commands.CommandInvokeError):
        # wait_for() inside marry() timed out and surfaced as an invoke error.
        timeout_embed = discord.Embed(color=discord.Colour.dark_purple())
        timeout_embed.add_field(
            name='Оууууууууууу! :interrobang: ',
            value="Время свадьбы закончилось. Не беспокойтесь и попробуйте ещё раз!")
        timeout_embed.set_image(url='https://media3.giphy.com/media/uHJTtpE9WqfYc/source.gif')
        await ctx.send(embed=timeout_embed)
    if isinstance(error, commands.MissingRequiredArgument):
        # No target member given: show usage help.
        usage_embed = discord.Embed(color=discord.Colour.dark_purple())
        usage_embed.add_field(
            name='Ошибка',
            value=f' Укажите на ком хотите жениться. .marry @ \n Пример:')
        await ctx.send(embed=usage_embed)
        await ctx.send('```.marry @Clonexy700#3767```')
@commands.command(pass_context=True)
async def divorce(self, ctx):
    """Dissolve the author's marriage: strip 💍 roles and unlink the couple."""
    author = ctx.message.author
    author_id = str(author.id)
    # BUG FIX: the original did int('None') for unmarried users and crashed
    # with ValueError before any reply could be sent.
    if author_id not in self.users or self.users[author_id]['couple_id'] == 'None':
        await ctx.send('Вы не женаты, разводиться не с кем!')
        return
    member = ctx.guild.get_member(int(self.users[author_id]['couple_id']))
    # BUG FIX: the partner may have left the guild (get_member -> None).
    if member is None:
        await ctx.send('Ваш партнер больше не на сервере.')
        return
    role = discord.utils.get(member.guild.roles, name="💍")
    embeddivorce = discord.Embed(color=discord.Colour.dark_purple())
    embeddivorce.add_field(name='Развод :broken_heart: :broccoli: ',
                           value=f'{author.mention} теперь больше не женаты с {member.mention}')
    embeddivorce.set_image(url="https://rabujoi.files.wordpress.com/2017/02/fuu62.jpg")
    embeddivorcefail = discord.Embed(color=discord.Colour.dark_purple())
    embeddivorcefail.add_field(name='Развод :broccoli:',
                               value=f'{author.mention} не получилось развестись! Кажется ты или {member.mention} уже не женаты! ')
    embeddivorcefail.set_image(url='https://i.gifer.com/BtGB.gif')
    if role in author.roles and role in member.roles:
        await discord.Member.remove_roles(author, role)
        await discord.Member.remove_roles(member, role)
        member_id = self.users[author_id]['couple_id']
        self.users[member_id]['couple_id'] = 'None'
        self.users[author_id]['couple_id'] = 'None'
        self.users[author_id]['divorce_num'] += 1
        await ctx.send(embed=embeddivorce)
    else:
        await ctx.send(embed=embeddivorcefail)
@commands.command(name='hentai', aliases=['ласкать', 'хентай'])
async def hentai(self, ctx, member: discord.Member):
    """Send a random lewd gif — only allowed between married partners."""
    suit_icons = [":spades:", ":clubs:", ":diamonds:", ":hearts:", ":fleur_de_lis:", ":black_heart:"]
    author = ctx.message.author
    author_id = str(ctx.message.author.id)
    partner_id = str(member.id)
    # Guard: only the author's registered partner may be targeted.
    if self.users[author_id]['couple_id'] != partner_id:
        await ctx.send('Вы не женаты с данным человеком, поэтому не можете использовать данную команду')
        return
    gif_pool = [
        "https://66.media.tumblr.com/e5921dae5876c00e1c3fcc0540ecab12/tumblr_opnxbzZbYT1sghvvyo1_500.gif",
        "https://i.pinimg.com/originals/fc/fa/b1/fcfab1d1656f5e9f859744ce999b35d5.gif",
        "https://ci.memecdn.com/1394312.gif",
        "https://lh3.googleusercontent.com/proxy/4N3f_aUCbDjN7Dcbvbd0xi1Fx1VNJsW_U4givReTfGarUG4S5-SFBAfpLa1BH0UaGbdQItlwQgqaErtORYcCmsUAtIYU9Z1xPw",
        "https://userdisk.webry.biglobe.ne.jp/020/046/94/N000/000/000/126525418193816203746_dat1265220059955.gif",
        "https://psv4.userapi.com/c610925/u259174237/docs/5e23102a51fa/3f74a54545774160611b83983fe8020c.gif?extra=xsXm8TuVauL_efYWOtBq1fhNRhqMvHTrxoauN58As_O2Nup80jf4VVVdB-z_6AflVR-bQRIO-U351o8_MauFbTRFbskG5jQbg073yy9tSDb1fEtF169xFrWq-fFoJz_1KVsOLxqrrdS8Zf8_QhMTpuA",
        "https://66.media.tumblr.com/32f890680919d5edb248cf6de10cd1cb/tumblr_nrdtcvHEhl1trw883o1_1280.gif",
        "https://x.imagefapusercontent.com/u/bvelasco/5823564/1635151298/gOqBNfD.gif",
        "https://66.media.tumblr.com/2bd84e6dcd5fcb76dbc0448ce7211cc1/tumblr_o5zxbaSKjE1rv1jano1_540.gif",
        "https://66.media.tumblr.com/3779148a1def9b5484ce33b873b02272/tumblr_nwjlolOMea1sz111so2_540.gif",
        "https://66.media.tumblr.com/05454f06a749373d72065dbdceadb50a/tumblr_ohbvseGCp81vkkt9ro1_1280.gif",
        "https://lh3.googleusercontent.com/proxy/vStpimmvo0WAaK6nk1SwgMtp7LlXPuleXsjuln82K80K3ZJ3SBOkz6F1u-KkmO1DKsJZs6k9ndgQ4aDmDcxUA84Lbh1vsODFjieODT_JzneyMAxD_RbpMpcdauaziRRlYBbvCMuv9FCyFbxrT3biqyN1WVv05swGG22wpSY3uQ",
        "https://lh3.googleusercontent.com/proxy/VFCSlrNEErwdJhkFVO5Sk9P3R-SWYcejlDrkQgwRmCqp5MJZCgqrmWS2Uah-WzMDgl80w0ljAYU-yiNqZJ2mYtg6lF03bjbZGkLE9rC2qKBAup_dtj3pM24kxI6M_Jife_ar45BtROxtq6MhJhPY1b0OiV8YGZFYKbFuusafuoaDtxtt",
        "https://2gifs.ru/images/p49.media.tumblr.com/544299c4e63878310a02957b5e5a133c/tumblr_nyw1ejWY2E1tlb937o1_500.gif",
        "https://cs6.pikabu.ru/images/big_size_comm_an/2014-05_3/14001629958442.gif",
        "https://img-4.poringa.net/poringa/img/8/1/E/1/B/E/FrankFolla/789.gif",
        "https://pbs.twimg.com/profile_images/855027348386701313/YIgX4cok_400x400.jpg",
        "https://lh3.googleusercontent.com/proxy/rNRpnzLDcrOdqA5UmT5BGXH0Wqhv-iE1bvB3D1bbhjRWesgyW8t6_RLvQhSOjMnJW77S_Xqyt_W-47ubVJgV668G7efQmvVcdqFPxdRBXJjnI10EWrOoepav3RdWhIYFKnQsPbA6YQ",
        "https://i.imgur.com/xuZ4dHi.gif"]
    love_embed = discord.Embed(color=discord.Colour.dark_purple())
    love_embed.add_field(name="Любовь :heart:",
                         value=f"У {author.mention} и {member.mention} любовь. :heart: {random.choice(suit_icons)}")
    love_embed.set_image(url=random.choice(gif_pool))
    await ctx.send(embed=love_embed)
@commands.command(name='waifu', aliases=['вайфу', 'пара', 'жена'])
async def waifu(self, ctx, member: discord.Member = None):
    """Show a member's waifu card: price, partner, divorce count and gifts.

    Defaults to the command author when no member is given.  The 21
    hand-unrolled gift `if` blocks of the original are replaced by a
    display-order table; a dead local (`gifts`) was removed.
    """
    emoji = self.client.get_emoji(676803534758477845)
    loli = self.client.get_emoji(680064619385192515)
    author = ctx.author if not member else member
    member_id = str(author.id)
    # Display-order table: (profile key, field title, icon shown above count).
    gift_fields = (
        ('gift1', 'Трусы ', ':briefs:'),
        ('gift2', 'Лифчик ', '🩱'),
        ('gift3', 'Сапоги ', '🥾'),
        ('gift4', 'Котик ', '🐱'),
        ('gift16', 'Собачка ', '🐶'),
        ('gift5', 'Дом ', '🏠'),
        ('gift6', 'Манго ', '🥭'),
        ('gift7', 'Шарфик ', '🧣'),
        ('gift17', 'Носки ', '🧦'),
        ('gift8', 'Чулки ', '🧦'),
        ('gift9', 'Неко-уши ', '👙'),
        ('gift10', 'Хвостик ', '👙'),
        ('gift11', 'Кимоно ', '👘'),
        ('gift18', 'Костюм горничной ', '👘'),
        ('gift12', 'Ошейник ', '👙'),
        ('gift13', 'Очки ', ':eyeglasses:'),
        ('gift14', 'Лоли ', f'{loli}'),
        ('gift15', 'Чупа-Чупс ', '🍭'),
        ('gift19', 'Мишка ', '🧸'),
        ('gift20', 'Вино ', ':wine_glass:'),
        ('gift21', 'Торт ', ':birthday:'),
    )
    price = self.users[member_id]['price']
    if self.users[member_id]["couple_id"] == 'None':
        pair = 'None'
    else:
        pair = ctx.guild.get_member(int(self.users[member_id]['couple_id'])).name
    waifu_menu = discord.Embed(
        color=discord.Colour.dark_purple(),
        timestamp=ctx.message.created_at,
        description=f'**Цена**\n{price}{emoji}\nВайфу {author.name}\n**Вместе с**\n{pair}\n Разводов {self.users[member_id]["divorce_num"]} :broken_heart:\n __**Подарки вайфу**__:'
    )
    waifu_menu.set_author(name=f'Вайфу {author.name}', icon_url=author.avatar_url)
    waifu_menu.set_thumbnail(url=author.avatar_url)
    # One embed field per gift that has actually been received.
    for key, title, icon in gift_fields:
        count = self.users[member_id][key]
        if count != 'None':
            waifu_menu.add_field(name=title, value=f'{icon} \n • {count}', inline=True)
    waifu_menu.set_footer(text=f'Запросил {ctx.author.name} \n (♥ω♥ ) ~♪')
    await ctx.send(embed=waifu_menu)
@commands.command(name='gifts', aliases=['подарки', 'giftlist', 'мп', 'giftshop', 'пм'])
async def gifts(self, ctx):
    """Show the gift-shop price list as a single embed."""
    emoji = self.client.get_emoji(676803534758477845)
    loli = self.client.get_emoji(680064619385192515)
    # (field title, icon, price) — one embed field per entry, in shop order.
    price_list = (
        ('Трусы ', ':briefs:', 50),
        ('Лифчик', '🩱', 70),
        ('Сапоги', '🥾', 90),
        ('Кот', '🐱', 5000),
        ('Собака', '🐶', 5000),
        ('Дом', '🏠', 50000),
        ('Манго', '🥭', 20),
        ('Шарфик', '🧣', 60),
        ('Носки', '🧦', 300),
        ('Чулки', '👙', 600),
        ('Неко-уши', '👙', 800),
        ('Хвостик', '👙', 800),
        ('Кимоно', '👘', 1200),
        ('Костюм горничной', '👘', 2400),
        ('Ошейник', '👙', 2000),
        ('Очки', ':eyeglasses:', 4000),
        ('Лоли', f'{loli}', 100000),
        ('Чупа-чупс', '🍭', 10),
        ('Мишка', '🧸', 2500),
        ('Вино', ':wine_glass:', 250),
        ('Торт', ':birthday:', 500),
    )
    shop = discord.Embed(color=discord.Colour.dark_purple(), timestamp=ctx.message.created_at)
    shop.set_author(name='Подарки для вашей вайфу')
    for title, icon, cost in price_list:
        shop.add_field(name=title, value=f'{icon} \n > {cost} {emoji}', inline=True)
    shop.set_image(
        url='https://66.media.tumblr.com/c704b54b4390f2b2eef8d85c50d35dfd/tumblr_ofb1vtxYrF1th93f0o3_400.gif')
    shop.set_footer(text=f'запросил {ctx.author.name}')
    await ctx.send(embed=shop)
@commands.command(name='gift', aliases=['подарить', 'гифт', 'п', 'подарок'])
async def gift(self, ctx, member: discord.Member, gift=None, n: int = 1):
    """Buy *n* items of *gift* for *member*.

    Deducts the total price from the author's balance, increments the
    recipient's counter for that gift and raises their waifu price by the
    amount spent.  The 21 copy-pasted branches of the original are replaced
    by a catalogue table.
    """
    loli = self.client.get_emoji(680064619385192515)
    member_id = str(member.id)
    author_id = str(ctx.author.id)
    # BUG FIX: the original called gift.casefold() *before* the None check,
    # so invoking the command without a gift name raised AttributeError.
    if gift is None:
        await ctx.send('Уточните пожалуйста подарок, который вы хотите отправить!')
        return
    gift = gift.casefold()
    # gift name -> (profile key, unit price, text shown after "подарил @member ")
    catalogue = {
        'трусы': ('gift1', 50, 'Трусики :briefs:!'),
        'лифчик': ('gift2', 70, 'Лифчик 🩱!'),
        'сапоги': ('gift3', 90, 'Сапоги 🥾!'),
        'кот': ('gift4', 5000, 'Котика 🐱!'),
        'собака': ('gift16', 5000, 'Cобачку 🐶!'),
        'дом': ('gift5', 50000, 'Дом 🏠!'),
        'манго': ('gift6', 20, 'Манго 🥭!'),
        'шарфик': ('gift7', 60, 'Шарфик 🧣!'),
        'носки': ('gift17', 300, 'Носочки 🧦!'),
        'чулки': ('gift8', 600, 'Чулочки 👙!'),
        'неко-уши': ('gift9', 800, 'Неко-уши 👙!'),
        'хвостик': ('gift10', 800, 'Хвостик 👙!'),
        'кимоно': ('gift11', 1200, 'Кимоно 👘 !'),
        'костюм горничной': ('gift18', 2400, 'Костюм горничной 👘 !'),
        'ошейник': ('gift12', 2000, 'Ошейник 👙!'),
        'очки': ('gift13', 4000, 'Очечки :eyeglasses:!'),
        'лоли': ('gift14', 100000, f'Лольку {loli}!'),
        'чупа-чупс': ('gift15', 10, 'Чупа-чупс 🍭!'),
        'мишка': ('gift19', 2500, 'Плюшевого мишку :teddy_bear: !'),
        'вино': ('gift20', 250, 'Винишко :wine_glass: !'),
        'торт': ('gift21', 500, 'Тортик :birthday: !'),
    }
    if gift not in catalogue:
        # BUG FIX: the original fell through and sent an embed with no
        # fields, which Discord rejects with an HTTP 400.
        await ctx.send('Такого подарка нет в магазине!')
        return
    key, unit_price, gift_text = catalogue[gift]
    total = unit_price * n
    if self.client.currency[author_id]["money"] - total < 0:
        # BUG FIX: the original silently sent an empty embed here too.
        await ctx.send('Недостаточно средств для этого подарка!')
        return
    await self.client.unupdate_currency(author_id, total)
    # Gift counters start out as the string 'None'; switch to 0 on first gift.
    if self.users[member_id][key] == 'None':
        self.users[member_id][key] = 0
    self.users[member_id][key] += n
    # A price of exactly 1 marks a never-gifted waifu; clear the marker first.
    if self.users[member_id]["price"] == 1:
        self.users[member_id]["price"] -= 1
    self.users[member_id]["price"] += total
    embed_reply = discord.Embed(color=discord.Colour.dark_purple())
    embed_reply.add_field(name='Вы подарили',
                          value=f'{ctx.author.mention} подарил {member.mention} {gift_text}')
    await ctx.send(embed=embed_reply)
@commands.command(name='ship', aliases=['шип', 'ебитес'], help=' ship 1st person with 2nd person')
async def ship(self, ctx, member1: discord.Member, member2: discord.Member):
role = discord.utils.get(member1.guild.roles, name="💍")
empedship = discord.Embed(
color=discord.Colour.dark_purple()
)
empedship.add_field(name=':heart_eyes: Ship :heart: ',
value=f' Шипперим {member1.mention} и {member2.mention} :smirk: ', inline=False)
LovePossibility = random.randint(0, 100)
LoveSymbolic = ["Между ними нет любви. ",
"▄ █ ",
"▄ █ █ ▄",
"▄ █ █ ▄ ▄ █",
"▄ █ █ ▄ ▄ █ ▄ █",
"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄",
"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄ █ ",
"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄ █ ▄ █",
"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄ █ ▄ █ ▆ ▅",
"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄ █ ▄ █ ▆ ▅ ▃",
"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄ █ ▄ █ ▆ ▅ ▃ ▂"]
if member1 == member2:
empedship.add_field(name=f'Ммм... Я думаю тут проблемка.',
value=f"Ты не можешь шипперить двух одинаковых людей.")
if member1 != member2:
if member2 or member1 != discord.User.bot:
author_id = str(member1.id)
member_id = str(member2.id)
if role not in member1.roles or role not in member2.roles or self.users[author_id][
'couple_id'] != member_id:
if LovePossibility <= 5:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[0]} {LovePossibility} % :broken_heart: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/2d432485a60674319ef423b643877ee4/tenor.gif?itemid=11302966')
if LovePossibility >= 6:
if LovePossibility <= 10:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[1]} __**{LovePossibility}%**__ :broken_heart: ',
inline=False)
empedship.set_image(url='https://cdn42.picsart.com/171043668003202.gif?c256x256')
if LovePossibility >= 11:
if LovePossibility <= 19:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[2]} __**{LovePossibility}%**__ :black_heart: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/e375857e0f78c321b83040514b21a420/tenor.gif?itemid=13802170')
if LovePossibility >= 20:
if LovePossibility <= 29:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[3]} __**{LovePossibility}%**__ :black_heart: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/93f5876e82ae575a6c4b4613d57f6e29/tenor.gif?itemid=13665536')
if LovePossibility >= 30:
if LovePossibility <= 39:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[4]} __**{LovePossibility}%**__ :brown_heart: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/f0479ee873f30213a7a5579cc18da5d0/tenor.gif?itemid=12165912')
if LovePossibility >= 40:
if LovePossibility <= 49:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[5]} __**{LovePossibility}%**__ :hearts: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/93f5876e82ae575a6c4b4613d57f6e29/tenor.gif?itemid=13665536')
if LovePossibility >= 50:
if LovePossibility <= 59:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[6]} __**{LovePossibility}%**__ :heart_decoration: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/f0479ee873f30213a7a5579cc18da5d0/tenor.gif?itemid=12165912')
if LovePossibility >= 60:
if LovePossibility <= 69:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[7]} __**{LovePossibility}%**__ :blue_heart: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/4490d35d5950b90df2b7bccaf4f79922/tenor.gif?itemid=3478319')
if LovePossibility >= 70:
if LovePossibility <= 79:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[8]} __**{LovePossibility}%**__ :heartbeat: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/fb1aa76944c156acc494fff37ebdbcfa/tenor.gif?itemid=14521920')
if LovePossibility >= 80:
if LovePossibility <= 89:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[9]} __**{LovePossibility}%**__ :heart_exclamation: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/62a43e567137edec0d5231d5ec4b814b/tenor.gif?itemid=8955295')
if LovePossibility >= 90:
if LovePossibility <= 100:
empedship.add_field(name=f'Вероятность любви между {member1} и {member2} это :',
value=f'{LoveSymbolic[10]} _**{LovePossibility}%**_ :heartpulse: ',
inline=False)
empedship.set_image(
url='https://media1.tenor.com/images/8cab4f4c73547d077c56066461c40a5e/tenor.gif?itemid=12873196')
author_id = str(member1.id)
member_id = str(member2.id)
if self.users[author_id]['couple_id'] == member_id:
if role in member1.roles and role in member2.roles:
empedship.add_field(name=f' {member1} и {member2} в любовных отношениях:',
value=f"▄ █ █ ▄ ▄ █ ▄ █ ▄ █ ▄ █ ▄ █ ▆ ▅ ▃ ▂ _ **100%** :heartpulse: _\nОни "
f"женаты! Конечно "
f"же это любовь!")
empedship.set_image(
url='https://media1.tenor.com/images/8cbe0edadc12ca1056d5eb685a4c27f6/tenor.gif?itemid=14518537')
await ctx.send(embed=empedship)
@ship.error
async def shiperror(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
embedmarryerror = discord.Embed(
color=discord.Colour.dark_purple()
)
embedmarryerror.add_field(name='Ошибка синтаксиса команды',
value=f' Пожалуйста идентифицируйте 1-го и 2-го пользователей. .ship @ @\n Пример:')
await ctx.send(embed=embedmarryerror)
await ctx.send('```.ship @Clonexy#3767 .Shiro ♣#9014```')
@commands.command(name='waifunull', aliases=['waifureset', 'сбросвайфу', 'resetwaifu'])
async def waifureset(self, ctx):
author = ctx.author
member_id = str(author.id)
self.users[member_id]['price'] = 1
self.users[member_id]['gift1'] = 'None'
self.users[member_id]['gift2'] = 'None'
self.users[member_id]['gift3'] = 'None'
self.users[member_id]['gift4'] = 'None'
self.users[member_id]['gift5'] = 'None'
self.users[member_id]['gift6'] = 'None'
self.users[member_id]['gift7'] = 'None'
self.users[member_id]['gift8'] = 'None'
self.users[member_id]['gift9'] = 'None'
self.users[member_id]['gift10'] = 'None'
self.users[member_id]['gift11'] = 'None'
self.users[member_id]['gift12'] = 'None'
self.users[member_id]['gift13'] = 'None'
self.users[member_id]['gift14'] = 'None'
self.users[member_id]['gift15'] = 'None'
self.users[member_id]['gift16'] = 'None'
self.users[member_id]['gift17'] = 'None'
self.users[member_id]['gift18'] = 'None'
self.users[member_id]['gift19'] = 'None'
self.users[member_id]['gift20'] = 'None'
self.users[member_id]['gift21'] = 'None'
null = discord.Embed(
color=discord.Colour.dark_purple()
)
null.add_field(name='Ваша статистика',
value='Ваша статистика Вайфу была успешно обнулена по-вашему запросу, проверьте её с помощью команды .waifu !')
await ctx.send(embed=null)
    @commands.command(name='waifutop', aliases=['топвайфу', 'вайфутоп', 'твайфу', 'лучшиевайфу', 'topwaifu'])
    async def waifutop(self, ctx):
        """Post a leaderboard of all users sorted by waifu 'price' (descending).

        Reads marry.json directly rather than self.users — presumably the same
        data store; TODO confirm they cannot drift apart while the bot runs.
        """
        # Custom currency emoji by hard-coded id; assumes the bot is in the
        # guild that owns it — TODO confirm.
        emoji = self.client.get_emoji(676803534758477845)
        # Decorative kaomoji, one picked at random per leaderboard row.
        kamo = ["(●♡∀♡)",
                "✿♥‿♥✿",
                "(♥ω♥ ) ~♪",
                "(。・ω・。)ノ♡",
                "(◍•ᴗ•◍)❤",
                "-ω(´•ω•`)♡",
                "(◍•ᴗ•◍)♡ ✧*。",
                "( ◜◒◝ )♡",
                "(人 •͈ᴗ•͈)",
                "(´͈ ᵕ `͈ ♡°◌̊)",
                "(ღ˘⌣˘ღ)",
                "( ˘ ³˘)♥",
                "( ˘ ³˘)❤",
                "❣ (●❛3❛●)",
                "(っ˘з(˘⌣˘ )",
                "(◦˘ З(◦’ںˉ◦)♡",
                "(*-ω-)ω-*)"]
        with open('marry.json') as json_data:
            d = json.load(json_data)
        # Sort user records by their 'price' field, highest first.  The
        # OrderedDict wrapper is redundant on Python 3.7+ (plain dicts keep
        # insertion order) but kept for byte-compatibility.
        result = OrderedDict({k: v for k, v in sorted(d.items(), reverse=True, key=lambda i: i[1]["price"])})
        embed = discord.Embed(
            color=discord.Colour.dark_purple()
        )
        for index, element in enumerate(result):
            try:
                # element is assumed to be a user id string — TODO confirm
                # against how marry.json is written.
                user = await self.client.fetch_user(element)
                a = user.name
            except AttributeError:
                # NOTE(review): fetch_user failures normally raise
                # discord.NotFound/HTTPException, not AttributeError — confirm
                # which failure mode this guard was meant to cover.
                a = '?'
            embed.add_field(name=str(int(index + 1)),
                            value=f"``{random.choice(kamo)}`` | {a} - {result[element]['price']} {emoji}",
                            inline=False)
        await ctx.send(embed=embed)
@commands.command(name='emojitest')
async def testetetetetetetete(self, ctx):
msg = discord.Embed(
color=discord.Colour.dark_purple()
)
msg.add_field(name='Игровые роли:', value='_ _')
msg = await ctx.send(embed=msg)
reactions = [691522796970573856, 691523294515429377, 691523884675235960, 691524275454345286, 691525147445952583, 691525387473125376, 691525891100246027, 691526165566980176, 691526344919744513, 691526943006523441]
for emoji_id in reactions:
emoji = self.client.get_emoji(emoji_id)
await msg.add_reaction(emoji)
def setup(client):
    # discord.py extension hook: called by `bot.load_extension` to register
    # the Marriage cog on the running bot.
    client.add_cog(Marriage(client))
40698 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import unittest
import re
from query import *
def squeeze(value):
    """Return *value* with every carriage return and newline removed."""
    for unwanted in ('\r', '\n'):
        value = value.replace(unwanted, '')
    return value
class QueryTestCase(unittest.TestCase):
    """Unit tests for query.parse_names and query.do (imported via *)."""

    def test_parse_names(self):
        # Inputs without exactly two names must yield None.
        self.assertEqual(None, parse_names(u''))
        self.assertEqual(None, parse_names(u' '))
        self.assertEqual(None, parse_names(u'\t'))
        self.assertEqual(None, parse_names(u'\r'))
        self.assertEqual(None, parse_names(u'\n'))
        self.assertEqual(None, parse_names(u'a'))
        self.assertEqual(None, parse_names(u' a'))
        self.assertEqual(None, parse_names(u' a\t'))
        self.assertEqual(None, parse_names(u' a\t '))
        # Two names may be separated by plain whitespace...
        self.assertEqual((u'a', u'b'), parse_names(u' a b'))
        self.assertEqual((u'a', u'b'), parse_names(u' a b '))
        self.assertEqual((u'a', u'b'), parse_names(u' a  b '))
        # ...or by ASCII/fullwidth dashes and tildes (note: some of the
        # following literals contain fullwidth/CJK separator characters that
        # look identical to the ASCII forms).
        self.assertEqual((u'a', u'b'), parse_names(u'a-b'))
        self.assertEqual((u'a', u'b'), parse_names(u'a - b'))
        self.assertEqual((u'a', u'b'), parse_names(u'a〜b'))
        self.assertEqual((u'a', u'b'), parse_names(u'a~b'))
        self.assertEqual((u'a', u'b'), parse_names(u'a-b'))
        self.assertEqual((u'a', u'b'), parse_names(u'a - b'))
        # Three tokens are rejected.
        self.assertEqual(None, parse_names(u' a b c'))

    def test_do(self):
        self.maxDiff = None
        # Collapse whitespace between tags so the expected XML can be written
        # on one line.
        xml = re.sub(ur'>\s*<', u'><', do(u''))
        self.assertEqual(u'<?xml version="1.0"?><items><item uid="result" arg="" valid="no"><title>type “from” and “to” station names</title></item></items>', xml)
        xml = re.sub(ur'>\s*<', u'><', do(u' a '))
        self.assertEqual(u'<?xml version="1.0"?><items><item uid="result" arg="" valid="no"><title>type “from” and “to” station names</title></item></items>', xml)
        xml = re.sub(ur'>\s*<', u'><', do(u' a b '))
        self.assertEqual(u'<?xml version="1.0"?><items><item uid="result" arg="http://transit.yahoo.co.jp/search/result?from=a&to=b" valid="yes"><title>Query routes from a to b</title></item></items>', xml)


if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
6633565 | """
Setuptools based setup module
"""
from setuptools import setup, find_packages
import versioneer
setup(
    name='pyiron_electrochemistry',
    # Version and build commands are derived from git tags via versioneer.
    version=versioneer.get_version(),
    description='pyiron_electrochemistry - module extension to pyiron.',
    long_description='http://pyiron.org',
    url='https://github.com/pyiron/pyiron_electrochemistry',
    author='Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department',
    author_email='<EMAIL>',
    license='BSD',
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Topic :: Scientific/Engineering :: Physics',
                 'License :: OSI Approved :: BSD License',
                 'Intended Audience :: Science/Research',
                 'Operating System :: OS Independent',
                 'Programming Language :: Python :: 3.6',
                 'Programming Language :: Python :: 3.7',
                 'Programming Language :: Python :: 3.8',
                 'Programming Language :: Python :: 3.9'],
    keywords='pyiron',
    # Ship only the actual package code; exclude tests, docs and CI scaffolding.
    packages=find_packages(exclude=["*tests*", "*docs*", "*binder*", "*conda*", "*notebooks*", "*.ci_support*"]),
    install_requires=[
        'pyiron_base'
    ],
    cmdclass=versioneer.get_cmdclass(),
    )
| StarcoderdataPython |
3246975 | from collections import deque
from itertools import combinations
from advent.load import read_input
def sum_window(numbers):
    """Set of all sums of two entries of *numbers* with different values.

    (Advent of Code 2020 day 9: the two addends must have different values,
    hence the inequality filter.)
    """
    sums = set()
    for first, second in combinations(numbers, 2):
        if first != second:
            sums.add(first + second)
    return sums
numbers = [int(x) for x in read_input()]

# Part 1: slide a 25-number window over the input; the first number that is
# not a sum of two different-valued numbers in the window is "invalid".
window = deque(numbers[:25], 25)
sums = sum_window(window)
invalid = 0
for number in numbers[25:]:
    if number not in sums:
        invalid = number
        break
    window.append(number)
    # NOTE(review): recomputing all pair sums each step is O(k^2) per number;
    # fine for AoC input sizes.
    sums = sum_window(window)
print(invalid)

# Part 2: find a contiguous run summing to `invalid`; answer is min+max of it.
for i in range(len(numbers)):
    acc = deque()
    for number in numbers[i:]:
        acc.append(number)
        s = sum(acc)
        if s > invalid:
            # Overshot: empty the run so the `if acc` check below fails.
            acc.clear()
        if s >= invalid:
            break
    if acc:
        print(min(acc) + max(acc))
        break
| StarcoderdataPython |
11384346 | <filename>kslurm/cli/kpy.py
from __future__ import absolute_import
import importlib.resources as impr
import os
import re
import shutil
import subprocess as sp
import sys
import tarfile
import tempfile
from pathlib import Path
from typing import Literal, Optional, cast, overload
import attr
from kslurm.args import Subcommand, choice, flag, keyword, positional, shape, subcommand
from kslurm.args.command import CommandError, command
from kslurm.args.protocols import WrappedCommand
from kslurm.models import validators
from kslurm.shell import Shell
from kslurm.venv import (
KpyIndex,
MissingPipdirError,
PromptRefreshError,
VenvCache,
VenvPrompt,
rebase_venv,
)
def _get_unique_name(index: KpyIndex, stem: str = "venv", i: int = 0) -> str:
if i == 0:
candidate = stem
else:
candidate = f"{stem}{i}"
if candidate in index:
return _get_unique_name(index, stem, i + 1)
return candidate
class MissingSlurmTmpdirError(CommandError):
    """Raised when a command that needs $SLURM_TMPDIR runs outside a compute node."""
    pass
@overload
def _get_slurm_tmpdir(allow_missing: Literal[True] = ...) -> Optional[Path]:
...
@overload
def _get_slurm_tmpdir(allow_missing: Literal[False] = ...) -> Path:
...
def _get_slurm_tmpdir(allow_missing: bool = True):
if not os.environ.get("SLURM_TMPDIR"):
if not allow_missing:
raise MissingSlurmTmpdirError(
"This command can only be used in a compute node. Use `krun` to start "
"an interactive session"
)
return
return Path(os.environ["SLURM_TMPDIR"])
@command
def _bash():
    """Echo script for inclusion in .bashrc

    e.g.

    kpy bash >> $HOME/.bashrc
    """
    # Print a `source` line pointing at the bundled bash integration script.
    with impr.path("kslurm.bin", "bash.sh") as script:
        source_line = f"\nsource {script.resolve()}"
        print(source_line)
@command(inline=True)
def _load(
    name: str = positional(default="", help="Test help"),  # NOTE(review): help text looks like a placeholder — confirm intended wording
    new_name: list[str] = keyword(match=["--as"], validate=validators.fs_name),
    script: list[str] = keyword(match=["--script"]),
):
    """Load a saved python venv

    Run without name to list available venvs for loading

    On a compute node ($SLURM_TMPDIR set) the venv is unpacked under the
    job-local tmp dir and registered in the node-local KpyIndex; elsewhere it
    goes into a throwaway temp dir. With --script, shell activation code is
    written to the given file instead of activating directly (return code 2
    signals that path to the wrapper).
    """
    slurm_tmp = _get_slurm_tmpdir()
    if slurm_tmp:
        index = KpyIndex(slurm_tmp)
        # --as renames the env on this node; default to its saved name.
        label = new_name[0] if new_name else name
        if label in index:
            print(
                f"An environment called '{label}' already exists. You can load "
                f"'{name}' under a different name using --as:\n"
                f"\tkpy load {label} --as <name>\n"
                f"You can also activate the existing '{label}' using\n"
                f"\tkpy activate {label}"
            )
            return 1
        venv_dir = Path(tempfile.mkdtemp(prefix="kslurm-venv-", dir=slurm_tmp / "tmp"))
    else:
        index = None
        label = name
        venv_dir = Path(tempfile.mkdtemp(prefix="kslurm-"))

    venv_cache = VenvCache()
    if not name or name not in venv_cache:
        print("Valid venvs:\n" + str(venv_cache))
        return

    print(f"Unpacking venv '{name}'", end="")
    if label != name:
        print(f" as '{label}'")
    else:
        print()
    with tarfile.open(venv_cache[name], "r") as tar:
        tar.extractall(venv_dir)

    print("Updating paths")
    # Saved venvs contain absolute paths from the machine they were saved on;
    # rewrite them to point at the new location.
    rebase_venv(venv_dir)

    prompt = VenvPrompt(venv_dir)
    prompt.update_prompt(label)
    prompt.update_hash()
    prompt.save()
    if index is not None:
        index[label] = str(venv_dir)
        index.write()

    shell = Shell.get()
    if script:
        with Path(script[0]).open("w") as f:
            f.write(shell.source(venv_dir))
        return 2
    shell.activate(venv_dir)
@command(inline=True)
def _export(
    mode: str = choice(["venv"], help="What sort of export to perform"),
    name: str = positional(help="Name of the venv to export"),
    path: list[Path] = keyword(
        ["--path", "-p"], default=None, help="Path for the export"
    ),  # NOTE(review): with the None default, `path[0]` below would fail when --path is omitted — confirm the CLI layer makes this effectively required
):
    """Export a saved venv

    Saves to a path of choice. Currently "venv" is the only valid export mode. Exported
    venvs can only be safely activated by a bash shell.
    """
    venv_cache = VenvCache()
    if name not in venv_cache:
        print("Valid venvs:\n" + str(venv_cache))
        return 1
    # Refuse to clobber an existing destination.
    if path[0].exists():
        print(f"{path[0]} already exists")
        return 1

    print("exporting...")
    with tarfile.open(venv_cache[name], "r") as tar:
        tar.extractall(path[0])
    # Rewrite the archived absolute paths for the export location.
    rebase_venv(path[0])
    print(
        "Export complete! Activate the venv by running\n\tsource "
        f"{path[0]}/bin/activate"
    )
@command(inline=True)
def _save(
    name: str = positional(format=validators.fs_name),
    force: bool = flag(match=["--force", "-f"]),
):
    """Save current venv

    Archives the venv pointed at by $VIRTUAL_ENV into the venv cache under
    *name*; -f overwrites an existing entry of the same name.
    """
    if not os.environ.get("VIRTUAL_ENV"):
        print(
            "No active virtual env detected. Please activate one, or ensure "
            "$VIRTUAL_ENV is being set correctly"
        )
        # BUG FIX: without this early return the function fell through and
        # raised KeyError on os.environ["VIRTUAL_ENV"] below.
        return 1

    venv_cache = VenvCache()

    delete = False
    if name in venv_cache:
        if force:
            delete = True
        else:
            print(f"{name} already exists. Run with -f to force overwrite")
            return
    dest = venv_cache.get_path(name)

    # NOTE(review): suffix "tar.gz" has no leading dot — confirm intended.
    _, tmp = tempfile.mkstemp(prefix="kslurm-", suffix="tar.gz")
    venv_dir = Path(os.environ["VIRTUAL_ENV"])
    # Stamp the saved env with its cache name before archiving.
    prompt = VenvPrompt(venv_dir)
    prompt.update_prompt(name)
    prompt.update_hash()
    prompt.save()
    with tarfile.open(tmp, mode="w:gz") as tar:
        tar.add(venv_dir, arcname="")

    if delete:
        os.remove(dest)

    # Keep the node-local index in sync when running on a compute node.
    slurm_tmp = _get_slurm_tmpdir()
    if slurm_tmp:
        index = KpyIndex(slurm_tmp)
        index[name] = str(venv_dir)
        index.write()

    shutil.move(tmp, dest)
@command(inline=True)
def _create(
    name: str = positional("", help="Name of the new venv", format=validators.fs_name),
    version: str = shape(
        default="",
        match=lambda s: bool(re.match(r"^[23]\.\d{1,2}$", s)),
        syntax="(2|3).x",
        examples=["2.7", "3.8"],
        help="Python version to use in new venv. An appropriate executable must be on "
        "the $PATH (e.g. 3.7 -> python3.7",
    ),
    script: list[str] = keyword(match=["--script"]),
):
    """Create a new venv

    If no name provided, a placeholder name will be generated
    """
    if version:
        ver = ["-p", version]
    else:
        # No explicit version: try to detect the loaded Lmod python module.
        try:
            data = sp.run(
                "eval $($LMOD_CMD bash list python)", shell=True, capture_output=True
            )
            if match := re.search(r"(?<=python\/)\d\.\d{1,2}", data.stdout.decode()):
                ver = ["-p", match[0]]
            else:
                ver = []
        except RuntimeError:
            # NOTE(review): sp.run without check=True does not raise
            # RuntimeError — confirm what failure this guard was meant for.
            ver = []

    slurm_tmp = _get_slurm_tmpdir()
    if slurm_tmp:
        index = KpyIndex(slurm_tmp)
        name = name if name else _get_unique_name(index, "venv")
        if name in index:
            print(
                f"An environment called '{name}' already exists. You can activate "
                f"the existing '{name}' using\n"
                # BUG FIX: this line was missing the f-prefix, so the literal
                # text "{name}" was printed instead of the env name.
                f"\tkpy activate {name}"
            )
            return 1
        venv_dir = tempfile.mkdtemp(prefix="kslurm-venv-", dir=slurm_tmp / "tmp")
        # On compute nodes stay offline: virtualenv/pip must not hit the net.
        no_download = ["--no-download"]
        no_index = ["--no-index"]
    else:
        index = None
        name = name if name else "venv"
        venv_dir = tempfile.mkdtemp(prefix="kslurm-")
        no_download = []
        no_index = []
    try:
        sp.run(
            [
                sys.executable,
                "-m",
                "virtualenv",
                venv_dir,
                "--symlinks",
                *ver,
                *no_download,
            ],
        )
        sp.run(
            [
                os.path.join(venv_dir, "bin", "python"),
                "-m",
                "pip",
                "install",
                "--upgrade",
                "pip",
                *no_index,
            ],
        )
    except RuntimeError as err:
        print(err.args[0])
        return 1

    if index is not None:
        index[name] = str(venv_dir)
        index.write()
    prompt = VenvPrompt(Path(venv_dir))
    prompt.update_prompt(name)
    prompt.save()
    shell = Shell.get()
    if script:
        with Path(script[0]).open("w") as f:
            f.write(shell.source(Path(venv_dir)))
        return 2
    shell.activate(Path(venv_dir))
@command(inline=True)
def _activate(name: str = positional(""), script: list[str] = keyword(["--script"])):
    """Activate a venv already created or loaded

    Only works on compute nodes. Use kpy create or kpy load --as on a login node
    """
    # Raises MissingSlurmTmpdirError on login nodes (allow_missing=False).
    slurm_tmp = _get_slurm_tmpdir(False)
    index = KpyIndex(slurm_tmp)
    # Without a name, just list what is initialized on this node.
    if not name:
        print(str(index))
        return
    if name not in index:
        print(
            f"An environment with the name '{name}' has not yet been initialized. ",
            end="",
        )
        try:
            # Suggest `kpy load` when a saved venv of that name exists.
            venv_cache = VenvCache()
            if name in venv_cache:
                print(
                    f"The saved environment called '{name}' can be loaded using\n"
                    f"\tkpy load {name}\n"
                )
        except MissingPipdirError:
            # No cache dir configured; skip the suggestion silently.
            pass
        print(f"A new environment can be created using\n\tkpy create {name}")
        print(f"Currently initialized environments:\n{index}")
        return 1

    shell = Shell.get()
    # --script: emit activation code for the wrapper instead of activating here.
    if script:
        with Path(script[0]).open("w") as f:
            f.write(shell.source(Path(index[name])))
        return 2
    shell.activate(Path(index[name]))
@command
def _list():
    """List all saved venvs.

    To list initialized venvs (either created or loaded), run `kpy activate` without any
    arguments
    """
    print(str(VenvCache()))
@command
def _refresh():
    """Refresh the active venv's prompt, printing its name on success.

    Silently does nothing when no venv is active or the refresh fails.
    """
    venv_path = os.environ.get("VIRTUAL_ENV")
    if venv_path is None:
        return
    prompt = VenvPrompt(Path(venv_path))
    try:
        prompt.refresh()
    except PromptRefreshError:
        return
    print(prompt.name)
def _kpy_wrapper(argv: list[str] = sys.argv):
    """Print the filesystem location of the bundled kpy wrapper shell script."""
    with impr.path("kslurm.bin", "kpy-wrapper.sh") as wrapper_path:
        print(wrapper_path)
@command(inline=True)
def _rm(name: str = positional("")):
    """Delete a saved venv from the cache.

    Run without a name to list the venvs available for deletion.
    """
    try:
        venv_cache = VenvCache()
    except MissingPipdirError as err:
        print(err.msg)
        return 1
    if not name:
        print("Valid venvs:\n" + str(venv_cache))
        # BUG FIX: previously fell through and printed a second, confusing
        # "'' is not a valid venv" message; just show the listing and stop
        # (matches the behaviour of `kpy load` with no name).
        return
    if name not in venv_cache:
        print(f"{name} is not a valid venv. Currently saved venvs are:\n{venv_cache}")
        return 1
    os.remove(venv_cache[name])
    return
@attr.frozen
class _KpyModel:
    # Dispatch table for the `kpy` CLI: maps each subcommand name to its
    # wrapped entry point. Underscore-prefixed entries are internal plumbing
    # invoked by the shell integration rather than typed by users.
    command: Subcommand = subcommand(
        commands={
            "load": _load.cli,
            "save": _save.cli,
            "bash": _bash.cli,
            "create": _create.cli,
            "activate": _activate.cli,
            "list": _list.cli,
            "rm": _rm.cli,
            "export": _export.cli,
            "_refresh": _refresh.cli,
            "_kpy_wrapper": cast(WrappedCommand, _kpy_wrapper),
        },
    )
@command
def kpy(cmd_name: str, args: _KpyModel, tail: list[str]):
    """Set of commands for interacting with python virtual envs"""
    # Delegate to the selected subcommand, prefixing its argv with the
    # combined "<parent> <sub>" entry name.
    sub_name, sub_func = args.command
    return sub_func([f"{cmd_name} {sub_name}", *tail])


if __name__ == "__main__":
    # Manual smoke test: run `kpy create` directly.
    kpy.cli(["kpy", "create"])
| StarcoderdataPython |
3356021 | <filename>2020/src/day4.py<gh_stars>1-10
# advent of code
# day4
import re
# The seven passport fields that must be present ('cid' is optional).
keywords = ['byr', 'ecl', 'eyr', 'hcl', 'hgt', 'iyr', 'pid']
keywords.sort()

valid_passport_count_a = 0
valid_passport_count_b = 0

with open("../input/day4.txt","r") as file:
    entries = file.readlines()

# first puzzle: a passport is valid when all seven required keys are present.
# Passports are separated by blank lines.
given_keywords = []
for i in range(0, len(entries)):
    e = entries[i]
    if e == '\n' or i == len(entries) - 1:
        # NOTE(review): on the last line this branch finalizes WITHOUT parsing
        # that line's fields, so the final passport can be under-counted — the
        # `+1` in the part-a print below appears to compensate. Confirm against
        # the input format.
        given_keywords.sort() # for debugging
        if len(given_keywords) == len(keywords):
            valid_passport_count_a += 1
        # reset
        given_keywords = []
    else:
        items = e.split(' ')
        # NOTE(review): the inner loop reuses `i`, shadowing the outer index;
        # harmless because `for` rebinds `i` each iteration, but worth renaming.
        for i in items:
            key, value = i.split(':')
            if key != 'cid':
                given_keywords.append(key)

# second puzzle: each field's value must additionally satisfy its own rule.
given_keywords = []
debug_given_items=[]
for i in range(0, len(entries)):
    e = entries[i]
    if e != '\n':
        items = e.split(' ')
        for item in items:
            key, value = item.split(':')
            # whether the key's value is valid or not
            valid_value = False
            value = value.rstrip('\r\n') # remove windows end of line
            if key == 'byr' :
                valid_value = 1920 <= int(value) <= 2002
            elif (key == 'ecl'):
                valid_value = value in ['amb', 'blu', 'brn', 'gry', 'grn' ,'hzl' ,'oth']
            elif key == 'iyr':
                valid_value = 2010 <= int(value) <= 2020
            elif key == 'eyr':
                valid_value = 2020 <= int(value) <= 2030
            elif key == 'hgt':
                if 'cm' in value:
                    valid_value = 150 <= int(value.split('cm')[0]) <= 193
                elif 'in' in value:
                    valid_value = 59 <= int(value.split('in')[0]) <= 76
            elif key == 'hcl' :
                # NOTE(review): pattern lacks a trailing `$`, so values longer
                # than 7 chars (e.g. '#123abcd') also match — confirm intended.
                valid_value = re.search("^#[0-9a-f]{6}", value)
            elif key == 'pid' :
                valid_value = re.search("^[0-9]{9}$", value)
            if valid_value:
                given_keywords.append(key)
                debug_given_items.append(key+":"+value) # for debugging
    # Finalize when the NEXT line is blank (or at end of input) — unlike part
    # a, the current line has already been parsed at this point.
    if (i < len(entries) - 1 and entries[i+1] == '\n') or i ==len(entries) -1:
        given_keywords.sort() # for debugging
        if len(given_keywords) == len(keywords):
            valid_passport_count_b += 1
            # for debugging
            debug_given_items.sort()
            #print("b) " + str(valid_passport_count_b)+":"+str(debug_given_items))
        # reset
        given_keywords = []
        debug_given_items=[]

print("a) valid passports ", valid_passport_count_a+1)
print("b) valid passports ",valid_passport_count_b)
| StarcoderdataPython |
12823128 | <reponame>lone17/deform-conv
import keras
import numpy as np
import tensorflow as tf
from keras import backend as K
def weighted_binary_crossentropy(y_true, y_pred, class_weights=None):
    """Binary cross-entropy with optional per-class weighting.

    class_weights: two weights [w_negative, w_positive]; normalized to sum to
    1 before use. When None, the unreduced element-wise BCE map is returned;
    otherwise the weighted scalar mean is returned.
    """
    # Original binary crossentropy (see losses.py):
    # K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)

    # Calculate the binary crossentropy
    b_ce = K.binary_crossentropy(y_true, y_pred)

    if class_weights is None:
        return b_ce
    else:
        class_weights = np.array(class_weights) / np.sum(class_weights)

        y_shape = list(K.int_shape(y_pred))
        # NOTE(review): the check requires exactly 2 weights but the error
        # message reports y_shape[-1] as the class count — confirm wording.
        if len(class_weights) != 2:
            raise ValueError('''Number of weights ({}) does not match number of
            classes ({})'''.format(len(class_weights), y_shape[-1]))

        # Apply the weights
        # Per-element weight: w_positive where the label is 1, w_negative where 0.
        weight_vector = y_true * class_weights[1] + (1. - y_true) * class_weights[0]
        weighted_b_ce = weight_vector * b_ce

        # Return the mean error
        return K.mean(weighted_b_ce)
def weighted_categorical_crossentropy(y_true, y_pred, class_weights=None):
    """Categorical cross-entropy with optional per-class weighting.

    class_weights: one weight per class (length must equal the last dimension
    of y_pred); normalized to sum to 1. When None, the unreduced loss map is
    returned; otherwise the weighted scalar mean is returned.
    """
    # Original binary crossentropy (see losses.py):
    # K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)

    # Calculate the crossentropy
    loss_map = K.categorical_crossentropy(y_true, y_pred)

    if class_weights is None:
        return loss_map
    else:
        class_weights = np.array(class_weights) / np.sum(class_weights)

        y_shape = list(K.int_shape(y_pred))
        if len(class_weights) != y_shape[-1]:
            raise ValueError('''Number of weights ({}) does not match number of
            classes ({})'''.format(len(class_weights), y_shape[-1]))

        # Compute the weight
        # Per-element weight = weight of that element's one-hot true class
        # (assumes y_true is one-hot along the last axis — TODO confirm).
        weight_map = K.zeros_like(y_pred[..., 0])
        for i in range(len(class_weights)):
            weight_map += y_true[..., i] * class_weights[i]

        # Apply the weights
        loss_map = loss_map * weight_map

        # Return the mean error
        return K.mean(loss_map)
def dice_loss(y_true, y_pred, ignore_last_channel, smooth=1e-6):
    """Negative soft Dice coefficient (lower is better, minimum -1).

    Dice is computed per sample and channel over spatial axes [1, 2], then
    averaged; *smooth* guards against division by zero. With
    *ignore_last_channel* the final channel (e.g. background) is excluded.
    """
    if ignore_last_channel:
        y_true = y_true[..., :-1]
        y_pred = y_pred[..., :-1]

    overlap = K.sum(y_true * y_pred, axis=[1, 2])
    total = K.sum(y_true, axis=[1, 2]) + K.sum(y_pred, axis=[1, 2])
    dice_per_channel = (2. * overlap + smooth) / (total + smooth)
    return -K.mean(K.mean(dice_per_channel, axis=-1))
def custom_loss(y_true, y_pred, class_weights=None,
                loss_weights=None, ignore_last_channel=False):
    """Weighted sum of dice loss and weighted binary cross-entropy.

    Defaults preserve the original behaviour: class_weights=[0.1, 0.9],
    loss_weights=[4, 0.5] (dice weight, cross-entropy weight).
    """
    # Idiom fix: the defaults were mutable list literals shared across calls;
    # use None sentinels and substitute inside the function instead.
    if class_weights is None:
        class_weights = [0.1, 0.9]
    if loss_weights is None:
        loss_weights = [4, 0.5]
    dice = dice_loss(y_true, y_pred, ignore_last_channel=ignore_last_channel)
    cross_entropy = weighted_binary_crossentropy(y_true, y_pred, class_weights)
    return loss_weights[0] * dice + loss_weights[1] * cross_entropy
def custom_categorical_loss(y_true, y_pred, class_weights=None,
                            loss_weights=None, ignore_last_channel=False):
    """Weighted sum of dice loss and weighted categorical cross-entropy.

    Defaults preserve the original behaviour: class_weights=[1, 1, 1, 0.3],
    loss_weights=[4, 0.5] (dice weight, cross-entropy weight).
    """
    # Idiom fix: the defaults were mutable list literals shared across calls;
    # use None sentinels and substitute inside the function instead.
    if class_weights is None:
        class_weights = [1, 1, 1, 0.3]
    if loss_weights is None:
        loss_weights = [4, 0.5]
    dice = dice_loss(y_true, y_pred, ignore_last_channel=ignore_last_channel)
    cross_entropy = weighted_categorical_crossentropy(y_true, y_pred, class_weights)
    return loss_weights[0] * dice + loss_weights[1] * cross_entropy
def IoU_score(y_true, y_pred, smooth=1e-6, ignore_last_channel=False):
    """Mean soft intersection-over-union across channels and the batch.

    IoU is computed per sample and channel over spatial axes [1, 2];
    *smooth* guards against empty unions. With *ignore_last_channel* the
    final channel (e.g. background) is excluded.
    """
    if ignore_last_channel:
        y_true = y_true[..., :-1]
        y_pred = y_pred[..., :-1]

    overlap = K.sum(K.abs(y_true * y_pred), axis=[1, 2])
    union = K.sum(y_true, [1, 2]) + K.sum(y_pred, [1, 2]) - overlap
    per_channel_iou = (overlap + smooth) / (union + smooth)
    return K.mean(K.mean(per_channel_iou, axis=-1))
29773 |
import grpc
from concurrent import futures
import time
import sys
sys.path.insert(0, 'service/')
from service_spec import fake_news_pb2
from service_spec import fake_news_pb2_grpc
import json
import test
class fake_news_classificationServicer(fake_news_pb2_grpc.fake_news_classificationServicer):
    """gRPC servicer that classifies input text via the model in test.py."""

    def classify(self, request, context):
        # Delegate prediction to test.predict and wrap the result in the
        # generated protobuf response message.
        response = fake_news_pb2.OutputMessage()
        response.result = test.predict(request.value)
        return response
# Wire the servicer into a thread-pooled gRPC server listening on all
# interfaces, port 7011.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
fake_news_pb2_grpc.add_fake_news_classificationServicer_to_server(fake_news_classificationServicer(), server)
print('Starting server. Listening on port 7011.')
server.add_insecure_port('0.0.0.0:7011')
server.start()
# server.start() does not block, so park the main thread; Ctrl-C triggers a
# clean shutdown.
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
| StarcoderdataPython |
4811998 | <reponame>kozistr/RAdam-tensorflow<filename>radam.py
"""RAdamOptimizer implementation `in Tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
class RAdamOptimizer(tf.train.Optimizer):
    """Optimizer that implements the RAdam algorithm.

    See [Liu et al., 2019](https://arxiv.org/abs/1908.03265)
    ([pdf](https://arxiv.org/abs/1908.03265)).
    """

    def __init__(self,
                 learning_rate: float = 0.001,
                 beta1: float = 0.9,
                 beta2: float = 0.999,
                 epsilon: float = 1e-6,
                 decay: float = 0.,
                 warmup_proportion: float = 0.1,
                 weight_decay: float = 0.,
                 exclude_from_weight_decay: list = None,
                 amsgrad: bool = False,
                 use_locking: bool = False,
                 name: str = "RAdam"):
        """Construct a RAdam optimizer.

        Args:
            learning_rate: base step size.
            beta1/beta2: exponential decay rates for the 1st/2nd moments.
            epsilon: small constant added to the denominator.
            decay: inverse-time learning-rate decay factor.
            warmup_proportion: stored for API compatibility (unused here).
            weight_decay: L2 weight-decay coefficient applied to the update.
            exclude_from_weight_decay: regexes of variable names to skip.
            amsgrad: keep a running maximum of the 2nd moment (AMSGrad).
            use_locking/name: passed through to tf.train.Optimizer.
        """
        super(RAdamOptimizer, self).__init__(use_locking, name)

        if not 0. <= beta1 < 1.:
            raise ValueError("Invalid beta1 value : {}".format(beta1))
        if not 0. <= beta2 < 1.:
            raise ValueError("Invalid beta2 value : {}".format(beta2))
        if epsilon <= 0.:
            raise ValueError("Invalid epsilon value : {}".format(epsilon))

        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._decay = decay
        self._warmup_proportion = warmup_proportion
        self._weight_decay = weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
        self._amsgrad = amsgrad
        self._base_lr = learning_rate

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """Build the update ops for one RAdam step and bump global_step."""
        lr = self._lr
        t = tf.cast(global_step, dtype=tf.float32)

        # Optional inverse-time learning-rate decay.
        if self._decay > 0.:
            lr *= (1. / (1. + self._decay * t))

        t += 1

        bias_correction1 = 1. - (self._beta1 ** t)
        bias_correction2 = 1. - (self._beta2 ** t)

        # Compute the maximum length of the approximated SMA
        sma_inf = 2. / (1. - self._beta2) - 1.
        # Compute the length of the approximated SMA
        sma_t = sma_inf - 2. * t * (self._beta2 ** t) / bias_correction2

        assignments = []
        for grad, param in grads_and_vars:
            if grad is None or param is None:
                continue

            param_name = self._get_variable_name(param.name)

            # Per-parameter slot variables for the 1st/2nd moments.
            m = tf.get_variable(
                name=param_name + "/radam_m",
                shape=param.shape.as_list(),
                dtype=tf.float32,
                trainable=False,
                initializer=tf.zeros_initializer())
            v = tf.get_variable(
                name=param_name + "/radam_v",
                shape=param.shape.as_list(),
                dtype=tf.float32,
                trainable=False,
                initializer=tf.zeros_initializer())
            if self._amsgrad:
                v_hat = tf.get_variable(
                    name=param_name + "/radam_v_hat",
                    shape=param.shape.as_list(),
                    dtype=tf.float32,
                    trainable=False,
                    initializer=tf.zeros_initializer())

            v_t = (
                tf.multiply(self._beta2, v) + tf.multiply(1. - self._beta2, tf.square(grad)))
            m_t = (
                tf.multiply(self._beta1, m) + tf.multiply(1. - self._beta1, grad))

            m_corr_t = m_t / bias_correction1
            if self._amsgrad:
                # BUG FIX: keep the running maximum in a separate tensor so
                # the `v_hat` variable handle survives for the assignment
                # below. The original rebound the name `v_hat` to the maximum
                # tensor and then emitted a broken no-op `v_hat.assign(v_hat)`.
                v_hat_t = tf.math.maximum(v_hat, v_t)
                v_corr_t = v_hat_t / bias_correction2
            else:
                v_corr_t = v_t / bias_correction2

            # Rectification: use the adaptive step only once the variance
            # estimate is tractable (SMA length > 4), else plain momentum.
            p_t = tf.cond(
                sma_t > 4.,
                lambda: tf.sqrt(
                    (sma_t - 4.) * (sma_t - 2.) * sma_inf /
                    ((sma_inf - 4.) * (sma_inf - 2.) * sma_t)
                ) * m_corr_t / tf.sqrt(v_corr_t + self._epsilon),
                lambda: m_corr_t
            )

            if self._do_use_weight_decay(param_name):
                p_t += self._weight_decay * param

            p_t = param - lr * p_t

            update_list = [param.assign(p_t), m.assign(m_t), v.assign(v_t)]
            if self._amsgrad:
                update_list.append(v_hat.assign(v_hat_t))
            assignments.extend(update_list)

        # update the global step
        assignments.append(global_step.assign_add(1))
        return tf.group(*assignments, name=name)

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if not self._weight_decay:
            return False
        # BUG FIX: the exclusion list is stored as
        # `self._exclude_from_weight_decay`; the original read the
        # non-existent `self.exclude_from_weight_decay` and raised
        # AttributeError whenever an exclusion list was supplied.
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True

    @staticmethod
    def _get_variable_name(param_name):
        """Get the variable name from the tensor name (strip ':0' suffixes)."""
        m = re.match("^(.*):\\d+$", param_name)
        if m is not None:
            param_name = m.group(1)
        return param_name
| StarcoderdataPython |
9793826 | from bot.helper.telegram_helper.message_utils import sendMessage
from bot import AUTHORIZED_CHATS, SUDO_USERS, dispatcher, DB_URI
from telegram.ext import CommandHandler
from bot.helper.telegram_helper.filters import CustomFilters
from telegram.ext import Filters
from telegram import Update
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot.helper.ext_utils.db_handler import DbManger
def authorize(update, context):
    """Grant bot access to a user id, a replied-to user, or the current chat."""
    def _grant(chat_id, kind):
        # kind is 'User' or 'Chat'; returns the status message to send back.
        if chat_id in AUTHORIZED_CHATS:
            return f'{kind} Already Authorized'
        if DB_URI is not None:
            # The DB layer persists (and reports) the authorization itself.
            return DbManger().db_auth(chat_id)
        with open('authorized_chats.txt', 'a') as file:
            file.write(f'{chat_id}\n')
        AUTHORIZED_CHATS.add(chat_id)
        return f'{kind} Authorized'

    reply_message = update.message.reply_to_message
    parts = update.message.text.split(' ')
    if len(parts) == 2:
        # /authorize <id>
        msg = _grant(int(parts[1]), 'User')
    elif reply_message is None:
        # Bare /authorize in a chat: authorize the chat itself.
        msg = _grant(update.effective_chat.id, 'Chat')
    else:
        # /authorize as a reply: authorize the replied-to user.
        msg = _grant(reply_message.from_user.id, 'User')
    sendMessage(msg, context.bot, update)
def unauthorize(update, context):
    """Revoke bot access for a user id, a replied-to user, or the current chat."""
    def _revoke(chat_id, kind):
        # kind is 'User' or 'Chat'; returns the status message to send back.
        if chat_id not in AUTHORIZED_CHATS:
            return f'{kind} Already Unauthorized'
        if DB_URI is not None:
            return DbManger().db_unauth(chat_id)
        AUTHORIZED_CHATS.remove(chat_id)
        return f'{kind} Unauthorized'

    reply_message = update.message.reply_to_message
    parts = update.message.text.split(' ')
    if len(parts) == 2:
        # /unauthorize <id>
        msg = _revoke(int(parts[1]), 'User')
    elif reply_message is None:
        # Bare /unauthorize in a chat: unauthorize the chat itself.
        msg = _revoke(update.effective_chat.id, 'Chat')
    else:
        # /unauthorize as a reply: unauthorize the replied-to user.
        msg = _revoke(reply_message.from_user.id, 'User')
    if DB_URI is None:
        # BUG FIX: only rewrite the flat file when the DB is not in use,
        # matching removeSudo(); previously the file was truncated and
        # rewritten unconditionally.
        with open('authorized_chats.txt', 'a') as file:
            file.truncate(0)
            for chat_id in AUTHORIZED_CHATS:
                file.write(f'{chat_id}\n')
    sendMessage(msg, context.bot, update)
def addSudo(update, context):
    """Promote a user (by id or by reply) to sudo."""
    def _promote(user_id):
        if user_id in SUDO_USERS:
            return 'Already Sudo'
        if DB_URI is not None:
            return DbManger().db_addsudo(user_id)
        with open('sudo_users.txt', 'a') as file:
            file.write(f'{user_id}\n')
        SUDO_USERS.add(user_id)
        return 'Promoted as Sudo'

    reply_message = update.message.reply_to_message
    parts = update.message.text.split(' ')
    if len(parts) == 2:
        msg = _promote(int(parts[1]))
    elif reply_message is None:
        msg = "Give ID or Reply To message of whom you want to Promote"
    else:
        msg = _promote(reply_message.from_user.id)
    sendMessage(msg, context.bot, update)
def removeSudo(update, context):
    """Demote a sudo user (by id or by reply)."""
    def _demote(user_id):
        if user_id not in SUDO_USERS:
            return 'Not a Sudo'
        if DB_URI is not None:
            return DbManger().db_rmsudo(user_id)
        SUDO_USERS.remove(user_id)
        return 'Demoted'

    reply_message = update.message.reply_to_message
    parts = update.message.text.split(' ')
    if len(parts) == 2:
        msg = _demote(int(parts[1]))
    elif reply_message is None:
        msg = "Give ID or Reply To message of whom you want to remove from Sudo"
    else:
        msg = _demote(reply_message.from_user.id)
    if DB_URI is None:
        # Rewrite the flat file so removed ids do not reappear on restart.
        with open('sudo_users.txt', 'a') as file:
            file.truncate(0)
            for user_id in SUDO_USERS:
                file.write(f'{user_id}\n')
    sendMessage(msg, context.bot, update)
def sendAuthChats(update, context):
    """Reply with the current authorized chats and sudo users."""
    user = '\n'.join(str(chat_id) for chat_id in AUTHORIZED_CHATS)
    sudo = '\n'.join(str(user_id) for user_id in SUDO_USERS)
    sendMessage(f'<b><u>Authorized Chats</u></b>\n<code>{user}</code>\n<b><u>Sudo Users</u></b>\n<code>{sudo}</code>', context.bot, update)
# Register admin command handlers.  Authorization management is available to
# the owner and sudo users; sudo promotion/demotion is owner-only.
_admin_filter = CustomFilters.owner_filter | CustomFilters.sudo_user
send_auth_handler = CommandHandler(command=BotCommands.AuthorizedUsersCommand,
                                   callback=sendAuthChats, filters=_admin_filter, run_async=True)
authorize_handler = CommandHandler(command=BotCommands.AuthorizeCommand,
                                   callback=authorize, filters=_admin_filter, run_async=True)
unauthorize_handler = CommandHandler(command=BotCommands.UnAuthorizeCommand,
                                     callback=unauthorize, filters=_admin_filter, run_async=True)
addsudo_handler = CommandHandler(command=BotCommands.AddSudoCommand,
                                 callback=addSudo, filters=CustomFilters.owner_filter, run_async=True)
removesudo_handler = CommandHandler(command=BotCommands.RmSudoCommand,
                                    callback=removeSudo, filters=CustomFilters.owner_filter, run_async=True)
for _handler in (send_auth_handler, authorize_handler, unauthorize_handler,
                 addsudo_handler, removesudo_handler):
    dispatcher.add_handler(_handler)
| StarcoderdataPython |
class Solution:
    def minRemoveToMakeValid(self, s: str) -> str:
        """Return *s* with the minimum number of parentheses removed so the
        remaining parentheses are balanced.

        Single pass collecting indices to drop: unmatched ')' directly, plus
        any '(' left on the stack at the end.  O(n) time / O(n) space —
        replaces the original O(n^2) ``i not in list`` membership tests.
        """
        drop = set()        # indices of parentheses to remove
        open_stack = []     # indices of '(' not yet matched
        for i, ch in enumerate(s):
            if ch == '(':
                open_stack.append(i)
            elif ch == ')':
                if open_stack:
                    open_stack.pop()
                else:
                    drop.add(i)     # unmatched ')'
        drop.update(open_stack)     # leftover '(' never matched
        return ''.join(ch for i, ch in enumerate(s) if i not in drop)
| StarcoderdataPython |
1793208 | # -*- coding:utf-8 -*-
import json
import math
# Number of letters per word (Wordle-style word length).
LETTERS = 5
def get_status(guess, actual):
    """Encode the Wordle-style feedback for *guess* against *actual*.

    Digit i of the returned base-3 integer (least significant first) is
    2 for an exact positional match, 1 if the guessed letter occurs
    elsewhere in *actual*, and 0 otherwise.
    """
    code = 0
    weight = 1
    for i, guessed in enumerate(guess):
        if guessed == actual[i]:
            code += 2 * weight
        elif guessed in actual:
            code += weight
        weight *= 3
    return code
def get_socre(status_list):
    """Return the Shannon entropy (in bits) of the outcome counts in *status_list*.

    Zero counts carry no information and are skipped.  Name kept as-is
    ("socre") for backward compatibility with existing callers.
    """
    total = sum(status_list)
    if total == 0:
        # Robustness fix: an empty / all-zero list previously raised
        # ZeroDivisionError; no outcomes means zero information.
        return 0
    score = 0
    for count in status_list:
        if count > 0:
            p = float(count) / total
            score -= p * math.log(p, 2)
    return score
def transform_status(status_str_list):
    """Convert digit strings like '02110' into base-3 integers.

    The first character is the least-significant base-3 digit, matching
    the encoding produced by get_status().
    """
    result = []
    for status_str in status_str_list:
        code = 0
        for place, digit in enumerate(status_str):
            code += int(digit) * (3 ** place)
        result.append(code)
    return result
def get_possible_words(word_list, guess_list, status_list):
    """Filter *word_list* down to the words consistent with every
    (guess, status) pair recorded so far."""
    return [
        word
        for word in word_list
        if all(get_status(guess_list[i], word) == status_list[i]
               for i in range(len(guess_list)))
    ]
# part1: 过滤词汇,仅执行一次
# word_map = dict()
# with open("en_50k.txt", "r") as f:
# for line in f:
# parts = line.strip().split(" ")
# word = parts[0]
# freq = int(parts[1])
# available = True
# if len(word) != LETTERS:
# available = False
# else:
# for c in word:
# if c < 'a' or c > 'z':
# available = False
# break
# if available:
# word_map[word] = freq
# with open("words%d.txt" % LETTERS, "w") as f:
# f.write(json.dumps(word_map))
# part2: 分析词汇
def main(guess_list, status_str_list):
    """Rank candidate next guesses given the history so far.

    guess_list -- previously guessed words
    status_str_list -- one LETTERS-long digit string per guess
        (0 = absent, 1 = present but misplaced, 2 = correct position)

    Prints the best still-possible answers (by frequency) and the highest
    information-score guesses overall.
    """
    status_list = transform_status(status_str_list)
    right_word_score = dict()
    all_word_score = dict()
    with open("words%d.txt" % LETTERS, "r") as f:
        word_map = json.loads(f.read())
        # list() so the key view is indexable on both Python 2 and 3
        # (dict.keys() is a non-indexable view on Python 3).
        all_word_list = list(word_map.keys())
        possible_word_list = get_possible_words(word_map.keys(), guess_list, status_list)
        word_count = len(all_word_list)
    print("Calculating ...")
    for i in range(word_count):
        guess = all_word_list[i]
        status_list = [0] * (3 ** LETTERS)
        for actual in possible_word_list:
            # Accumulate word frequency under the status this guess would yield.
            freq = word_map[actual]
            status = get_status(guess, actual)
            status_list[status] += freq
        if guess in possible_word_list:
            right_word_score[guess] = [get_socre(status_list), word_map[guess]]
        all_word_score[guess] = get_socre(status_list)
    # Sort candidates (possible answers by frequency, all guesses by score).
    # FIX: `lambda item: ...` instead of the Python-2-only tuple-unpacking
    # lambdas `lambda (k, v): ...`, which are a SyntaxError on Python 3.
    candidates = sorted(right_word_score.items(), key=lambda item: item[1][1], reverse=True)[:10]
    candidates2 = sorted(all_word_score.items(), key=lambda item: item[1], reverse=True)[:10]
    print("====== POSSIBLE ======")
    for item in candidates[:3]:
        print("Word = %s, Freq = %d, Score = %.2f" % (item[0], item[1][1], item[1][0]))
    print("====== BEST GUESS ======")
    for item in candidates2[:3]:
        print("Word = %s, Score = %.2f" % (item[0], item[1]))
    print("====== RESULT ======")
    if len(candidates) < 10:
        print("I would like to guess '%s'" % candidates[0][0])
    else:
        print("Still no idea, try '%s'" % candidates2[0][0])
if __name__ == "__main__":
    guess_list = list()
    status_str_list = list()
    # Pre-picked high-information opening guesses, indexed by word length.
    FIRST_GUESS = ["", "i", "to", "ton", "tale", "tears", "tories", "cartons"]
    print("====== BEGIN ======")
    if LETTERS <= 7:
        print("Firstly, I'll try '%s'" % FIRST_GUESS[LETTERS])
    count = 0
    while True:
        # Skip prompting on the very first round when a canned opener exists.
        if LETTERS <= 7 or count != 0:
            words = raw_input("=> Input your actual guess (%d letters, for example: %s): " % (LETTERS, 'a' * LETTERS))
            status_str = raw_input("=> Input your result (0-gray, 1-yellow, 2-green, for example: %s): " % ('0' * LETTERS,))
            # All digits 2 => the word was found.
            if "0" not in status_str and "1" not in status_str:
                print("Success, total times = %d" % (count + 1, ))
                break
            guess_list.append(words)
            status_str_list.append(status_str)
        main(guess_list, status_str_list)
        count += 1
| StarcoderdataPython |
6654550 | <reponame>shreejitverma/GeeksforGeeks<filename>Competitive Programming/Binary Trees/Top View of Binary Tree.py
'''https://practice.geeksforgeeks.org/problems/top-view-of-binary-tree/1
https://www.geeksforgeeks.org/print-nodes-top-view-binary-tree/#:~:text=Top%20view%20of%20a%20binary,node%20at%20its%20horizontal%20distance.
Top View of Binary Tree
Medium Accuracy: 32.3% Submissions: 100k+ Points: 4
Given below is a binary tree. The task is to print the top view of binary tree. Top view of a binary tree is the set of nodes visible when the tree is viewed from the top. For the given below tree
1
/ \
2 3
/ \ / \
4 5 6 7
Top view will be: 4 2 1 3 7
Note: Return nodes from leftmost node to rightmost node.
Example 1:
Input:
1
/ \
2 3
Output: 2 1 3
Example 2:
Input:
10
/ \
20 30
/ \ / \
40 60 90 100
Output: 40 20 10 30 100
Your Task:
Since this is a function problem. You don't have to take input. Just complete the function topView() that takes root node as parameter and returns a list of nodes visible from the top view from left to right.
Expected Time Complexity: O(N)
Expected Auxiliary Space: O(N).
Constraints:
1 ≤ N ≤ 105
1 ≤ Node Data ≤ 105'''
# Python3 program to print top
# view of binary tree
# Binary Tree Node
""" utility that allocates a newNode
with the given key """
class newNode:
    """Binary-tree node: value, child links, and the horizontal distance
    used by the top-view traversal."""

    def __init__(self, key):
        # hd = horizontal distance from the root; filled in during traversal.
        self.hd = 0
        self.left = None
        self.right = None
        self.data = key
# function should print the topView
# of the binary tree
def topview(root):
    """Print the top view of the binary tree rooted at *root*.

    BFS while tracking each node's horizontal distance (hd); the first
    node seen at each hd is the shallowest, i.e. the one visible from
    the top.  Values are printed left-to-right, space separated.
    """
    if root is None:
        return
    from collections import deque  # O(1) pops from the queue front (list.pop(0) is O(n))
    q = deque()
    m = dict()  # hd -> value of the first (topmost) node seen at that hd
    root.hd = 0
    q.append(root)
    while q:
        node = q.popleft()
        hd = node.hd
        # Only the first node encountered per horizontal distance is kept.
        if hd not in m:
            m[hd] = node.data
        if node.left:
            node.left.hd = hd - 1
            q.append(node.left)
        if node.right:
            node.right.hd = hd + 1
            q.append(node.right)
    for hd in sorted(m):
        # BUG FIX: separate values with a space; end="" ran all the
        # printed values together into one unreadable number string.
        print(m[hd], end=" ")
# Driver Code
if __name__ == '__main__':
    """ Create following Binary Tree
    1
    / \
    2 3
    \
    4
    \
    5
    \
    6*"""
    root = newNode(1)
    root.left = newNode(2)
    root.right = newNode(3)
    root.left.right = newNode(4)
    root.left.right.right = newNode(5)
    root.left.right.right.right = newNode(6)
    # Expected output for this right-leaning chain: values for hd -1..2,
    # presumably "2 1 3 6" — nodes 4 and 5 are hidden under 1 and 3.
    print("Following are nodes in top",
          "view of Binary Tree")
    topview(root)
def topView(self, root):
dic = {}
def inner(root, dic, index, height):
if index not in dic:
dic[index] = (root.data, height)
else:
d, h = dic[index]
if height < h:
dic[index] = (root.data, height)
if root.left:
inner(root.left, dic, index-1, height+1)
if root.right:
inner(root.right, dic, index+1, height+1)
inner(root, dic, 0, 0)
maxi = max(dic)
mini = min(dic)
res = []
for i in range(mini, maxi+1):
res.append(dic[i][0])
return res
| StarcoderdataPython |
257826 | <gh_stars>1-10
import wx
from gooey.gui import events
from gooey.gui.pubsub import pub
from gooey.gui.util import wx_util
class Tabbar(wx.Panel):
    """Tabbed container hosting one Gooey config panel per widget group."""
    def __init__(self, parent, buildSpec, configPanels, *args, **kwargs):
        super(Tabbar, self).__init__(parent, *args, **kwargs)
        self._parent = parent
        self.notebook = wx.Notebook(self, style=wx.BK_DEFAULT)
        self.buildSpec = buildSpec
        self.configPanels = configPanels
        # One tab per top-level widget group, in build-spec order; this order
        # is relied on by getSelectedGroup/getActiveConfig below.
        self.options = list(self.buildSpec['widgets'].keys())
        self.layoutComponent()
    def layoutComponent(self):
        """Move each config panel into the notebook and lay out the sizer."""
        for group, panel in zip(self.options, self.configPanels):
            panel.Reparent( self.notebook)
            self.notebook.AddPage(panel, group)
        self.notebook.Layout()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.notebook, 1, wx.EXPAND)
        self.SetSizer(sizer)
        self.Layout()
    def getSelectedGroup(self):
        """Return the group name of the currently selected tab."""
        return self.options[self.notebook.Selection]
    def getActiveConfig(self):
        """Return the config panel behind the currently selected tab."""
        return self.configPanels[self.notebook.Selection]
    def show(self, b):
        # Thin passthrough to wx.Window.Show.
        self.Show(b)
| StarcoderdataPython |
5015955 | import numpy as np
import timeit # ref
import matplotlib.pyplot as plt
from matlablib import *
closefigures(plt)  # matlablib helper: close figures left over from a previous run
def gaussian(x, mu, sig):
    """Unnormalized Gaussian exp(-(x - mu)^2 / (2 sig^2)); elementwise on arrays."""
    deviation = x - mu
    return np.exp(-np.power(deviation, 2.) / (2 * np.power(sig, 2.)))
# x0=np.arange(1, 100,10, dtype=float)
N=1000;
# Sample grid and the two signals: a narrow Gaussian and a step function.
x0=np.linspace(-10, 10, N)
x1=gaussian(x0, 0, .5)
x2=np.heaviside(x0,1, )
# x1=np.random.uniform(size=10000)
# x2=np.random.uniform(size=10000)
# Full discrete convolution (length 2N-1); convolving with the step
# effectively integrates the Gaussian.
x3=np.convolve(x1,x2)
plt.plot(x1)
plt.plot(x2)
# plt.plot(x0,x3)
plt.show();
plt.figure()
plt.plot(x3)
plt.show();
# keyboard()  (matlablib debug hook, left disabled)
6631312 | <filename>ticker.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''show scores of today's NHL games'''
import datetime
import json
import os
import platform
import sys
import time
import requests
from colorama import init, Fore, Style
from pytz import reference
# API purportedly updates every 60 seconds
REFRESH_TIME = 30
API_URL = 'http://live.nhle.com/GameData/RegularSeasonScoreboardv3.jsonp'
TEST = False
def main():
    '''generates a scoreboard of today's NHL games'''
    games_today = False
    playoffs = False
    # Today's date, e.g. "Tuesday 4/21"
    t_object = datetime.datetime.now()
    today_date = "" + t_object.strftime("%A") + " " + "%s/%s" % (t_object.month, t_object.day)
    # Yesterday's date
    y_object = t_object - datetime.timedelta(days=1)
    yesterday_date = "" + y_object.strftime("%A") + " " + "%s/%s" % (y_object.month, y_object.day)
    # Poll-and-redraw loop; exits only via sys.exit in TEST mode.
    while True:
        scraped_page = requests.get(API_URL)
        # Convert the scraped page to text and trim the JSONP wrapper
        scraped_page = scraped_page.text.replace('loadScoreboard(', '')
        scraped_page = scraped_page[:-1]
        # Create JSON object
        data = json.loads(scraped_page)
        clear_screen()
        for key in data:
            if key == 'games':
                for game_info in data[key]:
                    # extract useful info from JSON
                    game_id = str(game_info['id'])
                    game_clock = game_info['ts']
                    game_stage = game_info['tsc']
                    status = game_info['bs']
                    away_locale = fix_locale(game_info['atn'])
                    away_name = fix_name(game_info['atv']).title()
                    away_score = game_info['ats']
                    away_result = game_info['atc']
                    home_locale = fix_locale(game_info['htn'])
                    home_name = fix_name(game_info['htv']).title()
                    home_score = game_info['hts']
                    home_result = game_info['htc']
                    # Game-id chars 4:6 encode the season stage; '03' = playoffs.
                    # NOTE(review): `playoffs` is never reset between refreshes.
                    if game_id[4:6] == '03':
                        playoffs = True
                        series_game_number = game_id[-1:]
                    # Show today's games
                    if today_date in game_clock.title() \
                            or 'TODAY' in game_clock \
                            or 'LIVE' in status:
                        games_today = True
                        header_text = away_locale + ' ' + away_name + \
                            ' @ ' + home_locale + ' ' + home_name
                        # Show the game number of current 7-game series,
                        # if it's playoff time
                        if playoffs:
                            header_text += ' -- Game ' + series_game_number
                        # Different displays for different states of game:
                        # Game from yesterday, ex: YESTERDAY (FINAL 2nd OT)
                        # Game from today finished, ex: TODAY (FINAL 2nd OT)
                        if 'FINAL' in status:
                            if yesterday_date in game_clock.title():
                                header_text += '\nYESTERDAY '
                            elif today_date in game_clock.title() or 'TODAY' in game_clock:
                                header_text += '\nTODAY '
                            else:
                                header_text += game_clock.title()
                            header_text += '(' + status + ')'
                        # Upcoming game, ex: TUESDAY 4/21, 7:00 PM MDT)
                        elif 'DAY' in game_clock and 'FINAL' not in status:
                            timezone = local_time()
                            header_text += Fore.YELLOW + \
                                '\n(' + game_clock + ', ' + status + \
                                ' ' + timezone + ')' + Fore.RESET
                        # Last 5 minutes of game and all of overtime,
                        # eg. (1:59 3rd PERIOD) in *red* font
                        elif 'LIVE' in status and 'critical' in game_stage:
                            header_text += Fore.RED + \
                                '\n(' + game_clock + ' PERIOD)' + Fore.RESET
                        # Any other time in game
                        # eg. (10:34 1st PERIOD)
                        else:
                            header_text += Fore.YELLOW + \
                                '\n(' + game_clock + Style.RESET_ALL
                            if 'PRE GAME' not in game_clock:
                                header_text += Fore.YELLOW + ' PERIOD'
                            header_text += Fore.YELLOW + ')' + Style.RESET_ALL
                        print(header_text)
                        # Highlight the winner of finished games in blue, games underway in green:
                        if away_result == 'winner':  # Away team wins
                            print(Style.BRIGHT + Fore.BLUE + away_name + ' ' + away_score
                                  + Style.RESET_ALL + ' - ' + home_score + ' ' + home_name)
                        elif home_result == 'winner':  # Home team wins
                            print(away_name + ' ' + away_score + ' - ' + Style.BRIGHT
                                  + Fore.BLUE + home_score + ' ' + home_name + Style.RESET_ALL)
                        elif 'progress' in game_stage or 'critical' in game_stage:  # Game underway
                            print(Fore.GREEN + away_name + ' ' + away_score + ' - '
                                  + home_score + ' ' + home_name + Fore.RESET)
                        print('')
        if not games_today:
            print('\nThere are no NHL games scheduled for today.\n')
        # Perform the sleep only if we're not currently testing
        if TEST is True:
            sys.exit(0)
        else:
            time.sleep(REFRESH_TIME)
        print('\n')
print('\n')
def clear_screen():
    '''os-adaptive screen wipe'''
    command = 'cls' if platform.system() == 'Windows' else 'clear'
    os.system(command)
def fix_locale(team_locale):
    '''expand/correct place names as delivered by the NHL JSON feed'''
    if 'NY ' in team_locale:
        return 'New York'
    return u'Montréal' if 'Montr' in team_locale else team_locale
def fix_name(team_name):
    '''map feed short names onto proper multi-word team names'''
    substring_fixes = (
        ('wings', 'Red Wings'),
        ('jackets', 'Blue Jackets'),
        ('leafs', 'Maple Leafs'),
        ('knights', 'Golden Knights'),
    )
    for fragment, full_name in substring_fixes:
        if fragment in team_name:
            return full_name
    return team_name
def local_time():
    '''return the abbreviated name of the local timezone (via pytz)'''
    now = datetime.datetime.now()
    return reference.LocalTimezone().tzname(now)
def parse_arguments(arguments):
    '''process the argv-style argument list (element 0 is the program name)

    Recognizes --test / -t, which switches the scoreboard into single-pass
    TEST mode (module-level TEST flag).
    '''
    global TEST
    # Idiom fix: slice past argv[0] and use tuple membership instead of a
    # C-style index loop with duplicated equality checks.
    for argument in arguments[1:]:
        if argument in ('--test', '-t'):
            print('Running in TEST mode.\n')
            TEST = True
if __name__ == '__main__':
    init()  # colorama: enable ANSI color handling (needed on Windows)
    parse_arguments(sys.argv)
    main()
# Originally forked from <NAME>'s NHL-Scores - https://github.com/jtf323/NHL-Scores
| StarcoderdataPython |
4986198 | <filename>looking_for_group/rpgcollections/tasks.py
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from ..game_catalog.models import SourceBook, PublishedModule, GameSystem
from . import models
def recalc_library_content(library):
    """
    Recompute and persist the denormalized collection stats on *library*:
    total / print / PDF title counts, sourcebook-or-system entries, and
    module entries.
    """
    sourcebook_ct = ContentType.objects.get_for_model(SourceBook)
    module_ct = ContentType.objects.get_for_model(PublishedModule)
    system_ct = ContentType.objects.get_for_model(GameSystem)
    owned = models.Book.objects.filter(library=library)
    library.num_titles = owned.count()
    library.num_print = owned.filter(in_print=True).count()
    library.num_pdf = owned.filter(in_pdf=True).count()
    # NOTE(review): these are row counts of matching Book entries, not
    # DISTINCT titles — confirm duplicates cannot occur per library.
    library.distinct_sourcebooks = owned.filter(
        Q(content_type=sourcebook_ct) | Q(content_type=system_ct)
    ).count()
    library.distinct_modules = owned.filter(content_type=module_ct).count()
    library.save()
| StarcoderdataPython |
11352520 | from unittest import mock
import constants
from hackassembler import compiler
def test__clean_line():
    """_clean_line strips trailing // comments and surrounding whitespace."""
    test_compiler = compiler.Compiler('fake_src')
    assert test_compiler._clean_line(' FAKE=INSTRUCTION // Fake comment\n') == 'FAKE=INSTRUCTION'
    assert test_compiler._clean_line('// Fake comment\n') == ''
def test__get_loop_labels():
    """Loop labels map to the 16-bit binary index of the following instruction."""
    test_compiler = compiler.Compiler('fake_file')
    test_compiler.code = ['(LABEL1)', 'Fake_instruction', '(LABEL2)']
    test_compiler._get_loop_labels()
    expected_dict = {
        'LABEL1': '0000000000000000',
        'LABEL2': '0000000000000001'
    }
    assert test_compiler.labels == expected_dict
def test__set_new_variable():
    """New variables get consecutive addresses starting at 16 (0b10000)."""
    test_compiler = compiler.Compiler('fake_file')
    test_compiler._set_new_variable('variable1')
    expected_dict = {'variable1': '0000000000010000'}
    assert test_compiler.variables == expected_dict
    assert test_compiler.variables_count == 1
    test_compiler._set_new_variable('variable2')
    expected_dict = {
        'variable1': '0000000000010000',
        'variable2': '0000000000010001'
    }
    assert test_compiler.variables == expected_dict
    assert test_compiler.variables_count == 2
# Bottom-most patch decorator is injected as the first mock argument.
@mock.patch.object(compiler.Compiler, '_write_to_file')
@mock.patch.object(compiler.Compiler, '_get_cleaned_code')
def test_compile(get_cleaned_code_mock, write_to_file_mock):
    """End-to-end: compile() cleans the source, emits instructions, writes output."""
    test_compiler = compiler.Compiler('fake_file')
    test_compiler.code = constants.CODE
    test_compiler.compile()
    get_cleaned_code_mock.assert_called_once()
    write_to_file_mock.assert_called_once()
    assert '\n'.join(test_compiler.instructions) == constants.INSTRUCTIONS
| StarcoderdataPython |
1706568 | <reponame>htlcnn/ironpython-stubs<filename>release/stubs.min/System/Drawing/__init___parts/ImageConverter.py
# NOTE: auto-generated IronPython stub — method bodies are intentionally empty;
# the docstrings mirror the .NET System.Drawing.ImageConverter API.
class ImageConverter(TypeConverter):
 """
 System.Drawing.ImageConverter is a class that can be used to convert System.Drawing.Image objects from one data type to another. Access this class through the System.ComponentModel.TypeDescriptor object.
 ImageConverter()
 """
 def CanConvertFrom(self,*__args):
  """
  CanConvertFrom(self: ImageConverter,context: ITypeDescriptorContext,sourceType: Type) -> bool
  Determines whether this System.Drawing.ImageConverter can convert an instance of a specified
  type to an System.Drawing.Image,using the specified context.
  context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
  sourceType: A System.Type that specifies the type you want to convert from.
  Returns: This method returns true if this System.Drawing.ImageConverter can perform the conversion;
  otherwise,false.
  """
  pass
 def CanConvertTo(self,*__args):
  """
  CanConvertTo(self: ImageConverter,context: ITypeDescriptorContext,destinationType: Type) -> bool
  Determines whether this System.Drawing.ImageConverter can convert an System.Drawing.Image to an
  instance of a specified type,using the specified context.
  context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
  destinationType: A System.Type that specifies the type you want to convert to.
  Returns: This method returns true if this System.Drawing.ImageConverter can perform the conversion;
  otherwise,false.
  """
  pass
 def ConvertFrom(self,*__args):
  """
  ConvertFrom(self: ImageConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object) -> object
  Converts a specified object to an System.Drawing.Image.
  context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
  culture: A System.Globalization.CultureInfo that holds information about a specific culture.
  value: The System.Object to be converted.
  Returns: If this method succeeds,it returns the System.Drawing.Image that it created by converting the
  specified object. Otherwise,it throws an exception.
  """
  pass
 def ConvertTo(self,*__args):
  """
  ConvertTo(self: ImageConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object,destinationType: Type) -> object
  Converts an System.Drawing.Image (or an object that can be cast to an System.Drawing.Image) to
  the specified type.
  context: A formatter context. This object can be used to get more information about the environment this
  converter is being called from. This may be null,so you should always check. Also,properties
  on the context object may also return null.
  culture: A System.Globalization.CultureInfo object that specifies formatting conventions used by a
  particular culture.
  value: The System.Drawing.Image to convert.
  destinationType: The System.Type to convert the System.Drawing.Image to.
  Returns: This method returns the converted object.
  """
  pass
 def GetProperties(self,*__args):
  """
  GetProperties(self: ImageConverter,context: ITypeDescriptorContext,value: object,attributes: Array[Attribute]) -> PropertyDescriptorCollection
  Gets the set of properties for this type.
  context: A type descriptor through which additional context can be provided.
  value: The value of the object to get the properties for.
  attributes: An array of System.Attribute objects that describe the properties.
  Returns: The set of properties that should be exposed for this data type. If no properties should be
  exposed,this can return null. The default implementation always returns null.
  """
  pass
 def GetPropertiesSupported(self,context=None):
  """
  GetPropertiesSupported(self: ImageConverter,context: ITypeDescriptorContext) -> bool
  Indicates whether this object supports properties. By default,this is false.
  context: A type descriptor through which additional context can be provided.
  Returns: This method returns true if the erload:System.Drawing.ImageConverter.GetProperties method should
  be called to find the properties of this object.
  """
  pass
8007387 | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# TODO: The User import above is unused for now; it will be needed once user
# accounts are added (that feature may end up living in a different app).
4953803 | <filename>Lib/test/test_trace.py
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
import gc
# A very basic example. If this fails, we're in deep trouble.
# Simplest traced function: one call, one line, one return event.
def basic():
    return 1
basic.events = [(0, 'call'),
                (1, 'line'),
                (1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else clause) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
# NOTE: the .events lists below are (lineno - co_firstlineno, event) pairs,
# so the relative line layout of each traced function must not change.
def arigo_example():
    x = 1
    del x
    while 0:  # optimized away: produces no line event
        pass
    x = 1
arigo_example.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (5, 'line'),
                        (5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
    x = 1
    del x
    x = 1
one_instr_line.events = [(0, 'call'),
                         (1, 'line'),
                         (2, 'line'),
                         (3, 'line'),
                         (3, 'return')]
def no_pop_tops(): # 0
    x = 1 # 1
    for a in range(2): # 2
        if a: # 3
            x = 1 # 4
        else: # 5
            x = 1 # 6
no_pop_tops.events = [(0, 'call'),
                      (1, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (6, 'line'),
                      (2, 'line'),
                      (3, 'line'),
                      (4, 'line'),
                      (2, 'line'),
                      (2, 'return')]
def no_pop_blocks():
    y = 1
    while not y:  # false immediately, so `bla` is never reached
        bla
    x = 1
no_pop_blocks.events = [(0, 'call'),
                        (1, 'line'),
                        (2, 'line'),
                        (4, 'line'),
                        (4, 'return')]
# call.events references called() at negative offsets, so nothing may be
# inserted between these two defs.
def called(): # line -3
    x = 1
def call(): # line 0
    called()
call.events = [(0, 'call'),
               (1, 'line'),
               (-3, 'call'),
               (-2, 'line'),
               (-2, 'return'),
               (1, 'return')]
# test_raise.events references raises() at negative offsets; keep the two
# defs adjacent.
def raises():
    raise Exception
def test_raise():
    try:
        raises()
    except Exception, exc:
        x = 1
test_raise.events = [(0, 'call'),
                     (1, 'line'),
                     (2, 'line'),
                     (-3, 'call'),
                     (-2, 'line'),
                     (-2, 'exception'),
                     (-2, 'return'),
                     (2, 'exception'),
                     (3, 'line'),
                     (4, 'line'),
                     (4, 'return')]
# These helpers install the trace function mid-call (also on the caller's
# frame), so only the events after installation are recorded.
def _settrace_and_return(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
    _settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
    sys.settrace(tracefunc)
    sys._getframe().f_back.f_trace = tracefunc
    raise RuntimeError
def settrace_and_raise(tracefunc):
    try:
        _settrace_and_raise(tracefunc)
    except RuntimeError, exc:
        pass
settrace_and_raise.events = [(2, 'exception'),
                             (3, 'line'),
                             (4, 'line'),
                             (4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generate for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
    a = 5
    b = 5
    if a == b:
        b = a+1
    else:
        pass
ireturn_example.events = [(0, 'call'),
                          (1, 'line'),
                          (2, 'line'),
                          (3, 'line'),
                          (4, 'line'),
                          (6, 'line'),
                          (6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
    items = range(0, 3)
    try:
        i = 0
        while 1:
            b = items[i]; i+=1
    except IndexError:
        pass
tightloop_example.events = [(0, 'call'),
                            (1, 'line'),
                            (2, 'line'),
                            (3, 'line'),
                            (4, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'line'),
                            (5, 'exception'),
                            (6, 'line'),
                            (7, 'line'),
                            (7, 'return')]
def tighterloop_example():
    items = range(1, 4)
    try:
        i = 0
        while 1: i = items[i]  # loop body on the `while` line itself
    except IndexError:
        pass
tighterloop_example.events = [(0, 'call'),
                              (1, 'line'),
                              (2, 'line'),
                              (3, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'line'),
                              (4, 'exception'),
                              (5, 'line'),
                              (6, 'line'),
                              (6, 'return')]
# generator_example.events references generator_function at negative offsets;
# keep these two defs adjacent with no inserted lines.
def generator_function():
    try:
        yield True
        "continued"
    finally:
        "finally"
def generator_example():
    # any() will leave the generator before its end
    x = any(generator_function())
    # the following lines were not traced
    for x in range(10):
        y = x
generator_example.events = ([(0, 'call'),
                             (2, 'line'),
                             (-6, 'call'),
                             (-5, 'line'),
                             (-4, 'line'),
                             (-4, 'return'),
                             (-4, 'call'),
                             (-4, 'exception'),
                             (-1, 'line'),
                             (-1, 'return')] +
                            [(5, 'line'), (6, 'line')] * 10 +
                            [(5, 'line'), (5, 'return')])
class Tracer:
    """Trace-function holder: records (lineno, event) pairs into self.events."""
    def __init__(self):
        self.events = []
    def trace(self, frame, event, arg):
        self.events.append((frame.f_lineno, event))
        return self.trace
    def traceWithGenexp(self, frame, event, arg):
        # Evaluate a generator expression inside the trace callback to
        # reproduce issue1265 (a genexp in the tracer stopped tracing).
        (o for o in [1])
        self.events.append((frame.f_lineno, event))
        return self.trace
class TraceTestCase(unittest.TestCase):
# Disable gc collection when tracing, otherwise the
# deallocators may be traced as well.
    def setUp(self):
        """Disable GC so deallocator frames don't pollute the traced events."""
        self.using_gc = gc.isenabled()
        gc.disable()
    def tearDown(self):
        # Restore GC only if it was enabled before the test.
        if self.using_gc:
            gc.enable()
    def compare_events(self, line_offset, events, expected_events):
        """Fail with a diff unless the recorded (lineno, event) pairs,
        shifted by line_offset, match expected_events exactly."""
        events = [(l - line_offset, e) for (l, e) in events]
        if events != expected_events:
            self.fail(
                "events did not match expectation:\n" +
                "\n".join(difflib.ndiff([str(x) for x in expected_events],
                                        [str(x) for x in events])))
    def run_and_compare(self, func, events):
        """Trace a zero-arg func and compare against an explicit event list."""
        tracer = Tracer()
        sys.settrace(tracer.trace)
        func()
        sys.settrace(None)
        self.compare_events(func.func_code.co_firstlineno,
                            tracer.events, events)
    def run_test(self, func):
        """Trace func and compare against its attached func.events list."""
        self.run_and_compare(func, func.events)
    def run_test2(self, func):
        """Variant for funcs that install the trace function themselves."""
        tracer = Tracer()
        func(tracer.trace)
        sys.settrace(None)
        self.compare_events(func.func_code.co_firstlineno,
                            tracer.events, func.events)
    # NOTE: these two are helpers (no test_ prefix); sys.gettrace must round-trip
    # whatever was passed to sys.settrace.
    def set_and_retrieve_none(self):
        sys.settrace(None)
        assert sys.gettrace() is None
    def set_and_retrieve_func(self):
        def fn(*args):
            pass
        sys.settrace(fn)
        try:
            assert sys.gettrace() is fn
        finally:
            sys.settrace(None)
    # One test per module-level example function defined above.
    def test_01_basic(self):
        self.run_test(basic)
    def test_02_arigo(self):
        self.run_test(arigo_example)
    def test_03_one_instr(self):
        self.run_test(one_instr_line)
    def test_04_no_pop_blocks(self):
        self.run_test(no_pop_blocks)
    def test_05_no_pop_tops(self):
        self.run_test(no_pop_tops)
    def test_06_call(self):
        self.run_test(call)
    def test_07_raise(self):
        self.run_test(test_raise)
    def test_08_settrace_and_return(self):
        self.run_test2(settrace_and_return)
    def test_09_settrace_and_raise(self):
        self.run_test2(settrace_and_raise)
    def test_10_ireturn(self):
        self.run_test(ireturn_example)
    def test_11_tightloop(self):
        self.run_test(tightloop_example)
    def test_12_tighterloop(self):
        self.run_test(tighterloop_example)
    def test_13_genexp(self):
        self.run_test(generator_example)
        # issue1265: if the trace function contains a generator,
        # and if the traced function contains another generator
        # that is not completely exhausted, the trace stopped.
        # Worse: the 'finally' clause was not invoked.
        tracer = Tracer()
        sys.settrace(tracer.traceWithGenexp)
        generator_example()
        sys.settrace(None)
        self.compare_events(generator_example.__code__.co_firstlineno,
                            tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
exec("def f():\n" + "\n" * 256 + " pass")
self.run_and_compare(
f,
[(0, 'call'),
(257, 'line'),
(257, 'return')])
def test_tracing_turned_on_inside_generator(self):
def gen():
yield 1 # Tracing starts on the re-entry from this yield.
yield 2
tracer = Tracer()
g = gen()
v1 = g.next()
sys.settrace(tracer.trace)
v2 = g.next()
try:
g.next()
except StopIteration:
pass
else:
self.fail('Expected StopIteration')
sys.settrace(None)
self.compare_events(gen.func_code.co_firstlineno,
tracer.events,
[(1, 'call'),
(2, 'line'),
(2, 'return'),
(2, 'call'),
(2, 'return')])
self.assertEquals((v1, v2), (1, 2))
class RaisingTraceFuncTestCase(unittest.TestCase):
    """Verify the interpreter copes with trace functions that raise."""
    def trace(self, frame, event, arg):
        """A trace function that raises an exception in response to a
        specific trace event."""
        self.output.append(event)
        if event == self.raiseOnEvent:
            raise ValueError # just something that isn't RuntimeError
        else:
            return self.trace
    def f(self):
        """The function to trace; raises an exception if that's the case
        we're testing, so that the 'exception' trace event fires."""
        if self.raiseOnEvent == 'exception':
            x = 0
            y = 1/x
        else:
            return 1
    def run_test_for_event(self, event, expected_events):
        """Tests that an exception raised in response to the given event is
        handled OK."""
        self.raiseOnEvent = event
        try:
            # Loop past the recursion limit to prove the recursion counter
            # is reset after each raising trace call.
            for i in xrange(sys.getrecursionlimit() + 1):
                self.output = []
                sys.settrace(self.trace)
                try:
                    self.f()
                except ValueError:
                    pass
                else:
                    self.fail("exception not thrown!")
                self.assertEquals(self.output, expected_events)
        except RuntimeError:
            self.fail("recursion counter not reset")
    # Test the handling of exceptions raised by each kind of trace event.
    def test_call(self):
        self.run_test_for_event('call', ['call'])
    def test_line(self):
        self.run_test_for_event('line', ['call', 'line'])
    def test_return(self):
        self.run_test_for_event('return', ['call', 'line', 'line', 'return'])
    def test_exception(self):
        self.run_test_for_event('exception',
                                ['call', 'line', 'line', 'line', 'exception'])
    def test_trash_stack(self):
        # NOTE: g() keys off f's internal line offsets
        # (co_firstlineno + 2), so f's body must keep its exact layout.
        def f():
            for i in range(5):
                print i # line tracing will raise an exception at this line
        def g(frame, why, extra):
            if (why == 'line' and
                frame.f_lineno == f.func_code.co_firstlineno + 2):
                raise RuntimeError, "i am crashing"
            return g
        sys.settrace(g)
        try:
            f()
        except RuntimeError:
            # the test is really that this doesn't segfault:
            import gc
            gc.collect()
        else:
            self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
    """Defines a trace function that jumps from one place to another,
    with the source and destination lines of the jump being defined by
    the 'jump' property of the function under test."""
    def __init__(self, function):
        self.function = function
        self.jumpFrom = function.jump[0]  # line offset to jump from
        self.jumpTo = function.jump[1]    # line offset (may be a non-int on purpose)
        self.done = False
    def trace(self, frame, event, arg):
        # Perform the jump exactly once, the first time the jump-from line
        # of the function under test is reached.
        if not self.done and frame.f_code == self.function.func_code:
            firstLine = frame.f_code.co_firstlineno
            if frame.f_lineno == firstLine + self.jumpFrom:
                # Cope with non-integer self.jumpTo (because of
                # no_jump_to_non_integers below).
                try:
                    frame.f_lineno = firstLine + self.jumpTo
                except TypeError:
                    frame.f_lineno = self.jumpTo
                self.done = True
        return self.trace
# The first set of 'jump' tests are for things that are allowed:
# NOTE: each fixture's .jump/.output refer to line offsets within that
# function, so no lines may be added or removed inside the bodies.
def jump_simple_forwards(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
    output.append(1)
    output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
    for i in 1, 2:
        output.append(2)
        for j in [3]: # Also tests jumping over a block
            output.append(4)
    output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
    output.append(1)
    for i in [1]:
        output.append(3)
        for j in [2]: # Also tests jumping over a block
            output.append(5)
        output.append(6)
    output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
    output.append(1)
    # Jumping to this line should skip to the next one.
    output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
    output.append(1)
    output.append(2)
    output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
    try:
        output.append(2)
    finally:
        output.append(4)
        try:
            output.append(6)
        finally:
            output.append(8)
        output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
# Each fixture attempts an illegal jump; the resulting ValueError message
# is checked for a characteristic word ('after', 'before', 'except', ...).
# NOTE: .jump/.output refer to line offsets within each function, so no
# lines may be added or removed inside the bodies.
def no_jump_too_far_forwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError, e:
        output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
    try:
        output.append(2)
        output.append(3)
    except ValueError, e:
        output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
    try:
        output.append(2)
    except:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
    try:
        output.append(2)
    except ValueError:
        e = sys.exc_info()[1]
        output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
    try:
        output.append(2)
    except ValueError, e:
        output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
    try:
        output.append(2)
    except (ValueError, RuntimeError), e:
        output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
    try:
        output.append(2)
        for i in 1, 2:
            output.append(4)
    except ValueError, e:
        output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
    try:
        for i in 1, 2:
            output.append(3)
        output.append(4)
    except ValueError, e:
        output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
    try:
        try:
            output.append(3)
            x = 1
        finally:
            output.append(6)
    except ValueError, e:
        output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
    try:
        try:
            output.append(3)
        finally:
            output.append(5)
            output.append(6)
    except ValueError, e:
        output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
    try:
        output.append(2)
    except ValueError, e:
        output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
    try:
        previous_frame = sys._getframe().f_back
        previous_frame.f_lineno = previous_frame.f_lineno
    except ValueError, e:
        # This is the exception we wanted; make sure the error message
        # talks about trace functions.
        if 'trace' not in str(e):
            raise
    else:
        # Something's wrong - the expected exception wasn't raised.
        raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
    """Exercise assigning to frame.f_lineno (debugger 'jump') via tracing."""
    def compare_jump_output(self, expected, received):
        if received != expected:
            self.fail( "Outputs don't match:\n" +
                       "Expected: " + repr(expected) + "\n" +
                       "Received: " + repr(received))
    def run_test(self, func):
        # Install a JumpTracer configured from func.jump, run func, and
        # compare what it appended to `output` against func.output.
        tracer = JumpTracer(func)
        sys.settrace(tracer.trace)
        output = []
        func(output)
        sys.settrace(None)
        self.compare_jump_output(func.output, output)
    def test_01_jump_simple_forwards(self):
        self.run_test(jump_simple_forwards)
    def test_02_jump_simple_backwards(self):
        self.run_test(jump_simple_backwards)
    def test_03_jump_out_of_block_forwards(self):
        self.run_test(jump_out_of_block_forwards)
    def test_04_jump_out_of_block_backwards(self):
        self.run_test(jump_out_of_block_backwards)
    def test_05_jump_to_codeless_line(self):
        self.run_test(jump_to_codeless_line)
    def test_06_jump_to_same_line(self):
        self.run_test(jump_to_same_line)
    def test_07_jump_in_nested_finally(self):
        self.run_test(jump_in_nested_finally)
    def test_08_no_jump_too_far_forwards(self):
        self.run_test(no_jump_too_far_forwards)
    def test_09_no_jump_too_far_backwards(self):
        self.run_test(no_jump_too_far_backwards)
    def test_10_no_jump_to_except_1(self):
        self.run_test(no_jump_to_except_1)
    def test_11_no_jump_to_except_2(self):
        self.run_test(no_jump_to_except_2)
    def test_12_no_jump_to_except_3(self):
        self.run_test(no_jump_to_except_3)
    def test_13_no_jump_to_except_4(self):
        self.run_test(no_jump_to_except_4)
    def test_14_no_jump_forwards_into_block(self):
        self.run_test(no_jump_forwards_into_block)
    def test_15_no_jump_backwards_into_block(self):
        self.run_test(no_jump_backwards_into_block)
    def test_16_no_jump_into_finally_block(self):
        self.run_test(no_jump_into_finally_block)
    def test_17_no_jump_out_of_finally_block(self):
        self.run_test(no_jump_out_of_finally_block)
    def test_18_no_jump_to_non_integers(self):
        self.run_test(no_jump_to_non_integers)
    def test_19_no_jump_without_trace_function(self):
        no_jump_without_trace_function()
    def test_20_large_function(self):
        # Build a >1000-line function at runtime; the jump target sits past
        # the huge docstring-like constant.  The '# line N' comments inside
        # the string document the offsets used by f.jump.
        d = {}
        exec("""def f(output): # line 0
            x = 0 # line 1
            y = 1 # line 2
            ''' # line 3
            %s # lines 4-1004
            ''' # line 1005
            x += 1 # line 1006
            output.append(x) # line 1007
            return""" % ('\n' * 1000,), d)
        f = d['f']
        f.jump = (2, 1007)
        f.output = [0]
        self.run_test(f)
    def test_no_jump_out_of_finally_after_backedge(self):
        # NOTE: SecondJumpTracer below keys off loop_in_finally's line
        # offsets (+7 and +3), so its body must keep its exact layout.
        def loop_in_finally(output):
            a = 1 # 1
            try: # 2
                if a == 1: # 3
                    try: # 4
                        output.append(5)
                    finally: # 6
                        while a == 1: #7
                            a = 2 # 8
                output.append(9)
            except ValueError, e:
                output.append('finally' in str(e))
        # We hit line 7 twice in the above code, once from the top as
        # we enter the finally block, and again from the bottom as the
        # loop iterates. The back-edge that keeps the loop iterating
        # jumps to halfway into the line, so it doesn't get any code
        # that exists on line entry. In one version of the LLVM
        # tracing implementation, this made the block checking
        # algorithm in set_f_lineno() fail.
        class SecondJumpTracer:
            times_hit = 0
            def trace(self, frame, event, arg):
                if frame.f_code == loop_in_finally.func_code:
                    firstLine = frame.f_code.co_firstlineno
                    if frame.f_lineno == firstLine + 7:
                        self.times_hit += 1
                        if self.times_hit == 2:
                            # Should raise about jumping out of a finally
                            # block. The resulting ValueError is caught in
                            # loop_in_finally().
                            frame.f_lineno = firstLine + 3
                return self.trace
        tracer = SecondJumpTracer()
        output = []
        sys.settrace(tracer.trace)
        loop_in_finally(output)
        sys.settrace(None)
        self.compare_jump_output([5, True], output)
def test_main():
    """Run all trace/jump test cases via the Python 2 test_support helper."""
    test_support.run_unittest(
        TraceTestCase,
        RaisingTraceFuncTestCase,
        JumpTestCase
    )
if __name__ == "__main__":
    test_main()
| StarcoderdataPython |
3242693 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
# Board metadata consumed by the Espruino build and documentation tooling.
# This file describes the Nordic nRF51 DK (PCA10028).
info = {
    'name' : "nRF51 Development Kit",
    'link' : [ "https://www.nordicsemi.com/Products/Bluetooth-Smart-Bluetooth-low-energy/nRF51822" ],
    # This is the PCA10028
    'default_console' : "EV_SERIAL1",
    'default_console_tx' : "D9",
    'default_console_rx' : "D11",
    'default_console_baudrate' : "9600",
    'variables' : 1050,  # number of JS variables to allocate (RAM budget)
    'binary_name' : 'espruino_%v_nrf51822.bin',
    'build' : {
        'optimizeflags' : '-Os',
        'libraries' : [
            'BLUETOOTH',
            'GRAPHICS',
        ],
        'makefile' : [
            'SAVE_ON_FLASH=1',
            'DEFINES+=-DCONFIG_GPIO_AS_PINRESET', # Allow the reset pin to work
            'DEFINES += -DUSE_DEBUGGER -DUSE_TAB_COMPLETE',
            'DEFINES += -DBOARD_PCA10028'
        ]
    }
};
# MCU description: 'ram'/'flash' are in kB, 'speed' in MHz, the rest are
# peripheral counts.
chip = {
    'part' : "NRF51822",
    'family' : "NRF51",
    'package' : "QFN48",
    'ram' : 32,
    'flash' : 256,
    'speed' : 16,
    'usart' : 1,
    'spi' : 1,
    'i2c' : 1,
    'adc' : 0,
    'dac' : 0,
    # If using DFU bootloader, it sits at 0x3C000 - 0x40000 (0x40000 is end of flash)
    # Might want to change 256 -> 240 in the code below
    'saved_code' : {
        'address' : ((256 - 3) * 1024),  # saved-code region: last 3 pages of flash
        'page_size' : 1024,
        'pages' : 3,
        # presumably 108 kB is reserved for softdevice/firmware -- TODO confirm
        'flash_available' : (256 - 108 - 3)
    }
};
# Built-in peripherals: the DK's four buttons and four LEDs (all negated in
# software, per the notes below) plus the UART and its flow-control pins.
devices = {
    'BTN1' : { 'pin' : 'D17', 'pinstate' : 'IN_PULLDOWN'}, # Pin negated in software
    'BTN2' : { 'pin' : 'D18', 'pinstate' : 'IN_PULLDOWN'}, # Pin negated in software
    'BTN3' : { 'pin' : 'D19', 'pinstate' : 'IN_PULLDOWN'}, # Pin negated in software
    'BTN4' : { 'pin' : 'D20', 'pinstate' : 'IN_PULLDOWN'}, # Pin negated in software
    'LED1' : { 'pin' : 'D21' }, # Pin negated in software
    'LED2' : { 'pin' : 'D22' }, # Pin negated in software
    'LED3' : { 'pin' : 'D23' }, # Pin negated in software
    'LED4' : { 'pin' : 'D24' }, # Pin negated in software
    'RX_PIN_NUMBER' : { 'pin' : 'D11'},
    'TX_PIN_NUMBER' : { 'pin' : 'D9'},
    'CTS_PIN_NUMBER' : { 'pin' : 'D10'},
    'RTS_PIN_NUMBER' : { 'pin' : 'D8'},
};
# left-right, or top-bottom order THIS IS INCORRECT!!!!!
# NOTE(review): the author flags this connector layout as wrong; it appears
# to be used only for documentation diagrams -- verify before relying on it.
board = {
    'left' : [ 'VDD', 'VDD', 'RESET', 'VDD','5V','GND','GND','PD3','PD4','PD28','PD29','PD30','PD31'],
    'right' : [ 'PD27', 'PD26', 'PD2', 'GND', 'PD25','PD24','PD23', 'PD22','PD20','PD19','PD18','PD17','PD16','PD15','PD14','PD13','PD12','PD11','PD10','PD9','PD8','PD7','PD6','PD5','PD21','PD1','PD0'],
};
# Extra CSS for the documentation diagram (intentionally empty here).
board["_css"] = """
""";
def get_pins():
    """Return the board's pin list with peripheral functions attached."""
    pins = pinutils.generate_pins(0, 31)  # 32 General Purpose I/O Pins.

    # Fixed-function assignments: (pin name, peripheral function label).
    special_functions = [
        ("PD27", "XL1"), ("PD26", "XL2"),
        ("PD8", "RTS"), ("PD9", "TXD"), ("PD10", "CTS"), ("PD11", "RXD"),
        ("PD17", "Button_1"), ("PD18", "Button_2"),
        ("PD19", "Button_3"), ("PD20", "Button_4"),
        ("PD21", "LED_1"), ("PD22", "LED_2"),
        ("PD23", "LED_3"), ("PD24", "LED_4"),
    ]
    for pin_name, function in special_functions:
        pinutils.findpin(pins, pin_name, True)["functions"][function] = 0

    # ADC inputs: PD0..PD6 carry ADC1_IN1..ADC1_IN7.
    for idx in range(7):
        pinutils.findpin(pins, "PD%d" % idx, True)["functions"]["ADC1_IN%d" % (idx + 1)] = 0

    # Make buttons and LEDs negated
    for pin_name in ("PD17", "PD18", "PD19", "PD20",
                     "PD21", "PD22", "PD23", "PD24"):
        pinutils.findpin(pins, pin_name, True)["functions"]["NEGATED"] = 0

    # everything is non-5v tolerant
    for pin in pins:
        pin["functions"]["3.3"] = 0

    # The boot/reset button will function as a reset button in normal
    # operation. Pin reset on PD21 needs to be enabled on the nRF52832
    # device for this to work.
    return pins
| StarcoderdataPython |
1754868 | <filename>mongodb_tryndx-master/app.py
from re import template
import config
from flask import Flask
from flask_pymongo import PyMongo as DB
import controllers
from flask_mail import *
from random import *
from flask_cors import CORS
# from OpenSSL import SSL
# context = SSL.Context(SSL.PROTOCOL_TLSv1_2)
# context.use_privatekey_file('/etc/letsencrypt/live/python.webdevelopmentsolution.net/privkey.pem')
# context.use_certificate_file('/etc/letsencrypt/live/python.webdevelopmentsolution.net/fullchain.pem')
# Flask application bootstrap: templates served from the verification-mail
# folder, JSON key order preserved, MongoDB + mail configured, blueprints
# registered, and the dev server started when run directly.
app = Flask(__name__, template_folder="mails/verify_login/")
app.config['JSON_SORT_KEYS'] = False
# NOTE(review): wildcard CORS ('*') allows any origin -- confirm this is
# acceptable for production.
cors = CORS(app, resources={r"/*": {"origins": "*"}})
# NOTE(review): Flask-CORS reads the config key 'CORS_HEADERS' (with an
# underscore); 'CORS-HEADERS' is most likely ignored -- verify.
app.config['CORS-HEADERS'] = 'Content-Type'
app.config['MONGO_URI'] = config.local.DB_URI
mongo = DB(app)
# Outgoing mail: Gmail over implicit SSL on port 465 (hence TLS off, SSL on).
app.config["MAIL_SERVER"]='smtp.gmail.com'
app.config["MAIL_PORT"] = 465
# NOTE(review): mail credentials are hard-coded in source; move them to
# environment variables or the config module.
app.config["MAIL_USERNAME"] = '<EMAIL>'
app.config['MAIL_PASSWORD'] = '<PASSWORD>$'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
# Registering the Blueprint defined in controller.admin
app.register_blueprint(controllers.admin.admin(mongo.db, config.local,mail))
app.register_blueprint(controllers.user.user(mongo.db,config.local,mail))
app.register_blueprint(controllers.payments.payment(mongo.db,config.local,mail))
if __name__ == '__main__':
    # app.run(host="0.0.0.0", port=3082, debug=True, ssl_context=("/etc/letsencrypt/live/python.webdevelopmentsolution.net/fullchain.pem", "/etc/letsencrypt/live/python.webdevelopmentsolution.net/privkey.pem")) # Running the Application.
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to
    # the network -- disable for production.
    app.run(host="0.0.0.0", port=3082, debug=True) # Running the Application.c
| StarcoderdataPython |
1876918 | from datetime import time
import unittest
from term import Term
from day import Day
from lesson import Lesson
from teacher import Teacher
from timetable1 import Timetable1
from break_module import Break
from timetable2 import Timetable2
class Test_Term(unittest.TestCase):
    """Unit tests for the Term/Lesson/Teacher/Timetable/Break classes.

    NOTE(review): the two active tests below only print the timetable and
    make no assertions, so they can never fail -- confirm the expected
    behaviour and add asserts.
    """
    # tests from the in-class assignment (translated from Polish:
    # "testy zad z zaj")
    # def test_stringify_teacher(self):
    #     teacherInstance = Teacher("Koko", "Roko")
    #     self.assertEqual(str(teacherInstance), "Prowadzący: <NAME>")
    # def test_add_teacher(self):
    #     termInstance = Term(9, 30, 45, Day.FRI)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 8:30")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     teacherInstance = Teacher("Koko", "Roko")
    #     lessonInstance + teacherInstance
    #     self.assertEqual(lessonInstance.teacher, teacherInstance)
    # def test_remove_teacher(self):
    #     termInstance = Term(9, 30, 45, Day.FRI)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 8:30")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     teacherInstance = Teacher("Koko", "Roko")
    #     lessonInstance + teacherInstance
    #     self.assertEqual(lessonInstance.teacher, teacherInstance)
    #     lessonInstance - teacherInstance
    #     self.assertEqual(lessonInstance.teacher, None)
    # def test_add_teacher_too_many_hours(self):
    #     termInstance = Term(9, 30, 45, Day.FRI)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 13:30")
    #     termInstance2 = Term(9, 30, 45, Day.FRI)
    #     termInstance2.setTerm("3 II 2021 13:30 - 3 II 2021 18:00")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     lessonInstance2 = Lesson(termInstance, "kuku", 2)
    #     teacherInstance = Teacher("Koko", "Roko")
    #     lessonInstance + teacherInstance
    #     self.assertEqual(lessonInstance.teacher, teacherInstance)
    #     lessonInstance2 + teacherInstance
    #     self.assertEqual(lessonInstance2.teacher, None)
    # preparatory tests no. 3 from earlier labs (translated from Polish:
    # "testy przygotowawcze nr.3 z poprzednich lab")
    # def test_create_table(self):
    #     termInstance = Term(9, 30, 45, Day.WED)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 9:30")
    #     termInstance2 = Term(9, 30, 45, Day.THU)
    #     termInstance2.setTerm("4 II 2021 9:30 - 4 II 2021 11:00")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     lessonInstance2 = Lesson(termInstance2, "kuki", 2)
    #     timetableInstance = Timetable1()
    #     timetableInstance.put(lessonInstance)
    #     timetableInstance.put(lessonInstance2)
    #     timetableInstance.perform([])
    #     print(timetableInstance)
    #     self.assertEqual(len(timetableInstance.lessons), 2)
    #     self.assertEqual(timetableInstance.lessons[0].term.hour, 8)
    #     self.assertEqual(timetableInstance.lessons[0].term.minute, 0)
    #     self.assertEqual(timetableInstance.lessons[1].term.hour, 9)
    #     self.assertEqual(timetableInstance.lessons[1].term.minute, 30)
    # def test_move_ahead(self):
    #     termInstance = Term(9, 30, 45, Day.WED)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 9:30")
    #     termInstance2 = Term(9, 30, 45, Day.THU)
    #     termInstance2.setTerm("4 II 2021 9:30 - 4 II 2021 11:00")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     lessonInstance2 = Lesson(termInstance2, "kuki", 2)
    #     timetableInstance = Timetable1()
    #     timetableInstance.put(lessonInstance)
    #     timetableInstance.put(lessonInstance2)
    #     timetableInstance.perform(["d+", "d-", "d+", "d-"])
    #     print(timetableInstance)
    #     self.assertEqual(timetableInstance.lessons[0].term.day, Day.FRI)
    #     self.assertEqual(timetableInstance.lessons[1].term.day, Day.TUE)
    # def test_move_fail(self):
    #     termInstance = Term(9, 30, 45, Day.WED)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 9:30")
    #     termInstance2 = Term(9, 30, 45, Day.THU)
    #     termInstance2.setTerm("4 II 2021 9:30 - 4 II 2021 11:00")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     lessonInstance2 = Lesson(termInstance2, "kuki", 2)
    #     timetableInstance = Timetable1()
    #     timetableInstance.put(lessonInstance)
    #     timetableInstance.put(lessonInstance2)
    #     timetableInstance.perform(["d+", "d-", "d+", "d-", "d+"])
    #     print(timetableInstance)
    #     self.assertEqual(timetableInstance.lessons[0].term.day, Day.FRI)
    #     self.assertEqual(timetableInstance.lessons[1].term.day, Day.TUE)
    # preparatory tests no. 1 (translated from Polish:
    # "testy przygotowawcze nr.1")
    # def test_break_table_create(self):
    #     termInstance = Term(9, 30, 45, Day.WED)
    #     termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 9:30")
    #     termInstance2 = Term(9, 30, 45, Day.FRI)
    #     termInstance2.setTerm("5 II 2021 13:00 - 5 II 2021 14:20")
    #     teacherInstance = Teacher("Koko", "Roko")
    #     lessonInstance = Lesson(termInstance, "koko", 2)
    #     lessonInstance + teacherInstance
    #     lessonInstance2 = Lesson(termInstance2, "kuki", 2, True)
    #     lessonInstance2 + teacherInstance
    #     breakInstance = Break(9, 30, 10)
    #     breakInstance2 = Break(12, 50, 10)
    #     timetableInstance = Timetable2([breakInstance, breakInstance2])
    #     timetableInstance.put(lessonInstance)
    #     timetableInstance.put(lessonInstance2)
    #     timetableInstance.perform([])
    #     print(timetableInstance)
    #     self.assertEqual(timetableInstance.breaks[0], breakInstance)
    #     self.assertEqual(timetableInstance.breaks[1], breakInstance2)
    #     self.assertEqual(timetableInstance.lessons[0].term.day, Day.WED)
    #     self.assertEqual(timetableInstance.lessons[0].term.hour, 8)
    #     self.assertEqual(timetableInstance.lessons[0].term.minute, 0)
    #     self.assertEqual(timetableInstance.lessons[0].term.duration, 90)
    #     self.assertEqual(timetableInstance.lessons[1].term.day, Day.FRI)
    #     self.assertEqual(timetableInstance.lessons[1].term.hour, 13)
    #     self.assertEqual(timetableInstance.lessons[1].term.minute, 0)
    #     self.assertEqual(timetableInstance.lessons[1].term.duration, 80)
    def test_break_fail_put(self):
        """Put a lesson that collides with a configured break (no asserts)."""
        termInstance = Term(9, 30, 45, Day.WED)
        termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 9:30")
        termInstance2 = Term(9, 30, 45, Day.FRI)
        termInstance2.setTerm("5 II 2021 12:55 - 5 II 2021 13:00")
        teacherInstance = Teacher("Koko", "Roko")
        lessonInstance = Lesson(termInstance, "koko", 2)
        lessonInstance + teacherInstance
        lessonInstance2 = Lesson(termInstance2, "kuki", 2)
        lessonInstance2 + teacherInstance
        breakInstance = Break(9, 30, 10)
        breakInstance2 = Break(12, 50, 10)
        timetableInstance = Timetable2([breakInstance, breakInstance2])
        timetableInstance.put(lessonInstance)
        timetableInstance.put(lessonInstance2)
        print(timetableInstance)
    def test_break_put_overdrive(self):
        """Same as above but the second lesson has the extra flag set (no asserts)."""
        termInstance = Term(9, 30, 45, Day.WED)
        termInstance.setTerm("3 II 2021 8:00 - 3 II 2021 9:30")
        termInstance2 = Term(9, 30, 45, Day.FRI)
        termInstance2.setTerm("5 II 2021 12:55 - 5 II 2021 13:00")
        teacherInstance = Teacher("Koko", "Roko")
        lessonInstance = Lesson(termInstance, "koko", 2)
        lessonInstance + teacherInstance
        lessonInstance2 = Lesson(termInstance2, "kuki", 2, True)
        lessonInstance2 + teacherInstance
        breakInstance = Break(9, 30, 10)
        breakInstance2 = Break(12, 50, 10)
        timetableInstance = Timetable2([breakInstance, breakInstance2])
        timetableInstance.put(lessonInstance)
        timetableInstance.put(lessonInstance2)
        print(timetableInstance)
# Script entry point: run the unittest runner when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6589395 | <reponame>rland93/uciMAE107LabS21
import pandas as pd
import numpy as np
import scipy.stats as stats
# length, visc., density
# Pipe length L [m], dynamic viscosity mu [Pa*s], density rho [kg/m^3]
# (values look like water near room temperature -- TODO confirm).
L, mu, rho = 0.36, 0.0010016, 1000
data = pd.read_csv('./rawdata.csv')
# Pressure drop between the two taps, in the raw units (mm of water).
data['dp'] = data['p1 mm'] - data['p2 mm']
# Volumetric flow rate: collected volume (mL -> m^3) over collection time.
data['Q m3/s'] = data['vol mL'] / 1000000 / data['time s']
data['Q mL/s'] = data['vol mL'] / data['time s']
# mm H2O -> Pa (dividing by 0.10197 is multiplying by ~9.807 Pa per mm H2O).
data['dp Pa'] = data['dp'] / .10197
# sfunc = lambda t: True if t not in ['Venturi', 'Orifice'] else False
# data = data[data['type'].apply(sfunc)]
# Pipe cross-sectional area from diameter d1, and mean velocity from Q/A.
data['A'] = data['d1'] * data['d1'] * np.pi / 4.0
data['uavg'] = data['Q m3/s'] / data['A']
# Reynolds number Re = rho*u*D/mu.
data['Re'] = rho * data['uavg'] * data['d1'] / mu
# Darcy friction factor f = (D/L) * dp / (0.5*rho*u^2).
data['f'] = data['d1'] / L * data['dp Pa'] / (0.5 * rho * data['uavg'] * data['uavg'])
# Head loss h = dp / (rho*g) [m].
data['head loss'] = data['dp Pa'] / (9.81 * rho)
# Hydraulic pump power P = dp * Q [W].
data['pump power'] = data['dp Pa'] * data['Q m3/s']
t1d, t2d, t3d = {}, {}, {}
# One group per pipe diameter D.
for D, df in data.groupby('d1'):
    # to calculate var
    # Sample standard deviations of the measured quantities for this diameter.
    errPa =stats.tstd(df['dp Pa'].values)
    errV =stats.tstd(df['vol mL'].values)
    errt =stats.tstd(df['time s'].values)
    # flow rate error
    # NOTE(review): the partial-derivative terms are combined by straight
    # subtraction; propagated uncertainties are usually combined in
    # quadrature (or with absolute values), so these can come out negative
    # -- confirm this is intended.
    df['errQ'] = 1/df['time s'] * errV - 1/(df['time s']* df['time s']) * errt # mL
    # velocity error
    df['errVe'] = df['errQ'] / 1000000 / df['A'].max() #mL -> m^3 then convert to m/s
    errRe = (rho * df['errVe'] * D / mu).mean()
    # f error each
    df['errf'] = 2 * D / (L * rho * df['uavg']**2) * errPa - 6 * D * df['dp Pa'] / (L * rho * df['uavg']**3) * df['errVe']
    # f error overall
    errf = df['errf'].mean()
    # tabulate
    t1d[D] = {
        'dp Pa' : errPa,
        'vol mL' : errV,
        'time s' : errt,
        'Re' : errRe,
        'f' : errf
    }
    # pump power
    power = df['pump power'].mean()
    # cost function
    # Cost over 10 years: W * $/kWh * h/day * (s/h) * days/yr * yrs,
    # divided by 3.6e6 J per kWh.
    cfunc = lambda price, hperday: power * price * hperday * 3600 * 365 * 10 / 3600000
    # Total volume pumped over 10 years at the mean flow rate [m^3].
    volfunc = lambda hperday: 3600 * hperday * 365 * 10 * df['Q m3/s'].mean()
    t2d[D] = {
        'Re': df['Re'].mean(),
        'f' : df['f'].mean(),
    }
    t3d[D] = {
        # NOTE(review): this key says m3/s but the value stored is the
        # mL/s mean -- confirm which is intended.
        'Q m3/s' : df['Q mL/s'].mean(),
        'head loss' : df['head loss'].mean(),
        'Pump Power' : power,
        'cost (peak)' : cfunc(0.25, 8),
        'cost (off-peak)' : cfunc(0.10, 16),
        'cost (peak, normalized)' : cfunc(0.25, 8) / volfunc(8) * 100,
        'cost (off-peak, normalized)' : cfunc(0.10, 16) / volfunc(16) * 100,
    }
# Emit the three summary tables to stdout and to CSV files.
table1 = pd.DataFrame(t1d)
print(table1)
print('\n\n')
table2 = pd.DataFrame(t2d)
print(table2)
print('\n\n')
table3 = pd.DataFrame(t3d)
print(table3)
print('\n\n')
table1.to_csv('table1.csv')
table2.to_csv('table2.csv')
table3.to_csv('table3.csv')
4862494 | <filename>main_MetaAdaptModel_test.py
"""
@author : <NAME>
# Adaptation model test for metaSCI
# modified: <NAME>, 2021.6
Note:
-
Todo:
"""
import numpy as np
from datetime import datetime
import os
import logging
from os.path import join as opj
from os.path import exists as ope
import random
import scipy.io as sci
from utils import generate_masks_MAML, generate_meas
from my_util.plot_util import plot_multi
from my_util.quality_util import cal_psnrssim
import time
from tqdm import tqdm
from MetaFunc import construct_weights_modulation, forward_modulation
# %% setting
# envir config
# CUDA_VISIBLE_DEVICES is set before TensorFlow is imported -- presumably
# because TF reads it at import time, which is why the tf import is
# deliberately placed after these environment tweaks.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # hide tensorflow warning
import tensorflow as tf
config = tf.ConfigProto()  # TF1.x-style session configuration
config.gpu_options.allow_growth = True
# NOTE(review): both allow_growth and a fixed memory fraction are set;
# usually only one GPU-memory policy is intended -- confirm.
config.gpu_options.per_process_gpu_memory_fraction = 0.8
tf.reset_default_graph()
# params config
# setting global parameters
batch_size = 1
num_frame = 10
image_dim = 256
Epoch = 3
sigmaInit = 0.01
step = 1
update_lr = 1e-5
num_updates = 5
picked_task = list(range(1,5)) # pick masks for base model train
num_task = len(picked_task) # num of picked masks
run_mode = 'test' # 'train', 'test','finetune'
test_real = False # test real data
pretrain_model_idx = -1 # pretrained model index, 0 for no pretrained
exp_name = "Realmask_AdaptTest_256_Cr10_zzhTest"
model_name_prefix = 'adapt_model'
timestamp = '{:%m-%d_%H-%M}'.format(datetime.now()) # date info
# data path
# trainning set
# datadir = "../[data]/dataset/testing_truth/bm_256_10f/"
datadir = "../[data]/benchmark/orig/bm_256/"
# datadir = "../[data]/dataset/testing_truth/test_256_10f/"
maskpath = "./dataset/mask/realMask_256_Cr10_N576_overlap50.mat"
# model path
# pretrain_model_path = './result/_pretrained_model/simulate_data_256_Cr8/'
pretrain_model_path = './result/train/M_RealmaskDemo_AdaptTrain_256_Cr10_zzhTest/trained_model/'
# saving path
save_path = './result/test/'+exp_name+'_'+timestamp+'/'
if not os.path.exists(save_path):
os.makedirs(save_path)
# logging setting
logger = logging.getLogger()
logger.setLevel('INFO')
BASIC_FORMAT = "%(asctime)s:%(levelname)s:%(message)s"
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(BASIC_FORMAT, DATE_FORMAT)
chlr = logging.StreamHandler() # handler for console output
chlr.setFormatter(formatter)
chlr.setLevel('INFO')
fhlr = logging.FileHandler(save_path+'train.log') # handler for log file
fhlr.setFormatter(formatter)
logger.addHandler(chlr)
logger.addHandler(fhlr)
logger.info('\t Exp. name: '+exp_name)
logger.info('\t Mask path: '+maskpath)
logger.info('\t Data dir: '+datadir)
logger.info('\t pretrain model: '+pretrain_model_path)
logger.info('\t Params: batch_size {:d}, num_frame {:d}, image_dim {:d}, sigmaInit {:f}, update_lr {:f}, num_updates {:d}, picked_task {:s}, run_mode- {:s}, pretrain_model_idx {:d}'.format(
batch_size, num_frame, image_dim, sigmaInit, update_lr, num_updates, str(picked_task), run_mode, pretrain_model_idx))
#%% construct graph, load pretrained params ==> train, finetune, test
# Build the TF1 static graph once; placeholders are fed per measurement below.
weights, weights_m = construct_weights_modulation(sigmaInit,num_frame)
mask = tf.placeholder('float32', [image_dim, image_dim, num_frame])
meas_re = tf.placeholder('float32', [batch_size, image_dim, image_dim, 1])
gt = tf.placeholder('float32', [batch_size, image_dim, image_dim, num_frame])
final_output = forward_modulation(mask, meas_re, gt, weights, weights_m, batch_size, num_frame, image_dim)
nameList = os.listdir(datadir)
mask_sample, mask_s_sample = generate_masks_MAML(maskpath, picked_task)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One adapted model checkpoint is restored per picked mask/task.
    for task_index in range(num_task):
        # load pretrained params
        ckpt = tf.train.get_checkpoint_state(pretrain_model_path+model_name_prefix+str(picked_task[task_index]))
        if ckpt:
            ckpt_states = ckpt.all_model_checkpoint_paths
            saver.restore(sess, ckpt_states[pretrain_model_idx])
            logger.info('===> Load pretrained model from: '+ckpt_states[pretrain_model_idx])
        else:
            logger.error('===> No pretrained model found')
            raise FileNotFoundError('No pretrained model found')
        # [==> test]
        logger.info('\n===== Task {:4d}/{:<4d} Test Begin=====\n'.format(task_index, len(picked_task)))
        validset_psnr = 0
        validset_ssim = 0
        mask_sample_i = mask_sample[task_index]
        mask_s_sample_i = mask_s_sample[task_index]
        for index in tqdm(range(len(nameList))):
            # load data
            data_tmp = sci.loadmat(datadir + nameList[index])
            if test_real:
                # Real captures have no ground truth; use a zero placeholder.
                # NOTE(review): this branch never assigns gt_sample, which is
                # read below (gt_sample[k]) -- running with test_real=True
                # would raise NameError; confirm intended usage.
                gt_tmp = np.zeros([image_dim, image_dim, num_frame])
                assert "meas" in data_tmp, 'NotFound ERROR: No MEAS in dataset'
                meas_sample = data_tmp['meas'][task_index]
                # meas_tmp = data_tmp['meas']task_index / 255
            else:
                if "patch_save" in data_tmp:
                    gt_tmp = data_tmp['patch_save'] / 255
                elif "orig" in data_tmp:
                    gt_tmp = data_tmp['orig'] / 255
                else:
                    raise FileNotFoundError('No ORIG in dataset')
                meas_sample,gt_sample = generate_meas(gt_tmp, mask_sample_i)
            # normalize data
            # NOTE(review): this renormalization runs every file iteration;
            # after the first pass mask_max == 1 so it becomes a no-op.
            mask_max = np.max(mask_sample_i)
            mask_sample_i = mask_sample_i/mask_max
            mask_s_sample_i = mask_s_sample_i/mask_max # to be verified
            meas_sample = meas_sample/mask_max
            meas_sample_re = meas_sample / mask_s_sample_i
            meas_sample_re = np.expand_dims(meas_sample_re, -1)
            # test data
            # pred shape: (H, W, num_frame, num_measurements).
            pred = np.zeros((image_dim, image_dim, num_frame,meas_sample_re.shape[0]))
            time_all = 0
            for k in range(meas_sample_re.shape[0]):
                meas_sample_re_k = np.expand_dims(meas_sample_re[k],0)
                gt_sample_k = np.expand_dims(gt_sample[k],0)
                begin = time.time()
                pred_k = sess.run([final_output['pred']],
                                  feed_dict={mask: mask_sample_i,
                                             meas_re: meas_sample_re_k,
                                             gt: gt_sample_k}) # pred for Y_meas
                time_all += time.time() - begin
                pred[...,k] = pred_k[0]
            # eval: psnr, ssim
            # Skip quality metrics when no ground truth is available (all-zero gt).
            mean_psnr,mean_ssim = 0,0
            psnr_all = np.zeros(0)
            ssim_all = np.zeros(0)
            if np.sum(gt_sample)!=0:
                for m in range(meas_sample_re.shape[0]):
                    psnr_all_m = np.zeros(0)
                    ssim_all_m = np.zeros(0)
                    for k in range(num_frame):
                        psnr_k, ssim_k = cal_psnrssim(gt_sample[m,...,k], pred[...,k,m])
                        psnr_all_m = np.append(psnr_all_m,psnr_k)
                        ssim_all_m =np.append(ssim_all_m,ssim_k)
                    psnr_all = np.append(psnr_all,psnr_all_m)
                    ssim_all =np.append(ssim_all,ssim_all_m)
                    # save image
                    plot_multi(pred[...,m], 'MeasRecon_Task%d_%s_Frame%d'%(picked_task[task_index], nameList[index].split('.')[0],m), col_num=num_frame//2, titles=psnr_all_m,savename='MeasRecon_Task%d_%s_Frame%d_psnr%.2f_ssim%.2f'%(picked_task[task_index], nameList[index].split('.')[0],m,np.mean(psnr_all_m),np.mean(ssim_all_m)), savedir=save_path+'recon_img/task%d/'%picked_task[task_index])
                mean_psnr = np.mean(psnr_all)
                mean_ssim = np.mean(ssim_all)
                validset_psnr += mean_psnr
                validset_ssim += mean_ssim
            logger.info('---> Task {} - {:<20s} Recon complete: PSNR {:.2f}, SSIM {:.2f}, Time {:.2f}'.format(picked_task[task_index], nameList[index], mean_psnr, mean_ssim, time_all))
            # Persist reconstruction + metrics per input file as a .mat archive.
            mat_save_path = save_path+'recon_mat/task%d/'%picked_task[task_index]
            if not ope(mat_save_path):
                os.makedirs(mat_save_path)
            sci.savemat(mat_save_path+'MeasRecon_Task%d_%s_psnr%.2f_ssim%.2f.mat'%(picked_task[task_index], nameList[index].split('.')[0],mean_psnr,mean_ssim),
                        {'recon':pred,
                         'gt':gt_sample,
                         'psnr_all':psnr_all,
                         'ssim_all':ssim_all,
                         'mean_psnr':mean_psnr,
                         'mean_ssim':mean_ssim,
                         'time_all':time_all,
                         'task_index':picked_task[task_index]
                         })
            logger.info('---> Recon data saved to: '+save_path)
        # Dataset-level averages for this task.
        validset_psnr /= len(nameList)
        validset_ssim /= len(nameList)
logger.info('===> Task {:4d}/{:<4d} Recon complete: Aver. PSNR {:.2f}, Aver.SSIM {:.2f}'.format(task_index,len(picked_task), validset_psnr, validset_ssim)) | StarcoderdataPython |
3232218 | from scrapy.dupefilters import BaseDupeFilter, referer_str
import logging
import hashlib
from scrapy.utils.python import to_bytes
class MyRFPDupeFilter(BaseDupeFilter):
    """Request-fingerprint duplicate filter keyed on the request URL only.

    Unlike Scrapy's default fingerprinting, the fingerprint here is just the
    MD5 of the URL, so requests differing only in method/body/headers are
    treated as duplicates.
    """
    def __init__(self, debug=False):
        # Hex digests of every URL seen so far.
        self.fingerprints = set()
        # Emit at most one "duplicate filtered" log line unless debug is on.
        self.logdupes = True
        self.debug = debug
        self.logger = logging.getLogger(__name__)
    @classmethod
    def from_settings(cls, settings):
        """Build the filter from crawler settings (honours DUPEFILTER_DEBUG)."""
        debug = settings.getbool('DUPEFILTER_DEBUG')
        return cls(debug)
    def request_seen(self, request):
        """Return True if *request* was already seen; otherwise record it.

        Fix: explicitly return False for unseen requests instead of falling
        through and implicitly returning None.
        """
        fp = self.request_fingerprint(request)
        if fp in self.fingerprints:
            return True
        self.fingerprints.add(fp)
        return False
    def request_fingerprint(self, request) -> str:
        """MD5 hex digest of the request URL (the entire fingerprint)."""
        return hashlib.md5(to_bytes(request.url)).hexdigest()
    def close(self, reason):
        """Log how many distinct URLs were seen when the spider closes."""
        msg = "The number of URLs: %(url_num)d"
        args = {'url_num': len(self.fingerprints)}
        self.logger.info(msg, args)
    def log(self, request, spider):
        """Log a filtered duplicate request and bump the dupefilter stat."""
        if self.debug:
            msg = "Filtered duplicate request: %(request)s (referer: %(referer)s)"
            args = {'request': request, 'referer': referer_str(request)}
            self.logger.debug(msg, args, extra={'spider': spider})
        elif self.logdupes:
            msg = ("Filtered duplicate request: %(request)s"
                   " - no more duplicates will be shown"
                   " (see DUPEFILTER_DEBUG to show all duplicates)")
            self.logger.debug(msg, {'request': request}, extra={'spider': spider})
            self.logdupes = False
        spider.crawler.stats.inc_value('dupefilter/filtered', spider=spider)
spider.crawler.stats.inc_value('dupefilter/filtered', spider=spider)
| StarcoderdataPython |
9770408 | <reponame>commandarmy/discord-event-manager<filename>api/mod_wow/region.py
"""Provides a wrapper around the WoW region system used in the API."""
from __future__ import annotations
__LICENSE__ = """
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from enum import Enum
class Region(Enum):
    """Supported World of Warcraft regions."""
    eu = 'eu'
    us = 'us'

    def _namespace(self, prefix):
        """Build a Blizzard API namespace string such as 'dynamic-eu'.

        See https://develop.battle.net/documentation/world-of-warcraft/guides/namespaces
        """
        return '{0}-{1}'.format(prefix, self.value)

    @property
    def dynamic_namespace(self):
        """Blizzard dynamic namespace for this region."""
        return self._namespace('dynamic')

    @property
    def static_namespace(self):
        """Blizzard static namespace for this region."""
        return self._namespace('static')

    @property
    def profile_namespace(self):
        """Blizzard profile namespace for this region."""
        return self._namespace('profile')


# Default region to use for data that is largely region-insensitive.
DEFAULT_REGION = Region.eu
| StarcoderdataPython |
9721709 | from app.helpers.user import response
def bad_request(e):
    '''
    error 400 response

    Flask/app error handler: returns a 'failed' payload with HTTP 400 for
    malformed request data. (Docstring corrected: it previously said 404.)
    '''
    return response(
        'failed',
        'Request data cannot be executable',
        400
    )
def route_not_found(e):
    '''
    error 404 response

    Handler for unknown endpoints; returns a 'failed' payload with HTTP 404.
    '''
    return response(
        'failed',
        'Endpoint not found',
        404
    )
def method_not_found(e):
    '''
    error 405 response

    Handler for disallowed HTTP methods; returns a 'failed' payload with 405.
    '''
    return response(
        'failed',
        'The method is not allowed for the requested URL',
        405
    )
def internal_server_error(e):
'''
error 500 response
'''
return response(
'failed',
'Internal serve error',
500
) | StarcoderdataPython |
8106145 | """Project settings"""
# flake8: noqa
# Aggregate Django settings from components, custom overrides and
# environment-specific modules via star imports (hence the noqa above).
from config.settings.components import *
from config.settings.custom import *
from config.settings.environments import *
# Settings exposed to templates -- presumably consumed by the
# django-settings-export package; TODO confirm against INSTALLED_APPS.
SETTINGS_EXPORT = [
    "DEBUG",
    "LANGUAGE_CODE",
]
| StarcoderdataPython |
1782509 | <reponame>martinell/django-carrot
from django.views.generic import TemplateView
class MessageList(TemplateView):
    """Serve the carrot monitor single-page app template (carrot/index.vue)."""
    template_name = 'carrot/index.vue'
| StarcoderdataPython |
6624215 | # This file is useful during development and must never be checked in.
import os
# NB: do not set DEBUG here. Some settings depend on it and setting it here has
# no effect. Edit an .env file and set it there. See
# https://django-environ.readthedocs.io/en/latest/ for details.
# Declare or redeclare variables here
FOOFOO = 1
# Uncomment to use PostgreSQL as database or set in an .env file
"""
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": "skeleton",
"USER": "postgres",
"PASSWORD": "",
"HOST": "",
"PORT": "5432",
"CONN_MAX_AGE": 600
}
}
"""
# Uncomment to use memcache as caching backend or set in an .env file
"""
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
"KEY_PREFIX": "skeleton",
},
}
"""
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
DEBUG_TOOLBAR_PANELS = [
"ddt_request_history.panels.request_history.RequestHistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
]
INTERNAL_IPS = ["127.0.0.1"]
RESULTS_CACHE_SIZE = 20000
"""
# If you need to access an existing variable your code must be in configure
def configure(**kwargs):
    """Compute setting overrides that depend on already-defined settings.

    The current settings are passed in as keyword arguments; return a dict
    of overrides. The default development configuration overrides nothing.

    Example (Django Debug Toolbar profiling)::

        return {
            "INSTALLED_APPS": kwargs["INSTALLED_APPS"] + ["debug_toolbar"],
            "MIDDLEWARE_CLASSES": (
                "debug_toolbar.middleware.DebugToolbarMiddleware",
            ) + kwargs["MIDDLEWARE_CLASSES"]
        }
    """
    return {}
| StarcoderdataPython |
1638858 | <gh_stars>1-10
class RoutingRulesAdditionalFields:
    """Identifiers for optional routing-rule condition fields, plus form choices."""
    USERS = "users"
    CASE_TYPES = "case_types"
    FLAGS = "flags"
    COUNTRY = "country"
    # (value, label) pairs for form/choice fields.
    # NOTE(review): the "flags" label is lowercase while the others are
    # title-cased -- confirm whether this is intentional before "fixing".
    choices = [
        (USERS, "Users"),
        (CASE_TYPES, "Case Types"),
        (FLAGS, "flags"),
        (COUNTRY, "Country"),
    ]
class StatusAction:
    """String constants for status-toggle actions on routing rules."""
    DEACTIVATE = "deactivate"
    REACTIVATE = "reactivate"
| StarcoderdataPython |
9730654 | import sys
sys.path.append("..")
from tqdm import tqdm, trange
import json
import numpy as np
import torch
""" pretrain 데이터셋"""
class PretrainDataSet(torch.utils.data.Dataset):
    """Dataset of pre-tokenised BERT pretraining instances from a JSON-lines file.

    Each line of *infile* is a JSON object with:
      - is_next:    whether tokens_a and tokens_b are consecutive sentences (NSP label)
      - tokens:     the sentence-pair tokens (sentencepiece pieces)
      - segment:    0/1 ids separating tokens_a (0) from tokens_b (1)
      - mask_idx:   indices of masked positions within tokens
      - mask_label: original pieces at the masked positions (MLM targets)
    """
    def __init__(self, vocab, infile):
        self.vocab = vocab
        self.labels_cls = []   # NSP labels (is_next)
        self.labels_lm = []    # per-token MLM labels, -1 where not masked
        self.sentences = []    # token-id sequences
        self.segments = []     # segment-id sequences
        # First pass only counts lines so tqdm can display a total.
        line_cnt = 0
        with open(infile, "r") as f:
            for line in f:
                line_cnt += 1
        with open(infile, "r") as f:
            for i, line in enumerate(tqdm(f, total=line_cnt, desc=f"Loading {infile}", unit=" lines")):
                instance = json.loads(line)
                self.labels_cls.append(instance["is_next"])
                sentences = [vocab.piece_to_id(p) for p in instance["tokens"]]
                self.sentences.append(sentences)
                self.segments.append(instance["segment"])
                # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
                # use an explicit concrete dtype instead.
                mask_idx = np.array(instance["mask_idx"], dtype=np.int64)
                mask_label = np.array([vocab.piece_to_id(p) for p in instance["mask_label"]], dtype=np.int64)
                # LM label is -1 (ignore) everywhere except the masked positions.
                label_lm = np.full(len(sentences), dtype=np.int64, fill_value=-1)
                label_lm[mask_idx] = mask_label
                self.labels_lm.append(label_lm)
    def __len__(self):
        # The four parallel lists must stay in sync.
        assert len(self.labels_cls) == len(self.labels_lm)
        assert len(self.labels_cls) == len(self.sentences)
        assert len(self.labels_cls) == len(self.segments)
        return len(self.labels_cls)
    def __getitem__(self, item):
        """Return (label_cls, label_lm, tokens, segment) as torch tensors."""
        return (torch.tensor(self.labels_cls[item]),
                torch.tensor(self.labels_lm[item]),
                torch.tensor(self.sentences[item]),
                torch.tensor(self.segments[item]))
""" pretrain data collate_fn """
def pretrin_collate_fn(inputs):
"""
배치 단위로 데이터 처리를 위한 collate_fn
:param inputs:
:return: batch
"""
labels_cls, labels_lm, inputs, segments = list(zip(*inputs))
# LM의 라벨의 길이가 같아지도록, 짧은 문장에 대해 padding 값-1 추가
labels_lm = torch.nn.utils.rnn.pad_sequence(labels_lm, batch_first=True, padding_value=-1)
# inputs의 길이가 같아지도록 짧은 문장에 대해 padding 값 0 추가 이때 padding은 vocab 만들기 시, pad_id = 0으로 지정한 값
inputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True, padding_value=0)
# segments에 대한 값도 짧은 문장에 대해 padding값 0 추가
segments = torch.nn.utils.rnn.pad_sequence(segments, batch_first=True, padding_value=0)
batch = [
torch.stack(labels_cls, dim=0), # 길이가 고정 1이므로, stack 함수를 통해 torch tensor로 변환
labels_lm,
inputs,
segments
]
return batch
""" pretrain 데이터 로더 """
def pretrain_data_loader(vocab, data_dir, batch_size = 128):
dataset = PretrainDataSet(vocab, f"{data_dir}/kowiki_bert_0.json")
data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=pretrin_collate_fn)
return data_loader
| StarcoderdataPython |
3448706 | from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling3D, Conv3D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from keras.utils.data_utils import get_file
from keras import optimizers as O
import tensorflow as tf
import logging
K.set_image_dim_ordering('tf')
class Nothinbutnet(Model):
    """3D-convolutional Keras classifier over (plane, time, wire) volumes.

    Builds and compiles the network in __init__ from *generator*:
    `generator.output` gives the input volume shape and `generator.input`
    the number of output classes. The triple-quoted blocks inside are
    deliberately disabled (string-literal) layer variants kept for
    experimentation.
    """
    logger = logging.getLogger('pix.Nothinbutnet')
    def __init__(self, generator):
        self.generator = generator
        self.logger.info("Assembling Model")
        self._input = Input(shape=generator.output)
        self.logger.info(self._input)
        self.logger.info(self._input.shape)
        # NOTE(review): leftover debugging import (see the commented
        # pdb.set_trace() below) -- safe to remove.
        import pdb
        # plane, time, wire
        # layer = MaxPooling3D((1, 1, 1), strides=(2, 2, 2),
        #                      data_format='channels_first',
        #                      name='block0_pool')(self._input)
        # self.logger.info(layer.shape)
        # pdb.set_trace()
        ## EC: This had been 0th layer before 20-Sep-2017.
        # layer = Conv3D(32, (3,4,self._input.shape[-1]), strides=(3,4,1),
        layer = Conv3D(32, (2,2,2), strides=(2,2,2),
                       activation='elu', padding='valid', #'same',
                       data_format='channels_first',
                       name='block1_conv1')(self._input)
        self.logger.info(layer.shape)
        layer = MaxPooling3D((2,2,2), strides=(2,2,2),
                             data_format='channels_first',
                             name='block1_pool')(layer)
        self.logger.info(layer.shape)
        '''
        layer = BatchNormalization(axis=2, name="block1_norm")(layer)
        self.logger.info(layer.shape)
        '''
        layer = Dropout(0.1)(layer)
        layer = Conv3D(8, (6,6,6), strides=(3,3,3),
                       activation='elu', padding='same',
                       data_format='channels_first',
                       name='block2_conv1')(layer)
        self.logger.info(layer.shape)
        layer = MaxPooling3D((2, 2, 2), strides=(2,2, 2),
                             data_format='channels_first',
                             name='block2_pool')(layer)
        self.logger.info(layer.shape)
        '''
        layer = Conv3D(4, (5,5,5), strides=(2,2,2),
                       activation='relu', padding='same',
                       data_format='channels_first',
                       name='block3_conv1')(layer)
        self.logger.info(layer.shape)
        layer = MaxPooling3D((8, 8, 8), strides=(4,4, 4),
                             data_format='channels_first',
                             name='block3_pool')(layer)
        self.logger.info(layer.shape)
        layer = BatchNormalization(axis=2, name="block3_norm")(layer)
        self.logger.info(layer.shape)
        '''
        '''
        layer = Dropout(0.1)(layer)
        layer = Conv3D(256, (3,3,3), strides=(3,3,3),
                       activation='relu', padding='same',
                       data_format='channels_first',
                       name='block4_conv1')(layer)
        self.logger.info(layer.shape)
        layer = MaxPooling3D((3, 3, 3), strides=(3, 3, 3),
                             data_format='channels_first',
                             name='block4_pool')(layer)
        self.logger.info(layer.shape)
        '''
        '''
        layer = Conv3D(512, (1,3,3), strides=(1,2,2),
                       activation='relu', padding='same',
                       data_format='channels_first',
                       name='block5_conv1')(layer)
        self.logger.info(layer.shape)
        layer = MaxPooling3D((1, 3, 3), strides=(1,2, 2),
                             data_format='channels_first',
                             name='block5_pool')(layer)
        self.logger.info(layer.shape)
        layer = Dropout(0.1)(layer)
        '''
        # Classification block
        layer = Flatten(name='flatten')(layer)
        layer = Dense(256, activation='elu', name='fc1')(layer)
        layer = Dense(generator.input, activation='softmax', name='predictions')(layer)
        self.logger.info(layer.shape)
        super(Nothinbutnet, self).__init__(self._input, layer)
        self.logger.info("Compiling Model")
        ogd = O.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) # was lr=0.001, EC 24-Jan-2018
        #ogd = O.Adam(lr=0.1, beta_1=0.9, beta_2=0.999, decay=0.0) # was lr=0.001, EC 24-Jan-2018
        self.compile(loss='binary_crossentropy', optimizer=ogd, metrics=['categorical_accuracy'])
| StarcoderdataPython |
6440062 | <reponame>chickentuna/wapkg
import hashlib
from sys import stdout
from urllib.request import urlopen
class Downloader(object):
    """Simple file downloader with optional progress output and checksum check."""
    def __init__(self, quiet=False):
        self.quiet = quiet          # suppress progress output when True
        self._last_path = None      # path of the most recent download (for verification)
    # URLError is thrown in case of errors
    def go(self, link, path, action=None):
        """Download *link* to *path*; return self for chaining.

        When not quiet, writes a progress line to stdout and notifies
        *action* (a DownloadAction) with (downloaded_kb, total_kb or -1).
        """
        with urlopen(link) as req:
            with open(path, 'wb') as f:
                if self.quiet:
                    f.write(req.read())
                else:
                    seg = 131072  # 128K
                    total = 0
                    dl_size = ''
                    dl_size_int = -1  # -1 means total size unknown
                    if link.startswith('http'):
                        cl = req.info().get('Content-Length')
                        dl_size_int = int(int(cl) / 1024)
                        dl_size = '/' + str(dl_size_int)
                    while True:
                        chunk = req.read(seg)
                        # Fix: a short read does not mean EOF on network
                        # streams -- only an empty chunk does. Breaking on
                        # len(chunk) < seg could truncate the download.
                        if not chunk:
                            break
                        total += int(len(chunk) / 1024)
                        msg = '- Downloading ' + link.split('/')[-1] + ', ' + str(total) + dl_size + ' KB'
                        if action:
                            action.update_progress(total, dl_size_int)
                        stdout.write('\r' + msg)
                        f.write(chunk)
                    print()  # newline after the in-place progress line
        self._last_path = path
        return self
    # Raises RuntimeError when verifying fails
    def _verify(self, hexdigest, algo):
        """Check the last downloaded file against *hexdigest* using *algo*."""
        if not hexdigest or not self._last_path:
            return
        # Renamed from 'hash' to avoid shadowing the builtin.
        digest = hashlib.new(algo)
        with open(self._last_path, 'rb') as f:
            digest.update(f.read())
        if not digest.hexdigest() == hexdigest.lower():
            raise RuntimeError('Checksum does not match')
    def verify_sha1(self, hexdigest):
        """Verify the last download's SHA-1; raise RuntimeError on mismatch."""
        self._verify(hexdigest, 'sha1')
class DownloadAction(object):
    """Progress callback handle passed to Downloader.go; subclass to react."""
    def __init__(self, token):
        # Opaque caller-supplied identifier for this download.
        self.token = token
    def update_progress(self, current, total):
        """Called with downloaded KB and total KB (-1 if unknown); no-op by default."""
        pass
| StarcoderdataPython |
225422 | import unittest
from pythonwarrior.units.archer import Archer
class TestArcher(unittest.TestCase):
    """Pin the Archer unit's abilities, combat stats and map glyph."""
    def setUp(self):
        # Fresh archer per test.
        self.archer = Archer()
    def test_should_have_look_and_shoot_abilities(self):
        # NOTE(review): the ability key is 'shoot_' with a trailing
        # underscore -- confirm this matches the ability registry naming.
        self.assertEqual({'shoot_', 'look'}, set(self.archer.abilities.keys()))
    def test_should_have_shoot_power_of_3(self):
        self.assertEqual(3, self.archer.shoot_power)
    def test_should_have_7_max_health(self):
        self.assertEqual(7, self.archer.max_health)
    def test_should_appear_as_a_on_map(self):
        self.assertEqual('a', self.archer.character)
| StarcoderdataPython |
5120695 | <filename>Toonland-2013-master/toonland/speedchat/TTChatInputSpeedChat.py
########################## THE TOON LAND PROJECT ##########################
# Filename: TTChatInputSpeedChat.py
# Created by: Cody/Fd Green Cat Fd (February 11th, 2013)
####
# Description:
#
# Modifies the Speed Chat globals to contain some of our custom messages.
####
from otp.speedchat import SCStaticTextTerminal
from toontown.chat import TTChatInputSpeedChat
# Register a custom SpeedChat phrase under id 31051.
SCStaticTextTerminal.SpeedChatStaticText[31051] = 'Let\'s go to Funny Farm!'
# Grab the submenu structure at index [7][1].
# NOTE(review): the next line of the original file writes this same value
# straight back, which is a no-op as written -- confirm intent (possibly
# truncated insertion logic).
scStructure = TTChatInputSpeedChat.scStructure[7][1]
TTChatInputSpeedChat.scStructure[7][1] = scStructure | StarcoderdataPython |
6597894 | from rest_framework.serializers import ModelSerializer
from .models import Setting
class SettingsSerializer(ModelSerializer):
    """DRF serializer exposing only the `active` flag of a Setting."""
    class Meta:
        model = Setting
        fields = ('active',)
| StarcoderdataPython |
5183065 | """authentik outposts app config"""
from importlib import import_module
from django.apps import AppConfig
from structlog.stdlib import get_logger
LOGGER = get_logger()
class AuthentikOutpostConfig(AppConfig):
    """authentik outposts app config"""
    name = "authentik.outposts"
    label = "authentik_outposts"
    verbose_name = "authentik Outpost"
    def ready(self):
        # Imported for side effects once the app registry is ready: these
        # modules register signal receivers / managed-object logic on import.
        import_module("authentik.outposts.signals")
        import_module("authentik.outposts.managed")
| StarcoderdataPython |
9767444 | <filename>source/ext/extref.py
#
# Copyright (c) 2021. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
"""A sphinx extension for inserting references in two formats - image or text.
The ``extref`` directive do the all job.
For example:
.. code-block:: rst
.. extref:: name
:type: image
:url: https://example.com
:image: default
:width: 400
It approximately translates to the following html:
.. code-block:: html
<a class="reference external image-reference" href="https://example.com">
<img alt="default-text" title="default-text" src="default-image" width="400">
</a>
Here ``alt``, ``title`` and ``src`` values comes from the JSON configuration file, and they implicitly specified by the ``name`` value.
The first one is given by default, the second is defined through the ``:image:`` option.
Configuration
-------------
In your configuration file add ``extref`` to your extensions list, e.g.:
.. code-block:: python
extensions = [
...
'extref',
...
]
The extension provides the following configuration values:
- ``extref_conf`` : str
Path to the JSON configuration file with parameters variety for each reference.
The structure is the following: {"name1": options, "name2": options, ...}.
Here for each named reference there is a standard bunch of options that is described below in the "JSON Options" section.
The name of the reference connects directive with this options.
- ``extref_images_dir`` : str
Path to the output directory with images inside the documentation site.
- ``extref_logo_images`` : dict
For each ``:ref:`` value that is used to be a logo it should be specified the path to the corresponding logo image.
- ``extref_default_type`` : {'image', 'logo', 'text'}
Default ``:type:`` value if it is not specified.
- ``extref_default_ref`` : str
Default ``:ref:`` value if it is not specified.
- ``extref_default_image`` : str
Default ``:image:`` value if it is not specified.
JSON Options
------------
- ``ref`` :
Dictionary of pairs ``"ref_type": "url"``.
``:ref: ref_type`` in the reStructuredText means ``href="url"`` in html.
By default, if ``:ref:`` option and ``extref_default_ref`` config value are not specified, used the first reference among all.
- ``image`` :
Dictionary of pairs ``"img_type": "path"``.
``:image: img_type`` in the reStructuredText means ``src="path"`` in html, if reference type is image.
By default, if ``:image:`` option and ``extref_default_image`` config value are not specified, used the first image among all.
- ``title`` :
String with default title value, if reference type is image.
- ``text`` :
String with default text value.
In html it means ``<a ...>text</a>`` when reference type is text and ``alt="text"`` in other cases.
Used when ``:text:`` option is not specified.
Directive Options
-----------------
- ``type`` :
Should be the one of the three types of references: text, image, logo.
- ``ref`` :
Reference type from the JSON configuration file.
- ``url`` :
Explicit url for the reference, that used instead of the ``:ref:`` option.
- ``image`` :
Image type from the JSON configuration file.
- ``text`` :
Explicit text for the reference.
- ``title`` :
Explicit title for the reference.
- ``width`` :
Width of the image if the ``:type:`` value is image or logo.
- ``height`` :
Height of the image if the ``:type:`` value is image or logo.
Examples
--------
Suppose that ``extref_logo_images`` configuration value is the following:
.. code-block:: python
extref_logo_images = {
...
'kaggle': "_static/images/kaggle.svg",
...
}
Suppose that JSON configuration file is the following:
.. code-block:: javascript
{
...
"example1": {
"ref": {
"nbviewer": "https://nbviewer.jupyter.org/github/Example/example/blob/master/example/example.ipynb",
"kaggle": "https://www.kaggle.com/example/example"
},
"image": {
"default": "_static/images/example1.png"
},
"text": "My Example"
}
...
}
Then
.. code-block:: rst
.. extref:: example1
gives the following html:
.. code-block:: html
<a class="reference external image-reference" href="https://nbviewer.jupyter.org/github/Example/example/blob/master/example/example.ipynb">
<img alt="My Example" title="My Example" src="_static/images/example1.png">
</a>
The code
.. code-block:: rst
.. extref:: example1
:type: logo
:ref: kaggle
gives the following html:
.. code-block:: html
<a class="reference external image-reference" href="https://www.kaggle.com/example/example">
<img alt="My Example" title="My Example" src="_static/images/kaggle.svg">
</a>
The code
.. code-block:: rst
.. extref:: example1
:type: text
:url: https://example.com
gives the following html:
.. code-block:: html
<a class="reference external" href="https://example.com">
My Example
</a>
"""
import os
import json
import shutil
from urllib.parse import urlparse
from docutils import nodes
from docutils.parsers.rst import Directive, directives
# The three rendering modes accepted by the :type: option.
REF_TYPES = ('image', 'logo', 'text')
# Default output directory (inside the built site) for copied images.
IMAGES_DIR = "_extref_images"
# Subdirectory of the images dir that holds logo files.
LOGO_DIR = "logo"
class ExtRefDirective(Directive):
    """Render an external reference as a text, image or logo hyperlink.

    The directive content (first line) names an entry in the JSON
    configuration (`extref_conf`); options refine which URL, image and
    caption are used. `run` emits a single raw-HTML anchor node.
    """
    has_content = True
    option_spec = {
        'type': lambda t: directives.choice(t, REF_TYPES),
        'ref': directives.unchanged,
        'url': directives.uri,
        'image': directives.unchanged,
        'title': directives.unchanged,
        'text': directives.unchanged,
        'width': directives.unchanged,
        'height': directives.unchanged,
    }
    def run(self):
        """Build the <a ...>content</a> raw node for this reference."""
        return [nodes.raw(
            "",
            '<a class="{0}" href="{1}">{2}</a>'.format(self._class(), self._href(), self._content()),
            format='html'
        )]
    def _env(self):
        """Sphinx build environment for the current document."""
        return self.state.document.settings.env
    def _conf(self):
        """JSON options for the reference named on the directive's first line."""
        return self._env().config['extref_conf'][self.content[0]]
    def _type(self):
        """Rendering mode: :type: option, else extref_default_type."""
        if 'type' in self.options.keys():
            return self.options['type']
        return self._env().config['extref_default_type']
    def _href(self):
        """Link target: explicit :url: wins over the configured reference."""
        if 'url' in self.options.keys():
            return self.options['url']
        return self._ref()
    def _ref(self):
        return self._conf()['ref'][self._ref_type()]
    def _ref_type(self):
        """Reference key: :ref: option, else extref_default_ref, else first key."""
        if 'ref' in self.options.keys():
            return self.options['ref']
        if 'extref_default_ref' in self._env().config and \
                self._env().config['extref_default_ref'] in self._conf()['ref']:
            return self._env().config['extref_default_ref']
        return list(self._conf()['ref'])[0]
    def _class(self):
        """CSS classes for the anchor (image-reference added for image/logo)."""
        if self._type() in ['image', 'logo']:
            return "reference {0} image-reference".format(self._url_type())
        return "reference {0}".format(self._url_type())
    def _url_type(self):
        # A URL with a network location is external; otherwise internal.
        return "external" if urlparse(self._href()).netloc else "internal"
    def _content(self):
        """Anchor inner HTML, dispatched on the rendering mode."""
        if self._type() == 'image':
            return self._image()
        if self._type() == 'logo':
            return self._logo()
        return self._text()
    def _image(self):
        """Copy the configured image into the output dir and return its <img> tag."""
        image_src_path = self._image_src_path()
        image_src_fullpath = os.path.join(self._env().app.srcdir, image_src_path)
        image_doc_path = os.path.join(self._env().config['extref_images_dir'], os.path.basename(image_src_path))
        image_doc_fullpath = os.path.join(self._env().app.outdir, image_doc_path)
        if not os.path.isfile(image_doc_fullpath):
            shutil.copy(image_src_fullpath, image_doc_fullpath)
        return self._image_tag(image_doc_path)
    def _image_src_path(self):
        """Image path: :image: option, else extref_default_image, else first entry."""
        conf_image = self._conf()['image']
        if 'image' in self.options.keys():
            return conf_image[self.options['image']]
        if 'extref_default_image' in self._env().config and \
                self._env().config['extref_default_image'] in conf_image:
            return conf_image[self._env().config['extref_default_image']]
        return conf_image[list(conf_image)[0]]
    def _alt(self):
        # alt text mirrors the reference text.
        return self._text()
    def _title(self):
        """Tooltip text: :title: option, JSON title, :text: option, JSON text, or ''."""
        if 'title' in self.options.keys():
            return self.options['title']
        if 'title' in self._conf():
            return self._conf()['title']
        if 'text' in self.options.keys():
            return self.options['text']
        if 'text' in self._conf():
            return self._conf()['text']
        return ""
    def _logo(self):
        """Return an <img> tag for the logo matching this reference type."""
        logo_fullpath = next((path for name, path in self._env().config['extref_logo_images'].items() \
                              if name == self._ref_type()), None)
        if not logo_fullpath:
            raise ValueError("There is no appropriate logo for the reference {0}".format(self._ref_type()))
        # Strip the outdir prefix (and its leading separator) to get a site path.
        logo_path = logo_fullpath.replace(self._env().app.outdir, '')[1:]
        return self._image_tag(logo_path)
    def _image_tag(self, image_path):
        """Render the <img> element with a src relative to the current document."""
        doc_dir = os.path.dirname(self.state.document.attributes['source'].replace(self._env().app.srcdir, ''))[1:]
        return '<img alt="{0}" title="{1}" src="{2}" style="{3}{4}"/>'.format(
            self._alt(), self._title(), os.path.relpath(image_path, doc_dir),
            self._width_tag_option(), self._height_tag_option()
        )
    def _width_tag_option(self):
        """CSS width declaration from :width: (bare numbers get a px suffix)."""
        if 'width' in self.options.keys():
            width = self.options['width']
            if width[-1] in "0123456789":
                width += "px"
            return "width: {0};".format(width)
        return ""
    def _height_tag_option(self):
        """CSS height declaration from :height: (bare numbers get a px suffix)."""
        if 'height' in self.options.keys():
            height = self.options['height']
            if height[-1] in "0123456789":
                height += "px"
            return "height: {0};".format(height)
        return ""
    def _text(self):
        """Link text: :text: option, JSON text, or the URL itself."""
        if 'text' in self.options.keys():
            return self.options['text']
        if 'text' in self._conf():
            return self._conf()['text']
        return self._href()
def config_inited_handler(app, config):
    """Validate the extension config and prepare resources on 'config-inited'."""
    default_type = config.extref_default_type
    if default_type and default_type not in REF_TYPES:
        raise ValueError("Parameter extref_default_type should be in {0}".format(REF_TYPES))
    prepare_conf_json(app, config)
    prepare_images(app, config)
def prepare_conf_json(app, config):
    """Parse config.extref_conf (a JSON file path) and replace it with the dict.

    Raises ValueError when the path is unset, and re-raises JSON decode
    errors with the file name prepended for easier debugging.
    """
    if not config.extref_conf:
        raise ValueError("Parameter extref_conf could not be empty")
    conf_path = os.path.join(app.srcdir, config.extref_conf)
    with open(conf_path) as conf_file:
        raw = conf_file.read()
        try:
            config.extref_conf = json.loads(raw)
        except json.decoder.JSONDecodeError as e:
            msg = "Decode error in {0}. {1}".format(config.extref_conf, e.msg)
            raise json.decoder.JSONDecodeError(msg, e.doc, e.pos) from e
def prepare_images(app, config):
    """Ensure the output directory for extref images exists, then stage logos."""
    images_root = os.path.join(app.outdir, config.extref_images_dir)
    if not os.path.isdir(images_root):
        os.makedirs(images_root)
    prepare_logo(app, config)
def prepare_logo(app, config):
    """Copy the configured logo files into the build output and rewrite
    ``config.extref_logo_images`` to map each logo name to its output path."""
    logo_dir = os.path.join(app.outdir, config.extref_images_dir, LOGO_DIR)
    if not os.path.isdir(logo_dir):
        os.makedirs(logo_dir)
    if not config.extref_logo_images:
        return
    staged = {}
    for name, src_rel in config.extref_logo_images.items():
        src = os.path.join(app.srcdir, src_rel)
        # The copy is named after the logo key but keeps the source extension.
        dst = os.path.join(logo_dir, "{0}{1}".format(name, os.path.splitext(src_rel)[1]))
        staged[name] = dst
        if not os.path.isfile(dst):
            shutil.copy(src, dst)
    config.extref_logo_images = staged
def setup(app):
    """Sphinx entry point: register config values, the directive and handlers."""
    app.add_config_value('extref_conf', None, 'html')
    app.add_config_value('extref_logo_images', None, 'html')
    app.add_config_value('extref_images_dir', IMAGES_DIR, 'html')
    app.add_config_value('extref_default_type', REF_TYPES[0], 'html')
    app.add_config_value('extref_default_ref', None, 'html')
    app.add_config_value('extref_default_image', None, 'html')
    app.add_directive('extref', ExtRefDirective)
    app.connect('config-inited', config_inited_handler)
    return {'version': '0.2'}
def heapify(arr, n, i):
    """Sift arr[i] down within arr[:n] so the subtree rooted at i is a min-heap.

    Assumes both child subtrees of i already satisfy the min-heap property.
    """
    while True:
        target = i
        left = 2 * i + 1
        right = 2 * i + 2
        if left < n and arr[left] < arr[target]:
            target = left
        if right < n and arr[right] < arr[target]:
            target = right
        if target == i:
            break
        # Swap the root with its smaller child and continue from there.
        arr[i], arr[target] = arr[target], arr[i]
        i = target
def heapSort(arr):
    """Sort ``arr`` in place in ascending order using a min-heap selection pass."""
    n = len(arr)

    def sift_down(size, root):
        # Restore the min-heap property for the subtree rooted at `root`.
        while True:
            smallest = root
            left, right = 2 * root + 1, 2 * root + 2
            if left < size and arr[left] < arr[smallest]:
                smallest = left
            if right < size and arr[right] < arr[smallest]:
                smallest = right
            if smallest == root:
                return
            arr[root], arr[smallest] = arr[smallest], arr[root]
            root = smallest

    # Build the min-heap bottom-up from the last internal node.
    for i in range(n // 2 - 1, -1, -1):
        sift_down(n, i)
    # Repeatedly move the current minimum to the tail -> array ends descending.
    for end in range(n - 1, 0, -1):
        arr[end], arr[0] = arr[0], arr[end]
        sift_down(end, 0)
    # Descending -> ascending.
    arr.reverse()
# Time: O(n log n)
# Space: O(1) auxiliary (iterative sift-down)
| StarcoderdataPython |
268890 | __________________________________________________________________________________________________
sample 48 ms submission
import heapq as hq
class Solution:
    """LeetCode 188 (Best Time to Buy and Sell Stock IV), heap-merge approach."""

    def maxProfit(self, k: int, prices: List[int]) -> int:
        """Return the max profit from at most *k* buy/sell transactions.

        Strategy: decompose the price series into maximal ascending segments
        (each a candidate transaction), merge overlapping candidates, then
        keep the k most profitable via a heap of negated profits.
        """
        n = len(prices)
        if n == 0 or k == 0:
            return 0
        if k > n // 2:
            # Equivalent to as many as transactions you want
            maxp = 0
            for i in range(1, n):
                if prices[i] > prices[i - 1]:
                    maxp += prices[i] - prices[i - 1]
            return maxp
        else:
            # dp = [[0] * n for _ in range(k)]
            # for kk in range(k):
            #     prevMin = prices[0] # initially
            #     for i in range(1, n):
            #         dp[kk][i] = max(dp[kk][i - 1], prices[i] - prevMin)
            #         if kk > 0:
            #             # prices[i] - prices[j] + dp[kk - 1][j - 1] for j in range(i - 1)
            #             # --> prices[i] - (prices[j] - dp[kk - 1][j - 1])
            #             # keep updating the later term, so it remains minimum
            #             prevMin = min(prevMin, prices[i] - dp[kk - 1][i - 1])
            #         else:
            #             prevMin = min(prevMin, prices[i])
            #
            # return dp[-1][-1]
            lo, hi = 0, 0
            profits, trans = [], []
            while True:
                # Find segments with positive slope --> transaction making profits
                # Be aware of horizontal line
                lo = hi
                while lo < n - 1 and prices[lo + 1] <= prices[lo]:
                    lo += 1
                hi = lo
                while hi < n - 1 and prices[hi + 1] >= prices[hi]:
                    hi += 1
                if lo == hi:
                    break
                # Merge with previously stacked transactions when the new
                # segment extends or undercuts them.
                while trans:
                    if prices[lo] < prices[trans[-1][0]]:
                        x, y = trans.pop()
                        profits.append(prices[x] - prices[y])  # Push negative value so minHeap -> maxHeap
                    elif prices[hi] > prices[trans[-1][1]]:
                        x, y = trans.pop()
                        profits.append(prices[lo] - prices[y])
                        lo = x
                    else:
                        break
                trans.append((lo, hi))
            # Flush any remaining open transactions as (negated) profits.
            while trans:
                x, y = trans.pop()
                profits.append(prices[x] - prices[y])
            hq.heapify(profits)
            # Pop the k smallest negated values == the k largest profits.
            maxp = 0
            while profits and k > 0:
                maxp += hq.heappop(profits)
                k -= 1
            return -maxp
__________________________________________________________________________________________________
sample 13556 kb submission
class Solution:
    """LeetCode 188 (Best Time to Buy and Sell Stock IV), local/global DP.

    NOTE(review): the original class defined ``maxProfit`` twice; Python keeps
    only the last definition, so the first one was dead code that silently
    shadowed nothing at runtime.  Only the effective implementation is kept.
    """

    def maxProfit(self, k, prices):
        """Return the max profit from at most *k* buy/sell transactions.

        O(k * n) time, O(n) space.
        """
        n = len(prices)
        # With k >= n//2 the transaction cap is never binding: just take
        # every upward price move (unlimited-transaction greedy).
        if k >= n // 2:
            return sum(x - y for x, y in zip(prices[1:], prices[:-1]) if x > y)
        # local_max[j]: best profit achievable within prices[:j+1] using the
        # transactions considered so far.
        local_max = [0] * n
        for i in range(1, k + 1):
            # Best profit for i transactions where the last one ends exactly
            # at the previous day.
            preprofit = 0
            for j in range(1, n):
                profit = prices[j] - prices[j - 1]
                # Either extend/open a transaction ending at day j, or keep
                # the previous best that ends at day j.
                curr_profit = max(local_max[j], preprofit + profit)
                local_max[j] = max(local_max[j - 1], curr_profit)
                preprofit = curr_profit
        return local_max[-1]
__________________________________________________________________________________________________
| StarcoderdataPython |
173721 | """
Mocsár Environment
File name: envs/gmocsar.py
Author: <NAME>
Date created: 3/27/2020
"""
from rlcard3 import models
from rlcard3.envs.env import Env
from rlcard3.games.mocsar.game import MocsarGame as Game
from rlcard3.games.mocsar.utils import action_to_string, \
string_to_action, payoff_func, print_state, encode_to_obs
from typing import List
class MocsarEnv(Env):
    """ Mocsar Environment: rlcard3 Env wrapper around MocsarGame.

    NOTE(review): the original docstring said "GinRummy Environment" -- a
    copy/paste leftover from another environment.
    """
    # Dimensions of the numpy observation array; see _extract_state() for the
    # meaning of each plane.
    state_shape: List[int]  # Dimensions of state numpy array

    def __init__(self, config):
        self.game = Game()
        self.state_shape = [3, 9, 14]
        super().__init__(config=config)

    def _extract_state(self, state):  # 200213 don't use state ???
        """
        Extract useful information from state for RL. Must be implemented in the child class.
        numpy(3,9,14)
        Menaing: x,y,z
           z: 1/0, 1 means, the hand contains y amount of card.
           y: rank of cards in some hand.
           x=0: player's hand
           x=1: others hand
           x=2: target
           x>2: history, not implemented....
        :param state: dict, the raw state
        :return: dict: 'obs':the extracted state, numpy.array, 'legal_actions': list of actions
        """
        obs = encode_to_obs(state=state)
        extracted_state = {'obs': obs,
                           'legal_actions': self._get_legal_actions(),
                           'is_extract': True  # State is extracted>
                           }
        return extracted_state

    def get_payoffs(self):
        """
        Get the payoffs of players. Must be implemented in the child class.
        First one scores 1, Last one scores 0. Other ith player scores 0.5 ^^i
        :return: A list of payoffs for each player.
        """
        num_players = self.game.num_players
        # players.winners lists player ids in finishing order, e.g. [1, 3, 2, 0].
        # win_id re-indexes that by player id instead of by finishing order.
        win_id = [self.game.players.winners.index(i) for i in range(num_players)]
        # win_id[i] is the finishing position of player i, e.g. [3, 0, 2, 1]
        # (player 0 finished last, so its position is 3).
        payoffs = [payoff_func(position=win_id[i], num_players=num_players) for i in range(num_players)]
        return payoffs

    def _decode_action(self, action_id):
        """
        Decode Action id to the action in the game.
        :param action_id: The id of the action
        :return: The action that will be passed to the game engine.
        """
        return action_to_string(action=action_id)

    def _get_legal_actions(self):
        """
        Get all legal actions for current state.
        :return: A list of legal actions' id.
        """
        return [string_to_action(action) for action in self.game.get_legal_actions()]

    def _load_model(self):
        """
        Load pretrained/rule model
        :return: A Model object
        """
        return models.load('mocsar-rule-v1', num_players=self.game.get_player_num())

    def print_state(self, player: int):
        """
        Print out the state of a given player
        :param player: Player Id to print
        """
        state = self.game.get_state(player)
        print_state(state)

    def print_result(self, player):
        """
        Print the game result when the game is over
        :param player: Player Id to print
        """
        payoffs = self.get_payoffs()
        for player_ in self.game.players.players:
            print(f"Player {player_.__str__()} : points {payoffs[player_.player_id]}")

    @staticmethod
    def print_action(action: str):
        """
        Print out an action in a nice form
        :param action: Code of the action
        """
        # Tuple actions carry extra data; only the code is printed.
        if type(action) is tuple:
            action, _ = action
        print(f"\nAction code:{string_to_action(action)}, action:{action}")
| StarcoderdataPython |
1931151 | <gh_stars>0
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__version__ = "1.3.8"
class PyngrokError(Exception):
    """Base class for every error raised by `pyngrok`."""

    pass
class PyngrokNgrokInstallError(PyngrokError):
    """Raised when downloading or installing the `ngrok` binary fails."""

    pass
class PyngrokNgrokError(PyngrokError):
    """
    Raised when an error occurs interacting directly with the `ngrok` binary.

    :var string error: A description of the error being thrown.
    :var list ngrok_errors: A list of errors reported by the `ngrok` process.
    """

    def __init__(self, error, ngrok_errors=None):
        super(PyngrokNgrokError, self).__init__(error)
        # Avoid a mutable default argument; fall back to a fresh empty list.
        self.ngrok_errors = [] if ngrok_errors is None else ngrok_errors
class PyngrokNgrokHTTPError(PyngrokNgrokError):
    """
    Raised when a request to the `ngrok` web interface fails; `body` carries
    the error response received from `ngrok`.

    :var string error: A description of the error being thrown.
    :var string url: The request URL that failed.
    :var int status_code: The response status code from `ngrok`.
    :var string message: The response message from `ngrok`.
    :var dict headers: The request headers sent to `ngrok`.
    :var string body: The response body from `ngrok`.
    """

    def __init__(self, error, url, status_code, message, headers, body):
        super(PyngrokNgrokHTTPError, self).__init__(error)
        # Keep the full request/response context for callers to inspect.
        self.url = url
        self.status_code = status_code
        self.message = message
        self.headers = headers
        self.body = body
class PyngrokNgrokURLError(PyngrokNgrokError):
    """
    Raised when initiating an API request fails at the URL level.

    :var string error: A description of the error being thrown.
    :var string reason: The reason for the URL error.
    """

    def __init__(self, error, reason):
        super(PyngrokNgrokURLError, self).__init__(error)
        self.reason = reason
| StarcoderdataPython |
6668172 | <gh_stars>0
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from nova.network.neutronv2 import api as neutron
from nova.virt.ironic import driver as ironic_driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class CiscoIronicDriver(ironic_driver.IronicDriver):
    """Hypervisor driver for Ironic - bare metal provisioning.

    Extends the stock Ironic driver to create/delete UCS vNICs through the
    node's vendor passthru interface when VIFs are plugged/unplugged.
    """

    def macs_for_instance(self, instance):
        # NOTE(review): returning None appears to mean "no MAC restriction"
        # per the upstream nova driver contract -- confirm against the base
        # class documentation.
        return None

    def _plug_vifs(self, node, instance, network_info):
        """Create a UCS vNIC (via Ironic vendor passthru) for each VIF."""
        LOG.debug('Plug VIFs called for instance', instance=instance)
        node_uuid = instance.get('node')
        # Admin client: the segmentation id is read from provider attributes,
        # which are not visible to regular tenants.
        client = neutron.get_client(None, admin=True)
        for vif in network_info:
            network = client.show_network(vif['network']['id'])
            net_info = {
                'vlan': network['network']['provider:segmentation_id'],
                'mac': vif['address'],
                'uuid': vif['id'],
                'pxe': False
            }
            self.ironicclient.call("node.vendor_passthru", node_uuid,
                                   "add_vnic", args=net_info)
        LOG.debug('Plug VIFs successful for instance', instance=instance)

    def _unplug_vifs(self, node, instance, network_info):
        """Delete the UCS vNIC (via Ironic vendor passthru) for each VIF."""
        node_uuid = instance.get('node')
        # Delete vnics from UCS for this node via vendor passthru
        for vif in network_info:
            net_info = {
                'uuid': vif['id']
            }
            self.ironicclient.call("node.vendor_passthru", node_uuid,
                                   "delete_vnic", args=net_info)
| StarcoderdataPython |
3370883 | <filename>radpress/migrations/0001_initial.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration for the radpress app.

    Auto-generated (South schemamigration); creates Tag, Article, ArticleTag,
    Page and Menu tables.  Do not hand-edit the `models` frozen ORM below.
    """

    def forwards(self, orm):
        """Apply the migration: create all radpress tables and constraints."""
        # Adding model 'Tag'
        db.create_table('radpress_tag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
        ))
        db.send_create_signal('radpress', ['Tag'])
        # Adding model 'Article'
        db.create_table('radpress_article', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
            ('content', self.gf('django.db.models.fields.TextField')()),
            ('content_body', self.gf('django.db.models.fields.TextField')()),
            ('is_published', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('radpress', ['Article'])
        # Adding model 'ArticleTag' (M2M through-table between Article and Tag)
        db.create_table('radpress_articletag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('tag', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['radpress.Tag'])),
            ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['radpress.Article'])),
        ))
        db.send_create_signal('radpress', ['ArticleTag'])
        # Adding model 'Page'
        db.create_table('radpress_page', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
            ('content', self.gf('django.db.models.fields.TextField')()),
            ('content_body', self.gf('django.db.models.fields.TextField')()),
            ('is_published', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('radpress', ['Page'])
        # Adding model 'Menu'
        db.create_table('radpress_menu', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=3)),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['radpress.Page'], unique=True)),
        ))
        db.send_create_signal('radpress', ['Menu'])
        # Adding unique constraint on 'Menu', fields ['order', 'page']
        db.create_unique('radpress_menu', ['order', 'page_id'])

    def backwards(self, orm):
        """Revert the migration: drop constraints first, then all tables."""
        # Removing unique constraint on 'Menu', fields ['order', 'page']
        db.delete_unique('radpress_menu', ['order', 'page_id'])
        # Deleting model 'Tag'
        db.delete_table('radpress_tag')
        # Deleting model 'Article'
        db.delete_table('radpress_article')
        # Deleting model 'ArticleTag'
        db.delete_table('radpress_articletag')
        # Deleting model 'Page'
        db.delete_table('radpress_page')
        # Deleting model 'Menu'
        db.delete_table('radpress_menu')

    # Frozen ORM snapshot used by South at migration time (auto-generated).
    models = {
        'radpress.article': {
            'Meta': {'ordering': "('-created_at', '-updated_at')", 'object_name': 'Article'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'content_body': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['radpress.Tag']", 'null': 'True', 'through': "orm['radpress.ArticleTag']", 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'radpress.articletag': {
            'Meta': {'object_name': 'ArticleTag'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['radpress.Article']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['radpress.Tag']"})
        },
        'radpress.menu': {
            'Meta': {'unique_together': "(('order', 'page'),)", 'object_name': 'Menu'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['radpress.Page']", 'unique': 'True'})
        },
        'radpress.page': {
            'Meta': {'ordering': "('-created_at', '-updated_at')", 'object_name': 'Page'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'content_body': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'radpress.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        }
    }

    complete_apps = ['radpress']
3370943 | import cc_dat_utils
#Part 1
# Input .dat file to convert and the text file to write its representation to.
# (The original declared the same path twice -- once at module level and once
# inside main -- and opened the output file without closing it on error.)
input_dat_file = "data/pfgd_test.dat"
output_txt_file = "data/pfgd_test.txt"

if __name__ == '__main__':
    # Use cc_dat_utils.make_cc_data_from_dat() to load the file specified by
    # input_dat_file, then write its string representation to the output file.
    result = cc_dat_utils.make_cc_data_from_dat(input_dat_file)
    # `with` guarantees the file is closed even if the write raises.
    with open(output_txt_file, "w") as f:
        f.write(str(result))
| StarcoderdataPython |
6599958 | import unittest
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer, String
from sqlalchemy import *
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.engine.reflection import Inspector
import sqlite3
class TestSqlite3(unittest.TestCase):
    """Scratch tests for working with two sqlite database files.

    Only ``test_sqlite3`` is active; the ATTACH/SQLAlchemy experiment below is
    kept as a disabled draft inside a bare string literal.
    """

    def test_sqlite3(self):
        """List the databases attached to a plain sqlite3 connection."""
        # Connecting creates the files on disk as a side effect.
        testa = sqlite3.connect('testa.sqlite3')
        # testb is opened only to create its file; it is otherwise unused here.
        testb = sqlite3.connect('testb.sqlite3')
        cur = testa.cursor()
        # "pragma database_list" reports the databases visible on this connection.
        cur.execute("pragma database_list;")
        print(cur.fetchall())
    """
    def test_attach(self):
        engine = create_engine("sqlite:///testa.sqlite3", echo=True)
        engine.execute("ATTACH DATABASE \"./testb.sqlite3\" AS test;")
        inspector = Inspector.from_engine(engine)
        print(inspector.get_table_names(schema="test"))
        metadata = MetaData(engine)
        Base = declarative_base(metadata)
        class ModelA(object):
            pass
        class ModelB(object):
            pass
        modela = Table(
            "modela",
            metadata,
            Column("id", Integer, autoincrement=True, primary_key=True),
            Column("name", String),
            schema="main",
            autoload = True,
        )
        modelb = Table(
            "modelb",
            metadata,
            Column("id", Integer, autoincrement=True, primary_key=True),
            Column("name", String),
            schema="test",
            autoload = True,
        )
        mapper(ModelA, modela)
        mapper(ModelB, modelb)
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind=engine)
        session = Session()
        a = ModelA()
        a.name = "hhh"
        b = ModelB()
        b.name = "ddd"
        session.add(a)
        session.add(b)
        session.commit()
    """
| StarcoderdataPython |
1867806 | import sqlite3
# Load the RPG sample database (file must exist next to this script).
conn = sqlite3.connect('rpg_db.sqlite3')

# Write queries to explore the data.

# 1) How many total Characters are there?
# Create a cursor object.
curs1 = conn.cursor()
# Write the query.
query1 = 'SELECT COUNT(*) FROM charactercreator_character;'
# Execute the query.
curs1.execute(query1)
# NOTE(review): the count is executed but never read -- a curs1.fetchone()
# is still needed to obtain the result.

# 2) How many of each specific subclass?
# Create a cursor object.
curs2 = conn.cursor()
# Write the query.
# TODO(review): query2 was never written -- presumably a COUNT per subclass
# table; confirm the schema before completing this exercise.
# Execute the query.

# 3) How many total items?
# TODO(review): unanswered exercise question.
11311413 | import time
import json
import datetime
import RPi.GPIO as GPIO
from IBMConnector import IBMConnector
from Dallas import Dallas
from DHT import DHT
from SystemData import SystemData
from Weather import Weather
from CSVPersistor import CSVPersistor
from TimedDigitalActuator import TimedDigitalActuator
from Hatch import Hatch
from Picture import Picture
from Stepper import Stepper
def commandCallback(cmd):
    """Dispatch an incoming IBM IoT command to the matching actuator.

    Reads the global ``alloff`` flag and the actuator singletons created in
    the ``__main__`` block; while ``alloff`` is set every command is ignored.
    """
    print("Command received: %s with data: %s" % (cmd.command, cmd.data))
    # Assumes the IoT payload shape {"d": {"value": ...}} -- TODO confirm
    # against the publishing side.
    value = cmd.data['d']['value']
    # Silence mode: drop all remote commands.
    if alloff:
        return
    if cmd.command == "InsideFan":
        insideFan.setTime(value)
    elif cmd.command == "OutsideFan":
        outsideFan.setTime(value)
    elif cmd.command == "Humidifier":
        humidifier.setTime(value)
    elif cmd.command == "Hatch":
        # Hatch position is normalized to [0, 1].
        if value >= 0 and value <= 1:
            hatch.setHatch(value)
    elif cmd.command == "Stepper":
        stepper.setTime(value)
    elif cmd.command == "Picture":
        picture.makePicture()
    else:
        print "Unknown command, ignore"
        print(cmd.command)
if __name__ == '__main__':
    print "MS HAL start"
    # Connectors: IBM IoT cloud uplink and local CSV logging.
    iotfClient = IBMConnector(commandCallback)
    time.sleep(2)
    csvPersistor = CSVPersistor("Plant1")
    time.sleep(1)
    # Sensors: water temperature (Dallas 1-wire), two DHT air sensors
    # (GPIO 23 inside / GPIO 24 outside), Pi system metrics and web weather.
    waterTemperature = Dallas()
    time.sleep(1)
    airInside = DHT(23)
    time.sleep(1)
    airOutside = DHT(24)
    time.sleep(1)
    systemData = SystemData()
    time.sleep(1)
    weather = Weather()
    time.sleep(1)
    # Actuators on GPIO 20/21/16 plus hatch servo and stepper motor.
    outsideFan = TimedDigitalActuator(20)
    time.sleep(1)
    insideFan = TimedDigitalActuator(21)
    time.sleep(1)
    humidifier = TimedDigitalActuator(16)
    time.sleep(1)
    hatch = Hatch()
    time.sleep(1)
    stepper = Stepper()
    time.sleep(1)
    # Camera wrapper.
    picture = Picture("Plant1")
    # Silencer button on GPIO 12 (pull-up; pressed reads low).
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    timeToSilence = 0
    alloff = False
    while True:
        # Button press arms "silence" for one hour: everything is shut off
        # and remote commands are ignored (see commandCallback).
        if not GPIO.input(12):
            print "Silence"
            timeToSilence = time.time() + 60 * 60
        if timeToSilence > time.time():
            print "REMAINE SILENCE"
            hatch.setHatch(1)
            insideFan.setTime(0)
            outsideFan.setTime(0)
            humidifier.setTime(0)
            stepper.setTime(0)
            alloff = True
        else:
            alloff = False
        # Assemble one telemetry sample and push it to IBM + the CSV log.
        now = datetime.datetime.now()
        m = {}
        m['Timestamp'] = "%d-%d-%d-%d-%d-%d" % (now.year, now.month, now.day, now.hour, now.minute, now.second)
        m['Watertemperature'] = waterTemperature.getWaterTemp()
        m['InsideHumidity'] = airInside.getHumidity()
        m['InsideTemperature'] = airInside.getTemperature()
        m['OutsideHumidity'] = airOutside.getHumidity()
        m['OutsideTemperature'] = airOutside.getTemperature()
        m['CPUTemperature'] = systemData.getCPUTemp()
        m['GPUTemperature'] = systemData.getGPUTemp()
        m['CPUUsage'] = systemData.getCPUuse()
        m['Loadlevel'] = systemData.getLoadLevel()
        m['WeatherHumidity'] = weather.getHumidity()
        m['WeatherPressure'] = weather.getPressure()
        m['WeatherPressureTrent'] = weather.getPressureTrent()
        m['WeatherUV'] = weather.getUV()
        m['WeatherPreciptionTotal'] = weather.getPrecipTotal()
        m['WeatherPreciptionHourly'] = weather.getPrecipHrly()
        m['OutsideFan'] = outsideFan.getState()
        m['InsideFan'] = insideFan.getState()
        m['Humidifier'] = humidifier.getState()
        m['Hatch'] = hatch.getHatch()
        m['Stepper'] = stepper.getState()
        m['StepperPosition'] = stepper.getCounter()
        m['AllOff'] = alloff
        iotfClient.pushDataToIBM(m)
        csvPersistor.persist(m)
        time.sleep(1)
| StarcoderdataPython |
4959445 | #! /usr/bin/env python
""" Writes the distribution of halos from an SQL query.
"""
import sys
import argparse
import numpy
from pat.utils import gioSqlite as gio_sqlite
def load_sqlite_data(path, query, sqlite_file):
    """ Loads data using SQLite query.
    """
    print("Reading {}...".format(path))
    # Initialize the GenericIO SQLite bridge with the extension library.
    mgr = gio_sqlite.GioSqlite3()
    mgr.loadGIOSqlite(sqlite_file)
    # Register the GenericIO file under a fixed table alias.
    table_name = "foo_0"
    mgr.createTable(table_name, path)
    # Substitute the alias into the caller-supplied query and run it.
    rows = mgr.runQueryOutputList(query.replace("__TABLE__", table_name))
    # Flatten to a 1-D numpy array.
    flat = numpy.array(rows).flatten()
    assert (len(flat.shape) == 1)
    return flat
# Parse command line.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--input-file", required=True)
parser.add_argument("--output-file", required=True)
parser.add_argument("--sqlite-file", default="/projects/exasky/visio/genericio/frontend/GenericIOSQLite.so")
parser.add_argument("--query", default="select fof_halo_mass from __TABLE__ ORDER BY fof_halo_mass")
parser.add_argument("--xlabel", default="Halo Mass")
parser.add_argument("--ylabel", default="Counts")
parser.add_argument("--xlim", nargs="+", type=float, default=[])
parser.add_argument("--bins", type=float, default=20)
parser.add_argument("--log-bins", action="store_true")
opts = parser.parse_args()

# Read data.
data = numpy.array(load_sqlite_data(opts.input_file, opts.query, opts.sqlite_file))

# Determine the histogram range: explicit --xlim values win, otherwise fall
# back to the data extremes.
# BUG FIX: the original used `min(x_min, data.min())` with x_min/x_max never
# initialized, raising NameError whenever --xlim was not fully specified.
x_min = opts.xlim[0] if len(opts.xlim) > 0 else data.min()
x_max = opts.xlim[1] if len(opts.xlim) > 1 else data.max()

# Set binning and range of histograms.
# Bin edges can be uniform in linear space or in logarithmic space.
if opts.log_bins:
    bins = numpy.logspace(numpy.log10(x_min), numpy.log10(x_max), num=opts.bins)
    bins_range = None
else:
    bins = opts.bins
    bins_range = (x_min, x_max)

# Create the histogram and duplicate edges/counts into a step-plot layout.
hist, bin_edges = numpy.histogram(data, bins=bins, range=bins_range)
hist = numpy.hstack([(0), numpy.repeat(hist, 2), (0)])
bin_edges = numpy.hstack([(bin_edges[0], bin_edges[0]),
                          numpy.repeat(bin_edges[1:-1], 2),
                          (bin_edges[-1], bin_edges[-1])])

# Save results as CSV with a labeled header.
delimiter = ","
results = numpy.column_stack([bin_edges, hist])
header = delimiter.join(map(str, [opts.xlabel, opts.ylabel]))
numpy.savetxt(opts.output_file, results, header=header, delimiter=delimiter)

print("Done!")
8072396 | <reponame>Staberinde/data-hub-api
# Used to calculate the quote expiry date, which is the earliest of:
#   [delivery date - QUOTE_EXPIRY_DAYS_BEFORE_DELIVERY days], or
#   [date the quote was created + QUOTE_EXPIRY_DAYS_FROM_NOW days].
QUOTE_EXPIRY_DAYS_BEFORE_DELIVERY = 21
QUOTE_EXPIRY_DAYS_FROM_NOW = 30
| StarcoderdataPython |
5069792 | <filename>documentation/scripts/render_md.py
#!/usr/bin/env python
"""
The script adds rendered images to all plantUML sections in the markdown files.
Usage:
1. Include plantUML diagram picture to markdown:
> render_md.py -i ABC.md -o XYZ.md --action render
> render_md.py -i ABC.md -o XYZ.md
2. Include plantUML diagram picture in place:
> render_md.py -i ABC.md
3. Open html to preview:
> render_md.py -i ABC.md --action open
4. Render with preview:
> render_md.py -i ABC.md -o XYZ.md --all
> render_md.py -i ABC.md --all
Import as:
import documentation.scripts.render_md as dscremd
"""
import argparse
import logging
import os
import tempfile
from typing import List, Tuple
import helpers.hdbg as hdbg
import helpers.hio as hio
import helpers.hparser as hparser
import helpers.hprint as hprint
import helpers.hsystem as hsysinte
_LOG = logging.getLogger(__name__)
# #############################################################################
_ACTION_OPEN = "open"
_ACTION_RENDER = "render"
_VALID_ACTIONS = [_ACTION_OPEN, _ACTION_RENDER]
_DEFAULT_ACTIONS = [_ACTION_RENDER]
def _open_html(md_file: str) -> None:
    """
    Pandoc markdown to html and open it.
    """
    _LOG.info("\n%s", hprint.frame("Process markdown to html"))
    # Invoke the sibling pandoc.py script, skipping the gdrive/cleanup steps.
    script_dir = os.path.abspath(os.path.dirname(__file__))
    out_dir = os.path.split(md_file)[0]
    cmd = "%s/pandoc.py -t %s -i %s --skip_action %s --skip_action %s --tmp_dir %s" % (
        script_dir,
        "html",
        md_file,
        "copy_to_gdrive",
        "cleanup_after",
        out_dir,
    )
    hsysinte.system(cmd)
def _uml_file_names(
dest_file: str, idx: int, extension: str
) -> Tuple[str, str, str]:
"""
Generate plantUML picture filename, temporary UML filename, full path to
picture. We want to assign the name of the image relative to the
originating file and index. In this way if we update the image, the name of
the image doesn't change.
:param dest_file: markdowm target file where diagrams should be included
:param idx: order number of the UML appearence at the input file
:param extension: extension for image file
:return:
- full path to UML picture dir
- relative picture file name
- temporary UML file name
"""
sub_dir = "plantuml-images"
dst_dir, dest_file_name = os.path.split(os.path.abspath(dest_file))
file_name_body = os.path.splitext(dest_file_name)[0]
# Create image name.
img_name = "%s.%s.%s" % (file_name_body, idx, extension)
# Get dir with images.
abs_path = os.path.join(dst_dir, sub_dir)
# Get relative path to image.
rel_path = os.path.join(sub_dir, img_name)
# Get temporary file name.
tmp_name = "%s.%s.puml" % (file_name_body, idx)
return (abs_path, rel_path, tmp_name)
def _render_command(uml_file: str, pic_dest: str, extension: str) -> str:
    """
    Build the shell command that renders `uml_file` into `pic_dest`.
    """
    # Only these output formats are supported by our plantuml invocation.
    hdbg.dassert_in(extension, ["svg", "png"])
    return "plantuml -t%s -o %s %s" % (extension, pic_dest, uml_file)
def _render_plantuml_code(
    uml_text: str,
    out_file: str,
    idx: int,
    extension: str,
    dry_run: bool,
) -> str:
    """
    Render one PlantUML snippet into an image file.

    :param uml_text: UML format text
    :param out_file: full path to the output md file
    :param idx: index of the UML appearance
    :param extension: type of rendered image
    :param dry_run: if True, doesn't execute the plantuml command
    :return: relative path to the image
    """
    # Ensure the snippet is wrapped in @startuml / @enduml markers.
    uml_content = uml_text
    if not uml_content.startswith("@startuml"):
        uml_content = "@startuml\n%s" % uml_content
    if not uml_content.endswith("@enduml"):
        uml_content = "%s\n@enduml" % uml_content
    # Create the enclosing directory, if needed.
    hio.create_enclosing_dir(out_file, incremental=True)
    target_dir, rel_path, tmp_file_name = _uml_file_names(
        out_file, idx, extension
    )
    # Stage the UML source in a temporary file for plantuml to consume.
    tmp_file = os.path.join(tempfile.gettempdir(), tmp_file_name)
    hio.to_file(tmp_file, uml_content)
    # Convert the plantUML txt.
    cmd = _render_command(tmp_file, target_dir, extension)
    _LOG.info("Creating uml diagram from %s source.", tmp_file)
    _LOG.info("Saving image to %s.", target_dir)
    _LOG.info("> %s", cmd)
    hsysinte.system(cmd, dry_run=dry_run)
    return rel_path
def _render_plantuml(
    in_txt: List[str], out_file: str, extension: str, dry_run: bool
) -> List[str]:
    """
    Add a rendered image tag after every ```plantuml code block.

    :param in_txt: list of strings to process
    :param out_file: name of outcome file
    :param extension: extension for rendered images
    :param dry_run: only changes text, skipping image creation
    :return: the input lines, with a markdown image line appended after each
        closed plantuml fence
    """
    # Store the output.
    out_txt: List[str] = []
    # Store the plantuml code found so far.
    plantuml_txt: List[str] = []
    # Count the index of the plantuml found in the file.
    plantuml_idx = 0
    # Store the state of the parser.
    state = "searching"
    for i, line in enumerate(in_txt):
        _LOG.debug("%d: %s -> state=%s", i, line, state)
        out_txt.append(line)
        if line.strip() == "```plantuml":
            # Found the beginning of a plantuml text.
            hdbg.dassert_eq(state, "searching")
            plantuml_txt = []
            plantuml_idx += 1
            state = "found_plantuml"
            _LOG.debug(" -> state=%s", state)
        elif line.strip() == "```" and state == "found_plantuml":
            # Closing fence: render the collected UML and link the image.
            img_file_name = _render_plantuml_code(
                uml_text="\n".join(plantuml_txt),
                out_file=out_file,
                idx=plantuml_idx,
                extension=extension,
                dry_run=dry_run,
            )
            # BUG FIX: the original appended `"" % img_file_name`, which
            # raises TypeError; the markdown image tag literal had been
            # garbled -- reconstructed as a standard inline image.
            out_txt.append("![](%s)" % img_file_name)
            state = "searching"
            _LOG.debug(" -> state=%s", state)
        elif line.strip() != "```" and state == "found_plantuml":
            # BUG FIX: the original compared the *method object* `line.strip`
            # (missing call parentheses) against "```", which is always true;
            # the elif ordering masked the bug, but the call is now explicit.
            plantuml_txt.append(line)
    return out_txt
def _parse() -> argparse.ArgumentParser:
    """Build the command line parser: I/O args, action selection, debug flags."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("--in_file_name", action="store", default=None) if False else None  # noqa
    hparser.add_input_output_args(parser)
    hparser.add_action_arg(parser, _VALID_ACTIONS, _DEFAULT_ACTIONS)
    # Debug arguments.
    parser.add_argument(
        "--dry_run",
        action="store_true",
        help="Don't create images with plantuml command",
    )
    hparser.add_verbosity_arg(parser)
    return parser
def _main(parser: argparse.ArgumentParser) -> None:
    """Render every plantuml block of the input file into the output file."""
    args = parser.parse_args()
    hdbg.init_logger(verbosity=args.log_level, use_exec_path=True)
    # Resolve the input/output files.
    in_file, out_file = hparser.parse_input_output_args(args, clear_screen=True)
    # stdin / stdout are not supported.
    hdbg.dassert_ne(in_file, "-")
    hdbg.dassert_ne(out_file, "-")
    # Figure out which actions to perform.
    actions = hparser.select_actions(args, _VALID_ACTIONS, _DEFAULT_ACTIONS)
    # Rendered images default to png; an open-only run writes to a temporary
    # markdown file and uses svg instead.
    extension = "png"
    if actions == [_ACTION_OPEN]:
        out_file = tempfile.mktemp(suffix=".md")
        extension = "svg"
    # Read input, render, and write the updated text.
    txt_in = hio.from_file(in_file).split("\n")
    txt_out = _render_plantuml(txt_in, out_file, extension, args.dry_run)
    hio.to_file(out_file, "\n".join(txt_out))
    # Open the result in a browser if requested.
    if _ACTION_OPEN in actions:
        _open_html(out_file)
# Script entry point.
if __name__ == "__main__":
    _main(_parse())
| StarcoderdataPython |
273680 | <reponame>TirianShirley/learning_python
#!/usr/bin/env python3
# Bug fix: the original line mixed quote styles (`print('hello world")`),
# which is a SyntaxError.
print('hello world')
| StarcoderdataPython |
4916284 | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from accounts.views import my_logout
# URL routes for the project.
# NOTE(review): string view references (e.g. 'shell.views.landing') are the
# pre-Django-1.10 syntax -- confirm the project pins an older Django version.
urlpatterns = [
    # Examples:
    url(r'^$', 'shell.views.landing', name='landing'),
    # Authentication endpoints.
    url(r'^login/', 'accounts.views.login', name='login'),
    url(r'^login_failed/', 'accounts.views.login_failed', name='login_failed'),
    # Reports for a single "mesa" and the list of all mesas.
    url(r'^mesa/(?P<mesa_id>\d+)/reportes/$', 'genericform.views.formularios_por_mesa', name='formularios_por_mesa'),
    url(r'^mesas/$', 'genericform.views.mesas', name='mesas'),
    # url(r'^login/', include(admin.site.urls)),
    url(r'^logout/$', my_logout),
    url(r'^admin/', include(admin.site.urls)),
    # DRF browsable-API auth and the generic-form API.
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api-forms/', include('genericform.urls')),
    # Client-side rendered report views (same template, different routes).
    url(r'^reporte/', TemplateView.as_view(template_name='djform.html')),
    url(r'^mesa/(?P<mesa_id>\d+)/reporte/(?P<reporte_id>\d+)$', TemplateView.as_view(template_name='djform.html')),
]
| StarcoderdataPython |
1947505 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Optional
import tensorflow as tf
from merlin.models.utils.doc_utils import docstring_parameter
from merlin.schema import Schema
from ..core import (
TABULAR_MODULE_PARAMS_DOCSTRING,
BlockType,
Filter,
TabularAggregationType,
TabularBlock,
)
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ContinuousFeatures(TabularBlock):
    """Input block for continuous features.

    Filters the input dictionary down to ``features`` and guarantees each
    selected tensor is at least rank 2 (a trailing unit dimension is added
    to rank-1 tensors).

    Parameters
    ----------
    features: List[str]
        List of continuous features to include in this module.
    {tabular_module_parameters}
    """

    def __init__(
        self,
        features: List[str],
        pre: Optional[BlockType] = None,
        post: Optional[BlockType] = None,
        aggregation: Optional[TabularAggregationType] = None,
        schema: Optional[Schema] = None,
        name: Optional[str] = None,
        **kwargs
    ):
        super().__init__(
            pre=pre,
            post=post,
            aggregation=aggregation,
            schema=schema,
            name=name,
            is_input=True,
            **kwargs
        )
        # Filter block that selects only the requested feature names.
        self.filter_features = Filter(features)

    @classmethod
    def from_features(cls, features, **kwargs):
        # Alternate constructor kept for API symmetry with other blocks.
        return cls(features, **kwargs)

    def call(self, inputs, *args, **kwargs):
        cont_features = self.filter_features(inputs)
        # Promote rank-1 (batch,) tensors to (batch, 1) so downstream
        # aggregations can concatenate along the last axis.
        cont_features = {
            k: tf.expand_dims(v, -1) if len(v.shape) == 1 else v for k, v in cont_features.items()
        }
        return cont_features

    def compute_call_output_shape(self, input_shapes):
        cont_features_sizes = self.filter_features.compute_output_shape(input_shapes)
        # Mirror the expand_dims performed in call() for rank-1 shapes.
        cont_features_sizes = {
            k: tf.TensorShape(list(v) + [1]) if len(v) == 1 else v
            for k, v in cont_features_sizes.items()
        }
        return cont_features_sizes

    def get_config(self):
        # The feature list is the only extra state needed to re-create the
        # block during keras deserialization.
        config = super().get_config()
        config["features"] = self.filter_features.feature_names
        return config

    def _get_name(self):
        return "ContinuousFeatures"

    def repr_ignore(self) -> List[str]:
        # Exclude the nested Filter from the default repr.
        return ["filter_features"]

    def repr_extra(self):
        return ", ".join(sorted(self.filter_features.feature_names))
| StarcoderdataPython |
5073539 | from django.db import models
from PIL import Image
# Create your models here.
class Imgcompress(models.Model):
    """Uploaded image that is downscaled to at most 300x300 on save."""

    img = models.ImageField(
        upload_to="imgupload/compress/", blank=True)

    def save(self, *args, **kwargs):
        """Persist the model, then shrink the stored image in place.

        Bug fix: the original ``save(self)`` silently dropped Django's
        ``save()`` arguments (``using=``, ``update_fields=``, ...); they are
        now forwarded to ``super().save()``.
        """
        # Save first so the file exists on disk and ``self.img.path`` is valid.
        super().save(*args, **kwargs)
        # Use a context manager so the file handle is always released.
        with Image.open(self.img.path) as image:
            if image.height > 300 or image.width > 300:
                # thumbnail() resizes in place, preserving the aspect ratio.
                image.thumbnail((300, 300))
                image.save(self.img.path)
| StarcoderdataPython |
1827216 | import setuptools
# Read the long description for PyPI. An explicit encoding avoids depending
# on the platform default.
with open("README.md", encoding="utf-8") as fh:
    long_description = fh.read()

# Bug fix: readlines() kept trailing newlines (and possible blank lines) in
# install_requires; strip them so setuptools sees clean requirement specs.
with open("requirements.txt", encoding="utf-8") as fh:
    requirements = [line.strip() for line in fh if line.strip()]

setuptools.setup(
    name="role-pattern-nlp",
    version="0.2.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description=" Build and match patterns for semantic role labelling",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/cyclecycle/role-pattern-nlp",
    packages=setuptools.find_packages(),
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| StarcoderdataPython |
3420224 | import random
class ExampleGenerator(object):
    """Generate a random multi-label data set and write it in a sparse
    "labels feat:val ..." text format.

    Behavior depends on the exact order of ``random`` calls, so the code is
    left untouched.
    """

    def __init__(self, M, N):
        # NOTE(review): the label dimension is set equal to the feature
        # dimension M -- confirm whether a separate L parameter was intended.
        self.L = M # label space dimension
        self.M = M # feature vector space dimension
        self.N = N # size of data set
        self.data_set = {} # data_set
        for p in range(N):
            # Pick 1-10 active indices per point.
            # NOTE(review): random.sample raises ValueError if self.L < 10
            # and random_size > self.L -- confirm M >= 10 is guaranteed.
            random_size = random.randint(1,10)
            random_index = random.sample(range(self.L), random_size)
            random_vector = {q: 0 for q in random_index}
            for q in random_index:
                # Each active feature is drawn from [1, 2] or [-2, -1].
                if random.choice((True, False)):
                    random_vector[q] = random.uniform(1.0, 2.0)
                else:
                    random_vector[q] = random.uniform(-2.0, -1.0)
            self.data_set[p] = {"features": random_vector}
        for p in range(N):
            # A point's labels are the indices with positive feature values.
            vector_dict = self.data_set[p]["features"]
            self.data_set[p]["labels"] = [q for q in vector_dict.keys() if vector_dict[q] > 0]

    def file_write(self, file_name):
        # File format: header "N M L", then one line per point:
        # "<comma-separated labels> <idx:val> <idx:val> ...".
        L, M, N = self.L, self.M, self.N
        with open(file_name, 'w') as f:
            f.write("{} {} {}\n".format(N, M, L))
            for p in range(N):
                labels = ','.join(map(str, self.data_set[p]["labels"]))
                vector_dict = self.data_set[p]["features"]
                features = ' '.join(["{}:{}".format(q, vector_dict[q]) for q in vector_dict.keys()])
                f.write("{} {}\n".format(labels, features))
if __name__ == '__main__':
    # Write a synthetic 100-point training set and 10-point test set.
    e = ExampleGenerator(M=10, N=100)
    e.file_write("train.txt")
    e = ExampleGenerator(M=10, N=10)
    e.file_write("test.txt")
| StarcoderdataPython |
3439965 | from twine.cli import dispatch as twine_upload
from .filesystem import find_files
def upload(*globs, dry_run=False):
    """Upload the distribution files matching *globs* to PyPI via twine.

    With ``dry_run=True`` the files are still resolved but nothing is
    uploaded.
    """
    matched = find_files(globs, 'PyPI')
    if dry_run:
        print('NOTE: Skipping PyPI upload step since this is a dry run.')
        return
    twine_upload(["upload", *matched])
| StarcoderdataPython |
6657474 | <filename>Spell Compendium/scr/tpModifiers/sp_unholy_storm.py<gh_stars>1-10
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
from utilities import *
print "Registering sp-Unholy Storm"
def unholyStormSpellOnConditionAdd(attachee, args, evt_obj):
    # Runs once when the spell condition is attached: spawn the storm
    # particles and register a 20 ft object event for critters
    # entering/leaving the area.
    spellPacket = tpdp.SpellPacket(args.get_arg(0))
    # NOTE(review): reuses the 'sp-Holy Storm' particle effect -- confirm no
    # dedicated unholy variant exists.
    unholyStormPartsysId = game.particles('sp-Holy Storm', attachee)
    spellPacket.add_spell_object(attachee, unholyStormPartsysId) # store the spell obj and the particle sys
    radiusUnholyStorm = 20.0
    unholyStormEventId = attachee.object_event_append(OLC_CRITTERS, radiusUnholyStorm)
    # arg 3 keeps the event id so AoE callbacks can be matched to this
    # spell instance.
    args.set_arg(3, unholyStormEventId)
    spellPacket.update_registry()
    return 0
def unholyStormSpellOnEnteredAoe(attachee, args, evt_obj):
spellPacket = tpdp.SpellPacket(args.get_arg(0))
spellCaster = spellPacket.caster
spellTarget = evt_obj.target
unholyStormEventId = args.get_arg(3)
print "Unholy Storm enter: ", spellTarget
if unholyStormEventId != evt_obj.evt_id:
return 0
if spellPacket.add_target(spellTarget, 0):
print "Added: ", spellTarget
spellTarget.condition_add_with_args('Unholy Storm Effect', args.get_arg(0), args.get_arg(1), 0, args.get_arg(3))
return 0
def unholyStormSpellOnBeginRound(attachee, args, evt_obj):
    # Each round: pick one random living good-aligned outsider in the storm
    # and hit it with a 5d6 burst.
    # NOTE(review): this hook is commented out in the registrations below --
    # confirm whether the burst is meant to be active.
    crittersInAoe = game.obj_list_cone(attachee, OLC_CRITTERS, 20, 0, 360)
    spellPacket = tpdp.SpellPacket(args.get_arg(0))
    neededAlignment = [ALIGNMENT_LAWFUL_GOOD, ALIGNMENT_NEUTRAL_GOOD, ALIGNMENT_CHAOTIC_GOOD]
    goodOutsiderInAoe = []
    spellDamageDice = dice_new('1d6')
    spellDamageDice.number = 5
    for target in crittersInAoe:
        targetIsDead = target.d20_query(Q_Dead)
        if target.stat_level_get(stat_alignment) in neededAlignment:
            hasGoodAlignment = True
        else:
            hasGoodAlignment = False
        if target.is_category_type(mc_type_outsider):
            isOutsider = True
        else:
            isOutsider = False
        if not targetIsDead and hasGoodAlignment and isOutsider:
            goodOutsiderInAoe.append(target)
    if not goodOutsiderInAoe:
        return 0
    # Select one eligible target uniformly at random via a dice roll.
    numberOfTargets = len(goodOutsiderInAoe)
    randomDice = dice_new('1d{}'.format(numberOfTargets))
    selectTarget = randomDice.roll() - 1 #First List Element is 0 not 1
    spellTarget = goodOutsiderInAoe[selectTarget]
    game.particles('sp-Unholy Storm-hit', spellTarget)
    game.create_history_freeform("{} is affected by ~Unholy Storm~[TAG_SPELLS_UNHOLY_STORM] burst\n\n".format(spellTarget.description))
    spellTarget.float_text_line("Unholy Storm burst", tf_red)
    # NOTE(review): burst damage type is D20DT_FIRE while the per-round
    # effect uses D20DT_MAGIC -- confirm which is intended.
    spellTarget.spell_damage(spellPacket.caster, D20DT_FIRE, spellDamageDice, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, args.get_arg(0))
    return 0
def unholyStormSpellHasSpellActive(attachee, args, evt_obj):
    # Answer the "is this spell active?" D20 query.
    packet = tpdp.SpellPacket(args.get_arg(0))
    if packet.spell_enum == evt_obj.data1:
        evt_obj.return_val = 1
    return 0
def unholyStormSpellKilled(attachee, args, evt_obj):
    # The caster died: tear down the spell and its modifier.
    args.remove_spell()
    args.remove_spell_mod()
    return 0
def unholyStormSpellSpellEnd(attachee, args, evt_obj):
    # Debug trace only; the standard AoE spell-end hook does the cleanup.
    print "Unholy Storm SpellEnd"
    return 0
# Register the caster-side spell modifier and wire up its event hooks.
unholyStormSpell = PythonModifier("sp-Unholy Storm", 4) # spell_id, duration, empty, eventId
unholyStormSpell.AddHook(ET_OnConditionAdd, EK_NONE, unholyStormSpellOnConditionAdd, ())
unholyStormSpell.AddHook(ET_OnObjectEvent, EK_OnEnterAoE, unholyStormSpellOnEnteredAoe, ())
#unholyStormSpell.AddHook(ET_OnBeginRound, EK_NONE, unholyStormSpellOnBeginRound, ())
unholyStormSpell.AddHook(ET_OnD20Signal, EK_S_Spell_End, unholyStormSpellSpellEnd, ())
unholyStormSpell.AddHook(ET_OnD20Query, EK_Q_Critter_Has_Spell_Active, unholyStormSpellHasSpellActive, ())
unholyStormSpell.AddHook(ET_OnD20Signal, EK_S_Killed, unholyStormSpellKilled, ())
# Standard boilerplate: dispel checks, teleport handling, countdown, AoE end.
unholyStormSpell.AddSpellDispelCheckStandard()
unholyStormSpell.AddSpellTeleportPrepareStandard()
unholyStormSpell.AddSpellTeleportReconnectStandard()
unholyStormSpell.AddSpellCountdownStandardHook()
unholyStormSpell.AddAoESpellEndStandardHook()
### Begin Unholy Storm Effect ###
def unholyStormEffectOnBeginRound(attachee, args, evt_obj):
    # Per-target round tick: count down the duration, then deal damage to
    # good-aligned critters (4d6 for outsiders, 2d6 otherwise).
    args.set_arg(1, args.get_arg(1)-evt_obj.data1) # Ticking down duration
    if args.get_arg(1) < 0:
        # Duration expired: drop the effect.
        args.condition_remove()
    else:
        spellPacket = tpdp.SpellPacket(args.get_arg(0))
        spellTarget = attachee
        neededAlignment = [ALIGNMENT_LAWFUL_GOOD, ALIGNMENT_NEUTRAL_GOOD, ALIGNMENT_CHAOTIC_GOOD]
        spellDamageDice = dice_new('1d6')
        if not spellTarget.stat_level_get(stat_alignment) in neededAlignment:
            # Non-good critters take no damage from the storm.
            return 0
        elif spellTarget.is_category_type(mc_type_outsider):
            spellDamageDice.number = 4
        else:
            spellDamageDice.number = 2
        game.create_history_freeform("{} is affected by ~Unholy Storm~[TAG_SPELLS_UNHOLY_STORM]\n\n".format(spellTarget.description))
        spellTarget.float_text_line("Unholy Storm", tf_red)
        spellTarget.spell_damage(spellPacket.caster, D20DT_MAGIC, spellDamageDice, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, args.get_arg(0))
    return 0
def unholyStormEffectSkillPenalty(attachee, args, evt_obj):
    # Registered for Listen, Search and Spot (see hook registrations below).
    evt_obj.bonus_list.add(-4, 160, "~Unholy Storm~[TAG_SPELLS_UNHOLY_STORM] Penalty") #Unholy Storm gives a -4 penalty on Listen, Search and Spot Checks
    return 0
def unholyStormEffectAttackPenalty(attachee, args, evt_obj):
    # Only ranged attacks are penalized; melee is unaffected.
    if evt_obj.attack_packet.get_flags() & D20CAF_RANGED:
        evt_obj.bonus_list.add(-4, 160, "~Unholy Storm~[TAG_SPELLS_UNHOLY_STORM] Penalty") #Unholy Storm gives a -4 penalty on ranged attacks made in, into, or out of the storm
    return 0
def unholyStormEffectTooltip(attachee, args, evt_obj):
    # Show the remaining duration in the hover tooltip, with a singular or
    # plural "round(s)" suffix.
    rounds_left = args.get_arg(1)
    unit = "round" if rounds_left == 1 else "rounds"
    evt_obj.append("Unholy Storm ({} {})".format(rounds_left, unit))
    return 0
def unholyStormEffectEffectTooltip(attachee, args, evt_obj):
    # Effect-bar tooltip: same duration text as the regular tooltip, keyed
    # on the UNHOLY_STORM indicator hash.
    rounds_left = args.get_arg(1)
    unit = "round" if rounds_left == 1 else "rounds"
    evt_obj.append(tpdp.hash("UNHOLY_STORM"), -2, " ({} {})".format(rounds_left, unit))
    return 0
def unholyStormEffectOnLeaveAoE(attachee, args, evt_obj):
spellPacket = tpdp.SpellPacket(args.get_arg(0))
unholyStormEventId = args.get_arg(3)
if unholyStormEventId != evt_obj.evt_id:
print "ID Mismach"
return 0
args.condition_remove()
return 0
def unholyStormEffectOnRemove(attachee, args, evt_obj):
    # When the effect condition is removed, also drop this critter from the
    # spell's target list.
    tpdp.SpellPacket(args.get_arg(0)).remove_target(attachee)
    return 0
def unholyStormEffectSpellKilled(attachee, args, evt_obj):
    # Target died: remove the effect condition.
    args.condition_remove()
    return 0
def unholyStormEffectHasSpellActive(attachee, args, evt_obj):
    # Answer the "is this spell active?" query for the per-target effect.
    spellPacket = tpdp.SpellPacket(args.get_arg(0))
    if evt_obj.data1 == spellPacket.spell_enum:
        evt_obj.return_val = 1
    return 0
# Register the per-target effect modifier and wire up its event hooks.
unholyStormEffect = PythonModifier("Unholy Storm Effect", 4) #spell_id, duration, empty, eventId
unholyStormEffect.AddHook(ET_OnBeginRound, EK_NONE, unholyStormEffectOnBeginRound, ())
# -4 penalty to perception-style skills while inside the storm.
unholyStormEffect.AddHook(ET_OnGetSkillLevel, EK_SKILL_LISTEN, unholyStormEffectSkillPenalty, ())
unholyStormEffect.AddHook(ET_OnGetSkillLevel, EK_SKILL_SEARCH, unholyStormEffectSkillPenalty, ())
unholyStormEffect.AddHook(ET_OnGetSkillLevel, EK_SKILL_SPOT, unholyStormEffectSkillPenalty, ())
# Ranged-attack penalty applies both as attacker and as defender-side bonus.
unholyStormEffect.AddHook(ET_OnToHitBonus2, EK_NONE, unholyStormEffectAttackPenalty, ())
unholyStormEffect.AddHook(ET_OnToHitBonusFromDefenderCondition, EK_NONE, unholyStormEffectAttackPenalty, ())
unholyStormEffect.AddHook(ET_OnGetTooltip, EK_NONE, unholyStormEffectTooltip, ())
unholyStormEffect.AddHook(ET_OnGetEffectTooltip, EK_NONE, unholyStormEffectEffectTooltip, ())
unholyStormEffect.AddHook(ET_OnObjectEvent, EK_OnLeaveAoE, unholyStormEffectOnLeaveAoE, ())
unholyStormEffect.AddHook(ET_OnConditionRemove, EK_NONE, unholyStormEffectOnRemove, ())
unholyStormEffect.AddHook(ET_OnD20Signal, EK_S_Killed, unholyStormEffectSpellKilled, ())
9792722 | <filename>pyapprox/optimization.py
import numpy as np
from scipy.optimize import minimize, Bounds
from functools import partial
from scipy.stats import gaussian_kde as KDE
from pyapprox.configure_plots import *
import scipy.stats as ss
from pyapprox.utilities import get_all_sample_combinations
def approx_jacobian(func, x, *args, epsilon=np.sqrt(np.finfo(float).eps)):
    """
    Approximate the Jacobian of ``func`` at ``x`` with forward finite
    differences.

    Parameters
    ----------
    func : callable
        Function returning a 1D array of shape (nqoi,) (or an (nqoi, 1)
        column, which is squeezed).
    x : np.ndarray
        1D array of shape (nvars,) or a column of shape (nvars, 1).
    *args : tuple
        Extra positional arguments forwarded to ``func``.
    epsilon : float
        Finite difference step size.

    Returns
    -------
    np.ndarray (nqoi, nvars)
        The approximate Jacobian.
    """
    # Bug fix: np.asfarray was removed in NumPy 2.0; np.asarray with an
    # explicit float dtype is the documented replacement.
    x0 = np.asarray(x, dtype=float)
    assert x0.ndim == 1 or x0.shape[1] == 1
    f0 = np.atleast_1d(func(*((x0,) + args)))
    if f0.ndim == 2:
        assert f0.shape[1] == 1
        f0 = f0[:, 0]
    jac = np.zeros([len(x0), len(f0)])
    dx = np.zeros(x0.shape)
    for i in range(len(x0)):
        # Perturb one coordinate at a time.
        dx[i] = epsilon
        f1 = func(*((x0 + dx,) + args))
        if f1.ndim == 2:
            assert f1.shape[1] == 1
            f1 = f1[:, 0]
        jac[i] = (f1 - f0) / epsilon
        dx[i] = 0.0
    # Rows iterate variables during the loop; transpose to (nqoi, nvars).
    return jac.transpose()
def eval_function_at_multiple_design_and_random_samples(function,uq_samples,design_samples):
    """
    For functions which only take 1d arrays for uq_samples and design_samples
    loop over all combinations and evaluate function at each combination.

    design_samples vary slowest and uq_samples vary fastest.

    Let design samples (columns) = [[1,2],[2,3]]
    uq_samples = [[0, 0, 0],[0, 1, 2]]
    Then samples will be

    ([1, 2], [0, 0, 0])
    ([1, 2], [0, 1, 2])
    ([2, 3], [0, 0, 0])
    ([2, 3], [0, 1, 2])

    function(uq_samples,design_samples)
    """
    vals = []
    # put design samples first so that samples iterates over uq_samples fastest
    samples = get_all_sample_combinations(design_samples,uq_samples)
    for xx,zz in zip(
            samples[:design_samples.shape[0]].T,
            samples[design_samples.shape[0]:].T):
        # flip xx,zz because functions assumed to take uq_samples then
        # design_samples
        vals.append(function(zz,xx))
    return np.asarray(vals)
def eval_mc_based_jacobian_at_multiple_design_samples(grad,stat_func,
                                                      uq_samples,design_samples):
    """
    Evaluate a Monte-Carlo statistic of the gradient at each design sample.

    ``grad`` is evaluated at every (uq, design) combination; ``stat_func``
    then collapses the block of gradients belonging to each design sample.

    Alternatively I could use
    jacobian = [np.mean([constraint_grad_single(z,x) for z in zz.T],axis=0) for x in xx.T]
    But I think this implementation will allow better use of concurent evaluations in the
    future. For example eval_function_at_multiple_design_and_random_samples could
    utilize an asynchronous call over all the sample combinations

    TODO combine uq_samples and design samples into one matrix and assume functions
    always take a single matrix and not two matrices
    """
    grads = eval_function_at_multiple_design_and_random_samples(
        grad,uq_samples,design_samples)

    ndesign_samples = design_samples.shape[1]
    nuq_samples = uq_samples.shape[1]
    # Gradients are laid out with uq samples varying fastest, so the block
    # for design sample ii is the contiguous slice below.
    jacobian = np.array(
        [stat_func(grads[ii*nuq_samples:(ii+1)*nuq_samples])
         for ii in range(ndesign_samples)])
    return jacobian
def check_inputs(uq_samples, design_samples):
    """
    Normalize sample arrays to 2D column form.

    1D arrays become (nvars, 1) columns. ``uq_samples`` may be None.
    Asserts the two arrays have consistent numbers of columns when both
    have more than one.
    """
    if design_samples.ndim == 1:
        design_samples = design_samples[:, np.newaxis]
    if uq_samples is not None and uq_samples.ndim == 1:
        # Bug fix: the original promoted ``design_samples`` here (copy-paste),
        # assigning a 3D design array to uq_samples.
        uq_samples = uq_samples[:, np.newaxis]
    if (uq_samples is not None and
            (design_samples.shape[1] > 1 and uq_samples.shape[1] > 1)):
        assert design_samples.shape[1] == uq_samples.shape[1]
    return uq_samples, design_samples
def deterministic_lower_bound_constraint(constraint_function, lower_bound,
                                         uq_samples, design_samples):
    """
    Inequality constraint enforcing
    ``constraint_function(uq, design) >= lower_bound`` at a single design
    sample. scipy.optimize treats a constraint as satisfied when the
    returned value is non-negative.
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    # Return f - lower_bound so a satisfied constraint is non-negative.
    return constraint_function(uq_samples, design_samples) - lower_bound
def variance_lower_bound_constraint(constraint_function, lower_bound, uq_samples,
                                    design_samples):
    """
    Inequality constraint enforcing ``Var[f] >= lower_bound`` via a
    Monte-Carlo variance estimate at a single design sample. Non-negative
    return means satisfied (scipy.optimize convention).
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    sample_vals = constraint_function(uq_samples, design_samples)
    # Negate (lower_bound - variance) so the lower bound is enforced.
    return -(lower_bound - np.std(sample_vals)**2)
def mean_lower_bound_constraint(constraint_function, lower_bound, uq_samples,
                                design_samples):
    """
    Inequality constraint enforcing ``E[f] >= lower_bound`` via a
    Monte-Carlo mean estimate at a single design sample. Non-negative
    return means satisfied (scipy.optimize convention).
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    vals = constraint_function(uq_samples, design_samples)
    # Bug fix: the original computed lower_bound - mean(vals)**2 (the
    # squaring was copied from the variance constraint), which lets a large
    # *negative* mean satisfy a positive lower bound.
    val = lower_bound - np.mean(vals)
    # scipy minimize enforces constraints are non-negative so use negative
    # here to enforce the lower bound.
    return -val
def mean_lower_bound_constraint_jacobian(constraint_function_jacobian,uq_samples,
                                         design_samples):
    # NOTE(review): squaring the mean of the jacobian values does not match
    # the gradient of mean_lower_bound_constraint (which would be
    # mean of the jacobians, negated) -- confirm the intended formula.
    uq_samples,design_samples = check_inputs(uq_samples,design_samples)
    assert design_samples.shape[1]==1
    # scipy minimize enforces constraints are non-negative
    vals = constraint_function_jacobian(uq_samples,design_samples)
    val = -np.mean(vals)**2
    # scipy minimize enforces constraints are non-negative so use negative here
    # to enforce upper bound
    return -val
def quantile_lower_bound_constraint(constraint_function, quantile, lower_bound,
                                    uq_samples, design_samples):
    """
    Inequality constraint enforcing that the empirical ``quantile`` of
    ``constraint_function`` exceeds ``lower_bound`` at a single design
    sample. Non-negative return means satisfied (scipy.optimize convention).
    """
    uq_samples, design_samples = check_inputs(uq_samples, design_samples)
    assert design_samples.shape[1] == 1
    sample_vals = constraint_function(uq_samples, design_samples)
    empirical_quantile = ss.mstats.mquantiles(sample_vals, prob=[quantile])
    # quantile - lower_bound >= 0 when the bound is met.
    return empirical_quantile - lower_bound
from pyapprox.cvar_regression import smooth_conditional_value_at_risk, \
conditional_value_at_risk
def cvar_lower_bound_constraint(constraint_function,quantile,lower_bound,eps,
                                uq_samples,design_samples):
    # Smoothed CVaR constraint on the *lower* tail of the constraint values
    # (hence the negation of vals). ``eps`` controls the smoothing of the
    # differentiable CVaR approximation.
    # NOTE(review): unlike the sibling constraints, this returns ``val``
    # rather than ``-val`` -- confirm the sign convention is intentional.
    uq_samples,design_samples = check_inputs(uq_samples,design_samples)
    assert design_samples.shape[1]==1
    vals = constraint_function(uq_samples,design_samples)
    # -vals because we want to minimize lower tail
    val = (lower_bound-smooth_conditional_value_at_risk(0,eps,quantile,-vals))
    #val = (lower_bound-conditional_value_at_risk(-vals,quantile))
    return val
class MultipleConstraints(object):
    """Aggregate several scalar constraint callables behind one interface.

    Calling the object evaluates either all constraints or the subset
    selected by ``constraint_idx`` and returns their values as an array.
    """

    def __init__(self, constraints):
        self.constraints = constraints

    def __call__(self, design_sample, constraint_idx=None):
        # Default to evaluating every registered constraint.
        if constraint_idx is None:
            constraint_idx = np.arange(len(self.constraints))
        vals = np.empty(len(constraint_idx))
        for pos, idx in enumerate(constraint_idx):
            vals[pos] = self.constraints[idx](design_sample)
        return vals
class MCStatisticConstraint(object):
    """Monte-Carlo statistic of ``constraint_function`` used as an
    inequality constraint (non-negative return means satisfied).

    ``info['type']`` selects the statistic ('quantile', 'cvar' or 'var');
    the remaining keys of ``info`` supply its parameters. Fresh random
    samples are drawn via ``generate_samples`` on every call.
    """

    def __init__(self, constraint_function, generate_samples, info):
        self.constraint_function = constraint_function
        self.generate_samples = generate_samples
        self.info = info

    def __call__(self, design_samples):
        uq_samples = self.generate_samples()
        constraint_type = self.info['type']
        if constraint_type == 'quantile':
            quantile = self.info['quantile']
            lower_bound = self.info['lower_bound']
            return quantile_lower_bound_constraint(
                self.constraint_function, quantile, lower_bound,
                uq_samples, design_samples)
        elif constraint_type == 'cvar':
            quantile = self.info['quantile']
            lower_bound = self.info['lower_bound']
            eps = self.info['smoothing_eps']
            # Bug fix: the original referenced the undefined names
            # ``constraint_functions`` and ``ii`` (copy-paste from
            # setup_inequality_constraints), raising NameError.
            return cvar_lower_bound_constraint(
                self.constraint_function, quantile, lower_bound, eps,
                uq_samples, design_samples)
        elif constraint_type == 'var':
            lower_bound = self.info['lower_bound']
            # Bug fix: same undefined-name problem as the cvar branch.
            return variance_lower_bound_constraint(
                self.constraint_function, lower_bound,
                uq_samples, design_samples)
        else:
            # Bug fix: the message formatted ``constraint_type[ii]`` with
            # ``ii`` undefined.
            raise Exception(
                'constraint type (%s) not implemented' % constraint_type)
class DeterministicConstraint(object):
    """Deterministic lower-bound constraint evaluated at a nominal
    uncertainty sample.

    ``info`` must provide 'lower_bound' and 'uq_nominal_sample'.
    """

    def __init__(self, constraint_function, info):
        self.constraint_function = constraint_function
        self.info = info

    def __call__(self, design_samples):
        return deterministic_lower_bound_constraint(
            self.constraint_function,
            self.info['lower_bound'],
            self.info['uq_nominal_sample'],
            design_samples)
def setup_inequality_constraints(constraint_functions, constraints_info,
                                 uq_samples):
    """
    Build scipy.optimize-style inequality-constraint dicts (``type='ineq'``)
    from per-constraint metadata.

    :param constraint_functions: callables ``f(uq_samples, design_samples)``
    :param constraints_info: one dict per constraint; the keys used depend
        on ``info['type']`` ('quantile', 'cvar', 'var' or 'deterministic')
    :param uq_samples: random samples bound into the Monte-Carlo statistics
    :raises Exception: for an unrecognized constraint type
    """
    constraints = []
    for ii in range(len(constraint_functions)):
        info = constraints_info[ii]
        constraint_type = info['type']
        if constraint_type == 'quantile':
            quantile = info['quantile']
            quantile_lower_bound = info['quantile_lower_bound']
            ineq_cons_fun = partial(
                quantile_lower_bound_constraint, constraint_functions[ii],
                quantile, quantile_lower_bound, uq_samples)
        elif constraint_type == 'cvar':
            quantile = info['quantile']
            quantile_lower_bound = info['cvar_lower_bound']
            eps = info['smoothing_eps']
            ineq_cons_fun = partial(
                cvar_lower_bound_constraint, constraint_functions[ii],
                quantile, quantile_lower_bound, eps, uq_samples)
        elif constraint_type == 'var':
            var_lower_bound = info['var_lower_bound']
            ineq_cons_fun = partial(
                variance_lower_bound_constraint, constraint_functions[ii],
                var_lower_bound, uq_samples)
        elif constraint_type == 'deterministic':
            lower_bound = info['lower_bound']
            ineq_cons_fun = partial(
                deterministic_lower_bound_constraint, constraint_functions[ii],
                lower_bound, uq_samples)
        else:
            # Bug fix: the original formatted ``constraint_type[ii]`` which
            # indexes a character of the type string (or raises IndexError).
            raise Exception(
                'constraint type (%s) not implemented' % constraint_type)
        ineq_cons = {'type': 'ineq', 'fun': ineq_cons_fun}
        constraints.append(ineq_cons)
    return constraints
def run_design(objective, init_design_sample,
               constraints, bounds, optim_options):
    """Run SLSQP on the design problem, recording each iterate.

    Returns a minimize-like result object (``.x``, ``.fun``) and the
    optimization history as a (nvars, niters) array.

    NOTE(review): ``optim_options`` is currently unused (the keyword
    forwarding is commented out) -- confirm whether it should be passed to
    fmin_slsqp.
    """
    opt_history = [init_design_sample[:,0]]
    def callback(xk):
        # Record every accepted iterate.
        opt_history.append(xk)
        #print(objective(xk))
        #print([constraints[ii]['fun'](xk) for ii in [0,1]])

    # opt_method = 'SLSQP'
    # res = minimize(
    #     objective, init_design_sample[:,0], method=opt_method, jac=None,
    #     constraints=constraints,
    #     options=optim_options,bounds=bounds,callback=callback)

    from scipy.optimize import fmin_slsqp
    res = fmin_slsqp(objective, init_design_sample[:,0], f_ieqcons=constraints,
                     bounds=bounds, callback=callback, full_output=True)#, **optim_options)
    # Adapt fmin_slsqp's tuple output to a minimize-like result object.
    class result():
        def __init__(self,x,fun):
            self.x=np.atleast_1d(x)
            self.fun=fun
    res = result(res[0],res[1])
    opt_history = (np.array(opt_history)).T
    return res, opt_history
def plot_optimization_history(obj_function,constraints,uq_samples,opt_history,
                              plot_limits):
    """Overlay the 2D optimization iterates (numbered) on contour plots of
    the objective and constraints."""
    # fig,axs=plot_optimization_objective_and_constraints_2D(
    #     [constraints[ii]['fun'] for ii in range(len(constraints))],
    #     partial(obj_function,uq_samples[:,0]),plot_limits)
    fig,axs=plot_optimization_objective_and_constraints_2D(
        constraints,partial(obj_function,uq_samples[:,0]),plot_limits)
    # objective can only be evaluated at one uq_sample thus use of
    # uq_samples[:,0]
    for ii in range(len(axs)):
        # Mark each iterate and annotate it with its iteration number.
        axs[ii].plot(opt_history[0,:],opt_history[1,:],'ko')
        for jj, txt in enumerate(range(opt_history.shape[1])):
            axs[ii].annotate(
                '%d'%txt,(opt_history[0,jj],opt_history[1,jj]))
    return fig,axs
#def plot_optimization_objective_and_constraints_2D(
# constraint_functions,objective,plot_limits):
def plot_optimization_objective_and_constraints_2D(
        constraints,objective,plot_limits):
    """Filled-contour plots of each constraint and the objective over a 2D
    design space, with the zero level-set of every constraint overlaid.

    NOTE(review): the figure is hard-coded to 3 subplots -- confirm this
    assumes exactly two constraints plus the objective.
    """
    from pyapprox.visualization import get_meshgrid_function_data
    num_pts_1d = 100; num_contour_levels=30
    fig,axs=plt.subplots(1,3,figsize=(3*8,6))
    #for ii in range(len(constraint_functions)+1):
    for ii in range(len(constraints.constraints)+1):
        # The last panel shows the objective; earlier panels show each
        # constraint evaluated per design column.
        #if ii==len(constraint_functions):
        if ii==len(constraints.constraints):
            function=objective
        else:
            # def function(design_samples):
            #     vals = np.empty((design_samples.shape[1]))
            #     for jj in range(design_samples.shape[1]):
            #         vals[jj]=constraint_functions[ii](design_samples[:,jj])
            #     return vals
            # Closure over the loop variable ii is safe here because the
            # function is consumed within the same iteration.
            def function(design_samples):
                vals = np.empty((design_samples.shape[1]))
                for jj in range(design_samples.shape[1]):
                    vals[jj]=constraints(design_samples[:,jj],[ii])
                return vals
        X,Y,Z = get_meshgrid_function_data(
            function, plot_limits, num_pts_1d)
        norm = None
        cset = axs[ii].contourf(
            X, Y, Z, levels=np.linspace(Z.min(),Z.max(),num_contour_levels),
            cmap=mpl.cm.coolwarm,
            norm=norm)
        # Draw the feasibility boundary (zero contour) of this panel's
        # quantity on every panel; solid on its own panel, dashed elsewhere.
        #for kk in range(len(constraint_functions)):
        for kk in range(len(constraints.constraints)):
            if ii==kk:
                ls = '-'
            else:
                ls = '--'
            axs[kk].contour(X,Y,Z,levels=[0],colors='k',linestyles=ls)
        plt.colorbar(cset,ax=axs[ii])
    return fig,axs
def plot_constraint_pdfs(constraint_functions,uq_samples,design_sample,
                         fig_pdf=None,axs_pdf=None,label=None,color=None):
    """Plot a KDE of each constraint's value distribution at one design
    sample, with a vertical line at zero (the feasibility boundary)."""
    # NOTE(review): ``colors`` is unused -- confirm it can be removed.
    colors = ['b','gray']
    nconstraints = len(constraint_functions)
    if axs_pdf is None:
        fig_pdf,axs_pdf = plt.subplots(1,nconstraints,figsize=(nconstraints*8,6))
    for ii in range(nconstraints):
        # evaluate constraint function at each of the uq samples
        constraint_function_vals = constraint_functions[ii](
            uq_samples,design_sample)
        constraint_kde = KDE(constraint_function_vals)
        yy = np.linspace(constraint_function_vals.min(),
                         constraint_function_vals.max(),101)
        axs_pdf[ii].fill_between(yy,0,constraint_kde(yy),alpha=0.5,label=label,
                                 color=color)
        axs_pdf[ii].axvline(0,color='k')
        #axs_pdf[ii].axvline(constraints[ii]['fun'](design_sample),color='r')
    return fig_pdf,axs_pdf
def plot_constraint_cdfs(constraints,constraint_functions,uq_samples,
                         design_sample,quantile,fig_cdf,axs_cdf=None,label=None,
                         color=None):
    """Plot the empirical CDF of each constraint's values at one design
    sample, shading the failure region (values <= 0)."""
    nconstraints = len(constraint_functions)
    if axs_cdf is None:
        fig_cdf,axs_cdf = plt.subplots(
            1,nconstraints,figsize=(nconstraints*8,6))
    for ii in range(nconstraints):
        constraint_function_vals = constraint_functions[ii](
            uq_samples,design_sample)
        # Debug output comparing the exact and smoothed CVaR estimates of
        # the lower tail.
        cvar = (conditional_value_at_risk(-constraint_function_vals,0.9))
        cvars = (smooth_conditional_value_at_risk(0,1e-3,0.9,-constraint_function_vals))
        print ('cvar',cvar)
        print ('cvars',cvars)
        #constraint_val = constraints[ii]['fun'](design_sample)
        constraint_val = constraints(design_sample,[ii])
        # Sort in place to build the empirical CDF.
        constraint_function_vals.sort()
        cdf_vals = np.linspace(0,1,constraint_function_vals.shape[0]+1)[1:]
        axs_cdf[ii].plot(constraint_function_vals,cdf_vals,label=label,
                         color=color)
        #I = np.where(constraint_function_vals<=constraint_val)[0]
        I = np.where(constraint_function_vals<=0)[0]
        # Shade the empirical failure probability region.
        axs_cdf[ii].fill_between(
            constraint_function_vals[I],0,cdf_vals[I],alpha=0.5,color=color)
        axs_cdf[ii].axvline(0,color='k')
        # NOTE(review): ``J`` duplicates ``I`` and is unused -- confirm it
        # can be removed together with the commented diagnostics below.
        J = np.where(constraint_function_vals<=0)[0]
        #print (J.shape[0]/float(constraint_function_vals.shape[0]),'p failure',constraint_val,J.shape[0])
        # Compute the constraint value. This combines constraint_function_vals
        # into a scalar value
        #axs_cdf[ii].axvline(constraint_val,color='r')
        #axs_cdf[ii].plot(
        #    np.linspace(constraint_function_vals[0],constraint_val,101),
        #    quantile*np.ones(101),'-r')
        #axs_cdf[ii].set_yticks(list(axs_cdf[ii].get_yticks()) + [quantile])
        axs_cdf[ii].set_ylim(0,1.05)
        axs_cdf[ii].set_xlim(
            constraint_function_vals[0],constraint_function_vals[-1])
    return fig_cdf, axs_cdf
def check_gradients(fun, jac, zz, plot=False, disp=True, rel=True):
    """
    Compare a user specified jacobian with the jacobian computed with finite
    difference with multiple step sizes.

    Parameters
    ---------
    fun : callable
        A function with signature

        ``fun(z) -> np.ndarray``

        where ``z`` is a 2D np.ndarray with shape (nvars, 1) and the
        output is a 2D np.ndarray with shape (nqoi, 1). When ``jac is True``
        ``fun`` must instead return the tuple (values, jacobian).

    jac : callable
        The jacobian of ``fun`` with signature

        ``jac(z) -> np.ndarray``

        where ``z`` is a 2D np.ndarray with shape (nvars, 1) and the
        output is a 2D np.ndarray with shape (nqoi, nvars). May also be the
        literal True (see ``fun``).

    zz : np.ndarray (nvars, 1)
        A sample of ``z`` at which to compute the gradient

    plot : boolean
        Plot the errors as a function of the finite difference step size

    disp : boolean
        True - print the errors
        False - do not print

    rel : boolean
        True - compute the relative error in the directional derivative,
        i.e. the absolute error divided by the directional derivative using
        ``jac``.
        False - compute the absolute error in the directional derivative

    Returns
    -------
    errors : np.ndarray (14, nqoi)
        The errors in the directional derivative of ``fun`` at 14 different
        values of finite difference tolerance for each quantity of interest
    """
    assert zz.ndim == 2
    assert zz.shape[1] == 1
    if callable(jac):
        function_val = fun(zz)
        grad_val = jac(zz)
    elif jac==True:
        # ``fun`` returns both the value and the gradient in one call.
        function_val, grad_val = fun(zz)
    # Random unit direction for the directional-derivative comparison
    # (uses the global numpy RNG state).
    direction = np.random.normal(0, 1, (zz.shape[0], 1))
    direction /= np.linalg.norm(direction)
    directional_derivative = grad_val.squeeze().dot(direction).squeeze()
    # Step sizes from 1 down to 1e-13, largest first.
    fd_eps = np.logspace(-13, 0, 14)[::-1]
    errors = []
    row_format = "{:<25} {:<25} {:<25}"
    if disp:
        if rel:
            print(
                row_format.format(
                    "Eps", "Rel. Errors (max)", "Rel. Errors (min)"))
        else:
            print(row_format.format("Eps", "Errors (max)", "Errors (min)"))
    for ii in range(fd_eps.shape[0]):
        # Forward finite difference along the random direction.
        zz_perturbed = zz.copy()+fd_eps[ii]*direction
        perturbed_function_val = fun(zz_perturbed)
        if jac==True:
            perturbed_function_val = perturbed_function_val[0].squeeze()
        fd_directional_derivative = (
            perturbed_function_val-function_val).squeeze()/fd_eps[ii]
        errors.append(np.absolute(
            fd_directional_derivative.reshape(directional_derivative.shape)-
            directional_derivative))
        if rel:
            errors[-1]/=np.absolute(directional_derivative)
        if disp:
            print(row_format.format(fd_eps[ii], errors[ii].max(),
                                    errors[ii].min()))
            #print(fd_directional_derivative, directional_derivative)

    if plot:
        plt.loglog(fd_eps, errors, 'o-')
        plt.ylabel(r'$\lvert\nabla_\epsilon f\cdot p-\nabla f\cdot p\rvert$')
        plt.xlabel(r'$\epsilon$')
        plt.show()

    return np.asarray(errors)
def check_hessian(jac, hessian_matvec, zz, plot=False, disp=True):
    """Validate a Hessian-vector product against finite differences of ``jac``.

    A random unit direction ``p`` is drawn and the exact action
    ``hessian_matvec(zz, p)`` is compared with the finite-difference
    estimate ``(jac(zz + eps*p) - jac(zz)) / eps`` over a decreasing
    sequence of 14 step sizes ``eps``.

    Parameters
    ----------
    jac : callable
        Jacobian with signature ``jac(z) -> np.ndarray`` where ``z`` is a
        2D np.ndarray with shape (nvars, 1).
    hessian_matvec : callable
        Hessian matrix-vector product with signature
        ``hessian_matvec(z, p) -> np.ndarray`` where ``p`` has shape
        (nvars, 1).
    zz : np.ndarray (nvars, 1)
        The sample at which the Hessian is checked.
    plot : bool
        If True, plot the errors against the finite-difference step size.
    disp : bool
        If True, print the max/min error for each step size.

    Returns
    -------
    errors : np.ndarray
        Absolute errors of the finite-difference directional derivative of
        ``jac`` for each of the 14 step sizes.
    """
    assert zz.ndim == 2 and zz.shape[1] == 1
    base_grad = jac(zz)
    # Random unit-length probe direction.
    probe = np.random.normal(0, 1, (zz.shape[0], 1))
    probe /= np.linalg.norm(probe)
    exact_action = hessian_matvec(zz, probe)
    step_sizes = np.logspace(-13, 0, 14)[::-1]
    errors = []
    row_format = "{:<25} {:<25} {:<25}"
    if disp:
        print(row_format.format("Eps", "Errors (max)", "Errors (min)"))
    for eps in step_sizes:
        fd_grad = jac(zz.copy() + eps * probe)
        fd_action = (fd_grad - base_grad) / eps
        errors.append(np.absolute(fd_action - exact_action))
        if disp:
            print(row_format.format(eps, errors[-1].max(), errors[-1].min()))
    if plot:
        plt.loglog(step_sizes, errors, 'o-')
        plt.ylabel(
            r'$\lvert\nabla^2_\epsilon \cdot p f-\nabla^2 f\cdot p\rvert$')
        plt.xlabel(r'$\epsilon$')
        plt.show()
    return np.asarray(errors)
def expectation_fun(values, weights):
    """Return the weighted expectation of a single quantity of interest.

    Parameters
    ----------
    values : np.ndarray (nsamples, ...)
        Function values at each sample. The number of rows must equal the
        number of weights (only one QoI block is supported).
    weights : np.ndarray (nsamples,)
        Quadrature (or Monte-Carlo) weights of each sample.

    Returns
    -------
    np.ndarray
        The weighted sum ``values.T @ weights`` (transposed back), i.e.
        the estimate of ``E[f]``.
    """
    assert values.shape[0] % weights.shape[0] == 0
    nqoi = values.shape[0] // weights.shape[0]
    # Only a single quantity of interest is currently supported.
    assert nqoi == 1
    return (values.T.dot(weights)).T
def expectation_jac(jac_values, weights):
    """Return the gradient of the weighted expectation of a single QoI.

    Parameters
    ----------
    jac_values : np.ndarray (nsamples, nvars)
        Jacobian rows evaluated at each sample.
    weights : np.ndarray (nsamples,)
        Quadrature (or Monte-Carlo) weights of each sample.

    Returns
    -------
    np.ndarray (nvars,)
        The weighted combination of the per-sample Jacobian rows, i.e.
        the gradient of ``E[f]`` with respect to the variables.
    """
    assert jac_values.shape[0] % weights.shape[0] == 0
    nqoi = jac_values.shape[0] // weights.shape[0]
    # Only a single quantity of interest is currently supported.
    assert nqoi == 1
    return (jac_values.T.dot(weights)).T
from pyapprox.cvar_regression import smooth_max_function_first_derivative,\
smooth_max_function_second_derivative
def smooth_prob_failure_fun(smoother_type, eps, tol, values, weights):
    """Estimate the probability of failure ``P[f > tol]`` with a smoothed
    Heaviside function.

    The indicator ``I[f > tol]`` is replaced by the derivative of a
    smoothed max function so the estimate is differentiable.

    Parameters
    ----------
    smoother_type :
        Smoother selector forwarded to
        ``smooth_max_function_first_derivative``.
    eps : float
        Smoothing parameter; smaller values approximate the Heaviside
        function more closely.
    tol : float
        Failure threshold.
    values : np.ndarray (nsamples, 1)
        Function values at the samples.
    weights : np.ndarray (nsamples,)
        Quadrature weights.

    Returns
    -------
    np.ndarray
        The smoothed probability-of-failure estimate.
    """
    assert values.shape[0] % weights.shape[0] == 0
    nqoi = values.shape[0] // weights.shape[0]
    # Only a single quantity of interest is currently supported.
    assert nqoi == 1
    heaviside_vals = smooth_max_function_first_derivative(
        smoother_type, eps, values - tol)
    return (heaviside_vals.dot(weights)).T
def smooth_prob_failure_jac(smoother_type, eps, tol, jac_values, weights):
    """Gradient of the smoothed probability-of-failure estimate.

    Parameters
    ----------
    smoother_type :
        Smoother selector forwarded to
        ``smooth_max_function_second_derivative``.
    eps : float
        Smoothing parameter.
    tol : float
        Failure threshold.
    jac_values : np.ndarray (nsamples, nvars)
        Jacobian rows evaluated at each sample.
    weights : np.ndarray (nsamples,)
        Quadrature weights.

    Returns
    -------
    np.ndarray (1, nvars)
        Gradient of the smoothed failure probability.
    """
    assert jac_values.shape[0] % weights.shape[0] == 0
    nqoi = jac_values.shape[0] // weights.shape[0]
    # Only a single quantity of interest is currently supported.
    assert nqoi == 1
    # NOTE(review): the smoother is evaluated at ``jac_values - tol`` while
    # the companion function uses ``values - tol`` — confirm this is the
    # intended argument and not a copy/paste slip.
    grad_heaviside_vals = smooth_max_function_second_derivative(
        smoother_type, eps, jac_values - tol)
    return (grad_heaviside_vals * jac_values).T.dot(weights)[np.newaxis, :]
def generate_monte_carlo_quadrature_data(
        generate_random_samples, num_vars, design_var_indices, fun, seed=None):
    """Draw Monte-Carlo samples and return equal-weight quadrature data.

    ``num_vars`` and ``design_var_indices`` are accepted for interface
    compatibility but are not used by this implementation.

    Returns
    -------
    (samples, weights, values) : tuple
        The drawn samples, the uniform weights ``1/nsamples`` and the
        function values ``fun(samples)``.
    """
    if seed is not None:
        np.random.seed(seed)
    samples = generate_random_samples()
    nsamples = samples.shape[1]
    weights = np.full(nsamples, 1.0 / nsamples)
    return samples, weights, fun(samples)
from pyapprox.models.wrappers import ActiveSetVariableModel
class StatisticalConstraint(object):
    """Wrap a random-response function as a statistic usable as an
    optimization constraint (or objective).

    For a design sample ``z`` the class evaluates ``fun`` at random
    samples (via ``generate_sample_data``), applies ``stats_fun`` to the
    resulting values and optionally shifts/negates the statistic to form
    a bound constraint. The sample data generated for the most recent
    design sample is cached so ``jacobian`` can reuse it.

    Notes
    -----
    TODO ensure the following.
    This class unifies the jac=True and callable(jac)=True interfaces.
    The interface is used for passing to optimizers that need the fun and jac functions
    to be separate. This is often good practice as it avoids computing
    jac when only fun is required.
    If jac=True the jacobian is stored and returned when self.jac is called
    """
    def __init__(self,fun,jac,stats_fun,stats_jac,num_vars,
                 design_var_indices,generate_sample_data,bound=None,
                 upper_bound=True,isobjective=False):
        # fun/jac: response function and its jacobian; stats_fun/stats_jac:
        # statistic of the response values and its gradient w.r.t. design vars.
        self.fun,self.jac,self.stats_fun=fun,jac,stats_fun
        self.stats_jac=stats_jac
        self.num_vars=num_vars
        self.design_var_indices=design_var_indices
        # The random variables are all variables that are not design variables.
        self.random_var_indices = np.delete(
            np.arange(self.num_vars),self.design_var_indices)
        self.generate_sample_data=generate_sample_data
        self.bound=bound
        self.upper_bound=upper_bound
        self.isobjective=isobjective
        # Cache of the most recently evaluated design sample and its data,
        # reused by jacobian() when called with the same sample.
        self.design_sample = None
        self.jac_values = None
        self.samples = None
        if self.stats_jac is not None and self.jac is None:
            msg = 'stats_jac requries jac to be defined'
            raise Exception(msg)
        if self.jac is not None and self.stats_jac is None:
            # NOTE(review): the message says jac "will be ignored" but an
            # exception is raised — confirm which behaviour is intended.
            msg = 'jac will be ignored because stats_jac was not defined'
            raise Exception(msg)
    def generate_shared_data(self,design_sample):
        """Evaluate ``fun`` at the random samples for *design_sample* and
        cache samples, weights and function values on the instance."""
        self.design_sample=design_sample.copy()
        fun = ActiveSetVariableModel(self.fun,self.num_vars,design_sample,
                                     self.random_var_indices)
        data = self.generate_sample_data(fun)
        self.samples,self.weights,self.fun_values = data[:3]
        assert self.samples.shape[0]==\
            self.num_vars-self.design_var_indices.shape[0]
        assert self.samples.shape[1]==self.weights.shape[0]
        #assert self.samples.shape[1]==self.fun_values.shape[0]
        if not callable(self.jac) and self.jac:
            # consider whether to support self.jac=True. It seems appealing
            # if using gradients from adjoint PDE simulation which requires
            # data used to compute function values and thus better to do at the
            # time the function values are obtained. Challenge is defining the
            # correct output interface and only computing gradients if self.jac
            # has been called and not if self.__call__ is called.
            raise Exception ("Not yet implemented")
            self.jac_values = data[3]
    def __call__(self,design_sample):
        """Return the (possibly bounded/negated) statistic for each QoI at
        *design_sample*. Accepts 1D or 2D column samples."""
        if design_sample.ndim==1:
            design_sample = design_sample[:,np.newaxis]
        self.generate_shared_data(design_sample)
        nsamples = self.weights.shape[0]
        nqoi = self.fun_values.shape[1]
        #print(self.fun_values)
        values = np.empty((nqoi))
        for ii in range(nqoi):
            # Apply the statistic to each quantity of interest separately.
            values[ii] = self.stats_fun(
                self.fun_values[:,ii:ii+1],self.weights)
            #print('b',np.where(self.fun_values[:,ii:ii+1]>0)[0].shape[0]/nsamples)
            #print('c',values[ii])
        #print(self.fun_values.min(),self.fun_values.max())
        if self.bound is not None:
            # Shift to constraint form; negate so that an upper bound becomes
            # the conventional "constraint >= 0" orientation.
            values = values-self.bound
            if self.upper_bound:
                values *= -1
        if self.isobjective:
            values= values[0]
        return values
    def jacobian(self,design_sample):
        """Return the gradient of the statistic(s) with respect to the
        design variables, reusing cached jacobian values when the same
        design sample was evaluated last."""
        if design_sample.ndim==1:
            design_sample = design_sample[:,np.newaxis]
        if (np.array_equal(design_sample,self.design_sample) and
            self.jac_values is not None):
            jac_values = self.jac_values
        else:
            jac = ActiveSetVariableModel(
                self.jac,self.num_vars,self.samples,self.design_var_indices)
            jac_values = jac(design_sample)
        nsamples = self.weights.shape[0]
        nqoi = self.fun_values.shape[1]
        nvars = jac_values.shape[1]
        constraint_jac = np.empty((nqoi,nvars))
        for ii in range(nqoi):
            # jac_values stacks the per-sample jacobians of each QoI block.
            constraint_jac[ii] = self.stats_jac(
                jac_values[ii*nsamples:(ii+1)*nsamples,:],self.weights)
        if self.bound is not None and self.upper_bound:
            constraint_jac *= -1
        return constraint_jac.squeeze()
class PyapproxFunctionAsScipyMinimizeObjective(object):
    """Adapt a Pyapprox objective (2D column samples) to scipy's interface.

    Pyapprox functions take samples as 2D arrays of shape (nvars, 1)
    while ``scipy.optimize.minimize`` passes 1D arrays. This wrapper adds
    the column axis and, when the wrapped function also returns a
    Jacobian, flattens the 1 x nvars row to the 1D gradient scipy expects.
    """
    def __init__(self, fun):
        self.fun = fun

    def __call__(self, scipy_sample):
        assert scipy_sample.ndim == 1
        result = self.fun(scipy_sample[:, np.newaxis])
        if np.isscalar(result):
            return result
        # (value, jacobian) pair: flatten the single-row jacobian.
        assert len(result) == 2
        value, jacobian = result
        assert np.isscalar(value)
        assert jacobian.ndim == 2 and jacobian.shape[0] == 1
        return value, jacobian[0, :]
class ScipyMinimizeObjectiveAsPyapproxFunction(object):
    """Adapt a scipy-style objective (1D samples) to the Pyapprox interface.

    The wrapped function receives the flattened sample; when it returns a
    (value, jacobian) pair the 1 x nvars jacobian row is flattened to 1D.
    """
    def __init__(self, fun):
        self.fun = fun

    def __call__(self, pyapprox_sample):
        assert pyapprox_sample.ndim == 2 and pyapprox_sample.shape[1] == 1
        result = self.fun(pyapprox_sample[:, 0])
        if np.isscalar(result):
            return result
        # (value, jacobian) pair: flatten the single-row jacobian.
        assert len(result) == 2
        value, jacobian = result
        assert np.isscalar(value)
        assert jacobian.ndim == 2 and jacobian.shape[0] == 1
        return value, jacobian[0, :]
class ScipyMinimizeObjectiveJacAsPyapproxJac(object):
    """Adapt a scipy-style gradient (returning a 1D array) to Pyapprox.

    Pyapprox expects Jacobians as 2D arrays of shape (nqoi, nvars); this
    wrapper evaluates the wrapped gradient on the flattened sample and
    restores the leading singleton axis.
    """
    def __init__(self, jac):
        self.jac = jac

    def __call__(self, pyapprox_sample):
        assert pyapprox_sample.ndim == 2 and pyapprox_sample.shape[1] == 1
        return self.jac(pyapprox_sample[:, 0])[np.newaxis, :]
| StarcoderdataPython |
360574 | <reponame>edchelstephens/django-rest-utils
from exceptions.exceptions import * | StarcoderdataPython |
5151692 | from bs4 import BeautifulSoup
import pandas as pd
import re
import requests
from progress.bar import IncrementalBar
from pathlib import Path
from .cache import HttpCache, DataCache
from urllib.parse import urljoin, urlparse, urlunparse
import json
def to_bar_message(s):
    """Truncate *s* to 30 characters and left-justify it to width 30."""
    return "{:<30.30}".format(s)
def parse_country(cities_urls, url, name, data):
    """Collect city page URLs for one country into ``cities_urls``.

    Parses the country's HTML page ``data`` and appends (deduplicated, in
    document order) every absolute link whose path lies under
    ``/weather/<name>/``. Mutates ``cities_urls`` in place; returns None.
    """
    soup = BeautifulSoup(data, 'html.parser')
    for link in soup.find_all('a', href=True):
        # Resolve relative hrefs against the country page URL.
        href = urljoin(url, link['href'])
        if not urlparse(href).path.startswith('/weather/' + name + '/'):
            continue
        if href not in cities_urls:
            cities_urls.append(href)
def fetch_countries(cache, session, urls):
    """Download every country page and return the collected city URLs.

    Parameters
    ----------
    cache : HttpCache
        Cache used to avoid re-downloading pages.
    session : requests.Session
        HTTP session shared across requests.
    urls : list of str
        Country page URLs; the last path component is the country slug.

    Returns
    -------
    list of str
        Deduplicated city page URLs across all countries.
    """
    cities_urls = list()
    bar = IncrementalBar('fetching', max=len(urls))
    for url in urls:
        # The country slug is the last component of the URL path.
        name = urlparse(url).path.split('/')[-1]
        bar.message = to_bar_message(name)
        # next forces the bar to refresh (it's needed for the message)
        bar.next(0)
        data = cache.get(session, url, params=None)
        parse_country(cities_urls, url, name, data)
        bar.next()
    bar.finish()
    return cities_urls
def parse_temps(temps):
    """Reorder scraped monthly climate entries into a Jan..Dec table.

    Parameters
    ----------
    temps : list of dict
        One entry per month with a ``name`` key holding the three-letter
        month abbreviation and optional ``min``/``max``/``mean``/``prec``
        keys.

    Returns
    -------
    list of list
        Twelve rows, one per calendar month in order, each of the form
        ``[min, max, mean, prec]`` with ``None`` for missing fields.

    Raises
    ------
    StopIteration
        If an entry for some month is missing from ``temps``.
    """
    months_names = ["Jan", "Feb", "Mar", "Apr", "May",
                    "Jun", "Jul", "Aug", "Sep", "Oct",
                    "Nov", "Dec"]
    temps_data = []
    for month in months_names:
        entry = next(x for x in temps if x['name'] == month)
        # dict.get already defaults to None for missing fields.
        temps_data.append(
            [entry.get(key) for key in ('min', 'max', 'mean', 'prec')])
    return temps_data
def parse_city(name, data):
    """Extract climate data for one city from its HTML ``/climate`` page.

    Returns a dict with keys ``temps`` (12 monthly rows from
    ``parse_temps``), ``pos`` (``[lon, lat]`` floats), ``name`` (city
    name) and ``country``, or ``None`` when any expected fragment is
    missing from the page.
    """
    datum = {}
    # Monthly climate values are embedded in an inline JS "var data=..." blob.
    m = re.search(r'var data=({.*?});', data)
    if not m:
        print("\ncouldn't find temps!")
        return None
    temps = json.loads(m[1])['months']
    if len(temps) != 12:
        print(f"\nunexpected temps size: {len(temps)}")
        return None
    datum['temps'] = parse_temps(temps)
    # Coordinates are set via TAD.lon / TAD.lat assignments in the page JS.
    m = re.search(r'TAD.lon=([\d\.-]+);TAD.lat=([\d\.-]+);', data)
    if not m:
        print("\ncouldn't find pos")
        return None
    datum['pos'] = [float(m[1]), float(m[2])]
    soup = BeautifulSoup(data, 'html.parser')
    # The page <title> ends with "... in City, Region, Country".
    m = re.search(' in (.*?)$', soup.find('title').text)
    if not m:
        print("\ntitle not found")
        return None
    name = list(map(lambda x: x.strip(), m[1].split(',')))
    if len(name) < 2:
        print(f"\nunknown name format: '{name}'")
        return None
    # First component is the city, last is the country (middle parts dropped).
    datum['name'] = name[0]
    datum['country'] = name[-1]
    return datum
def fetch_cities(cache, session, urls):
    """Download and parse the ``/climate`` page of every city URL.

    Returns a list of the per-city dicts produced by ``parse_city``;
    cities whose page could not be parsed are skipped.
    """
    print(f'fetching cities'' data')
    cities_data = list()
    bar = IncrementalBar('fetching', max=len(urls))
    for url in urls:
        name = urlparse(url).path.split('/')[-1]
        bar.message = to_bar_message(name)
        # next forces the bar to refresh (it's needed for the message)
        bar.next(0)
        url += '/climate'
        data = cache.get(session, url, params=None)
        datum = parse_city(name, data)
        if datum is not None:
            cities_data.append(datum)
        bar.next()
    bar.finish()
    return cities_data
def process_data(data):
    """Normalise country names into an index table, in place.

    Replaces each city's ``country`` string with an integer index into a
    new ``data['countries']`` list of unique country names (ordered by
    first appearance).
    """
    countries = []
    country_index = {}  # name -> position in ``countries`` (O(1) lookup
                        # instead of the original O(n) list.index scan)
    for city in data['cities']:
        name = city['country']
        if name not in country_index:
            country_index[name] = len(countries)
            countries.append(name)
        city['country'] = country_index[name]
    data['countries'] = countries
def fetch_all_temps(data_cache):
    """Scrape timeanddate.com for climate data of every listed city.

    Walks the world-wide weather index, collects country pages, then city
    climate pages, and returns ``{'cities': [...], 'countries': [...]}``
    after replacing country names with indices (see ``process_data``).

    NOTE(review): the ``data_cache`` argument is unused here — the
    function builds its own ``HttpCache`` — confirm whether that is
    intended.
    """
    session = requests.Session()
    cache = HttpCache('~/download.cache')
    print(f'collecting countries list')
    url = 'https://www.timeanddate.com/weather/?low=c'
    data = cache.get(session, url)
    soup = BeautifulSoup(data, 'html.parser')
    links = list()
    for link in soup.find_all('a'):
        href = urlparse(urljoin(url, link['href']))
        if not href.path.startswith('/weather'):
            continue
        parts = href.path.split('/')
        # Keep only /weather/<country>/<city> links (four path components).
        if len(parts) != 4:
            continue
        # oops, bad link
        if parts[2] == 'kazakstan':
            parts[2] = 'kazakhstan'
        # Reduce the link to the country page (/weather/<country>).
        href = urlunparse(href._replace(path='/'.join(parts[0:3])))
        if href not in links:
            links.append(href)
    cities_urls = fetch_countries(cache, session, links)
    data = fetch_cities(cache, session, cities_urls)
    data = { 'cities': data }
    process_data(data)
    return data
def save_data(data, json_path):
    """Serialise *data* as JSON to *json_path*, logging the destination."""
    print(f'storing data at {json_path}')
    with open(json_path, 'w') as file:
        json.dump(data, file)
def clean_rows(datum):
    # Lazily apply ``clean_row`` to every row of ``datum`` (returns a map
    # iterator, not a list).
    # NOTE(review): ``clean_row`` is not defined anywhere in this module —
    # calling this function would raise NameError; confirm whether this is
    # dead code or a missing import.
    return map(lambda row: clean_row(row), datum)
# --- script entry: build (or reuse) the climate dataset asset -----------
data_cache = DataCache('timendate.cache')
json_path = Path('temps/src/assets/temps.json')
if not json_path.exists():
    # Only scrape when the JSON asset has not been generated yet.
    data = fetch_all_temps(data_cache)
    save_data(data, json_path)
else:
    print(f'data already exists at "{json_path}"')
| StarcoderdataPython |
11291704 | <reponame>lexndru/buildok<filename>buildok/statements/duckduckgo.py
# Copyright 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from buildok.statements.web import ViewWeb
class DuckDuckGoSearch(ViewWeb):
    r"""Open a DuckDuckGo search in default browser.
    Args:
        search (str): Search string to lookup.
    Retuns:
        str: Output as string.
    Raises:
        TypeError: If an invalid `search` is provided.
    Accepted statements:
        ^lookup `(?P<search>.+)` online$
    Sample (input):
        - Lookup `buildok` online.
    Expected:
        Lookup results => https://duckduckgo.com/?q=buildok
    """
    def run(self, search=None, *args, **kwargs):
        # Build the DuckDuckGo query URL from the captured search string.
        # NOTE(review): ``search`` is interpolated without URL-encoding;
        # queries containing spaces or '&' may not round-trip — confirm
        # whether open_url handles quoting.
        url = r"https://duckduckgo.com/?q={}".format(search)
        error = self.open_url(url)
        if error is not None:
            self.fail(error)
        else:
            self.success("Lookup results => %s" % url)
| StarcoderdataPython |
1913114 | import unittest
from mock import patch
from starter.starter_helper import NullRequiredDataException
from starter.starter_PMCDeposit import starter_PMCDeposit
from tests.activity.classes_mock import FakeLogger
from tests.classes_mock import FakeLayer1
import tests.settings_mock as settings_mock
class TestStarterPMCDeposit(unittest.TestCase):
    """Unit tests for the PMCDeposit workflow starter."""
    def setUp(self):
        # Fresh starter with a fake logger so nothing is written anywhere.
        self.fake_logger = FakeLogger()
        self.starter = starter_PMCDeposit(settings_mock, self.fake_logger)
    def test_start_no_document(self):
        """start() must refuse to run when no document is supplied."""
        self.assertRaises(
            NullRequiredDataException,
            self.starter.start,
            settings=settings_mock,
            document=None,
        )
    @patch("boto.swf.layer1.Layer1")
    def test_start(self, fake_conn):
        """start() succeeds (returns None) with the SWF connection mocked."""
        document = "document"
        fake_conn.return_value = FakeLayer1()
        self.assertIsNone(self.starter.start(settings_mock, document))
| StarcoderdataPython |
3505269 | <gh_stars>1-10
from multiprocessing.pool import Pool
def cal_list(num):
    """Return the numbers on the diagonals of a number spiral with *num* rings.

    Starting from 1 at the centre, each ring of the spiral contributes its
    four corner values; ring ``k`` (1-based) uses a step of ``2*k``.
    """
    values = [1]
    current = 1
    for ring in range(1, num + 1):
        step = 2 * ring
        for _ in range(4):
            current += step
            values.append(current)
    return values
def cal_prime(num):
    """Return *num* if it is prime, otherwise ``None``.

    Fixes the original behaviour for ``num < 2``: 0, 1 and negatives are
    not prime and now return ``None`` instead of being passed through.
    """
    if num < 2:
        return None
    for divisor in range(2, int(num ** 0.5 + 1)):
        if num % divisor == 0:
            return None
    return num
def is_prime(num):
    """Return True if *num* is a prime number.

    Fixes the original behaviour for ``num <= 0``: zero and negative
    numbers (which the old trial-division loop misclassified as prime
    because the loop body never ran) now return False.
    """
    if num < 2:
        return False
    for divisor in range(2, int(num ** 0.5 + 1)):
        if num % divisor == 0:
            return False
    return True
def try_num(k):
    """Return the fraction of primes among the spiral-diagonal numbers of
    a spiral with *k* rings, printing ``k:fraction``.

    Primality tests are farmed out to a process pool because the
    candidate values grow quadratically with ``k``. Fixes: the pool is
    now always closed (even if ``map`` raises), the builtin ``sum`` is no
    longer shadowed, and the redundant ``== True`` loop is replaced by
    summing the boolean flags directly.
    """
    diagonals = cal_list(k)
    pool = Pool(processes=16)
    try:
        flags = pool.map(is_prime, diagonals)
    finally:
        pool.close()
        pool.join()
    ratio = sum(flags) / len(diagonals)
    print(str(k) + ':' + str(ratio))
    return ratio
# Scan ring counts until the prime ratio on the spiral diagonals drops
# below 10% (cf. Project Euler problem 58 style search).
for i in range(13100, 14000):
    if try_num(i) < 0.1:
        break
| StarcoderdataPython |
11383014 | # Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import json
import os
import unittest
import uuid
from pytz import timezone
from cerebralcortex.CerebralCortex import CerebralCortex
from cerebralcortex.kernel.DataStoreEngine.Metadata.Metadata import Metadata
from cerebralcortex.kernel.DataStoreEngine.dataset import DataSet
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
class TestDataStoreEngine(unittest.TestCase):
    """Ordered integration tests for the CerebralCortex data store engine.

    The ``test_NN`` prefixes matter: later tests read the stream metadata
    and data written by earlier ones. A backend described by
    ``res/test_configuration.yml`` (plus a local Spark session) must be
    reachable for these tests to run.
    """
    testConfigFile = os.path.join(os.path.dirname(__file__), 'res/test_configuration.yml')
    CC = CerebralCortex(testConfigFile, master="local[*]", name="Cerebral Cortex DataStoreEngine Tests",
                        time_zone="US/Central", load_spark=True)
    configuration = CC.configuration
    meta_obj = Metadata(CC)
    def test_01_setup_data(self):
        """Create (or look up) the metadata entry for the shared test stream."""
        data_descriptor = {}
        execution_context = json.loads(
            '{"execution_context": {"algorithm": {"method": "cerebralcortex.data_processor.data_diagnostic.BatteryDataMarker"}}}')
        annotations = {}
        stream_type = "datastream"
        start_time = datetime.datetime(2017, 4, 24, 0, 0, 1)
        end_time = datetime.datetime(2017, 4, 24, 0, 0, 2)
        result = Metadata(self.CC).is_id_created("06634264-56bc-4c92-abd7-377dbbad79dd", "data-store-test",
                                                 execution_context)
        if result["status"] == "new":
            stream_identifier = "6db98dfb-d6e8-4b27-8d55-95b20fa0f754"
        else:
            stream_identifier = result["id"]
        self.assertEqual(stream_identifier, "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        Metadata(self.CC).store_stream_info(stream_identifier,
                                            "06634264-56bc-4c92-abd7-377dbbad79dd", "data-store-test",
                                            data_descriptor, execution_context,
                                            annotations,
                                            stream_type, start_time, end_time, result["status"])
    def test_02_get_stream_info(self):
        """Read back the stream info written in test_01 and verify fields."""
        stream_info = Metadata(self.CC).get_stream_info("6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        self.assertEqual(stream_info[0]["identifier"], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        self.assertEqual(stream_info[0]["owner"], "06634264-56bc-4c92-abd7-377dbbad79dd")
        self.assertEqual(stream_info[0]["name"], "data-store-test")
        self.assertEqual(stream_info[0]["data_descriptor"], "{}")
        self.assertEqual(stream_info[0]["execution_context"],
                         '{"execution_context": {"algorithm": {"method": "cerebralcortex.data_processor.data_diagnostic.BatteryDataMarker"}}}')
        self.assertEqual(stream_info[0]["annotations"], "{}")
        self.assertEqual(stream_info[0]["type"], "datastream")
    def test_03_append_annotations(self):
        """Invalid annotation updates must raise; identical data is 'unchanged'."""
        self.assertRaises(Exception, Metadata(self.CC).append_annotations, "6db98dfb-d6e8-4b27-8d55-95b20fa0f754",
                          "06634264-56bc-4c92-abd7-377dbbad79dd",
                          "data-store-test", {}, {}, {}, "datastream1")
        self.assertRaises(Exception, Metadata(self.CC).append_annotations, "6db98dfb-d6e8-4b27-8d55-95b20fa0f754",
                          "06634264-56bc-4c92-abd7-377dbbad79dd",
                          "data-store-test", {}, {"some": "none"}, {}, "datastream1")
        self.assertRaises(Exception, Metadata(self.CC).append_annotations, "6db98dfb-d6e8-4b27-8d55-95b20fa0f754",
                          "06634264-56bc-4c92-abd7-377dbbad79dd",
                          "data-store-test", {"a": "b"}, {}, {}, "datastream1")
        self.assertRaises(Exception, Metadata(self.CC).append_annotations, "6db98dfb-d6e8-4b27-8d55-95b20fa0f754",
                          "06634264-56bc-4c92-abd7-377dbbad79dd",
                          "data-diagnostic_diff", {}, {}, {}, "datastream1")
        # Re-sending the exact metadata from test_01 must be a no-op.
        annotations_unchanged = Metadata(self.CC).append_annotations("6db98dfb-d6e8-4b27-8d55-95b20fa0f754",
                                                                     "06634264-56bc-4c92-abd7-377dbbad79dd",
                                                                     "data-store-test", {}, json.loads(
                '{"execution_context": {"algorithm": {"method": "cerebralcortex.data_processor.data_diagnostic.BatteryDataMarker"}}}'),
                                                                     {}, "datastream")
        self.assertEqual(annotations_unchanged, "unchanged")
    def test_04_get_stream_ids_by_name(self):
        """Stream lookup by name with progressively narrower filters."""
        start_time = datetime.datetime(2017, 4, 24, 0, 0, 1)
        end_time = datetime.datetime(2017, 4, 24, 0, 0, 2)
        by_name = Metadata(self.CC).get_stream_ids_by_name("data-store-test")
        self.assertIsInstance(by_name, list)
        self.assertEqual(by_name[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        by_name_id = Metadata(self.CC).get_stream_ids_by_name("data-store-test",
                                                              "06634264-56bc-4c92-abd7-377dbbad79dd")
        self.assertIsInstance(by_name_id, list)
        self.assertEqual(by_name_id[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        by_name_id_start_time = Metadata(self.CC).get_stream_ids_by_name("data-store-test",
                                                                         "06634264-56bc-4c92-abd7-377dbbad79dd",
                                                                         start_time)
        self.assertIsInstance(by_name_id_start_time, list)
        self.assertEqual(by_name_id_start_time[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        by_name_id_start_time_end_time = Metadata(self.CC).get_stream_ids_by_name("data-store-test",
                                                                                  "06634264-56bc-4c92-abd7-377dbbad79dd",
                                                                                  start_time, end_time)
        self.assertIsInstance(by_name_id_start_time_end_time, list)
        self.assertEqual(by_name_id_start_time_end_time[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
    def test_05_get_stream_ids_of_owner(self):
        """Stream lookup by owner with progressively narrower filters."""
        start_time = datetime.datetime(2017, 4, 24, 0, 0, 1)
        end_time = datetime.datetime(2017, 4, 24, 0, 0, 2)
        by_id = Metadata(self.CC).get_stream_ids_of_owner("06634264-56bc-4c92-abd7-377dbbad79dd")
        self.assertIsInstance(by_id, list)
        self.assertEqual(by_id[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        by_name_id = Metadata(self.CC).get_stream_ids_of_owner("06634264-56bc-4c92-abd7-377dbbad79dd",
                                                               "data-store-test")
        self.assertIsInstance(by_name_id, list)
        self.assertEqual(by_name_id[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        by_name_id_start_time = Metadata(self.CC).get_stream_ids_of_owner("06634264-56bc-4c92-abd7-377dbbad79dd",
                                                                          "data-store-test", start_time)
        self.assertIsInstance(by_name_id_start_time, list)
        self.assertEqual(by_name_id_start_time[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
        by_name_id_start_time_end_time = Metadata(self.CC).get_stream_ids_of_owner(
            "06634264-56bc-4c92-abd7-377dbbad79dd", "data-store-test", start_time, end_time)
        self.assertIsInstance(by_name_id_start_time_end_time, list)
        self.assertEqual(by_name_id_start_time_end_time[0], "6db98dfb-d6e8-4b27-8d55-95b20fa0f754")
    def test_06_store_stream(self):
        """Round-trip a DataStream with one datapoint through the store."""
        identifier = "6db98dfb-d6e8-4b27-8d55-95b20fa0f754"
        owner = "06634264-56bc-4c92-abd7-377dbbad79dd"
        name = "data-store-test"
        data_descriptor = {}
        execution_context = json.loads(
            '{"execution_context": {"algorithm": {"method": "cerebralcortex.data_processor.data_diagnostic.BatteryDataMarker"}}}')
        annotations = {}
        datapoints = []
        stream_type = "datastream"
        start_time = datetime.datetime(2017, 4, 24, 0, 0, 1)
        end_time = datetime.datetime(2017, 4, 24, 0, 0, 2)
        # Timestamps must be timezone-aware to survive the round-trip.
        localtz = timezone('US/Central')
        start_time = localtz.localize(start_time)
        end_time = localtz.localize(end_time)
        sample = {'Foo3': 123}
        dp1 = DataPoint(start_time=start_time, end_time=end_time, sample=sample)
        datapoints.append(dp1)
        ds = DataStream(identifier, owner, name, data_descriptor, execution_context,
                        annotations, stream_type, start_time, end_time, datapoints)
        self.CC.save_datastream(ds)
        stream = self.CC.get_datastream(identifier, data_type=DataSet.COMPLETE)
        self.assertEqual(stream._identifier, identifier)
        self.assertEqual(stream._owner, owner)
        self.assertEqual(stream._name, name)
        self.assertEqual(stream._data_descriptor, data_descriptor)
        self.assertEqual(stream._execution_context, execution_context)
        self.assertEqual(stream._annotations, annotations)
        self.assertEqual(stream._datastream_type, stream_type)
        self.assertEqual(stream.data[0].start_time, start_time)
        self.assertEqual(stream.data[0].end_time, end_time)
        self.assertEqual(stream.data[0].sample, sample)
    def test_07_stream_filter(self):
        """Store an annotation stream plus a data stream and verify that
        filtering the data stream by the 'good' annotation keeps all
        overlapping datapoints with their original times and samples."""
        identifier_anno = "6db98dfb-d6e8-4b27-8d55-95b20fa0f750"
        identifier_data = "6db98dfb-d6e8-4b27-8d55-95b20fa0f751"
        owner_id = "06634264-56bc-4c92-abd7-377dbbad79dd"
        name_anno = "data-store-test-annotation"
        name_data = "data-store-test-data"
        data_descriptor = {}
        execution_context_anno = json.loads(
            '{"execution_context": {"algorithm": {"method": "test.data_store.annotation.filter"}}}')
        execution_context_data = json.loads(
            '{"execution_context": {"algorithm": {"method": "test.data_store.data.filter"}}}')
        # The data stream references the annotation stream by identifier.
        annotations_data = json.loads('[{"name": "test-case","identifier": "6db98dfb-d6e8-4b27-8d55-95b20fa0f750"}]')
        annotations_anno = {}
        datapoints_anno = []
        datapoints_data = []
        result_data = Metadata(self.CC).is_id_created(owner_id, name_data, execution_context_data)
        if result_data["status"] != "new":
            identifier_data = result_data["id"]
        Metadata(self.CC).store_stream_info(identifier_anno,
                                            owner_id, name_anno,
                                            data_descriptor, execution_context_anno,
                                            annotations_anno,
                                            "annotations", datetime.datetime(2017, 4, 24, 0, 0, 1),
                                            datetime.datetime(2017, 4, 24, 0, 0, 5), result_data["status"])
        result_anno = Metadata(self.CC).is_id_created(owner_id, name_data, execution_context_data)
        if result_anno["status"] != "new":
            identifier_anno = result_anno["id"]
        Metadata(self.CC).store_stream_info(identifier_data,
                                            owner_id, name_data,
                                            data_descriptor, execution_context_data,
                                            annotations_data,
                                            "datastream", datetime.datetime(2017, 4, 24, 0, 0, 1),
                                            datetime.datetime(2017, 4, 24, 0, 0, 5), result_anno["status"])
        # Alternate 'good'/'bad' annotations over overlapping windows.
        for i in range(0, 5):
            if (i % 2 == 0):
                sample_anno = 'good'
            else:
                sample_anno = 'bad'
            sample_data = i, i + 2, i + 3
            start_time_anno = datetime.datetime(2017, 4, 24, 0, 0, i)
            end_time_anno = datetime.datetime(2017, 4, 24, 0, 0, (5 + i))
            start_time_data = datetime.datetime(2017, 4, 24, 0, 0, i)
            end_time_data = datetime.datetime(2017, 4, 24, 0, 0, (3 + i))
            localtz = timezone('US/Central')
            start_time_anno = localtz.localize(start_time_anno)
            end_time_anno = localtz.localize(end_time_anno)
            start_time_data = localtz.localize(start_time_data)
            end_time_data = localtz.localize(end_time_data)
            datapoints_anno.append(DataPoint(start_time=start_time_anno, end_time=end_time_anno, sample=sample_anno))
            datapoints_data.append(DataPoint(start_time=start_time_data, end_time=end_time_data, sample=sample_data))
        ds_anno = DataStream(uuid.UUID(identifier_anno), owner_id, name_anno, data_descriptor, execution_context_anno,
                             annotations_data, "annotations", start_time_anno, end_time_anno, datapoints_anno)
        ds_data = DataStream(uuid.UUID(identifier_data), owner_id, name_data, data_descriptor, execution_context_data,
                             annotations_anno, "datastream", start_time_anno, end_time_anno, datapoints_data)
        self.CC.save_datastream(ds_anno)
        self.CC.save_datastream(ds_data)
        filted_stream = self.CC.filter_stream(identifier_data, "test-case", "good")
        self.assertEqual(len(filted_stream), 5)
        for i in range(0, 5):
            sample_data = [i, i + 2, i + 3]
            start_time_data = datetime.datetime(2017, 4, 24, 0, 0, i)
            end_time_data = datetime.datetime(2017, 4, 24, 0, 0, (3 + i))
            start_time_data = localtz.localize(start_time_data)
            end_time_data = localtz.localize(end_time_data)
            self.assertEqual(filted_stream[i].start_time, start_time_data)
            self.assertEqual(filted_stream[i].end_time, end_time_data)
            self.assertEqual(filted_stream[i].sample, sample_data)
# Run the data store engine test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
12843050 | <gh_stars>1-10
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
# Copyright (c) 2018, <NAME>. This software is licensed under the BSD
# 3-Clause License. Please see the LICENSE file in the project root directory.
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
import sys
import requests
from django.utils.translation import gettext_lazy as _
from uwsgi_tasks import RetryTaskException, task
from generic.utils import TokenAuth, print_error, print_message, print_warning, retry_get
@task(retry_count=5, retry_timeout=300)
def remove_from_trillian(pk):
    """Delete the remote copy of an analysed InstanceRun from its Trillian node.

    Runs as a uwsgi task with up to 5 retries (300s apart). The task is a
    no-op when the run has not been analysed yet or its remote copy is
    already gone; transient failures raise ``RetryTaskException`` so the
    task framework retries later. (A stray debug ``print(response)`` was
    removed.)

    Parameters
    ----------
    pk : int
        Primary key of the InstanceRun whose remote copy should be removed.
    """
    from measurements.models import InstanceRun

    try:
        # Try to find the InstanceRun multiple times, in case of a race condition
        run = retry_get(InstanceRun.objects.exclude(analysed=None), pk=pk)

        if not run.analysed:
            print_warning(_("InstanceRun {pk} has not yet been analysed").format(pk=pk))
            return

        if not run.trillian_url:
            # Already cleaned up
            return

        print_message(_("Deleting InstanceRun {run.pk} ({run.url}) from {run.trillian.name}").format(run=run))

        response = requests.request(
            method='DELETE',
            url=run.trillian_url,
            auth=TokenAuth(run.trillian.token),
            timeout=(5, 15),
        )

        if response.status_code not in [204, 404]:
            # 204 = deleted, 404 = doesn't exist anymore
            print_error(
                _("{run.trillian.name} didn't accept our request ({response.status_code}), retrying later").format(
                    run=run,
                    response=response
                )
            )
            raise RetryTaskException

        run.trillian_url = ''
        run.save()

        print_message(_("Trillian {run.trillian.name} deleted completed InstanceRun {run.pk}").format(run=run))
    except RetryTaskException:
        raise
    except InstanceRun.DoesNotExist:
        print_warning(_("InstanceRun {pk} does not exist anymore").format(pk=pk))
        return
    except Exception as ex:
        print_error(_('{name} on line {line}: {msg}').format(
            name=type(ex).__name__,
            line=sys.exc_info()[-1].tb_lineno,
            msg=ex
        ))
        raise RetryTaskException
| StarcoderdataPython |
4987052 | <reponame>aigarius/photoriver2
"""Remotes implementation - state of a local folder"""
import logging
import os
import shutil
import requests
from photoriver2.remote_base import BaseRemote
# Filename extensions (compared case-insensitively) treated as photos.
IMAGE_EXTENSIONS = ("JPEG", "JPG", "HEIC", "CR2", "TIFF", "TIF")
# Module-level logger for the local remote implementation.
logger = logging.getLogger(__name__)
def deconflict(path):
    """Return a path that does not collide with an existing file.

    If *path* is free it is returned unchanged. Otherwise a numeric
    counter suffix ``_01`` is inserted before the extension and
    incremented (``_02``, ``_03``, ...) until an unused name is found.
    """
    if not os.path.exists(path):
        return path
    if "." in os.path.basename(path):
        base, ext = path.rsplit(".", 1)
    else:
        base = path
        ext = ""
    # Guard len(base) >= 3 before indexing base[-3]: the original raised
    # IndexError for base names shorter than three characters.
    if len(base) < 3 or base[-3] != "_" or not base[-2:].isdigit():
        return deconflict(base + "_01." + ext)
    return deconflict("{}_{:02}.{}".format(base[:-3], int(base[-2:]) + 1, ext))
class LocalRemote(BaseRemote):
"""Remote representing a local folder with photos"""
folder = None
    def __init__(self, folder, *args, **kwargs):
        # Root directory holding the photo library this remote mirrors.
        self.folder = folder
        super().__init__(*args, **kwargs)
    def get_photos(self):
        """Return all photos in the folder, sorted by relative name.

        Symlinks are skipped (they are album references, not originals).
        Each entry is ``{"name": <path relative to self.folder>, "data":
        <zero-arg callable opening the file in binary mode>}``.
        """
        photos = []
        for root, _, files in os.walk(self.folder):
            for afile in files:
                name = os.path.relpath(os.path.join(root, afile), self.folder)
                if "." in name and name.rsplit(".", 1)[1].upper() in IMAGE_EXTENSIONS:
                    if not os.path.islink(os.path.join(root, afile)):
                        # pylint: disable=cell-var-from-loop
                        # The default argument binds the current path now,
                        # avoiding the late-binding closure pitfall.
                        photos.append({"name": name, "data": lambda n=os.path.join(root, afile): open(n, "rb")})
        return sorted(photos, key=lambda x: x["name"])
    def get_albums(self):
        """Return albums found under ``<folder>/albums``, sorted by name.

        Each entry is ``{"name": <album dir name>, "photos": [...]}``
        where the photo paths are resolved through their symlinks and made
        relative to the library root.
        """
        albums = []
        if os.path.exists(os.path.join(self.folder, "albums")):
            dirs = os.scandir(os.path.join(self.folder, "albums"))
            for adir in dirs:
                if not adir.is_dir():
                    continue
                photos = os.listdir(adir.path)
                # Resolve symlinks in paths of photos in albums
                photos = [os.path.relpath(os.path.realpath(os.path.join(adir.path, x)), self.folder) for x in photos]
                albums.append(
                    {
                        "name": adir.name,
                        "photos": sorted(photos),
                    }
                )
        return sorted(albums, key=lambda x: x["name"])
def get_fixes(self):
fixes = []
# Files in albums/ should be symlinks
for root, _, files in os.walk(os.path.join(self.folder, "albums")):
for afile in files:
if "." in afile and afile.rsplit(".", 1)[1].upper() in IMAGE_EXTENSIONS:
full_path = os.path.join(root, afile)
if not os.path.islink(full_path):
fixes.append(
{
"action": "symlink",
"name": os.path.relpath(full_path, self.folder),
"to": os.path.basename(full_path),
}
)
return fixes
def _abs(self, path):
return os.path.join(self.folder, path)
def do_fixes(self, fixes):
for afix in fixes:
if afix["action"] == "symlink":
# Move the file over to new location (making parent folders as needed)
os.makedirs(self._abs(os.path.dirname(afix["to"])))
os.rename(self._abs(afix["name"]), self._abs(afix["to"]))
# Create a relative symlink in the old place pointing to the new location
os.symlink(
os.path.relpath(self._abs(afix["to"]), os.path.dirname(self._abs(afix["name"]))),
self._abs(afix["name"]),
)
# TODO figure out solution to match 2008/8/3/IMG1232.JPG to 2008/08/03/IMG1232.JPG
def _do_photo_updates(self, updates):
for update in updates:
if update["action"] == "new":
if not os.path.exists(self._abs(update["name"])):
logger.info("Remote %s: adding photo %s", self.name, update["name"])
os.makedirs(self._abs(os.path.dirname(update["name"])), exist_ok=True)
try:
with open(self._abs(update["name"]), "wb") as outfile:
infile = update["data"]()
outfile.write(infile.read())
infile.close()
except (requests.exceptions.HTTPError, OSError, IOError):
os.remove(self._abs(update["name"]))
raise
elif update["action"] == "del":
if os.path.exists(self._abs(update["name"])):
logger.info("Remote %s: deleting photo %s", self.name, update["name"])
os.remove(self._abs(update["name"]))
elif update["action"] == "mv":
if os.path.exists(self._abs(update["name"])):
if not os.path.exists(self._abs(update["new_name"])):
logger.info("Remote %s: moving photo %s to %s", self.name, update["name"], update["new_name"])
os.makedirs(self._abs(os.path.dirname(update["new_name"])), exist_ok=True)
os.rename(self._abs(update["name"]), self._abs(update["new_name"]))
def _do_album_updates(self, updates):
for update in updates:
if update["action"] == "new_album":
album_path = self._abs(os.path.join("albums", update["name"]))
if not os.path.exists(album_path):
logger.info("Remote %s: creating album %s", self.name, update["name"])
os.makedirs(album_path)
for aphoto in update["photos"]:
os.symlink(
os.path.relpath(self._abs(aphoto), album_path),
os.path.join(album_path, os.path.basename(aphoto)),
)
elif update["action"] == "del_album":
album_path = self._abs(os.path.join("albums", update["name"]))
logger.info("Remote %s: deleting album %s", self.name, update["name"])
shutil.rmtree(album_path, ignore_errors=True)
elif update["action"] == "new_album_photo":
album_path = self._abs(os.path.join("albums", update["album_name"]))
found = False
for image in os.listdir(album_path):
if os.path.realpath(os.path.join(album_path, image)) == self._abs(update["name"]):
found = True
if not found:
logger.info(
"Remote %s: adding photos %s to album %s", self.name, update["name"], update["album_name"]
)
link_path = os.path.join(album_path, os.path.basename(update["name"]))
link_path = deconflict(link_path)
os.symlink(
os.path.relpath(self._abs(update["name"]), album_path),
link_path,
)
elif update["action"] == "del_album_photo":
album_path = self._abs(os.path.join("albums", update["album_name"]))
for image in os.listdir(album_path):
if os.path.realpath(os.path.join(album_path, image)) == self._abs(update["name"]):
logger.info(
"Remote %s: removing photos %s to album %s",
self.name,
update["name"],
update["album_name"],
)
os.remove(os.path.join(album_path, image))
| StarcoderdataPython |
3282130 | <filename>UnityEngine/RectTransform/__init__.py<gh_stars>0
from typing import overload
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class RectTransform:
def __new__(cls, arg1=None):
'''
:returns: RectTransform
:rtype: UnityEngine.RectTransform
'''
pass
@staticmethod
def op_Implicit(arg1):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Equality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def op_Inequality(arg1, arg2):
'''
:param arg1: Object
:type arg1: UnityEngine.Object
:param arg2: Object
:type arg2: UnityEngine.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def add_reapplyDrivenProperties(arg1):
'''
:param arg1: ReapplyDrivenProperties
:type arg1: UnityEngine.ReapplyDrivenProperties
'''
pass
@staticmethod
def remove_reapplyDrivenProperties(arg1):
'''
:param arg1: ReapplyDrivenProperties
:type arg1: UnityEngine.ReapplyDrivenProperties
'''
pass
@staticmethod
def get_rect():
'''
:returns: Rect
:rtype: UnityEngine.Rect
'''
pass
@staticmethod
def get_anchorMin():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_anchorMin(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def get_anchorMax():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_anchorMax(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def get_anchoredPosition():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_anchoredPosition(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def get_sizeDelta():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_sizeDelta(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def get_pivot():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_pivot(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def get_anchoredPosition3D():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_anchoredPosition3D(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_offsetMin():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_offsetMin(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def get_offsetMax():
'''
:returns: Vector2
:rtype: UnityEngine.Vector2
'''
pass
@staticmethod
def set_offsetMax(arg1):
'''
:param arg1: Vector2
:type arg1: UnityEngine.Vector2
'''
pass
@staticmethod
def ForceUpdateRectTransforms():
pass
@staticmethod
def GetLocalCorners(arg1):
'''
:param arg1: Vector3Array
:type arg1: UnityEngine.Vector3Array
'''
pass
@staticmethod
def GetWorldCorners(arg1):
'''
:param arg1: Vector3Array
:type arg1: UnityEngine.Vector3Array
'''
pass
@staticmethod
def SetInsetAndSizeFromParentEdge(arg1, arg2, arg3):
'''
:param arg1: Edge
:type arg1: UnityEngine.Edge
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
'''
pass
@staticmethod
def SetSizeWithCurrentAnchors(arg1, arg2):
'''
:param arg1: Axis
:type arg1: UnityEngine.Axis
:param arg2: Single
:type arg2: System.Single or float
'''
pass
@staticmethod
def IsChildOf(arg1):
'''
:param arg1: Transform
:type arg1: UnityEngine.Transform
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_hasChanged():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_hasChanged(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def GetEnumerator():
'''
:returns: IEnumerator
:rtype: System.IEnumerator
'''
pass
@staticmethod
def GetChild(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_hierarchyCapacity():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def set_hierarchyCapacity(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
'''
pass
@staticmethod
def get_hierarchyCount():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def get_position():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_position(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_localPosition():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_localPosition(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_eulerAngles():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_eulerAngles(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_localEulerAngles():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_localEulerAngles(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_right():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_right(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_up():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_up(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_forward():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_forward(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_rotation():
'''
:returns: Quaternion
:rtype: UnityEngine.Quaternion
'''
pass
@staticmethod
def set_rotation(arg1):
'''
:param arg1: Quaternion
:type arg1: UnityEngine.Quaternion
'''
pass
@staticmethod
def get_localRotation():
'''
:returns: Quaternion
:rtype: UnityEngine.Quaternion
'''
pass
@staticmethod
def set_localRotation(arg1):
'''
:param arg1: Quaternion
:type arg1: UnityEngine.Quaternion
'''
pass
@staticmethod
def get_localScale():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def set_localScale(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def get_parent():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def set_parent(arg1):
'''
:param arg1: Transform
:type arg1: UnityEngine.Transform
'''
pass
@staticmethod
@overload
def SetParent(arg1):
'''
:param arg1: Transform
:type arg1: UnityEngine.Transform
'''
pass
@staticmethod
@overload
def SetParent(arg1, arg2):
'''
:param arg1: Transform
:type arg1: UnityEngine.Transform
:param arg2: Boolean
:type arg2: System.Boolean or bool
'''
pass
@staticmethod
def SetParent(arg1=None, arg2=None):
pass
@staticmethod
def get_worldToLocalMatrix():
'''
:returns: Matrix4x4
:rtype: UnityEngine.Matrix4x4
'''
pass
@staticmethod
def get_localToWorldMatrix():
'''
:returns: Matrix4x4
:rtype: UnityEngine.Matrix4x4
'''
pass
@staticmethod
def SetPositionAndRotation(arg1, arg2):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Quaternion
:type arg2: UnityEngine.Quaternion
'''
pass
@staticmethod
@overload
def Translate(arg1, arg2):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Space
:type arg2: UnityEngine.Space
'''
pass
@staticmethod
@overload
def Translate(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def Translate(arg1, arg2, arg3, arg4):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:param arg4: Space
:type arg4: UnityEngine.Space
'''
pass
@staticmethod
@overload
def Translate(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
'''
pass
@staticmethod
@overload
def Translate(arg1, arg2):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Transform
:type arg2: UnityEngine.Transform
'''
pass
@staticmethod
@overload
def Translate(arg1, arg2, arg3, arg4):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:param arg4: Transform
:type arg4: UnityEngine.Transform
'''
pass
@staticmethod
def Translate(arg1=None, arg2=None, arg3=None, arg4=None):
pass
@staticmethod
@overload
def Rotate(arg1, arg2):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Space
:type arg2: UnityEngine.Space
'''
pass
@staticmethod
@overload
def Rotate(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def Rotate(arg1, arg2, arg3, arg4):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:param arg4: Space
:type arg4: UnityEngine.Space
'''
pass
@staticmethod
@overload
def Rotate(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
'''
pass
@staticmethod
@overload
def Rotate(arg1, arg2, arg3):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Space
:type arg3: UnityEngine.Space
'''
pass
@staticmethod
@overload
def Rotate(arg1, arg2):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Single
:type arg2: System.Single or float
'''
pass
@staticmethod
def Rotate(arg1=None, arg2=None, arg3=None, arg4=None):
pass
@staticmethod
def RotateAround(arg1, arg2, arg3):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Vector3
:type arg2: UnityEngine.Vector3
:param arg3: Single
:type arg3: System.Single or float
'''
pass
@staticmethod
@overload
def LookAt(arg1, arg2):
'''
:param arg1: Transform
:type arg1: UnityEngine.Transform
:param arg2: Vector3
:type arg2: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def LookAt(arg1):
'''
:param arg1: Transform
:type arg1: UnityEngine.Transform
'''
pass
@staticmethod
@overload
def LookAt(arg1, arg2):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:param arg2: Vector3
:type arg2: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def LookAt(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
'''
pass
@staticmethod
def LookAt(arg1=None, arg2=None):
pass
@staticmethod
@overload
def TransformDirection(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def TransformDirection(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def TransformDirection(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
@overload
def InverseTransformDirection(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def InverseTransformDirection(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def InverseTransformDirection(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
@overload
def TransformVector(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def TransformVector(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def TransformVector(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
@overload
def InverseTransformVector(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def InverseTransformVector(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def InverseTransformVector(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
@overload
def TransformPoint(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def TransformPoint(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def TransformPoint(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
@overload
def InverseTransformPoint(arg1):
'''
:param arg1: Vector3
:type arg1: UnityEngine.Vector3
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
@overload
def InverseTransformPoint(arg1, arg2, arg3):
'''
:param arg1: Single
:type arg1: System.Single or float
:param arg2: Single
:type arg2: System.Single or float
:param arg3: Single
:type arg3: System.Single or float
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def InverseTransformPoint(arg1=None, arg2=None, arg3=None):
pass
@staticmethod
def get_root():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_childCount():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def DetachChildren():
pass
@staticmethod
def SetAsFirstSibling():
pass
@staticmethod
def SetAsLastSibling():
pass
@staticmethod
def SetSiblingIndex(arg1):
'''
:param arg1: Int32
:type arg1: System.Int32 or int
'''
pass
@staticmethod
def GetSiblingIndex():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def Find(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_lossyScale():
'''
:returns: Vector3
:rtype: UnityEngine.Vector3
'''
pass
@staticmethod
def get_transform():
'''
:returns: Transform
:rtype: UnityEngine.Transform
'''
pass
@staticmethod
def get_gameObject():
'''
:returns: GameObject
:rtype: UnityEngine.GameObject
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponent(arg1):
'''
:param arg1: String
:type arg1: System.String or str
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponent(arg1=None):
pass
@staticmethod
@overload
def GetComponentInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
@overload
def GetComponentInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
@overload
def GetComponentsInChildren(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInChildren(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponentInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: Component
:rtype: UnityEngine.Component
'''
pass
@staticmethod
def GetComponentInParent(arg1=None):
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Boolean
:type arg2: System.Boolean or bool
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponentsInParent(arg1, arg2):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
:param arg2: Undefined variable
:type arg2: ListT.ListT
'''
pass
@staticmethod
def GetComponentsInParent(arg1=None, arg2=None):
pass
@staticmethod
@overload
def GetComponents(arg1):
'''
:param arg1: Type
:type arg1: System.Type
:returns: ComponentArray
:rtype: UnityEngine.ComponentArray
'''
pass
@staticmethod
@overload
def GetComponents(arg1, arg2):
'''
:param arg1: Type
:type arg1: System.Type
:param arg2: Undefined variable
:type arg2: SystemCollectionsGenericList.SystemCollectionsGenericList
'''
pass
@staticmethod
@overload
def GetComponents(arg1):
'''
:param arg1: Undefined variable
:type arg1: ListT.ListT
'''
pass
@staticmethod
def GetComponents(arg1=None, arg2=None):
pass
@staticmethod
def GetInstanceID():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetHashCode():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def Equals(arg1):
'''
:param arg1: Object
:type arg1: System.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def get_name():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def set_name(arg1):
'''
:param arg1: String
:type arg1: System.String or str
'''
pass
@staticmethod
def ToString():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def GetType():
'''
:returns: Type
:rtype: System.Type
'''
pass
| StarcoderdataPython |
11245967 | <reponame>mantaspieza/IMDB-gross-profit-prediction-api
import pandas as pd
import pickle5 as pickle
# Load the fitted encoder once at import time, closing the file handle
# instead of leaking it.
# NOTE(review): pickle.load is only safe because this is a trusted,
# repo-shipped model file - never point it at untrusted data.
with open("model/One_Hot_Encoder.pkl", "rb") as _encoder_file:
    one_hot = pickle.load(_encoder_file)
def process_input(input_data):
    """
    One Hot Encode a single prediction request received by the API.

    :param input_data: dict of feature name -> value sent to API by the user
    :return: tuple of (input_data, encoded features), or None when the
        input cannot be processed
    """
    try:
        dataframe = pd.DataFrame.from_dict(input_data, orient="index").T
        # Numeric fields arrive as strings from the API payload; the
        # encoder expects floats.
        dataframe[["year", "rating", "metascore", "total_votes"]] = dataframe[
            ["year", "rating", "metascore", "total_votes"]
        ].astype(float)
        encoded_features = one_hot.transform(dataframe)
        return input_data, encoded_features
    except Exception as exc:
        # Was a bare "except:" that hid the real error (and even trapped
        # KeyboardInterrupt). Keep the None-on-failure contract but report
        # what actually went wrong.
        print("process_input failed: {}".format(exc))
| StarcoderdataPython |
96148 | <reponame>dimithras/pandas<gh_stars>1-10
import numpy as np
import pandas as pd
from pandas import Series
class TestSeriesAnalytics:
    """Sanity tests for Series analytics properties."""

    def test_ptp(self):
        # GH21614: np.ptp dispatched on a Series must match the raw array.
        N = 1000
        arr = np.random.randn(N)
        ser = Series(arr)
        assert np.ptp(ser) == np.ptp(arr)

    def test_is_monotonic(self):
        # Use is_monotonic_increasing throughout: the bare is_monotonic
        # alias was deprecated in pandas 1.5 and removed in 2.0.
        s = Series(np.random.randint(0, 10, size=1000))
        assert not s.is_monotonic_increasing
        s = Series(np.arange(1000))
        assert s.is_monotonic_increasing is True
        s = Series(np.arange(1000, 0, -1))
        assert s.is_monotonic_decreasing is True
        s = Series(pd.date_range("20130101", periods=10))
        assert s.is_monotonic_increasing is True
        s = Series(list(reversed(s.tolist())))
        assert s.is_monotonic_increasing is False
        assert s.is_monotonic_decreasing is True
| StarcoderdataPython |
3454347 | <reponame>Corymbia/nephoria
from nephoria.testcontroller import TestController
from cloud_utils.log_utils import get_traceback, eulogger, red
from prettytable import PrettyTable
import time
from optparse import OptionParser
import resource
import signal
import sys
import __builtin__
# Set of files currently open via the patched open(); used to spot fd leaks.
openfiles = set()
# Keep a reference to the original (Python 2) builtin file type before patching.
oldfile = __builtin__.file
def printOpenFiles():
print red("\n\n### %d OPEN FILES: [%s]\n\n" % (len(openfiles), ", ".join(f.x for f in openfiles)))
class newfile(oldfile):
    """Python 2 ``file`` subclass that logs every open/close and keeps the
    module-level ``openfiles`` set up to date for fd-leak debugging."""
    def __init__(self, *args):
        # Remember the path (first argument passed to open()) for reporting.
        self.x = args[0]
        print red("### OPENING %s ###" % str(self.x))
        oldfile.__init__(self, *args)
        # Track this handle until close() removes it again.
        openfiles.add(self)
        printOpenFiles()
    def close(self):
        print red("### CLOSING %s ###" % str(self.x))
        oldfile.close(self)
        openfiles.remove(self)
        printOpenFiles()
oldopen = __builtin__.open
def newopen(*args):
    """Replacement for the builtin open() that returns a tracked newfile."""
    tracked = newfile(*args)
    return tracked
# Monkey-patch the builtins so that every file opened anywhere in this
# process (including by libraries) is tracked for leak debugging.
__builtin__.file = newfile
__builtin__.open = newopen
class InstanceBatchTest():
    """Repeatedly launches batches of VMs against a Eucalyptus cloud and
    records how long each batch takes, printing results as a table.

    Configuration comes entirely from command-line options parsed in
    __init__ (so constructing this class has side effects: it parses
    sys.argv and connects to the cloud).
    """
    def __init__(self):
        self.name = 'InstanceBatchTest'
        parser = OptionParser('InstanceBatchTest')
        parser.add_option("-c", "--clc-ip", dest="clc", default=None,
                          help="CLC IP")
        parser.add_option("-p", "--password", dest="password", default='<PASSWORD>',
                          help="clc ssh password")
        parser.add_option("-l", "--log-level", dest="log_level", default='DEBUG',
                          help="LOGLEVEL")
        parser.add_option("--instance-timeout", dest="instance_timeout", type='int', default=1200,
                          help="Seconds used as timeout in run-image/instance request")
        parser.add_option('--emi', type=str, default=None,
                          help='Image id used to run VMs')
        parser.add_option('--keypair', type=str, default=None,
                          help='EC2 Keypair name to use for VM connections, '
                               'default:"InstanceBatchTestKey_<timestamp>"')
        parser.add_option('--zone', type=str, default=None,
                          help='Name of availability zone to run VMs in')
        parser.add_option('--vmtype', type=str, default='t2.micro',
                          help='Instance Vmtype to use')
        parser.add_option('--results-file', type=str, default=None,
                          help='File to save results to')
        parser.add_option('--vm-count', type=int, default=10,
                          help='Number of VMs to run per request')
        parser.add_option('--vm-max', type=int, default=500,
                          help='Max or total number of VMs to run in this test')
        parser.add_option('--no-clean', default=False, action='store_true',
                          help="Do not terminate VMs during test")
        parser.add_option('--user', type=str, default='admin',
                          help='Cloud username, default: "admin"')
        parser.add_option('--account', type=str, default='nephotest',
                          help='Cloud account name, default:"nephotest"')
        self.args, pos = parser.parse_args()
        if not self.args.clc:
            raise ValueError('CLC must be provided. See --clc argument')
        self.log = eulogger.Eulogger('InstanceBatchTest', stdout_level=self.args.log_level)
        # Connect to the cloud and resolve the image, keypair and security
        # group used for all VM runs in this test.
        self.tc = TestController(self.args.clc,
                                 password=self.args.password,
                                 clouduser_account=self.args.account,
                                 clouduser_name=self.args.user,
                                 log_level=self.args.log_level)
        self.emi = self.tc.user.ec2.get_emi(emi=self.args.emi)
        self.vmtype = self.args.vmtype
        self.keyname = self.args.keypair or "InstanceBatchTestKey_{0}".format(int(time.time()))
        self.key = self.tc.user.ec2.get_keypair(key_name=self.keyname)
        self.group = self.tc.user.ec2.add_group('InstanceBatchTestGroup')
        # Allow ssh and ping to the test instances.
        self.tc.user.ec2.authorize_group(self.group, port=22, protocol='tcp')
        self.tc.user.ec2.authorize_group(self.group, protocol='icmp', port=-1)
        self.tc.user.ec2.show_security_group(self.group)
        if not self.key:
            raise RuntimeError('Was not able to find or create key:"{0}". '
                               'If the key exists, it may not be in the local dir?')
        # results: run number -> stats dict; pt: pretty-printed summary table.
        self.results = {}
        self.pt = PrettyTable(['RUN', 'START', 'TOTAL', 'NEW', 'ELAPSED'])
        # Timestamp of the last SIGINT; used to detect a double ctrl+c.
        self.last_kill_sig = 0
        self.kill = False
    def add_result(self, start_date, run_number, total, added, elapsed):
        """Record one batch's stats and append a row to the results table."""
        self.results[run_number] = {'start_date': start_date,
                                    'total': total,
                                    'added': added,
                                    'elapsed': elapsed}
        self.pt.add_row([run_number, start_date, total, added, elapsed])
        self.log.info('')
    def run_batch_loop(self):
        """Run batches of vm_count instances until vm_max have been launched
        (or the test is killed), recording timing per batch.

        Returns an error string (empty when no errors occurred). Aborts by
        re-raising after the second error.
        """
        error = ""
        error_count = 0
        self.log.info('Test start. Terminating all instances for user:{0}/{1}'
                      .format(self.tc.user.account_name, self.tc.user.user_name))
        if not self.args.no_clean:
            self.tc.user.ec2.connection.terminate_instances()
            #Monitor to terminated state
            self.tc.user.ec2.terminate_instances()
        existing_instances = self.tc.admin.ec2.get_instances(state='running')
        start_count = len(existing_instances)
        elapsed = 0
        run_number = 0
        added = 0
        ins_ids = []
        start_date = time.strftime("%Y-%m-%d %H:%M")
        # Record the baseline (run 0) before launching anything.
        self.add_result(start_date=start_date, run_number=run_number, total=start_count,
                        added=added, elapsed=elapsed)
        while (len(ins_ids) < self.args.vm_max) and not self.kill:
            try:
                printOpenFiles()
                run_number += 1
                start_date = time.strftime("%Y-%m-%d %H:%M")
                start_time = time.time()
                ins = self.tc.user.ec2.run_image(image=self.emi, keypair=self.key,
                                                 min=self.args.vm_count, max=self.args.vm_count,
                                                 zone=self.args.zone, vmtype=self.vmtype,
                                                 group=self.group,
                                                 timeout=self.args.instance_timeout,
                                                 )
                elapsed = time.time() - start_time
                added = len(ins)
                total = len(self.tc.admin.ec2.get_instances(state='running'))
                for i in ins:
                    ins_ids.append(i.id)
                    try:
                        # Close per-instance loggers and ssh sessions so the
                        # process does not run out of file descriptors.
                        i.log.close()
                        if i.ssh:
                            i.ssh.connection.close()
                            i.ssh.close()
                    except Exception as IE:
                        printOpenFiles()
                        self.log.warning('Error closing instances fds:"{0}"'.format(IE))
                    # Drop references so instance objects can be collected.
                    i = None
                ins = None
                self.add_result(start_date=start_date, run_number=run_number, total=total,
                                added=added, elapsed=elapsed)
                self.log.info('\n\nDone with iteration:"{0}", ran: "{1}". Now added:{2}/{3}\n\n'
                              .format(run_number, added, len(ins_ids), self.args.vm_max))
                time.sleep(1)
            except Exception as E:
                # Tolerate a single failed batch; abort on the second error.
                error_count += 1
                error = "{0}\n{1}".format(get_traceback(), E)
                self.log.error(error)
                if error_count > 1:
                    raise
        self.show_results()
        self.log.info('Done with test, ran: {0} instances'.format(len(ins_ids)))
        return error
    def show_results(self):
        """Log the results table, and write it to --results-file if set."""
        self.log.info('\n{0}\n'.format(self.pt))
        if self.args.results_file:
            with open(self.args.results_file, 'w') as resfile:
                resfile.write('\n{0}\n'.format(self.pt))
                resfile.flush()
    def clean_method(self):
        """Terminate all of the test user's instances unless --no-clean."""
        if not self.args.no_clean:
            self.log.info('Terminating all instances for user:{0}/{1}'
                          .format(self.tc.user.account_name, self.tc.user.user_name))
            self.tc.user.ec2.terminate_instances()
if __name__ == "__main__":
    errors = []
    test = InstanceBatchTest()
    try:
        # Raise the open-file limit so per-instance fds don't exhaust it.
        # In the case we want to keep each instance connection open?...
        resource.setrlimit(resource.RLIMIT_NOFILE, (10 * test.args.vm_max, -1))
    except Exception as RE:
        test.log.warning(red('Unable to set resource limit to:"{0}", err:"{1}"'
                             .format(10 * test.args.vm_max, RE)))
    def signal_handler(signal, frame):
        """SIGINT handler: first press dumps the results table; a second
        press within 2 seconds aborts the test."""
        kill_it = False
        now = time.time()
        if now - test.last_kill_sig < 2:
            kill_it = True
        test.last_kill_sig = now
        sys.stderr.write(red('\n\nReceived SIGINT, dumping results. (Press ctrl+c twice quickly to end test now)\n\n'))
        sys.stderr.flush()
        test.show_results()
        if kill_it:
            sys.stderr.write(red('\n\nReceived SIGINT twice within 2 seconds killing test...!\n\n'))
            sys.stderr.flush()
            sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Run the batch loop, then clean up; collect (but don't stop on) errors.
    for meth in [test.run_batch_loop, test.clean_method]:
        try:
            ret = meth()
            if ret:
                test.log.error(ret)
        except Exception as E:
            test.tc.log.error('{0}\nError in test:"{1}"'.format(get_traceback(), E))
            errors.append(E)
    test.show_results()
    printOpenFiles()
    # Exit status 1 if any step errored, 0 otherwise.
    exit(len(errors) and 1)
| StarcoderdataPython |
308926 | <reponame>michaelfranzl/pyglpainter
"""
pyglpainter - Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from OpenGL.GL import (GL_LINES)
from .item import Item
class CoordSystem(Item):
    """
    Classical XYZ coordinate system drawn as three unit-length axes:
    X in red, Y in green, Z in blue.
    """

    def __init__(self, label, prog_id, origin=(0, 0, 0), scale=10,
                 linewidth=1):
        """
        @param label
            A string containing a unique name for this item.
        @param prog_id
            OpenGL program ID (determines shaders to use) to use for this item.
        @param origin
            Origin of this item in world space.
        @param scale
            Scale of this item in world space.
        @param linewidth
            Width of rendered lines in pixels.
        """
        # 3 axes x 2 vertices (origin + tip) = 6 vertices total.
        super(CoordSystem, self).__init__(label, prog_id, GL_LINES,
                                          linewidth, origin, scale, False,
                                          6)
        axes = (
            ((1, 0, 0), (.6, .0, .0, 1.0)),  # X axis, red
            ((0, 1, 0), (.0, .6, .0, 1.0)),  # Y axis, green
            ((0, 0, 1), (.0, .0, .6, 1.0)),  # Z axis, blue
        )
        for tip, color in axes:
            self.append_vertices([[(0, 0, 0), color]])
            self.append_vertices([[tip, color]])
        self.upload()

    def highlight(self, val):
        """
        Visually highlight this coordinate system by recoloring the three
        origin vertices, creating a gradient towards white at the center.
        @val
            True or False
        """
        center_color = (1, 1, 1, 1) if val is True else (0, 0, 0, 1)
        for axis in range(3):
            # Origin vertices sit at even indices (0, 2, 4).
            self.vdata_pos_col["color"][axis * 2] = center_color
        self.upload()
        self.dirty = True
| StarcoderdataPython |
3566533 | import os
import six
import itertools
from dftinputgen.data import STANDARD_ATOMIC_WEIGHTS
from dftinputgen.utils import get_elem_symbol
from dftinputgen.utils import get_kpoint_grid_from_spacing
from dftinputgen.qe.settings import QE_TAGS
from dftinputgen.qe.settings.calculation_presets import QE_PRESETS
from dftinputgen.base import DftInputGenerator
from dftinputgen.base import DftInputGeneratorError
def _qe_val_formatter(val):
    """Render a Python value as a Quantum Espresso input-file token."""
    if isinstance(val, bool):
        # Fortran-style logicals: ".true." / ".false."
        return ".{}.".format(str(val).lower())
    if isinstance(val, six.string_types):
        # String-valued tags are double-quoted in QE input files.
        return '"{}"'.format(val)
    return str(val)
class PwxInputGeneratorError(DftInputGeneratorError):
    """Base class for errors raised while generating pw.x input files."""
    pass
class PwxInputGenerator(DftInputGenerator):
    """Base class to generate input files for pw.x (Quantum Espresso)."""

    def __init__(
        self,
        crystal_structure=None,
        calculation_presets=None,
        custom_sett_file=None,
        custom_sett_dict=None,
        specify_potentials=None,
        write_location=None,
        pwx_input_file=None,
        overwrite_files=None,
        **kwargs
    ):
        """
        Constructor.

        Parameters
        ----------
        crystal_structure: :class:`ase.Atoms` object
            :class:`ase.Atoms` object from `ase.io.read([crystal structure
            file])`.
        calculation_presets: str, optional
            The "base" calculation settings to use--must be one of the
            pre-defined groups of tags and values provided for pw.x.
            Pre-defined settings for some common calculation types are in
            INSTALL_PATH/qe/settings/calculation_presets/
        custom_sett_file: str, optional
            Location of a JSON file with custom calculation settings as a
            dictionary of tags and values.
            NB: Custom settings specified here always OVERRIDE those in
            `calculation_presets` in case of overlap.
        custom_sett_dict: dict, optional
            Dictionary with custom calculation settings as tags and values/
            NB: Custom settings specified here always OVERRIDE those in
            `calculation_presets` and `custom_sett_file`.
        specify_potentials: bool, optional
            Whether to set pseudopotentials to use for each chemical species
            in the input crystal structure.
            If set to True, it is attempted to match every chemical species
            to a pseudopotential.
            1. If `pseudo_names` dictionary (with chemical species and
               names of pseudopotential files) is specified in any of the
               previous settings arguments, they are used.
            2. For any species not in the input `pseudo_names` dictionary (or
               if `pseudo_names` is not input at all), the *first matching*
               pseudopotential file (species name in the name of the
               pseudopotential file) in the specified `pseudo_dir` is used.
               If a pseudopotential file cannot be found for a chemical
               species, an error is thrown.
               NB: If `pseudo_dir` is not provided as input, an error is
               thrown.
               NB: this above described matching is performed lazily, i.e.,
               only when a card or namelist that requires pseudopotential
               information is generated.
            If set to False, no pseudopotential is set for any chemical
            species. The generated input files have "None" in place of
            pseudopotential names for every species listed in the
            "ATOMIC_SPECIES" card.
            Default: False
        write_location: str, optional
            Path to the directory in which to write the input file(s).
            Default: Current working directory.
        pwx_input_file: str, optional
            Name of the file in which to write the formatted pw.x input
            content.
            Default: "[`calculation_presets`].in" if `calculation_presets` is
            specified by the user, else "pwx.in".
        overwrite_files: bool, optional
            To overwrite files or not, that is the question.
            Default: True
        **kwargs:
            Arbitrary keyword arguments.
        """
        # TODO(@hegdevinayi): Add default Hubbard schemes (Wang/Aykol/Bennett)
        # TODO(@hegdevinayi): Add default magnetism schemes (ferro/AFM G-type)
        # TODO(@hegdevinayi): Consider allowing psp location via config file
        super(PwxInputGenerator, self).__init__(
            crystal_structure=crystal_structure,
            calculation_presets=calculation_presets,
            custom_sett_file=custom_sett_file,
            custom_sett_dict=custom_sett_dict,
            write_location=write_location,
            overwrite_files=overwrite_files,
        )
        self._parameters_from_structure = self._get_parameters_from_structure()
        # NOTE(review): stored here but the public `calculation_settings`
        # property recomputes on every access; this cache is never read.
        self._calculation_settings = self._get_calculation_settings()
        # Default False; the setter below only overrides it when the user
        # passed a non-None value.
        self._specify_potentials = False
        self.specify_potentials = specify_potentials
        self._pwx_input_file = self._get_default_pwx_input_file()
        self.pwx_input_file = pwx_input_file

    def _set_crystal_structure(self, crystal_structure):
        # Keep structure-derived parameters (nat/ntyp) in sync whenever the
        # crystal structure is replaced.
        super(PwxInputGenerator, self)._set_crystal_structure(
            crystal_structure
        )
        self._parameters_from_structure = self._get_parameters_from_structure()

    @property
    def parameters_from_structure(self):
        """DFT parameters auto-determined for the input crystal structure."""
        return self._parameters_from_structure

    @property
    def specify_potentials(self):
        """Should potentials be specified for each chemical species."""
        return self._specify_potentials

    @specify_potentials.setter
    def specify_potentials(self, specify_potentials):
        # None means "keep current value" (i.e. the default False).
        if specify_potentials is not None:
            self._specify_potentials = specify_potentials

    @property
    def pwx_input_file(self):
        """Name of the pw.x input file to write to."""
        return self._pwx_input_file

    @pwx_input_file.setter
    def pwx_input_file(self, pwx_input_file):
        if pwx_input_file is not None:
            self._pwx_input_file = pwx_input_file

    def _get_default_pwx_input_file(self):
        # "[presets].in" when presets were chosen, generic "pwx.in" otherwise.
        if self.calculation_presets is None:
            return "pwx.in"
        return "{}.in".format(self.calculation_presets)

    @property
    def dft_package(self):
        """Name of the DFT package (pw.x is part of Quantum Espresso)."""
        return "qe"

    def _get_parameters_from_structure(self):
        """Determine DFT settings from input crystal structure.
        This includes some required parameters, e.g. number of atoms and the
        number of types of species.
        """
        return {
            "nat": len(self.crystal_structure),
            "ntyp": len(set(self.crystal_structure.get_chemical_symbols())),
        }

    @staticmethod
    def _get_pseudo_name(species, pseudo_dir):
        """Match chemical species::pseudopotential in a given directory."""
        # filler for black/flake8 compatibility
        # empty line after docstring relaxed in pydocstyle>5.0
        # this will have to stay due to py2 (pydocstyle ceil = 3.0.0)
        def _elem_from_fname(fname):
            # Element symbol is the filename prefix up to the first "." or
            # "_", e.g. "Fe.pbe-nd-rrkjus.UPF" -> "fe".
            bname = os.path.basename(fname)
            elem = bname.partition(".")[0].partition("_")[0].lower()
            return elem
        elem_low = get_elem_symbol(species).lower()
        # match pseudo iff a *.UPF filename matches element symbol in structure
        # Note: generic except here for py2/py3 compatibility
        try:
            pseudo_dir_files = os.listdir(os.path.expanduser(pseudo_dir))
        except:  # noqa: E722
            msg = 'Failed to list contents in "{}"'.format(pseudo_dir)
            raise PwxInputGeneratorError(msg)
        # Returns the first match in directory-listing order; returns None
        # implicitly when no file matches.
        for p in pseudo_dir_files:
            ext = os.path.splitext(p)[-1].lower()
            if _elem_from_fname(p) == elem_low and ext == ".upf":
                return os.path.basename(p)

    def _get_pseudo_names(self):
        """Get names of pseudopotentials to use for each chemical species.

        Raises `PwxInputGeneratorError` if potentials must be specified but
        `pseudo_dir` is missing or a species cannot be matched.
        """
        species = sorted(set(self.crystal_structure.get_chemical_symbols()))
        pseudo_names = {sp: None for sp in species}
        if not self.specify_potentials:
            return pseudo_names
        # 1. check if pseudo names are provided in input calculation settings
        input_pseudo_names = self.calculation_settings.get("pseudo_names", {})
        pseudo_names.update(input_pseudo_names)
        # 2. if pseudos for all species were input, nothing more to be done.
        if None not in set(pseudo_names.values()):
            return pseudo_names
        # 3. for species that are missing pseudos, try matching psp files in
        # the `pseudo_dir` directory (raise error if directory not specified)
        pseudo_dir = self.calculation_settings.get("pseudo_dir")
        if not pseudo_dir:
            msg = "Pseudopotential directory not specified"
            raise PwxInputGeneratorError(msg)
        # NOTE(review): matching is attempted for *every* species here, even
        # those already resolved in step 1; only missing ones are used below.
        matched_pseudo_names = {
            sp: self._get_pseudo_name(sp, pseudo_dir) for sp in species
        }
        # 4. fill species still missing a pseudo with the matched file names
        for sp in pseudo_names:
            if pseudo_names[sp] is None:
                pseudo_names[sp] = matched_pseudo_names.get(sp)
        # 5. finally, if any species is missing pseudo, raise error
        missing_pseudos = [k for k, v in pseudo_names.items() if v is None]
        if missing_pseudos:
            msg = "Failed to find potential for [{}]".format(
                ", ".join(missing_pseudos)
            )
            raise PwxInputGeneratorError(msg)
        return pseudo_names

    @property
    def calculation_settings(self):
        """Dictionary of all calculation settings to use as input pw.x."""
        return self._get_calculation_settings()

    def _get_calculation_settings(self):
        """Load all calculation settings: user-input and auto-determined.

        Precedence (low to high): presets < settings file < settings dict
        < structure-derived parameters (nat/ntyp always win).
        """
        calc_sett = {}
        if self.calculation_presets is not None:
            calc_sett.update(QE_PRESETS[self.calculation_presets])
        if self.custom_sett_from_file is not None:
            calc_sett.update(self.custom_sett_from_file)
        if self.custom_sett_dict is not None:
            calc_sett.update(self.custom_sett_dict)
        calc_sett.update(self.parameters_from_structure)
        return calc_sett

    def _namelist_to_str(self, namelist):
        """Convert (tags, values) from a namelist into a formatted string."""
        # For &CONTROL, a missing pseudo_dir is fatal only when potentials
        # are actually required.
        if namelist.lower() == "control":
            if not self.calculation_settings.get("pseudo_dir"):
                if self.specify_potentials:
                    msg = "Pseudopotentials directory not specified"
                    raise PwxInputGeneratorError(msg)
        lines = ["&{}".format(namelist.upper())]
        # Tags are emitted in the canonical order defined in QE_TAGS, not in
        # user-input order; unknown tags are silently skipped.
        for tag in QE_TAGS["pw.x"]["namelist_tags"][namelist]:
            if tag not in self.calculation_settings:
                continue
            lines.append(
                "    {} = {}".format(
                    tag, _qe_val_formatter(self.calculation_settings.get(tag)),
                )
            )
        lines.append("/")
        return "\n".join(lines)

    @property
    def all_namelists_as_str(self):
        """All pw.x namelists as one formatted string."""
        blocks = []
        # Only namelists requested via the "namelists" setting are emitted,
        # in the canonical QE_TAGS order.
        for namelist in QE_TAGS["pw.x"]["namelists"]:
            if namelist in self.calculation_settings.get("namelists", []):
                blocks.append(self._namelist_to_str(namelist))
        return "\n".join(blocks)

    @property
    def atomic_species_card(self):
        """pw.x ATOMIC_SPECIES card as a string."""
        species = sorted(set(self.crystal_structure.get_chemical_symbols()))
        # When `specify_potentials` is False, names are None and are printed
        # literally as "None" (documented constructor behavior).
        pseudo_names = self._get_pseudo_names()
        lines = ["ATOMIC_SPECIES"]
        for sp in species:
            lines.append(
                "{:4s} {:12.8f} {}".format(
                    sp,
                    STANDARD_ATOMIC_WEIGHTS[sp]["standard_atomic_weight"],
                    pseudo_names[sp],
                )
            )
        return "\n".join(lines)

    @property
    def atomic_positions_card(self):
        """pw.x ATOMIC_POSITIONS card as a string (fractional coordinates)."""
        symbols = self.crystal_structure.get_chemical_symbols()
        positions = self.crystal_structure.get_scaled_positions()
        lines = ["ATOMIC_POSITIONS {crystal}"]
        for s, p in zip(symbols, positions):
            lines.append("{:4s} {:12.8f} {:12.8f} {:12.8f}".format(s, *p))
        return "\n".join(lines)

    @property
    def kpoints_card(self):
        """pw.x K_POINTS card as a string.

        Raises NotImplementedError for schemes other than "gamma" or
        "automatic".
        """
        kpoints_sett = self.calculation_settings.get("kpoints", {})
        scheme = kpoints_sett.get("scheme")
        if scheme not in ["gamma", "automatic"]:
            raise NotImplementedError
        if scheme == "gamma":
            return "K_POINTS {gamma}"
        elif scheme == "automatic":
            lines = ["K_POINTS {automatic}"]
            grid = kpoints_sett.get("grid", [])
            # NOTE(review): "shift" is read unconditionally -- a missing key
            # raises KeyError; confirm presets always provide it.
            shift = kpoints_sett["shift"]
            if not grid:
                # Derive the grid from a k-point "spacing" if none was given.
                grid = get_kpoint_grid_from_spacing(
                    self.crystal_structure, kpoints_sett["spacing"],
                )
            _l = "{} {} {} {} {} {}".format(*itertools.chain(grid, shift))
            lines.append(_l)
            return "\n".join(lines)

    @property
    def cell_parameters_card(self):
        """pw.x CELL_PARAMETERS card as a string (lattice vectors, Angstrom)."""
        lines = ["CELL_PARAMETERS {angstrom}"]
        for cv in self.crystal_structure.cell:
            lines.append("{:12.8f} {:12.8f} {:12.8f}".format(*cv))
        return "\n".join(lines)

    @property
    def occupations_card(self):
        """pw.x OCCUPATIONS card as a string. Not implemented yet."""
        raise NotImplementedError

    @property
    def constraints_card(self):
        """pw.x CONSTRAINTS card as a string. Not implemented yet."""
        raise NotImplementedError

    @property
    def atomic_forces_card(self):
        """pw.x ATOMIC_FORCES card as a string. Not implemented yet."""
        raise NotImplementedError

    @property
    def all_cards_as_str(self):
        """All pw.x cards as one formatted string."""
        blocks = []
        # Only cards requested via the "cards" setting are emitted; each card
        # is looked up as a "<name>_card" property on this class.
        for card in QE_TAGS["pw.x"]["cards"]:
            if card in self.calculation_settings.get("cards", []):
                blocks.append(getattr(self, "{}_card".format(card)))
        return "\n".join(blocks)

    @property
    def pwx_input_as_str(self):
        """pw.x input (all namelists + cards) as a formatted string."""
        return "\n".join([self.all_namelists_as_str, self.all_cards_as_str])

    def write_pwx_input(self, write_location=None, filename=None):
        """Write the pw.x input file to disk at the specified location.

        Raises `PwxInputGeneratorError` if there is nothing to write or if
        the location/filename is not specified.
        """
        if self.pwx_input_as_str.strip() == "":
            msg = "Nothing to write. No input settings found?"
            raise PwxInputGeneratorError(msg)
        if write_location is None:
            msg = "Location to write files not specified"
            raise PwxInputGeneratorError(msg)
        if filename is None:
            msg = "Name of the input file to write into not specified"
            raise PwxInputGeneratorError(msg)
        with open(os.path.join(write_location, filename), "w") as fw:
            fw.write(self.pwx_input_as_str)

    def write_input_files(self):
        """Write pw.x input files to the user-specified location/file."""
        self.write_pwx_input(
            write_location=self.write_location, filename=self.pwx_input_file,
        )
| StarcoderdataPython |
1862097 | """
Utilities for managing (local + remote) runs
"""
from . import managed, remote, calibration, powerbi, utils
from .managed import ManagedRun
| StarcoderdataPython |
5126565 | import unittest
if __name__ == '__main__':
    test_modules = ['connection', 'fields', 'queryset', 'dereference',
                    'document', 'dynamic_document', 'signals',
                    'django_compatibility']
    loader = unittest.defaultTestLoader
    # Collect every test module into a single suite and run it.
    suite = unittest.TestSuite(
        loader.loadTestsFromName(name) for name in test_modules)
    unittest.TextTestRunner().run(suite)
| StarcoderdataPython |
9710241 | import numpy as np
import torch
from pymoo.factory import get_mutation
from pymoo.core.mutation import Mutation
from lambo import utils
from lambo.tasks.chem.logp import prop_func
from lambo.models.mlm import sample_tokens
def get_mlm_mutation(mlm_obj, problem, cand_idx, res_idx):
    """Propose substitution tokens with a masked-language model.

    For each selected candidate, mask the residue at `res_idx`, let the MLM
    predict a replacement, and return the replacement's index into the
    tokenizer's sampling vocabulary.

    # assumes cand_idx and res_idx are 1-D integer arrays of equal length
    # (one masked position per candidate) -- TODO confirm against callers
    """
    seqs = [problem.candidate_pool[i].mutant_residue_seq for i in cand_idx]
    base_tok_idxs = utils.str_to_tokens(seqs, mlm_obj.tokenizer)
    # One mask position per sequence, shaped (batch, 1) for *_along_axis.
    mask_idxs = res_idx.reshape(-1, 1)
    src_tok_idxs = base_tok_idxs.clone().to(mlm_obj.device)
    # Replace the chosen position with the padding token before scoring.
    np.put_along_axis(src_tok_idxs, mask_idxs, mlm_obj.tokenizer.padding_idx, axis=1)
    with torch.no_grad():
        tgt_tok_logits, _ = mlm_obj.logits_from_tokens(src_tok_idxs)
    # Sample new tokens for the whole sequence, then keep only the masked
    # position of each row.
    new_tok_idxs, _ = sample_tokens(
        base_tok_idxs, tgt_tok_logits, mlm_obj.tokenizer, replacement=False
    )
    new_tok_idxs = np.take_along_axis(new_tok_idxs, mask_idxs, axis=1).reshape(-1)
    new_toks = [mlm_obj.tokenizer.convert_id_to_token(t_idx) for t_idx in new_tok_idxs]
    # Map token strings back to indices in the sampling vocabulary, which is
    # the index space the mutation operators work in.
    sampling_vocab_idxs = np.array([
        mlm_obj.tokenizer.sampling_vocab.index(tok) for tok in new_toks
    ])
    return sampling_vocab_idxs
#following https://peerj.com/articles/pchem-11.pdf
def safe_vocab_mutation(tokenizer, problem, cand_idx, res_idx):
    """Sample, per candidate, a substitution token that keeps the mutated
    sequence "safe" (prop_func score > -100).

    Retries up to 50 random tokens per candidate; falls back to an
    unconstrained random token if none qualifies.
    """
    chosen = []
    sequences = [problem.candidate_pool[i].mutant_residue_seq for i in cand_idx]
    vocab_size = len(tokenizer.sampling_vocab)
    for seq, pos in zip(sequences, res_idx):
        # Round-trip through the tokenizer and drop the start/end tokens.
        pieces = tokenizer.decode(tokenizer.encode(seq)).split(" ")[1:-1]
        for _ in range(50):
            tok_idx = np.random.randint(0, vocab_size)
            candidate = "".join(
                pieces[:pos] + [tokenizer.sampling_vocab[tok_idx]] + pieces[(pos + 1):])
            if prop_func(candidate) > -100:
                chosen.append(tok_idx)
                break
        else:
            # No safe substitution found within the budget; pick any token.
            chosen.append(np.random.randint(0, vocab_size))
    return np.array(chosen)
class UniformMutation(Mutation):
    """Mutation operator that keeps each query's candidate index and
    resamples its (position, token, op) entries uniformly at random."""

    def __init__(self, tokenizer=None, mlm_obj=None, safe_mut=False):
        self.tokenizer = tokenizer
        self.mlm_obj = mlm_obj
        self.safe_mut = safe_mut

    def _do(self, problem, x, **kwargs):
        batches = problem.x_to_query_batches(x)
        batch_shape, num_vars = batches.shape[:-1], batches.shape[-1]
        queries = batches.reshape(-1, num_vars)
        n = queries.shape[0]
        cand_idx = queries[..., 0]
        seqs = [problem.candidate_pool[i].mutant_residue_seq for i in cand_idx]
        # NB: requires a non-empty candidate pool at task init.
        raw_pos = np.random.randint(problem.xl[1], problem.xu[1], n)
        # Wrap each sampled position into its sequence's valid range.
        pos_idx = np.array([p % len(seq) for p, seq in zip(raw_pos, seqs)])
        if self.safe_mut:
            tok_idx = safe_vocab_mutation(self.tokenizer, problem, cand_idx, pos_idx)
        elif self.mlm_obj is None:
            tok_idx = np.random.randint(0, len(self.tokenizer.sampling_vocab), n)
        else:
            tok_idx = get_mlm_mutation(self.mlm_obj, problem, cand_idx, pos_idx)
        op_idx = np.random.randint(0, len(problem.op_types), n)
        mutated = np.stack(
            [cand_idx, pos_idx, tok_idx, op_idx], axis=-1).reshape(*batch_shape, -1)
        return problem.query_batches_to_x(mutated)
class LocalMutation(Mutation):
    """Mutation operator that perturbs the position entry with polynomial
    mutation ('int_pm') and resamples the token and op entries.

    Parameters
    ----------
    eta, prob:
        Passed through to pymoo's 'int_pm' polynomial mutation.
    tokenizer:
        Tokenizer providing `encode` and `sampling_vocab`.
    mlm_obj:
        Optional masked-language model used to propose tokens.
    safe_mut: bool
        If True, only propose tokens that keep `prop_func` > -100.
    """

    def __init__(self, eta, prob, tokenizer=None, mlm_obj=None, safe_mut=False):
        super().__init__()
        self.poly_mutation = get_mutation('int_pm', eta=eta, prob=prob)
        self.tokenizer = tokenizer
        self.mlm_obj = mlm_obj
        self.safe_mut = safe_mut

    def _do(self, problem, x, **kwargs):
        """Return mutated decision variables for the batch `x`."""
        query_batches = problem.x_to_query_batches(x)
        batch_shape, num_vars = query_batches.shape[:-1], query_batches.shape[-1]
        flat_queries = query_batches.reshape(-1, num_vars)
        num_samples = flat_queries.shape[0]
        x0 = flat_queries[..., 0]
        # Perturb the position entry via polynomial mutation, then clamp each
        # position to the tokenized length of its candidate sequence.
        mut_x = self.poly_mutation._do(problem, x)
        mut_x = problem.x_to_query_batches(mut_x).reshape(-1, num_vars)
        x1 = mut_x[..., 1]
        for i, idx in enumerate(x0):
            # subtract 2 for the start/end tokens added by the tokenizer
            num_tokens = len(self.tokenizer.encode(
                problem.candidate_pool[idx].mutant_residue_seq)) - 2
            x1[i] = min(num_tokens - 1, x1[i])
            # TODO: consider clamping in token-index space (skip start/end
            # tokens explicitly) for consistency with the tokenizer.
        if self.safe_mut:
            x2 = safe_vocab_mutation(self.tokenizer, problem, x0, x1)
        elif self.mlm_obj is None:
            x2 = np.random.randint(0, len(self.tokenizer.sampling_vocab), num_samples)
        else:
            x2 = get_mlm_mutation(self.mlm_obj, problem, x0, x1)
        x3 = np.random.randint(0, len(problem.op_types), num_samples)
        new_queries = np.stack(
            [x0, x1, x2, x3], axis=-1).reshape(*batch_shape, -1)
        return problem.query_batches_to_x(new_queries)
| StarcoderdataPython |
1793416 | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import time
from ..builder import ALGORITHMS, build_backbone, build_head, build_neck, build_algorithm
from mmcv.runner.checkpoint import load_checkpoint
from .base import BaseModel
import torch
import torch.nn.functional as F
from mmselfsup.models.algorithms.simsiam_kd import *
from collections import OrderedDict
import torch.distributed as dist
import lpips
from mmselfsup.models.algorithms.simsiam_kd_test import *
@ALGORITHMS.register_module()
class SimSiam_dimcollapsecheckLPIPS2(SimSiamKD_OLMH_dimcollapsecheck):  # optimize lower, minimize Higher
    """SimSiam KD variant that additionally logs the LPIPS (AlexNet)
    perceptual distance between the two augmented views, and optionally
    collects per-sample diagnostics for visualization."""

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 init_cfg=None,
                 **kwargs):
        super(SimSiam_dimcollapsecheckLPIPS2, self).__init__(backbone, neck, head, init_cfg, **kwargs)
        # LPIPS perceptual metric; used only for logging/diagnostics.
        self.fn_alex = lpips.LPIPS(net='alex')

    def forward_train(self, img, org_img):
        """Forward computation during training.
        Args:
            img (list[Tensor]): A list of input images with shape
                (N, C, H, W). Typically these should be mean centered
                and std scaled.
        Returns:
            loss[str, Tensor]: A dictionary of loss components
        """
        assert isinstance(img, list)
        self.teacher.eval()
        img_v1 = img[0]
        img_v2 = img[1]
        org_img = org_img[0]
        # Student and teacher embeddings for both views.
        z1 = self.encoder(img_v1)[0]  # NxC
        z2 = self.encoder(img_v2)[0]  # NxC
        o_head_student1 = self.head(z1, z2)
        o_head_student2 = self.head(z2, z1)
        zt1 = self.teacher.encoder(img_v1)[0]
        zt2 = self.teacher.encoder(img_v2)[0]
        o_head_teacher1 = self.teacher.head(zt1, zt2)
        o_head_teacher2 = self.teacher.head(zt2, zt1)
        # Per-sample cosine-similarity losses; teacher side is detached.
        teacher_loss1 = o_head_teacher1['cossim'].detach()
        teacher_loss2 = o_head_teacher2['cossim'].detach()
        student_loss1 = o_head_student1['cossim']
        student_loss2 = o_head_student2['cossim']
        # for plotting loss
        l_s1 = torch.mean(student_loss1)
        l_s2 = torch.mean(student_loss2)
        l_s = 1 / 2 * (l_s1 + l_s2).detach()
        l_t = 1 / 2 * (torch.mean(teacher_loss1) + torch.mean(teacher_loss2)).detach()
        # Split samples by whether the student already beats the teacher.
        index_lower1 = student_loss1 < teacher_loss1
        index_lower2 = student_loss2 < teacher_loss2
        index_higher1 = torch.bitwise_not(index_lower1)
        index_higher2 = torch.bitwise_not(index_lower2)
        # log frac: fraction of samples where student loss < teacher loss
        num1 = float(torch.nonzero(index_lower1).size(0))
        num2 = float(torch.nonzero(index_lower2).size(0))
        total_number = float(img_v1.size(0))
        frac = (num1 + num2) / (total_number * 2)
        # put test here
        # log lpips
        d = self.fn_alex(img_v1, img_v2)
        # predictions
        p1 = o_head_student1['pred']
        p2 = o_head_student2['pred']
        pt1 = o_head_teacher1['pred']
        pt2 = o_head_teacher2['pred']
        # log image: collect per-sample diagnostics only when requested
        # (self.save_images is set by val_step).
        if self.save_images:
            images_lower1 = img[0][index_lower1]
            images_lower1_proj = img[1][index_lower1]
            images_higher1 = img[0][index_higher1]
            images_higher2_proj = img[1][index_higher1]
            ori_lower1 = org_img[index_lower1]
            ori_higher1 = org_img[index_higher1]
            set_1 = [images_lower1, images_lower1_proj, ori_lower1, student_loss1[index_lower1],
                     teacher_loss1[index_lower1], d[index_lower1],
                     images_higher1, images_higher2_proj, ori_higher1, student_loss1[index_higher1],
                     teacher_loss1[index_higher1], d[index_higher1],
                     p1[index_lower1], p2[index_lower1], pt1[index_lower1], pt2[index_lower1],
                     p1[index_higher1], p2[index_higher1], pt1[index_higher1], pt2[index_higher1]]
            # images_lower2 = img[0][index_lower2]
            # images_lower2_proj = img[1][index_lower2]
            # images_higher2 = img[0][index_higher2]
            # images_higher2_proj = img[1][index_higher2]
            # ori_lower2 = org_img[index_lower2]
            # ori_higher2 = org_img[index_higher2]
            #
            # set_2 = [images_lower2, images_lower2_proj, ori_lower2, student_loss2[index_lower2],
            #          teacher_loss2[index_lower2], d[index_lower2],
            #          images_higher2, images_higher2_proj, ori_higher2, student_loss2[index_higher2],
            #          teacher_loss2[index_higher2], d[index_higher2]]
            set_2 = None
        else:
            set_1, set_2 = None, None
        # The actual optimized loss is the plain symmetric SimSiam loss.
        loss1 = torch.mean(student_loss1)
        loss2 = torch.mean(student_loss2)
        losses = 1 / 2 * (loss1 + loss2)
        return dict(loss=losses, l_student=l_s, l_teacher=l_t, frac=torch.tensor(frac).cuda()), set_1, set_2

    def val_step(self, data, optimizer, teacher_model, save_images):
        """The iteration step during validation.
        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        # Bind the teacher lazily on the first validation step.
        if self.teacher is None:
            self.teacher = teacher_model
        self.save_images = save_images[0]
        losses, set_1, set_2 = self(**data)
        loss, log_vars = self._parse_losses(losses)
        if isinstance(data['img'], list):
            num_samples = len(data['img'][0].data)
        else:
            num_samples = len(data['img'].data)
        outputs = dict(loss=loss, log_vars=log_vars, num_samples=num_samples)
        return outputs, set_1, set_2
@ALGORITHMS.register_module()
class SimSiam_dimcollapsecheckLPIPS_fixedfirstview(SimSiam_dimcollapsecheckLPIPS2):  # optimize lower, minimize Higher
    """Variant of SimSiam_dimcollapsecheckLPIPS2 that freezes the FIRST
    augmented view to the batch seen on the very first forward pass.

    NOTE(review): the cached view is a fixed-size batch; a final batch of a
    different size would mismatch -- presumably drop_last is used. Confirm.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 init_cfg=None,
                 **kwargs):
        super(SimSiam_dimcollapsecheckLPIPS_fixedfirstview, self).__init__(
            backbone, neck, head, init_cfg, **kwargs)
        # Cached first view; populated lazily on the first forward pass.
        self.img_v1 = None

    def forward_train(self, img, org_img):
        """Forward computation during training.
        Args:
            img (list[Tensor]): A list of input images with shape
                (N, C, H, W). Typically these should be mean centered
                and std scaled.
        Returns:
            loss[str, Tensor]: A dictionary of loss components
        """
        assert isinstance(img, list)
        self.teacher.eval()
        # Fix: use `is None` rather than `== None` -- the cached value is a
        # torch.Tensor after the first step, and identity comparison is the
        # correct (and PEP 8 mandated) None check.
        if self.img_v1 is None:
            self.img_v1 = img[0]
        img_v1 = self.img_v1
        img_v2 = img[1]
        org_img = org_img[0]
        z1 = self.encoder(img_v1)[0]  # NxC
        z2 = self.encoder(img_v2)[0]  # NxC
        o_head_student1 = self.head(z1, z2)
        o_head_student2 = self.head(z2, z1)
        zt1 = self.teacher.encoder(img_v1)[0]
        zt2 = self.teacher.encoder(img_v2)[0]
        o_head_teacher1 = self.teacher.head(zt1, zt2)
        o_head_teacher2 = self.teacher.head(zt2, zt1)
        teacher_loss1 = o_head_teacher1['cossim'].detach()
        teacher_loss2 = o_head_teacher2['cossim'].detach()
        student_loss1 = o_head_student1['cossim']
        student_loss2 = o_head_student2['cossim']
        # for plotting loss
        l_s1 = torch.mean(student_loss1)
        l_s2 = torch.mean(student_loss2)
        l_s = 1 / 2 * (l_s1 + l_s2).detach()
        l_t = 1 / 2 * (torch.mean(teacher_loss1) + torch.mean(teacher_loss2)).detach()
        index_lower1 = student_loss1 < teacher_loss1
        index_lower2 = student_loss2 < teacher_loss2
        index_higher1 = torch.bitwise_not(index_lower1)
        index_higher2 = torch.bitwise_not(index_lower2)
        # log frac: fraction of samples where student loss < teacher loss
        num1 = float(torch.nonzero(index_lower1).size(0))
        num2 = float(torch.nonzero(index_lower2).size(0))
        total_number = float(img_v1.size(0))
        frac = (num1 + num2) / (total_number * 2)
        # log lpips between the (fixed) first view and the fresh second view
        d = self.fn_alex(img_v1, img_v2)
        # predictions
        p1 = o_head_student1['pred']
        p2 = o_head_student2['pred']
        pt1 = o_head_teacher1['pred']
        pt2 = o_head_teacher2['pred']
        # log image diagnostics only when requested (set by val_step)
        if self.save_images:
            images_lower1 = img_v1[index_lower1]
            images_lower1_proj = img_v2[index_lower1]
            images_higher1 = img_v1[index_higher1]
            images_higher2_proj = img_v2[index_higher1]
            ori_lower1 = org_img[index_lower1]
            ori_higher1 = org_img[index_higher1]
            set_1 = [images_lower1, images_lower1_proj, ori_lower1, student_loss1[index_lower1],
                     teacher_loss1[index_lower1], d[index_lower1],
                     images_higher1, images_higher2_proj, ori_higher1, student_loss1[index_higher1],
                     teacher_loss1[index_higher1], d[index_higher1],
                     p1[index_lower1], p2[index_lower1], pt1[index_lower1], pt2[index_lower1],
                     p1[index_higher1], p2[index_higher1], pt1[index_higher1], pt2[index_higher1]]
            set_2 = None
        else:
            set_1, set_2 = None, None
        loss1 = torch.mean(student_loss1)
        loss2 = torch.mean(student_loss2)
        losses = 1 / 2 * (loss1 + loss2)
        return dict(loss=losses, l_student=l_s, l_teacher=l_t, frac=torch.tensor(frac).cuda()), set_1, set_2
@ALGORITHMS.register_module()
class SimSiam_dimcollapsecheckLPIPS_fixedsecondview(SimSiam_dimcollapsecheckLPIPS2):  # optimize lower, minimize Higher
    """Variant of SimSiam_dimcollapsecheckLPIPS2 that freezes the SECOND
    augmented view to the batch seen on the very first forward pass.

    NOTE(review): the cached view is a fixed-size batch; a final batch of a
    different size would mismatch -- presumably drop_last is used. Confirm.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 init_cfg=None,
                 **kwargs):
        super(SimSiam_dimcollapsecheckLPIPS_fixedsecondview, self).__init__(
            backbone, neck, head, init_cfg, **kwargs)
        # Cached second view; populated lazily on the first forward pass.
        self.img_v2 = None

    def forward_train(self, img, org_img):
        """Forward computation during training.
        Args:
            img (list[Tensor]): A list of input images with shape
                (N, C, H, W). Typically these should be mean centered
                and std scaled.
        Returns:
            loss[str, Tensor]: A dictionary of loss components
        """
        assert isinstance(img, list)
        self.teacher.eval()
        # Fix: use `is None` rather than `== None` -- the cached value is a
        # torch.Tensor after the first step, and identity comparison is the
        # correct (and PEP 8 mandated) None check.
        if self.img_v2 is None:
            self.img_v2 = img[1]
        img_v1 = img[0]
        img_v2 = self.img_v2
        org_img = org_img[0]
        z1 = self.encoder(img_v1)[0]  # NxC
        z2 = self.encoder(img_v2)[0]  # NxC
        o_head_student1 = self.head(z1, z2)
        o_head_student2 = self.head(z2, z1)
        zt1 = self.teacher.encoder(img_v1)[0]
        zt2 = self.teacher.encoder(img_v2)[0]
        o_head_teacher1 = self.teacher.head(zt1, zt2)
        o_head_teacher2 = self.teacher.head(zt2, zt1)
        teacher_loss1 = o_head_teacher1['cossim'].detach()
        teacher_loss2 = o_head_teacher2['cossim'].detach()
        student_loss1 = o_head_student1['cossim']
        student_loss2 = o_head_student2['cossim']
        # for plotting loss
        l_s1 = torch.mean(student_loss1)
        l_s2 = torch.mean(student_loss2)
        l_s = 1 / 2 * (l_s1 + l_s2).detach()
        l_t = 1 / 2 * (torch.mean(teacher_loss1) + torch.mean(teacher_loss2)).detach()
        index_lower1 = student_loss1 < teacher_loss1
        index_lower2 = student_loss2 < teacher_loss2
        index_higher1 = torch.bitwise_not(index_lower1)
        index_higher2 = torch.bitwise_not(index_lower2)
        # log frac: fraction of samples where student loss < teacher loss
        num1 = float(torch.nonzero(index_lower1).size(0))
        num2 = float(torch.nonzero(index_lower2).size(0))
        total_number = float(img_v1.size(0))
        frac = (num1 + num2) / (total_number * 2)
        # log lpips between the fresh first view and the (fixed) second view
        d = self.fn_alex(img_v1, img_v2)
        # predictions
        p1 = o_head_student1['pred']
        p2 = o_head_student2['pred']
        pt1 = o_head_teacher1['pred']
        pt2 = o_head_teacher2['pred']
        # log image diagnostics only when requested (set by val_step)
        if self.save_images:
            images_lower1 = img_v1[index_lower1]
            images_lower1_proj = img_v2[index_lower1]
            images_higher1 = img_v1[index_higher1]
            images_higher2_proj = img_v2[index_higher1]
            ori_lower1 = org_img[index_lower1]
            ori_higher1 = org_img[index_higher1]
            set_1 = [images_lower1, images_lower1_proj, ori_lower1, student_loss1[index_lower1],
                     teacher_loss1[index_lower1], d[index_lower1],
                     images_higher1, images_higher2_proj, ori_higher1, student_loss1[index_higher1],
                     teacher_loss1[index_higher1], d[index_higher1],
                     p1[index_lower1], p2[index_lower1], pt1[index_lower1], pt2[index_lower1],
                     p1[index_higher1], p2[index_higher1], pt1[index_higher1], pt2[index_higher1]]
            set_2 = None
        else:
            set_1, set_2 = None, None
        loss1 = torch.mean(student_loss1)
        loss2 = torch.mean(student_loss2)
        losses = 1 / 2 * (loss1 + loss2)
        return dict(loss=losses, l_student=l_s, l_teacher=l_t, frac=torch.tensor(frac).cuda()), set_1, set_2
@ALGORITHMS.register_module()
class SimDisKD_OLIH(SimSiamKD):  # optimize lower, minimize Higher
    """SimSiam distillation that combines an "optimize lower / ignore
    higher" per-sample loss with a SimDis-style prediction-distillation
    term against the teacher."""

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 init_cfg=None,
                 **kwargs):
        super(SimDisKD_OLIH, self).__init__(backbone, neck, head, init_cfg, **kwargs)

    def forward_train(self, img):
        """Forward computation during training.
        Args:
            img (list[Tensor]): A list of input images with shape
                (N, C, H, W). Typically these should be mean centered
                and std scaled.
        Returns:
            loss[str, Tensor]: A dictionary of loss components
        """
        assert isinstance(img, list)
        self.teacher.eval()
        img_v1 = img[0]
        img_v2 = img[1]
        z1 = self.encoder(img_v1)[0]  # NxC
        z2 = self.encoder(img_v2)[0]  # NxC
        zt1 = self.teacher.encoder(img_v1)[0]
        zt2 = self.teacher.encoder(img_v2)[0]
        # Raw predictor outputs (loss computed manually below).
        p1 = self.head(z1, z2, loss_cal=False)
        p2 = self.head(z2, z1, loss_cal=False)
        pt1 = self.teacher.head(zt1, zt2, loss_cal=False).detach()
        pt2 = self.teacher.head(zt2, zt1, loss_cal=False).detach()
        # Per-sample cosine-similarity losses (mean=False keeps per-sample).
        teacher_loss1 = cosine_sim(pt1, zt2, mean=False).detach()
        teacher_loss2 = cosine_sim(pt2, zt1, mean=False).detach()
        student_loss1 = cosine_sim(p1, z2, mean=False)
        student_loss2 = cosine_sim(p2, z1, mean=False)
        # teacher_loss1 = self.teacher.head(zt1, zt2)['cossim'].detach()
        # teacher_loss2 = self.teacher.head(zt2, zt1)['cossim'].detach()
        #
        # student_loss1 = self.head(z1, z2)['cossim']
        # student_loss2 = self.head(z2, z1)['cossim']
        # for plotting loss
        l_s1 = torch.mean(student_loss1)
        l_s2 = torch.mean(student_loss2)
        l_s = 1/2 * (l_s1 + l_s2).detach()
        l_t = 1/2 * (torch.mean(teacher_loss1) + torch.mean(teacher_loss2)).detach()
        # Split samples by whether the student already beats the teacher.
        index_lower1 = student_loss1 < teacher_loss1
        index_lower2 = student_loss2 < teacher_loss2
        index_higher1 = torch.bitwise_not(index_lower1)
        index_higher2 = torch.bitwise_not(index_lower2)
        # loss 1: "ignore higher" branch.
        # NOTE(review): multiplied by 0.0, so it contributes nothing to the
        # gradient -- presumably a deliberate way to keep these samples in
        # the graph while ignoring them; confirm intent.
        # image1
        if student_loss1[index_higher1].shape[0] == 0:
            loss1_stl1 = 0.0
        else:
            loss1_stl1 = torch.mean(student_loss1[index_higher1]) * 0.0
        # image2
        if student_loss2[index_higher2].shape[0] == 0:
            loss1_stl2 = 0.0
        else:
            loss1_stl2 = torch.mean(student_loss2[index_higher2]) * 0.0
        loss1 = 0.5 * (loss1_stl1 + loss1_stl2)  # loss1 here
        # loss 2: pull "already lower" student losses toward the teacher's
        # per-sample losses via MSE (empty masks contribute 0.0).
        # image1
        if student_loss1[index_lower1].shape[0] == 0:
            loss2_stl1 = 0.0
        else:
            loss2_stl1 = nn.functional.mse_loss(student_loss1[index_lower1], teacher_loss1[index_lower1])
        # image2
        if student_loss2[index_lower2].shape[0] == 0:
            loss2_stl2 = 0.0
        else:
            loss2_stl2 = nn.functional.mse_loss(student_loss2[index_lower2], teacher_loss2[index_lower2])
        loss2 = 0.5 * (loss2_stl1 + loss2_stl2)  # loss2 here
        olih_losses = loss1 + loss2
        # SimDis loss: align student predictions with teacher predictions.
        distillation_loss1 = cosine_sim(p1, pt1)
        distillation_loss2 = cosine_sim(p2, pt2)
        distillation_losses = 0.5 * (distillation_loss1 + distillation_loss2)
        losses = olih_losses + distillation_losses
        return dict(loss=losses, l_student=l_s, l_teacher=l_t)
@ALGORITHMS.register_module()
class SimDisKD_OLMH(SimSiamKD): # optimize lower, minimize Higher
    # KD variant: samples where the student is worse than the teacher are
    # minimized directly (mean cosine loss), samples where it is better
    # are pulled toward the teacher (MSE), plus a SimDis alignment term.
    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 init_cfg=None,
                 **kwargs):
        super(SimDisKD_OLMH, self).__init__(backbone, neck, head, init_cfg, **kwargs)
    def forward_train(self, img):
        """Forward computation during training.
        Args:
            img (list[Tensor]): A list of input images with shape
                (N, C, H, W). Typically these should be mean centered
                and std scaled.
        Returns:
            loss[str, Tensor]: A dictionary of loss components
        """
        assert isinstance(img, list)
        self.teacher.eval()
        img_v1 = img[0]
        img_v2 = img[1]
        # Student and frozen-teacher embeddings of the two augmented views.
        z1 = self.encoder(img_v1)[0]  # NxC
        z2 = self.encoder(img_v2)[0]  # NxC
        zt1 = self.teacher.encoder(img_v1)[0]
        zt2 = self.teacher.encoder(img_v2)[0]
        # Predictor outputs; teacher side is detached (no gradients).
        p1 = self.head(z1, z2, loss_cal=False)
        p2 = self.head(z2, z1, loss_cal=False)
        pt1 = self.teacher.head(zt1, zt2, loss_cal=False).detach()
        pt2 = self.teacher.head(zt2, zt1, loss_cal=False).detach()
        # Per-sample SimSiam cosine losses for teacher and student.
        teacher_loss1 = cosine_sim(pt1, zt2, mean=False).detach()
        teacher_loss2 = cosine_sim(pt2, zt1, mean=False).detach()
        student_loss1 = cosine_sim(p1, z2, mean=False)
        student_loss2 = cosine_sim(p2, z1, mean=False)
        # teacher_loss1 = self.teacher.head(zt1, zt2)['cossim'].detach()
        # teacher_loss2 = self.teacher.head(zt2, zt1)['cossim'].detach()
        #
        # student_loss1 = self.head(z1, z2)['cossim']
        # student_loss2 = self.head(z2, z1)['cossim']
        # for plotting loss
        l_s1 = torch.mean(student_loss1)
        l_s2 = torch.mean(student_loss2)
        l_s = 1/2 * (l_s1 + l_s2).detach()
        l_t = 1/2 * (torch.mean(teacher_loss1) + torch.mean(teacher_loss2)).detach()
        # Masks: "lower" = student loss already below the teacher's.
        index_lower1 = student_loss1 < teacher_loss1
        index_lower2 = student_loss2 < teacher_loss2
        index_higher1 = torch.bitwise_not(index_lower1)
        index_higher2 = torch.bitwise_not(index_lower2)
        #loss 1
        #image1
        # Minimize the higher (worse-than-teacher) student losses directly.
        if student_loss1[index_higher1].shape[0] == 0:
            loss1_stl1 = 0.0
        else:
            loss1_stl1 = torch.mean(student_loss1[index_higher1])
        #image2
        if student_loss2[index_higher2].shape[0] == 0:
            loss1_stl2 = 0.0
        else:
            loss1_stl2 = torch.mean(student_loss2[index_higher2])
        loss1 = 0.5 * (loss1_stl1 + loss1_stl2) #loss1 here
        #loss 2
        #image1
        # MSE drags the "lower" student losses toward the teacher's values.
        if student_loss1[index_lower1].shape[0] == 0:
            loss2_stl1 = 0.0
        else:
            loss2_stl1 = nn.functional.mse_loss(student_loss1[index_lower1], teacher_loss1[index_lower1])
        #image2
        if student_loss2[index_lower2].shape[0] == 0:
            loss2_stl2 = 0.0
        else:
            loss2_stl2 = nn.functional.mse_loss(student_loss2[index_lower2], teacher_loss2[index_lower2])
        loss2 = 0.5 * (loss2_stl1 + loss2_stl2) #loss2 here
        olmh_losses = loss1 + loss2
        # SimDis loss: align student predictions with teacher predictions.
        distillation_loss1 = cosine_sim(p1, pt1)
        distillation_loss2 = cosine_sim(p2, pt2)
        distillation_losses = 0.5 * (distillation_loss1 + distillation_loss2)
        losses = olmh_losses + distillation_losses
        return dict(loss=losses, l_student=l_s, l_teacher=l_t)
@ALGORITHMS.register_module()
class SimDisKD_tracklowhigh(SimSiamKD):
    """SimDis distillation that only *tracks* how many samples have a lower
    per-sample student loss than the teacher.

    The optimized loss is the plain SimDis prediction-alignment loss; the
    student/teacher SimSiam losses and the fraction of "lower" samples are
    returned purely for logging.
    """
    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 init_cfg=None,
                 **kwargs):
        super(SimDisKD_tracklowhigh, self).__init__(backbone, neck, head, init_cfg, **kwargs)
    def forward_train(self, img):
        """Forward computation during training.
        Args:
            img (list[Tensor]): A list of input images with shape
                (N, C, H, W). Typically these should be mean centered
                and std scaled.
        Returns:
            loss[str, Tensor]: A dictionary of loss components
        """
        assert isinstance(img, list)
        self.teacher.eval()
        img_v1 = img[0]
        img_v2 = img[1]
        # Student and frozen-teacher embeddings of the two augmented views.
        z1 = self.encoder(img_v1)[0]  # NxC
        z2 = self.encoder(img_v2)[0]  # NxC
        zt1 = self.teacher.encoder(img_v1)[0]
        zt2 = self.teacher.encoder(img_v2)[0]
        # Predictor outputs; teacher side is detached (no gradients).
        p1 = self.head(z1, z2, loss_cal=False)
        p2 = self.head(z2, z1, loss_cal=False)
        pt1 = self.teacher.head(zt1, zt2, loss_cal=False).detach()
        pt2 = self.teacher.head(zt2, zt1, loss_cal=False).detach()
        # Per-sample SimSiam cosine losses (monitoring only).
        teacher_loss1 = cosine_sim(pt1, zt2, mean=False).detach()
        teacher_loss2 = cosine_sim(pt2, zt1, mean=False).detach()
        student_loss1 = cosine_sim(p1, z2, mean=False)
        student_loss2 = cosine_sim(p2, z1, mean=False)
        # Mean losses, detached, logged for training curves.
        l_s = 1/2 * (torch.mean(student_loss1) + torch.mean(student_loss2)).detach()
        l_t = 1/2 * (torch.mean(teacher_loss1) + torch.mean(teacher_loss2)).detach()
        # Fraction of samples where the student already beats the teacher.
        index_lower1 = student_loss1 < teacher_loss1
        index_lower2 = student_loss2 < teacher_loss2
        num1 = float(torch.nonzero(index_lower1).size(0))
        num2 = float(torch.nonzero(index_lower2).size(0))
        total_number = float(img_v1.size(0))
        frac = (num1 + num2) / (total_number * 2)
        # SimDis loss: align student predictions with teacher predictions.
        distillation_loss1 = cosine_sim(p1, pt1)
        distillation_loss2 = cosine_sim(p2, pt2)
        losses = 0.5 * (distillation_loss1 + distillation_loss2)
        return dict(loss=losses, l_student=l_s, l_teacher=l_t, frac=torch.tensor(frac).cuda())
from flask import Flask, request, jsonify
from scholarly_wallet import orcid_api as orcid
from scholarly_wallet import config
from scholarly_wallet import mongo_access as mongo, github_api as github
from scholarly_wallet import figshare_api as figshare
from scholarly_wallet import zenodo_api as zenodo
from flask_jwt_extended import (
JWTManager,
jwt_required,
create_access_token,
get_jwt_identity,
)
from scholarly_wallet import hyperledger_api as hyperledger
from threading import Thread
app = Flask(__name__)
# JWT signing secret and token lifetime come from the project config file.
app.config["JWT_SECRET_KEY"] = config.get("DEFAULT", "JWT_SECRET")
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = int(config.get("DEFAULT", "JWT_EXPIRES"))
jwt = JWTManager(app)
# Default sort field for research objects, per source.
DEFAULT_SORT = {"orcid": "title", "github": "full_name", "figshare": "title"}
@app.route("/api/auth/orcid", methods=["GET", "OPTIONS"])
def signin():
    """Exchange an ORCID auth code for a JWT access token (403 on failure)."""
    code = request.args.get("orcid_auth_code")
    user = orcid.authenticate(code)
    if user is None:
        return jsonify({"msg": "Failed authetication with Orcid"}), 403
    return jsonify(access_token=create_access_token(identity=user)), 200
@app.route("/api/profile", methods=["GET", "OPTIONS"])
@jwt_required
def profile():
    """Return the identity embedded in the caller's JWT."""
    return jsonify(get_jwt_identity()), 200
@app.route("/api/auth/github", methods=["GET", "OPTIONS"])
@jwt_required
def github_auth():
    """Link a GitHub account to the ORCID user and return their repos."""
    code = request.args.get("code")
    orcid_id = request.args.get("orcid")
    token = github.authenticate(code, orcid_id)
    return jsonify(github.get_repositories(token, orcid_id))
@app.route("/api/auth/figshare", methods=["GET", "OPTIONS"])
@jwt_required
def figshare_auth():
    """Link a Figshare account and return the user's articles (403 if auth fails)."""
    code = request.args.get("code")
    orcid_id = request.args.get("orcid")
    token = figshare.authenticate(code, orcid_id)
    if token is None:
        return jsonify({"Msg": "Not auth for figshare"}), 403
    return jsonify(figshare.get_articles(token, orcid_id))
@app.route("/api/<string:source>/claim", methods=["POST", "OPTIONS"])
@jwt_required
def claim_ro(source):
    """Claim a batch of research objects for the given ORCID user.

    Marks each posted object as claimed, persists the batch, then records
    the claim on the blockchain from a background thread.
    """
    repositories = request.get_json()
    orcid_id = request.args.get("orcid")
    for repository in repositories:
        repository["claimed"] = True
        repository["owner"] = orcid_id
    mongo.save_ros(repositories, source)
    for repository in repositories:
        # Mongo adds an ObjectId on insert, which is not JSON serializable.
        # Use a default so a missing "_id" no longer raises KeyError.
        repository.pop("_id", None)
    thread = Thread(target=hyperledger.claim_ros, args=(orcid_id, source, repositories))
    thread.start()
    return jsonify(repositories)
@app.route("/api/<string:orcid_id>/<string:source>/list", methods=["GET", "OPTIONS"])
@jwt_required
def list_github(orcid_id, source):
    """Paginated listing of claimed research objects for one source."""
    size = int(request.args.get("size"))
    offset = int(request.args.get("start")) * size
    count = mongo.count_claimed(orcid_id, source)
    results = mongo.get_claimed(orcid_id, source, DEFAULT_SORT[source], offset, size)
    return jsonify({"count": count, "results": results})
@app.route("/api/<string:orcid_id>/<string:source>/all", methods=["GET", "OPTIONS"])
@jwt_required
def list_all_ros_by_source(orcid_id, source):
    """Unpaginated listing of claimed research objects for one source."""
    count = mongo.count_claimed(orcid_id, source)
    results = mongo.get_claimed(orcid_id, source, DEFAULT_SORT[source])
    return jsonify({"count": count, "results": results})
@app.route("/api/<string:orcid_id>/all", methods=["GET", "OPTIONS"])
@jwt_required
def list_all_ros(orcid_id):
    """All claimed research objects of a user, grouped by source."""
    return jsonify({
        source: mongo.get_claimed(orcid_id, source, DEFAULT_SORT[source])
        for source in ("github", "orcid", "figshare")
    })
@app.route("/api/<string:orcid_id>/discos/create", methods=["POST", "OPTIONS"])
@jwt_required
def create_disco(orcid_id):
    """Persist a new DISCO and record it on the blockchain asynchronously."""
    disco = request.get_json()
    disco_id = str(mongo.save_disco(orcid_id, disco))
    nodes = disco["diagram"]["elements"]["nodes"]
    research_objects_urls = [node["data"]["id"] for node in nodes]
    Thread(
        target=hyperledger.create_disco_transaction,
        args=(orcid_id, disco_id, disco["name"], research_objects_urls),
    ).start()
    return jsonify(disco_id)
@app.route(
    "/api/<string:orcid_id>/discos/<string:disco_id>/update",
    methods=["POST", "OPTIONS"],
)
@jwt_required
def update_disco(orcid_id, disco_id):
    """Update a DISCO in Mongo and mirror the change on the blockchain."""
    disco = request.get_json()
    nodes = disco["diagram"]["elements"]["nodes"]
    research_objects_urls = [node["data"]["id"] for node in nodes]
    Thread(
        target=hyperledger.update_disco,
        args=(orcid_id, disco_id, disco["name"], research_objects_urls),
    ).start()
    return jsonify(str(mongo.update_disco(orcid_id, disco_id, disco)))
@app.route(
    "/api/<string:orcid_id>/discos/<string:disco_id>/publish",
    methods=["POST", "OPTIONS"],
)
@jwt_required
def publish_disco(orcid_id, disco_id):
    """Kick off an asynchronous Zenodo publication of a DISCO."""
    user = mongo.get_user(orcid_id)
    disco = mongo.get_disco(orcid_id, disco_id)
    # Mark the DISCO busy before handing it to the background worker.
    mongo.update_disco(orcid_id, disco_id, {"status": "in progress"})
    Thread(target=zenodo.publish_to_zenodo, args=(user, disco)).start()
    return "in progress"
@app.route(
    "/api/<string:orcid_id>/discos/<string:disco_id>/status", methods=["GET", "OPTIONS"]
)
@jwt_required
def get_disco_status(orcid_id, disco_id):
    """Return a DISCO's publication status ("unpublished" when never set)."""
    disco = mongo.get_disco(orcid_id, disco_id)
    # dict.get replaces the membership-test ternary (same behavior).
    return disco.get("status", "unpublished")
@app.route(
    "/api/<string:orcid_id>/discos/<string:disco_id>", methods=["GET", "OPTIONS"]
)
@jwt_required
def get_disco(orcid_id, disco_id):
    """Fetch one DISCO, exposing its Mongo _id as a string "id" field."""
    disco = mongo.get_disco(orcid_id, disco_id)
    disco["id"] = str(disco.pop("_id"))
    return jsonify(disco)
@app.route("/api/<string:orcid_id>/discos", methods=["GET", "OPTIONS"])
@jwt_required
def get_all_discos(orcid_id):
    """Paginated listing of the user's DISCOs."""
    size = int(request.args.get("size"))
    offset = int(request.args.get("start")) * size
    return jsonify({
        "count": mongo.count_discos(orcid_id),
        "results": mongo.get_discos(orcid_id, offset, size),
    })
if __name__ == "__main__":
    # Development entry point; production should front this with a WSGI server.
    app.run(debug=True, port=8000, host="0.0.0.0", threaded=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################
############## Importing ###############
##############################################
import utility as _utility
import docstrings as _docstrings
import subprocess as _subprocess
##############################################
########### Initialization #############
##############################################
# Detect the running platform once at import time.
_platform = _utility.platform()
#_platform = "windows"
if _platform == "windows":
    # An empty shell call enables ANSI escape processing in the Windows console.
    _subprocess.call('', shell = True)
    # The "small" half-height marker is not supported on Windows terminals.
    _utility.marker.pop('small')
    _utility.marker_sequence.remove('small')
_shell = _utility.shell()
##############################################
########## Basic Containers ############
##############################################
class _figure():
    """Global figure state: overall size, the grid of subplots, and the
    currently active subplot."""
    def __init__(self):
        self.width, self.height = None, None
        self.rows, self.cols = 1, 1
        self.set_subplots()
        self.row, self.col = 0, 0
        self.set_subplot()
        self.canvas = ""
    def set_subplots(self):
        # Rebuild the rows x cols grid of fresh, empty subplots.
        self.subplots = []
        for r in range(self.rows):
            self.subplots.append([_subplot(r, c) for c in range(self.cols)])
    def get_subplot(self, row = 0 , col = 0):
        # 1-based lookup; (0, 0) wraps around to the last row/column.
        return self.subplots[row - 1][col - 1]
    def set_subplot(self):
        # Make the subplot at (self.row, self.col) the active one.
        self.subplot = self.subplots[self.row][self.col]
class _subplot():
def __init__(self, row, col):
self.row = row
self.col = col
self.yaxis = []
self.label = []
self.label_show = []
self.point_marker = []
self.line_marker = []
self.point_color = []
self.line_color = []
self.x = []
self.y = []
self.signals = 0
self.fillx = []
self.filly = []
self.width = None
self.height = None
self.width_set = None
self.height_set = None
self.title = ""
self.xlabel = ""
self.ylabel = ["", ""]
self.xaxes = [True, True]
self.yaxes = [True, True]
self.grid = [False, False]
self.axes_color = "white"
self.ticks_color = "black"
self.canvas_color = "white"
self.xlim_plot = [None, None]
self.ylim_plot_left = [None, None]
self.ylim_plot_right = [None, None]
self.xticks, self.xlabels = [], []
self.yticks_left, self.ylabels_left = [], []
self.yticks_right, self.ylabels_right = [], []
self.ticks = [5, 7]
self.xscale = "linear"
self.yscale = ["linear", "linear"]
# The single module-level figure instance shared by all public functions.
_fig = _figure()
#figure = _fig
#utility = _utility
##############################################
######### Subplots Function ############
##############################################
def subplots(rows = None, cols = None, ):
    # Resize the figure grid; a single argument sets both dimensions.
    # Resets the active subplot to (1, 1).
    rows, cols = _utility.set_first_to_both(rows, cols)
    _set_rows(rows)
    _set_cols(cols)
    _fig.set_subplots()
    subplot(1, 1)
subplots.__doc__ = _docstrings.subplots_doc
def _set_cols(cols = None):
    # Default to a single column.
    _fig.cols = _utility.set_if_none(cols, 1)
def _set_rows(rows = None):
    # Default to a single row.
    _fig.rows = _utility.set_if_none(rows, 1)
def subplot(row = 1, col = 1):
    # Select the active subplot (1-based coordinates).
    _set_row(row)
    _set_col(col)
    _fig.set_subplot()
subplot.__doc__ = _docstrings.subplot_doc
def _set_col(col = None):
    # Convert the 1-based user column to the 0-based internal index.
    _fig.col = _utility.set_if_none(col, 1) - 1
def _set_row(row = None):
    # Convert the 1-based user row to the 0-based internal index.
    _fig.row = _utility.set_if_none(row, 1) - 1
# Start with a 1 x 1 grid and make its only subplot the active one.
subplots(1, 1)
subplot(1, 1)
##############################################
####### Draw Related Functions #########
##############################################
def _draw(*args, **kwargs):
    # Register one data series on the active subplot. Each helper appends
    # to a per-series list, so all of them must run exactly once per call
    # to keep the lists index-aligned.
    _yaxis(kwargs.get("yaxis"))
    _label(kwargs.get("label"))
    _point_marker(kwargs.get("point_marker"))
    _line_marker(kwargs.get("line_marker"))
    _point_color(kwargs.get("point_color"))
    _line_color(kwargs.get("line_color"))
    _data(*args)
    _fillx(kwargs.get("fillx"))
    _filly(kwargs.get("filly"))
def _yaxis(axis = None):
    # A series is attached to the left axis unless "right" is requested.
    axis = _utility.set_if_none(axis, "left")
    if axis not in ("left", "right"):
        axis = "left"
    _fig.subplot.yaxis.append(axis)
def _label(label = None):
    # Empty label by default; every series starts out shown in the legend.
    _fig.subplot.label.append(_utility.set_if_none(label, ""))
    _fig.subplot.label_show.append(True)
    # TODO: handle multiple series sharing the same label.
def _point_marker(marker = None):
    # Default marker: cycle through the marker sequence, advancing by how
    # many distinct markers are already in use on this subplot.
    index = len(set(_fig.subplot.point_marker)) % len(_utility.marker_sequence)
    marker_none = _utility.marker_sequence[index]
    marker = "" if marker == "" else marker
    marker = _utility.set_if_none(marker, marker_none)
    # "small" is a special half-height marker, only honoured on linux.
    small_test = marker == "small" and _platform == "linux"
    marker = _utility.marker[marker] if marker in _utility.marker and not small_test else marker
    # Otherwise keep only the first character of the requested marker.
    marker = "small" if small_test else (marker[0] if len(marker) > 0 else marker)
    _fig.subplot.point_marker.append(marker)
def _line_marker(marker = None):
    # Same logic as _point_marker, applied to the line marker list.
    index = len(set(_fig.subplot.line_marker)) % len(_utility.marker_sequence)
    marker_none = _utility.marker_sequence[index]
    marker = "" if marker == "" else marker
    marker = _utility.set_if_none(marker, marker_none)
    # "small" is a special half-height marker, only honoured on linux.
    small_test = marker == "small" and _platform == "linux"
    marker = _utility.marker[marker] if marker in _utility.marker and not small_test else marker
    # Otherwise keep only the first character of the requested marker.
    marker = "small" if small_test else (marker[0] if len(marker) > 0 else marker)
    _fig.subplot.line_marker.append(marker)
def _point_color(color = None):
    # Unknown colors fall back to the auto-cycling default sequence.
    if color not in _utility.color_sequence:
        color = None
    index = len(set(_fig.subplot.point_color)) % len(_utility.color_sequence)
    default = _utility.color_sequence[index]
    _fig.subplot.point_color.append(_utility.set_if_none(color, default))
def _line_color(color = None):
    # Unknown colors fall back to the auto-cycling default sequence.
    if color not in _utility.color_sequence:
        color = None
    index = len(set(_fig.subplot.line_color)) % len(_utility.color_sequence)
    default = _utility.color_sequence[index]
    _fig.subplot.line_color.append(_utility.set_if_none(color, default))
def _data(*args):
    # Normalize the positional arguments into x/y sequences and store them.
    x, y = _utility.get_data(*args)
    _fig.subplot.x.append(x)
    _fig.subplot.y.append(y)
    _fig.subplot.signals += 1
def _fillx(fill = None):
    # Record whether this series should be filled down to the x axis.
    _fig.subplot.fillx.append(bool(_utility.set_if_none(fill, False)))
def _filly(fill = None):
    # Record whether this series should be filled across to the y axis.
    _fig.subplot.filly.append(bool(_utility.set_if_none(fill, False)))
##############################################
########### Clear Functions ############
##############################################
def clear_terminal():
    # "\033c" is the ANSI full-reset escape; also reset the line counter.
    _utility.write('\033c')
    _utility._terminal_printed_lines_cnt = 0
clear_terminal.__doc__ = _docstrings.clear_terminal_doc
clt = clear_terminal
def clear_terminal_printed_lines():
    # clear the lines that plotext had printed
    # (plus 1 because the last line would not has an \n at the end)
    n = _utility._terminal_printed_lines_cnt + 1
    for i in range(n):
        # "\033[2K" erases the current line; "\033[A" moves the cursor up.
        _utility.write("\033[2K")
        if i < n - 1:
            _utility.write("\033[A")
            _utility.write("\033[2K")
    _utility._terminal_printed_lines_cnt = 0
def clear_figure():
    # Reset the whole figure to its initial (1 x 1, empty) state.
    _fig.__init__()
clear_figure.__doc__ = _docstrings.clear_figure_doc
clf = clear_figure
def clear_plot():
    # Reset only the active subplot, keeping the rest of the figure.
    _fig.subplot.__init__(_fig.row, _fig.col)
clear_plot.__doc__ = _docstrings.clear_plot_doc
clp = clear_plot
def clear_data():
    # Drop only the data series; styling, labels and limits are kept.
    _fig.subplot.x = []
    _fig.subplot.y = []
    _fig.subplot.signals = 0
clear_data.__doc__ = _docstrings.clear_data_doc
cld = clear_data
##############################################
############ Set Functions #############
##############################################
def plotsize(width = None, height = None):
    # A single argument sets both dimensions; a list is split across both.
    width, height = _utility.set_first_to_both(width, height)
    width, height = _utility.set_list_to_both(width, height)
    _fig.subplot.width_set = width
    _fig.subplot.height_set = height
plotsize.__doc__ = _docstrings.plotsize_doc
plot_size = plotsize
def title(label = None):
    # None keeps the current title; "" clears it (stored internally as None).
    label = _utility.set_if_none(label, _fig.subplot.title)
    label = None if label == "" else label
    _fig.subplot.title = label
title.__doc__ = _docstrings.title_doc
def xlabel(label = ""):
    # None keeps the current x-axis label.
    label = _utility.set_if_none(label, _fig.subplot.xlabel)
    _fig.subplot.xlabel = label
xlabel.__doc__ = _docstrings.xlabel_doc
def ylabel(label_left = "", label_right = ""):
    # None keeps the current label on the corresponding y axis.
    label_left = _utility.set_if_none(label_left, _fig.subplot.ylabel[0])
    label_right = _utility.set_if_none(label_right, _fig.subplot.ylabel[1])
    _fig.subplot.ylabel = [label_left, label_right]
ylabel.__doc__ = _docstrings.ylabel_doc
def xaxes(x = None, y = None):
    # Show/hide the lower (x) and upper (y) x axes.
    x, y = _utility.set_first_to_both(x, y)
    # NOTE(review): y is coerced to bool before the None fallback below,
    # so a None y becomes False and never inherits the stored value,
    # while x does — confirm this asymmetry is intentional.
    y = bool(y)
    x, y = _utility.set_list_if_none([x, y], _fig.subplot.xaxes)
    x = bool(x)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.xaxes = [x, y]
xaxes.__doc__ = _docstrings.xaxes_doc
def yaxes(x = None, y = None):
    # Show/hide the left (x) and right (y) y axes.
    x, y = _utility.set_first_to_both(x, y)
    # NOTE(review): same bool-before-None-fallback asymmetry as xaxes.
    y = bool(y)
    x, y = _utility.set_list_if_none([x, y], _fig.subplot.yaxes)
    x = bool(x)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.yaxes = [x, y]
yaxes.__doc__ = _docstrings.yaxes_doc
def grid(x = None, y = None):
    # Enable/disable the vertical (x) and horizontal (y) grid lines.
    x, y = _utility.set_first_to_both(x, y)
    # NOTE(review): same bool-before-None-fallback asymmetry as xaxes.
    y = bool(y)
    x, y = _utility.set_list_if_none([x, y], _fig.subplot.grid)
    x = bool(x)
    x, y = _utility.set_list_to_both(x, y)
    _fig.subplot.grid = [x, y]
grid.__doc__ = _docstrings.grid_doc
def axes_color(color = "white"):
    # None keeps the current color; unknown names fall back to white.
    color = _utility.set_if_none(color, _fig.subplot.axes_color)
    if color not in _utility.background_color:
        color = "white"
    _fig.subplot.axes_color = color
axes_color.__doc__ = _docstrings.axes_color_doc
def ticks_color(color = "black"):
    # None keeps the current color; unknown names fall back to black.
    color = _utility.set_if_none(color, _fig.subplot.ticks_color)
    if color not in _utility.fullground_color:
        color = "black"
    _fig.subplot.ticks_color = color
ticks_color.__doc__ = _docstrings.ticks_color_doc
def canvas_color(color = "white"):
    # None keeps the current color; unknown names fall back to white.
    color = _utility.set_if_none(color, _fig.subplot.canvas_color)
    if color not in _utility.background_color:
        color = "white"
    _fig.subplot.canvas_color = color
canvas_color.__doc__ = _docstrings.canvas_color_doc
def _colorless_subplot(subplot):
subplot.point_color = ["none"] * len(subplot.point_color)
subplot.line_color = ["none"] * len(subplot.line_color)
subplot.axes_color = "none"
subplot.ticks_color = "none"
subplot.canvas_color = "none"
def colorless():
    # Remove all colors from the active subplot.
    _colorless_subplot(_fig.subplot)
colorless.__doc__ = _docstrings.colorless_doc
cls = colorless
def xlim(left = None, right = None):
    # Accept the limits in either order.
    left, right = _utility.set_list_to_both(left, right)
    _fig.subplot.xlim_plot = [min(left, right), max(left, right)]
xlim.__doc__ = _docstrings.xlim_doc
def ylim(lower = None, upper = None, yaxis = "left"):
    # Accept the limits in either order; yaxis selects which y axis to set.
    lower, upper = _utility.set_list_to_both(lower, upper)
    ordered = [min(lower, upper), max(lower, upper)]
    if yaxis in ("left", "right"):
        setattr(_fig.subplot, "ylim_plot_" + yaxis, ordered)
ylim.__doc__ = _docstrings.ylim_doc
def ticks(x = None, y = None):
    # Desired number of ticks on the x and y axes (defaults 5 and 7).
    x, y = _utility.set_first_to_both(x, y)
    x, y = _utility.set_list_to_both(x, y)
    x_none, y_none = 5, 7
    x = _utility.set_if_none(x, x_none)
    y = _utility.set_if_none(y, y_none)
    _fig.subplot.ticks = [x, y]
ticks.__doc__ = _docstrings.ticks_doc
def xticks(ticks = [], labels = None):
    # Explicit x tick positions; labels default to the tick values.
    ticks, labels = _utility.set_first_to_both(list(ticks), labels)
    labels = list(map(str, list(labels)))
    # Keep ticks and labels sorted together by tick position.
    ticks, labels = _utility.sort_data(ticks, labels)
    _fig.subplot.xticks, _fig.subplot.xlabels = ticks, labels
    _fig.subplot.ticks[0] = len(ticks)
xticks.__doc__ = _docstrings.xticks_doc
def yticks(ticks = [], labels = None, yaxis = "left"):
    # Explicit y tick positions; labels default to the tick values.
    ticks, labels = _utility.set_first_to_both(list(ticks), labels)
    labels = list(map(str, list(labels)))
    # Keep ticks and labels sorted together by tick position.
    ticks, labels = _utility.sort_data(ticks, labels)
    if yaxis == "left":
        _fig.subplot.yticks_left, _fig.subplot.ylabels_left = ticks, labels
    elif yaxis == "right":
        _fig.subplot.yticks_right, _fig.subplot.ylabels_right = ticks, labels
    _fig.subplot.ticks[1] = len(ticks)
yticks.__doc__ = _docstrings.yticks_doc
def xscale(scale = None):
    # None keeps the current scale; anything but "log" means linear.
    scale = _utility.set_if_none(scale, _fig.subplot.xscale)
    _fig.subplot.xscale = scale if scale in ("linear", "log") else "linear"
xscale.__doc__ = _docstrings.xscale_doc
def yscale(scale = None, yaxis = "left"):
    """Set the scale ("linear" or "log") of the left or right y axis."""
    side = 1 if yaxis == "right" else 0
    # Bug fix: the None default used to be read from the x-axis scale
    # (_fig.subplot.xscale); keep the current y-axis scale instead.
    scale = _utility.set_if_none(scale, _fig.subplot.yscale[side])
    scale = "linear" if not (scale in ["linear", "log"]) else scale
    _fig.subplot.yscale[side] = scale
yscale.__doc__ = _docstrings.yscale_doc
##############################################
########### Show Functions #############
##############################################
def show(hide = False):
    # Build and (unless hide is True) print the whole figure: resolve
    # sizes first, render each subplot into its character matrix, then
    # join all matrices into one canvas.
    _figure_size_max()
    _figure_size()
    #_plots_size()
    _coherent_sizes()
    for r in range(_fig.rows):
        for c in range(_fig.cols):
            subplot = _fig.subplots[r][c]
            _previous_size(subplot)
            _sort_data(subplot)
            # Vertical geometry and y ticks come before the horizontal
            # geometry, because the y tick labels consume width.
            _height(subplot)
            _ylim_data(subplot)
            _ylim_plot(subplot)
            _yticks(subplot)
            _width(subplot)
            _xlim_data(subplot)
            _xlim_plot(subplot)
            _xticks(subplot)
            # Render order: empty canvas, grid, data, then decorations.
            _matrix(subplot)
            _add_xgrid(subplot)
            _add_ygrid(subplot)
            _add_data(subplot)
            _add_legend(subplot)
            _add_yaxis(subplot)
            _add_xaxis(subplot)
            _add_title(subplot)
            _add_labels(subplot)
    _join_matrices()
    _fig.canvas = _utility.get_canvas(_fig.matrix)
    if hide:
        return
    _utility.write(_fig.canvas)
show.__doc__ = _docstrings.show_doc
def _figure_size_max():
    # Usable terminal area, minus room for the prompt line and the
    # separators between subplot rows/columns.
    width, height = terminal_size()
    _fig.width_max = width - (_fig.cols - 1)
    _fig.height_max = height - 3 - (_fig.rows - 1)
def _figure_size():
    # The figure always spans the full available terminal area.
    _fig.width = _fig.width_max
    _fig.height = _fig.height_max
def _coherent_sizes():
    # Every subplot in a column shares the column's max requested width,
    # and every subplot in a row shares the row's max requested height
    # (None when nothing was requested).
    width = []
    for c in range(_fig.cols):
        requested = [_fig.subplots[r][c].width for r in range(_fig.rows)]
        requested = [w for w in requested if w is not None]
        width.append(max(requested, default = None))
    height = []
    for r in range(_fig.rows):
        requested = [_fig.subplots[r][c].height for c in range(_fig.cols)]
        requested = [h for h in requested if h is not None]
        height.append(max(requested, default = None))
    for r in range(_fig.rows):
        for c in range(_fig.cols):
            _fig.subplots[r][c].width = width[c]
            _fig.subplots[r][c].height = height[r]
def _previous_size(subplot):
    # Cumulative size taken by the subplots above and to the left of this
    # one, stored on the figure for later offset computations.
    _fig.previous_width = sum(_fig.subplots[0][c].width for c in range(subplot.col))
    _fig.previous_height = sum(_fig.subplots[r][0].height for r in range(subplot.row))
def _sort_data(subplot):
subplot.x_left = [subplot.x[i] for i in range(len(subplot.x)) if subplot.yaxis[i] == "left"]
subplot.y_left = [subplot.y[i] for i in range(len(subplot.x)) if subplot.yaxis[i] == "left"]
subplot.signals_left = len(subplot.y_left)
subplot.x_right = [subplot.x[i] for i in range(len(subplot.x)) if subplot.yaxis[i] == "right"]
subplot.y_right = [subplot.y[i] for i in range(len(subplot.x)) if subplot.yaxis[i] == "right"]
subplot.signals_right = len(subplot.y_right)
subplot.data_left = subplot.x_left != [] and subplot.y_left != []
subplot.data_right = subplot.x_right != [] and subplot.y_right != []
subplot.data = subplot.data_left or subplot.data_right
def _height(subplot):
    # Resolve the subplot height and how many rows remain for the canvas
    # after axes, ticks, title and labels take their share.
    subplot.height_max = _fig.height - _fig.previous_height
    height_none = subplot.height_max // (_fig.rows - subplot.row)
    height = _utility.set_if_none(subplot.height_set, height_none)
    height = abs(int(height))
    height = subplot.height_max if height > subplot.height_max else height
    subplot.height = height
    # Progressively drop decorations when the subplot is too short.
    subplot.xaxes[0] = 0 if height < 2 else subplot.xaxes[0]
    subplot.xaxes[1] = 0 if height < 3 else subplot.xaxes[1]
    subplot.ticks[0] = 0 if height < 4 else subplot.ticks[0]
    subplot.title = "" if height < 5 else subplot.title
    subplot.xlabel = "" if height < 6 else subplot.xlabel
    subplot.ylabel = ["", ""] if height < 6 else subplot.ylabel
    # Rows actually consumed by each decoration (only when data exists).
    xaxis_low = int(subplot.xaxes[0] and subplot.data)
    xaxis_up = int(subplot.xaxes[1] and subplot.data)
    xticks = bool(subplot.ticks[0] and subplot.data)
    title = int(subplot.title != "")
    labels = int(subplot.xlabel != "" or subplot.ylabel != ["", ""])
    tot = title + xaxis_low + xaxis_up + xticks + labels
    subplot.height_canvas = subplot.height - tot
def _ylim_data(subplot):
    # Work in log space when the corresponding y axis is logarithmic.
    y_left = [_utility.log(series) if subplot.yscale[0] == "log" else series for series in subplot.y_left]
    y_right = [_utility.log(series) if subplot.yscale[1] == "log" else series for series in subplot.y_right]
    subplot.ylim_data_left = _utility.get_lim_data(y_left)
    subplot.ylim_data_right = _utility.get_lim_data(y_right)
def _ylim_plot(subplot):
    # Fall back to data-derived limits wherever the user set none.
    subplot.ylim_plot_left = _utility.set_list_if_none(subplot.ylim_plot_left, subplot.ylim_data_left)
    subplot.ylim_plot_right = _utility.set_list_if_none(subplot.ylim_plot_right, subplot.ylim_data_right)
    #subplot.dy = (subplot.ylim_plot[1] - subplot.ylim_plot[0]) / subplot.height_canvas
def _yticks(subplot):
    # Auto-generate y ticks for each axis the user left unset, then map
    # every tick to the canvas row where it lands.
    if subplot.yticks_left == [] and subplot.ticks[1] and subplot.data_left:
        if subplot.yscale[0] == "linear":
            subplot.yticks_left = _utility.get_ticks(subplot.ylim_plot_left, subplot.ticks[1])
            subplot.ylabels_left = _utility.get_labels(subplot.yticks_left)
        if subplot.yscale[0] == "log":
            subplot.yticks_left, subplot.ylabels_left = _utility.get_log_ticks(subplot.ylim_plot_left, subplot.ticks[1])
    subplot.yticks_rows_left = _utility.get_matrix_data(subplot.yticks_left, subplot.ylim_plot_left, subplot.height_canvas)
    if subplot.yticks_right == [] and subplot.ticks[1] and subplot.data_right:
        if subplot.yscale[1] == "linear":
            subplot.yticks_right = _utility.get_ticks(subplot.ylim_plot_right, subplot.ticks[1])
            subplot.ylabels_right = _utility.get_labels(subplot.yticks_right)
        if subplot.yscale[1] == "log":
            subplot.yticks_right, subplot.ylabels_right = _utility.get_log_ticks(subplot.ylim_plot_right, subplot.ticks[1])
    subplot.yticks_rows_right = _utility.get_matrix_data(subplot.yticks_right, subplot.ylim_plot_right, subplot.height_canvas)
def _width(subplot):
    # Resolve the subplot width and the columns left for the canvas after
    # the y axes and their tick-label gutters take their share.
    subplot.width_max = _fig.width - _fig.previous_width
    width_none = subplot.width_max // (_fig.cols - subplot.col)
    width = _utility.set_if_none(subplot.width_set, width_none)
    width = abs(int(width))
    width = subplot.width_max if width > subplot.width_max else width
    subplot.width = width
    # Drop the y axes when the subplot is too narrow.
    subplot.yaxes[0] = 0 if width < 2 else subplot.yaxes[0]
    subplot.yaxes[1] = 0 if width < 3 else subplot.yaxes[1]
    # Tick-label gutter widths (zeroed when they do not fit).
    ylabels_width_left = max(map(len, subplot.ylabels_left), default = 0) * bool(subplot.ticks[1] and subplot.data_left)
    subplot.ticks[1] = 0 if width < 3 + ylabels_width_left else subplot.ticks[1]
    ylabels_width_left = 0 if width < 3 + ylabels_width_left else ylabels_width_left
    ylabels_width_right = max(map(len, subplot.ylabels_right), default = 0) * bool(subplot.ticks[1] and subplot.data_right)
    ylabels_width_right = 0 if width < ylabels_width_right + ylabels_width_left + 3 else ylabels_width_right
    yaxis_left = int(subplot.yaxes[0] and subplot.data)
    yaxis_right = int(subplot.yaxes[1] and subplot.data)
    tot = ylabels_width_left + yaxis_left + yaxis_right + ylabels_width_right
    subplot.width_canvas = subplot.width - tot
    subplot.ylabels_width_left = ylabels_width_left
    subplot.ylabels_width_right = ylabels_width_right
    # NOTE(review): when the left y label does not fit horizontally, one
    # canvas row is reclaimed here — confirm this interplay with _height.
    ylabel_length = len(subplot.ylabel[0]) + 3
    if subplot.width < ylabel_length:
        subplot.height_canvas += 1
def _xlim_data(subplot):
    # Work in log space when the x axis is logarithmic.
    x = [_utility.log(subplot.x[s]) if subplot.xscale == "log" else subplot.x[s] for s in range(subplot.signals)]
    subplot.xlim_data = _utility.get_lim_data(x)
def _xlim_plot(subplot):
    # Fall back to data-derived limits wherever the user set none.
    subplot.xlim_plot = _utility.set_list_if_none(subplot.xlim_plot, subplot.xlim_data)
    #subplot.dy = (subplot.ylim_plot[1] - subplot.ylim_plot[0]) / subplot.height_canvas
def _xticks(subplot):
    # Auto-generate x ticks when the user supplied none, then map every
    # tick to the canvas column where it lands.
    if subplot.xticks == [] and subplot.ticks[0]:
        if subplot.xscale == "log":
            subplot.xticks, subplot.xlabels = _utility.get_log_ticks(subplot.xlim_plot, subplot.ticks[0])
        if subplot.xscale == "linear":
            subplot.xticks = _utility.get_ticks(subplot.xlim_plot, subplot.ticks[0])
            subplot.xlabels = _utility.get_labels(subplot.xticks)
    subplot.xticks_cols = _utility.get_matrix_data(subplot.xticks, subplot.xlim_plot, subplot.width_canvas)
def _matrix(subplot):
marker = [" ", "none", subplot.canvas_color]
subplot.matrix = [[marker[:] for c in range(subplot.width_canvas)] for r in range(subplot.height_canvas)]
def _add_xgrid(subplot):
    # Draw a vertical grid line at every x tick column.
    if not subplot.grid[0]:
        return
    color = subplot.ticks_color
    for col in subplot.xticks_cols:
        x, y = _utility.get_line([col, col], [0, subplot.height_canvas])
        subplot.matrix = _utility.update_matrix(subplot.matrix, x, y, "│", color)
def _add_ygrid(subplot):
    """Draw horizontal grid lines at every y tick row, with crossings where x grid is on."""
    if not subplot.grid[1]:
        return
    color = subplot.ticks_color
    for row in subplot.yticks_rows_left + subplot.yticks_rows_right:
        xs, ys = _utility.get_line([0, subplot.width_canvas], [row, row])
        subplot.matrix = _utility.update_matrix(subplot.matrix, xs, ys, "─", color)
        if subplot.grid[0]:
            # Mark intersections with the vertical grid lines.
            cols = subplot.xticks_cols
            rows = [row] * len(cols)
            subplot.matrix = _utility.update_matrix(subplot.matrix, cols, rows, "┼", color)
def _add_data(subplot):
    """Render every signal's points and/or connecting lines onto the canvas matrix."""
    for s in range(len(subplot.x)):
        point_marker, point_color = subplot.point_marker[s], subplot.point_color[s]
        line_marker, line_color = subplot.line_marker[s], subplot.line_color[s]
        x, y = subplot.x[s], subplot.y[s]
        # On log scales the data is transformed before being mapped to cells.
        x_test = subplot.xscale == "log"
        x = _utility.log(x) if x_test else x
        y_test = (subplot.yscale[0] == "log" and subplot.yaxis[s] == "left") or (subplot.yscale[1] == "log" and subplot.yaxis[s] == "right")
        y = _utility.log(y) if y_test else y
        mf = 2 if point_marker == "small" or line_marker == "small" else 1  # small marker factor
        # "small" markers use a doubled internal resolution (mf = 2),
        # scaled back down before writing to the matrix.
        ylim_plot = subplot.ylim_plot_left if subplot.yaxis[s] == "left" else subplot.ylim_plot_right
        x_point = _utility.get_matrix_data(x, subplot.xlim_plot, mf * subplot.width_canvas)
        y_point = _utility.get_matrix_data(y, ylim_plot, mf * subplot.height_canvas)
        x_line, y_line = [], []
        if line_marker != "":
            x_line, y_line = _utility.get_line(x_point, y_point)
        # fillx/filly presumably extend the data toward the 0 coordinate
        # (via fill_data with the canvas position of 0) — confirm in _utility.
        if subplot.fillx[s]:
            height0 = _utility.get_matrix_data([0], ylim_plot, mf * subplot.height_canvas)[0]
            x_point, y_point = _utility.fill_data(x_point, y_point, height0)
            x_line, y_line = _utility.fill_data(x_line, y_line, height0)
        if subplot.filly[s]:
            width0 = _utility.get_matrix_data([0], subplot.xlim_plot, mf * subplot.width_canvas)[0]
            y_point, x_point = _utility.fill_data(y_point, x_point, width0)
            y_line, x_line = _utility.fill_data(y_line, x_line, width0)
        # Scale back from the doubled resolution before writing cells.
        x_line = [el / mf for el in x_line]
        y_line = [el / mf for el in y_line]
        if line_marker != "":
            subplot.matrix = _utility.update_matrix(subplot.matrix, x_line, y_line, line_marker, line_color)
        x_point = [el / mf for el in x_point]
        y_point = [el / mf for el in y_point]
        if point_marker != "":
            subplot.matrix = _utility.update_matrix(subplot.matrix, x_point, y_point, point_marker, point_color)
def _add_legend(subplot):
    """Build the legend box (marker sample + label per signal) and insert it in the canvas."""
    label = subplot.label
    show = any([el != "" for el in label])
    # When both y axes carry data, each entry is prefixed with its axis side.
    side_test = subplot.data_left and subplot.data_right
    if not (show or side_test):
        return
    l = len(label)
    # Unnamed signals get a default "signal N" label when the legend is shown.
    label = ["signal " + str(i + 1) if label[i] == "" and show else label[i] for i in range(l)]
    side_label = ["[" + subplot.yaxis[i] + "] " if side_test else "" for i in range(l)]
    label = [side_label[i] + label[i] for i in range(l)]
    label = [" " + el + " " for el in label]
    label = [list(el) for el in label]
    w = max(map(len, label))
    # Pad every row to the widest label so the legend box is rectangular.
    label = [el + [" "] * (w - len(el)) for el in label]
    legend = [[] for i in range(l)]
    legend_color = [[] for i in range(l)]
    for i in range(l):
        point_marker, line_marker = subplot.point_marker[i], subplot.line_marker[i]
        point_color, line_color = subplot.point_color[i], subplot.line_color[i]
        # Show the point marker when present, otherwise the line marker.
        marker = point_marker if point_marker != "" else line_marker
        marker = "▄" if marker == "small" else marker
        color = point_color if point_marker != "" else line_color
        legend[i] += [marker] * 3
        legend[i] += label[i]
        legend_color[i] += [color] * 3
        legend_color[i] += [subplot.ticks_color] * w
    # Drop rows whose label display is suppressed (e.g. extra bars of a bar plot).
    legend = [legend[i] for i in range(len(legend)) if subplot.label_show[i]]
    legend_color = [legend_color[i] for i in range(len(legend_color)) if subplot.label_show[i]]
    legend = _utility.frame_matrix(legend)
    legend_color = _utility.frame_matrix(legend_color, subplot.ticks_color)
    legend = [[ [legend[i][j], legend_color[i][j], subplot.canvas_color] for j in range(len(legend[0]))] for i in range(len(legend))]
    subplot.matrix = _utility.insert(legend, subplot.matrix) if show or side_test else subplot.matrix
# To do: Legend frame interferes with grid lines
def _add_yaxis(subplot):
if subplot.x == []:
return
labels_left = [" " * subplot.ylabels_width_left for r in range(subplot.height_canvas)]
for i in range(len(subplot.yticks_rows_left)):
r = subplot.yticks_rows_left[i]
if r in range(subplot.height_canvas):
labels_left[r] = str(subplot.ylabels_left[i])[ : subplot.ylabels_width_left]
labels_left = [" " * (subplot.ylabels_width_left - len(el)) + el for el in labels_left]
labels_left = [list(el) for el in labels_left]
labels_left = [[[sub_el, subplot.ticks_color, subplot.axes_color] for sub_el in el] for el in labels_left]
labels_left = labels_left[::-1]
ytick = "┼" if subplot.grid[1] else "┤"
ytick = "│" if subplot.ylabels_width_right == 0 and subplot.grid[1] == False else ytick
ytick = ("┼" if subplot.ylabels_width_left != 0 else "├") if subplot.grid[1] else ("┤" if subplot.ylabels_width_left != 0 else "│")
axis_left = [(ytick if r in subplot.yticks_rows_right + subplot.yticks_rows_left else "│") for r in range(subplot.height_canvas)]
axis_left = [list(el) for el in axis_left]
axis_left = [[[sub_el, subplot.ticks_color, subplot.axes_color] for sub_el in el] for el in axis_left]
axis_left = axis_left[::-1]
labels_right = [" " * subplot.ylabels_width_right for r in range(subplot.height_canvas)]
for i in range(len(subplot.yticks_rows_right)):
r = subplot.yticks_rows_right[i]
if r in range(subplot.height_canvas):
labels_right[r] = str(subplot.ylabels_right[i])[ : subplot.ylabels_width_right]
labels_right = [el + " " * (subplot.ylabels_width_right - len(el)) for el in labels_right]
labels_right = [list(el) for el in labels_right]
labels_right = [[[sub_el, subplot.ticks_color, subplot.axes_color] for sub_el in el] for el in labels_right]
labels_right = labels_right[::-1]
ytick = ("┼" if subplot.ylabels_width_right != 0 else "┤") if subplot.grid[1] else ("├" if subplot.ylabels_width_right != 0 else "│")
axis_right = [(ytick if r in subplot.yticks_rows_right + subplot.yticks_rows_left else "│") for r in range(subplot.height_canvas)]
axis_right = [list(el) for el in axis_right]
axis_right = [[[sub_el, subplot.ticks_color, subplot.axes_color] for sub_el in el] for el in axis_right]
axis_right = axis_right[::-1]
if subplot.yaxes[0]:
for r in range(subplot.height_canvas):
subplot.matrix[r] = axis_left[r] + subplot.matrix[r]
if subplot.yaxes[1]:
for r in range(subplot.height_canvas):
subplot.matrix[r] = subplot.matrix[r] + axis_right[r]
if subplot.ticks[1]:
for r in range(subplot.height_canvas):
subplot.matrix[r] = labels_left[r] + subplot.matrix[r] + labels_right[r]
def _add_xaxis(subplot):
    """Append the horizontal axis rails and the bottom x tick label row to the matrix."""
    if subplot.x == []:
        return
    # Lower/upper rails span label gutter + optional corner + canvas + corner + gutter.
    axis_lower = [" "] * subplot.ylabels_width_left + ["└"] * subplot.yaxes[0]
    axis_lower += ["─" for r in range(subplot.width_canvas)]
    axis_lower += ["┘"] * subplot.yaxes[1] + [" "] * subplot.ylabels_width_right
    axis_up = [" "] * subplot.ylabels_width_left + ["┌"] * subplot.yaxes[0]
    axis_up += ["─" for r in range(subplot.width_canvas)]
    axis_up += ["┐"] * subplot.yaxes[1] + [" "] * subplot.ylabels_width_right
    labels_lower = [" "] * subplot.ylabels_width_left + [" "] * subplot.yaxes[0]
    iniz_length = len(labels_lower)  # column offset of the canvas within the full row
    labels_lower += [" " for r in range(subplot.width_canvas)]
    labels_lower += [" "] * subplot.yaxes[0] + [" "] * subplot.ylabels_width_right
    xtick_lower = "┼" if subplot.grid[0] else "┬"
    xtick_up = "┬" if subplot.grid[0] else "─"
    l = len(subplot.xticks_cols)
    for i in range(l):
        col = subplot.xticks_cols[i] + iniz_length
        #if col >= subplot.width:
        #    continue
        label = str(subplot.xlabels[i])
        label_length = len(label)
        # Candidate start columns near the tick, kept within the row width.
        label_col = list(range(max(col - label_length, 0), min(col + label_length + 1, subplot.width)))
        label_col = [c for c in label_col if c + label_length <= subplot.width]
        if label_col == []:
            continue
        # Pick the start column that best centers the label under the tick.
        label_col = min(label_col, key = lambda x : abs(x - (col - (label_length - 2) / 2)))
        if label_col + label_length > subplot.width:
            continue
        # Only write the label if its slot (plus one cell of margin) is empty,
        # so neighbouring tick labels never overlap.
        label_prev = labels_lower[label_col - 1: label_col + label_length + 1]
        label_prev = list(set(label_prev))
        if label_prev == [" "] or label_prev == []:
            labels_lower[label_col: label_col + label_length] = list(label)
            axis_lower[col] = xtick_lower
            axis_up[col] = xtick_up
        elif axis_lower[col] == "─":
            axis_lower[col] = "┴" if subplot.grid[0] else "─"
            axis_up[col] = "┬" if subplot.grid[0] else "─"
    axis_up = [[el, subplot.ticks_color, subplot.axes_color] for el in axis_up]
    axis_lower = [[el, subplot.ticks_color, subplot.axes_color] for el in axis_lower]
    labels_lower = [[el, subplot.ticks_color, subplot.axes_color] for el in labels_lower]
    if subplot.xaxes[0]:
        subplot.matrix += [axis_lower]
    if subplot.xaxes[1]:
        subplot.matrix = [axis_up] + subplot.matrix
    if subplot.ticks[0]:
        subplot.matrix += [labels_lower]
def _add_title(subplot):
if subplot.title == "":
return
width_left = subplot.ylabels_width_left + int(subplot.yaxes[0])
title = subplot.title[ : subplot.width_canvas]
space1 = " " * (width_left + int((subplot.width_canvas - len(title)) / 2))
space2 = " " * (subplot.width - len(title + space1))
title = space1 + title + space2
title = list(title)
title = [[el, subplot.ticks_color, subplot.axes_color] for el in title]
subplot.matrix = [title] + subplot.matrix
def _add_labels(subplot):
if subplot.xlabel == "" and subplot.ylabel == ["", ""]:
return
width_max = subplot.width - 4 * 3
ylabel_left = "[y] " + subplot.ylabel[0]
width_left = subplot.ylabels_width_left + int(subplot.yaxes[0])
ylabel_left = ylabel_left + " " * (width_left - len(ylabel_left))
ylabel_left = ylabel_left[ : width_max // 3]
ylabel_right = ""
if subplot.ylabel[1] != "":
ylabel_right = subplot.ylabel[1] + " [y]"
xlabel = " " + subplot.xlabel + " [x] "
l_left = len(ylabel_left)
l_tot = len(ylabel_left + xlabel + ylabel_right)
if l_tot > subplot.width:
xlabel = ""
l_tot = len(ylabel_left + xlabel + ylabel_right)
if l_tot > subplot.width:
ylabel_right = ""
l_tot = len(ylabel_left + xlabel + ylabel_right)
if l_tot > subplot.width:
ylabel_left = ""
space1 = " " * (width_left + int((subplot.width_canvas - len(xlabel)) / 2) - l_left)
if space1 == '':
return
space1 = " " * subplot.width if space1 == '' else space1
space2 = " " * (subplot.width - l_tot - len(space1))
label = ylabel_left + space1 + xlabel + space2 + ylabel_right
label = list(label)
label = [[el, subplot.ticks_color, subplot.axes_color] for el in label]
subplot.matrix += [label]
#ToDo: a bit messy, need reordering
def _join_matrices():
    """Stitch all subplot matrices into the single figure matrix (column-major join)."""
    sep = " "
    sep = [sep, "none", "none"]  # separator cell between subplots
    matrix = []
    for c in range(_fig.cols):
        # Stack the rows of one column vertically, then join columns horizontally.
        matrix_c = []
        for r in range(_fig.rows):
            matrix_c = _utility.join(matrix_c, _fig.subplots[r][c].matrix, sep, "vertical")
        matrix = _utility.join(matrix, matrix_c, sep, "horizontal")
    _fig.matrix = matrix
    # Record the final character dimensions of the assembled figure.
    size = [0, 0] if matrix == [] else [len(matrix[0]), len(matrix)]
    _fig.width, _fig.height = size
##############################################
######### Plotting Functions ###########
##############################################
def scatter(*args,
            yaxis = "left",
            label = "",
            marker = None,
            color = None,
            fillx = None,
            filly = None):
    # Points-only drawing: delegate to _draw with the line channel disabled.
    _draw(*args,
          yaxis = yaxis,
          label = label,
          point_marker = marker,
          point_color = color,
          line_marker = "",
          line_color = "none",
          fillx = fillx,
          filly = filly)
scatter.__doc__ = _docstrings.scatter_doc
def plot(*args,
         yaxis = "left",
         label = "",
         marker = None,
         color = None,
         fillx = None,
         filly = None):
    # Lines-only drawing: delegate to _draw with the point channel disabled.
    _draw(*args,
          yaxis = yaxis,
          label = label,
          line_marker = marker,
          line_color = color,
          point_marker = "",
          point_color = "none",
          fillx = fillx,
          filly = filly)
plot.__doc__ = _docstrings.plot_doc
def bar(*args,
        yaxis = "left",
        label = "",
        marker = "small",
        color = None,
        fill = True,
        width = 4 / 5,
        orientation = 'vertical'):
    """Draw a bar plot; each bar is rendered as a filled (or outlined) plot() call."""
    x, y = _utility.get_data(*args)
    # bar_xdata maps possibly-categorical x values to numeric positions + labels.
    x, x_labels = _utility.bar_xdata(x)
    xbar, ybar = _utility.bars(x, y, width)
    if orientation in ['vertical', 'v']:
        fillx, filly = fill, False
        # Bar positions become x ticks, merged with any existing ticks.
        x_ticks = _fig.subplot.xticks + x
        x_labels = _fig.subplot.xlabels + x_labels
        xticks(x_ticks, x_labels)
    elif orientation in ['horizontal', 'h']:
        # Horizontal bars: swap axes and tick the y axis instead.
        xbar, ybar = ybar, xbar
        fillx, filly = False, fill
        y_ticks = x + (_fig.subplot.yticks_left if yaxis == "left" else _fig.subplot.yticks_right)
        y_labels = x_labels + (_fig.subplot.ylabels_left if yaxis == "left" else _fig.subplot.ylabels_right)
        yticks(y_ticks, y_labels, yaxis = yaxis)
    for b in range(len(x)):
        xb, yb = xbar[b][1:3] + xbar[b][3:5], ybar[b][1:3] + ybar[b][3:5]
        if not fill:
            # Unfilled bars use the full outline coordinates.
            xb, yb = xbar[b], ybar[b]
            fillx, filly = False, False
        if list(set(yb)) != [0]:  # skip zero-height bars
            plot(xb, yb, yaxis = yaxis, label = label, marker = marker, color = color, fillx = fillx, filly = filly)
        if b != 0:
            # All bars after the first reuse the first bar's color and are
            # hidden from the legend so the plot shows a single entry.
            _fig.subplot.point_color[-1] = _fig.subplot.point_color[-2]
            _fig.subplot.line_color[-1] = _fig.subplot.line_color[-2]
            _fig.subplot.label_show[-1] = False
    # _sort_data(_fig.subplot)
    # y_plot = _fig.subplot.y_left if yaxis == "left" else _fig.subplot.y_right
    # if orientation in ['horizontal', 'h']:
    #     y_plot = _fig.subplot.x_left if yaxis == "left" else _fig.subplot.x_right
    # m, M = _utility.get_lim_data(y_plot)
    # if m * M > 0:
    #     m = 0
    # if orientation in ['vertical', 'v']:
    #     #ylim(m, M, yaxis = yaxis)
    #     pass
    # else:
    #     xlim(m, M)
bar.__doc__ = _docstrings.bar_doc
def hist(data,
         bins = 10,
         yaxis = "left",
         label = "",
         marker = "small",
         color = None,
         fill = True,
         width = 4 / 5,
         orientation = 'vertical'):
    """Histogram: bin the data with hist_data and render the counts as a bar plot."""
    x, y = _utility.hist_data(data, bins)
    bar(x, y, yaxis = yaxis, label = label, marker = marker, color = color, fill = fill, width = width, orientation= orientation)
# NOTE(review): this assigns plot's docstring to hist — looks like a
# copy-paste slip; likely meant _docstrings.hist_doc if it exists — confirm.
hist.__doc__ = _docstrings.plot_doc
##############################################
########## Other Functions #############
##############################################
# Re-exported utilities: each public name aliases a _utility helper and
# receives its user-facing docstring from _docstrings.
string_to_time = _utility.string_to_time
string_to_time.__doc__ = _docstrings.string_to_time_doc
def get_canvas():
    """Return the current figure canvas string (with color codes)."""
    return _fig.canvas
get_canvas.__doc__ = _docstrings.get_canvas_doc
sleep = _utility.sleep
sleep.__doc__ = _docstrings.sleep_doc
def savefig(path = None):
    """Save the current canvas to *path* as plain text (color codes stripped)."""
    path = _utility.check_path(path)
    with open(path , "w+", encoding = "utf-8") as file:
        file.write(_utility.remove_color(_fig.canvas))
    print("plot saved as " + path)
savefig.__doc__ = _docstrings.savefig_doc
save_fig = savefig  # backward-compatible alias
terminal_size = _utility.terminal_size
terminal_size.__doc__ = _docstrings.terminal_size_doc
version = _utility.version
version.__doc__ = _docstrings.version_doc
docstrings = _utility.docstrings
docstrings.__doc__ = _docstrings.docstrings_doc
colors = _utility.colors
colors.__doc__ = _docstrings.colors_doc
markers = _utility.markers
markers.__doc__ = _docstrings.markers_doc
sin = _utility.sin
sin.__doc__ = _docstrings.sin_doc
# Manual smoke test: run the installed package's self test when executed directly.
if __name__ == "__main__":
    #test()
    import plotext as plt
    plt.test()
| StarcoderdataPython |
1644693 | <filename>evaluateData/changeHSVSoThatMinCompareHist.py<gh_stars>1-10
import cv2
import matplotlib.pyplot as plt
from changeBrightness import changeH, changeS, changeV
def changeImage(img, s, v):
    """Apply a saturation shift of s, then a value (brightness) shift of v, to img."""
    saturated = changeS(img, s)
    return changeV(saturated, v)
def findHSV(img, img2):
    """Grid-search (s, v) shifts for img2 minimizing |HISTCMP_CORREL| against img's histogram.

    Returns (best_adjusted_image, min_score).

    Bug fix: the original returned whatever image was produced on the LAST grid
    step, not the one corresponding to the winning (minJ, minK) parameters.
    It also computed each comparison score twice.
    """
    hist = cv2.calcHist([img], [0], None, [256], [0, 256])
    hist2 = cv2.calcHist([img2], [0], None, [256], [0, 256])
    # Baseline: the unmodified image, i.e. shifts (0, 0).
    minHist = abs(cv2.compareHist(hist, hist2, cv2.HISTCMP_CORREL))
    minJ, minK = 0, 0
    for j in range(0, 255, 5):
        for k in range(0, 255, 5):
            candidate = changeImage(img2, j, k)
            candidateHist = cv2.calcHist([candidate], [0], None, [256], [0, 256])
            score = abs(cv2.compareHist(hist, candidateHist, cv2.HISTCMP_CORREL))
            if score < minHist:
                minHist = score
                minJ, minK = j, k
    print(f"Changed: {minJ}, {minK}")
    # Re-derive the image from the winning parameters so the returned image
    # actually matches minHist.
    return changeImage(img2, minJ, minK), minHist
# Demo: load a real/fake image pair, fit HSV shifts, and compare histograms.
img = cv2.imread("epoch004_real_A.png")
img2 = cv2.imread("epoch004_fake_B.png")
changedImage, minHist = findHSV(img, img2)
# Plot original vs adjusted channel-0 histograms, one above the other.
hist = cv2.calcHist([img],[0],None,[256],[0,256])
changedHist = cv2.calcHist([changedImage],[0],None,[256],[0,256])
plt.subplot(2,1,1)
plt.plot(hist)
plt.subplot(2,1,2)
plt.plot(changedHist)
plt.show()
print(minHist)
# Show both images until a key is pressed.
cv2.imshow("origin", img)
cv2.imshow("changed", changedImage)
cv2.waitKey(0)
cv2.destroyAllWindows()
6420482 | import pandas as pd
from sqlalchemy.sql.functions import min
def SaveToExcel(player1, player2, excelfilename):
    """Build a two-row DataFrame of player names (indexed player1/player2) and print it.

    Persisting to *excelfilename* is intentionally left disabled, as in the original.
    """
    frame = pd.DataFrame({"name": [player1, player2]}, ["player1", "player2"])
    print(frame)
    # frame.to_excel(excelfilename)
#0,1,2,3
def ReadFromExcel(excelfilename):
    """Read the two player names back from an Excel file written by SaveToExcel.

    Assumes the sheet's first column is the index with rows labeled
    "player1" and "player2" (the layout SaveToExcel produces) — confirm
    for other inputs.

    Fix: the original indexed the label-indexed Series with positional int
    keys (players[0], players[1]), which is deprecated in pandas and was
    immediately overwritten for player1 anyway; label access is used instead.
    """
    df = pd.read_excel(excelfilename, index_col=0)
    players = df["name"]
    player1 = players["player1"]
    player2 = players["player2"]
    return player1, player2
# Demo: read the two names from a hard-coded Windows path and print them.
x1,x2=ReadFromExcel("E:\\Excel\\data.xlsx")
print(x1,x2)
# Dead code below: a module-level string literal used to park earlier
# experiments; it has no runtime effect and is preserved verbatim.
"""
SaveToExcel("AB","CD","h:\\pandas\\data.xlsx")
player1="dffsdfsf"
player2="sadsd<PASSWORD>"
filename="h:\\pandas\\myfile.xlsx"
SaveToExcel(player1,player2,filename)
print("Hello")
data={"name":["pappu","pippi"]}
index=["player1","player2"]
print(data)
df=pd.DataFrame(data,index)
print(df)
df.to_excel("h:\\pandas\\data.xlsx")
"""
| StarcoderdataPython |
6674561 | from numpy import sum as npsum
from numpy import zeros, abs, mean
from CopMargSep import CopMargSep
def SWDepMeasure(X, probs):
    # This function estimates the Schweizer and Wolff measure of dependence
    # between two random variables by means of Monte Carlo simulations
    # INPUTS
    #  X     : [matrix] (2 x j_) joint scenarios
    #  probs : [vector] (1 x j_) vector of Flexible probabilities
    # OPS
    #  dep   : [scalar] Schweizer-Wolff measure estimate
    # For details on the exercise, see here .
    ## Code
    _, _, U = CopMargSep(X, probs)  # grades scenarios
    j_ = X.shape[1]  # number of scenarios
    # g[i, k] = |empirical copula at (i/j_, k/j_) - independence copula i*k/j_**2|
    g = zeros((j_, j_))
    for i in range(j_):
        for k in range(j_):
            g[i, k] = abs(npsum(probs*(U[0] <= i/j_)*(U[1] <= k/j_))-(i*k)/j_**2)
    # Scale factor 12 normalizes the measure to [0, 1].
    # NOTE(review): the double loop is O(j_^2) sums over j_ elements; it could
    # be vectorized, but that would change float summation order — confirm
    # tolerance before doing so.
    dep = 12 *mean(g.flatten())
    return dep
6674614 | from os.path import dirname, abspath, join, isfile, basename
from os import listdir
from importlib.util import spec_from_file_location, module_from_spec
"""
Structure typing:
OneWireRegisterMap: (str, [OneWireRegisterEntry, ...], [OneWireRegisterEntry, ...])
OneWireRegisterEntry: (int, int, str, bool, int, int, str)
Structure meaning:
OneWireRegisterMap: (name, [EEPROM_register, ...], [RAM_register, ...])
OneWireRegisterEntry: (address, size, name, writable, min, max, docstring)
"""
def register_map_structure_check(register_map):
    """Assert that *register_map* matches the OneWireRegisterMap structure.

    Expected shape: (name, [entry, ...], [entry, ...]) where each entry is
    (address:int, size:int, name:str, writable:bool, min:int, max:int, doc:str).
    On a mismatch inside either entry list, prints the failing list/entry
    indices and re-raises the AssertionError.
    """
    assert isinstance(register_map, tuple)
    assert len(register_map) == 3
    assert isinstance(register_map[0], str)
    field_types = (int, int, str, bool, int, int, str)
    list_index = 0
    for entries in register_map[1:]:
        entry_index = 0
        try:
            assert isinstance(entries, list)
            for entry in entries:
                assert isinstance(entry, tuple)
                assert len(entry) == 7
                for field, expected in zip(entry, field_types):
                    assert isinstance(field, expected)
                entry_index += 1
        except AssertionError:
            print("Error at i=" + str(list_index), "j=" + str(entry_index))
            raise
        list_index += 1
def get_register_map_list():
    """Import every sibling .py file and collect its valid OneWireRegisterMap.

    Files that fail to import, lack the attribute, or fail the structure
    check are silently skipped (deliberate best-effort scan).
    """
    this_file = abspath(__file__)
    reg_dir = dirname(this_file)
    # Collect every other Python file in this package directory.
    other_files = []
    for f in listdir(reg_dir):
        file = join(reg_dir, f)
        if isfile(file) and file != this_file and file.lower().endswith('.py'):
            other_files.append(file)
    reg_map_list = []
    for f in other_files:
        # noinspection PyBroadException
        try:
            # Load the module directly from its path, outside the normal
            # import system, so arbitrary sibling files can be probed.
            spec = spec_from_file_location(basename(f), f)
            reg_module = module_from_spec(spec)
            spec.loader.exec_module(reg_module)
            reg_map = reg_module.OneWireRegisterMap
            register_map_structure_check(reg_map)
            reg_map_list.append(reg_map)
        except Exception:
            # Broad on purpose: any bad file is simply skipped.
            pass
    return reg_map_list
| StarcoderdataPython |
1941138 | import argparse
import torch
from transformers import ElectraModel
from tokenization_kocharelectra import KoCharElectraTokenizer
# Get the model path
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", default="monologg/kocharelectra-base-discriminator",
                    type=str, help="Path to pre-trained model or shortcut name")
args = parser.parse_args()
# Load model and tokenizer
model = ElectraModel.from_pretrained(args.model_name_or_path)
tokenizer = KoCharElectraTokenizer.from_pretrained(args.model_name_or_path)
# Encode a sentence pair (character-level tokenization).
text_a = "나는 걸어가고 있는 중입니다."
text_b = "나는 밥을 먹고 있는 중입니다."
inputs = tokenizer.encode_plus(
    text=text_a,
    text_pair=text_b,
    add_special_tokens=True,  # This add [CLS] on front, [SEP] at last
    pad_to_max_length=True,
    max_length=40
)
tokens = tokenizer.tokenize("[CLS] " + text_a + " [SEP] " + text_b + " [SEP]")
print("--------------------------------------------------------")
print("tokens: ", " ".join(tokens))
print("input_ids: {}".format(" ".join([str(x) for x in inputs['input_ids']])))
print("token_type_ids: {}".format(" ".join([str(x) for x in inputs['token_type_ids']])))
print("attention_mask: {}".format(" ".join([str(x) for x in inputs['attention_mask']])))
print("--------------------------------------------------------")
# Make the input with batch size 1
input_ids = torch.LongTensor(inputs['input_ids']).unsqueeze(0)
token_type_ids = torch.LongTensor(inputs['token_type_ids']).unsqueeze(0)
attention_mask = torch.LongTensor(inputs['attention_mask']).unsqueeze(0)
# Forward pass without gradients; output[0] is the last hidden state.
with torch.no_grad():
    output = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
last_layer_hidden_state = output[0]
print("[Last layer hidden state]")
print("Size:", last_layer_hidden_state.size())
print("Tensor:", last_layer_hidden_state)
216865 | <filename>docs/examples/all_on_3.py<gh_stars>100-1000
from gpiozero import LED, Buzzer, Button
from signal import pause
# Devices on GPIO pins 2-6 (presumably BCM numbering, gpiozero's default — confirm wiring).
button = Button(2)
buzzer = Buzzer(3)
red = LED(4)
amber = LED(5)
green = LED(6)
# All outputs toggled together by the button handlers below.
things = [red, amber, green, buzzer]
def things_on():
    """Switch every device in things (LEDs and buzzer) on."""
    for device in things:
        device.on()
def things_off():
    """Switch every device in things (LEDs and buzzer) off."""
    for device in things:
        device.off()
# Hold-to-activate: everything on while the button is held, off on release.
button.when_pressed = things_on
button.when_released = things_off
# Keep the script alive so the event callbacks can fire.
pause()
| StarcoderdataPython |
4974096 | import json
from pathlib import Path
import appdirs
from jambottle.bottles import Jam
def getdatadir():
    """Return the per-user data directory for jambottle as a Path."""
    return Path(appdirs.user_data_dir("jambottle", "<NAME>"))
# Location of the persisted JSON configuration file.
conf_path = getdatadir() / "conf.json"
class Config():
    """Bot configuration persisted as JSON at conf_path."""

    def __init__(self, discord_token=None, jams=None):
        # Fall back to empty values so Config() is a usable default config.
        self.discord_token = discord_token or ""
        self.jams = jams or []

    def save(self):
        """Write this configuration to conf_path, creating parent dirs as needed."""
        conf_path.parent.mkdir(parents=True, exist_ok=True)
        conf_path.write_text(json.dumps(self.to_json(), indent=4))

    def to_json(self):
        """Return a JSON-serializable dict representation of this config."""
        return {
            "discord_token": self.discord_token,
            "jams": [jam.to_json() for jam in self.jams]
        }

    @classmethod
    def load(cls):
        """Load the saved configuration; on first run, persist and return defaults."""
        try:
            conf = cls.from_json(json.loads(conf_path.read_text()))
        except FileNotFoundError:
            # use default configuration
            conf = cls()
            conf.save()
        return conf

    @classmethod
    def from_json(cls, j):
        """Build a Config from a parsed JSON dict.

        Fix: a missing "jams" key previously made j.get("jams") return None,
        which crashed the list comprehension; it is now treated as no jams.
        """
        discord_token = j["discord_token"]
        jams = [Jam.from_json(jam) for jam in j.get("jams") or []]
        return cls(discord_token, jams)
6431294 | #!/usr/bin/env python
# coding: utf-8
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="images/book_cover.jpg" width="120">
#
# *This notebook contains an excerpt from the [Python Programming and Numerical Methods - A Guide for Engineers and Scientists](https://www.elsevier.com/books/python-programming-and-numerical-methods/kong/978-0-12-819549-9), the content is also available at [Berkeley Python Numerical Methods](https://pythonnumericalmethods.berkeley.edu/notebooks/Index.html).*
#
# *The copyright of the book belongs to Elsevier. We also have this interactive book online for a better learning experience. The code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work on [Elsevier](https://www.elsevier.com/books/python-programming-and-numerical-methods/kong/978-0-12-819549-9) or [Amazon](https://www.amazon.com/Python-Programming-Numerical-Methods-Scientists/dp/0128195495/ref=sr_1_1?dchild=1&keywords=Python+Programming+and+Numerical+Methods+-+A+Guide+for+Engineers+and+Scientists&qid=1604761352&sr=8-1)!*
# <!--NAVIGATION-->
# < [14.4 Solutions to Systems of Linear Equations](chapter14.04-Solutions-to-Systems-of-Linear-Equations.ipynb) | [Contents](Index.ipynb) | [14.6 Matrix Inversion](chapter14.06-Matrix-Inversion.ipynb) >
# # Solve Systems of Linear Equations in Python
# Though we discussed various methods to solve the systems of linear equations, it is actually very easy to do it in Python. In this section, we will use Python to solve the systems of equations. The easiest way to get a solution is via the *solve* function in Numpy.
#
# **TRY IT!** Use numpy.linalg.solve to solve the following equations.
#
# \begin{eqnarray*}
# 4x_1 + 3x_2 - 5x_3 &=& 2 \\
# -2x_1 - 4x_2 + 5x_3 &=& 5 \\
# 8x_1 + 8x_2 &=& -3 \\
# \end{eqnarray*}
# In[1]:
import numpy as np
# Coefficient matrix and right-hand side of the 3x3 linear system.
A = np.array([[4, 3, -5],
              [-2, -4, 5],
              [8, 8, 0]])
y = np.array([2, 5, -3])
# Solve A x = y directly (LU-based solver).
x = np.linalg.solve(A, y)
print(x)
# We can see we get the same results as that in the previous section when we calculated by hand. Under the hood, the solver is actually doing a LU decomposition to get the results. You can check the help of the function, it needs the input matrix to be square and of full-rank, i.e., all rows (or, equivalently, columns) must be linearly independent.
#
# **TRY IT!** Try to solve the above equations using the matrix inversion approach.
# In[2]:
# Same system solved via explicit matrix inversion: x = A^-1 y.
A_inv = np.linalg.inv(A)
x = np.dot(A_inv, y)
print(x)
# We can also get the $L$ and $U$ matrices used in the LU decomposition using the scipy package.
#
# **TRY IT!** Get the $L$ and $U$ for the above matrix A.
# In[3]:
from scipy.linalg import lu
# LU decomposition with partial pivoting: P @ A == L @ U.
P, L, U = lu(A)
print('P:\n', P)
print('L:\n', L)
print('U:\n', U)
print('LU:\n',np.dot(L, U))
# We can see the $L$ and $U$ we get are different from the ones we got in the last section by hand. You will also see there is a **permutation matrix** $P$ that returned by the *lu* function. This permutation matrix record how do we change the order of the equations for easier calculation purposes (for example, if first element in first row is zero, it can not be the pivot equation, since you can not turn the first elements in other rows to zero. Therefore, we need to switch the order of the equations to get a new pivot equation). If you multiply $P$ with $A$, you will see that this permutation matrix reverse the order of the equations for this case.
#
# **TRY IT!** Multiply $P$ and $A$ and see what's the effect of the permutation matrix on $A$.
# In[4]:
# Show the permutation's effect: P @ A gives the row-reordered system.
print(np.dot(P, A))
# <!--NAVIGATION-->
# < [14.4 Solutions to Systems of Linear Equations](chapter14.04-Solutions-to-Systems-of-Linear-Equations.ipynb) | [Contents](Index.ipynb) | [14.6 Matrix Inversion](chapter14.06-Matrix-Inversion.ipynb) >
| StarcoderdataPython |
6514561 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# stdlib
import base64
import urllib
import urllib2
import json
import logging
class EasyException(Exception):
    """Base error for the easyops API: carries code, explanation, message and payload."""

    def __init__(self, code, code_explain, error, data):
        """
        easyops error exception.
        :param code: numeric return code
        :param code_explain: detailed error string used to locate the problem
        :param error: human-readable error message shown to users
        :param data: extra payload returned with the error
        """
        # Populate Exception.args so str(exc) and logging show the message
        # (the original left args empty). py2-compatible super() call.
        super(EasyException, self).__init__(error)
        self.code = code
        self.code_explain = code_explain
        self.error = error
        self.data = data
class UnknownException(EasyException):
    """Catch-all error (code 100010); wraps an arbitrary underlying error message."""
    def __init__(self, error):
        # Fixed code/explanation; only the message varies.
        self.code = 100010
        self.code_explain = "ERR_UNKNOWN"
        self.error = error
        self.data = {}
class NameServiceException(EasyException):
    """Name-service lookup failure (code 100013); records the failing session id."""
    def __init__(self, session_id):
        self.code = 100013
        self.code_explain = "ERR_UNAVAILABLE"
        self.error = "name service error, session_id={}".format(session_id)
        self.data = {}
def do_http(method, url, params={}, headers={}, timeout=10):
    """
    Perform an HTTP request (Python 2 urllib/urllib2) and return the raw body.

    GET params are appended to the URL as a query string; other methods send
    them in the body, JSON-encoded when Content-Type is application/json,
    form-encoded otherwise.
    NOTE(review): mutable default arguments are risky in general; this
    function does not mutate params/headers itself, but callers share the
    same default dicts.
    """
    method = method.upper()
    if not isinstance(params, dict) or not isinstance(headers, dict):
        raise Exception('params and headers must be dict')
    if len(params) > 0:
        if method == 'GET':
            data = urllib.urlencode(params)
            request = urllib2.Request('%s?%s' % (url, data))
        else:
            if headers.get('Content-Type', '').lower() == 'application/json':
                data = json.dumps(params)
            else:
                data = urllib.urlencode(params)
            request = urllib2.Request(url, data=data)
    else:
        request = urllib2.Request(url)
    for key, val in headers.items():
        request.add_header(key, val)
    # urllib2.Request picks the verb from the body; force the requested method.
    request.get_method = lambda: method
    response = urllib2.urlopen(request, timeout=timeout)
    data = response.read()
    response.close()
    return data
def do_api_request(
method, src_name, dst_name, host, uri, params={}, headers={}, timeout=10, auth_user="", auth_password="", server_ip="", server_port=0
):
headers['Content-Type'] = 'application/json'
if host:
headers['Host'] = host
if auth_user:
base64string = base64.b64encode('%s:%s' % (auth_user, auth_password))
headers["Authorization"] = "Basic %s" % base64string
if server_ip != "" and server_port != 0:
ip = server_ip
port = server_port
else:
import ens_api
session_id, ip, port = ens_api.get_service_by_name(src_name, dst_name)
if session_id <= 0:
raise NameServiceException(session_id)
url = "http://{ip}:{port}{uri}".format(ip=ip, port=port, uri=uri)
data = None
try:
data = do_http(method, url, params, headers, timeout)
data_obj = json.loads(data)
logging.debug(
"method: {method}, url: {url}, params: {params}, headers: {headers}, timeout: {timeout}, data: {data}".
format(method=method, url=url, params=params, headers=headers, timeout=timeout, data=data_obj)
)
except ValueError as e:
logging.error(
"json decode error, method={0} url={1}, "
"params={2}, headers={3}, timeout={4}, data={5}".format(method, url, params, headers, timeout, data)
)
raise UnknownException(e)
except urllib2.HTTPError as e:
data = e.read()
logging.error(
"json decode error, method={0} url={1}, "
"params={2}, headers={3}, timeout={4}, data={5}".format(method, url, params, headers, timeout, data)
)
try:
data_obj = json.loads(data)
except Exception, e:
raise UnknownException(e)
raise EasyException(data_obj["code"], data_obj.get("codeExplain"), data_obj.get("error"), data_obj.get("data"))
else:
return data_obj
| StarcoderdataPython |
4807705 | # encoding: utf-8
# Sample project fabfile.py (should be on the same level as manage.py, see README)
from fabmanager import fabfile
from fabmanager.fabfile import *
# Environments
# Shared deployment constants used by both environment definitions.
WORKON_HOME = '/opt/python'
GIT_REPO = '<EMAIL>:myproject.git'
PROJECT = 'myproject'
# Register both environments with fabmanager; 'beta' additionally pins a
# dedicated git branch and settings module.
ENVS.update({
    'production': {
        'host': 'www.mysite.com.br',
        'git_repo': GIT_REPO,
        'workon': WORKON_HOME,
        'project': PROJECT,
        'virtualenv': 'prod',
        'settings': 'settings_production',
    },
    'beta': {
        'host': 'beta.mysite.com.br',
        'git_repo': GIT_REPO,
        'workon': WORKON_HOME,
        'project': PROJECT,
        'virtualenv': 'beta',
        'git_branch': 'beta',
        'settings': 'settings_beta',
    },
})
def prod():
    """Select the 'production' environment for subsequent fab tasks."""
    fabfile._setup_environment('production')
def beta():
    """Select the 'beta' environment for subsequent fab tasks."""
    fabfile._setup_environment('beta')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.