| id | text | dataset_id |
|---|---|---|
3241103 | # coding: utf-8
"""The News Insert Implementation."""
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure


def insert_news(date, title, html, trhtml, text, trtext):
    client = MongoClient(connectTimeoutMS=2000, serverSelectionTimeoutMS=2000)
    try:
        # The ismaster command is cheap and does not require auth.
        client.admin.command('ismaster')
    except ConnectionFailure:
        print("Server not available")
        return
    db = client.news
    coll = db[date]
    post = {"title": title,
            "text": text,
            "trtext": trtext,
            "html": html,
            "trhtml": trhtml,
            # "date": datetime.datetime.utcnow()
            }
    coll.insert_one(post)
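
# Added usage sketch (not part of the original snippet); the date key and
# field values below are illustrative assumptions. Against a reachable local
# MongoDB this inserts one document, otherwise it prints the warning above.
if __name__ == "__main__":
    insert_news(date="2020-01-01", title="Example title",
                html="<p>html</p>", trhtml="<p>translated html</p>",
                text="plain text", trtext="translated text")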
| StarcoderdataPython |
3298994 | from .TaroColor import TaroColor
| StarcoderdataPython |
1750627 | # -*- coding: utf-8 -*-
from oauthlib.common import Request
from oauthlib.oauth1 import (
    SIGNATURE_HMAC_SHA1, SIGNATURE_HMAC_SHA256, SIGNATURE_PLAINTEXT,
    SIGNATURE_RSA, SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_QUERY,
)
from oauthlib.oauth1.rfc5849 import Client
from tests.unittest import TestCase


class ClientRealmTests(TestCase):

    def test_client_no_realm(self):
        client = Client("client-key")
        uri, header, body = client.sign("http://example-uri")
        self.assertTrue(
            header["Authorization"].startswith('OAuth oauth_nonce='))

    def test_client_realm_sign_with_default_realm(self):
        client = Client("client-key", realm="moo-realm")
        self.assertEqual(client.realm, "moo-realm")
        uri, header, body = client.sign("http://example-uri")
        self.assertTrue(
            header["Authorization"].startswith('OAuth realm="moo-realm",'))

    def test_client_realm_sign_with_additional_realm(self):
        client = Client("client-key", realm="moo-realm")
        uri, header, body = client.sign("http://example-uri", realm="baa-realm")
        self.assertTrue(
            header["Authorization"].startswith('OAuth realm="baa-realm",'))
        # make sure sign() does not override the default realm
        self.assertEqual(client.realm, "moo-realm")


class ClientConstructorTests(TestCase):

    def test_convert_to_unicode_resource_owner(self):
        client = Client('client-key',
                        resource_owner_key=b'owner key')
        self.assertNotIsInstance(client.resource_owner_key, bytes)
        self.assertEqual(client.resource_owner_key, 'owner key')

    def test_give_explicit_timestamp(self):
        client = Client('client-key', timestamp='1')
        params = dict(client.get_oauth_params(Request('http://example.com')))
        self.assertEqual(params['oauth_timestamp'], '1')

    def test_give_explicit_nonce(self):
        client = Client('client-key', nonce='1')
        params = dict(client.get_oauth_params(Request('http://example.com')))
        self.assertEqual(params['oauth_nonce'], '1')

    def test_decoding(self):
        client = Client('client_key', decoding='utf-8')
        uri, headers, body = client.sign('http://a.b/path?query',
                                         http_method='POST', body='a=b',
                                         headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertIsInstance(uri, bytes)
        self.assertIsInstance(body, bytes)
        for k, v in headers.items():
            self.assertIsInstance(k, bytes)
            self.assertIsInstance(v, bytes)

    def test_hmac_sha1(self):
        client = Client('client_key')
        # instance is using the correct signer method
        self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_HMAC_SHA1],
                         client.SIGNATURE_METHODS[client.signature_method])

    def test_hmac_sha256(self):
        client = Client('client_key', signature_method=SIGNATURE_HMAC_SHA256)
        # instance is using the correct signer method
        self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_HMAC_SHA256],
                         client.SIGNATURE_METHODS[client.signature_method])

    def test_rsa(self):
        client = Client('client_key', signature_method=SIGNATURE_RSA)
        # instance is using the correct signer method
        self.assertEqual(Client.SIGNATURE_METHODS[SIGNATURE_RSA],
                         client.SIGNATURE_METHODS[client.signature_method])
        # don't need an RSA key to instantiate
        self.assertIsNone(client.rsa_key)


class SignatureMethodTest(TestCase):

    def test_hmac_sha1_method(self):
        client = Client('client_key', timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="HMAC-SHA1", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="hH5BWYVqo7QI4EmPBUUe9owRUUQ%3D"')
        self.assertEqual(h['Authorization'], correct)

    def test_hmac_sha256_method(self):
        client = Client('client_key', signature_method=SIGNATURE_HMAC_SHA256,
                        timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="HMAC-SHA256", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="JzgJWBxX664OiMW3WE4MEjtYwOjI%2FpaUWHqtdHe68Es%3D"')
        self.assertEqual(h['Authorization'], correct)

    def test_rsa_method(self):
        private_key = (
            "-----<KEY>"
            "S8Q8jiheHeYYp/<KEY>pZI4s5i+UPwVpupG\nAlwXWfzXw"
            "SMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVa"
            "h\n5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjT"
            "MO7IdrwIDAQAB\nAoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9"
            "GxocdM1m30WyWRFMEz2nKJ8fR\np3vTD4w8yplTOhcoXdQZl0kRoaD"
            "zrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC\nDY6xveQczE7qt7V"
            "k7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i\nrf6"
            "qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVaw"
            "Ft3UMhe\n542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+"
            "XO/2xKp/d/ty1OIeovx\nC60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZL"
            "eMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT\nSuy30sKjLzqoGw1kR+wv7"
            "C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA\nkmaMg2PNr"
            "jUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzV"
            "S\nJzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lT"
            "LVduVgh4v5yLT\nGa6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPv"
            "dMlxqXA==\n-----END RSA PRIVATE KEY-----"
        )
        client = Client('client_key', signature_method=SIGNATURE_RSA,
                        rsa_key=private_key, timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="RSA-SHA1", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="ktvzkUhtrIawBcq21DRJrAyysTc3E1Zq5GdGu8EzH'
                   'OtbeaCmOBDLGHAcqlm92mj7xp5E1Z6i2vbExPimYAJL7FzkLnkRE5YEJR4'
                   'rNtIgAf1OZbYsIUmmBO%2BCLuStuu5Lg3tAluwC7XkkgoXCBaRKT1mUXzP'
                   'HJILzZ8iFOvS6w5E%3D"')
        self.assertEqual(h['Authorization'], correct)

    def test_plaintext_method(self):
        client = Client('client_key',
                        signature_method=SIGNATURE_PLAINTEXT,
                        timestamp='1234567890',
                        nonce='abc',
                        client_secret='foo',
                        resource_owner_secret='bar')
        u, h, b = client.sign('http://example.com')
        correct = ('OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
                   'oauth_version="1.0", oauth_signature_method="PLAINTEXT", '
                   'oauth_consumer_key="client_key", '
                   'oauth_signature="foo%26bar"')
        self.assertEqual(h['Authorization'], correct)

    def test_invalid_method(self):
        client = Client('client_key', signature_method='invalid')
        self.assertRaises(ValueError, client.sign, 'http://example.com')

    def test_rsa_no_key(self):
        client = Client('client_key', signature_method=SIGNATURE_RSA)
        self.assertRaises(ValueError, client.sign, 'http://example.com')

    def test_register_method(self):
        Client.register_signature_method('PIZZA',
                                         lambda base_string, client: 'PIZZA')
        self.assertIn('PIZZA', Client.SIGNATURE_METHODS)

        client = Client('client_key', signature_method='PIZZA',
                        timestamp='1234567890', nonce='abc')
        u, h, b = client.sign('http://example.<EMAIL>')
        self.assertEqual(h['Authorization'], (
            'OAuth oauth_nonce="abc", oauth_timestamp="1234567890", '
            'oauth_version="1.0", oauth_signature_method="PIZZA", '
            'oauth_consumer_key="client_key", '
            'oauth_signature="PIZZA"'
        ))


class SignatureTypeTest(TestCase):

    def test_params_in_body(self):
        client = Client('client_key', signature_type=SIGNATURE_TYPE_BODY,
                        timestamp='1378988215', nonce='14205877133089081931378988215')
        _, h, b = client.sign('http://i.b/path', http_method='POST', body='a=b',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')
        correct = ('a=b&oauth_nonce=14205877133089081931378988215&'
                   'oauth_timestamp=1378988215&'
                   'oauth_version=1.0&'
                   'oauth_signature_method=HMAC-SHA1&'
                   'oauth_consumer_key=client_key&'
                   'oauth_signature=2JAQomgbShqoscqKWBiYQZwWq94%3D')
        self.assertEqual(b, correct)

    def test_params_in_query(self):
        client = Client('client_key', signature_type=SIGNATURE_TYPE_QUERY,
                        timestamp='1378988215', nonce='14205877133089081931378988215')
        u, _, _ = client.sign('http://i.b/path', http_method='POST')
        correct = ('http://i.b/path?oauth_nonce=14205877133089081931378988215&'
                   'oauth_timestamp=1378988215&'
                   'oauth_version=1.0&'
                   'oauth_signature_method=HMAC-SHA1&'
                   'oauth_consumer_key=client_key&'
                   'oauth_signature=08G5Snvw%2BgDAzBF%2BCmT5KqlrPKo%3D')
        self.assertEqual(u, correct)

    def test_invalid_signature_type(self):
        client = Client('client_key', signature_type='invalid')
        self.assertRaises(ValueError, client.sign, 'http://i.b/path')


class SigningTest(TestCase):

    def test_case_insensitive_headers(self):
        client = Client('client_key')
        # Uppercase
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')
        # Lowercase
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'content-type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['content-type'], 'application/x-www-form-urlencoded')
        # Capitalized
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'Content-type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-type'], 'application/x-www-form-urlencoded')
        # Random
        _, h, _ = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'conTent-tYpe': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['conTent-tYpe'], 'application/x-www-form-urlencoded')

    def test_sign_no_body(self):
        client = Client('client_key', decoding='utf-8')
        self.assertRaises(ValueError, client.sign, 'http://i.b/path',
                          http_method='POST', body=None,
                          headers={'Content-Type': 'application/x-www-form-urlencoded'})

    def test_sign_body(self):
        client = Client('client_key')
        _, h, b = client.sign('http://i.b/path', http_method='POST', body='',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(h['Content-Type'], 'application/x-www-form-urlencoded')

    def test_sign_get_with_body(self):
        client = Client('client_key')
        for method in ('GET', 'HEAD'):
            self.assertRaises(ValueError, client.sign, 'http://a.b/path?query',
                              http_method=method, body='a=b',
                              headers={
                                  'Content-Type': 'application/x-www-form-urlencoded'
                              })

    def test_sign_unicode(self):
        client = Client('client_key', nonce='abc', timestamp='abc')
        _, h, b = client.sign('http://i.b/path', http_method='POST',
                              body='status=%E5%95%A6%E5%95%A6',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, 'status=%E5%95%A6%E5%95%A6')
        self.assertIn('oauth_signature="yrtSqp88m%2Fc5UDaucI8BXK4oEtk%3D"', h['Authorization'])
        _, h, b = client.sign('http://i.b/path', http_method='POST',
                              body='status=%C3%A6%C3%A5%C3%B8',
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, 'status=%C3%A6%C3%A5%C3%B8')
        self.assertIn('oauth_signature="oG5t3Eg%2FXO5FfQgUUlTtUeeZzvk%3D"', h['Authorization'])
| StarcoderdataPython |
1641563 | """
This file must not depend on any other CuPy modules.
"""
import os
import os.path
import shutil
_cuda_path = None
_nvcc_path = None
def get_cuda_path():
# Returns the CUDA installation path or None if not found.
global _cuda_path
if _cuda_path is None:
_cuda_path = _get_cuda_path()
return _cuda_path
def get_nvcc_path():
# Returns the path to the nvcc command or None if not found.
global _nvcc_path
if _nvcc_path is None:
_nvcc_path = _get_nvcc_path()
return _nvcc_path
def _get_cuda_path():
# Use environment variable
cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows
if os.path.exists(cuda_path):
return cuda_path
# Use nvcc path
nvcc_path = shutil.which('nvcc')
if nvcc_path is not None:
return os.path.dirname(os.path.dirname(nvcc_path))
# Use typical path
if os.path.exists('/usr/local/cuda'):
return '/usr/local/cuda'
return None
def _get_nvcc_path():
# Honor the "NVCC" env var
nvcc_path = os.environ.get('NVCC', None)
if nvcc_path is not None:
return nvcc_path
# Lookup <CUDA>/bin
cuda_path = get_cuda_path()
if cuda_path is None:
return None
return shutil.which('nvcc', path=os.path.join(cuda_path, 'bin'))
def _setup_win32_dll_directory():
cuda_path = get_cuda_path()
if cuda_path is None:
raise RuntimeError('CUDA path could not be detected.')
os.add_dll_directory(os.path.join(cuda_path, 'bin'))
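
# Added illustrative check (not in the original module): the lookup helpers
# are side-effect free, so printing the detected paths is safe on any system.
if __name__ == '__main__':
    print('CUDA path:', get_cuda_path())
    print('nvcc path:', get_nvcc_path())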
| StarcoderdataPython |
3372923 | # repository: rlberry-py/rlberry
from rlberry.seeding.seeding import safe_reseed
import gym
import numpy as np
import pytest
from rlberry.seeding import Seeder
from rlberry.envs import gym_make
from copy import deepcopy

gym_envs = [
    'Acrobot-v1',
    'CartPole-v1',
    'MountainCar-v0',
]


def get_env_trajectory(env, horizon):
    states = []
    ss = env.reset()
    for ii in range(horizon):
        states.append(ss)
        ss, _, done, _ = env.step(env.action_space.sample())
        if done:
            ss = env.reset()
    return states


def compare_trajectories(traj1, traj2):
    for ss1, ss2 in zip(traj1, traj2):
        if not np.array_equal(ss1, ss2):
            return False
    return True


@pytest.mark.parametrize("env_name", gym_envs)
def test_env_seeding(env_name):
    seeder1 = Seeder(123)
    env1 = gym_make(env_name)
    env1.reseed(seeder1)

    seeder2 = Seeder(456)
    env2 = gym_make(env_name)
    env2.reseed(seeder2)

    seeder3 = Seeder(123)
    env3 = gym_make(env_name)
    env3.reseed(seeder3)

    if deepcopy(env1).is_online():
        traj1 = get_env_trajectory(env1, 500)
        traj2 = get_env_trajectory(env2, 500)
        traj3 = get_env_trajectory(env3, 500)
        assert not compare_trajectories(traj1, traj2)
        assert compare_trajectories(traj1, traj3)


@pytest.mark.parametrize("env_name", gym_envs)
def test_copy_reseeding(env_name):
    seeder = Seeder(123)
    env = gym_make(env_name)
    env.reseed(seeder)

    c_env = deepcopy(env)
    c_env.reseed()

    if deepcopy(env).is_online():
        traj1 = get_env_trajectory(env, 500)
        traj2 = get_env_trajectory(c_env, 500)
        assert not compare_trajectories(traj1, traj2)


@pytest.mark.parametrize("env_name", gym_envs)
def test_gym_safe_reseed(env_name):
    seeder = Seeder(123)
    seeder_aux = Seeder(123)

    env1 = gym.make(env_name)
    env2 = gym.make(env_name)
    env3 = gym.make(env_name)

    safe_reseed(env1, seeder)
    safe_reseed(env2, seeder)
    safe_reseed(env3, seeder_aux)

    traj1 = get_env_trajectory(env1, 500)
    traj2 = get_env_trajectory(env2, 500)
    traj3 = get_env_trajectory(env3, 500)
    assert not compare_trajectories(traj1, traj2)
    assert compare_trajectories(traj1, traj3)
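
# Added usage sketch (not in the original test file): a quick determinism
# check for a single environment outside pytest, mirroring the tests above.
if __name__ == '__main__':
    e_a, e_b = gym.make('CartPole-v1'), gym.make('CartPole-v1')
    safe_reseed(e_a, Seeder(42))
    safe_reseed(e_b, Seeder(42))   # fresh Seeder, same seed -> same trajectory
    assert compare_trajectories(get_env_trajectory(e_a, 50),
                                get_env_trajectory(e_b, 50))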
| StarcoderdataPython |
1703511 | import flask_wtf
import wtforms
import wtforms.fields.core as wtforms_core
import wtforms.fields.simple as wtforms_simple


class LoginForm(flask_wtf.FlaskForm):
    """Class representing the login form for the application.

    Parameters
    ----------
    FlaskForm : WTForms
        Flask-WTF class that is extended to create the user login form
    """

    email = wtforms_core.StringField(
        "Email",
        validators=[
            wtforms.validators.DataRequired(),
            wtforms.validators.Email(),
        ],
    )
    password = wtforms_simple.PasswordField(
        "Password", validators=[wtforms.validators.DataRequired()]
    )
    remember = wtforms_core.BooleanField("Remember Me")
    submit = wtforms_simple.SubmitField("Login")
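
# Added usage sketch (not part of the original module). The route name,
# template, and login handling are assumptions about the surrounding app:
#
#   from flask import render_template
#
#   @app.route("/login", methods=["GET", "POST"])
#   def login():
#       form = LoginForm()
#       if form.validate_on_submit():
#           ...  # look up the user by form.email.data and log them in
#       return render_template("login.html", form=form)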
| StarcoderdataPython |
1731974 | import coc
import discord
from config import *


async def claim(client_discord,
                connectionBDD,
                args,
                message):
    """claim: links a Clash of Clans player tag to a mentioned Discord user.

    Args:
        client_discord (discord.Client): a client for the Discord API
        connectionBDD (database_outils.appelsBDD): a connector to the database
        args (list): the arguments of the command
        message (discord.message): the triggering message

    Returns:
        [type]: [description]
    """
    # check that we have 3 arguments (the command and the tag) and exactly one mention
    if len(args) != 3 and len(message.mentions) != 1:
        return await message.channel.send("incorrect number of arguments")
    tag = coc.utils.correct_tag(args[1])
    idDiscord = message.mentions[0].id
    try:
        connectionBDD.add_discord_id(tag, idDiscord, 821743088296919041 in list(map(lambda r: r.id, message.author.roles)))  # TODO: add a role check
    except PermissionError:
        return await message.channel.send("already registered in the past; you do not have permission to edit this link.")
    except ValueError:
        try:
            player = await client_discord.cocClient.get_player(tag)
        except coc.NotFound:
            return await message.channel.send("tag does not match any player")
        except coc.Maintenance:
            return await message.channel.send("maintenance")
        th = player.town_hall
        pseudo = player.name
        clan = player.clan.name if player.clan is not None else None
        connectionBDD.check_presence_database(tag, th, pseudo, clan)
        connectionBDD.add_discord_id(tag, idDiscord, False)  # TODO: add a role check
    return await message.channel.send("operation successful")


async def unclaim(client_discord,
                  connectionBDD,
                  args,
                  message):
    """Unlinks a player from the database.

    Args:
        client_discord (discord.Client): a client for the Discord API
        connectionBDD (database_outils.appelsBDD): a connector to the database
        args (list): the arguments of the command
        message (discord.message): the triggering message

    Returns:
        [None]: nothing
    """
    # check that we have 2 arguments: the command and the tag
    if len(args) != 2 and len(message.mentions) != 0:
        return await message.channel.send("incorrect number of arguments")
    tag = coc.utils.correct_tag(args[1])
    if message.author.id in config["liste_id_administratifs"]:
        try:
            connectionBDD.add_discord_id(tag, None, True)
        except ValueError:
            return await message.channel.send("this player does not exist as far as I can tell")
        else:
            return await message.channel.send("operation successful")
    return await message.channel.send("you do not have permission")


async def add_clan(client_discord,
                   ClientCOC,
                   connectionBDD,
                   args,
                   message):
    if len(args) != 3 and len(message.raw_role_mentions) != 1:
        return await message.channel.send("incorrect number of arguments")
    tag = coc.utils.correct_tag(args[1])
    try:
        clan = await ClientCOC.get_clan(tag)
    except coc.NotFound:
        return await message.channel.send("word is that this clan does not exist")
    except coc.Maintenance:
        return await message.channel.send("the API is currently under maintenance")
    except coc.GatewayError:
        return await message.channel.send("gateway error")
    if discord.utils.find(lambda role: role.id == message.raw_role_mentions[0], message.guild.roles) is None:
        return await message.channel.send("please provide a valid role")
    if message.author.id in config["liste_id_administratifs"]:
        try:
            connectionBDD.add_clan(args[1], clan.name, str(message.raw_role_mentions[0]))
        except ValueError:
            return await message.channel.send("you do not have permission")
        else:
            return await message.channel.send("operation successful")
| StarcoderdataPython |
305 | from turtle import Turtle

SPEED = 10


class Ball(Turtle):
    def __init__(self):
        super().__init__()
        self.penup()
        self.color("white")
        self.shape("circle")
        self.move_speed = 0.1
        self.y_bounce = 1
        self.x_bounce = 1

    def move(self):
        new_x = self.xcor() + SPEED * self.x_bounce
        new_y = self.ycor() + SPEED * self.y_bounce
        self.goto(new_x, new_y)

    def reset(self):
        self.goto(0, 0)
        self.move_speed = 0.1
        self.x_bounce *= -1
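
# Added usage sketch (not in the original file): animates the ball on a plain
# turtle Screen; the screen size and bounce threshold are assumptions.
if __name__ == "__main__":
    import time
    from turtle import Screen

    screen = Screen()
    screen.bgcolor("black")
    screen.tracer(0)
    ball = Ball()
    for _ in range(200):
        screen.update()
        time.sleep(ball.move_speed)
        ball.move()
        if abs(ball.ycor()) > 280:  # bounce off the top/bottom edges
            ball.y_bounce *= -1
    screen.bye()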
| StarcoderdataPython |
3305821 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 4 15:23:46 2018
@author: Jared
"""
from collections import Counter
import pymongo
import pandas as pd
from ast import literal_eval
from ml.elements import *
#import machineLearner as ml #get rid of if doing oqmd database
#from qmpy import * #use python 2.7!
from matplotlib import pyplot as plt
import math
#import mysql.connector
import numpy as np
from pandas.plotting import scatter_matrix
import matplotlib.patches as mpatches
import matplotlib
import matplotlib.gridspec as gridspec
# ENERGY OF FORMATION
# dH = totalEnergy - sum(i,x){x*E_i}, x number of atoms of that type
# STABILITY (E_HULL)
# dH_stab = dH - dH_hull
# dH_hull (ev/atom), but calculated a different way than our energy of formation
# We need
# Access Syntax for direct acces to DB
'''
cnx = mysql.connector.connect(user='root', password='<PASSWORD>',
host='127.0.0.1',
database='qmpy_jared')
cursor = cnx.cursor()
cursor.execute("USE qmpy_jared;")
cursor.close()
cnx.close()
'''
# DEFINITIONS FOR OQMD DATA
'''
space = 'Cs-Sn-Br'
comp = 'CsSnBr3'
space = PhaseSpace(space)
energy, phase = space.gclp(comp)
compute_stability
print(energy, phase)
'''
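# Added illustration (not in the original script): the formation-energy
# definition from the header comments above, as a tiny self-contained helper.
# The chemical potentials passed in are placeholders, not DFT values.
def _formation_energy_per_atom(total_energy, counts, mu):
    """dH = (E_total - sum_i n_i * mu_i) / N_atoms, in eV/atom."""
    n_atoms = sum(counts.values())
    return (total_energy - sum(mu[el] * n for el, n in counts.items())) / n_atoms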
def main():
matplotlib.rcParams.update({'font.size': 15.5})
# QUICK LOAD TO AVOID CALCULATION
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
deltaH_qmpy = pd.read_csv(path + 'compEnergy_qmdb_d3.csv')
print('qmpy ', len(deltaH_qmpy))
mng_client = pymongo.MongoClient('localhost', 27017)
db = mng_client['perovskites']
# GET AGGREGATED CRYSTAL DATA FROM MONGODB
df = pd.DataFrame(list(db['qw_outputs_aggregated'].find()))
#df = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/Data/crystalDB3/aggregated_features_14092018.csv')
df_features = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/Data/featureDB2/d2_paper_24102018.csv')
'''
plt.ylabel('$E_{gap}$ (eV)')
plt.xlabel('Iodine Mixing Fraction')
plt.title('Iodine Bandgap Trend')
s = 'fracI'
s2 = 'dir_gap'
y_cl = df_features.groupby([s])[s2].mean()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
plt.scatter(df_features[s], df_features[s2], alpha = 0.2)
p1, = plt.plot(x_cl, y_cl, linestyle = '-', lw = 2, label = 'D$_{3}$')
ax1 = plt.axes()
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
#plt.legend(handles = [p1])
plt.tight_layout()
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
plt.savefig(path + 'dummyTrend_realI.png', dpi = 400, bbox_inches="tight")
plt.show()
'''
#df = df.dropna(axis = 0)
dff = df.drop(df[df['nIterations'] >= 201].index).copy()
dff = dff.drop(df[df['crystal_id'] == 1526850748].index).copy()
df = dff.drop(df[df['crystal_id'] == 1526752626].index).copy()
print('here', len(df))
#deltaH_qmdb = getCrystalOQMDData(df)
# MY CALCULATED FORMATION ENERGY
mu = getMuCorrectedDFT2()
deltaH2_formation = getDeltaH_formation(df, mu)
mu = getMuDFT()
deltaH_formation = getDeltaH_formation(df, mu)
#df_delta = pd.DataFrame(deltaH_formation, columns = 'dH_formation')
#deltaH_formation.to_csv('/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/df_formation.csv')
#plotDeltaH_formation(list(deltaH_formation['deltaH_formation']))
# GEOMETRIC FORMATION ENERGY (BASED ON FIT)
#deltaH_geo = getDeltaH_geo(df)
#deltaH_geo.to_csv('/Users/Jared/Dropbox/Master Thesis/' +
# 'code/codeOutputs/deltaH_geo.csv')
deltaH_geo = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/' +
'code/codeOutputs/deltaH_geo.csv')
print('geo', len(deltaH_geo))
#plotDeltaH_geo(list(deltaH_geo['deltaH_geo']))
# comparison of geometric approach fidelity
'''
plt.plot(deltaH_geo['descriptor'], deltaH['deltaH'], 'o')
plt.xlabel('$(t + \mu)^{\eta}$')
plt.ylabel('$\Delta H_{f}$ (eV/atom)')
plt.title('Formation Energy vs. Geometric Factor')
plt.show()
'''
#error associated with SG15 basis set
#delta = ((10.78 + 8.19 + 7.69 + 0.19)*(4/20) +
# (4.35 + 8.07)*(4/20) +
# (1.9 + 6.03 + 5.53)*(8/20))
# MERGE ALL DATA
result = pd.merge(deltaH_formation, deltaH_qmpy, on=['crystal_id'])
result = pd.merge(result, deltaH_geo, on=['crystal_id'])
result= pd.merge(result, df_features, on=['crystal_id'])
result_corrected = pd.merge(deltaH2_formation, deltaH_qmpy, on=['crystal_id'])
result_corrected = pd.merge(result_corrected, deltaH_geo, on=['crystal_id'])
result_corrected = pd.merge(result_corrected, df_features, on=['crystal_id'])
sresult = result_corrected
'''
result = result[result.crystal_id != 1519471915]
result = result[result.crystal_id != 1519608323]
result = result[result.crystal_id != 1519429441]
result = result[result.crystal_id != 1520265350]
result = result[result.crystal_id != 1520268226]
result = result[result.crystal_id != 1520334800]
result = result[result.crystal_id != 1520343157]
result = result[result.crystal_id != 1520349833]
result = result[result.crystal_id != 1520411007]
result = result[result.crystal_id != 1520429554]
result = result[result.crystal_id != 1520442584]
result = result[result.crystal_id != 1520483780]
'''
# big plot
my_dpi = 500
fig = plt.figure(figsize=(5, 5), dpi=my_dpi)
m = np.array((list(result['deltaH_formation'] - result['deltaH_hull'])))
m = m.mean()
m = 0.150 # 150 meV reference line
ymin = 1.12*min(result['deltaH_hull']) if min(result['deltaH_hull']) <=0 else 0.88*min(result['deltaH_hull'])
ymax = 1.12*max(result['deltaH_hull']) if max(result['deltaH_hull']) >=0 else 0.88*max(result['deltaH_hull'])
xmax = ymax
plt.ylim(ymin, ymax)
plt.xlim(ymin, xmax)
xy = [min(result['deltaH_hull']), max(result['deltaH_hull'])]
xy = [ymin, ymax]
p1, = plt.plot(xy, xy, color = 'k', label = '$E_{hull}$')
p0c, = plt.plot(result['deltaH_hull'],
result_corrected['deltaH_formation'], 'o',
alpha = 0.5, color = 'r', label = '$\mu_{corrected}$')
p0, = plt.plot(result['deltaH_hull'],
result['deltaH_formation'], 'o',
alpha = 0.5, label = '$\mu$')
#p1, = plt.plot(xy, xy, color = 'k', label = '$E_{hull}$')
#xy = [min(result['deltaH_hull']), max(result['deltaH_hull'])]
#p2, = plt.plot(xy, [i + m for i in xy], alpha = 1.0,
# color = 'k',
# label = '$\Delta E_{hull}$ = 100 meV',
# linestyle = '--', linewidth = 3.0)
plt.xlabel('$\Delta H_{f, OQMD}$ (eV/atom)')
plt.ylabel('$\Delta H_{f}$ (eV/atom)')
plt.title('Convex Hull Distance', y = 1.04)
plt.legend(handles = [p0c, p0, p1])
ax1 = plt.axes()
ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
plt.savefig(path + 'paper_oqmdb_new1.png', dpi=400, bbox_inches="tight")
plt.show()
'''
# hist plot
c, d, e = plt.hist(list(result['deltaH_formation'] - result['deltaH_hull']), bins = 21)
plt.setp(e, edgecolor='w', lw=1, alpha = 0.7)
#plt.title('Stability of ' + str(len(result)) + ' Compounds')
#plt.xlabel('$E_{hull}$ distance (eV)')
#plt.ylabel('Count')
c, d, e = plt.hist(
list(result_corrected['deltaH_formation'] -
result['deltaH_hull']), bins = 21, color = 'r')
plt.setp(e, edgecolor='w', lw=1, alpha = 0.7)
plt.title('D$_{3}$ Hull Distance')
plt.xlabel('$\Delta E_{hull}$ (eV)')
plt.ylabel('Count')
ax1 = plt.axes()
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
plt.savefig(path + 'oqmdb_new1.png', dpi=400, bbox_inches="tight")
plt.show()
'''
#sresult = result_corrected.copy() #result_corrected[['fracCl','fracBr',
# 'fracI', 'fracCs',
#'fracRb', 'fracNa',
#'fracK', 'fracSn',
# 'fracGe', 'deltaH_hull']]
#plt.scatter(result['fracCl'], result['deltaH_hull'])
#print(sresult['t'])
print(len(sresult))
#
#
# lattice validity
t1 = 2*(sresult['lb'].values)/(sresult['la'].values)
t2 = 2*(sresult['lb'].values)/(sresult['lc'].values)
'''
blue_patch = mpatches.Patch(color='blue', label='2*lb/la')
red_patch = mpatches.Patch(color='red', label='2*lb/lc')
c2, d2, e2 = plt.hist(t1, bins = 21, color = 'b')
plt.setp(e2, edgecolor='w', lw=1, alpha = 0.7)
c1, d1, e1 = plt.hist(t2, bins = 21, color = 'r')
plt.setp(e1, edgecolor='w', lw=1, alpha = 0.7)
plt.legend(handles=[blue_patch, red_patch])
plt.title('D$_{3}$ Perovskite Validity')
plt.xlabel('Lattice Vector Ratio')
plt.ylabel('Count')
plt.show()
'''
sresult['hullDistance'] = list(result_corrected['deltaH_formation'] -
result_corrected['deltaH_hull'])
sresult['deltaH_formation'] = list(result_corrected['deltaH_formation'])
'''
#
#
# goldshmitd vs dhhull
plt.scatter(sresult['t'].values, sresult['hullDistance'].values)
plt.show()
#
#
# goldschmidt validity
#plt.hist(sresult['t'].values)
c1, d1, e1 = plt.hist(sresult['t'].values, bins = 21)
plt.setp(e1, edgecolor='w', lw=1)
plt.title('D$_{3}$ Perovskite Validity')
plt.xlabel('Goldschmidt Tolerance Factor')
plt.ylabel('Count')
plt.show()
'''
plt.ylabel('$\Delta E_{hull}$ (eV)')
plt.xlabel('Sodium Mixing Fraction')
plt.title('Sodium $\Delta E_{hull}$ Trend')
s = 'fracNa'
s2 = 'hullDistance'
y_cl = sresult.groupby([s])[s2].mean()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
plt.scatter(sresult[s], sresult[s2], alpha = 0.2)
plt.plot(x_cl, y_cl, linestyle = '-', lw = 2, label = 'D$_{3}$')
ax1 = plt.axes()
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
#plt.legend(handles = [p1])
plt.tight_layout()
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
plt.savefig(path + 'dummyTrend_realNa.png', dpi = 400, bbox_inches="tight")
plt.show()
return  # halt here; the exploratory plotting code below is unreachable
# run each of these with d3 data
s = 'dir_gap'
sname = '$E_{gap}$ (eV)'
fname = 'eGap'
'''
s = 'deltaH_hull'
s = 'hullDistance'
sname = '$\Delta E_{hull}$ (eV)'
fname = 'dh_hull'
s = 'deltaH_formation'
sname = '$\Delta H_{f}$ (eV/atom)'
fname = 'dh_form'
# goldschmidt
s = 't' #'dir_gap'
sname = '$t$'
fname = 'gold'
#lattice
sresult['t1'] = t2
s = 't1' #'dir_gap'
sname = '2*lb/la'
fname = '2lbla'
'''
glist = [g for g in sresult.groupby(['fracCl'])[s]]
print(type(sresult[s].values[0]))
y_cl = sresult.groupby(['fracCl'])[s].mean()
y_cl_sd = sresult.groupby(['fracCl'])[s].std()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
y_br = sresult.groupby(['fracBr'])[s].mean()
y_br_sd = sresult.groupby(['fracBr'])[s].std()
x_br = np.array([i for i in y_br.index])
y_br = y_br.values
y_i = sresult.groupby(['fracI'])[s].mean()
y_i_sd = sresult.groupby(['fracI'])[s].std()
x_i = np.array([i for i in y_i.index])
y_i = y_i.values
y_cs = sresult.groupby(['fracCs'])[s].mean()
y_cs_sd = sresult.groupby(['fracCs'])[s].std()
x_cs = np.array([i for i in y_cs.index])
y_cs = y_cs.values
y_rb = sresult.groupby(['fracRb'])[s].mean()
y_rb_sd = sresult.groupby(['fracRb'])[s].std()
x_rb = np.array([i for i in y_rb.index])
y_rb = y_rb.values
y_na = sresult.groupby(['fracNa'])[s].mean()
y_na_sd = sresult.groupby(['fracNa'])[s].std()
x_na = np.array([i for i in y_na.index])
y_na = y_na.values
y_k = sresult.groupby(['fracK'])[s].mean()
y_k_sd = sresult.groupby(['fracK'])[s].std()
x_k = np.array([i for i in y_k.index])
y_k = y_k.values
y_sn = sresult.groupby(['fracSn'])[s].mean()
y_sn_sd = sresult.groupby(['fracSn'])[s].std()
x_sn = np.array([i for i in y_sn.index])
y_sn = y_sn.values
y_ge = sresult.groupby(['fracGe'])[s].mean()
y_ge_sd = sresult.groupby(['fracGe'])[s].std()
x_ge = np.array([i for i in y_ge.index])
y_ge = y_ge.values
y = (sresult['deltaH_hull'].values)
#scatter_matrix(sresult, alpha=0.2, figsize=(6, 6), diagonal='kde')
#f, ax = plt.subplots(3, sharey = True)
plt.figure(figsize = (5,15.2))
gs1 = gridspec.GridSpec(3, 1)
gs1.update(wspace=0.0, hspace=0.0)
cs = 8
alpha = 0.3
ax1 = plt.subplot(gs1[0])
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
ax1.set_ylabel(sname)
ax1.scatter(x_cl, y_cl, color = 'C0')
ax1.plot(x_cl, y_cl, linestyle = '--', label = 'Cl', color = 'C0')
ax1.errorbar(x_cl, y_cl, yerr=y_cl_sd,
capsize = cs, fmt='none', color = 'C0', alpha = alpha)
ax1.scatter(x_br, y_br, color = 'C1')
ax1.plot(x_br, y_br, linestyle = '--', label = 'Br', color = 'C1')
ax1.errorbar(x_br, y_br, yerr=y_br_sd,
capsize = cs, fmt='none', color = 'C1', alpha = alpha)
ax1.scatter(x_i, y_i, color = 'C2')
ax1.plot(x_i, y_i, linestyle = '--', label = 'I', color = 'C2')
ax1.errorbar(x_i, y_i, yerr=y_i_sd,
capsize = cs, fmt='none', color = 'C2', alpha = alpha)
label = 'X-Site'
ax1.annotate(label, (0.78, 0.87), xycoords='axes fraction', va='center')
ax1.legend(loc = 2)
ax1.set_title('D$_{3}$ Stability Trends')
#ax1.xlabel('Mxing Fraction')
#ax1.ylabel(sname)
#plt.savefig(path + fname + '-x-site-stability.png', dpi=400, bbox_inches="tight")
#plt.show()
ax2 = plt.subplot(gs1[1])
ax2.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
ax2.set_ylabel(sname)
ax2.scatter(x_sn, y_sn, color = 'C0')
ax2.plot(x_sn, y_sn, linestyle = '--', label = 'Sn', color = 'C0')
ax2.errorbar(x_sn, y_sn, yerr=y_sn_sd,
capsize = cs, fmt='none', color = 'C0', alpha = alpha)
ax2.scatter(x_ge, y_ge, color = 'C1')
ax2.plot(x_ge, y_ge, linestyle = '--', label = 'Ge', color = 'C1')
ax2.errorbar(x_ge, y_ge, yerr=y_ge_sd,
capsize = cs, fmt='none', color = 'C1', alpha = alpha)
label = 'B-Site'
ax2.annotate(label, (0.78, 0.87), xycoords='axes fraction', va='center')
ax2.legend(loc = 2)
#plt.title('D$_{3}$ Stability Trends (B-site)')
#plt.xlabel('Mixing Fraction')
#plt.ylabel(sname)
#plt.savefig(path + fname + '-b-site-stability.png', dpi=400, bbox_inches="tight")
#plt.show()
ax3 = plt.subplot(gs1[2])
ax3.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
ax3.set_ylabel(sname)
ax3.scatter(x_rb, y_rb, color = 'C0')
ax3.plot(x_rb, y_rb, linestyle = '--', label = 'Rb', color = 'C0')
ax3.errorbar(x_rb, y_rb, yerr=y_rb_sd,
capsize = cs, fmt='none', color = 'C0', alpha = alpha)
ax3.scatter(x_cs, y_cs, color = 'C1')
ax3.plot(x_cs, y_cs, linestyle = '--', label = 'Cs', color = 'C1')
ax3.errorbar(x_cs, y_cs, yerr=y_cs_sd,
capsize = cs, fmt='none', color = 'C1', alpha = alpha)
ax3.scatter(x_na, y_na, color = 'C2')
ax3.plot(x_na, y_na, linestyle = '--', label = 'Na', color = 'C2')
ax3.errorbar(x_na, y_na, yerr=y_na_sd,
capsize = cs, fmt='none', color = 'C2', alpha = alpha)
ax3.scatter(x_k, y_k, color = 'C3')
ax3.plot(x_k, y_k, linestyle = '--', label = 'K', color = 'C3')
ax3.errorbar(x_k, y_k, yerr=y_k_sd,
capsize = cs, fmt='none', color = 'C3', alpha = alpha)
label = 'A-Site'
ax3.annotate(label, (0.78, 0.87), xycoords='axes fraction', va='center')
ax3.legend(loc = 2)
plt.xlabel('Mixing Fraction')
#plt.title('D$_{3}$ Stability Trends (A-site)')
#plt.xlabel('Mixing Fraction')
#plt.ylabel(sname)
#plt.savefig(path + fname + '-a-site-stability.png', dpi=400, bbox_inches="tight")
plt.savefig(path + fname + '-t-trend.png', dpi=400, bbox_inches="tight")
plt.show()
#plt.scatter(x_br, y)
#plt.scatter(x_i, y)
#axarr[0].plot(x, y)
#axarr[0].set_title('Sharing X axis')
#axarr[1].scatter(x, y)
#ax[0].set_title('Simple plot')
#plt.scatter(x, y)
'''
c, d, e = plt.hist(list(deltaH_formation['deltaH_formation']), bins = 21)
plt.setp(e, edgecolor='w', lw=1)
plt.title('Formation Energy of ' + str(len(deltaH_formation)) + ' Compounds')
plt.xlabel('$\Delta H_{f}$ (eV/atom)')
plt.ylabel('Count')
plt.savefig(path + 'oqmdb_new2.png', dpi=400, bbox_inches="tight")
plt.show()
'''
# VARIANCE FROM CUBIC STRUCTURE
'''
plt.plot(1000*(result['deltaH_formation'] - result['deltaH_hull']), 'og')
plt.title('Energy above $E_{hull}$ (meV)')
plt.show()
plt.plot(deltaH_geo['t'], 'p')
plt.title('Goldschmidt Tolerance Factor')
plt.show()
plt.plot(100*abs(1 - deltaH_geo['b/a']), 'p')
plt.title('b/a lattice distortion percent (%)')
plt.show()
plt.plot(100*abs(1 - deltaH_geo['c/a']), 'p')
plt.title('c/a lattice distortion percent (%)')
plt.show()
'''
def calculateOQMDData(row_elements):
    # Build the composition string (e.g. "Cs4Sn4Br12"), the phase-space
    # string (e.g. "Cs-Sn-Br"), and the total atom count for qmpy.
    counts = Counter(convertElementsLong2Short(
        literal_eval(row_elements)))
    comp = ''
    space = ''
    s = 0.0
    for el in counts:
        comp += str(el) + str(counts[el])
        s += counts[el]
        space += str(el) + '-'
    space = space[0:-1]
    return comp, space, s
def getCrystalOQMDData(df):
#df = df.copy().head()
fname = 'compEnergy_qmdb_d3_junk.csv'
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
print('Computing compositional energy')
# PRE-ALLOCATE DATAFRAME
df_columns = ['crystal_id',
'totalEnergy_hull',
'deltaH_hull',
'phase_sum',
'pEnergy',
'compSum',
'stablePhase']
compEnergy = pd.DataFrame(index = range(df.shape[0]),
columns = df_columns)
deltaH = []
for index, row in df.iterrows():
print(index + 1, 'out of', len(df))
counts = Counter(convertElementsLong2Short(
literal_eval(row['elements'])))
# build composition A1B2C3, and space A-B-C strings for qmpy
comp = ''
space = ''
s = 0.0
for el in counts:
comp += str(el) + str(counts[el])
s += counts[el]
space += str(el) + '-'
space = space[0:-1]
space = PhaseSpace(space)
energy, phase = space.gclp(comp)
#mu = getMuDFT()
mu = getMuCorrectedDFT2()
phase_sum = 0
pEnergy = []
compSum = []
for p in phase: # CsI, GeI2 are phases of CsGeI3 (dummy example)
comp_sum = 0
for el in p.comp: # each element, ex: Cs, I, in CsI
# total energy * number of that element in this compostion
comp_sum += mu[str(el)]*p.comp[el]
pEnergy += [p.energy] # the energy of that phase (eV/atom)
compSum += [comp_sum] # the total energy of that phase's constituents
# still need the formation energy... but I don't calculate it for all these
# compounds.. that is what we are trying to extrapolate
# NEED TO NORMALIZE BY NUMBER OF ATOMS
phase_sum += comp_sum # phase_sum is total energy of all phases,
#should just be same as compSum from deltaH_formationenergy
#plt.plot(pEnergy, compSum)
##total_energy_estimated += sum(
# [mu[str(el)]*int(p.comp[el])*int(phase[p])
# for el in p.comp])
#print(total_energy_estimated/s)
deltaH += [energy/s]
stablePhase = (len(phase) == 1)
compEnergy.loc[index] = [str(row['crystal_id']),
energy,
energy/s,
phase_sum,
str(pEnergy),
str(compSum),
stablePhase]
print(str(pEnergy))
with open(path + fname, 'w') as f:
compEnergy.to_csv(f, index = None)
#plt.show()
return(deltaH)
def plotDeltaH_formation(deltaH):
    c, d, e = plt.hist(deltaH, bins=20)
    plt.setp(e, edgecolor='w', lw=1)
    plt.xlabel('$\Delta H_{f}$ (eV/atom)')
    plt.ylabel('Number of Compounds')
    # plt.axvline(x=0.0, color='r', linestyle='-')
    plt.title('Formation Energy of Database')
    plt.plot()
    plt.show()
def plotDeltaH_geo(deltaH):
# positive H stable
plt.plot(deltaH, 'o')
plt.xlabel('Index')
plt.ylabel('deltaH (Predicted)')
plt.title('Stability Prediction: $-1.987 + 1.66(t + \mu)^{\eta}$')
plt.axhline(y=0.0, color='r', linestyle='-')
stableNum = len([a for a in deltaH if a >= 0])
print('\n\n ' + str(stableNum) + ' stable compounds of ' + str(len(deltaH)) +
' total. (' + str(round(100*stableNum/len(deltaH), 2)) + '%)')
savePath = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
plt.savefig(savePath + 'stability01.png', pad_inches=0.4,
dpi=200, bbox_inches='tight')
plt.plot()
plt.show()
c, d, e = plt.hist(deltaH, bins = 21)
print('here2')
plt.setp(e, edgecolor='w', lw=1)
plt.xlabel('$\Delta H_{d}$ (eV/atom)')
plt.ylabel('Number of Compounds')
plt.axvline(x=0.0, color='r', linestyle='-')
plt.title('Stability Prediction: $-1.987 + 1.66(t + \mu)^{\eta}$')
plt.plot()
plt.savefig(savePath + 'stability02.png', pad_inches=0.4,
dpi=200, bbox_inches='tight')
plt.show()
def getDeltaH_geo(df):
# SPECIFY POSSIBLE SITE OCCUPATIONS
Asites = ['Cs', 'Rb', 'Na', 'K']
Bsites = ['Sn', 'Ge']
Xsites = ['I', 'Cl', 'Br']
# IONIC RADIAL PROPERTY SPECIFICATION FOR CALCULATIONS
# PRE-ALLOCATE DATAFRAME
df_columns = ['crystal_id', 'deltaH_geo',
't', 'mu', 'eta', 'descriptor',
'b/a', 'c/a']
deltaH_df = pd.DataFrame(index = range(df.shape[0]),
columns = df_columns)
for index, row in df.iterrows():
#print('GEO: ', index, ' of ', len(df) - 1)
# GET VOLUME
v = literal_eval(row['cellPrimVectors_end']) # GET LATTICE VECTORS
la = v[0]
lb = v[4]
lc = v[8]
volume = la*lb*lc
# GET ELEMENT COUNTS
counts = Counter(convertElementsLong2Short(
literal_eval(row['elements'])))
# GET IONIC RADII OF ELEMENTS
#properties = ['X', 'EI1', 'rp', 'rs', 'ra', 'EI2',
# 'l', 'h', 'fermi', 'ir']
#properties = ml.getPropertyMixerLabels(properties)
A_ir = []
B_ir = []
X_ir = []
for e in counts:
v = counts[e] # NUMBER OF ELEMENTS OF THIS TYPE
if e in Asites:
A_ir += [getElementFeature()[e]['ir']]*v
if e in Bsites:
B_ir += [getElementFeature()[e]['ir']]*v
if e in Xsites:
X_ir += [getElementFeature()[e]['ir']]*v
# GET EFFECTIVE IONIC RADII BY AVERAGING OVER ALL
# A, B, AND X SITE ATOMS
Aeff = sum(A_ir)/4.0
Beff = sum(B_ir)/4.0
Xeff = sum(X_ir)/12.0
# GET TOTAL VOLUME OF ELEMENTS
A_volumes = sum([(4./3)*math.pi*r**3 for r in A_ir])
B_volumes = sum([(4./3)*math.pi*r**3 for r in B_ir])
X_volumes = sum([(4./3)*math.pi*r**3 for r in X_ir])
# GOLDSCHMIDT TOLERANCE
t = (Aeff + Xeff)/(math.sqrt(2.0)*(Beff + Xeff))
# OCTAHEDRAL
mu = Beff/Xeff
# PACKING FACTOR
eta = sum([A_volumes, B_volumes, X_volumes])/volume
descriptor = (t + mu)**eta
# PREDICTED DECOMPOSITION ENERGY (POSITIVE MEANS STABLE)
#https://pubs.acs.org/doi/suppl/10.1021/
#jacs.7b09379/suppl_file/ja7b09379_si_001.pdf
deltaH_geo = -1.987 + 1.660*(t + mu)**eta #(eV)
deltaH_df.loc[index] = [row['crystal_id'], deltaH_geo,
t, mu, eta, descriptor,
2*lb/la, lc/la]
# since deltaH_p positive means stable
print('done')
return deltaH_df
def calculateDeltaH_formation(row_elements, row_totalEnergy, mu):
    # GET ELEMENT COUNTS
    counts = Counter(convertElementsLong2Short(
        literal_eval(row_elements)))
    # mu = getMuDFT()  # eV
    mus = [mu[el] * counts[el] for el in counts]
    muSum = sum(mus)  # eV
    deltaH_result = (row_totalEnergy - muSum) / len(literal_eval(row_elements))
    return deltaH_result, muSum
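
# Added usage sketch (not part of the original script); the element list,
# total energy, and chemical potentials are made-up numbers that only
# illustrate the call signature:
#
#   mu_demo = {'Cs': -0.9, 'Sn': -3.8, 'Br': -1.6}
#   dH, mu_sum = calculateDeltaH_formation(
#       "['Caesium', 'Tin', 'Bromine']", -10.0, mu_demo)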
def getDeltaH_formation(df, mu):
# PRE-ALLOCATE DATAFRAME
df_columns = ['crystal_id',
'totalEnergy',
'deltaH_formation',
'compSum_formation']
deltaH_df = pd.DataFrame(index = range(df.shape[0]),
columns = df_columns)
for index, row in df.iterrows():
#print('FORMATION: ', index, ' of ', len(df) - 1)
'''
# GET ELEMENT COUNTS
counts = Counter(convertElementsLong2Short(
literal_eval(row['elements'])))
#if(counts['Br'] == 12 and counts['Cs'] == 4 and counts['Sn'] == 4):
mu = getMuDFT()
mus = [mu[el]*counts[el] for el in counts]
muSum = sum(mus)
#print(row['directGap'], volume, row['totalEnergy'],-1*mus)
#print(row['volume'])
#print(counts, (row['totalEnergy'] - mus)/20.)
deltaH_result = (row['totalEnergy'] - muSum)/20.
'''
deltaH_result, muSum = calculateDeltaH_formation(row['elements'],
row['totalEnergy'],
mu)
# GET VOLUME
'''
v = literal_eval(row['cellPrimVectors_end']) # GET LATTICE VECTORS
la = v[0]
lb = v[4]
lc = v[8]
volume = la*lb*lc
'''
deltaH_df.loc[index] = [row['crystal_id'],
row['totalEnergy'],
deltaH_result,
muSum]
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
deltaH_df.to_csv(path + 'formationEnergy.csv')
return deltaH_df
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1622760 | import json
import os

import pytz


def _bool_convert(value):
    truthy = {"t", "true", "on", "y", "yes", "1", 1, 1.0, True}
    falsy = {"f", "false", "off", "n", "no", "0", 0, 0.0, False}
    if isinstance(value, str):
        value = value.lower()
    if value in truthy:
        return True
    if value in falsy:
        return False
    return bool(value)
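
# Added illustration (not part of the original module): _bool_convert accepts
# the usual truthy/falsy spellings found in environment variables, e.g.
#   _bool_convert("Yes") -> True    (case-insensitive truthy spelling)
#   _bool_convert("0")   -> False
#   _bool_convert(2.5)   -> True    (falls back to bool() for unknown values)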

class Db:
    if os.environ.get('DB_USERNAME') is not None:
        db_username = os.environ.get('DB_USERNAME')
        db_password = os.environ.get('DB_PASSWORD')
        db_port = os.environ.get('DB_PORT', '5432')
        db_host = os.environ.get('DB_HOST', 'localhost')
        db_database = os.environ.get('DB_DATABASE', 'termine')
        url = f"postgresql://{db_username}:{db_password}@{db_host}:{db_port}/{db_database}"
    else:
        url = os.environ.get(
            "DB_URL", 'postgresql://postgres:example@localhost:5432/termine')


class Settings:
    claim_timeout_min = int(os.environ.get("CLAIM_TIMEOUT_MIN", 5))
    num_display_slots = int(os.environ.get("DISPLAY_SLOTS_COUNT", 150))
    tz = pytz.timezone(os.environ.get("TERMINE_TIME_ZONE", 'Europe/Berlin'))
    disable_auth_for_booking = _bool_convert(
        os.environ.get("DISABLE_AUTH", False))
    use_ldap = _bool_convert(os.environ.get("USE_LDAP", False))
    jwt_key = os.environ.get("JWT_SECRET_KEY", "")


class Ldap:
    url = os.environ.get("LDAP_URL", "")
    user_dn = os.environ.get("LDAP_SYSTEM_DN", "")
    user_pw = os.environ.get("LDAP_SYSTEM_USER_PW", "")
    user_coupon_number = int(os.environ.get("LDAP_USER_COUPONS", 3))
    search_base = os.environ.get("LDAP_SEARCH_BASE", "")
    search_filter = os.environ.get("LDAP_SEARCH_FILTER", "")
    search_attribute = os.environ.get("LDAP_ATTRIBUTE", "")
    use_tls = _bool_convert(os.environ.get("LDAP_USE_TLS", False))
    port = int(os.environ.get("LDAP_PORT", 389))
    tls_port = int(os.environ.get("LDAP_TLS_PORT", 636))


class FrontendSettings:
    _inst = None

    @classmethod
    def by_env(cls):
        env_name = os.environ.get("ENVIRONMENT", "local")
        with open(os.path.join("config", 'by_env', f'{env_name}.json')) as file:
            frontend_conf = json.load(file)
        return frontend_conf

    @classmethod
    def instance_by_env(cls):
        if not cls._inst:
            cls._inst = cls.by_env()
        return cls._inst

    @classmethod
    def json_by_env(cls):
        return json.dumps(cls.instance_by_env())


seed = os.environ.get("PASSWORD_HASH_SEED_DO_NOT_CHANGE", 'Wir sind SEEED')
| StarcoderdataPython |
78114 | # hms/migrations/0001_initial.py
# Generated by Django 3.2.4 on 2021-06-08 17:04

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_pic', models.ImageField(default='profile_photos/Doctor/default_ku6ks9.jpg', upload_to='profile_photos/Doctor/')),
                ('address', models.CharField(max_length=40)),
                ('mobile', models.CharField(max_length=20, null=True)),
                ('department', models.CharField(choices=[('Cardiologist', 'Cardiologist'), ('Dermatologist', 'Dermatologist'), ('Emergency Medicine Specialist', 'Emergency Medicine Specialist'), ('Allergist/Immunologist', 'Allergist/Immunologist'), ('Anesthesiologist', 'Anesthesiologist'), ('Colon and Rectal Surgeon', 'Colon and Rectal Surgeon')], default='Cardiologist', max_length=50)),
                ('status', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| StarcoderdataPython |
1624524 | from django import forms
from django.utils.html import strip_tags

from .forms import HTMLField
from .widgets import HTMLFieldWidget


def _process_checkbox(field_json):
    field = forms.MultipleChoiceField()
    field.widget = forms.CheckboxSelectMultiple()
    return field


def _process_date(field_json):
    field = forms.DateField()
    field.widget.input_type = "date"
    return field


def _process_email(field_json):
    return forms.EmailField()


def _process_hidden(field_json):
    field = forms.CharField()
    field.widget = forms.HiddenInput()
    return field


def _process_number(field_json):
    return forms.FloatField(
        min_value=field_json.get("min", None),
        max_value=field_json.get("max", None),
        widget=forms.NumberInput(attrs={'step': field_json.get("step", "any")})
    )


def _process_radio(field_json):
    field = forms.ChoiceField()
    field.widget = forms.RadioSelect(
        attrs={"required": field_json.get("required", False)}
    )
    return field


def _process_select(field_json):
    if field_json.get('multiple', False):
        return forms.MultipleChoiceField()
    return forms.ChoiceField()


def _process_text_input(field_json):
    return forms.CharField(max_length=field_json.get("maxlength", None))


def _process_text_area(field_json):
    field = forms.CharField()
    field.widget = forms.Textarea()
    return field


def _process_url(field_json):
    return forms.URLField()


def _process_heading(field_json):
    field = HTMLField()
    field.widget = HTMLFieldWidget(params=field_json)
    return field


def _process_paragraph(field_json):
    field = HTMLField()
    field.widget = HTMLFieldWidget(params=field_json)
    return field


TYPE_MAPPING = {
    'checkbox-group': _process_checkbox,
    'date': _process_date,
    'email': _process_email,
    'hidden': _process_hidden,
    'number': _process_number,
    'radio-group': _process_radio,
    'select': _process_select,
    'text': _process_text_input,
    'textarea': _process_text_area,
    'header': _process_heading,
    'paragraph': _process_paragraph,
    'url': _process_url
}


def process_field_from_json(field_json):
    if not isinstance(field_json, dict):
        raise TypeError("Each field JSON must be a dictionary")

    field_type = field_json['type']
    if field_type == 'text':
        field_type = field_json.get('subtype', 'text')

    common_field_attrs = {
        'required': field_json.get('required', False),
        'label': strip_tags(field_json.get('label', None)),
        'initial': field_json.get('value', None),
        'help_text': field_json.get('description', None),
    }
    common_widget_attrs = {
        'required': field_json.get('required', False),
        'placeholder': field_json.get('placeholder', False),
        'class': field_json.get('className', False),
    }

    field = TYPE_MAPPING[field_type](field_json)
    for attr, val in common_field_attrs.items():
        if field_type not in ['paragraph', 'header', 'hidden']:
            setattr(field, attr, val)
    if field_type not in ['radio-group', 'hidden']:
        for attr, val in common_widget_attrs.items():
            field.widget.attrs[attr] = val
    if field_type in ['checkbox-group', 'radio-group', 'select']:
        choices = [
            (choice['value'], choice['label']) for choice in field_json['values']
        ]
        field.choices = choices
        field.widget.choices = choices
    if field_type == 'hidden':
        setattr(field, 'initial', field_json.get('value', None))
    return field


def gen_fields_from_json(form_json):
    if not isinstance(form_json, list):
        raise TypeError("Form JSON must be a list.")
    fields = []
    for field_json in form_json:
        fields.append(process_field_from_json(field_json))
    return fields
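
# Added usage sketch (not part of the original module; assumes a configured
# Django project, since HTMLField/HTMLFieldWidget come from this app). The
# JSON shape mirrors formBuilder-style output with made-up field values:
#
#   form_json = [
#       {"type": "text", "label": "Full name", "required": True},
#       {"type": "select", "label": "Color",
#        "values": [{"label": "Red", "value": "r"},
#                   {"label": "Blue", "value": "b"}]},
#   ]
#   fields = gen_fields_from_json(form_json)  # -> [CharField, ChoiceField]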
| StarcoderdataPython |
84058 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.

import time

import pytest

from tests.common.nebula_test_suite import NebulaTestSuite, T_NULL


class TestInsert2(NebulaTestSuite):
    '''
    The tests for nebula2
    '''
    @classmethod
    def prepare(self):
        resp = self.execute('CREATE SPACE IF NOT EXISTS mySpace2(partition_num=1, vid_size=10);'
                            'USE mySpace2;'
                            'CREATE TAG student(name string NOT NULL, age int);'
                            'CREATE TAG course(name fixed_string(5) NOT NULL, introduce string DEFAULT NULL);')
        self.check_resp_succeeded(resp)
        time.sleep(self.delay)

    @classmethod
    def cleanup(self):
        resp = self.execute('DROP SPACE mySpace2')
        self.check_resp_succeeded(resp)

    def test_insert_out_of_range_id_size(self):
        resp = self.execute('INSERT VERTEX student(name, age) VALUES "12345678901":("Tom", "2")')
        self.check_resp_failed(resp)

    def test_insert_not_null_prop(self):
        resp = self.execute('INSERT VERTEX student(name, age) VALUES "Tom":(NULL, 12)')
        self.check_resp_failed(resp)

    def test_insert_with_fix_string(self):
        # succeeded
        resp = self.execute('INSERT VERTEX course(name) VALUES "Math":("Math")')
        self.check_resp_succeeded(resp)
        # out of range: the fixed_string(5) column truncates "English"
        resp = self.execute('INSERT VERTEX course(name) VALUES "English":("English")')
        self.check_resp_succeeded(resp)
        # check
        resp = self.execute_query('FETCH PROP ON course "English"')
        self.check_resp_succeeded(resp)
        expect_result = [['English', 'Engli', T_NULL]]
        self.check_out_of_order_result(resp, expect_result)
| StarcoderdataPython |
180252 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 17:50:06 2020

@author: <NAME>
"""


def CLEARLAYOUT(layout):
    # Recursively remove every widget and nested layout from a Qt layout.
    for i in reversed(range(layout.count())):
        layoutItem = layout.itemAt(i)
        if layoutItem.widget() is not None:
            widgetToRemove = layoutItem.widget()
            widgetToRemove.setParent(None)
            layout.removeWidget(widgetToRemove)
        else:
            layoutToRemove = layout.itemAt(i)
            CLEARLAYOUT(layoutToRemove)
| StarcoderdataPython |
3311682 | # ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
#     <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Sparse modules."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from dragon.vm.torch.core.nn import functional as F
from dragon.vm.torch.core.nn import init
from dragon.vm.torch.core.nn.modules.module import Module
from dragon.vm.torch.core.nn.parameter import Parameter
from dragon.vm.torch.core.tensor import Tensor


class Embedding(Module):
    """Lookup the embeddings of a fixed dictionary."""

    def __init__(self, num_embeddings, embedding_dim, padding_idx=None):
        """Create an ``Embedding`` module.

        Parameters
        ----------
        num_embeddings : int
            The dictionary size.
        embedding_dim : int
            The embedding dimension.
        padding_idx : int, optional
            The position where to return zeros.

        """
        super(Embedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                if padding_idx >= self.num_embeddings:
                    raise ValueError('<padding_idx> must be within <num_embeddings>.')
            elif padding_idx < 0:
                if padding_idx < -self.num_embeddings:
                    raise ValueError('<padding_idx> must be within <num_embeddings>.')
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.weight = Parameter(Tensor(num_embeddings, embedding_dim))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        init.normal_(self.weight)
        if self.padding_idx is not None:
            self.weight[self.padding_idx] = 0

    def forward(self, input):
        return F.embedding(input, self.weight, self.padding_idx)
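
# Added usage sketch (not part of the original module; the input construction
# is an assumption about dragon's torch-compatible API):
#
#   emb = Embedding(num_embeddings=10, embedding_dim=4, padding_idx=0)
#   out = emb(token_ids)   # token_ids: int tensor of shape (batch, seq)
#                          # out: float tensor of shape (batch, seq, 4),
#                          # with all-zero rows wherever token_ids == 0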
| StarcoderdataPython |
153935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, <NAME> <<EMAIL>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: replace
author: "<NAME> (@EvanK)"
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
path:
description:
- The file to modify.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
aliases: [ dest, destfile, name ]
required: true
regexp:
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses MULTILINE mode, which means C(^) and C($) match the beginning
and end of the file, as well as the beginning and end respectively
of I(each line) of the file.
- Does not use DOTALL, which means the C(.) special character matches
any character I(except newlines). A common mistake is to assume that
a negated character set like C([^#]) will also not match newlines.
In order to exclude newlines, they must be added to the set like C([^#\\n]).
- Note that, as of ansible 2, short form tasks should have any escape
sequences backslash-escaped in order to prevent them being parsed
as string literal escapes. See the examples.
required: true
replace:
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
after:
description:
- If specified, the line after the replace/remove will start. Can be used
in combination with C(before).
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
version_added: "2.4"
before:
description:
- If specified, the line before the replace/remove will occur. Can be used
in combination with C(after).
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
version_added: "2.4"
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: 'no'
others:
description:
- All arguments accepted by the M(file) module also work here.
encoding:
description:
- "The character encoding for reading and writing the file."
default: "utf-8"
version_added: "2.4"
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
- Option I(follow) has been removed in version 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
"""
EXAMPLES = r"""
# Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
backup: yes
# Replace after the expression till the end of the file (requires >=2.4)
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
after: 'Start after line.*'
backup: yes
# Replace before the expression till the begin of the file (requires >=2.4)
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
before: 'Start before line.*'
backup: yes
# Replace between the expressions (requires >=2.4)
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
after: 'Start after line.*'
before: 'Start before line.*'
backup: yes
- replace:
path: /home/jdoe/.ssh/known_hosts
regexp: '^old\.host\.name[^\n]*\n'
owner: jdoe
group: jdoe
    mode: '0644'
- replace:
path: /etc/apache/ports
regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
replace: '\1 127.0.0.1:8080'
validate: '/usr/sbin/apache2ctl -f %s -t'
- name: short form task (in ansible 2+) necessitates backslash-escaped sequences
replace: dest=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
- name: long form task does not
replace:
dest: /etc/hosts
regexp: '\b(localhost)(\d*)\b'
replace: '\1\2.localdomain\2 \1\2'
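# Replace using an explicit file encoding (requires >=2.4); the path and
# pattern here are illustrative placeholders
- replace:
    path: /etc/motd
    regexp: 'old text'
    replace: 'new text'
    encoding: 'iso-8859-1'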
"""
import os
import re
import tempfile
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp(dir=getattr(module, 'tmpdir', None))
f = os.fdopen(tmpfd, 'wb')
f.write(contents)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc, err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
regexp=dict(required=True),
replace=dict(default='', type='str'),
after=dict(required=False),
before=dict(required=False),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
encoding=dict(default='utf-8', type='str'),
),
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
path = params['path']
encoding = params['encoding']
res_args = dict()
params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')
    if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory!' % path)
    if not os.path.exists(path):
        module.fail_json(rc=257, msg='Path %s does not exist!' % path)
else:
f = open(path, 'rb')
contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
f.close()
pattern = u''
if params['after'] and params['before']:
pattern = u'%s(?P<subsection>.*?)%s' % (params['before'], params['after'])
elif params['after']:
pattern = u'%s(?P<subsection>.*)' % params['after']
elif params['before']:
pattern = u'(?P<subsection>.*)%s' % params['before']
if pattern:
section_re = re.compile(pattern, re.DOTALL)
match = re.search(section_re, contents)
if match:
section = match.group('subsection')
else:
res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
res_args['changed'] = False
module.exit_json(**res_args)
else:
section = contents
mre = re.compile(params['regexp'], re.MULTILINE)
result = re.subn(mre, params['replace'], section, 0)
if result[1] > 0 and section != result[0]:
if pattern:
result = (contents.replace(section, result[0]), result[1])
msg = '%s replacements made' % result[1]
changed = True
if module._diff:
res_args['diff'] = {
'before_header': path,
'before': contents,
'after_header': path,
'after': result[0],
}
else:
msg = ''
changed = False
if changed and not module.check_mode:
if params['backup'] and os.path.exists(path):
res_args['backup_file'] = module.backup_local(path)
# We should always follow symlinks so that we change the real file
path = os.path.realpath(path)
write_changes(module, to_bytes(result[0], encoding=encoding), path)
res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
module.exit_json(**res_args)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1647300 | #
# Copyright 2015 ClusterHQ
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python bindings for ``libzfs_core``.
"""
from __future__ import absolute_import, division, print_function
CDEF = """
enum lzc_send_flags {
LZC_SEND_FLAG_EMBED_DATA = 1,
LZC_SEND_FLAG_LARGE_BLOCK = 2,
LZC_SEND_FLAG_COMPRESS = 4,
LZC_SEND_FLAG_RAW = 8
};
typedef enum {
DMU_OST_NONE,
DMU_OST_META,
DMU_OST_ZFS,
DMU_OST_ZVOL,
DMU_OST_OTHER,
DMU_OST_ANY,
DMU_OST_NUMTYPES
} dmu_objset_type_t;
#define MAXNAMELEN 256
struct drr_begin {
uint64_t drr_magic;
uint64_t drr_versioninfo; /* was drr_version */
uint64_t drr_creation_time;
dmu_objset_type_t drr_type;
uint32_t drr_flags;
uint64_t drr_toguid;
uint64_t drr_fromguid;
char drr_toname[MAXNAMELEN];
};
typedef struct zio_cksum {
uint64_t zc_word[4];
} zio_cksum_t;
typedef struct dmu_replay_record {
enum {
DRR_BEGIN, DRR_OBJECT, DRR_FREEOBJECTS,
DRR_WRITE, DRR_FREE, DRR_END, DRR_WRITE_BYREF,
DRR_SPILL, DRR_WRITE_EMBEDDED, DRR_NUMTYPES
} drr_type;
uint32_t drr_payloadlen;
union {
struct drr_begin drr_begin;
/* ... */
struct drr_checksum {
uint64_t drr_pad[34];
zio_cksum_t drr_checksum;
} drr_checksum;
} drr_u;
} dmu_replay_record_t;
typedef enum {
DCP_CMD_NONE,
DCP_CMD_RAW_RECV,
DCP_CMD_NEW_KEY,
DCP_CMD_INHERIT,
DCP_CMD_FORCE_NEW_KEY,
DCP_CMD_FORCE_INHERIT
} dcp_cmd_t;
int libzfs_core_init(void);
void libzfs_core_fini(void);
int lzc_bookmark(nvlist_t *, nvlist_t **);
int lzc_change_key(const char *, uint64_t, nvlist_t *, uint8_t *, uint_t);
int lzc_channel_program(const char *, const char *, uint64_t, uint64_t,
nvlist_t *, nvlist_t **);
int lzc_channel_program_nosync(const char *, const char *, uint64_t,
uint64_t, nvlist_t *, nvlist_t **);
int lzc_clone(const char *, const char *, nvlist_t *);
int lzc_create(const char *, dmu_objset_type_t, nvlist_t *, uint8_t *,
uint_t);
int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **);
int lzc_destroy_snaps(nvlist_t *, boolean_t, nvlist_t **);
boolean_t lzc_exists(const char *);
int lzc_get_bookmarks(const char *, nvlist_t *, nvlist_t **);
int lzc_get_holds(const char *, nvlist_t **);
int lzc_hold(nvlist_t *, int, nvlist_t **);
int lzc_load_key(const char *, boolean_t, uint8_t *, uint_t);
int lzc_promote(const char *, nvlist_t *, nvlist_t **);
int lzc_receive(const char *, nvlist_t *, const char *, boolean_t,
boolean_t, int);
int lzc_receive_one(const char *, nvlist_t *, const char *, boolean_t,
boolean_t, boolean_t, int, const dmu_replay_record_t *, int,
uint64_t *, uint64_t *, uint64_t *, nvlist_t **);
int lzc_receive_resumable(const char *, nvlist_t *, const char *,
boolean_t, boolean_t, int);
int lzc_receive_with_cmdprops(const char *, nvlist_t *, nvlist_t *,
uint8_t *, uint_t, const char *, boolean_t, boolean_t,
boolean_t, int, const dmu_replay_record_t *, int, uint64_t *,
uint64_t *, uint64_t *, nvlist_t **);
int lzc_receive_with_header(const char *, nvlist_t *, const char *,
boolean_t, boolean_t, boolean_t, int, const dmu_replay_record_t *);
int lzc_release(nvlist_t *, nvlist_t **);
int lzc_reopen(const char *, boolean_t);
int lzc_rollback(const char *, char *, int);
int lzc_rollback_to(const char *, const char *);
int lzc_send(const char *, const char *, int, enum lzc_send_flags);
int lzc_send_resume(const char *, const char *, int, enum lzc_send_flags,
uint64_t, uint64_t);
int lzc_send_space(const char *, const char *, enum lzc_send_flags,
uint64_t *);
int lzc_snaprange_space(const char *, const char *, uint64_t *);
int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **);
int lzc_sync(const char *, nvlist_t *, nvlist_t **);
int lzc_unload_key(const char *);
int lzc_pool_checkpoint(const char *);
int lzc_pool_checkpoint_discard(const char *);
int lzc_rename(const char *, const char *);
int lzc_destroy(const char *fsname);
int lzc_inherit(const char *fsname, const char *name, nvlist_t *);
int lzc_set_props(const char *, nvlist_t *, nvlist_t *, nvlist_t *);
int lzc_list (const char *, nvlist_t *);
"""
SOURCE = """
#include <libzfs/libzfs_core.h>
"""
LIBRARY = "zfs_core"
# vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4
| StarcoderdataPython |
1617483 | #!/usr/bin/env python
import sys
import os
import json
import re
import urllib
import urllib2
# Basic Dynamic DNS update client. Written around the no-ip.com API.
# Supports determining your public IP by querying an external web service, or by
# using an IP address from a local interface
#
# Reads in config from a JSON file. Required parameters:
# * method (web or interface)
# * hostname (dynamic DNS name that will be updated)
# * username
# * password
# * externIf (interface name, only required if using method 'interface')
#
# Author: <NAME>
# Created: 2018-08-17
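# Example config.json (illustrative values only; the hostname and credentials
# below are placeholders, not real accounts):
#
# {
#     "method": "interface",
#     "hostname": "myhost.example.com",
#     "username": "user@example.com",
#     "password": "secret",
#     "externIf": "eth0"
# }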
# UserAgent parameters
program = "tac-dyndns"
version = "1.0"
contact = "<EMAIL>"
baseUrl = "https://dynupdate.no-ip.com/nic/update"
if (len(sys.argv) != 2):
print ("usage: %s config.json" % sys.argv[0])
exit()
# Read in config from file
filename = sys.argv[1]
with open(filename) as json_file:
config = json.load(json_file)
method = config["method"]
hostname = config["hostname"]
username = config["username"]
password = config["password"]
if method == "interface":
externIf = config["externIf"]
# Figure out what IP we're at. Either use a web query, or the IP from an if
if method == "web":
ip = urllib2.urlopen('http://ip.42.pl/raw').read()
elif method == "interface":
# Pull the IP address using the 'ip addr show' command
ipStr = os.popen("/usr/sbin/ip addr show %s | grep 'inet '" % externIf).read().strip()
m = re.match("inet (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", ipStr)
if m:
ip = m.group(1)
else:
print "No IP address found on interface %s!" % externIf
exit()
else:
print "Method %s not known!" % method
exit()
# If there is a last IP in the config file and it's the same as the IP we've
# detected, then there is no need to continue
if "lastIp" in config:
if config["lastIp"] == ip:
print "No IP change detected"
exit()
# Write new IP to the config file
config["lastIp"] = ip
with open(filename, 'w') as json_file:
json.dump(config, json_file, indent=4)
# Encode data into URL string
data = {
'hostname': hostname,
'myip': ip
}
updateData = urllib.urlencode(data)
# Create the base64 encoded authentication string, build headers
authStr = "%s:%s" % (username,password)
encodedAuth = authStr.encode("base64").rstrip()
headers = {
'Authorization': "Basic %s" % encodedAuth,
'User-Agent': "%s/%s %s" % (program, version, contact)
}
# Make the request
request = urllib2.Request(baseUrl, updateData, headers)
try:
result = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "HTTP Error: %s %s" % (e.code,e.reason)
exit()
print (result.read())
| StarcoderdataPython |
3302518 | <gh_stars>1-10
# Copyright (c) 2016-2021 InSeven Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import base64
import collections
import copy
import datetime
import functools
import hashlib
import itertools
import json as js
import logging
import os
import pytz
import random
import re
import struct
import sys
import time
import urllib.parse
import uuid
import dateutil.parser
import jinja2
import lxml.html
import misaka
import titlecase
from flask import Flask, Response, render_template, send_from_directory, send_file, abort, jsonify, request
logging.basicConfig(stream=sys.stderr)
import config
import converters
import extensions
import store
import utils
app = Flask(__name__)
class MyRenderer(misaka.SaferHtmlRenderer):
def __init__(self, page):
super(MyRenderer, self).__init__(flags=(),
sanitization_mode='',
nesting_level=0,
link_rewrite=None,
img_src_rewrite=None)
self._page = page
def rewrite_url(self, url, is_image_src=False):
if is_image_src:
o = urllib.parse.urlparse(url)
if o.scheme == '' and o.netloc == '':
# Insert a marker in markdown images to allow us to replace them with the image template for local images.
return "+" + fixup_relative_url(url, self._page["path"])
return fixup_relative_url(url, self._page["path"])
def check_url(self, url, is_image_src=False):
return True
def header(self, content, level):
return "<h%d><a id=\"%s\"></a>%s</h%d>" % (level, content.replace(' ', '-').lower(), content, level);
app.config.from_object(config.Configuration)
def initialize(templates_path, store_path, config):
app.jinja_loader = jinja2.FileSystemLoader(templates_path)
app.template_mtime = directory_mtime(templates_path)
app.config["CONFIG"] = config
app.config["STORE_PATH"] = store_path
app.jinja_env.extend(site=Site())
app.jinja_env.extend(store=store.DocumentStore(app.config["STORE_PATH"]))
app.jinja_env.add_extension('extensions.Gallery')
app.jinja_env.add_extension('extensions.Video')
app.jinja_env.add_extension('extensions.TemplateExtension')
def directory_mtime(path):
mtimes = []
for root, dirs, files in os.walk(path):
for name in files:
mtimes.append(os.path.getmtime(os.path.join(root, name)))
return max(mtimes) if mtimes else 0
def wrap_document(document):
if document is None:
return None
if not "template" in document:
document["template"] = "post.html"
return DocumentWrapper(document)
def sort_posts(posts, ascending=False):
posts_with_date = [post for post in posts if post.date is not None]
posts_without_date = [post for post in posts if post.date is None]
return sorted(posts_with_date, key=lambda x: x.sort_date, reverse=not ascending) + posts_without_date
class Site(object):
def __init__(self):
self.config = app.config["CONFIG"]
self._cache = None
self._cache_by_parent = None
def load_cache(self):
if self._cache is not None:
return
self._cache = dict()
self._cache_by_parent = collections.defaultdict(list)
for document in [wrap_document(document) for document in app.jinja_env.store.getall()]:
self._cache[document.url] = document
self._cache_by_parent[document["parent"]].append(document)
def posts(self, include=None, exclude=None, parent=None, search=None, ascending=False, **kwargs):
self.load_cache()
cached_posts = None
if include is None and exclude is None and parent is None and search is None and not kwargs:
cached_posts = self._cache.values()
elif include is None and exclude is None and search is None and not kwargs:
cached_posts = self._cache_by_parent[parent]
if cached_posts is not None:
return sort_posts(cached_posts, ascending=ascending)
return [wrap_document(document) for document in app.jinja_env.store.getall(include=include,
exclude=exclude,
parent=parent,
search=search,
asc=ascending,
**kwargs)]
def post(self, url):
self.load_cache()
try:
return self._cache[url]
except KeyError:
return None
# TODO: Dependencies are not tracked for included image files.
def __getitem__(self, key):
return self.config[key]
@property
def last_modified(self):
return app.jinja_env.store.last_modified
def normpath(path):
if path.endswith("/"):
path = path[:-1]
if not path.startswith("/"):
path = "/" + path
return path
class ValueCache(object):
def __init__(self, value):
self.value = value
class QueryTracker(object):
def __init__(self):
self.queries = {}
def add(self, parameters, documents):
mtimes = [document.mtime for document in documents]
self.queries[js.dumps(parameters)] = utils.hash_items(mtimes)
class DocumentWrapper(object):
def __init__(self, document):
self._document = document
self._thumbnail = None
self._siblings = None
self.query_tracker = QueryTracker()
def __getitem__(self, key):
return self._document[key]
def __getattr__(self, name):
try:
return self._document[name]
except KeyError:
return None
@property
def hash(self):
return utils.hash_items([self.mtime])
@property
def last_modified(self):
return time.ctime(self.mtime)
@property
def parent(self):
document = self._record_query({"type": "post", "url": self._document["parent"]})
return document[0] if document else None
@property
def children(self):
sort = self._document["sort"] if "sort" in self._document else "ascending"
return self._record_query({"type": "children", "parent": self.url, "sort": sort})
@property
def siblings(self):
return self._record_query({"type": "siblings", "parent": self._document["parent"]})
@property
def previous(self):
document = self._record_query({"type": "previous", "parent": self._document["parent"]})
return document[0] if document else None
@property
def next(self):
document = self._record_query({"type": "next", "parent": self._document["parent"]})
return document[0] if document else None
def _record_query(self, parameters):
documents = self._run_query(parameters)
self.query_tracker.add(parameters, documents)
for document in documents:
document.query_tracker = self.query_tracker
return documents
def evaluate_queries(self, queries):
hashes = []
for query, digest in queries.items():
parameters = js.loads(query)
documents = self._run_query(parameters)
hashes.append(utils.hash_items([document.mtime for document in documents]))
return hashes
def _run_query(self, parameters):
if "type" not in parameters or parameters["type"] == "posts":
include = parameters["include"] if "include" in parameters else None
exclude = parameters["exclude"] if "exclude" in parameters else None
ascending = parameters["sort"] == "ascending" if "sort" in parameters else True
return app.jinja_env.site.posts(include=include, exclude=exclude, ascending=ascending)
elif parameters["type"] == "children":
parent = parameters["parent"]
ascending = parameters["sort"] == "ascending" if "sort" in parameters else True
return app.jinja_env.site.posts(parent=parent, ascending=ascending)
elif parameters["type"] == "post":
url = parameters["url"]
document = app.jinja_env.site.post(url)
return [document] if document is not None else []
elif parameters["type"] == "siblings":
if parameters["parent"] is None:
return []
return app.jinja_env.site.posts(parent=parameters["parent"])
elif parameters["type"] == "previous":
if parameters["parent"] is None:
return []
previous = []
for index, document in enumerate(app.jinja_env.site.posts(parent=parameters["parent"])):
if document.url == self.url:
return previous
previous = [document]
return []
elif parameters["type"] == "next":
if parameters["parent"] is None:
return []
found = False
for index, document in enumerate(app.jinja_env.site.posts(parent=parameters["parent"])):
if found:
return [document]
if document.url == self.url:
found = True
return []
exit("Unsupported query with parameters '%s'" % (parameters, ))
def query(self, identifier):
parameters = None
try:
parameters = self._document["queries"][identifier]
except KeyError:
exit("Unknown query '%s'." % (identifier, ))
return self._record_query(parameters)
def abspath(self, path):
if path == '.':
return self.url
if not path.startswith("/"):
return self.url + path
return path
@property
def content(self):
if self._document["content"]:
content = app.jinja_env.from_string(self._document["content"]).render(site=app.jinja_env.site,
page=self,
url=self.url)
return content
return None
@property
def html(self):
content = self.content
if content:
return markdown(self._document)(content)
return None
@property
def thumbnail(self):
if self._thumbnail is None:
def get_thumbnail():
# Images are their own thumbnails.
try:
return self._document["image"]
except KeyError:
pass
# Use any manually specified thumbnail.
try:
return self._document["thumbnail"]
except KeyError:
pass
# Parse the HTML and look for a suitable thumbnail.
html = self.html
if html:
document = lxml.html.fromstring(html)
images = document.xpath("//img")
if images:
return {'url': images[0].get('src')}
# See if any of the children have thumbnails.
for child in self.children:
thumbnail = child.thumbnail
if thumbnail is not None:
return thumbnail
return None
self._thumbnail = ValueCache(get_thumbnail())
return self._thumbnail.value
@property
def sort_date(self):
return self.date.replace(tzinfo=None)
# Filters
@app.add_template_filter
def date(value, format='%Y-%d-%m'):
return value.strftime(format)
@app.add_template_filter
def prepend(value, prefix):
return prefix + value
@app.add_template_filter
def sort_by(items, key):
return sorted(items, key=lambda item: item[key])
@app.add_template_filter
def json(object):
return js.dumps(object)
@app.add_template_filter
def text(html):
return " ".join(lxml.html.fromstring(html).text_content().split(" ")[:40])
@app.add_template_filter
def slice_list(items, start=0, stop=0):
return itertools.islice(items, start, stop)
@app.add_template_filter
def rfc3339(date):
if date.tzinfo is None:
return date.replace(tzinfo=pytz.utc).isoformat()
return date.isoformat()
@app.add_template_filter
def tag(identifier):
if identifier in app.jinja_env.site.config['tags']:
details = copy.deepcopy(app.jinja_env.site.config['tags'][identifier])
details['identifier'] = identifier
return details
title, _ = converters.title_and_scale_from_path(identifier)
return {'title': title, 'description': None, 'identifier': identifier}
@app.add_template_filter
def date_or_now(date):
if date is not None:
return date
return datetime.datetime.now()
class DefaultAttributeWrapper(object):
def __init__(self, wrapped, name, value):
self.wrapped = wrapped
self.name = name
self.value = value
def __getitem__(self, key):
return self.__getattr__(key)
def __getattr__(self, name):
if name == self.name:
try:
value = getattr(self.wrapped, name)
if value is None:
return self.value
return value
except AttributeError:
return self.value
elif name == f"{self.name}_original":
return getattr(self.wrapped, self.name)
else:
return getattr(self.wrapped, name)
@app.add_template_filter
def attribute_with_default(wrapped, attribute, value):
return [DefaultAttributeWrapper(w, attribute, value) for w in wrapped]
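# Illustrative template usage of the filter above (the field names here are
# assumptions):
#   {% for post in site.posts() | attribute_with_default("title", "Untitled") %}
#     {{ post.title }}
#   {% endfor %}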
def filter_base64(string):
return base64.b64encode(string.encode('utf-8')).decode('utf-8')
def filter_render_template(template, **kwargs):
template = app.jinja_env.get_template(template)
content = template.render(site=app.jinja_env.site, **kwargs)
return content
def filter_titlecase(string):
return titlecase.titlecase(string)
app.add_template_filter(filter_base64, name='base64')
app.add_template_filter(filter_render_template, name='render_template')
app.add_template_filter(filter_titlecase, name='titlecase')
def fixup_relative_url(url, page_path):
o = urllib.parse.urlparse(url)
if o.scheme == '' and o.netloc == '' and not o.path.startswith('/') and not url.startswith('#'):
result = os.path.join(os.path.dirname(page_path), o.path)
return result
return url
def fixup_relative_image_srcset(srcset, page_path):
if srcset is None:
return None
    srcs = [re.split(r"\s+", src) for src in re.split(r'\s*,\s*', srcset)]
for src in srcs:
src[0] = fixup_relative_image_url(src[0], page_path)
return ", ".join([" ".join(src) for src in srcs])
def fixup_relative_image_url(url, page_path):
o = urllib.parse.urlparse(url)
if o.scheme == '' and o.netloc == '' and not o.path.startswith('/') and not url.startswith('#'):
path = os.path.join(os.path.dirname(page_path), o.path)
image = app.jinja_env.store.getall(path=path)[0]
return image["image"]["url"]
return url
def markdown(page):
def render(text):
renderer = MyRenderer(page)
markdown = misaka.Markdown(renderer, extensions=('fenced-code',
'smartypants',
'strikethrough',
'superscript',
'tables',
'footnotes'))
content = misaka.smartypants(markdown(text))
if not content:
return content
document = lxml.html.fromstring(content)
for image in document.xpath("//img"):
src = image.get('src')
if src.startswith("+"):
image_url = (os.path.splitext(src[1:])[0] + "/").lower()
image_document = app.jinja_env.site.post(image_url)
if image_document is None:
logging.error("Failed to get document for image '%s'" % (image_url, ))
continue
template = app.jinja_env.get_template("image.html")
title = None
try:
title = image_document['title']
except KeyError:
pass
html = template.render(site=app.jinja_env.site,
image=image_document)
replacement_image = lxml.html.fromstring(html)
parent = image.getparent()
parent.insert(parent.index(image) + 1, replacement_image)
parent.remove(image)
else:
image.set('src', fixup_relative_image_url(src, page["path"]))
image.set('srcset', fixup_relative_image_srcset(image.get('srcset'), page["path"]))
for source in document.xpath("//picture/source"):
srcset = source.get('srcset')
source.set('srcset', fixup_relative_image_url(srcset, page["path"]))
for anchor in document.xpath("//a"):
anchor.set('href', fixup_relative_url(anchor.get('href'), page["path"]))
results = lxml.html.tostring(document, method='html', encoding='unicode')
return results
return render
# Decorators
def get_document(path):
page = app.jinja_env.site.post(path)
if not page:
abort(404)
return page
def local_path(path):
return os.path.join(os.path.join(os.path.expanduser(app.config[config.keys.ROOT]), "files"), path[1:])
@app.route("/")
@app.route("/<path:path>")
def documents(path=""):
path = normpath(path)
path = converters.ensure_trailing_slash(path)
page = get_document(path)
page.query_tracker = QueryTracker()
template_filename = page.template
if template_filename.endswith(".json"):
template = app.jinja_env.get_template(page.template)
content = template.render(site=app.jinja_env.site,
page=page,
args=request.args)
return app.response_class(
response=content,
status=200,
mimetype="application/json"
), page.query_tracker
headers = {
'Last-Modified': app.jinja_env.site.last_modified,
'Cache-Control': 'no-cache, must-revalidate',
}
content = render_template(page.template,
site=app.jinja_env.site,
page=page,
args=request.args,
markdown=markdown(page._document))
return Response(content, headers=headers), page.query_tracker
| StarcoderdataPython |
91802 | import RPi.GPIO as GPIO
from picamera import PiCamera
class DiceCam(object):
"""Dice Cam! Flash the LEDs, take a pic!"""
def __init__(self, *led_pins):
super(DiceCam, self).__init__()
self.led_pins = list(led_pins)
self.camera = PiCamera()
self.camera.stop_preview()
self.camera.resolution = (1640, 922)
self.camera.iso = 800
self.camera.awb_mode = 'fluorescent'
self.camera.brightness = 30
self.camera.contrast = 90
self.camera.exposure_mode = 'verylong'
self.camera.meter_mode = 'spot'
self.camera.shutter_speed = 25000
self.camera.color_effects = (128, 128)
self.camera.led = False
self.setup()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
def setup(self):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
for pin in self.led_pins:
GPIO.setup(pin, GPIO.OUT)
def set_leds(self, value):
for pin in self.led_pins:
GPIO.output(pin, value)
def leds_on(self):
self.set_leds(GPIO.HIGH)
def leds_off(self):
self.set_leds(GPIO.LOW)
def capture(self, out):
self.leds_on()
self.camera.capture(out, 'jpeg')
self.leds_off()
def cleanup(self):
self.leds_off()
GPIO.cleanup()
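# Minimal usage sketch (the BCM pin numbers are assumptions; use whichever
# pins your LEDs are actually wired to). The context manager turns the LEDs
# off and releases GPIO state afterwards.
if __name__ == '__main__':
    with DiceCam(17, 27) as cam:
        cam.capture('dice.jpg')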
| StarcoderdataPython |
3294773 | <filename>src/IVR-Comprehend2DynamoDB/read_transcripts.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 02 2020
@author: <NAME> (Amazon Web Services)
@email: <EMAIL>
"""
import json
import boto3
# Get S3 client
s3 = boto3.client("s3")
def read_transcripts(src_bucket, guid):
"""Get the transcripts JSON file from the S3 bucket.
Args:
src_bucket (str): A string containing the S3 source bucket.
guid (str): A string containing the unique ID.
Returns:
spoken_text (str): A string with the text the customer spoke to Connect
Examples:
>>> data = query_table(guid='f39caef7-94a7-4ca4-8b59-6811055672f0')
"""
try:
spoken_text = json.loads(
s3.get_object(
Bucket=src_bucket,
Key=guid+".json"
)["Body"].read()
)
spoken_text = spoken_text["results"]["transcripts"][0]["transcript"]
    except Exception:
        # Fall back to an empty transcript if the object is missing or malformed
        spoken_text = ""
return spoken_text
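# Minimal usage sketch; the bucket name and GUID below are illustrative
# placeholders, not real resources.
if __name__ == "__main__":
    print(read_transcripts(src_bucket="my-transcripts-bucket",
                           guid="f39caef7-94a7-4ca4-8b59-6811055672f0"))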
| StarcoderdataPython |
3331011 | # List of friends without the first person, printed from index 1 (the list starts at index 0)
friends = ["Monika", "Piotrek", "Tomek", "Tomek.P", "Justyna", "Rafał", "Witek", "Agnieszka", "Sabina"]
# Adding to the list: position, name
friends.insert(1, "Marek")
# Removing an item from the list
friends.remove("Marek")
print(friends[1:])
| StarcoderdataPython |
1634809 | <gh_stars>1-10
from configparser import ConfigParser
parser = ConfigParser()
parser.read('multisection.ini')
for candidate in ['wiki', 'bug_tracker', 'dvcs']:
print('{:<12}: {}'.format(
candidate, parser.has_section(candidate))) | StarcoderdataPython |
75178 | <filename>pytsp/core/__init__.py<gh_stars>1-10
from pytsp.core.annealing import (AnnealingMixin, CompressedAnnealing,
SimulatedAnnealing)
from pytsp.core.genetic import GeneticAlgorithm
from pytsp.core.util import Model, cached, jarvis
from pytsp.core.tsp import TravellingSalesman, TravellingSalesmanTimeWindows
| StarcoderdataPython |
1739869 | for i in range(9):
for j in range(20000):
if(j<19999 or i!=8):
print(i,end=" ")
else:
print(i)
print("7")
| StarcoderdataPython |
1748165 | <reponame>bpbpublications/Programming-Techniques-using-Python
class Uppercase_decorator:
def __init__(self, myfunc):
self.myfunc = myfunc
def __call__(self):
mystr1 = self.myfunc()
return mystr1.upper()
# adding class decorator to the function mygreet
@Uppercase_decorator
def mygreet():
return "good evening"
print(mygreet())
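# Expected output: GOOD EVENING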
| StarcoderdataPython |
199133 | <filename>synapse/rest/client/v2_alpha/room_keys.py
# -*- coding: utf-8 -*-
# Copyright 2017, 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
parse_string,
)
from ._base import client_patterns
logger = logging.getLogger(__name__)
class RoomKeysServlet(RestServlet):
PATTERNS = client_patterns(
"/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
)
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
async def on_PUT(self, request, room_id, session_id):
"""
Uploads one or more encrypted E2E room keys for backup purposes.
room_id: the ID of the room the keys are for (optional)
session_id: the ID for the E2E room keys for the room (optional)
version: the version of the user's backup which this data is for.
the version must already have been created via the /room_keys/version API.
Each session has:
* first_message_index: a numeric index indicating the oldest message
encrypted by this session.
* forwarded_count: how many times the uploading client claims this key
has been shared (forwarded)
* is_verified: whether the client that uploaded the keys claims they
were sent by a device which they've verified
        * session_data: an encrypted blob describing the session, base64-encoded.
Returns 200 OK on success with body {}
Returns 403 Forbidden if the version in question is not the most recently
created version (i.e. if this is an old client trying to write to a stale backup)
Returns 404 Not Found if the version in question doesn't exist
The API is designed to be otherwise agnostic to the room_key encryption
algorithm being used. Sessions are merged with existing ones in the
backup using the heuristics:
* is_verified sessions always win over unverified sessions
        * older first_message_index always wins over newer sessions
* lower forwarded_count always wins over higher forwarded_count
We trust the clients not to lie and corrupt their own backups.
It also means that if your access_token is stolen, the attacker could
delete your backup.
        PUT /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1
Content-Type: application/json
{
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": false,
"session_data": "SSBBTSBBIEZJU0gK"
}
Or...
        PUT /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1
Content-Type: application/json
{
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": false,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
Or...
        PUT /room_keys/keys?version=1 HTTP/1.1
Content-Type: application/json
{
"rooms": {
"!abc:matrix.org": {
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": false,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
}
}
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
version = parse_string(request, "version")
if session_id:
body = {"sessions": {session_id: body}}
if room_id:
body = {"rooms": {room_id: body}}
ret = await self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
return 200, ret
async def on_GET(self, request, room_id, session_id):
"""
Retrieves one or more encrypted E2E room keys for backup purposes.
Symmetric with the PUT version of the API.
room_id: the ID of the room to retrieve the keys for (optional)
session_id: the ID for the E2E room keys to retrieve the keys for (optional)
version: the version of the user's backup which this data is for.
the version must already have been created via the /change_secret API.
Returns as follows:
GET /room_keys/keys/!abc:matrix.org/c0ff33?version=1 HTTP/1.1
{
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": false,
"session_data": "SSBBTSBBIEZJU0gK"
}
Or...
GET /room_keys/keys/!abc:matrix.org?version=1 HTTP/1.1
{
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": false,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
Or...
GET /room_keys/keys?version=1 HTTP/1.1
{
"rooms": {
"!abc:matrix.org": {
"sessions": {
"c0ff33": {
"first_message_index": 1,
"forwarded_count": 1,
"is_verified": false,
"session_data": "SSBBTSBBIEZJU0gK"
}
}
}
}
}
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
version = parse_string(request, "version", required=True)
room_keys = await self.e2e_room_keys_handler.get_room_keys(
user_id, version, room_id, session_id
)
# Convert room_keys to the right format to return.
if session_id:
# If the client requests a specific session, but that session was
# not backed up, then return an M_NOT_FOUND.
if room_keys["rooms"] == {}:
raise NotFoundError("No room_keys found")
else:
room_keys = room_keys["rooms"][room_id]["sessions"][session_id]
elif room_id:
# If the client requests all sessions from a room, but no sessions
# are found, then return an empty result rather than an error, so
# that clients don't have to handle an error condition, and an
# empty result is valid. (Similarly if the client requests all
# sessions from the backup, but in that case, room_keys is already
# in the right format, so we don't need to do anything about it.)
if room_keys["rooms"] == {}:
room_keys = {"sessions": {}}
else:
room_keys = room_keys["rooms"][room_id]
return 200, room_keys
async def on_DELETE(self, request, room_id, session_id):
"""
Deletes one or more encrypted E2E room keys for a user for backup purposes.
DELETE /room_keys/keys/!abc:matrix.org/c0ff33?version=1
HTTP/1.1 200 OK
{}
room_id: the ID of the room whose keys to delete (optional)
session_id: the ID for the E2E session to delete (optional)
version: the version of the user's backup which this data is for.
the version must already have been created via the /change_secret API.
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
version = parse_string(request, "version")
ret = await self.e2e_room_keys_handler.delete_room_keys(
user_id, version, room_id, session_id
)
return 200, ret
class RoomKeysNewVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
async def on_POST(self, request):
"""
Create a new backup version for this user's room_keys with the given
info. The version is allocated by the server and returned to the user
in the response. This API is intended to be used whenever the user
changes the encryption key for their backups, ensuring that backups
encrypted with different keys don't collide.
It takes out an exclusive lock on this user's room_key backups, to ensure
clients only upload to the current backup.
The algorithm passed in the version info is a reverse-DNS namespaced
        identifier describing the format of the encrypted backed-up keys.
The auth_data is { user_id: "user_id", nonce: <random string> }
encrypted using the algorithm and current encryption key described above.
POST /room_keys/version
Content-Type: application/json
{
"algorithm": "m.megolm_backup.v1",
"auth_data": "<KEY>"
}
HTTP/1.1 200 OK
Content-Type: application/json
{
"version": 12345
}
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
info = parse_json_object_from_request(request)
new_version = await self.e2e_room_keys_handler.create_version(user_id, info)
return 200, {"version": new_version}
# we deliberately don't have a PUT /version, as these things really should
# be immutable to avoid people footgunning
class RoomKeysVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version(/(?P<version>[^/]+))?$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
async def on_GET(self, request, version):
"""
Retrieve the version information about a given version of the user's
room_keys backup. If the version part is missing, returns info about the
most current backup version (if any)
It takes out an exclusive lock on this user's room_key backups, to ensure
clients only upload to the current backup.
Returns 404 if the given version does not exist.
GET /room_keys/version/12345 HTTP/1.1
{
"version": "12345",
"algorithm": "m.megolm_backup.v1",
"auth_data": "<KEY>"
}
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
try:
info = await self.e2e_room_keys_handler.get_version_info(user_id, version)
except SynapseError as e:
if e.code == 404:
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
return 200, info
async def on_DELETE(self, request, version):
"""
Delete the information about a given version of the user's
room_keys backup. If the version part is missing, deletes the most
current backup version (if any). Doesn't delete the actual room data.
DELETE /room_keys/version/12345 HTTP/1.1
HTTP/1.1 200 OK
{}
"""
if version is None:
raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND)
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
await self.e2e_room_keys_handler.delete_version(user_id, version)
return 200, {}
async def on_PUT(self, request, version):
"""
Update the information about a given version of the user's room_keys backup.
        PUT /room_keys/version/12345 HTTP/1.1
Content-Type: application/json
{
"algorithm": "m.megolm_backup.v1",
"auth_data": {
"public_key": "abcdefg",
"signatures": {
"ed25519:something": "hijklmnop"
}
},
"version": "12345"
}
HTTP/1.1 200 OK
Content-Type: application/json
{}
"""
requester = await self.auth.get_user_by_req(request, allow_guest=False)
user_id = requester.user.to_string()
info = parse_json_object_from_request(request)
if version is None:
raise SynapseError(
400, "No version specified to update", Codes.MISSING_PARAM
)
await self.e2e_room_keys_handler.update_version(user_id, version, info)
return 200, {}
def register_servlets(hs, http_server):
RoomKeysServlet(hs).register(http_server)
RoomKeysVersionServlet(hs).register(http_server)
RoomKeysNewVersionServlet(hs).register(http_server)
| StarcoderdataPython |
3381654 |
import poplib
from email.parser import Parser
email = '<EMAIL>'
password = '<PASSWORD>'
pop3_server = 'pop.163.com'
server = poplib.POP3(pop3_server)
print(server.getwelcome().decode('utf8'))
server.user(email)
server.pass_(password)
print('Message: %s. Size: %s' % (server.stat()))
resp, mails, octets = server.list()
# print(mails)
index = len(mails)
resp, lines, octets = server.retr(index)
msg_content = b'\r\n'.join(lines).decode('utf-8')
msg = Parser().parsestr(msg_content)
print(msg)
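# Illustrative: read individual headers off the parsed message
# (RFC 2047 encoded-word decoding is omitted for brevity):
print('Subject:', msg.get('Subject'))
print('From:', msg.get('From'))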
server.quit()
| StarcoderdataPython |
1701681 | # fix imports for appengine environments
import fix_imports
(fix_imports)  # reference the import so linters don't flag it as unused
from tg import AppConfig
from tg import redirect
from google.appengine.api import users
from main import MainController
# def controller_wrapper(next_caller):
# def call(*args, **kw):
# user = users.get_current_user()
# if not user:
# login_url = users.create_login_url('/')
# redirect(login_url)
#
# return next_caller(*args, **kw)
#
# return call
config = AppConfig(minimal=True, root_controller=MainController())
#config.register_controller_wrapper(controller_wrapper)
app = config.make_wsgi_app() | StarcoderdataPython |
4830904 | <reponame>sangaman/raiden<gh_stars>0
from pathlib import Path
import pytest
from raiden.storage.versions import filter_db_names, latest_db_file
def test_latest_db_file():
assert latest_db_file([Path("v10_log.db"), Path("v9_log.db")]) == Path("v10_log.db")
assert latest_db_file([Path("v9_log.db"), Path("v10_log.db")]) == Path("v10_log.db")
assert latest_db_file([Path("v1_log.db"), Path("v9_log.db")]) == Path("v9_log.db")
assert latest_db_file([Path("v9_log.db"), Path("v1_log.db")]) == Path("v9_log.db")
assert latest_db_file([]) is None
values = ["a", ".db", "v9.db", "9_log.db", "va9_log.db", "v9a_log.db"]
for invalid_value in values:
with pytest.raises(AssertionError):
latest_db_file([Path(invalid_value)])
def test_filter_db_names():
assert filter_db_names(["v10_log.db", "v9_log.db"]) == [Path("v10_log.db"), Path("v9_log.db")]
assert filter_db_names(["v9_log.db", "v10_log.db"]) == [Path("v9_log.db"), Path("v10_log.db")]
assert filter_db_names(["v1_log.db", "v9_log.db"]) == [Path("v1_log.db"), Path("v9_log.db")]
assert filter_db_names(["v9_log.db", "v1_log.db"]) == [Path("v9_log.db"), Path("v1_log.db")]
values = [[], ["a"], [".db"], ["v9.db"], ["9_log.db"], ["va9_log.db"], ["v9a_log.db"]]
for invalid_value in values:
assert filter_db_names(invalid_value) == []
| StarcoderdataPython |
53758 | <gh_stars>0
#-*- coding:utf-8 -*-
##############################################
# GARUDA CLIENT SDK
# Reference: Garuda Base Protocol Version 1.1
# Last Updated: 07-Aug-2015
##############################################
import sys
import time
import json
import socket
import threading
# Configuration for Garuda Core Connection
# These are for internal use of the SDK
# Note: The values are pre-configured and user MUST NOT change the values
_GAURDA_HOST = 'localhost' # Host address for Garuda Core
_GAURDA_PORT = 9000 # Port number for Garuda Core
_GAURDA_ADDR = (_GAURDA_HOST, _GAURDA_PORT) # Garuda Core Address
_BUFFER_SIZE = 2048 # Socket Reader Buffer size
# Protocol Request Messages
# These are for internal use of the SDK
_ID_ACTIVATE_GADGET_REQ = "ActivateGadgetRequest";
_ID_LOAD_GADGET_REQ = "LoadGadgetRequest";
_ID_GET_COMPATIBLE_GADGET_LIST_REQ = "GetCompatibleGadgetListRequest";
_ID_SEND_DATA_TO_GADGET_REQ = "SendDataToGadgetRequest";
_ID_LOAD_DATA_REQ = "LoadDataRequest";
_ID_SEND_NOTIFICATION_TO_GADGET_REQ = "SendNotificationToGadgetRequest";
_ID_SEND_NOTIFICATION_TO_CORE_REQ = "SendNotificationToCoreRequest";
_ID_TERMINATE_GADGET_REQ = "stop"
# Protocol Response Messages
# These are for internal use of the SDK
_ID_ACTIVATE_GADGET_RESP = "ActivateGadgetResponse";
_ID_LOAD_GADGET_RESP = "LoadGadgetResponse";
_ID_GET_COMPATIBLE_GADGET_LIST_RESP = "GetCompatibleGadgetListResponse";
_ID_SEND_DATA_TO_GADGET_RESP = "SendDataToGadgetResponse";
_ID_LOAD_DATA_RESP = "LoadDataResponse";
_ID_SEND_NOTIFICATION_TO_GADGET_RESP = "SendNotificationToGadgetResponse";
# Protocol Message Versions
# These are for internal use of the SDK
_REQ_MSG_VERSION = "0.2";
_RESP_MSG_VERSION = "0.2";
# Custom messages
# The SDK can generate during invocation of the callback method
# The callback handler in the gadget implementation should handle these
MSG_REMOTE_HOST_CLOSED = "RemoteHostClosedError"
# Custom IDs
# The SDK will use these as 'message_id' argument in the callback method invocation
# Each ID corresponds to an event/action within the SDK for which the callback method is invoked
# The callback handler in the gadget implementation MUST handle these IDs
ID_ACTIVATE_GADGET_RESPONSE = "activate_gadget_response";
ID_LOAD_GADGET_REQUEST = "load_gadget_request";
ID_GET_COMPATIBLE_GADGET_LIST_RESPONSE = "get_compatible_gadget_list_response";
ID_SEND_DATA_GADGET_RESPONSE = "send_data_to_gadget_response";
ID_LOAD_DATA_REQUEST = "load_data_request";
ID_LOAD_DATA_STREAM_REQUEST = "load_data_stream_request";
ID_SEND_NOTIFICATION_TO_GADGET_REQUEST = "send_notification_to_gadget_request";
ID_CONNECTION_TERMINATED = "connection_terminated";
ID_CONNECTION_NOT_INITIALIZED = "connection_not_initialized";
ID_JSON_PARSE_ERROR = "json_parse_error";
ID_JSON_DUMPS_ERROR = "json_dumps_error";
# Protocol Notification Types
# The SDK will use these as 'response_code' argument in the callback method invocation for 'message_id' ID_SEND_NOTIFICATION_TO_GADGET_REQUEST
# The callback handler in the gadget implementation should handle these on receiving ID_SEND_NOTIFICATION_TO_GADGET_REQUEST
NOTIFICATION_BRING_TO_FRONT = 602
NOTIFICATION_ERROR = 603
NOTIFICATION_TERMINATE = 604
# Response Codes
# The SDK will use these as 'response_code' argument in the callback method invocation for messages other than ID_SEND_NOTIFICATION_TO_GADGET_REQUEST
# The callback handler in the gadget implementation should handle these for the appropriate messages received
RESPCODE_SUCCESS = 200
RESPCODE_GADGET_ALREADY_CONNECTED = 400
RESPCODE_GADGET_APPKEY_MISMATCH = 401
RESPCODE_NO_COMPATIBLE_GADGET_FOUND = 403
RESPCODE_NO_CORE_DATABASE_CONNECTION = 404
RESPCODE_GADGET_NOT_FOUND_IN_CORE_DB = 405
RESPCODE_DATABASE_QUERY_ERROR = 409
RESPCODE_DATA_RESOURCE_NOT_FOUND = 415
RESPCODE_INTERNAL_ERROR = 500
RESPCODE_UNABLE_TO_PARSE_JSON = 501
RESPCODE_FILE_NOT_IN_OUTBOUND_LIST = 503
RESPCODE_INCOMPATIBLE_DATA_TYPE = 508
RESPCODE_GADGET_ALREADY_REGISTERED = 509
RESPCODE_INCOMPLETE_REQUEST_PARAMETERS = 512
RESPCODE_INVALID_NOTIFICATION_CODE = 513
RESPCODE_CORE_DB_OPERATION_FAILED = 515
RESPCODE_GADGET_NOT_ACTIVATED = 518
RESPCODE_BRING_TO_FRONT = 602
RESPCODE_ANY_ERROR_MESSAGES_FROM_CORE = 603
RESPCODE_TERMINATE_GADGETS = 604
####################################################################################################
# Classes for Custom Exception
# These are for internal use of the SDK
####################################################################################################
# Represents any Garuda related Exception
class GarudaException(Exception):
pass
# Represents any improper connection state (for connection with Garuda Core)
class ImproperConnectionState(GarudaException):
pass
# Represents any error in sending message/data to Garuda Core
class CannotSend(ImproperConnectionState):
errno = None
error_message = None
def __init__(self, errno=None, error_message=None):
self.errno = errno
self.error_message = error_message
def __str__(self):
        return 'connection terminated! errno: %s, error_message: %s' % (self.errno, self.error_message)
# Represents any error in receiving message/data from Garuda Core
class CannotRecv(ImproperConnectionState):
pass
# Represents connection lost (for connection with Garuda Core)
class ConnectTerminated(ImproperConnectionState):
pass
# Represents any error in creating connection with Garuda Core
class CannotConnect(ImproperConnectionState):
pass
####################################################################################################
# Class representing a Gadget Entity
# The SDK will use this class to represent a gadget instance from the information received from Garuda Core
####################################################################################################
class Gadget:
def __init__(self, gadget_name=None, gadget_id=None, gadget_iconpath=None, gadget_provider=None, gadget_gatewayid=None):
self.gadget_name = gadget_name
self.gadget_id = gadget_id
self.gadget_iconpath = gadget_iconpath
self.gadget_provider = gadget_provider
self.gadget_gatewayid = gadget_gatewayid
def __str__(self):
        result = 'gadget: name=%s\n\tid=%s\n\ticonpath=%s\n\tprovider=%s\n\tgatewayid=%s'
result = result % (self.gadget_name,
self.gadget_id,
self.gadget_iconpath,
self.gadget_provider,
self.gadget_gatewayid)
return result
####################################################################################################
# Class representing connection with Gadget Core
# The SDK will use this class to establish TCP connection with Garuda Core and send/receive data to/from Garuda Core
# This class is for internal use of the SDK
####################################################################################################
class GarudaConnection(threading.Thread):
running = False
handle_read = lambda self, message: None
# Constructor for the Connection class
def __init__(self, addr=_GAURDA_ADDR):
threading.Thread.__init__(self)
self.addr = addr
self.socket = self.open_socket()
self.read_buffer = ''
# The connection thread execution method
def run(self):
self.running = True
while self.running:
self.read()
time.sleep(0.01)
# Connection creation handler method
def open_socket(self):
try:
return socket.create_connection(self.addr)
except socket.error:
raise CannotConnect()
# Connection termination handler method
def close_socket(self):
self.running = False
if self.socket:
self.socket.shutdown(2)
self.socket.close()
self.socket = None
# Handler method for sending data over the connection
def send(self, data):
if not self.socket:
raise ConnectTerminated()
try:
self.socket.sendall(bytes(data, 'UTF-8'))
        except socket.error as what:
            # socket.error is an alias of OSError in Python 3 and is not
            # subscriptable; read .errno/.strerror instead of indexing it.
            if what.errno == 10054 or what.errno == 9:
                raise ConnectTerminated()
            else:
                raise CannotSend(what.errno, what.strerror)
# Handler method for listening data over the connection
def read(self):
read = ''
try:
read = self.socket.recv(_BUFFER_SIZE)
self.read_buffer = self.read_buffer + read.decode('utf-8')
except socket.error:
return
index = self.read_buffer.find('\n')
if index >= 0:
message = self.read_buffer[:index+1]
if message.strip() == _ID_TERMINATE_GADGET_REQ:
self.handle_read(message.strip())
sys.exit(0)
elif message.strip() == '':
pass
else:
self.handle_read(message)
self.read_buffer = self.read_buffer[index+1:]
    # Method that registers the 'Read Callback' listener of the SDK with the GarudaConnection class
def bind(self, func):
self.handle_read = func
####################################################################################################
# Class representing the SDK
# This class handles all communication with Garuda Core and provides APIs for the different Garuda-related actions
# Gadget implementation should instantiate this class for intended Garuda operations
####################################################################################################
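# Typical usage (an illustrative sketch -- the gadget name/ID values and the
# callback body below are assumptions, not values mandated by the protocol):
#
#   def on_garuda_message(message_id, response_code, param):
#       if message_id == ID_LOAD_DATA_REQUEST:
#           pass  # handle the incoming data (the shape of param is an assumption here)
#
#   backend = GarudaClientBackend("MyGadget", "123e4567-e89b-12d3-a456-426614174000")
#   backend.add_lisenter(on_garuda_message)
#   backend.initialize()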
class GarudaClientBackend:
_compatible_gadget_list = []
_listner_callback = lambda self, message_id, error_code, param: None
display_log = lambda self, log_message: None
# Constructor for the SDK class
def __init__(self, gadget_name, gadget_id):
self.gadget_name = gadget_name
self.gadget_id = gadget_id
self.connection = None
self.initialized = False
# This method establishes connection with Garuda core (using the connection class of the SDK) and activates the gadget
# Gadget implementation MUST invoke this method once the SDK is instantiated and callback method is registered
def initialize(self):
if self.initialized:
return
try:
self.connection = GarudaConnection()
except CannotConnect:
self._listner_callback(ID_CONNECTION_TERMINATED, None, None)
return
self._listner_callback(ID_CONNECTION_NOT_INITIALIZED, None, None)
self.connection.bind(self.handle_read)
self.connection.setDaemon(True)
self.connection.start()
self.activate_gadget()
self.initialized = True
# This method gives the initialization status of the SDK
def is_initialized(self):
return self.initialized
# This method registers the callback listener (provided by the gadget implementation) to the SDK
# Gadget implementation MUST invoke this method once the SDK is instantiated
def add_lisenter(self, event):
self._listner_callback = event
# API for sending 'Gadget Activation' request to Garuda Core
# On receiving response from Garuda Core, the SDK invokes the callback listener with ID_ACTIVATE_GADGET_RESPONSE
# Gadget implementation need not call this API as this step has already been executed during SDK initialization step
# However, the callback listener method in the gadget implementation should handle the response message
def activate_gadget(self):
header = dict(id = _ID_ACTIVATE_GADGET_REQ,
version = _REQ_MSG_VERSION)
body = dict(sourceGadgetName = self.gadget_name,
sourceGadgetID = self.gadget_id)
self.handle_request(header, body)
# API for sending 'Get Compatible Gadget List' request to Garuda Core
# On receiving response from Garuda Core, the SDK invokes the callback listener with ID_GET_COMPATIBLE_GADGET_LIST_RESPONSE
def request_compatible_gadget_list(self, file_extension='', file_format=''):
if file_extension.strip() == '' or file_format.strip() == '':
return
header = dict(id=_ID_GET_COMPATIBLE_GADGET_LIST_REQ,
version=_REQ_MSG_VERSION)
body = dict(fileExtension=file_extension,
fileFormat=file_format,
sourceGadgetName=self.gadget_name,
sourceGadgetID=self.gadget_id)
self.handle_request(header, body)
# API for sending 'Send Notification To Core' request to Garuda Core
# Note that for this request, the SDK does not provide any response message, i.e., there is no callback listener invocation as response
def send_notification_to_core(self, gadget, notify_type, message):
header = dict(id=_ID_SEND_NOTIFICATION_TO_CORE_REQ,
version=_REQ_MSG_VERSION)
body = dict(sourceGadgetName=gadget.gadget_name,
sourceGadgetID=gadget.gadget_id,
type=notify_type,
message=message)
self.handle_request(header, body)
# API for sending 'Send Data To Gadget' request to Garuda Core
# On receiving response from Garuda Core, the SDK invokes the callback listener with ID_SEND_DATA_GADGET_RESPONSE
def send_data_to_gadget(self, data, target_gadget_name, target_gadget_id, is_stream=False):
header = dict(id=_ID_SEND_DATA_TO_GADGET_REQ,
version=_REQ_MSG_VERSION)
        isst = bool(is_stream)
body = dict(data=data,
sourceGadgetName=self.gadget_name,
sourceGadgetID=self.gadget_id,
targetGadgetName=target_gadget_name,
targetGadgetID=target_gadget_id,
isStream=isst)
self.handle_request(header, body)
# Handler method for creating and sending request message to Garuda Core
# For internal use of the SDK
def handle_request(self, header, body):
request_message = ""
try:
request_message = json.dumps(dict(header=header, body=body))
except Exception as what:
param = dict(message=what)
self._listner_callback(ID_JSON_DUMPS_ERROR, None, param)
return
self.send_message(request_message)
# API that returns the 'Compatible Gadget list' received from Garuda Core
# The method returns an array of instances of the class Gadget
def get_compatible_gadget_list(self):
return self._compatible_gadget_list
    # Handler method for retrieving the message id received from Garuda Core
# For internal use of the SDK
def get_data_id(self, data):
try:
json_data = json.loads(data)
return json_data["header"]["id"]
        except Exception:
return None
# Method for sending message to Garuda Core (using the connection class of the SDK)
# For internal use of the SDK
def send_message(self, message):
        message_id = self.get_data_id(message)
        self.display_log(str(message_id) + ": ")
        if not self.connection:
            self._listner_callback(ID_CONNECTION_TERMINATED, None, None)
            return
        try:
            if not message.endswith('\n'):
                message = message + '\n'
            self.connection.send(message)
json_message = json.loads(message)
self.display_log(json.dumps(json_message, indent=4))
self.print_log(message)
except CannotSend as cannot_send_error:
param = dict(message=cannot_send_error)
self._listner_callback(ID_CONNECTION_TERMINATED, None, param)
except ConnectTerminated as what:
param = dict(message=what)
self._listner_callback(ID_CONNECTION_TERMINATED, None, param)
except Exception:
pass
# The 'Read Callback' method registered to the connection class of the SDK
# This method handles the messages received from Garuda Core
# For internal use of the SDK
def handle_read(self, data):
self.print_log(data)
# Handle stop message
if data == _ID_TERMINATE_GADGET_REQ:
param = dict(message=MSG_REMOTE_HOST_CLOSED)
self._listner_callback(ID_CONNECTION_TERMINATED, None, param)
return
        data_id = self.get_data_id(data)
        if data_id is None:
            return
        json_message = json.loads(data)
        self.display_log(data_id + ": ")
self.display_log(json.dumps(json_message, indent=4))
if data_id == _ID_ACTIVATE_GADGET_RESP:
self.parser_activate_gadget(data)
elif data_id == _ID_GET_COMPATIBLE_GADGET_LIST_RESP:
self.parser_compatible_gadget_list(data)
elif data_id == _ID_SEND_DATA_TO_GADGET_RESP:
self.parser_send_data_to_gadget(data)
elif data_id == _ID_LOAD_DATA_REQ:
self.parser_load_data(data)
elif data_id == _ID_LOAD_GADGET_REQ:
self.parser_load_gadget(data)
elif data_id == _ID_SEND_NOTIFICATION_TO_GADGET_REQ:
self.parser_send_notification_to_gadget(data)
else:
pass
# Handler method for the 'Activation Response' message
# On success, the method invokes the callback listener of the gadget with ID_ACTIVATE_GADGET_RESPONSE
# For internal use of the SDK
def parser_activate_gadget(self, data):
try:
json_data = json.loads(data)
            response_code = json_data["body"]["result"]
            self._listner_callback(ID_ACTIVATE_GADGET_RESPONSE, response_code, None)
except Exception as what:
param = dict(message=what)
self._listner_callback(ID_JSON_PARSE_ERROR, None, param)
# Handler method for the 'Compatible Gadget List Response' message
# On success, the method invokes the callback listener of the gadget with ID_GET_COMPATIBLE_GADGET_LIST_RESPONSE
# For internal use of the SDK
def parser_compatible_gadget_list(self, data):
self._compatible_gadget_list = []
gadgets = []
response_code = None
try:
json_data = json.loads(data)
gadgets = json_data["body"]["gadgets"]
if not gadgets:
gadgets = []
response_code = json_data["body"]["result"]
except Exception as what:
param = dict(message=what)
self._listner_callback(ID_JSON_PARSE_ERROR, None, param)
return
for gadget in gadgets:
gdgt = Gadget(gadget.get("name", None),
gadget.get("ID", None),
gadget.get("iconPath", None),
gadget.get("provider", None),
gadget.get("gateway_id", None))
self._compatible_gadget_list.append(gdgt)
self._listner_callback(ID_GET_COMPATIBLE_GADGET_LIST_RESPONSE, response_code, None)
# Handler method for the 'Send Data To Gadget Response' message
# On success, the method invokes the callback listener of the gadget with ID_SEND_DATA_GADGET_RESPONSE
# For internal use of the SDK
def parser_send_data_to_gadget(self, data):
try:
json_data = json.loads(data)
response_code = json_data["body"]["result"]
if response_code == RESPCODE_SUCCESS:
gadget = Gadget(json_data["body"]["targetGadgetName"],
json_data["body"]["targetGadgetID"],
None,
None,
None)
self._listner_callback(ID_SEND_DATA_GADGET_RESPONSE, response_code, gadget)
else:
self._listner_callback(ID_SEND_DATA_GADGET_RESPONSE, response_code, None)
except Exception as what:
param = dict(message=what)
self._listner_callback(ID_JSON_PARSE_ERROR, None, param)
# Handler method for the 'Load Data' request from Garuda Core
# On success, the method invokes the callback listener of the gadget with any of -
# ID_LOAD_DATA_STREAM_REQUEST when the loadable data is stream data
# ID_LOAD_DATA_REQUEST when the loadable data is not streamed
# For internal use of the SDK
def parser_load_data(self, data):
try:
json_data = json.loads(data)
gadget = Gadget(json_data["body"]["originGadgetName"],
json_data["body"]["originGadgetID"],
None,
None,
None)
is_stream = json_data["body"]["isStream"]
gadget_data = json_data["body"]["data"]
param = dict(gadget=gadget, data=gadget_data)
if is_stream:
self._listner_callback(ID_LOAD_DATA_STREAM_REQUEST, None, param)
else:
self._listner_callback(ID_LOAD_DATA_REQUEST, None, param)
except Exception as what:
param = dict(message=what)
self._listner_callback(ID_JSON_PARSE_ERROR, None, param)
# Handler method for the 'Load Gadget' request from Garuda Core
# On success, the method invokes the callback listener of the gadget with ID_LOAD_GADGET_REQUEST
# For internal use of the SDK
def parser_load_gadget(self, data):
try:
json_data = json.loads(data)
gadget = Gadget(json_data["body"]["loadableGadgetName"],
json_data["body"]["loadableGadgetID"],
None,
None,
None)
loadable_gadget_source_path = json_data["body"]["loadableGadgetSourcePath"]
param = dict(gadget=gadget, path=loadable_gadget_source_path)
self._listner_callback(ID_LOAD_GADGET_REQUEST, None, param)
except Exception:
pass
# Handler method for the 'Send notification To Gadget' request from Garuda Core
# On success, the method invokes the callback listener of the gadget with ID_SEND_NOTIFICATION_TO_GADGET_REQUEST
# For internal use of the SDK
def parser_send_notification_to_gadget(self, data):
try:
json_data = json.loads(data)
targetGadgetName = json_data["body"]["targetGadgetName"]
targetGadgetId = json_data["body"]["targetGadgetID"]
if targetGadgetName == self.gadget_name and targetGadgetId == self.gadget_id:
gadget = Gadget(targetGadgetName,
targetGadgetId,
None,
None,
None)
notify_type = json_data["body"]["type"]
message = json_data["body"]["message"]
param = dict(message=message, gadget=gadget)
self._listner_callback(ID_SEND_NOTIFICATION_TO_GADGET_REQUEST, notify_type, param)
except Exception as what:
param = dict(message=what)
self._listner_callback(ID_JSON_PARSE_ERROR, None, param)
# Handler method for printing log message
# For internal use of the SDK
    def print_log(self, message):
        # Print Log
        logc = []
        log_title = self.get_data_id(message)
        if not log_title:
            print('END')
            return
        pad = max(80 - len(log_title), 2)
        log_left = log_right = int(pad / 2)
        logc.append('=' * (log_left - 1) + ' ' + log_title + ' ' + '=' * (log_right - 1))
        content = json.dumps(json.loads(message), indent=4)
        logc.append(content)
        logc.append('=' * 80)
        logc.append('\n')
        # Emit the assembled log block.
        print('\n'.join(logc))
# API for sending 'Load Data Response' to Garuda Core
# Gadget implementation should invoke this API for sending response of ID_LOAD_DATA_REQUEST and ID_LOAD_DATA_STREAM_REQUEST
# response_code:
# 200 - Success.
# 415 - Data Resource Not found.
# 500 - Internal Error.
# 508 - Incompatible data type.
# 512 - Incomplete Request parameters.
def response_load_data(self, target_gadget_name, target_gadget_id, response_code):
if not response_code:
return
header = dict(id=_ID_LOAD_DATA_RESP,
version=_RESP_MSG_VERSION)
body = dict(result=response_code,
sourceGadgetName=self.gadget_name,
sourceGadgetID=self.gadget_id,
originatorID=self.gadget_id,
targetGadgetName=target_gadget_name,
targetGadgetID=target_gadget_id)
self.handle_request(header, body)
# API for sending 'Load Gadget Response' to Garuda Core
# Gadget implementation should invoke this API for sending response of ID_LOAD_GADGET_REQUEST
# response_code:
# 200 - Success.
# 500 - Internal Error.
# 512 - Incomplete Request parameters.
def response_load_gadget(self, loaded_gadget_name, loaded_gadget_id, response_code):
if not response_code:
return
header = dict(id=_ID_LOAD_GADGET_RESP,
version=_RESP_MSG_VERSION)
body = dict(result=response_code,
sourceGadgetName=self.gadget_name,
sourceGadgetID=self.gadget_id,
originatorID=self.gadget_id,
loadedGadgetName=loaded_gadget_name,
loadedGadgetID=loaded_gadget_id)
self.handle_request(header, body)
# API for sending 'Send Notification to Gadget Response' to Garuda Core
# Gadget implementation should invoke this API for sending response of ID_SEND_NOTIFICATION_TO_GADGET_REQUEST
# response_code:
# 200 - Success.
# 500 - Internal Error.
# 501 - Unable to parse JSON.
# 503 - Invalid Request Message JSON.
# 512 - Incomplete Request parameters.
# 513 - Invalid Notification Code.
def response_send_notification_to_gadget(self, source_gadget_name, source_gadget_id, response_code):
if not response_code:
return
header = dict(id=_ID_SEND_NOTIFICATION_TO_GADGET_RESP,
version=_RESP_MSG_VERSION)
body = dict(result=response_code,
sourceGadgetName=source_gadget_name,
sourceGadgetID=source_gadget_id)
self.handle_request(header, body)
# API for terminating connection with Garuda Core (using the connection class of the SDK)
def stop_backend(self):
if self.connection:
self.connection.close_socket()
self.connection = None
self.initialized = False
sys.exit()
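
# Hedged sketch of a gadget-side callback listener (illustrative, not part of
# the SDK); the ID_* constants are the module-level message identifiers the
# backend dispatches with above.
def _example_listener(message_id, error_code, param):
    if message_id == ID_CONNECTION_TERMINATED:
        print('Connection with Garuda Core terminated:', param)
    elif message_id == ID_GET_COMPATIBLE_GADGET_LIST_RESPONSE:
        print('Compatible gadget list received, response code:', error_code)
    else:
        print('Unhandled message:', message_id, error_code, param)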
| StarcoderdataPython |
4821375 | import csv
import sys

# Use csv.reader rather than a naive str.split(',') so that quoted fields
# containing commas are counted correctly.
with open(sys.argv[1], newline='') as csvfile:
    reader = csv.reader(csvfile)
    header = next(reader)
    print(len(header))
    first_row = next(reader)
    print(len(first_row))
| StarcoderdataPython |
1737070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Void Copyright NO ONE
#
# Void License
#
# The code belongs to no one. Do whatever you want.
# Forget about boring open source license.
#
# AEAD cipher for shadowsocks
#
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_int, create_string_buffer, byref, c_void_p
import hashlib
from struct import pack, unpack
from shadowsocks.crypto import util
from shadowsocks.crypto import hkdf
from shadowsocks.common import ord, chr
EVP_CTRL_GCM_SET_IVLEN = 0x9
EVP_CTRL_GCM_GET_TAG = 0x10
EVP_CTRL_GCM_SET_TAG = 0x11
EVP_CTRL_CCM_SET_IVLEN = EVP_CTRL_GCM_SET_IVLEN
EVP_CTRL_CCM_GET_TAG = EVP_CTRL_GCM_GET_TAG
EVP_CTRL_CCM_SET_TAG = EVP_CTRL_GCM_SET_TAG
EVP_CTRL_AEAD_SET_IVLEN = EVP_CTRL_GCM_SET_IVLEN
EVP_CTRL_AEAD_SET_TAG = EVP_CTRL_GCM_SET_TAG
EVP_CTRL_AEAD_GET_TAG = EVP_CTRL_GCM_GET_TAG
AEAD_MSG_LEN_UNKNOWN = 0
AEAD_CHUNK_SIZE_LEN = 2
AEAD_CHUNK_SIZE_MASK = 0x3FFF
CIPHER_NONCE_LEN = {
'aes-128-gcm': 12,
'aes-192-gcm': 12,
'aes-256-gcm': 12,
'aes-128-ocb': 12, # requires openssl 1.1
'aes-192-ocb': 12,
'aes-256-ocb': 12,
'chacha20-poly1305': 12,
'chacha20-ietf-poly1305': 12,
'xchacha20-ietf-poly1305': 24,
'sodium:aes-256-gcm': 12,
}
CIPHER_TAG_LEN = {
'aes-128-gcm': 16,
'aes-192-gcm': 16,
'aes-256-gcm': 16,
'aes-128-ocb': 16, # requires openssl 1.1
'aes-192-ocb': 16,
'aes-256-ocb': 16,
'chacha20-poly1305': 16,
'chacha20-ietf-poly1305': 16,
'xchacha20-ietf-poly1305': 16,
'sodium:aes-256-gcm': 16,
}
SUBKEY_INFO = b"ss-subkey"
libsodium = None
sodium_loaded = False
def load_sodium(path=None):
"""
Load libsodium helpers for nonce increment
:return: None
"""
global libsodium, sodium_loaded
libsodium = util.find_library('sodium', 'sodium_increment',
'libsodium', path)
if libsodium is None:
print('load libsodium failed with path %s' % path)
return
if libsodium.sodium_init() < 0:
libsodium = None
print('sodium init failed')
return
libsodium.sodium_increment.restype = c_void_p
libsodium.sodium_increment.argtypes = (
c_void_p, c_int
)
sodium_loaded = True
return
def nonce_increment(nonce, nlen):
"""
Increase nonce by 1 in little endian
From libsodium sodium_increment():
for (; i < nlen; i++) {
c += (uint_fast16_t) n[i];
n[i] = (unsigned char) c;
c >>= 8;
}
:param nonce: string_buffer nonce
:param nlen: nonce length
    :return: nonce incremented by 1
"""
c = 1
i = 0
# n = create_string_buffer(nlen)
while i < nlen:
c += ord(nonce[i])
nonce[i] = chr(c & 0xFF)
c >>= 8
i += 1
return # n.raw
class AeadCryptoBase(object):
"""
Handles basic aead process of shadowsocks protocol
TCP Chunk (after encryption, *ciphertext*)
+--------------+---------------+--------------+------------+
| *DataLen* | DataLen_TAG | *Data* | Data_TAG |
+--------------+---------------+--------------+------------+
| 2 | Fixed | Variable | Fixed |
+--------------+---------------+--------------+------------+
UDP (after encryption, *ciphertext*)
    +--------+-----------+-----------+
    | NONCE  |  *Data*   | Data_TAG  |
    +--------+-----------+-----------+
    | Fixed  | Variable  |   Fixed   |
    +--------+-----------+-----------+
"""
def __init__(self, cipher_name, key, iv, op, crypto_path=None):
self._op = int(op)
self._salt = iv
self._nlen = CIPHER_NONCE_LEN[cipher_name]
self._nonce = create_string_buffer(self._nlen)
self._tlen = CIPHER_TAG_LEN[cipher_name]
crypto_hkdf = hkdf.Hkdf(iv, key, algorithm=hashlib.sha1)
self._skey = crypto_hkdf.expand(info=SUBKEY_INFO, length=len(key))
# _chunk['mlen']:
# -1, waiting data len header
# n, n > 0, waiting data
self._chunk = {'mlen': AEAD_MSG_LEN_UNKNOWN, 'data': b''}
self.encrypt_once = self.aead_encrypt
self.decrypt_once = self.aead_decrypt
# load libsodium for nonce increment
if not sodium_loaded:
crypto_path = dict(crypto_path) if crypto_path else dict()
path = crypto_path.get('sodium', None)
load_sodium(path)
def nonce_increment(self):
"""
AEAD ciphers need nonce to be unique per key
TODO: cache and check unique
:return: None
"""
global libsodium, sodium_loaded
if sodium_loaded:
libsodium.sodium_increment(byref(self._nonce), c_int(self._nlen))
else:
nonce_increment(self._nonce, self._nlen)
# print("".join("%02x" % ord(b) for b in self._nonce))
def cipher_ctx_init(self):
"""
Increase nonce to make it unique for the same key
:return: None
"""
self.nonce_increment()
def aead_encrypt(self, data):
"""
Encrypt data with authenticate tag
:param data: plain text
:return: str [payload][tag] cipher text with tag
"""
raise Exception("Must implement aead_encrypt method")
def encrypt_chunk(self, data):
"""
Encrypt a chunk for TCP chunks
:param data: str
:return: str [len][tag][payload][tag]
"""
plen = len(data)
# l = AEAD_CHUNK_SIZE_LEN + plen + self._tlen * 2
# network byte order
ctext = [self.aead_encrypt(pack("!H", plen & AEAD_CHUNK_SIZE_MASK))]
if len(ctext[0]) != AEAD_CHUNK_SIZE_LEN + self._tlen:
self.clean()
raise Exception("size length invalid")
ctext.append(self.aead_encrypt(data))
if len(ctext[1]) != plen + self._tlen:
self.clean()
raise Exception("data length invalid")
return b''.join(ctext)
def encrypt(self, data):
"""
Encrypt data, for TCP divided into chunks
For UDP data, call aead_encrypt instead
:param data: str data bytes
:return: str encrypted data
"""
plen = len(data)
if plen <= AEAD_CHUNK_SIZE_MASK:
ctext = self.encrypt_chunk(data)
return ctext
ctext = []
while plen > 0:
mlen = plen if plen < AEAD_CHUNK_SIZE_MASK \
else AEAD_CHUNK_SIZE_MASK
c = self.encrypt_chunk(data[:mlen])
ctext.append(c)
data = data[mlen:]
plen -= mlen
return b''.join(ctext)
def aead_decrypt(self, data):
"""
Decrypt data and authenticate tag
:param data: str [len][tag][payload][tag] cipher text with tag
:return: str plain text
"""
raise Exception("Must implement aead_decrypt method")
def decrypt_chunk_size(self, data):
"""
Decrypt chunk size
:param data: str [size][tag] encrypted chunk payload len
:return: (int, str) msg length and remaining encrypted data
"""
if self._chunk['mlen'] > 0:
return self._chunk['mlen'], data
data = self._chunk['data'] + data
self._chunk['data'] = b""
hlen = AEAD_CHUNK_SIZE_LEN + self._tlen
if hlen > len(data):
self._chunk['data'] = data
return 0, b""
plen = self.aead_decrypt(data[:hlen])
plen, = unpack("!H", plen)
if plen & AEAD_CHUNK_SIZE_MASK != plen or plen <= 0:
self.clean()
raise Exception('Invalid message length')
return plen, data[hlen:]
def decrypt_chunk_payload(self, plen, data):
"""
        Decrypt an encrypted msg payload
:param plen: int payload length
:param data: str [payload][tag][[len][tag]....] encrypted data
:return: (str, str) plain text and remaining encrypted data
"""
data = self._chunk['data'] + data
if len(data) < plen + self._tlen:
self._chunk['mlen'] = plen
self._chunk['data'] = data
return b"", b""
self._chunk['mlen'] = AEAD_MSG_LEN_UNKNOWN
self._chunk['data'] = b""
plaintext = self.aead_decrypt(data[:plen + self._tlen])
if len(plaintext) != plen:
self.clean()
raise Exception("plaintext length invalid")
return plaintext, data[plen + self._tlen:]
def decrypt_chunk(self, data):
"""
Decrypt a TCP chunk
:param data: str [len][tag][payload][tag][[len][tag]...] encrypted msg
:return: (str, str) decrypted msg and remaining encrypted data
"""
plen, data = self.decrypt_chunk_size(data)
if plen <= 0:
return b"", b""
return self.decrypt_chunk_payload(plen, data)
def decrypt(self, data):
"""
Decrypt data for TCP data divided into chunks
For UDP data, call aead_decrypt instead
:param data: str
:return: str
"""
ptext = []
pnext, left = self.decrypt_chunk(data)
ptext.append(pnext)
while len(left) > 0:
pnext, left = self.decrypt_chunk(left)
ptext.append(pnext)
return b''.join(ptext)
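
# Hedged sketch (illustrative only, NOT part of shadowsocks): a minimal
# concrete subclass that wires aead_encrypt/aead_decrypt to AES-GCM via the
# third-party 'cryptography' package. The real backends are the OpenSSL and
# libsodium implementations; this only shows how the chunk framing above
# composes with a working AEAD primitive.
class ExampleAesGcmCrypto(AeadCryptoBase):

    def aead_encrypt(self, data):
        # AESGCM.encrypt appends the 16-byte tag, matching [payload][tag].
        from cryptography.hazmat.primitives.ciphers.aead import AESGCM
        ciphertext = AESGCM(self._skey).encrypt(bytes(self._nonce), data, None)
        self.cipher_ctx_init()  # bump the nonce after every AEAD operation
        return ciphertext

    def aead_decrypt(self, data):
        from cryptography.hazmat.primitives.ciphers.aead import AESGCM
        plaintext = AESGCM(self._skey).decrypt(bytes(self._nonce), data, None)
        self.cipher_ctx_init()
        return plaintext


def example_roundtrip():
    """Round-trip sketch: 32-byte master key and salt give an AES-256 subkey."""
    import os
    key, salt = os.urandom(32), os.urandom(32)
    enc = ExampleAesGcmCrypto('aes-256-gcm', key, salt, 1)
    dec = ExampleAesGcmCrypto('aes-256-gcm', key, salt, 0)
    return dec.decrypt(enc.encrypt(b'hello')) == b'hello'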
def test_nonce_increment():
buf = create_string_buffer(12)
print("".join("%02x" % ord(b) for b in buf))
nonce_increment(buf, 12)
nonce_increment(buf, 12)
nonce_increment(buf, 12)
nonce_increment(buf, 12)
print("".join("%02x" % ord(b) for b in buf))
for i in range(256):
nonce_increment(buf, 12)
print("".join("%02x" % ord(b) for b in buf))
if __name__ == '__main__':
load_sodium()
test_nonce_increment()
| StarcoderdataPython |
16637 | <gh_stars>1-10
# Copyright (c) The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE.
import warnings
import numpy as np
from sklearn.utils.extmath import randomized_svd
from .bucket_elimination import BucketElimination
from .factor import Factor, default_factor_name, product_over_
from .graphical_model import GraphicalModel
from .mini_bucket_elimination import MiniBucketElimination
class BucketRenormalization(MiniBucketElimination):
"""Bucket Renormalization algorithm."""
def __init__(self, model: GraphicalModel, **kwargs):
super(BucketRenormalization, self).__init__(model, **kwargs)
self._initialize_projectors()
def _initialize_projectors(self):
replications = dict()
working_model = self.renormalized_model.copy()
for var in self.elimination_order:
main_rvar = self.variables_replicated_from_[var][-1]
main_projectors = []
for (i, rvar) in enumerate(self.variables_replicated_from_[var]):
if i < len(self.variables_replicated_from_[var]) - 1:
fac = product_over_(*working_model.get_adj_factors(rvar))
replicated_projector = self._get_svd_projector(fac, rvar)
replicated_projector.name = "RP_{}".format(rvar)
projector = replicated_projector.copy()
projector.variables = [main_rvar]
projector.name = "P_{}".format(rvar)
replications[rvar] = (
main_rvar, replicated_projector, projector)
main_projectors.append(projector)
working_model.add_factors_from(
[replicated_projector.copy(), projector.copy()])
self.renormalized_model.add_factors_from(
[replicated_projector, projector])
working_model.contract_variable(rvar)
self.replications = replications
def _optimize(self):
for var in reversed(self.renormalized_elimination_order):
if var in self.replications.keys():
mb_var, projector, mb_projector = self.replications[var]
self.renormalized_model.remove_factors_from(
[projector, mb_projector])
be = BucketElimination(self.renormalized_model)
marginal_factor = be.get_marginal_factor(
elimination_order_method="given",
elimination_order=self.renormalized_elimination_order,
exception_variables=[var, mb_var],
)
new_mb_projector = self._get_svd_projector(marginal_factor,
mb_var)
new_projector = Factor(
name=default_factor_name(),
variables=[var],
log_values=new_mb_projector.log_values,
)
self.renormalized_model.add_factors_from(
[new_projector, new_mb_projector])
self.replications[var] = (
mb_var, new_projector, new_mb_projector)
    def run(self, max_iter=10):
        """Runs the renormalization optimization for ``max_iter`` iterations."""
for _ in range(max_iter):
self._optimize()
def get_log_z(self):
"""Calculates log Z."""
be = BucketElimination(self.renormalized_model)
logZ = self.base_logZ
logZ += be.run(
elimination_order_method="given",
elimination_order=self.renormalized_elimination_order
)
return logZ
def _get_svd_projector(self, factor, variable):
factor.transpose_by_(
[variable, *sorted(set(factor.variables) - set([variable]))])
flattened_factor_log_values = factor.log_values.reshape(
factor.get_cardinality_for_(variable), -1
)
max_log = np.max(flattened_factor_log_values)
if np.isnan(max_log):
warnings.warn('Got nan in flattened_factor_log_values')
np.nan_to_num(flattened_factor_log_values, copy=False, nan=-np.inf)
max_log = np.max(flattened_factor_log_values)
if not np.isfinite(max_log):
warnings.warn('Got infinite value in flattened_factor_log_values')
max_log = 0.0
flattened_factor_values = np.exp(flattened_factor_log_values - max_log)
U, _, _ = randomized_svd(flattened_factor_values, n_components=1)
# U,_,_ = np.linalg.svd(flattened_factor_values)
u = U[:, 0]
if np.sum(u) < 0:
u = -u
u[u < 0] = 0.0
u /= np.linalg.norm(u)
return Factor(name=default_factor_name(), variables=[variable],
values=u)
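
# Hedged usage sketch (illustrative, not part of the library): estimate log Z
# for a GraphicalModel that was built elsewhere with inferlo's factor API.
def _example_usage(model: GraphicalModel) -> float:
    algo = BucketRenormalization(model)
    algo.run(max_iter=10)
    return algo.get_log_z()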
| StarcoderdataPython |
3247197 | <reponame>dfm/sup
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
__all__ = ["manager"]
from flask_script import Manager
from sup import create_app
from sup.manage import (
CreateTablesCommand, DropTablesCommand,
CreateUserCommand,
SendSupCommand,
)
manager = Manager(create_app())
manager.add_command("create_tables", CreateTablesCommand())
manager.add_command("drop_tables", DropTablesCommand())
manager.add_command("create_user", CreateUserCommand())
manager.add_command("send_sup", SendSupCommand())
if __name__ == "__main__":
manager.run()
| StarcoderdataPython |
1692357 | # -*- coding: utf-8 -*-
import json
import scrapy
from robot.items import ProxyItem, ProxyItemLoader
from robot.processors import RemoveTags
from scrapy_redis.spiders import RedisSpider
class ProxySpider(RedisSpider):
name = 'proxy'
allowed_domains = ['raw.githubusercontent.com']
start_urls = ['https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list']
def parse(self, response):
"""
解析ip代理
:param response:
:return:
"""
proxy_list = RemoveTags(response.text).splitlines()
for proxy in proxy_list:
item_loader = ProxyItemLoader(item=ProxyItem(), response=response)
proxy = json.loads(proxy)
item_loader.add_value("port", proxy.get("port", ""))
item_loader.add_value("anonymity", proxy.get("anonymity", ""))
item_loader.add_value("proxy_from", proxy.get("from", ""))
item_loader.add_value("proxy_type", proxy.get("type", ""))
item_loader.add_value("response_time", proxy.get("response_time", ""))
item_loader.add_value("host", proxy.get("host", ""))
item_loader.add_value("country", proxy.get("country", ""))
item_loader.add_value("export_address", proxy.get("export_address", ""))
proxy_item = item_loader.load_item()
yield proxy_item | StarcoderdataPython |
3303158 | <filename>metadata_driver_elasticsearch/utils.py<gh_stars>0
import logging
from datetime import datetime
import metadata_driver_elasticsearch.indexes as index
logger = logging.getLogger(__name__)
AND = 'must'
OR = 'should'
GT = 'gte'
LT = 'lte'
BOOL = 'bool'
RANGE = 'range'
MATCH = 'match'
def query_parser(query):
query_result = {}
text = None
    for key in query:
        if key == 'price':
            query_result = create_price_query(query, query_result)
        elif key == 'license':
            query_result = create_query(query['license'], index.license, query_result, OR, MATCH)
        elif key == 'categories':
            query_result = create_query(query['categories'], index.categories, query_result, OR,
                                        MATCH)
        elif key == 'tags':
            query_result = create_query(query['tags'], index.tags, query_result, OR,
                                        MATCH)
        elif key == 'type':
            query_result = create_query(query['type'], index.service_type, query_result, AND, MATCH)
        elif key == 'updateFrequency':
            query_result = create_query(query['updateFrequency'], index.updated_frequency,
                                        query_result, OR, MATCH)
        elif key == 'created':
            query_result = create_created_query(query, query_result, 'created')
        elif key == 'dateCreated':
            query_result = create_created_query(query, query_result, 'dateCreated')
        elif key == 'datePublished':
            query_result = create_created_query(query, query_result, 'datePublished')
        elif key == 'sample':
            query_result = create_query(['sample'], index.sample, query_result, AND, MATCH)
        elif key == 'did':
            query_result = create_query(query['did'], index.did, query_result, OR, MATCH)
        elif key == 'text':
            text = query['text'][0]
        else:
            logger.error('The key %s is not supported by MetadataDB.' % key)
            raise Exception('The key %s is not supported by MetadataDB.' % key)
def create_query(value, index, query, operator, query_type):
    # Ensure the bool/operator clause list exists, then append one clause per value.
    clauses = query.setdefault(BOOL, {}).setdefault(operator, [])
    for v in value:
        clauses.append({query_type: {index: v}})
    return query
def create_price_query(query, query_result):
if len(query['price']) > 2:
logger.info('You are sending more values than needed.')
elif len(query['price']) == 0:
logger.info('You are not sending any value.')
elif len(query['price']) == 1:
query_result = create_query([{GT: 0, LT: query['price'][0]}], index.price, query_result,
AND, RANGE)
else:
query_result = create_query([{GT: query['price'][0], LT: query['price'][1]}], index.price,
query_result, AND, RANGE)
return query_result
def create_created_query(query, query_result, field):
    if query[field][0] is None or query[field][1] is None:
        logger.warning("You should provide two dates in your query.")
    if query[field][0] > query[field][1]:
        logger.warning("Your second date is smaller than the first.")
    date_range = [{GT: datetime.strptime(query[field][0], '%Y-%m-%dT%H:%M:%SZ'),
                   LT: datetime.strptime(query[field][1], '%Y-%m-%dT%H:%M:%SZ')}]
    if field == 'created':
        query_result = create_query(date_range, index.created, query_result, AND, RANGE)
    elif field == 'dateCreated':
        query_result = create_query(date_range, index.dateCreated, query_result, AND, RANGE)
    elif field == 'datePublished':
        query_result = create_query(date_range, index.datePublished, query_result, AND, RANGE)
    else:
        logger.info('The key %s is not supported in the created query', field)
    return query_result
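
# Hedged sketch (illustrative values, not part of the driver): turning a
# metadata-style query dict into the Elasticsearch bool query plus the
# optional full-text term.
def _example_query_parser():
    query = {'price': [0, 10], 'license': ['CC0'], 'text': ['weather']}
    es_query, text = query_parser(query)
    return es_query, text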
| StarcoderdataPython |
3348771 | <reponame>PredaaA/JackCogs
# Copyright 2018-2020 <NAME> (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from abc import ABC, ABCMeta, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Callable, TypeVar
import rlapi
from discord.ext.commands import CogMeta
from redbot.core.config import Config
from .image import RLStatsImageTemplate
T = TypeVar("T")
class CogAndABCMeta(CogMeta, ABCMeta):
"""
This allows the metaclass used for proper type detection to
coexist with discord.py's metaclass.
"""
class MixinMeta(ABC):
"""Base class for well behaved type hint detection with composite class."""
def __init__(self, *_args: Any) -> None:
self.loop: asyncio.AbstractEventLoop
self._executor: ThreadPoolExecutor
self.config: Config
self.rlapi_client: rlapi.Client
self.cog_data_path: Path
self.bundled_data_path: Path
self.competitive_template: RLStatsImageTemplate
self.extramodes_template: RLStatsImageTemplate
@abstractmethod
async def _run_in_executor(
self, func: Callable[..., T], *args: Any, **kwargs: Any
) -> T:
raise NotImplementedError()
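
# Hedged sketch (illustrative, not from this repo): a composite cog combines
# the mixin with discord.py's Cog through the metaclass above and fills in the
# abstract executor hook, e.g.:
#
#     class RLStats(MixinMeta, commands.Cog, metaclass=CogAndABCMeta):
#         async def _run_in_executor(self, func, *args, **kwargs):
#             return await self.loop.run_in_executor(
#                 self._executor, functools.partial(func, *args, **kwargs)
#             )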
| StarcoderdataPython |
102752 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls.i18n import i18n_patterns
admin.autodiscover()
urlpatterns = [
url(r'^accounts/', include('allauth.urls')),
url(r'^', include('hes.urls', namespace="hes")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these urls in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
# Mezzanine
# Add the urlpatterns for any custom Django applications here.
# You can also change the ``home`` view to add your own functionality
# to the project's homepage.
from django.conf.urls import patterns
from mezzanine.conf import settings as msettings
from mezzanine.core.views import direct_to_template
urlpatterns += i18n_patterns("",
# Change the admin prefix here to use an alternate URL for the
# admin interface, which would be marginally more secure.
("^admin/", include(admin.site.urls)),
)
if msettings.USE_MODELTRANSLATION:
urlpatterns += patterns('',
url('^i18n/$', 'django.views.i18n.set_language', name='set_language'),
)
urlpatterns += patterns('',
# Cartridge URLs.
("^shop/", include("cartridge.shop.urls")),
url("^account/orders/$", "cartridge.shop.views.order_history",
name="shop_order_history"),
url("^$", direct_to_template, {"template": "hes/index.html"}, name="home"),
# If you'd like more granular control over the patterns in
# ``mezzanine.urls``, go right ahead and take the parts you want
# from it, and use them directly below instead of using
# ``mezzanine.urls``.
("^", include("mezzanine.urls")),
)
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error" | StarcoderdataPython |
67390 | <gh_stars>1000+
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""run.py is a utility for running a program.
It can perform code signing, forward arguments to the program, and return the
program's error code.
"""
import argparse
import os
import platform
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--execdir', type=str, required=True)
parser.add_argument('--codesign_identity', type=str, required=False, default=None)
parser.add_argument('--env', type=str, nargs='*', required=False, default=dict())
parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
args = parser.parse_args()
commandLine = args.command
# HACK:
# If an argument is a file that ends in `.tmp.exe`, assume it is the name
# of an executable generated by a test file. We call these test-executables
# below. This allows us to do custom processing like codesigning test-executables.
# It's also possible for there to be no such executable, for example in the case
# of a .sh.cpp test.
isTestExe = lambda exe: exe.endswith('.tmp.exe') and os.path.exists(exe)
# Do any necessary codesigning of test-executables found in the command line.
if args.codesign_identity:
for exe in filter(isTestExe, commandLine):
subprocess.check_call(['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe], env={})
# Extract environment variables into a dictionary
env = {k : v for (k, v) in map(lambda s: s.split('=', 1), args.env)}
if platform.system() == 'Windows':
# Pass some extra variables through on Windows:
# COMSPEC is needed for running subprocesses via std::system().
if 'COMSPEC' in os.environ:
env['COMSPEC'] = os.environ.get('COMSPEC')
# TEMP is needed for placing temp files in a sensible directory.
if 'TEMP' in os.environ:
env['TEMP'] = os.environ.get('TEMP')
# Run the command line with the given environment in the execution directory.
return subprocess.call(commandLine, cwd=args.execdir, env=env, shell=False)
if __name__ == '__main__':
exit(main())
| StarcoderdataPython |
1604506 | <reponame>faezs/plotly.py
import _plotly_utils.basevalidators
class FlatshadingValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name='flatshading', parent_name='mesh3d', **kwargs
):
super(FlatshadingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='style',
**kwargs
)
| StarcoderdataPython |
1764094 | #!/usr/bin/env python
if __name__ == '__main__' and __package__ is None:
print "?"
| StarcoderdataPython |
1674165 | <reponame>SweydAbdul/estudos-python
import os
import os.path
for a in os.listdir('.'):
if os.path.isdir(a):
print(f'{a}/')
elif os.path.isfile(a):
print(f'{a}') | StarcoderdataPython |
23469 | from functools import lru_cache
from itertools import product
from pathlib import Path
from typing import Optional, List, Tuple
from pydantic import validate_arguments
import pyhmmer
import requests as r
from yarl import URL
from sadie.typing import Species, Chain, Source
class G3:
"""API Wrapper with OpenAPI found here https://g3.jordanrwillis.com/docs"""
# TODO: most likely make this an import
data_folder = Path(__file__).parent.parent / "data"
segments = {"V", "D", "J"}
chains = {"H", "K", "L"}
def __init__(self):
self.base_url = URL("https://g3.jordanrwillis.com/api/v1")
self.not_usable_species = [
"pig",
"cow",
"cat", # missing L
"alpaca", # missing L and K
"rhesus", # TODO: breaks tests; fix and fall back on numbering for now
"dog", # TODO: viable but does not match. Need to check if diff species of dog from G3
]
self.alphabet = pyhmmer.easel.Alphabet.amino()
self.builder = pyhmmer.plan7.Builder(self.alphabet, architecture="hand")
self.background = pyhmmer.plan7.Background(self.alphabet)
@property
@lru_cache(maxsize=1)
def sources(self):
resp = r.get(self.base_url)
resp.raise_for_status()
return resp.json()["components"]["schemas"]["SourceName"]["enum"]
@property
@lru_cache(maxsize=1)
def species(self):
resp = r.get(self.base_url)
resp.raise_for_status()
species = resp.json()["components"]["schemas"]["CommonName"]["enum"]
return [single_species for single_species in species if single_species not in self.not_usable_species]
@lru_cache(maxsize=None)
@validate_arguments
def __get_gene_resp(
self,
source: Source = "imgt",
species: Species = "human",
segment: str = "V",
limit: Optional[int] = None,
    ) -> r.Response:
segment = segment.upper()
if segment not in self.segments:
raise ValueError(f"{segment} is not a valid segment from {self.segments}")
params = {
"source": source,
"common": species,
"segment": segment,
"limit": limit if limit else "-1",
}
resp = r.get(self.base_url / "genes", params=params)
resp.raise_for_status()
return resp
@validate_arguments
def get_gene(
self,
source: Source = "imgt",
species: Species = "human",
chain: Chain = "H",
segment: str = "V",
limit: Optional[int] = None,
    ) -> List[dict]:
resp = self.__get_gene_resp(source=source, species=species, segment=segment, limit=limit)
return [x for x in resp.json() if x["gene"][2].lower() == chain.lower()]
def get_stockholm_pairs(
self,
source: Source = "imgt",
chain: Chain = "H",
species: Species = "human",
limit: Optional[int] = None,
) -> List[Tuple[str, str]]:
sub_v = self.get_gene(source=source, species=species, chain=chain, segment="V", limit=limit)
sub_j = self.get_gene(source=source, species=species, chain=chain, segment="J", limit=limit)
stockholm_pairs = []
for merge in product(sub_v, sub_j):
v_seg = merge[0]
j_seg = merge[1]
if v_seg["receptor"] not in ["IG"]:
continue
functional = v_seg["imgt"]["imgt_functional"]
v_part = v_seg["imgt"]["sequence_gapped_aa"].replace(".", "-")[:108].ljust(108).replace(" ", "-")
# if v_part[0] == "-":
# continue
cdr3_part = j_seg["imgt"]["cdr3_aa"]
fwr4_part = j_seg["imgt"]["fwr4_aa"]
v_name = v_seg["gene"]
j_name = j_seg["gene"]
name = f"{species}_{v_name}_{j_name}"
# why?
if functional != "F":
continue
# H rules
if chain.strip().lower() in "H":
if len(cdr3_part[-3:] + fwr4_part) == 13:
fwr4_part += "-"
# K rules
if chain.strip().lower() in "k":
if len(cdr3_part[-3:] + fwr4_part) in [12, 13]:
fwr4_part += "-"
            # L rules
if chain.strip().lower() == "l":
if len(cdr3_part[-3:] + fwr4_part) == 12:
fwr4_part += "-"
            # todo: alternate fwr4_part based on its size and the caller
multiplier = 128 - (len(v_part) + len(cdr3_part[-3:] + fwr4_part))
align = v_part + "-" * multiplier + cdr3_part[-3:] + fwr4_part
# sanity check if chains rules are working
assert len(align) == 128
stockholm_pairs.append((name, align))
return stockholm_pairs
# def get_msa(
# self,
# source: Source = "imgt",
# species: Species = "human",
# chain: Chain = "H",
# limit: Optional[int] = None,
# ) -> str:
# stockholm_pairs = self.get_stockholm_pairs(source=source, chain=chain, species=species, limit=limit)
# sequences = []
# for name, align in stockholm_pairs:
# sequence = pyhmmer.easel.TextSequence(name=name.encode(), sequence=align)
# sequences.append(sequence)
# if not sequences:
# return None
# return pyhmmer.easel.TextMSA(name=f"{species}_{chain}".encode(), sequences=sequences).digitize(self.alphabet)
@lru_cache(maxsize=None)
def build_stockholm(
self,
source: Source = "imgt",
species: Species = "human",
chain: Chain = "H",
limit: Optional[int] = None,
    ) -> Optional[Path]:
"""
Get a stockholm file in string format for the given species and chain.
Parameters
----------
source : str, optional
Source of gene data, by default "imgt"
options: 'imgt' or 'custom'
species : str, optional
species selected from avaliabe, by default "human"
chain : str, optional
chain for seq, by default "H"
options: 'H', 'k', 'l' -> heavy, kappa light
Returns
-------
str
stockholm file in string format
"""
sto_path = self.data_folder / f"stockholms/{species}_{chain}.sto"
sto_pairs = self.get_stockholm_pairs(source=source, chain=chain, species=species, limit=limit)
if not sto_pairs:
return
head = f"# STOCKHOLM 1.0\n#=GF ID {species}_{chain}\n"
body = "\n".join([f"{name}\t{ali}" for name, ali in sto_pairs])
tail = "\n#=GC RF" + "\t" + "x" * 128 + "\n//\n"
# TODO: hand arch needs a parsed file -- will be refactored to handle digital directly
with open(sto_path, "w") as outfile:
outfile.write(head + body + tail)
return sto_path
@lru_cache(maxsize=None)
def build_hmm(
self,
source: Source = "imgt",
species: Species = "human",
chain: Chain = "H",
limit: Optional[int] = None,
    ) -> Optional[Path]:
sto_path = self.build_stockholm(source=source, chain=chain, species=species, limit=limit)
if not sto_path:
return
hmm_path = self.data_folder / f"hmms/{species}_{chain}.hmm"
with pyhmmer.easel.MSAFile(sto_path, digital=True, alphabet=self.alphabet, format="stockholm") as msa_file:
msa = next(msa_file)
hmm, _, _ = self.builder.build_msa(msa, self.background)
with open(hmm_path, "wb") as output_file:
hmm.write(output_file)
return hmm_path
@lru_cache(maxsize=None)
def get_hmm(
self,
source: Source = "imgt",
species: Species = "human",
chain: Chain = "H",
limit: Optional[int] = None,
prioritize_cached_hmm: bool = False,
):
hmm_path = self.data_folder / f"hmms/{species}_{chain}.hmm"
if prioritize_cached_hmm is True:
if hmm_path.is_file() is False:
hmm_path = self.build_hmm(source=source, chain=chain, species=species, limit=limit)
else:
hmm_path = self.build_hmm(source=source, chain=chain, species=species, limit=limit)
if not hmm_path:
return
with pyhmmer.plan7.HMMFile(hmm_path) as hmm_file:
hmm = next(hmm_file)
return hmm
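
# Hedged usage sketch (illustrative, not part of the package): first use needs
# network access to the G3 API; later calls can reuse the cached HMM on disk.
def _example_get_hmm():
    g3 = G3()
    return g3.get_hmm(species="human", chain="H", prioritize_cached_hmm=True)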
| StarcoderdataPython |
3244967 | <filename>Source/Qt5/modules/node.py<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
def main():
#Load modules
try:
import imp
import sys
except ImportError:
print("Error: Faild to import modules.")
print("Error: Exiting application")
sys.exit(1)
#Test if modules availible
testMods = ['sys', 're', 'time', 'PyQt5', 'node']
for mods in testMods:
try:
imp.find_module(mods)
except:
print("Error: Could not find '%s' module" %(mods))
print("Error: Exiting application")
sys.exit(1)
else:
print("Note : Found module '%s'" %(mods))
#Test Python Version
pyVersion = sys.hexversion
    if 0x03000000 <= pyVersion < 0x04000000:
        print("Note : Running Python version '%s' " %(sys.version.split()[0]))
    else:
        print("Error: Running Python version '%s'. Python 3.x is required " %(sys.version.split()[0]))
        sys.exit(1)
def modList():
return(['imp', 'sys'])
| StarcoderdataPython |
25122 | <filename>src/server_dgram/server.py
import logging
import math
import socket
import time

import numpy
from pickle import loads
from scipy import linalg
from matplotlib import pyplot
from multiprocessing import Array

from src.logic import helpers
from src.logic.parallel_process import ProcessParallel
class Server:
def __init__(self,
server_address,
server_port,
true_positions,
estimated_positions,
sensor_positions,
microphone_amount,
trials,
coordinates,
cores_amount):
self.__x, self.__y, self.__z = coordinates
self.__server_address = server_address
self.__microphone_amount = microphone_amount
self.__server_port = server_port
self.__true_positions = true_positions
self.__estimated_positions = estimated_positions
self.__trials = trials
self.__sensor_positions = sensor_positions
self.__distances = []
self.__time_delays = []
self.__padding = []
self.__cores_amount = cores_amount
self.__microphone_data = None
self.__raw_microphone_data = []
def generate_data(self):
self.generate_source_positions()
self.generate_distances()
self.prepare()
def run(self, received_data):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = (self.__server_address, self.__server_port)
logging.info('Starting up on %s port %s', self.__server_address, self.__server_port)
sock.bind(server_address)
microphones_data = {}
received_data_count = 0
while received_data_count < self.__microphone_amount:
logging.info('Waiting to receive message...')
data, address = sock.recvfrom(65535 - 28)
logging.info("Received %s", len(data))
if len(data) == 36:
received_data[received_data_count] = microphones_data[data]
received_data_count += 1
logging.info("Received data from %s microphones", received_data_count)
else:
microphone_id = data[0:36]
if not microphone_id in microphones_data:
microphones_data[microphone_id] = data[36:]
else:
microphones_data[microphone_id] += data[36:]
logging.info("Received data from all microphones")
def generate_source_positions(self):
logging.info('Generating sources positions.')
for i in range(self.__trials):
#r = numpy.random.rand(1) * 50
#t = numpy.random.rand(1) * 2 * math.pi
r = 0.1 * 50
t = 0.2 * 50
z = 0.3 * 20
x = r * math.cos(t)
y = r * math.sin(t)
#z = numpy.random.rand(1) * 20
self.__true_positions[i, 0] = x
self.__true_positions[i, 1] = y
self.__true_positions[i, 2] = z
logging.info('Generated sources positions.')
def generate_distances(self):
logging.info('Generating distances.')
self.__distances = numpy.zeros((self.__trials, self.__microphone_amount))
for i in range(self.__trials):
for j in range(self.__microphone_amount):
x1 = self.__true_positions[i, 0]
y1 = self.__true_positions[i, 1]
z1 = self.__true_positions[i, 2]
x2 = self.__sensor_positions[j, 0]
y2 = self.__sensor_positions[j, 1]
z2 = self.__sensor_positions[j, 2]
self.__distances[i, j] = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2)
logging.info('Generated distances.')
def log_results(self):
for trial_number in range(self.__trials):
logging.info('Trial number: %d', trial_number + 1)
logging.info('Estimated X = %.15f, Estimated Y = %.15f, Estimated Z = %.15f',
float(self.__estimated_positions[trial_number][0]),
float(self.__estimated_positions[trial_number][1]),
float(self.__estimated_positions[trial_number][2]))
logging.info('True X = %.15f, True Y = %.15f, True Z = %.15f',
float(self.__true_positions[trial_number][0]),
float(self.__true_positions[trial_number][1]),
float(self.__true_positions[trial_number][2]))
def draw_plot(self):
pyplot.plot(self.__true_positions[:, 0], self.__true_positions[:, 1], 'bd', label='True position')
pyplot.plot(self.__estimated_positions[:, 0], self.__estimated_positions[:, 1], 'r+',
label='Estimated position')
pyplot.legend(loc='upper right', numpoints=1)
pyplot.xlabel('X coordinate of target')
pyplot.ylabel('Y coordinate of target')
pyplot.title('TDOA Hyperbolic Localization')
pyplot.axis([-50, 50, -50, 50])
pyplot.show()
def prepare(self):
logging.info('Preparing stage started.')
self.__time_delays = numpy.divide(self.__distances, 340.29)
self.__padding = numpy.multiply(self.__time_delays, 44100)
logging.info('Preparing stage ended.')
def handle_retrieved_data(self, received_data):
for i in range(self.__trials):
x = self.__true_positions[i, 0]
y = self.__true_positions[i, 1]
z = self.__true_positions[i, 2]
data = []
for j in range(self.__microphone_amount):
data.append(received_data[j])
multi_track = numpy.array([loads(raw) for raw in data])
logging.info('Prepared all data.')
logging.info('Started source localization.')
x, y, z = self.locate(self.__sensor_positions, multi_track)
logging.info('Localized source.')
self.__estimated_positions[i, 0] = x
self.__estimated_positions[i, 1] = y
self.__estimated_positions[i, 2] = z
def locate(self, sensor_positions, multi_track):
        s = sensor_positions.shape
        # Avoid shadowing the builtin len(); this is the number of sensors.
        num_sensors = s[0]
        time_delays = numpy.zeros((num_sensors, 1))
starts = time.time()
if self.__cores_amount == 1:
            for p in range(num_sensors):
time_delays[p] = helpers.time_delay_function(multi_track[0,], multi_track[p,])
else:
pp = ProcessParallel()
            outs = Array('d', range(num_sensors))
            ranges = []
            # Integer division keeps the chunk step an int for per_delta.
            for result in helpers.per_delta(0, num_sensors, num_sensors // self.__cores_amount):
ranges.append(result)
for start, end in ranges:
pp.add_task(helpers.time_delay_function_optimized, (start, end, outs, multi_track))
pp.start_all()
pp.join_all()
for idx, res in enumerate(outs):
time_delays[idx] = res
ends = time.time()
logging.info('%.15f passed for localization computation trial.', ends - starts)
        Amat = numpy.zeros((num_sensors, 1))
        Bmat = numpy.zeros((num_sensors, 1))
        Cmat = numpy.zeros((num_sensors, 1))
        Dmat = numpy.zeros((num_sensors, 1))
        for i in range(2, num_sensors):
x1 = sensor_positions[0, 0]
y1 = sensor_positions[0, 1]
z1 = sensor_positions[0, 2]
x2 = sensor_positions[1, 0]
y2 = sensor_positions[1, 1]
z2 = sensor_positions[1, 2]
xi = sensor_positions[i, 0]
yi = sensor_positions[i, 1]
zi = sensor_positions[i, 2]
if time_delays[i] == 0 and time_delays[1] == 0:
Amat[i] = 0
Bmat[i] = 0
Cmat[i] = 0
Dmat[i] = 0
continue
if time_delays[i] == 0:
ti_value = 0
else:
ti_value = 1 / (340.29 * time_delays[i])
if time_delays[1] == 0:
t1_value = 0
else:
t1_value = 1 / (340.29 * time_delays[1])
Amat[i] = ti_value * (-2 * x1 + 2 * xi) - t1_value * (
-2 * x1 + 2 * x2)
Bmat[i] = ti_value * (-2 * y1 + 2 * yi) - t1_value * (
-2 * y1 + 2 * y2)
Cmat[i] = ti_value * (-2 * z1 + 2 * zi) - t1_value * (
-2 * z1 + 2 * z2)
Sum1 = (x1 ** 2) + (y1 ** 2) + (z1 ** 2) - (xi ** 2) - (yi ** 2) - (zi ** 2)
Sum2 = (x1 ** 2) + (y1 ** 2) + (z1 ** 2) - (x2 ** 2) - (y2 ** 2) - (z2 ** 2)
Dmat[i] = 340.29 * (time_delays[i] - time_delays[1]) + ti_value * Sum1 - t1_value * Sum2
        M = numpy.zeros((num_sensors + 1, 3))
        D = numpy.zeros((num_sensors + 1, 1))
        for i in range(num_sensors):
M[i, 0] = Amat[i]
M[i, 1] = Bmat[i]
M[i, 2] = Cmat[i]
D[i] = Dmat[i]
        M = numpy.array(M[2:num_sensors, :])
        D = numpy.array(D[2:num_sensors])
D = numpy.multiply(-1, D)
Minv = linalg.pinv(M)
T = numpy.dot(Minv, D)
x = T[0]
y = T[1]
z = T[2]
return x, y, z
@property
def padding(self):
return self.__padding
@property
def distances(self):
return self.__distances
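
# Hedged usage sketch (illustrative values only; a real deployment passes the
# measured sensor coordinates and the configured listen port):
def _example_server():
    trials, mics = 1, 5
    return Server('0.0.0.0', 10000,
                  numpy.zeros((trials, 3)), numpy.zeros((trials, 3)),
                  numpy.random.rand(mics, 3) * 10, mics, trials,
                  (0.0, 0.0, 0.0), cores_amount=1)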
| StarcoderdataPython |
3332858 | '''tzinfo timezone information for Australia/Lord_Howe.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Lord_Howe(DstTzInfo):
'''Australia/Lord_Howe timezone definition. See datetime.tzinfo for details'''
zone = 'Australia/Lord_Howe'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1981,2,28,14,0,0),
d(1981,10,24,15,30,0),
d(1982,3,6,14,30,0),
d(1982,10,30,15,30,0),
d(1983,3,5,14,30,0),
d(1983,10,29,15,30,0),
d(1984,3,3,14,30,0),
d(1984,10,27,15,30,0),
d(1985,3,2,14,30,0),
d(1985,10,26,15,30,0),
d(1986,3,15,15,0,0),
d(1986,10,18,15,30,0),
d(1987,3,14,15,0,0),
d(1987,10,24,15,30,0),
d(1988,3,19,15,0,0),
d(1988,10,29,15,30,0),
d(1989,3,18,15,0,0),
d(1989,10,28,15,30,0),
d(1990,3,3,15,0,0),
d(1990,10,27,15,30,0),
d(1991,3,2,15,0,0),
d(1991,10,26,15,30,0),
d(1992,2,29,15,0,0),
d(1992,10,24,15,30,0),
d(1993,3,6,15,0,0),
d(1993,10,30,15,30,0),
d(1994,3,5,15,0,0),
d(1994,10,29,15,30,0),
d(1995,3,4,15,0,0),
d(1995,10,28,15,30,0),
d(1996,3,30,15,0,0),
d(1996,10,26,15,30,0),
d(1997,3,29,15,0,0),
d(1997,10,25,15,30,0),
d(1998,3,28,15,0,0),
d(1998,10,24,15,30,0),
d(1999,3,27,15,0,0),
d(1999,10,30,15,30,0),
d(2000,3,25,15,0,0),
d(2000,8,26,15,30,0),
d(2001,3,24,15,0,0),
d(2001,10,27,15,30,0),
d(2002,3,30,15,0,0),
d(2002,10,26,15,30,0),
d(2003,3,29,15,0,0),
d(2003,10,25,15,30,0),
d(2004,3,27,15,0,0),
d(2004,10,30,15,30,0),
d(2005,3,26,15,0,0),
d(2005,10,29,15,30,0),
d(2006,4,1,15,0,0),
d(2006,10,28,15,30,0),
d(2007,3,24,15,0,0),
d(2007,10,27,15,30,0),
d(2008,3,29,15,0,0),
d(2008,10,25,15,30,0),
d(2009,3,28,15,0,0),
d(2009,10,24,15,30,0),
d(2010,3,27,15,0,0),
d(2010,10,30,15,30,0),
d(2011,3,26,15,0,0),
d(2011,10,29,15,30,0),
d(2012,3,24,15,0,0),
d(2012,10,27,15,30,0),
d(2013,3,30,15,0,0),
d(2013,10,26,15,30,0),
d(2014,3,29,15,0,0),
d(2014,10,25,15,30,0),
d(2015,3,28,15,0,0),
d(2015,10,24,15,30,0),
d(2016,3,26,15,0,0),
d(2016,10,29,15,30,0),
d(2017,3,25,15,0,0),
d(2017,10,28,15,30,0),
d(2018,3,24,15,0,0),
d(2018,10,27,15,30,0),
d(2019,3,30,15,0,0),
d(2019,10,26,15,30,0),
d(2020,3,28,15,0,0),
d(2020,10,24,15,30,0),
d(2021,3,27,15,0,0),
d(2021,10,30,15,30,0),
d(2022,3,26,15,0,0),
d(2022,10,29,15,30,0),
d(2023,3,25,15,0,0),
d(2023,10,28,15,30,0),
d(2024,3,30,15,0,0),
d(2024,10,26,15,30,0),
d(2025,3,29,15,0,0),
d(2025,10,25,15,30,0),
d(2026,3,28,15,0,0),
d(2026,10,24,15,30,0),
d(2027,3,27,15,0,0),
d(2027,10,30,15,30,0),
d(2028,3,25,15,0,0),
d(2028,10,28,15,30,0),
d(2029,3,24,15,0,0),
d(2029,10,27,15,30,0),
d(2030,3,30,15,0,0),
d(2030,10,26,15,30,0),
d(2031,3,29,15,0,0),
d(2031,10,25,15,30,0),
d(2032,3,27,15,0,0),
d(2032,10,30,15,30,0),
d(2033,3,26,15,0,0),
d(2033,10,29,15,30,0),
d(2034,3,25,15,0,0),
d(2034,10,28,15,30,0),
d(2035,3,24,15,0,0),
d(2035,10,27,15,30,0),
d(2036,3,29,15,0,0),
d(2036,10,25,15,30,0),
d(2037,3,28,15,0,0),
d(2037,10,24,15,30,0),
]
_transition_info = [
i(36000,0,'EST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(41400,3600,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
i(37800,0,'LHST'),
i(39600,1800,'LHST'),
]
Lord_Howe = Lord_Howe()
| StarcoderdataPython |
4837028 | <reponame>c0dehard/lazy-junk-organizer
"""
-*- coding: utf-8 -*-
========================
Python Lazy Junk Files Organizer
========================
========================
"""
import os
from pathlib import Path
DIRECTORIES = {
"HTML": [".html5", ".html", ".htm", ".xhtml"],
"MARKUP": [".md"],
"IMAGES": [".jpeg", ".jpg", ".tiff", ".gif", ".bmp", ".png", ".bpg", "svg",
".heif", ".psd"],
"VIDEOS": [".avi", ".flv", ".wmv", ".mov", ".mp4", ".webm", ".vob", ".mng",
".qt", ".mpg", ".mpeg", ".3gp",".mkv"],
"DOCUMENTS": [".oxps", ".epub", ".pages", ".docx", ".doc", ".fdf", ".ods",
".odt", ".pwi", ".xsn", ".xps", ".dotx", ".docm", ".dox",
".rvg", ".rtf", ".rtfd", ".wpd", ".xls", ".xlsx", ".ppt",
"pptx",".md",".pages",".numbers"],
"ARCHIVES": [".a", ".ar", ".arh",".tar",".tar.bz2",".tar.gz",".cpio", ".tar", ".gz", ".rz", ".7z",
".rar", ".xar", ".zip",".xz",".pkg",".deb",".rpm"],
"DISKIMAGE":[".iso",".img",".vcd",".dmg"],
"AUDIO": [".aac", ".aa", ".aac", ".dvf", ".m4a", ".m4b", ".m4p", ".mp3",
".msv", "ogg", "oga", ".raw", ".vox", ".wav", ".wma"],
"PLAINTEXT": [".txt", ".in", ".out",".csv",".log"],
"POWERSHELL": [".ps1",".psm1",".psd1"],
"PDF": [".pdf"],
"PYTHON": [".py",".pyi",".pyc"],
"XML": [".xml",".fxml"],
"EXECUTABLE": [".exe",".run"],
"SHELL": [".sh"],
"DATABASE":[".db",".sql"],
"C#" :[".cs"],
"C++": [".cpp"],
"C": [".c"],
"GO": [".go"],
"YAML": [".yaml"],
"JSON": [".json"],
"ASP Classic": [".asp"],
"ASP_NET": [".aspx", ".axd", ".asx", ".asmx", ".ashx"],
"CSS": [".css"],
"Coldfusion": [".cfm"],
"Erlang": [".yaws"],
"Flash": [".swf"],
"Java": [".jar",".java",".jsp", ".jspx", ".wss", ".do", ".action"],
"Kotlin": [".kt",".kts",".ktm"],
"JavaScript": [".js"],
"TypeScript": [".ts"],
"Rust": [".rs",".rlib"],
"Toml": [".toml"],
"Travis": [".travis"],
"Perl": [".pl"],
"PHP": [".php", ".php4", ".php3", ".phtml"],
"Ruby": [".rb", ".rhtml"],
"SSI": [".shtml"],
"XML": [".xml", ".rss", ".svg"],
"APPS": [".app",".ipa",".apk"],
"LINKS":[".webloc",".lnk"]
}
FILE_FORMATS = {file_format: directory
for directory, file_formats in DIRECTORIES.items()
for file_format in file_formats}
def organize_junk():
for entry in os.scandir():
if entry.is_dir():
continue
file_path = Path(entry)
file_format = file_path.suffix.lower()
if file_format in FILE_FORMATS:
directory_path = Path(FILE_FORMATS[file_format])
directory_path.mkdir(exist_ok=True)
file_path.rename(directory_path.joinpath(file_path))
    os.makedirs("OTHER-FILES", exist_ok=True)
    for entry in os.scandir():
        try:
            if entry.is_dir():
                # Remove the directory only if it is empty; rmdir fails otherwise.
                os.rmdir(entry)
            else:
                # Anything left over (unrecognized extension) goes to OTHER-FILES.
                os.rename(entry.path, os.path.join(os.getcwd(), "OTHER-FILES", entry.name))
        except OSError:
            pass
if __name__ == "__main__":
organize_junk() | StarcoderdataPython |
30608 | import os
def get_token():
return os.environ['VACCINEBOT_TOKEN'] | StarcoderdataPython |
1749822 | <filename>examples/topology-optimization/truss2.py
from matplotlib import pyplot as plt
from compas.numerical import topop_numpy
nelx = 100
nely = 200
plt.figure(figsize=(12, 8))
plt.axis([0, nelx, 0, nely])
plt.ion()
def callback(x):
plt.imshow(1 - x, cmap='gray', origin='lower')
plt.pause(0.001)
loads = {
'0-50': [1, 0],
'0-100': [1, 0],
'0-150': [1, 0],
'0-200': [1, 0],
}
supports = {
'0-0': [1, 1],
'50-0': [1, 1],
'100-0': [1, 1],
}
x = topop_numpy(nelx=nelx, nely=nely, loads=loads, supports=supports, volfrac=0.3, callback=callback)
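# Keep the final design visible once the optimizer returns (assumes an
# interactive matplotlib backend).
plt.ioff()
plt.show()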
| StarcoderdataPython |
1638778 | <filename>lib/python/treadmill/templates/ipset_host_restore.py<gh_stars>0
"""IPSet host restore template."""
T = """
create {{any_container}} list:set size 8
create {{infra_services}} hash:ip,port family inet hashsize 1024 maxelem 65536
create {{nonprod_containers}} hash:ip family inet hashsize 1024 maxelem 65536
create {{prod_containers}} hash:ip family inet hashsize 1024 maxelem 65536
create {{passthroughs}} hash:ip family inet hashsize 1024 maxelem 65536
create {{nodes}} hash:ip family inet hashsize 1024 maxelem 65536
create {{prod_sources}} hash:ip family inet hashsize 4096 maxelem 262144
create {{vring_containers}} hash:ip family inet hashsize 1024 maxelem 65536
"""
| StarcoderdataPython |
109116 | <reponame>ArcIX/vsdscrapy<filename>securityscrape/securityscrape/items.py<gh_stars>0
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class SecurityItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
source_url = Field()
| StarcoderdataPython |
3210194 | # ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import pytest
import ngraph as ng
from ngraph.op_graph.comm_nodes import RecvOp, ScatterRecvOp, GatherRecvOp
from ngraph.op_graph.comm_nodes import SendOp, ScatterSendOp, GatherSendOp
from ngraph.testing.hetr_utils import create_send_recv_graph, create_scatter_gather_graph
from ngraph.transformers.hetr.hetr_utils import comm_path_exists, update_comm_deps, find_recvs
pytestmark = pytest.mark.hetr_only
def test_find_recvs():
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
assert set([recv_x]) == set(find_recvs(x_plus_one))
assert set([recv_x]) == set(find_recvs(recv_x))
assert len(find_recvs(from_node)) == 0
assert set([recv_x]) == set(find_recvs(send_x_plus_one))
assert set([recv_x_plus_one, recv_x]) == set(find_recvs(recv_x_plus_one))
assert set([recv_x_plus_one, recv_x]) == set(find_recvs(z))
def test_find_recvs_scatter_gather():
scatter_send_x, scatter_recv_a, scatter_recv_b, gather_send_a, gather_send_b, \
gather_recv_x_plus_one = create_scatter_gather_graph()
assert set([scatter_recv_a]) == set(find_recvs(gather_send_a))
assert set([scatter_recv_b]) == set(find_recvs(gather_send_b))
assert len(find_recvs(scatter_send_x)) == 0
assert set([gather_recv_x_plus_one, scatter_recv_a]) == set(find_recvs(gather_recv_x_plus_one))
assert set([scatter_recv_a]) == set(find_recvs(scatter_recv_a))
def test_comm_path_exists():
axes = ng.make_axes([ng.make_axis(length=10, name='A'), ng.make_axis(length=15, name='B')])
with ng.metadata(device=None, device_id=None, transformer=None, host_transformer=None):
from_node = ng.placeholder(axes)
to_node = ng.placeholder(axes)
send_x = SendOp(from_node=from_node)
recv_x = RecvOp(to_node=to_node, send_node=send_x)
with ng.metadata(device=None, device_id=None, transformer=None, host_transformer=None):
x_plus_one = recv_x + 1
assert comm_path_exists(recv_x, send_x)
assert comm_path_exists(x_plus_one, send_x)
def test_comm_path_exists_scatter_gather():
scatter_send_x, scatter_recv_a, scatter_recv_b, gather_send_a, gather_send_b, \
gather_recv_x_plus_one = create_scatter_gather_graph()
assert comm_path_exists(scatter_recv_a, scatter_send_x)
assert comm_path_exists(gather_recv_x_plus_one, gather_send_a)
assert comm_path_exists(gather_recv_x_plus_one, scatter_send_x)
assert comm_path_exists(scatter_recv_b, scatter_send_x)
assert not comm_path_exists(gather_recv_x_plus_one, gather_send_b)
assert not comm_path_exists(gather_send_a, gather_recv_x_plus_one)
def test_update_comm_deps():
with ng.metadata(transformer='cpu0'):
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
update_comm_deps((z, send_x))
assert recv_x_plus_one in z.all_deps
def test_update_comm_deps_scatter_gather():
ax_a = ng.make_axis(length=10, name='A')
ax_b = ng.make_axis(length=15, name='B')
axes = ng.make_axes([ax_a, ax_b])
parallel_metadata = dict(parallel=ax_a, device_id=(0, 1),
transformer=None, host_transformer=None, device=None)
with ng.metadata(transformer='cpu0'):
with ng.metadata(**parallel_metadata):
from_node_a = ng.placeholder(axes)
to_node_a = ng.placeholder(axes)
scatter_send_x = ScatterSendOp(from_node=from_node_a, to_node=to_node_a)
scatter_recv_a = ScatterRecvOp(to_node=to_node_a, send_node=scatter_send_x)
with ng.metadata(**parallel_metadata):
x_plus_one_a = scatter_recv_a + 1
gather_send_x_plus_one_a = GatherSendOp(from_node=x_plus_one_a)
with ng.metadata(transformer='cpu1'):
with ng.metadata(**parallel_metadata):
to_node_b = ng.placeholder(axes)
scatter_recv_b = ScatterRecvOp(to_node=to_node_b, send_node=scatter_send_x)
with ng.metadata(**parallel_metadata):
x_plus_one_b = scatter_recv_b + 1
gather_send_x_plus_one_b = GatherSendOp(from_node=x_plus_one_b)
with ng.metadata(transformer='cpu0'):
with ng.metadata(**parallel_metadata):
gather_recv_x_plus_one_a = GatherRecvOp(from_node=from_node_a, to_node=to_node_a,
send_node=gather_send_x_plus_one_a)
z_a = gather_recv_x_plus_one_a + 1
update_comm_deps((scatter_send_x, gather_send_x_plus_one_a, z_a))
update_comm_deps((gather_send_x_plus_one_b,))
assert set([scatter_send_x]) == set(scatter_recv_a.control_deps)
assert set([scatter_send_x, gather_send_x_plus_one_a]) == \
set(gather_recv_x_plus_one_a.control_deps)
def assert_axes_eq_len(expected_axes, actual_axes):
for exp, act in zip(expected_axes, actual_axes):
assert exp.length == act.length
@pytest.mark.parametrize('config', [
{
'axes': [64],
'parallel_axis': 0,
'slices': [[slice(0, 32, 1)], [slice(32, 64, 1)]],
'device_id': (0, 1)
},
{
'axes': [64, 128],
'parallel_axis': 0,
'slices': [[slice(0, 16, 1), slice(None)],
[slice(16, 32, 1), slice(None)],
[slice(32, 48, 1), slice(None)],
[slice(48, 64, 1), slice(None)]],
'device_id': (0, 1, 2, 3)
},
{
'axes': [64, 128, 256],
'parallel_axis': 0,
'slices': [[slice(0, 16, 1), slice(None), slice(None)],
[slice(16, 32, 1), slice(None), slice(None)],
[slice(32, 48, 1), slice(None), slice(None)],
[slice(48, 64, 1), slice(None), slice(None)]],
'device_id': (0, 1, 2, 3)
},
{
'axes': [64, 128, 256],
'parallel_axis': 2,
'slices': [[slice(0, 128, 1), slice(None), slice(None)],
[slice(128, 256, 1), slice(None), slice(None)]],
'device_id': (0, 1)
}
])
def test_scatter_gather_node_axes(config):
t = config
axes = ng.make_axes([ng.make_axis(length) for length in t['axes']])
parallel_axis = axes[t['parallel_axis']]
hetr_axes = parallel_axis + (axes - parallel_axis)
with ng.metadata(device=None, device_id='0', transformer='cpu0', host_transformer=None):
from_node = ng.placeholder(axes=axes)
to_node = ng.placeholder(axes=axes)
with ng.metadata(device=None, device_id=t['device_id'], transformer=None,
parallel=parallel_axis, host_transformer=None):
par_node = ng.placeholder(axes=axes)
scatter_send_op = ScatterSendOp(from_node=from_node,
to_node=par_node)
assert hetr_axes == scatter_send_op.axes
assert t['slices'] == scatter_send_op.slices
scatter_recv_op = ScatterRecvOp(to_node=par_node,
send_node=scatter_send_op)
for sct_a, a in zip(scatter_recv_op.axes, hetr_axes):
assert sct_a.length == a.length
gather_send_op = GatherSendOp(from_node=scatter_recv_op)
assert_axes_eq_len(scatter_recv_op.axes, gather_send_op.axes)
gather_recv_op = GatherRecvOp(from_node=par_node,
to_node=to_node,
send_node=gather_send_op)
assert_axes_eq_len(hetr_axes, gather_recv_op.axes)
assert t['slices'] == gather_recv_op.slices
# TODO: Add def test_clone_graph() - Issue #1864
| StarcoderdataPython |
3283126 | <filename>DataStructuresandAlgorithmsInPython/com/kranthi/Algorithms/challenges/easy/maxMoneyWithdraw.py
"""
Maximum money that can be withdrawn in two steps
Last Updated : 10 May, 2019
There are two cash lockers; one has X coins and the other has Y. You may withdraw at most twice. Withdrawing empties a locker of its Z coins, after which the locker is refilled with Z - 1 coins. The task is to find the maximum number of coins you can get.
Examples:
Input: X = 6, Y = 3
Output: 11
Take from locker X i.e. 6
Now, X = 5 and Y = 3
Take again from locker X i.e. 5.
Input: X = 4, Y = 4
Output: 8
"""
def maxCoins(x, y):
    # Withdraw from the larger locker first; it is then refilled with one
    # fewer coin, so the second withdrawal takes the better of x - 1 and y.
    if (x < y):
        x, y = y, x
    return x + max(x - 1, y) | StarcoderdataPython |
28406 | #! /usr/bin/env python
from math import factorial
import numpy as np
# test passed
def generate_poly(max_exponent,max_diff,symbol):
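    """Return a (max_diff+1, max_exponent+1) matrix whose (k, i) entry is the
    k-th derivative of s**i evaluated at s = symbol, i.e.
    i!/(i-k)! * symbol**(i-k), and zero when k > i."""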
f=np.zeros((max_diff+1, max_exponent+1), dtype=float)
for k in range(max_diff+1):
for i in range(max_exponent+1):
if (i - k) >= 0:
f[k,i] = factorial(i)*symbol**(i-k)/factorial(i-k)
else:
f[k,i] = 0
return f | StarcoderdataPython |
190243 | #----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
import argparse
import numpy as np
import sys
import os
from mmdnn.conversion.examples.imagenet_test import TestKit
import paddle.v2 as paddle
import gzip
from paddle.trainer_config_helpers.config_parser_utils import \
reset_parser
class TestPaddle(TestKit):
def __init__(self):
from six import text_type as _text_type
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preprocess', type=_text_type, help='Model Preprocess Type')
parser.add_argument('--model', '-n', '-w', type=_text_type,
required=True, help='Paddle Model path.')
parser.add_argument('-s', type=_text_type, help='Source Framework Type',
choices=self.truth.keys())
parser.add_argument('--image', '-i',
type=_text_type, help='Test image path.',
default="mmdnn/conversion/examples/data/seagull.jpg")
parser.add_argument('-input', type=_text_type,
required=True, help='Paddle Input Node')
parser.add_argument('-output', type=_text_type,
required=True, help='Paddle Output Node')
parser.add_argument('-size', type=int,
default=224, help='Paddle Input Image Size')
self.args = parser.parse_args()
print("Loading model [{}].".format(self.args.model))
# import self.model
# self.model
# how the model can not load from `***.bin`
print("Model loading success.")
def preprocess(self, image_path):
from PIL import Image as pil_image
img = pil_image.open(image_path)
img = img.resize((self.args.size, self.args.size))
self.data = img
def print_result(self):
reset_parser()
img = np.transpose(self.data, (2, 0, 1))
test_data = [(img.flatten(),)]
        parameters_file = self.args.model  # argparse dest for --model/-n/-w is "model"
with gzip.open(parameters_file, 'r') as f:
parameters = paddle.parameters.Parameters.from_tar(f)
predict = paddle.infer(output_layer = self.model, parameters=parameters, input=test_data)
predict = np.squeeze(predict)
super(TestPaddle, self).print_result(predict)
def print_intermediate_result(self, layer_name, if_transpose = False):
super(TestPaddle, self).print_intermediate_result(self.model.name, if_transpose)
def inference(self, image_path):
self.preprocess(image_path)
self.print_result()
if __name__=='__main__':
tester = TestPaddle()
tester.inference(tester.args.image)
| StarcoderdataPython |
1623495 | """Linear Predictive Coding analysis and resynthesis for audio."""
import numpy as np
import scipy.signal
import scipy.linalg
def lpcfit(x, p=12, h=128, w=None, overlaps=True):
"""Perform LPC analysis of short-time windows of a waveform.
Args:
x: 1D np.array containing input audio waveform.
p: int, order of LP models to fit.
h: int, hop in samples between successive short-time windows.
w: int, analysis window length. Defaults to 2 x h.
overlaps: bool, if true, residuals are overlap-added between
windows (for a continuous excitation), otherwise only the
residual for each hop portion is kept (for perfect reconstruction).
Returns:
a: np.array of (n_frames, p + 1) containing the LPC filter coefficients for
each frame.
g: np.array of (n_frames,) giving the gain for each frame.
e: np.array of (n_frames * h + (w - h),) giving the normalized-energy
excitation (residual).
"""
if not w:
w = 2 * h
npts = x.shape[0]
nhops = int(npts/h)
# Pad x with zeros so that we can extract complete w-length windows from it.
x = np.hstack([np.zeros(int((w-h)/2)), x, np.zeros(int(w-h/2))])
a = np.zeros((nhops, p+1))
g = np.zeros(nhops)
if overlaps:
e = np.zeros((nhops - 1) * h + w)
else:
e = np.zeros(npts)
# Pre-emphasis
pre = [1, -0.9]
x = scipy.signal.lfilter(pre, 1 , x)
for hop in np.arange(nhops):
# Extract segment of signal.
xx = x[hop * h + np.arange(w)]
# Apply hanning window
wxx = xx * np.hanning(w)
# Form autocorrelation (calculates *way* too many points)
rxx = np.correlate(wxx, wxx, 'full')
# Extract just the points we need (middle p+1 points).
rxx = rxx[w - 1 + np.arange(p + 1)]
# Setup the normal equations
coeffs = np.dot(np.linalg.inv(scipy.linalg.toeplitz(rxx[:-1])), rxx[1:])
# Calculate residual by filtering windowed xx
aa = np.hstack([1.0, -coeffs])
if overlaps:
rs = scipy.signal.lfilter(aa, 1, wxx)
else:
rs = scipy.signal.lfilter(aa, 1, xx[int((w - h) / 2) + np.arange(h)])
G = np.sqrt(np.mean(rs**2))
# Save filter, gain and residual
a[hop] = aa
g[hop] = G
if overlaps:
e[hop * h + np.arange(w)] += rs / G
else:
e[hop *h + np.arange(h)] = rs / G
# Throw away first (win-hop)/2 pts if in overlap mode
# for proper synchronization of resynth
if overlaps:
e = e[int((w - h) / 2):]
return a, g, e
def lpcsynth(a, g, e=None, h=128, overlaps=True):
"""Resynthesize a short-time LPC analysis to audio.
Args:
a: np.array of (nframes, order + 1) giving the per-frame LPC filter
coefficients.
g: np.array of (nframes,) giving the gain for each frame.
e: np.array of (nframes * hop + (window - hop)) giving the excitation
signal to feed into the filters. If a scalar, an impulse train with the
specified period is used. Defaults to Gaussian white noise.
h: int, hop between successive reconstruction frames, in samples.
Reconstruction window is always 2 * h.
overlaps: bool. If true, successive frames are windowed and overlap-
added. If false, we assume e contains exact residuals for each
window, so reconstructions are similarly truncated and concatenated.
Returns:
1D np.array of the resynthesized waveform.
"""
w = 2 * h
nhops, p = a.shape
npts = nhops * h + w
# Excitation needs extra half-window at the end if in overlap mode
nepts = npts + overlaps*(w - h)
if e is None:
e = np.random.randn(nepts)
elif type(e) == int:
        period = e
e = np.sqrt(period) * (
np.mod(np.arange(nepts), period) == 0).astype(float)
else:
nepts = e.shape[0]
npts = nepts + h
# Try to make sure we don't run out of e (in ov mode)
e = np.hstack([e, np.zeros(w)])
d = np.zeros(npts)
for hop in np.arange(nhops):
hbase = hop * h
aa = a[hop, :]
G = g[hop]
if overlaps:
d[hbase + np.arange(w)] += np.hanning(w) * (
G * scipy.signal.lfilter([1], aa, e[hbase + np.arange(w)]))
else:
            d[hbase + np.arange(h)] = G * scipy.signal.lfilter(
                [1], aa, e[hbase + np.arange(h)])
# De-emphasis (must match pre-emphasis in lpcfit)
pre = [1, -0.9]
d = scipy.signal.lfilter([1], pre, d)
return d
def lpcBHenc(E, H=None, W=256, viz=False):
"""
% P = lpcBHenc(E,H,W,viz) Encode LPC residual as buzz/hiss pitch periods
% E is a residual from LPC encoding. P is an encoding
% which, for every H samples, returns an integer pitch period
% or 0 for frames judged as noisy. Pitch is found via autocorrelation
% over a window of W points
% 2001-03-19 <EMAIL>
"""
if not H:
H = int(W / 2)
nhops = int(E.shape[0]/H)
P = np.zeros(nhops)
pmin = 2
pmax = 127
pdthresh = 0.2
# Pad so that each W-point frame is centered around hop * H.
    ee = np.hstack([np.zeros(W // 2), E, np.zeros(W // 2)])
for hop in np.arange(nhops):
xx = ee[hop * H + np.arange(W)]
rxx = np.correlate(xx, xx, 'full')[W - 1 + np.arange(pmin, pmax)]
period = pmin + np.argmax(rxx)
rratio = np.max(rxx)/rxx[0]
#if viz:
# disp(['hop ',num2str(hop),' pd ',num2str(pd),' rrat ',num2str(rratio)]);
# subplot(211); plot(xx);
# subplot(212); plot(rxx); pause
if rratio > pdthresh:
P[hop] = period
else:
P[hop] = 0 # Noisy period
return P
def lpcBHdec(P, H=128):
"""
% E = lpcBHdec(P,H) Decode LPC residual encoded as pitch periods
% P is a vector pitch periods from lpcresenc. Reconstruct a
% stylized excitation vector E with a hop size H.
% 2001-03-19 <EMAIL>
"""
nhops = P.shape[0]
npts = H * nhops
E = np.zeros(npts)
phs = 0 # Current phase as proportion of a cyle (new pulse at 1.0)
for hop in np.arange(nhops):
period = P[hop]
        base = H * hop  # 0-based hop start (the 1-based MATLAB offset underflowed here)
if period == 0:
E[base + np.arange(H)] = np.random.randn(H)
else:
            pt = 0
# Steps to next pulse
remsteps = int(np.round((1 - phs) * period))
while (pt + remsteps) < H:
pt = pt + remsteps
E[base + pt] = np.sqrt(period) # so rms is 1
remsteps = period
# Store residual phase
phs = (H - pt)/float(period)
return E
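if __name__ == '__main__':
    # Minimal round-trip sketch (synthetic input, not from the original source):
    # fit LPC models to white noise, then resynthesize from the stored
    # coefficients, gains and residual.
    x = np.random.randn(4096)
    a, g, e = lpcfit(x, p=12, h=128)
    y = lpcsynth(a, g, e, h=128)
    print('input/output lengths:', x.shape[0], y.shape[0])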
| StarcoderdataPython |
73969 | <reponame>ParikhKadam/zenml
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Orchestrator for simple AWS VM backend"""
import os
import time
from typing import Text, Dict, Any
from zenml.backends.orchestrator.aws import utils
from zenml.backends.orchestrator import OrchestratorBaseBackend
from zenml.repo import Repository
from zenml.standards import standard_keys as keys
from zenml.utils import path_utils
from zenml.constants import ZENML_BASE_IMAGE_NAME
from zenml.logger import get_logger
logger = get_logger(__name__)
EXTRACTED_TAR_DIR_NAME = 'zenml_working'
STAGING_AREA = 'staging'
TAR_PATH_ARG = 'tar_path'
class OrchestratorAWSBackend(OrchestratorBaseBackend):
"""
Orchestrates pipeline on a AWS EC2 instance
"""
def __init__(self,
iam_role: Text,
instance_type: Text = 't2.micro',
instance_image: Text = 'ami-02e9f4e447e4cda79',
zenml_image: Text = None,
region: Text = None,
key_name: Text = None,
security_group: Text = None,
min_count: int = 1,
max_count: int = 1,
**kwargs):
"""
Base class for the orchestrator backend on AWS
:param iam_role: the name of the role created in AWS IAM
:param instance_type: the type of the EC2 instance, defaults to
t2.micro
:param instance_image: the image for the EC2 instance, defaults to the
public image: Deep Learning AMI (Amazon Linux 2) Version 39.0
:param zenml_image: refers to the image with ZenML
:param region: the name of the region that AWS is working on
:param key_name: the name of the key to be used whilst creating the
instance on EC2
:param security_group: the name of a selected security group
:param min_count: the minimum number of instances, defaults to 1
:param max_count: the maximum number of instances, defaults to 1
"""
self.session = utils.setup_session()
self.region = utils.setup_region(region)
self.ec2_client = self.session.client('ec2')
self.ec2_resource = self.session.resource('ec2')
self.instance_type = instance_type
self.instance_image = instance_image
self.zenml_image = zenml_image
self.key_name = key_name
self.min_count = min_count
self.max_count = max_count
if security_group is not None:
self.security_group = [security_group]
else:
self.security_group = security_group
self.iam_role = {'Name': iam_role}
if zenml_image is None:
self.zenml_image = ZENML_BASE_IMAGE_NAME
else:
self.zenml_image = zenml_image
        super(OrchestratorAWSBackend, self).__init__(
instance_type=self.instance_type,
instance_image=self.instance_image,
zenml_image=self.zenml_image,
region=self.region,
key_name=self.key_name,
min_count=self.min_count,
max_count=self.max_count,
security_group=self.security_group,
iam_role=self.iam_role,
**kwargs,
)
@staticmethod
def make_unique_name(name):
return f'{name}-{time.asctime()}'
def launch_instance(self, config):
startup = utils.get_startup_script(config,
self.region,
self.zenml_image)
args = {'ImageId': self.instance_image,
'InstanceType': self.instance_type,
'IamInstanceProfile': self.iam_role,
'MaxCount': self.max_count,
'MinCount': self.min_count,
'UserData': startup}
if self.security_group:
args['SecurityGroups'] = self.security_group
if self.key_name:
args['KeyName'] = self.key_name
return self.ec2_resource.create_instances(**args)
    def run(self, config: Dict[Text, Any]):
# Extract the paths to create the tar
logger.info('Orchestrating pipeline on AWS..')
repo: Repository = Repository.get_instance()
repo_path = repo.path
config_dir = repo.zenml_config.config_dir
tar_file_name = \
f'{EXTRACTED_TAR_DIR_NAME}_{str(int(time.time()))}.tar.gz'
path_to_tar = os.path.join(config_dir, tar_file_name)
# Create tarfile but exclude .zenml folder if exists
path_utils.create_tarfile(repo_path, path_to_tar)
logger.info(f'Created tar of current repository at: {path_to_tar}')
# Upload tar to artifact store
store_path = config[keys.GlobalKeys.ARTIFACT_STORE]
store_staging_area = os.path.join(store_path, STAGING_AREA)
store_path_to_tar = os.path.join(store_staging_area, tar_file_name)
path_utils.copy(path_to_tar, store_path_to_tar)
logger.info(f'Copied tar to artifact store at: {store_path_to_tar}')
# Remove tar
path_utils.rm_dir(path_to_tar)
logger.info(f'Removed tar at: {path_to_tar}')
# Append path of tar in config orchestrator utils
config[keys.GlobalKeys.BACKEND][keys.BackendKeys.ARGS][
TAR_PATH_ARG] = store_path_to_tar
# Launch the instance
self.launch_instance(config)
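# Minimal wiring sketch (hypothetical IAM role name; `config` would come from a
# parsed pipeline spec):
#
#   backend = OrchestratorAWSBackend(iam_role='zenml-ec2-role')
#   backend.run(config)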
| StarcoderdataPython |
1697947 | import ConfigParser
import logging
import warnings
# to avoid the generation of .pyc files
import sys
sys.dont_write_bytecode = True
# necessary import to ignore any ExtdepricationWarning warnings for external
# libraries
from flask.exthook import ExtDeprecationWarning
warnings.simplefilter('ignore', ExtDeprecationWarning)
# other essential imports
from logging.handlers import RotatingFileHandler
from flask import (Flask, url_for, g, render_template, flash, redirect, abort)
from flask.ext.bcrypt import check_password_hash
from flask.ext.login import (LoginManager, login_user, logout_user,
login_required, current_user)
import models
import forms
app = Flask(__name__)
app.secret_key = 'sefdewfewr43r535rewfwda!'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
try:
return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
# to connect to the database before each request
@app.before_request
def before_request():
g.db = models.DATABASE
g.db.connect()
g.user = current_user
# to close the database connection after each request
@app.after_request
def after_request(response):
g.db.close()
return response
# routing to my landing page which is the portfolio section
@app.route("/myprofile/<username>")
@app.route("/myprofile")
@login_required
def profile(username=None):
template='portfolio.html'
try:
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
this_route = url_for('.profile')
app.logger.info( current_user.username + " viewed " + username + "'s personal Profile page " + this_route)
else:
user=current_user
this_route = url_for('.profile')
app.logger.info( current_user.username + " viewed his/her personal Profile page " + this_route)
if username:
template = 'portfolio.html'
except models.DoesNotExist:
abort(404)
else:
return render_template(template, user=user)
# routing to the about section
@app.route("/about/<username>")
@app.route("/about")
@login_required
def about(username=None):
template='about.html'
try:
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
this_route = url_for('.about')
app.logger.info( current_user.username + " viewed " + username + "'s personal About page " + this_route)
else:
user=current_user
this_route = url_for('.about')
app.logger.info( current_user.username + " viewed his/her personal About Me page " + this_route)
if username:
template = 'about.html'
except models.DoesNotExist:
abort(404)
else:
return render_template(template, user=user)
# routing to the create a new post section
@app.route("/new_post", methods=('GET','POST'))
@login_required
def post(username=None):
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
this_route = url_for('.post')
app.logger.info( current_user.username + " created a new post on " +
username + "'s post feed section " + this_route)
else:
user=current_user
this_route = url_for('.post')
app.logger.info( current_user.username + " created a new post on his/her post feed section "
+ this_route)
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(),
content=form.content.data.strip())
flash("Message posted!", "success")
return redirect(url_for('root'))
return render_template('post.html', form=form, user=user)
# the user is redirected to the root page after posting a new message and can
# view their recent posts on the post feed section
@app.route("/")
def root(username=None):
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
else:
user = current_user
this_route = url_for('.root')
app.logger.info(current_user.username + " was redirected to the root page " + this_route)
stream = models.Post.select().limit(100)
return render_template('stream.html',user=user, stream=stream)
# routing to the posts stream section
@app.route('/stream')
@app.route('/stream/<username>')
def stream(username=None):
template='stream.html'
if username and username != current_user.username:
this_route = url_for('.stream')
app.logger.info(current_user.username + " viewed " + username + "'s Stream section "
+ this_route)
try:
user = models.User.select().where(models.User.username**username).get()
except models.DoesNotExist:
abort(404)
else:
stream=user.posts.limit(100)
else:
stream=current_user.get_stream().limit(100)
user=current_user
this_route = url_for('.stream')
app.logger.info(current_user.username + " viewed his/her Stream section "
+ this_route)
if username:
template = 'user-stream.html'
return render_template(template, stream=stream, user=user)
# routing to each individual post
@app.route('/post/<int:post_id>')
def view_post(post_id, username=None):
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
else:
user=current_user
posts = models.Post.select().where(models.Post.id == post_id)
if posts.count() == 0:
abort(404)
return render_template('stream.html', stream=posts, user=user)
# function that adds one follower in the relationship table for the selected user
@app.route('/follow/<username>')
@login_required
def follow(username):
try:
to_user = models.User.get(models.User.username**username)
except models.DoesNotExist:
abort(404)
else:
try:
models.Relationship.create(
from_user=g.user._get_current_object(),
to_user=to_user
)
except models.IntegrityError:
pass
else:
flash("You're now following {}!".format(to_user.username),"success")
app.logger.info(current_user.username + " is now following " + username)
return redirect(url_for('stream',username=to_user.username))
# function that deletes the follower instance from the relationship table for
# the selected user
@app.route('/unfollow/<username>')
@login_required
def unfollow(username):
try:
to_user = models.User.get(models.User.username**username)
except models.DoesNotExist:
abort(404)
else:
try:
models.Relationship.get(
from_user=g.user._get_current_object(),
to_user=to_user
).delete_instance()
except models.IntegrityError:
pass
else:
flash("You've unfollowed {}!".format(to_user.username),"success")
app.logger.info(current_user.username + " is now unfollowing " +
username)
return redirect(url_for('stream',username=to_user.username))
# routing to the register page
@app.route('/register', methods=('GET','POST'))
def register():
this_route = url_for('.register')
app.logger.info("Someone visited the Register page " + this_route)
form = forms.RegisterForm()
if form.validate_on_submit():
flash("Congratulations, you have successfully registered!", "success")
models.User.create_user(
username=form.username.data,
email=form.email.data,
password=form.password.data
)
return redirect(url_for('profile'))
return render_template('register.html', form=form)
# routing to the login page
@app.route('/login', methods=('GET','POST'))
def login():
this_route = url_for('.login')
app.logger.info("Someone visited the Login page " + this_route)
form = forms.LoginForm()
if form.validate_on_submit():
try:
user = models.User.get(models.User.email == form.email.data)
except models.DoesNotExist:
flash("Your email or password doesn't match!", "error")
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
flash("You've been logged in!", "success")
return redirect(url_for('profile'))
else:
flash("Your email or password doesn't match!", "error")
return render_template('login.html', form=form)
# routing to the logout page which redirects the user to the login page
@app.route('/logout')
@login_required
def logout():
this_route = url_for('.logout')
app.logger.info( current_user.username + " requested to logout " + this_route)
logout_user()
flash("You've been logged out. Come back soon!","success")
return redirect(url_for('login'))
# parsing configuration details from an external file
def init(app):
config = ConfigParser.ConfigParser()
try:
config_location = "etc/defaults.cfg"
config.read(config_location)
app.config['DEBUG'] = config.get("config", "debug")
app.config['ip_address'] = config.get("config", "ip_address")
app.config['port'] = config.get("config", "port")
app.config['url'] = config.get("config", "url")
app.config['log_file'] = config.get("logging", "name")
app.config['log_location'] = config.get("logging", "location")
app.config['log_level'] = config.get("logging", "level")
    except Exception:
        print "Could not read configuration file from: ", config_location
# setting up a logging feature to record action logs into a text file
def logs(app):
log_pathname = app.config['log_location']+ app.config['log_file']
file_handler = RotatingFileHandler(log_pathname, maxBytes=1024*1024*10 ,
backupCount=1024)
file_handler.setLevel( app.config['log_level'])
formatter = logging.Formatter("%(levelname)s | %(asctime)s | %(module)s | %(funcName)s | %(message)s")
file_handler.setFormatter(formatter)
app.logger.setLevel(app.config['log_level'])
app.logger.addHandler(file_handler)
# error handling mechanism to catch all the 404 errors and to redirect the user to
# a custom 404 page
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# initialisation function
if __name__ == "__main__":
init(app)
logs(app)
models.initialize()
try:
# first user created to populate the user table
models.User.create_user(
username='poisonphoebe',
email='<EMAIL>',
password='password',
admin=True
)
except ValueError:
pass
app.run(
host = app.config['ip_address'],
port = int(app.config['port']))
| StarcoderdataPython |
3275079 | from abc import abstractmethod
from .autoencoder import Autoencoder
from .default_values import *
from .data_utils import DataCooker
from .layers import ConstantDispersionLayer
from keras.optimizers import *
from keras.models import model_from_json
import numpy as np
import json
import os
class Corrector():
@abstractmethod
def correct(self, counts, size_factors, **kwargs):
pass
class DummyCorrector(Corrector):
def __init__(self):
pass
def correct(self, counts, size_factors, **kwargs):
return np.ones_like(self.counts)
class AECorrector(Corrector):
def __init__(self, model_name=None, model_directory=None, verbose=True,
param_path=OPT_PARAM_PATH, param_exp_name=None, denoisingAE=False,
save_model=True, epochs=DEFAULT_EPOCHS, encoding_dim=DEFAULT_ENCODING_DIM,
lr=DEFAULT_LEARNING_RATE, batch_size=DEFAULT_BATCH_SIZE,
seed=None):
self.denoisingAE = denoisingAE
self.save_model = save_model
self.seed = seed
if model_name is None:
self.model_name = "model"
else:
self.model_name = model_name
if model_directory is None:
self.directory = MODEL_PATH
else:
self.directory = model_directory
if verbose:
self.verbose = 2
else:
self.verbose = 0
if param_exp_name is not None:
path = os.path.join(param_path,param_exp_name+"_best.json")
metrics = json.load(open(path))
self.batch_size = metrics['batch_size']
self.epochs = metrics['epochs']
self.encoding_dim = metrics['encoding_dim']
self.lr = metrics['lr']
else:
self.epochs = epochs
self.encoding_dim = encoding_dim
self.lr = lr
self.batch_size = batch_size
def correct(self, counts, size_factors=None, only_predict=False):
if len(counts.shape) == 1:
counts = counts.reshape(1,counts.shape[0])
size_factors = size_factors.reshape(1,size_factors.shape[0])
if size_factors is not None and counts.shape[0] != size_factors.shape[0]:
raise ValueError("Size factors and counts must have equal number of samples"+
"\nNow counts shape:"+str(counts.shape)+ \
"\nSize factors shape:"+str(size_factors.shape))
model_file = os.path.join(self.directory, self.model_name + '.json')
weights_file = os.path.join(self.directory, self.model_name + '_weights.h5')
if (not (os.path.isfile(model_file) or os.path.isfile(weights_file))) and only_predict:
raise ValueError("There is no model "+str(model_file)+" or no weigthts "+str(weights_file)+
"' saved. Only predict is not possible!")
self.loader = DataCooker(counts, size_factors,
inject_outliers=self.denoisingAE, inject_on_pred=False,
only_prediction=only_predict, seed=self.seed)
self.data = self.loader.data()
if not only_predict:
self.ae = Autoencoder(coder_type='autoencoder',
size=counts.shape[1], seed=self.seed,
encoding_dim=self.encoding_dim)
self.ae.model.compile(optimizer=Adam(lr=self.lr), loss=self.ae.loss)
self.ae.model.fit(self.data[0][0], self.data[0][1],
epochs=self.epochs, batch_size=self.batch_size,
shuffle = False if self.seed is not None else True,
validation_data=(self.data[1][0], self.data[1][1]),
verbose=self.verbose)
model = self.ae.model
if self.save_model:
os.makedirs(self.directory, exist_ok=True)
model_json = self.ae.model.to_json()
with open(model_file, "w") as json_file:
json_file.write(model_json)
self.ae.model.save_weights(weights_file)
print("Model saved on disk!")
else:
json_file = open(model_file, 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json,
custom_objects={'ConstantDispersionLayer': ConstantDispersionLayer})
model.load_weights(weights_file)
print("Model loaded from disk!")
self.corrected = model.predict(self.data[2][0])
return self.corrected
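# Minimal usage sketch (hypothetical count matrix of shape samples x genes,
# with matching per-sample size factors):
#
#   corrector = AECorrector(epochs=10, save_model=False)
#   corrected = corrector.correct(counts, size_factors)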
| StarcoderdataPython |
3227241 | <filename>models.py
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.use_cuda = torch.cuda.is_available()
self.method = method
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, targets, mask=None):
this_batch_size = targets.size(0)
max_len = targets.size(1)
# Create variable to store attention energies
attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S
if torch.cuda.is_available():
attn_energies = attn_energies.cuda()
# For each batch of encoder outputs
for b in range(this_batch_size):
# Calculate energy for each encoder output
for i in range(max_len):
attn_energies[b, i] = self.score(hidden[:, b], targets[b, i].unsqueeze(0))
if mask is not None:
attn_energies = attn_energies + mask
# Normalize energies to weights in range 0 to 1, resize to 1 x B x S
return F.softmax(attn_energies, dim=1).unsqueeze(1)
def score(self, hidden, target):
if self.method == 'dot':
energy = torch.dot(hidden.squeeze(0), target.squeeze(0))
return energy
elif self.method == 'general':
energy = self.attn(target)
return torch.dot(hidden.squeeze(0), energy.squeeze(0))
        elif self.method == 'concat':
            energy = self.attn(torch.cat((hidden, target), 1))
            # v is 1 x hidden_size; an element-wise product summed over features
            # replaces Tensor.dot, which only accepts 1-D tensors.
            return torch.sum(self.v * energy)
class BasicRNN(nn.Module):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(BasicRNN, self).__init__()
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
if pretrained_embeddings is not None:
for i in range(vocab_size):
word = lang.index2word[i]
if word in pretrained_embeddings:
self.word_embeds.weight[i] = nn.Parameter(torch.FloatTensor(pretrained_embeddings[word]))
self.word_embeds = nn.Embedding.from_pretrained(self.word_embeds.weight)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_directions = 1
self.rnn = nn.RNN(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
self.fc1 = nn.Linear(hidden_size * self.num_directions, 64)
self.fc2 = nn.Linear(64, num_classes)
self.dropout = nn.Dropout(p=dropout)
        # Cache CUDA availability for device placement in forward().
        self.use_cuda = torch.cuda.is_available()
    def freeze_layer(self, layer):
        # Freeze all parameters of the chosen fully-connected layer ("fc1" or "fc2").
        fc = self.fc1
        if layer == "fc2":
            fc = self.fc2
        for param in fc.parameters():
            param.requires_grad = False
def forward(self, inputs, seq_lengths):
batch_size = inputs.size(0)
inputs = self.word_embeds(inputs)
# Set initial states
h0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
if self.use_cuda:
h0 = h0.cuda()
# Forward propagate RNN
outputs, _ = self.rnn(inputs, h0)
# Decode hidden state of last time step
outputs = F.relu(self.fc1(outputs[:, -1, :]))
outputs = self.dropout(outputs)
outputs = self.fc2(outputs)
return outputs
def to_cuda(self, tensor):
if torch.cuda.is_available():
return tensor.cuda()
else:
return tensor
class AttentionRNN(BasicRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(AttentionRNN, self).__init__(
embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.attn = Attn('general', hidden_size)
def forward(self, inputs, lang, seq_lengths):
batch_size = inputs.size(0)
embedded = self.word_embeds(inputs)
total_length = embedded.size(1) # get the max sequence length
# Set initial states
h0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
if torch.cuda.is_available():
h0 = h0.cuda()
packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, seq_lengths, batch_first=True)
# Forward propagate RNN
# rnn_outputs, state = self.rnn(embedded, h0)
rnn_outputs, state = self.rnn(packed, h0)
rnn_outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(
rnn_outputs, batch_first=True, total_length=total_length) # unpack (back to padded)
encoder_mask = torch.Tensor(np.array(inputs.cpu().data.numpy() == lang.PAD_token,
dtype=float) * (-1e6)) # [b x seq]
encoder_mask = Variable(self.to_cuda(encoder_mask))
# use attention to compute soft alignment score corresponding
# between each of the hidden_state and the last hidden_state of the RNN
attn_weights = self.attn(state, rnn_outputs, mask=encoder_mask)
new_state = attn_weights.bmm(rnn_outputs) # B x 1 x N
# Decode hidden state of last time step
# outputs = F.relu(self.fc1(rnn_outputs[:, -1, :]))
outputs = F.relu(self.fc1(new_state.squeeze(1)))
outputs = self.dropout(outputs)
outputs = self.fc2(outputs)
return outputs
class LSTM(BasicRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(LSTM, self).__init__(embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.rnn = nn.LSTM(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
def forward(self, inputs, seq_lengths):
batch_size = inputs.size(0)
inputs = self.word_embeds(inputs)
# Set initial states
h0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
c0 = Variable(torch.zeros(self.num_layers * self.num_directions, batch_size, self.hidden_size))
if torch.cuda.is_available():
h0 = h0.cuda()
c0 = c0.cuda()
# Forward propagate RNN
outputs, _ = self.rnn(inputs, (h0, c0))
# Decode hidden state of last time step
outputs = F.relu(self.fc1(outputs[:, -1, :]))
outputs = self.dropout(outputs)
outputs = self.fc2(outputs)
return outputs
class GRURNN(BasicRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(GRURNN, self).__init__(embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.rnn = nn.GRU(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
class AttentionGRURNN(AttentionRNN):
def __init__(self, embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout):
super(AttentionGRURNN, self).__init__(
embedding_dim, hidden_size, lang, pretrained_embeddings,
num_layers, vocab_size, num_classes, dropout)
self.rnn = nn.GRU(input_size=embedding_dim,
hidden_size=hidden_size, num_layers=num_layers,
batch_first=True, bidirectional=False)
class HighwayNetwork(nn.Module):
def __init__(self, input_size):
super(HighwayNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, input_size, bias=True)
self.fc2 = nn.Linear(input_size, input_size, bias=True)
def forward(self, x):
t = F.sigmoid(self.fc1(x))
return torch.mul(t, F.relu(self.fc2(x))) + torch.mul(1 - t, x)
class CNN(nn.Module):
def __init__(self, vocab_size, output_size, embedding_dim, lang,
pretrained_embeddings, dropout=0.1):
super(CNN, self).__init__()
self.use_cuda = torch.cuda.is_available()
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.output_size = output_size
self.dropout = dropout
print('vocab_size:', vocab_size)
self.embedding = nn.Embedding(vocab_size, embedding_dim)
if pretrained_embeddings is not None:
for i in range(vocab_size):
word = lang.index2word[i]
if word in pretrained_embeddings:
self.embedding.weight[i] = nn.Parameter(torch.FloatTensor(pretrained_embeddings[word]))
self.embedding = nn.Embedding.from_pretrained(self.embedding.weight)
self.conv1 = None
self.conv2 = None
self.init_conv1_layer()
self.maxpool1 = nn.MaxPool2d(kernel_size=(3, 1), stride=(3, 1))
self.init_conv2_layer()
self.maxpool2 = nn.MaxPool2d(kernel_size=(3, 1), stride=(3, 1))
self.fc1 = None
self.fc2 = None
self.init_fc_layers()
# Highway Networks
self.batch_norm = nn.BatchNorm1d(num_features=128, affine=False)
self.highway1 = HighwayNetwork(input_size=128)
self.highway2 = HighwayNetwork(input_size=128)
def init_conv1_layer(self):
self.conv1 = nn.Sequential(
nn.Conv2d(1, 10, kernel_size=(5, self.embedding_dim), stride=1, padding=2),
nn.ReLU())
def init_conv2_layer(self):
self.conv2 = nn.Sequential(
nn.Conv2d(5, 20, kernel_size=(5, 3), stride=1),
nn.ReLU())
def freeze_conv1_layer(self):
for param in self.conv1.parameters():
param.requires_grad = False
def freeze_conv2_layer(self):
for param in self.conv2.parameters():
param.requires_grad = False
def init_fc_layers(self):
self.fc1 = nn.Sequential(
nn.Linear(4160, 256),
nn.ReLU(),
nn.Dropout(p=0.5)
)
self.fc2 = nn.Linear(256, self.output_size)
def forward(self, input_seqs):
x1 = self.embedding(input_seqs)
x2 = x1.unsqueeze(1)
x3 = self.conv1(x2)
x4 = x3.transpose(1, 3)
x5 = self.maxpool1(x4)
x6 = self.conv2(x5)
x7 = x6.transpose(1, 3)
x8 = self.maxpool2(x7)
x9 = x8.view(x8.size(0), -1)
x10 = self.fc1(x9)
x = self.fc2(x10)
# print('x1:', x1.size())
# print('x2:', x2.size())
# print('x3:', x3.size())
# print('x4:', x4.size())
# print('x5:', x5.size())
# print('x6:', x6.size())
# print('x7:', x7.size())
# print('x8:', x8.size())
# print('x9:', x9.size())
# print('x10:', x10.size())
# x = self.batch_norm(x)
# x = self.highway1(x)
# x = self.highway2(x)
return x
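# Minimal usage sketch (hypothetical sizes; no pretrained embeddings). Note the
# CNN's fc1 input size is hard-coded, so the padded sequence length must match
# the geometry the convolution/pooling stack was written for:
#
#   model = CNN(vocab_size=5000, output_size=2, embedding_dim=100,
#               lang=None, pretrained_embeddings=None)
#   logits = model(torch.randint(0, 5000, (8, 120)))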
| StarcoderdataPython |
3350407 | <reponame>anirudhmungre/sneaky-lessons<gh_stars>1-10
# Incorporate the random library
import random
# Print Title
print("Let's Play Rock Paper Scissors!")
# Specify the three options
options = ["r", "p", "s"]
# Computer Selection
computer_choice = random.choice(options)
# User Selection
user_choice = input("Make your Choice: (r)ock, (p)aper, (s)cissors? ")
# Run Conditionals
if user_choice == computer_choice:
    print(f"Tie! You both chose {computer_choice}.")
elif (user_choice == "r" and computer_choice == "s") or \
     (user_choice == "p" and computer_choice == "r") or \
     (user_choice == "s" and computer_choice == "p"):
    print(f"You win! Computer chose {computer_choice}.")
else:
    print(f"You lose! Computer chose {computer_choice}.")
| StarcoderdataPython |
1682168 | <filename>celery_task_plugins/redis_chain_store/task.py<gh_stars>0
import threading
from contextlib import contextmanager
import redis
from celery.task import Task
from kombu import serialization
def CeleryChainPlugin(
redis_host,
redis_port=6379,
redis_db=1,
base_exc_class=Exception,
read_kwarg="_read_chain_results",
store_kwarg="_store_chain_results",
store_expires=3600,
):
class CeleryChainTask(Task):
store_chain_results = False
read_chain_results = False
typing = False
thread_context = threading.local()
thread_context.redis_chain_store = redis.ConnectionPool(
host=redis_host, port=redis_port, db=redis_db
)
chain_store = redis.Redis(connection_pool=thread_context.redis_chain_store)
class ResultNotFound(base_exc_class):
pass
def read_results(self, task_id, delete=True, raise_if_missing=True):
if self.chain_store.exists(task_id):
serializer = self.app.conf["result_serializer"]
content_type, content_encoding, _ = serialization.registry._encoders[
serializer
]
chained_arg = self.chain_store.get(task_id)
chained_arg = serialization.loads(
chained_arg, content_type, content_encoding, accept={content_type}
)
if delete:
self.chain_store.delete(task_id)
return chained_arg
else:
if raise_if_missing:
raise self.ResultNotFound(f"Can't find result for task {task_id}")
else:
return task_id
@contextmanager
def with_chain_args(self, args, kwarg_override=False):
if not self.read_chain_results or kwarg_override:
yield args
else:
result_id = args[0]
try:
arg = self.read_results(
result_id, delete=False, raise_if_missing=False
)
args = (arg,) + args[1:]
yield args
except Exception:
raise
else:
self.chain_store.delete(result_id)
def __call__(self, *args, **kwargs):
self.read_chain_results = kwargs.pop(read_kwarg, self.read_chain_results)
self.store_chain_results = kwargs.pop(store_kwarg, self.store_chain_results)
with self.with_chain_args(args) as args:
result = super(CeleryChainTask, self).__call__(*args, **kwargs)
if (self.request.chain or self.request.group) and self.store_chain_results:
_, _, data = serialization.dumps(
result, self.app.conf["result_serializer"]
)
self.chain_store.set(self.request.id, data, ex=store_expires)
result = self.request.id
return result
return CeleryChainTask
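# Minimal wiring sketch (hypothetical Celery app and Redis host):
#
#   ChainTask = CeleryChainPlugin(redis_host='localhost')
#
#   @app.task(base=ChainTask, store_chain_results=True)
#   def produce(x):
#       return x * 2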
| StarcoderdataPython |
1781148 | <filename>megaverse_rl/runs/single_agent.py
from sample_factory.runner.run_description import RunDescription
from megaverse_rl.runs.megaverse_base_experiments import EXPERIMENT_1AGENT
RUN_DESCRIPTION = RunDescription('megaverse_arxiv', experiments=[EXPERIMENT_1AGENT])
| StarcoderdataPython |
3208460 | <reponame>itsnamgyu/reid-research
import collections
import warnings
import torch
from reid_evaluation.metric import evaluate, compute_distances
from utils import MetricTracker, SharedStorage
class ActiveMetric:
"""Metric class that actively interacts with MetricTracker and SharedStorage to track metrics,
during end-of-step and end-of-epoch callbacks.
"""
def on_step_end(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):
"""
:param items: output from data loader and model during a single step
:param tracker: metric tracker to write metrics to
:param storage: storage to interact with. Note that writing data to storage should be handled by
the trainer. A common use-case would be to write to the metadata dictionary.
:return:
"""
pass
def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
pass
def _accuracy(output, target):
with torch.no_grad():
pred = torch.argmax(output, dim=1)
assert pred.shape[0] == len(target)
correct = 0
correct += torch.sum(pred == target).item()
return correct / len(target)
def _top_k_acc(output, target, k=3):
warnings.warn("This metric isn't adapted to the current project. You'll probably get an error")
with torch.no_grad():
pred = torch.topk(output, k, dim=1)[1]
assert pred.shape[0] == len(target)
correct = 0
for i in range(k):
correct += torch.sum(pred[:, i] == target).item()
return correct / len(target)
class Accuracy(ActiveMetric):
def __init__(self, output_key="preds", target_key="targets", name="accuracy"):
if type(name) is not str:
raise Exception("name must be a valid string")
self.__name__ = str(name)
self.output_key = output_key
self.target_key = target_key
def on_step_end(self, items: collections.Mapping, tracker: MetricTracker, storage: SharedStorage):
output, target = items[self.output_key], items[self.target_key]
value = _accuracy(output, target)
tracker.update("accuracy", value, n=output.size(0))
class ReidMetric(ActiveMetric):
def __init__(self):
pass
def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
qpids = storage.get_data("qpids")
gpids = storage.get_data("gpids")
qcamids = storage.get_data("qcamids")
gcamids = storage.get_data("gcamids")
distmat = storage.get_data("distmat")
if distmat is None:
qf = storage.get_data("qf")
gf = storage.get_data("gf")
distmat = compute_distances(qf, gf)
storage.set_data("distmat", distmat)
all_cmc, all_AP, all_INP = evaluate(distmat, qpids, gpids, qcamids, gcamids)
r1 = all_cmc[0].item()
mAP = all_AP.mean().item()
mINP = all_INP.mean().item()
tracker.update("r1", r1)
tracker.update("mAP", mAP)
tracker.update("mINP", mINP)
class ReidGlobalDistanceHistogram(ActiveMetric):
def __init__(self, train=False):
self.train = train
def on_epoch_end(self, tracker: MetricTracker, storage: SharedStorage):
if self.train:
qf = gf = storage.get_data("features")
qpids = gpids = storage.get_data("pids")
qcamids = gcamids = storage.get_data("camids")
prefix = ""
else:
qf = storage.get_data("qf")
gf = storage.get_data("gf")
qpids = storage.get_data("qpids")
gpids = storage.get_data("gpids")
qcamids = storage.get_data("qcamids")
gcamids = storage.get_data("gcamids")
prefix = "valid_"
distmat = storage.get_data("distmat")
if distmat is None:
distmat = compute_distances(qf, gf)
storage.set_data("distmat", distmat)
same_pid = gpids.eq(qpids.reshape(-1, 1))
same_cam = gcamids.eq(qcamids.reshape(-1, 1))
negative: torch.Tensor = ~same_pid
positive: torch.Tensor = same_pid
positive_same_cam = torch.logical_and(same_pid, same_cam)
positive_diff_cam = torch.logical_and(same_pid, ~same_cam)
if self.train:
# filter out identical instances from positive distances
same_image = torch.diagflat(torch.ones(qf.size(0), dtype=torch.bool, device=qf.device))
positive.logical_and_(~same_image)
positive_same_cam.logical_and_(~same_image)
tracker.update(prefix + "global_dist_pos_same_cam_mean", distmat[positive_same_cam].mean().item())
tracker.update(prefix + "global_dist_pos_diff_cam_mean", distmat[positive_diff_cam].mean().item())
tracker.update(prefix + "global_dist_pos_mean", distmat[positive].mean().item())
tracker.update(prefix + "global_dist_neg_mean", distmat[negative].mean().item())
tracker.append_histogram(prefix + "global_dist_pos_same_cam", distmat[positive_same_cam])
tracker.append_histogram(prefix + "global_dist_pos_diff_cam", distmat[positive_diff_cam])
tracker.append_histogram(prefix + "global_dist_pos", distmat[positive])
tracker.append_histogram(prefix + "global_dist_neg", distmat[negative])
| StarcoderdataPython |
1659864 | <gh_stars>0
import sqlite3
# Connect to the database
conn = sqlite3.connect('data.sqlite')
cur = conn.cursor()
# Retrieve the most recent date from the Dates table
def recent_date(cr):
cr.execute('SELECT MAX(date) FROM Dates')
date = cr.fetchone()[0]
return date
# Display information about each restaurant in the database
def show_restaurants(cr):
date = recent_date(cr)
cr.execute('''
SELECT Restaurants.name, Restaurants.address, Restaurants.category, Restaurants.reviews, Ratings.rating
FROM Restaurants JOIN Ratings JOIN Dates
ON Ratings.restaurant_id = Restaurants.id AND Ratings.date_id = Dates.id
WHERE date = ?
''',
(date, ))
info = cr.fetchall()
print('Displaying information about each restaurant in the database:')
for name, address, category, reviews, rating in info:
print('======================================================')
print('Name:', name)
print('Address:', address)
print('Category:',category)
print('Number of reviews:',reviews)
print('Rating:',rating)
show_restaurants(cur)
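# Release the database connection when finished.
conn.close()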
| StarcoderdataPython |
72047 | # -*- coding: utf-8 -*-
"""
Tests for the Keystone states
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import destructiveTest
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
NO_KEYSTONE = False
try:
import keystoneclient # pylint: disable=import-error,unused-import
except ImportError:
NO_KEYSTONE = True
@skipIf(
NO_KEYSTONE,
"Please install keystoneclient and a keystone server before running"
"keystone integration tests.",
)
class KeystoneStateTest(ModuleCase, SaltReturnAssertsMixin):
"""
Validate the keystone state
"""
endpoint = "http://localhost:35357/v2.0"
token = "administrator"
@destructiveTest
def setUp(self):
ret = self.run_state(
"keystone.service_present",
name="keystone",
description="OpenStack Identity",
service_type="identity",
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(
ret["keystone_|-keystone_|-keystone_|-service_present"]["result"]
)
ret = self.run_state(
"keystone.endpoint_present",
name="keystone",
region="RegionOne",
publicurl="http://localhost:5000/v2.0",
internalurl="http://localhost:5000/v2.0",
adminurl="http://localhost:35357/v2.0",
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(
ret["keystone_|-keystone_|-keystone_|-endpoint_present"]["result"]
)
ret = self.run_state(
"keystone.tenant_present",
name="admin",
description="Admin Project",
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(ret["keystone_|-admin_|-admin_|-tenant_present"]["result"])
ret = self.run_state(
"keystone.tenant_present",
name="demo",
description="Demo Project",
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(ret["keystone_|-demo_|-demo_|-tenant_present"]["result"])
ret = self.run_state(
"keystone.role_present",
name="admin",
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(ret["keystone_|-admin_|-admin_|-role_present"]["result"])
ret = self.run_state(
"keystone.role_present",
name="user",
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(ret["keystone_|-user_|-user_|-role_present"]["result"])
ret = self.run_state(
"keystone.user_present",
name="admin",
email="<EMAIL>",
password="<PASSWORD>",
tenant="admin",
roles={"admin": ["admin"]},
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(ret["keystone_|-admin_|-admin_|-user_present"]["result"])
ret = self.run_state(
"keystone.user_present",
name="demo",
email="<EMAIL>",
password="<PASSWORD>",
tenant="demo",
roles={"demo": ["user"]},
connection_endpoint=self.endpoint,
connection_token=self.token,
)
self.assertTrue(ret["keystone_|-demo_|-demo_|-user_present"]["result"])
@destructiveTest
def test_keystone_v2(self):
ret = self.run_state(
"keystone.service_present",
name="testv2",
description="Nova Service",
service_type="compute",
profile="adminv2",
)
self.assertTrue(ret["keystone_|-testv2_|-testv2_|-service_present"]["result"])
ret = self.run_state(
"keystone.endpoint_present",
name="nova",
description="Nova Service",
publicurl="http://localhost:8774/v2.1/%(tenant_id)s",
internalurl="http://localhost:8774/v2.1/%(tenant_id)s",
adminurl="http://localhost:8774/v2.1/%(tenant_id)s",
region="RegionOne",
profile="adminv2",
)
self.assertTrue(ret["keystone_|-nova_|-nova_|-endpoint_present"]["result"])
# Region Two
ret = self.run_state(
"keystone.endpoint_present",
name="nova",
description="Nova Service",
publicurl="http://localhost:8774/v2.1/%(tenant_id)s",
internalurl="http://localhost:8774/v2.1/%(tenant_id)s",
adminurl="http://localhost:8774/v2.1/%(tenant_id)s",
region="RegionTwo",
profile="adminv2",
)
self.assertTrue(ret["keystone_|-nova_|-nova_|-endpoint_present"]["result"])
# Region One, change publicurl
ret = self.run_state(
"keystone.endpoint_present",
name="nova",
description="Nova Service",
publicurl="http://127.0.0.1:8774/v2.1/%(tenant_id)s",
internalurl="http://localhost:8774/v2.1/%(tenant_id)s",
adminurl="http://localhost:8774/v2.1/%(tenant_id)s",
region="RegionOne",
profile="adminv2",
)
self.assertTrue(ret["keystone_|-nova_|-nova_|-endpoint_present"]["result"])
ret = self.run_state(
"keystone.endpoint_get", name="nova", region="RegionOne", profile="adminv2"
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
self.assertTrue(
ret["keystone_|-testv3_|-testv3_|-endpoint_present"][
"publicurl"
].start_with("http://127.0.0.1")
)
ret = self.run_state(
"keystone.tenant_present",
name="test",
description="Test Tenant",
profile="adminv2",
)
self.assertTrue(ret["keystone_|-test_|-test_|-tenant_present"]["result"])
ret = self.run_state("keystone.role_present", name="user", profile="adminv2")
self.assertTrue(ret["keystone_|-user_|-user_|-role_present"]["result"])
ret = self.run_state(
"keystone.user_present",
name="test",
email="<EMAIL>",
tenant="test",
password="<PASSWORD>",
roles={"test": ["user"]},
profile="adminv2",
)
self.assertTrue(ret["keystone_|-test_|-test_|-user_present"]["result"])
ret = self.run_state(
"keystone.service_absent", name="testv2", profile="adminv2"
)
self.assertTrue(ret["keystone_|-testv2_|-testv2_|-service_absent"]["result"])
@destructiveTest
def test_keystone_v3(self):
ret = self.run_state(
"keystone.service_present",
name="testv3",
description="Image Service",
service_type="image",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-service_present"]["result"])
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="public",
url="http://localhost:9292",
region="RegionOne",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="internal",
url="http://localhost:9292",
region="RegionOne",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="admin",
url="http://localhost:9292",
region="RegionOne",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
# Region Two
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="public",
url="http://localhost:9292",
region="RegionTwo",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="internal",
url="http://localhost:9292",
region="RegionTwo",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="admin",
url="http://localhost:9292",
region="RegionTwo",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
# Region One, change
ret = self.run_state(
"keystone.endpoint_present",
name="testv3",
description="Glance Service",
interface="public",
url="http://127.0.0.1:9292",
region="RegionOne",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
ret = self.run_state(
"keystone.endpoint_get",
name="testv3",
region="RegionOne",
interface="public",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["result"])
self.assertTrue(
ret["keystone_|-testv3_|-testv3_|-endpoint_present"]["endpoint"]["url"]
== "http://127.0.0.1:9292"
)
ret = self.run_state(
"keystone.project_present",
name="testv3",
description="Test v3 Tenant",
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-project_present"]["result"])
ret = self.run_state("keystone.role_present", name="user", profile="adminv3")
self.assertTrue(ret["keystone_|-user_|-user_|-role_present"]["result"])
ret = self.run_state(
"keystone.user_present",
name="testv3",
email="<EMAIL>",
project="testv3",
password="<PASSWORD>",
roles={"testv3": ["user"]},
profile="adminv3",
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-user_present"]["result"])
ret = self.run_state(
"keystone.service_absent", name="testv3", profile="adminv3"
)
self.assertTrue(ret["keystone_|-testv3_|-testv3_|-service_absent"]["result"])
| StarcoderdataPython |
3387218 | <reponame>DiegoAV95/python_curso_-domingos<filename>Modulo1/Src/hola_argumento.py
import sys
# ask the user for their name
nombre = input('Introduzca su nombre: ')
# print a greeting with the user's name
print('hola, {} !'.format(nombre))
# print('hola, ' + nombre + '!') | StarcoderdataPython |
24818 | <gh_stars>0
from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
import logging
log = logging.getLogger(__name__)
class SyncMapper(Mapper):
@classmethod
def process(cls, client, store, items, media=None, flat=False, **kwargs):
if flat:
# Return flat item iterator
return cls.iterate_items(
client, store, items, cls.item,
media=media,
**kwargs
)
return cls.map_items(
client, store, items, cls.item,
media=media,
**kwargs
)
@classmethod
def item(cls, client, store, item, media=None, **kwargs):
i_type = item.get('type') or media
# Find item type function
if i_type.startswith('movie'):
func = cls.movie
elif i_type.startswith('show'):
func = cls.show
elif i_type.startswith('season'):
func = cls.season
elif i_type.startswith('episode'):
func = cls.episode
else:
raise ValueError('Unknown item type: %r' % i_type)
# Map item
return func(
client, store, item,
**kwargs
)
#
# Movie
#
@classmethod
def movies(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.movie, **kwargs)
@classmethod
def movie(cls, client, store, item, **kwargs):
movie = cls.map_item(client, store, item, 'movie', **kwargs)
# Update with root info
if 'movie' in item:
movie._update(item)
return movie
#
# Show
#
@classmethod
def shows(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.show, **kwargs)
@classmethod
def show(cls, client, store, item, **kwargs):
show = cls.map_item(client, store, item, 'show', **kwargs)
# Update with root info
if 'show' in item:
show._update(item)
# Process any episodes in the item
for i_season in item.get('seasons', []):
season_num = i_season.get('number')
season = cls.show_season(client, show, season_num, **kwargs)
for i_episode in i_season.get('episodes', []):
episode_num = i_episode.get('number')
cls.show_episode(client, season, episode_num, i_episode, **kwargs)
return show
@classmethod
def show_season(cls, client, show, season_num, item=None, **kwargs):
season = cls.map_item(client, show.seasons, item, 'season', key=season_num, parent=show, **kwargs)
season.show = show
# Update with root info
if item and 'season' in item:
season._update(item)
return season
@classmethod
def show_episode(cls, client, season, episode_num, item=None, **kwargs):
episode = cls.map_item(
client, season.episodes, item, 'episode',
key=episode_num,
parent=season,
**kwargs
)
episode.show = season.show
episode.season = season
# Update with root info
if item and 'episode' in item:
episode._update(item)
return episode
#
# Season
#
@classmethod
def seasons(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.season, **kwargs)
@classmethod
def season(cls, client, store, item, **kwargs):
i_season = item.get('season', {})
season_num = i_season.get('number')
# Build `show`
show = cls.show(client, store, item['show'])
if show is None:
# Unable to create show
return None
# Build `season`
season = cls.show_season(client, show, season_num, item, **kwargs)
return season
#
# Episode
#
@classmethod
def episodes(cls, client, store, items, **kwargs):
return cls.map_items(client, store, items, cls.episode, **kwargs)
@classmethod
def episode(cls, client, store, item, append=False, **kwargs):
i_episode = item.get('episode', {})
season_num = i_episode.get('season')
episode_num = i_episode.get('number')
# Build `show`
show = cls.show(client, store, item['show'])
if show is None:
# Unable to create show
return None
# Build `season`
season = cls.show_season(
client, show, season_num,
**kwargs
)
# Build `episode`
episode = cls.show_episode(
client, season, episode_num, item,
append=append,
**kwargs
)
return episode
#
# Helpers
#
@classmethod
def map_items(cls, client, store, items, func, **kwargs):
if store is None:
store = {}
for item in items:
result = func(
client, store, item,
**kwargs
)
if result is None:
log.warning('Unable to map item: %s', item)
return store
@classmethod
def iterate_items(cls, client, store, items, func, **kwargs):
if store is None:
store = {}
if 'movies' not in store:
store['movies'] = {}
if 'shows' not in store:
store['shows'] = {}
if 'seasons' not in store:
store['seasons'] = {}
if 'episodes' not in store:
store['episodes'] = {}
for item in items:
i_type = item.get('type')
if i_type == 'movie':
i_store = store['movies']
elif i_type == 'show':
i_store = store['shows']
elif i_type == 'season':
i_store = store['seasons']
elif i_type == 'episode':
i_store = store['episodes']
else:
raise ValueError('Unknown item type: %r' % i_type)
# Map item
result = func(
client, i_store, item,
append=True,
**kwargs
)
if result is None:
log.warning('Unable to map item: %s', item)
# Yield item in iterator
yield result
@classmethod
def map_item(cls, client, store, item, media, key=None, parent=None, append=False, **kwargs):
if item and media in item:
i_data = item[media]
else:
i_data = item
# Retrieve item key
pk, keys = cls.get_ids(media, i_data, parent=parent)
if key is not None:
pk = key
if not keys:
keys = [pk]
if pk is None:
# Item has no keys
return None
if store is None or pk not in store or append:
# Construct item
obj = cls.construct(client, media, i_data, keys, **kwargs)
if store is None:
return obj
# Update store
if append:
if pk in store:
store[pk].append(obj)
else:
store[pk] = [obj]
else:
store[pk] = obj
return obj
else:
# Update existing item
store[pk]._update(i_data, **kwargs)
return store[pk]
| StarcoderdataPython |
4825596 | <filename>gravedigger/gravedigger.py<gh_stars>1-10
"""
This module kills and removes containers that satisfy the following conditions:
* not matched by any pattern listed in whitelist.txt
* created more than 24h ago
Also, a logfile called gravedigger.log is created in the current directory
"""
import logging
import re
from datetime import datetime, timedelta, timezone
from typing import List
import docker
from dateutil import parser
from docker.errors import NotFound, APIError
from docker.models.containers import Container
WHITELIST_FILE = "whitelist.txt"
LOG_FILE = "gravedigger.log"
def read_whitelist() -> List[str]:
with open(WHITELIST_FILE, encoding="utf-8") as f:
return f.read().split()
def filter_whitelisted_containers(containers: List[Container], whitelist: List[str]) -> List[Container]:
"""
Filter out whitelisted containers.
:param containers:
:param whitelist:
:return: A list of containers not matching regex entries in the whitelist
"""
containers_set = set(containers)
for entry in whitelist:
matcher = re.compile(entry)
keep_containers = filter(lambda container: matcher.match(container.name), containers)
containers_set -= set(keep_containers)
return list(containers_set)
def filter_newer_containers(containers: List[Container]) -> List[Container]:
"""
Filter out containers younger than 24h
:param containers:
:return: A list of containers created more than 24h ago
"""
def fresh_container(container: Container) -> bool:
creation_date = parser.parse(container.attrs["Created"])
return datetime.now(timezone.utc) - creation_date < timedelta(hours=24)
return list(filter(lambda x: not fresh_container(x), containers))
def kill_and_remove_containers(containers: List[Container]) -> None:
"""
Take a list of containers and try to kill and remove them.
:param containers:
:return:
"""
for container in containers:
try:
container.kill()
except NotFound:
log.warning("Tried to kill {}, but found no corresponding container".format(container.name))
continue
except APIError:
log.exception("Could not kill {}".format(container.name))
try:
container.remove()
except APIError:
log.exception("Could not remove {}".format(container.name))
log.info(" * Killed and removed {}".format(container.name))
def init_logger():
global log
log = logging.getLogger("gravedigger")
log.setLevel(logging.INFO)
log.addHandler(logging.FileHandler(LOG_FILE))
log.addHandler(logging.StreamHandler())
def main():
init_logger()
log.info("Started at {}".format(datetime.now()))
whitelist = read_whitelist()
docker_client = docker.from_env()
containers = docker_client.containers.list(all=True)
filtered_containers = filter_whitelisted_containers(containers, whitelist)
filtered_containers = filter_newer_containers(filtered_containers)
kill_and_remove_containers(filtered_containers)
running_containers = docker_client.containers.list()
if running_containers:
log.info("Left the following containers running:\n{}".format(
"\n".join(map(lambda x: " * " + x.name, running_containers))))
else:
log.info("Left no containers running")
log.info("Ended at {}".format(datetime.now()))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1771245 | <filename>backend/videos/views.py
from logging import Formatter, StreamHandler, getLogger
from time import sleep, time
from flask import Blueprint, Response, current_app
from image_process.factory import create_image_processes
from .factory import create_camera
logger = getLogger(__name__)
handler = StreamHandler()
handler.setFormatter(Formatter('%(levelname)s: %(message)s'))
logger.addHandler(handler)
video = Blueprint('video', __name__, url_prefix='/video')
def gen(camera, interval):
while True:
try:
s = time()
frame = camera.get_frame()
if frame:
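                # Each yielded chunk is one part of an MJPEG stream: boundary
                # line, part headers, JPEG bytes, then a blank line, matching
                # the multipart/x-mixed-replace; boundary=frame response
                # declared in video_feed() below.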
yield b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
else:
                logger.warning('@@@ frame is None')
e = time()
sleep_time = interval - (e - s)
if 0 < sleep_time:
sleep(sleep_time)
else:
                logger.warning(f'@@@ sleep time is negative: {sleep_time * 1000} msec')
except Exception as ex:
logger.error(f'### Exception: {ex}')
@video.route('/')
def video_feed():
config = current_app.config
interval = config['VIDEO_INTERVAL_SEC']
img_proc_list = create_image_processes(config)
camera = create_camera(config, img_proc_list)
return Response(gen(camera, interval), mimetype='multipart/x-mixed-replace; boundary=frame')
| StarcoderdataPython |
3218025 | import torch
import torchvision.models as models
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
import numpy as np
from . import vae, deepinfomax
import time
__version__ = "0.7.0"
from .model import EfficientNet, VALID_MODELS
from .utils import (
GlobalParams,
BlockArgs,
BlockDecoder,
efficientnet,
get_model_params,
)
try:
from apex import amp
except:
pass
def get_model(exp_dict, **kwargs):
if exp_dict["model"] == 'vae':
return vae.VAE(exp_dict, **kwargs)
elif exp_dict["model"] == "deepinfomax":
return deepinfomax.DeepInfoMax(exp_dict, **kwargs)
else:
raise ValueError("Model %s not found" %exp_dict["model"])
| StarcoderdataPython |
3362345 | <filename>leasing/tests/test_utils.py
from datetime import date
from leasing.utils import calculate_increase_with_360_day_calendar, days360
def test_days360_year():
date1 = date(year=2020, month=1, day=1)
date2 = date(year=2021, month=1, day=1)
days = days360(date1, date2, True)
assert days == 360
def test_days360_leap_year():
date1 = date(year=2020, month=1, day=15)
date2 = date(year=2020, month=3, day=15)
days = days360(date1, date2, True)
assert days == 60
def test_calculate_increase_with_360_day_calendar():
date1 = date(year=2020, month=8, day=3)
date2 = date(year=2020, month=10, day=15)
increase_percentage = 3
current_amount = 150000.0
expected_amount = 151000.0
calculated_amount = calculate_increase_with_360_day_calendar(
date1, date2, increase_percentage, current_amount
)
assert expected_amount == calculated_amount
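# Hedged sketch (added, not from leasing.utils): one common 30/360 (US/NASD)
# day count that reproduces the two days360 assertions above. The real
# days360 takes a third flag (passed as True above) whose exact semantics are
# not shown here, and it may treat end-of-month edge cases differently.
def days360_sketch(date1: date, date2: date) -> int:
    d1 = min(date1.day, 30)  # a start day past 30 counts as 30
    d2 = 30 if (date2.day == 31 and d1 == 30) else date2.day
    return ((date2.year - date1.year) * 360
            + (date2.month - date1.month) * 30
            + (d2 - d1))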
| StarcoderdataPython |
1680884 | <reponame>BrenoCipolli/login_with_interface
from PySimpleGUI import PySimpleGUI as sg
import time
tema = sg.theme('Reddit')
def sucesso():
tema = sg.theme('Reddit')
layout = [
[sg.Text('Success!',font='Roboto',size=(20,30))]
]
janela1 = sg.Window('Successful',layout,size=(100,50))
while True:
event,values = janela1.Read()
if event == sg.WIN_CLOSED or event == 'Cancel':
break
janela1.Close()
layout = [
[sg.Text('Username',font='Roboto',size=(15,0))],
[sg.Input(font='Roboto',size=(100,70),key='name')],
[sg.Text('Password',font='Roboto',size=(15,0))],
[sg.Input(font='Roboto',size=(100,70),password_char='*',key='passw')],
[sg.Button('Confirm',font='Roboto',auto_size_button=(20,20),key='btn')]
]
janela = sg.Window('Login',layout,size=(300,270))
while True:
event,values = janela.Read()
if event == sg.WIN_CLOSED or event == 'Cancel':
break
if event == 'btn':
if values['passw'] == 'password' and values['name'] == 'username':
sucesso()
janela.Close() | StarcoderdataPython |
1633489 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-11 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import estate.core.models.fields
class Migration(migrations.Migration):
dependencies = [
('terraform', '0003_auto_20170707_1414'),
]
operations = [
migrations.AddField(
model_name='historicalnamespace',
name='vault_backend',
field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='vault_backend'),
),
migrations.AddField(
model_name='namespace',
name='vault_backend',
field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='vault_backend'),
),
migrations.AlterField(
model_name='file',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='historicalfile',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='historicalnamespace',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='historicaltemplate',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='historicaltemplateinstance',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='namespace',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='template',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
migrations.AlterField(
model_name='templateinstance',
name='slug',
field=estate.core.models.fields.SoftDeleteAwareAutoSlugField(blank=True, editable=False, populate_from=b'title', verbose_name='slug'),
),
]
| StarcoderdataPython |
3239395 | # -*- coding: utf-8 -*-
import application
if __name__ == '__main__':
app = application.Application()
app.run()
| StarcoderdataPython |
1649184 | from collections import defaultdict
result = 0
orbits = defaultdict(lambda: [])
numorbits = {"COM": 0}
with open("input.txt", "r") as input:
for line in input:
line = line.strip().split(")")
orbits[line[0]].append(line[1])
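# BFS from the root "COM": each body's orbit count is one more than its
# parent's, and the answer is the sum of counts over all bodies.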
processing = ["COM"]
while len(processing) > 0:
c = processing.pop()
n = numorbits[c]
for new in orbits[c]:
processing.append(new)
numorbits[new] = n + 1
result += n + 1
with open("output1.txt", "w") as output:
output.write(str(result))
print(str(result))
| StarcoderdataPython |
84897 | #$Id$#
from books.model.BankRule import BankRule
from books.model.Criteria import Criteria
from books.service.ZohoBooks import ZohoBooks
zoho_books = ZohoBooks("{auth_token}", "{organization_id}")
bank_rules_api = zoho_books.get_bank_rules_api()
accounts_api = zoho_books.get_bank_accounts_api()
account_id = accounts_api.get_bank_accounts().get_bank_accounts()[0].get_account_id()
target_account_id = accounts_api.get_bank_accounts().get_bank_accounts()[1].get_account_id()
rule_id = bank_rules_api.get_rules(account_id).get_bank_rules()[0].get_rule_id()
contact_api = zoho_books.get_contacts_api()
customer_id = contact_api.get_contacts().get_contacts()[0].get_contact_id()
# get rules list
print bank_rules_api.get_rules(account_id)
# get a rule
print bank_rules_api.get(rule_id)
#create a rule
rule=BankRule()
rule.set_rule_name('rule 9')
rule.set_target_account_id(target_account_id)
rule.set_apply_to('deposits')
rule.set_criteria_type('and')
criteria = Criteria()
criteria.set_field('payee')
criteria.set_comparator('is')
criteria.set_value('dfd')
rule.set_criterion(criteria)
rule.set_record_as('sales_without_invoices')
rule.set_account_id(account_id)
rule.set_tax_id('')
rule.set_reference_number('from_statement')
rule.set_customer_id(customer_id)
print bank_rules_api.create(rule)
# update a rule
rule=BankRule()
rule.set_rule_name('rule 8')
rule.set_target_account_id(target_account_id)
rule.set_apply_to('deposits')
rule.set_criteria_type('and')
criteria = Criteria()
criteria.set_field('payee')
criteria.set_comparator('is')
criteria.set_value('dfd')
rule.set_criterion(criteria)
rule.set_record_as('sales_without_invoices')
rule.set_account_id(account_id)
rule.set_tax_id('')
rule.set_reference_number('from_statement')
rule.set_customer_id(customer_id)
print bank_rules_api.update(rule_id,rule)
# Delete a rule
print bank_rules_api.delete(rule_id)
| StarcoderdataPython |
1664385 | <reponame>thecoblack/CompilerGym
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:truncate."""
from compiler_gym.util.truncate import truncate, truncate_lines
from tests.test_main import main
def test_truncate_no_truncation():
assert truncate("abc") == "abc"
assert truncate("abcdef\nabcdef", max_line_len=7, max_lines=2) == "abcdef\nabcdef"
def test_truncate_single_line():
assert truncate("abcdefghijklmnop", max_line_len=5) == "ab..."
def test_truncate_dual_lines():
assert (
truncate("abcdefghijklmnop\nbcdefghijklmnop", max_line_len=5, max_lines=3)
== "ab...\nbc..."
)
def test_truncate_final_line():
assert truncate("abc\ndef\n123", max_line_len=5, max_lines=2) == "abc\nde..."
assert truncate("abc\ndef\n123", max_line_len=10, max_lines=2) == "abc\ndef..."
def test_truncate_lines_no_truncation():
assert truncate_lines(["abc"]) == "abc"
assert (
truncate_lines(["abcdef", "abcdef"], max_line_len=7, max_lines=2)
== "abcdef\nabcdef"
)
def test_truncate_lines_single_line():
assert truncate_lines(["abcdefghijklmnop"], max_line_len=5) == "ab..."
def test_truncate_lines_dual_lines():
assert (
truncate_lines(
["abcdefghijklmnop", "bcdefghijklmnop"], max_line_len=5, max_lines=3
)
== "ab...\nbc..."
)
def test_truncate_lines_dual_lines_generator():
def gen():
yield "abcdefghijklmnop"
yield "bcdefghijklmnop"
assert truncate_lines(gen(), max_line_len=5, max_lines=3) == "ab...\nbc..."
if __name__ == "__main__":
main()
| StarcoderdataPython |
115583 | #!/usr/bin/env python
# NOTE: this script is based on logeion_load.py to do a basic import of russian
from lattices.models import LatticeNode, LemmaNode
def create_lemma_node(lemma, lattice_node, context):
lemma_node, created = LemmaNode.objects.get_or_create(
context=context,
lemma=lemma,
defaults={
"node": lattice_node,
}
)
if created:
print(" created", context, "lemma node", lemma_node.pk, lemma)
else:
existing_lattice_node = lemma_node.node
print(" ", context, "node already existed pointing to lattice node", existing_lattice_node.pk, existing_lattice_node.label)
if existing_lattice_node.canonical:
parent_lattice_node = LatticeNode.objects.create(label=lemma, gloss="from " + context, canonical=False)
parent_lattice_node.children.add(existing_lattice_node)
parent_lattice_node.children.add(lattice_node)
parent_lattice_node.save()
lemma_node.node = parent_lattice_node
lemma_node.save()
print(" created parent lattice node", parent_lattice_node.pk, parent_lattice_node.label)
print(" lattice node", existing_lattice_node.pk, existing_lattice_node.label, "put under", parent_lattice_node.pk, parent_lattice_node.label)
print(" lattice node", lattice_node.pk, lattice_node.label, "put under", parent_lattice_node.pk, parent_lattice_node.label)
print(" ", context, "node now points to lattice node", lemma_node.node.pk, lemma_node.node.label)
else:
existing_lattice_node.children.add(lattice_node)
print(" lattice node", lattice_node.pk, lattice_node.label, "put under", existing_lattice_node.pk, existing_lattice_node.label)
existing_lattice_node.save()
with open("import-data/clancy-russian.tsv") as f:
for row in f:
print(row.strip())
clancy_lemma, short_def = (row.strip().split("\t") + [None, None])[:2]
if LemmaNode.objects.filter(lemma=clancy_lemma, context="clancy").exists():
pass
else:
lattice_node, _ = LatticeNode.objects.get_or_create(label=clancy_lemma, gloss=short_def or "unglossed", canonical=False)
print(" created lattice_node", lattice_node.pk, clancy_lemma, short_def)
create_lemma_node(clancy_lemma, lattice_node, "clancy")
print()
| StarcoderdataPython |
3315530 | """
namecom: data_models.py
Defines data models for the api.
<NAME> [https://github.com/CtheSky]
License: MIT
"""
class DataModel(object):
"""
This is base class for data models.
It provides following utilities:
1. class method `from_dict` to construct model from a dict
2. instance method `to_dict` to transfer model to a dict
3. overrides equality test using __dict__
"""
@classmethod
def from_dict(cls, dct):
"""Create DataModel object from dict."""
if not dct:
return None
return cls(**dct)
def to_dict(self):
"""Returns a dict representation of DataModel object."""
return {
k: v.to_dict() if isinstance(v, DataModel) else v
for k, v in self.__dict__.items()
}
def __repr__(self):
cls_name = self.__class__.__name__
params = ', '.join(['{}={!r}'.format(k, v) for k, v in self.__dict__.items()])
return '{}({})'.format(cls_name, params)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self is other or self.__dict__ == other.__dict__
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(tuple(sorted(self.__dict__.items())))
class Record(DataModel):
"""
This is a class for an individual DNS resource record.
Attributes
----------
id : int
Unique record id. Value is ignored on Create, and must match the URI on Update.
domainName : string
DomainName is the zone that the record belongs to.
host : string
Host is the hostname relative to the zone: e.g. for a record for blog.example.org,
domain would be "example.org" and host would be "blog".
An apex record would be specified by either an empty host "" or "@".
A SRV record would be specified by "_{service}._{protocal}.{host}":
e.g. "_sip._tcp.phone" for _sip._tcp.phone.example.org.
fqdn : string
FQDN is the Fully Qualified Domain Name. It is the combination of the host and the domain name.
It always ends in a ".". FQDN is ignored in CreateRecord, specify via the Host field instead.
type : string
Type is one of the following: A, AAAA, ANAME, CNAME, MX, NS, SRV, or TXT.
answer : string
Answer is either the IP address for A or AAAA records; the target for ANAME, CNAME, MX, or NS records;
the text for TXT records. For SRV records, answer has the following format:
"{weight} {port} {target}" e.g. "1 5061 sip.example.org".
ttl : int
TTL is the time this record can be cached for in seconds. Name.com allows a minimum TTL of 300,
or 5 minutes.
priority : int
Priority is only required for MX and SRV records, it is ignored for all others.
"""
def __init__(self, id, domainName, fqdn, type, answer, host=None, ttl=300, priority=None):
self.id = id
self.domainName = domainName
self.host = host
self.fqdn = fqdn
self.type = type
self.answer = answer
self.ttl = ttl
self.priority = priority
class DNSSEC(DataModel):
"""
This is a class for Domain Name System Security Extensions (DNSSEC).
It contains all the data required to create a DS record at the registry.
Attributes
----------
domainName : string
DomainName is the domain name.
keyTag : int
KeyTag contains the key tag value of the DNSKEY RR that validates this signature.
The algorithm to generate it is here: https://tools.ietf.org/html/rfc4034#appendix-B
algorithm : int
Algorithm is an integer identifying the algorithm used for signing. Valid values can be found here:
https://www.iana.org/assignments/dns-sec-alg-numbers/dns-sec-alg-numbers.xhtml
digestType : int
DigestType is an integer identifying the algorithm used to create the digest. Valid values can be found
here: https://www.iana.org/assignments/ds-rr-types/ds-rr-types.xhtml
digest : string
Digest is a digest of the DNSKEY RR that is registered with the registry.
"""
def __init__(self, domainName, keyTag, algorithm, digestType, digest):
self.domainName = domainName
self.keyTag = keyTag
self.algorithm = algorithm
self.digestType = digestType
self.digest = digest
class Domain(DataModel):
"""
This class lists all the data for a domain.
Attributes
----------
domainName : string
DomainName is the punycode encoded value of the domain name.
nameservers : []string
Nameservers is the list of nameservers for this domain. If unspecified it defaults to your account
default nameservers.
contacts : :class:`~namecom.Contacts`
Contacts for the domain.
privacyEnabled : bool
PrivacyEnabled reflects if Whois Privacy is enabled for this domain.
locked : bool
        Locked indicates that the domain cannot be transferred to another registrar.
autorenewEnabled : bool
AutorenewEnabled indicates if the domain will attempt to renew automatically before expiration.
expireDate : string
ExpireDate is the date the domain will expire.
createDate : string
CreateDate is the date the domain was created at the registry.
renewalPrice : float
RenewalPrice is the price to renew the domain. It may be required for the RenewDomain command.
"""
def __init__(self, domainName, locked=None, expireDate=None, createDate=None, contacts=None,
nameservers=None, privacyEnabled=None, autorenewEnabled=None, renewalPrice=None):
self.domainName = domainName
self.nameservers = nameservers
self.contacts = contacts
self.privacyEnabled = privacyEnabled
self.locked = locked
self.autorenewEnabled = autorenewEnabled
self.expireDate = expireDate
self.createDate = createDate
self.renewalPrice = renewalPrice
@classmethod
def from_dict(cls, dct):
if not dct:
return None
domain = Domain(**dct)
domain.contacts = Contacts.from_dict(dct.get('contacts'))
return domain
class Contacts(DataModel):
"""
This class stores the contact information for the roles related to domains.
Attributes
----------
registrant : :class:`~namecom.Contact`
Registrant is the rightful owner of the account and has the right to use and/or sell the domain name.
They are able to make changes to all account, domain, and product settings. This information should be
reviewed and updated regularly to ensure accuracy.
admin : :class:`~namecom.Contact`
Registrants often designate an administrative contact to manage their domain name(s). They primarily deal
with business information such as the name on record, postal address, and contact information for the
official registrant.
tech : :class:`~namecom.Contact`
The technical contact manages and maintains a domain's nameservers. If you're working with a web designer
        or someone in a similar role, you may want to assign them as a technical contact.
billing : :class:`~namecom.Contact`
The billing contact is the party responsible for paying bills for the account and taking care of renewals.
"""
def __init__(self, registrant, admin, tech, billing):
self.registrant = registrant
self.admin = admin
self.tech = tech
self.billing = billing
@classmethod
def from_dict(cls, dct):
if not dct:
return None
kwargs = {
field: Contact.from_dict(dct.get(field))
for field in ['registrant', 'admin', 'tech', 'billing']
}
return Contacts(**kwargs)
class Contact(DataModel):
"""
This class contains all the contact data.
Attributes
----------
firstName : string
First name of the contact.
lastName : string
Last name of the contact.
companyName : string
Company name of the contact. Leave blank if the contact is an individual as some registries will assume
it is a corporate entity otherwise.
address1 : string
Address1 is the first line of the contact's address.
address2 : string
Address2 is the second line of the contact's address.
city : string
City of the contact's address.
state : string
State or Province for the contact's address.
zip : string
Zip or Postal Code for the contact's address.
country : string
Country code for the contact's address. Required to be a ISO 3166-1 alpha-2 code.
phone : string
Phone number of the contact. Should be specified in the following format: "+cc.llllllll" where cc
is the country code and llllllll is the local number.
fax : string
Fax number of the contact. Should be specified in the following format: "+cc.llllllll" where cc
is the country code and llllllll is the local number.
email : string
Email of the contact. Should be a complete and valid email address.
"""
def __init__(self, firstName, lastName, companyName=None, address1=None, address2=None, city=None,
state=None, zip=None, country=None, phone=None, fax=None, email=None):
self.firstName = firstName
self.lastName = lastName
self.companyName = companyName
self.address1 = address1
self.address2 = address2
self.city = city
self.state = state
self.zip = zip
self.country = country
self.phone = phone
self.fax = fax
self.email = email
class DomainSearchResult(DataModel):
"""
SearchResult is returned by the CheckAvailability, Search, and SearchStream functions.
Attributes
----------
domainName : string
DomainName is the punycode encoded value of the domain name.
sld : string
SLD is first portion of the domain_name.
tld : string
TLD is the rest of the domain_name after the SLD.
purchasable : bool
        Purchasable indicates whether the search result is available for purchase.
premium : bool
Premium indicates that this search result is a premium result and the purchase_price needs to be passed to
the DomainCreate command.
purchasePrice : float
PurchasePrice is the price for purchasing this domain for 1 year. Purchase_price is always in USD.
purchaseType : string
PurchaseType indicates what kind of purchase this result is for. It should be passed to the DomainCreate
command.
renewalPrice : float
        RenewalPrice is the annual renewal price for this domain as it may be different from the purchase_price.
"""
def __init__(self, domainName, sld, tld, purchasable=None,
premium=None, purchasePrice=None, purchaseType=None, renewalPrice=None):
self.domainName = domainName
self.sld = sld
self.tld = tld
self.purchasable = purchasable
self.premium = premium
self.purchasePrice = purchasePrice
self.purchaseType = purchaseType
self.renewalPrice = renewalPrice
class EmailForwarding(DataModel):
"""
EmailForwarding contains all the information for an email forwarding entry.
Attributes
----------
domainName : string
DomainName is the domain part of the email address to forward
emailBox : string
        EmailBox is the local part (before the @) of the email address to forward
emailTo : string
EmailTo is the entire email address to forward email to
"""
def __init__(self, domainName, emailBox, emailTo):
self.domainName = domainName
self.emailBox = emailBox
self.emailTo = emailTo
class Transfer(DataModel):
"""
Transfer contains the information related to a transfer of a domain name to Name.com.
Attributes
----------
domainName : string
        DomainName is the domain to be transferred to Name.com.
email : string
        Email is the email address that the approval email was sent to. Not every TLD requires an approval email.
        This is usually pulled from Whois.
status : string
Status is the current status of the transfer. Details about statuses can be found in the following
Knowledge Base article: https://www.name.com/support/articles/115012519688-Transfer-status-FAQ.
"""
def __init__(self, domainName, email, status):
self.domainName = domainName
self.email = email
self.status = status
class URLForwarding(DataModel):
"""
URLForwarding is the model for URL forwarding entries.
Attributes
----------
domainName : string
DomainName is the domain part of the hostname to forward.
host : string
Host is the entirety of the hostname. i.e. www.example.org
forwardsTo : string
ForwardsTo is the URL this host will be forwarded to.
type : string
Type is the type of forwarding. Valid types are:
        Masked - This retains the original domain in the address bar and will not reveal or display the
        actual destination URL. If you are forwarding knowledgebase.ninja to Name.com, the address bar
        will say knowledgebase.ninja. This is sometimes called iframe forwarding.
        Redirect - This does not retain the original domain in the address bar, so the user will see it
        change and realize they were forwarded from the URL they originally entered. If you are forwarding
        knowledgebase.ninja to Name.com, the address bar will say Name.com. This is also called 301
        forwarding.
title : string
        Title is the title for the html page to use if the type is masked. Values are ignored for types
        other than "masked".
meta : string
Meta is the meta tags to add to the html page if the type is masked.
ex: "meta name='keywords' content='fish, denver, platte'". Values are ignored for types other then "masked".
"""
def __init__(self, domainName, host, forwardsTo, type, title=None, meta=None):
self.domainName = domainName
self.host = host
self.forwardsTo = forwardsTo
self.type = type
self.title = title
self.meta = meta
class VanityNameserver(DataModel):
"""
VanityNameserver contains the hostname as well as the list of IP addresses for nameservers.
Attributes
----------
domainName : string
DomainName is the domain the nameserver is a subdomain of.
hostname : string
Hostname is the hostname of the nameserver.
ips : []string
IPs is a list of IP addresses that are used for glue records for this nameserver.
"""
def __init__(self, domainName, hostname, ips=None):
self.domainName = domainName
self.hostname = hostname
self.ips = ips
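# Illustrative usage (added, not part of the original module): the models
# above are built around a plain-dict round trip; the field values below are
# made up purely for demonstration.
if __name__ == '__main__':
    record = Record(id=1, domainName='example.org', fqdn='www.example.org.',
                    type='A', answer='10.0.0.1', host='www')
    # to_dict/from_dict should be inverses for a flat model like Record
    assert Record.from_dict(record.to_dict()) == record
    print(record)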
| StarcoderdataPython |
1711660 | # -*- coding: utf-8 -*-
#
# cafeWorker.py
#
# Defines Kakao cafe's worker interface.
from abc import ABCMeta, abstractmethod
class CafeWorker(metaclass=ABCMeta):
@abstractmethod
def Print(self) -> None:
raise NotImplementedError('Method Print not implemented')
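# Illustrative sketch (added, not part of the original interface file): a
# minimal concrete worker implementing the CafeWorker interface above.
class Barista(CafeWorker):
    def Print(self) -> None:
        print('Barista: ready to take orders')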
| StarcoderdataPython |
1605438 | <reponame>DerouineauNicolas/dpx_to_ffv1<filename>dpx2ffv1/test.py<gh_stars>0
import unittest
from unittest import TestCase
from dpx2ffv1.dpx2ffv1 import dpx2ffv1
class TestJoke(TestCase):
def test_main_function(self):
out = dpx2ffv1('./test/', 'out.mkv', 24)
assert(out == 0)
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
35934 | TORRENTS_PER_PAGE = 25
| StarcoderdataPython |
3363236 | <filename>section6_turtle-tree.py
from turtle import *
# Draw a tree recursively
def tree(n):
    if n <= 1: # if the argument is 1 or less
        forward(5) # move forward 5 steps
    else: # when the argument is greater than 1
        forward(5*(1.1**n)) # move forward by an amount that grows with n (the trunk)
        # record the current position and heading
        xx = pos()
        h = heading()
        # turn left by 30 degrees
        left(30)
        # draw a tree of size n-2 (the left branch)
        tree(n-2)
        # lift the pen so no trail is left
        up()
        # return to the recorded position (the tip of the trunk)
        setpos(xx)
        setheading(h)
        # put the pen down
        down()
        # turn right by 15 degrees
        right(15)
        # draw a tree of size n-1 (the right branch)
        tree(n-1)
        # lift the pen and move back
        up()
        setpos(xx)
        setheading(h)
        # put the pen down
        down()
# drawing is slow, so use the fastest drawing speed
speed(0)
# draw a tree of size 12
tree(12)
| StarcoderdataPython |
1718531 | #!/usr/bin/env python3
import sys, pybench
pythons = [
(1, '/usr/bin/python3'),
(0, '/usr/bin/python2')
]
stmts = [
# Use function calls: map wins
(0, 0, "[ord(x) for x in 'spam' * 2500]"),
(0, 0, "res=[]\nfor x in 'spam' * 2500: res.append(ord(x))"),
(0, 0, "$listif3(map(ord, 'spam' * 2500))"),
(0, 0, "list(ord(x) for x in 'spam' * 2500)"),
# Set and dicts
(0, 0, "{x ** 2 for x in range(1000)}"),
(0, 0, "s=set()\nfor x in range(1000): s.add(x ** 2)"),
(0, 0, "{x: x ** 2 for x in range(1000)}"),
(0, 0, "d={}\nfor x in range(1000): d[x] = x ** 2"),
# Pathological: 300k digits
(1, 1, "len(str(2**1000000))")
]
tracecmd = '-t' in sys.argv
pythons = pythons if '-a' in sys.argv else None
pybench.runner(stmts, pythons, tracecmd)
| StarcoderdataPython |
1697860 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
import tabular_logger as tlogger
import tensorflow as tf
import numpy as np
import argparse
import time
import sys
import os
def normal(x, mu, sigma):
pi = tf.constant(np.pi)
Z = (2*pi*sigma**2)**0.5
return tf.exp(-0.5*(x - mu)**2 / sigma**2) / Z
def log_normal(x, mu, sigma):
pi = tf.constant(np.pi)
return -0.5*tf.log(2*pi) - tf.log(sigma) - 0.5*tf.square(x-mu) / (sigma**2)
def log_prior(x, isScaled=True):
if isScaled:
sigma1 = tf.exp(-1.0)
sigma2 = tf.exp(-7.0)
pi = 0.5
return tf.log(pi*normal(x,0.0,sigma1)+(1-pi)*normal(x,0.0,sigma2))
else:
sigma = tf.exp(-1.0)
return log_normal(x, 0.0, sigma)
class bayes():
def __init__(self, args):
self.args = args
self.data_dir = args.data_dir
self.mnist = input_data.read_data_sets(self.data_dir,one_hot=True,fake_data=False)
self.batch_size = args.batch_size
self.log_dir = args.log_dir
self.model = 'models'
self.scale = args.scale
self.w_prior_std = 1.0
self.isFlip = args.isFlip
self.LRT = args.LRT
if args.lr_decay:
self.learning_rate = tf.Variable(0.0, trainable=False)
else:
self.learning_rate = args.learning_rate
def bayesian_nn_layer(self, input_tensor, input_dim, output_dim, isTrain, layer_name, nonlinearity=None):
eps = 1e-35
with tf.name_scope(layer_name):
# Initialize the variational parameters. Reference: weight uncertainty in neural networks.
with tf.name_scope('weights_mean'):
mu_w = tf.Variable(tf.random_normal([input_dim, output_dim], stddev=0.1))
with tf.name_scope('weights_sd'):
rho_w = tf.Variable(tf.random_normal([input_dim, output_dim], mean=-3.0, stddev=0.1))
sigma_w = tf.log(1+tf.exp(rho_w))
with tf.name_scope('bias_mean'):
biases = tf.Variable(tf.zeros([output_dim]))
def closed_form_kl():
dim = input_dim * output_dim
return (tf.log(self.w_prior_std)*dim - \
tf.reduce_sum(tf.log(sigma_w+eps)) + \
0.5*(-dim+1.0/(self.w_prior_std**2)*(tf.reduce_sum(sigma_w**2) +\
tf.reduce_sum(mu_w**2))))
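            # Note (added): this is the standard closed-form KL between the
            # diagonal Gaussian posterior N(mu_w, sigma_w^2) and the prior
            # N(0, s^2) with s = w_prior_std, i.e.
            #   KL = sum_i [ log(s/sigma_i) + (sigma_i^2 + mu_i^2)/(2 s^2) - 1/2 ]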
def train_forward():
epsilon_w = tf.random_normal([input_dim, output_dim], stddev=1.0)
weights = mu_w + tf.multiply(sigma_w, epsilon_w)
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
if nonlinearity is not None:
preactivate = nonlinearity(preactivate)
return preactivate, closed_form_kl()
def map_inference():
weights = mu_w
# biases = mu_b
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
if nonlinearity is not None:
preactivate = nonlinearity(preactivate)
return preactivate, closed_form_kl()
return tf.cond(isTrain, lambda: train_forward(), lambda: map_inference())
    # Only to be called during training.
def flipoutlayerFC(self, x, W_0, delta_W):
weight_dim = W_0.shape.as_list()
# x is n*m where m is the dimension, n is the mini-batch size.
# W_0 is m*h where h is the num of hidden units.
epsilon = tf.random_normal(weight_dim, stddev=1.0)
def generate_flipping_factor(dim):
shape = tf.stack([tf.shape(x)[0], dim])
random = tf.random_normal(shape)
positives = tf.ones(shape)
negatives = tf.zeros(shape)-1
return tf.where(random>0, positives, negatives)
E1 = generate_flipping_factor(weight_dim[1])
E2 = generate_flipping_factor(weight_dim[0])
pert_x = tf.multiply(tf.matmul(tf.multiply(x, E2), delta_W), E1)
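        # Note (added): flipout (Wen et al., 2018) shares one sampled delta_W
        # across the mini-batch but decorrelates examples by modulating it with
        # independent random +/-1 factors: example i is multiplied by E2_i
        # before delta_W and by E1_i after, which keeps each perturbation
        # mean-zero while reducing gradient variance.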
return (tf.matmul(x, W_0) + pert_x)
def bayesian_nn_layer_flip(self, input_tensor, input_dim, output_dim, isTrain, layer_name, nonlinearity=None):
eps = 1e-35
with tf.name_scope(layer_name):
# Initialize the variational parameters. Reference: weight uncertainty in neural networks.
with tf.name_scope('weights_mean'):
mu_w = tf.Variable(tf.random_normal([input_dim, output_dim], stddev=0.1))
with tf.name_scope('weights_sd'):
rho_w = tf.Variable(tf.random_normal([input_dim, output_dim], mean=-3.0, stddev=0.1))
sigma_w = tf.log(1+tf.exp(rho_w))
with tf.name_scope('bias_mean'):
biases = tf.Variable(tf.zeros([output_dim]))
def closed_form_kl():
dim = input_dim * output_dim
return (tf.log(self.w_prior_std)*dim - \
tf.reduce_sum(tf.log(sigma_w+eps)) + \
0.5*(-dim+1.0/(self.w_prior_std**2)*(tf.reduce_sum(sigma_w**2) +\
tf.reduce_sum(mu_w**2))))
def train_forward():
with tf.name_scope('perturbation'):
epsilon_w = tf.random_normal([input_dim, output_dim], stddev=1.0)
delta_W = tf.multiply(sigma_w, epsilon_w)
weights = mu_w + delta_W
with tf.name_scope('flipout'):
flipoutFC = self.flipoutlayerFC(input_tensor, mu_w, delta_W)
with tf.name_scope('Wx_plus_b'):
preactivate = flipoutFC + biases
if nonlinearity is not None:
preactivate = nonlinearity(preactivate)
return preactivate, closed_form_kl()
def map_inference():
weights = mu_w
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
if nonlinearity is not None:
preactivate = nonlinearity(preactivate)
return preactivate, closed_form_kl()
return tf.cond(isTrain, lambda: train_forward(), lambda: map_inference())
def bayesian_nn_layer_LRT(self, input_tensor, input_dim, output_dim, isTrain, layer_name, nonlinearity=None):
eps = 1e-35
with tf.name_scope(layer_name):
with tf.name_scope('weights_mean'):
mu_w = tf.Variable(tf.random_normal([input_dim, output_dim], stddev=0.1))
with tf.name_scope('weights_sd'):
rho_w = tf.Variable(tf.random_normal([input_dim, output_dim], mean=-3.0, stddev=0.1))
sigma_w = tf.log(1+tf.exp(rho_w))
with tf.name_scope('bias_mean'):
biases = tf.Variable(tf.zeros([output_dim]))
def closed_form_kl():
dim = input_dim * output_dim
return (tf.log(self.w_prior_std)*dim - \
tf.reduce_sum(tf.log(sigma_w+eps)) + \
0.5*(-dim+1.0/(self.w_prior_std**2)*(tf.reduce_sum(sigma_w**2) +\
tf.reduce_sum(mu_w**2))))
def train_forward():
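                # Note (added): the local reparameterization trick samples the
                # pre-activations directly, b ~ N(x @ mu_w, x^2 @ sigma_w^2),
                # instead of sampling the weights, giving lower-variance
                # gradients for the same mini-batch.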
mu_b = tf.matmul(input_tensor, mu_w)
sigma_b = tf.sqrt(tf.matmul(tf.square(input_tensor), tf.square(sigma_w))+eps)
output_shape = tf.stack([tf.shape(input_tensor)[0], output_dim])
epsilon = tf.random_normal(output_shape, stddev=1.0)
                preactivate = mu_b + tf.multiply(sigma_b, epsilon) + biases
if nonlinearity is not None:
preactivate = nonlinearity(preactivate)
return preactivate, closed_form_kl()
def map_inference():
weights = mu_w
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
if nonlinearity is not None:
preactivate = nonlinearity(preactivate)
return preactivate, closed_form_kl()
return tf.cond(isTrain, lambda: train_forward(), lambda: map_inference())
def build_model(self):
self.isTrain = tf.placeholder(tf.bool, name='isTrain')
with tf.name_scope('input'):
self.x = tf.placeholder(tf.float32, [None, 784], name='x-input')
self.y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
self.M = tf.placeholder(tf.float32, shape=(), name='number_mini_batches')
self.n = tf.placeholder(tf.float32, shape=(), name='mini_batch_size')
if self.args.LRT:
hidden1, kl1 = self.bayesian_nn_layer_LRT(self.x , 784, 400, self.isTrain, 'layer1', nonlinearity=tf.nn.relu)
hidden2, kl2 = self.bayesian_nn_layer_LRT(hidden1, 400, 400, self.isTrain, 'layer2', nonlinearity=tf.nn.relu)
y, kl3 = self.bayesian_nn_layer_LRT(hidden2, 400, 10, self.isTrain, 'layer3')
elif self.isFlip:
hidden1, kl1 = self.bayesian_nn_layer_flip(self.x , 784, 400, self.isTrain, 'layer1', nonlinearity=tf.nn.relu)
hidden2, kl2 = self.bayesian_nn_layer_flip(hidden1, 400, 400, self.isTrain, 'layer2', nonlinearity=tf.nn.relu)
y, kl3 = self.bayesian_nn_layer_flip(hidden2, 400, 10, self.isTrain, 'layer3')
else:
hidden1, kl1 = self.bayesian_nn_layer(self.x , 784, 400, self.isTrain, 'layer1', nonlinearity=tf.nn.relu)
hidden2, kl2 = self.bayesian_nn_layer(hidden1, 400, 400, self.isTrain, 'layer2', nonlinearity=tf.nn.relu)
y, kl3 = self.bayesian_nn_layer(hidden2, 400, 10, self.isTrain, 'layer3')
with tf.name_scope('cross-entropy'):
self.cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=y))
with tf.name_scope('KL'):
self.KL = (kl1 + kl2 + kl3) / self.M
# tf.summary.scalar('KL', self.KL)
with tf.name_scope('loss'):
self.loss = self.KL + self.cross_entropy
# tf.summary.scalar('loss', self.loss)
with tf.name_scope('train'):
self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(
self.loss)
# if self.args.lr_decay:
# tf.summary.scalar('lr', self.learning_rate)
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(self.y_, 1))
with tf.name_scope('accuracy'):
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# tf.summary.scalar('accuracy', self.accuracy)
def run_model(self):
self.build_model()
mnist = input_data.read_data_sets(self.data_dir, one_hot=True)
log_dir = self.log_dir + '/Flip{}batch{}scale{}lr{}'.format(self.isFlip, self.batch_size, self.scale, self.learning_rate)
tlogger.start(log_dir)
for k, v in self.args.__dict__.items():
tlogger.log('{}: {}'.format(k, v))
with tf.Session() as sess:
# merged = tf.summary.merge_all()
# train_writer = tf.summary.FileWriter(self.log_dir + '/Flip{}train{}scale{}lr{}'
# .format(self.isFlip, self.batch_size, self.scale, self.learning_rate), sess.graph)
# test_writer = tf.summary.FileWriter(self.log_dir + '/Flip{}test{}scale{}lr{}'
# .format(self.isFlip, self.batch_size, self.scale, self.learning_rate))
# saver = tf.train.Saver(max_to_keep=40)
sess.run(tf.global_variables_initializer())
# M = mnist.train.labels.shape[0] // self.batch_size
M = 55000
tstart = time.time()
for i in range(self.args.num_iterations):
start = time.time()
if self.args.lr_decay:
step_size = self.piecewise_learning_rate(i)
sess.run(tf.assign(self.learning_rate, step_size))
batch = mnist.train.next_batch(self.batch_size)
_, train_KL, train_accuracy, train_loss, train_cross = sess.run([self.train_step,
self.KL, self.accuracy, self.loss, self.cross_entropy], feed_dict={self.x: batch[0],
self.y_: batch[1], self.M: M, self.n: batch[0].shape[0], self.isTrain: True})
if i % 100 == 0:
# train_writer.add_summary(train_summary, i)
tlogger.log('********** Iteration {} **********'.format(i))
tlogger.record_tabular("train_loss", train_loss)
tlogger.record_tabular("train_cross", train_cross)
tlogger.record_tabular("train_KL", train_KL)
tlogger.record_tabular("train_acc", train_accuracy)
# print('Train accuracy, Loss at step %s: %s, %s' % (i, train_accuracy, train_loss))
xs, ys = mnist.test.images, mnist.test.labels
test_accuracy, test_loss, test_KL, test_cross = sess.run([self.accuracy, self.loss,
self.KL, self.cross_entropy], feed_dict={ self.x: xs, self.y_: ys, self.M: M,
self.n: xs.shape[0], self.isTrain: False})
# test_writer.add_summary(test_summary, i)
# print('Test accuracy at step %s: %s' % (i, test_accuracy))
tlogger.record_tabular("test_loss", test_loss)
tlogger.record_tabular("test_cross", test_cross)
tlogger.record_tabular("test_KL", test_KL)
tlogger.record_tabular("test_acc", test_accuracy)
tlogger.record_tabular("TimeElapsed", time.time() - tstart)
tlogger.dump_tabular()
tlogger.stop()
# print('test accuracy %g' % self.accuracy.eval(feed_dict={
# self.x: mnist.test.images, self.y_: mnist.test.labels, \
# self.M: 1.0, self.n: mnist.test.images.shape[0], self.isTrain: False}))
# if os.path.exists(self.model):
# saver.save(sess, os.path.join(self.model, '{}{}scale{}'.format(self.batch_size, self.scale)), global_step=i)
# train_writer.close()
# test_writer.close()
def piecewise_learning_rate(self, step):
init_lr = self.args.learning_rate
num_iterations = self.args.num_iterations
if step <= (num_iterations/3):
return init_lr
elif (num_iterations/3)< step <= (2*num_iterations/3):
return 0.1*init_lr
else:
return 0.1*0.1*init_lr
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_dir', type=str, default='/ais/gobi5/wenyemin/summerproject/data/mnist',
help='data file dir')
parser.add_argument('--log_dir', type=str, default='NEW_log',
help='log file dir')
parser.add_argument('--learning_rate', type=float, default=1e-4,
help='learning rate')
parser.add_argument('--scale', type=float, default=0.1,
help='scale the KL term')
parser.add_argument('--batch_size', type=int, default=128,
help='minibatch size')
parser.add_argument('--num_iterations', type=int, default=30000,
help='number of iterations')
    parser.add_argument('--isFlip', action='store_true', default=False,
                        help='whether to use flipout')
    parser.add_argument('--lr_decay', action='store_true', default=False,
                        help='whether to use learning rate decay')
    parser.add_argument('--LRT', action='store_true', default=False,
                        help='whether to use the local reparameterization trick')
args = parser.parse_args()
BBB = bayes(args)
BBB.run_model() | StarcoderdataPython |
1701704 | from django.contrib import admin
from article.models import Article
class ArticleAdmin(admin.ModelAdmin):
list_display = ('title','date_time')
admin.site.register(Article,ArticleAdmin)
# Register your models here.
| StarcoderdataPython |
53766 | <gh_stars>0
'''
olympics.py
A command line interface for querying the olympics database.
Written by <NAME> for cs257
'''
import psycopg2
import argparse
from config import user, password, database
def connect_to_database():
try:
connection = psycopg2.connect(database = database, user = user, password = password)
return connection
except Exception as e:
print(e)
exit()
def get_parsed_arguments():
'''Obtains and parses the command line arguments'''
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--noc", help="A 3-letter NOC to list the athletes for")
parser.add_argument("-l", "--list", action = 'store_true', help="Lists the number of gold medals won by each NOC in descending order")
parser.add_argument("-y", "--year", help="Lists all athletes who won a medal for a given year, including what event they won their medal in and the type of medal won.")
parser.add_argument('-v','--version', action='version', version='olympics.py 1.0')
parsed_arguments = parser.parse_args()
return parsed_arguments
################# Query functions ######################
def query_noc_athletes(noc, cursor):
try:
query_string = noc
query = '''SELECT DISTINCT athlete_name FROM athletes JOIN results ON athletes.id = results.athlete_id JOIN nocs ON results.noc_id = nocs.id WHERE nocs.noc = %s'''
cursor.execute(query, (query_string,))
return cursor
except Exception as e:
print(e)
exit()
def query_noc_gold_medals(cursor):
try:
query = '''SELECT noc, count(medal) FROM results JOIN nocs ON results.noc_id = nocs.id WHERE medal = 'Gold' GROUP BY nocs.noc ORDER BY COUNT(Medal) DESC;'''
cursor.execute(query)
return cursor
except Exception as e:
print(e)
exit()
def query_year_medalists(year, cursor):
try:
query_int = int(year)
query = '''SELECT athlete_name, event_name, medal FROM athletes LEFT JOIN results ON athletes.id = results.athlete_id JOIN games ON games_id = games.id JOIN events ON results.event_id = events.id WHERE games.year = %s AND results.medal IS NOT NULL;'''
cursor.execute(query, (query_int,))
return cursor
except Exception as e:
print(e)
exit()
########################################################
################# Print Functions ######################
def print_noc_athletes(noc, cursor):
print('===== All Athletes from ' + noc + ' =====')
for row in cursor:
print(row[0])
print()
def print_noc_gold_medals(cursor):
print('===== NOCS and their total gold medals =====')
for row in cursor:
print(row[0], row[1])
print()
def print_year_medalists(year, cursor):
print('===== All medalists in the year ' + year + ' =====')
for row in cursor:
print(row[0] + ',' + row[1] + ',' + row[2])
print()
########################################################
def main():
arguments = get_parsed_arguments()
connection = connect_to_database()
cursor = connection.cursor()
if arguments.noc:
noc_athletes = query_noc_athletes(arguments.noc, cursor)
print_noc_athletes(arguments.noc, noc_athletes)
if arguments.list:
noc_gold_medals = query_noc_gold_medals(cursor)
print_noc_gold_medals(noc_gold_medals)
if arguments.year:
year_medalists = query_year_medalists(arguments.year, cursor)
print_year_medalists(arguments.year, year_medalists)
connection.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
58751 | <reponame>ber2/pybcn-meetup-pbt<filename>festa_major.py
import datetime as dt
def first_sunday_of_august(year: int) -> dt.date:
weekday_of_august_first = dt.date(year, 8, 1).isocalendar()[2]
missing_days = 7 - weekday_of_august_first
return dt.date(year, 8, 1 + missing_days)
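# Two concrete cases for reference: August 1, 2021 fell on a Sunday, so
# first_sunday_of_august(2021) == dt.date(2021, 8, 1), while
# first_sunday_of_august(2022) == dt.date(2022, 8, 7).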
def next_festa_major(date: dt.date) -> dt.date:
this_years_festa_major = first_sunday_of_august(date.year)
next_years_festa_major = first_sunday_of_august(date.year + 1)
if date <= this_years_festa_major:
return this_years_festa_major
else:
return next_years_festa_major
if __name__ == "__main__":
today = dt.date.today()
next_fm: dt.date = next_festa_major(today)
print(f"Today is {today}. The next festa major will be on {next_fm}")
| StarcoderdataPython |
3317255 | <filename>sudokubot/solver.py<gh_stars>1-10
from utils import search, display, grid_values, row_units
import sys

def solve(grid, output_format='string'):
    if len(grid) != 81:
        print('ERROR: Sudoku length is not proper')
        sys.exit()
    values = search(grid_values(grid))
    if '' in values.values():
        print('INFO: Invalid Sudoku')
        sys.exit()
    format_output = ""
    for row in row_units:
        for cell in row:
            format_output += values[cell]
    if output_format == 'grid':
        display(values)
    else:
        print(format_output)
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
solve(diag_sudoku_grid, 'grid')
| StarcoderdataPython |
98577 | <filename>modelutils/pytorch/rename_weights.py
import argparse
import os
import torch
def rename_weights(input_filepath, output_filepath, rename_lists):
if os.path.exists(output_filepath):
raise RuntimeError(f"{output_filepath} already exists.")
state_dict = torch.load(input_filepath)
for rename in rename_lists:
original_name, new_name = rename.split(':')
original_name = original_name.strip()
new_name = new_name.strip()
state_dict[new_name] = state_dict[original_name]
del state_dict[original_name]
torch.save(state_dict, output_filepath)
def main():
    parser = argparse.ArgumentParser(description="Rename keys in a PyTorch state dict")
parser.add_argument('input_filepath', type=str)
parser.add_argument('output_filepath', type=str)
parser.add_argument('--rename', type=str, action='append', help='<original name>:<new name>')
args = parser.parse_args()
rename_weights(args.input_filepath, args.output_filepath, args.rename)
if __name__ == '__main__':
main()
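# Example usage (file names are hypothetical):
#   python rename_weights.py old.pth new.pth --rename "encoder.weight:backbone.weight"
# Each --rename flag moves one key of the saved state dict to a new name.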
| StarcoderdataPython |
1680146 | import asyncio
from traceback import format_exc
from typing import List
from teletype.io import erase_lines, style_format, style_print
from stonky.const import SYSTEM
from stonky.settings import Settings
from stonky.stock_store import StockStore
def format_table(rows: List[List[str]], colours: List[str]):
column_widths = [
len(max(columns, key=len))
for columns in [list(column) for column in zip(*rows)]
]
return [
style_format(
" ".join(
[
col.ljust(column_widths[idx_col] + 1)
for idx_col, col in enumerate(row)
]
),
colours[idx],
)
for idx, row in enumerate(rows)
]
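# For a feel for the layout: given rows like [["AAPL", "+1.00"], ["TSLA", "-2.00"]]
# (illustrative tickers), each cell is left-justified to the widest entry in its
# column, and the joined row is wrapped in its colour via teletype's style_format.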
class Tty:
def __init__(self, settings: Settings, stock_store: StockStore):
self.settings = settings
self.stock_store = stock_store
self._draw_buffer = 0
@property
def watchlist(self) -> List[str]:
rows = []
colours = []
for stock in self.stock_store.watchlist:
row = []
row.append(stock.ticket)
if stock.volume:
row.append(stock.volume_str)
else:
row.append("")
row.append(f"@ {stock.amount_current:.2f}")
if stock.delta_amount < 0:
symbol = "▼"
elif stock.delta_amount == 0:
symbol = "▬"
else:
symbol = "▲"
row.append(symbol)
row.append(f"{stock.delta_amount:+,.2f}")
row.append(f"{stock.delta_percent*100:+.2f}%")
rows.append(row)
colours.append(stock.colour)
return [style_format("WATCHLIST", style="bold")] + format_table(
rows, colours
)
@property
def positions(self) -> List[str]:
rows = []
colours = []
for stock in self.stock_store.positions:
row = [
stock.ticket,
f"{stock.delta_amount:+,.2f}",
f"{stock.delta_percent*100:+.2f}%",
]
rows.append(row)
colours.append(stock.colour)
return [style_format("POSITIONS", style="bold")] + format_table(
rows, colours
)
@property
def profit_and_loss(self) -> List[str]:
lines = [style_format("PROFIT AND LOSS", style="bold")]
for stock in self.stock_store.profit_and_loss:
lines.append(
style_format(
f"{stock.delta_percent*100:+.2f}% {stock.delta_amount:+,.2f} {stock.currency.value}",
stock.colour,
)
)
return lines
@property
def balances_str(self) -> List[str]:
lines = [style_format("BALANCES", style="bold")]
for currency, balance in self.stock_store.balances.items():
lines.append(f"{balance:,.2f} {currency.value}")
return lines
async def draw(self):
lines = []
await self.stock_store.update()
if self.settings.watchlist:
lines += self.watchlist
if self.settings.positions:
lines.append("")
lines += self.positions
lines.append("")
lines += self.profit_and_loss
lines.append("")
lines += self.balances_str
if self._draw_buffer:
erase_lines(self._draw_buffer)
self._draw_buffer = len(lines)
print("\n".join(lines))
async def draw_live(self):
remaining = 0
while True:
if remaining == 0:
remaining = self.settings.refresh
await self.draw()
self._draw_buffer += 3
else:
erase_lines(3)
remaining -= 1
style_print(f"\nrefreshing in {remaining}", style="cyan")
style_print("press CTRL-C to quit", style="cyan")
await asyncio.sleep(1)
def crash_report():
system_information_str = "\n".join(
[f" - {k} = {getattr(v, 'value', v)}" for k, v in SYSTEM.items()]
)
s = f"""
============================== CRASH REPORT BEGIN ==============================
--------------------------------- environment ----------------------------------
{system_information_str}
--------------------------------- stack trace ----------------------------------
{format_exc()}
=============================== CRASH REPORT END ===============================
Dang, it looks like stonky crashed! Please consider filling an issue at
https://github.com/jkwill87/stonky/issues along with this report.
"""
print(s)
raise SystemExit(1)
| StarcoderdataPython |
63572 | # -*- coding: utf-8 -*- see https://docs.python.org/2/tutorial/interpreter.html#source-code-encoding
def interface(jeu):
    """Returns the interface elements for the "Game Over" menu; also defines the buttons in the jeu variable.

    Parameter:
    - dict jeu: Dictionary holding the values associated with the game.

    Returns:
    - dict: Dictionary containing the interface elements.
    """
    jeu["boutons"] = ["Recommencer", "Menu Principal"]
    sous_titre = "Score: " + str(jeu["score"])
    if jeu["score"] > jeu["sauvegarde"]["record"]:
        sous_titre += "\n*NOUVEAU RECORD*"  # Highlights the score when it is a new record
    return {
        "titre": "Game Over",
        "sous_titre": sous_titre
    }
def boutons(jeu):
    """Lets the player interact with the buttons of this menu.

    Parameter:
    - dict jeu: Dictionary holding the values associated with the game.
    """
    if jeu["score"] > jeu["sauvegarde"]["record"]:
        jeu["sauvegarde"]["record"] = jeu["score"]  # Updates the record if the score beats the previous one
    jeu.pop("score")
    if jeu["curseur"] == 0:  # "Recommencer" (Restart) button
        jeu["statut"] = 1
    else:  # "Menu Principal" (Main Menu) button
        jeu["statut"] = 0
| StarcoderdataPython |
96606 | <gh_stars>0
'''
Copyright 2014 The MITRE Corporation.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
=============================================================================
A simple interface to creating a taxonomic catalog ("taxcat") for OpenSextant TaxMatcher to use.
prerequisites: See XTax README
'''
import os
__API_PATH = os.path.realpath( __file__ )
SOLR_SERVER = "http://localhost:7000/solr/taxcat"
def _scrub_cdata_content(text):
    ''' Users should scrub data themselves, but this gives an idea of what goes wrong when adding text to Solr:
    <, >, and & must all be escaped.
    '''
    return text.replace('<', '(less than)').replace('>', '(greater than)').replace('&', '&amp; ')
def get_taxnode(t, val):
return t.lower() + "." + val.strip()
_FALSE_VAL = set(['f', 'false', '0', 'n', 'no'])
_TRUE_VAL = set(['t', 'true', '1', 'y', 'yes'])
def add_bool(dct, f, val, default=None):
if not val:
if default is not None:
dct[f] = default
return
if val.lower() in _FALSE_VAL:
dct[f] = 'false'
elif val.lower() in _TRUE_VAL:
dct[f] = 'true'
return
def add_text(dct, f, val):
    ''' add_text offers a basic idea of how to add values to a dict
        before sending to solr.  TEXT strings may need scrubbing first,
        but non-TEXT values can be added as-is.
    '''
    # Both string and non-string values are stored unchanged; scrub TEXT
    # values (see _scrub_cdata_content) before calling if needed.
    dct[f] = val
def add_value(f, val, case=0):
    ''' add a value to a given field, f; and normalize case if non-zero.
        case = CASE_LOWER | CASE_UPPER | 0 (default) no change
    '''
    if val is None:
        f.append(u'')
        return

    if isinstance(val, str):
        v = val
        #if "&" in val or "<" in val:
        #    print("SCRUB THIS:", val)
        #    val.replace('&','+').replace('<', ' lt ')
        if not case:
            f.append(v)
        elif case == CASE_LOWER:
            f.append(v.lower())
        elif case == CASE_UPPER:
            f.append(v.upper())
    else:
        f.append(str(val))

    return
CASE_LOWER=1
CASE_UPPER=2
'''
# Catalogs must be registered -- Solr has no concept of how to manage string-based record IDs
# that is something you must manage as you create your combined catalog,
#
# Catalog Registry maps your catalog ID to a starting offset for solr records
# If you think your reference data for catalog X will have 1 million entries, then
# start catalog X at 1,000,000 and let other smaller catalogs start at 0 or at less than 1 million
# start the next catalog at 3,000,000 to give X some breathing room.
#
'''
CATALOG_REGISTRY = {
"DEFAULT" : 0
}
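# For illustration only -- the catalog names and offsets below are hypothetical.
# Following the note above, a deployment expecting ~1M rows for catalog "X"
# might start it at 1,000,000 and start the next catalog at 3,000,000:
#
#   CATALOG_REGISTRY = {
#       "DEFAULT": 0,
#       "X": 1000000,
#       "NEXT_CATALOG": 3000000,
#   }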
class Taxon:
def __init__(self):
self.name = None
self.phrase = None
self.id = None
self.is_valid = True
# An array of additional tags.
self.tags = None
self.is_acronym = False
class TaxCatalogBuilder:

    def __init__(self, server=None):
        '''
        @param server: solr server http URL; Not solrhome -- this is not SolrEmbedded.
        @param stopwords: file of stopwords
        '''
        self.server = None
        self.set_server(server)
        self._record_count = 0
        self._byte_count = 0
        self._add_byte_count = 0
        self.commit_rate = -1
        self._records = []
        self.count = 0

        from CommonsUtils import ConfigUtility
        ## Load file
        self.utility = ConfigUtility(None)
        self.stopwords = set([])

    def add_stopwords(self, stopfile):
        if not os.path.exists(stopfile):
            raise Exception("No stopwords found at " + stopfile)
        print("Loading stopwords", stopfile)
        _stopwords_list = self.utility.loadListFromFile(stopfile)
        # set.update() adds each stopword individually; set.add() would try to
        # insert the whole (unhashable) list as one element and raise TypeError.
        self.stopwords.update(_stopwords_list)
def get_starting_id(self, cat):
offset = CATALOG_REGISTRY.get(cat)
if not offset:
raise Exception("Catalog is not registered: " + cat)
return offset
    def set_server(self, svr):
        self.server_url = svr
        if not self.server_url:
            return

        try:
            from pysolr import Solr
            self.server = Solr(self.server_url, timeout=600)
            print("SERVER", self.server_url, self.server)

        except Exception as err:
            print("Problem with that server %s, ERR=%s" % (self.server_url, err))
def optimize(self):
if self.server:
self.server.optimize()
def save(self, flush=False):
if not self.server:
            print("No server")
return
if not flush:
qty = len(self._records)
if self.commit_rate>0 and qty % self.commit_rate != 0:
return
if qty < self.commit_rate:
return
self.server.add(self._records)
self.server.commit()
self._records = []
return
def add(self, catalog, taxon):
'''
@param catalog ID of catalog where this taxon lives
@param taxon Taxon obj
'''
self.count = self.count + 1
rec = {'catalog':catalog, 'taxnode':taxon.name, 'phrase':taxon.phrase, 'id':taxon.id, 'valid': taxon.is_valid,
'name_type':'N' }
if taxon.tags:
rec['tag'] = taxon.tags
if taxon.is_acronym:
rec['name_type'] = 'A'
self._records.append( rec )
def add_wordlist(self, catalog, datafile, start_id, taxnode=None, minlen=1):
''' Given a simple one column word list file, each row of data is added
to catalog as a Taxon; taxnode may be used as a prefix for the words
Add a series of organized word lists to a single Catalog, but manage
each wordlist with some prefix taxon path.
add_wordlist('CAT', f1, 400, taxonode='first')
add_wordlist('CAT', f2, 500, taxonode='second')
add_wordlist('CAT', f3, 600, taxonode='third')
add_wordlist('CAT', f4, 700, taxonode='fourth')
'''
_name = os.path.basename(datafile)
if taxnode:
_name = taxnode
        # Open in text mode so each row is a str (the file is one word per line).
        sheet = open(datafile, 'r')
words = set([])
for row in sheet:
_phrase = row.strip()
if not _phrase:
continue
if _phrase.startswith("#"):
# is a comment or commented out word.
continue
self.count += 1
_id = start_id + self.count
key = _phrase.lower()
if key in words:
                print("Not adding", key)
continue
words.add(key)
t = Taxon()
t.id = _id
t.is_valid = len(key) >= minlen
t.name = _name
t.phrase = _phrase
# Allow case-sensitve entries. IFF input text contains UPPER
# case data, we'll mark it as acronym.
if t.phrase.isupper():
t.is_acronym = True
self.add(catalog, t)
        print("COUNT: %d" % self.count)
sheet.close()
| StarcoderdataPython |
1718347 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ecl.database import database_service
from ecl import resource2
class Datastore(resource2.Resource):
resource_key = None
resources_key = 'datastores'
base_path = '/datastores'
service = database_service.DatabaseService()
# capabilities
allow_list = True
_query_mapping = resource2.QueryParameters()
# Properties
#: The ID of this datastore
id = resource2.Body('id')
#: The name of this datastore.
name = resource2.Body('name')
    #: The default version of this datastore.
    default_version = resource2.Body('default_version')
    #: The versions available for this datastore. *Type: list*
    versions = resource2.Body('versions')
#: Links pertaining to this datastore. This is a list of dictionaries,
#: each including keys ``href`` and ``rel``.
links = resource2.Body('links')
| StarcoderdataPython |
83388 | """Defines the factory for creating scanners"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
_SCANNERS = {}
def add_scanner_type(scanner_class):
"""Registers a scanner class so it can be used for Scale Scans
:param scanner_class: The class definition for a scanner
:type scanner_class: class:`ingest.scan.scanners.scanner.Scanner`
"""
scanner = scanner_class()
if scanner.scanner_type in _SCANNERS:
logger.warning('Duplicate scanner registration: %s', scanner.scanner_type)
_SCANNERS[scanner.scanner_type] = scanner_class
def get_scanner(scanner_type):
"""Returns a scanner of the given type that is set to scan the given workspace
:param scanner_type: The unique identifier of a registered scanner
:type scanner_type: string
:returns: A scanner for storing and retrieving files.
:rtype: :class:`ingest.scan.scanners.scanner.Scanner`
"""
if scanner_type in _SCANNERS:
return _SCANNERS[scanner_type]()
raise KeyError('\'%s\' is an invalid scanner type' % scanner_type)
def get_scanner_types():
"""Returns a list of type identifiers for all registered scanners
:returns: A list of scanner types
:rtype: [string]
"""
    return list(_SCANNERS.keys())
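# A minimal usage sketch. The DirScanner class is hypothetical (a real scanner
# must subclass ingest.scan.scanners.scanner.Scanner and expose scanner_type):
#
#   class DirScanner(Scanner):
#       scanner_type = 'dir'
#
#   add_scanner_type(DirScanner)   # maps 'dir' -> DirScanner
#   scanner = get_scanner('dir')   # returns a fresh DirScanner instance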
| StarcoderdataPython |
1696294 | import puzzleinput
import math
def rotate(x2, y2, degrees):
angle = math.radians(degrees)
cos = math.cos(angle)
sin = math.sin(angle)
x3 = cos * x2 - sin * y2
y3 = sin * x2 + cos * y2
return round(x3), round(y3)
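# Here y grows to the south (the "N" action below decreases y), so a positive
# angle in rotate() is a clockwise turn on the map. For example, rotating the
# initial waypoint (10 east, 1 north) right by 90 degrees:
#   rotate(10, -1, 90) == (1, 10)   # 1 east, 10 south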
x = 10
y = -1
traveled_x = 0
traveled_y = 0
for line in puzzleinput.lines:
action = line[0]
value = int(line[1:])
if action == "F":
traveled_x += value * x
traveled_y += value * y
if action == "N":
y -= value
if action == "S":
y += value
if action == "E":
x += value
if action == "W":
x -= value
if action == "R":
x, y = rotate(x, y, value)
if action == "L":
x, y = rotate(x, y, -value)
print(abs(traveled_x) + abs(traveled_y))
| StarcoderdataPython |
4821235 | from sqlalchemy import Column, Integer, ForeignKey
from . import Base
class UserFeed(Base):
__tablename__ = 'user_feeds'
user_id = Column('user_id', ForeignKey('users.db_id'), primary_key=True)
feed_id = Column('feed_id', ForeignKey('feeds.db_id'), primary_key=True)
def __repr__(self):
return f"<UserFeed(user_id='{self.user_id}', feed_id='{self.feed_id}')>"
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
| StarcoderdataPython |
1707770 | """
jobs.py
Defines routes that are used to interact with worker instantiation and
execution given input samples.
"""
import rq
import redis
from flask import jsonify, request, current_app, g
from rq import Queue
from boa.routes import web
from boa.worker import BoaWorker
def get_redis_connection():
    """ Get the redis connection object stored on the application context """
conn = getattr(g, "_redis_connection", None)
if conn is None:
url = current_app.config["REDIS_URL"]
conn = g._redis_connection = redis.from_url(url)
return conn
@web.before_request
def push_rq_connection():
rq.push_connection(get_redis_connection())
@web.teardown_request
def pop_rq_connection(exception=None):
rq.pop_connection()
#########################################
# Endpoints for starting background jobs
#########################################
@web.route("/status/<job_id>", methods=["GET"])
def job_status(job_id):
""" Endpoint used to return status for a running job """
queue = Queue()
task = queue.fetch_job(job_id)
# define response based on task
response = {}
if task is None:
response = {"status": "unknown"}
else:
response = {
"status": "inprogress",
"data": {
"id": task.get_id(),
"status": task.get_status(),
"result": task.result,
},
}
return jsonify(response)
# TODO: stop job
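# Sketch of the missing stop endpoint. The route name, response shape, and the
# use of rq's send_stop_job_command (available in rq >= 1.6) are assumptions
# for illustration, not part of boa.
from rq.command import send_stop_job_command


@web.route("/stop/<job_id>", methods=["POST"])
def job_stop(job_id):
    """ Endpoint used to request cancellation of a running job (sketch) """
    try:
        # send_stop_job_command asks the worker executing job_id to stop it;
        # it raises if the job is not currently executing.
        send_stop_job_command(get_redis_connection(), job_id)
        response = {"status": "stopping"}
    except Exception:
        response = {"status": "unknown"}
    return jsonify(response)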
| StarcoderdataPython |