# (extraction artifact removed: dataset table header)
#!/usr/bin/env python3
from traceback import format_exc
from requests import get
from colorhex import colorex, BOLD
from datetime import datetime
from sys import exit
from os import system
# Hex colour codes (no leading '#') passed to colorex() for terminal styling.
BLURPLE = '7289da'  # headings (country / continent)
GREEN = '43b581'    # prompts and "good" numbers (population, recovered)
YELLOW = 'fdcc4b'   # timestamps
RED = 'f04947'      # errors, cases and deaths
def main():
    """Prompt for a country name and display its COVID-19 statistics.

    An empty entry auto-detects the country via geoplugin.net; 'e' clears
    the screen and exits. Error paths show the failure, wait for Enter and
    re-enter main().
    """
    system('clear')
    country = input(colorex('Enter a countries name, press enter without typing anything to auto detect your country or e to exit\n -> ', GREEN, BOLD))
    if country == '':
        try:
            # Geo-IP lookup; 'geoplugin_countryName' carries the detected name.
            auto_country = get('http://www.geoplugin.net/json.gp').json()
        except Exception as exc:
            system('clear')
            print(colorex(f'An error occured while trying to auto detect country. Please try again or enter the countries name and make sure you have internet access\nTraceback: {exc}', RED, BOLD))
            input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
            system('clear')
            main()
            # BUG FIX: return here — previously execution fell through and
            # used the undefined `auto_country`, raising NameError.
            return
        country = auto_country['geoplugin_countryName']
        getcovidstats(country)
    elif country == 'e':
        system('clear')
        exit()
    else:
        getcovidstats(country)
def getcovidstats(country):
    """Fetch COVID-19 stats for *country* from disease.sh and print them.

    On a network failure or an unknown country the user is shown the error
    and sent back to main().
    """
    try:
        resp = get(f'https://disease.sh/v3/covid-19/countries/{country}').json()
    except Exception as exc:
        system('clear')
        print(colorex(f'An error occured while trying to get covid 19 stats. Please try again later and make sure you have internet access\nTraceback: {exc}', RED, BOLD))
        input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
        system('clear')
        main()
        # BUG FIX: return here — previously execution fell through and used
        # the undefined `resp`, raising NameError.
        return
    try:
        country_name = resp['country']
    except KeyError:
        system('clear')
        print(colorex(f'Invalid country name, or the country doesnt have stats. Please try again\nTraceback: {format_exc()}', RED, BOLD))
        input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
        system('clear')
        main()
        # BUG FIX: same fall-through — resp has no stats keys on this path.
        return
    short_country_name = resp['countryInfo']['iso2']
    country_population = resp['population']
    total_cases = resp['cases']
    cases_today = resp['todayCases']
    total_deaths = resp['deaths']
    deaths_today = resp['todayDeaths']
    total_recovered = resp['recovered']
    today_recovered = resp['todayRecovered']
    continent = resp['continent']
    # The API reports 'updated' in milliseconds since the epoch.
    updated_at = datetime.fromtimestamp(resp['updated'] / 1000.0).strftime('%d %B %Y at %I:%M:%S %p')
    system('clear')
    print(colorex(f'Country: {country_name} ({short_country_name})', BLURPLE, BOLD))
    print(colorex(f'Continent: {continent}', BLURPLE, BOLD))
    print(colorex(f'Population: {country_population}', GREEN, BOLD))
    print(colorex(f'Total cases: {total_cases}, Today: {cases_today}', RED, BOLD))
    print(colorex(f'Total deaths: {total_deaths}, Today: {deaths_today}', RED, BOLD))
    print(colorex(f'Total recovered: {total_recovered}, Today: {today_recovered}', GREEN, BOLD))
    print(colorex(f'Updated at: {updated_at}', YELLOW, BOLD))
    input(colorex('Press enter to go back\n-> ', GREEN, BOLD))
    system('clear')
    main()
main()
from models.Nets import *
from models.Basic import *
from models.Inception_Net import *
from models.Le_Net import *
from utils.dlc_practical_prologue import *
from utils.Evaluate import *
from utils.grid_search import *
from utils.loader import *
from utils.metrics import *
from utils.plot import *
from utils.training import *
import argparse
if __name__ == "__main__":
    # Evaluate the default weight-sharing + auxiliary-loss LeNet over ten
    # fixed seeds with every augmentation enabled (rotate/translate/channel swap).
    Nets_default = Nets()
    seeds = list(range(1, 11))
    train_results, test_losses, test_accuracies = evaluate_model(
        Nets_default.LeNet_sharing_aux, seeds, plot=False,
        rotate=True, translate=True, swap_channel=True)
# (extraction artifact removed)
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 10:21:16 2020
@author: Ben Boys
"""
import subprocess

# Gmsh meshes for the 3300 beam, ordered by increasing element count.
beams = [
    '3300beam952t.msh',
    '3300beam2970t.msh',
    '3300beam4392t.msh',
    '3300beam6048t.msh',
    '3300beam11836t.msh',
    '3300beam17600t.msh',
    '3300beam31680t.msh',
    '3300beam64350t.msh',
    '3300beam149600t.msh',
]

# Profile the force example once per mesh, appending all runs to one file.
with open("data_force_3300t.txt", "w+") as output:
    for beam in beams:
        subprocess.call(["python", "./example4t.py", beam, "--profile"], stdout=output)
# =============================================================================
# with open("data_displacement_optimised_3300.txt", "w+") as output:
# for beam in beams:
# subprocess.call(["python", "./example4d.py", beam, "--optimised", "--profile"], stdout=output);
# =============================================================================
# (extraction artifact removed)
import yaml
from pandas import DataFrame
def val_translate(context, in_df: DataFrame, file_name: str) -> DataFrame:
    """Return a copy of *in_df* with its 'test' column mapped through a YAML table.

    The lookup table is loaded from ``<task.dag.etc_dir>/<file_name>``.
    A value absent from the table raises KeyError (same contract as before).
    """
    task = context.get('task')
    translated = in_df.copy()
    config_path = f'{task.dag.etc_dir}/{file_name}'
    with open(config_path, 'r', encoding='utf-8') as cfg:
        mapping = yaml.safe_load(cfg)
    translated['test'] = translated['test'].map(lambda value: mapping[value])
    return translated
# (extraction artifact removed)
#!/usr/bin/env python
# coding: utf-8
# # section 13.Regex and Parsing challenges :
#
# ### writer : Faranak Alikhah 1954128
# ### 3. Group(), Groups() & Groupdict() :
# In[ ]:
import re

s = input()
# Find the first alphanumeric character that is immediately repeated.
# BUG FIX: the original class was '[A-Z a-z 0-9]', whose literal spaces made
# a doubled space count as a repeated "alphanumeric" character.
pattern = r'([a-zA-Z0-9])\1+'
m = re.search(pattern, s)
if m:
    print(m.group(1))
else:
    print(-1)
#
# (extraction artifact removed)
"""Represents executable entrypoint for `pep8-checker` application."""
import http
import os
from typing import Any, Dict, Optional
from pathlib import Path
import attr
from bottle import TEMPLATE_PATH, abort, request, route, run, view
import requests
TEMPLATE_PATH.append(str(Path('./') / 'checker' / 'views'))
def api_url() -> str:
    """Return the lambda endpoint taken from the ``AWS_ENDPOINT`` env variable.

    Raises:
        RuntimeError: if ``AWS_ENDPOINT`` is unset or empty.
    """
    url: str = os.environ.get('AWS_ENDPOINT', '')
    if not url:
        # BUG FIX: the message told users to set API_URL, but the variable
        # actually read here is AWS_ENDPOINT.
        raise RuntimeError('Please set AWS_ENDPOINT environment variable')
    return url
@attr.dataclass(frozen=True, slots=True)
class Server:
    """Immutable bottle-server configuration (host/port/debug/reloader)."""
    host: str = '0.0.0.0'
    # PORT env var wins over the default; evaluated once at class definition.
    port: str = os.environ.get('PORT', '5050')
    is_debug: bool = True
    reloader: bool = True
    def as_json(self) -> Dict[str, Any]:
        """Return this configuration as a plain dictionary."""
        return dict(
            host=self.host,
            port=self.port,
            is_debug=self.is_debug,
            reloader=self.reloader,
        )
@route('/', method=('GET', 'POST'))
@view(tpl_name='index')
def index() -> Dict[str, str]:
    """Render the index page; POSTed code is PEP8-checked via the AWS lambda.

    Returns: <dict[str, str]> template context with any PEP8 errors found.
    """
    title = 'PEP8 Checker'
    code: str = request.forms.get('code', '')  # pylint: disable=no-member
    if not code:
        # Plain GET (or empty form): render an empty checker page.
        return {'title': title, 'code': code, 'pep_errors': ''}
    response: Dict[Any, Any] = requests.post(
        url=api_url(), json={'code': code}
    ).json()
    error: Optional[str] = response.get('errorMessage')
    exception: Optional[str] = response.get('errorType')
    if error and exception:
        abort(
            code=int(http.HTTPStatus.BAD_REQUEST),
            text=f'Lambda function returned status {exception} exception',
        )
    return {'title': title, 'code': code, 'pep_errors': response['body']}
def easyrun(server: Server = Server()) -> None:
    """Launches a web application.

    Args:
        server: <Server> a given server configuration.
    """
    # The default Server() is evaluated once at import time; Server is a
    # frozen dataclass, so sharing that default instance is safe.
    run(
        host=server.host,
        port=server.port,
        debug=server.is_debug,
        reloader=server.reloader,
    )
if __name__ == '__main__':
    easyrun()
# (extraction artifact removed)
import torch
from torch import nn
import torchvision.transforms as transforms
from tqdm import tqdm
import numpy as np
from train import model_helper
from models import mlp, dcgan
import data_manual
def get_numpy_data(dataloader):
    """Materialise an entire dataloader into two numpy arrays (features, labels)."""
    feature_batches = []
    label_batches = []
    for batch_x, batch_y in tqdm(iter(dataloader)):
        feature_batches.append(batch_x.numpy())
        label_batches.append(batch_y.numpy())
    return np.vstack(feature_batches), np.concatenate(label_batches)
def get_mnist_dataloaders(image_size, batch_size, dataroot, workers=0, data_transforms=None):
    """Build the MNIST train / database / query dataloaders.

    BUG FIX: the `workers` and `data_transforms` arguments were previously
    ignored — literal 0/None were forwarded to every get_dataloader call, so
    the Resize+ToTensor pipeline built below was dead code.
    """
    if data_transforms is None:
        data_transforms = transforms.Compose([
            transforms.Resize(image_size),
            transforms.ToTensor()
        ])
    # Options shared by all three splits.
    common = dict(dataroot=dataroot, workers=workers,
                  data_transforms=data_transforms, clear_cache=True)
    train_dataloader = data_manual.get_dataloader('mnist', image_size, batch_size,
        type='train', shuffle=True, **common)
    db_dataloader = data_manual.get_dataloader('mnist', image_size, batch_size,
        type='db', shuffle=False, **common)
    query_dataloader = data_manual.get_dataloader('mnist', image_size, batch_size,
        type='query', shuffle=False, **common)
    return train_dataloader, db_dataloader, query_dataloader
def _finalize_net(args, net):
    """Shared tail of every factory: init weights, log, build optimizer, to GPU."""
    net.apply(model_helper.weights_init)
    print(net)
    optimizer = model_helper.get_optimizer(args, net.parameters())
    if torch.cuda.is_available():
        net = net.type(torch.cuda.FloatTensor)
    return net, optimizer
def create_mlp_encoder_nobn(args, device):
    """MLP encoder without batch-norm or dropout."""
    net = mlp.Encoder(args.image_size, args.nc, args.enc_layers, args.nz,
                      activation=nn.LeakyReLU(0.2), use_bn=False, dropout=0)
    return _finalize_net(args, net)
def create_mlp_decoder_nobn(args, device):
    """MLP decoder (Tanh output) without batch-norm or dropout."""
    net = mlp.Decoder(args.nz, args.dec_layers, args.nc, args.image_size,
                      activation=nn.LeakyReLU(0.2),
                      output_activation=nn.Tanh(), use_bn=False, dropout=0)
    return _finalize_net(args, net)
def create_mlp_encoder(args, device):
    """MLP encoder with batch-norm."""
    net = mlp.Encoder(args.image_size, args.nc, args.enc_layers, args.nz,
                      activation=nn.LeakyReLU(0.2), use_bn=True, dropout=0)
    return _finalize_net(args, net)
def create_mlp_decoder(args, device):
    """MLP decoder (Tanh output) with batch-norm."""
    net = mlp.Decoder(args.nz, args.dec_layers, args.nc, args.image_size,
                      activation=nn.LeakyReLU(0.2),
                      output_activation=nn.Tanh(), use_bn=True, dropout=0)
    return _finalize_net(args, net)
def create_dcgan_encoder(args, device):
    """DCGAN convolutional encoder."""
    net = dcgan.Encoder(args.image_size, args.nc, args.ndf, args.nz, args.n_extra_layers)
    return _finalize_net(args, net)
def create_dcgan_decoder(args, device):
    """DCGAN convolutional decoder."""
    net = dcgan.Decoder(args.nz, args.ngf, args.nc, args.image_size, args.n_extra_layers)
    return _finalize_net(args, net)
def summarize_results(loss, metrics, ncols=5, figsize=(5 * 4, 3)):
    """Plot every loss series on an ncols-wide grid of line plots.

    NOTE(review): relies on module-level `plt` (matplotlib.pyplot) and `sns`
    (seaborn), which are not imported in this file — confirm they are made
    available elsewhere.
    """
    # Accept either plain dicts or objects holding the series as attributes.
    if type(loss) != dict:
        loss = loss.__dict__
        metrics = metrics.__dict__
    nrows = np.ceil(len(loss) / ncols).astype(int)
    fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
    for i, (k, v) in enumerate(loss.items()):
        if len(axes.shape) > 1:
            # BUG FIX: the row index must be i // ncols; the original
            # int(i / nrows) walks off the grid whenever nrows != ncols.
            ax = axes[i // ncols, i % ncols]
        else:
            ax = axes[i % ncols]
        if len(v) > 0:
            x, y = list(zip(*v.items()))
            if 'grad' in k:
                y = [e[0] for e in y]  # only keep the max norm
            # Skip the first 10 points (warm-up noise).
            ax = sns.lineplot(x[10:], y[10:], ax=ax)
            ax.set_title('{}: {:.4f}'.format(k, np.min(y)))
    fig.suptitle('Losses')
    plt.tight_layout()
    plt.show()
# (extraction artifact removed)
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkDataObjectReader(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE wrapper around vtk.vtkDataObjectReader."""
    def __init__(self, module_manager):
        # No inputs; a single output of type 'vtkDataObject'. replaceDoc=True
        # copies the wrapped VTK class documentation onto this module.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkDataObjectReader(), 'Reading vtkDataObject.',
            (), ('vtkDataObject',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
# (extraction artifact removed)
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class BrandsInformationList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    # NOTE: auto-generated Twilio helper. The BrandsInformation endpoint has
    # no instance id, hence the empty path solution and parameterless context.
    def __init__(self, version):
        """
        Initialize the BrandsInformationList
        :param Version version: Version that contains the resource
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationList
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationList
        """
        super(BrandsInformationList, self).__init__(version)
        # Path Solution
        self._solution = {}
    def get(self):
        """
        Constructs a BrandsInformationContext
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        return BrandsInformationContext(self._version, )
    def __call__(self):
        """
        Constructs a BrandsInformationContext
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        return BrandsInformationContext(self._version, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.TrustedComms.BrandsInformationList>'
class BrandsInformationPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    # NOTE: auto-generated Twilio helper; turns raw API page payloads into
    # BrandsInformationInstance objects.
    def __init__(self, version, response, solution):
        """
        Initialize the BrandsInformationPage
        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationPage
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationPage
        """
        super(BrandsInformationPage, self).__init__(version, response)
        # Path Solution
        self._solution = solution
    def get_instance(self, payload):
        """
        Build an instance of BrandsInformationInstance
        :param dict payload: Payload response from the API
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        return BrandsInformationInstance(self._version, payload, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.TrustedComms.BrandsInformationPage>'
class BrandsInformationContext(InstanceContext):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    # NOTE: auto-generated Twilio helper; performs the actual HTTP fetch
    # against the fixed /BrandsInformation URI.
    def __init__(self, version):
        """
        Initialize the BrandsInformationContext
        :param Version version: Version that contains the resource
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        super(BrandsInformationContext, self).__init__(version)
        # Path Solution
        self._solution = {}
        self._uri = '/BrandsInformation'.format(**self._solution)
    def fetch(self, if_none_match=values.unset):
        """
        Fetch the BrandsInformationInstance
        :param unicode if_none_match: Standard `If-None-Match` HTTP header
        :returns: The fetched BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        headers = values.of({'If-None-Match': if_none_match, })
        payload = self._version.fetch(method='GET', uri=self._uri, headers=headers, )
        return BrandsInformationInstance(self._version, payload, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.TrustedComms.BrandsInformationContext {}>'.format(context)
class BrandsInformationInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """
    # NOTE: auto-generated Twilio helper; read-only view over one API payload,
    # with a lazily created context for re-fetching.
    def __init__(self, version, payload):
        """
        Initialize the BrandsInformationInstance
        :returns: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        super(BrandsInformationInstance, self).__init__(version)
        # Marshaled Properties
        self._properties = {
            'update_time': deserialize.iso8601_datetime(payload.get('update_time')),
            'file_link': payload.get('file_link'),
            'file_link_ttl_in_seconds': payload.get('file_link_ttl_in_seconds'),
            'url': payload.get('url'),
        }
        # Context
        self._context = None
        self._solution = {}
    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context
        :returns: BrandsInformationContext for this BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationContext
        """
        if self._context is None:
            self._context = BrandsInformationContext(self._version, )
        return self._context
    @property
    def update_time(self):
        """
        :returns: Creation time of the information retrieved
        :rtype: datetime
        """
        return self._properties['update_time']
    @property
    def file_link(self):
        """
        :returns: The URL to the brands information
        :rtype: unicode
        """
        return self._properties['file_link']
    @property
    def file_link_ttl_in_seconds(self):
        """
        :returns: How long will be the `file_link` valid
        :rtype: unicode
        """
        return self._properties['file_link_ttl_in_seconds']
    @property
    def url(self):
        """
        :returns: The URL of this resource
        :rtype: unicode
        """
        return self._properties['url']
    def fetch(self, if_none_match=values.unset):
        """
        Fetch the BrandsInformationInstance
        :param unicode if_none_match: Standard `If-None-Match` HTTP header
        :returns: The fetched BrandsInformationInstance
        :rtype: twilio.rest.preview.trusted_comms.brands_information.BrandsInformationInstance
        """
        return self._proxy.fetch(if_none_match=if_none_match, )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Preview.TrustedComms.BrandsInformationInstance {}>'.format(context)
# (extraction artifact removed)
import pygame
from pygame.locals import *
from os.path import realpath, dirname
from time import time
from random import randint
def main():
    """Run the game: load once, then update/draw each frame until exit."""
    active, state = load()
    while active:
        state = update(state)
        draw(**state)
        active = check_exit(**state)
    pygame.quit()
    quit()
def load():
    """One-time setup: create the window and build the initial game state."""
    screen_size = (450, 333)
    screen = pygame.display.set_mode(screen_size)
    pygame.display.set_caption("Shoot'n up")
    # Every drawable/updatable entity, keyed by role.
    game_object = {
        'player' : Player(),
        'enemy' : [],
        'shoot' : [],
        'jump_scare': [],
        'hit_effect': [],
        'shoot_effect':[],
        'bg' : Background(),
        'HUD' : [Sprite(dirname(realpath(__file__))+'/assets/img/effects/HUD_Vidas.png', 20, 300)],
    }
    game_object = load_level(game_object, 1)
    path = dirname(realpath(__file__))
    last_shoot = time()
    level = 1
    # This settings dict is threaded through update()/draw() every frame.
    return True, {
        'screen_size' : screen_size,
        'screen' : screen,
        'game_object' : game_object,
        'path' : path,
        'exit_request' : False,
        'last_shoot' : last_shoot,  # time of the player's last volley
        'level' : level,
        'enemy_last_shoot': time()  # time of the last enemy shot
    }
def load_level(game_object, what_level):
    """Append the enemy formation for *what_level* to game_object['enemy'].

    Levels 1-3 are hand-designed; any level above 3 is a random formation.
    (Also removes an unused local `path` computed on every call.)
    """
    if what_level == 1:
        # One row: alternating slow/fast enemies of type 0.
        for j in range(10):
            if j % 2:
                game_object['enemy'].append(Enemy(20 + 40 * j, 40, 0, 0.5))
            else:
                game_object['enemy'].append(Enemy(20 + 40 * j, 40, 0, 1))
    elif what_level == 2:
        # Three off-screen rows, one enemy type per row.
        for i in range(3):
            for j in range(10):
                game_object['enemy'].append(Enemy(20 + 40 * j, -70 - 80 * i, i, .8))
    elif what_level == 3:
        # Five rows alternating sparse (5) and dense (10) spacing.
        for i in range(5):
            if i % 2:
                for j in range(5):
                    x = 20 + (440 / 5 * j)
                    game_object['enemy'].append(Enemy(x, -70 - 80 * i, i % 3, .8))
            else:
                for j in range(10):
                    x = 20 + (440 / 10 * j)
                    game_object['enemy'].append(Enemy(x, -70 - 80 * i, i % 3, .8))
    else:
        # Endless mode: random row count and density; rows 1 and 5 mix in
        # double-speed enemies.
        for i in range(randint(2, 8)):
            foo = randint(5, 10)
            for j in range(foo):
                x = 20 + (440 / foo * j)
                if (i == 1 or i == 5) and j % 2:
                    game_object['enemy'].append(Enemy(x, -70 - 80 * i, i % 3, 2))
                else:
                    game_object['enemy'].append(Enemy(x, -70 - 80 * i, i % 3, 1))
    return game_object
def update(settings):
    """Advance the whole game state by one frame and return it."""
    settings = check_keys(settings)
    # All enemies cleared -> advance to the next level.
    if len(settings['game_object']['enemy'])==0:
        settings['level'] +=1
        load_level(settings['game_object'], settings['level'])
    settings['game_object']['player'].load_img()
    settings['game_object']['shoot'] = update_shoot(settings['game_object']['shoot'])
    settings['game_object']['bg'].tile, settings['game_object']['bg'].time = update_bg(settings['game_object']['bg'].tile, settings['game_object']['bg'].time)
    settings['game_object']['bg'] = parallax(settings['game_object']['player'], settings['game_object']['bg'])
    settings['game_object']['enemy'] = update_enemy(settings['game_object']['enemy'], settings['screen_size'], settings)
    settings['game_object'] = collider(settings['game_object'])
    # Advance sprite animations; finite effects self-destruct on the last frame.
    for fire in settings['game_object']['player'].fires:
        fire.animation.update()
    for gO in settings['game_object']['enemy']:
        gO.fire.animation.update()
    for explosion in settings['game_object']['hit_effect']:
        explosion.animation.update()
        if explosion.animation.pos == 7:
            settings['game_object']['hit_effect'].remove(explosion)
    for gO in settings['game_object']['shoot_effect']:
        gO.animation.update()
        # Muzzle flashes follow the player's horizontal motion.
        gO.x += settings['game_object']['player'].x_speed
        if gO.animation.pos == 6:
            settings['game_object']['shoot_effect'].remove(gO)
    return settings
def collider(game_object):
    """Resolve shot/enemy and shot/player collisions.

    Spawns hit effects, applies damage, and removes spent shots, dead
    enemies and off-screen shots.

    BUG FIX: iterate over snapshots of the lists — removing from a list
    while iterating it used to skip the element after each removal. The
    bare ``except:None`` double-remove guards are replaced with explicit
    membership checks.
    """
    shoots = game_object['shoot']
    for shoot in list(shoots):
        if shoot.origin == 'player':
            for enemy in list(game_object['enemy']):
                in_x = (enemy.x < shoot.x < enemy.x + enemy.width) or \
                       (enemy.x < shoot.x + shoot.width < enemy.x + enemy.width)
                if in_x and enemy.y < shoot.y < enemy.y + enemy.height:
                    game_object['hit_effect'].append(Hit_effect(shoot.x - 53 / 2, shoot.y - 25))
                    enemy.hit_demage()
                    if shoot in shoots:
                        shoots.remove(shoot)
                    if enemy.hp <= 0:
                        game_object['enemy'].remove(enemy)
                    break
            # Player shots travel upwards; cull once above the screen.
            if shoot.y < 0 and shoot in shoots:
                shoots.remove(shoot)
        if shoot.origin == 'enemy':
            player = game_object['player']
            in_x = (player.x < shoot.x < player.x + player.width) or \
                   (player.x < shoot.x + shoot.width < player.x + player.width)
            if in_x and player.y < shoot.y < player.y + player.height:
                game_object['hit_effect'].append(Hit_effect(shoot.x - 53 / 2, shoot.y - 25))
                if shoot in shoots:
                    shoots.remove(shoot)
                game_object['player'].hp -= 1
            elif shoot.y < 0 and shoot in shoots:
                shoots.remove(shoot)
    return game_object
def update_enemy(enemy, screen_size, settings):
    """Advance all enemies, let one random enemy fire, cull off-screen ones.

    BUG FIX: ``randint(0, len(enemy))`` is inclusive, so the chosen index
    could equal ``len(enemy)`` and never match any enemy (and the intent is
    "pick one enemy"); it is now a valid index. Iterating a snapshot also
    stops removals from skipping the following enemy.
    """
    shooter_index = randint(0, len(enemy) - 1) if enemy else -1
    for index, foe in enumerate(list(enemy)):
        # At most one enemy fires per frame, rate-limited to one shot / 0.5 s.
        if index == shooter_index and time() - settings['enemy_last_shoot'] > 0.5:
            x = foe.x + foe.width / 2 - 8
            y = foe.y + foe.height
            settings['game_object']['shoot'].append(Shoot(x, y, 'enemy'))
            settings['game_object']['shoot_effect'].append(Shoot_effect(x, y, 'enemy'))
            settings['enemy_last_shoot'] = time()
        foe.y += foe.y_speed
        # Clear the hit flash 0.1 s after the last hit.
        if time() - foe.init > 0.1 and foe.hit_mark:
            foe.image_return()
            foe.hit_mark = False
        if foe.y > screen_size[1]:
            enemy.remove(foe)
    return enemy
def parallax(player, bg):
    """Offset the background opposite to the player's x (max ~25 px)."""
    bg.x = -player.x / 225.00 * 25
    return bg
def update_shoot(shoot):
    """Advance every projectile by its vertical speed."""
    for projectile in shoot:
        projectile.y += projectile.y_speed
    return shoot
def check_keys(settings):
    """Handle held keys (movement, firing) and window-close / Escape events."""
    k = pygame.key.get_pressed()
    settings['game_object']['player'].player_move_key(k, settings['screen_size'])
    for e in pygame.event.get():
        if e.type == QUIT or (e.type == KEYDOWN and e.key == K_ESCAPE):
            settings['exit_request'] = True
    # Space fires a twin shot plus muzzle flashes, max one volley per 0.24 s.
    if k[K_SPACE] and time()-settings['last_shoot']>0.24:
        x, y = settings['game_object']['player'].x, settings['game_object']['player'].y
        settings['game_object']['shoot'].append(Shoot(x,y+3, 'player'))
        settings['game_object']['shoot'].append(Shoot(x+24,y+3, 'player'))
        settings['game_object']['shoot_effect'].append(Shoot_effect(x, y-14, 'player'))
        settings['game_object']['shoot_effect'].append(Shoot_effect(x+22, y-14, 'player'))
        settings['last_shoot'] = time()
    return settings
def update_bg(tile, last_time):
    """Advance the 200-frame background one tile every 20 ms."""
    now = time()
    if now - last_time > 0.02:
        return (tile + 1) % 200, now
    return tile, last_time
def draw(game_object, screen, screen_size, path, **kwargs):
    """Render one frame back-to-front, flip the display and cap at 60 fps."""
    draw_bg(screen, game_object['bg'])
    draw_enemy(screen, game_object['enemy'])
    draw_shoot_effect(screen, game_object['shoot_effect'])
    draw_player(screen, game_object['player'])
    draw_HUD(screen, game_object['HUD'], game_object['player'].hp)
    draw_shoot(screen, game_object['shoot'])
    draw_hit_effect(screen, game_object['hit_effect'])
    pygame.display.flip()
    fps(60)
def _blit_all(screen, sprites):
    """Blit each sprite's image at its (int x, int y) position."""
    for sprite in sprites:
        screen.blit(sprite.img, (int(sprite.x), int(sprite.y)))
def draw_shoot_effect(screen, effect):
    """Draw all muzzle-flash effects."""
    _blit_all(screen, effect)
def draw_hit_effect(screen, explosion):
    """Draw all hit explosions."""
    _blit_all(screen, explosion)
def draw_shoot(screen, shoot):
    """Draw all projectiles."""
    _blit_all(screen, shoot)
def draw_enemy(screen, enemy):
    """Draw each enemy with its engine flame behind it."""
    for foe in enemy:
        screen.blit(foe.fire.img, (int(foe.x + 16), int(foe.y - 7)))
        screen.blit(foe.img, (int(foe.x), int(foe.y)))
def draw_HUD(screen, HUD, lifes):
    """Draw HUD sprites, then one life icon per remaining extra life."""
    for element in HUD:
        if element.__class__ == Sprite:
            x = element.x
            screen.blit(element.img, (int(element.x), int(element.y)))
    img = pygame.image.load(dirname(realpath(__file__)) + '/assets/img/effects/life.png')
    for i in range(lifes - 1):
        screen.blit(img, (int(x + 5 + 22 * i), int(305)))
def draw_player(screen, player):
    """Draw the ship and its two engine flames (offsets depend on the lean)."""
    screen.blit(player.img, (int(player.x), int(player.y)))
    y = player.fires[0].y + player.height - 5
    if player.pos == 'M':
        x = player.x + 2   # compensate the first flame's offset
        x_offset = 25      # compensate the second flame's offset
    else:
        x = player.x + 4   # compensate the first flame's offset
        x_offset = 18      # compensate the second flame's offset
    screen.blit(player.fires[0].img, (int(x), int(y)))
    screen.blit(player.fires[0].img, (int(x + x_offset), int(y)))
def fps(frames):
    """Cap the frame rate at *frames* per second."""
    pygame.time.Clock().tick(frames)
def draw_bg(screen, bg):
    """Blit the current background frame at the parallax offset."""
    screen.blit(bg.img[bg.tile], (int(bg.x), int(bg.y)))
def check_exit(exit_request, **kwargs):
    """Keep the main loop running until an exit has been requested."""
    return not exit_request
class Hit_effect:
    """Short explosion animation spawned where a shot lands."""
    def __init__(self,x,y):
        self.x = x
        self.y = y
        path = dirname(realpath(__file__))+'/assets/img/effects'
        self.img = pygame.image.load(path+'/explosion0.png')
        # 8 frames, 0.01 s each; removed by update() once pos reaches 7.
        self.animation = Animation({'explosion' : [8, 0.01]}, path, 'explosion', self)
class Shoot_effect:
    """Muzzle-flash animation, styled per shooter ('player' or enemy)."""
    def __init__(self, x,y, origin):
        self.x = x
        self.y = y
        self.origin = origin
        path = dirname(realpath(__file__))
        if origin == 'player':
            self.img = pygame.image.load(path+'/assets/img/effects/fire_effectPlayer0.png')
            self.animation = Animation({'fire_effectPlayer' : [7, 0]}, path+'/assets/img/effects', 'fire_effectPlayer', self)
        else:
            self.img = pygame.image.load(path+'/assets/img/effects/fire_effectEnemy0.png')
            self.animation = Animation({'fire_effectEnemy' : [7, 0]}, path+'/assets/img/effects', 'fire_effectEnemy', self)
class Shoot:
    """A projectile; player shots travel up, enemy shots travel down."""
    def __init__(self, x, y, origin):
        self.x = x
        self.y = y
        self.origin = origin  # 'player' or 'enemy' — drives sprite and speed
        if origin == 'player':
            self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/effects/shootPlayer.png')
            self.y_speed = -4
        else:
            self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/effects/shootEnemy.png')
            self.y_speed = 4
        self.width = self.img.get_width()
        self.height= self.img.get_height()
class Sprite:
    """A static image positioned at (x, y) — used for HUD elements."""
    def __init__(self, path, x, y):
        self.x = x
        self.y = y
        self.img = pygame.image.load(path)
class Explosion:
    """Stand-alone explosion animation (7 frames, 0.2 s each)."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
        path = dirname(realpath(__file__))
        self.img = pygame.image.load(path+'/assets/img/effects/explosion0.png')
        self.animation = Animation({'explosion' : [7, 0.2]}, path, 'explosion', self)
class Enemy:
    """A descending enemy ship; `type` selects the sprite and adds hit points."""
    def __init__(self, x, y, type, y_speed):
        self.x = x
        self.x_speed = 0
        self.y = y
        self.hp = type+2          # higher sprite types take more hits
        self.y_speed = y_speed
        self.type = type
        self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/enemy/enemy'+str(type)+'.png').convert_alpha()
        self.width = self.img.get_width()
        self.height = self.img.get_height()
        self.fire = Fire(self)
        self.init = time()        # timestamp of the last hit flash
        self.hit_mark = False     # True while the flash tint is applied
    def hit_demage(self):
        """Take one hit: flash white or red at random and lose 1 hp."""
        if randint(1,2)%2:
            self.img = white(self.img)
        else:
            self.img = red(self.img)
        self.hit_mark = True
        self.hp -= 1
        self.init = time()
    def image_return(self):
        """Restore the untinted sprite after the hit flash expires."""
        self.img = pygame.image.load(dirname(realpath(__file__))+'/assets/img/enemy/enemy'+str(self.type)+'.png').convert_alpha()
def white(surface):
    """Hit flash: recolour every fully opaque pixel to white, in place."""
    height, width = surface.get_height(), surface.get_width()
    for row in range(height):
        for column in range(width):
            if surface.get_at((column, row))[3] == 255:
                surface.set_at((column, row), (255, 255, 255))
    return surface
def red(surface):
    """Hit flash: recolour every fully opaque pixel to pale red, in place."""
    height, width = surface.get_height(), surface.get_width()
    for row in range(height):
        for column in range(width):
            if surface.get_at((column, row))[3] == 255:
                surface.set_at((column, row), (255, 130, 130))
    return surface
class Player:
    """The player's ship: position, hp-dependent sprite, keyboard movement."""
    def __init__(self):
        self.pos = 'M'            # lean: 'L'eft, 'M'iddle, 'R'ight
        self.hp = 4
        self.x = 200
        self.x_speed = 0
        self.y = 280
        self.tiles = {}           # sprite cache keyed by '<hp><pos>'
        self.spaw_effect = False
        self.spaw_effect_start = time()
        self.fires= [
            Fire(self),
            Fire(self)
        ]
        path = dirname(realpath(__file__))
        # Pre-load one sprite per (hp 1-4) x (lean L/M/R) combination.
        for sides in ['L', 'M', 'R']:
            for i in range(4):
                k = i+1
                self.tiles[str(k)+sides]= (pygame.image.load(path+'/assets/img/ships/ship' + str(k) + sides + '.png'))
        self.load_img()
        self.width = self.img.get_width()
        self.height = self.img.get_height()
    def load_img(self):
        """Select the cached sprite matching the current hp and lean."""
        self.img = self.tiles[str(self.hp)+self.pos]
    def player_move_key(self, k, screen_size):
        """Accelerate with A/D, decay speed otherwise; clamp to the screen."""
        if k[K_d]:
            self.x_speed += 1.4
            self.pos = 'R'
        elif k[K_a]:
            self.x_speed -= 1.4
            self.pos = 'L'
        else:
            self.x_speed /= 1.1   # friction while no key is held
            self.pos = 'M'
        # Clamp horizontal speed to +/-5.
        if abs(self.x_speed)>5:
            if self.x_speed>0:
                self.x_speed = 5
            else:
                self.x_speed = -5
        self.x+=self.x_speed
        if self.x+self.width>screen_size[0]:
            self.x = screen_size[0]-self.width
            self.pos = 'M'
        if self.x < 0:
            self.x = 0
            self.pos = 'M'
class Fire:
    """Engine-flame sprite attached to a ship; frames driven by Animation."""
    def __init__(self, obj):
        self.x = obj.x
        self.y = obj.y
        self.img = ''
        self.animation = Animation({'fire' : [4, 0.02]}, dirname(realpath(__file__))+'/assets/img/effects', 'fire', self)
class Animation():
    """Frame stepper: swaps obj.img between '<tile><n>.png' files on a timer.

    `sprites` maps tile name -> [frame_count, seconds_per_frame].
    """
    def __init__(self, sprites, path, first, obj):
        self.sprites = sprites
        self.path = path
        self.tile = first
        self.pos = 0
        self.last_update = time()
        self.obj = obj
        self.obj.img = pygame.image.load(path + '/' + first + str(self.pos) + '.png')
    def change(self, tile, pos=0):
        """Switch to another tile sequence, restarting at frame 0."""
        self.tile = tile
        self.pos = 0
        self.obj.img = pygame.image.load(self.path + '/' + tile + str(pos) + '.png')
    def update(self):
        """Advance (and wrap) one frame once the per-frame delay has elapsed."""
        if time()-self.last_update>self.sprites[self.tile][1]:
            if self.pos == self.sprites[self.tile][0]-1:
                self.pos = 0
            else:
                self.pos += 1
            self.obj.img = pygame.image.load(self.path + '/' + self.tile + str(self.pos) + '.png')
            self.last_update = time()
class Background:
    """Animated 200-frame background; x is adjusted each frame by parallax()."""
    def __init__(self):
        self.x = -25
        self.y = 0
        self.tile = 0            # current frame index (0-199)
        self.time = time()       # timestamp of the last frame advance
        self.img = []
        path = dirname(realpath(__file__))
        # Pre-load all 200 background frames up front.
        for i in range(200):
            self.img.append(pygame.image.load(path+'/assets/img/bg/b0553b276f5049bec4808d6a012e32bc-' + str(i)+'.png'))
main()
from enum import Enum
class Forcing(Enum):
    """Environmental forcing types.

    ``.name`` is the machine-readable variable name; ``.value`` is the
    human-readable label.
    """
    current = "current"
    wind = "10-meter wind"
    seawater_density = "seawater density"
# (extraction artifact removed)
"""
<your resource name> API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
"""
import os
import logging
from unittest import TestCase
from unittest.mock import MagicMock, patch
from tests.factories import WishlistFactory, ItemFactory
from service import status # HTTP Status Codes
from service.models import db
from service.routes import app, init_db
# Test configuration: database connection (overridable via env) and API routes.
DATABASE_URI = os.getenv(
    "DATABASE_URI", "postgresql://postgres:postgres@localhost:5432/postgres"
)
BASE_URL = "/wishlists"
CONTENT_TYPE_JSON = "application/json"
######################################################################
# T E S T C A S E S
######################################################################
class TestWishlistService(TestCase):
""" Wishlist Service Tests """
    @classmethod
    def setUpClass(cls):
        """ Run once before all tests: configure the app and init the DB """
        app.config['TESTING'] = True
        app.config['DEBUG'] = False
        app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
        app.logger.setLevel(logging.CRITICAL)  # silence app logging during tests
        init_db()
    @classmethod
    def tearDownClass(cls):
        """ Runs once after the entire test suite """
        pass
    def setUp(self):
        """ Runs before each test: fresh schema and a new test client """
        db.drop_all()  # clean up the last tests
        db.create_all()  # create new tables
        self.app = app.test_client()
    def tearDown(self):
        """ Runs once after each test case """
        db.session.remove()
        db.drop_all()
######################################################################
# H E L P E R M E T H O D S
######################################################################
def _create_wishlists(self, count):
""" Factory method to create wishlists in bulk """
wishlists = []
for _ in range(count):
wishlist = WishlistFactory()
resp = self.app.post(
BASE_URL, json=wishlist.serialize(), content_type="application/json"
)
self.assertEqual(
resp.status_code, status.HTTP_201_CREATED, "Could not create test Wishlist"
)
new_wishlist = resp.get_json()
wishlist.id = new_wishlist["id"]
wishlists.append(wishlist)
return wishlists
######################################################################
# W I S H L I S T T E S T C A S E S
######################################################################
def test_index(self):
""" Test index call """
resp = self.app.get("/")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
def test_get_wishlist_list(self):
""" Get a list of Wishlists """
self._create_wishlists(5)
resp = self.app.get(BASE_URL)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), 5)
def test_get_wishlist_by_name(self):
""" Get a Wishlist by Name """
wishlists = self._create_wishlists(3)
resp = self.app.get(
BASE_URL,
query_string=f"name={wishlists[1].name}"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data[0]["name"], wishlists[1].name)
def test_get_wishlist(self):
""" Get a single Wishlist """
# get the id of an wishlist
wishlist = self._create_wishlists(1)[0]
resp = self.app.get(
f"{BASE_URL}/{wishlist.id}",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(data["name"], wishlist.name)
def test_get_wishlist_not_found(self):
"""Get a Wishlist that is not found"""
resp = self.app.get(f"{BASE_URL}/0")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_create_wishlist(self):
""" Create a new Wishlist """
wishlist = WishlistFactory()
resp = self.app.post(
BASE_URL,
json=wishlist.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# Make sure location header is set
location = resp.headers.get("Location", None)
self.assertIsNotNone(location)
# Check the data is correct
new_wishlist = resp.get_json()
self.assertEqual(new_wishlist["name"], wishlist.name, "Names does not match")
self.assertEqual(new_wishlist["type"], wishlist.type, "Type does not match")
self.assertEqual(new_wishlist["items"], wishlist.items, "Item does not match")
self.assertEqual(new_wishlist["user_id"], wishlist.user_id, "user_id does not match")
self.assertEqual(new_wishlist["created_date"], str(wishlist.created_date), "Created Date does not match")
# Check that the location header was correct by getting it
resp = self.app.get(location, content_type="application/json")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
new_wishlist = resp.get_json()
self.assertEqual(new_wishlist["name"], wishlist.name, "Names does not match")
self.assertEqual(new_wishlist["type"], wishlist.type, "Type does not match")
self.assertEqual(new_wishlist["items"], wishlist.items, "Item does not match")
self.assertEqual(new_wishlist["user_id"], wishlist.user_id, "user_id does not match")
self.assertEqual(new_wishlist["created_date"], str(wishlist.created_date), "Created Date does not match")
def test_update_wishlist(self):
""" Update (Edit) an existing Wishlist """
# create a wishlist to update
test_wishlist = WishlistFactory()
resp = self.app.post(
BASE_URL,
json=test_wishlist.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# update the wishlist
new_wishlist = resp.get_json()
new_wishlist["name"] = "Pets"
new_wishlist_id = new_wishlist["id"]
resp = self.app.put(
f"{BASE_URL}/{new_wishlist_id}",
json=new_wishlist,
content_type="application/json",
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
updated_wishlist = resp.get_json()
self.assertEqual(updated_wishlist["name"], "Pets")
def test_update_wishlist_not_found(self):
"""Update a Wishlist that does not exist"""
new_wishlist = WishlistFactory()
resp = self.app.put(
f"{BASE_URL}/0",
json=new_wishlist.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_wishlist(self):
""" Delete an Wishlist """
# get the id of an wishlist
wishlist = self._create_wishlists(1)[0]
resp = self.app.delete(
f"{BASE_URL}/{wishlist.id}",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
# Error handler testing code below based on the Service_Accounts code example
def test_bad_request(self):
""" Send wrong media type """
wishlist = WishlistFactory()
resp = self.app.post(
BASE_URL,
json={"name": "not enough data"},
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_unsupported_media_type(self):
""" Send wrong media type """
wishlist = WishlistFactory()
resp = self.app.post(
BASE_URL,
json=wishlist.serialize(),
content_type="test/html"
)
self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def test_method_not_allowed(self):
""" Make an illegal method call """
resp = self.app.put(
BASE_URL,
json={"not": "today"},
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
######################################################################
# I T E M T E S T C A S E S
######################################################################
def test_get_item_list(self):
""" Get a list of Items """
# add two items to wishlist
wishlist = self._create_wishlists(1)[0]
item_list = ItemFactory.create_batch(2)
# Create item 1
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item_list[0].serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# Create item 2
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item_list[1].serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# get the list back and make sure there are 2
resp = self.app.get(
f"{BASE_URL}/{wishlist.id}/items",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
self.assertEqual(len(data), 2)
def test_add_item(self):
""" Add an item to a wishlist """
wishlist = self._create_wishlists(1)[0]
item = ItemFactory()
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
data = resp.get_json()
logging.debug(data)
self.assertEqual(data["wishlist_id"], wishlist.id)
self.assertEqual(data["name"], item.name)
self.assertEqual(data["category"], item.category)
self.assertEqual(data["price"], item.price)
# self.assertEqual(data["in_stock"], item.in_stock)
# self.assertEqual(data["purchased"], item.purchased)
def test_get_item(self):
""" Get an item from an wishlist """
# create a known item
wishlist = self._create_wishlists(1)[0]
item = ItemFactory()
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
data = resp.get_json()
logging.debug(data)
item_id = data["id"]
# retrieve it back
resp = self.app.get(
f"{BASE_URL}/{wishlist.id}/items/{item_id}",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
logging.debug(data)
self.assertEqual(data["wishlist_id"], wishlist.id)
self.assertEqual(data["name"], item.name)
self.assertEqual(data["category"], item.category)
self.assertEqual(data["price"], item.price)
# self.assertEqual(data["in_stock"], item.in_stock)
# self.assertEqual(data["purchased"], item.purchased)
def test_update_item(self):
""" Update an item on an wishlist """
# create a known item
wishlist = self._create_wishlists(1)[0]
item = ItemFactory()
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
data = resp.get_json()
logging.debug(data)
item_id = data["id"]
data["name"] = "XXXX"
# send the update back
resp = self.app.put(
f"{BASE_URL}/{wishlist.id}/items/{item_id}",
json=data,
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# retrieve it back
resp = self.app.get(
f"{BASE_URL}/{wishlist.id}/items/{item_id}",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = resp.get_json()
logging.debug(data)
self.assertEqual(data["id"], item_id)
self.assertEqual(data["wishlist_id"], wishlist.id)
self.assertEqual(data["name"], "XXXX")
def test_delete_item(self):
""" Delete an Item """
wishlist = self._create_wishlists(1)[0]
item = ItemFactory()
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
data = resp.get_json()
logging.debug(data)
item_id = data["id"]
# send delete request
resp = self.app.delete(
f"{BASE_URL}/{wishlist.id}/items/{item_id}",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
# retrieve it back and make sure item is not there
resp = self.app.get(
f"{BASE_URL}/{wishlist.id}/items/{item_id}",
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
######################################################################
# T E S T A C T I O N S
######################################################################
def test_purchase_a_item(self):
"""Purchase an Item"""
wishlist = self._create_wishlists(1)[0]
item = ItemFactory()
item.in_stock = True
resp = self.app.post(
f"{BASE_URL}/{wishlist.id}/items",
json=item.serialize(),
content_type="application/json"
)
self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
item_data = resp.get_json()
item_id = item_data["id"]
logging.info(f"Created Item with id {item_id} = {item_data}")
# Request to purchase a Item
resp = self.app.put(f"{BASE_URL}/{wishlist.id}/items/{item_id}/purchase")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# Retrieve the Item and make sue it is purchased
resp = self.app.get(f"{BASE_URL}/{wishlist.id}/items/{item_id}")
self.assertEqual(resp.status_code, status.HTTP_200_OK)
item_data = resp.get_json()
self.assertEqual(item_data["id"], item_id)
self.assertEqual(item_data["purchased"], True)
# Note: FIX ME PLEASE!
# def test_purchase_not_available(self):
# """Purchase a Item that is not in stock"""
# wishlist = self._create_wishlists(1)[0]
# item = ItemFactory()
# item.in_stock = False
# resp = self.app.post(
# f"{BASE_URL}/{wishlist.id}/items",
# json=item.serialize(),
# content_type="application/json"
# )
# self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
# item_data = resp.get_json()
# item_id = item_data["id"]
# item_is = item_data["in_stock"]
# logging.info(f"Created Item with id {item_id} = {item_data}")
# logging.info(f"Item in stock {item_is}")
# # Request to purchase a Item should fail
# resp = self.app.put(f"{BASE_URL}/{wishlist.id}/items/{item_id}/purchase")
# self.assertEqual(resp.status_code, status.HTTP_409_CONFLICT)
def test_purchase_a_item_not_found(self):
"""Purchase a Item not found"""
wishlist = self._create_wishlists(1)[0]
resp = self.app.put(f"{BASE_URL}/{wishlist.id}/items/{0}/purchase")
self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
| 16,219 | 5,059 |
import time
from .stage import Stage
class Debug_Stage(Stage):
    '''!
    Stage for debugging: prints messages in setup and process.
    '''
    # Class-level defaults; treat as read-only (it is a shared dict).
    configs_default={"name":"debug_stage", "blank_line":False, "wait_key": False, "wait_seconds": 0, "context_debug":"context"}
    def __init__(self, configs=None):
        '''!
        Constructor
        @param configs:
            name: Stage name, will be printed in the screen (default: "debug_stage")
            blank_line: Print a blank line in the screen (default: False)
            wait_key: Wait for a key to be pressed after print (default: False)
            wait_seconds: Wait for a number of seconds after print (default: 0, no wait)
            context_debug: Word that will be placed in context, can be used for debugging substages (default: "context")
        '''
        # Fix: avoid a mutable default argument ({}); a shared dict default
        # would be aliased across every call that relies on it.
        super().__init__({} if configs is None else configs)
    def _pause(self):
        '''!
        Apply the configured post-print behaviour: optional blank line,
        optional wait for a keypress, optional sleep.
        '''
        if self._configs["blank_line"]:
            print()
        if self._configs["wait_key"]:
            input("Type anything to continue: ")
        if self._configs["wait_seconds"] != 0:
            time.sleep(self._configs["wait_seconds"])
    def setup(self):
        '''!
        Initialize the stage.
        Print the name of the stage and, according to the settings,
        wait for a key to be pressed or print a blank line.
        '''
        print(self._configs["name"], "setup")
        self._pause()
        self.set_context("context_debug", self._configs["context_debug"])
    def process(self, context=None):
        '''!
        Print the name of the stage and, according to the settings,
        wait for a key to be pressed or print a blank line.
        If the "context_debug" key is in the context, print its value.
        '''
        # Fix: mutable default argument replaced by None sentinel.
        context = {} if context is None else context
        print(self._configs["name"], "process")
        if "context_debug" in context:
            print(context["context_debug"])
        self._pause()
| 2,168 | 587 |
#
# build.py
#
# Builds a back end for MGV based on a config file.
#
import os
import sys
import time
import json
from argparse import ArgumentParser
import re
from urllib.request import urlopen
import gzip
from lib.Config import ConfigFileReader
from lib.Downloader import downloaderNameMap
from lib.Importer import importerNameMap
from lib.Deployer import Deployer
### ------------------------------------------------------------------
class MgvDataBuilder :
    """Drives the MGV back-end build: for each genome in the build config,
    runs the selected phases (download, import, deploy) for the selected
    data types (assembly, models, orthologs)."""
    VALID_TYPES = ["assembly", "models", "orthologs"]
    VALID_PHASES = ["download", "import", "deploy"]
    def __init__ (self) :
        # Log to stderr until main() possibly redirects to --log-file.
        self.logfile = sys.stderr
        self.genome_re = None
    def log(self, s, newline='\n', timestamp=True) :
        """Write one log line, optionally prefixed with a timestamp, and flush."""
        if timestamp:
            ts = time.asctime(time.localtime(time.time()))
            self.logfile.write(ts + " ")
        self.logfile.write(str(s))
        self.logfile.write(newline)
        self.logfile.flush()
    def getArgs (self) :
        """Parse command-line arguments; normalize paths and default the phases."""
        parser = ArgumentParser("Builds the backend for MGV based on a config file.")
        parser.add_argument(
            "-b", "--build-config",
            required=True,
            help = "Build config file. Required.")
        parser.add_argument(
            "-g", "--genome",
            default = ".*",
            help = "Which genomes to build. By default, builds all genomes. Specify a regex pattern used to match the genome names.")
        parser.add_argument(
            "-p", "--phase",
            choices = self.VALID_PHASES,
            action = "append",
            default = [],
            help = "Which phase to run. One of: %(choices)s. If not specified, runs all phases.")
        parser.add_argument(
            "-t", "--type",
            choices = self.VALID_TYPES,
            default = None,
            help = "Which datatype to process. One of: %(choices)s. If not specified, processes all types.")
        parser.add_argument(
            "-l", "--log-file",
            default = None,
            help = "Where to write log messages. By default, logs to stderr.")
        parser.add_argument(
            "-d", "--downloads-dir",
            default = "./downloads",
            help = "Where downloaded files go. Default = %(default)s")
        parser.add_argument(
            "-o", "--output-dir",
            default = "./output",
            help = "Where the output files go. Default = %(default)s")
        parser.add_argument(
            "-w", "--web-dir",
            help = "Web accessible directory containing data generated files. Default = same as --output-dir.")
        parser.add_argument(
            "--cgi-dir",
            help = "Place to put the CGI scripts used by MGV Default = same as --web-dir.")
        parser.add_argument(
            "--snapshot-file",
            help = "Alliance release snapshot file to use in lieu of querying API. (default = get snapshot from Alliance API)")
        parser.add_argument(
            "-D", "--debug",
            action = "store_true",
            default = False,
            help = "Run in debug mode.")
        args = parser.parse_args()
        # Normalize all paths to absolute; web/cgi dirs cascade from output dir.
        args.downloads_dir = os.path.abspath(args.downloads_dir)
        args.output_dir = os.path.abspath(args.output_dir)
        args.web_dir = os.path.abspath(args.web_dir) if args.web_dir else args.output_dir
        args.cgi_dir = os.path.abspath(args.cgi_dir) if args.cgi_dir else args.web_dir
        if len(args.phase) == 0:
            args.phase = self.VALID_PHASES
        return args
    def deepCopy (self, obj) :
        # JSON round-trip: deep-copies plain dict/list/scalar structures only.
        return json.loads(json.dumps(obj))
    def ensureDirectory (self, d, empty = False):
        """Create directory d if needed; optionally empty it. No-op in debug mode."""
        if self.args.debug:
            return
        if not os.path.exists(d):
            os.makedirs(d)
        if empty:
            cmd = "rm -fr %s/*" % d
            self.log(cmd)
            os.system(cmd)
    def process(self, g) :
        """Run the selected phases for the selected types on one genome config g."""
        self.log("Processing cfg: " + str(g))
        gn = g["name"]  # NOTE(review): gn is assigned but never used
        for t in self.VALID_TYPES:
            if self.args.type in [t, None] :
                if not t in g:
                    continue
                #
                # A string value "=otherGenome" means: share that genome's
                # data for this type via a symlink in the web directory.
                if type(g[t]) is str and g[t].startswith("="):
                    if "deploy" in self.args.phase:
                        gg = self.getCfg(g[t][1:])
                        tgtPath = os.path.join(self.args.web_dir, gg["name"], t)
                        lnkPath = os.path.join(self.args.web_dir, g["name"], t)
                        cmd = 'ln -s %s %s' % (tgtPath, lnkPath)
                        self.log("Creating symlink: " + cmd)
                        # NOTE(review): cmd is logged but never executed --
                        # confirm whether an os.system(cmd) call is missing here.
                    continue
                sname = g[t].get("source","UrlDownloader")
                cls = downloaderNameMap[sname]
                downloader = cls(self, g, t, self.args.debug)
                # Download data
                if "download" in self.args.phase:
                    downloader.go()
                # Import data
                if "import" in self.args.phase:
                    icls = importerNameMap[t]
                    importer = icls(self, t, g, self.args.output_dir, self.args.debug)
                    importer.go()
                # Deploy
                if "deploy" in self.args.phase:
                    deployer = Deployer(self, t, g, self.args.output_dir, self.args.web_dir, self.args.cgi_dir, debug=self.args.debug)
                    deployer.go()
    def getCfg (self, name = None) :
        """Return the whole config (no name) or the config for one genome name."""
        if name is None:
            return self.cfg
        else:
            return self.name2cfg.get(name, None)
    def main (self) :
        #
        self.args = self.getArgs()
        if self.args.log_file:
            self.logfile = open(self.args.log_file, 'w')
        self.log("\n\nThis is the MGV back end data builder.")
        self.log("Arguments: " + str(self.args))
        # Anchor the pattern so -g matches whole genome names only.
        self.genome_re = re.compile('^' + self.args.genome + '$')
        #
        self.cfg = ConfigFileReader(self.args.build_config).read()
        if self.args.debug:
            self.log("Running in DEBUG mode. No commands will be executed.")
        #
        self.name2cfg = {}
        for g in self.cfg:
            self.name2cfg[g["name"]] = g
        #
        for g in self.cfg:
            if g.get("disabled", False) :
                continue
            if self.genome_re.match(g["name"]):
                self.log("Processing " + g["name"])
                self.process(g)
            else:
                # self.log("Skipping " + g["name"])
                pass
        self.log("Builder exiting.")
        # NOTE(review): when no --log-file was given this closes sys.stderr.
        self.logfile.close()
### ------------------------------------------------------------------
# Script entry point.
if __name__ == "__main__":
    MgvDataBuilder().main()
| 6,659 | 1,955 |
from django.conf.urls import url
from login import views
# Route the site root to the login app's index view.
# NOTE(review): django.conf.urls.url() was deprecated in Django 2.0 and removed
# in 4.0; django.urls.re_path()/path() is the modern replacement.
urlpatterns = [
    url(r'^$', views.index, name='home'),
]
'''
This package makes it easier to work with Storm and Python.
:organization: Parsely
'''
from __future__ import absolute_import, print_function, unicode_literals
import streamparse.bolt
import streamparse.cmdln
import streamparse.component
import streamparse.contextmanagers
import streamparse.debug
import streamparse.decorators
import streamparse.dsl
import streamparse.spout
import streamparse.storm
from streamparse.version import __version__, VERSION
# Public API of the streamparse package (submodules plus version info).
__all__ = [
    'bolt',
    'cmdln',
    'component',
    'contextmanagers',
    'debug',
    'decorators',
    'dsl',
    'spout',
    'storm',
    '__version__',
    'VERSION',
]
# License text exposed as package metadata.
__license__ = """
Copyright 2014-2015 Parsely, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 1,225 | 375 |
#----------------------------------------------------------------------
# # 9/25/18 - Update to use Python 3.6, PyQt5 and pyqtgraph 0.10.0
# G. Nordin
#----------------------------------------------------------------------
from PyQt5 import QtGui, QtCore
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import sys
## Always start by initializing Qt (only once per application)
# NOTE(review): in stock PyQt5, QApplication/QWidget/QLabel live in QtWidgets,
# not QtGui -- this relies on an old PyQt API or a compatibility shim; confirm
# the pinned PyQt/pyqtgraph versions.
app = QtGui.QApplication([])
## Define a top-level widget to hold everything
w = QtGui.QWidget()
w.resize(1000,600)
w.setWindowTitle('Polarization Visualization')
## Create widgets to be placed inside
heading_text = QtGui.QLabel('Polarization Angles ' + u"\u03C8" + ' and ' + u"\u03B4")
# Box with sliders
sliderbox = QtGui.QGroupBox()
hBoxLayout = QtGui.QHBoxLayout()
psi_slider_layout = QtGui.QVBoxLayout()
delta_slider_layout = QtGui.QVBoxLayout()
# psi slider: orientation angle, 0..90 degrees
psi_label = QtGui.QLabel(u"\u03C8")
psi_slider = QtGui.QSlider()
psi_slider.setOrientation(QtCore.Qt.Vertical)
psi_slider.setMinimum(0)
psi_slider.setMaximum(90)
psi_slider.setValue(0)
psi_value = QtGui.QLabel(str(psi_slider.value()) + u"\u00b0")
psi_slider_layout.addWidget(psi_label)
psi_slider_layout.addWidget(psi_slider)
psi_slider_layout.addWidget(psi_value)
def set_psi_value(value):
    """Slider callback: record the new psi angle and refresh its readout label."""
    global psi_deg
    psi_deg = value
    psi_value.setText('{}{}'.format(value, u"\u00b0"))
psi_slider.valueChanged.connect(set_psi_value)
# delta slider: relative phase, -180..180 degrees
delta_label = QtGui.QLabel(u"\u03B4")
delta_slider = QtGui.QSlider()
delta_slider.setOrientation(QtCore.Qt.Vertical)
delta_slider.setMinimum(-180)
delta_slider.setMaximum(180)
delta_slider.setValue(0)
delta_value = QtGui.QLabel(str(delta_slider.value()) + u"\u00b0")
delta_slider_layout.addWidget(delta_label)
delta_slider_layout.addWidget(delta_slider)
delta_slider_layout.addWidget(delta_value)
def set_delta_value(value):
    """Slider callback: record the new delta phase and refresh its readout label."""
    global delta_deg
    delta_deg = value
    delta_value.setText('{}{}'.format(value, u"\u00b0"))
delta_slider.valueChanged.connect(set_delta_value)
# Set layout of box containing sliders
hBoxLayout.addItem(psi_slider_layout)
hBoxLayout.addItem(delta_slider_layout)
sliderbox.setLayout(hBoxLayout)
# Box with options
optionbox = QtGui.QGroupBox()
vBoxLayout = QtGui.QVBoxLayout()
# Options
hfield_checkbox = QtGui.QCheckBox("Show H-field")
# Add to layout
vBoxLayout.addWidget(hfield_checkbox)
# Add to box
optionbox.setLayout(vBoxLayout)
# Create openGL view widget & add a grid
wGL = gl.GLViewWidget()
wGL.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
wGL.opts['distance'] = 5
g = gl.GLGridItem()
wGL.addItem(g)
## Create a grid layout to manage the widgets size and position
layout = QtGui.QGridLayout()
w.setLayout(layout)
layout.setColumnStretch (1, 2)
## Add widgets to the layout in their proper positions
layout.addWidget(heading_text, 0, 0)   # heading text goes in upper-left
layout.addWidget(sliderbox, 1, 0)   # slider box goes underneath heading text
layout.addWidget(optionbox, 2, 0)   # option box goes underneath slider box
layout.addWidget(wGL, 0, 1, 3, 1)  # wGL goes on right side, spanning 3 rows
## Display the widget as a new window
w.show()
##------------ Set up polarization animation ------------##
degtorad = np.pi/180.0
# Function to create new array from old where new array is formatted to prepare to
# draw lines perpendicular from z-axis to curve defined by input array
def preptomakelines(pts):
    """Interleave each curve point with its on-axis projection.

    Given pts of shape (n, 3), return an array of shape (2n, 3) where
    row 2i is (0, 0, z_i) and row 2i+1 is pts[i], so that drawing the
    result in 'lines' mode yields segments perpendicular from the z-axis
    to the curve.

    Improvement: the per-row Python loop is replaced with equivalent
    vectorized slice assignments (same output, O(n) NumPy operations).
    """
    pts2 = np.zeros((2 * pts.shape[0], pts.shape[1]))
    pts2[::2, 2] = pts[:, 2]   # even rows: on-axis point (0, 0, z_i)
    pts2[1::2, :] = pts        # odd rows: the curve point itself
    return pts2
# Module-level angles (degrees), mutated by the slider callbacks above.
psi_deg = float(psi_slider.value())
delta_deg = float(delta_slider.value())
# Calculate sinusoidal electric field for arbitrary polarization
def efield_arbpol(t, z, amplitude, psi_rad, delta_rad):
    """Return (x, y, z) E-field components for an arbitrary polarization state.

    t          -- time (fraction of a period)
    z          -- scalar or array of positions along the propagation axis
    amplitude  -- field amplitude
    psi_rad    -- polarization orientation angle (radians)
    delta_rad  -- relative phase between x and y components (radians)

    Improvements: the common phase term is computed once instead of twice,
    and the redundant self-assignment ``z = z`` is removed; z is returned
    unchanged as before.
    """
    phase = 2 * np.pi * (t - z)
    x = amplitude * np.cos(psi_rad) * np.cos(phase)
    y = amplitude * np.sin(psi_rad) * np.cos(phase + delta_rad)
    return x, y, z
# Prep coordinate rotations for electric & magnetic fields to go from calculation
# coordinates to pyqtgraph plotting coordinates
temp2Darray = [[-1, 0, 0],
               [0, 0, 1],
               [0, 1, 0]]
rot_efield_coord = np.array(temp2Darray)
# Calculate electric & magnetic field arrays. Also make arrays to define lines.
amplitude = 1.0
z = np.linspace(-10, 10, 500)
x, y, z = efield_arbpol(0.0,z,amplitude,psi_deg*degtorad,delta_deg*degtorad)
# E-field: 3D curve, its z=0 projection, and an arrow to the mid-point.
pts_e = np.vstack([x,y,z]).transpose()
pts_e_lines = preptomakelines(pts_e)
pts_e = np.dot(pts_e, rot_efield_coord)
pts_e_lines = np.dot(pts_e_lines, rot_efield_coord)
z0 = np.zeros(len(z))
pts_e_z0 = np.vstack([x,y,z0]).transpose()
pts_e_z0 = np.dot(pts_e_z0, rot_efield_coord)
pts_e_arrow = np.array( [[0.0, 0.0, 0.0], pts_e_z0[int(len(pts_e_z0)/2.0)]] )
# H-field: same construction, components swapped to (-y, x) so it is orthogonal.
pts_h = np.vstack([-y,x,z]).transpose() # Orthogonal to E
pts_h_lines = preptomakelines(pts_h)
pts_h = np.dot(pts_h, rot_efield_coord)
pts_h_lines = np.dot(pts_h_lines, rot_efield_coord)
pts_h_z0 = np.vstack([-y,x,z0]).transpose()
pts_h_z0 = np.dot(pts_h_z0, rot_efield_coord)
pts_h_arrow = np.array( [[0.0, 0.0, 0.0], pts_h_z0[int(len(pts_h_z0)/2.0)]] )
# Get ready to make plots (RGBA colors, line widths)
efield_color = (1, 0, 0, 1)
efield_color_z0 = (1, 1, 1, 1)
efield_color_arrow = (1, 0.67, 0.67, 1)
hfield_color = (0, 0, 1, 1)
hfield_color_z0 = (1, 1, 1, 1)
hfield_color_arrow = (0.67, 0.67, 1, 1)
linewidth = 4.0
linewidth2Dpol = 2.0
linewidth2Defieldvector = 10.0
# Make plots
plt_e = gl.GLLinePlotItem(pos=pts_e, mode='line_strip', color=efield_color, width=linewidth, antialias=True)
wGL.addItem(plt_e)
#plt_e_lines = gl.GLLinePlotItem(pos=pts_e_lines, mode='lines', color=efield_color, width=linewidth, antialias=True)
#wGL.addItem(plt_e_lines)
plt_e_z0 = gl.GLLinePlotItem(pos=pts_e_z0, mode='line_strip', color=efield_color_z0, width=linewidth2Dpol, antialias=True)
wGL.addItem(plt_e_z0)
plt_e_arrow = gl.GLLinePlotItem(pos=pts_e_arrow, mode='line_strip', color=efield_color_arrow, width=linewidth2Defieldvector, antialias=True)
wGL.addItem(plt_e_arrow)
plt_h = gl.GLLinePlotItem(pos=pts_h, mode='line_strip', color=hfield_color, width=linewidth, antialias=True)
wGL.addItem(plt_h)
#plt_h_lines = gl.GLLinePlotItem(pos=pts_h_lines, mode='lines', color=hfield_color, width=linewidth, antialias=True)
#wGL.addItem(plt_h_lines)
plt_h_z0 = gl.GLLinePlotItem(pos=pts_h_z0, mode='line_strip', color=hfield_color_z0, width=linewidth2Dpol, antialias=True)
wGL.addItem(plt_h_z0)
plt_h_arrow = gl.GLLinePlotItem(pos=pts_h_arrow, mode='line_strip', color=hfield_color_arrow, width=linewidth2Defieldvector, antialias=True)
wGL.addItem(plt_h_arrow)
# Start with H-field items as invisible (toggled by the checkbox in update())
plt_h.setVisible(False)
#plt_h_lines.setVisible(False)
plt_h_z0.setVisible(False)
plt_h_arrow.setVisible(False)
# Add lines to visually define axes
x_length = 1.1
y_length = 1.1
z_length = 10
linewidthaxis = 1.0
axis_color = (32, 32, 32, 40)
## make z-axis
zaxis = np.linspace(-z_length,z_length,10)
x_zaxis = np.zeros(10)
y_zaxis = np.zeros(10)
pts_zaxis = np.vstack([x_zaxis,zaxis,y_zaxis]).transpose()
plt_zaxis = gl.GLLinePlotItem(pos=pts_zaxis, color=axis_color, width=linewidthaxis, antialias=True)
#wGL.addItem(plt_zaxis)
## make y-axis
yaxis = np.linspace(-y_length,y_length,10)
x_yaxis = np.zeros(10)
z_yaxis = np.zeros(10)
pts_yaxis = np.vstack([yaxis,z_yaxis,x_yaxis]).transpose()
plt_yaxis = gl.GLLinePlotItem(pos=pts_yaxis, color=axis_color, width=linewidthaxis, antialias=True)
wGL.addItem(plt_yaxis)
## make x-axis
xaxis = np.linspace(-x_length,x_length,10)
y_xaxis = np.zeros(10)
z_xaxis = np.zeros(10)
pts_xaxis = np.vstack([y_xaxis,z_xaxis,xaxis]).transpose()
plt_xaxis = gl.GLLinePlotItem(pos=pts_xaxis, color=axis_color, width=linewidthaxis, antialias=True)
wGL.addItem(plt_xaxis)
# make image for x-y plane
image_shape = (2,2)
# NOTE(review): np.int was removed in NumPy 1.24; plain int (or np.int32)
# is required on modern NumPy -- confirm the pinned NumPy version.
uniform_values = np.ones(image_shape, dtype=np.int) * 255
print(uniform_values)
uniform_image_transparent = pg.makeARGB(uniform_values)[0]
uniform_image_transparent[:,:,:] = 255
uniform_image_transparent[:,:,3] = 80
print(uniform_image_transparent)
v1 = gl.GLImageItem(uniform_image_transparent)
v1.translate(-image_shape[0]/2., -image_shape[1]/2., 0)
v1.rotate(90, 1,0,0)
wGL.addItem(v1)
# Set up some animation parameters
frametime = 50 # frame refresh time in ms
velocity = 1./frametime
counter = 0
# Function to update scene for each frame
def update():
    """Timer callback: recompute both field curves for the current frame and
    slider angles, push the new points into the GL plot items, and toggle the
    H-field visibility from the checkbox state."""
    global z, z0, velocity, counter, amplitude
    global plt_e, rot_efield_coord, plt_e_z0, plt_e_arrow #, plt_e_lines
    global plt_h, plt_h_z0, plt_h_arrow #, plt_h_lines
    global psi_deg, delta_deg, degtorad
    counter +=1
    # Fraction of a period; note this local is (confusingly) named `time`.
    time = float(counter)/frametime % 1
    # efield_arbpol returns z unchanged, so the global z keeps its values.
    x, y, z = efield_arbpol(time,z,amplitude,psi_deg*degtorad,delta_deg*degtorad)
    pts_e = np.vstack([x,y,z]).transpose()
    pts_e_lines = preptomakelines(pts_e)
    pts_e = np.dot(pts_e, rot_efield_coord)
    #pts_e_lines = np.dot(pts_e_lines, rot_efield_coord)
    plt_e.setData(pos=pts_e)
    #plt_e_lines.setData(pos=pts_e_lines)
    pts_e_z0 = np.vstack([x,y,z0]).transpose()
    pts_e_z0 = np.dot(pts_e_z0, rot_efield_coord)
    plt_e_z0.setData(pos=pts_e_z0)
    pts_e_arrow = np.array( [[0.0, 0.0, 0.0], pts_e_z0[int(len(pts_e_z0)/2.0)]] )
    plt_e_arrow.setData(pos=pts_e_arrow)
    pts_h = np.vstack([-y,x,z]).transpose()
    pts_h_lines = preptomakelines(pts_h)
    pts_h = np.dot(pts_h, rot_efield_coord)
    #pts_h_lines = np.dot(pts_h_lines, rot_efield_coord)
    plt_h.setData(pos=pts_h)
    #plt_h_lines.setData(pos=pts_h_lines)
    pts_h_z0 = np.vstack([-y,x,z0]).transpose()
    pts_h_z0 = np.dot(pts_h_z0, rot_efield_coord)
    plt_h_z0.setData(pos=pts_h_z0)
    pts_h_arrow = np.array( [[0.0, 0.0, 0.0], pts_h_z0[int(len(pts_h_z0)/2.0)]] )
    plt_h_arrow.setData(pos=pts_h_arrow)
    # Poor man's state updating
    if hfield_checkbox.isChecked():
        plt_h.setVisible(True)
        #plt_h_lines.setVisible(True)
        plt_h_z0.setVisible(True)
        plt_h_arrow.setVisible(True)
    else:
        plt_h.setVisible(False)
        #plt_h_lines.setVisible(False)
        plt_h_z0.setVisible(False)
        plt_h_arrow.setVisible(False)
# Set up timer for animation (50 ms period matches frametime above)
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
## Start the Qt event loop (blocks until the window is closed)
app.exec_()
| 10,245 | 4,311 |
import numpy as np
from FEMpy import Mesh, FEBasis, Assemblers
# Shared fixtures: meshes and matching basis sets on the unit interval /
# unit square, with mesh size h = 1/2.
mesh_1D_linear = Mesh.Interval1D(0, 1, 1/2, 'linear')
basis_1D_linear = FEBasis.IntervalBasis1D('linear')
mesh_1D_quadratic = Mesh.Interval1D(0, 1, 1/2, 'quadratic')
basis_1D_quadratic = FEBasis.IntervalBasis1D('quadratic')
mesh_2D_triangular_linear = Mesh.TriangularMesh2D(0, 1, 0, 1, 1/2, 1/2, 'linear')
# NOTE(review): name has a double underscore ("2D__triangular"); kept as-is
# since the tests below reference it.
basis_2D__triangular_linear = FEBasis.TriangularBasis2D('linear')
def coefficient_or_source_function(x):
    """Constant PDE coefficient / source term: always 1, independent of x."""
    _ = x  # position is ignored; the coefficient is uniform
    return 1
def test_matrix_assembly_1d_linear():
    """Stiffness matrix on the linear 1D mesh matches the hand computation."""
    expected = np.array([[2., -2., 0.],
                         [-2., 4., -2.],
                         [0., -2., 2.]])
    result = Assemblers.assemble_matrix(coefficient_or_source_function, mesh_1D_linear,
                                        basis_1D_linear, basis_1D_linear,
                                        derivative_order_trial=1, derivative_order_test=1)
    assert np.allclose(result.toarray(), expected)
def test_matrix_assembly_1d_quadratic():
    """Stiffness matrix on the quadratic 1D mesh matches the hand computation."""
    expected = np.array([[4.6667, -5.3333, 0.6667, 0., 0.],
                         [-5.3333, 10.6667, -5.3333, 0., 0.],
                         [0.6667, -5.3333, 9.3333, -5.3333, 0.6667],
                         [0., 0., -5.3333, 10.6667, -5.3333],
                         [0., 0., 0.6667, -5.3333, 4.6667]])
    result = Assemblers.assemble_matrix(coefficient_or_source_function, mesh_1D_quadratic,
                                        basis_1D_quadratic, basis_1D_quadratic,
                                        derivative_order_trial=1, derivative_order_test=1)
    assert np.allclose(result.toarray(), expected, rtol=1e-4, atol=1e-7)
def test_matrix_assembly_2d_linear():
    """Matrix on the 2D triangular linear mesh matches the hand computation."""
    expected = np.array([[0.5, 0., 0., -0.5, 0., 0., 0., 0., 0.],
                         [0., 1., 0., 0., -1., 0., 0., 0., 0.],
                         [0., 0., 0.5, 0., 0., -0.5, 0., 0., 0.],
                         [-0.5, 0., 0., 1., 0., 0., -0.5, 0., 0.],
                         [0., -1., 0., 0., 2., 0., 0., -1., 0.],
                         [0., 0., -0.5, 0., 0., 1., 0., 0., -0.5],
                         [0., 0., 0., -0.5, 0., 0., 0.5, 0., 0.],
                         [0., 0., 0., 0., -1., 0., 0., 1., 0.],
                         [0., 0., 0., 0., 0., -0.5, 0., 0., 0.5]])
    result = Assemblers.assemble_matrix(coefficient_or_source_function, mesh_2D_triangular_linear,
                                        basis_2D__triangular_linear, basis_2D__triangular_linear,
                                        derivative_order_trial=(1, 0), derivative_order_test=(1, 0))
    assert np.allclose(result.toarray(), expected)
# test_matrix_assembly_2d_quadratic omitted because the matrix is too large to type by hand.
def test_vector_assembly_1d_linear():
    """Load vector on the 2-element linear interval mesh."""
    assembled = Assemblers.assemble_vector(
        coefficient_or_source_function,
        mesh_1D_linear,
        basis_1D_linear,
        derivative_order_test=0,
    )
    assert np.allclose(assembled, np.array([0.25, 0.5, 0.25]))
def test_vector_assembly_1d_quadratic():
    """Load vector on the 2-element quadratic interval mesh."""
    expected = np.array([0.0833, 0.3333, 0.1667, 0.3333, 0.0833])
    assembled = Assemblers.assemble_vector(
        coefficient_or_source_function,
        mesh_1D_quadratic,
        basis_1D_quadratic,
        derivative_order_test=0,
    )
    # Reference values were typed to 4 decimals, hence the loose tolerances.
    assert np.allclose(assembled, expected, rtol=1e-3, atol=1e-6)
def test_vector_assembly_2d_linear():
    """Load vector on the 2x2 linear triangular mesh."""
    expected = np.array(
        [0.0417, 0.1250, 0.0833, 0.1250, 0.25, 0.1250, 0.0833, 0.1250, 0.0417])
    assembled = Assemblers.assemble_vector(
        coefficient_or_source_function,
        mesh_2D_triangular_linear,
        basis_2D__triangular_linear,
        derivative_order_test=(0, 0),
    )
    # Reference values were typed to 4 decimals, hence the loose tolerances.
    assert np.allclose(assembled, expected, rtol=1e-3, atol=1e-6)
# test_vector_assembly_2d_quadratic omitted because the vector is too large to type by hand.
| 4,102 | 1,567 |
# Takes as input a numpy array of 3 parameters per article and an output array of emotions per text.
| 101 | 28 |
#
# Copyright (C) 2017-2020 Dimitar Toshkov Zhekov <dimitar.zhekov@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import re
import codecs
from collections import OrderedDict
from enum import IntEnum, unique
import fnutil
# -- Width --
# Absolute bounds used when parsing widths: device widths (DWIDTH, pixels)
# and scalable widths (SWIDTH) — see Width.parse_d / Width.parse_s below.
DPARSE_LIMIT = 512
SPARSE_LIMIT = 32000
class Width:
    """An (x, y) width pair, as used by the BDF SWIDTH and DWIDTH keywords."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    @staticmethod
    def parse(name, value, limit):
        """Parse an 'x y' decimal pair from *value*, each bounded by +/-limit."""
        words = fnutil.split_words(name, value, 2)
        x = fnutil.parse_dec(name + '.x', words[0], -limit, limit)
        y = fnutil.parse_dec(name + '.y', words[1], -limit, limit)
        return Width(x, y)

    @staticmethod
    def parse_s(name, value):
        """Parse a scalable width (SWIDTH) pair."""
        return Width.parse(name, value, SPARSE_LIMIT)

    @staticmethod
    def parse_d(name, value):
        """Parse a device width (DWIDTH) pair."""
        return Width.parse(name, value, DPARSE_LIMIT)

    def __str__(self):
        return '%d %d' % (self.x, self.y)
# -- BXX --
class BBX:
    """A BDF bounding box: width/height in pixels plus x/y offsets."""

    def __init__(self, width, height, xoff, yoff):
        self.width = width
        self.height = height
        self.xoff = xoff
        self.yoff = yoff

    @staticmethod
    def parse(name, value):
        """Parse 'width height xoff yoff' from *value* with range checks."""
        words = fnutil.split_words(name, value, 4)
        width = fnutil.parse_dec('width', words[0], 1, DPARSE_LIMIT)
        height = fnutil.parse_dec('height', words[1], 1, DPARSE_LIMIT)
        xoff = fnutil.parse_dec('bbxoff', words[2], -DPARSE_LIMIT, DPARSE_LIMIT)
        yoff = fnutil.parse_dec('bbyoff', words[3], -DPARSE_LIMIT, DPARSE_LIMIT)
        return BBX(width, height, xoff, yoff)

    def row_size(self):
        """Bytes per bitmap row: width in bits rounded up to whole bytes."""
        return (self.width + 7) >> 3

    def __str__(self):
        return '%d %d %d %d' % (self.width, self.height, self.xoff, self.yoff)
# -- Props --
def skip_comments(line):
    """Return *line*, or None when it is a BDF COMMENT line (so it is dropped)."""
    if line[:7] == b'COMMENT':
        return None
    return line
class Props(OrderedDict):
    """Ordered name -> bytes property map with BDF-style parsing helpers."""

    def __iter__(self):
        # Iterate (name, value) pairs rather than just the keys.
        return iter(self.items())

    def read(self, input, name, callback=None):
        """Read the next non-comment line from *input* and parse it as *name*."""
        return self.parse(input.read_lines(skip_comments), name, callback)

    def parse(self, line, name, callback=None):
        """Require *line* to start with *name*; store and return its value.

        The value is the rest of the line with leading whitespace stripped,
        optionally converted by *callback(name, value)*.
        """
        if not line or not line.startswith(bytes(name, 'ascii')):
            raise Exception(name + ' expected')
        value = line[len(name):].lstrip()
        self[name] = value
        if callback is None:
            return value
        return callback(name, value)

    def set(self, name, value):
        """Store *value*, converting non-bytes values to their ASCII repr."""
        if isinstance(value, (bytes, bytearray)):
            self[name] = value
        else:
            self[name] = bytes(str(value), 'ascii')
# -- Base --
class Base:
    """Shared state for Font and Char: a property map plus a bounding box."""

    def __init__(self):
        self.props = Props()  # parsed keyword -> raw bytes value, in file order
        self.bbx = None  # BBX, set once BBX/FONTBOUNDINGBOX has been parsed
# -- Char
HEX_BYTES = (48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 65, 66, 67, 68, 69, 70)
class Char(Base):
    """A single BDF glyph: encoding, widths, bounding box and bitmap bytes."""

    def __init__(self):
        Base.__init__(self)
        self.code = -1  # ENCODING value; -1 until read
        self.swidth = None  # scalable width (Width)
        self.dwidth = None  # device width (Width)
        self.data = None  # raw bitmap bytes: bbx.height rows of bbx.row_size() bytes

    def bitmap(self):
        """Return the bitmap as uppercase hex text, one row per line."""
        bitmap = ''
        row_size = self.bbx.row_size()
        for index in range(0, len(self.data), row_size):
            bitmap += self.data[index : index + row_size].hex() + '\n'
        return bytes(bitmap, 'ascii').upper()

    def _read(self, input):
        """Parse one STARTCHAR ... ENDCHAR section from *input*; returns self."""
        # HEADER: fixed keyword order per the BDF glyph layout.
        self.props.read(input, 'STARTCHAR')
        self.code = self.props.read(input, 'ENCODING', fnutil.parse_dec)
        self.swidth = self.props.read(input, 'SWIDTH', Width.parse_s)
        self.dwidth = self.props.read(input, 'DWIDTH', Width.parse_d)
        self.bbx = self.props.read(input, 'BBX', BBX.parse)
        line = input.read_lines(skip_comments)
        # ATTRIBUTES is optional; consume it when present.
        if line and line.startswith(b'ATTRIBUTES'):
            self.props.parse(line, 'ATTRIBUTES')
            line = input.read_lines(skip_comments)
        # BITMAP: parse() returns the remainder after the keyword; any trailing
        # text on the BITMAP line is treated as an error.
        if self.props.parse(line, 'BITMAP'):
            raise Exception('BITMAP expected')
        row_len = self.bbx.row_size() * 2  # two hex digits per byte
        self.data = bytearray()
        for _ in range(0, self.bbx.height):
            line = input.read_lines(skip_comments)
            if not line:
                raise Exception('bitmap data expected')
            if len(line) == row_len:
                self.data += codecs.decode(line, 'hex')
            else:
                raise Exception('invalid bitmap length')
        # FINAL
        if input.read_lines(skip_comments) != b'ENDCHAR':
            raise Exception('ENDCHAR expected')
        return self

    @staticmethod
    def read(input):
        """Read and return one glyph from *input*."""
        return Char()._read(input)  # pylint: disable=protected-access

    def write(self, output):
        """Write the glyph header properties, hex bitmap and ENDCHAR."""
        for [name, value] in self.props:
            output.write_prop(name, value)
        output.write_line(self.bitmap() + b'ENDCHAR')
# -- Font --
@unique
class XLFD(IntEnum):
    """1-based indices of the fields of an XLFD font name split on '-'.

    Index 0 is the empty string before the leading dash (see Font._read).
    """
    FOUNDRY = 1
    FAMILY_NAME = 2
    WEIGHT_NAME = 3
    SLANT = 4
    SETWIDTH_NAME = 5
    ADD_STYLE_NAME = 6
    PIXEL_SIZE = 7
    POINT_SIZE = 8
    RESOLUTION_X = 9
    RESOLUTION_Y = 10
    SPACING = 11
    AVERAGE_WIDTH = 12
    CHARSET_REGISTRY = 13
    CHARSET_ENCODING = 14


# Maximum number of glyphs accepted on the CHARS line (see Font._read).
CHARS_MAX = 65535
class Font(Base):
    """A parsed BDF 2.1 font: XLFD name fields, global properties, glyphs."""

    def __init__(self):
        Base.__init__(self)
        self.xlfd = []  # the 15 '-'-split fields of the FONT name (index 0 empty)
        self.chars = []  # Char glyphs, in file order
        self.default_code = -1  # DEFAULT_CHAR encoding, or -1 when absent

    @property
    def bold(self):
        """True when the XLFD weight field contains 'bold' (case-insensitive)."""
        return b'bold' in self.xlfd[XLFD.WEIGHT_NAME].lower()

    @property
    def italic(self):
        """True for italic ('I') or oblique ('O') slant."""
        return self.xlfd[XLFD.SLANT] in [b'I', b'O']

    @property
    def proportional(self):
        """True when the XLFD spacing field is 'P'."""
        return self.xlfd[XLFD.SPACING] == b'P'

    def _read(self, input):
        """Parse a whole STARTFONT ... ENDFONT stream from *input*; returns self."""
        # HEADER
        line = input.read_line()
        if self.props.parse(line, 'STARTFONT') != b'2.1':
            raise Exception('STARTFONT 2.1 expected')
        # FONT must be an XLFD name: a leading '-' followed by 14 fields.
        self.xlfd = self.props.read(input, 'FONT', lambda name, value: value.split(b'-', 15))
        if len(self.xlfd) != 15 or self.xlfd[0] != b'':
            raise Exception('non-XLFD font names are not supported')
        self.props.read(input, 'SIZE')
        self.bbx = self.props.read(input, 'FONTBOUNDINGBOX', BBX.parse)
        line = input.read_lines(skip_comments)
        # Optional STARTPROPERTIES ... ENDPROPERTIES section.
        if line and line.startswith(b'STARTPROPERTIES'):
            num_props = self.props.parse(line, 'STARTPROPERTIES', fnutil.parse_dec)
            for _ in range(0, num_props):
                line = input.read_lines(skip_comments)
                if line is None:
                    raise Exception('property expected')
                # A property is NAME followed by a number or a quoted string.
                match = re.fullmatch(br'(\w+)\s+([-\d"].*)', line)
                if not match:
                    raise Exception('invalid property format')
                name = str(match.group(1), 'ascii')
                value = match.group(2)
                if self.props.get(name) is not None:
                    raise Exception('duplicate property')
                if name == 'DEFAULT_CHAR':
                    self.default_code = fnutil.parse_dec(name, value)
                self.props[name] = value
            if self.props.read(input, 'ENDPROPERTIES') != b'':
                raise Exception('ENDPROPERTIES expected')
            line = input.read_lines(skip_comments)
        # GLYPHS
        num_chars = fnutil.parse_dec('CHARS', self.props.parse(line, 'CHARS'), 1, CHARS_MAX)
        for _ in range(0, num_chars):
            self.chars.append(Char.read(input))
        # DEFAULT_CHAR, when given, must refer to an existing glyph code
        # (when default_code is -1 the next() default matches and this passes).
        if next((char.code for char in self.chars if char.code == self.default_code), -1) != self.default_code:
            raise Exception('invalid DEFAULT_CHAR')
        # FINAL
        if input.read_lines(skip_comments) != b'ENDFONT':
            raise Exception('ENDFONT expected')
        if input.read_line() is not None:
            raise Exception('garbage after ENDFONT')
        return self

    @staticmethod
    def read(input):
        """Read and return a whole BDF font from *input*."""
        return Font()._read(input)  # pylint: disable=protected-access

    def write(self, output):
        """Write the font header properties, all glyphs and ENDFONT."""
        for [name, value] in self.props:
            output.write_prop(name, value)
        for char in self.chars:
            char.write(output)
        output.write_line(b'ENDFONT')
| 7,420 | 3,082 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import gym
import time
import math
import numpy as np
from gym import spaces
from gym.utils import seeding
from gym_brt.quanser import QubeServo2, QubeServo2Simulator
from gym_brt.control import QubeFlipUpControl
# theta, alpha: positions, velocities, accelerations
# Per-component upper bounds of the observation vector; order matches
# STATE_KEYS below.  The lower bounds are symmetric.
OBSERVATION_HIGH = np.asarray([
    1, 1, 1, 1,  # angles (cos/sin pairs, hence bounded by 1)
    np.pi / 4, np.pi / 4,  # velocities
    np.pi / 4, np.pi / 4,  # accelerations
    4100,  # tach0
    0.2,  # sense
], dtype=np.float64)
OBSERVATION_LOW = -OBSERVATION_HIGH

# Motor saturation voltage; actions are clipped to +/- this value in _step.
MAX_MOTOR_VOLTAGE = 8.0
ACTION_HIGH = np.asarray([MAX_MOTOR_VOLTAGE], dtype=np.float64)
ACTION_LOW = -ACTION_HIGH

# Names of the observation vector entries, in order (see _get_state).
STATE_KEYS = [
    'COS_THETA',
    'SIN_THETA',
    'COS_ALPHA',
    'SIN_ALPHA',
    'THETA_VELOCITY',
    'ALPHA_VELOCITY',
    'THETA_ACCELERATION',
    'ALPHA_ACCELERATION',
    'TACH0',
    'SENSE'
]
def normalize_angle(theta):
    """Wrap an angle in radians into the half-open interval [-pi, pi)."""
    shifted = np.mod(theta + np.pi, 2 * np.pi)
    return shifted - np.pi
class QubeBaseReward(object):
    """Base reward function; subclasses must implement __call__."""

    def __init__(self):
        # Box of admissible target voltages, mirroring the action bounds.
        self.target_space = spaces.Box(
            low=ACTION_LOW, high=ACTION_HIGH, dtype=np.float32)

    def __call__(self, state, action):
        # Subclass responsibility.
        raise NotImplementedError
class QubeBaseEnv(gym.Env):
    """Base Gym environment for the Quanser Qube Servo 2 rotary pendulum.

    Wraps either the physical device (QubeServo2) or its simulator, and
    exposes the 10-element observation described by STATE_KEYS plus a
    single motor-voltage action.
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 50
    }

    def __init__(self,
                 frequency=1000,
                 use_simulator=False):
        """Open the Qube (hardware or simulator) sampling at *frequency* Hz."""
        self.observation_space = spaces.Box(
            OBSERVATION_LOW, OBSERVATION_HIGH,
            dtype=np.float32)
        self.action_space = spaces.Box(
            ACTION_LOW, ACTION_HIGH,
            dtype=np.float32)
        # Default reward raises NotImplementedError; subclasses replace it.
        self.reward_fn = QubeBaseReward()
        # Internal states of the velocity estimation filters (see _step).
        self._theta_velocity_cstate = 0
        self._alpha_velocity_cstate = 0
        self._theta_velocity = 0
        self._alpha_velocity = 0
        self._frequency = frequency
        # Open the Qube
        if use_simulator:
            self.qube = QubeServo2Simulator(
                euler_steps=1,
                frequency=frequency)
        else:
            self.qube = QubeServo2(frequency=frequency)
        # NOTE(review): __enter__ is invoked manually, so close() (or using
        # this env as a context manager) is required to release the device.
        self.qube.__enter__()
        self.seed()
        self.viewer = None
        self.use_simulator = use_simulator

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def seed(self, seed=None):
        """Seed the env RNG; returns the list of used seeds (Gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _step(self, action):
        """Apply the clipped motor voltage and refresh all derived signals."""
        motor_voltages = np.clip(np.array(
            [action[0]], dtype=np.float64), ACTION_LOW, ACTION_HIGH)
        currents, encoders, others = self.qube.action(motor_voltages)
        self._sense = currents[0]
        self._tach0 = others[0]
        # Calculate alpha, theta, alpha_velocity, and theta_velocity
        # Encoder counts -> radians (2048 counts per revolution); theta's
        # sign is flipped relative to the encoder.
        self._theta = encoders[0] * (-2.0 * np.pi / 2048)
        alpha_un = encoders[1] * (2.0 * np.pi / 2048) # Alpha without normalizing
        self._alpha = (alpha_un % (2.0 * np.pi)) - np.pi # Normalized and shifted alpha
        # Filtered derivative estimates of theta and alpha.
        # NOTE(review): the -2500/50 gains look like a discretized 50 rad/s
        # derivative filter -- confirm against the original Quanser model.
        theta_velocity = -2500 * self._theta_velocity_cstate + 50 * self._theta
        alpha_velocity = -2500 * self._alpha_velocity_cstate + 50 * alpha_un
        self._theta_velocity_cstate += (-50 * self._theta_velocity_cstate + self._theta) / self._frequency
        self._alpha_velocity_cstate += (-50 * self._alpha_velocity_cstate + alpha_un) / self._frequency
        # TODO: update using the transfer function
        # Accelerations via finite difference of successive velocity estimates.
        self._theta_acceleration = (theta_velocity - self._theta_velocity) * self._frequency
        self._alpha_acceleration = (alpha_velocity - self._alpha_velocity) * self._frequency
        self._theta_velocity = theta_velocity
        self._alpha_velocity = alpha_velocity
        return self._get_state()

    def _get_state(self):
        """Assemble the 10-element observation (order matches STATE_KEYS)."""
        state = np.asarray([
            np.cos(self._theta),
            np.sin(self._theta),
            np.cos(self._alpha),
            np.sin(self._alpha),
            self._theta_velocity,
            self._alpha_velocity,
            self._theta_acceleration,
            self._alpha_acceleration,
            self._tach0,
            self._sense,
        ], dtype=np.float32)
        return state

    def _flip_up(self, early_quit=False):
        """Run classic control for flip-up until the pendulum is inverted for
        a set amount of time. Assumes that initial state is stationary
        downwards.

        Args:
            early_quit: Quit if flip up doesn't succeed after set amount of
                time

        NOTE(review): early_quit and the `sample` counter are currently
        unused -- the loop only exits once the hold condition is met.
        """
        control = QubeFlipUpControl(env=self, sample_freq=self._frequency)
        time_hold = 1.0 * self._frequency # Number of samples to hold upright
        sample = 0 # Samples since control system started
        samples_upright = 0 # Consecutive samples pendulum is upright
        action = self.action_space.sample()
        state, _, _, _ = self.step([1.0])
        while True:
            action = control.action(state)
            state, _, _, _ = self.step(action)
            # Break if pendulum is inverted
            # NOTE(review): no abs() here -- strongly negative alpha also
            # satisfies this test; confirm that is intended.
            if self._alpha < (10 * np.pi / 180):
                if samples_upright > time_hold:
                    break
                samples_upright += 1
            else:
                samples_upright = 0
            sample += 1
        return state

    def _dampen_down(self, min_hold_time=0.5):
        """Apply zero voltage until the pendulum is stationary for
        *min_hold_time* seconds, then return the current state."""
        action = np.zeros(
            shape=self.action_space.shape,
            dtype=self.action_space.dtype)
        time_hold = min_hold_time * self._frequency
        samples_downwards = 0 # Consecutive samples pendulum is stationary
        while True:
            state, _, _, _ = self.step(action)
            # Break if pendulum is stationary
            # (velocities and accelerations -- state[4:8] -- near zero).
            ref_state = [0., 0., 0., 0.]
            if np.allclose(state[4:8], ref_state, rtol=1e-02, atol=1e-03):
                if samples_downwards > time_hold:
                    break
                samples_downwards += 1
            else:
                samples_downwards = 0
        return self._get_state()

    def flip_up(self, early_quit=False, time_out=5, min_hold_time=1):
        """Public flip-up entry point.

        NOTE(review): time_out and min_hold_time are accepted but not
        forwarded to _flip_up.
        """
        return self._flip_up(early_quit=early_quit)

    def dampen_down(self):
        """Public dampen-down entry point."""
        return self._dampen_down()

    def reset(self):
        """Bring the pendulum to rest at the bottom and return the state."""
        # Start the pendulum stationary at the bottom (stable point)
        self.dampen_down()
        action = np.zeros(
            shape=self.action_space.shape,
            dtype=self.action_space.dtype)
        return self.step(action)[0]

    def step(self, action):
        """Standard Gym step: returns (state, reward, done, info).

        done is always False; episode termination is left to wrappers or
        subclasses.
        """
        state = self._step(action)
        reward = self.reward_fn(state, action)
        done = False
        info = {}
        return state, reward, done, info

    def render(self, mode='human'):
        # Simple and *NOT* physically accurate rendering
        # All sizes are expressed out of 100 and scaled to a 600px window.
        screen = screen_width = screen_height = 600
        scale = 0.5 * screen / 100.0 # Everything is scaled out of 100
        qubewidth = 10.0 * scale
        qubeheight = 10.0 * scale
        origin = (screen_width/2, screen_height/2)
        arm_len = 40 * scale
        arm_width = 1.0 * scale
        pen_len = 40 * scale
        pen_width = 2.0 * scale

        def pen_origin(theta, origin=origin, len=arm_len):
            # Pendulum pivot position for a given arm angle.
            x = origin[0] - len * math.sin(theta)
            y = origin[1] + len * math.cos(theta)
            return x, y

        if self.viewer is None:
            # Lazily create the viewer and static geometry on first render.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            # draw qube base
            l,r,t,b = qubewidth/2, -qubewidth/2, -qubeheight/2, qubeheight/2
            qube = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            qube.set_color(0.0, 0.0, 0.0)
            qubetrans = rendering.Transform(translation=origin)
            qube.add_attr(qubetrans)
            self.viewer.add_geom(qube)
            # draw qube arm
            l,r,t,b = arm_width/2, -arm_width/2, 0, arm_len
            arm = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            arm.set_color(0.5, 0.5, 0.5)
            self.armtrans = rendering.Transform(translation=origin)
            arm.add_attr(self.armtrans)
            self.viewer.add_geom(arm)
            arm_trace = rendering.make_circle(radius=arm_len, filled=False)
            armtracetrans = rendering.Transform(translation=origin)
            arm_trace.set_color(0.5, 0.5, 0.5)
            arm_trace.add_attr(armtracetrans)
            self.viewer.add_geom(arm_trace)
            # draw qube pendulum
            pen_orgin = (origin[0], origin[1] + arm_len)
            l,r,t,b = pen_width/2, -pen_width/2, 0, pen_len
            pen = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            pen.set_color(1.0, 0.0, 0.0)
            self.pentrans = rendering.Transform(
                translation=pen_orgin,
                rotation=math.pi/10)
            pen.add_attr(self.pentrans)
            self.viewer.add_geom(pen)
        # Update the moving parts from the latest measured angles.
        self.armtrans.set_rotation(np.pi+self._theta)
        self.pentrans.set_translation(*pen_origin(np.pi+self._theta))
        self.pentrans.set_rotation(self._alpha)
        return self.viewer.render(return_rgb_array = mode=='rgb_array')

    def close(self, type=None, value=None, traceback=None):
        # Safely close the Qube
        # NOTE(review): __exit__ is called with keyword arguments; confirm the
        # QubeServo2 context manager accepts them (the protocol is positional).
        self.qube.__exit__(type=type, value=value, traceback=traceback)
        if self.viewer: self.viewer.close()
| 9,488 | 3,256 |
import tornado.web
import threading
class BaseService:
    """Base for services that must be initialised before handling requests."""

    # Class-level defaults; init() replaces them with instance attributes.
    initialised = False
    core = None
    config = None
    lock = None

    def init(self, core, config):
        """Attach the core and config objects and mark the service as ready."""
        self.core = core
        self.config = config
        self.lock = threading.Lock()
        self.initialised = True

    def check_init(self):
        """Abort the request with HTTP 500 when init() was never called."""
        if not self.initialised:
            raise tornado.web.HTTPError(500)
| 397 | 122 |
#coding=utf-8
from werkzeug import import_string, cached_property
from functools import wraps
from flask import request,render_template,session,current_app,url_for
from datetime import timedelta,datetime
# from main.extensions import redis_store
from flask_sse import sse
# from urllib.parse import urljoin
# from urllib import parse
# from urlparse import urlparse, urljoin
import time
class LazyView(object):
    """Defers importing a view function until its first call.

    Keeps URL-map construction cheap: the dotted path is resolved via
    werkzeug's import_string only when the view is actually invoked.
    """

    def __init__(self, import_name):
        # Mimic the wrapped function's module/name so endpoint naming and
        # debugging output stay meaningful.
        self.__module__, self.__name__ = import_name.rsplit('.', 1)
        self.import_name = import_name

    @cached_property
    def view(self):
        # Resolved once, then cached on the instance by cached_property.
        return import_string(self.import_name)

    def __call__(self, *args, **kwargs):
        return self.view(*args, **kwargs)
def url(bp, url_rule, import_name, **options):
    """Register *url_rule* on blueprint *bp* with a lazily imported view.

    *import_name* is relative to the blueprint's module under main.views.
    """
    dotted_path = 'main.views.' + bp.name + '.' + import_name
    bp.add_url_rule(url_rule, view_func=LazyView(dotted_path), **options)
def templated(template=None):
    """Decorator: render the view's returned dict into *template*.

    When *template* is None the name is derived from the request endpoint
    ('bp.view' -> 'bp/view.html').  None becomes an empty context; any
    non-dict return value (e.g. a redirect) is passed through untouched.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            template_name = template
            if template_name is None:
                template_name = request.endpoint \
                    .replace('.', '/') + '.html'
            ctx = f(*args, **kwargs)
            if ctx is None:
                ctx = {}
            elif not isinstance(ctx, dict):
                return ctx
            return render_template(template_name, **ctx)
        return decorated_function
    return decorator
"""http://flask.pocoo.org/snippets/71/
Counting Online Users with Redis
"""
def mark_online(user_id):
    """Record *user_id* as active in the current per-minute Redis bucket.

    Keys expire ONLINE_LAST_MINUTES after the bucket's minute (plus slack).
    NOTE(review): the redis_store import is commented out at the top of this
    module, so calling this raises NameError until it is restored.
    """
    now = int(time.time())
    expires = now + (current_app.config['ONLINE_LAST_MINUTES'] * 60) + 10
    all_users_key = 'online-users/%d' % (now // 60)  # one set per minute
    user_key = 'user-activity/%s' % user_id
    p = redis_store.pipeline()
    p.sadd(all_users_key, user_id)
    p.set(user_key, now)
    p.expireat(all_users_key, expires)
    p.expireat(user_key, expires)
    p.execute()
def get_user_last_activity(user_id):
    """Return the user's last activity as a naive UTC datetime, or None."""
    timestamp = redis_store.get('user-activity/%s' % user_id)
    if timestamp is None:
        return None
    return datetime.utcfromtimestamp(int(timestamp))
def get_online_users():
    """Return the union of user ids active in the last ONLINE_LAST_MINUTES."""
    current_minute = int(time.time()) // 60
    bucket_keys = ['online-users/%d' % (current_minute - offset)
                   for offset in range(current_app.config['ONLINE_LAST_MINUTES'])]
    return redis_store.sunion(bucket_keys)
"""http://flask.pocoo.org/snippets/62/
Securely Redirect Back
"""
# def is_safe_url(target):
# ref_url = parse(request.host_url)
# test_url = parse(urljoin(request.host_url, target))
# return test_url.scheme in ('http', 'https') and \
# ref_url.netloc == test_url.netloc
# def get_redirect_target():
# for target in request.values.get('next'), request.referrer:
# if not target:
# continue
# if is_safe_url(target):
# return target
# def redirect_back(endpoint, **values):
# target = request.form['next']
# if not target or not is_safe_url(target):
# target = url_for(endpoint, **values)
# return redirect(target)
"""
return redirect_back('index')
""" | 3,178 | 1,043 |
'''
https://leetcode.com/problems/search-a-2d-matrix-ii/
240. Search a 2D Matrix II
Write an efficient algorithm that searches for a target value in an m x n integer matrix. The matrix has the following properties:
- Integers in each row are sorted in ascending from left to right.
- Integers in each column are sorted in ascending from top to bottom.
'''
'''
Accepted
'''
class Solution:
    def searchMatrix(self, matrix: [[int]], target: int) -> bool:
        """Return True if *target* occurs in *matrix*.

        Rows are sorted ascending left-to-right and columns ascending
        top-to-bottom.  Uses the staircase search from the top-right
        corner: each comparison discards a whole row or column, giving
        O(m + n) time and O(1) space (the original scanned columns for
        O(m * n) worst case and crashed on an empty matrix).
        """
        # Guard empty inputs ([] or [[]]) -- the original raised IndexError.
        if not matrix or not matrix[0]:
            return False
        row, col = 0, len(matrix[0]) - 1
        while row < len(matrix) and col >= 0:
            value = matrix[row][col]
            if value == target:
                return True
            if value > target:
                col -= 1  # everything below in this column is even larger
            else:
                row += 1  # everything left in this row is even smaller
        return False
# Ad-hoc smoke test: 30 is the bottom-right element, so this prints True.
matrix = [[1, 4, 7, 11, 15], [2, 5, 8, 12, 19], [3, 6, 9, 16, 22], [10, 13, 14, 17, 24], [18, 21, 23, 26, 30]]
target = 30
print(Solution().searchMatrix(matrix, target))
| 1,482 | 455 |
import numpy as np
import threading
def wrapToPi(angle):
    """
    Wrap a given angle in radians to the range -pi to pi.
    @param angle : The angle to be wrapped
    @param type angle : float
    @return : Wrapped angle
    @rtype : float
    """
    shifted = angle + np.pi
    return np.mod(shifted, 2.0 * np.pi) - np.pi
class AutomowEKF:
    """Extended Kalman filter for a differential-drive mower.

    State vector x_hat (7 elements):
      [0] easting, [1] northing, [2] heading (yaw, rad),
      [3], [4] per-wheel scale factors (defaults 0.159 -- presumably the
      left/right wheel radii, TODO confirm),
      [5] track width (default 0.5461 -- presumably meters, TODO confirm),
      [6] heading (yaw) bias.
    Inputs u are the two wheel speeds; measurements are GPS position and
    an AHRS/IMU heading.
    """

    __nx = 7 # Number of States in the Kalman Filter
    __ny_gps = 2 # Number of measurements from the GPS
    __ny_imu = 2 # Number of measurements from the IMU
    __nu = 2 # Number of inputs
    # NOTE(review): first timeUpdate() computes dt = time - 0, which can be
    # enormous for wall-clock timestamps.
    __prev_time = 0
    # Shared numpy dtype for all matrices (despite the name, not a time delta).
    __dt = np.double
    # GPS observes state elements 0 and 1 (position).
    C_gps = np.array([[1, 0, 0, 0, 0, 0, 0],
                      [0, 1, 0, 0, 0, 0, 0]], dtype=__dt)
    # IMU observes heading plus heading bias (elements 2 and 6), as a 1-D row.
    C_imu = np.array([0, 0, 1, 0, 0, 0, 1], dtype=__dt)

    def __init__(self,
                 x_hat_i,
                 P_i,
                 Q,
                 R_gps,
                 R_imu):
        """
        Initialize the Kalman Filter with a set of input arguments
        @param x_hat_i : The initial state of the Kalman Estimator
        @param type x_hat_i : (7, ) numpy.array, dtype=np.double
        @param P_i : The initial covariance matrix of the Kalman Estimator
        @param type P_i : (7, 7) numpy.array, dtype=np.double
        @param Q : The process noise covariance of the system
        @param type Q : (7, 7) numpy.array, dtype=np.double
        @param R_gps : The GPS measurement noise covariance
        @param type R_gps : (2, 2) numpy.array, dtype=np.double
        @param R_imu : The AHRS measurement noise covariance
        @param type R_imu : (1, 1) numpy.array, dtype=np.double
        """
        # Lock guards x_hat/P so getters can run from another thread.
        self.state_lock = threading.Lock()
        with self.state_lock:
            self.x_hat = x_hat_i
            self.P = P_i
        self.Q = Q
        self.R_gps = R_gps
        self.R_imu = R_imu
        # F: state-transition Jacobian; G: noise input matrix (filled by updateModel).
        self.F = np.zeros((self.__nx, self.__nx), dtype=self.__dt)
        self.G = np.zeros((self.__nx, self.__nx), dtype=self.__dt)

    @classmethod
    def fromDefault(cls):
        """
        Initialize the Kalman Filter with a set of default arguments
        """
        x_hat_i = np.array([0, 0, 0, 0.159, 0.159, 0.5461, 0], dtype=cls.__dt)
        P_i = np.diag(np.array([100, 100, 100, 0.0001, 0.0001, 0.0001, 0.0001], dtype=cls.__dt))
        Q = np.diag(np.array([0.1, 0.1, 0, 0, 0, 0, 0], dtype=cls.__dt))
        R_gps = np.eye(2, dtype=cls.__dt) * 0.02
        R_imu = np.eye(1, dtype=cls.__dt) * 0.02
        return cls(x_hat_i, P_i, Q, R_gps, R_imu)

    def updateModel(self, u, dt):
        """
        Update the process and process noise matricies of the model
        @param u : The current i
        @param type u : (2, ) numpy.array, dtype=np.double
        @param dt : The time delta from the previous time update
        @param type dt : np.float
        """
        # F = Jacobian of the motion model w.r.t. the state.
        self.F = np.eye(self.__nx, dtype=self.__dt)
        self.F[0, 2] = -0.5 * dt \
            * (self.x_hat[3] * u[0] + self.x_hat[4] * u[1]) \
            * np.sin(self.x_hat[2])
        self.F[0, 3] = 0.5 * dt * u[0] * np.cos(self.x_hat[2])
        self.F[0, 4] = 0.5 * dt * u[1] * np.cos(self.x_hat[2])
        self.F[1, 2] = 0.5 * dt \
            * (self.x_hat[3] * u[0] + self.x_hat[4] * u[1]) \
            * np.cos(self.x_hat[2])
        self.F[1, 3] = 0.5 * dt * u[0] * np.sin(self.x_hat[2])
        self.F[1, 4] = 0.5 * dt * u[1] * np.sin(self.x_hat[2])
        self.F[2, 3] = -1.0 * dt * u[0] / self.x_hat[5]
        self.F[2, 4] = dt * u[1] / self.x_hat[5]
        self.F[2, 5] = dt \
            * (self.x_hat[3] * u[0] - self.x_hat[4] * u[1]) \
            / np.power(self.x_hat[5], 2)
        # G maps process noise into the state.
        self.G = np.zeros((self.__nx, self.__nx), dtype=self.__dt)
        self.G[0, 0] = 0.5 * dt * self.x_hat[3] * np.cos(self.x_hat[2])
        self.G[0, 1] = 0.5 * dt * self.x_hat[4] * np.cos(self.x_hat[2])
        self.G[0, 3] = 0.5 * dt * u[0] * np.cos(self.x_hat[2])
        self.G[0, 4] = 0.5 * dt * u[1] * np.cos(self.x_hat[2])
        self.G[1, 0] = 0.5 * dt * self.x_hat[3] * np.sin(self.x_hat[2])
        self.G[1, 1] = 0.5 * dt * self.x_hat[4] * np.sin(self.x_hat[2])
        self.G[1, 3] = 0.5 * dt * u[0] * np.cos(self.x_hat[2])
        self.G[1, 4] = 0.5 * dt * u[1] * np.cos(self.x_hat[2])
        self.G[2, 0] = -1.0 * dt * self.x_hat[3] / self.x_hat[5]
        self.G[2, 1] = dt * self.x_hat[4] / self.x_hat[5]
        self.G[2, 2] = dt
        self.G[2, 3] = -1.0 * dt * self.x_hat[3] / self.x_hat[5]
        self.G[2, 4] = dt * self.x_hat[4] / self.x_hat[5]
        self.G[3, 3] = dt
        self.G[4, 4] = dt
        self.G[5, 5] = dt
        self.G[6, 6] = dt
        return

    def timeUpdate(self, u, time):
        """Propagate the state with wheel-speed input *u* at timestamp *time*.

        Returns (v, w): forward speed and turn rate used for the update.
        """
        dt = time - self.__prev_time
        self.__prev_time = time
        self.updateModel(u, dt)
        # Differential-drive kinematics: v = average scaled wheel speed,
        # w = scaled wheel speed difference over the track width.
        v = self.x_hat[4] / 2.0 * u[1] + self.x_hat[3] / 2.0 * u[0]
        w = self.x_hat[4] / self.x_hat[5] * u[1] - \
            self.x_hat[3] / self.x_hat[5] * u[0]
        with self.state_lock:
            # Midpoint integration of position, then covariance propagation.
            self.x_hat[0] += dt * v * np.cos(self.x_hat[2] + dt * w / 2.0)
            self.x_hat[1] += dt * v * np.sin(self.x_hat[2] + dt * w / 2.0)
            self.x_hat[2] += dt * w
            self.x_hat[2] = wrapToPi(self.x_hat[2])
            self.P = np.dot(self.F, np.dot(self.P, self.F.T)) \
                + np.dot(self.G, np.dot(self.Q, self.G.T))
        return v, w

    def measurementUpdateGPS(self, y, R):
        """Fuse a GPS position measurement *y* with covariance *R*.

        Returns (innovation, S, K) for diagnostics.
        """
        # NOTE(review): 'is' compares object identity, so this test against a
        # freshly built tuple is always False -- the reshape never happens.
        if y.shape is (2, ):
            y = y.reshape((1, 2))
        # NOTE(review): y.dtype is a dtype instance while np.double is a type,
        # so this identity test is always True and y is always re-cast.
        if y.dtype is not np.double:
            y = y.astype(np.double)
        innovation = y - np.dot(self.C_gps, self.x_hat)
        S = np.dot(self.C_gps, np.dot(self.P, self.C_gps.T))
        S += R
        K = np.dot(self.P, np.dot(self.C_gps.conj().T, np.linalg.inv(S)))
        with self.state_lock:
            self.x_hat = self.x_hat + np.dot(K, innovation)
            self.P = np.dot((np.eye(self.__nx) - np.dot(K, self.C_gps)), self.P)
        return innovation, S, K

    def measurementUpdateAHRS(self, y):
        """Fuse an AHRS heading measurement *y* (radians).

        Returns (innovation, S, K) for diagnostics.
        """
        y = wrapToPi(y)
        # if y.dtype is not np.double:
        #     y = y.astype(np.double)
        # Innovation is an angle difference, so it is re-wrapped.
        innovation = y - np.dot(self.C_imu, self.x_hat)
        innovation = wrapToPi(innovation)
        # C_imu is 1-D, so S is a scalar here.
        S = np.dot(self.C_imu, np.dot(self.P, self.C_imu.T))
        S += self.R_imu[0, 0]
        K = np.dot(self.P, self.C_imu.T / S)
        with self.state_lock:
            self.x_hat += K * innovation
            self.x_hat[2] = wrapToPi(self.x_hat[2])
            self.x_hat[6] = wrapToPi(self.x_hat[6])
            self.P = np.dot((np.eye(self.__nx) - \
                np.dot(K.reshape((self.__nx, 1)), self.C_imu.reshape((1, self.__nx)))), self.P)
        return innovation, S, K

    def getYaw(self):
        """Current heading estimate (radians)."""
        with self.state_lock:
            return self.x_hat[2]

    def getNorthing(self):
        """Current northing estimate."""
        with self.state_lock:
            return self.x_hat[1]

    def getEasting(self):
        """Current easting estimate."""
        with self.state_lock:
            return self.x_hat[0]

    def getYawBias(self):
        """Current heading-bias estimate (radians)."""
        with self.state_lock:
            return self.x_hat[6]

    def getStateString(self):
        """All 7 state elements as a comma-separated string."""
        with self.state_lock:
            string = ''
            for ii in range(7):
                string += str(self.x_hat[ii]) + ", "
            return string

    def getStateList(self):
        """The state vector as a flat Python list."""
        with self.state_lock:
            return self.x_hat.flatten().tolist()

    def getPList(self):
        """The covariance matrix flattened to a 1-D array."""
        with self.state_lock:
            return self.P.flatten()
| 7,503 | 3,140 |
'''Importing necessary modules'''
from urllib.parse import quote
import urllib
import requests
from bs4 import BeautifulSoup
'''Function to search word/phrase on oxford dictionary'''
def define(word):
    """Look up *word* on the Oxford dictionary site and return its definitions.

    Returns a numbered, formatted string of definitions (each prefixed with
    its part of speech), or an error message when the word is not found.
    """
    # Fix: the module-level 'import urllib' does not make urllib.request
    # available (only urllib.parse is imported above), so the original
    # urllib.request.urlopen call raised AttributeError.
    from urllib.request import urlopen

    # Oxford dictionary search query url (word must be percent-encoded)
    url = 'https://en.oxforddictionaries.com/definition/' + quote(word)
    # Fetch and parse the html file
    page = urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
    # Collected "(type)definition" strings
    lst = []
    # Each <section class="gramb"> holds the definitions for one word class
    meanings = soup.find_all('section', {'class': 'gramb'})
    for row in meanings:
        # Part of speech for this section (e.g. noun, verb)
        types = row.find('h3', {'class': 'ps pos'})
        ulist = row.find('ul', {'class': 'semb'})
        # find the li tags which contain the list of definitions
        word_defs = ulist.find_all('li')
        for defs in word_defs:
            # The main definition lives in <div class="trg"> -> <p> -> <span class="ind">
            mean_word = defs.find('div', {'class': 'trg'})
            if mean_word is not None:
                # Walk all <div class="trg"> children looking for the main text
                for mw in mean_word.findChildren():
                    if mw.get('class') == ['ind'] and mw.parent.name == 'p':
                        # Record the type and the main definition
                        lst.append('({}){}'.format(types.get_text().strip(), mw.get_text().strip()))
    # If any definitions were found, format them as a numbered list
    if lst:
        res = 'List of definitions of "{}" word/phrase:\n'.format(word)
        # Fix: the loop variable used to be named 'define', shadowing this function.
        for num, definition in enumerate(lst, 1):
            res += '{}. {}\n'.format(num, definition)
        return res
    # Otherwise, return an error message
    else:
        return 'There\'s no "{}" word/phrase in the oxford dictionary database!'.format(word)
| 2,028 | 580 |
from __future__ import annotations
import json
from collections import defaultdict
from typing import Any, TYPE_CHECKING
if TYPE_CHECKING:
from checkov.common.output.record import Record
from checkov.common.output.report import Report
from checkov.common.typing import _BaselineFinding, _BaselineFailedChecks
class Baseline:
    """Tracks baseline (accepted) findings and filters scan reports against them."""

    def __init__(self) -> None:
        self.path = ""
        # file path -> list of {"resource": ..., "check_ids": [...]} findings
        self.path_failed_checks_map: dict[str, list[_BaselineFinding]] = defaultdict(list)
        # entries loaded from a baseline JSON file (see from_json)
        self.failed_checks: list[_BaselineFailedChecks] = []

    def add_findings_from_report(self, report: Report) -> None:
        """Merge the failed checks of *report* into the per-file findings map."""
        for check in report.failed_checks:
            try:
                existing = next(
                    x for x in self.path_failed_checks_map[check.file_path] if x["resource"] == check.resource
                )
            except StopIteration:
                # First finding for this resource in this file.
                existing = {"resource": check.resource, "check_ids": []}
                self.path_failed_checks_map[check.file_path].append(existing)
            existing["check_ids"].append(check.check_id)
            existing["check_ids"].sort()  # Sort the check IDs to be nicer to the eye

    def to_dict(self) -> dict[str, Any]:
        """
        The output of this class needs to be very explicit, hence the following structure of the dict:
        {
            "failed_checks": [
                {
                    "file": "path/to/file",
                    "findings: [
                        {
                            "resource": "aws_s3_bucket.this",
                            "check_ids": [
                                "CKV_AWS_1",
                                "CKV_AWS_2",
                                "CKV_AWS_3"
                            ]
                        }
                    ]
                }
            ]
        }
        """
        failed_checks_list = []
        for file, findings in self.path_failed_checks_map.items():
            formatted_findings = []
            for finding in findings:
                formatted_findings.append({"resource": finding["resource"], "check_ids": finding["check_ids"]})
            failed_checks_list.append({"file": file, "findings": formatted_findings})

        resp = {"failed_checks": failed_checks_list}
        return resp

    def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:
        """Drop baselined findings from each report's failed checks; keep only
        baselined entries among passed/skipped checks."""
        for scan_report in scan_reports:
            scan_report.passed_checks = [
                check for check in scan_report.passed_checks if self._is_check_in_baseline(check)
            ]
            scan_report.skipped_checks = [
                check for check in scan_report.skipped_checks if self._is_check_in_baseline(check)
            ]
            scan_report.failed_checks = [
                check for check in scan_report.failed_checks if not self._is_check_in_baseline(check)
            ]

    def _is_check_in_baseline(self, check: Record) -> bool:
        """True when *check* (by resource + check id) appears in the baseline."""
        failed_check_id = check.check_id
        failed_check_resource = check.resource
        for baseline_failed_check in self.failed_checks:
            for finding in baseline_failed_check["findings"]:
                if finding["resource"] == failed_check_resource and failed_check_id in finding["check_ids"]:
                    return True
        return False

    def from_json(self, file_path: str) -> None:
        """Load baseline findings from the JSON file at *file_path*."""
        self.path = file_path
        with open(file_path, "r") as f:
            baseline_raw = json.load(f)
        # Fix: the default must be a list, not a dict -- failed_checks is
        # declared and iterated as a list of findings.
        self.failed_checks = baseline_raw.get("failed_checks", [])
| 3,546 | 988 |
import FWCore.ParameterSet.Config as cms
# Require a b quark and an anti-b quark (PDG IDs 5, -5) anywhere in the event;
# the |eta| < 20 and pT > 0 cuts are loose enough to accept essentially everything.
bfilter = cms.EDFilter("MCSingleParticleFilter",
    MaxEta = cms.untracked.vdouble(20.0, 20.0),
    MinEta = cms.untracked.vdouble(-20.0, -20.0),
    MinPt = cms.untracked.vdouble(0.0, 0.0),
    ParticleID = cms.untracked.vint32(5, -5)
)

# Require a J/psi (PDG ID 443) with status 2, again with loose kinematic cuts.
jpsifilter = cms.EDFilter("PythiaFilter",
    Status = cms.untracked.int32(2),
    MaxEta = cms.untracked.double(20.0),
    MinEta = cms.untracked.double(-20.0),
    MinPt = cms.untracked.double(0.0),
    ParticleID = cms.untracked.int32(443)
)

# Require an opposite-charge muon pair (status 1, pT > 2, |eta| < 2.5) with
# invariant mass between 2 and 4 (the J/psi window).
mumufilter = cms.EDFilter("MCParticlePairFilter",
    Status = cms.untracked.vint32(1, 1),
    MinPt = cms.untracked.vdouble(2.0, 2.0),
    MaxEta = cms.untracked.vdouble(2.5, 2.5),
    MinEta = cms.untracked.vdouble(-2.5, -2.5),
    ParticleCharge = cms.untracked.int32(-1),  # opposite charges
    MaxInvMass = cms.untracked.double(4.0),
    MinInvMass = cms.untracked.double(2.0),
    ParticleID1 = cms.untracked.vint32(13),
    ParticleID2 = cms.untracked.vint32(13)
)
| 981 | 478 |
import json
import logging
import urllib.parse
from time import sleep
from typing import Generator
from typing import Optional
import requests
from requests.adapters import HTTPAdapter
from tests.support.dtos import ConsumerStatus
from tests.support.dtos import CurrentDestinationStatus
from tests.support.dtos import MessageStatus
logger = logging.getLogger(__name__)

# RabbitMQ management HTTP API paths; '%2F' is the URL-encoded default vhost '/'.
_queues_details_request_path = "/api/queues"
_specific_queue_details_request_path = _queues_details_request_path + "/%2F/{queue_name}"
_bindings_from_queue_request_path = _queues_details_request_path + "/%2F/{queue_name}/bindings"
_get_message_from_queue_request_path = _queues_details_request_path + "/%2F/{queue_name}/get"
_channels_details_request_path = "/api/channels"
_channel_details_from_channel_request_path = _channels_details_request_path + "/{channel_name}"
_overview_request_path = "/api/overview"
def current_queue_configuration(queue_name, host="localhost", port=15672) -> Optional[CurrentDestinationStatus]:
    """Fetch the current status of *queue_name* from the management API.

    Returns None when the API reports an error (e.g. an unknown queue).
    The "message_stats" section is absent for queues without any activity,
    in which case the dequeue count is 0 and the enqueue count is None.
    """
    details = _do_request(host, port, _specific_queue_details_request_path.format(queue_name=queue_name))
    logger.debug("RabbitMQ request result: %s", details)
    if details.get("error"):
        return None
    stats = details.get("message_stats")
    if stats:
        dequeued = stats.get("deliver_get", 0)
        enqueued = stats.get("publish")
    else:
        dequeued, enqueued = 0, None
    return CurrentDestinationStatus(
        details["messages"], details["consumers"], enqueued, dequeued
    )
def current_topic_configuration(topic_name, host="localhost", port=15672) -> Optional[CurrentDestinationStatus]:
    """Find the queue bound to ``amq.topic`` with routing key *topic_name*
    and return its current status.

    Returns None when no queue is bound to the topic.
    """
    queues = _do_request(host, port, _queues_details_request_path + "?name=&use_regex=false")
    for queue_details in queues:
        queue_name = queue_details["name"]
        bindings = _do_request(host, port, _bindings_from_queue_request_path.format(queue_name=queue_name))
        for binding in bindings:
            if binding["source"] == "amq.topic" and binding["routing_key"] == topic_name:
                # BUG FIX: the management API omits "message_stats" (and its
                # "publish" counter) for queues without any activity, which
                # used to raise KeyError here. Guard the lookups the same way
                # current_queue_configuration() does.
                message_stats = queue_details.get("message_stats", {})
                number_of_pending_messages = queue_details["messages"]
                number_of_consumers = queue_details["consumers"]
                messages_enqueued = message_stats.get("publish")
                messages_dequeued = message_stats.get("deliver_get") or 0
                return CurrentDestinationStatus(
                    number_of_pending_messages, number_of_consumers, messages_enqueued, messages_dequeued
                )
    return None
def consumers_details(connection_id, host="localhost", port=15672) -> Generator[ConsumerStatus, None, None]:
    """Yield a ConsumerStatus for every consumer tagged ``T_<connection_id>``.

    Walks every channel known to the management API; the channel detail URL
    is "<connection name> (1)" with the connection name URL-quoted.
    """
    for channel in _do_request(host, port, _channels_details_request_path):
        connection_name = channel["connection_details"]["name"]
        details = _do_request(
            host,
            port,
            _channel_details_from_channel_request_path.format(
                channel_name=urllib.parse.quote(f"{connection_name} ") + "(1)"
            ),
        )
        for consumer in details.get("consumer_details") or []:
            if consumer["consumer_tag"] != f"T_{connection_id}":
                continue
            yield ConsumerStatus(
                address_to_destination_details=None,
                destination_name=consumer["queue"]["name"],
                session_id=None,
                enqueues=None,
                dequeues=None,
                dispatched=None,
                dispatched_queue=None,
                prefetch=consumer["prefetch_count"],
                max_pending=details["messages_unacknowledged"],
                exclusive=consumer["exclusive"],
                retroactive=None,
            )
def retrieve_message_published(destination_name, host="localhost", port=15672) -> MessageStatus:
    """Pop (and ack) exactly one message from *destination_name* and wrap it
    in a MessageStatus; the payload is assumed to be JSON."""
    request_body = {
        "vhost": "/",
        "name": destination_name,
        "truncate": "50000",
        "ackmode": "ack_requeue_false",
        "encoding": "auto",
        "count": "1",
    }
    messages = _do_request(
        host,
        port,
        _get_message_from_queue_request_path.format(queue_name=destination_name),
        do_post=True,
        body=json.dumps(request_body),
    )
    assert len(messages) == 1
    properties = messages[0]["properties"]
    details = json.loads(messages[0]["payload"])
    correlation_id = properties["correlation_id"]
    # Fold the remaining properties in with the headers for the caller.
    headers = properties.pop("headers")
    return MessageStatus(None, details, None, correlation_id, {**headers, **properties})
def get_broker_version(host="localhost", port=15672) -> str:
    """Return the server-reported RabbitMQ version string."""
    return _do_request(host, port, _overview_request_path)["rabbitmq_version"]
def _do_request(host, port, request_path, do_post=False, body=None):
    """GET (or, with do_post, POST *body* to) a management-API path and
    return the parsed JSON response.

    The fixed 2s pause is presumably there to let RabbitMQ refresh its
    management statistics before they are read — confirm before removing.
    Retries up to 3 times at the transport level; uses the default
    guest/guest credentials.
    """
    sleep(2)
    url = f"http://{host}:{port}{request_path}"
    credentials = ("guest", "guest")
    session = requests.Session()
    session.mount("http://", HTTPAdapter(max_retries=3))
    with session:
        if do_post:
            response = session.post(url, auth=credentials, data=body)
        else:
            response = session.get(url, auth=credentials)
    return response.json()
| 5,618 | 1,637 |
# Here, left = 0 and right = length of array - 1
def ternarySearch(ar, key, left, right):
    """Ternary-search *key* in the ascending-sorted list *ar* between
    indices *left* and *right* (inclusive).

    Prints "Element found!Index: i" or "Key not found!" and returns 0 in
    every case, preserving the original CLI-style contract.

    BUG FIXES vs. the original:
    - the "key below leftmid" branch recursed with an undefined name `a`
      instead of `ar` (NameError);
    - the mid indices were re-included in the recursive ranges, so a
      two-element range with the key strictly between the mids recursed
      forever; the mids are now excluded, which guarantees termination;
    - `left == right` (single-element range) was reported as not found.
    """
    if left <= right:
        third = (right - left) // 3
        leftmid = left + third
        rightmid = right - third
        if ar[rightmid] == key:
            print("Element found!Index:", rightmid)
            return 0
        elif ar[leftmid] == key:
            print("Element found!Index:", leftmid)
            return 0
        elif ar[leftmid] < key < ar[rightmid]:
            return ternarySearch(ar, key, leftmid + 1, rightmid - 1)
        elif key > ar[rightmid]:
            return ternarySearch(ar, key, rightmid + 1, right)
        else:
            return ternarySearch(ar, key, left, leftmid - 1)
    print("Key not found!")
    return 0
# Sample Input (note: the algorithm assumes a sorted list; this sample only
# works because the key happens to sit at a probed index):
# Ar = [12 , 90 , 67 , 19 , 18]
# Key = 19
# Output:
# Element found!Index: 3
| 964 | 318 |
import featuretools as ft
def merge_featuretools(df_parent, df_related, parent_column, related_column, date_column):
    """Automated feature engineering via featuretools deep feature synthesis.

    More info:
    https://www.featuretools.com
    https://github.com/featuretools/featuretools
    https://docs.featuretools.com
    http://www.jmaxkanter.com/static/papers/DSAA_DSM_2015.pdf
    """
    # Build an entityset holding both dataframes; the related frame gets a
    # synthetic 'related_id' index and uses date_column as its time index.
    entity_set = ft.EntitySet('parent')
    entity_set = entity_set.entity_from_dataframe('parent', df_parent, index=parent_column)
    entity_set = entity_set.entity_from_dataframe('relate', df_related, make_index=True,
                                                  time_index=date_column,
                                                  index='related_id')
    # Declare and register the parent -> related link.
    link = ft.Relationship(entity_set['parent'][parent_column], entity_set['relate'][related_column])
    entity_set = entity_set.add_relationships([link])
    # Deep feature synthesis against the parent entity; the generated
    # feature definitions are not needed by callers.
    feature_matrix, _feature_defs = ft.dfs(entityset=entity_set,
                                           target_entity='parent')
    return feature_matrix.reset_index()
| 1,114 | 327 |
from pyspark import SQLContext
from pyspark.sql.functions import lit
from datetime import datetime
def prepare_data(sc, months, output_path):
    """Build the app-recommendation dataset from the last *months* of installs.

    Writes four single-partition text outputs under *output_path*
    ("/hashs", "/apps", "/dataset", "/histogram") and returns the final
    (user_id, app_id, rating) RDD.
    """
    sqlContext = SQLContext(sc)
    sqlContext.setConf('spark.sql.parquet.compression.codec', 'snappy')
    # Build one big alternation regex from the top-50 list and the blacklist
    # fragments; each piece carries a trailing '|' and the last one is
    # stripped below.
    blacklist = []
    blacklist_top50 = ['({})|'.format(x) for x in get_top50()]
    blacklist_filters = ['(.+\.{}.*)|'.format(x) for x in get_blackList()]
    blacklist.extend(blacklist_top50)
    blacklist.extend(blacklist_filters)
    blacklist = list(set(blacklist))
    rx = ''.join(blacklist)
    rx = rx[:-1]
    # gets all user installs from the selected number of previous months excluding the current month
    df = get_files_from_s3(sqlContext, months)
    # select only the hash and explode the list of packages
    df_pkg = df.select(
        df['hash'].alias('hash'),
        df['pkg'].alias('package')
    ).drop_duplicates().cache()
    # remove incoherent packages like "android" (require a dotted name)
    rpkg = '.+\..+'
    df_pkg = df_pkg.filter(df_pkg['package'].rlike(rpkg)).cache()
    # filter blacklist packages and top 50
    df_pkg_nosystemapps = df_pkg.filter(~df_pkg['package'].rlike(rx)).cache()
    # connects to database and filter packages with less than 500 downloads
    df_pkg_nosystemapps = filter_less_500_downloads(sqlContext, df_pkg_nosystemapps).cache()
    def toCSVLine(data):
        # (name, id) pair -> "name,id"
        name = data[0]
        id = data[1]
        return "{},{}".format(name, id)
    # mapping of hashs and ID used for recommendations (ids start at 1)
    rdd_hashs = df_pkg_nosystemapps.select(df_pkg_nosystemapps['hash']).distinct().rdd.zipWithUniqueId().map(
        lambda x: (x[0][0], x[1] + 1)).cache()
    df_hashs = sqlContext.createDataFrame(rdd_hashs, ['hash', 'user_id'])
    rdd_hashs = rdd_hashs.map(toCSVLine)
    rdd_hashs.repartition(1).saveAsTextFile(output_path + "/hashs")
    # NOTE(review): unpersist() is called on the re-bound (mapped) RDD, not
    # on the cached RDD created above — confirm which one was meant.
    rdd_hashs.unpersist()
    print("user hashs saved")
    # mapping of packages and ID used for recommendations
    rdd_packages = df_pkg_nosystemapps.select(df_pkg_nosystemapps['package']).distinct().rdd.zipWithUniqueId().map(
        lambda x: (x[0][0], x[1]+1)).cache()
    df_packages = sqlContext.createDataFrame(rdd_packages, ['package', 'app_id'])
    rdd_packages = rdd_packages.map(toCSVLine)
    rdd_packages.repartition(1).saveAsTextFile(output_path + "/apps")
    print("apps ID's saved")
    def toCSVLine_2(data):
        # three-column row -> "a,b,c"
        app_id = data[0]
        count = data[1]
        quo = data[2]
        return "{},{},{}".format(app_id, count, quo)
    # final dataframe to be sent to recommend engine: implicit rating of 1
    # for every (user, app) install pair
    df_data = df_pkg_nosystemapps.join(df_hashs, 'hash', 'left_outer').select('user_id', 'package').cache()
    df_data = df_data.join(df_packages, 'package', 'left_outer').select('user_id', 'app_id').cache()
    df_data = df_data.withColumn("rating", lit(1)).cache()
    df_data.rdd.map(toCSVLine_2).repartition(1).saveAsTextFile(output_path + "/dataset")
    print("dataset saved")
    # save apps histogram
    df_hist = get_app_histogram(df_data, df_packages)
    df_hist.rdd.map(toCSVLine_2).repartition(1).saveAsTextFile(output_path + "/histogram")
    print("apps histogram saved")
    return df_data.rdd
def get_files_from_s3(sqlContext, amount_months):
    """Read the install parquet partitions for the *amount_months* months
    preceding the current month (the current month itself is excluded),
    wrapping into the previous year when needed.

    Returns the DataFrame produced by reading all matching
    ``1/year=Y/month=M/*/*`` paths.
    """
    year = datetime.today().year
    month = datetime.today().month
    # BUG FIX: this condition used to be `>= 0`. When month == amount_months
    # (e.g. March with 3 months requested) the old branch produced a
    # nonexistent "month=0" partition instead of wrapping to December of
    # the previous year.
    if month - amount_months > 0:
        months = range(month - amount_months, month)
        year_and_month = ["year={}/month={}".format(year, m) for m in months]
    else:
        previous_year_months = [x for x in range(12 - abs(month - amount_months), 13)]
        this_year_months = [x for x in range(1, month)]
        year_and_month = ["year={}/month={}".format(year - 1, m) for m in previous_year_months]
        year_and_month = year_and_month + ["year={}/month={}".format(year, m) for m in this_year_months]
    day = '*'
    filename = '*'
    version = '1'
    filepath = ['{}/{}/{}/{}'.format(version, pair, day, filename) for pair in year_and_month]
    print("reading {}".format(filepath))
    return sqlContext.read.parquet(*filepath)
def filter_less_500_downloads(sqlContext, df_pkg_nosystemapps):
    """Join against two JDBC tables and keep only packages whose download
    count exceeds 500; the helper columns are dropped again afterwards.

    NOTE(review): the connection URL, credentials and table names are
    placeholders ('url' / 'user' / 'password' / 'table') — presumably
    redacted; fill them in before running.
    """
    u, p = ['user', 'password']
    durl = 'url'
    dbta = 'table'
    psql_df = sqlContext.read.format('jdbc').options(url=durl,
                                                     user=u,
                                                     password=p,
                                                     dbtable=dbta,
                                                     driver='org.postgresql.Driver').load()
    psql_df = psql_df.drop(psql_df['added_timestamp'])
    # First join: match package names against the table's 'data' column.
    df_pkg_nosystemapps = df_pkg_nosystemapps.join(psql_df,
                                                   psql_df['data'] == df_pkg_nosystemapps['package']) \
        .drop(psql_df['data'])
    dbta = 'table'
    psql_df = sqlContext.read.format('jdbc').options(url=durl,
                                                     user=u,
                                                     password=p,
                                                     dbtable=dbta,
                                                     driver='org.postgresql.Driver').load()
    psql_df = psql_df.drop(psql_df['id'])
    # Second join: attach download counts via the 'app_package' key.
    df_pkg_nosystemapps = df_pkg_nosystemapps.join(psql_df,
                                                   psql_df['app_package'] == df_pkg_nosystemapps['id']) \
        .drop(psql_df['app_package']) \
        .drop(df_pkg_nosystemapps['id'])
    df_pkg_nosystemapps = df_pkg_nosystemapps.filter(
        df_pkg_nosystemapps['downloads'] > 500).drop(df_pkg_nosystemapps['downloads'])
    df_pkg_nosystemapps = df_pkg_nosystemapps.drop_duplicates()
    return df_pkg_nosystemapps
def get_app_histogram(df_data, df_packages):
    """Per-app install counts plus each app's percentage of all dataset rows."""
    total_rows = df_data.count()
    hist = df_data.groupBy("app_id").count()
    hist = hist.withColumn("total", lit(total_rows))
    hist = hist.withColumn('percentage', (hist['count'] / hist['total']) * 100)
    # Resolve app ids back to package names for the saved report.
    return hist.join(df_packages, 'app_id', 'left_outer').select('package', 'count', 'percentage')
def get_blackList():
    """Package-name fragments to exclude (placeholder value)."""
    return ['list']
def get_top50():
    """Top-50 package names to exclude (placeholder value)."""
    return ['list']
| 6,234 | 2,107 |
import pytest
from typing import List
from tests.globals.constants import NUMBER_OF_DOCUMENTS
from tests.globals.document import dataclass_document
@pytest.fixture(scope="session")
def dataclass_documents() -> List:
    # Session-scoped fixture: one shared batch of NUMBER_OF_DOCUMENTS
    # generated dataclass documents, built once per test session.
    return [dataclass_document() for _ in range(NUMBER_OF_DOCUMENTS)]
| 291 | 93 |
import tweepy
import praw
import prawcore
import time
import requests
import logging
import os
import shutil
import facebook
import requests
# pip install python-decouple
from decouple import config
# Login Credentials — all secrets are read from the environment / .env via
# python-decouple, never hard-coded here.
REDDIT_CLIENT_ID = config('REDDIT_CLIENT_ID')
REDDIT_CLIENT_SECRET = config('REDDIT_CLIENT_SECRET')
REDDIT_USERNAME = config('REDDIT_USERNAME')
REDDIT_PASSWORD = config('REDDIT_PASSWORD')
TWITTER_CONSUMER_KEY = config('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = config('TWITTER_CONSUMER_SECRET')
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN')
TWITTER_ACCESS_TOKEN_SECRET = config('TWITTER_ACCESS_TOKEN_SECRET')
USER_AGENT = 'python:saralgyaan_social_updates:v1.0.0 (by /u/uditvashisht)'
FACEBOOK_PAGE_ID = config('FACEBOOK_PAGE_ID')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_ACCESS_TOKEN')
# Dictionary containing subreddits and tags.
# NOTE(review): 'progammer' looks like a typo for 'programmer', but it is a
# live hashtag string — confirm before changing it.
SUBREDDIT_DICT = {'programmerhumor': ['progammer', 'programmerhumor', 'humor'],
                  'programmingmemes': ['programming', 'programmingmemes', 'programmerhumor'],
                  'xkcd': ['xkcd', 'xkcdcomics']
                  }
current_dir = os.getcwd()
# File logger: INFO and above go to social-update.log next to the CWD.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(f'{os.path.join(current_dir, "social-update.log")}')
fmt = logging.Formatter('%(levelname)s : %(name)s : %(asctime)s : %(message)s')
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
def auto_post_facebook(picture, message):
    """A function which auto-posts the photos with hastags on facebook.

    Requires
    --------
    facebook: module
        pip install facebook-sdk, import facebook
    page_id : str
        Page ID of the facebook page.
    access_token : str
        Access token of facebook account.
        Can be obtained from https://developers.facebook.com/tools.
        Use this tutorial
        https://pythoncircle.com/post/666/automating-facebook-page-posts-using-python-script/

    Parameters
    __________
    message : str
        title and hashtags of the photo.
    picture: str
        Path of the image file to upload.

    Posts
    _____
    A post containing photo title and hashtags
    """
    graph = facebook.GraphAPI(FACEBOOK_ACCESS_TOKEN)
    facebook_page_id = FACEBOOK_PAGE_ID
    # If you want to post a plain status update instead:
    # graph.put_object(facebook_page_id, "feed", message='test message')
    # BUG FIX: the image file used to be opened inline and never closed,
    # leaking a file handle per post.
    with open(picture, 'rb') as image_file:
        graph.put_photo(image=image_file, message=message)
def login_to_reddit():
    """Log into the Reddit account via PRAW and return the Reddit instance.

    Returns
    -------
    praw.Reddit on success; None when login fails (the failure is logged
    with its traceback).
    """
    try:
        logger.info('* Logging into Reddit Account')
        reddit = praw.Reddit(client_id=REDDIT_CLIENT_ID,
                             client_secret=REDDIT_CLIENT_SECRET,
                             password=REDDIT_PASSWORD,
                             user_agent=USER_AGENT,
                             username=REDDIT_USERNAME)
        logger.info('* Login successful')
        return reddit
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and discarded the error details.
        logger.exception('* Login failed')
def grab_new_image(url):
    """This function grabs the image from the URL of the reddit post and
    saves it as img.jpg in the current directory.

    Parameters:
    -----------
    url : str
        URL of the subreddit post containing the image
    """
    logger.info('* Fetching image from the Reddit')
    try:
        response = requests.get(url, timeout=60)
        # BUG FIX: without this check an HTTP error page was silently saved
        # (and later posted) as img.jpg.
        response.raise_for_status()
        with open('img.jpg', 'wb') as image:
            image.write(response.content)
        logger.info('* Image saved successfully')
    except Exception:
        # Was a bare `except:`; now logs the actual failure with traceback.
        logger.exception('* Something went wrong while downloading image')
def post_tweet(tweet_content):
    """This function posts the tweet update together with img.jpg.

    Parameters:
    -----------
    tweet_content : str
        Full tweet text (title, author, link and hashtags).
    """
    try:
        logger.info('* Logging into twitter')
        auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY,
                                   TWITTER_CONSUMER_SECRET)
        auth.set_access_token(TWITTER_ACCESS_TOKEN,
                              TWITTER_ACCESS_TOKEN_SECRET)
        api = tweepy.API(auth)
        logger.info('* Login successful')
        image_path = 'img.jpg'
        logger.info('* Posting on twitter')
        api.update_with_media(image_path, tweet_content)
        logger.info("* Successfully posted")
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and hid the error details.
        logger.exception('* Something went wrong while posting tweet')
def main(sub_reddit, tags):
    """Check *sub_reddit* for hot image posts, download each with
    grab_new_image(), tweet it with post_tweet() and post it to Facebook.

    Parameters:
    -----------
    sub_reddit : str
        Name of the sub reddit to check
    tags : list
        list of hashtags to be used
    """
    reddit = login_to_reddit()
    try:
        for submission in reddit.subreddit(sub_reddit).hot(limit=8):
            if not submission.stickied:
                logger.info("* Fetching submission from reddit")
                post_url = f'redd.it/{str(submission)}'
                title = submission.title
                tweet_content = f'{title} posted by {str(submission.author)} {post_url} #{" #".join(tags)}'
                url = submission.url
                # The jpg and png branches were identical copy-paste; merged
                # into one condition with the same substring semantics.
                if 'jpg' in url or 'png' in url:
                    grab_new_image(url)
                    post_tweet(tweet_content)
                    auto_post_facebook('img.jpg', f'{title} #{" #".join(tags)}')
                    # Pause between posts to stay under the API rate limits.
                    time.sleep(20)
                else:
                    logger.info("* Not an image url")
    # exception handling
    except prawcore.exceptions.ServerError as e:
        logger.info(e)
        time.sleep(20)
    # excepts errors like rate limit
    except praw.exceptions.APIException as e:
        logger.info(e)
        time.sleep(60)
    # excepts other PRAW errors
    except praw.exceptions.PRAWException as e:
        logger.info(e)
        time.sleep(20)
    # excepts network connection errors
    except prawcore.exceptions.RequestException:
        logger.info("* Please check your network connection")
        logger.info("* Sleeping for 1 minute")
        time.sleep(60)
if __name__ == "__main__":
for key, value in SUBREDDIT_DICT.items():
main(key, value)
| 6,711 | 2,093 |
class Board():
    """Search-tree node wrapping a game board together with its score and
    the move that produced it; ``previous`` links back to the parent node
    (set externally, starts as None)."""

    def __init__(self, score=None, move=None, board=None):
        self.score = score
        self.move = move
        self.board = board
        self.previous = None

    def __repr__(self):
        # A node displays as the move that created it.
        return str(self.move)
from peewee import *
# MySQL connection for the hopes_wings schema.
# NOTE(review): host/user/password are hard-coded here — move them to
# environment variables or a config file before sharing or deploying.
database = MySQLDatabase('hopes_wings', **{'charset': 'utf8', 'use_unicode': True, 'host': 'localhost', 'user': 'root', 'password': 'david'})
class UnknownField(object):
    """Placeholder for database column types the model generator could not
    map; accepts and ignores any constructor arguments."""

    def __init__(self, *_, **__):
        pass
class BaseModel(Model):
    # Shared base so every model below binds to the same MySQL database.
    class Meta:
        database = database
class Donation(BaseModel):
    # One donation; the payment-type tables (Cash, CheckTable, Eft, Grant,
    # InKind, PayPal) each reference a row here via DonationID.
    amount = DecimalField(column_name='Amount')
    comment = CharField(column_name='Comment', null=True)
    date = DateField(column_name='Date')
    donation_id = AutoField(column_name='DonationID')
    letter_sent = CharField(column_name='LetterSent')  # thank-you letter flag

    class Meta:
        table_name = 'Donation'
class Cash(BaseModel):
    # Cash payment details for a donation.
    amount = DecimalField(column_name='Amount')
    cash_id = AutoField(column_name='CashID')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)

    class Meta:
        table_name = 'Cash'
class CheckTable(BaseModel):
    # Cheque payment details for a donation.
    amount = DecimalField(column_name='Amount')
    check_id = AutoField(column_name='CheckID')
    check_number = IntegerField(column_name='CheckNumber')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)

    class Meta:
        table_name = 'CheckTable'
class Donors(BaseModel):
    # A donor's contact record; linked to donations via DonorsToDonations.
    # NOTE(review): phone_number and zip are IntegerFields, which drops
    # leading zeros — confirm whether these should be CharFields.
    address = CharField(column_name='Address', null=True)
    city = CharField(column_name='City', null=True)
    comment = CharField(column_name='Comment', null=True)
    donor_id = AutoField(column_name='DonorID')
    donor_name = CharField(column_name='DonorName')
    email_address = CharField(column_name='EmailAddress', null=True)
    phone_number = IntegerField(column_name='PhoneNumber', null=True)
    state = CharField(column_name='State', null=True)
    zip = IntegerField(column_name='Zip', null=True)

    class Meta:
        table_name = 'donors'
class DonorsToDonations(BaseModel):
    # Many-to-many join table between donors and donations.
    dtd_id = AutoField(column_name='DTD_ID')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)
    donor = ForeignKeyField(column_name='DonorID', field='donor_id', model=Donors)

    class Meta:
        table_name = 'DonorsToDonations'
class Eft(BaseModel):
    # Electronic-funds-transfer payment details for a donation.
    amount = DecimalField(column_name='Amount')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)
    eftid = AutoField(column_name='EFTID')

    class Meta:
        table_name = 'EFT'
class Grant(BaseModel):
    # Grant received, tied to a donation row.
    amount = DecimalField(column_name='Amount')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)
    grant_id = AutoField(column_name='GrantID')
    grant_name = CharField(column_name='GrantName')

    class Meta:
        table_name = 'Grant'
class InKind(BaseModel):
    # In-kind (non-monetary) donation details.
    amount = DecimalField(column_name='Amount')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)
    in_kind_id = AutoField(column_name='InKindID')
    item_given = CharField(column_name='ItemGiven')

    class Meta:
        table_name = 'In Kind'
class PayPal(BaseModel):
    # PayPal payment details for a donation.
    amount = DecimalField(column_name='Amount')
    donation = ForeignKeyField(column_name='DonationID', field='donation_id', model=Donation)
    pay_pal_id = AutoField(column_name='PayPalID')
    receipt_number = CharField(column_name='ReceiptNumber', null=True)

    class Meta:
        table_name = 'PayPal'
# Practice query
def getDonorsByID(db, donorID):
    """Return all donor rows matching *donorID*.

    SECURITY FIX: the query used to be built by string concatenation,
    which is SQL-injectable; it now uses a bound parameter.
    """
    return db.execute_sql("SELECT * FROM donors WHERE DonorID = %s", (donorID,)).fetchall()
def run_practice_query():
    """Demo: print donor 1's all-time donation total.

    BUG FIX: this code previously ran at module import time and called
    TotalAmountbyAlltimeByDonorID before that function was defined further
    down the file, raising NameError on import. Wrapping it in a function
    defers the name lookup until call time; invoke run_practice_query()
    after the module has fully loaded.
    """
    database.connect()
    # query = getDonorsByID(database, 10)
    query = TotalAmountbyAlltimeByDonorID(database, 1)
    print(query[0])
    database.close()
#This query shows the amount donated in a particular year by a donor with DonorID ___
def AmountByYearByDonorID(db, donorID):
    """Sum of donations dated in 2020 for donor *donorID*.

    SECURITY FIX: donorID is now a bound parameter instead of being
    concatenated into the SQL (injection risk).
    NOTE(review): the JOINs use `ON donors.DonorID` with no `=` comparison;
    MySQL accepts that as a truthiness test rather than a key match —
    confirm the intended join conditions. The year is hard-coded to 2020.
    """
    return db.execute_sql(
        "SELECT SUM(Donation.Amount) FROM donors "
        "INNER JOIN DonorsToDonations ON donors.DonorID "
        "INNER JOIN Donation ON Donation.DonationID "
        "WHERE Donation.Date >= '2020-01-01 00:00:00' and Date < '2021-01-01 00:00:00' "
        "and donors.DonorID= %s",
        (donorID,),
    ).fetchall()
#This query shows the total amount of donation made by a donor with DonorID ___ since the beginning of time
def TotalAmountbyAlltimeByDonorID(db, donorID):
    """All-time donation sum for donor *donorID*.

    SECURITY FIX: donorID is now a bound parameter instead of string
    concatenation (injection risk).
    NOTE(review): the JOINs use `ON donors.DonorID` with no `=` comparison —
    confirm the intended join conditions.
    """
    return db.execute_sql(
        "SELECT SUM(Donation.Amount) FROM donors "
        "INNER JOIN DonorsToDonations ON donors.DonorID "
        "INNER JOIN Donation ON Donation.DonationID "
        "WHERE donors.DonorID= %s",
        (donorID,),
    ).fetchall()
#This query shows the number of donors for a particular year
def NumberOfDonorsByYear(db):
    """Count donors with donations dated in 2020 (year is hard-coded).

    NOTE(review): the JOINs use `ON donors.DonorID` with no `=` comparison —
    confirm the intended join conditions before trusting the count.
    """
    return db.execute_sql("SELECT DISTINCT COUNT(donors.DonorID) FROM donors INNER JOIN DonorsToDonations ON donors.DonorID INNER JOIN Donation ON Donation.DonationID WHERE Donation.Date >= '2020-01-01 00:00:00' and Date < '2021-01-01 00:00:00'").fetchall()
#This query list all donations and its information (Date, comment, amount, type, donor) for all time (from the beginning of time)
def AllDonationsByAlltime(db):
    """All donation rows joined with their donor links, no date filter."""
    return db.execute_sql("SELECT * FROM `Donation` INNER JOIN DonorsToDonations WHERE DonorsToDonations.DonationID= Donation.DonationID").fetchall()
#This query list all donations and its information (Date, comment, amount, type, donor) for a particular year
def AllDonationsByYear(db):
    """Donation rows (joined with donor links) dated in 2020; the year is
    hard-coded."""
    return db.execute_sql("SELECT * FROM `Donation` INNER JOIN DonorsToDonations WHERE DonorsToDonations.DonationID= Donation.DonationID AND Donation.Date >= '2020-01-01 00:00:00' and Date < '2021-01-01 00:00:00'").fetchall()
#This query shows whether thank you letter is sent for a donation
def LetterSentCheck(db, donationID):
    """Return the LetterSent flag for donation *donationID*.

    BUG FIX: the original concatenated the id directly after "DonationID"
    with no `=`, producing invalid SQL like "WHERE DonationID5"; the id is
    now a bound parameter (also closing the injection hole).
    """
    return db.execute_sql("SELECT Donation.LetterSent FROM Donation WHERE DonationID = %s", (donationID,)).fetchall()
#This query shows the total amount donated since the beginning of time (excluding grants)
def TotalAmountDonatedByAlltime(db):
    """All-time SUM(Amount) over the Donation table."""
    return db.execute_sql("SELECT SUM(Amount) FROM `Donation`").fetchall()
#This query shows the total amount donated for a particular year (ex: 2019) (excluding grant)
def TotalAmountDonatedByYear(db):
    """SUM(Amount) over donations dated in 2020 (year is hard-coded)."""
    return db.execute_sql("SELECT SUM(Amount) FROM `Donation` WHERE Date >= '2020-01-01 00:00:00' and Date < '2021-01-01 00:00:00'").fetchall()
#This query shows the total amount of grant received since the beginning of time
def TotalGrantByAlltime(db):
    """All-time SUM(Amount) over the Grant table."""
    return db.execute_sql("SELECT SUM(Amount) FROM `Grant`").fetchall()
#This query shows the total amount of grant received for one year
def TotalGrantByYear(db):
    """NOTE(review): despite the name, this selects full rows from Donation
    (not a SUM from Grant) and filters on a DonationType column that the
    Donation model above does not declare — confirm the intended query."""
    return db.execute_sql("SELECT * FROM `Donation` WHERE Date >= '2020-01-01 00:00:00' and Date < '2021-01-01 00:00:00' and DonationType='Grant'").fetchall()
#This query list all donors and their information (DonorName, Address, City, State, Zip, Comment, Email, Phone) for all time (from the beginning of time)
def AllDonorInformationByAlltime(db):
    """Contact columns for every donor, unfiltered."""
    return db.execute_sql("SELECT DonorName,Address,City,State,Zip,Comment,EmailAddress,PhoneNumber FROM donors").fetchall()
#This query list all donors and their information (DonorName, Address, City, State, Zip, Comment, Email, Phone) for this current year only
def AllDonorInformationByYear(db):
    """Distinct donor contact columns for donors with 2020 donations.

    NOTE(review): the JOINs use `ON dtd.DonorID` / `ON Donation.DonationID`
    with no `=` comparison — confirm the intended join conditions.
    """
    return db.execute_sql("SELECT DISTINCT donors.DonorName,donors.Address,donors.City,donors.State,donors.Zip,donors.Comment,donors.EmailAddress,donors.PhoneNumber FROM donors INNER JOIN DonorsToDonations AS dtd ON dtd.DonorID INNER JOIN Donation ON Donation.DonationID WHERE Donation.Date >= '2020-01-01 00:00:00' and Donation.Date < '2021-01-01 00:00:00'").fetchall()
#This query shows the number of donors for all time (from the beginning of time)
def AllDonorAllTime(db):
    """Total row count of the donors table."""
    return db.execute_sql("SELECT COUNT(*) FROM donors").fetchall()
#This query update data for a donor – DonorName, Address, City, State, Zip, Comment, Email, Phone)
def UpdateDonor(db, donorID):
    """Reset donor *donorID*'s contact fields: email to a fixed address and
    the remaining contact columns to NULL.

    FIXES vs. the original:
    - the last statement was missing a `+` before str(donorID) — a
      SyntaxError that prevented this module from importing at all;
    - SQL was built by string concatenation (injection risk) — now bound
      parameters;
    - the email value was wrapped in backticks, which MySQL parses as an
      identifier, not a string literal;
    - each UPDATE was prefixed with an unrelated SELECT in the same
      execute_sql call;
    - `SET Phone = NULL` referenced a column that does not exist — the
      Donors model declares PhoneNumber.
    """
    db.execute_sql("UPDATE donors SET EmailAddress = %s WHERE DonorID = %s", ('hello@gmail.com', donorID))
    for column in ('PhoneNumber', 'Address', 'City', 'State', 'Zip', 'Comment'):
        # Column names cannot be bound parameters; they come from this fixed
        # tuple, never from user input.
        db.execute_sql("UPDATE donors SET {} = NULL WHERE DonorID = %s".format(column), (donorID,))
#This query delete a donor
def DeleteDonor(db, donorID):
    """Delete the donor row with id *donorID*.

    SECURITY FIX: donorID is now a bound parameter instead of string
    concatenation (injection risk).
    """
    db.execute_sql("DELETE FROM donors WHERE DonorID = %s", (donorID,))
#This query shows the date of the last thank you letter sent for a particular donor
def LastLetterDateByDonorID(db, donorID):
    """Date of the most recent donation with LetterSent = 'yes' for donor
    *donorID*.

    BUG FIXES: the original signature omitted *donorID* even though the
    query body used it (guaranteed NameError on every call — adding the
    parameter cannot break any working caller), and the concatenation
    produced "...DonorID = 1AND..." with no space. The id is now a bound
    parameter.
    NOTE(review): the JOIN uses `ON dtd.DonorID` with no `=` comparison —
    confirm the intended join condition.
    """
    return db.execute_sql(
        "SELECT DonorID, Donation.LetterSent, MAX(Donation.Date) AS date_last_donation "
        "FROM Donation INNER JOIN DonorsToDonations AS dtd ON dtd.DonorID "
        "WHERE DonorID = %s AND Donation.LetterSent = 'yes' GROUP BY Donation.LetterSent",
        (donorID,),
    ).fetchall()
#This query shows all the donation made by the donor which thank you letter had not been sent to, as well as the date, amount, and type of each of those donations
def AllDonationsByDonorNoLetter(db):
    """Donations whose LetterSent flag is 'NO'.

    NOTE(review): the select list references a DonationType column that the
    Donation model above does not declare — confirm the schema.
    """
    return db.execute_sql("SELECT * ,Date,Amount,DonationType FROM `Donation` WHERE LetterSent = 'NO'").fetchall()
#This query shows the donor name, address, city, state, zip, email, and phone for a donor whose DonorID is _____
#This query shows the date, amount, and type of the donation, for the donations selected by the user to send thank you letter to, and the sum of all the donations selected
#This query list all grants and its information (Date, grant name, amount, comment) for a particular year
#This query list all grants and its information (Date, grant name, amount, funding source?, comment) for all time
#This query update data for a grant – grant name, amount)
#This query inserts a new grant – grant name, amount)
#This query list all donations and its information (Date, comment, amount, type) made by a particular donor with DonorID ___
#This query shows the 5 top donors for all time (from the beginning of time)
#This query shows the 5 top donors for this year only
#This query update data for a donation – Address, City, State, Zip, Comment, Email, Phone, type, amount, comments)
#This query inserts a new donation (Data: Date, DonorName, Address, City, State, Zip, Comment, Email, Phone, type, amount, comments) – if the donor doesn’t exist, make a new donor; otherwise, if donor already exists, simply add a donation to the donor.
| 11,214 | 3,611 |
import json
from django.contrib.auth import get_user_model
from django.core import serializers
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models.signals import post_save
from helusers.models import ADGroup, ADGroupMapping
from sequences.models import Sequence
from youths.models import YouthProfile
from youths.signals import generate_membership_number
from youths.utils import generate_admin_group
# Resolve the active user model once; get_user_model() honours a custom
# AUTH_USER_MODEL setting.
User = get_user_model()
class Command(BaseCommand):
    help = "Import youth data from a JSON file created using the open-city-profile backend's export_youth_data command."

    def add_arguments(self, parser):
        # Positional JSON file path(s); nargs="+" accepts several but only
        # the first is used in handle().
        parser.add_argument("filename", nargs="+", type=str)

    def handle(self, *args, **kwargs):
        """Wipe existing youth data and re-import it from the export file."""
        filename = kwargs["filename"][0]
        with open(filename, "r") as infile:
            data = json.load(infile)
        # Membership numbers come from the export itself, so suppress the
        # signal that would generate fresh ones on save.
        post_save.disconnect(generate_membership_number, sender=YouthProfile)
        with transaction.atomic():
            # Start from a clean slate; superusers are preserved.
            YouthProfile.objects.all().delete()
            User.objects.exclude(is_superuser=True).delete()
            ADGroup.objects.all().delete()
            # The export references objects by natural key; give the
            # deserializer the lookups it needs.
            User.objects.get_by_natural_key = lambda uuid: User.objects.get(uuid=uuid)
            ADGroup.objects.get_by_natural_key = lambda name: ADGroup.objects.get(
                name=name
            )
            YouthProfile.objects.get_by_natural_key = (
                lambda uuid: YouthProfile.objects.get(id=uuid)
            )
            max_membership_number = 0
            # deserialize() wants a JSON string/stream, hence the round trip.
            for obj in serializers.deserialize("json", json.dumps(data)):
                obj.save()
                if obj.object.__class__ == YouthProfile:
                    membership_number = int(obj.object.membership_number.lstrip("0"))
                    if membership_number > max_membership_number:
                        max_membership_number = membership_number
            # Keep the membership-number sequence ahead of every imported one.
            Sequence.objects.filter(name="membership_number").update(
                last=max_membership_number
            )
            YouthProfile.objects.update(approval_token="")
            admin_group = generate_admin_group()
            for ad_group in ADGroup.objects.all():
                ADGroupMapping.objects.create(group=admin_group, ad_group=ad_group)
        self.stdout.write(
            self.style.SUCCESS(
                # BUG FIX: the message used to end "... and N from (unknown)"
                # — missing the noun and the actual source file name.
                f"Successfully read {get_user_model().objects.count()} users and "
                f"{YouthProfile.objects.count()} youth profiles from {filename}"
            )
        )
| 2,553 | 692 |
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
from __future__ import print_function
import sys
import os
import gzip
import json
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"../../../metadata/utils"))
from utils import AddPath, Utils, Timer, printt, printWroteNumLines
AddPath(__file__, '../../common/')
from common import printr, printt
def doIntersection(cres, others):
    """Intersect the cRE bed *cres* with bed *others* via bedtools and
    return column 5 (the accession) of every overlapping line.

    Returns None (after logging to stderr) when the bedtools run fails.
    """
    try:
        return [p.rstrip().split("\t")[4] for p in Utils.runCmds([
            "bedtools", "intersect", "-a", cres, "-b", others, "-wa"
        ])]
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        print("pcommon$doIntersection: failed to intersect %s with %s" % (cres, others),
              file=sys.stderr)
def runIntersectJob(jobargs, bedfnp):
    """Intersect one experiment bed against the cRE bed *bedfnp*.

    *jobargs* is a dict with at least "bed" ({"fnp", "fileID"}), "etype",
    "label", "i" and "total" (progress counters) — schema inferred from the
    lookups below; confirm against the caller.

    Returns None when the bed file is missing, otherwise a list holding one
    (etype, label, fileID, accessions) tuple (empty if the intersection
    itself failed).
    """
    if not os.path.exists(jobargs["bed"]["fnp"]):
        print("pcommon$runIntersectJob: missing bed %s; cannot intersect" % jobargs["bed"]["fnp"],
              file=sys.stderr)
        return None
    ret = []
    printr("pcommon$runIntersectJob: (exp %d of %d)" % (jobargs["i"], jobargs["total"]),
           "intersecting", jobargs["etype"], jobargs["label"])
    accessions = doIntersection(bedfnp, jobargs["bed"]["fnp"])
    if accessions is None:
        print("pcommon$runIntersectJob: warning: unable to intersect REs with bed %s" % jobargs["bed"]["fnp"],
              file=sys.stderr)
    else:
        ret.append((jobargs["etype"], jobargs["label"], jobargs["bed"]["fileID"], accessions))
    return ret
def processResults(results, outFnp):
    """Merge per-file intersection results into a per-accession map and
    write it out as gzipped TSV: accession, tf-JSON, histone-JSON.

    *results* is an iterable of (fileJson, accessions) pairs where
    accessions is the list of tuples produced by runIntersectJob.

    NOTE: this is Python 2 code — dict.iteritems() and writing str to a
    gzip file opened with mode 'w' both break on Python 3.
    """
    tfImap = {}
    fileJsons = []
    for fileJson, accessions in results:
        if not accessions:
            continue
        for etype, label, fileID, accs in accessions:
            for acc in accs:
                if acc not in tfImap:
                    tfImap[acc] = {"tf": {}, "histone": {}}
                if label not in tfImap[acc][etype]:
                    tfImap[acc][etype][label] = []
                tfImap[acc][etype][label].append(fileID)
        # Collected but not returned; presumably consumed elsewhere or
        # vestigial — confirm.
        fileJsons += fileJson
    printt("completed hash merge")
    with gzip.open(outFnp, 'w') as f:
        for k, v in tfImap.iteritems():
            f.write('\t'.join([k,
                               json.dumps(v["tf"]),
                               json.dumps(v["histone"])
                               ]) + '\n')
    printt("wrote", outFnp)
| 2,412 | 781 |
# Generated by Django 3.2.4 on 2021-11-10 10:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (do not hand-edit operations):
    # renames equipment FK fields, adds `active` flags to several models,
    # and links equipment to barns via a nullable FK.
    dependencies = [
        ('finliveapp', '0019_remove_organization_apikey'),
    ]
    operations = [
        migrations.RenameField(
            model_name='gasmeasurement',
            old_name='equipmentid',
            new_name='equipment',
        ),
        migrations.RemoveField(
            model_name='weight',
            name='equipment_id',
        ),
        migrations.AddField(
            model_name='barn',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='breed',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='equipment',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='equipment',
            name='barn',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='finliveapp.barn'),
        ),
        migrations.AddField(
            model_name='equipment',
            name='uuid',
            field=models.UUIDField(default=None, null=True),
        ),
        migrations.AddField(
            model_name='laboratory',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='milkingsystem',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='organization',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='seedingtype',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='weight',
            name='equipment',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='finliveapp.equipment'),
        ),
        migrations.AlterField(
            model_name='equipment',
            name='equipmentid',
            field=models.CharField(max_length=128),
        ),
        migrations.AlterField(
            model_name='equipment',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| 2,610 | 744 |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
import json
import os
import argparse
import utils
from model import vgg
import itertools
import numpy as np
import random
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torch.nn.functional as F
### ------------------------------------ Dataloader -------------------------------------- ###
def get_dataloader(dataset, train_dir, val_dir, batchsize):
    """Build train/val DataLoaders for the requested dataset.

    Returns (trainloader, valloader, nb_cls).
    """
    if dataset == 'Animal10N':
        nb_cls = 10
        # Both splits use plain tensor conversion -- no augmentation.
        to_tensor = transforms.Compose([transforms.ToTensor()])
        train_set = ImageFolder(train_dir, to_tensor)
        val_set = ImageFolder(val_dir, to_tensor)
        trainloader = DataLoader(train_set,
                                 batch_size=batchsize,
                                 shuffle=True,
                                 drop_last=True,
                                 num_workers=4,
                                 pin_memory=True)
        valloader = DataLoader(val_set,
                               batch_size=batchsize,
                               shuffle=False,
                               drop_last=False,
                               num_workers=4,
                               pin_memory=True)
        return trainloader, valloader, nb_cls
### --------------------------------------------------------------------------------------------
### ------------------------------------ Distribution -------------------------------------- ###
def GaussianDist(mu, std, N):
    """Discrete Gaussian-shaped distribution over {1..N}, normalized to sum 1."""
    ks = np.arange(1, N + 1)
    weights = np.exp(-((ks - mu) / std) ** 2)
    return weights / weights.sum()
### ---------------------------------------------------------------------------------------------
### ------------------------ Test with Nested (iterate all possible K) --------------------- ###
def TestNested(epoch, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim):
    """Evaluate the nested model for every possible K (feature-mask size),
    pick the best (acc, K) pair, and checkpoint if accuracy improved.

    Returns (best_acc, acc, best_k). Requires CUDA.
    """
    net_feat.eval()
    net_cls.eval()
    bestTop1 = 0  # NOTE(review): unused local
    # one running correct-count per candidate K
    true_pred = torch.zeros(len(mask_feat_dim)).cuda()
    nb_sample = 0
    for batchIdx, (inputs, targets) in enumerate(valloader):
        inputs = inputs.cuda()
        targets = targets.cuda()
        feature = net_feat(inputs)
        outputs = []
        # classify with every prefix mask of the feature vector
        for i in range(len(mask_feat_dim)):
            feature_mask = feature * mask_feat_dim[i]
            outputs.append( net_cls(feature_mask).unsqueeze(0) )
        outputs = torch.cat(outputs, dim=0)
        _, pred = torch.max(outputs, dim=2)
        targets = targets.unsqueeze(0).expand_as(pred)
        true_pred = true_pred + torch.sum(pred == targets, dim=1).type(torch.cuda.FloatTensor)
        nb_sample += len(inputs)
    # the -1e-5 * k term breaks accuracy ties in favor of the smaller K
    acc, k = torch.max((true_pred / nb_sample - 1e-5 * torch.arange(len(mask_feat_dim)).type_as(true_pred)), dim=0)
    acc, k = acc.item(), k.item()
    msg = '\nNested ... Epoch {:d}, Acc {:.3f} %, K {:d} (Best Acc {:.3f} %)'.format(epoch, acc * 100, k, best_acc * 100)
    print (msg)
    # save checkpoint
    if acc > best_acc:
        msg = 'Best Performance improved from {:.3f} --> {:.3f}'.format(best_acc, acc)
        print(msg)
        print ('Saving Best!!!')
        param = {'feat': net_feat.state_dict(),
                 'cls': net_cls.state_dict(),
                 }
        torch.save(param, os.path.join(out_dir, 'netBest.pth'))
        best_acc = acc
        best_k = k
    return best_acc, acc, best_k
### --------------------------------------------------------------------------------------------
### --------------- Test standard (used for model w/o nested, baseline, dropout) ------------###
def TestStandard(epoch, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim):
    """Evaluate with the full (unmasked) feature vector; checkpoint on
    improvement. Used for baseline / dropout models without nesting.

    Returns (best_acc, acc, len(mask_feat_dim)) to mirror TestNested's
    signature. Requires CUDA.
    """
    net_feat.eval()
    net_cls.eval()
    bestTop1 = 0  # NOTE(review): unused local
    true_pred = torch.zeros(1).cuda()
    nb_sample = 0
    for batchIdx, (inputs, targets) in enumerate(valloader):
        inputs = inputs.cuda()
        targets = targets.cuda()
        feature = net_feat(inputs)
        outputs = net_cls(feature)
        _, pred = torch.max(outputs, dim=1)
        true_pred = true_pred + torch.sum(pred == targets).type(torch.cuda.FloatTensor)
        nb_sample += len(inputs)
    acc = true_pred / nb_sample
    acc = acc.item()
    msg = 'Standard ... Epoch {:d}, Acc {:.3f} %, (Best Acc {:.3f} %)'.format(epoch, acc * 100, best_acc * 100)
    print (msg)
    # save checkpoint
    if acc > best_acc:
        msg = 'Best Performance improved from {:.3f} --> {:.3f}'.format(best_acc * 100, acc * 100)
        print (msg)
        print ('Saving Best!!!')
        param = {'feat': net_feat.state_dict(),
                 'cls': net_cls.state_dict(),
                 }
        torch.save(param, os.path.join(out_dir, 'netBest.pth'))
        best_acc = acc
    return best_acc, acc, len(mask_feat_dim)
### --------------------------------------------------------------------------------------------
### -------------------------------------- Training --------------------------------------- ###
def Train(epoch, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn):
    """Run one training epoch with optional nested feature masking.

    dist1 / dist2 are sampling distributions over mask sizes for the feature
    net and the classifier respectively (None disables that stage's masking).
    Returns (avg loss, avg top1, avg top5). Requires CUDA.
    """
    msg = '\nEpoch: {:d}'.format(epoch)
    print (msg)
    net_feat.train(freeze_bn = freeze_bn)
    net_cls.train()
    losses = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    for batchIdx, (inputs, targets) in enumerate(trainloader):
        inputs = inputs.cuda()
        targets = targets.cuda()
        # NOTE(review): the loop variable `optim` shadows the torch.optim
        # module imported at file top (harmless here, but confusing).
        for optim in optimizer:
            optim.zero_grad()
        # whether to use alterative training for the nested mode
        if alter_train:
            # alternate randomly between training mask stage 1 and stage 2
            alter = random.randint(0, 1)
        else:
            alter = None
        if dist1 is not None:
            if alter == 0 or alter is None:
                k1 = np.random.choice(range(len(mask_feat_dim)), p=dist1)
                mask1 = mask_feat_dim[k1]
            else:
                # train both nested layers
                mask1 = mask_feat_dim[-1]
        else:
            mask1 = mask_feat_dim[-1]
        feature = net_feat(inputs, mask1)
        if dist2 is not None:
            if alter == 1 or alter is None:
                k2 = np.random.choice(range(len(mask_feat_dim)), p=dist2)
                mask2 = mask_feat_dim[k2]
                feature_masked = feature * mask2
            else:
                feature_masked = feature
        else:
            feature_masked = feature
        outputs = net_cls(feature_masked)
        loss = criterion(outputs, targets)
        loss.backward()
        for optim in optimizer:
            optim.step()
        acc1, acc5 = utils.accuracy(outputs, targets, topk=(1, 5))
        losses.update(loss.item(), inputs.size()[0])
        top1.update(acc1[0].item(), inputs.size()[0])
        top5.update(acc5[0].item(), inputs.size()[0])
        msg = 'Loss: {:.3f} | Top1: {:.3f}% | Top5: {:.3f}%'.format(losses.avg, top1.avg, top5.avg)
        utils.progress_bar(batchIdx, len(trainloader), msg)
    return losses.avg, top1.avg, top5.avg
### --------------------------------------------------------------------------------------------
### ------------------------------------ Lr Warm Up --------------------------------------- ###
def LrWarmUp(warmUpIter, lr, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn):
    """Linearly ramp the learning rate from ~0 to `lr` over warmUpIter
    iterations while training (same masking scheme as Train). Requires CUDA.

    NOTE(review): the batch at nbIter == warmUpIter breaks out before its
    update, so the lr set here never quite reaches `lr` -- the caller resets
    the lr to `lr` afterwards, so this is presumably intentional; confirm.
    """
    nbIter = 0
    while nbIter < warmUpIter:
        net_feat.train(freeze_bn = freeze_bn)
        net_cls.train()
        losses = utils.AverageMeter()
        top1 = utils.AverageMeter()
        top5 = utils.AverageMeter()
        for batchIdx, (inputs, targets) in enumerate(trainloader):
            nbIter += 1
            if nbIter == warmUpIter:
                break
            # linear warm-up schedule
            lrUpdate = nbIter / float(warmUpIter) * lr
            for optim in optimizer:
                for g in optim.param_groups:
                    g['lr'] = lrUpdate
            inputs = inputs.cuda()
            targets = targets.cuda()
            for optim in optimizer:
                optim.zero_grad()
            # whether to use alterative training for the nested mode
            if alter_train:
                alter = random.randint(0, 1)
            else:
                # train both nested layers
                alter = None
            if dist1 is not None:
                if alter == 0 or alter is None:
                    k1 = np.random.choice(range(len(mask_feat_dim)), p=dist1)
                    mask1 = mask_feat_dim[k1]
                else:
                    mask1 = mask_feat_dim[-1]
            else:
                mask1 = mask_feat_dim[-1]
            feature = net_feat(inputs, mask1)
            if dist2 is not None:
                if alter == 1 or alter is None:
                    k2 = np.random.choice(range(len(mask_feat_dim)), p=dist2)
                    mask2 = mask_feat_dim[k2]
                    feature_masked = feature * mask2
                else:
                    feature_masked = feature
            else:
                feature_masked = feature
            outputs = net_cls(feature_masked)
            loss = criterion(outputs, targets)
            loss.backward()
            for optim in optimizer:
                optim.step()
            acc1, acc5 = utils.accuracy(outputs, targets, topk=(1, 5))
            losses.update(loss.item(), inputs.size()[0])
            top1.update(acc1[0].item(), inputs.size()[0])
            top5.update(acc5[0].item(), inputs.size()[0])
            msg = 'Loss: {:.3f} | Lr : {:.5f} | Top1: {:.3f}% | Top5: {:.3f}%'.format(losses.avg, lrUpdate, top1.avg, top5.avg)
            utils.progress_bar(batchIdx, len(trainloader), msg)
### --------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
########################################-- MAIN FUNCTION --#####################################
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
def main(gpu, arch, vgg_dropout, out_dir, dataset, train_dir, val_dir, warmUpIter, lr, nbEpoch, batchsize, momentum=0.9, weightDecay = 5e-4, lrSchedule = [200, 300], lr_gamma=0.1, mu=0, nested1=1.0, nested2=1.0, alter_train=False, resumePth=None, freeze_bn=False, pretrained=False):
    """Train a nested-dropout VGG classifier end to end.

    Builds the feature/classifier nets, warms up the learning rate, trains
    for nbEpoch epochs with the nested masking distributions (dist1/dist2),
    checkpoints the best model, and renames out_dir with the final score.
    Requires CUDA.
    """
    best_acc = 0  # best test accuracy
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    trainloader, valloader, nb_cls = get_dataloader(dataset, train_dir, val_dir, batchsize)
    # feature net + classifier net (a linear layer)
    net_feat = vgg.NetFeat(arch = arch,
                           pretrained = pretrained,
                           dataset = dataset,
                           vgg_dropout = vgg_dropout)
    net_cls = vgg.NetClassifier(feat_dim = net_feat.feat_dim,
                                nb_cls = nb_cls)
    net_feat.cuda()
    net_cls.cuda()
    feat_dim = net_feat.feat_dim
    best_k = feat_dim
    # generate prefix masks: mask i keeps the first i+1 feature dims
    mask_feat_dim = []
    for i in range(feat_dim):
        tmp = torch.cuda.FloatTensor(1, feat_dim).fill_(0)
        tmp[:, : (i + 1)] = 1
        mask_feat_dim.append(tmp)
    # distribution and test function
    dist1 = GaussianDist(mu, nested1, feat_dim) if nested1 > 0 else None
    dist2 = GaussianDist(mu, nested2, feat_dim) if nested2 > 0 else None
    Test = TestNested if (nested1 > 0) or (nested2 > 0) else TestStandard
    # load model
    if resumePth:
        param = torch.load(resumePth)
        net_feat.load_state_dict(param['feat'])
        print ('Loading feature weight from {}'.format(resumePth))
        net_cls.load_state_dict(param['cls'])
        print ('Loading classifier weight from {}'.format(resumePth))
    # output dir + loss + optimizer
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    criterion = nn.CrossEntropyLoss()
    # Fixed: use the `momentum` / `weightDecay` parameters instead of the
    # global `args` (which only exists when run from the CLI entry point).
    # lr starts near zero; LrWarmUp ramps it up.
    optimizer = [torch.optim.SGD(itertools.chain(*[net_feat.parameters()]),
                                 1e-7,
                                 momentum=momentum,
                                 weight_decay=weightDecay),
                 torch.optim.SGD(itertools.chain(*[net_cls.parameters()]),
                                 1e-7,
                                 momentum=momentum,
                                 weight_decay=weightDecay)] # remove the weight decay in classifier
    # learning rate warm up
    LrWarmUp(warmUpIter, lr, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn)
    with torch.no_grad():
        best_acc, acc, best_k = Test(0, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim)
    # reset tracking after warm-up; set the full learning rate
    best_acc, best_k = 0, feat_dim
    for optim in optimizer:
        for g in optim.param_groups:
            g['lr'] = lr
    history = {'trainTop1':[], 'best_acc':[], 'trainTop5':[], 'valTop1':[], 'trainLoss':[], 'best_k':[]}
    lrScheduler = [MultiStepLR(optim, milestones=lrSchedule, gamma=lr_gamma) for optim in optimizer]
    for epoch in range(nbEpoch):
        trainLoss, trainTop1, trainTop5 = Train(epoch, optimizer, net_feat, net_cls, trainloader, criterion, dist1, dist2, mask_feat_dim, alter_train, freeze_bn)
        with torch.no_grad():
            best_acc, valTop1, best_k = Test(epoch, best_acc, best_k, net_feat, net_cls, valloader, out_dir, mask_feat_dim)
        history['trainTop1'].append(trainTop1)
        history['trainTop5'].append(trainTop5)
        history['trainLoss'].append(trainLoss)
        history['valTop1'].append(valTop1)
        history['best_acc'].append(best_acc)
        history['best_k'].append(best_k)
        with open(os.path.join(out_dir, 'history.json'), 'w') as f:
            json.dump(history, f)
        for lr_schedule in lrScheduler:
            lr_schedule.step()
    # tag the output directory with the final accuracy and K
    msg = 'mv {} {}'.format(out_dir, '{}_Acc{:.3f}_K{:d}'.format(out_dir, best_acc, best_k))
    print (msg)
    os.system(msg)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Classification', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # data
    parser.add_argument('--train-dir', type=str, default='../data/Animal10N/train/', help='train directory')
    parser.add_argument('--val-dir', type=str, default='../data/Animal10N/test/', help='val directory')
    parser.add_argument('--dataset', type=str, choices=['Animal10N'], default='Animal10N', help='which dataset?')
    # training
    parser.add_argument('--warmUpIter', type=int, default=6000, help='total iterations for learning rate warm')
    parser.add_argument('--lr', default=1e-1, type=float, help='learning rate')
    parser.add_argument('--weightDecay', default=5e-4, type=float, help='weight decay')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--batchsize', type=int, default=128, help='batch size')
    parser.add_argument('--nbEpoch', type=int, default=100, help='nb epoch')
    parser.add_argument('--lrSchedule', nargs='+', type=int, default=[50, 75], help='lr schedule')
    parser.add_argument('--lr-gamma', type=float, default=0.2, help='decrease learning rate by lr-gamma')
    parser.add_argument('--gpu', type=str, default='0', help='gpu devices')
    # model
    parser.add_argument('--arch', type=str, choices=['vgg19-bn'], default='vgg19-bn', help='which archtecture?')
    parser.add_argument('--out-dir', type=str, help='output directory')
    parser.add_argument('--mu', type=float, default=0.0, help='nested mean hyperparameter')
    parser.add_argument('--nested1', type=float, default=0.0, help='nested1 std hyperparameter')
    parser.add_argument('--nested2', type=float, default=0.0, help='nested2 std hyperparameter')
    parser.add_argument('--alter-train', action='store_true', help='whether to use alternative training for nested')
    parser.add_argument('--vgg-dropout', type=float, default=0.0, help='dropout ratio')
    parser.add_argument('--resumePth', type=str, help='resume path')
    parser.add_argument('--freeze-bn', action='store_true', help='freeze the BN layers')
    parser.add_argument('--pretrained', action='store_true', help='Start with ImageNet pretrained model (Pytorch Model Zoo)')
    args = parser.parse_args()
    print (args)
    # nested masking and VGG dropout are mutually exclusive regularizers
    if (args.nested1 > 0 or args.nested2 > 0) and args.vgg_dropout > 0:
        raise RuntimeError('Activating both nested1 / nested2 (eta = {:.3f} / {:.3f}) and vgg_dropout \
        (ratio = {:.3f})'.format(args.nested1, args.nested2, args.vgg_dropout))
    main(gpu = args.gpu,
         arch = args.arch,
         vgg_dropout= args.vgg_dropout,
         out_dir = args.out_dir,
         dataset = args.dataset,
         train_dir = args.train_dir,
         val_dir = args.val_dir,
         warmUpIter = args.warmUpIter,
         lr = args.lr,
         nbEpoch = args.nbEpoch,
         batchsize = args.batchsize,
         momentum = args.momentum,
         weightDecay = args.weightDecay,
         lrSchedule = args.lrSchedule,
         lr_gamma = args.lr_gamma,
         mu = args.mu,
         nested1 = args.nested1,
         nested2 = args.nested2,
         alter_train = args.alter_train,
         resumePth = args.resumePth,
         freeze_bn = args.freeze_bn,
         pretrained = args.pretrained)
""" Fun with unpacking stuff. """
from typing import Any, Tuple
def print_integers(val01: int, val02: int) -> None:
    """ Prints some message for the integers. """
    message = f"--> print_integers() -- VAL 1 = {val01}, VAL 2 = {val02}"
    print(message)
def print_tuples(tup: Tuple[Any, ...]) -> None:
    """ Prints some message for the tuple.

    Annotation changed from ``tuple[Any, ...]`` to ``Tuple[Any, ...]`` for
    consistency with the rest of the file and Python < 3.9 compatibility.
    """
    print(f"--> print_tuples() -- TUPLE = {tup}")
def get_tuples(val01: int, val02: int) -> Tuple[int, int]:
    """ Constructs a tuple out of integers. """
    pair = val01, val02
    return pair
tup01: Tuple[int, int] = get_tuples(11, 14)
# Pass the tuple object itself:
# --> print_tuples() -- TUPLE = (11, 14)
print_tuples(tup01)
# Unpack it into two positional arguments with *:
# --> print_integers() -- VAL 1 = 11, VAL 2 = 14
print_integers(*tup01)
# Without *, the inner tuple stays one nested element:
tup02 = (13, get_tuples(17, 19), 23)
# --> print_tuples() -- TUPLE = (13, (17, 19), 23)
print_tuples(tup02)
# With *, the inner tuple's items are spliced into the outer tuple:
tup03 = (13, *get_tuples(17, 19), 23)
# --> print_tuples() -- TUPLE = (13, 17, 19, 23)
print_tuples(tup03)
| 918 | 425 |
import codecs

print("""encodings :
# hex
# quopri
# uu
# uu_codec
# zip
# zlib
# bz2
# bz2_codec
base64""")
en = input('enter encoding -: ')
# Encode the file content directly. The original built a source string and
# ran it through eval(), which broke on any file whose content was not
# itself a quoted Python literal -- and executed arbitrary input.
with open(input("enter file to be encoded"), 'r') as src:
    payload = src.read()
encoded = codecs.encode(payload.encode(), en)
# Write a self-decoding stub; repr() of the bytes object round-trips safely,
# and the generated file now imports codecs (the original stub did not).
with open(input('enter output file name -: '), 'w') as out:
    out.write(f'import codecs\na = codecs.decode({encoded!r}, "{en}")\n')
    out.write('exec(a)')
| 355 | 170 |
#!/usr/bin/python
from ui import Window, Panel, Label, Image
import pygame
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GRAY = (100, 100, 100)
class Loader(object):
    """ Loader class.

    Builds a fullscreen-less pygame window with a vertical panel and walks
    the user through welcome -> yes/no prompt -> wifi configuration.
    """
    def __init__(self, size=(640, 480)):
        """ Default constructor.

        :param size: (width, height) of the window in pixels
        """
        self.size = size
        self.window = Window(size=size, fullscreen=False, backgroundColor=WHITE)
        self.container = Panel(orientation='vertical')
        self.window.add(self.container)
    def welcome(self):
        """ Welcome screen. Any click clears it and opens the wifi prompt. """
        header = Label('Bienvenue', color=BLACK, size='huge')
        message = Label('Appuyer pour commencer', color=BLACK, size='medium')
        self.container.add(header)
        self.container.add(message)
        def onClick(position):
            """ Window click callback. """
            self.container.remove(header)
            self.container.remove(message)
            # unregister before opening the prompt so a second click
            # cannot re-trigger this handler
            self.window.onWindowClick = None
            self.prompt('Voulez vous configurer la connection internet ?', lambda r: self.wifi(r))
        self.window.onWindowClick = onClick
    def prompt(self, question, callback):
        """ Prompt screen (Yes / No question only)

        :param question: text shown above the Oui/Non buttons
        :param callback: called with True (Oui) or False (Non) after the
                         prompt widgets have been removed
        """
        header = Label(question, color=BLACK, size='medium')
        panel = Panel(orientation='horizontal', padding=20)
        def createPromptCallback(callback, answer):
            # factory binds `answer` per button (avoids late-binding closures)
            def delegate():
                self.container.remove(header)
                self.container.remove(panel)
                callback(answer)
            return delegate
        yes = Label(' Oui ', color=WHITE, background=GRAY, size='medium')
        no = Label(' Non ', color=WHITE, background=GRAY, size='medium')
        yes.onClick = createPromptCallback(callback, True)
        no.onClick = createPromptCallback(callback, False)
        panel.add(yes)
        panel.add(no)
        self.container.add(header)
        self.container.add(panel)
        self.window.invalidate()
    def wifi(self, configure):
        """ WiFi configuration screen.

        :param configure: True to set up the connection, False to quit
        """
        if configure:
            # TODO : Set RPI as WiFi hotspot.
            # TODO : Start webserver.
            # TODO : Quit and go next.
            pass
        else:
            quit()
if __name__ == '__main__':
    # NOTE(review): `info` is unused, and pygame.display.Info() is called
    # before any explicit display init here -- presumably the ui module
    # initializes pygame on import; verify.
    info = pygame.display.Info()
    loader = Loader()
    loader.welcome()
    loader.window.start()
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from core.database import get_db
from db.services.userservices import UserService
from schemas.users import RegisterUser
from core.token import get_currentUser
from db.models.usermodels import User
router = APIRouter()
@router.get("/")
def getAllUser(db: Session = Depends(get_db)):
    """List every user record."""
    users = UserService.get_allUsers(db=db)
    return users
@router.post("/")
def createUser(user: RegisterUser, db: Session = Depends(get_db)):
    """Register a new user unless the username or email is already taken."""
    username_taken = UserService.get_user_by_username(db=db, username=user.username)
    email_taken = UserService.get_user_by_email(db=db, email=user.email)
    if username_taken or email_taken:
        return {"error_message":"User or email already exists "}
    return UserService.create_user(user, db)
@router.get("/me")
def getMe(current_user: User = Depends(get_currentUser)):
    """Return the authenticated user resolved from the request token."""
    return current_user
@router.put("/{userid}")
def updateUser(userid: str, user: RegisterUser, db: Session = Depends(get_db)):
    """Update the stored user identified by userid with the submitted data."""
    updated = UserService.update_user(id=userid, user=user, db=db)
    return updated
@router.delete("/{userid}")
def deleteUser(userid: str, db: Session = Depends(get_db)):
    """Delete the user identified by userid."""
    result = UserService.delete_user_by_id(id=userid, db=db)
    return result
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from users.models import Profile
from django.contrib.auth.models import User
from statistics import mean
#
from django.urls import reverse
# Create your models here.
class Post(models.Model):
    '''
    A submitted project/site post with per-category rating fields.
    '''
    title=models.CharField(max_length=30)
    description=models.TextField()
    link=models.CharField(max_length=100)
    image=models.ImageField(upload_to='poster/',default='')
    date_posted=models.DateTimeField(auto_now_add=True)
    author=models.ForeignKey(User,on_delete=models.CASCADE)
    design=models.IntegerField(blank=True,default=0)
    usability=models.IntegerField(blank=True,default=0)
    creativity=models.IntegerField(blank=True,default=0)
    content=models.IntegerField(blank=True,default=0)
    mobile=models.IntegerField(blank=True,default=0)
    def __str__(self):
        return f'Post{self.title}--{self.description}--{self.author.username}'
    def get_absolute_url(self):
        '''
        return full path of a url
        '''
        return reverse('post-detail',kwargs={'pk':self.pk})
    def save_post(self):
        '''
        method to save a post
        '''
        self.save()
    @classmethod
    def get_posts(cls):
        '''
        method to fetch all posts
        '''
        posts=cls.objects.order_by('-date_posted')
        return posts
    @classmethod
    def get_post_by_id(cls,id):
        '''
        fetch one post by primary key; raise Http404 if it does not exist.
        '''
        # Fixed: removed an unconditional `assert False` that sat after the
        # try/except and made this method fail on every call.
        try:
            post=cls.objects.get(id=id)
        except ObjectDoesNotExist:
            raise Http404()
        return post
    @classmethod
    def get_posts_by_username(cls,username):
        '''
        fetch a user's posts, newest first
        '''
        posts=cls.objects.filter(author=username).order_by('-date_posted')
        return posts
    @classmethod
    def delete_post(cls,post_id):
        '''
        method to delete a post
        '''
        cls.objects.get(id=post_id).delete()
    @classmethod
    def search(cls,search_term):
        '''
        method that returns a post based on search query
        '''
        posts=cls.objects.filter(Q(title__icontains=search_term) |Q(author__username__icontains=search_term))
        return posts
class Review(models.Model):
    '''
    One judge's per-category scores for a Post.
    '''
    design=models.IntegerField(blank=True,default=0)
    usability=models.IntegerField(blank=True,default=0)
    creativity=models.IntegerField(blank=True,default=0)
    content=models.IntegerField(blank=True,default=0)
    mobile=models.IntegerField(blank=True,default=0)
    post=models.ForeignKey(Post,on_delete=models.CASCADE)
    judge=models.ForeignKey(User,blank=True,null=True,on_delete=models.CASCADE)
    average_review=models.IntegerField(blank=True,default=0)
    def save_review(self):
        # thin persistence wrapper, mirrors Post.save_post
        self.save()
    def __str__(self):
        return f'{self.post.title}:Review-{self.design}-{self.usability}-{self.creativity}-{self.content}-{self.mobile}-{self.post.id}'
    @classmethod
    def get_all_reviews(cls,post_id):
        '''
        Aggregate all reviews of a post: per-category rounded means plus
        their overall average, returned as a dict.

        NOTE(review): statistics.mean raises StatisticsError when the post
        has no reviews -- callers presumably guard against that; verify.
        '''
        design=round(mean(cls.objects.filter(post_id=post_id).values_list('design',flat=True)))
        usability=round(mean(cls.objects.filter(post_id=post_id).values_list('usability',flat=True)))
        creativity=round(mean(cls.objects.filter(post_id=post_id).values_list('creativity',flat=True)))
        content=round(mean(cls.objects.filter(post_id=post_id).values_list('content',flat=True)))
        mobile=round(mean(cls.objects.filter(post_id=post_id).values_list('mobile',flat=True)))
        average_review=(design+usability+creativity+content+mobile)/5
        return {
            'design':design,
            'usability':usability,
            'creativity':creativity,
            'content':content,
            'mobile':mobile,
            'average_review':average_review
        }
| 4,027 | 1,260 |
from timeit import default_timer as timer
import xgboost as xgb
import common
import gc
# Number of timed prediction loops per batch size.
NUM_LOOPS = 100
# XGBoost regressor hyperparameters (CPU histogram method, CPU predictor).
PARAMS = {
          'objective': 'reg:squarederror',
          'alpha': 0.9,
          'max_bin': 256,
          'scale_pos_weight': 2,
          'learning_rate': 0.1,
          'subsample': 1,
          'reg_lambda': 1,
          'min_child_weight': 0,
          'max_depth': 8,
          'max_leaves': 2**8,
          'tree_method': 'hist',
          'predictor': 'cpu_predictor'
}
# Train once up front on the shared dataset from `common`; the benchmark
# below only times inference.
TRAIN_DF = xgb.DMatrix(data=common.X, label=common.y)
MODEL = xgb.train(params=PARAMS, dtrain=TRAIN_DF)
def run_inference(num_observations:int = 1000):
    """Run xgboost for specified number of observations"""
    # Load data
    test_df = common.get_test_data(num_observations)
    num_rows = len(test_df)
    # print(f"Running {NUM_LOOPS} inference loops with batch size {num_rows}...")
    run_times3 = []
    inference_times3 = []
    for _ in range(NUM_LOOPS):
        start_time = timer()
        # DMatrix construction is deliberately inside the timed region
        data = xgb.DMatrix(test_df)
        MODEL.predict(data)
        end_time = timer()
        total_time3 = end_time - start_time
        # NOTE(review): 10e3 == 1e4 and 10e6 == 1e7 -- if these are meant
        # to convert seconds to ms and us respectively they should be 1e3
        # and 1e6; confirm intended units before changing reported numbers.
        run_times3.append(total_time3*10e3)
        inference_time3 = total_time3*(10e6)/num_rows
        inference_times3.append(inference_time3)
    print(num_observations, ", ", common.calculate_stats(inference_times3))
# Write Python code that removes the duplicates from a list
# (original Hungarian: irjunk olyan python kodot, amely kitorli egy
# listabol a duplumokat)
a = [10,20,30,20,10,50,60,40,80,50,40]
dup_items = set()   # values already encountered
uniq_items = []     # first occurrence of each value, in input order
for x in a:
    if x not in dup_items:
        uniq_items.append(x)
        dup_items.add(x)
# Fixed: print the de-duplicated, order-preserving list; the original
# printed the helper set, whose iteration order is arbitrary.
print(uniq_items)
| 255 | 124 |
# Module-level placeholders, expected to be assigned by the consumer at
# runtime before use.
tasklist = None  # NOTE(review): presumably a task collection -- confirm with callers
my_monitor = None  # NOTE(review): presumably a monitoring object -- confirm with callers
| 34 | 13 |
from .common import *
from ..components import env
# Select the secret key and environment-specific settings module.
# In DEBUG a hard-coded fallback secret is tolerated; in production
# SECRET_KEY must come from the environment (env() raises if missing).
if DEBUG:
    SECRET_KEY = env('SECRET_KEY', default='-qf)o7hs$jk@b8o)zidroo9wskuf^95m2$@k)5^@hl-=)349-7')
    from .development import *
else:
    SECRET_KEY = env('SECRET_KEY')
    from .production import *
| 261 | 113 |
""" forcealloc.py - Map commanded thrust to generalized, cartesian forces """
# BSD 2-Clause License
#
# Copyright (c) 2001-2017, Karl-Petter Lindegaard
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from math import sin, cos
from cs2data import T1LX, T1LY, T2LX, T2LY, T3LX, T3LY
class ForceAllocation:
    """
    ForceAllocation - maps from tau_c to generalized forces

    Two candidate solutions are computed -- one with each rudder disabled --
    and the cheaper one (quadratic cost x'Qx) is selected.
    """
    def __init__(self, theta1, theta2, c1, c2):
        """
        :param theta1: Port main propeller/rudder postive force angle span [rad]
        :param theta2: Starboard main propeller/rudder postive force angle span [rad]
        :param c1: Port main propeller positive thrust bias [N]
        :param c2: Starboard main properller positive thrust bias [N]
        """
        self.theta1 = theta1
        self.theta2 = theta2
        self.c1 = c1
        self.c2 = c2
        # Cost weights for comparing the two candidate solutions in allocate()
        self.Q1 = np.eye(4)
        self.Q2 = np.eye(4)
        # Full allocation matrix
        self.A = np.array([
            [1, 0, 1, 0, 0],
            [0, 1, 0, 1, 1],
            [-T1LY, T1LX, -T2LY, T2LX, T3LX]
        ])
        # Filters. f1 = Rudder 2 inactive, f2 = Rudder 1 inactive
        self.f1 = np.array([True, True, True, False, True])
        self.f2 = np.array([True, False, True, True, True])
        # Configure A1, n1 and A1dagger etc.
        self.A1 = self.A[:,self.f1]
        # Null-vector for A1
        self.n1 = np.zeros(4)
        self.n1[0] = (T1LX - T3LX) / (T1LY - T2LY)
        self.n1[1] = 1.0
        self.n1[2] = -self.n1[0]
        self.n1[3] = -1.0
        # A1_dagger = A1'*inv(A1*A1')
        self.A1_dagger = self.A1.T.dot(np.linalg.inv(self.A1.dot(self.A1.T)))
        # Configure A2, n2 and A2dagger etc.
        self.A2 = self.A[:,self.f2]
        # Null-vector for A2
        self.n2 = np.zeros(4)
        self.n2[0] = (T3LX - T2LX) / (T1LY - T2LY)
        self.n2[1] = -self.n2[0]
        self.n2[2] = -1.0
        self.n2[3] = 1.0
        # A2_dagger = A2'*inv(A2*A2')
        self.A2_dagger = self.A2.T.dot(np.linalg.inv(self.A2.dot(self.A2.T)))
    def nullsub1(self, tauc, Adagger, n, theta, c):
        # type: (np.array, np.array, np.array, float, float) -> np.array
        """Pseudo-inverse solution (rudder 2 inactive) corrected along the
        null-vector n so the port prop/rudder force stays in its sector.

        Mirror image of nullsub2 with opposite sign conventions.
        """
        # Step 0: Prepare the a-vector (sector boundary)
        a1 = cos(theta)
        a2 = -sin(theta)
        # Step 1: Find optimal solution based on pseudo-inverse
        u0 = Adagger.dot(tauc)
        # Step 2: Extract prop/rudder and translate to "the other" ref. frame
        u0m1 = u0[0] - c
        u0m2 = u0[1]
        # Step 3: Sector check
        nn1 = n[1]
        nn2 = -n[0]
        dp = nn1*u0m1 + nn2*u0m2
        insector = False
        if dp <= 0.0:
            # Traverse in x-asxis (fx,0)
            b1 = 0.0
            b2 = 1.0
        else:
            # Are we in sector "1"
            if u0m2 >= 0.0:
                b1 = 0.0
                b2 = 1.0
            # Or perhaps we are already within the valid sector
            elif u0m1*a2 < u0m2*a1:
                insector = True
            # Otherwise, traverse along the nullvector until sector limit "a"
            else:
                b1 = a2
                b2 = -a1
        # Step 4: Find lambda, the distance to traverse
        gamma = 0.0
        if not insector:
            gamma = -(u0m1*b1 + u0m2*b2) / (n[0]*b1 + n[1]*b2)
        # Step 5: Adjust solution
        u = u0 + gamma*n
        return u
    def nullsub2(self, tauc, Adagger, n, theta, c):
        # type: (np.array, np.array, np.array, float, float) -> np.array
        """Pseudo-inverse solution (rudder 1 inactive) corrected along the
        null-vector n so the starboard prop/rudder force stays in its sector.

        Mirror image of nullsub1 with opposite sign conventions.
        """
        # Step 0: Prepare the a-vector (sector boundary)
        a1 = cos(theta)
        a2 = sin(theta)
        # Step 1: Find optimal solution based on pseudo-inverse
        u0 = Adagger.dot(tauc)
        # Step 2: Extract prop/rudder and translate to "the other" ref. frame
        u0m1 = u0[1] - c
        u0m2 = u0[2]
        # Step 3: Sector check
        nn1 = n[2]
        nn2 = -n[1]
        dp = nn1 * u0m1 + nn2 * u0m2
        insector = False
        if dp >= 0.0:
            # Traverse in x-asxis (fx,0)
            b1 = 0.0
            b2 = 1.0
        else:
            # Are we in sector "1"
            if u0m2 <= 0.0:
                b1 = 0.0
                b2 = 1.0
            # Or perhaps we are already within the valid sector
            elif u0m1 * a2 > u0m2 * a1:
                insector = True
            # Otherwise, traverse along the nullvector until sector limit "a"
            else:
                b1 = a2
                b2 = -a1
        # Step 4: Find lambda, the distance to traverse
        gamma = 0.0
        if not insector:
            gamma = -(u0m1 * b1 + u0m2 * b2) / (n[1] * b1 + n[2] * b2)
        # Step 5: Adjust solution
        u = u0 + gamma * n
        return u
    def allocate(self, tau):
        """
        Map 3-DOF commanded thrust to generalized forces. First two elements are surge and sway
        for thruster 1 (port main prop+rudder), next two for starboard main prop+rudder, fifth
        element is the bow thruster's sway force.
        :param tau: Commanded thrust vector (surge, sway, yaw)
        :return: Generalized forces
        """
        # type: (np.array) -> np.array
        # Call subroutines for each rudder
        x1 = self.nullsub1(tau, self.A1_dagger, self.n1, self.theta1, self.c1)
        x2 = self.nullsub2(tau, self.A2_dagger, self.n2, self.theta2, self.c2)
        # Compare results and pick the best solution J = x'*Q*x
        j1 = x1.dot(self.Q1.dot(x1))
        j2 = x2.dot(self.Q2.dot(x2))
        u = np.zeros(5)
        if j1 <= j2:
            # Use: u = [x1(0) x1(1) x1(2) 0 x1(3)];
            u[self.f1] = x1
        else:
            # u = [x2(0) 0 x2(1) x2(2) x2(3)];
            u[self.f2] = x2
        return u
| 7,034 | 2,671 |
#!/usr/bin/python
def add(num1, num2):
    """Return num1 + num2 (works for any operands supporting +)."""
    return num1 + num2

# Parenthesized calls work on both Python 2 and Python 3; the original
# `print add(...)` statements were Python-2-only syntax.
print(add(10, 5))
print(add('sloan ', 'kelly'))
print(add(3.14, 1.61))
print(add((1,2,3), (4,5,6)))
import os
import sys
from io import open
import numpy as np
import torch
import torch.nn as nn
from .example import make_example, make_new_example
from .input_features import convert_examples_to_features
from torch.utils.data import DataLoader, Dataset, SequentialSampler, TensorDataset
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
# Supported backbone families -> (config, model, tokenizer) classes.
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
# Pretrained checkpoint used for both the encoder weights and the tokenizer.
model_name_or_path = "microsoft/codebert-base"
beam_size = 10           # beam width used during decoding
max_target_length = 128  # maximum generated sequence length (tokens)
max_source_length = 256  # maximum source code length (tokens)
seed = 42                # random seed (not referenced in this chunk)
def load_model(model_path, is_old=False):
    """Build a CodeBERT-based Seq2Seq model and restore its weights.

    :param model_path: path to the saved state dict
    :param is_old: selects the legacy model/tokenizer variants
    :return: (model, tokenizer) with the model in eval mode
    """
    if is_old:
        from .model import Seq2Seq
    else:
        from .model_new import Seq2Seq
    config_class, model_class, tokenizer_class = MODEL_CLASSES['roberta']
    config = config_class.from_pretrained(model_name_or_path)
    # The new tokenizer variant is explicitly case-sensitive.
    if is_old:
        tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
    else:
        tokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case=False)
    encoder = model_class.from_pretrained(model_name_or_path, config=config)
    decoder = nn.TransformerDecoder(
        nn.TransformerDecoderLayer(d_model=config.hidden_size,
                                   nhead=config.num_attention_heads),
        num_layers=6)
    model = Seq2Seq(encoder=encoder,
                    decoder=decoder,
                    config=config,
                    beam_size=beam_size,
                    max_length=max_target_length,
                    sos_id=tokenizer.cls_token_id,
                    eos_id=tokenizer.sep_token_id)
    on_cpu = not torch.cuda.is_available()
    load_kwargs = {'map_location': torch.device('cpu')} if on_cpu else {}
    state_dict = torch.load(model_path, **load_kwargs)
    # Legacy checkpoints must match exactly; new ones tolerate missing/extra keys.
    model.load_state_dict(state_dict, strict=is_old)
    if on_cpu:
        model.to("cpu")
    model.eval()
    return model, tokenizer
def predict_docstring(model, tokenizer, code_tokens, is_old):
    """Generate docstrings for the given code token sequences via beam search.

    :param model: Seq2Seq model returned by load_model
    :param tokenizer: matching tokenizer
    :param code_tokens: batch of tokenized source snippets
    :param is_old: selects legacy example building / truncation / batching
    :return: single-element list with the post-processed first docstring
    """
    examples = make_example(code_tokens) if is_old else make_new_example(code_tokens)
    features = convert_examples_to_features(examples, tokenizer)
    if is_old:
        all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
        all_source_mask = torch.tensor([f.source_mask for f in features], dtype=torch.long)
    else:
        # New pipeline truncates inputs to the model's max source length.
        all_source_ids = torch.tensor([f.source_ids[: max_source_length] for f in features], dtype=torch.long)
        all_source_mask = torch.tensor([f.source_mask[: max_source_length] for f in features], dtype=torch.long)
    eval_data = TensorDataset(all_source_ids, all_source_mask)
    eval_sampler = SequentialSampler(eval_data)
    # Batch size covers all inputs, so the loop below runs a single batch.
    batch_size = len(code_tokens) if is_old else len(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=batch_size)
    p=[]
    for batch in eval_dataloader:
        if not torch.cuda.is_available():
            batch = tuple(t.to('cpu') for t in batch)
        else:
            batch = tuple(t for t in batch)
        source_ids, source_mask = batch
        with torch.no_grad():
            preds = model(source_ids=source_ids, source_mask=source_mask)
        for pred in preds:
            # Best beam for this input; cut at the first padding id (0).
            t=pred[0].cpu().numpy()
            t=list(t)
            if 0 in t:
                t=t[:t.index(0)]
            text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
            p.append(text)
    # Post-process only the first prediction: glue a trailing lone "." onto
    # the previous word.
    # NOTE(review): assumes the prediction has at least two tokens when it
    # ends with "." — a shorter output would raise IndexError; confirm.
    px = p[0].split()
    if px[-1] == ".":
        px[-2] = px[-2].strip() + "."
        px.pop()
    return [" ".join(px)]
| 3,982 | 1,328 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
from flask import Flask
from common.BaseClass import Student
app = Flask(__name__)
@app.route('/')
def home_index():
    """Serve the root page: build a demo Student and return its info string."""
    student = Student('jerry', 18)
    return student.get_user_info()


if __name__ == '__main__':
    app.run()
| 268 | 108 |
#!/usr/bin/env python
import logging
from os.path import basename
from dae.variants.attributes import VariantType
from dae.annotation.tools.score_annotator import VariantScoreAnnotatorBase
logger = logging.getLogger(__name__)
class FrequencyAnnotator(VariantScoreAnnotatorBase):
    """Annotates variants with DAE frequency values from a position-indexed score file."""

    def __init__(self, config, genomes_db):
        super(FrequencyAnnotator, self).__init__(config, genomes_db)

    def _init_score_file(self):
        # Extends base initialization with the variant-description column
        # needed to match CSHL-style variants at a given position.
        super(FrequencyAnnotator, self)._init_score_file()
        self.score_filename_base = basename(self.score_file.score_filename)
        self.variant_col_name = self.score_file.config.columns.variant
        assert self.variant_col_name
        assert self.variant_col_name in self.score_file.schema.col_names, \
            "'{}' not in score file schema! Schema columns: {}".format(
                self.variant_col_name, self.score_file.schema.col_names)
        logger.debug(f"variants builder {self.variant_builder}")

    def collect_annotator_schema(self, schema):
        # No extra columns beyond what the base annotator contributes.
        super(FrequencyAnnotator, self).collect_annotator_schema(schema)

    def do_annotate(self, aline, variant, liftover_variants):
        """Fill `aline` with frequency values for `variant` (CNVs are skipped).

        :param aline: mutable mapping of output column -> value
        :param variant: variant to annotate
        :param liftover_variants: mapping of liftover name -> lifted variant
        """
        if VariantType.is_cnv(variant.variant_type):
            logger.info(
                f"skip trying to add frequency for CNV variant {variant}")
            self._scores_not_found(aline)
            return
        if self.liftover:
            variant = liftover_variants.get(self.liftover)
            if variant is None:
                self._scores_not_found(aline)
                return
        # NOTE(review): this repeats the liftover lookup just above and
        # reassigns the same value — looks redundant; confirm before removing.
        if self.liftover and liftover_variants.get(self.liftover):
            variant = liftover_variants.get(self.liftover)
        chrom = variant.chromosome
        pos = variant.details.cshl_position
        logger.debug(
            f"{self.score_filename_base}: looking for DAE frequency of "
            f"{variant}; {chrom}:{pos};")
        scores = self.score_file.fetch_scores(chrom, pos, pos)
        if not scores:
            self._scores_not_found(aline)
            return
        variant_detail = variant.details.cshl_variant
        # How many fetched rows at this position describe the same variant.
        variant_occurrences = scores[self.variant_col_name] \
            .count(variant_detail)
        # NOTE(review): when the variant string is absent from the fetched
        # rows, aline is left untouched (unlike the not-found paths above) —
        # confirm this is intended.
        if variant_occurrences > 0:
            if variant_occurrences > 1:
                logger.warning(
                    f"WARNING {self.score_filename_base}: "
                    f"multiple variant occurrences of {chrom}:{pos} {variant}")
            variant_index = scores[self.variant_col_name].index(variant_detail)
            for native, output in self.config.columns.items():
                # FIXME: this conversion should come from schema
                val = scores[native][variant_index]
                try:
                    if val in set(["", " "]):
                        aline[output] = self.score_file.no_score_value
                    else:
                        aline[output] = float(val)
                    logger.debug(
                        f"DAE frequency: aline[{output}]={aline[output]}")
                except ValueError as ex:
                    logger.error(
                        f"problem with: {output}: {chrom}:{pos} - {val}")
                    logger.error(ex)
                    raise ex
| 3,228 | 929 |
#!/usr/bin/python26
"""Watches a list of directories for file updates.
The classes in this module will watch a list of subdirectories for file
updates. A class is passed in at object initialization time and is used to
create objects as new files are discovered. If a file is updated then the
reload() function on that class will be called. If the file is removed the
class will be deleted.
It is important to verify that __init__, __del__, and reload() are all
defined properly.
A simple example of this module use looks like this:
class watcher(object):
def __init__(self, filename):
self._filename = filename
print 'Init: %s' % filename
def __del__(self):
print 'Del: %s' % self._filename
def reload(self):
print 'reload: %s' % self._filename
x = inotify.InotifyWatcher(['/tmp/bar'], watcher)
Only one InotifyWatcher can be registered per process due to the way that
inotify works.
Author: Brady Catherman (brady@twitter.com)
"""
import fcntl
import logging
import os
import signal
import stat
# Directory-notification events we subscribe to; DN_MULTISHOT keeps the watch
# armed after each event instead of firing only once.
WATCH_MASK = (fcntl.DN_MODIFY | fcntl.DN_CREATE | fcntl.DN_DELETE |
              fcntl.DN_RENAME | fcntl.DN_MULTISHOT)
class WatchClass(object):
    """Interface class to be passed into InotifyWatcher()"""

    def __init__(self, filename):
        # Called once when a new file is discovered.
        pass

    def __del__(self):
        # Called when the file disappears and the wrapper is discarded.
        pass

    def reload(self):
        """Called when the file is updated on disk."""
        pass
class InotifyWatcher(object):
    """Watches a list of directories for updates to the files in them.
    This class will watch the directories in watch_directories and will
    automatically make a class of watch_class type when a new one is found.
    Args:
      watch_directories: An iterable list of directories to watch for files in.
      watch_class: The class that will be used to wrap each file.
      file_pattern: An optional function that filters filenames. The basic
                    footprint takes a single parameter (the filename) and returns
                    True/False if it should be watched or not. If this is not
                    given then all files will be watched.
    """

    def __init__(self, watch_directories, watch_class, file_pattern=None):
        if file_pattern is None:
            file_pattern = (lambda x: True)
        self._watch_directories = watch_directories
        self._watch_class = watch_class
        self._file_pattern = file_pattern
        # dir -> fd registered with F_NOTIFY
        self._watch_fds = {}
        # file -> [last_mtime, wrapper_object]
        self._watch_files = {}
        # dnotify delivers SIGIO; SIGHUP also forces a manual rescan.
        signal.signal(signal.SIGIO, self._inotify)
        signal.signal(signal.SIGHUP, self._inotify)
        self.rescan()

    def _recurse_directory(self):
        """Recurses through all self._watch_directories finding files."""
        all_files = set()
        dirs = set(self._watch_directories)
        all_dirs = set()
        while dirs:
            dir = dirs.pop()
            try:
                files = [os.path.join(dir, f) for f in os.listdir(dir)]
                all_dirs.add(dir)
                all_files.update([f for f in files
                                  if os.path.isfile(f) and self._file_pattern(f)])
                # NOTE(review): f is a joined path here, so f[0] tests the
                # first character of the full path, not of the subdirectory
                # name — hidden-dir filtering may not work as intended; confirm.
                dirs.update([f for f in files if os.path.isdir(f) and f[0] != '.'])
            except IOError, e:
                logging.warning('Unable to access: %s' % dir)
            except OSError, e:
                logging.warning('Unable to access: %s' % dir)
        return (all_dirs, all_files)

    def _register_inotify(self, dir):
        """Registers a watch on the given directory."""
        if dir in self._watch_fds:
            return
        logging.info('Registering a inotify watch on %s' % dir)
        try:
            fd = os.open(dir, os.O_RDONLY)
            fcntl.fcntl(fd, fcntl.F_NOTIFY, WATCH_MASK)
            self._watch_fds[dir] = fd
        except IOError, e:
            logging.error('Unable to register watch on %s: %s' % (dir, e))

    def _unregister_inotify(self, dir):
        """Unregisters the directory for update notification."""
        if dir not in self._watch_fds:
            return
        logging.info('Unregistering a inotify watch on %s' % dir)
        # NOTE(review): the fd is dropped but never os.close()d — confirm
        # whether this leaks a descriptor per removed directory.
        del self._watch_fds[dir]

    def _inotify(self, signum, frame):
        """Called when either SIGHUP or SIGIO (inotify) is received."""
        logging.info('Received SIGHUP or a file update notification.')
        # Re-arm the handlers before rescanning.
        signal.signal(signal.SIGIO, self._inotify)
        signal.signal(signal.SIGHUP, self._inotify)
        self.rescan()

    def _mtime(self, filename):
        """Returns the mtime of the given file (in seconds)."""
        try:
            s = os.stat(filename)
            return s[stat.ST_MTIME]
        except IOError:
            # On error we just return zero..
            # FIXME[brady]: Make this work better.
            return 0

    def files(self):
        """Returns a list of all WatchFile objects we are watching.
        This will return a list of all WatchFile objects associated with config
        files in the list of directories that we are currently watching.
        Returns:
          A list of all WatchConfig objects we are maintaining.
        """
        # Values are [mtime, wrapper] pairs; expose only the wrappers.
        return [w for _, w in self._watch_files.itervalues()]

    def rescan(self):
        """Rescans all directories looking for files inside.
        This will walk all the directories listed when this class was created
        looking for configuration files. If new config files are found then
        a object will be created using the class passed in at init time. If a
        file that used to exist was deleted then the config object for it
        will also be deleted.
        """
        new_dirs, new_files = self._recurse_directory()
        # Old directories, unregister watches.
        for dir in set(self._watch_fds.iterkeys()).difference(new_dirs):
            self._unregister_inotify(dir)
        # New directories, register watches.
        for dir in new_dirs:
            self._register_inotify(dir)
        # Walk through all files that no longer exist.
        for file in set(self._watch_files).difference(new_files):
            logging.info('File deleted (%s): Removing its object.', file)
            del self._watch_files[file]
        for file in new_files:
            if file not in self._watch_files:
                w = self._watch_class(file)
                # mtime None forces the reload() below on first sight.
                self._watch_files[file] = [None, w]
                logging.info('Found new file (%s): Making new object', file)
            t = self._watch_files[file]
            m = self._mtime(file)
            if t and t[0] != m:
                t[0] = m
                t[1].reload()
| 6,101 | 1,903 |
from .location_test import *
from .array_test import *
from .view_test import *
from .board_test import *
| 106 | 33 |
# Copyright (c) 2021, TS and Contributors
# See license.txt
# import frappe
import unittest
class TestTS_Payroll(unittest.TestCase):
    """Placeholder test case for the TS Payroll doctype; no tests implemented yet."""
    pass
| 141 | 54 |
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
    def connect(self, root: 'Optional[Node]') -> 'Optional[Node]':
        """Wire each node's `next` pointer to its right neighbor on the same level.

        Assumes a perfect binary tree and uses the already-linked level above to
        walk the level below, so only O(1) extra space is needed.
        """
        if not root:
            return None
        leftmost = root
        # While there is a level below the current one...
        while leftmost.left:
            walker = leftmost
            # ...sweep across the current level via its next pointers.
            while walker:
                # Link the two children of this node.
                walker.left.next = walker.right
                # Bridge to the neighbor's left child (None at level end).
                walker.right.next = walker.next.left if walker.next else None
                walker = walker.next
            leftmost = leftmost.left
        return root
import numpy as np
def get_relative_coordinates(sample,
                             references=(4, 8, 12, 16)):
    """Express joint coordinates relative to a set of reference joints.

    :param sample: np.ndarray of shape (C, T, V, M) — channels, frames,
        joints, persons; all-zero frames are treated as padding
    :param references: joint indices used as reference points
    :return: np.ndarray of shape (len(references)*C, T, V, M); frames outside
        the valid (non-zero) range stay zero

    Fix: the output used to be hard-coded to 4 reference blocks
    (np.zeros((4*c, ...))), which silently broadcast or failed for any other
    number of references; it now scales with len(references).
    """
    # input: C, T, V, M
    c, t, v, m = sample.shape
    final_sample = np.zeros((len(references) * c, t, v, m))
    # A frame is valid when any channel/joint/person is non-zero.
    valid_frames = (sample != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
    start = valid_frames.argmax()
    end = len(valid_frames) - valid_frames[::-1].argmax()
    # Restrict the computation to the valid frame window.
    sample = sample[:, start:end, :, :]
    rel_coords = []
    for ref in references:
        ref_loc = sample[:, :, ref, :]
        # Broadcast-subtract the reference joint trajectory from every joint.
        coords_diff = (sample.transpose((2, 0, 1, 3)) - ref_loc).transpose((1, 2, 0, 3))
        rel_coords.append(coords_diff)
    # Shape: len(references)*C, end-start, V, M
    rel_coords = np.vstack(rel_coords)
    final_sample[:, start:end, :, :] = rel_coords
    return final_sample
| 821 | 334 |
import os
import librosa
from joblib import Parallel, delayed
import json
import config_file
import argparse
import pickle
import numpy as np
from pathlib import Path
DEBUG = False
def compute_audio_repr(audio_file, audio_repr_file):
    """Load an audio file and pickle its representation (waveform or mel-spectrogram).

    Uses the module-level `config` dict for all parameters.

    :param audio_file: path of the source audio
    :param audio_repr_file: destination pickle path
    :return: number of frames/samples in the stored representation
    """
    audio, sr = librosa.load(audio_file, sr=config['resample_sr'])
    if config['type'] == 'waveform':
        # Keep raw samples as a column vector (N, 1).
        audio_repr = np.expand_dims(audio, axis=1)
    elif config['spectrogram_type'] == 'mel':
        # Frames-first layout via the transpose: (frames, n_mels).
        audio_repr = librosa.feature.melspectrogram(y=audio, sr=sr,
                                                    hop_length=config['hop'],
                                                    n_fft=config['n_fft'],
                                                    n_mels=config['n_mels']).T
    else:
        # Previously this fell through and crashed with a NameError below.
        raise ValueError("Unsupported audio representation: type=%r, spectrogram_type=%r"
                         % (config['type'], config.get('spectrogram_type')))
    # Compute length
    print(audio_repr.shape)
    length = audio_repr.shape[0]
    # Transform to float16 (to save storage, and works the same)
    audio_repr = audio_repr.astype(np.float16)
    # Write results:
    with open(audio_repr_file, "wb") as f:
        pickle.dump(audio_repr, f)  # audio_repr shape: NxM
    return length
def do_process(files, index):
    """Compute and store the representation for one entry of `files`.

    On success appends a line to the per-machine index TSV; on failure appends
    the offending file and the error to a per-machine errors file.

    :param files: list of (id, audio_file, audio_repr_file) triples
    :param index: position in `files` to process
    """
    # Unpack outside the try block: the old code referenced `audio_file` in
    # the except handler, raising NameError if the unpacking itself failed.
    [id, audio_file, audio_repr_file] = files[index]
    try:
        # Ensure the destination directory exists (idempotent).
        Path(audio_repr_file[:audio_repr_file.rfind('/') + 1]).mkdir(parents=True, exist_ok=True)
        # compute audio representation (pre-processing)
        length = compute_audio_repr(audio_file, audio_repr_file)
        # index.tsv writing — `with` guarantees the handle is flushed/closed.
        index_path = config_file.DATA_FOLDER + config['audio_representation_folder'] + "index_" + str(config['machine_i']) + ".tsv"
        with open(index_path, "a") as fw:
            fw.write("%s\t%s\t%s\n" % (id, audio_repr_file[len(config_file.DATA_FOLDER):], audio_file[len(config_file.DATA_FOLDER):]))
        print(str(index) + '/' + str(len(files)) + ' Computed: %s' % audio_file)
    except Exception as e:
        errors_path = config_file.DATA_FOLDER + config['audio_representation_folder'] + "errors" + str(config['machine_i']) + ".txt"
        with open(errors_path, "a") as ferrors:
            ferrors.write(audio_file + "\n")
            ferrors.write(str(e))
        print('Error computing audio representation: ', audio_file)
        print(str(e))
def process_files(files):
    """Compute representations for every entry, in parallel unless DEBUG is set."""
    if DEBUG:
        # Sequential path keeps tracebacks readable while debugging.
        print('WARNING: Parallelization is not used!')
        for idx in range(len(files)):
            do_process(files, idx)
    else:
        Parallel(n_jobs=config['num_processing_units'])(
            delayed(do_process)(files, idx) for idx in range(len(files)))
if __name__ == '__main__':
    # Which preprocessing configuration to run, chosen by CLI argument.
    parser = argparse.ArgumentParser()
    parser.add_argument('configurationID', help='ID of the configuration dictionary')
    args = parser.parse_args()
    config = config_file.config_preprocess[args.configurationID]
    config['audio_representation_folder'] = "audio_representation/%s__%s/" % (config['identifier'], config['type'])
    # set audio representations folder
    if not os.path.exists(config_file.DATA_FOLDER + config['audio_representation_folder']):
        os.makedirs(config_file.DATA_FOLDER + config['audio_representation_folder'])
    else:
        print("WARNING: already exists a folder with this name!"
              "\nThis is expected if you are splitting computations into different machines.."
              "\n..because all these machines are writing to this folder. Otherwise, check your config_file!")
    # list audios to process: according to 'index_file'
    files_to_convert = []
    # NOTE(review): this handle is never closed explicitly — relies on exit.
    f = open(config_file.DATA_FOLDER + config["index_file"])
    for line in f.readlines():
        id, audio = line.strip().split("\t")
        audio_repr = audio[:audio.rfind(".")] + ".pk"  # .npy or .pk
        files_to_convert.append((id, config['audio_folder'] + audio,
                                 config_file.DATA_FOLDER + config['audio_representation_folder'] + audio_repr))
    # compute audio representation
    # Work is split evenly across n_machines; the last machine also takes the
    # remainder and is the only one that saves the config snapshot.
    if config['machine_i'] == config['n_machines'] - 1:
        process_files(files_to_convert[int(len(files_to_convert) / config['n_machines']) * (config['machine_i']):])
        # we just save parameters once! In the last thread run by n_machine-1!
        json.dump(config, open(config_file.DATA_FOLDER + config['audio_representation_folder'] + "config.json", "w"))
    else:
        first_index = int(len(files_to_convert) / config['n_machines']) * (config['machine_i'])
        second_index = int(len(files_to_convert) / config['n_machines']) * (config['machine_i'] + 1)
        assigned_files = files_to_convert[first_index:second_index]
        process_files(assigned_files)
    print("Audio representation folder: " + config_file.DATA_FOLDER + config['audio_representation_folder'])
| 4,774 | 1,515 |
# Read an N x N integer matrix from stdin and print its transpose row by row.
N = int(input('Ordem da matriz: '))
A = [[int(input()) for i in range(N)] for j in range(N)]
# zip(*A) yields the columns of A, i.e. the rows of the transpose.
At = [list(column) for column in zip(*A)]
for row in At:
    print(row)
| 245 | 105 |
# Hexagonal numbers t(2t-1) for t = 144..1000142 and pentagonal numbers
# p(3p-1)/2 for p = 166..1000164 (same ranges as the original offset loop).
ltn = [t * (2 * t - 1) for t in range(144, 1000143)]
lpn = [p * (3 * p - 1) // 2 for p in range(166, 1000165)]
def bst(n, b=0, e=None):
    """Binary search for n in the globally sorted list `ltn`.

    :param n: value to look for
    :param b: inclusive lower bound of the search window
    :param e: exclusive upper bound; defaults to len(ltn)
    :return: True if n occurs in ltn[b:e], else False

    Fix: the old version evaluated ``e=len(ltn)`` at definition time and, when
    the window emptied, indexed ``ltn[b]`` — raising IndexError whenever n was
    greater than every element (b reaches len(ltn)).
    """
    if e is None:
        e = len(ltn)
    if b >= e:
        # Empty window: the value is absent. (Half-open invariant guarantees
        # a present value is found before the window empties.)
        return False
    m = (b + e) // 2
    if n > ltn[m]:
        return bst(n, m + 1, e)
    elif n < ltn[m]:
        return bst(n, b, m)
    else:
        return True
# Report the first pentagonal number that is also hexagonal.
for candidate in lpn:
    if bst(candidate):
        print(candidate)
        break
| 532 | 231 |
import copy
import pytest
from RPA.Robocloud.Items import BaseAdapter, Items
# Canonical adapter contents keyed by (workspace_id, item_id); each test gets
# a deep copy via the valid_adapter fixture so mutations never leak.
VALID_DATABASE = {
    ("test-ws", "test-item"): {"username": "testguy", "address": "guy@company.com"},
    ("test-ws", "second-item"): {"username": "another", "address": "dude@company.com"},
}
class MockAdapter(BaseAdapter):
    """In-memory work item adapter backed by a class-level dict."""

    # Shared storage: (workspace_id, item_id) -> payload dict.
    DATABASE = {}

    @classmethod
    def validate(cls, item, key, val):
        """Assert that the stored payload for `item` maps `key` to `val`."""
        data = cls.DATABASE.get((item.workspace_id, item.item_id))
        assert data is not None
        assert data[key] == val

    def save(self, workspace_id, item_id, data):
        """Persist `data` under the (workspace, item) key."""
        self.DATABASE[(workspace_id, item_id)] = data

    def load(self, workspace_id, item_id):
        """Fetch stored data, defaulting to an empty payload."""
        return self.DATABASE.get((workspace_id, item_id), {})
@pytest.fixture
def valid_adapter(monkeypatch):
    """Yield MockAdapter preloaded with VALID_DATABASE and matching env vars.

    The database is deep-copied per test and cleared afterwards so tests
    cannot observe each other's mutations.
    """
    monkeypatch.setenv("RC_WORKSPACE_ID", "test-ws")
    monkeypatch.setenv("RC_WORKITEM_ID", "test-item")
    MockAdapter.DATABASE = copy.deepcopy(VALID_DATABASE)
    yield MockAdapter
    MockAdapter.DATABASE = {}
def test_no_env(monkeypatch):
    """Without the Robocloud env variables no work item is auto-loaded."""
    for var in ("RC_WORKSPACE_ID", "RC_WORKITEM_ID"):
        monkeypatch.delenv(var, raising=False)
    lib = Items(default_adapter=MockAdapter)
    assert lib.current is None
def test_load_env(valid_adapter):
    """The suite-start listener loads the work item named by the env variables."""
    lib = Items(default_adapter=valid_adapter)
    lib._start_suite(None, None)  # invoked by the Robot Framework listener
    item = lib.current
    assert item is not None
    assert item.data["username"] == "testguy"
def test_load_env_disable(valid_adapter):
    """With load_env disabled, suite start must not load any work item."""
    lib = Items(load_env=False, default_adapter=valid_adapter)
    lib._start_suite(None, None)  # invoked by the Robot Framework listener
    assert lib.current is None
def test_keyword_load_item(valid_adapter):
    """Explicitly loading a work item exposes its data and makes it current."""
    lib = Items(default_adapter=valid_adapter)
    loaded = lib.load_work_item("test-ws", "second-item")
    assert loaded.data["username"] == "another"
    assert loaded == lib.current
def test_keyword_save_item(valid_adapter):
    """Saving the current work item persists modified fields via the adapter."""
    lib = Items(default_adapter=valid_adapter)
    work_item = lib.load_work_item("test-ws", "second-item")
    # Baseline value, then mutate and save.
    MockAdapter.validate(work_item, "username", "another")
    work_item.data["username"] = "changed"
    lib.save_work_item()
    MockAdapter.validate(work_item, "username", "changed")
def test_keyword_no_active_item():
    """Saving without an active work item raises a descriptive AssertionError."""
    lib = Items(default_adapter=MockAdapter)
    assert lib.current is None
    with pytest.raises(AssertionError) as excinfo:
        lib.save_work_item()
    assert str(excinfo.value) == "No active work item"
| 2,508 | 846 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq.models.seq2seq_model import Seq2SeqModel
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
from seq2seq.inference import beam_search
class ConvSeq2Seq(Seq2SeqModel):
    """Basic Sequence2Sequence model with a unidirectional encoder and decoder.
    The last encoder state is used to initialize the decoder and thus both
    must share the same type of RNN cell.
    Args:
      source_vocab_info: An instance of `VocabInfo`
        for the source vocabulary
      target_vocab_info: An instance of `VocabInfo`
        for the target vocabulary
      params: A dictionary of hyperparameters
    """

    def __init__(self, params, mode, name="conv_seq2seq"):
        super(ConvSeq2Seq, self).__init__(params, mode, name)
        # Encoder/decoder implementations are resolved from dotted class paths.
        self.encoder_class = locate(self.params["encoder.class"])
        self.decoder_class = locate(self.params["decoder.class"])

    @staticmethod
    def default_params():
        """Default hyperparameters, layered on Seq2SeqModel.default_params()."""
        params = Seq2SeqModel.default_params().copy()
        params.update({
            "encoder.class": "seq2seq.encoders.ConvEncoderFairseq",
            "encoder.params": {},  # Arbitrary parameters for the encoder
            "decoder.class": "seq2seq.decoders.ConvDecoder",
            "decoder.params": {},  # Arbitrary parameters for the decoder
            "source.max_seq_len": 50,
            "source.reverse": False,
            "target.max_seq_len": 50,
            "embedding.dim": 256,
            "embedding.init_scale": 0.04,
            "embedding.share": False,
            "position_embeddings.num_positions": 100,
            "inference.beam_search.beam_width": 0,
            "inference.beam_search.length_penalty_weight": 1.0,
            "inference.beam_search.choose_successors_fn": "choose_top_k",
            "vocab_source": "",
            "vocab_target": "",
            "optimizer.name": "Momentum",
            "optimizer.learning_rate": 0.25,
            "optimizer.params": {"momentum": 0.99, "use_nesterov": True},  # Arbitrary parameters for the optimizer
            #"optimizer.params": { "epsilon": 0.0000008}, # Arbitrary parameters for the optimizer
            "optimizer.lr_decay_type": "exponential_decay",
            "optimizer.lr_decay_steps": 5000,  # one epoch steps
            "optimizer.lr_decay_rate": 0.9,
            "optimizer.lr_start_decay_at": 0,  # start annealing epoch 0
            "optimizer.lr_stop_decay_at": tf.int32.max,
            "optimizer.lr_min_learning_rate": 1e-5,
            "optimizer.lr_staircase": True,
            "optimizer.clip_gradients": 0.1,
            "optimizer.clip_embed_gradients": 5,
            "optimizer.sync_replicas": 0,
            "optimizer.sync_replicas_to_aggregate": 0,
        })
        return params

    def source_embedding_fairseq(self):
        """Returns the embedding used for the source sequence.
        """
        return tf.get_variable(
            name="W",
            shape=[self.source_vocab_info.total_size, self.params["embedding.dim"]],
            initializer=tf.random_normal_initializer(
                mean=0.0,
                stddev=0.1))

    def target_embedding_fairseq(self):
        """Returns the embedding used for the target sequence.
        """
        # With embedding.share, source and target reuse the same variable.
        if self.params["embedding.share"]:
            return self.source_embedding_fairseq()
        return tf.get_variable(
            name="W",
            shape=[self.target_vocab_info.total_size, self.params["embedding.dim"]],
            initializer=tf.random_normal_initializer(
                mean=0.0,
                stddev=0.1))

    def source_pos_embedding_fairseq(self):
        # Learned position embeddings for the encoder input.
        return tf.get_variable(
            name="pos",
            shape=[self.params["position_embeddings.num_positions"], self.params["embedding.dim"]],
            initializer=tf.random_normal_initializer(
                mean=0.0,
                stddev=0.1))

    def target_pos_embedding_fairseq(self):
        # Learned position embeddings for the decoder input.
        return tf.get_variable(
            name="pos",
            shape=[self.params["position_embeddings.num_positions"], self.params["embedding.dim"]],
            initializer=tf.random_normal_initializer(
                mean=0.0,
                stddev=0.1))

    def _create_decoder(self, encoder_output, features, _labels):
        """Build the configured decoder with beam-search settings."""
        config = beam_search.BeamSearchConfig(
            beam_width=self.params["inference.beam_search.beam_width"],
            vocab_size=self.target_vocab_info.total_size,
            eos_token=self.target_vocab_info.special_vocab.SEQUENCE_END,
            length_penalty_weight=self.params[
                "inference.beam_search.length_penalty_weight"],
            choose_successors_fn=getattr(
                beam_search,
                self.params["inference.beam_search.choose_successors_fn"]))
        return self.decoder_class(
            params=self.params["decoder.params"],
            mode=self.mode,
            vocab_size=self.target_vocab_info.total_size,
            config=config,
            target_embedding=self.target_embedding_fairseq(),
            pos_embedding=self.target_pos_embedding_fairseq(),
            start_tokens=self.target_vocab_info.special_vocab.SEQUENCE_END)

    def _decode_train(self, decoder, _encoder_output, _features, labels):
        """Runs decoding in training mode"""
        target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,
                                                 labels["target_ids"])
        # Teacher forcing: feed targets shifted by one (drop the last token).
        return decoder(_encoder_output, labels=target_embedded[:, :-1], sequence_length=labels["target_len"] - 1)

    def _decode_infer(self, decoder, _encoder_output, features, labels):
        """Runs decoding in inference mode"""
        return decoder(_encoder_output, labels)

    @templatemethod("encode")
    def encode(self, features, labels):
        # Move padding to the front: reverse the valid prefix, then reverse the
        # whole row, e.g. [[1,2,3,4,PAD,PAD,PAD]] -> [[PAD,PAD,PAD,1,2,3,4]].
        features["source_ids"] = tf.reverse_sequence(features["source_ids"], features["source_len"], batch_dim=0, seq_dim=1)  # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]] [4,2]
        features["source_ids"] = tf.reverse(features["source_ids"], [1])  # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]] --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]
        source_embedded = tf.nn.embedding_lookup(self.source_embedding_fairseq(),
                                                 features["source_ids"])
        encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode, self.source_pos_embedding_fairseq())
        return encoder_fn(source_embedded, features["source_len"])

    @templatemethod("decode")
    def decode(self, encoder_output, features, labels):
        decoder = self._create_decoder(encoder_output, features, labels)
        # INFER mode decodes freely; other modes use teacher forcing.
        if self.mode == tf.contrib.learn.ModeKeys.INFER:
            return self._decode_infer(decoder, encoder_output, features,
                                      labels)
        else:
            return self._decode_train(decoder, encoder_output, features,
                                      labels)
def emailValida(email):
    """Return True when the address mentions one of the accepted providers.

    Note: this is a substring check (matching the original behavior), not a
    strict suffix or format validation.
    """
    providers = ('@gmail.com', '@hotmail.com', '@outlook.com')
    return any(provider in email for provider in providers)
| 159 | 50 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""nova HACKING file compliance testing
built on top of pep8.py
"""
import inspect
import os
import re
import sys
import pep8
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx Calling methods
def nova_todo_format(physical_line):
    """
    nova HACKING guide recommendation for TODO:
    Include your name with TODOs as in "#TODO(termie)"
    N101
    """
    todo_at = physical_line.find('TODO')
    named_todo_at = physical_line.find('TODO(')
    comment_at = physical_line.find('#')  # make sure its a comment
    # Flag only TODOs that sit inside a comment and lack "(NAME)" attribution.
    if todo_at != named_todo_at and 0 <= comment_at < todo_at:
        return todo_at, "NOVA N101: Use TODO(NAME)"
def nova_except_format(logical_line):
    """
    nova HACKING guide recommends not using except:
    Do not write "except:", use "except Exception:" at the very least
    N201
    """
    # NOTE(review): this function is shadowed by the second nova_except_format
    # definition below, so this N201 check is never registered by add_nova()
    # — confirm and give one of the two a distinct name.
    if logical_line.startswith("except:"):
        return 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
def nova_except_format(logical_line):
    """
    nova HACKING guide checks on exception usage:
    Do not write "except:", use "except Exception:" at the very least (N201)
    Do not use overly broad assertRaises(Exception...) (N202)

    Fix: this definition shadows an earlier function of the same name, which
    silently disabled the N201 check; both checks now live in the surviving
    definition. A logical line can trigger at most one of them.
    """
    if logical_line.startswith("except:"):
        return 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
    if logical_line.startswith("self.assertRaises(Exception"):
        return 1, "NOVA N202: assertRaises Exception too broad"
def nova_one_import_per_line(logical_line):
    """
    nova HACKING guide recommends one import per line:
    Do not import more than one module per line
    Examples:
    BAD: from nova.rpc.common import RemoteError, LOG
    BAD: from sqlalchemy import MetaData, Table
    N301
    """
    comma_at = logical_line.find(',')
    if comma_at == -1:
        return
    words = logical_line.split()
    plain_import = logical_line.startswith("import ")
    from_import = logical_line.startswith("from ") and words[2] == "import"
    if plain_import or from_import:
        return comma_at, "NOVA N301: one import per line"
def nova_import_module_only(logical_line):
    """
    nova HACKING guide recommends importing only modules:
    Do not import objects, only modules
    N302 import only modules
    N303 Invalid Import
    N304 Relative Import
    """
    def importModuleCheck(mod, parent=None, added=False):
        """
        If can't find module on first try, recursively check for relative
        imports
        """
        # Directory of the file being checked; used to detect relative imports
        # by retrying the import with it appended to sys.path.
        current_path = os.path.dirname(pep8.current_file)
        try:
            valid = True
            if parent:
                parent_mod = __import__(parent, globals(), locals(), [mod], -1)
                valid = inspect.ismodule(getattr(parent_mod, mod))
            else:
                __import__(mod, globals(), locals(), [], -1)
                valid = inspect.ismodule(sys.modules[mod])
            if not valid:
                if added:
                    # Import only resolved via the file's own directory:
                    # it is a relative import (N304), not a module object.
                    sys.path.pop()
                    added = False
                    return logical_line.find(mod), ("NOVA N304: No relative "
                        "imports. '%s' is a relative import" % logical_line)
                return logical_line.find(mod), ("NOVA N302: import only "
                    "modules. '%s' does not import a module" % logical_line)
        except (ImportError, NameError) as exc:
            if not added:
                # First failure: retry once with the checked file's directory
                # on sys.path to distinguish relative imports.
                added = True
                sys.path.append(current_path)
                return importModuleCheck(mod, parent, added)
            else:
                print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
                                      (logical_line, exc))
                added = False
                sys.path.pop()
                return
        except AttributeError:
            # Invalid import
            return logical_line.find(mod), ("NOVA N303: Invalid import, "
                "AttributeError raised")

    split_line = logical_line.split()
    # handle "import x"
    # handle "import x as y"
    if (logical_line.startswith("import ") and "," not in logical_line and
            (len(split_line) == 2 or
             (len(split_line) == 4 and split_line[2] == "as"))):
        mod = split_line[1]
        return importModuleCheck(mod)
    # handle "from x import y"
    # handle "from x import y as z"
    elif (logical_line.startswith("from ") and "," not in logical_line and
            split_line[2] == "import" and split_line[3] != "*" and
            split_line[1] != "__future__" and
            (len(split_line) == 4 or
             (len(split_line) == 6 and split_line[4] == "as"))):
        mod = split_line[3]
        return importModuleCheck(mod, split_line[1])
    # TODO(jogo) handle "from x import *"
# TODO(jogo) handle "from x import *"
#TODO(jogo) Dict and list objects
current_file = ""
def readlines(filename):
"""
record the current file being tested
"""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
    """
    Look for functions that start with nova_ and have arguments
    and add them to pep8 module
    Assumes you know how to write pep8.py checks
    """
    for name, function in globals().items():
        if not inspect.isfunction(function):
            continue
        args = inspect.getargspec(function)[0]
        if args and name.startswith("nova"):
            # setattr replaces the previous exec()-based string assignment:
            # identical effect without dynamically executing generated code.
            setattr(pep8, name, function)
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
#NOVA error codes start with an N
pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
pep8._main()
| 6,090 | 1,876 |
import torch, warnings
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from argparse import ArgumentParser
from .incremental_learning import Inc_Learning_Appr
from datasets.exemplars_dataset import ExemplarsDataset
class Appr(Inc_Learning_Appr):
    """Class implementing the finetuning baseline, optionally adding an
    Orthogonal Projection Loss (OPL) term and keeping per-class feature
    prototypes (means/covariances) for NCM-style evaluation."""

    def __init__(self, model, device, nepochs=100, lr=0.05, lr_min=1e-4, lr_factor=3, lr_patience=5, clipgrad=10000,
                 momentum=0, wd=0, multi_softmax=False, wu_nepochs=0, wu_lr_factor=1, fix_bn=False, eval_on_train=False,
                 logger=None, exemplars_dataset=None, all_outputs=False, CE=True, OPL=False, gamma=0.5, opl_weight=1.0):
        super(Appr, self).__init__(model, device, nepochs, lr, lr_min, lr_factor, lr_patience, clipgrad, momentum, wd,
                                   multi_softmax, wu_nepochs, wu_lr_factor, fix_bn, eval_on_train, logger,
                                   exemplars_dataset)
        self.all_out = all_outputs
        self.CE = CE                  # include the cross-entropy term
        self.OPL = OPL                # include the orthogonal projection loss term
        self.gamma = gamma            # weight on negative-pair similarity inside OPL
        self.opl_weight = opl_weight  # weight of the OPL term relative to CE
        # Per-class prototype statistics, filled in save_protype() after each task.
        self.means = []
        self.covs = []
        self.class_labels = []

    @staticmethod
    def exemplars_dataset_class():
        """Dataset class used by the framework to store exemplars."""
        return ExemplarsDataset

    @staticmethod
    def extra_parser(args):
        """Returns a parser containing the approach specific parameters"""
        parser = ArgumentParser()
        parser.add_argument('--all-outputs', action='store_true', required=False,
                            help='Allow all weights related to all outputs to be modified (default=%(default)s)')
        # NOTE(review): store_false means passing --CE DISABLES the CE loss
        # (default True) -- confirm this inversion is intended.
        parser.add_argument('--CE', action='store_false', required=False,
                            help='CE loss (default=%(default)s)')
        parser.add_argument('--OPL', action='store_true', required=False,
                            help='OPL loss (default=%(default)s)')
        parser.add_argument('--gamma', default=0.5, type=float, required=False,
                            help='Gamma for neg pair in OPL (default=%(default)s)')
        parser.add_argument('--opl_weight', default=1, type=float, required=False,
                            help='Weight for OPL loss (default=%(default)s)')
        return parser.parse_known_args(args)

    def _get_optimizer(self):
        """Returns the optimizer"""
        if len(self.exemplars_dataset) == 0 and len(self.model.heads) > 1 and not self.all_out:
            # if there are no exemplars, previous heads are not modified
            params = list(self.model.model.parameters()) + list(self.model.heads[-1].parameters())
        else:
            params = self.model.parameters()
        return torch.optim.SGD(params, lr=self.lr, weight_decay=self.wd, momentum=self.momentum)

    def save_protype(self, trained_model, loader):
        """Compute and store per-class feature mean and covariance prototypes
        from `loader`, appending to self.means / self.covs / self.class_labels."""
        trained_model.eval()
        features = []
        labels = []
        with torch.no_grad():
            for images, targets in loader:
                output, feature = trained_model(images.to(self.device), return_features=True)
                labels.append(targets.numpy())
                features.append(feature.cpu().numpy())
        labels = np.hstack(labels)
        labels_set = np.unique(labels)
        features = np.concatenate(features, 0)
        feature_dim = features.shape[1]
        for item in labels_set:
            index = np.where(item == labels)[0]
            feature_classwise = features[index]
            self.class_labels.append(item)
            self.means.append(torch.from_numpy(np.mean(feature_classwise, axis=0)))
            self.covs.append(torch.from_numpy(np.cov(feature_classwise.T)))

    def pre_train_process(self, t, trn_loader):
        """Runs before training all epochs of the task (before the train session)"""
        if t == 0:
            # Sec. 4.1: "the ReLU in the penultimate layer is removed to allow the features to take both positive and
            # negative values"
            if self.model.model.__class__.__name__ == 'ResNet':
                old_block = self.model.model.layer3[-1]
                self.model.model.layer3[-1] = BasicBlockNoRelu(old_block.conv1, old_block.bn1, old_block.relu,
                                                               old_block.conv2, old_block.bn2, old_block.downsample)
            elif self.model.model.__class__.__name__ == 'SmallCNN':
                self.model.model.last_relu = False
            else:
                warnings.warn("Warning: ReLU not removed from last block.")
        super().pre_train_process(t, trn_loader)

    def train_epoch(self, t, trn_loader):
        """Runs a single epoch"""
        self.model.train()
        if self.fix_bn and t > 0:
            self.model.freeze_bn()
        for images, targets in trn_loader:
            # Forward current model; features are only needed for the OPL term.
            if not self.OPL:
                features = None
                outputs = self.model(images.to(self.device))
            else:
                outputs, features = self.model(images.to(self.device), return_features=True)
            loss = self.criterion(t, outputs, targets.to(self.device), features)
            # Backward
            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clipgrad)
            self.optimizer.step()

    def train_loop(self, t, trn_loader, val_loader):
        """Contains the epochs loop"""
        # add exemplars to train_loader
        if len(self.exemplars_dataset) > 0 and t > 0:
            trn_loader = torch.utils.data.DataLoader(trn_loader.dataset + self.exemplars_dataset,
                                                     batch_size=trn_loader.batch_size,
                                                     shuffle=True,
                                                     num_workers=trn_loader.num_workers,
                                                     pin_memory=trn_loader.pin_memory)
        # FINETUNING TRAINING -- contains the epochs loop
        super().train_loop(t, trn_loader, val_loader)
        # EXEMPLAR MANAGEMENT -- select training subset
        self.exemplars_dataset.collect_exemplars(self.model, trn_loader, val_loader.dataset.transform)
        # Refresh class prototypes with the features of the just-trained model.
        self.save_protype(self.model, trn_loader)

    def classify(self, task, features, targets):
        """Nearest-class-mean classification by cosine similarity to the
        stored prototypes; returns (hits, hits) as float tensors."""
        # expand means to all batch images # bs*256*num_classes
        means = torch.stack(self.means)
        means = torch.stack([means] * features.shape[0])
        means = means.transpose(1, 2)
        # expand all features to all classes
        features = features.unsqueeze(2)
        features = features.expand_as(means)
        # get cosine-similarities for all images to all prototypes
        # note: features and means do not need normalize
        cos_sim = torch.nn.functional.cosine_similarity(features, means.to(self.device), dim=1, eps=1e-08)  # bs*num_classes
        pred = cos_sim.argmax(1)
        hits_tag = (pred == targets.to(self.device)).float()
        return hits_tag, hits_tag

    def eval_ncm(self, t, val_loader):
        """Evaluation using nearest-class-mean prototypes instead of heads."""
        with torch.no_grad():
            total_loss, total_acc_taw, total_acc_tag, total_num = 0, 0, 0, 0
            self.model.eval()
            for images, targets in val_loader:
                # Forward old model
                # NOTE(review): self.model_old is presumably set by the base
                # class between tasks -- confirm it exists when t > 0.
                old_features = None
                if t > 0:
                    old_outputs, old_features = self.model_old(images.to(self.device), return_features=True)
                # Forward current model
                outputs, feats = self.model(images.to(self.device), return_features=True)
                loss = self.criterion(t, outputs, targets.to(self.device), feats)
                # during training, the usual accuracy is not computed
                if t > len(self.means) - 1:
                    print('No means created yet!')
                    hits_taw, hits_tag = torch.zeros(targets.shape[0]).float(), torch.zeros(targets.shape[0]).float()
                else:
                    hits_taw, hits_tag = self.classify(t, feats, targets)
                # Log
                total_loss += loss.item() * len(targets)
                total_acc_taw += hits_taw.sum().item()
                total_acc_tag += hits_tag.sum().item()
                total_num += len(targets)
        return total_loss / total_num, total_acc_taw / total_num, total_acc_tag / total_num

    def eval(self, t, val_loader):
        """Contains the evaluation code"""
        with torch.no_grad():
            total_loss, total_acc_taw, total_acc_tag, total_num = 0, 0, 0, 0
            self.model.eval()
            for images, targets in val_loader:
                # Forward current model
                if self.OPL:
                    outputs, features = self.model(images.to(self.device), return_features=True)
                else:
                    outputs = self.model(images.to(self.device))
                    features = None
                loss = self.criterion(t, outputs, targets.to(self.device), features)
                hits_taw, hits_tag = self.calculate_metrics(outputs, targets)
                # Log
                total_loss += loss.item() * len(targets)
                total_acc_taw += hits_taw.sum().item()
                total_acc_tag += hits_tag.sum().item()
                total_num += len(targets)
        return total_loss / total_num, total_acc_taw / total_num, total_acc_tag / total_num

    def criterion(self, t, outputs, targets, features=None):
        """Returns the loss value.

        With exemplars (or --all-outputs) the CE term is computed over all
        heads; otherwise only over the current task's head with offset labels.
        """
        if self.all_out or len(self.exemplars_dataset) > 0:
            if self.CE and not self.OPL:
                return torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets)
            # BUGFIX: was `self.OPl` (lowercase L), which raised AttributeError
            # whenever CE and OPL were both enabled with exemplars.
            if self.CE and self.OPL:
                return torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets) + self.opl_weight * OrthogonalProjectionLoss(self.gamma)(features, targets, normalize=True)
            if not self.CE and self.OPL:
                return OrthogonalProjectionLoss(self.gamma)(features, targets, normalize=True)
        else:
            if self.CE and not self.OPL:
                return torch.nn.functional.cross_entropy(outputs[t], targets - self.model.task_offset[t])
            if self.CE and self.OPL:
                return torch.nn.functional.cross_entropy(outputs[t], targets - self.model.task_offset[t]) + self.opl_weight * OrthogonalProjectionLoss(self.gamma)(features, targets - self.model.task_offset[t], normalize=True)
            if not self.CE and self.OPL:
                return OrthogonalProjectionLoss(self.gamma)(features, targets, normalize=True)
        # NOTE(review): falls through (returns None) when both CE and OPL are
        # disabled -- presumably an unsupported configuration; confirm.
class OrthogonalProjectionLoss(nn.Module):
    """Orthogonal Projection Loss.

    Pulls same-class features toward each other (cosine similarity -> 1)
    while pushing different-class features toward orthogonality; the
    negative-pair term is weighted by ``gamma``.
    """

    def __init__(self, gamma=0.5):
        super(OrthogonalProjectionLoss, self).__init__()
        self.gamma = gamma

    def forward(self, features, labels=None, normalize=True):
        dev = torch.device('cuda') if features.is_cuda else torch.device('cpu')
        # Work with unit-norm features so dot products are cosine similarities.
        if normalize:
            features = F.normalize(features, p=2, dim=1)
        col_labels = labels[:, None]  # column vector for pairwise comparison
        same_class = torch.eq(col_labels, col_labels.t()).bool().to(dev)
        diagonal = torch.eye(same_class.shape[0], same_class.shape[1]).bool().to(dev)
        # Positive pairs: same class, excluding each sample with itself.
        pos_mask = same_class.masked_fill(diagonal, 0).float()
        neg_mask = (~same_class).float()
        sim = torch.matmul(features, features.t())
        pos_mean = (pos_mask * sim).sum() / (pos_mask.sum() + 1e-6)
        neg_mean = (neg_mask * sim).sum() / (neg_mask.sum() + 1e-6)  # TODO: removed abs
        return (1.0 - pos_mean) + self.gamma * neg_mean
# This class implements a ResNet Basic Block without the final ReLu in the forward
class BasicBlockNoRelu(nn.Module):
    """ResNet basic block whose forward pass omits the final ReLU, so the
    output features may take negative values. Constructed from the layers
    of an existing block."""
    expansion = 1

    def __init__(self, conv1, bn1, relu, conv2, bn2, downsample):
        super(BasicBlockNoRelu, self).__init__()
        self.conv1 = conv1
        self.bn1 = bn1
        self.relu = relu
        self.conv2 = conv2
        self.bn2 = bn2
        self.downsample = downsample

    def forward(self, x):
        # Shortcut branch: identity, or the downsample projection if present.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        out += shortcut
        # Removed final ReLU
        return out
from flask import Blueprint, jsonify
api = Blueprint('api', __name__)
@api.route("/")
def home():
return {"home": "page"}
@api.route("/<string:variable>")
def greeting(variable):
return {"hello": variable}
| 234 | 87 |
import json
import random
from loguru import logger
from simfleet.customer import CustomerStrategyBehaviour
from simfleet.fleetmanager import FleetManagerStrategyBehaviour
from simfleet.helpers import PathRequestException, distance_in_meters
from simfleet.protocol import (
REQUEST_PERFORMATIVE,
ACCEPT_PERFORMATIVE,
REFUSE_PERFORMATIVE,
PROPOSE_PERFORMATIVE,
CANCEL_PERFORMATIVE,
INFORM_PERFORMATIVE,
QUERY_PROTOCOL,
REQUEST_PROTOCOL,
)
from simfleet.transport import TransportStrategyBehaviour
from simfleet.utils import (
TRANSPORT_WAITING,
TRANSPORT_WAITING_FOR_APPROVAL,
CUSTOMER_WAITING,
TRANSPORT_MOVING_TO_CUSTOMER,
CUSTOMER_ASSIGNED,
TRANSPORT_WAITING_FOR_STATION_APPROVAL,
TRANSPORT_MOVING_TO_STATION,
TRANSPORT_CHARGING,
TRANSPORT_CHARGED,
TRANSPORT_NEEDS_CHARGING,
)
################################################################
# #
# FleetManager Strategy #
# #
################################################################
class MyFleetManagerStrategy(FleetManagerStrategyBehaviour):
    """
    The default strategy for the FleetManager agent. By default it delegates all requests to all transports.
    # Modified to sent request only to the closest taxi to the customer
    """

    async def run(self):
        if not self.agent.registration:
            await self.send_registration()
        msg = await self.receive(timeout=5)
        logger.debug("Manager received message: {}".format(msg))
        if msg:
            content = json.loads(msg.body)
            position = content["origin"]
            # Pick the transport with the minimum distance to the customer origin.
            # NOTE(review): transports are used both via .get_position() and
            # item access ["jid"]/["name"]; confirm the registry value type
            # supports both protocols.
            best_transport = None
            min_distance = 10e99
            for transport in self.get_transport_agents().values():
                dst = distance_in_meters(transport.get_position(), position)
                if dst < min_distance:
                    min_distance = dst
                    best_transport = transport
            if best_transport is None:
                # ROBUSTNESS: no transports registered yet -- drop the request
                # instead of crashing on None below.
                logger.warning("Manager received a request but has no transports.")
                return
            msg.to = str(best_transport["jid"])
            logger.debug(
                "Manager sent request to transport {}".format(best_transport["name"])
            )
            await self.send(msg)
################################################################
# #
# Transport Strategy #
# #
################################################################
class MyTransportStrategy(TransportStrategyBehaviour):
    """
    The default strategy for the Transport agent. By default it accepts every request it receives if available.
    """

    async def run(self):
        # Charging has priority: first discover stations, then reserve one.
        if self.agent.needs_charging():
            if self.agent.stations is None or len(self.agent.stations) < 1:
                logger.warning(
                    "Transport {} looking for a station.".format(self.agent.name)
                )
                await self.send_get_stations()
            else:
                # Stations already known: pick one at random and request a slot.
                station = random.choice(list(self.agent.stations.keys()))
                logger.info(
                    "Transport {} reserving station {}.".format(
                        self.agent.name, station
                    )
                )
                await self.send_proposal(station)
                self.agent.status = TRANSPORT_WAITING_FOR_STATION_APPROVAL
        msg = await self.receive(timeout=5)
        if not msg:
            return
        logger.debug("Transport received message: {}".format(msg))
        try:
            content = json.loads(msg.body)
        except TypeError:
            # Missing/empty body deserializes to an empty payload.
            content = {}
        performative = msg.get_metadata("performative")
        protocol = msg.get_metadata("protocol")
        if protocol == QUERY_PROTOCOL:
            # Answer to a station-information query issued above.
            if performative == INFORM_PERFORMATIVE:
                self.agent.stations = content
                logger.info(
                    "Got list of current stations: {}".format(
                        list(self.agent.stations.keys())
                    )
                )
            elif performative == CANCEL_PERFORMATIVE:
                logger.info("Cancellation of request for stations information.")
        elif protocol == REQUEST_PROTOCOL:
            logger.debug(
                "Transport {} received request protocol from customer/station.".format(
                    self.agent.name
                )
            )
            if performative == REQUEST_PERFORMATIVE:
                # Ride request: only react when idle; refuse and go charge if
                # autonomy does not cover the trip, otherwise propose.
                if self.agent.status == TRANSPORT_WAITING:
                    if not self.has_enough_autonomy(content["origin"], content["dest"]):
                        await self.cancel_proposal(content["customer_id"])
                        self.agent.status = TRANSPORT_NEEDS_CHARGING
                    else:
                        await self.send_proposal(content["customer_id"], {})
                        self.agent.status = TRANSPORT_WAITING_FOR_APPROVAL
            elif performative == ACCEPT_PERFORMATIVE:
                # Customer accepted our proposal: try to drive to them.
                if self.agent.status == TRANSPORT_WAITING_FOR_APPROVAL:
                    logger.debug(
                        "Transport {} got accept from {}".format(
                            self.agent.name, content["customer_id"]
                        )
                    )
                    try:
                        self.agent.status = TRANSPORT_MOVING_TO_CUSTOMER
                        await self.pick_up_customer(
                            content["customer_id"], content["origin"], content["dest"]
                        )
                    except PathRequestException:
                        # No route found: back out and cancel the ride.
                        logger.error(
                            "Transport {} could not get a path to customer {}. Cancelling...".format(
                                self.agent.name, content["customer_id"]
                            )
                        )
                        self.agent.status = TRANSPORT_WAITING
                        await self.cancel_proposal(content["customer_id"])
                    except Exception as e:
                        logger.error(
                            "Unexpected error in transport {}: {}".format(
                                self.agent.name, e
                            )
                        )
                        await self.cancel_proposal(content["customer_id"])
                        self.agent.status = TRANSPORT_WAITING
                else:
                    # Accept arrived while not waiting for approval: decline.
                    await self.cancel_proposal(content["customer_id"])
            elif performative == REFUSE_PERFORMATIVE:
                logger.debug(
                    "Transport {} got refusal from customer/station".format(
                        self.agent.name
                    )
                )
                self.agent.status = TRANSPORT_WAITING
            elif performative == INFORM_PERFORMATIVE:
                # Station granted our reservation: travel there and charge.
                if self.agent.status == TRANSPORT_WAITING_FOR_STATION_APPROVAL:
                    logger.info(
                        "Transport {} got accept from station {}".format(
                            self.agent.name, content["station_id"]
                        )
                    )
                    try:
                        self.agent.status = TRANSPORT_MOVING_TO_STATION
                        await self.send_confirmation_travel(content["station_id"])
                        await self.go_to_the_station(
                            content["station_id"], content["dest"]
                        )
                    except PathRequestException:
                        logger.error(
                            "Transport {} could not get a path to station {}. Cancelling...".format(
                                self.agent.name, content["station_id"]
                            )
                        )
                        self.agent.status = TRANSPORT_WAITING
                        await self.cancel_proposal(content["station_id"])
                    except Exception as e:
                        logger.error(
                            "Unexpected error in transport {}: {}".format(
                                self.agent.name, e
                            )
                        )
                        await self.cancel_proposal(content["station_id"])
                        self.agent.status = TRANSPORT_WAITING
                elif self.agent.status == TRANSPORT_CHARGING:
                    # Station reports charging complete: release the station.
                    if content["status"] == TRANSPORT_CHARGED:
                        self.agent.transport_charged()
                        await self.agent.drop_station()
            elif performative == CANCEL_PERFORMATIVE:
                logger.info(
                    "Cancellation of request for {} information".format(
                        self.agent.fleet_type
                    )
                )
################################################################
# #
# Customer Strategy #
# #
################################################################
class MyCustomerStrategy(CustomerStrategyBehaviour):
    """
    The default strategy for the Customer agent. By default it accepts the first proposal it receives.
    """

    async def run(self):
        # Bootstrap: until fleet managers are known, ask for them and handle
        # only that reply in this cycle.
        if self.agent.fleetmanagers is None:
            await self.send_get_managers(self.agent.fleet_type)
            msg = await self.receive(timeout=5)
            if msg:
                performative = msg.get_metadata("performative")
                if performative == INFORM_PERFORMATIVE:
                    self.agent.fleetmanagers = json.loads(msg.body)
                    return
                elif performative == CANCEL_PERFORMATIVE:
                    logger.info(
                        "Cancellation of request for {} information".format(
                            self.agent.type_service
                        )
                    )
                    return
        # While waiting, (re)issue the transport request each cycle.
        if self.agent.status == CUSTOMER_WAITING:
            await self.send_request(content={})
        msg = await self.receive(timeout=5)
        if msg:
            performative = msg.get_metadata("performative")
            transport_id = msg.sender
            if performative == PROPOSE_PERFORMATIVE:
                # Accept the first proposal while still waiting; refuse any
                # proposals that arrive after a transport was assigned.
                if self.agent.status == CUSTOMER_WAITING:
                    logger.debug(
                        "Customer {} received proposal from transport {}".format(
                            self.agent.name, transport_id
                        )
                    )
                    await self.accept_transport(transport_id)
                    self.agent.status = CUSTOMER_ASSIGNED
                else:
                    await self.refuse_transport(transport_id)
            elif performative == CANCEL_PERFORMATIVE:
                # Assigned transport backed out: go back to waiting.
                if self.agent.transport_assigned == str(transport_id):
                    logger.warning(
                        "Customer {} received a CANCEL from Transport {}.".format(
                            self.agent.name, transport_id
                        )
                    )
                    self.agent.status = CUSTOMER_WAITING
| 11,609 | 2,773 |
"""Unix utilities
Author Rory Byrne <rory@rory.bio>
"""
from shutil import which
import subprocess
from typing import List, Optional
def is_installed(name: str) -> bool:
    """Check whether `name` is on PATH and marked as executable."""
    executable_path = which(name)
    return executable_path is not None
def run_command(cmd: List[str], capture_output: bool = True) -> Optional[str]:
    """Run a shell command.

    Returns the decoded stdout when anything was captured, otherwise None.
    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    completed = subprocess.run(cmd, capture_output=capture_output, check=True)
    output = completed.stdout
    return output.decode() if output else None
| 540 | 167 |
import mysql_config as mysql
#Funcion para validar usuario
def validateUser(userID, userPassword):
    """Validate userID/userPassword against the `usuario` table.

    Returns {"success": "yes"} on a match, otherwise the error payload
    produced by mysql.sendErrorMssg.
    """
    print("Validando usuario con la base de datos....")
    # SECURITY: the query is built by string concatenation, so userID is an
    # SQL-injection vector. fetchDataFromDatabase's signature is not visible
    # here -- switch to a parameterized query if the helper supports it.
    data = mysql.fetchDataFromDatabase("SELECT password FROM usuario WHERE userID='" + userID + "'")
    row = mysql.getFirstElement(data)
    # ROBUSTNESS: unknown user yields no row at all; treat it like a bad password.
    if row is None or row[0] != userPassword:
        print("Las claves no son validas para el usuario " + userID)
        return mysql.sendErrorMssg("Error, las claves no son correctas")
    print("Usuario validado con exito!")
    # BUGFIX: was {"success","yes"} -- a set literal; a JSON-style dict was intended.
    returnJson = {"success": "yes"}
    return returnJson
| 655 | 196 |
import deepchem as dc
import deepchem.models.tensorgraph.layers as layers
import numpy as np
import os
import re
RETRAIN = False  # set True to train from scratch instead of restoring saved weights

# Load the datasets.
# Each BBBC005 image encodes its cell count in the filename between "_C" and
# the next "_"; that count is the regression target.
image_dir = 'BBBC005_v1_images'
files = []
labels = []
for f in os.listdir(image_dir):
    if f.endswith('.TIF'):
        files.append(os.path.join(image_dir, f))
        labels.append(int(re.findall('_C(.*?)_', f)[0]))
loader = dc.data.ImageLoader()
dataset = loader.featurize(files, np.array(labels))
splitter = dc.splits.RandomSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(dataset, seed=123)
# Create the model.
learning_rate = dc.models.optimizers.ExponentialDecay(0.001, 0.9, 250)
model = dc.models.TensorGraph(learning_rate=learning_rate, model_dir='models/model')
features = layers.Feature(shape=(None, 520, 696))
# NOTE: `labels` is rebound here from the label list above to the Label layer.
labels = layers.Label(shape=(None,))
prev_layer = features
# Five strided conv layers progressively downsample the 520x696 images.
for num_outputs in [16, 32, 64, 128, 256]:
    prev_layer = layers.Conv2D(num_outputs, kernel_size=5, stride=2, in_layers=prev_layer)
output = layers.Dense(1, in_layers=layers.Flatten(prev_layer))
model.add_output(output)
# L2 regression loss on the predicted cell count.
loss = layers.ReduceSum(layers.L2Loss(in_layers=(output, labels)))
model.set_loss(loss)
if not os.path.exists('./models'):
    os.mkdir('models')
if not os.path.exists('./models/model'):
    os.mkdir('models/model')
if not RETRAIN:
    model.restore()
# Train it and evaluate performance on the test set.
if RETRAIN:
    print("About to fit model for 50 epochs")
    model.fit(train_dataset, nb_epoch=50)
# Report test RMSE.
y_pred = model.predict(test_dataset).flatten()
print(np.sqrt(np.mean((y_pred-test_dataset.y)**2)))
| 1,582 | 610 |
# -*- coding: utf-8 -*-
#(c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
#pragma pylint: disable=unused-argument, no-self-use, line-too-long
"""AppFunction implementation"""
from cachetools import cached, TTLCache
from resilient_circuits import AppFunctionComponent, app_function, FunctionResult
from fn_playbook_utils.lib.common import get_playbooks_by_incident_id, parse_inputs
PACKAGE_NAME = "fn_playbook_utils"
FN_NAME = "pb_get_playbook_data"
class FunctionComponent(AppFunctionComponent):
    """Component that implements function 'pb_get_playbook_data'"""

    def __init__(self, opts):
        super(FunctionComponent, self).__init__(opts, PACKAGE_NAME)
        # Keep one REST client for the lifetime of the component.
        self.restclient = self.rest_client()

    @app_function(FN_NAME)
    def _app_function(self, fn_inputs):
        """
        Function: Get information on workflows run for this incident or for a range of incidents
        Inputs:
            -   fn_inputs.pb_min_incident_id
            -   fn_inputs.pb_max_incident_id
            -   fn_inputs.pb_min_incident_date
            -   fn_inputs.pb_max_incident_date
            -   fn_inputs.pb_object_name
            -   fn_inputs.pb_object_type
        """
        yield self.status_message("Starting App Function: '{0}'".format(FN_NAME))
        # Resolve the id/date inputs to a concrete [min_id, max_id] range.
        min_id, max_id = parse_inputs(self.restclient, fn_inputs)
        yield self.status_message("Using min_incident: {} max_incident: {}".format(min_id, max_id))
        result_data = self.get_all_incident_playbooks(min_id, max_id)
        yield self.status_message("Finished running App Function: '{0}'".format(FN_NAME))
        yield FunctionResult(result_data)

    # NOTE(review): @cached on an instance method includes `self` in the cache
    # key and keeps the instance referenced for the cache lifetime (TTL 60s,
    # 30 entries); confirm this is intended for a long-lived component.
    @cached(cache=TTLCache(maxsize=30, ttl=60))
    def get_all_incident_playbooks(self, min_id, max_id):
        # get all the incident data to return
        result_dict = {}
        result_data = {
            "org_id" : self.restclient.org_id,
            "min_id": min_id,
            "max_id": max_id,
            "playbook_content": result_dict
        }
        # don't continue if no values
        if bool(min_id and max_id):
            # Group the returned playbooks by their incident id.
            search_results = get_playbooks_by_incident_id(self.restclient, min_id, max_id)
            for pb in search_results.get('data', []):
                if pb['incident_id'] in result_dict:
                    result_dict[pb['incident_id']].append(pb)
                else:
                    result_dict[pb['incident_id']] = [pb]
        return result_data
| 2,428 | 806 |
# -*- coding: utf-8 -*-
"""
IMSCC File Builder
Produce an lxml entity ready to convert to text
"""
import io
def course_output_file(model, ioopen=io.open) -> None:
    """Emit an output file for every item contained in a course model."""
    for child in model:
        file_output(child, ioopen)
def section_output_file(model, ioopen=io.open) -> None:
    """Emit an output file for every item contained in a section model."""
    for child in model:
        file_output(child, ioopen)
def label_output_file(model, ioopen=io.open) -> None:
    """Labels produce no standalone output file; intentionally a no-op."""
    return None
def assessment_output_file(model, ioopen=io.open) -> None:
    """Assessment output not implemented yet; intentionally a no-op."""
    return None
def discussion_output_file(model, ioopen=io.open) -> None:
    """Output the discussion.xml file (not implemented yet; no-op)."""
    return None
def binaryfile_output_file(model, ioopen=io.open) -> None:
    """Write the binary-file model's exported content to its `name` path."""
    with ioopen(model.name, 'w') as out:
        out.write(model.export())
def image_output_file(model, ioopen=io.open) -> None:
    """Write the image model's exported content to its `name` path."""
    with ioopen(model.name, 'w') as out:
        out.write(model.export())
def file_output(model, ioopen=io.open):
    """Dispatch `model` to the module-level builder named
    `<classname-lowercased>_output_file`, raising NotImplementedError when
    no such builder exists."""
    cls_name = model.__class__.__name__
    funct_name = '%s_output_file' % cls_name.lower()
    handler = globals().get(funct_name, None)
    if handler is None:
        raise NotImplementedError(
            'cannot find file output builder function {} for {}'.format(funct_name, cls_name))
    return handler(model, ioopen)
| 1,274 | 433 |
# Local imports
from uplink import Consumer, PartMap, post, multipart
# Constants
BASE_URL = "https://example.com/"
def test_without_converter(mock_response, mock_client):
    """Verify that PartMap kwargs are passed to the request untouched when
    no converter is configured (files arrive exactly as given)."""
    class Calendar(Consumer):
        @multipart
        @post("/attachments")
        def upload_attachments(self, **files: PartMap):
            pass

    mock_client.with_response(mock_response)
    calendar = Calendar(base_url=BASE_URL, client=mock_client)
    # Any opaque object works: it must reach the request unconverted.
    file = object()

    # Run
    calendar.upload_attachments(file=file)

    # Assertion: should not convert if converter is None
    request = mock_client.history[0]
    assert request.files == {"file": file}
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import Chrome
def get_url(search_item):
    """Build an Amazon.in search URL template for `search_item`.

    The returned string ends with '&page={}' so the caller can .format()
    in a page number.
    """
    template = "https://www.amazon.in/s?k={}&crid=1GNY6Q6AHOOKS&sprefix=and%2Caps%2C524&ref=nb_sb_ss_ts-oa-p_1_3"
    # Spaces become '+' in the k= query parameter.
    search_item = search_item.replace(' ', '+')
    #add query tool
    url = template.format(search_item)
    # BUGFIX: was '&page{}' -- without '=' the pagination parameter is
    # malformed and every request effectively returns page 1.
    url += '&page={}'
    return url
def extract_record(item):
    """Extract (description, price, rating, review_count, url) from one
    search-result element; returns None when the item has no price."""
    #description Url and heading
    atag = item.h2.a
    description = atag.text.strip()
    url = "https://www.amazon.in/" + atag.get('href')
    try:
        #price
        price_present = item.find('span', 'a-price')
        price = price_present.find('span', 'a-offscreen').text
    except AttributeError:
        # Items without a price (e.g. sponsored shells) are skipped entirely.
        return
    try:
        #rating and review
        rating = item.i.text
        review_count = item.find('span', {'class': 'a-size-base', 'dir': 'auto'}).text
    except AttributeError:
        # BUGFIX: review_count was a bare expression here; if the rating
        # lookup raised first it was never assigned, causing
        # UnboundLocalError. Default both fields to empty strings.
        rating = ''
        review_count = ''
    results = (description, price, rating, review_count, url)
    return results
def main(search_item):
    """Scrape up to 20 Amazon result pages for `search_item` and write the
    extracted records to results.csv. Returns None."""
    # BUGFIX: was `record=[]` while the loop appended to an undefined
    # `records` name (NameError on first match).
    records = []
    url = get_url(search_item)
    # BUGFIX: `driver` was never created before use; instantiate the Chrome
    # driver here and guarantee it is closed even if a page fetch fails.
    driver = Chrome()
    try:
        for page in range(1, 21):
            driver.get(url.format(page))
            soup = BeautifulSoup(driver.page_source, 'html.parser')
            results = soup.find_all('div', {"data-component-type": "s-search-result"})
            for item in results:
                record = extract_record(item)
                if record:
                    records.append(record)
    finally:
        driver.close()
    #save data as csv file
    with open('results.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['Description', 'Price', 'Rating', 'ReviewCount', 'url'])
        writer.writerows(records)
print(main('android phone'))
| 1,864 | 602 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.21
# in conjunction with Tcl version 8.6
# Apr 22, 2019 01:28:38 AM +0530 platform: Windows NT
import sys
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import unknown_support
import os.path
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    global val, w, root
    global prog_location
    # Remember where this script lives so support code can locate resources.
    prog_call = sys.argv[0]
    print ('prog_call = {}'.format(prog_call))
    prog_location = os.path.split(prog_call)[0]
    print ('prog_location = {}'.format(prog_location))
    sys.stdout.flush()
    # Build the root window, attach the generated Toplevel1 UI, hand both to
    # the support module, then enter the Tk event loop (blocks until exit).
    root = tk.Tk()
    top = Toplevel1 (root)
    unknown_support.init(root, top)
    root.mainloop()
w = None
def create_Toplevel1(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    global w, w_win, rt
    global prog_location
    # Remember where this script lives so support code can locate resources.
    prog_call = sys.argv[0]
    print ('prog_call = {}'.format(prog_call))
    prog_location = os.path.split(prog_call)[0]
    print ('prog_location = {}'.format(prog_location))
    rt = root
    # Create a child Toplevel window under the caller's root, populate it
    # with the generated UI, and return both handles to the caller.
    w = tk.Toplevel (root)
    top = Toplevel1 (w)
    unknown_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Toplevel1():
    '''Tear down the window created by create_Toplevel1 and clear the
    module-level handle so it cannot be reused.'''
    global w
    w.destroy()
    w = None
class Toplevel1:
    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.

           NOTE: generated by PAGE; widgets are positioned with absolute
           place() coordinates and configured attribute-by-attribute.'''
        # Color palette used throughout the generated widgets.
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#ececec' # Closest X11 color: 'gray92'
        font17 = "-family {Berlin Sans FB} -size 15"
        # Base ttk style shared by all themed widgets.
        self.style = ttk.Style()
        if sys.platform == "win32":
            self.style.theme_use('winnative')
        self.style.configure('.',background=_bgcolor)
        self.style.configure('.',foreground=_fgcolor)
        self.style.configure('.',font="TkDefaultFont")
        self.style.map('.',background=
            [('selected', _compcolor), ('active',_ana2color)])

        # Main window geometry and colors.
        top.geometry("1600x837+6+194")
        top.title("New Toplevel")
        top.configure(background="#ffff24")
        top.configure(highlightbackground="#d9d9d9")
        top.configure(highlightcolor="black")

        # --- Header branding: "eTAX" / "2019" / tagline ---
        self.Label1 = tk.Label(top)
        self.Label1.place(relx=0.013, rely=0.024, height=81, width=156)
        self.Label1.configure(activebackground="#f9f9f9")
        self.Label1.configure(activeforeground="black")
        self.Label1.configure(background="#ffff24")
        self.Label1.configure(disabledforeground="#a3a3a3")
        self.Label1.configure(font="-family {Britannic Bold} -size 48 -weight bold")
        self.Label1.configure(foreground="#ff250d")
        self.Label1.configure(highlightbackground="#d9d9d9")
        self.Label1.configure(highlightcolor="black")
        self.Label1.configure(text='''eTAX''')

        self.Label1_1 = tk.Label(top)
        self.Label1_1.place(relx=0.113, rely=0.024, height=81, width=156)
        self.Label1_1.configure(activebackground="#f9f9f9")
        self.Label1_1.configure(activeforeground="black")
        self.Label1_1.configure(background="#ffff24")
        self.Label1_1.configure(disabledforeground="#a3a3a3")
        self.Label1_1.configure(font="-family {Britannic Bold} -size 48 -weight bold")
        self.Label1_1.configure(foreground="#2212ff")
        self.Label1_1.configure(highlightbackground="#d9d9d9")
        self.Label1_1.configure(highlightcolor="black")
        self.Label1_1.configure(text='''2019''')

        self.Label2 = tk.Label(top)
        self.Label2.place(relx=0.069, rely=0.108, height=31, width=141)
        self.Label2.configure(activebackground="#f9f9f9")
        self.Label2.configure(activeforeground="black")
        self.Label2.configure(background="#ffff24")
        self.Label2.configure(disabledforeground="#a3a3a3")
        self.Label2.configure(font="-family {Segoe Script} -size 12 -slant italic")
        self.Label2.configure(foreground="#13c12a")
        self.Label2.configure(highlightbackground="#d9d9d9")
        self.Label2.configure(highlightcolor="black")
        self.Label2.configure(text='''working for you''')

        # --- Navigation buttons (no commands wired here; see support module) ---
        self.backbutton = tk.Button(top)
        self.backbutton.place(relx=0.013, rely=0.167, height=44, width=97)
        self.backbutton.configure(activebackground="#ececec")
        self.backbutton.configure(activeforeground="#000000")
        self.backbutton.configure(background="#120bd8")
        self.backbutton.configure(disabledforeground="#a3a3a3")
        self.backbutton.configure(font="-family {Rockwell Extra Bold} -size 12 -weight bold")
        self.backbutton.configure(foreground="#fcffff")
        self.backbutton.configure(highlightbackground="#d9d9d9")
        self.backbutton.configure(highlightcolor="black")
        self.backbutton.configure(pady="0")
        self.backbutton.configure(text='''Back''')

        self.exit = tk.Button(top)
        self.exit.place(relx=0.1, rely=0.167, height=44, width=97)
        self.exit.configure(activebackground="#ececec")
        self.exit.configure(activeforeground="#000000")
        self.exit.configure(background="#120bd8")
        self.exit.configure(disabledforeground="#a3a3a3")
        self.exit.configure(font="-family {Rockwell Extra Bold} -size 12 -weight bold")
        self.exit.configure(foreground="#fcffff")
        self.exit.configure(highlightbackground="#d9d9d9")
        self.exit.configure(highlightcolor="black")
        self.exit.configure(pady="0")
        self.exit.configure(text='''Exit''')

        # --- Footer status labels (version / platform / DB connection) ---
        self.Label3 = tk.Label(top)
        self.Label3.place(relx=0.013, rely=0.944, height=21, width=56)
        self.Label3.configure(activebackground="#f9f9f9")
        self.Label3.configure(activeforeground="black")
        self.Label3.configure(background="#ffff24")
        self.Label3.configure(disabledforeground="#a3a3a3")
        self.Label3.configure(foreground="#000000")
        self.Label3.configure(highlightbackground="#d9d9d9")
        self.Label3.configure(highlightcolor="black")
        self.Label3.configure(text='''etax-2019''')

        self.Label3_3 = tk.Label(top)
        self.Label3_3.place(relx=0.013, rely=0.968, height=21, width=34)
        self.Label3_3.configure(activebackground="#f9f9f9")
        self.Label3_3.configure(activeforeground="black")
        self.Label3_3.configure(background="#ffff24")
        self.Label3_3.configure(disabledforeground="#a3a3a3")
        self.Label3_3.configure(foreground="#000000")
        self.Label3_3.configure(highlightbackground="#d9d9d9")
        self.Label3_3.configure(highlightcolor="black")
        self.Label3_3.configure(text='''v 1.0.2''')

        self.Label3_4 = tk.Label(top)
        self.Label3_4.place(relx=0.006, rely=1.016, height=21, width=134)
        self.Label3_4.configure(activebackground="#f9f9f9")
        self.Label3_4.configure(activeforeground="black")
        self.Label3_4.configure(background="#ffff24")
        self.Label3_4.configure(disabledforeground="#a3a3a3")
        self.Label3_4.configure(foreground="#000000")
        self.Label3_4.configure(highlightbackground="#d9d9d9")
        self.Label3_4.configure(highlightcolor="black")
        self.Label3_4.configure(text='''Working On Windows''')

        self.Label3_1 = tk.Label(top)
        self.Label3_1.place(relx=0.013, rely=0.992, height=21, width=164)
        self.Label3_1.configure(activebackground="#f9f9f9")
        self.Label3_1.configure(activeforeground="black")
        self.Label3_1.configure(background="#ffff24")
        self.Label3_1.configure(disabledforeground="#a3a3a3")
        self.Label3_1.configure(foreground="#000000")
        self.Label3_1.configure(highlightbackground="#d9d9d9")
        self.Label3_1.configure(highlightcolor="black")
        self.Label3_1.configure(text='''Connected to MySQL server 8.0''')

        # --- Page title ---
        self.Label4 = tk.Label(top)
        self.Label4.place(relx=0.375, rely=0.024, height=68, width=361)
        self.Label4.configure(activebackground="#f9f9f9")
        self.Label4.configure(activeforeground="black")
        self.Label4.configure(background="#ffff24")
        self.Label4.configure(disabledforeground="#36911a")
        self.Label4.configure(font="-family {Rockwell Extra Bold} -size 40 -weight bold")
        self.Label4.configure(foreground="#36911a")
        self.Label4.configure(highlightbackground="#d9d9d9")
        self.Label4.configure(highlightcolor="black")
        self.Label4.configure(text='''Workspace''')

        # --- Location / server info labels (top-right and bottom-right) ---
        self.Label5 = tk.Label(top)
        self.Label5.place(relx=0.763, rely=0.036, height=28, width=192)
        self.Label5.configure(activebackground="#f9f9f9")
        self.Label5.configure(activeforeground="black")
        self.Label5.configure(background="#ffff24")
        self.Label5.configure(disabledforeground="#a3a3a3")
        self.Label5.configure(font="-family {Rockwell} -size 15")
        self.Label5.configure(foreground="#000000")
        self.Label5.configure(highlightbackground="#d9d9d9")
        self.Label5.configure(highlightcolor="black")
        self.Label5.configure(text='''Village : Kalamwadi''')

        self.Label5_2 = tk.Label(top)
        self.Label5_2.place(relx=0.781, rely=0.072, height=28, width=172)
        self.Label5_2.configure(activebackground="#f9f9f9")
        self.Label5_2.configure(activeforeground="black")
        self.Label5_2.configure(background="#ffff24")
        self.Label5_2.configure(disabledforeground="#a3a3a3")
        self.Label5_2.configure(font="-family {Rockwell} -size 15")
        self.Label5_2.configure(foreground="#000000")
        self.Label5_2.configure(highlightbackground="#d9d9d9")
        self.Label5_2.configure(highlightcolor="black")
        self.Label5_2.configure(text='''District : Sangli''')

        self.Label5_3 = tk.Label(top)
        self.Label5_3.place(relx=0.863, rely=0.968, height=28, width=172)
        self.Label5_3.configure(activebackground="#f9f9f9")
        self.Label5_3.configure(activeforeground="black")
        self.Label5_3.configure(background="#ffff24")
        self.Label5_3.configure(disabledforeground="#a3a3a3")
        self.Label5_3.configure(font="-family {Rockwell} -size 9")
        self.Label5_3.configure(foreground="#000000")
        self.Label5_3.configure(highlightbackground="#d9d9d9")
        self.Label5_3.configure(highlightcolor="black")
        self.Label5_3.configure(text='''Server Status : Online''')

        self.Label5_4 = tk.Label(top)
        self.Label5_4.place(relx=0.869, rely=0.992, height=28, width=172)
        self.Label5_4.configure(activebackground="#f9f9f9")
        self.Label5_4.configure(activeforeground="black")
        self.Label5_4.configure(background="#ffff24")
        self.Label5_4.configure(disabledforeground="#a3a3a3")
        self.Label5_4.configure(font="-family {Rockwell} -size 9")
        self.Label5_4.configure(foreground="#000000")
        self.Label5_4.configure(highlightbackground="#d9d9d9")
        self.Label5_4.configure(highlightcolor="black")
        self.Label5_4.configure(text='''Host : localhost''')

        self.Label5_5 = tk.Label(top)
        self.Label5_5.place(relx=0.869, rely=1.016, height=28, width=172)
        self.Label5_5.configure(activebackground="#f9f9f9")
        self.Label5_5.configure(activeforeground="black")
        self.Label5_5.configure(background="#ffff24")
        self.Label5_5.configure(disabledforeground="#a3a3a3")
        self.Label5_5.configure(font="-family {Rockwell} -size 9")
        self.Label5_5.configure(foreground="#000000")
        self.Label5_5.configure(highlightbackground="#d9d9d9")
        self.Label5_5.configure(highlightcolor="black")
        self.Label5_5.configure(text='''Port : 3306''')

        self.Label5_1 = tk.Label(top)
        self.Label5_1.place(relx=0.875, rely=0.096, height=28, width=172)
        self.Label5_1.configure(activebackground="#f9f9f9")
        self.Label5_1.configure(activeforeground="black")
        self.Label5_1.configure(background="#ffff24")
        self.Label5_1.configure(disabledforeground="#a3a3a3")
        self.Label5_1.configure(font="-family {Rockwell} -size 12")
        self.Label5_1.configure(foreground="#000000")
        self.Label5_1.configure(highlightbackground="#d9d9d9")
        self.Label5_1.configure(highlightcolor="black")
        self.Label5_1.configure(text='''User : user''')

        # --- Result list boxes (ScrolledListBox is defined later in this file) ---
        self.box1o1 = ScrolledListBox(top)
        self.box1o1.place(relx=0.388, rely=0.287, relheight=0.639
            , relwidth=0.238)
        self.box1o1.configure(background="white")
        self.box1o1.configure(disabledforeground="#a3a3a3")
        self.box1o1.configure(font="TkFixedFont")
        self.box1o1.configure(foreground="black")
        self.box1o1.configure(highlightbackground="#d9d9d9")
        self.box1o1.configure(highlightcolor="#d9d9d9")
        self.box1o1.configure(selectbackground="#c4c4c4")
        self.box1o1.configure(selectforeground="black")
        self.box1o1.configure(width=10)

        self.box2o1 = ScrolledListBox(top)
        self.box2o1.place(relx=0.625, rely=0.287, relheight=0.639
            , relwidth=0.326)
        self.box2o1.configure(background="white")
        self.box2o1.configure(disabledforeground="#a3a3a3")
        self.box2o1.configure(font="TkFixedFont")
        self.box2o1.configure(foreground="black")
        self.box2o1.configure(highlightbackground="#d9d9d9")
        self.box2o1.configure(highlightcolor="#d9d9d9")
        self.box2o1.configure(selectbackground="#c4c4c4")
        self.box2o1.configure(selectforeground="black")
        self.box2o1.configure(width=10)

        # --- Visual separators ---
        self.TSeparator1 = ttk.Separator(top)
        self.TSeparator1.place(relx=0.888, rely=0.012, relheight=0.119)
        self.TSeparator1.configure(orient="vertical")
        self.TSeparator2 = ttk.Separator(top)
        self.TSeparator2.place(relx=0.013, rely=0.143, relwidth=0.194)
        self.TSeparator3 = ttk.Separator(top)
        self.TSeparator3.place(relx=0.013, rely=0.239, relwidth=0.938)
        self.TSeparator3_6 = ttk.Separator(top)
        self.TSeparator3_6.place(relx=0.013, rely=0.938, relwidth=0.938)

        # --- Action buttons along the bottom ---
        self.viewbutton = tk.Button(top)
        self.viewbutton.place(relx=0.425, rely=0.98, height=33, width=148)
        self.viewbutton.configure(activebackground="#ececec")
        self.viewbutton.configure(activeforeground="#000000")
        self.viewbutton.configure(background="#2020d8")
        self.viewbutton.configure(disabledforeground="#a3a3a3")
        self.viewbutton.configure(font="-family {Rockwell} -size 13 -weight bold")
        self.viewbutton.configure(foreground="#ffffff")
        self.viewbutton.configure(highlightbackground="#d9d9d9")
        self.viewbutton.configure(highlightcolor="black")
        self.viewbutton.configure(pady="0")
        self.viewbutton.configure(takefocus="0")
        self.viewbutton.configure(text='''View all Names''')

        self.viewbutton_8 = tk.Button(top)
        self.viewbutton_8.place(relx=0.581, rely=0.98, height=33, width=148)
        self.viewbutton_8.configure(activebackground="#ececec")
        self.viewbutton_8.configure(activeforeground="#000000")
        self.viewbutton_8.configure(background="#2020d8")
        self.viewbutton_8.configure(disabledforeground="#a3a3a3")
        self.viewbutton_8.configure(font="-family {Rockwell} -size 13 -weight bold")
        self.viewbutton_8.configure(foreground="#ffffff")
        self.viewbutton_8.configure(highlightbackground="#d9d9d9")
        self.viewbutton_8.configure(highlightcolor="black")
        self.viewbutton_8.configure(pady="0")
        self.viewbutton_8.configure(takefocus="0")
        self.viewbutton_8.configure(text='''View all Data''')

        self.viewbutton_9 = tk.Button(top)
        self.viewbutton_9.place(relx=0.744, rely=0.98, height=33, width=108)
        self.viewbutton_9.configure(activebackground="#ececec")
        self.viewbutton_9.configure(activeforeground="#000000")
        self.viewbutton_9.configure(background="#2020d8")
        self.viewbutton_9.configure(disabledforeground="#a3a3a3")
        self.viewbutton_9.configure(font="-family {Rockwell} -size 13 -weight bold")
        self.viewbutton_9.configure(foreground="#ffffff")
        self.viewbutton_9.configure(highlightbackground="#d9d9d9")
        self.viewbutton_9.configure(highlightcolor="black")
        self.viewbutton_9.configure(pady="0")
        self.viewbutton_9.configure(takefocus="0")
        self.viewbutton_9.configure(text='''Clear all''')

        # --- Logo image; the PhotoImage must be kept as an attribute or it
        #     would be garbage-collected and the label would go blank ---
        self.Label6 = tk.Label(top)
        self.Label6.place(relx=0.913, rely=0.036, height=44, width=44)
        self.Label6.configure(activebackground="#f9f9f9")
        self.Label6.configure(activeforeground="black")
        self.Label6.configure(background="#d9d9d9")
        self.Label6.configure(disabledforeground="#a3a3a3")
        self.Label6.configure(foreground="#000000")
        self.Label6.configure(highlightbackground="#d9d9d9")
        self.Label6.configure(highlightcolor="black")
        # prog_location is the module-level global set in vp_start_gui /
        # create_Toplevel1; the relative path is as generated by PAGE.
        photo_location = os.path.join(prog_location,"../../../view database/original/login3.png")
        self._img0 = tk.PhotoImage(file=photo_location)
        self.Label6.configure(image=self._img0)
        self.Label6.configure(text='''Label''')

        self.Frame1 = tk.Frame(top)
        self.Frame1.place(relx=0.013, rely=0.251, relheight=0.675
            , relwidth=0.197)
        self.Frame1.configure(relief='groove')
        self.Frame1.configure(borderwidth="2")
        self.Frame1.configure(relief='groove')
        self.Frame1.configure(background="#d9d9d9")
        self.Frame1.configure(highlightbackground="#d9d9d9")
        self.Frame1.configure(highlightcolor="black")
        self.Frame1.configure(width=315)

        # --- Search inputs: village name and UID number ---
        self.villagename = tk.Entry(top)
        self.villagename.place(relx=0.375, rely=0.179, height=20, relwidth=0.153)
        self.villagename.configure(background="white")
        self.villagename.configure(disabledforeground="#a3a3a3")
        self.villagename.configure(font="TkFixedFont")
        self.villagename.configure(foreground="#1b1391")
        self.villagename.configure(insertbackground="black")
        self.villagename.configure(width=244)

        self.Entry2 = tk.Entry(top)
        self.Entry2.place(relx=0.675, rely=0.179,height=20, relwidth=0.146)
        self.Entry2.configure(background="white")
        self.Entry2.configure(disabledforeground="#a3a3a3")
        self.Entry2.configure(font="TkFixedFont")
        self.Entry2.configure(foreground="#000000")
        self.Entry2.configure(insertbackground="black")
        self.Entry2.configure(width=234)

        self.Label7 = tk.Label(top)
        self.Label7.place(relx=0.269, rely=0.179, height=21, width=154)
        self.Label7.configure(background="#ffff24")
        self.Label7.configure(disabledforeground="#a3a3a3")
        self.Label7.configure(font=font17)
        self.Label7.configure(foreground="#000000")
        self.Label7.configure(text='''Village Name :''')
        self.Label7.configure(width=154)

        self.Label7_1 = tk.Label(top)
        self.Label7_1.place(relx=0.575, rely=0.179, height=21, width=154)
        self.Label7_1.configure(activebackground="#f9f9f9")
        self.Label7_1.configure(activeforeground="black")
        self.Label7_1.configure(background="#ffff24")
        self.Label7_1.configure(disabledforeground="#a3a3a3")
        self.Label7_1.configure(font="-family {Berlin Sans FB} -size 15")
        self.Label7_1.configure(foreground="#000000")
        self.Label7_1.configure(highlightbackground="#d9d9d9")
        self.Label7_1.configure(highlightcolor="black")
        self.Label7_1.configure(text='''UID Number :''')
        self.Label7_1.configure(width=154)

        self.btn_find = tk.Button(top)
        self.btn_find.place(relx=0.856, rely=0.167, height=34, width=97)
        self.btn_find.configure(activebackground="#ececec")
        self.btn_find.configure(activeforeground="#000000")
        self.btn_find.configure(background="#ff330a")
        self.btn_find.configure(disabledforeground="#a3a3a3")
        self.btn_find.configure(font="-family {Rockwell Extra Bold} -size 12 -weight bold")
        self.btn_find.configure(foreground="#fcffff")
        self.btn_find.configure(highlightbackground="#d9d9d9")
        self.btn_find.configure(highlightcolor="black")
        self.btn_find.configure(pady="0")
        self.btn_find.configure(text='''FIND''')
        self.btn_find.configure(width=97)
# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
    '''Configure the scrollbars for a widget.

    Mixin: the concrete widget class (e.g. ScrolledListBox) inherits from
    both this and a Tk widget; `self` is therefore the scrollable widget.'''

    def __init__(self, master):
        # Rozen. Added the try-except clauses so that this class
        # could be used for scrolled entry widget for which vertical
        # scrolling is not supported. 5/7/14.
        # NOTE(review): the bare excepts come from the PAGE generator; they
        # swallow any failure when the widget lacks yview support.
        try:
            vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
        except:
            pass
        hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)

        #self.configure(yscrollcommand=_autoscroll(vsb),
        #    xscrollcommand=_autoscroll(hsb))
        try:
            self.configure(yscrollcommand=self._autoscroll(vsb))
        except:
            pass
        self.configure(xscrollcommand=self._autoscroll(hsb))

        # Grid the widget and its scrollbars inside the container frame.
        self.grid(column=0, row=0, sticky='nsew')
        try:
            vsb.grid(column=1, row=0, sticky='ns')
        except:
            pass
        hsb.grid(column=0, row=1, sticky='ew')

        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)

        # Copy geometry methods of master (taken from ScrolledText.py)
        # so callers can pack/grid/place the scrolled widget directly.
        if py3:
            methods = tk.Pack.__dict__.keys() | tk.Grid.__dict__.keys() \
                | tk.Place.__dict__.keys()
        else:
            methods = tk.Pack.__dict__.keys() + tk.Grid.__dict__.keys() \
                + tk.Place.__dict__.keys()

        for meth in methods:
            if meth[0] != '_' and meth not in ('config', 'configure'):
                setattr(self, meth, getattr(master, meth))

    @staticmethod
    def _autoscroll(sbar):
        '''Hide and show scrollbar as needed.'''
        def wrapped(first, last):
            first, last = float(first), float(last)
            # The whole content is visible: remove the scrollbar from view.
            if first <= 0 and last >= 1:
                sbar.grid_remove()
            else:
                sbar.grid()
            sbar.set(first, last)
        return wrapped

    def __str__(self):
        return str(self.master)
def _create_container(func):
    '''Creates a ttk Frame with a given master, and use this new frame to
    place the scrollbars and the widget.

    Decorator applied to the __init__ of scrolled widget classes: the
    wrapped constructor receives the container frame as its master.'''
    def wrapped(cls, master, **kw):
        container = ttk.Frame(master)
        # Install/remove the global mousewheel bindings while the pointer
        # hovers over the container, so wheel events scroll this widget.
        container.bind('<Enter>', lambda e: _bound_to_mousewheel(e, container))
        container.bind('<Leave>', lambda e: _unbound_to_mousewheel(e, container))
        return func(cls, container, **kw)
    return wrapped
class ScrolledListBox(AutoScroll, tk.Listbox):
    '''A standard Tkinter Listbox widget with scrollbars that will
    automatically show/hide as needed.'''
    @_create_container
    def __init__(self, master, **kw):
        tk.Listbox.__init__(self, master, **kw)
        AutoScroll.__init__(self, master)
import platform
def _bound_to_mousewheel(event, widget):
    """Install global mousewheel bindings targeting the first child of *widget*."""
    child = widget.winfo_children()[0]
    if platform.system() in ('Windows', 'Darwin'):
        child.bind_all('<MouseWheel>', lambda e: _on_mousewheel(e, child))
        child.bind_all('<Shift-MouseWheel>', lambda e: _on_shiftmouse(e, child))
    else:
        # X11 delivers wheel motion as Button-4/Button-5 press events.
        for sequence in ('<Button-4>', '<Button-5>'):
            child.bind_all(sequence, lambda e: _on_mousewheel(e, child))
        for sequence in ('<Shift-Button-4>', '<Shift-Button-5>'):
            child.bind_all(sequence, lambda e: _on_shiftmouse(e, child))
def _unbound_to_mousewheel(event, widget):
    """Remove the global wheel bindings installed by _bound_to_mousewheel."""
    if platform.system() in ('Windows', 'Darwin'):
        sequences = ('<MouseWheel>', '<Shift-MouseWheel>')
    else:
        sequences = ('<Button-4>', '<Button-5>',
                     '<Shift-Button-4>', '<Shift-Button-5>')
    for sequence in sequences:
        widget.unbind_all(sequence)
def _on_mousewheel(event, widget):
    """Scroll *widget* vertically in response to a platform wheel event."""
    system = platform.system()
    if system == 'Windows':
        # Windows reports wheel motion in multiples of 120 per notch.
        widget.yview_scroll(-1 * int(event.delta / 120), 'units')
    elif system == 'Darwin':
        widget.yview_scroll(-1 * int(event.delta), 'units')
    else:
        # X11: Button-4 scrolls up, Button-5 scrolls down.
        step = {4: -1, 5: 1}.get(event.num)
        if step is not None:
            widget.yview_scroll(step, 'units')
def _on_shiftmouse(event, widget):
    """Scroll *widget* horizontally in response to a shift+wheel event."""
    system = platform.system()
    if system == 'Windows':
        widget.xview_scroll(-1 * int(event.delta / 120), 'units')
    elif system == 'Darwin':
        widget.xview_scroll(-1 * int(event.delta), 'units')
    else:
        # X11: Button-4 scrolls left, Button-5 scrolls right.
        step = {4: -1, 5: 1}.get(event.num)
        if step is not None:
            widget.xview_scroll(step, 'units')
# Launch the GUI only when this module is executed directly.
if __name__ == '__main__':
    vp_start_gui()
| 25,995 | 9,328 |
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node: an integer payload plus left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        self.left = left
        self.right = right
        self.val = val
class Solution:
    """LeetCode 1008: construct a binary search tree from its preorder traversal."""

    def bstFromPreorder(self, preorder: "list[int]") -> "TreeNode | None":
        """Build the unique BST whose preorder traversal is *preorder*.

        In a BST preorder traversal, the values after the root that are
        smaller than it form one contiguous run (the left subtree), so the
        left/right split point can be found by binary search: O(n log n).
        """
        # Local import: the original snippet used bisect without importing it
        # (relying on the judge environment); also stripped trailing junk
        # (`| 658 | 201 |`) that made the last line a syntax error.
        import bisect

        def subtree(lo, hi):
            # Build the subtree from preorder[lo:hi]; an empty range is no node.
            if lo >= hi:
                return None
            rootval = preorder[lo]
            root = TreeNode(rootval)
            # First index in (lo, hi) holding a value >= rootval marks the
            # start of the right subtree.
            mid = bisect.bisect_left(preorder, rootval, lo + 1, hi)
            root.left = subtree(lo + 1, mid)
            root.right = subtree(mid, hi)
            return root

        return subtree(0, len(preorder))
import os
import re
import numpy as np
from . import atom_data, bundle
# TODO: Maybe should be in atom data
# Reverse lookup (atomic symbol -> atomic number), built by inverting
# atom_data.atom_symbol_table (which maps number -> symbol).
_N_table = {val: key for key, val in list(atom_data.atom_symbol_table.items())}
def parse_xyz(
    filename: str,
    label=1,
    w=1.0,
    I=1,
    t0=0.0,
    dt=20.0,
    ts=None,
    N_table=None,
) -> bundle.Bundle:
    """Parse an XYZ adiabatic bundle file directly into a Bundle.

    Params:
        filename (str): the absolute or relative path to the xyz file.
        label (hashable): the label of this bundle
        w (float): the weight of this bundle
        I (int): electronic state label
        t0 (float): the initial time in au
        dt (float): the timestep in au
        ts (list of float): an explicit list of times in au, overrides t0 and dt
        N_table (dict of str : int): an optional dictionary mapping atomic
            symbol to atomic number, used for non-standard atom names.
    Returns:
        bundle (Bundle): the Bundle object.
    Raises:
        ValueError: if the file does not contain a whole number of frames.
    """
    # Close the handle deterministically (the original leaked it).
    with open(filename) as fh:
        lines = fh.readlines()
    natom = int(lines[0])  # first line of an xyz file is the atom count
    if len(lines) % (natom + 2):
        raise ValueError("Invalid number of lines in xyz file")
    # BUG FIX: integer division. `/` yields a float in Python 3, and
    # range(nframe) would then raise TypeError.
    nframe = len(lines) // (natom + 2)
    xyzs = []
    Zs = []
    for frame in range(nframe):
        # Skip the two header lines (atom count + comment) of each frame.
        lines2 = lines[frame * (natom + 2) + 2 : (frame + 1) * (natom + 2)]
        Z = []
        xyz = []
        for line in lines2:
            # Each atom line is: symbol x y z
            mobj = re.match(r"^\s*(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*$", line)
            Z.append(mobj.group(1))
            xyz.append([float(mobj.group(x)) for x in (2, 3, 4)])
        xyzs.append(np.array(xyz))
        Zs.append(Z)
    # User symbol table or default?
    N_table2 = N_table if N_table else _N_table
    frames2 = []
    for ind, xyz in enumerate(xyzs):
        Ns = [N_table2[key] for key in Zs[ind]]
        widths = atom_data.from_Ns_to_widths(Ns)
        frames2.append(
            bundle.Frame(
                label=label,
                # Explicit times win over the uniform t0 + ind*dt grid.
                t=dt * ind + t0 if ts is None else ts[ind],
                w=w,
                I=I,
                N=Ns,
                xyz=xyz,
                widths=widths,
            )
        )
    return bundle.Bundle(frames2)
def write_xyzs(
    bundle: bundle.Bundle,
    dirname: str,
    atom_format_str: str = "%-3s %24.16E %24.16E %24.16E\n",
):
    """Write a directory of xyz files to represent a Bundle, with
    one xyz file containing all frames for each label.

    Params:
        bundle: Bundle to write xyz file representation of
        dirname: the directory to place the xyz files in (created if it
            does not exist)
        atom_format_str: the format string for each atom line in the xyz
            file (useful to change precision).
    Result:
        xyz files are written for each label in bundle. Each xyz
        file contains all frames for the label, in time-order.
    """
    # Make sure directory exists
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    # Write one xyz file per label
    for label in bundle.labels:
        bundle2 = bundle.subset_by_label(label)
        # Munge the label into a filesystem-safe filename
        xyzfilename = str(label)
        xyzfilename = xyzfilename.replace(" ", "")
        xyzfilename = xyzfilename.replace("(", "")
        xyzfilename = xyzfilename.replace(")", "")
        xyzfilename = xyzfilename.replace(",", "-")
        # BUG FIX: use a context manager so the handle is closed -- the
        # original opened one file per label and never closed any of them.
        with open("%s/%s.xyz" % (dirname, xyzfilename), "w") as fh:
            for frame in bundle2.frames:
                fh.write("%d\n" % frame.xyz.shape[0])
                # Comment line carries the frame metadata (time, weight, state).
                fh.write(
                    "t = %24.16E, w = %24.16E, I = %d\n"
                    % (
                        frame.t,
                        frame.w,
                        frame.I,
                    )
                )
                for A in range(frame.xyz.shape[0]):
                    fh.write(
                        atom_format_str
                        % (
                            atom_data.atom_symbol_table[frame.N[A]],
                            frame.xyz[A, 0],
                            frame.xyz[A, 1],
                            frame.xyz[A, 2],
                        )
                    )
| 4,113 | 1,327 |
from sqlalchemy import *
from sqlalchemy.orm import relationship
from . import Base
class FITestParameter(Base):
    """ORM mapping for the `test_parameter` table.

    Associates a named parameter with a test and with the
    service/context rows that scope it.
    """
    __tablename__ = 'test_parameter'
    id = Column(Integer, primary_key=True)
    # Owning test (FK to test.id).
    test_id = Column(Integer, ForeignKey('test.id'))
    test = relationship('FITest')
    # Parameter name; indexed for lookup.
    name = Column(String(255), index=True)
    test_parameter_service_id = Column(Integer, ForeignKey('test_parameter_service.id'))
    test_parameter_service = relationship('FITestParameterService')
    test_parameter_context_id = Column(Integer, ForeignKey('test_parameter_context.id'))
    test_parameter_context = relationship('FITestParameterContext')
    # Bookkeeping columns; no defaults here -- maintained by application code.
    created_at = Column(DateTime)
    updated_at = Column(DateTime)
    updated_count = Column(Integer)
| 745 | 219 |
#!/usr/bin/python
""" merge_cluster_roles.py - merge OpenShift cluster roles into one """
# Copyright (c) 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, print_function
import os.path
import sys
import yaml
def main():
    """Merge the `rules` of several OpenShift cluster-role manifests.

    sys.argv[1] is the base role manifest; sys.argv[2:] are additional
    manifests whose rules are appended to the base role (skipping exact
    duplicates). The merged role is printed to stdout as YAML, preceded
    by a generated-file header listing the source paths.
    """
    sources = [os.path.relpath(sys.argv[1])]
    with open(sys.argv[1], 'r') as f:
        # BUG FIX: yaml.load() without an explicit Loader is deprecated and
        # can instantiate arbitrary objects; these are plain data documents,
        # so safe_load is correct.
        base_role = yaml.safe_load(f)
    manifests = sys.argv[2:]
    for manifest in manifests:
        sources.append(os.path.relpath(manifest))
        with open(manifest, 'r') as f:
            rules = yaml.safe_load(f)['rules']
        if rules not in base_role['rules']:
            base_role['rules'] += rules
    print("---")
    print("# This is a generated file. DO NOT EDIT")
    print("# Run `make merge-cluster-roles` to generate.")
    print("# Sources: ")
    for source in sources:
        print("# \t" + source)
    print(yaml.dump(base_role))
# Run the merge only when executed as a script.
if __name__ == "__main__":
    main()
| 1,481 | 467 |
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import gettext_lazy as _, gettext
import itertools
from internationalflavor.timezone._cldr_data import TIMEZONE_NAMES, METAZONE_NAMES, METAZONE_MAPPING_FROM_TZ, \
METAZONE_MAPPING_TO_TZ, TZ_REGION_FORMAT, TZ_GMT_FORMAT, TZ_HOUR_FORMAT
from internationalflavor._helpers import orig_str, string_format
try:
from pytz import common_timezones as COMMON_TIMEZONES
except ImportError:
COMMON_TIMEZONES = [x for x in TIMEZONE_NAMES if not x.startswith("Etc")]
CURRENT_METAZONES = [x for x in set(METAZONE_MAPPING_FROM_TZ.values()) if x is not None]
def get_timezones_cities(timezones=None, exclude=None):
    """Return (timezone code, exemplar city) choice pairs grouped by territory.

    Only timezones present in *timezones* (defaulting to the common
    timezones) and absent from *exclude* are returned.
    """
    if timezones is None:
        timezones = COMMON_TIMEZONES
    excluded = exclude if exclude else []
    # groupby requires its input pre-sorted on the grouping key (territory).
    by_territory = sorted(TIMEZONE_NAMES.items(), key=lambda entry: orig_str(entry[1][0]))
    grouped = []
    for territory, entries in itertools.groupby(by_territory, lambda entry: entry[1][0]):
        cities = [(code, names[1]) for code, names in entries
                  if code in timezones and code not in excluded]
        if cities:
            grouped.append((territory, cities))
    return grouped


get_timezones_cities_lazy = lazy(get_timezones_cities, list)
def _get_metazone_cities(metazone, limit=5):
    """Comma-separated exemplar cities of *metazone*, truncated to *limit*
    entries with a trailing ", ..." when more exist."""
    zones = [tz for key, tz in METAZONE_MAPPING_TO_TZ.items() if key[0] == metazone]
    cities = sorted(names[1] for tz, names in TIMEZONE_NAMES.items() if tz in zones)
    joined = ", ".join(map(force_text, cities[:limit]))
    if len(cities) > limit:
        joined += ", ..."
    return joined


_get_metazone_cities_lazy = lazy(_get_metazone_cities, str)
def _get_metazone_offset(metazone, correct_dst=True):
    """Return the UTC offset of *metazone* as a datetime.timedelta.

    Requires pytz (raises ImproperlyConfigured when missing). Unknown
    timezones yield a zero offset. With correct_dst=True the current DST
    shift is subtracted, giving the standard (non-DST) offset.
    """
    try:
        import pytz
    except ImportError:
        raise ImproperlyConfigured("You can not use this display format without pytz")
    # We need to ensure that we do utcoffset - dst to get the normal offset for this timezone
    try:
        tzinfo = pytz.timezone(get_timezone_by_metazone(metazone))
        offset = tzinfo.utcoffset(datetime.datetime.now(), is_dst=False)
        if correct_dst:
            offset -= tzinfo.dst(datetime.datetime.now(), is_dst=False)
    except pytz.UnknownTimeZoneError:
        offset = datetime.timedelta(0)
    return offset
def _get_metazone_offset_str(metazone, correct_dst=True, include_gmt=True):
    """Format the UTC offset of *metazone* using the CLDR hour pattern,
    optionally wrapped in the GMT pattern (e.g. "GMT+01:00")."""
    offset = _get_metazone_offset(metazone, correct_dst=correct_dst)
    # TZ_HOUR_FORMAT is "<positive pattern>;<negative pattern>".
    patterns = force_text(TZ_HOUR_FORMAT).split(';')
    if offset >= datetime.timedelta(0):
        pattern = patterns[0]
    else:
        offset = -offset
        pattern = patterns[1]
    total_seconds = offset.total_seconds()
    pattern = pattern.replace('HH', "%02d" % (total_seconds // 3600))
    pattern = pattern.replace('mm', "%02d" % ((total_seconds % 3600) // 60))
    return force_text(TZ_GMT_FORMAT) % pattern if include_gmt else pattern


_get_metazone_offset_str_lazy = lazy(_get_metazone_offset_str, str)
def get_metazone_name(metazone, display_format='name'):
    """Returns the name of a metazone, given a display_format. Available formats:

    *name* -- The name of the metazone, e.g.
        Central European Time
    *name_cities* -- The name followed by exemplar cities, e.g.
        Central European Time (Abidjan, Accra, Bamako, Banjul, Conakry, ...)
    *offset_name* -- The offset and the name, e.g.
        GMT+01:00 Central European Time
    *offset_name_cities* -- The offset, the name and exemplar cities, e.g.
        GMT+01:00 Central European Time (Abidjan, Accra, Bamako, Banjul, Conakry, ...)

    Everything else is string formatted using traditional Python string formatting,
    with the following arguments available:

    * tzname
    * cities
    * offset
    * gmt_offset -- The offset including the GMT string
    * dst_offset -- The offset with current DST applied
    * gmt_dst_offset -- The above two combined
    """
    if display_format == 'name':
        display_format = gettext("%(tzname)s")
    elif display_format == 'name_cities':
        display_format = gettext("%(tzname)s (%(cities)s)")
    elif display_format == 'offset_name':
        display_format = gettext("%(gmt_offset)s %(tzname)s")
    elif display_format == 'offset_name_cities':
        display_format = gettext("%(gmt_offset)s %(tzname)s (%(cities)s)")
    # Fall back to the generic CLDR region format when the metazone has no
    # translated name in the data.
    name = force_text(METAZONE_NAMES.get(metazone, string_format(TZ_REGION_FORMAT, _(metazone))))
    # The offset/city values are lazy so they are only computed when the
    # chosen display_format actually interpolates them.
    result = display_format % {
        'tzname': name,
        'cities': _get_metazone_cities_lazy(metazone),
        'offset': _get_metazone_offset_str_lazy(metazone, True, False),
        'gmt_offset': _get_metazone_offset_str_lazy(metazone, True, True),
        'dst_offset': _get_metazone_offset_str_lazy(metazone, False, False),
        'gmt_dst_offset': _get_metazone_offset_str_lazy(metazone, False, True)
    }
    return result


get_metazone_name_lazy = lazy(get_metazone_name, str)
def get_metazones(metazones=None, exclude=None, display_format='name'):
    """Return a list of (metazone, lazy display name) pairs.

    Defaults to all current metazones when *metazones* is None; anything
    listed in *exclude* is omitted.
    """
    if metazones is None:
        metazones = CURRENT_METAZONES
    excluded = exclude or []
    return [
        (mz, get_metazone_name_lazy(mz, display_format))
        for mz in metazones
        if mz not in excluded
    ]


get_metazones_lazy = lazy(get_metazones, list)
def get_timezone_by_metazone(metazone, territories=None, fallback='001'):
    """Return the timezone name mapped to a metazone.

    :param metazone: Name of the metazone.
    :param territories: A single territory string or a list of territories in
                        order of preference, used when a metazone has multiple
                        base timezones (e.g. to prefer the user's country).
    :param fallback: Territory used when none of the preferred ones match.
                     Should normally stay '001' (= World).
    """
    if territories is None:
        candidates = []
    elif isinstance(territories, str):
        candidates = [territories]
    else:
        candidates = territories
    for territory in candidates:
        key = (metazone, territory)
        if key in METAZONE_MAPPING_TO_TZ:
            return METAZONE_MAPPING_TO_TZ[key]
    return METAZONE_MAPPING_TO_TZ[(metazone, fallback)]
| 7,005 | 2,337 |
import six
import colander
from colander import SchemaNode, String
from kinto.core.utils import strip_whitespace, msec_time, decode_header, native_value
class ResourceSchema(colander.MappingSchema):
    """Base resource schema, with *Cliquet* specific built-in options."""

    class Options:
        """Resource schema options.

        Meant to be overridden to change values:

        .. code-block:: python

            class Product(ResourceSchema):
                reference = colander.SchemaNode(colander.String())

                class Options:
                    readonly_fields = ('reference',)
        """
        readonly_fields = tuple()
        """Fields that cannot be updated. Values for fields will have to be
        provided either during record creation, through default values using
        ``missing`` attribute or implementing a custom logic in
        :meth:`kinto.core.resource.UserResource.process_record`.
        """
        preserve_unknown = True
        """Define if unknown fields should be preserved or not.
        The resource is schema-less by default. In other words, any field name
        will be accepted on records. Set this to ``False`` in order to limit
        the accepted fields to the ones defined in the schema.
        """

    @classmethod
    def get_option(cls, attr):
        # Fall back to the base Options when the subclass does not override it.
        fallback = getattr(ResourceSchema.Options, attr)
        return getattr(cls.Options, attr, fallback)

    @classmethod
    def is_readonly(cls, field):
        """Return True if the specified field name is read-only.

        :param str field: the field name in the schema
        :returns: ``True`` if the specified field is read-only,
            ``False`` otherwise.
        :rtype: bool
        """
        return field in cls.get_option("readonly_fields")

    def schema_type(self):
        # Unknown fields are preserved unless the Options opt out.
        keep = self.get_option("preserve_unknown") is True
        return colander.Mapping(unknown='preserve' if keep else 'ignore')
class PermissionsSchema(colander.SchemaNode):
    """A permission mapping defines ACEs.

    It has permission names as keys and principals as values.

    ::

        {
            "write": ["fxa:af3e077eb9f5444a949ad65aa86e82ff"],
            "groups:create": ["fxa:70a9335eecfe440fa445ba752a750f3d"]
        }
    """
    def __init__(self, *args, **kwargs):
        # Optional restricted set of accepted permission names.
        self.known_perms = kwargs.pop('permissions', tuple())
        super(PermissionsSchema, self).__init__(*args, **kwargs)

    @staticmethod
    def schema_type():
        return colander.Mapping(unknown='preserve')

    def deserialize(self, cstruct=colander.null):
        """Two-pass deserialization: validate permission names (when the set
        is restricted), then re-deserialize with one child node per key so
        each principal list is validated as a sequence of strings.
        """
        # Start by deserializing a simple mapping.
        permissions = super(PermissionsSchema, self).deserialize(cstruct)
        # In case it is optional in parent schema.
        if permissions in (colander.null, colander.drop):
            return permissions
        # Remove potential extra children from previous deserialization.
        self.children = []
        for perm in permissions.keys():
            # If the set of known permissions is limited, validate inline.
            if self.known_perms:
                colander.OneOf(choices=self.known_perms)(self, perm)
            # Add a String list child node with the name of ``perm``.
            self.add(self._get_node_principals(perm))
        # End up by deserializing a mapping whose keys are now known.
        return super(PermissionsSchema, self).deserialize(permissions)

    def _get_node_principals(self, perm):
        # Sequence-of-strings node named after the permission.
        principal = colander.SchemaNode(colander.String())
        return colander.SchemaNode(colander.Sequence(), principal, name=perm,
                                   missing=colander.drop)
class TimeStamp(colander.SchemaNode):
    """Integer field that defaults to the current server timestamp
    (in milliseconds) when no value is provided.

    .. code-block:: python

        class Book(ResourceSchema):
            added_on = TimeStamp()
            read_on = TimeStamp(auto_now=False, missing=-1)
    """
    schema_type = colander.Integer
    title = 'Epoch timestamp'  # default field title
    auto_now = True  # fill with the current msec timestamp when absent
    missing = None  # default field value if not provided in record

    def deserialize(self, cstruct=colander.null):
        value = cstruct
        if self.auto_now and value is colander.null:
            value = msec_time()
        return super(TimeStamp, self).deserialize(value)
class URL(SchemaNode):
    """String field representing a URL, with max length of 2048.

    Shortcut for a string field validated with `~colander:colander.url`.

    .. code-block:: python

        class BookmarkSchema(ResourceSchema):
            url = URL()
    """
    schema_type = String
    validator = colander.All(colander.url, colander.Length(min=1, max=2048))

    def preparer(self, appstruct):
        # Trim surrounding whitespace before validation runs.
        return strip_whitespace(appstruct)
class Any(colander.SchemaType):
    """Type-agnostic colander field: values pass through unchanged."""

    def deserialize(self, node, cstruct):
        # No coercion, no validation.
        return cstruct
class HeaderField(colander.SchemaNode):
    """Base SchemaNode for a request header value."""
    missing = colander.drop

    def deserialize(self, cstruct=colander.null):
        value = cstruct
        if isinstance(value, six.binary_type):
            # Raw header bytes must decode as UTF-8.
            try:
                value = decode_header(value)
            except UnicodeDecodeError:
                raise colander.Invalid(self, msg='Headers should be UTF-8 encoded')
        return super(HeaderField, self).deserialize(value)
class QueryField(colander.SchemaNode):
    """Base SchemaNode for a querystring parameter."""
    missing = colander.drop

    def deserialize(self, cstruct=colander.null):
        value = cstruct
        if isinstance(value, six.string_types):
            # Querystring values arrive as text; coerce to a native type.
            value = native_value(value)
        return super(QueryField, self).deserialize(value)
class FieldList(QueryField):
    """Querystring field holding a comma-separated list of attribute names."""
    schema_type = colander.Sequence
    error_message = "The value should be a list of comma separated attributes"
    missing = colander.drop
    fields = colander.SchemaNode(colander.String(), missing=colander.drop)

    def deserialize(self, cstruct=colander.null):
        value = cstruct
        if isinstance(value, six.string_types):
            # Split "a,b,c" into ['a', 'b', 'c'] before sequence validation.
            value = value.split(',')
        return super(FieldList, self).deserialize(value)
class HeaderQuotedInteger(HeaderField):
    """Integer between double quotes, as used in precondition headers
    (e.g. ``If-Match: "1234"``); a bare ``*`` wildcard is also accepted.
    """
    schema_type = colander.String
    error_message = "The value should be integer between double quotes"
    # Raw strings: '\*' is an invalid escape sequence in a plain string
    # literal and raises DeprecationWarning/SyntaxWarning on modern Python.
    validator = colander.Any(colander.Regex(r'^"([0-9]+?)"$', msg=error_message),
                             colander.Regex(r'\*'))

    def deserialize(self, cstruct=colander.null):
        """Return the integer value, the '*' wildcard, or ``colander.drop``."""
        param = super(HeaderQuotedInteger, self).deserialize(cstruct)
        if param is colander.drop or param == '*':
            return param
        # Strip the surrounding double quotes and convert.
        return int(param[1:-1])
class HeaderSchema(colander.MappingSchema):
    """Schema used for validating and deserializing request headers. """

    def response_behavior_validator():
        # Plain function evaluated at class-definition time (not a method).
        return colander.OneOf(['full', 'light', 'diff'])

    if_match = HeaderQuotedInteger(name='If-Match')
    if_none_match = HeaderQuotedInteger(name='If-None-Match')
    response_behaviour = HeaderField(colander.String(), name='Response-Behavior',
                                     validator=response_behavior_validator())

    @staticmethod
    def schema_type():
        # Unknown headers are kept as-is.
        return colander.Mapping(unknown='preserve')
class QuerySchema(colander.MappingSchema):
    """Schema used for validating and deserializing querystrings.

    Unknown fields (field filters) are included in the deserialized result
    with a best-effort guess of their native type.
    """
    _limit = QueryField(colander.Integer())
    _fields = FieldList()
    _sort = FieldList()
    _token = QueryField(colander.String())
    _since = QueryField(colander.Integer())
    _to = QueryField(colander.Integer())
    _before = QueryField(colander.Integer())
    last_modified = QueryField(colander.Integer())

    @staticmethod
    def schema_type():
        return colander.Mapping(unknown='ignore')

    def deserialize(self, cstruct=colander.null):
        """Deserialize the declared fields, then merge in unknown field
        filters with their guessed native values.

        e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True}
        """
        known = super(QuerySchema, self).deserialize(cstruct)
        if known is colander.drop:
            return known
        result = {}
        for name, raw in cstruct.items():
            if name.startswith('in_') or name.startswith('exclude_'):
                # in_/exclude_ filters carry comma-separated value lists.
                parsed = FieldList().deserialize(raw)
                if isinstance(parsed, list):
                    result[name] = [native_value(item) for item in parsed]
            else:
                result[name] = native_value(raw)
        # Declared fields take precedence over the raw guesses.
        result.update(known)
        return result
class JsonPatchOperationSchema(colander.MappingSchema):
    """Single JSON Patch Operation (RFC 6902)."""

    def op_validator():
        op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy']
        return colander.OneOf(op_values)

    def path_validator():
        # Raw string: '\w' is an invalid escape sequence in a plain string
        # literal and raises a SyntaxWarning on recent Python versions.
        return colander.Regex(r'(/\w*)+')

    op = colander.SchemaNode(colander.String(), validator=op_validator())
    path = colander.SchemaNode(colander.String(), validator=path_validator())
    from_ = colander.SchemaNode(colander.String(), name='from',
                                validator=path_validator(), missing=colander.drop)
    value = colander.SchemaNode(Any(), missing=colander.drop)

    @staticmethod
    def schema_type():
        # Unknown members of a patch operation are rejected.
        return colander.Mapping(unknown='raise')
class JsonPatchBodySchema(colander.SequenceSchema):
    """Body used with JSON Patch (application/json-patch+json) as in RFC 6902."""
    # Sequence of individual patch operations.
    operations = JsonPatchOperationSchema(missing=colander.drop)
class RequestSchema(colander.MappingSchema):
    """Baseline schema for kinto requests."""
    # Both parts are optional and dropped when absent from the request.
    header = HeaderSchema(missing=colander.drop)
    querystring = QuerySchema(missing=colander.drop)
class JsonPatchRequestSchema(RequestSchema):
    # JSON Patch requests additionally validate the body as a patch document.
    body = JsonPatchBodySchema()
| 10,520 | 2,954 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project :mask_rcnn_pytorch
@File :__init__.py
@Author :zhuofalin
@Date :2021/11/24 21:18
'''
| 156 | 82 |
#!/usr/bin/env python3
import json
from assets.lib.bottle import route, run, static_file, response, request, redirect
#get default routes for files/paths
@route('/<filepath:path>')
def server_static(filepath):
    """Catch-all route: serve any requested file relative to the current directory."""
    return static_file(filepath, root="./")
#route index as default page
@route('/')
def index():
    """Serve the landing page."""
    filename = 'index.html'
    return static_file(filename, root="./")
#return json object
@route('/commands')
def getCommands():
    """Return the stored command list as a JSON response."""
    response.content_type = 'application/json'
    with open('commands.json', "r") as fh:
        commands = json.load(fh)
    return json.dumps(commands)
@route('/addcommand', method="POST")
def addCommands():
    """Append a command submitted via the form to commands.json, then go home."""
    new_entry = {
        "Command": request.forms.get('newCommand'),
        "Name": request.forms.get('newCommandName'),
        "OS": request.forms.get('newCommandOS'),
        "Description": request.forms.get('newCommandDescription')
    }
    objFile = 'commands.json'
    # NOTE(review): this header is likely ineffective since redirect() below
    # replaces the response — confirm before removing.
    response.content_type = 'application/json'
    # Read-modify-write of the whole JSON document.
    with open(objFile, "r") as fh:
        commands = json.load(fh)
    commands.append(new_entry)
    with open(objFile, "w") as fh:
        json.dump(commands, fh)
    redirect('/')
# Development server only: debug + auto-reload enabled, bound to localhost.
run(host='localhost', port=8080, debug=True, reloader=True)
| 1,366 | 425 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from .forms import SignupForm, HoodForm, UserProfileUpdateForm, UserUpdateForm
from .models import *
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib import messages
# Create your views here.
@login_required(login_url="/accounts/login/")
def index(request):
    """Landing page: list all hoods (login required)."""
    hoods = Hood.objects.all()
    # locals() passes every local name (request, hoods) to the template context.
    return render(request,"index.html",locals())
def home (request):
    """Alias of the landing page (no login required)."""
    return render(request, "index.html", locals())
def signup(request):
    """Register a new user.

    On a valid POST the user is created active and the landing page is
    rendered; otherwise the (possibly bound) signup form is shown again.
    """
    if request.method != 'POST':
        return render(request, 'signup.html', {'form': SignupForm()})
    form = SignupForm(request.POST)
    if form.is_valid():
        user = form.save(commit=False)
        user.is_active = True
        user.save()
        return render(request, 'index.html')
    # Invalid submission: re-render with validation errors.
    return render(request, 'signup.html', {'form': form})
def new_hood(request):
    """Create a new hood owned by the requesting user."""
    if request.method != "POST":
        return render(request, "new_hood.html", {"form": HoodForm()})
    form = HoodForm(request.POST, request.FILES)
    if not form.is_valid():
        # Re-render the bound form with validation errors.
        return render(request, "new_hood.html", {"form": form})
    # Attach the owner before persisting.
    hood = form.save(commit=False)
    hood.user = request.user
    hood.save()
    return redirect("index")
@login_required
def profile(request):
    """Display the logged-in user's profile page."""
    return render(request, 'profile.html', {'user': request.user})
@login_required
def update_profile(request):
    """Edit user profile information.

    Renders two bound forms (account fields + profile fields). On a valid
    POST both are saved and the user is redirected back to the profile page.
    """
    user = request.user
    form1 = UserUpdateForm(instance=user)
    form2 = UserProfileUpdateForm(instance=user.profile)
    if request.method == 'POST':
        form1 = UserUpdateForm(instance=user, data=request.POST)
        # BUG FIX: the profile form must be bound to user.profile (as in the
        # unbound version above), not to the User instance itself.
        form2 = UserProfileUpdateForm(
            instance=user.profile,
            data=request.POST,
            files=request.FILES
        )
        if form1.is_valid() and form2.is_valid():
            form1.save()
            form2.save()
            messages.success(request, "Your profile has been updated!")
            return HttpResponseRedirect(reverse('profile'))
    return render(request, 'update_profile.html',
                  {'form1': form1, 'form2': form2})
def details(request, hood_id):
    """Show the detail page for a single hood.

    NOTE(review): Hood.objects.get raises DoesNotExist for an unknown id —
    consider get_object_or_404.
    """
    hood = Hood.objects.get(id=hood_id)
    # locals() exposes request, hood_id and hood to the template context.
    return render(request, "details.html", locals())
| 2,569 | 734 |
import os
class DotMan:
    """Manage the ~/.isitfit dot folder and the per-installation UUID."""

    def get_dotisitfit(self):
        """Return the path to ~/.isitfit, creating the folder if needed."""
        import pathlib
        home_dir = str(pathlib.Path.home())
        dot_dir = os.path.join(home_dir, ".isitfit")
        if not os.path.exists(dot_dir):
            pathlib.Path(dot_dir).mkdir(exist_ok=True)
        return dot_dir

    def get_myuid(self, is_reentry=False):
        """
        Create a UUID for each installation of isitfit.

        Also creates a .isitfit folder in the user's home directory and
        caches the generated UUID in a txt file for re-use.

        is_reentry - internal flag marking the retry after the cached UUID
                     was found invalid and had to be regenerated
        """
        dot_dir = self.get_dotisitfit()
        uid_path = os.path.join(dot_dir, "uid.txt")

        if not os.path.exists(uid_path):
            # First run: generate a fresh UUID and cache it.
            import uuid
            uuid_val = uuid.uuid4().hex
            with open(uid_path, 'w') as fh:
                fh.write(uuid_val)
        else:
            with open(uid_path, 'r') as fh:
                uuid_val = fh.read().strip()  # drop newline/spaces if any

        # Guard against an accidentally corrupted cache file. Only one retry:
        # on re-entry the regenerated value is accepted as-is.
        if not is_reentry and len(uuid_val) != 32:
            os.remove(uid_path)
            uuid_val = self.get_myuid(True)

        return uuid_val

    def tempdir(self):
        """Return (creating if needed) the isitfit temp directory."""
        import tempfile
        tmp_dir = os.path.join(tempfile.gettempdir(), 'isitfit')
        os.makedirs(tmp_dir, exist_ok=True)
        return tmp_dir
import os
class DotFile:
    """
    Base class to set/get files in ~/.isitfit like ~/.isitfit/last_email.txt
    """
    filename = None  # derived classes must override this

    def __init__(self):
        self._init_fn()

    def _init_fn(self):
        """Resolve the absolute path of the backing file inside ~/.isitfit."""
        if self.filename is None:
            raise Exception("Derived classes should set filename member")
        from isitfit.dotMan import DotMan
        self.fn = os.path.join(DotMan().get_dotisitfit(), self.filename)

    def get(self):
        """Return the stored value, or None when the file is absent or empty."""
        if not os.path.exists(self.fn):
            return None
        with open(self.fn, 'r') as fh:
            content = fh.read().strip()
        return content or None

    def set(self, val):
        """Persist *val*, overwriting any previous content."""
        with open(self.fn, 'w') as fh:
            fh.write(val)
class DotLastEmail(DotFile):
    # Caches the last email address used, in ~/.isitfit/last_email.txt
    filename = "last_email.txt"
class DotLastProfile(DotFile):
    # Caches the last AWS profile used, in ~/.isitfit/last_profile.txt
    filename = "last_profile.txt"
| 2,636 | 944 |
import json
from django.shortcuts import render
def base_js_snippet(request, input_dict, output_dict, widget):
    """Render the base JS snippet template for a widget.

    ``input_dict['in']`` must be JSON-serializable. ``output_dict`` is kept
    for interface compatibility but is not used here.
    """
    try:
        inputs = json.dumps(input_dict['in'])
    except (KeyError, TypeError, ValueError) as exc:
        # Narrowed from a bare ``except`` and chained so the underlying
        # serialization error is not silently swallowed.
        raise Exception("Problem serializing the inputs. Only JSON-serializable objects can be used.") from exc
    return render(request, 'interactions/base_js_snippet.html',
                  {'widget': widget, 'snippet': input_dict['snippet'], 'inputs': inputs})
| 437 | 129 |
import os
from django.apps import AppConfig
class ReleaseNotesAppConfig(AppConfig):
    """App config that seeds JIRA-related settings with defaults."""
    name = 'jira_devops.release_notes'
    verbose_name = 'Release Notes'

    # JIRA custom-field ids used when the deployment does not override them.
    DEFAULT_JIRA_RELEASE_FIELD_MAP = {
        "hac_update": "customfield_13359",
        "need_impex": "customfield_13360",
        "need_manual": "customfield_13361",
        "special_notes": "customfield_13362",
        "responsible_person": "customfield_12200",
    }

    def ready(self):
        """Inject default values for settings the project did not define."""
        from django.conf import settings
        # NOTE(review): reaching into settings._wrapped.__dict__ bypasses the
        # lazy settings wrapper so setdefault works — confirm still required.
        settings_dict = settings._wrapped.__dict__
        settings_dict.setdefault('JIRA_RELEASE_FIELD_MAP',
                                 self.DEFAULT_JIRA_RELEASE_FIELD_MAP)
        settings_dict.setdefault('FILE_CLEAN_UP_PREFIX',
                                 self.get_env_variable("FILE_CLEAN_UP_PREFIX",
                                                       "hybris/bin/custom"))

    @staticmethod
    def get_env_variable(variable, default=""):
        """Return the environment variable's value, or *default* when unset or empty."""
        return os.getenv(variable) or default
| 928 | 313 |
import logging
from datetime import datetime, timedelta
from typing import Optional
from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from passlib.context import CryptContext
from app import schemas
from app.config import settings
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="login")
pwd_ctx = CryptContext(schemes=["bcrypt"], deprecated="auto")
class NotAuthorized(Exception):
    """Raised when a request does not carry a valid login."""

    def __init__(self, username: str = ""):
        message = f"Not authorized (user: {username})"
        # Record the failure (traceback is included when raised inside except).
        logging.exception(f"Exception {self.__class__.__name__}: {message}")
        super().__init__(message)
class Hash:
    """Thin wrapper around the module-level bcrypt password context."""

    @staticmethod
    def bcrypt(password: str):
        """Return the bcrypt hash of *password*."""
        return pwd_ctx.hash(password)

    @staticmethod
    def verify(plain_pass: str, hashed_pass: str):
        """Check *plain_pass* against a stored bcrypt hash."""
        return pwd_ctx.verify(plain_pass, hashed_pass)
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Build a signed JWT from *data* with an ``exp`` claim.

    When *expires_delta* is not given, the lifetime defaults to
    ``settings.ACCESS_TOKEN_EXPIRE_MINUTES``.
    """
    if expires_delta is None:
        expires_delta = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
    payload = dict(data)
    payload["exp"] = datetime.utcnow() + expires_delta
    return jwt.encode(payload, settings.SECRET_KEY, algorithm=settings.ALGORITHM)
def verify_token(token: str):
    """Decode *token* and return its ``TokenData``.

    Raises ``NotAuthorized`` when the token is invalid or carries no
    ``sub`` (email) claim.
    """
    try:
        payload = jwt.decode(
            token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM]
        )
    except JWTError as err:
        # BUG FIX: the old handler referenced ``email`` here, but that name is
        # unbound when jwt.decode itself fails, so the real error was masked
        # by a NameError. Chain the cause instead.
        raise NotAuthorized() from err
    email: str = payload.get("sub")
    if email is None:
        raise NotAuthorized(email)
    return schemas.TokenData(email=email)
def get_current_user(token: str = Depends(oauth2_scheme)):
    """FastAPI dependency: resolve the request's bearer token to TokenData."""
    return verify_token(token)
| 1,888 | 592 |
"""Tune the dqn2 model of wythoff's using the opotune lib"""
import optuna
import fire
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torchvision import datasets
from torchvision import transforms
from azad.exp.alternatives import wythoff_dqn2
from copy import deepcopy
def _build(trial):
    """Build an MLP nn.Module class whose topology is sampled from *trial*.

    Reads the module global ``MAX_FEATURES`` (set by ``optuna_dqn2``).
    """
    input_size = 4  # fixed input dimensionality
    num_layers = trial.suggest_int('n_layers', 2, 6)

    hidden = []
    for idx in range(num_layers):
        width = trial.suggest_int(f'{idx}', input_size, MAX_FEATURES)
        hidden.append(nn.Linear(input_size, width))
        hidden.append(nn.ReLU())
        input_size = width
    # Output layer topology is fixed.
    hidden.append(nn.Linear(input_size, 1))

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.layers = nn.Sequential(*hidden)

        def forward(self, x):
            return self.layers(x)

    return Model
def _objective(trial):
    """Run a single HP trial and return its final score.

    Reads the module globals DEVICE/SEED/GAME/NUM_EPISODES set by
    ``optuna_dqn2`` before the study starts.
    """
    # Build a new Model class with a trial-sampled topology.
    Model = _build(trial)
    # Sample new HP
    learning_rate = trial.suggest_float("learning_rate", 0.005, 0.5)
    gamma = trial.suggest_float("gamma", 0.01, 0.5)
    epsilon = trial.suggest_float("epsilon", 0.1, 0.9)
    # Run wythoff_dqn2
    result = wythoff_dqn2(epsilon=epsilon,
                          gamma=gamma,
                          learning_rate=learning_rate,
                          num_episodes=NUM_EPISODES,
                          batch_size=20,
                          memory_capacity=1000,
                          game=GAME,
                          network=Model,
                          anneal=True,
                          tensorboard=None,
                          update_every=1,
                          double=False,
                          double_update=10,
                          save=False,
                          save_model=False,
                          monitor=None,
                          return_none=False,
                          debug=False,
                          device=DEVICE,
                          clip_grad=True,
                          progress=False,
                          zero=False,
                          seed=SEED)
    return result["score"]  # the final score is the optimization target
def optuna_dqn2(save=None,
                num_trials=100,
                num_episodes=100,
                max_features=20,
                game='Wythoff15x15',
                num_jobs=1,
                device="cpu",
                debug=True,
                seed=None):
    """Run an optuna HP search over ``wythoff_dqn2`` and return the study.

    When *save* is given, the study object is serialized with torch.save.
    """
    # Set globals used in _objective. A lazy bad soln.
    global DEVICE
    global SEED
    global GAME
    global NUM_EPISODES
    global MAX_FEATURES
    DEVICE = device
    SEED = seed
    GAME = game
    NUM_EPISODES = num_episodes
    MAX_FEATURES = max_features
    # Run the study
    study = optuna.create_study(direction="maximize")
    study.optimize(_objective, n_trials=num_trials, n_jobs=num_jobs)
    trial = study.best_trial
    if debug:
        print(f">>> Saving to {save}")
        # BUG FIX: report the number of trials, not the raw trials list.
        print(f">>> Number of finished trials: {len(study.trials)}")
        print(f">>> Best trial {trial}")
        print(f">>> score: {trial.value}")
        print(f">>> params:\n")
        for k, v in trial.params.items():
            print(f"\t{k}: {v}")
    # Save?
    if save is not None:
        torch.save(study, save)
    return study
| 3,581 | 1,123 |
"""
Tests for ``datajunction.sql.build``.
"""
# pylint: disable=invalid-name, too-many-lines, line-too-long
import datetime
import pytest
from pytest_mock import MockerFixture
from sqlalchemy.engine import create_engine
from sqlmodel import Session
from datajunction.models.column import Column
from datajunction.models.database import Database
from datajunction.models.node import Node, NodeType
from datajunction.models.table import Table
from datajunction.sql.build import (
find_on_clause,
get_dimensions_from_filters,
get_filter,
get_join_columns,
get_query_for_node,
get_query_for_sql,
)
from datajunction.typing import ColumnType
def test_get_query_for_node(mocker: MockerFixture) -> None:
    """
    Test ``get_query_for_node``: happy path with a single materialized table.
    """
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    parent = Node(name="A")
    child = Node(
        name="B",
        tables=[
            Table(
                database=database,
                table="B",
                columns=[Column(name="cnt", type=ColumnType.INT)],
            ),
        ],
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[parent],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE B (cnt INTEGER)")
    # Force transpilation to reuse the in-memory engine created above.
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session = mocker.MagicMock()
    create_query = get_query_for_node(session, child, [], [])
    assert create_query.database_id == 1
    assert create_query.submitted_query == 'SELECT "B".cnt \nFROM "B"'
def test_get_query_for_node_with_groupbys(mocker: MockerFixture) -> None:
    """
    Test ``get_query_for_node`` with group bys.
    """
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    parent = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="comment", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="user_id", type=ColumnType.INT),
            Column(name="comment", type=ColumnType.STR),
        ],
    )
    child = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[parent],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (user_id INTEGER, comment TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session = mocker.MagicMock()
    create_query = get_query_for_node(session, child, ["A.user_id"], [])
    # The generated SQL carries trailing spaces; keep them explicit.
    space = " "
    assert create_query.database_id == 1
    assert (
        create_query.submitted_query
        == f"""SELECT count('*') AS cnt, "A".user_id{space}
FROM (SELECT "A".user_id AS user_id, "A".comment AS comment{space}
FROM "A") AS "A" GROUP BY "A".user_id"""
    )
def test_get_query_for_node_specify_database(mocker: MockerFixture) -> None:
    """
    Test ``get_query_for_node`` when a database is specified.
    """
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    parent = Node(name="A")
    child = Node(
        name="B",
        tables=[
            Table(
                database=database,
                table="B",
                columns=[Column(name="cnt", type=ColumnType.INT)],
            ),
        ],
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[parent],
        columns=[Column(name="cnt", type=ColumnType.INT)],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE B (cnt INTEGER)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session = mocker.MagicMock()
    session.exec().one.return_value = database
    # Explicitly requesting database 1 should succeed...
    create_query = get_query_for_node(session, child, [], [], 1)
    assert create_query.database_id == 1
    assert create_query.submitted_query == 'SELECT "B".cnt \nFROM "B"'
    # ...while an unknown database id must be rejected.
    with pytest.raises(Exception) as excinfo:
        get_query_for_node(session, child, [], [], 2)
    assert str(excinfo.value) == "Database ID 2 is not valid"
def test_get_query_for_node_no_databases(mocker: MockerFixture) -> None:
    """
    Test ``get_query_for_node`` when no database can compute the node.
    """
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    parent = Node(name="A")
    child = Node(
        name="B",
        tables=[
            Table(
                database=database,
                table="B",
                columns=[Column(name="one", type=ColumnType.STR)],
            ),
        ],
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[parent],
        columns=[Column(name="one", type=ColumnType.STR)],
    )
    # Simulate the planner finding no database able to run the query.
    mocker.patch("datajunction.sql.dag.get_computable_databases", return_value=set())
    session = mocker.MagicMock()
    with pytest.raises(Exception) as excinfo:
        get_query_for_node(session, child, [], [])
    assert str(excinfo.value) == "No valid database was found"
def test_get_query_for_node_with_dimensions(mocker: MockerFixture) -> None:
    """
    Test ``get_query_for_node`` when filtering/grouping by a dimension.
    """
    database = Database(id=1, name="one", URI="sqlite://")
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    parent = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            # user_id links the fact table to the core.users dimension.
            Column(name="user_id", type=ColumnType.INT, dimension=dimension),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[parent],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
    connection.execute("CREATE TABLE comments (ds TEXT, user_id INTEGER, text TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session = mocker.MagicMock()
    session.exec().one.return_value = dimension
    create_query = get_query_for_node(
        session,
        child,
        ["core.users.gender"],
        ["core.users.age>25"],
    )
    # The generated SQL carries trailing spaces; keep them explicit.
    space = " "
    assert create_query.database_id == 1
    assert (
        create_query.submitted_query
        == f"""SELECT count('*') AS count_1, "core.users".gender{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id{space}
WHERE "core.users".age > 25 GROUP BY "core.users".gender"""
    )
    # Unknown dimensions raise, with singular/plural error messages.
    with pytest.raises(Exception) as excinfo:
        get_query_for_node(session, child, ["aaaa"], [])
    assert str(excinfo.value) == "Invalid dimension: aaaa"
    with pytest.raises(Exception) as excinfo:
        get_query_for_node(session, child, ["aaaa", "bbbb"], [])
    assert str(excinfo.value) == "Invalid dimensions: aaaa, bbbb"
def test_get_query_for_node_with_multiple_dimensions(mocker: MockerFixture) -> None:
    """
    Test ``get_query_for_node`` when grouping by one dimension while
    filtering by another (two joins, one with a custom dimension column).
    """
    database = Database(id=1, name="one", URI="sqlite://")
    dimension_1 = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    dimension_2 = Node(
        name="core.bands",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_bands",
                columns=[
                    Column(name="uuid", type=ColumnType.INT),
                    Column(name="name", type=ColumnType.STR),
                    Column(name="genre", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="uuid", type=ColumnType.INT),
            Column(name="name", type=ColumnType.STR),
            Column(name="genre", type=ColumnType.STR),
        ],
    )
    parent = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="band_id", type=ColumnType.INT),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT, dimension=dimension_1),
            # band_id joins on the non-default dimension column "uuid".
            Column(
                name="band_id",
                type=ColumnType.INT,
                dimension=dimension_2,
                dimension_column="uuid",
            ),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[parent],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
    connection.execute("CREATE TABLE dim_bands (uuid INTEGER, name TEXT, genre TEXT)")
    connection.execute(
        "CREATE TABLE comments (ds TEXT, user_id INTEGER, band_id INTEGER, text TEXT)",
    )
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session = mocker.MagicMock()
    # The two dimensions are looked up in this order by the builder.
    session.exec().one.side_effect = [dimension_1, dimension_2]
    create_query = get_query_for_node(
        session,
        child,
        ["core.users.gender"],
        ["core.bands.genre='rock'"],
    )
    # The generated SQL carries trailing spaces; keep them explicit.
    space = " "
    assert create_query.database_id == 1
    assert (
        create_query.submitted_query
        == f"""SELECT count('*') AS count_1, "core.users".gender{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.band_id AS band_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id, (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.band_id AS band_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_bands.uuid AS uuid, dim_bands.name AS name, dim_bands.genre AS genre{space}
FROM dim_bands) AS "core.bands" ON "core.comments".band_id = "core.bands".uuid{space}
WHERE "core.bands".genre = 'rock' GROUP BY "core.users".gender"""
    )
def test_get_filter(mocker: MockerFixture) -> None:
    """
    Test ``get_filter``: operator dispatch, date/datetime coercion, and the
    error cases (bad filter syntax, unknown column, non-literal value).
    """
    greater_than = mocker.MagicMock()
    less_than = mocker.MagicMock()
    equals = mocker.MagicMock()
    # Replace the comparison dispatch table with mocks so calls can be asserted.
    mocker.patch(
        "datajunction.sql.build.COMPARISONS",
        new={
            ">": greater_than,
            "<": less_than,
            "=": equals,
        },
    )
    column_a = mocker.MagicMock()
    column_date = mocker.MagicMock()
    column_date.type.python_type = datetime.date
    column_dt = mocker.MagicMock()
    column_dt.type.python_type = datetime.datetime
    columns = {"a": column_a, "day": column_date, "dt": column_dt}
    # basic
    get_filter(columns, "a>0")
    greater_than.assert_called_with(column_a, 0)
    # date: several input formats are normalized to "YYYY-MM-DD 00:00:00"
    get_filter(columns, "day=2020-01-01")
    equals.assert_called_with(column_date, "2020-01-01 00:00:00")
    get_filter(columns, "day<20200202")
    less_than.assert_called_with(column_date, "2020-02-02 00:00:00")
    get_filter(columns, "day=3/3/2020")
    equals.assert_called_with(column_date, "2020-03-03 00:00:00")
    # datetime
    get_filter(columns, "dt=2012-01-19 17:21:00")
    equals.assert_called_with(column_dt, "2012-01-19 17:21:00")
    with pytest.raises(Exception) as excinfo:
        get_filter(columns, "dt>foo/bar-baz")
    assert str(excinfo.value) == "Invalid date or datetime value: foo/bar-baz"
    # exceptions
    with pytest.raises(Exception) as excinfo:
        get_filter(columns, "invalid")
    assert (
        str(excinfo.value)
        == """The filter "invalid" is invalid
The following error happened:
- The filter "invalid" is not a valid filter. Filters should consist of a dimension name, follow by a valid operator (<=|<|>=|>|!=|=), followed by a value. If the value is a string or date/time it should be enclosed in single quotes. (error code: 100)"""
    )
    with pytest.raises(Exception) as excinfo:
        get_filter(columns, "b>0")
    assert str(excinfo.value) == "Invalid column name: b"
    with pytest.raises(Exception) as excinfo:
        get_filter(columns, "a>open('/etc/passwd').read()")
    assert str(excinfo.value) == "Invalid value: open('/etc/passwd').read()"
def test_get_query_for_sql(mocker: MockerFixture, session: Session) -> None:
    """
    Test ``get_query_for_sql``.

    A single metric over a single parent table should compile to the
    metric's aggregation over a subquery projecting the parent's columns.
    """
    # Make the builder use the test session.
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(B)
    session.commit()
    sql = "SELECT B FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper so the literal trailing space in the expected SQL survives
    # editors that strip trailing whitespace.
    space = " "
    assert (
        create_query.submitted_query
        == f'''SELECT count('*') AS "B"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
    )
def test_get_query_for_sql_no_metrics(mocker: MockerFixture, session: Session) -> None:
    """
    Test ``get_query_for_sql`` when no metrics are requested.

    Selecting only dimension columns compiles to a plain projection over the
    dimension table; selecting columns from two different dimensions without
    a metric is an error.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="db", URI="sqlite://")
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session.add(dimension)
    session.commit()
    sql = 'SELECT "core.users.gender", "core.users.age" FROM metrics'
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f'''SELECT "core.users".gender, "core.users".age{space}
FROM (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users"'''
    )
    # A second, unrelated dimension: mixing dimensions without a metric fails.
    other_dimension = Node(
        name="core.other_dim",
        type=NodeType.DIMENSION,
        columns=[
            Column(name="full_name", type=ColumnType.STR),
        ],
    )
    session.add(other_dimension)
    session.commit()
    sql = 'SELECT "core.users.gender", "core.other_dim.full_name" FROM metrics'
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert (
        str(excinfo.value)
        == "Cannot query from multiple dimensions when no metric is specified"
    )
def test_get_query_for_sql_no_tables(mocker: MockerFixture, session: Session) -> None:
    """
    Test ``get_query_for_sql`` for a query that touches no node tables.

    A constant projection is passed through untouched and runs on the only
    registered database.
    """
    mocked_get_session = mocker.patch("datajunction.sql.build.get_session")
    mocked_get_session().__next__.return_value = session
    session.add(Database(id=1, name="memory", URI="sqlite://"))
    session.commit()
    create_query = get_query_for_sql("SELECT 1")
    assert create_query.database_id == 1
    assert create_query.submitted_query == "SELECT 1"
def test_get_query_for_sql_having(mocker: MockerFixture, session: Session) -> None:
    """
    Test ``get_query_for_sql`` with a ``HAVING`` clause.

    A metric referenced in ``HAVING`` is expanded to its aggregation; an
    unknown identifier there is rejected.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(B)
    session.commit()
    sql = "SELECT B FROM metrics HAVING B > 10"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f"""SELECT count('*') AS "B"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"{space}
HAVING count('*') > 10"""
    )
    # C is neither a known metric nor a dimension.
    sql = "SELECT B FROM metrics HAVING C > 10"
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert str(excinfo.value) == "Invalid dimension: C"
def test_get_query_for_sql_with_dimensions(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with dimensions in the query.

    The dimension table is joined in through the parent's ``user_id``
    column; referencing a column the dimension does not have is an error.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    # Dimension node backed by the physical ``dim_users`` table.
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    # Parent node whose ``user_id`` column references the dimension.
    parent = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT, dimension=dimension),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[parent],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
    connection.execute("CREATE TABLE comments (ds TEXT, user_id INTEGER, text TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session.add(child)
    session.add(dimension)
    session.commit()
    sql = """
SELECT "core.users.gender", "core.num_comments"
FROM metrics
WHERE "core.users.age" > 25
GROUP BY "core.users.gender"
"""
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f"""SELECT "core.users".gender, count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id{space}
WHERE "core.users".age > 25 GROUP BY "core.users".gender"""
    )
    # ``invalid`` is not a column of core.users.
    sql = """
SELECT "core.users.invalid", "core.num_comments"
FROM metrics
WHERE "core.users.age" > 25
GROUP BY "core.users.invalid"
"""
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert str(excinfo.value) == "Invalid dimension: core.users.invalid"
def test_get_query_for_sql_with_dimensions_order_by(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with dimensions in the query and ``ORDER BY``.

    Covers ordering by a metric (``DESC`` and ``ASC``), ordering by a
    dimension, and the error raised for an unknown ``ORDER BY`` identifier.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    # Dimension node backed by the physical ``dim_users`` table.
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    # Parent node whose ``user_id`` column references the dimension,
    # enabling the JOIN in the compiled query.
    parent = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT, dimension=dimension),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[parent],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE dim_users (id INTEGER, age INTEGER, gender TEXT)")
    connection.execute("CREATE TABLE comments (ds TEXT, user_id INTEGER, text TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    session.add(child)
    session.add(dimension)
    session.commit()
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    # ORDER BY a metric, descending.
    sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.num_comments" DESC
LIMIT 100;
"""
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    assert (
        create_query.submitted_query
        == f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY count('*') DESC
LIMIT 100 OFFSET 0"""
    )
    # ORDER BY a metric, ascending (previously asserted twice; the exact
    # duplicate query/assertion pair was removed).
    sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.num_comments" ASC
LIMIT 100;
"""
    create_query = get_query_for_sql(sql)
    assert (
        create_query.submitted_query
        == f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY count('*')
LIMIT 100 OFFSET 0"""
    )
    # ORDER BY a dimension.
    sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY "core.users.gender" ASC
LIMIT 100;
"""
    create_query = get_query_for_sql(sql)
    assert (
        create_query.submitted_query
        == f"""SELECT "core.users".gender AS "core.users.gender", count('*') AS "core.num_comments"{space}
FROM (SELECT comments.ds AS ds, comments.user_id AS user_id, comments.text AS text{space}
FROM comments) AS "core.comments" JOIN (SELECT dim_users.id AS id, dim_users.age AS age, dim_users.gender AS gender{space}
FROM dim_users) AS "core.users" ON "core.comments".user_id = "core.users".id GROUP BY "core.users".gender ORDER BY "core.users".gender
LIMIT 100 OFFSET 0"""
    )
    # ORDER BY an unknown identifier is rejected.
    sql = """
SELECT "core.users.gender" AS "core.users.gender",
"core.num_comments" AS "core.num_comments"
FROM main.metrics
GROUP BY "core.users.gender"
ORDER BY invalid ASC
LIMIT 100;
"""
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert str(excinfo.value) == "Invalid identifier: invalid"
def test_get_query_for_sql_compound_names(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with nodes with compound names.

    Dotted node names like ``core.B`` must be resolvable in the projection.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="core.A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="core.B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM core.A",
        parents=[A],
    )
    session.add(B)
    session.commit()
    sql = "SELECT core.B FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f'''SELECT count('*') AS "core.B"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "core.A"'''
    )
def test_get_query_for_sql_multiple_databases(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` when the parents are in multiple databases.

    The cheapest database that has every column the metric needs should be
    chosen: the fast database only has column ``one``, so a metric over
    ``two`` must fall back to the slow database.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database_1 = Database(id=1, name="slow", URI="sqlite://", cost=10.0)
    database_2 = Database(id=2, name="fast", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database_1,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
            # The fast database is missing column ``two``.
            Table(
                database=database_2,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="one", type=ColumnType.STR),
            Column(name="two", type=ColumnType.STR),
        ],
    )
    engine = create_engine(database_1.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(B)
    session.commit()
    sql = "SELECT B FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 2  # fast
    B.expression = "SELECT COUNT(two) AS cnt FROM A"
    session.add(B)
    session.commit()
    sql = "SELECT B FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1  # slow
def test_get_query_for_sql_multiple_metrics(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with multiple metrics.

    Both metrics share the same parent, so they compile into a single
    projection over one subquery.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="one", type=ColumnType.STR),
            Column(name="two", type=ColumnType.STR),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(B)
    C = Node(
        name="C",
        type=NodeType.METRIC,
        expression="SELECT MAX(one) AS max_one FROM A",
        parents=[A],
    )
    session.add(C)
    session.commit()
    sql = "SELECT B, C FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f'''SELECT count('*') AS "B", max("A".one) AS "C"{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
    )
def test_get_query_for_sql_non_identifiers(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with metrics and non-identifiers in the ``SELECT``.

    Literals such as ``'test'`` are carried through alongside the expanded
    metrics.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="one", type=ColumnType.STR),
            Column(name="two", type=ColumnType.STR),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(B)
    C = Node(
        name="C",
        type=NodeType.METRIC,
        expression="SELECT MAX(one) AS max_one FROM A",
        parents=[A],
    )
    session.add(C)
    session.commit()
    sql = "SELECT B, C, 'test' FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f'''SELECT count('*') AS "B", max("A".one) AS "C", test{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
    )
def test_get_query_for_sql_different_parents(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with metrics with different parents.

    C is computed from A while D is computed from B, so requesting both in
    one query must fail.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    B = Node(
        name="B",
        tables=[
            Table(
                database=database,
                table="B",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    C = Node(
        name="C",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(C)
    # D's expression reads from B, consistent with its declared parent.
    D = Node(
        name="D",
        type=NodeType.METRIC,
        expression="SELECT MAX(one) AS max_one FROM B",
        parents=[B],
    )
    session.add(D)
    session.commit()
    sql = "SELECT C, D FROM metrics"
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert str(excinfo.value) == "Metrics C and D have non-shared parents"
def test_get_query_for_sql_not_metric(mocker: MockerFixture, session: Session) -> None:
    """
    Test ``get_query_for_sql`` when the projection is not a metric node.

    ``B`` exists but is not a metric, so the reference is rejected as an
    unknown dimension.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    # Note: no ``type=NodeType.METRIC`` here.
    B = Node(
        name="B",
        expression="SELECT one FROM A",
        parents=[A],
    )
    session.add(B)
    session.commit()
    sql = "SELECT B FROM metrics"
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert str(excinfo.value) == "Invalid dimension: B"
def test_get_query_for_sql_no_databases(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` when no common databases are found.

    The metric's parent has no physical tables, so there is no database the
    query could run on and the builder must raise.
    """
    mocked_get_session = mocker.patch("datajunction.sql.build.get_session")
    mocked_get_session().__next__.return_value = session
    parent = Node(name="A", tables=[])
    metric = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[parent],
    )
    session.add(metric)
    session.commit()
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql("SELECT B FROM metrics")
    assert str(excinfo.value) == "No valid database was found"
def test_get_query_for_sql_alias(mocker: MockerFixture, session: Session) -> None:
    """
    Test ``get_query_for_sql`` with aliases.

    ``AS my_metric`` should override the default metric label in the
    compiled query.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    A = Node(
        name="A",
        tables=[
            Table(
                database=database,
                table="A",
                columns=[
                    Column(name="one", type=ColumnType.STR),
                    Column(name="two", type=ColumnType.STR),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE A (one TEXT, two TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    B = Node(
        name="B",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) AS cnt FROM A",
        parents=[A],
    )
    session.add(B)
    session.commit()
    sql = "SELECT B AS my_metric FROM metrics"
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f'''SELECT count('*') AS my_metric{space}
FROM (SELECT "A".one AS one, "A".two AS two{space}
FROM "A") AS "A"'''
    )
def test_get_query_for_sql_where_groupby(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with a where and a group by.

    Both clauses reference a column of the metric's own parent, so no
    dimension join is needed.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    comments = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="comment", type=ColumnType.STR),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE comments (user_id INT, comment TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    num_comments = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[comments],
    )
    session.add(num_comments)
    session.commit()
    sql = """
SELECT "core.num_comments", "core.comments.user_id" FROM metrics
WHERE "core.comments.user_id" > 1
GROUP BY "core.comments.user_id"
"""
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f"""SELECT count('*') AS "core.num_comments", "core.comments".user_id{space}
FROM (SELECT comments.user_id AS user_id, comments.comment AS comment{space}
FROM comments) AS "core.comments"{space}
WHERE "core.comments".user_id > 1 GROUP BY "core.comments".user_id"""
    )
def test_get_query_for_sql_date_trunc(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with a call to ``DATE_TRUNC``.

    On SQLite ``DATE_TRUNC('day', ...)`` should compile to
    ``datetime(..., 'start of day')``.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="db", URI="sqlite://")
    comments = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="timestamp", type=ColumnType.DATETIME),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE comments (user_id INT, timestamp DATETIME)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    num_comments = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[comments],
    )
    session.add(num_comments)
    session.commit()
    sql = """
SELECT
DATE_TRUNC('day', "core.comments.timestamp") AS "__timestamp",
"core.num_comments"
FROM metrics
GROUP BY
DATE_TRUNC('day', "core.comments.timestamp")
"""
    create_query = get_query_for_sql(sql)
    assert create_query.database_id == 1
    # Helper to preserve the literal trailing space in the expected SQL.
    space = " "
    assert (
        create_query.submitted_query
        == f"""SELECT datetime("core.comments".timestamp, 'start of day') AS __timestamp, count('*') AS "core.num_comments"{space}
FROM (SELECT comments.user_id AS user_id, comments.timestamp AS timestamp{space}
FROM comments) AS "core.comments" GROUP BY datetime("core.comments".timestamp, 'start of day')"""
    )
def test_get_query_for_sql_invalid_column(
    mocker: MockerFixture,
    session: Session,
) -> None:
    """
    Test ``get_query_for_sql`` with an invalid column.

    The filter references a node that is not among the metric's parents, so
    it is rejected as an unknown dimension.
    """
    get_session = mocker.patch("datajunction.sql.build.get_session")
    get_session().__next__.return_value = session
    database = Database(id=1, name="slow", URI="sqlite://", cost=1.0)
    comments = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="comment", type=ColumnType.STR),
                ],
            ),
        ],
    )
    engine = create_engine(database.URI)
    connection = engine.connect()
    connection.execute("CREATE TABLE comments (user_id INT, comment TEXT)")
    mocker.patch("datajunction.sql.transpile.create_engine", return_value=engine)
    num_comments = Node(
        name="core.num_comments",
        type=NodeType.METRIC,
        expression="SELECT COUNT(*) FROM core.comments",
        parents=[comments],
    )
    session.add(num_comments)
    session.commit()
    sql = """
SELECT "core.num_comments" FROM metrics
WHERE "core.some_other_parent.user_id" > 1
"""
    with pytest.raises(Exception) as excinfo:
        get_query_for_sql(sql)
    assert str(excinfo.value) == "Invalid dimension: core.some_other_parent.user_id"
def test_get_dimensions_from_filters() -> None:
    """
    Test ``get_dimensions_from_filters``.

    Valid filters yield the set of referenced dimension names; an
    unparseable filter raises with a descriptive message.
    """
    dimensions = get_dimensions_from_filters(["a>1", "b=10"])
    assert dimensions == {"a", "b"}
    expected_message = """The filter "aaaa" is invalid
The following error happened:
- The filter "aaaa" is not a valid filter. Filters should consist of a dimension name, follow by a valid operator (<=|<|>=|>|!=|=), followed by a value. If the value is a string or date/time it should be enclosed in single quotes. (error code: 100)"""
    with pytest.raises(Exception) as excinfo:
        get_dimensions_from_filters(["aaaa"])
    assert str(excinfo.value) == expected_message
def test_find_on_clause(mocker: MockerFixture) -> None:
    """
    Test ``find_on_clause``.

    The child's parent has a ``user_id`` column referencing the dimension,
    so the ON clause should look up ``user_id`` on the node select and
    ``id`` on the dimension subquery.
    """
    database = Database(id=1, name="one", URI="sqlite://")
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    parent = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT, dimension=dimension),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT, dimension=dimension),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(name="core.num_comments", parents=[parent])
    node_select = mocker.MagicMock()
    subquery = mocker.MagicMock()
    find_on_clause(child, node_select, dimension, subquery)
    # ``Mock.called_with`` is not a real assertion helper -- MagicMock
    # auto-creates it and returns a truthy mock, so the previous
    # ``assert ...called_with(...)`` checks always passed.  Use the real
    # ``assert_any_call`` so a wrong join column actually fails the test.
    node_select.columns.__getitem__.assert_any_call("user_id")
    subquery.columns.__getitem__.assert_any_call("id")
def test_find_on_clause_parent_no_columns(mocker: MockerFixture) -> None:
    """
    Test ``find_on_clause`` when a parent has no columns.

    I think we expect all nodes to have at least one column, so this test is just for
    completeness: the empty parent is skipped and the join is still found on
    the other parent.
    """
    database = Database(id=1, name="one", URI="sqlite://")
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    parent_1 = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT, dimension=dimension),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT, dimension=dimension),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    # A degenerate parent with no columns at all.
    parent_2 = Node(
        name="a_weird_node",
        tables=[
            Table(
                database=database,
                table="empty",
                columns=[],
            ),
        ],
        columns=[],
    )
    child = Node(name="core.num_comments", parents=[parent_2, parent_1])
    node_select = mocker.MagicMock()
    subquery = mocker.MagicMock()
    find_on_clause(child, node_select, dimension, subquery)
    # ``Mock.called_with`` is not a real assertion helper -- it silently
    # returns a truthy mock.  Use ``assert_any_call`` so the check is real.
    node_select.columns.__getitem__.assert_any_call("user_id")
def test_find_on_clause_parent_invalid_reference(mocker: MockerFixture) -> None:
    """
    Test ``find_on_clause`` when no parent column references the dimension.

    The compiler should check that the dimension is valid, but the table
    could change, so ``find_on_clause`` must raise in that case.
    """
    database = Database(id=1, name="one", URI="sqlite://")
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    # Note: ``user_id`` here has NO ``dimension=`` reference.
    parent = Node(
        name="core.comments",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(name="core.num_comments", parents=[parent])
    node_select = mocker.MagicMock()
    subquery = mocker.MagicMock()
    with pytest.raises(Exception) as excinfo:
        find_on_clause(child, node_select, dimension, subquery)
    assert (
        str(excinfo.value)
        == "Node core.num_comments has no columns with dimension core.users"
    )
def test_get_join_columns() -> None:
    """
    Test ``get_join_columns``.

    Covers a node with no parents, a node whose parents have no columns
    referencing the dimension, and the happy path.
    """
    database = Database(id=1, name="one", URI="sqlite://")
    dimension = Node(
        name="core.users",
        type=NodeType.DIMENSION,
        tables=[
            Table(
                database=database,
                table="dim_users",
                columns=[
                    Column(name="id", type=ColumnType.INT),
                    Column(name="age", type=ColumnType.INT),
                    Column(name="gender", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="id", type=ColumnType.INT),
            Column(name="age", type=ColumnType.INT),
            Column(name="gender", type=ColumnType.STR),
        ],
    )
    # No parents at all.
    orphan = Node(name="orphan")
    with pytest.raises(Exception) as excinfo:
        get_join_columns(orphan, dimension)
    assert str(excinfo.value) == "Node orphan has no columns with dimension core.users"
    # A parent exists but has no columns referencing the dimension.
    parent_without_columns = Node(name="parent_without_columns")
    broken = Node(name="broken", parents=[parent_without_columns])
    with pytest.raises(Exception) as excinfo:
        get_join_columns(broken, dimension)
    assert str(excinfo.value) == "Node broken has no columns with dimension core.users"
    # Happy path: ``user_id`` references the dimension's ``id``.
    parent = Node(
        name="parent",
        tables=[
            Table(
                database=database,
                table="comments",
                columns=[
                    Column(name="ds", type=ColumnType.STR),
                    Column(name="user_id", type=ColumnType.INT),
                    Column(name="text", type=ColumnType.STR),
                ],
            ),
        ],
        columns=[
            Column(name="ds", type=ColumnType.STR),
            Column(name="user_id", type=ColumnType.INT, dimension=dimension),
            Column(name="text", type=ColumnType.STR),
        ],
    )
    child = Node(name="child", parents=[parent_without_columns, parent])
    parent_name, column_name, dimension_column = get_join_columns(child, dimension)
    assert parent_name == "parent"
    assert column_name == "user_id"
    assert dimension_column == "id"
| 54,850 | 16,821 |
# -*- coding: utf-8 -*-
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.4@@
from google.appengine.ext import ndb
from framework.models.common import NdbModel
from plugins.rogerthat_api.plugin_utils import Enum
from plugins.tff_backend.plugin_consts import NAMESPACE
class NodeStatus(Enum):
    """Run states a node can report (stored in Node.status)."""
    HALTED = 'halted'
    RUNNING = 'running'
class WalletStatus(Enum):
    """Wallet states reported by a node (stored in NodeChainStatus.wallet_status)."""
    ERROR = 'error'
    LOCKED = 'locked'
    UNLOCKED = 'unlocked'
class NodeChainStatus(NdbModel):
    """Blockchain status snapshot embedded in a Node entity (StructuredProperty)."""
    # One of WalletStatus.all(): 'error', 'locked' or 'unlocked'.
    wallet_status = ndb.StringProperty(choices=WalletStatus.all())
    block_height = ndb.IntegerProperty(default=0)
    active_blockstakes = ndb.IntegerProperty(default=0)
    # Which chain the node is connected to; 'standard' is the main network.
    network = ndb.StringProperty(default='standard', choices=['devnet', 'testnet', 'standard'])
    confirmed_balance = ndb.IntegerProperty(default=0)
    connected_peers = ndb.IntegerProperty(default=0)
    # Wallet address as reported by the node.
    address = ndb.StringProperty()
class Node(NdbModel):
    """Datastore model for a single hardware node and its last reported state."""
    NAMESPACE = NAMESPACE
    # Hardware serial number of the node.
    serial_number = ndb.StringProperty()
    # Timestamp of the last status report received from the node.
    last_update = ndb.DateTimeProperty()
    # Owner of the node; queried by list_by_user.
    username = ndb.StringProperty()
    # One of NodeStatus.all(); new entities start out 'halted'.
    status = ndb.StringProperty(default=NodeStatus.HALTED)
    # When `status` last changed.
    status_date = ndb.DateTimeProperty()
    # Raw info payload from the node agent.
    # NOTE(review): schema not visible here - whatever the agent sends.
    info = ndb.JsonProperty()
    chain_status = ndb.StructuredProperty(NodeChainStatus)
    @property
    def id(self):
        """Return the entity's key id as unicode.

        NOTE(review): .decode assumes string_id() returns a byte string
        (Python 2 / GAE runtime) - this would fail on Python 3.
        """
        return self.key.string_id().decode('utf-8')
    @classmethod
    def create_key(cls, node_id):
        # type: (unicode) -> ndb.Key
        """Build the datastore key for a node id in this plugin's namespace."""
        return ndb.Key(cls, node_id, namespace=NAMESPACE)
    @classmethod
    def list_by_user(cls, username):
        """Return a query for all nodes owned by `username`."""
        return cls.query().filter(cls.username == username)
    @classmethod
    def list_by_property(cls, property_name, ascending):
        """Return a query for all nodes ordered by `property_name`.

        A dotted name such as 'chain_status.block_height' is resolved by
        walking attribute by attribute into the structured property.
        Negating the property reverses the sort order (ndb convention).
        """
        prop = None
        if '.' in property_name:
            for part in property_name.split('.'):
                # First iteration starts from the model class itself.
                prop = getattr(prop if prop else cls, part)
        else:
            prop = getattr(cls, property_name)
        return cls.query().order(prop if ascending else - prop)
    @classmethod
    def list_running_by_last_update(cls, date):
        """Return a query for running nodes whose last report is older than `date`."""
        return cls.query().filter(cls.last_update < date).filter(cls.status == NodeStatus.RUNNING)
| 2,671 | 863 |
for case in range(int(input())):
a,b = input().split()
k = False
try:
print(a,int(b,8),int(b),int(b,16))
except:
print(a,0,int(b),int(b,16)) | 172 | 76 |
from ..piecewisefunction.piecewisefunction import cPiecewiseFunction
import json
import numpy as np
import gsplines.basis
def piecewise2json(_pw):
    """Serialize a piecewise function to a JSON string.

    The payload is a list: [tau breakpoints, coefficients, dimension,
    [basis class name, basis params or None]].
    """
    basis = _pw.basis_
    params = basis.params_ if hasattr(basis, 'params_') else None
    payload = [
        _pw.tau_.tolist(),
        _pw.y_.tolist(),
        _pw.dim_,
        [type(basis).__name__, params],
    ]
    return json.dumps(payload)
def json2piecewise(_data):
    """Deserialize a JSON string produced by piecewise2json.

    Rebuilds the numeric fields as numpy arrays, instantiates the basis
    class named in the payload, and returns a cPiecewiseFunction.
    """
    fields = json.loads(_data)
    # All entries except the last two (dimension, basis spec) are arrays.
    for idx in range(len(fields) - 2):
        fields[idx] = np.array(fields[idx])
    basis_name, basis_params = fields[-1]
    basis_cls = getattr(gsplines.basis, basis_name)
    fields[-1] = basis_cls() if basis_params is None else basis_cls(basis_params)
    return cPiecewiseFunction(*fields)
| 869 | 314 |
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import yaml
import pathlib
class SettingsGuiGenerator:
    """Skill settings generator for the GUI.

    Loads the settings sections from a skill's settingsmeta file (JSON or
    YAML) and keeps the field values in sync with the skill's
    settings.json contents.
    """

    def __init__(self):
        """Create an empty settings section list."""
        self.settings_list = []

    def populate(self, skill_id, settings_file, settings_dict):
        """Populate the settings list for the current skill.

        Arguments:
            skill_id: ID of target skill (unused here; kept for
                interface compatibility with callers).
            settings_file: Path to the skill's settingsmeta .json/.yaml file.
            settings_dict: Dictionary of the current settings.json values,
                or None when there are no stored values.
        """
        file_type = pathlib.Path(settings_file).suffix
        settingsmeta = None
        if file_type == ".json":
            with open(settings_file, 'r') as meta_file:
                settingsmeta = json.load(meta_file)
        elif file_type == ".yaml":
            with open(settings_file, 'r') as meta_file:
                settingsmeta = yaml.safe_load(meta_file)
        if settingsmeta:
            # Guard against meta files without a skillMetadata section.
            skill_metadata = settingsmeta.get('skillMetadata') or {}
            self.settings_list.extend(skill_metadata.get('sections') or [])
        if settings_dict is not None:
            # Overlay the stored values on top of the meta defaults.
            self.update(settings_dict)

    def fetch(self):
        """Return the settings section list."""
        return self.settings_list

    def clear(self):
        """Clear the settings section list."""
        self.settings_list.clear()

    def update(self, settings_dict):
        """Apply changed settings.json values to the section list in place.

        Arguments:
            settings_dict: Dictionary of the current settings.json values.
        """
        for section in self.settings_list:
            for field in section['fields']:
                if "name" in field and field["name"] in settings_dict:
                    field["value"] = settings_dict[field["name"]]
#
# PySNMP MIB module FD-SYSTEM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FD-SYSTEM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:59:07 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Pull ASN.1 base types, refinement constraints, SMIv2 macros, textual
# conventions and the enterprise EPON-EOC symbols out of the already
# loaded core MIB modules.
# NOTE(review): `mibBuilder` is supplied by the pysnmp MIB loader that
# executes this generated module; it is not defined in this file.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
epon, DeviceOperation, DeviceType, LedStatus, DeviceStatus = mibBuilder.importSymbols("EPON-EOC-MIB", "epon", "DeviceOperation", "DeviceType", "LedStatus", "DeviceStatus")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Counter32, MibIdentifier, iso, ModuleIdentity, Integer32, TimeTicks, Bits, Gauge32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Counter64, ObjectIdentity, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "MibIdentifier", "iso", "ModuleIdentity", "Integer32", "TimeTicks", "Bits", "Gauge32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Counter64", "ObjectIdentity", "NotificationType")
RowStatus, TextualConvention, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "MacAddress", "DisplayString")
# systemInfo (1.3.6.1.4.1.34592.1.3.1): module identity, followed by the
# sysBaseInfo subtree of identity/location/contact scalars and alarm LEDs.
# Generated by pysmi - edit the ASN.1 source, not this file.
systemInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1))
if mibBuilder.loadTexts: systemInfo.setLastUpdated('201005271056Z')
if mibBuilder.loadTexts: systemInfo.setOrganization('epon eoc factory.')
sysBaseInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1))
sysModel = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 1), DeviceType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysModel.setStatus('current')
sysDesc = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysDesc.setStatus('current')
sysLocation = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysLocation.setStatus('current')
sysContact = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysContact.setStatus('current')
sysMajAlarmLed = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 5), LedStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysMajAlarmLed.setStatus('current')
sysCriAlarmLed = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 6), LedStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCriAlarmLed.setStatus('current')
sysAlarmDesc = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysAlarmDesc.setStatus('current')
# sysConfig subtree: console speed, management IP settings, SNMP
# communities, trap destinations and the device operation scalar.
sysConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2))
consolePortSpd = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("bps2400", 1), ("bps4800", 2), ("bps9600", 3), ("bps19200", 4), ("bps38400", 5), ("bps57600", 6), ("bps115200", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: consolePortSpd.setStatus('current')
manageIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: manageIpAddr.setStatus('current')
manageNetMask = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: manageNetMask.setStatus('current')
manageGateway = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: manageGateway.setStatus('current')
snmpReadCommunity = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpReadCommunity.setStatus('current')
snmpRWCommunity = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 6), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpRWCommunity.setStatus('current')
# NOTE(review): sub-id 7 is absent in the generated source; trap targets
# start at sub-id 8.
trapDstIpAddr1 = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: trapDstIpAddr1.setStatus('current')
trapDstIpAddr2 = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: trapDstIpAddr2.setStatus('current')
trapDstIpAddr3 = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: trapDstIpAddr3.setStatus('current')
trapDstIpAddr4 = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 11), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: trapDstIpAddr4.setStatus('current')
sysOperate = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 2, 12), DeviceOperation()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sysOperate.setStatus('current')
# chassisInfo subtree: read-only chassis identity, temperature and
# power/fan status bitmaps.
chassisInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3))
chassisType = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3, 1), DeviceType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chassisType.setStatus('current')
chassisFactorySerial = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chassisFactorySerial.setStatus('current')
chassisRevision = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chassisRevision.setStatus('current')
chassisTemperature = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chassisTemperature.setStatus('current')
powerStatusBit = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerStatusBit.setStatus('current')
fanStatusBit = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 3, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanStatusBit.setStatus('current')
# cardModule subtree: main control card scalars plus the PON card table
# (one row per slot, indexed by ponCardSlotId 1..4).
cardModule = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5))
mainCard = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1))
mainCardType = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 1), DeviceType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mainCardType.setStatus('current')
mainCardFactorySerial = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mainCardFactorySerial.setStatus('current')
mainCardHWRevision = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mainCardHWRevision.setStatus('current')
mainCardSWVersion = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mainCardSWVersion.setStatus('current')
mainCardRunningStatus = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 5), DeviceStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mainCardRunningStatus.setStatus('current')
mainCardRunningTime = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mainCardRunningTime.setStatus('current')
mainCardOperate = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 1, 7), DeviceOperation()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mainCardOperate.setStatus('current')
ponCard = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2))
ponCardTable = MibTable((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1), )
if mibBuilder.loadTexts: ponCardTable.setStatus('current')
ponCardEntry = MibTableRow((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1), ).setIndexNames((0, "FD-SYSTEM-MIB", "ponCardSlotId"))
if mibBuilder.loadTexts: ponCardEntry.setStatus('current')
ponCardSlotId = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4)))
if mibBuilder.loadTexts: ponCardSlotId.setStatus('current')
ponCardType = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 2), DeviceType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardType.setStatus('current')
ponCardFactorySerial = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 30))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardFactorySerial.setStatus('current')
ponCardHwRev = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardHwRev.setStatus('current')
ponCardFwVer = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardFwVer.setStatus('current')
# NOTE(review): column sub-id 6 is absent in the generated source.
ponCardRunningStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 7), DeviceStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardRunningStatus.setStatus('current')
ponCardRuningTime = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardRuningTime.setStatus('current')
ponCardOperate = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 9), DeviceOperation()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ponCardOperate.setStatus('current')
ponCardUpgradeStat = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 5, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("booting", 1), ("normalRun", 2), ("rcvFileIng", 3), ("rcvFileOk", 4), ("rcvFileErr", 5), ("upgrading", 6), ("upgradeOk", 7), ("upgradeErr", 8), ("upgradeOlt", 9), ("upgradeOnu", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ponCardUpgradeStat.setStatus('current')
# onuAuth subtree: ONU authentication method, the black/white MAC range
# configuration table and the read-only table of unauthenticated ONUs.
onuAuth = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6))
authMethod = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("blackList", 1), ("whiteList", 2), ("none", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: authMethod.setStatus('current')
nonAuthOper = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("clearNonAuthMacList", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nonAuthOper.setStatus('current')
onuAuthMacCfgTable = MibTable((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3), )
if mibBuilder.loadTexts: onuAuthMacCfgTable.setStatus('current')
onuAuthMacCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3, 1), ).setIndexNames((0, "FD-SYSTEM-MIB", "authMacEntryId"))
if mibBuilder.loadTexts: onuAuthMacCfgEntry.setStatus('current')
authMacEntryId = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3, 1, 1), Unsigned32())
if mibBuilder.loadTexts: authMacEntryId.setStatus('current')
beginMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: beginMacAddr.setStatus('current')
endMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3, 1, 3), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: endMacAddr.setStatus('current')
macAttr = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("blackMac", 1), ("whiteMac", 2), ("obsolete", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: macAttr.setStatus('current')
onuAuthMacRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: onuAuthMacRowStatus.setStatus('current')
nonAuthOnuListTable = MibTable((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 4), )
if mibBuilder.loadTexts: nonAuthOnuListTable.setStatus('current')
nonAuthOnuListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 4, 1), ).setIndexNames((0, "FD-SYSTEM-MIB", "nonAuthOnuMacIndex"))
if mibBuilder.loadTexts: nonAuthOnuListEntry.setStatus('current')
nonAuthOnuMacIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 4, 1, 1), Unsigned32())
if mibBuilder.loadTexts: nonAuthOnuMacIndex.setStatus('current')
nonAuthOnuMac = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 4, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nonAuthOnuMac.setStatus('current')
nonAuthOnuTries = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 6, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nonAuthOnuTries.setStatus('current')
# userManage subtree: management user table (up to 10 rows, indexed by
# userId) with credentials, permissions and row status.
userManage = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7))
userManageTable = MibTable((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1), )
if mibBuilder.loadTexts: userManageTable.setStatus('current')
userManageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1), ).setIndexNames((0, "FD-SYSTEM-MIB", "userId"))
if mibBuilder.loadTexts: userManageEntry.setStatus('current')
userId = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: userId.setStatus('current')
userName = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 2), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: userName.setStatus('current')
userPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: userPassword.setStatus('current')
userPermission = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: userPermission.setStatus('current')
userAccessDeviceMap = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 5), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: userAccessDeviceMap.setStatus('current')
loginTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 6), Unsigned32().clone(300)).setMaxAccess("readonly")
if mibBuilder.loadTexts: loginTimeout.setStatus('current')
userEntryRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 7, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: userEntryRowStatus.setStatus('current')
# upgrade subtree: FTP server settings, CRC options, target selection and
# the status/operation/progress scalars driving firmware upgrades.
upgrade = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8))
ftpServerIp = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ftpServerIp.setStatus('current')
ftpServerUserName = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ftpServerUserName.setStatus('current')
ftpServerUserPasswd = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ftpServerUserPasswd.setStatus('current')
ftpOperFileName = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ftpOperFileName.setStatus('current')
# NOTE(review): sub-id 5 is absent in the generated source.
ftpOperTarget = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("ctrlCardImage", 1), ("ponCardImage", 2), ("oltApp", 3), ("oltPers", 4), ("oltBoot", 5), ("onuApp", 6), ("onuPers", 7), ("onuBoot", 8), ("otherSpecifiedFile", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ftpOperTarget.setStatus('current')
dwLoadFileCrcCheck = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("checkCrc", 1), ("dontCheckCrc", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dwLoadFileCrcCheck.setStatus('current')
dwLoadFileCrcValue = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 8), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dwLoadFileCrcValue.setStatus('current')
operDeviceMap = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(10, 10)).setFixedLength(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: operDeviceMap.setStatus('current')
upgradeStatus = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("paraErr", 1), ("initFtpErr", 2), ("transmitting", 3), ("transmitErr", 4), ("transmitOk", 5), ("upgrading", 6), ("upgradeErr", 7), ("upgradeOk", 8), ("uploading", 9), ("uploadErr", 10), ("uploadOk", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: upgradeStatus.setStatus('current')
upgradeOperation = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("downloadFile", 1), ("upgrade", 2), ("reboot", 3), ("uploadFile", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: upgradeOperation.setStatus('current')
ftpProgress = MibScalar((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 8, 12), Integer32()).setUnits('percent').setMaxAccess("readonly")
if mibBuilder.loadTexts: ftpProgress.setStatus('current')
# Conformance section: object groups and the module compliance statement.
# The mibBuilder version guards are pysmi's compatibility shim: on newer
# pysnmp releases setStatus returns the object and must be re-bound.
fdSysConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13))
fdSystemGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1))
sysBaseManageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1, 1)).setObjects(("FD-SYSTEM-MIB", "sysModel"), ("FD-SYSTEM-MIB", "sysDesc"), ("FD-SYSTEM-MIB", "sysLocation"), ("FD-SYSTEM-MIB", "sysContact"), ("FD-SYSTEM-MIB", "sysMajAlarmLed"), ("FD-SYSTEM-MIB", "sysCriAlarmLed"), ("FD-SYSTEM-MIB", "sysAlarmDesc"), ("FD-SYSTEM-MIB", "consolePortSpd"), ("FD-SYSTEM-MIB", "manageIpAddr"), ("FD-SYSTEM-MIB", "manageNetMask"), ("FD-SYSTEM-MIB", "manageGateway"), ("FD-SYSTEM-MIB", "snmpReadCommunity"), ("FD-SYSTEM-MIB", "snmpRWCommunity"), ("FD-SYSTEM-MIB", "trapDstIpAddr1"), ("FD-SYSTEM-MIB", "trapDstIpAddr2"), ("FD-SYSTEM-MIB", "trapDstIpAddr3"), ("FD-SYSTEM-MIB", "trapDstIpAddr4"), ("FD-SYSTEM-MIB", "sysOperate"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    sysBaseManageGroup = sysBaseManageGroup.setStatus('current')
chassisInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1, 2)).setObjects(("FD-SYSTEM-MIB", "chassisType"), ("FD-SYSTEM-MIB", "chassisFactorySerial"), ("FD-SYSTEM-MIB", "chassisRevision"), ("FD-SYSTEM-MIB", "chassisTemperature"), ("FD-SYSTEM-MIB", "powerStatusBit"), ("FD-SYSTEM-MIB", "fanStatusBit"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    chassisInfoGroup = chassisInfoGroup.setStatus('current')
cardModuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1, 3)).setObjects(("FD-SYSTEM-MIB", "mainCardType"), ("FD-SYSTEM-MIB", "mainCardFactorySerial"), ("FD-SYSTEM-MIB", "mainCardHWRevision"), ("FD-SYSTEM-MIB", "mainCardSWVersion"), ("FD-SYSTEM-MIB", "mainCardRunningStatus"), ("FD-SYSTEM-MIB", "mainCardRunningTime"), ("FD-SYSTEM-MIB", "mainCardOperate"), ("FD-SYSTEM-MIB", "ponCardType"), ("FD-SYSTEM-MIB", "ponCardFactorySerial"), ("FD-SYSTEM-MIB", "ponCardHwRev"), ("FD-SYSTEM-MIB", "ponCardFwVer"), ("FD-SYSTEM-MIB", "ponCardRunningStatus"), ("FD-SYSTEM-MIB", "ponCardRuningTime"), ("FD-SYSTEM-MIB", "ponCardOperate"), ("FD-SYSTEM-MIB", "ponCardUpgradeStat"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cardModuleGroup = cardModuleGroup.setStatus('current')
onuAuthGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1, 4)).setObjects(("FD-SYSTEM-MIB", "authMethod"), ("FD-SYSTEM-MIB", "nonAuthOper"), ("FD-SYSTEM-MIB", "beginMacAddr"), ("FD-SYSTEM-MIB", "endMacAddr"), ("FD-SYSTEM-MIB", "macAttr"), ("FD-SYSTEM-MIB", "onuAuthMacRowStatus"), ("FD-SYSTEM-MIB", "nonAuthOnuMac"), ("FD-SYSTEM-MIB", "nonAuthOnuTries"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    onuAuthGroup = onuAuthGroup.setStatus('current')
userManageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1, 5)).setObjects(("FD-SYSTEM-MIB", "userName"), ("FD-SYSTEM-MIB", "userPassword"), ("FD-SYSTEM-MIB", "userPermission"), ("FD-SYSTEM-MIB", "userAccessDeviceMap"), ("FD-SYSTEM-MIB", "loginTimeout"), ("FD-SYSTEM-MIB", "userEntryRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    userManageGroup = userManageGroup.setStatus('current')
systemUpgradeGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 1, 6)).setObjects(("FD-SYSTEM-MIB", "ftpServerIp"), ("FD-SYSTEM-MIB", "ftpServerUserName"), ("FD-SYSTEM-MIB", "ftpServerUserPasswd"), ("FD-SYSTEM-MIB", "ftpOperFileName"), ("FD-SYSTEM-MIB", "dwLoadFileCrcCheck"), ("FD-SYSTEM-MIB", "dwLoadFileCrcValue"), ("FD-SYSTEM-MIB", "operDeviceMap"), ("FD-SYSTEM-MIB", "upgradeStatus"), ("FD-SYSTEM-MIB", "ftpProgress"), ("FD-SYSTEM-MIB", "upgradeOperation"), ("FD-SYSTEM-MIB", "ftpOperTarget"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    systemUpgradeGroup = systemUpgradeGroup.setStatus('current')
fdSystemCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 2))
fdSystemCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 34592, 1, 3, 1, 13, 2, 1)).setObjects(("FD-SYSTEM-MIB", "sysBaseManageGroup"), ("FD-SYSTEM-MIB", "chassisInfoGroup"), ("FD-SYSTEM-MIB", "cardModuleGroup"), ("FD-SYSTEM-MIB", "onuAuthGroup"), ("FD-SYSTEM-MIB", "userManageGroup"), ("FD-SYSTEM-MIB", "systemUpgradeGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    fdSystemCompliance = fdSystemCompliance.setStatus('current')
# Register every managed object defined above with the MIB builder so
# other modules (and the SNMP engine) can resolve them by name.
mibBuilder.exportSymbols("FD-SYSTEM-MIB", ponCardType=ponCardType, userManageTable=userManageTable, dwLoadFileCrcCheck=dwLoadFileCrcCheck, ftpServerIp=ftpServerIp, userEntryRowStatus=userEntryRowStatus, sysModel=sysModel, userManageEntry=userManageEntry, manageIpAddr=manageIpAddr, PYSNMP_MODULE_ID=systemInfo, onuAuthMacCfgTable=onuAuthMacCfgTable, sysCriAlarmLed=sysCriAlarmLed, ponCardUpgradeStat=ponCardUpgradeStat, userManage=userManage, authMacEntryId=authMacEntryId, chassisRevision=chassisRevision, ponCardFwVer=ponCardFwVer, fdSystemGroups=fdSystemGroups, ftpOperFileName=ftpOperFileName, ponCardFactorySerial=ponCardFactorySerial, manageGateway=manageGateway, mainCardRunningStatus=mainCardRunningStatus, ftpServerUserPasswd=ftpServerUserPasswd, sysContact=sysContact, chassisType=chassisType, userId=userId, snmpRWCommunity=snmpRWCommunity, nonAuthOnuMac=nonAuthOnuMac, manageNetMask=manageNetMask, nonAuthOper=nonAuthOper, userName=userName, mainCardType=mainCardType, upgradeStatus=upgradeStatus, ponCardSlotId=ponCardSlotId, userPassword=userPassword, nonAuthOnuMacIndex=nonAuthOnuMacIndex, macAttr=macAttr, ponCardOperate=ponCardOperate, sysOperate=sysOperate, nonAuthOnuListEntry=nonAuthOnuListEntry, fdSysConformance=fdSysConformance, powerStatusBit=powerStatusBit, ponCard=ponCard, ftpOperTarget=ftpOperTarget, fdSystemCompliance=fdSystemCompliance, onuAuthGroup=onuAuthGroup, sysLocation=sysLocation, sysConfig=sysConfig, sysBaseManageGroup=sysBaseManageGroup, sysDesc=sysDesc, systemUpgradeGroup=systemUpgradeGroup, fanStatusBit=fanStatusBit, nonAuthOnuTries=nonAuthOnuTries, mainCardRunningTime=mainCardRunningTime, chassisInfo=chassisInfo, mainCardOperate=mainCardOperate, trapDstIpAddr2=trapDstIpAddr2, mainCard=mainCard, sysAlarmDesc=sysAlarmDesc, loginTimeout=loginTimeout, operDeviceMap=operDeviceMap, userAccessDeviceMap=userAccessDeviceMap, upgrade=upgrade, onuAuthMacRowStatus=onuAuthMacRowStatus, ftpProgress=ftpProgress, chassisInfoGroup=chassisInfoGroup, 
onuAuthMacCfgEntry=onuAuthMacCfgEntry, snmpReadCommunity=snmpReadCommunity, sysBaseInfo=sysBaseInfo, sysMajAlarmLed=sysMajAlarmLed, trapDstIpAddr1=trapDstIpAddr1, ftpServerUserName=ftpServerUserName, upgradeOperation=upgradeOperation, trapDstIpAddr4=trapDstIpAddr4, mainCardSWVersion=mainCardSWVersion, ponCardRunningStatus=ponCardRunningStatus, systemInfo=systemInfo, trapDstIpAddr3=trapDstIpAddr3, mainCardFactorySerial=mainCardFactorySerial, ponCardEntry=ponCardEntry, ponCardTable=ponCardTable, mainCardHWRevision=mainCardHWRevision, endMacAddr=endMacAddr, consolePortSpd=consolePortSpd, userManageGroup=userManageGroup, cardModule=cardModule, onuAuth=onuAuth, dwLoadFileCrcValue=dwLoadFileCrcValue, ponCardRuningTime=ponCardRuningTime, fdSystemCompliances=fdSystemCompliances, beginMacAddr=beginMacAddr, nonAuthOnuListTable=nonAuthOnuListTable, chassisFactorySerial=chassisFactorySerial, cardModuleGroup=cardModuleGroup, ponCardHwRev=ponCardHwRev, userPermission=userPermission, chassisTemperature=chassisTemperature, authMethod=authMethod)
| 25,483 | 11,534 |
#!BPY
"""
Name: 'Template Completion | Tab'
Blender: 246
Group: 'TextPlugin'
Shortcut: 'Tab'
Tooltip: 'Completes templates based on the text preceding the cursor'
"""
# Only run if we have the required modules.
# OK gates the __main__ entry point at the bottom of the file: outside a
# Blender 2.4x runtime these imports fail and the plugin does nothing.
try:
    import bpy
    from BPyTextPlugin import *
    from Blender import Text
except ImportError:
    OK = False
else:
    OK = True
# Snippet templates keyed by trigger word.  ${n:default} marks an editable
# insertion point with placeholder text; occurrences sharing the same
# number are linked and edited together.  ${n} with no default inserts an
# empty marker at that spot.
templates = {
    'ie':
        'if ${1:cond}:\n'
        '\t${2}\n'
        'else:\n'
        '\t${3}\n',
    'iei':
        'if ${1:cond}:\n'
        '\t${2}\n'
        'elif:\n'
        '\t${3}\n'
        'else:\n'
        '\t${4}\n',
    'def':
        'def ${1:name}(${2:params}):\n'
        '\t"""(${2}) - ${3:comment}"""\n'
        '\t${4}',
    'cls':
        'class ${1:name}(${2:parent}):\n'
        '\t"""${3:docs}"""\n'
        '\t\n'
        '\tdef __init__(self, ${4:params}):\n'
        '\t\t"""Creates a new ${1}"""\n'
        '\t\t${5}',
    'class':
        'class ${1:name}(${2:parent}):\n'
        '\t"""${3:docs}"""\n'
        '\t\n'
        '\tdef __init__(self, ${4:params}):\n'
        '\t\t"""Creates a new ${1}"""\n'
        '\t\t${5}'
}
def main():
    """Expand the template whose trigger word precedes the cursor.

    Runs inside Blender 2.4x's text editor (Python 2 API). If the word
    left of the cursor matches a key of ``templates``, that word is
    deleted and replaced by the template body; every ${n:default}
    insertion point is marked for edit-all, and the first one is left
    selected so the user can start typing immediately.
    """
    txt = bpy.data.texts.active
    if not txt:
        return

    row, c = txt.getCursorPos()
    line = txt.asLines(row, row+1)[0]

    # Measure the current line's leading whitespace so each template
    # newline can be re-indented to match the trigger line.
    indent=0
    while indent<c and (line[indent]==' ' or line[indent]=='\t'):
        indent += 1

    # Check we are in a normal context (not e.g. inside a string/comment)
    if get_context(txt) != CTX_NORMAL:
        return

    # The identifier immediately left of the cursor is the trigger word.
    targets = get_targets(line, c-1);
    if len(targets) != 1: return

    color = (0, 192, 32)  # highlight colour for insertion-point markers

    for trigger, template in templates.items():
        if trigger != targets[0]: continue

        # insertion id -> (default text, [(x, y) positions])
        inserts = {}
        txt.delete(-len(trigger)-1)  # remove the typed trigger word
        y, x = txt.getCursorPos()
        first = None  # first insertion point seen: (text, x, y)

        # Insert template text and parse for insertion points
        count = len(template); i = 0
        while i < count:
            if i<count-1 and template[i]=='$' and template[i+1]=='{':
                # Found "${id:default}" (or "${id}") starting at i.
                i += 2
                e = template.find('}', i)
                item = template[i:e].split(':')
                if len(item)<2: item.append('')
                # Repeated ids mirror the default text of the first
                # occurrence; Python 2 dict API (has_key).
                if not inserts.has_key(item[0]):
                    inserts[item[0]] = (item[1], [(x, y)])
                else:
                    inserts[item[0]][1].append((x, y))
                    item[1] = inserts[item[0]][0]
                if not first: first = (item[1], x, y)
                txt.insert(item[1])
                x += len(item[1])
                i = e  # resume just after the closing brace (i += 1 below)
            else:
                txt.insert(template[i])
                if template[i] == '\n':
                    # Mirror the trigger line's indentation after newlines.
                    txt.insert(line[:indent])
                    y += 1
                    x = indent
                else:
                    x += 1
            i += 1

        # Insert markers at insertion points
        for id, (text, points) in inserts.items():
            for x, y in points:
                txt.setCursorPos(y, x)
                txt.setSelectPos(y, x+len(text))
                txt.markSelection((hash(text)+int(id)) & 0xFFFF, color,
                        Text.TMARK_TEMP | Text.TMARK_EDITALL)

        # Leave the first insertion point selected for immediate typing.
        if first:
            text, x, y = first
            txt.setCursorPos(y, x)
            txt.setSelectPos(y, x+len(text))
        break
# Check we are running as a script (not imported as a module) and that
# the Blender modules imported successfully before doing any work.
if __name__ == "__main__" and OK:
    main()
| 2,690 | 1,310 |
"""
Basic file-checking functionality used by Kive.
"""
import hashlib
import mimetypes
import os
from contextlib import contextmanager
from django.http import FileResponse
def build_download_response(field_file):
    """Build a streaming attachment response for a Django FieldFile.

    The handle is intentionally left open: FileResponse closes it once
    streaming finishes.
    """
    field_file.open('rb')
    content_type, _ = mimetypes.guess_type(field_file.name)
    filename = os.path.basename(field_file.name)
    response = FileResponse(field_file, content_type=content_type)
    response['Content-Length'] = field_file.size
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    return response
def compute_md5(file_to_checksum, chunk_size=1024*64):
    """Computes MD5 checksum of specified file.

    file_to_checksum should be an open, readable, file handle, with
    its position at the beginning, i.e. so that .read() gets the
    entire contents of the file.

    NOTE: under python3, the file should have been open in binary mode ("rb")
    so that bytes (not strings) are returned when iterating over the file.
    """
    digest = hashlib.md5()
    # Stream the file chunk-wise so arbitrarily large files fit in memory.
    chunk = file_to_checksum.read(chunk_size)
    while chunk:
        digest.update(chunk)
        chunk = file_to_checksum.read(chunk_size)
    return digest.hexdigest()
@contextmanager
def use_field_file(field_file, mode='rb'):
    """ Context manager for FieldFile objects.

    Tries to leave a file object in the same state it was in when the context
    manager started.

    It's hard to tell when to close a FieldFile object. It opens implicitly
    when you first read from it. Sometimes, it's an in-memory file object, and
    it can't be reopened.
    """
    originally_closed = field_file.closed
    field_file.open(mode)
    original_position = field_file.tell()
    try:
        yield field_file
    finally:
        # Restore the caller-visible state: re-close the file if it
        # started out closed, otherwise rewind to where it was.
        if originally_closed:
            field_file.close()
        else:
            field_file.seek(original_position)
| 1,939 | 576 |
import math
import six.moves as sm
from gem import vector
from gem import matrix
def quat_identity():
    ''' Returns the quaternion identity. '''
    # Scalar-first layout: [w, x, y, z] with no rotation applied.
    identity = [1.0, 0.0, 0.0, 0.0]
    return identity
def quat_add(quat, quat1):
    ''' Add two quaternions. '''
    # Component-wise sum over the [w, x, y, z] layout.
    return [quat[i] + quat1[i] for i in range(4)]
def quat_sub(quat, quat1):
    ''' Subtract two quaternions. '''
    # Component-wise difference over the [w, x, y, z] layout.
    return [quat[i] - quat1[i] for i in range(4)]
def quat_mul_quat(quat, quat1):
    ''' Multiply a quaternion with a quaternion. '''
    # Hamilton product; both operands use the scalar-first [w, x, y, z] layout.
    aw, ax, ay, az = quat
    bw, bx, by, bz = quat1
    return [aw * bw - ax * bx - ay * by - az * bz,
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by + ay * bw + az * bx - ax * bz,
            aw * bz + az * bw + ax * by - ay * bx]
def quat_mul_vect(quat, vect):
    ''' Multiply a quaternion with a vector. '''
    # Treats the 3-component vector as a pure quaternion [0, x, y, z].
    qw, qx, qy, qz = quat
    vx, vy, vz = vect[0], vect[1], vect[2]
    return [-qx * vx - qy * vy - qz * vz,
            qw * vx + qy * vz - qz * vy,
            qw * vy + qz * vx - qx * vz,
            qw * vz + qx * vy - qy * vx]
def quat_mul_float(quat, scalar):
    ''' Multiply a quaternion with a scalar (float). '''
    return [component * scalar for component in quat]
def quat_div_float(quat, scalar):
    ''' Divide a quaternion with a scalar (float). '''
    return [component / scalar for component in quat]
def quat_neg(quat):
    ''' Negate the elements of a quaternion. '''
    return [-component for component in quat]
def quat_dot(quat1, quat2):
    ''' Dot product between two quaternions. Returns a scalar. '''
    total = 0
    for a, b in zip(quat1, quat2):
        total += a * b
    return total
def quat_magnitude(quat):
    ''' Compute magnitude of a quaternion. Returns a scalar. '''
    total = 0
    for component in quat:
        total += component * component
    return math.sqrt(total)
def quat_normalize(quat):
    ''' Returns a normalized quaternion.

    A zero-magnitude quaternion cannot be normalized; the identity
    quaternion is returned in that case.
    '''
    length = math.sqrt(sum(c * c for c in quat))
    # BUG FIX: the original guard was "length is not 0", an identity test
    # that is always true for a computed float, so a zero quaternion fell
    # through to a division by zero.
    if length != 0:
        return [c / length for c in quat]
    return [1.0, 0.0, 0.0, 0.0]
def quat_conjugate(quat):
    ''' Returns the conjugate of a quaternion. '''
    # Keep the scalar part, negate the vector part: [w, -x, -y, -z].
    return [quat[0], -quat[1], -quat[2], -quat[3]]
def quat_inverse(quat):
    ''' Returns the inverse of a quaternion.

    Computed as conjugate(q) / |q|^2, which reduces to the conjugate for
    unit quaternions.
    '''
    length_squared = quat[0] * quat[0] + quat[1] * quat[1] + quat[2] * quat[2] + quat[3] * quat[3]
    # BUG FIX: the original divided the raw components by |q|^2 without
    # conjugating first; that is only the inverse for purely real
    # quaternions. The vector part must be negated.
    return [quat[0] / length_squared,
            -quat[1] / length_squared,
            -quat[2] / length_squared,
            -quat[3] / length_squared]
def quat_from_axis_angle(axis, theta):
    ''' Returns a quaternion from a given axis and an angle.

    theta is interpreted in degrees (converted via math.radians). axis
    may be a gem vector.Vector or a plain 3-element list; anything else
    returns NotImplemented.
    '''
    half_theta = theta * 0.5
    sto2 = math.sin(math.radians(half_theta))
    cto2 = math.cos(math.radians(half_theta))
    if isinstance(axis, vector.Vector):
        axis.i_normalize()
        quat_data = [cto2, axis.vector[0] * sto2, axis.vector[1] * sto2, axis.vector[2] * sto2]
    elif isinstance(axis, list):
        # BUG FIX: the original called axis.normalize(), but plain lists
        # have no normalize() method (AttributeError); normalize manually.
        # Also build a list (not a tuple) for consistency with the
        # Vector branch.
        length = math.sqrt(axis[0] ** 2 + axis[1] ** 2 + axis[2] ** 2)
        naxis = [component / length for component in axis]
        quat_data = [cto2, naxis[0] * sto2, naxis[1] * sto2, naxis[2] * sto2]
    else:
        return NotImplemented
    return Quaternion(data=quat_data)
def quat_rotate(origin, axis, theta):
    ''' Returns a vector that is rotated around an axis. '''
    # Build the rotor q = [cos(t/2), axis * sin(t/2)]; theta is in degrees.
    half_angle = math.radians(theta * 0.5)
    s = math.sin(half_angle)
    c = math.cos(half_angle)
    rotor = Quaternion(data=[c, axis[0] * s, axis[1] * s, axis[2] * s])
    # Sandwich product q * v * q' yields the rotated vector part.
    rotated = (rotor * origin) * rotor.conjugate()
    return vector.Vector(3, data=rotated.data[1:4])
def quat_rotate_x_from_angle(theta):
    ''' Creates a quaternion that rotates around X axis given an angle. '''
    # NOTE(review): unlike quat_from_axis_angle there is no degree->radian
    # conversion here, so theta appears to be in radians — confirm callers.
    half_angle = theta * 0.5
    return [math.cos(half_angle), math.sin(half_angle), 0.0, 0.0]
def quat_rotate_y_from_angle(theta):
    ''' Creates a quaternion that rotates around Y axis given an angle. '''
    # theta appears to be in radians (no math.radians conversion here).
    half_angle = theta * 0.5
    return [math.cos(half_angle), 0.0, math.sin(half_angle), 0.0]
def quat_rotate_z_from_angle(theta):
    ''' Creates a quaternion that rotates around Z axis given an angle. '''
    # theta appears to be in radians (no math.radians conversion here).
    half_angle = theta * 0.5
    return [math.cos(half_angle), 0.0, 0.0, math.sin(half_angle)]
def quat_rotate_from_axis_angle(axis, theta):
    ''' Creates a quaternion that rotates around an arbitary axis given an angle.

    theta is interpreted in degrees (converted via math.radians). axis
    may be a gem vector.Vector or a plain 3-element list.
    '''
    half_theta = theta * 0.5
    sto2 = math.sin(math.radians(half_theta))
    cto2 = math.cos(math.radians(half_theta))
    if isinstance(axis, vector.Vector):
        axis.i_normalize()
        axis_vector = axis
        quat_data = [cto2, axis.vector[0] * sto2, axis.vector[1] * sto2, axis.vector[2] * sto2]
    elif isinstance(axis, list):
        # BUG FIX: plain lists have no normalize() method; normalize
        # manually, and wrap the result in a Vector so the quaternion
        # multiply below does not raise TypeError for the list case.
        length = math.sqrt(axis[0] ** 2 + axis[1] ** 2 + axis[2] ** 2)
        naxis = [component / length for component in axis]
        axis_vector = vector.Vector(3, data=naxis)
        quat_data = [cto2, naxis[0] * sto2, naxis[1] * sto2, naxis[2] * sto2]
    else:
        return NotImplemented
    rotor = Quaternion(data=quat_data)
    # NOTE(review): this rotates the axis by the quaternion built from that
    # same axis, which leaves the direction unchanged up to rounding;
    # confirm the intended behaviour against callers.
    return (rotor * axis_vector) * rotor.conjugate()
def quat_rotate_vector(quat, vec):
    ''' Rotates a vector by a quaternion, returns a vector. '''
    # Sandwich product q * v * q'; drop the scalar part of the result.
    rotated = (quat * vec) * quat.conjugate()
    return vector.Vector(3, data=rotated.data[1:4])
def quat_pow(quat, exp):
    ''' Returns a quaternion to the power of N.

    Uses the axis-angle form: for q = [cos(a), n*sin(a)],
    q**exp = [cos(a*exp), n*sin(a*exp)]. Near-identity quaternions
    (|w| ~ 1) are returned as the identity since the axis is undefined.
    '''
    quat_exp = Quaternion()
    # BUG FIX: the original guard was "quat.data[0] is not 0.0", a float
    # identity test that is always true, so sin(angle) could be zero and
    # raise ZeroDivisionError for |w| == 1. Guard on the open interval
    # instead; outside it the identity result is the sensible answer.
    if -1.0 < quat.data[0] < 1.0:
        angle = math.acos(quat.data[0])
        new_angle = angle * exp
        quat_exp.data[0] = math.cos(new_angle)
        div_angle = math.sin(new_angle) / math.sin(angle)
        # BUG FIX: the original scaled quat_exp's own (zero) vector part
        # with "*= div_angle"; the source quaternion's vector part must
        # be scaled instead.
        quat_exp.data[1] = quat.data[1] * div_angle
        quat_exp.data[2] = quat.data[2] * div_angle
        quat_exp.data[3] = quat.data[3] * div_angle
    return quat_exp
def quat_log(quat):
    ''' Returns the logarithm of a quaternion (as a 4-element list). '''
    alpha = math.acos(quat.data[0])
    sin_alpha = math.sin(alpha)
    if sin_alpha <= 0.0:
        # Degenerate (zero rotation): nothing to scale, return as-is.
        return quat.data
    # Scale the vector part by alpha / sin(alpha); keep 1.0 in slot 0.
    return [1.0,
            quat.data[1] * alpha / sin_alpha,
            quat.data[2] * alpha / sin_alpha,
            quat.data[3] * alpha / sin_alpha]
def quat_lerp(quat0, quat1, t):
    ''' Linear interpolation between two quaternions. '''
    # Blend with weights (1 - t) and t; no renormalization is applied.
    return (quat0 * (1.0 - t)) + (quat1 * t)
def quat_slerp(quat0, quat1, t):
    ''' Spherical interpolation between two quaternions. '''
    cos_theta = quat0.dot(quat1)
    # Take the shortest arc: flip the second quaternion when the dot
    # product is negative.
    if cos_theta < 0.0:
        end = quat1.negate()
        cos_theta = -cos_theta
    else:
        end = quat1
    if cos_theta > 0.999:
        # Nearly parallel: linear weights avoid a vanishing sin(theta).
        k0 = 1.0 - t
        k1 = t
    else:
        theta = math.acos(cos_theta)
        one_over_sin_theta = 1.0 / math.sin(theta)
        k0 = math.sin((1.0 - t) * theta) * one_over_sin_theta
        k1 = math.sin(t * theta) * one_over_sin_theta
    return (quat0 * k0) + (end * k1)
def quat_slerp_no_invert(quat0, quat1, t):
    ''' Spherical interpolation between two quaternions, it does not check for theta > 90. Used by SQUAD. '''
    cos_angle = quat0.dot(quat1)
    if -0.95 < cos_angle < 0.95:
        angle = math.acos(cos_angle)
        k0 = math.sin(angle * (1.0 - t)) / math.sin(angle)
        k1 = math.sin(t * angle) / math.sin(angle)
        return (quat0 * k0) + (quat1 * k1)
    # Quaternions are nearly parallel (or opposite): lerp is safe here.
    return quat_lerp(quat0, quat1, t)
def quat_squad(quat0, quat1, quat2, t):
    ''' Quaternion splines (SQUAD): composed slerps with parameter 2t(1-t). '''
    # BUG FIX: the original evaluated "2 * t(1 - t)", which calls the float
    # t and raises TypeError; the blend parameter is 2 * t * (1 - t).
    return quat_slerp_no_invert(quat_slerp_no_invert(quat0, quat2, t),
                                quat_slerp_no_invert(quat0, quat1, t),
                                2 * t * (1 - t))
def quat_to_matrix(quat):
    ''' Converts a quaternion to a rotational 4x4 matrix. '''
    w, x, y, z = quat.data
    # Pre-compute the products used by the standard conversion formula.
    xx = x * x
    yy = y * y
    zz = z * z
    xy = x * y
    xz = x * z
    yz = y * z
    wx = w * x
    wy = w * y
    wz = w * z
    out = matrix.Matrix(4)
    row = out.matrix[0]
    row[0] = 1.0 - 2.0 * yy - 2.0 * zz
    row[1] = 2.0 * xy + 2.0 * wz
    row[2] = 2.0 * xz - 2.0 * wy
    row[3] = 0.0
    row = out.matrix[1]
    row[0] = 2.0 * xy - 2.0 * wz
    row[1] = 1.0 - 2.0 * xx - 2.0 * zz
    row[2] = 2.0 * yz + 2.0 * wx
    row[3] = 0.0
    row = out.matrix[2]
    row[0] = 2.0 * xz + 2.0 * wy
    row[1] = 2.0 * yz - 2.0 * wx
    row[2] = 1.0 - 2.0 * xx - 2.0 * yy
    row[3] = 0.0
    # Row 3 is left exactly as matrix.Matrix(4) initialised it.
    return out
class Quaternion(object):
    ''' A quaternion stored scalar-first as data = [w, x, y, z].

    Thin object wrapper over the module-level quat_* helpers. Methods
    prefixed with i_ mutate the instance in place and return self; the
    unprefixed variants return a new Quaternion.
    '''

    def __init__(self, data=None):
        # Default to the identity rotation when no components are given.
        if data is None:
            self.data = quat_identity()
        else:
            self.data = data

    def __add__(self, other):
        if isinstance(other, Quaternion):
            return Quaternion(quat_add(self.data, other.data))
        else:
            return NotImplemented

    def __iadd__(self, other):
        if isinstance(other, Quaternion):
            self.data = quat_add(self.data, other.data)
            return self
        else:
            return NotImplemented

    def __sub__(self, other):
        if isinstance(other, Quaternion):
            return Quaternion(quat_sub(self.data, other.data))
        else:
            return NotImplemented

    def __isub__(self, other):
        if isinstance(other, Quaternion):
            self.data = quat_sub(self.data, other.data)
            return self
        else:
            return NotImplemented

    def __mul__(self, other):
        # Supports Quaternion * Quaternion, Quaternion * Vector and
        # Quaternion * scalar. Accepting ints as well as floats is a
        # backward-compatible generalization (ints used to TypeError).
        if isinstance(other, Quaternion):
            return Quaternion(quat_mul_quat(self.data, other.data))
        elif isinstance(other, vector.Vector):
            return Quaternion(quat_mul_vect(self.data, other.vector))
        elif isinstance(other, (int, float)):
            return Quaternion(quat_mul_float(self.data, other))
        else:
            return NotImplemented

    def __imul__(self, other):
        if isinstance(other, Quaternion):
            self.data = quat_mul_quat(self.data, other.data)
            return self
        elif isinstance(other, vector.Vector):
            # BUG FIX: the original read other.data here, but Vector stores
            # its components in .vector (see __mul__ above).
            self.data = quat_mul_vect(self.data, other.vector)
            return self
        elif isinstance(other, (int, float)):
            self.data = quat_mul_float(self.data, other)
            return self
        else:
            return NotImplemented

    def __div__(self, other):
        if isinstance(other, (int, float)):
            return Quaternion(quat_div_float(self.data, other))
        else:
            return NotImplemented

    # BUG FIX: Python 3 never calls __div__, so "/" raised TypeError under
    # Python 3 (the module targets both via six). Alias true division.
    __truediv__ = __div__

    def __idiv__(self, other):
        if isinstance(other, (int, float)):
            self.data = quat_div_float(self.data, other)
            return self
        else:
            return NotImplemented

    __itruediv__ = __idiv__

    def i_negate(self):
        ''' Negate this quaternion in place. '''
        self.data = quat_neg(self.data)
        return self

    def negate(self):
        ''' Return a negated copy. '''
        return Quaternion(quat_neg(self.data))

    def i_identity(self):
        ''' Reset this quaternion to the identity in place. '''
        self.data = quat_identity()
        return self

    def identity(self):
        ''' Return a new identity quaternion. '''
        return Quaternion(quat_identity())

    def magnitude(self):
        ''' Return the Euclidean length of this quaternion. '''
        return quat_magnitude(self.data)

    def dot(self, quat2):
        ''' Dot product with another quaternion; returns a scalar. '''
        if isinstance(quat2, Quaternion):
            return quat_dot(self.data, quat2.data)
        else:
            return NotImplemented

    def i_normalize(self):
        ''' Normalize this quaternion in place. '''
        self.data = quat_normalize(self.data)
        return self

    def normalize(self):
        ''' Return a normalized copy. '''
        return Quaternion(quat_normalize(self.data))

    def i_conjugate(self):
        ''' Conjugate this quaternion in place. '''
        self.data = quat_conjugate(self.data)
        return self

    def conjugate(self):
        ''' Return the conjugate as a new quaternion. '''
        return Quaternion(quat_conjugate(self.data))

    def inverse(self):
        ''' Return the multiplicative inverse as a new quaternion. '''
        return Quaternion(quat_inverse(self.data))

    def pow(self, e):
        ''' Return this quaternion raised to the power e. '''
        return quat_pow(self, e)

    def log(self):
        ''' Return the quaternion logarithm (a 4-element list). '''
        return quat_log(self)

    def lerp(self, quat1, time):
        ''' Linear interpolation towards quat1. '''
        return quat_lerp(self, quat1, time)

    def slerp(self, quat1, time):
        ''' Spherical interpolation towards quat1. '''
        return quat_slerp(self, quat1, time)

    def slerp_no_invert(self, quat1, time):
        ''' SLERP without shortest-arc flipping (used by SQUAD). '''
        return quat_slerp_no_invert(self, quat1, time)

    def squad(self, quat1, quat2, time):
        ''' Quaternion spline interpolation. '''
        return quat_squad(self, quat1, quat2, time)

    def toMatrix(self):
        ''' Convert to a rotational 4x4 matrix. '''
        return quat_to_matrix(self)

    # The following are used for orientation and motion
    def getForward(self):
        ''' Returns the forward vector. '''
        return quat_rotate_vector(self, vector.Vector(3, data=[0.0, 0.0, 1.0]))

    def getBack(self):
        ''' Returns the backwards vector. '''
        return quat_rotate_vector(self, vector.Vector(3, data=[0.0, 0.0, -1.0]))

    def getLeft(self):
        ''' Returns the left vector. '''
        return quat_rotate_vector(self, vector.Vector(3, data=[-1.0, 0.0, 0.0]))

    def getRight(self):
        ''' Returns the right vector. '''
        return quat_rotate_vector(self, vector.Vector(3, data=[1.0, 0.0, 0.0]))

    def getUp(self):
        ''' Returns the up vector. '''
        return quat_rotate_vector(self, vector.Vector(3, data=[0.0, 1.0, 0.0]))

    def getDown(self):
        ''' Returns the down vector. '''
        return quat_rotate_vector(self, vector.Vector(3, data=[0.0, -1.0, 0.0]))
def quat_from_matrix(matrix):
    ''' Converts a 4x4 rotational matrix to quaternion.

    Uses Shoemake's method: recover the component (w, x, y or z) with the
    largest magnitude first so the divisions below stay well-conditioned,
    then derive the remaining three from the off-diagonal terms.
    '''
    m = matrix.matrix
    fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2]
    fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2]
    fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1]
    fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2]
    biggestIndex = 0
    fourBiggestSquaredMinus1 = fourWSquaredMinus1
    # BUG FIX: the original used an if/elif chain that compared each
    # candidate only against w and never updated the running maximum, so it
    # could select a component that was not actually the largest.
    if fourXSquaredMinus1 > fourBiggestSquaredMinus1:
        fourBiggestSquaredMinus1 = fourXSquaredMinus1
        biggestIndex = 1
    if fourYSquaredMinus1 > fourBiggestSquaredMinus1:
        fourBiggestSquaredMinus1 = fourYSquaredMinus1
        biggestIndex = 2
    if fourZSquaredMinus1 > fourBiggestSquaredMinus1:
        fourBiggestSquaredMinus1 = fourZSquaredMinus1
        biggestIndex = 3
    biggestVal = math.sqrt(fourBiggestSquaredMinus1 + 1) * 0.5
    mult = 0.25 / biggestVal
    rquat = Quaternion()
    # NOTE: "==" replaces the original "is" comparisons, which only worked
    # due to CPython small-int caching.
    if biggestIndex == 0:
        rquat.data[0] = biggestVal
        rquat.data[1] = (m[1][2] - m[2][1]) * mult
        rquat.data[2] = (m[2][0] - m[0][2]) * mult
        rquat.data[3] = (m[0][1] - m[1][0]) * mult
    elif biggestIndex == 1:
        rquat.data[0] = (m[1][2] - m[2][1]) * mult
        rquat.data[1] = biggestVal
        rquat.data[2] = (m[0][1] + m[1][0]) * mult
        rquat.data[3] = (m[2][0] + m[0][2]) * mult
    elif biggestIndex == 2:
        rquat.data[0] = (m[2][0] - m[0][2]) * mult
        rquat.data[1] = (m[0][1] + m[1][0]) * mult
        rquat.data[2] = biggestVal
        rquat.data[3] = (m[1][2] + m[2][1]) * mult
    else:
        rquat.data[0] = (m[0][1] - m[1][0]) * mult
        rquat.data[1] = (m[2][0] + m[0][2]) * mult
        rquat.data[2] = (m[1][2] + m[2][1]) * mult
        rquat.data[3] = biggestVal
    return rquat
| 16,170 | 6,353 |