text string | size int64 | token_count int64 |
|---|---|---|
"""
출처: https://yabmoons.tistory.com/450
"""
import sys
# Read the grid size N and then the N×N matrix of 0/1 pixels from stdin;
# each input row is a contiguous digit string (e.g. "1101").
N = int(sys.stdin.readline())
M = [
    list(map(int, sys.stdin.readline().rstrip())) for _ in range(N)
]
def DFS(x, y, size):
    """Quad-tree-compress the size×size square with top-left corner (x, y).

    Prints the encoding directly: a uniform square prints its single digit,
    otherwise '(' + the four quadrant encodings + ')'.
    """
    first = M[x][y]
    # all() short-circuits on the first mismatching pixel, like the
    # original early-exit double loop.
    uniform = all(
        M[r][c] == first
        for r in range(x, x + size)
        for c in range(y, y + size)
    )
    if uniform:
        print(first, end='')
        return
    half = size // 2
    print('(', end='')
    DFS(x, y, half)
    DFS(x, y + half, half)
    DFS(x + half, y, half)
    DFS(x + half, y + half, half)
    print(')', end='')
DFS(0, 0, N)
| 722 | 283 |
from zemfrog.globals import celery, mail
@celery.task
def send_email(*args, **kwds):
    """Celery task: send an email asynchronously.

    Thin wrapper around Flask-Mail's ``mail.send_message``; all positional
    and keyword arguments are forwarded unchanged.
    """
    mail.send_message(*args, **kwds)
| 124 | 48 |
"""
Python3 compatible agave binding. WARNING -- only the token and clients service are supported.
NOTE -- This is a temporary module!
once python3 support is properly added to agavepy this module will be removed.
"""
from urllib.parse import urlparse, urljoin
import requests
import time
from agaveflask.logs import get_logger
logger = get_logger(__name__)
# Base class for all errors raised by this module.
class AgaveError(Exception):
    pass
class AgaveClientFailedCanRetry(AgaveError):
    """
    Thrown when client creation fails but the caller is free to retry the creation, as the library was able to
    clean up after the failure.
    """
    pass
class AgaveClientFailedDoNotRetry(AgaveError):
    """
    Thrown when client creation fails and caller should NOT retry the creation because the library was not able to
    clean up after the failure.
    """
    pass
class Token(object):
    """OAuth2 token manager for an Agave tenant.

    Obtains (``create``) and refreshes (``refresh``) an access token from the
    tenant's token endpoint using HTTP basic auth with the client key/secret,
    and notifies its ``parent`` Agave object of every new access token.
    """
    def __init__(self,
                 username, password,
                 api_server, api_key, api_secret, verify,
                 parent, _token=None, _refresh_token=None, token_username=None):
        self.username = username
        self.password = password
        self.api_server = api_server
        self.api_key = api_key
        self.api_secret = api_secret
        self.token_username = token_username
        # Agave object that created this token
        self.parent = parent
        self.verify = verify
        if _token and _refresh_token:
            self.token_info = {'access_token': _token,
                               'refresh_token': _refresh_token}
            self.parent._token = _token
        self.token_url = urljoin(self.api_server, 'token')

    def _token(self, data):
        """POST *data* to the token endpoint; record and return the access token."""
        logger.debug("top of _token")
        auth = requests.auth.HTTPBasicAuth(self.api_key, self.api_secret)
        # NOTE(review): this logs the api_secret in clear text; consider
        # masking it in production deployments.
        logger.debug("about to make POST request for token; URL: {}; "
                     "data: {}; auth: {}:{}".format(self.token_url, data, self.api_key, self.api_secret))
        resp = requests.post(self.token_url, data=data, auth=auth,
                             verify=self.verify)
        logger.debug("made request for token; rsp: {}".format(resp))
        resp.raise_for_status()
        self.token_info = resp.json()
        try:
            expires_in = int(self.token_info.get('expires_in'))
        except (TypeError, ValueError):
            # BUG FIX: a missing 'expires_in' yields None -> int(None) raises
            # TypeError, which the original ValueError-only handler missed.
            # Fall back to one hour either way.
            expires_in = 3600
        created_at = int(time.time())
        self.token_info['created_at'] = created_at
        self.token_info['expiration'] = created_at + expires_in
        self.token_info['expires_at'] = time.ctime(created_at + expires_in)
        token = self.token_info['access_token']
        # Notify parent that a token was created
        self.parent._token = token
        return token

    def create(self):
        """Obtain a brand-new access token via the resource-owner password grant."""
        logger.debug("top of token.create for username: {}; password: ****".format(self.username))
        data = {'grant_type': 'password',
                'username': self.username,
                'password': self.password,
                'scope': 'PRODUCTION'}
        if self.token_username:
            # Impersonation: admins may request a token on behalf of another user.
            data['grant_type'] = 'admin_password'
            data['token_username'] = self.token_username
        return self._token(data)

    def refresh(self):
        """Exchange the stored refresh token for a fresh access token."""
        data = {'grant_type': 'refresh_token',
                'scope': 'PRODUCTION',
                'refresh_token': self.token_info['refresh_token']}
        return self._token(data)
class Agave(object):
    """Minimal Agave client: resolves constructor parameters from PARAMS,
    supports direct-JWT auth or OAuth client credentials, and exposes the
    clients service as ``self.clients``."""
    PARAMS = [
        # param name, mandatory?, attr_name, default
        ('username', False, 'username', None),
        ('password', False, 'password', None),
        ('token_username', False, 'token_username', None),
        ('jwt', False, 'jwt', None),
        ('jwt_header_name', False, 'header_name', None),
        ('api_server', True, 'api_server', None),
        ('client_name', False, 'client_name', None),
        ('api_key', False, 'api_key', None),
        ('api_secret', False, 'api_secret', None),
        ('token', False, '_token', None),
        ('refresh_token', False, '_refresh_token', None),
        ('verify', False, 'verify', True),
    ]

    def __init__(self, **kwargs):
        for param, mandatory, attr, default in self.PARAMS:
            try:
                value = (kwargs[param] if mandatory
                         else kwargs.get(param, default))
            except KeyError:
                raise AgaveError(
                    'parameter "{}" is mandatory'.format(param))
            setattr(self, attr, value)
        # If we are passed a JWT directly, we can bypass all OAuth-related tasks
        if self.jwt:
            if not self.header_name:
                raise AgaveError("The jwt header name is required to use the jwt authenticator.")
        self.token = None
        if self.api_key is not None and self.api_secret is not None and self.jwt is None:
            self.set_client(self.api_key, self.api_secret)
        # set the clients object to the AgaveClientsService
        self.clients = AgaveClientsService(self)

    def set_client(self, key, secret):
        """
        Install OAuth client credentials and ensure an access token exists.
        :type key: str
        :type secret: str
        :rtype: None
        """
        logger.debug("top of set_client")
        self.api_key = key
        self.api_secret = secret
        self.token = Token(
            self.username, self.password,
            self.api_server, self.api_key, self.api_secret,
            self.verify,
            self, self._token, self._refresh_token, self.token_username)
        # Only fetch a new token when one was not supplied at construction
        # (replaces the original's 'if token: pass / else: create' inversion).
        if not self._token:
            logger.debug("calling token.create()")
            self.token.create()
class AgaveClientsService(object):
    """Class for interacting with the Agave OAuth2 clients service."""
    def __init__(self, parent):
        # maintain pointer to parent Agave client
        self.parent = parent

    def create(self, body):
        """Create a new Agave OAuth client. `body` should be a dictionary with a `clientName` key.

        Raises AgaveClientFailedCanRetry when creation failed but the partially
        created client was deleted (safe to retry), AgaveClientFailedDoNotRetry
        when cleanup also failed, and AgaveError for any other failure.
        """
        if not body or not hasattr(body, 'get'):
            raise AgaveError('body dictionary required.')
        auth = requests.auth.HTTPBasicAuth(self.parent.username, self.parent.password)
        client_name = body.get('clientName')
        try:
            rsp = requests.post(url='{}/clients/v2'.format(self.parent.api_server),
                                auth=auth,
                                data={'clientName': client_name},
                                verify=self.parent.verify)
            result = rsp.json().get('result')
            # BUG FIX: original format string had no placeholder after
            # "client_name:" so the name was silently dropped from the log.
            logger.debug("response from POST to create client: {}; content: {}; "
                         "client_name: {}".format(rsp, rsp.content, client_name))
            logger.debug("result from POST to create client: {}; client_name: {}".format(result, client_name))
            # there is a known issue with APIM where client creation fails due to failing to generate the client
            # credentials. in this case, the result object returned by the clients API is {} and the error message
            # returned is: "Unable to generate credentials for <client_name>"
            # we want to detect that situation and try to delete the client so it can be recreated.
            if not result:
                logger.debug(f"client got an empty result back from clients API. client_name: {client_name}")
                need_to_delete = False
                if 'Unable to generate credentials' in rsp.json().get('message'):
                    logger.debug(f"client got Unable to generate credentials from clients. "
                                 f"will try to delete client. client_name: {client_name}")
                    need_to_delete = True
                delete_attempts = 0
                # try 5 times to delete:
                while need_to_delete and delete_attempts < 5:
                    delete_attempts = delete_attempts + 1
                    logger.debug(f"attempting attempt #{delete_attempts} to delete client {client_name}.")
                    try:
                        self.delete(clientName=body.get('clientName'))
                        logger.debug(f"client {client_name} deleted successfully.")
                        need_to_delete = False
                    except Exception as e:
                        logger.debug(f"got an Exception trying to delete client. e: {e}")
                if need_to_delete:
                    logger.debug(f"tried 5 times to delete client {client_name}. giving up...")
                # regardless of whether we were able to delete the client, we must raise because the
                # client was not generated; the exception type tells the caller whether the cleanup
                # succeeded (CanRetry) or not (DoNotRetry).
                if need_to_delete:
                    raise AgaveClientFailedDoNotRetry()
                raise AgaveClientFailedCanRetry()
            self.parent.set_client(result['consumerKey'], result['consumerSecret'])
            logger.debug("set_client in parent, returning result.")
            return result
        except (AgaveClientFailedCanRetry, AgaveClientFailedDoNotRetry):
            # BUG FIX: these were previously swallowed by the broad handler
            # below and re-raised as a plain AgaveError, destroying the
            # retry/no-retry signal they exist to convey.
            raise
        except Exception as e:
            raise AgaveError('Error creating client: {}'.format(e))

    def test(self, arg):
        # Diagnostic helper: dump the service, its parent and a sample URL.
        print(self)
        print(self.parent)
        print('Here is a URL: {}/clients/v2'.format(self.parent.api_server))
        print(arg)

    def delete(self, clientName):
        """Delete an Agave OAuth2 client."""
        auth = requests.auth.HTTPBasicAuth(self.parent.username, self.parent.password)
        try:
            rsp = requests.delete(url='{}/clients/v2/{}'.format(self.parent.api_server, clientName),
                                  auth=auth)
            rsp.raise_for_status()
            return {}
        except Exception as e:
            # BUG FIX: message previously said 'Error creating client'.
            raise AgaveError('Error deleting client: {}'.format(e))

    def list(self):
        """List all Agave OAuth2 clients."""
        auth = requests.auth.HTTPBasicAuth(self.parent.username, self.parent.password)
        try:
            rsp = requests.get(url='{}/clients/v2'.format(self.parent.api_server), auth=auth)
            rsp.raise_for_status()
            return rsp
        except Exception as e:
            raise AgaveError('Error listing clients: {}'.format(e))
| 10,568 | 2,782 |
from glob import glob
import os
import shutil
import simplejson
import xmltodict
# Work relative to the current directory; collected output goes to ./collection.
pwd = os.getcwd()
output_dir = "%s/collection" %(pwd)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# Each matched cookie image under matched/img drives one collection entry.
cookies = glob("%s/matched/img/*.png" %(pwd))
def read_xml(xml_file):
    """Parse *xml_file* with xmltodict and return the resulting dict."""
    with open(xml_file) as handle:
        return xmltodict.parse(handle.read())
def write_json(json_obj, filename, mode="w", print_pretty=True):
    '''write_json will (optionally pretty-print) a json object to file
    :param json_obj: the dict to print to json
    :param filename: the output file to write to
    :param print_pretty: if True, will use nicer formatting
    '''
    # Fixed: docstring documented a non-existent `pretty_print` parameter,
    # and the truthiness test used `== True`.
    with open(filename, mode) as filey:
        if print_pretty:
            filey.writelines(simplejson.dumps(json_obj, indent=4, separators=(',', ': ')))
        else:
            filey.writelines(simplejson.dumps(json_obj))
    return filename
def make_kv(key, value):
    """Package *key* and *value* into a {'key': ..., 'value': ...} dict."""
    return {'key': key, 'value': value}
def extract_features(marks):
    """Convert TumorFeature mark dict(s) into lists of key/value features.

    :param marks: one xmltodict mark (dict) or a list of them; xmltodict
        yields a bare dict when the XML contains a single element.
    :return: a list with one feature-list per mark (name/type/value and,
        when present, units).
    '''
    """
    # Normalize the single-dict case so one loop handles both shapes
    # (the original duplicated the body across an if/else).
    if not isinstance(marks, list):
        marks = [marks]
    features = []
    for mark in marks:
        mark_features = [
            make_kv('name', mark['@name']),
            make_kv('type', mark['@variableType']),
            make_kv('value', mark['#text']),
        ]
        if "@units" in mark:
            mark_features.append(make_kv('units', mark['@units']))
        features.append(mark_features)
    return features
def parse_xml(xml_file, root=None):
    '''parse_xml will iterate over an xml annotation file and return its
    metadata as a list of {'key': ..., 'value': ...} dicts
    :param xml_file: path to the annotation XML file
    :param root: name of the document root element (defaults to "CandyTumor")
    '''
    if root is None:  # fixed: `== None` -> `is None`
        root = "CandyTumor"
    doc = read_xml(xml_file)
    metadata = [
        make_kv('id', doc[root]['@uniqueIdentifier']),
        make_kv('rx', doc[root]['ClassAnnotation']['@class']),
    ]
    marks = doc[root]['ClassAnnotation']['TumorAnnotation']
    if isinstance(marks, dict):
        # single TumorAnnotation element
        features = extract_features(marks['TumorFeature'])
    else:
        # list of TumorAnnotation elements: concatenate their features
        features = []
        for mark in marks:
            features = features + extract_features(mark['TumorFeature'])
    metadata.append(make_kv('features', features))
    return metadata
# Main function is here, parsing the cookies!
for cookie in cookies:
    # First find cookie images based on image files
    cookie_image = os.path.basename(cookie)
    cookie_id = os.path.splitext(cookie_image)[0]
    cookie_dir = "%s/%s" %(output_dir,cookie_id)
    cookie_images = "%s/images" %(cookie_dir)
    # Create collection/<id>/images/image1 for this cookie.
    # NOTE(review): if cookie_dir already exists, the images subfolders are
    # assumed to exist as well -- confirm reruns over a partial tree are safe.
    if not os.path.exists(cookie_dir):
        os.mkdir(cookie_dir)
        os.mkdir("%s/images" %(cookie_dir))
        os.mkdir("%s/images/image1" %(cookie_dir))
    shutil.copyfile(cookie,"%s/image1/image1.png" %(cookie_images))
    # Is there a matching overlay (mask?)
    cookie_overlay = "%s/matched/mask/%s" %(pwd,cookie_image)
    if os.path.exists(cookie_overlay):
        shutil.copyfile(cookie_overlay,"%s/image1/overlay1.png" %(cookie_images))
    # Is there metadata?
    cookie_xml = "%s/matched/%s.xml" %(pwd,cookie_id)
    if os.path.exists(cookie_xml):
        cookie_metadata = parse_xml(cookie_xml)
        write_json(cookie_metadata,"%s/image1/overlay1.json" %(cookie_images))
| 3,760 | 1,213 |
import numpy as np
import itertools
def generate_binary_truth_table(num_variables):
    '''
    Returns a truth table enumerating all variable assignments.
    Inputs :
        num_variables : Number of variables to consider in constructing the table.
            Can be a single number or a list of cardinalities.
            If input is a single number (or a one-element list), a
            cardinality of 2 is assumed for every variable.
    Outputs :
        numpy array with one row per assignment, in itertools.product
        (last-variable-fastest) order.
    Usage :
        generate_binary_truth_table(num_variables = 2)
        Out: array([[0, 0],
                    [0, 1],
                    [1, 0],
                    [1, 1]])
    '''
    # Fixed: `type(x) == type([])` -> isinstance; the docstring example rows
    # previously showed an order itertools.product does not produce.
    if isinstance(num_variables, list):
        if len(num_variables) == 1:
            # [n] means "n binary variables"
            num_variables = [2] * num_variables[0]
        cardinalities = num_variables
    else:
        cardinalities = [2] * int(num_variables)
    rows = itertools.product(*(range(card) for card in cardinalities))
    return np.array([list(row) for row in rows])
def dict_to_string(in_dict):
    """Render *in_dict* as 'k1 = v1 | k2 = v2' in insertion order.

    Built with str.join instead of the original append-then-strip(' | '):
    str.strip treats its argument as a character SET, so it could also eat
    legitimate leading/trailing spaces or '|' characters from the first key
    or last value.
    """
    return " | ".join("{} = {}".format(k, v) for k, v in in_dict.items())
import numpy as np
# import matplotlib.pyplot as plt
import astropy.table as at
import astropy.units as u
import astropy.coordinates as coord
import astropy.constants as const
import astropy.time as atime
import scipy.ndimage as ndimage
import numpy.polynomial as polynomial
# import jabble.model as wobble_model
import jax.numpy as jnp
def find_nearest(array, value):
    """Return the index of the element of *array* closest to *value*."""
    deltas = np.abs(np.asarray(array) - value)
    return deltas.argmin()
def velocityfromshift(shifts):
    """Convert log-wavelength shifts to velocities via the relativistic
    Doppler relation v = c * (e^{2s} - 1) / (e^{2s} + 1)."""
    e2s = np.exp(2 * shifts)
    return const.c * (e2s - 1) / (1 + e2s)
def get_loss_array(shift_grid,model,xs,ys,yerr,loss,*args):
    # Evaluate `loss` across a grid of wavelength shifts, per epoch.
    # shift_grid may be 1-D (one grid shared by every epoch) or 2-D
    # (a separate grid per epoch). Any other rank leaves loss_arr unbound
    # and the final return raises NameError.
    # NOTE(review): only xs is promoted to 2-D here; ys/yerr appear to be
    # assumed (epochs x pixels) already -- confirm with callers.
    # so so so shit
    if len(xs.shape) == 1:
        xs = np.expand_dims(xs,axis=0)
    if len(shift_grid.shape) == 1:
        loss_arr = np.empty((xs.shape[0],shift_grid.shape[0]))
        for i in range(xs.shape[0]):
            for j,shift in enumerate(shift_grid):
                loss_arr[i,j] = loss(model.p,xs[i,:]+shift,ys[i,:],yerr[i,:],i,model,*args)
    if len(shift_grid.shape) == 2:
        loss_arr = np.empty((xs.shape[0],shift_grid.shape[1]))
        for i in range(xs.shape[0]):
            for j,shift in enumerate(shift_grid[i,:]):
                loss_arr[i,j] = loss(model.p,xs[i,:]+shift,ys[i,:],yerr[i,:],i,model,*args)
    return loss_arr
def get_parabolic_min(loss_array, grid, return_all=False):
    """Refine each epoch's grid minimum with a 3-point parabolic fit.

    :param loss_array: (epochs, n_grid) loss values
    :param grid: (epochs, n_grid) shift values matching loss_array
    :param return_all: when True also return the stencils and fitted polys
    :return: per-epoch refined minima (and xss, yss, polys when requested)
    """
    epoches = loss_array.shape[0]
    grid_min = np.empty(epoches)
    xss = np.empty((epoches, 3))
    yss = np.empty((epoches, 3))
    polys = []
    for n in range(epoches):
        idx = loss_array[n, :].argmin()
        print("epch {}: min {}".format(n, idx))
        # Clamp so the three-point stencil stays inside the grid.
        if idx == 0:
            print("minimum likely out of range")
            idx = 1
        if idx == grid.shape[1] - 1:
            print("minimum likely out of range")
            idx -= 1
        xs = grid[n, idx - 1:idx + 2]
        xss[n, :] = xs
        ys = loss_array[n, idx - 1:idx + 2]
        yss[n, :] = ys
        poly = np.polyfit(xs, ys, deg=2)
        polys.append(poly)
        deriv = np.polyder(poly)
        roots = np.roots(deriv)
        real_roots = roots[roots.imag == 0].real
        # BUG FIX: np.roots returns an array; assigning it into a scalar slot
        # was deprecated and is an error on modern NumPy. Take the (single)
        # real root explicitly. The unused y_min computation was dropped.
        grid_min[n] = real_roots[0]
    if return_all:
        return grid_min, xss, yss, polys
    else:
        return grid_min
def zplusone(vel):
    """Relativistic Doppler factor 1+z for radial velocity *vel*."""
    beta = vel / (const.c)
    return np.sqrt((1 + beta) / (1 - beta))
def shifts(vel):
    # Shift in ln(wavelength) for velocity vel: log of the Doppler factor.
    return np.log(zplusone(vel))
def get_star_velocity(BJD,star_name,observatory_name,parse=False):
    """Barycentric radial-velocity correction for a star at the given BJD(s).

    Resolves *star_name* via a remote name lookup (requires network access)
    and *observatory_name* via astropy's site registry; returns the
    correction converted to km/s.
    """
    hatp20_c = coord.SkyCoord.from_name(star_name,parse=parse)
    loc = coord.EarthLocation.of_site(observatory_name)
    ts = atime.Time(BJD, format='jd', scale='tdb')
    bc = hatp20_c.radial_velocity_correction(obstime=ts, location=loc).to(u.km/u.s)
    return bc
def interpolate_mask(flux, mask):
    """Linearly interpolate across masked pixels, row by row.

    :param flux: (epochs, pixels) array of flux values
    :param mask: same-shape array; nonzero marks pixels to replace
    :return: a NEW array with each masked run replaced by the straight line
        between its two unmasked neighbours.

    BUG FIX: the original allocated np.zeros(...) and then immediately
    rebound new_flux to flux itself, so the "result" aliased (and mutated)
    the caller's input. The evident intent was a copy.
    NOTE(review): a masked run reaching the end of a row is left unfilled,
    and a run starting at column 0 would anchor on the row's last pixel via
    index -1 -- confirm masks never touch the row edges.
    """
    new_flux = flux.copy()
    for j, mask_row in enumerate(mask):
        cnt = 0
        for i, mask_ele in enumerate(mask_row):
            if mask_ele != 0:
                cnt += 1
            if mask_ele == 0 and cnt != 0:
                # linspace endpoints are the unmasked anchors; the interior
                # points fill the run of cnt masked pixels.
                new_flux[j, i - cnt:i] = np.linspace(
                    flux[j, i - cnt - 1], flux[j, i], cnt + 2)[1:-1]
                cnt = 0
    return new_flux
def gauss_filter(flux, sigma):
    """1-D Gaussian smoothing of *flux* with width *sigma* (in pixels)."""
    return ndimage.gaussian_filter1d(flux, sigma)
def normalize_flux(flux,sigma):
    # Continuum-normalize: divide by a Gaussian-smoothed copy of the flux.
    return flux/gauss_filter(flux,sigma)
def convert_xy(lamb, flux, ferr):
    """Move wavelength/flux/error into log space.

    Returns (x, y, yerr) = (ln lamb, ln flux, ferr/flux); ferr/flux is the
    first-order error of ln(flux).
    """
    return np.log(lamb), np.log(flux), ferr / flux
def set_masked(y, yerr, mask, y_const=0.0, err_const=10.0):
    """Neutralize masked samples in-place.

    Masked y values become *y_const* and their errors *err_const* (large, so
    they carry negligible weight). Mutates and returns the same arrays.
    """
    y[mask], yerr[mask] = y_const, err_const
    return y, yerr
class WobbleDataset:
    # Prepares spectra for wobble-style RV fitting: interpolates masked
    # pixels, continuum-normalizes, moves to log space, and down-weights
    # masked samples with large errors.
    def __init__(self,wave,flux,flux_error,mask,sigma=80):
        self.mask = mask
        # NOTE(review): confirm whether interpolate_mask copies or aliases
        # its input -- as written it aliases, so self.flux may end up
        # holding the interpolated values.
        self.flux = flux
        self.wave = wave
        flux = interpolate_mask(flux,mask)
        flux_norm = normalize_flux(flux,sigma)
        # wave is assumed to be an astropy Quantity in Angstroms -- confirm.
        self.xs, self.ys, self.yerr = np.log(wave/u.Angstrom), np.log(flux_norm), flux_error/flux
        self.ys, self.yerr = set_masked(self.ys,self.yerr,mask)
        # number of epochs = rows of the (epochs x pixels) arrays
        self.epoches = self.ys.shape[0]
# https://github.com/matplotlib/matplotlib/issues/18168#issuecomment-670211108
from matplotlib.testing.conftest import mpl_test_settings
| 137 | 51 |
# Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
from django.db import migrations
def forward(apps, schema_editor):
    """Add the 'verified-errata' DocTagName (idempotent via get_or_create)."""
    DocTagName = apps.get_model('name','DocTagName')
    DocTagName.objects.get_or_create(slug='verified-errata', name='Has verified errata', desc='', used=True, order=0)
def reverse(apps, schema_editor):
    """Undo forward(): remove the 'verified-errata' DocTagName."""
    DocTagName = apps.get_model('name','DocTagName')
    DocTagName.objects.filter(slug='verified-errata').delete()
class Migration(migrations.Migration):
    # Runs after the reviewer-queue-policy migration in the 'name' app.
    dependencies = [
        ('name', '0008_reviewerqueuepolicyname'),
    ]
    operations = [
        migrations.RunPython(forward, reverse)
    ]
| 660 | 216 |
def get_first_digit(number):
    """Return the most significant digit of *number*.

    Generalized to negative inputs by ignoring the sign (the original would
    try int('-') and raise ValueError).
    """
    return int(str(abs(number))[0])
def get_last_digit(number):
    """Return the least significant digit of *number*.

    Generalized to negative inputs by ignoring the sign (Python's modulo of
    a negative number would otherwise return e.g. -12 % 10 == 8).
    """
    return abs(number) % 10
def remove_first_last_digit(number):
    """Return *number* as a string with its first and last characters dropped.

    Note: the result is a string (possibly empty for numbers with fewer than
    three digits), matching the original contract.
    """
    return str(number)[1:-1]
# Inline sanity checks for the digit helpers (run at import time).
assert get_first_digit(12021) == 1
assert get_first_digit(92021) == 9
assert get_first_digit(72003443) == 7
assert get_last_digit(12021) == 1
assert get_last_digit(92021) == 1
assert get_last_digit(72003443) == 3
| 429 | 196 |
import morle.algorithms.fst as FST
from morle.datastruct.lexicon import LexiconEntry
from morle.datastruct.rules import Rule
from morle.utils.files import file_exists, read_tsv_file
import morle.shared as shared
import hfst
import logging
import math
from operator import itemgetter
from typing import List, Tuple
# TODO use RuleSet instead!
def load_rules() -> List[Tuple[Rule, float]]:
    """Load (rule, cost) pairs for analyzer compilation.

    Weighted mode ([compile] weighted=true): requires the 'simple' edge
    model; costs are -log(productivity), optionally capped at max_cost.
    Unweighted mode: all costs are 0; prefers the model-selected rule file
    when it exists. The identity rule ':/:___:' is always appended at cost 0.
    """
    rules_filename = None
    if shared.config['compile'].getboolean('weighted'):
        if shared.config['Models'].get('edge_model') == 'simple':
            rules_filename = shared.filenames['edge-model']
            # 'none' in the config disables the cost cap entirely
            max_cost = None \
                if shared.config['compile'].get('max_cost') == 'none' \
                else shared.config['compile'].getfloat('max_cost')
            rules = [(Rule.from_string(rule), -math.log(prod))\
                     for rule, prod in\
                     read_tsv_file(rules_filename, (str, float))\
                     if max_cost is None or -math.log(prod) < max_cost ] +\
                    [(Rule.from_string(':/:___:'), 0.0)]
            return rules
        else:
            raise Exception('Compiling a weighted analyzer is only possible'
                            ' for the Bernoulli edge model.')
    else:
        # fall back to the unfiltered rule file if model selection
        # has not been run
        rules_filename = shared.filenames['rules-modsel']
        if not file_exists(rules_filename):
            rules_filename = shared.filenames['rules']
        return [(Rule.from_string(rule), 0.0)\
                for (rule,) in read_tsv_file(rules_filename, (str,))] +\
               [(Rule.from_string(':/:___:'), 0.0)]
# TODO use Lexicon instead!
def load_roots() -> List[LexiconEntry]:
    """Read candidate roots from the wordlist, skipping entries that fail to
    parse (each failure is logged as a warning)."""
    def first_column():
        # yield the first cell of every row that has a non-empty one
        for row in read_tsv_file(shared.filenames['wordlist']):
            if row and row[0]:
                yield row[0]
    entries = []
    for word in first_column():
        try:
            entries.append(LexiconEntry(word))
        except Exception as ex:
            logging.getLogger('main').warning(str(ex))
    return entries
def build_rule_transducer(rules :List[Tuple[Rule, float]]) \
                         -> hfst.HfstTransducer:
    """Compile every weighted rule to an FST and disjoin them into one
    transducer."""
    rule_fsts = [rule.to_fst(weight=weight) for rule, weight in rules]
    return FST.binary_disjunct(rule_fsts, print_progress=True)
def build_root_transducer(roots :List[LexiconEntry]) -> hfst.HfstTransducer:
    """Build an identity transducer over each root's word+tag sequence and
    disjoin them into a single acceptor."""
    root_fsts = []
    for entry in roots:
        symbols = entry.word + entry.tag
        root_fsts.append(FST.seq_to_transducer(zip(symbols, symbols)))
    return FST.binary_disjunct(root_fsts, print_progress=True)
# def build_rootgen_transducer(roots :List[LexiconEntry]) -> hfst.HfstTransducer:
# alergia = AlergiaStringFeature()
# alergia.fit(roots)
# return alergia.automaton
def run() -> None:
    """Entry point: compile and save the rule transducer and, in supervised
    mode, the root transducer as well."""
    rules = load_rules()
    roots = load_roots()
    logging.getLogger('main').info('Building the rule transducer...')
    rules_tr = build_rule_transducer(rules)
    FST.save_transducer(rules_tr, shared.filenames['rules-tr'])
    if shared.config['General'].getboolean('supervised'):
        logging.getLogger('main').info('Building the root transducer...')
        roots_tr = build_root_transducer(roots)
        FST.save_transducer(roots_tr, shared.filenames['roots-tr'])
    # logging.getLogger('main').info('Building the root generator transducer...')
    # rootgen_tr = algorithms.fst.load_transducer(shared.filenames['root-model'])
    # algorithms.fst.save_transducer(rootgen_tr, shared.filenames['rootgen-tr'])
| 3,635 | 1,148 |
import requests
import os
import json
db_url = os.getenv('REPLIT_DB_URL')
def set(key, val, type='string'):
    """Store *val* under *key*; type='json' serializes with json.dumps first.

    Returns the HTTP response, an error dict on failure, or None for an
    unrecognized *type* (original behavior). NOTE: `set` and `type` shadow
    builtins but are kept for backward compatibility with existing callers.
    """
    try:
        if type == 'string':
            return requests.post(db_url, data={key: val})
        if type == 'json':
            return requests.post(db_url, data={key: json.dumps(val)})
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        return {'error': {'message': 'There was an issue with writing to the database'}}
def get(key):
    """Fetch *key*: returns parsed JSON when possible, the raw string when the
    value is not JSON, or an error dict when the request itself failed."""
    resp = None
    try:
        resp = requests.get("{0}/{1}".format(db_url, key))
        # Return JSON dict by default
        return json.loads(resp.text)
    except Exception:
        # BUG FIX: bare 'except:' replaced with 'except Exception:'.
        try:
            # Return as string if not JSON; if the GET itself failed, resp is
            # still None and this raises, falling through to the error dict.
            return resp.text
        except Exception:
            return {'error': {'message': 'There was an issue with getting data from the database'}}
def delete(key):
    """Delete *key* from the database; returns the HTTP response or an error
    dict on failure."""
    try:
        return requests.delete("{0}/{1}".format(db_url, key))
    except Exception:
        # BUG FIX: bare 'except:' replaced with 'except Exception:'.
        return {'error': {'message': 'There was an issue with deleting data from the database'}}
def list(key):
    """List db entries that start with [key].

    Returns a list of matching key names (one per line of the response), or
    an error dict on failure. NOTE: shadows the builtin `list`; kept for
    backward compatibility with existing callers.
    """
    try:
        resp = requests.get('{0}?prefix={1}'.format(db_url, key))
        return resp.text.split('\n')
    except Exception:
        # BUG FIX: bare 'except:' replaced with 'except Exception:'.
        return {'error': {'message': 'There was an issue with listing data from the database'}}
from subprocess import call
from sc2streamhelper.passwords import *
# Request an OAuth client-credentials token from the battle.net API.
# BUG FIX: each curl option and its value must be its own argv element.
# The original passed "--user <key>:<secret>" as a single string (curl sees
# an unknown option) and relied on implicit string concatenation that mashed
# the --data-urlencode value and the URL into one argument.
call([
    "curl",
    "--user", SC2STREAMHELPER_APIKEY + ":" + SC2STREAMHELPER_APISECRET,
    "--data-urlencode", "grant_type=client_credentials",
    "http://us.battle.net/oauth/token",
])
| 263 | 100 |
import torch
from models import EfficientDet
if __name__ == '__main__':
    # Smoke test: push one random batch (N=4, RGB, 512x512) through the
    # model and print the shape of every returned tensor.
    inputs = torch.randn(4, 3, 512, 512)
    model = EfficientDet(levels=3)
    output = model(inputs)
    for p in output:
        print(p.size())
import os
import struct
import gevent
import gevent.event
import gevent.socket
import google.protobuf.message
from msg_pb2 import Response
from msg_pb2 import Request
CONNECT_TIMEOUT = 5.0  # seconds to wait when establishing a connection
REQUEST_TIMEOUT = 2.0  # seconds to wait for a tagged response
# Default cluster: local doozerd nodes on their standard ports.
DEFAULT_URI = "doozer:?%s" % "&".join([
    "ca=127.0.0.1:8046",
    "ca=127.0.0.1:8041",
    "ca=127.0.0.1:8042",
    "ca=127.0.0.1:8043",
])
# Indirection point so embedders/tests can substitute their own spawner.
_spawner = gevent.spawn
# Raised when no doozerd address accepts a connection.
class ConnectError(Exception): pass
class ResponseError(Exception):
    """Base for errors reported in a doozerd response.

    Carries the protocol error code/detail plus the original request and
    response for diagnostics.
    """
    def __init__(self, response, request):
        self.code = response.err_code
        self.detail = response.err_detail
        self.response = response
        self.request = request
    def __str__(self):
        return str(pb_dict(self.request))
# One subclass per doozerd error code (mapped in response_exception).
class TagInUse(ResponseError): pass
class UnknownVerb(ResponseError): pass
class Readonly(ResponseError): pass
class TooLate(ResponseError): pass
class RevMismatch(ResponseError): pass
class BadPath(ResponseError): pass
class MissingArg(ResponseError): pass
class Range(ResponseError): pass
class NotDirectory(ResponseError): pass
class IsDirectory(ResponseError): pass
class NoEntity(ResponseError): pass
def response_exception(response):
    """Takes a response, returns proper exception if it has an error code"""
    code_map = {
        Response.TAG_IN_USE: TagInUse, Response.UNKNOWN_VERB: UnknownVerb,
        Response.READONLY: Readonly, Response.TOO_LATE: TooLate,
        Response.REV_MISMATCH: RevMismatch, Response.BAD_PATH: BadPath,
        Response.MISSING_ARG: MissingArg, Response.RANGE: Range,
        Response.NOTDIR: NotDirectory, Response.ISDIR: IsDirectory,
        Response.NOENT: NoEntity, }
    has_err_code = any(field.name == 'err_code'
                       for field, value in response.ListFields())
    return code_map[response.err_code] if has_err_code else None
def pb_dict(message):
    """Create dict representation of a protobuf message"""
    return {field.name: value for field, value in message.ListFields()}
def parse_uri(uri):
    """Parse the doozerd URI scheme and return the list of 'ca' addresses.

    Raises ValueError when *uri* does not use the doozer:? scheme.
    """
    if not uri.startswith("doozer:?"):
        raise ValueError("invalid doozerd uri")
    _, params = uri.split("?", 1)
    pairs = (param.split("=", 1) for param in params.split("&"))
    return [value for key, value in pairs if key == "ca"]
def connect(uri=None):
    """Start a Doozer client connection.

    Falls back to the DOOZER_URI environment variable, then DEFAULT_URI,
    when *uri* is not given.
    """
    uri = uri or os.environ.get("DOOZER_URI", DEFAULT_URI)
    addrs = parse_uri(uri)
    if not addrs:
        raise ValueError("there were no addrs supplied in the uri (%s)" % uri)
    return Client(addrs)
class Connection(object):
    """A gevent-based connection to a doozerd cluster node.

    Runs a background greenlet that reads length-prefixed protobuf responses
    and dispatches them to pending requests by tag.
    """
    def __init__(self, addrs=None):
        if addrs is None:
            addrs = []
        self.addrs = addrs
        self.pending = {}   # tag -> AsyncResult awaiting the response
        self.loop = None    # receive-loop greenlet
        self.sock = None
        self.address = None
        self.ready = gevent.event.Event()

    def connect(self):
        self.reconnect()

    def reconnect(self):
        """(Re)establish a connection, trying every address for 5 rounds with
        a growing back-off between rounds."""
        self.disconnect()
        for retry in range(5):
            addrs = list(self.addrs)
            while len(addrs):
                try:
                    host, port = addrs.pop(0).split(':')
                    self.address = "%s:%s" % (host, port)
                    self.sock = gevent.socket.create_connection((host, int(port)))
                    self.ready.set()
                    self.loop = _spawner(self._recv_loop)
                    return
                except IOError:
                    pass
            gevent.sleep(retry * 2)
        raise ConnectError("Can't connect to any of the addresses: %s" % self.addrs)

    def disconnect(self):
        if self.loop:
            self.loop.kill()
        if self.sock:
            self.sock.close()
        self.ready.clear()

    def send(self, request, retry=True):
        """Send *request* and block until its tagged response arrives.

        Raises the mapped ResponseError subclass when the response carries
        an error code.
        """
        # Pick the smallest unused tag to correlate request and response.
        request.tag = 0
        while request.tag in self.pending:
            request.tag += 1
            request.tag %= 2**31
        self.pending[request.tag] = gevent.event.AsyncResult()
        data = request.SerializeToString()
        head = struct.pack(">I", len(data))
        packet = ''.join([head, data])
        try:
            self.ready.wait(timeout=2)
            # BUG FIX: use sendall -- send() may write only part of the packet.
            self.sock.sendall(packet)
        except IOError as e:  # MODERNIZED: 'except X, e' is Python-2-only syntax
            self.reconnect()
            if retry:
                self.ready.wait()
                self.sock.sendall(packet)
            else:
                raise e
        response = self.pending[request.tag].get(timeout=REQUEST_TIMEOUT)
        del self.pending[request.tag]
        exception = response_exception(response)
        if exception:
            raise exception(response, request)
        return response

    def _recv_loop(self):
        """Background loop: read 4-byte length headers + protobuf payloads and
        resolve the matching pending AsyncResult."""
        while True:
            try:
                head = self.sock.recv(4)
                # NOTE(review): recv may return fewer bytes than requested;
                # a short read here would desynchronize the stream -- confirm
                # whether doozerd frames ever arrive fragmented.
                length = struct.unpack(">I", head)[0]
                data = self.sock.recv(length)
                response = Response()
                response.ParseFromString(data)
                if response.tag in self.pending:
                    self.pending[response.tag].set(response)
            except struct.error:
                # If some extra bytes are sent, just reconnect.
                # This is related to this bug:
                # https://github.com/ha/doozerd/issues/5
                self.reconnect()
            except IOError:
                self.reconnect()
class Client(object):
    """High-level doozerd client: one method per protocol verb, delegating
    transport to a Connection."""
    def __init__(self, addrs=None):
        if addrs is None:
            addrs = []
        self.connection = Connection(addrs)
        self.connect()

    def rev(self):
        """Return the store's current revision."""
        request = Request(verb=Request.REV)
        return self.connection.send(request)

    def set(self, path, value, rev):
        """Write *value* at *path* iff its revision is still *rev*.

        Not retried on connection failure because SET is not idempotent.
        """
        request = Request(path=path, value=value, rev=rev, verb=Request.SET)
        return self.connection.send(request, retry=False)

    def get(self, path, rev=None):
        """Read the value at *path*, optionally at a specific revision."""
        request = Request(path=path, verb=Request.GET)
        if rev:
            request.rev = rev
        return self.connection.send(request)

    def delete(self, path, rev):
        """Delete *path* at revision *rev* (not retried: not idempotent)."""
        request = Request(path=path, rev=rev, verb=Request.DEL)
        return self.connection.send(request, retry=False)

    def wait(self, path, rev):
        """Block until *path* changes at or after revision *rev*."""
        request = Request(path=path, rev=rev, verb=Request.WAIT)
        return self.connection.send(request)

    def stat(self, path, rev):
        """Return metadata for *path* at revision *rev*."""
        request = Request(path=path, rev=rev, verb=Request.STAT)
        return self.connection.send(request)

    def access(self, secret):
        """Authenticate this connection with *secret*."""
        request = Request(value=secret, verb=Request.ACCESS)
        return self.connection.send(request)

    def _getdir(self, path, offset=0, rev=None):
        request = Request(path=path, offset=offset, verb=Request.GETDIR)
        if rev:
            request.rev = rev
        return self.connection.send(request)

    def _walk(self, path, offset=0, rev=None):
        request = Request(path=path, offset=offset, verb=Request.WALK)
        if rev:
            request.rev = rev
        return self.connection.send(request)

    def watch(self, path, rev):
        raise NotImplementedError()

    def _list(self, method, path, offset=None, rev=None):
        """Page through *method* ('_walk' or '_getdir') from *offset* until
        the server reports RANGE (end of listing); return the responses."""
        offset = offset or 0
        entities = []
        try:
            while True:
                response = getattr(self, method)(path, offset, rev)
                entities.append(response)
                offset += 1
        except ResponseError as e:  # MODERNIZED: 'except X, e' is Python-2-only
            if e.code == Response.RANGE:
                return entities
            else:
                raise e

    def walk(self, path, offset=None, rev=None):
        """List every node under *path* (recursive)."""
        return self._list('_walk', path, offset, rev)

    def getdir(self, path, offset=None, rev=None):
        """List the immediate children of *path*."""
        return self._list('_getdir', path, offset, rev)

    def disconnect(self):
        self.connection.disconnect()

    def connect(self):
        self.connection.connect()
| 7,978 | 2,363 |
from .train_gpkconv1d import train_gpkconv1d_classifier
from .train_gprnn import train_gprnn_classifier
from .train_gpsig import train_gpsig_classifier
from .train_gpsig_ import train_gpsig_classifier_
from .train_gpsig_regression import train_gpsig_regressor
from .train_gpsig_vosf import train_gpsig_vosf_classifier
from .train_gpsig_vosf_regression import train_gpsig_vosf_regressor
from .train_gpsigrnn import train_gpsigrnn_classifier
from .train_gpsigrnn_vosf import train_gpsigrnn_vosf_classifier
| 507 | 193 |
from django.shortcuts import render, redirect, get_object_or_404
from rest_framework import generics
from .models import MyUser
from .forms.forms import UserRegisterForm, UpdateUserForm
def register(request):
    """Display and process the sign-up form; redirect to login on success.

    On POST with an invalid form the bound form (with errors) is re-rendered.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # Account created; send the user straight to the login page.
            # (Removed: unused `username` local and the dead commented-out
            # messages.success call.)
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'user/registration.html', {'form': form})
def update_user(request):
    """Show and process the profile-update form for the logged-in user.

    An invalid POST re-renders the bound form so errors are displayed.
    """
    if request.method == 'POST':
        form = UpdateUserForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('profile')
    else:
        form = UpdateUserForm(instance=request.user)
    return render(request, 'dashboard/profile-update.html', {'form': form})
def delete_user(request, id):
    """Confirm (GET) or perform (POST) deletion of the MyUser with *id*.

    404s when no such user exists; redirects to login after deletion.
    """
    target = get_object_or_404(MyUser, id=id)
    if request.method == "POST":
        target.delete()
        # account is gone -- back to the login page
        return redirect("login")
    # GET: render the confirmation page
    return render(request, "dashboard/delete_user.html", {})
def profile(request):
    """Render the profile page for the current user."""
    return render(request, 'user/profile.html', {})
"""Admin de los pacientes"""
# Django
from django.contrib import admin
# Modelos
from apis.pacient.models import Pacient
@admin.register(Pacient)
class PacientsAdmin(admin.ModelAdmin):
    """Admin configuration for patient (Pacient) records."""
    # Changelist shows only the linked user account.
    list_display = ('user',)
| 247 | 82 |
from collections import Counter
class Solution:
    def findPairs(self, nums: list[int], k: int) -> int:
        """Count unique k-diff pairs in *nums* (LeetCode 532).

        A k-diff pair is a pair of distinct indices (i, j) with
        |nums[i] - nums[j]| == k, counted once per distinct value pair.
        BUG FIX: the original annotated with `List[int]` without importing
        typing.List, which raises NameError when the class body executes.
        """
        counter = Counter(nums)
        if k == 0:
            # pairs of equal values: any value occurring at least twice
            return sum(counter[num] > 1 for num in counter)
        if k > 0:
            # distinct values: count values whose k-shifted partner exists
            return sum(num + k in counter for num in counter)
        # an absolute difference can never be negative
        return 0
| 341 | 107 |
# Copyright 2013-2014 Mitchell Stanton-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
def get_depth_cutoff(run_path, sid):
    """
    Read the consensus log to determine the base-calling depth cutoff.

    Returns the depth (int) parsed from the first line containing
    'base with no errors', or -1 when no such line exists.

    .. note:: not used anymore see: get_N_char_positions
    """
    # Fixed: Python-2-only print statement -> portable call form;
    # 'line.find(x) != -1' -> 'x in line'; docstring typo 'conensus'.
    filename = 'consensus_log.txt'
    with open(os.path.join(run_path, sid, filename)) as fin:
        for line in fin:
            if 'base with no errors' in line:
                cut_off = int(line.split()[0])
                print("Using depth coverage < %iX as missing" % (cut_off))
                return cut_off
    return -1
def get_N_char_positions(run_path, sid):
    """
    Return all the positions of N calls in the consensus.fa.

    Concatenates every non-header line of <run_path>/<sid>/consensus.fa and
    returns the 0-based indices at which the base is 'N'.
    """
    filename = 'consensus.fa'
    with open(os.path.join(run_path, sid, filename)) as fin:
        # ''.join is O(n); the original's repeated `full = full + ...` was quadratic.
        full = ''.join(line.strip() for line in fin if not line.startswith('>'))
    return [idx for idx, base in enumerate(full) if base == 'N']
| 1,660 | 515 |
# This file was created automatically from build script
# NOTE: presumably regenerated on every build — avoid editing by hand.
# Build/VCS revision number.
__revision__ = '7581'
# VCS branch the build came from.
__branch__ = 'default'
# 40-hex identifier; looks like a Mercurial/Git changeset id — confirm with build script.
__node__ = 'c386bcd24648e215a00249e32d2d4bbd1a6992db'
| 155 | 75 |
import os
def pytest_configure(config: dict) -> None:
    """Pytest hook: populate required environment variables before tests run."""
    # `config` is pytest's Config object (annotated `dict` here); it is unused.
    setup()
def setup() -> None:
    """Install the fixed test-fixture settings into the process environment."""
    test_env = {
        'AIRFLOW_API_ROOT_URL': 'http://test-airflow/api/experimental/',
        'API_DATABASE_URL': 'postgres://postgres:postgres@postgres:5432/postgres',
        'S3_ENDPOINT_URL': 'http://localhost:5000/',
        'S3_BUCKET': 'test-bucket',
        'S3_ACCESS_KEY_ID': 'inner-access-key',
        'S3_SECRET_ACCESS_KEY': 'inner-secret-access-key',
        'CELERY_BROKER_URL': 'pyamqp://rabbitmq:rabbitmq@rabbit:5672//',
        'CELERY_RESULT_BACKEND': 'rpc://',
    }
    os.environ.update(test_env)
| 626 | 267 |
# You have given an array of non-negative integers.
# Each element in the array represents your maximum jump length at that position,
# you are initially positioned at the first index of the array.
# Return the minimum number of jumps required to reach the last index.
# If it is not possible to reach the last index, return -1.
"""
Input: arr[] = {1, 3, 5, 8, 9, 2, 6, 7, 6, 8, 9}
Output: 3 (1-> 3 -> 8 -> 9)
Explanation: Jump from 1st element
to 2nd element as there is only 1 step,
now there are three options 5, 8 or 9.
If 8 or 9 is chosen then the end node 9
can be reached. So 3 jumps are made.
Input: arr[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
Output: 10
Explanation: In every step a jump
is needed so the count of jumps is 10.
"""
# [1, 3]
# Input: arr[] = {1, 3, 5, 8, 9, 2, 6, 7, 6, 8, 9}
def find_min_jumps(arr, curr=0) -> int:
    """Return the minimum number of jumps from index *curr* to the last index.

    arr[i] is the maximum jump length from position i. Returns -1 when the
    last index is unreachable, 0 when already at (or past) it.

    Fix: the original contained a placeholder (`min_jump = ?`), never added 1
    per jump, and fell off the end without returning.
    """
    # Base case: already at the final index (also covers empty/1-element arrays).
    if curr >= len(arr) - 1:
        return 0
    best = float('inf')
    # Try every reachable next position; take the cheapest completion.
    for step in range(1, arr[curr] + 1):
        remaining = find_min_jumps(arr, curr + step)
        if remaining != -1:
            best = min(best, 1 + remaining)
    return -1 if best == float('inf') else best
| 1,056 | 437 |
from instagram_private_api import Client, ClientCompatPatch
import numpy as np
import time
import getpass
import sys
def list_diff(li1, li2):
    """Return the elements of *li2* that are not present in *li1* (duplicates kept).

    Behaviourally identical to the original concatenate-and-filter version
    (items of li1 always filtered out), but uses a set for O(1) membership
    instead of an O(len(li1)) list scan per element.
    """
    known = set(li1)
    return [item for item in li2 if item not in known]
print("\n \nIMPORTANT DISCLAIMER: DO NOT USE THIS TOOL TOO FREQUENTLY (MORE THAN SEVERAL TIMES IN A MINUTE), YOUR ACCOUNT MIGHT GET FLAGGED! \n \n ")
USER_NAME = input("What is your instagram username? \n")
PASSWORD = getpass.getpass(prompt='What is your instagram password? (Your input might not appear on console, just type and press enter.) \n')
try:
api = Client(USER_NAME, PASSWORD)
except:
sys.exit('Login unsucessful, wrong password?')
results = api.feed_timeline()
rnk_token = api.generate_uuid()
u_id = api.authenticated_user_id
fwing = api.user_following(u_id, rnk_token)
fwers = api.user_followers(u_id, rnk_token)
fwing_list = []
fwers_list = []
fwing_list.append(np.sort([fwing['users'][idx]['username'] for idx in np.arange(len(fwing['users']))]))
fwers_list.append(np.sort([fwers['users'][idx]['username'] for idx in np.arange(len(fwers['users']))]))
fwing_nmid = fwing['next_max_id']
fwers_nmid = fwers['next_max_id']
print('Parsing the following list! Please be patient. There is a delay to prevent your account from getting flagged!')
while (fwing_nmid is not None):
time.sleep(5)
fwing = api.user_following(u_id, rnk_token, max_id=fwing_nmid)
fwing_list.append(np.sort([fwing['users'][idx]['username'] for idx in np.arange(len(fwing['users']))]))
fwing_nmid = fwing['next_max_id']
print('Parsing the followers list! Please be patient. There is a delay to prevent your account from getting flagged!')
while (fwers_nmid is not None):
time.sleep(5)
fwers = api.user_followers(u_id, rnk_token, max_id=fwers_nmid)
fwers_list.append(np.sort([fwers['users'][idx]['username'] for idx in np.arange(len(fwers['users']))]))
fwers_nmid = fwers['next_max_id']
fwers_list_flat = [j for sub in fwers_list for j in sub]
fwing_list_flat = [j for sub in fwing_list for j in sub]
print('Here are the unfollowers! \n\n')
print(np.sort(list_diff(fwers_list_flat, fwing_list_flat)))
print('_________________________________________')
print('Here are the followers you are not following! \n\n')
print(np.sort(list_diff(fwing_list_flat, fwers_list_flat)))
os.system("pause") | 2,343 | 863 |
from lxml import html, etree
from requests import get
from urllib.parse import unquote
# Browser User-Agent sent with every scrape request.
# NOTE(review): the UA string appears to be the same Chrome UA pasted twice
# back-to-back — servers still accept it, so left unchanged here.
agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
def getSynonims(word):
    """Scrape jeck.ru's synonym dictionary and return a list of synonyms for *word*.

    Network-dependent; parses anchor hrefs on the results page.
    """
    syns = []
    headers = {'User-Agent': agent}
    page = get('https://jeck.ru/tools/SynonymsDictionary/{}'.format(word),
               headers=headers)
    tree = html.fromstring(page.content)
    for url in tree.xpath('//td/a/@href'):
        # Links containing '+' are multi-word queries, not single synonyms.
        if '+' not in url:
            parts = url.split('Dictionary/')
            if len(parts) > 1:
                # URL-decode the synonym; the original clobbered the `word`
                # parameter here, which was confusing though harmless.
                syns.append(unquote(parts[1]))
    return syns
def getAntonyms(word):
    """Scrape ru.wiktionary.org for antonyms of *word*.

    Returns an order-preserving, de-duplicated list. Network-dependent.
    NOTE(review): the absolute XPaths are brittle against wiki layout changes.
    """
    antonyms = []
    headers = {'User-Agent': agent}
    tree = html.fromstring(
        get('https://ru.wiktionary.org/wiki/{}'.format(word), headers=headers).content)
    for k in range(1, 100):
        for i in range(1, 100):
            anto = tree.xpath(
                '/html/body/div[3]/div[3]/div[5]/div[1]/ol[3]/li[{}]/a[{}]/text()'.format(i, k))
            # Empty result = no more anchors at this slot; the original relied
            # on a bare `except:` catching the IndexError from anto[0].
            if not anto:
                break
            antonyms.append(anto[0])
    return list(dict.fromkeys(antonyms))
def getPhraseologs(word):
    """Scrape ru.wiktionary.org for phraseologisms containing *word*.

    Returns an order-preserving, de-duplicated list. Network-dependent.
    """
    phraseols = []
    headers = {'User-Agent': agent}
    tree = html.fromstring(
        get('https://ru.wiktionary.org/wiki/{}'.format(word), headers=headers).content)
    for k in range(1, 100):
        for i in range(1, 100):
            phrase = tree.xpath(
                '/html/body/div[3]/div[3]/div[5]/div[1]/ul[1]/li[{}]/a[{}]/text()'.format(k, i))
            # Empty result = no more anchors; replaces the bare `except:` that
            # caught the IndexError from phrase[0].
            if not phrase:
                break
            # Skip entries whose text is exactly 'МФА' (the IPA link).
            # NOTE(review): the original tested list membership, not substring —
            # preserved here, but a substring check may have been intended.
            if 'МФА' not in phrase:
                phraseols.append(phrase[0])
    return list(dict.fromkeys(phraseols))
def getRandomWord():
    """Fetch wiktionary's 'random page' special page and return its title."""
    headers = {'User-Agent': agent}
    response = get(
        'https://ru.wiktionary.org/wiki/%D0%A1%D0%BB%D1%83%D0%B6%D0%B5%D0%B1%D0%BD%D0%B0%D1%8F:%D0%A1%D0%BB%D1%83%D1%87%D0%B0%D0%B9%D0%BD%D0%B0%D1%8F_%D1%81%D1%82%D1%80%D0%B0%D0%BD%D0%B8%D1%86%D0%B0',
        headers=headers)
    page = html.fromstring(response.content)
    return page.xpath('/html/body/div[3]/h1/text()')[0]
def getAssociations(word):
    """Scrape wordassociations.net for words associated with *word* (lower-cased)."""
    headers = {'User-Agent': agent}
    response = get(
        'https://wordassociations.net/ru/%D0%B0%D1%81%D1%81%D0%BE%D1%86%D0%B8%D0%B0%D1%86%D0%B8%D0%B8-%D0%BA-%D1%81%D0%BB%D0%BE%D0%B2%D1%83/{}'.format(word),
        headers=headers)
    tree = html.fromstring(response.content)
    marker = 'D1%83/'
    assocs = []
    for url in tree.xpath('//li/a/@href'):
        if marker in url:
            # Keep only the URL-decoded tail after the marker segment.
            assocs.append(unquote(url.split(marker)[1]).lower())
    return assocs
def getHyperonims(word):
    """Scrape ru.wiktionary.org for hyperonyms of *word*.

    Returns an order-preserving, de-duplicated list. Network-dependent.
    """
    headers = {'User-Agent': agent}
    tree = html.fromstring(
        get('https://ru.wiktionary.org/wiki/{}'.format(word), headers=headers).content)
    hyperonims = []
    for k in range(1, 100):
        for i in range(1, 100):
            entry = tree.xpath(
                '/html/body/div[3]/div[3]/div[5]/div[1]/ol[4]/li[{}]/a[{}]/text()'.format(k, i))
            # Empty result = exhausted; replaces the original bare `except:`.
            if not entry:
                break
            hyperonims.append(entry[0])
    return list(dict.fromkeys(hyperonims))
| 3,259 | 1,312 |
from .program import program
if __name__ == "__main__":
program(prog_name="dfour")
| 88 | 30 |
"""
# Data Structures and Algorithms - Part B
# Created by Reece Benson (16021424)
"""
class Player():
    """A player's per-season record: wins, losses, scores and prize money,
    each tracked per tournament name.

    Replaces the original's recursive "insert default then call self again"
    getters/setters with direct ``setdefault``/``get`` equivalents (identical
    observable behaviour), and declares the previously missing ``score``
    class attribute for consistency with the others.
    """
    # Class-level defaults, kept for backward compatibility with code that
    # inspects them on the class itself; instances shadow them in __init__.
    name = None
    gender = None
    season = None
    win_count = None
    lost_count = None
    total_win_count = None
    total_lost_count = None
    total_money = None
    score = None
    in_first_16 = None

    def __init__(self, _name, _gender, _season):
        self.name = _name
        self.gender = _gender
        self.season = _season
        self.win_count = {}        # tournament name -> wins this season
        self.lost_count = {}       # tournament name -> losses this season
        self.total_win_count = 0
        self.total_lost_count = 0
        self.total_money = {}      # tournament name -> prize money
        self.score = {}            # tournament name -> score
        self.in_first_16 = False

    def get_name(self):
        """Return the player's name."""
        return self.name

    def get_gender(self):
        """Return the player's gender."""
        return self.gender

    def get_season(self):
        """Return the season this record belongs to."""
        return self.season

    def set_money(self, tournament_name, money):
        """Set the prize money earned at *tournament_name*."""
        self.total_money[tournament_name] = money

    def get_money(self, tournament_name):
        """Return prize money for *tournament_name*, initialising to 0."""
        return self.total_money.setdefault(tournament_name, 0)

    def set_score(self, tournament_name, score):
        """Set the score recorded at *tournament_name*."""
        self.score[tournament_name] = score

    def get_score(self, tournament_name):
        """Return the score for *tournament_name*, initialising to 0."""
        return self.score.setdefault(tournament_name, 0)

    def get_wins(self, tournament_name):
        """Return wins at *tournament_name*, initialising to 0."""
        return self.win_count.setdefault(tournament_name, 0)

    def get_lost(self, tournament_name):
        """Return losses at *tournament_name*, initialising to 0."""
        return self.lost_count.setdefault(tournament_name, 0)

    def get_total_wins(self):
        """Return total wins across all tournaments."""
        return self.total_win_count

    def get_total_lost(self):
        """Return total losses across all tournaments."""
        return self.total_lost_count

    def increment_wins(self, tournament_name):
        """Record one win at *tournament_name*; return the new tally there."""
        self.win_count[tournament_name] = self.win_count.get(tournament_name, 0) + 1
        self.total_win_count += 1
        return self.win_count[tournament_name]

    def increment_losts(self, tournament_name):
        """Record one loss at *tournament_name*; return the new tally there."""
        self.lost_count[tournament_name] = self.lost_count.get(tournament_name, 0) + 1
        self.total_lost_count += 1
        return self.lost_count[tournament_name]
# Code by Lachlan Marnoch, 2018
import numpy as np
import matplotlib.pyplot as plt
# import math as m
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.io import fits
import random as r
def sub_stars(condition):
    """Statistically subtract background stars from the cluster CMD.

    For each CMD grid cell, count the stars in the background field selected
    by ``condition`` and flag an equal number of randomly chosen cluster
    stars in the same cell for removal (method of Hu et al, per the caller).

    Relies on module-level globals: B, B_I_cell, B_cell, in_cluster.

    :param condition: boolean mask over all stars defining the background field.
    :return: tuple (subtract, condition) — ``subtract`` is a boolean mask of
        cluster stars flagged for removal; ``condition`` is passed back through.
    """
    subtract = np.zeros(len(B), dtype=bool)
    # Set seed for replicability
    r.seed(729626)
    # 4 colour cells x 20 magnitude cells — matches the 0.5 x 0.25 mag grid
    # built by the caller. NOTE(review): B_cell is offset from 18 mag while
    # B_min is 17 — confirm the intended alignment.
    for i in range(4):
        for j in range(20):
            # This gives the number of stars to remove from the corresponding cell in the cluster CMD
            n = sum((B_I_cell == i) & (B_cell == j) & condition)
            # This gives the indices of stars in the corresponding cell in the cluster CMD
            ind_cell_cluster = np.nonzero((B_I_cell == i) & (B_cell == j) & in_cluster)[0]
            # If the number of stars to be removed from a cell is greater than the number of stars in the cell, just
            # remove them all
            if n >= len(ind_cell_cluster):
                for k in ind_cell_cluster:
                    subtract[k] = True
            else:
                # This while loop randomly selects stars in the respective cluster CMD cell and removes them until the
                # same number has been removed as is in the background CMD, or else all have been.
                k = 0
                while k < n:
                    # Pick a random index from the indices of stars in the same cell in the cluster CMD
                    rr = r.randint(0, len(ind_cell_cluster) - 1)
                    ind = ind_cell_cluster[rr]
                    # If that star has not been subtracted, do so. If it has, pick a new random index.
                    if not subtract[ind]:
                        subtract[ind] = True
                        k += 1
    print('Subtracted', sum(subtract), 'stars')
    print('Stars after decontamination:', sum(in_cluster & (subtract == False)))
    return subtract, condition
def draw_cells(ax):
    """Overlay the decontamination CMD grid (module-level B_I_grid/B_grid) on *ax*."""
    # Vertical lines at each colour-grid value.
    for grid_colour in B_I_grid:
        ax.plot([grid_colour, grid_colour], [B_min, B_max], c='blue')
    # Horizontal lines at each magnitude-grid value.
    for grid_mag in B_grid:
        ax.plot([B_I_max, B_I_min], [grid_mag, grid_mag], c='blue')
def draw_sgb_box(ax):
    """Outline the subgiant-branch selection box (module-level bounds) on *ax* in red."""
    segments = [
        ([left_sgb, left_sgb], [top_sgb, bot_sgb]),    # left edge
        ([left_sgb, right_sgb], [bot_sgb, bot_sgb]),   # bottom edge
        ([right_sgb, right_sgb], [top_sgb, bot_sgb]),  # right edge
        ([left_sgb, right_sgb], [top_sgb, top_sgb]),   # top edge
    ]
    for xs, ys in segments:
        ax.plot(xs, ys, c='red')
def draw_isochrones():
    """Plot the five key isochrones (module-level arrays) on the current axes."""
    curves = [
        (B_I_iso_min, B_iso_min, 'purple', '1 Gyrs'),
        (B_I_iso_max, B_iso_max, 'violet', '3.09 Gyrs'),
        (B_I_138, B_138, 'blue', '1.38 Gyrs'),
        (B_I_218, B_218, 'red', '2.18 Gyrs'),
        (B_I_best, B_best, 'green', '1.58 Gyrs'),
    ]
    for colour_index, magnitude, colour, label in curves:
        plt.plot(colour_index, magnitude, c=colour, label=label)
def draw_all_isochrones():
    """Plot every isochrone in module-level iso_list on the current axes."""
    for iso in iso_list:
        colour_index = iso[:, 29] - iso[:, 34] + B_I_offset
        magnitude = iso[:, 29] + DM
        plt.plot(colour_index, magnitude)
def get_nearest(xx, arr):
    """Return the index of the element of *arr* closest to the scalar *xx*."""
    return np.argmin(np.abs(arr - xx))
def mse(yy, y_dash):
    """Return the mean squared error between arrays *yy* and *y_dash*."""
    residuals = yy - y_dash
    return sum(residuals ** 2) / len(yy)
# Distance modulus (correction for absolute magnitudes, which are the values we have in the isochrone data, to apparent
# magnitudes, the values in the DOLPHOT data) to the SMC
DM = 19.35
# Colour offset added to all isochrone B-I values before comparison with the data.
B_I_offset = 0.14
# B_I_offset = 0
# Pixel Scale, from FITS header
pixel_scale = 0.03962000086903572 # arcsec
# Limits of the frame in equatorial coordinates, to correct orientation
# NOTE(review): 'top' is more negative than 'bottom' — declination decreases
# upward in this frame; confirm the intended orientation.
top = -71.785
bottom = -71.737
left = 16.904
right = 17.051
# Define CMD region of interest
B_I_max = 3
B_I_min = -1
B_max = 23
B_min = 17
# Define CMD region of subgiant branch
top_sgb = 21.3
bot_sgb = 20.25
left_sgb = 0.8
right_sgb = 1.25
# Importing data
print('Importing Data, thank you for your patience')
data_dir = "..//data//"
# The following two lines provide a means of converting between image pixel coordinates and sky coordinates
# (ie Right Ascension and Declination)
hdulist = fits.open(data_dir + "ibhy12050_drz.fits")
w = wcs.WCS(hdulist[2].header)
data = np.genfromtxt(data_dir + "wfc3_attempt_2")
print('Number of Stars:', len(data))
# Include bright stars only; this excludes various artifacts, galaxies, and some background stars
# NOTE(review): column 10 presumably holds the DOLPHOT object-type flag (1 = good star) — confirm.
print('Excluding bad stars')
data = data[data[:, 10] == 1]
print('Number of Stars:', len(data))
# Trim out objects with sharpness not in the range -0.5 < sharpness < 0.5
print('Excluding stars with |sharpness| > 0.5')
data = data[data[:, 6] < 0.5]
data = data[data[:, 6] > -0.5]
print('Number of Stars:', len(data))
# Cut any stars outside of the CMD region of interest (Main Sequence Turnoff and subgiant branch).
# (Currently disabled: the three filter lines below are commented out, so this message is a no-op.)
print('Excluding stars outside CMD region of interest')
# data = data[data[:, 15] - data[:, 28] < B_I_max]
# data = data[data[:, 15] - data[:, 28] > B_I_min]
# data = data[data[:, 15] < B_max]
print('Number of Stars:', len(data))
print()
print('Calculating')
# F475W magnitude
B = data[:, 15]
# F814 magnitude
I = data[:, 28]
# B-I color
B_I = B - I
x_pix = data[:, 2]
y_pix = data[:, 3]
x = pixel_scale * x_pix
y = pixel_scale * y_pix
# Convert x_pix and y_pix (pixel coordinates) to world coordinates.
pixel_coords = np.array([x_pix, y_pix]).transpose()
world_coords = w.all_pix2world(pixel_coords, 1)
ra = world_coords[:, 0]
dec = world_coords[:, 1]
# In the FITS files, Right Ascension is treated as the horizontal coordinate, and Declination as the vertical. We will
# continue to do so here for consistency.
# The centre of the cluster is at RA = 01h 07m 56.22s, Dec = -71deg 46' 04.40'', according to Li et al
# Convert these to degrees (because the sky coordinate system is clunky as hell)
# NOTE(review): the dec string uses '46min' rather than the usual '46m' — confirm astropy parses it as intended.
c = SkyCoord(ra='01h07m56.22s', dec='-71d46min04.40s')
centre_ra = c.ra.deg
centre_dec = c.dec.deg
print()
print('Centre position: ')
print('RA:', centre_ra)
print('DEC:', centre_dec)
# Find the centre of the cluster in pixel coordinates.
centre = w.all_world2pix([[centre_ra, centre_dec]], 1)
centre_x_pix = centre[0, 0]
centre_y_pix = centre[0, 1]
centre_x = centre_x_pix * pixel_scale
centre_y = centre_y_pix * pixel_scale
print('x (pixels):', centre_x_pix)
print('y (pixels):', centre_y_pix)
# Calculate angular distance of each star from centre of cluster.
print()
print('Finding stars within 50 arcsec of cluster centre')
pix_dist = np.sqrt((x_pix - centre_x_pix) ** 2 + (y_pix - centre_y_pix) ** 2)
# NOTE(review): equ_dist has no cos(dec) factor on the RA term; it is only used for the comparison plot below.
equ_dist = np.sqrt((ra - centre_ra) ** 2 + (dec - centre_dec) ** 2)
dist = pix_dist * pixel_scale
# Find the stars that are within 50 arcsec of the cluster centre (in accordance with Li et al)
in_cluster = dist < 50
in_cluster_equ = np.array(equ_dist < 50. / 3600.)
print('Stars in cluster:', sum(in_cluster == True))
# Find stars in the SGB region
in_sgb = (B < top_sgb) & (B > bot_sgb) & (B_I < right_sgb) & (B_I > left_sgb)
print('SGB: ', sum(in_sgb))
# Decontamination
# Method from Hu et al
print('Decontaminating')
# Divide CMD field1 into cells 0.5*0.25 mag^2
B_I_grid = np.arange(B_I_min, B_I_max, step=0.5)
B_grid = np.arange(B_min, B_max, step=0.25)
B_I_cell = np.floor(B_I / 0.5)
# NOTE(review): cells are offset from 18 mag although B_min is 17 — confirm intended.
B_cell = np.floor((B - 18.) / 0.25)
# Four alternative background-field definitions; each returns a removal mask.
remove1, field1 = sub_stars(condition=y_pix <= 800)
remove2, field2 = sub_stars(condition=x_pix <= 800)
remove3, field3 = sub_stars(condition=(80 <= dist) & (dist <= 100))
remove4, field4 = sub_stars(condition=dist >= 80)
# Import isochrone data
isos = np.genfromtxt(data_dir + "isochrones3.dat")
# Get the list of ages used in the isochrones
ages = np.unique(isos[:, 1])
# Separate the combined isochrone file into one table per age
iso_list = []
for i, a in enumerate(ages):
    iso = isos[isos[:, 1] == a]
    iso_list.append(iso)
select_sgb = in_sgb & in_cluster & (remove3 == False)
# Find the points on the isochrone with the nearest x-values to our SGB stars
# and score each age by mean squared error against the SGB magnitudes.
mses = np.zeros(len(ages))
for j, iso in enumerate(iso_list):
    y_dash = np.zeros(sum(select_sgb))
    for i, xx in enumerate(B_I[select_sgb]):
        nearest = get_nearest(xx, iso[:, 29] - iso[:, 34] + B_I_offset)
        y_dash[i] = iso[nearest, 29] + DM
    mses[j] = (mse(B[select_sgb], y_dash))
print('Selected stars in SGB:', sum(select_sgb))
# Extract some useful individual isochrones
# (columns 29 and 34 appear to be the F475W and F814W magnitudes, per their use
# as B and I above — confirm against the isochrone file header)
# Our youngest isochrone
iso_min = iso_list[0]
B_iso_min = iso_min[:, 29] + DM
I_iso_min = iso_min[:, 34] + DM
B_I_iso_min = B_iso_min - I_iso_min + B_I_offset
# 1.38 Gyrs
iso_138 = iso_list[np.abs(ages - 1.38e9).argmin()]
B_138 = iso_138[:, 29] + DM
I_138 = iso_138[:, 34] + DM
B_I_138 = B_138 - I_138 + B_I_offset
# 2.18 Gyrs
iso_218 = iso_list[np.abs(ages - 2.18e9).argmin()]
B_218 = iso_218[:, 29] + DM
I_218 = iso_218[:, 34] + DM
B_I_218 = B_218 - I_218 + B_I_offset
# Our best-fitting isochrone:
iso_best = iso_list[mses.argmin()]
B_best = iso_best[:, 29] + DM
I_best = iso_best[:, 34] + DM
B_I_best = B_best - I_best + B_I_offset
# Our oldest isochrone:
iso_max = iso_list[-1]
B_iso_max = iso_max[:, 29] + DM
I_iso_max = iso_max[:, 34] + DM
B_I_iso_max = B_iso_max - I_iso_max + B_I_offset
# PLOTS
print('Plotting')
# Sky maps
# Pixel Coordinates, showing cluster centre
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_title('Star Pixel Coordinates')
ax1.scatter(x_pix, y_pix, c='black', marker=',', s=1)
# ax1.scatter(x_pix[in_cluster], y_pix[in_cluster], c='blue', marker=',', s=1)
# ax1.scatter(centre_x_pix, centre_y_pix, c='green')
ax1.axis('equal')
ax1.set_title('')
ax1.set_xlabel('x (pixels)')
ax1.set_ylabel('y (pixels)')
ax1.set_xlim(0, 4000)
ax1.set_ylim(0, 4500)
plt.show()
# Equatorial Coordinates, showing cluster centre
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set_title('Star Equatorial Coordinates')
ax2.scatter(ra, dec, c='black', marker=',', s=1)
ax2.scatter(centre_ra, centre_dec, c='green')
# ax2.set_xlim(left, right)
# ax2.set_ylim(bottom, top)
# ax2.axis('equal')
ax2.set_xlabel('Right Ascension (deg)')
ax2.set_ylabel('Declination (deg)')
# Fix: plt.show() does not take a figure argument — the Figure previously
# passed here was interpreted as the `block` flag (or rejected on newer
# matplotlib). Same fix applied to the fig4 and fig5 calls below.
plt.show()
# Histogram of angular distance from cluster centre
plt.title('Angular Distance from Cluster Centre')
plt.xlabel('Angle x (arcseconds)')
plt.ylabel('Angle y (arcsecs)')
plt.hist(dist, bins=50)
plt.show()
# Plot of both cluster determination methods (pixel and WCS coordinates), in equatorial coordinates
fig4 = plt.figure()
ax4 = fig4.add_subplot(111)
ax4.axis('equal')
ax4.set_title('Equatorial Coordinates of Stars')
ax4.scatter(ra, dec, c='black', marker=',', s=1)
ax4.scatter(ra[in_cluster], dec[in_cluster], c='blue', marker=',', s=1)
ax4.scatter(ra[in_cluster_equ], dec[in_cluster_equ], c='red', marker=',', s=1)
ax4.scatter(centre_ra, centre_dec, c='green')
ax4.set_xlim(left, right)
ax4.set_ylim(bottom, top)
ax4.set_xlabel('Right Ascension (deg)')
ax4.set_ylabel('Declination (deg)')
plt.show()
# The same again, but in image (pixel*pixel scale) coordinates
fig5 = plt.figure()
ax5 = fig5.add_subplot(111)
ax5.axis('equal')
ax5.set_title('Image Coordinates of Stars')
ax5.scatter(x, y, c='black', marker=',', s=1)
ax5.scatter(x[in_cluster], y[in_cluster], c='blue', marker=',', s=1)
# ax5.scatter(x[in_cluster_equ], y[in_cluster_equ], c='red', marker=',', s=1)
ax5.scatter(centre_x, centre_y, c='green')
# ax5.set_xlim(left, right)
# ax5.set_ylim(bottom, top)
ax5.set_xlabel('x (arcsec)')
ax5.set_ylabel('y (arcsec)')
plt.show()
# Hertzsprung-Russell / Colour-Magnitude Diagrams
# NOTE: fig1/ax1 are re-bound here, shadowing the sky-map figure above.
# Raw HR Diagram
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_title('Unprocessed Colour-Magnitude Diagram for stars in image')
ax1.scatter(B_I, B, c='black', marker=',', s=1)
ax1.set_xlim(0, 2)
ax1.set_ylim(23, 18)
ax1.set_xlabel('B - I Colour Index')
ax1.set_ylabel('B magnitude')
plt.show()
# HR Diagram limited to cluster stars
plt.title('Cluster CMD Before Statistical Subtraction')
plt.scatter(B_I[in_cluster], B[in_cluster], c='black', marker=',', s=1)
plt.xlim(0, 2)
plt.ylim(23, 18)
plt.xlabel('B - I Colour Index')
plt.ylabel('B magnitude')
plt.show()
# HR Diagram with stars labelled not in cluster
plt.title('Background Star Colour-Magnitude Diagram')
plt.scatter(B_I[in_cluster == False], B[in_cluster == False], c='black', marker=',', s=1)
plt.xlim(0, 2)
plt.ylim(23, 18)
plt.xlabel('B - I Colour Index')
plt.ylabel('B magnitude')
plt.show()
# Demonstration of Decontamination Technique
# 2x2 panels: background CMD, raw cluster CMD, subtracted stars, cleaned cluster CMD.
fig7, ax7 = plt.subplots(2, 2)
# NOTE(review): this panel plots field4 (dist >= 80) but its title says
# 'y <= 800' (the field1 condition) — one of the two looks wrong.
ax7[0, 0].set_title('Background CMD, with y <= 800')
draw_cells(ax7[0, 0])
ax7[0, 0].scatter(B_I[field4], B[field4], c='black', marker=',', s=1)
ax7[0, 0].set_xlabel('B - I Colour Index')
ax7[0, 0].set_ylabel('B magnitude')
ax7[0, 0].set_xlim(0, 2)
ax7[0, 0].set_ylim(23, 18)
draw_sgb_box(ax7[0, 0])
ax7[0, 1].set_title('Cluster CMD')
draw_cells(ax7[0, 1])
ax7[0, 1].scatter(B_I[in_cluster], B[in_cluster], c='black', marker=',', s=1)
ax7[0, 1].set_xlabel('B - I Colour Index')
ax7[0, 1].set_ylabel('B magnitude')
ax7[0, 1].set_xlim(0, 2)
ax7[0, 1].set_ylim(23, 18)
draw_sgb_box(ax7[0, 1])
ax7[1, 0].set_title('Cluster CMD, subtracted stars in red')
draw_cells(ax7[1, 0])
ax7[1, 0].scatter(B_I[in_cluster & (remove4 == False)], B[in_cluster & (remove4 == False)], c='black', marker=',', s=1)
ax7[1, 0].scatter(B_I[remove4], B[remove4], c='red', marker=',', s=1)
ax7[1, 0].set_xlabel('B - I Colour Index')
ax7[1, 0].set_ylabel('B magnitude')
ax7[1, 0].set_xlim(0, 2)
ax7[1, 0].set_ylim(23, 18)
draw_sgb_box(ax7[1, 0])
ax7[1, 1].set_title('Cluster CMD after subtraction')
draw_cells(ax7[1, 1])
ax7[1, 1].scatter(B_I[in_cluster & (remove4 == False)], B[in_cluster & (remove4 == False)], c='black', marker=',', s=1)
ax7[1, 1].set_xlabel('B - I Colour Index')
ax7[1, 1].set_ylabel('B magnitude')
ax7[1, 1].set_xlim(0, 2)
ax7[1, 1].set_ylim(23, 18)
draw_sgb_box(ax7[1, 1])
plt.show()
# Recreate Figure 1 in Li et al
# 3 rows x 4 columns: row 0 = cleaned cluster CMD, row 1 = background-field CMD,
# row 2 = spatial map (cluster blue, field red); one column per field choice (field1..field4).
fig8, ax8 = plt.subplots(3, 4)
ax8[0, 0].scatter(B_I[in_cluster & (remove1 == False)], B[in_cluster & (remove1 == False)], c='black', marker=',', s=1)
ax8[0, 0].set_xlabel('B - I Colour Index')
ax8[0, 0].set_ylabel('B magnitude')
ax8[0, 0].set_xlim(0, 2)
ax8[0, 0].set_ylim(23, 18)
draw_sgb_box(ax8[0, 0])
ax8[1, 0].scatter(B_I[field1], B[field1], c='black', marker=',', s=1)
ax8[1, 0].set_xlabel('B - I Colour Index')
ax8[1, 0].set_ylabel('B magnitude')
ax8[1, 0].set_xlim(0, 2)
ax8[1, 0].set_ylim(23, 18)
ax8[2, 0].axis('equal')
ax8[2, 0].scatter(x, y, c='black', marker=',', s=1)
ax8[2, 0].scatter(x[in_cluster], y[in_cluster], c='blue', marker=',', s=1)
ax8[2, 0].scatter(x[field1], y[field1], c='red', marker=',', s=1)
ax8[2, 0].set_xlabel('x (arcsec)')
ax8[2, 0].set_ylabel('y (arcsec)')
ax8[0, 1].scatter(B_I[in_cluster & (remove2 == False)], B[in_cluster & (remove2 == False)], c='black', marker=',', s=1)
ax8[0, 1].set_xlabel('B - I Colour Index')
ax8[0, 1].set_ylabel('B magnitude')
ax8[0, 1].set_xlim(0, 2)
ax8[0, 1].set_ylim(23, 18)
draw_sgb_box(ax8[0, 1])
ax8[1, 1].scatter(B_I[field2], B[field2], c='black', marker=',', s=1)
ax8[1, 1].set_xlabel('B - I Colour Index')
ax8[1, 1].set_ylabel('B magnitude')
ax8[1, 1].set_xlim(0, 2)
ax8[1, 1].set_ylim(23, 18)
ax8[2, 1].axis('equal')
ax8[2, 1].scatter(x, y, c='black', marker=',', s=1)
ax8[2, 1].scatter(x[in_cluster], y[in_cluster], c='blue', marker=',', s=1)
ax8[2, 1].scatter(x[field2], y[field2], c='red', marker=',', s=1)
ax8[2, 1].set_xlabel('x (arcsec)')
ax8[2, 1].set_ylabel('y (arcsec)')
ax8[0, 2].scatter(B_I[in_cluster & (remove3 == False)], B[in_cluster & (remove3 == False)], c='black', marker=',', s=1)
ax8[0, 2].set_xlabel('B - I Colour Index')
ax8[0, 2].set_ylabel('B magnitude')
ax8[0, 2].set_xlim(0, 2)
ax8[0, 2].set_ylim(23, 18)
draw_sgb_box(ax8[0, 2])
ax8[1, 2].scatter(B_I[field3], B[field3], c='black', marker=',', s=1)
ax8[1, 2].set_xlabel('B - I Colour Index')
ax8[1, 2].set_ylabel('B magnitude')
ax8[1, 2].set_xlim(0, 2)
ax8[1, 2].set_ylim(23, 18)
ax8[2, 2].axis('equal')
ax8[2, 2].scatter(x, y, c='black', marker=',', s=1)
ax8[2, 2].scatter(x[in_cluster], y[in_cluster], c='blue', marker=',', s=1)
ax8[2, 2].scatter(x[field3], y[field3], c='red', marker=',', s=1)
ax8[2, 2].set_xlabel('x (arcsec)')
ax8[2, 2].set_ylabel('y (arcsec)')
ax8[0, 3].scatter(B_I[in_cluster & (remove4 == False)], B[in_cluster & (remove4 == False)], c='black', marker=',', s=1)
ax8[0, 3].set_xlabel('B - I Colour Index')
ax8[0, 3].set_ylabel('B magnitude')
ax8[0, 3].set_xlim(0, 2)
ax8[0, 3].set_ylim(23, 18)
draw_sgb_box(ax8[0, 3])
ax8[1, 3].scatter(B_I[field4], B[field4], c='black', marker=',', s=1)
ax8[1, 3].set_xlabel('B - I Colour Index')
ax8[1, 3].set_ylabel('B magnitude')
ax8[1, 3].set_xlim(0, 2)
ax8[1, 3].set_ylim(23, 18)
ax8[2, 3].axis('equal')
ax8[2, 3].scatter(x, y, c='black', marker=',', s=1)
ax8[2, 3].scatter(x[in_cluster], y[in_cluster], c='blue', marker=',', s=1)
ax8[2, 3].scatter(x[field4], y[field4], c='red', marker=',', s=1)
ax8[2, 3].set_xlabel('x (arcsec)')
ax8[2, 3].set_ylabel('y (arcsec)')
plt.show()
# Show isochrones
# Cleaned cluster CMD with the five key isochrones and the SGB selection overlaid.
plt.title('Colour-Magnitude Diagram with isochrones')
plt.scatter(B_I[in_cluster & (remove3 == False)], B[in_cluster & (remove3 == False)], c='black', marker=',', s=1)
plt.scatter(B_I[select_sgb], B[select_sgb], c='violet',
            marker=',', label='SGB stars')
draw_isochrones()
draw_sgb_box(plt)
plt.xlim(0.1, 1.5)
plt.ylim(22.5, 19.5)
plt.xlabel('B - I Colour Index')
plt.ylabel('B magnitude')
plt.legend()
plt.show()
# Same CMD again, but with every isochrone in the grid overlaid.
plt.title('Colour-Magnitude Diagram with isochrones')
plt.xlabel('B - I Colour Index')
plt.ylabel('B magnitude')
plt.scatter(B_I[in_cluster & (remove3 == False)], B[in_cluster & (remove3 == False)], c='black', marker=',', s=1)
draw_all_isochrones()
draw_sgb_box(plt)
plt.scatter(B_I[select_sgb], B[select_sgb], c='violet',
            marker=',', label='SGB stars')
plt.xlim(0.1, 1.5)
plt.ylim(22.5, 19.5)
plt.legend()
plt.show()
# Plot MSEs of isochrones
plt.title("Mean Squared Error of Isochrones to the Subgiant Branch")
plt.plot(ages, mses)
plt.xlabel('Age')
plt.ylabel('Mean Squared Error')
plt.show()
# Report the best-fitting age and summarise the age grid.
print('Optimum age:', ages[mses.argmin()])
print(len(ages))
print(min(ages))
print(max(ages))
| 17,728 | 7,987 |
# Generated by Django 2.1.5 on 2019-02-11 20:15
import jsonfield.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the JSON `updateable_by` field
    (default: empty list) to the `spidertag` model."""

    dependencies = [
        ('spider_tags', '0004_auto_20190117_1253'),
    ]
    operations = [
        migrations.AddField(
            model_name='spidertag',
            name='updateable_by',
            field=jsonfield.fields.JSONField(blank=True, default=list),
        ),
    ]
| 435 | 158 |
from picamera import PiCamera
from time import sleep
# Raspberry Pi camera demo: capture five stills; preview/video variants kept
# below as commented-out alternatives.
camera = PiCamera()
# preview
# camera.start_preview()
# sleep(5)
# camera.stop_preview()
# take 5 pics
camera.start_preview()
# One still every 5 seconds, written to the Desktop as image0.jpg .. image4.jpg.
for i in range(5):
    sleep(5)
    camera.capture('/home/pi/Desktop/image%s.jpg' % i)
camera.stop_preview()
# video record
# camera.start_preview()
# camera.start_recording('/home/pi/Desktop/video.h264')
# sleep(5)
# camera.stop_recording()
# camera.stop_preview()
"""
Copyright (c) 2020 - present NekrodNIK, Stepan Skriabin, rus-ai and other.
Look at the file AUTHORS.md(located at the root of the project) to get the
full list.
This file is part of Morelia Server.
Morelia Server is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Morelia Server is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Morelia Server. If not, see <https://www.gnu.org/licenses/>.
"""
import sqlobject as orm
class UserConfig(orm.SQLObject):
    """
    Table containing data about user and his settings.
    Args:
        uuid (str, required, unique): unique user id which automated
            generating server
        login (str, required): name for authentication on server
        password (str, required): password for authentication on server
        hash_password (str, optional): hash-function generated from user
            password
        username (str, optional): name for added in information about user
        is_bot (bool, optional): True if user not human
        auth_id (str, optional): authentication token
        token_ttl (int, optional): time-to-live of the auth_id token
            (units not specified here — confirm against the token-issuing code)
        email (str, optional): user email for added in information about user
        avatar (bytes, optional): user image for added in information about user
        bio (str, optional): text for added in information about user
        salt (bytes, optional): added in password string for create hash_password
        key (bytes, optional): added in password string for create hash_password
    """
    uuid = orm.StringCol(notNone=True, unique=True)
    login = orm.StringCol(notNone=True)
    password = orm.StringCol(notNone=True)
    hash_password = orm.StringCol(default=None)
    username = orm.StringCol(default=None)
    is_bot = orm.BoolCol(default=False)
    auth_id = orm.StringCol(default=None)
    token_ttl = orm.IntCol(default=None)
    email = orm.StringCol(default=None)
    avatar = orm.BLOBCol(default=None)
    bio = orm.StringCol(default=None)
    salt = orm.BLOBCol(default=None)
    key = orm.BLOBCol(default=None)
    # Connection to Message and Flow table
    messages = orm.MultipleJoin('Message')
    flows = orm.RelatedJoin('Flow')
class Flow(orm.SQLObject):
    """
    Flow table containing information about threads and their types.
    Args:
        uuid (str, required, unique): unique flow id which automated
            generating server
        time_created (int, optional): data and time when flow is created
        flow_type (str, optional): which contains chat, channel, group
        title (str, optional): name added in public information about flow
        info (str, optional): text added in public information about flow
        owner (str, optional): identifier of the flow's owner — presumably a
            user uuid; confirm against the code that sets it
    """
    uuid = orm.StringCol(notNone=True, unique=True)
    time_created = orm.IntCol(default=None)
    flow_type = orm.StringCol(default=None)
    title = orm.StringCol(default=None)
    info = orm.StringCol(default=None)
    owner = orm.StringCol(default=None)
    # Connection to the Message and UserConfig table
    messages = orm.MultipleJoin('Message')
    users = orm.RelatedJoin('UserConfig')
class Message(orm.SQLObject):
    """
    Message table containing information about user messages.
    Args:
        uuid (str, required, unique): unique message id which automated
            generating server
        text (str, optional): contains message text
        time (int, optional): time when message is created
        file_picture (bytes, optional): contains appended image
        file_video (bytes, optional): contains appended video
        file_audio (bytes, optional): contains appended audio
        file_document (bytes, optional): contains appended document
        emoji (bytes, optional): contains appended image/emoji
        edited_time (int, optional): time when user last time is corrected his
            message
        edited_status (bool, optional): True if user corrected his message
    """
    uuid = orm.StringCol(notNone=True, unique=True)
    text = orm.StringCol(default=None)
    time = orm.IntCol(default=None)
    file_picture = orm.BLOBCol(default=None)
    file_video = orm.BLOBCol(default=None)
    file_audio = orm.BLOBCol(default=None)
    file_document = orm.BLOBCol(default=None)
    emoji = orm.BLOBCol(default=None)
    edited_time = orm.IntCol(default=None)
    edited_status = orm.BoolCol(default=False)
    # Connection to UserConfig and Flow table
    user = orm.ForeignKey('UserConfig')
    flow = orm.ForeignKey('Flow')
class Admin(orm.SQLObject):
    """
    Admin table containing information about users with administrator role.

    Attributes:
        username (str, required, unique): name of the user who is granted
            administrator rights
        hash_password (str, required): hash generated from the
            administrator password (never store the plain password)
    """
    username = orm.StringCol(notNone=True, unique=True)
    hash_password = orm.StringCol(notNone=True)
| 5,466 | 1,494 |
"""
Author : Mehmet Gokcay Kabatas
Mail : mgokcaykdev@gmail.com
Version : 0.1
Date : 04/12/2019
Update : 14/12/2019
Python : 3.6.5
Update Note : Arranging system of ODE methods and descriptions.
This script written by @Author for personal usage.
Prerequest : numpy
"""
import numpy as np
class ODE():
    """
    Numerical integrators for Ordinary Differential Equations (ODE).

    Single-equation methods integrate dy/dx = f(x, y) from xi to xf with
    step size h; system methods integrate a vector of equations whose
    derivative functions each take a single ``args`` list ``[x, y1, y2, ...]``.

    @Methods :
        - Euler
        - Heun
        - Midpoint
        - RK2
        - RK3
        - RK4
        - RK5
        - System of ODE's Euler  (SystemEuler)
        - System of ODE's RK4    (SystemRK4)

    @Usage :
        ...
        solver = ODE()
        solver.@Methods
        ...

    Notes on fixes relative to the previous revision:
        * every stepper now clamps the last step so the final grid point is
          exactly ``xf`` (the old ``while xi + h <= xf`` loop silently
          dropped the final partial step);
        * ``SystemRK4`` evaluates every stage derivative from a complete,
          consistent state vector (the old code overwrote ``args`` while
          still evaluating, mixing old and new stage values);
        * the system methods no longer mutate the caller's ``yi`` list.
    """

    def _integrate(self, xi, xf, yi, h, slope):
        """Generic driver for the single-equation steppers.

        ``slope(x, y, step)`` returns the weighted slope for one step;
        the method advances y by ``slope * step``.  The final step is
        clamped so the last grid point is exactly ``xf``.

        Returns the lists of x and y points, endpoints included.
        """
        x_arr, y_arr = [xi], [yi]
        while xi < xf:
            # clamp the last step so we land exactly on xf
            step = min(h, xf - xi)
            yi = yi + slope(xi, yi, step) * step
            xi += step
            x_arr.append(xi)
            y_arr.append(yi)
        return x_arr, y_arr

    def _integrate_system(self, xi, xf, yi, h, advance):
        """Generic driver for the system steppers.

        ``advance(x, y, step)`` returns the full updated y-vector for one
        step.  ``yi`` is copied so the caller's list is never mutated.

        Returns numpy arrays of x points and y vectors.
        """
        yi = list(yi)
        x_arr, y_arr = np.array([xi]), np.array([yi])
        while xi < xf:
            step = min(h, xf - xi)
            yi = advance(xi, yi, step)
            xi += step
            x_arr = np.append(x_arr, [xi], 0)
            y_arr = np.append(y_arr, [yi], 0)
        return x_arr, y_arr

    def Euler(self, xi, xf, yi, h, dydx):
        """ Euler Method for ODE.
        Arguments :
            xi = Initial value of x.
            xf = Final value of x.
            yi = Initial value of y.
            h  = Step size.
            dydx = derivative function f(x, y).
        @ Example :
            def df(x, y):
                return (2*x + y)
            solver = ODE()
            solver.Euler(0, 5, 2, 0.2, df)
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        return self._integrate(xi, xf, yi, h,
                               lambda x, y, step: dydx(x, y))

    def SystemEuler(self, xi, xf, yi, h, dydx):
        """ Euler Method for System of ODE.
        @Note : yi and dydx should be sequences of the same length.
        Each derivative function takes one list ``args = [x, y1, y2, ...]``.
        @ Example :
            dy1dx : -0.5*x + y1
            dy2dx : 0.2*y1 + 0.6*y2 - 3*x
            def df1(args):
                return (-0.5 * args[0] + args[1])
            def df2(args):
                return (0.2 * args[1] + 0.6 * args[2] - 3 * args[0])
            solver = ODE()
            solver.SystemEuler(0, 5, [2, 2], 0.2, [df1, df2])
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def advance(x, y, step):
            # snapshot the state once so every equation sees the same values
            args = [x] + y
            return [y[j] + dydx[j](args) * step for j in range(len(dydx))]
        return self._integrate_system(xi, xf, yi, h, advance)

    def Heun(self, xi, xf, yi, h, dydx):
        """ Heun (predictor-corrector) Method for ODE.
        Arguments as in :meth:`Euler`.
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def slope(x, y, step):
            k1 = dydx(x, y)
            predictor = y + k1 * step          # Euler predictor
            return (k1 + dydx(x + step, predictor)) / 2
        return self._integrate(xi, xf, yi, h, slope)

    def Midpoint(self, xi, xf, yi, h, dydx):
        """ Midpoint Method for ODE.
        Arguments as in :meth:`Euler`.
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def slope(x, y, step):
            y_half = y + dydx(x, y) * step / 2
            return dydx(x + step / 2, y_half)
        return self._integrate(xi, xf, yi, h, slope)

    def RK2(self, xi, xf, yi, h, a1, a2, p1, q11, dydx):
        """ Second Order Runge Kutta Method for ODE.
        Arguments :
            a1, a2, p1, q11 = Calculation constants.
            @Prop:
                a1 + a2 = 1
                a2 . p1 = 1/2
                a2 . q11 = 1/2
            other arguments as in :meth:`Euler`.
        @ Example :
            solver.RK2(0, 5, 2, 0.2, 1/2, 1/2, 1, 1, df)
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def slope(x, y, step):
            k1 = dydx(x, y)
            k2 = dydx(x + p1 * step, y + q11 * k1 * step)
            return a1 * k1 + a2 * k2
        return self._integrate(xi, xf, yi, h, slope)

    def RK3(self, xi, xf, yi, h, dydx):
        """ Third Order Runge Kutta Method for ODE.
        Arguments as in :meth:`Euler`.
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def slope(x, y, step):
            k1 = dydx(x, y)
            k2 = dydx(x + step / 2, y + k1 * step / 2)
            k3 = dydx(x + step, y - k1 * step + 2 * k2 * step)
            return (k1 + 4 * k2 + k3) / 6
        return self._integrate(xi, xf, yi, h, slope)

    def RK4(self, xi, xf, yi, h, dydx):
        """ Fourth Order Runge Kutta Method for ODE.
        Arguments as in :meth:`Euler`.
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def slope(x, y, step):
            k1 = dydx(x, y)
            k2 = dydx(x + step / 2, y + k1 * step / 2)
            k3 = dydx(x + step / 2, y + k2 * step / 2)
            k4 = dydx(x + step, y + k3 * step)
            return (k1 + 2 * k2 + 2 * k3 + k4) / 6
        return self._integrate(xi, xf, yi, h, slope)

    def SystemRK4(self, xi, xf, yi, h, dydx):
        """ Fourth Order Runge Kutta Method for System of ODE.
        @Note : yi and dydx should be sequences of the same length.
        Each derivative function takes one list ``args = [x, y1, y2, ...]``
        (see :meth:`SystemEuler` for an example).
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        n = len(dydx)

        def evaluate(x, y):
            # build the full state first, then evaluate every equation,
            # so no equation ever sees a half-updated stage vector
            args = [x] + list(y)
            return [f(args) for f in dydx]

        def advance(x, y, step):
            k1 = evaluate(x, y)
            k2 = evaluate(x + step / 2,
                          [y[i] + k1[i] * step / 2 for i in range(n)])
            k3 = evaluate(x + step / 2,
                          [y[i] + k2[i] * step / 2 for i in range(n)])
            k4 = evaluate(x + step,
                          [y[i] + k3[i] * step for i in range(n)])
            return [y[i] + (k1[i] + 2 * k2[i] + 2 * k3[i] + k4[i]) * step / 6
                    for i in range(n)]

        return self._integrate_system(xi, xf, yi, h, advance)

    def RK5(self, xi, xf, yi, h, dydx):
        """ Fifth Order (Butcher) Runge Kutta Method for ODE.
        Arguments as in :meth:`Euler`.
        Return :
            x_arr, y_arr : Array of x and y point(s).
        """
        def slope(x, y, step):
            k1 = dydx(x, y)
            k2 = dydx(x + step / 4, y + k1 * step / 4)
            k3 = dydx(x + step / 4, y + k1 * step / 8 + k2 * step / 8)
            k4 = dydx(x + step / 2, y - k2 * step / 2 + k3 * step)
            k5 = dydx(x + 3 * step / 4,
                      y + 3 * k1 * step / 16 + 9 * k4 * step / 16)
            k6 = dydx(x + step,
                      y - 3 * k1 * step / 7 + 2 * k2 * step / 7
                      + 12 * k3 * step / 7 - 12 * k4 * step / 7
                      + 8 * k5 * step / 7)
            return (7 * k1 + 32 * k3 + 12 * k4 + 32 * k5 + 7 * k6) / 90
        return self._integrate(xi, xf, yi, h, slope)
| 13,086 | 4,745 |
"""
{{ cookiecutter.project_short_description }}.
"""
__version__ = '{{ cookiecutter.version }}'
default_app_config = '{{ cookiecutter.app_name }}.apps.{{ cookiecutter.config_class_name }}' # pylint: disable=invalid-name
| 224 | 78 |
import json
import logging
import discord
import requests
from discord.ext import commands
logger = logging.getLogger(__name__)
class search(commands.Cog):
    """Search commands for various websites"""

    # AniList GraphQL endpoint shared by the anime and manga commands
    ANILIST_URL = "https://graphql.anilist.co"

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def urban(self, ctx, *, term: str):
        """Search Urban Dictionary"""
        # NOTE: requests is blocking; a timeout at least bounds how long
        # the event loop can stall on a slow response.
        req = requests.get(
            "http://api.urbandictionary.com/v0/define?term={}".format(term),
            timeout=10,
        )
        results = req.json().get("list", [])
        # Guard against empty results (previously raised IndexError)
        if not results:
            await ctx.send("No Urban Dictionary results found for `{}`".format(term))
            return
        entry = results[0]
        message = "{} \n\n *{}*".format(entry["definition"], entry["example"])
        # Get rid of any square brackets (Urban Dictionary cross-references)
        message = message.replace("[", "").replace("]", "")
        embed = discord.Embed()
        embed.add_field(name=entry["word"], value=message, inline=False)
        await ctx.send(embed=embed)

    async def _anilist_query(self, ctx, query, text):
        """Run an AniList GraphQL search and handle rate limiting.

        Returns the ``Media`` JSON object, or ``None`` after notifying the
        channel when the bot is rate limited.
        """
        response = requests.post(
            self.ANILIST_URL,
            json={"query": query, "variables": {"search": text}},
            timeout=10,
        )
        # Rate limiting is currently set to 90 requests per minute
        # If you go over the rate limit you'll receive a 1-minute timeout
        # https://anilist.gitbook.io/anilist-apiv2-docs/overview/rate-limiting
        if int(response.headers["X-RateLimit-Remaining"]) <= 0:
            await ctx.send(
                "The bot is currently being rate limited :( Try again in {} seconds".format(
                    response.headers["Retry-After"]
                )
            )
            return None
        return response.json()["data"]["Media"]

    @commands.command()
    async def anime(self, ctx, *, text: str):
        """Search AniList for anime"""
        query = """
        query ($search: String) {
            Media (search: $search, type: ANIME) {
                id
                title {
                    romaji
                    english
                    native
                }
                description
                episodes
                duration
                status
                genres
                averageScore
                coverImage {
                    large
                }
            }
        }
        """
        media = await self._anilist_query(ctx, query, text)
        if media is None:
            return
        embed = discord.Embed(
            title="{} / {}".format(media["title"]["romaji"], media["title"]["native"]),
            url="https://anilist.co/anime/{}".format(media["id"]),
            description=media["description"].replace("<br>", ""),
        )
        embed.set_thumbnail(url=media["coverImage"]["large"])
        embed.add_field(name="Episode Count", value=media["episodes"], inline=True)
        embed.add_field(
            name="Duration",
            value="{} minutes per episode".format(media["duration"]),
            inline=True,
        )
        embed.add_field(name="Status", value=media["status"], inline=True)
        embed.add_field(name="Genres", value=", ".join(media["genres"]), inline=True)
        embed.add_field(name="Average Score", value=media["averageScore"], inline=True)
        embed.set_footer(text="Powered by anilist.co")
        await ctx.send(embed=embed)

    @commands.command()
    async def manga(self, ctx, *, text: str):
        """Search AniList for manga"""
        query = """
        query ($search: String) {
            Media (search: $search, type: MANGA) {
                id
                title {
                    romaji
                    english
                    native
                }
                description
                chapters
                volumes
                status
                genres
                averageScore
                coverImage {
                    large
                }
            }
        }
        """
        media = await self._anilist_query(ctx, query, text)
        if media is None:
            return
        embed = discord.Embed(
            title="{} / {}".format(media["title"]["romaji"], media["title"]["native"]),
            url="https://anilist.co/manga/{}".format(media["id"]),
            description=media["description"].replace("<br>", ""),
        )
        embed.set_thumbnail(url=media["coverImage"]["large"])
        embed.add_field(name="Chapters", value=media["chapters"], inline=True)
        embed.add_field(name="Volumes", value=media["volumes"], inline=True)
        embed.add_field(name="Status", value=media["status"], inline=True)
        embed.add_field(name="Genres", value=", ".join(media["genres"]), inline=True)
        embed.add_field(name="Average Score", value=media["averageScore"], inline=True)
        embed.set_footer(text="Powered by anilist.co")
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: register the cog when the
    # extension is loaded via bot.load_extension(...)
    bot.add_cog(search(bot))
| 6,314 | 1,682 |
from rsock import ReliableSocket
from packet import Packet, Header, Payload
from collections import namedtuple
import struct
# Lightweight record types returned by the ADBI query helpers below.
Symbol = namedtuple('Symbol', 'address name')
Tracepoint = namedtuple('Tracepoint', 'address handler')
Injectable = namedtuple('Injectable', 'id filename refs type name comment')
class ADBIException(Exception):
    """Raised for connection misuse and for FAIL/USUP/MALF protocol replies."""
    pass
class ADBI(object):
    """
    Client for the ADBI daemon.

    Wraps a ReliableSocket with a simple request/response packet protocol
    and exposes one method per protocol command (PING, MAPS, MEMD, ...).
    Python 2 code (note the ``xrange`` usage throughout).
    """
    def __init__(self):
        # infinite generator of 32-bit sequence numbers, wrapping at 2**32
        def seqgen():
            while True:
                for seq in xrange(2 ** 32):
                    yield seq
        self.connection = None
        self.seqgen = seqgen()
    def __check_connection(self):
        # guard used by every request; raises unless connect() was called
        if not self.connection:
            raise ADBIException('Not connected.')
    def connect(self, address):
        """Open the (single) connection to the daemon at `address`."""
        if self.connection:
            raise ADBIException('Already connected.')
        self.connection = ReliableSocket(address)
    def disconnect(self):
        """Close the connection; connect() may be called again afterwards."""
        self.__check_connection()
        self.connection.close()
        self.connection = None
    def __recv(self):
        # read the fixed-size header, then the payload length it announces
        header = Header.unpack_from(self.connection.recv(Header.size))
        payload = self.connection.recv(header.length)
        payload = Payload.unpack_from(payload)
        return Packet(header, payload)
    def __send(self, packet):
        self.connection.send(packet.pack())
    def request(self, type, payload=None):
        """Send one request and return the response packet.

        Raises ADBIException on FAIL, USUP and MALF replies.
        """
        self.__check_connection()
        if payload is None:
            payload = Payload()
        header = Header(type,
                        next(self.seqgen),
                        len(payload.pack()))
        packet = Packet(header, payload)
        self.__send(packet)
        response = self.__recv()
        if response.type == 'FAIL':
            raise ADBIException(response.get('msg', 'Request failed.'))
        if response.type == 'USUP':
            raise ADBIException(response.get('msg', 'Not supported.'))
        if response.type == 'MALF':
            raise ADBIException('Protocol error: {:}'.format(response.get('msg', '?')))
        return response
    def ping(self):
        return self.request('PING')
    def quit(self):
        return self.request('QUIT')
    @property
    def executables(self):
        # NOTE(review): `get_text` is not defined in this class as shown here --
        # confirm it is provided elsewhere before relying on this property.
        return self.get_text(0)
    def get_memory(self, pid):
        """Return the memory map of `pid` as sorted (lo, hi, type, file, off) tuples."""
        payload = Payload()
        payload.put_u32('pid', pid)
        response = self.request('MAPS', payload)
        def iter_segments():
            # segc = segment count; seg<field>[i] = per-segment attributes
            for i in xrange(response.get('segc', 0)):
                def get(what):
                    return response.get('seg{:}[{:}]'.format(what, i))
                yield get('lo'), get('hi'), get('type'), get('file'), get('off')
        return sorted(iter_segments())
    def explain_address(self, pid, address):
        payload = Payload()
        payload.put_u32('pid', pid)
        payload.put_u64('address', address)
        return self.request('ADDR', payload)
    def dump(self, pid, address, size):
        """Dump memory of `pid` at `address` and return it as a byte string."""
        payload = Payload()
        payload.put_u32('pid', pid)
        payload.put_u64('address', address)
        payload.put_u32('size', size)
        response = self.request('MEMD', payload)
        words = (response.get('word[%i]' % x) for x in xrange(response.get('size', 0)))
        def tobytes(word):
            # split a 32-bit word into bytes, emitted least-significant first
            a = (word >> 24) & 0xff
            b = (word >> 16) & 0xff
            c = (word >> 8) & 0xff
            d = (word) & 0xff
            return ''.join(chr(x) for x in (d, c, b, a))
        bytes = (tobytes(x) for x in words)
        return ''.join(bytes)
    @property
    def processes(self):
        """Set of processes currently reported by the daemon."""
        response = self.request('PROC')
        return set([response.get('procv[{:}]'.format(i)) for i in
                    xrange(response.get('procc', 0))])
    def start(self):
        return self.request('STRT')
    def stop(self):
        return self.request('STOP')
    def ls(self, path):
        """List entries at `path`; directory entries get a trailing '/'."""
        payload = Payload()
        payload.put_str('path', path)
        response = self.request('LDIR', payload)
        return set([response.get('entv[{:}]'.format(i))
                    + ('/' if response.get('entd[{:}]'.format(i)) else '')
                    for i in xrange(response.get('entc', 0))])
    def loglevel(self, loglevel):
        payload = Payload()
        payload.put_u32('loglevel', loglevel)
        return self.request('LLEV', payload)
    def attach(self, pid):
        payload = Payload()
        payload.put_u32('pid', pid)
        return self.request('ATTC', payload)
    def detach(self, pid):
        payload = Payload()
        payload.put_u32('pid', pid)
        return self.request('DETC', payload)
    def spawn(self, args):
        """Spawn a process with argument vector `args`."""
        payload = Payload()
        payload.put_u32('argc', len(args))
        for i, v in enumerate(args):
            payload.put_str('argv[{:}]'.format(i), v)
        return self.request('SPWN', payload)
    def iter_injectable_symbols(self, iid, which):
        """Yield Symbol records of kind `which`: 'E'xports, 'I'mports or 'A'dbi."""
        if which not in 'EIA':
            raise ValueError
        payload = Payload()
        payload.put_u32('iid', iid)
        response = self.request('INJ' + which, payload)
        for i in xrange(response.get('symc', 0)):
            postfix = '[%i]' % i
            yield Symbol(response.get('symad' + postfix), response.get('symnm' + postfix))
    def get_injectable_imports(self, iid):
        return self.iter_injectable_symbols(iid, 'I')
    def get_injectable_exports(self, iid):
        return self.iter_injectable_symbols(iid, 'E')
    def get_injectable_adbi(self, iid):
        return self.iter_injectable_symbols(iid, 'A')
    def get_injectable_tracepoints(self, iid):
        """Yield Tracepoint records for injectable `iid`."""
        payload = Payload()
        payload.put_u32('iid', iid)
        response = self.request('INJT', payload)
        for i in xrange(response.get('tptc', 0)):
            postfix = '[%i]' % i
            yield Tracepoint(response.get('tpta' + postfix), response.get('tpth' + postfix))
    def iter_injectables(self):
        """Yield an Injectable record for every loaded injectable."""
        response = self.request('INJQ')
        for i in xrange(response.get('injc', 0)):
            postfix = '[%i]' % i
            yield Injectable(response.get('injid' + postfix),
                             response.get('injfn' + postfix),
                             response.get('injrc' + postfix),
                             response.get('injtp' + postfix),
                             response.get('injnm' + postfix),
                             response.get('injcm' + postfix))
    @property
    def injectables(self):
        return sorted(self.iter_injectables())
    def injectable_load(self, path):
        payload = Payload()
        payload.put_str('path', path)
        return self.request('INJL', payload)
    def injectable_unload(self, iid):
        payload = Payload()
        payload.put_u32('iid', iid)
        return self.request('INJU', payload)
    def kill(self, pid):
        payload = Payload()
        payload.put_u32('pid', pid)
        return self.request('KILL', payload)
from .errors import *
from .basic import Basic
import logging
class Property(Basic):
command = 'PROPERTY'
type = 'Property'
storage_type = 'label'
def __init__(self, label, non_structural_mass = 0.0):
super().__init__(id=None, label=label)
self.nsm = non_structural_mass
@property
def nsm(self):
return self.__nsm
@nsm.setter
def nsm(self, non_structural_mass):
self.__nsm = non_structural_mass
| 432 | 157 |
#!/usr/bin/env python3
from collections import deque
def pilingUp(q):
    """Decide whether the cubes in deque ``q`` can be stacked vertically.

    At each step a cube may be taken from either end of ``q`` and must be
    no larger than the cube placed before it.  Greedy strategy: always take
    the larger end (if the larger end does not fit, the smaller one cannot
    save it, since the larger end would have to fit an even smaller top
    later).

    Fixes over the previous revision: the function now uses its parameter
    (it previously read the global ``d``), tracks the size of the last cube
    placed, and checks every cube including the final one.

    :param q: collections.deque of cube side lengths (consumed/mutated)
    :return: "Yes" if a valid pile exists, otherwise "No"
    """
    top = float("inf")          # side length of the last cube placed
    while q:
        # take the larger of the two ends
        if q[0] >= q[-1]:
            cube = q.popleft()
        else:
            cube = q.pop()
        if cube > top:
            return "No"
        top = cube
    return "Yes"
if __name__ == "__main__":
# number of testcases
t = int(input())
results = []
for _ in range(t):
d = deque()
# number of cubes
n = int(input())
# side lenght of each cube
row_of_cubes = map(int, input().split())
# put elements in deque
for e in row_of_cubes:
d.append(e)
results.append(pilingUp(d))
for i in results:
print(i)
| 1,042 | 341 |
"""
Redoes what was originally PyDVR but in the _right_ way using proper subclassing and abstract properties
"""
import abc, numpy as np, scipy.sparse as sp, scipy.interpolate as interp
from McUtils.Data import UnitsData
__all__ = ["BaseDVR", "DVRResults", "DVRException"]
class BaseDVR(metaclass=abc.ABCMeta):
    """
    Provides the abstract interface for creating a
    convenient runnable DVR that can be cleanly subclassed to provide
    extensions
    """
    def __init__(self,
                 domain=None,
                 divs=None,
                 potential_function=None,
                 **base_opts
                 ):
        """
        :param domain: range over which the DVR grid is laid out
        :param divs: number of grid divisions
        :param potential_function: callable used to evaluate the potential
        :param base_opts: base opts to use when running
        :type base_opts:
        """
        self.domain = domain
        base_opts['domain'] = domain
        self.divs = divs
        base_opts['divs'] = divs
        self.potential_function = potential_function
        base_opts['potential_function'] = potential_function
        self.opts = base_opts

    def __repr__(self):
        # the previous version branched on potential_function but both
        # branches produced the identical string; collapsed to one return
        return "{}({}, pts={}, pot={})".format(
            type(self).__name__,
            self.domain,
            self.divs,
            self.potential_function
        )

    @abc.abstractmethod
    def get_grid(self, domain=None, divs=None, **kwargs):
        """Build the DVR grid for the given domain/divisions (subclass hook)."""
        raise NotImplementedError("abstract interface")

    def grid(self, domain=None, divs=None, **kwargs):
        """Return the DVR grid, defaulting `domain`/`divs` to the instance values."""
        if domain is None:
            domain = self.domain
        if divs is None:
            divs = self.divs
        if domain is None:
            raise ValueError("need a value for `domain`")
        if divs is None:
            raise ValueError("need a value for `divs`")
        return self.get_grid(domain=domain, divs=divs, **kwargs)

    @abc.abstractmethod
    def get_kinetic_energy(self, grid=None, mass=None, hb=1, **kwargs):
        """Build the 1D kinetic-energy matrix over `grid` (subclass hook)."""
        raise NotImplementedError("abstract interface")

    def kinetic_energy(self, grid=None, mass=None, hb=1, g=None, g_deriv=None, **kwargs):
        """Return the kinetic-energy matrix, optionally including a G-matrix term.

        When `g` is supplied (callable or array), `mass` is ignored and
        `g_deriv` (second derivative of g) must also be supplied.
        """
        if grid is None:
            grid = self.grid()
        if g is not None:
            mass = 1
        if mass is None:
            raise ValueError("need a value for the mass")
        ke_1D = self.get_kinetic_energy(grid=grid, mass=mass, hb=hb, **kwargs)
        if g is not None:
            if g_deriv is None:
                raise ValueError(
                    "if a function for `g` is supplied, also need a function, `g_deriv` for the second derivative of `g`")
            # add the average value of `g` across the grid points
            try:
                iter(g)
            except TypeError:
                g_vals = g(grid)
            else:
                # (removed leftover debug print of `g` here)
                g_vals = np.asanyarray(g)
            try:
                iter(g_deriv)
            except TypeError:
                g_deriv_vals = g_deriv(grid)
            else:
                g_deriv_vals = np.asanyarray(g_deriv)
            # symmetrized pairwise average of g plus a diagonal g'' correction
            g_vals = 1 / 2 * (g_vals[:, np.newaxis] + g_vals[np.newaxis, :])
            g_deriv_vals = (hb ** 2) / 2 * np.diag(g_deriv_vals)
            ke_1D = ke_1D * g_vals + g_deriv_vals
        return ke_1D

    def real_momentum(self, grid=None, mass=None, hb=1, **kwargs):
        raise NotImplementedError("real momentum needs to be implemented")

    def potential_energy(self, grid=None,
                         potential_function=None,
                         potential_values=None,
                         potential_grid=None,
                         **pars
                         ):
        """
        Calculates the potential energy at the grid points based
        on dispatching on the input form of the potential
        :param grid: the grid of points built earlier in the DVR
        :type grid:
        :param potential_function: a function to evaluate the potential energy at the points
        :type potential_function:
        :param potential_values: the values of the potential at the DVR points
        :type potential_values:
        :param potential_grid: a grid of points and values to be interpolated
        :type potential_grid:
        :param pars: ignored keyword arguments
        :type pars:
        :return:
        :rtype:
        """
        if grid is None:
            grid = self.grid()
        if potential_function is None and potential_grid is None and potential_values is None:
            potential_function = self.potential_function
        if potential_function is not None:
            # explicit potential function passed; map over coords
            pf = potential_function
            dim = len(grid.shape)
            if dim > 1:
                npts = np.prod(grid.shape[:-1], dtype=int)
                grid = np.reshape(grid, (npts, grid.shape[-1]))
                pot = sp.diags([pf(grid)], [0])
            else:
                pot = np.diag(pf(grid))
        elif potential_values is not None:
            # array of potential values at coords passed
            dim = len(grid.shape)
            if dim > 1:
                pot = sp.diags([potential_values], [0])
            else:
                pot = np.diag(potential_values)
        elif potential_grid is not None:
            # TODO: extend to include ND, scipy.griddata
            dim = len(grid.shape)
            if dim > 1:
                dim -= 1
                npts = np.prod(grid.shape[:-1], dtype=int)
                grid = np.reshape(grid, (npts, grid.shape[-1]))
            if dim == 1:
                # use a cubic spline interpolation
                interpolator = lambda g1, g2: interp.interp1d(g1[:, 0], g1[:, 1], kind='cubic')(g2)
            else:
                # use griddata to do a general purpose interpolation
                def interpolator(g, g2):
                    # g is an np.ndarray of potential points and values
                    # g2 is the set of grid points to interpolate them over
                    shape_dim = len(g.shape)
                    if shape_dim == 2:
                        points = g[:, :-1]
                        vals = g[:, -1]
                        return interp.griddata(points, vals, g2)
                    else:
                        # assuming regular structured grid
                        mesh = np.moveaxis(g, 0, shape_dim)
                        points = tuple(np.unique(x) for x in mesh[:-1])
                        vals = mesh[-1]
                        return interp.interpn(points, vals, g2)
            # NaNs from extrapolation are zeroed out
            wtf = np.nan_to_num(interpolator(potential_grid, grid))
            pot = sp.diags([wtf], [0])
        else:
            raise DVRException("couldn't construct potential matrix")
        return pot

    def hamiltonian(self, kinetic_energy=None, potential_energy=None, potential_threshold=None, **pars):
        """
        Calculates the total Hamiltonian from the kinetic and potential matrices
        :param kinetic_energy:
        :type kinetic_energy:
        :param potential_energy:
        :type potential_energy: np.ndarray | sp.spmatrix
        :param potential_threshold: if given, rows/cols whose potential exceeds 0 are zeroed
        :type potential_threshold:
        :param pars:
        :type pars:
        :return:
        :rtype:
        """
        if potential_threshold is not None:
            diag = potential_energy.diagonal()
            chops = np.where(diag > 0)
            if len(chops) == 0:
                return kinetic_energy + potential_energy
            chops = chops[0]
            ham = kinetic_energy + potential_energy
            ham[chops, :] = 0
            ham[:, chops] = 0
            return ham
        else:
            return kinetic_energy + potential_energy

    def wavefunctions(self, hamiltonian=None, num_wfns=25, nodeless_ground_state=False, diag_mode=None, **pars):
        """
        Calculates the wavefunctions for the given Hamiltonian.
        Doesn't support any kind of pruning based on potential values although that might be a good feature
        to support explicitly in the future
        :param hamiltonian:
        :type hamiltonian:
        :param num_wfns: number of eigenpairs to keep
        :type num_wfns:
        :param nodeless_ground_state: flip signs so the ground state is positive
        :type nodeless_ground_state:
        :param diag_mode: 'dense' forces sparse matrices through the dense solver
        :type diag_mode:
        :param pars:
        :type pars:
        :return:
        :rtype:
        """
        if isinstance(hamiltonian, sp.spmatrix) and diag_mode == 'dense':
            hamiltonian = hamiltonian.toarray()
        if isinstance(hamiltonian, sp.spmatrix):
            import scipy.sparse.linalg as la
            engs, wfns = la.eigsh(hamiltonian, num_wfns, which='SM')
        else:
            engs, wfns = np.linalg.eigh(hamiltonian)
            if num_wfns is not None:
                engs = engs[:num_wfns]
                wfns = wfns[:, :num_wfns]
        if nodeless_ground_state:
            s = np.sign(wfns[:, 0])
            wfns *= s[:, np.newaxis]
        return engs, wfns

    def run(self, result='wavefunctions', **opts):
        """
        Runs the DVR pipeline up to the requested `result` stage.
        :return:
        :rtype: DVRResults
        """
        from .Wavefunctions import DVRWavefunctions
        opts = dict(self.opts, **opts)
        res = DVRResults(parent=self, **opts)
        grid = self.grid(**opts)
        res.grid = grid
        if result == 'grid':
            return res
        pe = self.potential_energy(grid=res.grid, **opts)
        res.potential_energy = pe
        if result == 'potential_energy':
            return res
        ke = self.kinetic_energy(grid=res.grid, **opts)
        res.kinetic_energy = ke
        if result == 'kinetic_energy':
            return res
        h = self.hamiltonian(
            kinetic_energy=res.kinetic_energy,
            potential_energy=res.potential_energy,
            **opts
        )
        res.hamiltonian = h
        if result == 'hamiltonian':
            return res
        energies, wfn_data = self.wavefunctions(
            hamiltonian=res.hamiltonian,
            **opts
        )
        wfns = DVRWavefunctions(energies=energies, wavefunctions=wfn_data, results=res, **opts)
        res.wavefunctions = wfns
        return res
class DVRException(Exception):
    """
    Base exception class for working with DVRs
    (raised e.g. when no potential form can be dispatched on)
    """
class DVRResults:
    """
    A subclass that can wrap all of the DVR run parameters and results into a clean interface for reuse and extension
    """
    def __init__(self,
                 grid=None,
                 kinetic_energy=None,
                 potential_energy=None,
                 hamiltonian=None,
                 wavefunctions=None,
                 parent=None,
                 **opts
                 ):
        # (previous revision had a stray `self.parent=None,` that assigned
        #  the tuple (None,) before being overwritten -- removed)
        self.grid = grid
        self.kinetic_energy = kinetic_energy
        self.potential_energy = potential_energy
        self.parent = parent
        self.wavefunctions = wavefunctions
        self.hamiltonian = hamiltonian
        self.opts = opts

    @property
    def dimension(self):
        """Spatial dimension of the grid (the last axis of an ND grid holds coordinates)."""
        dim = len(self.grid.shape)
        if dim > 1:
            dim -= 1
        return dim

    def plot_potential(self, plot_class=None, figure=None, plot_units=None, energy_threshold=None, zero_shift=False, **opts):
        """
        Simple plotting function for the potential.
        Should be updated to deal with higher dimensional cases
        :param plot_class: the graphics class to use for the plot
        :type plot_class: McUtils.Plots.Graphics
        :param plot_units: pass 'wavenumbers' to convert from Hartrees
        :param energy_threshold: clip potential values above this energy
        :param zero_shift: shift the potential so its minimum is zero
        :param opts: plot styling options
        :type opts:
        :return:
        :rtype: McUtils.Plots.Graphics
        """
        from McUtils.Plots import Plot, ContourPlot
        # get the grid for plotting
        MEHSH = self.grid
        dim = self.dimension
        if dim == 1:
            mesh = [MEHSH]
        else:
            mesh = np.moveaxis(MEHSH, dim, 0)
        if plot_class is None:
            if dim == 1:
                plot_class = Plot
            elif dim == 2:
                plot_class = ContourPlot
            else:
                raise DVRException("{}.{}: don't know how to plot {} dimensional potential".format(
                    type(self).__name__,
                    'plot',
                    dim
                ))
        # copy the diagonal: numpy's ndarray.diagonal() returns a read-only
        # view, so the in-place threshold clamp below would otherwise fail
        pot = np.array(self.potential_energy.diagonal())
        if isinstance(plot_units, str) and plot_units == 'wavenumbers':
            pot = pot * UnitsData.convert("Hartrees", "Wavenumbers")
        if zero_shift:
            pot = pot - np.min(pot)
        if energy_threshold:
            pot[pot > energy_threshold] = energy_threshold
        return plot_class(*mesh, pot.reshape(mesh[0].shape), figure=figure, **opts)
| 12,864 | 3,698 |
# import libraries
from tkinter import *
# initialize the main window: fixed 480x350 layout, resizing disabled
root = Tk()
root.geometry('480x350')
root.resizable(0, 0)
root.title('Weight Converter')
# defining the function for converting weights
def WeightConv():
    """Read kilograms from the entry box and fill all six output textboxes.

    Reads the value typed in ``e1`` and writes the converted weights into
    ``t1``..``t6`` (carat, gram, ounce, pound, quintal, tonne).  On blank
    or non-numeric input every box shows an invalid-input marker instead.
    The boxes are re-locked (``state='disabled'``) afterwards either way.
    """
    boxes = (t1, t2, t3, t4, t5, t6)
    # conversion factors from kilograms, in the same order as the boxes:
    # carat, gram, ounce, pound, quintal, tonne
    factors = (5000, 1000, 35.274, 2.20462, 0.01, 0.001)
    # make the textboxes editable and clear any previous output
    for box in boxes:
        box.configure(state='normal')
        box.delete("1.0", END)
    try:
        kilograms = float(e1.get())
        # insert each converted value, correct up to 2 places after decimal
        for box, factor in zip(boxes, factors):
            box.insert(END, "%.2f" % (kilograms * factor))
    except ValueError:
        # blank or invalid input
        for box in boxes:
            box.insert(END, " ~ Invalid input ~ ")
    finally:
        # lock the textboxes again so the user cannot edit the results
        for box in boxes:
            box.configure(state='disabled')
# input row: prompt label + entry box bound to `value`
l1 = Label(root, text="Enter the weight in kilograms (kg) : ")
l1.grid(row=1, column=1, columnspan=2)
value = StringVar()
# creating a entry box for input
e1 = Entry(root, textvariable=value)
e1.grid(row=1, column=3, columnspan=2)
# create a button for conversion
button = Button(root, text="Convert", command=WeightConv)
button.grid(row=2, column=2, columnspan=2, rowspan=2)
# left column: conversion labels (unit abbreviations)
t1l1 = Label(root, text="kg to ct : ")
t1l1.grid(row=4, column=1, columnspan=1)
t2l2 = Label(root, text="kg to g : ")
t2l2.grid(row=5, column=1, columnspan=1)
t3l3 = Label(root, text="kg to oz : ")
t3l3.grid(row=6, column=1, columnspan=1)
t4l4 = Label(root, text="kg to lb : ")
t4l4.grid(row=7, column=1, columnspan=1)
t5l5 = Label(root, text="kg to q : ")
t5l5.grid(row=8, column=1, columnspan=1)
t6l6 = Label(root, text="kg to t : ")
t6l6.grid(row=9, column=1, columnspan=1)
# right column: full unit names
t1r1 = Label(root, text="Carat")
t1r1.grid(row=4, column=4, columnspan=1)
t2r2 = Label(root, text="Gram")
t2r2.grid(row=5, column=4, columnspan=1)
t3r3 = Label(root, text="Ounce")
t3r3.grid(row=6, column=4, columnspan=1)
t4r4 = Label(root, text="Pound")
t4r4.grid(row=7, column=4, columnspan=1)
t5r5 = Label(root, text="Quintal")
t5r5.grid(row=8, column=4, columnspan=1)
t6r6 = Label(root, text="Tonne")
t6r6.grid(row=9, column=4, columnspan=1)
# middle column: textboxes that WeightConv fills with the results
t1 = Text(root, height=1, width=20)
t1.grid(row=4, column=2, columnspan=2)
t2 = Text(root, height=1, width=20)
t2.grid(row=5, column=2, columnspan=2)
t3 = Text(root, height=1, width=20)
t3.grid(row=6, column=2, columnspan=2)
t4 = Text(root, height=1, width=20)
t4.grid(row=7, column=2, columnspan=2)
t5 = Text(root, height=1, width=20)
t5.grid(row=8, column=2, columnspan=2)
t6 = Text(root, height=1, width=20)
t6.grid(row=9, column=2, columnspan=2)
# enforce minimum row/column sizes so the grid has some breathing room
for r in range(10):
    root.grid_rowconfigure(r, minsize=30)
for c in range(6):
    root.grid_columnconfigure(c, minsize=50)
# infinite loop to run program
root.mainloop()
| 3,802 | 1,555 |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: pooling"""
import akg
import akg.utils as utils
from akg.utils.format_transform import get_shape
from akg.ops.nn.ascend import MaxPool, Avgpool
def _pooling_compute(x, window, stride,
                     mode=0, pad_mode=5, pad=(0, 0, 0, 0)):
    """Dispatch a pooling computation to MaxPool or Avgpool.

    Translates the integer mode/pad_mode codes into the string forms the
    underlying ops expect, degenerates to global pooling when the window
    covers the whole feature map, and rejects unsupported padding.
    """
    # Integer codes -> op-level strings; anything else is an error.
    mode_names = {0: "MAX", 1: "AVG"}
    if mode not in mode_names:
        raise RuntimeError("Invalid mode parameters, mode must set 0 or 1.")
    mode = mode_names[mode]

    pad_mode_names = {5: "VALID", 6: "SAME"}
    if pad_mode not in pad_mode_names:
        raise RuntimeError("Invalid pad_mode parameters, pad_mode must set 5 or 6.")
    pad_mode = pad_mode_names[pad_mode]

    # Only zero explicit padding is supported.
    if pad not in ((0, 0, 0, 0), [0, 0, 0, 0]):
        raise RuntimeError("Not support pad now!")

    # If the window covers the full H and W extent, clamp it to the input
    # size and force VALID/stride-1: effectively a global pooling.
    in_size_h = x.shape[2].value
    in_size_w = x.shape[3].value
    window = list(window)
    if window[0] >= in_size_h and window[1] >= in_size_w:
        window[0] = in_size_h
        window[1] = in_size_w
        pad_mode = "VALID"
        stride = [1, 1]

    if mode == "MAX":
        return MaxPool(x, window, stride, pad_mode)
    # AVG is the only other possibility after validation above.
    return Avgpool(x, window, stride, pad_mode)
@utils.check_input_type(akg.tvm.tensor.Tensor,
                        (list, tuple), (list, tuple), (int, type(None)),
                        (int, type(None)), (list, tuple, type(None)),
                        (bool, type(None)), (int, type(None)))
def pooling(x, window, stride,
            mode=0, pad_mode=5, pad=(0, 0, 0, 0),
            global_pooling=False, ceil_mode=0):
    """
    Pooling operation, including MaxPool and AvgPool.

    Args:
        x (tvm.tensor.Tensor): Input tensor, only support float16
            dtype, and NC1HWC0 format.
        window (Union[list, tuple]): Pooling window, only support pooling
            in H or W.
        stride (Union[list, tuple]): Pooling stride, only support pooling
            in H or W.
        mode (int): Mode of pooling, support MaxPool and AvgPool. 0 for MaxPool,
            1 for AvgPool.
        pad_mode (int): Mode of padding, 5 for VALID, 6 for SAME.
        pad (Union[list, tuple]): Implicit padding size to up/down/left/right.
        global_pooling (bool): Global pooling flag, invalid now, should be False.
        ceil_mode (int): Round_mode params, invalid now, should be 0.

    Returns:
        A tvm.tensor.Tensor with same dtype as input.

    Raises:
        RuntimeError: if window/stride are not 2-element sequences, or if
            global_pooling/ceil_mode are set (not supported yet).
    """
    # Validate shape and dtype before delegating to the compute helper.
    utils.check_shape(get_shape(x))
    utils.ops_dtype_check(x.dtype, utils.DtypeForDavinci.FLOAT16)
    if len(window) != 2:
        raise RuntimeError("Invalid shape params, window shape must be 2 dims, "
                           "including window_h and window_w.")
    if len(stride) != 2:
        raise RuntimeError("Invalid shape params, stride shape must be 2 dims, "
                           "including stride_h and stride_w.")
    # Both flags are accepted for interface compatibility but unimplemented.
    if global_pooling or ceil_mode != 0:
        raise RuntimeError("Not support global_pooling and ceil_mode for now.")
    return _pooling_compute(x, window, stride, mode, pad_mode, pad)
| 3,789 | 1,236 |
#!/usr/bin/python3
import collections
import typing
import inspect
import time
from jk_cachefunccalls import cacheCalls
from _test_and_evaluate import testAndEvaluate
class MyTestClass(object):
    """Test fixture: returnSomething is wrapped by cacheCalls so repeated
    calls within 2 seconds (keyed on the first positional argument) return
    the cached value instead of recomputing."""

    @cacheCalls(seconds=2, dependArgs=[0])
    def returnSomething(self, n):
        # Returns wall-clock time; with caching, rapid repeat calls with the
        # same n should yield an identical timestamp.
        return time.time()
    #
#
# Exercise the cached bound method through the shared test harness.
o = MyTestClass()
testAndEvaluate(o.returnSomething)
| 363 | 136 |
# Code from Chapter 18 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2014
import pylab as pl
import numpy as np
import scipy.optimize as so
def kernel(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Squared-exponential covariance between two point sets.

    Hyperparameters are supplied on a log scale and exponentiated here so
    they are always positive: hyp[0] is the signal variance, hyp[1..n] are
    per-dimension length weights, hyp[2] doubles as the noise variance.
    With wantderiv=True, returns a (d1, d2, len(hyp)+1) array whose slice 0
    is the covariance and slices 1.. are derivatives w.r.t. each (log)
    hyperparameter; otherwise returns just the noisy covariance matrix.
    """
    hyp = np.exp(np.squeeze(theta))  # exp() keeps every hyperparameter positive
    if np.ndim(data1) == 1:
        rows, ndim = np.shape(data1)[0], 1
    else:
        rows, ndim = np.shape(data1)
    cols = np.shape(data2)[0]

    # Weighted squared distances accumulated one input dimension at a time.
    sumxy = np.zeros((rows, cols))
    for dim in range(ndim):
        left = np.transpose([data1[:, dim]]) * np.ones((rows, cols))
        right = [data2[:, dim]] * np.ones((rows, cols))
        sumxy += (left - right) ** 2 * hyp[dim + 1]

    k = hyp[0] * np.exp(-0.5 * sumxy)
    noise = measnoise * hyp[2] * np.eye(rows, cols)
    if not wantderiv:
        return k + noise

    K = np.zeros((rows, cols, len(hyp) + 1))
    K[:, :, 0] = k + noise                      # covariance itself
    K[:, :, 1] = k                              # d/d(log signal variance)
    K[:, :, 2] = -0.5 * k * sumxy               # d/d(log length weight)
    K[:, :, 3] = hyp[2] * np.eye(rows, cols)    # d/d(log noise variance)
    return K
def kernel2(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Squared-exponential covariance with raw (non-log) hyperparameters.

    theta[0] is the signal amplitude, theta[1] the shared length scale,
    theta[2] the noise amplitude. With wantderiv=True the returned array
    stacks the covariance (slice 0) and its derivatives w.r.t. each
    hyperparameter (slices 1-3).
    """
    theta = np.squeeze(theta)
    theta2 = 0.3  # NOTE(review): assigned but never used below
    rows, ndim = np.shape(data1)
    cols = np.shape(data2)[0]

    # Unweighted squared Euclidean distances between the two point sets.
    sq_dist = np.zeros((rows, cols))
    for dim in range(ndim):
        left = np.transpose([data1[:, dim]]) * np.ones((rows, cols))
        right = [data2[:, dim]] * np.ones((rows, cols))
        sq_dist += (left - right) ** 2

    k = theta[0] ** 2 * np.exp(-sq_dist / (2.0 * theta[1] ** 2))
    noise = measnoise * theta[2] ** 2 * np.eye(rows, cols)
    if not wantderiv:
        return k + noise

    K = np.zeros((rows, cols, len(theta) + 1))
    K[:, :, 0] = k + noise
    K[:, :, 1] = 2.0 * k / theta[0]                     # d/d(amplitude)
    K[:, :, 2] = k * sq_dist / (theta[1] ** 3)          # d/d(length scale)
    K[:, :, 3] = 2.0 * theta[2] * np.eye(rows, cols)    # d/d(noise)
    return K
def NRiteration(data, targets, theta):
    """Damped Newton-Raphson search for the mode of the latent GP posterior.

    Iterates the standard Laplace-approximation update (Rasmussen & Williams
    style: W from the likelihood curvature, Cholesky of I + sqrt(W) K sqrt(W),
    then f = K a) until the objective stops changing, halving the step scale
    every 100 iterations if convergence stalls.

    Returns:
        (f, logq, a): latent mode f, approximate log marginal likelihood
        logq, and the dual vector a with f = K a.
    """
    K = kernel(data, data, theta, wantderiv=False)
    n = np.shape(targets)[0]
    f = np.zeros((n, 1))          # latent function values, started at zero
    tol = 0.1                     # convergence tolerance on the objective change
    phif = 1e100                  # previous objective value (init: effectively +inf)
    scale = 1.                    # step damping factor, halved when stuck
    count = 0
    while True:
        count += 1
        # s clips f at zero so the exp() calls below stay numerically stable.
        s = np.where(f < 0, f, 0)
        # W: diagonal of negative log-likelihood curvature (logistic link).
        W = np.diag(np.squeeze(np.exp(2 * s - f) / ((np.exp(s) + np.exp(s - f)) ** 2)))
        sqrtW = np.sqrt(W)
        L = np.linalg.cholesky(np.eye(n) + np.dot(sqrtW, np.dot(K, sqrtW)))
        p = np.exp(s) / (np.exp(s) + np.exp(s - f))   # sigmoid(f), stabilised
        b = np.dot(W, f) + 0.5 * (targets + 1) - p
        # Damped Newton step expressed through the Cholesky factor.
        a = scale * (b - np.dot(sqrtW, np.linalg.solve(L.transpose(), np.linalg.solve(L, np.dot(sqrtW, np.dot(K, b))))))
        f = np.dot(K, a)
        oldphif = phif
        phif = np.log(p) - 0.5 * np.dot(f.transpose(), np.dot(np.linalg.inv(K), f)) - 0.5 * np.sum(np.log(np.diag(L))) - \
            np.shape(data)[0] / 2. * np.log(2 * np.pi)
        if (np.sum((oldphif - phif) ** 2) < tol):
            break
        elif (count > 100):
            # Not converging: halve the step size and restart the counter.
            count = 0
            scale = scale / 2.
    # Approximate log marginal likelihood at the mode.
    # NOTE(review): the inner np.log(ps + np.log(...)) nesting looks unusual
    # for a log(1+exp) term — preserved as-is; verify against the reference.
    s = -targets * f
    ps = np.where(s > 0, s, 0)
    logq = -0.5 * np.dot(a.transpose(), f) - np.sum(np.log(ps + np.log(np.exp(-ps) + np.exp(s - ps)))) - np.trace(
        np.log(L))
    return (f, logq, a)
def predict(xstar, data, targets, theta):
    """Predictive latent mean and variance at test points xstar.

    Runs the Laplace mode-finding (NRiteration), then applies the standard
    GP classification predictive equations through the Cholesky factor.
    Returns (fstar, V): latent mean and covariance at xstar.
    """
    n = np.shape(targets)[0]
    K = kernel(data, data, theta, wantderiv=False)
    kstar = kernel(data, xstar, theta, wantderiv=False, measnoise=0)

    latent, _, _ = NRiteration(data, targets, theta)

    # Stabilised sigmoid pieces, identical to the ones used inside NRiteration.
    clip = np.where(latent < 0, latent, 0)
    denom = np.exp(clip) + np.exp(clip - latent)
    W = np.diag(np.squeeze(np.exp(2 * clip - latent) / (denom ** 2)))
    rootW = np.sqrt(W)
    chol = np.linalg.cholesky(np.eye(n) + np.dot(rootW, np.dot(K, rootW)))
    prob = np.exp(clip) / denom

    # Predictive mean: k*^T (t~ - sigmoid(f)), with targets mapped to {0,1}.
    residual = (targets + 1) * 0.5 - prob
    mean_star = np.dot(kstar.transpose(), residual)

    # Predictive covariance via one triangular solve.
    v = np.linalg.solve(chol, np.dot(rootW, kstar))
    cov_star = kernel(xstar, xstar, theta, wantderiv=False, measnoise=0) - np.dot(v.transpose(), v)
    return (mean_star, cov_star)
def logPosterior(theta, args):
    """Objective for the optimizer: negative approximate log marginal
    likelihood of the hyperparameters theta, given (data, targets) in args."""
    data, targets = args
    _, logq, _ = NRiteration(data, targets, theta)
    return -logq
def gradLogPosterior(theta, args):
    """Gradient of the negative log marginal likelihood w.r.t. theta.

    Uses the derivative slices K[:, :, 1..] produced by kernel(wantderiv=True)
    plus the Laplace mode from NRiteration. Returns -gradZ so it pairs with
    logPosterior as (f, fprime) for scipy's minimizers.
    """
    data, targets = args
    theta = np.squeeze(theta)
    n = np.shape(targets)[0]
    K = kernel(data, data, theta, wantderiv=True)   # slice 0: covariance; 1..: derivatives
    (f, logq, a) = NRiteration(data, targets, theta)
    # Stabilised sigmoid terms, same construction as in NRiteration.
    s = np.where(f < 0, f, 0)
    W = np.diag(np.squeeze(np.exp(2 * s - f) / ((np.exp(s) + np.exp(s - f)) ** 2)))
    sqrtW = np.sqrt(W)
    L = np.linalg.cholesky(np.eye(n) + np.dot(sqrtW, np.dot(K[:, :, 0], sqrtW)))
    # R = sqrtW (I + sqrtW K sqrtW)^-1 sqrtW, via two triangular solves.
    R = np.dot(sqrtW, np.linalg.solve(L.transpose(), np.linalg.solve(L, sqrtW)))
    C = np.linalg.solve(L, np.dot(sqrtW, K[:, :, 0]))
    p = np.exp(s) / (np.exp(s) + np.exp(s - f))
    hess = -np.exp(2 * s - f) / (np.exp(s) + np.exp(s - f)) ** 2
    # Implicit-derivative correction term from the dependence of f-hat on theta.
    s2 = -0.5 * np.dot(np.diag(np.diag(K[:, :, 0]) - np.diag(np.dot(C.transpose(), C))), 2 * hess * (0.5 - p))
    gradZ = np.zeros(len(theta))
    for d in range(1, len(theta) + 1):
        # Explicit derivative through K plus the implicit correction s2^T s3.
        s1 = 0.5 * (np.dot(a.transpose(), np.dot(K[:, :, d], a))) - 0.5 * np.trace(np.dot(R, K[:, :, d]))
        b = np.dot(K[:, :, d], (targets + 1) * 0.5 - p)
        p = np.exp(s) / (np.exp(s) + np.exp(s - f))
        s3 = b - np.dot(K[:, :, 0], np.dot(R, b))
        gradZ[d - 1] = s1 + np.dot(s2.transpose(), s3)
    return -gradZ
def test():
    """Demo: 1-D GP classification on a hand-made three-cluster dataset.

    Optimises the kernel hyperparameters with conjugate gradients, predicts
    on held-out points, and plots the latent mean with a 2-sigma band plus
    the classified test points.
    """
    pl.ion()
    # Training inputs: three clusters; the middle cluster is the +1 class.
    data = np.array([[-2.1, -2.0, -1.9, -0.1, 0., 0.1, 1.9, 2.0, 2.1]]).transpose()
    labels = np.array([[-1., -1., -1., 1., 1., 1., -1., -1., -1.]]).transpose()
    # Initial (log-scale) hyperparameters for kernel().
    theta = np.zeros((3, 1))
    theta[0] = 1.0  # np.random.rand()*3
    theta[1] = 0.7  # np.random.rand()*3
    theta[2] = 0.3
    args = (data, labels)
    print(theta, logPosterior(theta, args))
    # Hyperparameter optimisation by conjugate gradients.
    result = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=100, disp=1)
    newTheta = result
    print("=======")
    print(newTheta, logPosterior(newTheta, args))
    print("=======")
    # Held-out points near the training clusters, with known labels.
    test = np.array([[-2.2, -2.05, -1.8, -0.2, 0.05, 0.15, 1.8, 2.05, 2.01]]).transpose()
    tlabels = np.array([[-1., -1., -1., 1., 1., 1., -1., -1., -1.]]).transpose()
    # Compute the mean and covariance of the data
    xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
    K = kernel(data, data, newTheta, wantderiv=False)
    # NOTE(review): kstar/kstarstar below use the INITIAL theta while K uses
    # newTheta — looks like a bug, preserved as-is; confirm intent.
    kstar = [kernel(data, xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in
                 xstar]
    kstarstar = np.squeeze(kstarstar)
    invk = np.linalg.inv(K)
    # Regression-style latent mean/variance over a dense grid, for plotting.
    mean = np.dot(kstar, np.dot(invk, labels))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.transpose())))
    var = np.reshape(var, (100, 1))
    pl.plot(xstar, mean, '-k')
    pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.xlabel('x')
    pl.ylabel('Latent f(x)')
    # Classify held-out points by thresholding the latent predictive mean at 0.
    pred = np.squeeze(np.array([predict(np.reshape(i, (1, 1)), data, labels, newTheta) for i in test]))
    output = np.reshape(np.where(pred[:, 0] < 0, -1, 1), (9, 1))
    print(np.sum(np.abs(output - tlabels)))
    print(pred)
    # Overlay: training data, then test points marked by correctness/class.
    which = np.where(labels == 1)
    pl.plot(data[which], labels[which], 'ro')
    which = np.where(labels == -1)
    pl.plot(data[which], labels[which], 'gx')
    which = np.where((tlabels == 1) & (output == 1))
    pl.plot(test[which], tlabels[which], 'r^')
    which = np.where((tlabels == -1) & (output == -1))
    pl.plot(test[which], tlabels[which], 'gv')
    which = np.where((tlabels == 1) & (output == -1))
    pl.plot(test[which], tlabels[which], 'rs')
    which = np.where((tlabels == -1) & (output == 1))
    pl.plot(test[which], tlabels[which], 'gs')
    # Second figure: predictive latent mean with a 2-sigma band.
    pl.figure()
    pred2 = np.squeeze(np.array([predict(np.reshape(i, (1, 1)), data, labels, newTheta) for i in xstar]))
    pl.plot(xstar, pred2[:, 0], 'k-')
    pl.fill_between(np.squeeze(xstar), np.squeeze(pred2[:, 0] - 2 * np.sqrt(pred2[:, 1])),
                    np.squeeze(pred2[:, 0] + 2 * np.sqrt(pred2[:, 1])), color='0.75')
    pl.xlabel('x')
    pl.ylabel('$\sigma(f(x))$')
def modified_XOR(sdev=0.3):
    """GP classification demo on a noisy 2-D XOR-style dataset.

    Draws m points as Gaussian clusters around the four XOR corners,
    labels opposite corners +1/-1, optimises the kernel hyperparameters,
    reports per-class test errors, and plots the decision contour.

    Args:
        sdev (float): standard deviation of the Gaussian noise around
            each corner.

    Fix: all index arithmetic uses integer division (//). The original
    `m / 2`, `m / 4`, `3 * m / 4` are floats under Python 3 and raise
    TypeError when used as slice bounds or array indices.
    """
    m = 100
    quarter, half = m // 4, m // 2

    # Training set: four clusters at (0,0), (0,1), (1,0), (1,1).
    data = sdev * np.random.randn(m, 2)
    data[half:, 0] += 1.
    data[quarter:half, 1] += 1.
    data[3 * quarter:, 1] += 1.
    labels = -np.ones((m, 1))
    labels[:quarter, 0] = 1.
    labels[3 * quarter:, 0] = 1.

    # Independent test set drawn the same way, with matching labels.
    Y = sdev * np.random.randn(m, 2)
    Y[half:, 0] += 1.
    Y[quarter:half, 1] += 1.
    Y[3 * quarter:m, 1] += 1.
    test = -np.ones((m, 1))
    test[:quarter, 0] = 1.
    test[3 * quarter:, 0] = 1.

    # Initial hyperparameters (log scale; kernel() exponentiates them).
    theta = np.zeros((3, 1))
    theta[0] = 1.0
    theta[1] = 0.7
    theta[2] = 0.
    args = (data, labels)
    print(theta, logPosterior(theta, args))
    result = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=20, disp=1)
    newTheta = result
    print("=======")
    print(newTheta, logPosterior(newTheta, args))
    print("=======")

    # Classify held-out points by thresholding the latent mean at 0.
    pred = np.squeeze(np.array([predict(np.reshape(i, (1, 2)), data, labels, newTheta) for i in Y]))
    output = np.reshape(np.where(pred[:, 0] < 0, -1, 1), (m, 1))
    print(np.sum(np.abs(output - test)))
    err1 = np.where((output == 1.) & (test == -1.))[0]
    err2 = np.where((output == -1.) & (test == 1.))[0]
    print("Class 1 errors ", len(err1), " from ", len(test[test == 1]))
    print("Class 2 errors ", len(err2), " from ", len(test[test == -1]))
    print("Test accuracy ", 1. - (float(len(err1) + len(err2))) / (len(test[test == 1]) + len(test[test == -1])))

    # Scatter the training data and overlay the decision contour of the
    # predictive latent mean evaluated on a regular grid.
    pl.figure()
    l1 = np.where(labels == 1)[0]
    l2 = np.where(labels == -1)[0]
    pl.plot(data[l1, 0], data[l1, 1], 'ko')
    pl.plot(data[l2, 0], data[l2, 1], 'wo')
    pl.axis('tight')
    pl.axis('off')
    xmin = np.min(data[:, 0])
    xmax = np.max(data[:, 0])
    x = np.arange(xmin, xmax, 0.1)
    y = np.arange(xmin, xmax, 0.1)
    predgrid = np.zeros((len(x), len(y)))
    for i in range(len(x)):
        for j in range(len(y)):
            d = np.array([[x[i], y[j]]])
            predgrid[i, j] = predict(d, data, labels, newTheta)[0]
    xx, yy = np.meshgrid(x, y)
    pl.contour(xx, yy, predgrid, 1)
def test2():
    """Demo: 1-D GP classification (NOTE(review): this function is an exact
    duplicate of test() above — candidate for removal or differentiation).
    """
    pl.ion()
    # Training inputs: three clusters; the middle cluster is the +1 class.
    data = np.array([[-2.1, -2.0, -1.9, -0.1, 0., 0.1, 1.9, 2.0, 2.1]]).transpose()
    labels = np.array([[-1., -1., -1., 1., 1., 1., -1., -1., -1.]]).transpose()
    # Initial (log-scale) hyperparameters for kernel().
    theta = np.zeros((3, 1))
    theta[0] = 1.0  # np.random.rand()*3
    theta[1] = 0.7  # np.random.rand()*3
    theta[2] = 0.3
    args = (data, labels)
    print(theta, logPosterior(theta, args))
    result = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4, maxiter=100, disp=1)
    newTheta = result
    print("=======")
    print(newTheta, logPosterior(newTheta, args))
    print("=======")
    test = np.array([[-2.2, -2.05, -1.8, -0.2, 0.05, 0.15, 1.8, 2.05, 2.01]]).transpose()
    tlabels = np.array([[-1., -1., -1., 1., 1., 1., -1., -1., -1.]]).transpose()
    # Compute the mean and covariance of the data
    xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
    K = kernel(data, data, newTheta, wantderiv=False)
    # NOTE(review): kstar/kstarstar use the INITIAL theta while K uses
    # newTheta — looks like a bug, preserved as-is; confirm intent.
    kstar = [kernel(data, xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel(xs * np.ones((1, 1)), xs * np.ones((1, 1)), theta, wantderiv=False, measnoise=False) for xs in
                 xstar]
    kstarstar = np.squeeze(kstarstar)
    invk = np.linalg.inv(K)
    mean = np.dot(kstar, np.dot(invk, labels))
    var = kstarstar - np.diag(np.dot(kstar, np.dot(invk, kstar.transpose())))
    var = np.reshape(var, (100, 1))
    pl.plot(xstar, mean, '-k')
    pl.fill_between(np.squeeze(xstar), np.squeeze(mean - 2 * np.sqrt(var)), np.squeeze(mean + 2 * np.sqrt(var)),
                    color='0.75')
    pl.xlabel('x')
    pl.ylabel('Latent f(x)')
    # Classify held-out points by thresholding the latent predictive mean at 0.
    pred = np.squeeze(np.array([predict(np.reshape(i, (1, 1)), data, labels, newTheta) for i in test]))
    output = np.reshape(np.where(pred[:, 0] < 0, -1, 1), (9, 1))
    print(np.sum(np.abs(output - tlabels)))
    print(pred)
    # Overlay: training data, then test points marked by correctness/class.
    which = np.where(labels == 1)
    pl.plot(data[which], labels[which], 'ro')
    which = np.where(labels == -1)
    pl.plot(data[which], labels[which], 'gx')
    which = np.where((tlabels == 1) & (output == 1))
    pl.plot(test[which], tlabels[which], 'r^')
    which = np.where((tlabels == -1) & (output == -1))
    pl.plot(test[which], tlabels[which], 'gv')
    which = np.where((tlabels == 1) & (output == -1))
    pl.plot(test[which], tlabels[which], 'rs')
    which = np.where((tlabels == -1) & (output == 1))
    pl.plot(test[which], tlabels[which], 'gs')
    # Second figure: predictive latent mean with a 2-sigma band.
    pl.figure()
    pred2 = np.squeeze(np.array([predict(np.reshape(i, (1, 1)), data, labels, newTheta) for i in xstar]))
    pl.plot(xstar, pred2[:, 0], 'k-')
    pl.fill_between(np.squeeze(xstar), np.squeeze(pred2[:, 0] - 2 * np.sqrt(pred2[:, 1])),
                    np.squeeze(pred2[:, 0] + 2 * np.sqrt(pred2[:, 1])), color='0.75')
    pl.xlabel('x')
    pl.ylabel('$\sigma(f(x))$')
# test()
# test2()
# modified_XOR(sdev=0.1)
# modified_XOR(sdev=0.3)
# modified_XOR(sdev=0.4)
| 15,039 | 6,982 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - group access via various backends.
The composite_groups is a backend that does not have direct storage,
but composes other backends to a new one, so group definitions are
retrieved from several backends. This allows to mix different
backends.
@copyright: 2009 DmitrijsMilajevs
@license: GPL, see COPYING for details
"""
from MoinMoin.datastruct.backends import BaseGroupsBackend, GroupDoesNotExistError
class CompositeGroups(BaseGroupsBackend):
    """
    Compose several group backends into one backend.

    Lookups consult the wrapped backends in order; the first backend
    that defines a group wins.
    """

    def __init__(self, request, *backends):
        """
        @param backends: ordered group backends used to resolve
                         group definitions.
        """
        super(CompositeGroups, self).__init__(request)
        self._backends = backends

    def __getitem__(self, group_name):
        """
        Return the group named group_name from the first backend that
        defines it; raise GroupDoesNotExistError if no backend does.
        """
        for backend in self._backends:
            try:
                return backend[group_name]
            except GroupDoesNotExistError:
                pass
        raise GroupDoesNotExistError(group_name)

    def __iter__(self):
        """
        Yield each group name exactly once, walking the backends in
        order, so a name defined in several backends is attributed to
        the earliest backend in self._backends.
        """
        seen = set()
        for backend in self._backends:
            for group_name in backend:
                if group_name in seen:
                    continue
                seen.add(group_name)
                yield group_name

    def __contains__(self, group_name):
        """
        Report whether any backend defines a group called group_name.

        @param group_name: name of the group [unicode]
        """
        return any(group_name in backend for backend in self._backends)

    def __repr__(self):
        return "<%s backends=%s>" % (self.__class__, self._backends)
| 2,130 | 579 |
import argparse
import logging
import os
import random
import socket
import sys
import datetime
import numpy as np
import psutil
import setproctitle
import torch
import torchvision.models
#import wandb
# add the FedML root directory to the python path
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from FedML.fedml_api.distributed.utils.gpu_mapping import mapping_processes_to_gpu_device_from_yaml_file
from FedML.fedml_api.distributed.feddetec.FedDetecAPI import FedML_init, FedML_FedDetec_distributed
from FedML.fedml_api.distributed.feddetec.utils import count_parameters
from data_preprocessing.coco.coco_detection.data_loader import load_partition_data_coco,load_partition_data_electric
# from data_preprocessing.coco.segmentation.data_loader.py import load_partition_data_distributed_coco_segmentation, load_partition_data_coco_segmentation
from data_preprocessing.pascal_voc_augmented.data_loader import load_partition_data_distributed_pascal_voc, \
load_partition_data_pascal_voc
from data_preprocessing.coco.coco_detection.datasets import create_dataloader
from data_preprocessing.cityscapes.data_loader import load_partition_data_distributed_cityscapes, \
load_partition_data_cityscapes
#from model.segmentation.deeplabV3_plus import DeepLabV3_plus
#from model.segmentation.unet import UNet
from training.detection_trainer import DetectionTrainer
#from training.segmentation_trainer import SegmentationTrainer
def str2bool(v):
    """Parse an argparse-friendly boolean.

    Accepts actual bools unchanged; otherwise maps common yes/no spellings
    (case-insensitive) to True/False and raises ArgumentTypeError for
    anything else, so argparse reports a clean usage error.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def add_args(parser):
    """
    parser : argparse.ArgumentParser

    Registers every CLI flag used by the FedDetec launcher on *parser*,
    then parses sys.argv and returns the resulting namespace.
    (NOTE(review): calling parse_args() inside the "add" helper is unusual
    but callers rely on the returned namespace — keep as-is.)
    """
    # --- Process / model selection ---
    parser.add_argument('--process_name', type=str, default='FedDetec-distributed:',
                        help='Machine process names')
    parser.add_argument('--model', type=str, default='fasterrcnn_resnet50_rpn', metavar='N',
                        help='neural network used in training')
    parser.add_argument('--device', default='',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    # --- Backbone configuration ---
    parser.add_argument('--backbone', type=str, default='resnet',
                        help='employ with backbone (default: xception)')
    parser.add_argument('--backbone_pretrained', type=str2bool, nargs='?', const=True, default=True,
                        help='pretrained backbone (default: True)')
    parser.add_argument('--backbone_freezed', type=str2bool, nargs='?', const=True, default=False,
                        help='Freeze backbone to extract features only once (default: False)')
    parser.add_argument('--extract_feat', type=str2bool, nargs='?', const=True, default=False,
                        help='Extract Feature Maps of (default: False) NOTE: --backbone_freezed has to be True for this argument to be considered')
    parser.add_argument('--outstride', type=int, default=16,
                        help='network output stride (default: 16)')
    # --- Dataset and partitioning ---
    parser.add_argument('--dataset', type=str, default='pascal_voc', metavar='N',
                        choices=['coco', 'electric', 'pascal_voc', 'cityscapes'],
                        help='dataset used for training')
    parser.add_argument('--data_dir', type=str, default='/home/chaoyanghe/BruteForce/FedML/data/pascal_voc',
                        help='data directory (default = /home/chaoyanghe/BruteForce/FedML/data/pascal_voc)')
    parser.add_argument('--log_dir', type=str, default='./runs_test/',
                        help='data directory')
    parser.add_argument('--checkname', type=str, default='deeplab-resnet-finetune-hetero',
                        help='set the checkpoint name')
    parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',
                        help='how to partition the dataset on local workers')
    parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',
                        help='partition alpha (default: 0.5)')
    # --- Federated topology ---
    parser.add_argument('--client_num_in_total', type=int, default=3, metavar='NN',
                        help='number of workers in a distributed cluster')
    parser.add_argument('--client_num_per_round', type=int, default=3, metavar='NN',
                        help='number of workers')
    # --- Checkpointing ---
    parser.add_argument('--save_client_model', type=str2bool, nargs='?', const=True, default=False,
                        help='whether to save locally trained model by clients (default: False')
    parser.add_argument('--save_model', type=str2bool, nargs='?', const=True, default=False,
                        help='whether to save best averaged model (default: False')
    parser.add_argument('--load_model', type=str2bool, nargs='?', const=True, default=False,
                        help='whether to load pre-trained model weights (default: False')
    parser.add_argument('--model_path', type=str, default=None,
                        help='Pre-trained saved model path NOTE: --load has to be True for this argument to be considered')
    # --- Local training hyperparameters ---
    parser.add_argument('--batch_size', type=int, default=10, metavar='N',
                        help='input batch size for training (default: 32)')
    parser.add_argument('--sync_bn', type=str2bool, nargs='?', const=True, default=False,
                        help='whether to use sync bn (default: False)')
    parser.add_argument('--freeze_bn', type=str2bool, nargs='?', const=True, default=False,
                        help='whether to freeze bn parameters (default: False)')
    parser.add_argument('--client_optimizer', type=str, default='sgd',
                        help='adam')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--lr_scheduler', type=str, default='poly',
                        choices=['poly', 'step', 'cos'],
                        help='lr scheduler mode: (default: poly)')
    parser.add_argument('--momentum', type=float, default=0.9,
                        metavar='M', help='momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=5e-4,
                        metavar='M', help='w-decay (default: 5e-4)')
    parser.add_argument('--nesterov', action='store_true', default=False,
                        help='whether use nesterov (default: False)')
    parser.add_argument('--loss_type', type=str, default='ce',
                        choices=['ce', 'focal'],
                        help='loss func type (default: ce)')
    parser.add_argument('--epochs', type=int, default=2, metavar='EP',
                        help='how many epochs will be trained locally')
    parser.add_argument('--comm_round', type=int, default=200,
                        help='how many round of communications we shoud use')
    # --- Runtime / infrastructure ---
    parser.add_argument('--is_mobile', type=int, default=0,
                        help='whether the program is running on the FedML-Mobile server side')
    parser.add_argument('--evaluation_frequency', type=int, default=5,
                        help='Frequency of model evaluation on training dataset (Default: every 5th round)')
    parser.add_argument('--gpu_server_num', type=int, default=1,
                        help='gpu_server_num')
    parser.add_argument('--gpu_num_per_server', type=int, default=4,
                        help='gpu_num_per_server')
    parser.add_argument('--gpu_mapping_file', type=str, default="gpu_mapping.yaml",
                        help='the gpu utilization file for servers and clients. If there is no \
                        gpu_util_file, gpu will not be used.')
    parser.add_argument('--gpu_mapping_key', type=str, default="mapping_config1_5",
                        help='the key in gpu utilization file')
    parser.add_argument('--image_size', type=int, default=512,
                        help='Specifies the input size of the model (transformations are applied to scale or crop the image)')
    parser.add_argument('--ci', type=int, default=0,
                        help='CI')
    args = parser.parse_args()
    return args
def load_data(process_id, args, dataset_name):
    """Select and invoke the partitioned data loader for *dataset_name*.

    Args:
        process_id: MPI rank of the caller (currently unused here; kept for
            interface compatibility with the launcher).
        args: parsed CLI namespace, forwarded to the loader.
        dataset_name: one of 'coco', 'pascal_voc', 'cityscapes', 'electric'.

    Returns:
        list: [train_data_num, test_data_num, train_data_global,
        test_data_global, data_local_num_dict, train_data_local_dict,
        test_data_local_dict, class_num].

    Raises:
        ValueError: for an unknown dataset name. (Previously an unknown
        name left the loader as None and crashed with an opaque
        "'NoneType' object is not callable" TypeError.)
    """
    loaders = {
        "coco": load_partition_data_coco,
        "pascal_voc": load_partition_data_pascal_voc,
        "cityscapes": load_partition_data_cityscapes,
        "electric": load_partition_data_electric,
    }
    if dataset_name not in loaders:
        raise ValueError("Unsupported dataset: {}".format(dataset_name))
    data_loader = loaders[dataset_name]
    train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict, \
        train_data_local_dict, test_data_local_dict, class_num = data_loader(args)
    return [train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict,
            train_data_local_dict, test_data_local_dict, class_num]
def create_model(args, model_name, output_dim, img_size):
    """Build the Faster R-CNN ResNet-50 FPN detector used by this launcher.

    NOTE(review): model_name, output_dim and img_size are accepted for
    interface compatibility but not used — the architecture and the class
    count (91, the COCO convention) are hard-coded below; confirm whether
    output_dim should drive num_classes.
    """
    print("Creating model")
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        num_classes=91,                 # hard-coded COCO class count
        pretrained=False,
        trainable_backbone_layers=5,    # train the full backbone
    )
    logging.info("Fasterrcnn_resnet50_fpn Model Size : {}".format(count_parameters(model)))
    return model
def init_training_device(process_ID, fl_worker_num, gpu_num_per_machine, gpu_server_num):
    """Map an MPI process to a torch device.

    Process 0 (the server) gets cuda:<gpu_server_num>; client process i
    (i >= 1) gets GPU ((i-1) % gpu_num_per_machine) + gpu_server_num.
    Falls back to CPU when CUDA is unavailable.
    """
    cuda_ok = torch.cuda.is_available()
    if process_ID == 0:
        return torch.device("cuda:" + str(gpu_server_num) if cuda_ok else "cpu")
    # Round-robin client-to-GPU assignment, offset past the server's GPU.
    allocation = {
        worker: (worker % gpu_num_per_machine) + gpu_server_num
        for worker in range(fl_worker_num)
    }
    device = torch.device("cuda:" + str(allocation[process_ID - 1]) if cuda_ok else "cpu")
    logging.info('GPU process allocation {0}'.format(allocation))
    logging.info('GPU device available {0}'.format(device))
    return device
if __name__ == "__main__":
# initialize distributed computing (MPI)
comm, process_id, worker_number = FedML_init()
# customize the log format
logging.basicConfig(filename='info.log',
level=logging.INFO,
format=str(
process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
now = datetime.datetime.now()
time_start = now.strftime("%Y-%m-%d %H:%M:%S")
logging.info("Executing Image Detection at time: {0}".format(time_start))
# parse python script input parameters
parser = argparse.ArgumentParser()
args = add_args(parser)
logging.info('Given arguments {0}'.format(args))
# customize the process name
str_process_name = args.process_name + str(process_id)
setproctitle.setproctitle(str_process_name)
hostname = socket.gethostname()
logging.info("Host and process details")
logging.info(
"process ID: {0}, host name: {1}, process ID: {2}, process name: {3}, worker number: {4}".format(process_id,
hostname,
os.getpid(),
psutil.Process(
os.getpid()),
worker_number))
# initialize the wandb machine learning experimental tracking platform (https://www.wandb.com/).
#if process_id == 0:
# wandb.init(
# project="fedcv-detection",
# name=args.process_name + str(args.partition_method) + "r" + str(args.comm_round) + "-e" + str(
# args.epochs) + "-lr" + str(
# args.lr),
# config=args
# )
# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
# GPU arrangement: Please customize this function according your own topology.
# The GPU server list is configured at "mpi_host_file".
# If we have 4 machines and each has two GPUs, and your FL network has 8 workers and a central worker.
# The 4 machines will be assigned as follows:
# machine 1: worker0, worker4, worker8;
# machine 2: worker1, worker5;
# machine 3: worker2, worker6;
# machine 4: worker3, worker7;
# Therefore, we can see that workers are assigned according to the order of machine list.
device = mapping_processes_to_gpu_device_from_yaml_file(process_id, worker_number , args.gpu_mapping_file,args.gpu_mapping_key)
#device = init_training_device(process_id, worker_number - 1, args.gpu_num_per_server, args.gpu_server_num)
# load data
dataset = load_data(process_id, args, args.dataset)
[train_data_num, test_data_num, train_data_global, test_data_global, data_local_num_dict,
train_data_local_dict, test_data_local_dict, class_num] = dataset
# create model.
# Note if the model is DNN (e.g., ResNet), the training will be very slow.
# In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)
model = create_model(args, model_name=args.model, output_dim=class_num,
img_size=torch.Size([args.image_size, args.image_size]))
if args.load_model:
try:
checkpoint = torch.load(args.model_path)
model.load_state_dict(checkpoint['state_dict'])
except:
raise ("Failed to load pre-trained model")
# define my own trainer
model_trainer = DetectionTrainer(model, args)
logging.info("Calling FedML_FedSeg_distributed")
FedML_FedDetec_distributed(process_id, worker_number, device, comm, model, train_data_num, data_local_num_dict,
train_data_local_dict, test_data_local_dict, args, model_trainer)
| 15,192 | 4,616 |
# standard libraries
import time
# non-standard libraries
from sklearn.linear_model import LogisticRegression
# all of the local modules
from prep_data import read_images, dataset_split
from explore_data import array_info, image_info
from models import run_model
from assess_model import count_time, roc_assess, F1score_assess
def main():
    """Glue code: load the ship-imagery dataset, train a logistic-regression
    baseline, and report accuracy, ROC, and F1 metrics.

    Fixes: removed the leftover debug line `print(type(LogisticRegression()))`
    and the stale commented-out run_model/count_time calls.
    """
    dir_path = r'/Users/dustin/CS/projects/ship_detector/data/ships-in-satellite-imagery/shipsnet/'
    data_array, label_array = read_images(dir_path)

    # Sanity-check the raw arrays before training.
    array_info(data_array, label_array)
    image_info(data_array[0, :], plot_image=False)

    split_ratios = [0.8, 0.1, 0.1]  # splitting the dataset into 80% train, 10% dev, 10% test
    Xtrain, Xdev, Xtest, ytrain, ydev, ytest = dataset_split(data_array, label_array, split_ratios)
    print(f"xtrain, xdev, xtest, ytrain, ydev, ytest shapes: {Xtrain.shape}, {Xdev.shape}, {Xtest.shape}, {ytrain.shape}, {ydev.shape} {ytest.shape} ")

    # Fit the baseline classifier and report train/test accuracy.
    model = LogisticRegression(solver='lbfgs')
    model_fit = model.fit(Xtrain, ytrain)
    train_acc = model_fit.score(Xtrain, ytrain)
    test_acc = model_fit.score(Xtest, ytest)
    print("Training Data Accuracy: %0.2f" % (train_acc))
    print("Test Data Accuracy: %0.2f" % (test_acc))

    # Threshold-free and threshold-based quality metrics on the test split.
    roc_assess(model_fit, Xtest, ytest, print_values=True)
    F1score_assess(model_fit, Xtest, ytest, print_values=True)
if __name__ == "__main__": main() | 1,678 | 627 |
"""
This file belongs to the PANIC Alarm Suite,
developed by ALBA Synchrotron for Tango Control System
GPL Licensed
"""
import sys, re, os, traceback, time
import PyTango, fandango
from fandango.functional import *
from fandango import Catched
import taurus, taurus.qt.qtgui.base
from taurus.core import TaurusEventType
from taurus.qt.qtgui.base import TaurusBaseComponent
import panic
from panic.properties import SEVERITIES
from panic.gui.utils import *
from panic.gui.utils import WindowManager #Order of imports matters!
from panic.gui.editor import AlarmForm
from panic.gui.alarmhistory import ahWidget
from panic.gui.devattrchange import dacWidget
#from htmlview import *
class QAlarmManager(iValidatedWidget,object): #QAlarm):
    """
    Mixin adding alarm-selection bookkeeping and the right-click context
    menu (view/edit/clone/delete/ack/reset/priority/...) to alarm widgets.

    NOTE(review): concrete subclasses are expected to provide .api, .view,
    .parent(), .snapi / .ctx_names and to override onReload() — inferred
    from attribute usage below; confirm against subclasses.
    """
    def setCurrentAlarm(self,alarm):
        # Selection is always kept as a list; here a single-element one.
        self._selected = [alarm]
    def getCurrentAlarm(self):
        # First alarm of the current selection (assumes one exists).
        return self._selected[0]
    def getSelectedAlarms(self,extend=False):
        # `extend` kept for subclass signature compatibility; unused here.
        return self._selected
    def connectContextMenu(self,widget):
        """Bind `widget`'s custom context-menu signal to onContextMenu()."""
        self._manager = widget
        # Old-style (PyQt4) signal/slot connection.
        Qt.QObject.connect(widget,
            Qt.SIGNAL('customContextMenuRequested(const QPoint&)'),
            self.onContextMenu)
    @Catched
    def onContextMenu(self, point):
        """Build and pop up the context menu for the selected alarm(s)."""
        self.popMenu = QtGui.QMenu(self)
        view = getattr(self,'view')
        items = self.getSelectedAlarms(extend=False)
        print('In onContextMenu(%s)'%str([a.tag for a in items]))
        alarm = self.getCurrentAlarm()
        #self.popMenu.addAction(getThemeIcon("face-glasses"),
        #    "Preview Attr. Values",self.onSelectAll)
        # Actions below only make sense for a single selected alarm.
        act = self.popMenu.addAction(getThemeIcon("face-glasses"),
            "See Alarm Details",self.onView)
        act.setEnabled(len(items)==1)
        act = self.popMenu.addAction(getThemeIcon("accessories-calculator"),
            "Preview Formula/Values",
            lambda s=self:WindowManager.addWindow(s.showAlarmPreview()))
        act.setEnabled(len(items)==1)
        #self.popMenu.addAction(getThemeIcon("view-refresh"),
        #"Sort/Update List",self.onSevFilter)
        act = self.popMenu.addAction(getThemeIcon("office-calendar"),
            "View History",self.viewHistory)
        act.setEnabled(SNAP_ALLOWED and len(items)==1)
        # and row.get_alarm_tag() in self.ctx_names)
        sevMenu = self.popMenu.addMenu('Change Priority')
        for S in SEVERITIES:
            action = sevMenu.addAction(S)
            # Default-argument lambdas bind the loop variables at
            # definition time (avoids the late-binding closure pitfall).
            self.connect(action, QtCore.SIGNAL("triggered()"),
                lambda ks=items,sev=S,o=self:
                ChangeSeverity(parent=o,severity=sev))
        # Reset / Acknowledge options
        act = self.popMenu.addAction(getThemeIcon("edit-undo"),
            "Reset Alarm(s)",lambda s=self:ResetAlarm(s))
        act.setEnabled(any(i.active for i in items))
        #if len([i.acknowledged for i in items]) in (len(items),0):
        if len(items)==1:
            self.popMenu.addAction(getThemeIcon("media-playback-pause"),
                "Acknowledge/Renounce Alarm(s)",
                lambda s=self:AcknowledgeAlarm(s))
        #if len([i.disabled for i in items]) in (len(items),0):
        if len(items)==1:
            self.popMenu.addAction(getThemeIcon("dialog-error"),
                "Disable/Enable Alarm(s)",
                lambda s=self:ChangeDisabled(s))
        # Edit options — shown only when the widget is in expert mode.
        if getattr(self,'expert',None):
            self.popMenu.addSeparator()
            act = self.popMenu.addAction(
                getThemeIcon("accessories-text-editor"),
                "Edit Alarm",self.onEdit)
            act.setEnabled(len(items)==1)
            act = self.popMenu.addAction(getThemeIcon("edit-copy"),
                "Clone Alarm",self.onClone)
            act.setEnabled(len(items)==1)
            act = self.popMenu.addAction(getThemeIcon("edit-clear"),
                "Delete Alarm",self.onDelete)
            act.setEnabled(len(items)==1)
            self.popMenu.addAction(getThemeIcon("applications-system"),
                "Advanced Config",lambda s=self:ShowConfig(s))
            self.popMenu.addSeparator()
            act = self.popMenu.addAction(
                getThemeIcon("accessories-text-editor"), "TestDevice",
                lambda d=alarm.device:testDevice(d))
            act.setEnabled(len(items)==1)
        #self.popMenu.addSeparator()
        #self.popMenu.addAction(getThemeIcon("process-stop"), "close App",self.close)
        # Map to global coordinates when attached to a manager widget.
        if getattr(self,'_manager',None):
            self.popMenu.exec_(self._manager.mapToGlobal(point))
        else:
            self.popMenu.exec_(point)
    def onEdit(self,edit=True):
        """Open (or re-focus) an AlarmForm for the current alarm.

        :param edit: True opens the form in edit mode, False read-only.
        :return: the AlarmForm instance.
        """
        alarm = self.getCurrentAlarm()
        # NOTE(review): Python-2 print statement; the rest of this file
        # mostly uses the parenthesized form.
        print "AlarmGUI.onEdit(%s)"%alarm
        # Reuse an already-open form showing this same alarm tag.
        forms = [f for f in WindowManager.WINDOWS
            if isinstance(f,AlarmForm) and f.getCurrentAlarm().tag==alarm.tag]
        if forms: #Bring existing forms to focus
            form = forms[0]
            form.enableEditForm(edit)
            form.hide()
            form.show()
        else: #Create a new form
            form = WindowManager.addWindow(AlarmForm(self.parent()))
            #form.connect(form,Qt.SIGNAL('valueChanged'),self.hurry)
            if edit: form.onEdit(alarm)
            else: form.setAlarmData(alarm)
            form.show()
        return form
    def onView(self):
        """Open the current alarm in read-only mode."""
        return self.onEdit(edit=False)
    def onNew(self):
        """Open an empty AlarmForm to create a brand new alarm."""
        try:
            trace('onNew()')
            # A PyAlarm device must exist before alarms can be declared.
            if not self.api.devices:
                v = Qt.QMessageBox.warning(self,'Warning',
                    'You should create a PyAlarm device first '\
                    '(using jive or config panel)!',Qt.QMessageBox.Ok)
                return
            # Best-effort: clear the current selection first.
            try:
                for item in self._manager.selectedItems():
                    item.setSelected(False)
            except: pass
            form = AlarmForm(self.parent())
            trace('form')
            #form.connect(form,Qt.SIGNAL('valueChanged'),self.hurry)
            form.onNew()
            form.show()
            return form
        except:
            traceback.print_exc()
    def onClone(self):
        """Duplicate the current alarm under a user-provided tag name."""
        alarm = self.getCurrentAlarm().tag
        trace("onClone(%s)"%alarm)
        new_tag,ok = Qt.QInputDialog.getText(self,'Input dialog',
            'Please provide tag name for cloned alarm.',
            Qt.QLineEdit.Normal,alarm)
        # Require a minimally meaningful tag (more than 3 characters).
        if (ok and len(str(new_tag)) > 3):
            try:
                obj = self.api[alarm]
                # Copy device, formula, description, receivers, severity.
                self.api.add(str(new_tag), obj.device, formula=obj.formula,
                    description=obj.description,
                    receivers=obj.receivers,
                    severity=obj.severity)
                self.onReload()
            except Exception,e:
                Qt.QMessageBox.critical(self,"Error!",str(e),
                    QtGui.QMessageBox.AcceptRole,
                    QtGui.QMessageBox.AcceptRole)
                trace(traceback.format_exc())
    def onDelete(self,tag=None,ask=True):
        """Delete `tag` (or the current selection) after confirmation and
        admin validation; recurses once per tag for multi-selections."""
        tags = tag and [tag] or [getattr(r,'tag',r)
            for r in self.getSelectedAlarms(extend=False)]
        if ask:
            v = QtGui.QMessageBox.warning(None,'Pending Changes',\
                'The following alarms will be deleted:\n\t'+'\n\t'.join(tags),\
                QtGui.QMessageBox.Ok|QtGui.QMessageBox.Cancel)
            if v == QtGui.QMessageBox.Cancel:
                return
        # Only alarm admins may proceed (single-tag case passes the tag).
        self.setAllowedUsers(self.api.get_admins_for_alarm(
            len(tags)==1 and tags[0]))
        if not self.validate('onDelete(%s)'%([a for a in tags])):
            return
        if len(tags)>1:
            print('-'*80)
            [self.onDelete(tag,ask=False) for tag in tags]
        else:
            try:
                tag = tags[0]
                trace('onDelete(%s)'%tag)
                view = getattr(self,'view',None)
                if view:
                    view.api.remove(tag)
                    view.apply_filters()
                    view.disconnect(tag)
                #self.removeAlarmRow(tag)
                if self.api.has_tag(tag):
                    self.api.remove(tag)
                # Close any editor windows still showing the deleted alarm.
                [f.close() for f in WindowManager.WINDOWS
                    if isinstance(f,AlarmForm)
                    and f.getCurrentAlarm().tag==tag]
                self.onReload(clear_selection=True)
                trace('onDelete(%s): done'%tag)
            except:
                traceback.print_exc()
    def onReload(self,clear_selection=False):
        """Abstract hook: subclasses must refresh their alarm list here."""
        raise Exception('onReload():NotImplemented!')
    ###########################################################################
    def viewHistory(self):
        """Open the alarm history (snapshot) widget for the current alarm,
        or warn the user when the alarm has no archived context."""
        alarm = self.getCurrentAlarm().tag
        # Lazily create the snapshot API handle the first time it is used.
        if SNAP_ALLOWED and not self.snapi:
            self.snapi = get_snap_api()
        if self.snapi:
            self.ctx_names=[c.name for c in self.snapi.get_contexts().values()]
            if alarm in self.ctx_names:
                self.ahApp = ahWidget()
                self.ahApp.show()
                #self.ahApp.setAlarmCombo(alarm=str(self._ui.listWidget.\
                #currentItem().text().split('|')[0]).strip(' '))
                self.ahApp.setAlarmCombo(alarm=alarm)
            else:
                v = QtGui.QMessageBox.warning(None,'Not Archived', \
                    'This alarm has not recorded history',QtGui.QMessageBox.Ok)
                return
    def showAlarmPreview(self):
        """Open an AlarmPreview window for the current alarm; return it."""
        form = AlarmPreview(tag=self.getCurrentAlarm(),parent=self.parent())
        form.show()
        return form
##############################################################################
def getTargetAlarms(obj,alarms=None,active=False):
    """Normalize the alarms targeted by a dialog action into a list.

    When `alarms` is given it is wrapped in a list if needed; otherwise the
    selection is taken from `obj` (an AlarmForm's current alarm, or any
    widget exposing getSelectedAlarms(), optionally keeping active ones
    only). Returns None when nothing can be derived.
    """
    if alarms is not None:
        return alarms if isSequence(alarms) else [alarms]
    if isinstance(obj,AlarmForm):
        return [obj.getCurrentAlarm()]
    if hasattr(obj,'getSelectedAlarms'):
        return [t for t in obj.getSelectedAlarms()
                if (not active or t.active)]
    return None
def testDevice(device):
    """Launch the standard Tango device test panel for `device`.

    Fire-and-forget: the panel is spawned in the background via the shell.
    """
    import os
    command = 'tg_devtest %s &'%device
    os.system(command)
def emitValueChanged(self):
    """Fire `self`'s change notification, whichever hook it exposes.

    Prefers an `emitValueChanged` attribute on the object itself, falling
    back to `valueChanged`; silently does nothing when neither exists.
    """
    for hook in ('emitValueChanged','valueChanged'):
        if hasattr(self,hook):
            getattr(self,hook)()
            break
#[o.get_acknowledged(force=True) for o in items]
#[f.setAlarmData() for f in WindowManager.WINDOWS
#if isinstance(f,AlarmForm)]
#self.onFilter()
def ShowConfig(parent=None):
    """Open the device attributes/properties editor (dacWidget) for the
    device of `parent`'s currently selected alarm, registering the new
    window in the WindowManager."""
    device_name = parent.getCurrentAlarm().device
    widget = dacWidget(device=device_name)
    WindowManager.addWindow(widget)
    widget.show()
def ResetAlarm(parent=None,alarm=None):
    """
    Reset active alarm(s) after admin validation and a mandatory comment.

    :param parent: widget providing .api, .validate(), .setAllowedUsers()
    :param alarm: explicit alarm(s); defaults to parent's selection
    Any failure is reported in a warning dialog instead of propagating.
    """
    try:
        self = parent
        prompt,cmt=QtGui.QInputDialog,''
        # Only currently-active alarms are targeted.
        alarms = getTargetAlarms(parent,alarm,active=True)
        action = 'RESET'
        text = 'The following alarms will be %s:\n\t'%action\
            +'\n\t'.join([t.tag for t in alarms])
        trace('In ResetAlarm(): %s'%text)
        text += '\n\n'+'Must type a comment to continue:'
        # Warn when a formula still evaluates True (condition still active);
        # one confirmation covers the whole batch (note the break).
        for a in alarms:
            try:
                r = parent.api.evaluate(a.formula)
                if r:
                    v = QtGui.QMessageBox.warning(self,'Warning',
                        '%s condition is still active'%a.tag
                        +'. Do you want to reset it anyway?',
                        QtGui.QMessageBox.Ok|QtGui.QMessageBox.Cancel)
                    if v == QtGui.QMessageBox.Cancel:
                        return
                    else:
                        break
            except:
                traceback.print_exc()
        # Restrict the login check to this alarm's admins (single-tag case).
        self.setAllowedUsers(self.api.get_admins_for_alarm(len(alarms)==1
            and alarms[0].tag))
        if not self.validate('%s(%s)'%(action,[a.tag for a in alarms])):
            raise Exception('Invalid login or password!')
        comment, ok = QtGui.QInputDialog.getText(self,'Input dialog',text)
        if not ok:
            return
        elif ok and len(str(comment)) < 4:
            raise Exception('comment was too short')
        # Prefix the comment with the operator's user name.
        comment = get_user()+': '+str(comment)
        for alarm in alarms:
            print('ResetAlarm(%s):%s'%(alarm.tag,comment))
            alarm.reset(comment)
        emitValueChanged(self)
    except:
        # NOTE(review): if parent is None, `self` is None here and the
        # warning dialog gets a None parent — confirm intended.
        msg = traceback.format_exc()
        v = QtGui.QMessageBox.warning(self,'Warning',msg,QtGui.QMessageBox.Ok)
def AcknowledgeAlarm(parent,alarm=None):
    """
    Toggle acknowledge/renounce on active alarm(s) after admin validation
    and a mandatory comment; re-prompts when the comment is too short.
    """
    try:
        self = parent
        min_comment,comment_error = 4,'Comment too short!'
        prompt,cmt=QtGui.QInputDialog,''
        alarms = getTargetAlarms(parent,alarm,active=True)
        # If not every alarm is already acknowledged, this run acknowledges;
        # otherwise it renounces (releases) the acknowledge.
        acks = len([a for a in alarms if a.acknowledged])
        action = 'ACKNOWLEDGED' if acks!=len(alarms) else 'RENOUNCED'
        text = 'The following alarms will be %s,\n\t'%action\
            +'\n\t'.join([t.tag for t in alarms])
        trace('In %s(): %s'%(action,text))
        text += '\n\n'+'Must type a comment to continue:'
        self.setAllowedUsers(self.api.get_admins_for_alarm(len(alarms)==1
            and alarms[0].tag))
        if not self.validate('%s(%s)'%(action,[a.tag for a in alarms])):
            raise Exception('Invalid login or password!')
        comment, ok = QtGui.QInputDialog.getText(self,'Input dialog',text)
        if not ok:
            return
        elif ok and len(str(comment)) < min_comment:
            raise Exception(comment_error)
        # Prefix the comment with the operator's user name.
        comment = str(get_user()+': '+str(comment))
        for alarm in alarms:
            if not alarm.acknowledged and action == 'ACKNOWLEDGED':
                alarm.acknowledge(comment)
            elif alarm.acknowledged:
                alarm.renounce(comment)
        emitValueChanged(self)
    except Exception,e:
        # Full traceback for unexpected errors; short message otherwise.
        msg = traceback.format_exc() if e.message!=comment_error else e.message
        v = QtGui.QMessageBox.warning(self,'Warning',
            msg,QtGui.QMessageBox.Ok)
        # Too-short comment: ask the operator again (recursive retry).
        if e.message == comment_error: AcknowledgeAlarm(parent,alarm)
def ChangeDisabled(parent,alarm=None):
    """
    Toggle disable/enable on alarm(s) after admin validation and a
    mandatory comment; re-prompts when the comment is too short.
    """
    try:
        self = parent
        min_comment,comment_error = 4,'Comment too short!'
        prompt,cmt=QtGui.QInputDialog,''
        alarms = getTargetAlarms(parent,alarm,active=False)
        # If any alarm is still enabled, this run disables; otherwise
        # (all already disabled) it enables them back.
        check = len([a for a in alarms if not a.disabled])
        action = 'ENABLED' if check!=len(alarms) else 'DISABLED'
        text = 'The following alarms will be %s,\n\t'%action\
            +'\n\t'.join([t.tag for t in alarms])
        trace('In %s(): %s'%(action,text))
        text += '\n\n'+'Must type a comment to continue:'
        self.setAllowedUsers(self.api.get_admins_for_alarm(len(alarms)==1
            and alarms[0].tag))
        if not self.validate('%s(%s)'%(action,[a.tag for a in alarms])):
            raise Exception('Invalid login or password!')
        comment, ok = QtGui.QInputDialog.getText(self,'Input dialog',text)
        if not ok:
            return
        elif ok and len(str(comment)) < min_comment:
            raise Exception(comment_error)
        # Prefix the comment with the operator's user name.
        comment = get_user()+': '+str(comment)
        for alarm in alarms:
            if not alarm.disabled and action == 'DISABLED':
                print('Disabling %s'%alarm.tag)
                alarm.disable(comment)
            elif alarm.disabled:
                print('Enabling %s'%alarm.tag)
                alarm.enable(comment)
        emitValueChanged(self)
    except Exception,e:
        msg = traceback.format_exc() if e.message!=comment_error else e.message
        v = QtGui.QMessageBox.warning(self,'Warning',
            msg,QtGui.QMessageBox.Ok)
        # Too-short comment: ask the operator again (recursive retry).
        if e.message == comment_error: ChangeDisabled(parent,alarm)
def ChangeSeverity(parent,severity,alarm=None):
    """
    Set a new priority/severity on alarm(s) after admin validation, write
    it to the database and refresh any open AlarmForm windows.

    :param severity: one of panic.properties.SEVERITIES
    """
    try:
        alarms = getTargetAlarms(parent,alarm,active=False)
        assert severity in SEVERITIES
        parent.setAllowedUsers(parent.api.get_admins_for_alarm(len(alarms)==1
            and alarms[0].tag))
        if not parent.validate('%s(%s)'%(
                'ChangePriority',[a.tag for a in alarms])):
            raise Exception('Invalid login or password!')
        for alarm in alarms:
            # write=True persists the new severity immediately.
            alarm.setup(severity=severity.upper(),write=True)
        # Refresh every open alarm editor so it shows the new severity.
        [f.setAlarmData() for f in WindowManager.WINDOWS
            if isinstance(f,AlarmForm)]
        emitValueChanged(parent)
    except Exception,e:
        msg = traceback.format_exc()
        v = QtGui.QMessageBox.warning(parent,'Warning',
            msg,QtGui.QMessageBox.Ok)
| 17,246 | 5,141 |
"""
Flask app
"""
from views import APP_ENV, application
if __name__ == "__main__":
    # Enable Flask's interactive debugger only in the "debug" environment.
    application.debug = (APP_ENV == "debug")
    application.run()
| 197 | 63 |
class Solution:
    def pancakeSort(self, A: List[int]) -> List[int]:
        """Sort A in place with pancake flips; return the flip sizes.

        A is assumed to be a permutation of 1..len(A) (the LeetCode 969
        guarantee the original code also relies on). Each value k in the
        result means "reverse the prefix A[0:k]". Any valid sequence is an
        accepted answer, so flips that would be no-ops are skipped — the
        original emitted two canceling flips whenever an element was
        already in its final position.
        """
        result = []

        def flip(k):
            # Reverse the prefix A[0..k] in place.
            A[:k + 1] = A[k::-1]

        # Place the largest remaining value at the end of the unsorted prefix.
        for target in range(len(A), 1, -1):
            pos = A.index(target)      # current position of `target`
            if pos == target - 1:
                continue               # already in its final slot
            if pos > 0:
                flip(pos)              # bring `target` to the front
                result.append(pos + 1)
            flip(target - 1)           # send it to position target-1
            result.append(target)
        return result
| 521 | 173 |
# coding: utf-8
"""
Paragon Insights APIs
API interface for PI application # noqa: E501
OpenAPI spec version: 4.0.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from jnpr.healthbot.swagger.api_client import ApiClient
class OrganizationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, edge, **kwargs): # noqa: E501
"""Create edge by ID # noqa: E501
Create operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, edge, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param EdgeSchema edge: edgebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs) # noqa: E501
else:
(data) = self.create_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs) # noqa: E501
return data
def create_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, edge, **kwargs): # noqa: E501
"""Create edge by ID # noqa: E501
Create operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param EdgeSchema edge: edgebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'edge_name', 'edge', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_healthbot_organization_site_edge_edge_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge_name' is set
if ('edge_name' not in params or
params['edge_name'] is None):
raise ValueError("Missing the required parameter `edge_name` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge' is set
if ('edge' not in params or
params['edge'] is None):
raise ValueError("Missing the required parameter `edge` when calling `create_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
if 'edge_name' in params:
path_params['edge_name'] = params['edge_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'edge' in params:
body_params = params['edge']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_healthbot_organization_site_site_by_id(self, organization_name, site_name, site, **kwargs): # noqa: E501
"""Create site by ID # noqa: E501
Create operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_site_by_id(organization_name, site_name, site, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param SiteSchema site: sitebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs) # noqa: E501
else:
(data) = self.create_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs) # noqa: E501
return data
def create_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, site, **kwargs): # noqa: E501
"""Create site by ID # noqa: E501
Create operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param SiteSchema site: sitebody object (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'site', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_healthbot_organization_site_site_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `create_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `create_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site' is set
if ('site' not in params or
params['site'] is None):
raise ValueError("Missing the required parameter `site` when calling `create_healthbot_organization_site_site_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'site' in params:
body_params = params['site']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, **kwargs): # noqa: E501
"""Delete edge by ID # noqa: E501
Delete operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs) # noqa: E501
else:
(data) = self.delete_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs) # noqa: E501
return data
def delete_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, **kwargs): # noqa: E501
"""Delete edge by ID # noqa: E501
Delete operation of resource: edge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str edge_name: ID of edge-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'edge_name', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_healthbot_organization_site_edge_edge_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `delete_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `delete_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
# verify the required parameter 'edge_name' is set
if ('edge_name' not in params or
params['edge_name'] is None):
raise ValueError("Missing the required parameter `edge_name` when calling `delete_healthbot_organization_site_edge_edge_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
if 'edge_name' in params:
path_params['edge_name'] = params['edge_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_healthbot_organization_site_site_by_id(self, organization_name, site_name, **kwargs): # noqa: E501
"""Delete site by ID # noqa: E501
Delete operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_site_by_id(organization_name, site_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs) # noqa: E501
else:
(data) = self.delete_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs) # noqa: E501
return data
def delete_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, **kwargs): # noqa: E501
"""Delete site by ID # noqa: E501
Delete operation of resource: site # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_name: ID of organization-name (required)
:param str site_name: ID of site-name (required)
:param str x_iam_token: authentication header object
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_name', 'site_name', 'x_iam_token'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_healthbot_organization_site_site_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_name' is set
if ('organization_name' not in params or
params['organization_name'] is None):
raise ValueError("Missing the required parameter `organization_name` when calling `delete_healthbot_organization_site_site_by_id`") # noqa: E501
# verify the required parameter 'site_name' is set
if ('site_name' not in params or
params['site_name'] is None):
raise ValueError("Missing the required parameter `site_name` when calling `delete_healthbot_organization_site_site_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'organization_name' in params:
path_params['organization_name'] = params['organization_name'] # noqa: E501
if 'site_name' in params:
path_params['site_name'] = params['site_name'] # noqa: E501
query_params = []
header_params = {}
if 'x_iam_token' in params:
header_params['x-iam-token'] = params['x_iam_token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/config/organization/{organization_name}/site/{site_name}/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retrieve_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, **kwargs):  # noqa: E501
    """Retrieve edge by ID  # noqa: E501

    Retrieve operation of resource: edge  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param str edge_name: ID of edge-name (required)
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: EdgeSchema
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant.  It already returns either
    # the response data (sync) or the request thread (async_req=True), so
    # both paths reduce to a single call.
    kwargs['_return_http_data_only'] = True
    return self.retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, **kwargs)  # noqa: E501
def retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, **kwargs):  # noqa: E501
    """Retrieve edge by ID  # noqa: E501

    Retrieve operation of resource: edge  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param str edge_name: ID of edge-name (required)
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: EdgeSchema
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['organization_name', 'site_name', 'edge_name', 'x_iam_token', 'working']  # noqa: E501
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    params = locals()
    # dict.items() works on Python 2 and 3 alike; the six shim is unneeded.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method retrieve_healthbot_organization_site_edge_edge_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    # Verify the required parameters are set (params comes from locals(),
    # so a missing key and an explicit None are equivalent here).
    for required in ('organization_name', 'site_name', 'edge_name'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `retrieve_healthbot_organization_site_edge_edge_by_id`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    for name in ('organization_name', 'site_name', 'edge_name'):
        if name in params:
            path_params[name] = params[name]

    query_params = []
    if 'working' in params:
        query_params.append(('working', params['working']))  # noqa: E501

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='EdgeSchema',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def retrieve_healthbot_organization_site_site_by_id(self, organization_name, site_name, **kwargs):  # noqa: E501
    """Retrieve site by ID  # noqa: E501

    Retrieve operation of resource: site  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_organization_site_site_by_id(organization_name, site_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: SiteSchema
             If the method is called asynchronously,
             returns the request thread.
    """
    # The *_with_http_info variant returns the data synchronously or the
    # request thread when async_req=True, so a single delegation suffices.
    kwargs['_return_http_data_only'] = True
    return self.retrieve_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, **kwargs)  # noqa: E501
def retrieve_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, **kwargs):  # noqa: E501
    """Retrieve site by ID  # noqa: E501

    Retrieve operation of resource: site  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.retrieve_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param str x_iam_token: authentication header object
    :param bool working: true queries undeployed configuration
    :return: SiteSchema
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['organization_name', 'site_name', 'x_iam_token', 'working']  # noqa: E501
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    params = locals()
    # dict.items() works on Python 2 and 3 alike; the six shim is unneeded.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method retrieve_healthbot_organization_site_site_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    # Verify the required parameters are set.
    for required in ('organization_name', 'site_name'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `retrieve_healthbot_organization_site_site_by_id`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    for name in ('organization_name', 'site_name'):
        if name in params:
            path_params[name] = params[name]

    query_params = []
    if 'working' in params:
        query_params.append(('working', params['working']))  # noqa: E501

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/organization/{organization_name}/site/{site_name}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SiteSchema',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_healthbot_organization_site_edge_edge_by_id(self, organization_name, site_name, edge_name, edge, **kwargs):  # noqa: E501
    """Update edge by ID  # noqa: E501

    Update operation of resource: edge  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_healthbot_organization_site_edge_edge_by_id(organization_name, site_name, edge_name, edge, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param str edge_name: ID of edge-name (required)
    :param EdgeSchema edge: edgebody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # The *_with_http_info variant returns the data synchronously or the
    # request thread when async_req=True, so a single delegation suffices.
    kwargs['_return_http_data_only'] = True
    return self.update_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, **kwargs)  # noqa: E501
def update_healthbot_organization_site_edge_edge_by_id_with_http_info(self, organization_name, site_name, edge_name, edge, **kwargs):  # noqa: E501
    """Update edge by ID  # noqa: E501

    Update operation of resource: edge  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_healthbot_organization_site_edge_edge_by_id_with_http_info(organization_name, site_name, edge_name, edge, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param str edge_name: ID of edge-name (required)
    :param EdgeSchema edge: edgebody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['organization_name', 'site_name', 'edge_name', 'edge', 'x_iam_token']  # noqa: E501
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    params = locals()
    # dict.items() works on Python 2 and 3 alike; the six shim is unneeded.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_healthbot_organization_site_edge_edge_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    # Verify the required parameters are set.
    for required in ('organization_name', 'site_name', 'edge_name', 'edge'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `update_healthbot_organization_site_edge_edge_by_id`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    for name in ('organization_name', 'site_name', 'edge_name'):
        if name in params:
            path_params[name] = params[name]

    query_params = []

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The edge schema object is sent as the request body.
    body_params = params.get('edge')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/organization/{organization_name}/site/{site_name}/edge/{edge_name}/', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_healthbot_organization_site_site_by_id(self, organization_name, site_name, site, **kwargs):  # noqa: E501
    """Update site by ID  # noqa: E501

    Update operation of resource: site  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_healthbot_organization_site_site_by_id(organization_name, site_name, site, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param SiteSchema site: sitebody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # The *_with_http_info variant returns the data synchronously or the
    # request thread when async_req=True, so a single delegation suffices.
    kwargs['_return_http_data_only'] = True
    return self.update_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, **kwargs)  # noqa: E501
def update_healthbot_organization_site_site_by_id_with_http_info(self, organization_name, site_name, site, **kwargs):  # noqa: E501
    """Update site by ID  # noqa: E501

    Update operation of resource: site  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_healthbot_organization_site_site_by_id_with_http_info(organization_name, site_name, site, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_name: ID of organization-name (required)
    :param str site_name: ID of site-name (required)
    :param SiteSchema site: sitebody object (required)
    :param str x_iam_token: authentication header object
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if a required parameter is missing or None.
    """
    all_params = ['organization_name', 'site_name', 'site', 'x_iam_token']  # noqa: E501
    all_params.extend(['async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout'])

    params = locals()
    # dict.items() works on Python 2 and 3 alike; the six shim is unneeded.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_healthbot_organization_site_site_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    # Verify the required parameters are set.
    for required in ('organization_name', 'site_name', 'site'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `update_healthbot_organization_site_site_by_id`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    for name in ('organization_name', 'site_name'):
        if name in params:
            path_params[name] = params[name]

    query_params = []

    header_params = {}
    if 'x_iam_token' in params:
        header_params['x-iam-token'] = params['x_iam_token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The site schema object is sent as the request body.
    body_params = params.get('site')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/config/organization/{organization_name}/site/{site_name}/', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 46,746 | 13,689 |
import json
def create_json(input):
    """Deserialize a JSON document.

    :param input: a file-like object with a ``read()`` method, or
        (generalization) a ``str``/``bytes`` JSON payload. Previously a
        string argument raised ``AttributeError``; accepting it directly
        is backward compatible.
    :return: the deserialized Python object.
    :raises json.JSONDecodeError: if the payload is not valid JSON.
    """
    if isinstance(input, (str, bytes, bytearray)):
        return json.loads(input)
    return json.load(input)
def extract_value(expect_keys, json_dict):
    """Walk ``json_dict`` along the key path ``expect_keys``.

    expect_keys: tuple, e.g. ("nodeuser", "user")
    return: None if no value, else value

    Robustness over the original recursion: an empty key tuple returns
    ``json_dict`` itself instead of raising IndexError, and a non-dict
    intermediate value yields None instead of raising AttributeError.
    """
    current = json_dict
    for key in expect_keys:
        if not isinstance(current, dict):
            # The path continues but we are no longer inside a dict
            # (missing key produced None, or a scalar was reached).
            return None
        current = current.get(key)
    return current
| 465 | 161 |
import os
# Optional import instrumentation: the hook must be installed *before*
# any rssant module is imported, or its recorded module graph would be
# incomplete.  Enabled via the MODULE_GRAPH_HOOKER environment variable.
if os.getenv('MODULE_GRAPH_HOOKER') in ('1', 'true', 'True'):
    import module_graph
    module_graph.setup_hooker(save_to='data/rssant_worker_module_graph.json', verbose=True)
# Deliberately imported after the optional hook above; the noqa silences
# the "import not at top of file" lint complaint.
from rssant_common.actor_helper import start_actor  # noqa: F402
if __name__ == "__main__":
    # Start the worker actor listening on port 6792.
    start_actor('worker', port=6792)
| 321 | 126 |
import numpy as np
import aiohttp
import asyncio
import os.path
from pathlib import Path
import torch
import torch.nn.functional as F
from torchvision import transforms, utils, io
from typing import Dict, List, Optional, Tuple, Union, Any
from enum import Enum
from knn import utils
from knn.mappers import Mapper
from knn.utils import JSONType
import config
from model import Model
class BGSplittingMapper(Mapper):
    """Mapper that embeds images with a background-splitting model.

    For each chunk of image references it downloads the images, applies an
    ImageNet-style preprocessing transform, runs them through the model
    backbone, and produces ``(embeddings, scores)`` where ``scores[i]`` is
    the softmax probability of the positive main class.  Depending on the
    job's ``return_type`` the results are serialized back to the caller or
    saved to disk.
    """

    class ReturnType(Enum):
        # Persist per-chunk results to disk and return the file path.
        SAVE = 0
        # Base64-serialize (reduced) embeddings back to the caller.
        SERIALIZE = 1

    def initialize_container(self):
        # Create connection pool shared by every request in this container.
        # NOTE(review): assumes an event loop exists by the time the
        # session is first used -- confirm against the Mapper framework.
        self.session = aiohttp.ClientSession()
        self.use_cuda = False

    async def initialize_job(self, job_args):
        """Validate job arguments and attach model/transform state to them."""
        return_type = job_args.get("return_type", "serialize")
        if return_type == "save":
            job_args["return_type"] = BGSplittingMapper.ReturnType.SAVE
        elif return_type == "serialize":
            job_args["return_type"] = BGSplittingMapper.ReturnType.SERIALIZE
        else:
            raise ValueError(f"Unknown return type: {return_type}")

        # Get checkpoint data; the literal path 'TEST' requests a freshly
        # initialized model (no weights loaded).
        if job_args["checkpoint_path"] == 'TEST':
            model = Model(num_main_classes=2, num_aux_classes=1)
        else:
            map_location = torch.device('cuda') if self.use_cuda else torch.device('cpu')
            checkpoint_state = torch.load(job_args["checkpoint_path"],
                                          map_location=map_location)
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in checkpoint_state['state_dict'].items():
                name = k[7:]  # strip the DataParallel `module.` prefix
                new_state_dict[name] = v
            if 'model_kwargs' in checkpoint_state:
                kwargs = checkpoint_state['model_kwargs']
                num_aux_classes = kwargs['num_aux_classes']
            else:
                num_aux_classes = 1
            # Create model
            model = Model(num_main_classes=2, num_aux_classes=num_aux_classes)
            # Load model weights
            model.load_state_dict(new_state_dict)
        model.eval()
        if self.use_cuda:
            model = model.cuda()
        job_args["model"] = model
        # Standard ImageNet preprocessing (torchvision reference mean/std).
        job_args["transform"] = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ConvertImageDtype(torch.float32),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
        ])
        job_args["n_chunks_saved"] = 0
        return job_args

    @utils.log_exception_from_coro_but_return_none
    async def process_chunk(
        self, chunk: List[JSONType], job_id: str, job_args: Any, request_id: str
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Download, preprocess, and embed one chunk of images.

        :return: ``(embeddings, scores)`` as numpy arrays.
        """
        image_paths = [c["path"] for c in chunk]
        # Resolve bucket-relative paths to full GCS URLs.  Fix: the old
        # check (`"http" not in path`) misclassified non-URL paths that
        # merely contained the substring "http".
        if not image_paths[0].startswith(("http://", "https://")):
            image_bucket = job_args["input_bucket"]
            image_paths = [
                os.path.join(config.GCS_URL_PREFIX, image_bucket, image_path)
                for image_path in image_paths]
        transform = job_args["transform"]

        async def download_transform(image_path):
            return await self.transform_image(
                await self.download_image(image_path),
                transform=transform)

        with self.profiler(request_id, "download_time"):
            input_images = await asyncio.gather(
                *[
                    download_transform(image_path)
                    for image_path in image_paths
                ])

        # Run inference; no_grad avoids building an autograd graph that
        # would be discarded immediately anyway.
        model = job_args["model"]
        with self.profiler(request_id, "inference_time"):
            image_batch = torch.stack(input_images)
            if self.use_cuda:
                image_batch = image_batch.cuda()
            with torch.no_grad():
                embeddings = model.forward_backbone(image_batch)
                scores = F.softmax(model.main_head(embeddings), dim=1)[:, 1]
        return (embeddings.detach().cpu().numpy(),
                scores.detach().cpu().numpy())

    async def download_image(
        self, image_path: str, num_retries: int = config.DOWNLOAD_NUM_RETRIES
    ) -> bytes:
        """Fetch image bytes over HTTP with exponential-backoff retries."""
        for i in range(num_retries + 1):
            try:
                async with self.session.get(image_path) as response:
                    assert response.status == 200
                    return await response.read()
            except Exception:
                if i < num_retries:
                    await asyncio.sleep(2 ** i)  # 1s, 2s, 4s, ...
                else:
                    raise
        assert False  # unreachable

    async def transform_image(
        self, image_bytes: bytes, transform,
    ) -> torch.Tensor:
        """Decode raw image bytes to an RGB tensor and preprocess it."""
        # np.frombuffer avoids materializing a Python list of ints (the
        # previous torch.tensor(list(...)) allocated one object per byte);
        # .copy() makes the buffer writable for torch.from_numpy.
        data = torch.from_numpy(
            np.frombuffer(image_bytes, dtype=np.uint8).copy())
        image = io.decode_image(data, mode=io.image.ImageReadMode.RGB)
        return transform(image)

    async def postprocess_chunk(
        self,
        inputs,
        outputs: Tuple[np.ndarray, np.ndarray],
        job_id,
        job_args,
        request_id,
    ) -> Union[Tuple[str, List[Optional[int]]],
               Tuple[None, List[Optional[str]]]]:
        """Either persist chunk results to disk or serialize them back."""
        if job_args["return_type"] == BGSplittingMapper.ReturnType.SAVE:
            with self.profiler(request_id, "save_time"):
                data_path_tmpl = config.DATA_FILE_TMPL.format(
                    job_id, self.worker_id, job_args["n_chunks_saved"]
                )
                job_args["n_chunks_saved"] += 1
                Path(data_path_tmpl).parent.mkdir(parents=True, exist_ok=True)
                # Fix: np.int was removed in NumPy 1.24; use an explicit
                # 64-bit integer dtype for the id array.
                data = {'ids': np.array([inp['id'] for inp in inputs], dtype=np.int64),
                        'embeddings': outputs[0],
                        'scores': outputs[1]}
                # NOTE(review): .format(None) fills a remaining placeholder
                # in the template with "None" -- confirm against
                # config.DATA_FILE_TMPL.
                np.save(data_path_tmpl.format(None), data)
            return data_path_tmpl.format(None), [
                len(output) if output is not None else None for output in outputs[0]
            ]
        else:
            with self.profiler(request_id, "reduce_time"):
                reduce_fn = config.REDUCTIONS[job_args.get("reduction")]
                reduced_outputs = [
                    reduce_fn(output) if output is not None else None
                    for output in outputs
                ]
            with self.profiler(request_id, "serialize_time"):
                serialized_outputs = [
                    utils.numpy_to_base64(output) if output is not None else None
                    for output in reduced_outputs
                ]
            return None, serialized_outputs

    async def process_element(
        self,
        input: JSONType,
        job_id: str,
        job_args: Any,
        request_id: str,
        element_index: int,
    ) -> Any:
        # Per-element processing is unused; all work happens in
        # process_chunk.
        pass
# NOTE(review): presumably the server object the deployment imports as the
# module-level entry point (`app`) -- confirm against the knn Mapper docs.
app = BGSplittingMapper().server
| 6,816 | 1,995 |
from netapp.volume.volume_hybrid_cache_attributes import VolumeHybridCacheAttributes
from netapp.volume.volume_mirror_attributes import VolumeMirrorAttributes
from netapp.volume.volume_space_attributes import VolumeSpaceAttributes
from netapp.volume.volume_directory_attributes import VolumeDirectoryAttributes
from netapp.volume.volume_state_attributes import VolumeStateAttributes
from netapp.volume.volume_autosize_attributes import VolumeAutosizeAttributes
from netapp.volume.volume_flexcache_attributes import VolumeFlexcacheAttributes
from netapp.volume.volume_id_attributes import VolumeIdAttributes
from netapp.volume.volume_antivirus_attributes import VolumeAntivirusAttributes
from netapp.volume.volume_qos_attributes import VolumeQosAttributes
from netapp.volume.volume_transition_attributes import VolumeTransitionAttributes
from netapp.volume.volume_snapshot_attributes import VolumeSnapshotAttributes
from netapp.volume.volume_language_attributes import VolumeLanguageAttributes
from netapp.volume.volume_security_attributes import VolumeSecurityAttributes
from netapp.volume.volume_sis_attributes import VolumeSisAttributes
from netapp.volume.volume_performance_attributes import VolumePerformanceAttributes
from netapp.volume.volume_inode_attributes import VolumeInodeAttributes
from netapp.volume.volume_snapshot_autodelete_attributes import VolumeSnapshotAutodeleteAttributes
from netapp.volume.volume_vm_align_attributes import VolumeVmAlignAttributes
from netapp.volume.volume_64bit_upgrade_attributes import Volume64BitUpgradeAttributes
from netapp.volume.volume_clone_attributes import VolumeCloneAttributes
from netapp.volume.volume_infinitevol_attributes import VolumeInfinitevolAttributes
from netapp.volume.volume_export_attributes import VolumeExportAttributes
from netapp.netapp_object import NetAppObject
class VolumeAttributes(NetAppObject):
"""
Attributes of a volume.
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_volume_hybrid_cache_attributes = None
@property
def volume_hybrid_cache_attributes(self):
"""
This field contains information on Flash Pool caching
attributes on a volume
"""
return self._volume_hybrid_cache_attributes
@volume_hybrid_cache_attributes.setter
def volume_hybrid_cache_attributes(self, val):
if val != None:
self.validate('volume_hybrid_cache_attributes', val)
self._volume_hybrid_cache_attributes = val
_volume_mirror_attributes = None
@property
def volume_mirror_attributes(self):
"""
This field contains information applying exclusive to
volume mirror.
"""
return self._volume_mirror_attributes
@volume_mirror_attributes.setter
def volume_mirror_attributes(self, val):
if val != None:
self.validate('volume_mirror_attributes', val)
self._volume_mirror_attributes = val
_volume_space_attributes = None
@property
def volume_space_attributes(self):
"""
This field contains information related to volume disk
space management including on-disk layout.
"""
return self._volume_space_attributes
@volume_space_attributes.setter
def volume_space_attributes(self, val):
if val != None:
self.validate('volume_space_attributes', val)
self._volume_space_attributes = val
_volume_directory_attributes = None
@property
def volume_directory_attributes(self):
"""
This field contains information related to directories in
a volume.
"""
return self._volume_directory_attributes
@volume_directory_attributes.setter
def volume_directory_attributes(self, val):
if val != None:
self.validate('volume_directory_attributes', val)
self._volume_directory_attributes = val
_volume_state_attributes = None
@property
def volume_state_attributes(self):
"""
This field contains information about the state or status
of a volume or its features.
"""
return self._volume_state_attributes
@volume_state_attributes.setter
def volume_state_attributes(self, val):
if val != None:
self.validate('volume_state_attributes', val)
self._volume_state_attributes = val
_volume_autosize_attributes = None
@property
def volume_autosize_attributes(self):
"""
This field contains information about the autosize
settings of the volume.
"""
return self._volume_autosize_attributes
@volume_autosize_attributes.setter
def volume_autosize_attributes(self, val):
if val != None:
self.validate('volume_autosize_attributes', val)
self._volume_autosize_attributes = val
_volume_flexcache_attributes = None
@property
def volume_flexcache_attributes(self):
"""
This field contains information applying exclusively to
flexcache volumes.
"""
return self._volume_flexcache_attributes
@volume_flexcache_attributes.setter
def volume_flexcache_attributes(self, val):
if val != None:
self.validate('volume_flexcache_attributes', val)
self._volume_flexcache_attributes = val
_volume_id_attributes = None
@property
def volume_id_attributes(self):
"""
This field contains identification information about the
volume.
"""
return self._volume_id_attributes
@volume_id_attributes.setter
def volume_id_attributes(self, val):
if val != None:
self.validate('volume_id_attributes', val)
self._volume_id_attributes = val
_volume_antivirus_attributes = None
@property
def volume_antivirus_attributes(self):
"""
This field contains information about Antivirus On-Access
settings for the volume.
"""
return self._volume_antivirus_attributes
@volume_antivirus_attributes.setter
def volume_antivirus_attributes(self, val):
if val != None:
self.validate('volume_antivirus_attributes', val)
self._volume_antivirus_attributes = val
_volume_qos_attributes = None
@property
def volume_qos_attributes(self):
"""
This field contains the information that relates to QoS.
"""
return self._volume_qos_attributes
@volume_qos_attributes.setter
def volume_qos_attributes(self, val):
if val != None:
self.validate('volume_qos_attributes', val)
self._volume_qos_attributes = val
_volume_transition_attributes = None
@property
def volume_transition_attributes(self):
"""
This field contains information applying exclusively to
transitioned or transitioning volumes.
"""
return self._volume_transition_attributes
@volume_transition_attributes.setter
def volume_transition_attributes(self, val):
if val != None:
self.validate('volume_transition_attributes', val)
self._volume_transition_attributes = val
_volume_snapshot_attributes = None
@property
def volume_snapshot_attributes(self):
"""
This field contains information applying exclusively to
all the snapshots in the volume. Volume disk
space-related settings are excluded.
"""
return self._volume_snapshot_attributes
@volume_snapshot_attributes.setter
def volume_snapshot_attributes(self, val):
if val != None:
self.validate('volume_snapshot_attributes', val)
self._volume_snapshot_attributes = val
_volume_language_attributes = None
@property
def volume_language_attributes(self):
"""
This field contains information about volume
language-related settings.
"""
return self._volume_language_attributes
@volume_language_attributes.setter
def volume_language_attributes(self, val):
if val != None:
self.validate('volume_language_attributes', val)
self._volume_language_attributes = val
_volume_security_attributes = None
@property
def volume_security_attributes(self):
"""
This field contains information about volume security
settings.
"""
return self._volume_security_attributes
@volume_security_attributes.setter
def volume_security_attributes(self, val):
if val != None:
self.validate('volume_security_attributes', val)
self._volume_security_attributes = val
_volume_sis_attributes = None
@property
def volume_sis_attributes(self):
"""
This field contains information about Deduplication, file
clone, compression, etc.
"""
return self._volume_sis_attributes
@volume_sis_attributes.setter
def volume_sis_attributes(self, val):
if val != None:
self.validate('volume_sis_attributes', val)
self._volume_sis_attributes = val
_volume_performance_attributes = None
@property
def volume_performance_attributes(self):
"""
This field contains information that relates to the
performance of the volume.
"""
return self._volume_performance_attributes
@volume_performance_attributes.setter
def volume_performance_attributes(self, val):
if val != None:
self.validate('volume_performance_attributes', val)
self._volume_performance_attributes = val
_volume_inode_attributes = None
@property
def volume_inode_attributes(self):
"""
This field contains information about inodes in a
volume.
"""
return self._volume_inode_attributes
@volume_inode_attributes.setter
def volume_inode_attributes(self, val):
if val != None:
self.validate('volume_inode_attributes', val)
self._volume_inode_attributes = val
_volume_snapshot_autodelete_attributes = None
@property
def volume_snapshot_autodelete_attributes(self):
"""
This field contains information about snapshot autodelete
policy settings.
"""
return self._volume_snapshot_autodelete_attributes
@volume_snapshot_autodelete_attributes.setter
def volume_snapshot_autodelete_attributes(self, val):
if val != None:
self.validate('volume_snapshot_autodelete_attributes', val)
self._volume_snapshot_autodelete_attributes = val
_volume_vm_align_attributes = None
@property
def volume_vm_align_attributes(self):
"""
This field contains information related to the Virtual
Machine alignment settings on a volume
"""
return self._volume_vm_align_attributes
@volume_vm_align_attributes.setter
def volume_vm_align_attributes(self, val):
if val != None:
self.validate('volume_vm_align_attributes', val)
self._volume_vm_align_attributes = val
_volume_64bit_upgrade_attributes = None
@property
def volume_64bit_upgrade_attributes(self):
"""
Information related to 64-bit upgrade. After 64-bit
upgrade completes, this information is no longer
available.
"""
return self._volume_64bit_upgrade_attributes
@volume_64bit_upgrade_attributes.setter
def volume_64bit_upgrade_attributes(self, val):
if val != None:
self.validate('volume_64bit_upgrade_attributes', val)
self._volume_64bit_upgrade_attributes = val
_volume_clone_attributes = None
@property
def volume_clone_attributes(self):
"""
This field contains information applying exclusively to
clone volumes.
"""
return self._volume_clone_attributes
@volume_clone_attributes.setter
def volume_clone_attributes(self, val):
if val != None:
self.validate('volume_clone_attributes', val)
self._volume_clone_attributes = val
_volume_infinitevol_attributes = None
@property
def volume_infinitevol_attributes(self):
"""
This field contains information about the state of an
Infinite Volume.
"""
return self._volume_infinitevol_attributes
@volume_infinitevol_attributes.setter
def volume_infinitevol_attributes(self, val):
if val != None:
self.validate('volume_infinitevol_attributes', val)
self._volume_infinitevol_attributes = val
_volume_export_attributes = None
@property
def volume_export_attributes(self):
"""
This field contains information about export settings of
the volume.
"""
return self._volume_export_attributes
@volume_export_attributes.setter
def volume_export_attributes(self, val):
if val != None:
self.validate('volume_export_attributes', val)
self._volume_export_attributes = val
    @staticmethod
    def get_api_name():
        # API (ZAPI) element name this typed object maps to on the wire.
        return "volume-attributes"
    @staticmethod
    def get_desired_attrs():
        # Wire names of every child attribute group this object can carry;
        # the list mirrors the properties defined on this class one-to-one.
        return [
            'volume-hybrid-cache-attributes',
            'volume-mirror-attributes',
            'volume-space-attributes',
            'volume-directory-attributes',
            'volume-state-attributes',
            'volume-autosize-attributes',
            'volume-flexcache-attributes',
            'volume-id-attributes',
            'volume-antivirus-attributes',
            'volume-qos-attributes',
            'volume-transition-attributes',
            'volume-snapshot-attributes',
            'volume-language-attributes',
            'volume-security-attributes',
            'volume-sis-attributes',
            'volume-performance-attributes',
            'volume-inode-attributes',
            'volume-snapshot-autodelete-attributes',
            'volume-vm-align-attributes',
            'volume-64bit-upgrade-attributes',
            'volume-clone-attributes',
            'volume-infinitevol-attributes',
            'volume-export-attributes',
        ]
    def describe_properties(self):
        """Map each python attribute name to its model class, cardinality,
        and requiredness; presumably consumed by the base class for
        (de)serialization and validation — confirm against the base API.
        """
        return {
            'volume_hybrid_cache_attributes': { 'class': VolumeHybridCacheAttributes, 'is_list': False, 'required': 'optional' },
            'volume_mirror_attributes': { 'class': VolumeMirrorAttributes, 'is_list': False, 'required': 'optional' },
            'volume_space_attributes': { 'class': VolumeSpaceAttributes, 'is_list': False, 'required': 'optional' },
            'volume_directory_attributes': { 'class': VolumeDirectoryAttributes, 'is_list': False, 'required': 'optional' },
            'volume_state_attributes': { 'class': VolumeStateAttributes, 'is_list': False, 'required': 'optional' },
            'volume_autosize_attributes': { 'class': VolumeAutosizeAttributes, 'is_list': False, 'required': 'optional' },
            'volume_flexcache_attributes': { 'class': VolumeFlexcacheAttributes, 'is_list': False, 'required': 'optional' },
            'volume_id_attributes': { 'class': VolumeIdAttributes, 'is_list': False, 'required': 'optional' },
            'volume_antivirus_attributes': { 'class': VolumeAntivirusAttributes, 'is_list': False, 'required': 'optional' },
            'volume_qos_attributes': { 'class': VolumeQosAttributes, 'is_list': False, 'required': 'optional' },
            'volume_transition_attributes': { 'class': VolumeTransitionAttributes, 'is_list': False, 'required': 'optional' },
            'volume_snapshot_attributes': { 'class': VolumeSnapshotAttributes, 'is_list': False, 'required': 'optional' },
            'volume_language_attributes': { 'class': VolumeLanguageAttributes, 'is_list': False, 'required': 'optional' },
            'volume_security_attributes': { 'class': VolumeSecurityAttributes, 'is_list': False, 'required': 'optional' },
            'volume_sis_attributes': { 'class': VolumeSisAttributes, 'is_list': False, 'required': 'optional' },
            'volume_performance_attributes': { 'class': VolumePerformanceAttributes, 'is_list': False, 'required': 'optional' },
            'volume_inode_attributes': { 'class': VolumeInodeAttributes, 'is_list': False, 'required': 'optional' },
            'volume_snapshot_autodelete_attributes': { 'class': VolumeSnapshotAutodeleteAttributes, 'is_list': False, 'required': 'optional' },
            'volume_vm_align_attributes': { 'class': VolumeVmAlignAttributes, 'is_list': False, 'required': 'optional' },
            'volume_64bit_upgrade_attributes': { 'class': Volume64BitUpgradeAttributes, 'is_list': False, 'required': 'optional' },
            'volume_clone_attributes': { 'class': VolumeCloneAttributes, 'is_list': False, 'required': 'optional' },
            'volume_infinitevol_attributes': { 'class': VolumeInfinitevolAttributes, 'is_list': False, 'required': 'optional' },
            'volume_export_attributes': { 'class': VolumeExportAttributes, 'is_list': False, 'required': 'optional' },
        }
| 17,794 | 4,578 |
#!/usr/bin/env python3
import argparse
import os.path
from itertools import combinations
import matplotlib.cm as cm
import matplotlib.pyplot as pl
import numpy as np
from opensfm import dataset
from opensfm import features
from opensfm import io
from numpy import ndarray
from typing import List
def plot_matches(im1, im2, p1: ndarray, p2: ndarray) -> None:
    """Draw both images side by side and connect matched points with lines.

    p1/p2 are normalized image coordinates; they are converted to pixel
    coordinates before plotting onto the current matplotlib figure.
    """
    height1, width1, _ = im1.shape
    height2, width2, _ = im2.shape
    canvas = np.zeros((max(height1, height2), width1 + width2, 3), dtype=im1.dtype)
    canvas[:height1, :width1, :] = im1
    canvas[:height2, width1 : width1 + width2, :] = im2
    pix1 = features.denormalized_image_coordinates(p1, width1, height1)
    pix2 = features.denormalized_image_coordinates(p2, width2, height2)
    pl.imshow(canvas)
    # Cyan segment per correspondence; the right image is shifted by width1.
    for left, right in zip(pix1, pix2):
        pl.plot([left[0], right[0] + width1], [left[1], right[1]], "c")
    pl.plot(pix1[:, 0], pix1[:, 1], "ob")
    pl.plot(pix2[:, 0] + width1, pix2[:, 1], "ob")
def plot_graph(data) -> None:
    """Render the image match graph and save it as matchgraph.png.

    Edges connect images sharing feature matches; edge colour encodes the
    match count. Node positions come from GPS EXIF projected to topocentric
    coordinates via the dataset reference.
    """
    cmap = cm.get_cmap("viridis")  # NOTE(review): deprecated since matplotlib 3.7; migrate to matplotlib.colormaps
    connectivity = {}
    # Use the dataset's own image list; the original read a module-level
    # ``images`` global, which broke the function outside the __main__ path.
    for im1 in data.images():
        for im2, matches in data.load_matches(im1).items():
            if len(matches) == 0:
                continue
            connectivity[tuple(sorted([im1, im2]))] = len(matches)
    all_values = list(connectivity.values())
    lowest = np.percentile(all_values, 5)
    highest = np.percentile(all_values, 95)
    # Guard against divide-by-zero when every edge has the same weight.
    span = (highest - lowest) or 1.0
    exifs = {im: data.load_exif(im) for im in data.images()}
    reference = data.load_reference()
    # Draw edges weakest-first so strong connections are painted on top.
    for (node1, node2), edge in sorted(connectivity.items(), key=lambda x: x[1]):
        gps1 = exifs[node1]["gps"]
        o1 = np.array(
            reference.to_topocentric(gps1["latitude"], gps1["longitude"], 0)[:2]
        )
        gps2 = exifs[node2]["gps"]
        o2 = np.array(
            reference.to_topocentric(gps2["latitude"], gps2["longitude"], 0)[:2]
        )
        c = max(0, min(1.0, 1 - (edge - lowest) / span))
        pl.plot([o1[0], o2[0]], [o1[1], o2[1]], linestyle="-", color=cmap(c))
    for node in data.images():
        gps = exifs[node]["gps"]
        o = np.array(reference.to_topocentric(gps["latitude"], gps["longitude"], 0)[:2])
        pl.plot(o[0], o[1], linestyle="", marker="o", color=cmap(0))
    pl.xticks([])
    pl.yticks([])
    ax = pl.gca()
    for side in ["top", "bottom", "left", "right"]:
        ax.spines[side].set_visible(False)
    pl.savefig(os.path.join(data.data_path, "matchgraph.png"))
def plot_matches_for_images(data, image, images) -> None:
    """Plot feature matches for selected image pairs.

    image  -- pair this image against every other image in the dataset
    images -- comma-separated subset; plot all pairs within the subset
    neither -- plot all pairs over the whole dataset

    NOTE(review): reads the module-level ``args`` (save_figs/dataset) created
    in the __main__ block — confirm before reusing as a library function.
    """
    if image:
        # The original iterated the raw ``images`` argument here, which is a
        # comma-separated string (yielding characters) or None (TypeError);
        # pair against the dataset's image list instead.
        pairs = [(image, o) for o in data.images() if o != image]
    elif images:
        subset = images.split(",")
        pairs = combinations(subset, 2)
    else:
        # Original passed ``images`` (None here) to combinations and crashed.
        pairs = combinations(data.images(), 2)
    i = 0
    for im1, im2 in pairs:
        matches = data.find_matches(im1, im2)
        if len(matches) == 0:
            continue
        print("plotting {} matches between {} {}".format(len(matches), im1, im2))
        features_data1 = data.load_features(im1)
        features_data2 = data.load_features(im2)
        assert features_data1
        assert features_data2
        p1 = features_data1.points[matches[:, 0]]
        p2 = features_data2.points[matches[:, 1]]
        pl.figure(figsize=(20, 10))
        pl.title("Images: " + im1 + " - " + im2 + ", matches: " + str(matches.shape[0]))
        plot_matches(data.load_image(im1), data.load_image(im2), p1, p2)
        i += 1
        if args.save_figs:
            p = os.path.join(args.dataset, "plot_tracks")
            io.mkdir_p(p)
            pl.savefig(os.path.join(p, "{}_{}.jpg".format(im1, im2)), dpi=100)
            pl.close()
        else:
            # Show in batches of 10 so we don't keep unbounded open figures.
            if i >= 10:
                i = 0
                pl.show()
    if not args.save_figs and i > 0:
        pl.show()
if __name__ == "__main__":
    # CLI entry point: plot either the whole match graph or per-pair matches.
    parser = argparse.ArgumentParser(description="Plot matches between images")
    parser.add_argument("dataset", help="path to the dataset to be processed")
    # NOTE(review): help text appears truncated ("for a specific" <image>?).
    parser.add_argument("--image", help="show tracks for a specific")
    parser.add_argument(
        "--images", help="show tracks between a subset of images (separated by commas)"
    )
    parser.add_argument("--graph", help="display image graph", action="store_true")
    parser.add_argument(
        "--save_figs", help="save figures instead of showing them", action="store_true"
    )
    args: argparse.Namespace = parser.parse_args()
    data = dataset.DataSet(args.dataset)
    # ``args`` (and ``images``) are read as module-level globals by the
    # plotting helpers defined above.
    images: List[str] = data.images()
    if args.graph:
        plot_graph(data)
    else:
        plot_matches_for_images(data, args.image, args.images)
| 4,517 | 1,642 |
import numpy as np
from subprocess import call
import astropy.io.fits as pyf
import time
from .utils import *
class measure:
    """Configure a 3- or 4-point correlation function measurement on a
    gridded density field.

    Validates the inputs, stores the field, and precomputes the radial bin
    edges (in grid units, or physical units when physical_boxsize/rmin/rmax
    are all provided).
    """

    def __init__(self, nPCF=4, projected=False, m_max=None, density_field_data = None, save_dir=None, save_name=None, ell_max=5,
                 nbins=4, bin_spacing='LIN', bin_min=1, physical_boxsize = None, rmin = None, rmax = None):
        """
        This class allows us to measure the 3/4pcf from some input data field.

        nPCF: 3 or 4 — which correlation function to compute.
        projected: compute the projected (2D) statistic; requires m_max.
        density_field_data: 2D array when projected, 3D cube otherwise.
        bin_spacing: 'LIN', 'INV' or 'LOG' radial bin spacing.
        physical_boxsize/rmin/rmax: optional physical-unit binning (all three
        must be given together).
        """
        self.ell_max = ell_max
        self.eps = 1e-15  # small numerical guard; presumably used downstream — confirm
        self.nbins = nbins
        self.projected = projected

        # --- nPCF / projection flags -------------------------------------
        if nPCF not in (3, 4):
            raise ValueError("Sarabande only calculates 3 or 4 point correlation functions. Please give an integer 3 or 4.")
        self.nPCF = nPCF
        if self.projected:
            if m_max is None:
                raise ValueError("You need to provide an m_max you would like to compute up to.")
            self.m_max = m_max

        # --- density field -----------------------------------------------
        # Validate the field before touching its shape (the original called
        # np.shape(None) first and crashed with an unhelpful IndexError).
        if density_field_data is None:
            if self.projected:
                raise ValueError("Please include a density_field_data argument. Should be a density sheet in the form of a numpy array")
            raise ValueError("Please include a density_field_data argument. Should be a density cube in the form of a numpy array")
        ndim = len(np.shape(density_field_data))
        if ndim == 3 and self.projected:
            raise AssertionError("""Projected 3/4 PCFs can only be computed on a 2D data set, use full 3/4 PCFs for 3D data sets.""")
        if ndim == 2 and not self.projected:
            raise AssertionError("""Projected 3/4 PCFs can only be computed on a 2D data set, use full 3/4 PCFs for 3D data sets.""")
        self.density_field_data = density_field_data
        self.ld_one_d = np.shape(density_field_data)[0]

        # --- radial binning ----------------------------------------------
        # Default bin range in grid units; epsilons keep edge cells inside.
        self.bin_min = bin_min - 1e-5
        self.bin_max = (self.ld_one_d // 2) + 1e-5
        # BUG FIX: the original condition `physical_boxsize or rmin or rmax
        # is not None` parsed as `physical_boxsize or rmin or (rmax is not
        # None)` (and similarly for the `and` form), so e.g. rmin=0 or a
        # lone rmax silently misbehaved. Check each argument explicitly.
        if any(v is not None for v in (physical_boxsize, rmin, rmax)):
            if all(v is not None for v in (physical_boxsize, rmin, rmax)):
                self.bin_min = (rmin / physical_boxsize) * self.ld_one_d - 1e-5
                self.bin_max = (rmax / physical_boxsize) * self.ld_one_d + 1e-5
            else:
                raise AssertionError("""If you want to use physical scales, you need to give physical_boxsize, rmin, and rmax""")

        # Compute only the requested spacing (the original built all three
        # arrays eagerly just to index one of them).
        if bin_spacing == 'LIN':
            self.bin_edges = np.linspace(self.bin_min, self.bin_max, self.nbins + 1)
        elif bin_spacing == 'INV':
            self.bin_edges = 1. / np.linspace(1. / self.bin_min, 1. / self.bin_max, self.nbins + 1)
        elif bin_spacing == 'LOG':
            self.bin_edges = np.exp(np.linspace(np.log(self.bin_min), np.log(self.bin_max), self.nbins + 1))
        else:
            raise ValueError("""Please put a valid bin_spacing argument, acceptable options are: \n LIN \n INV \n LOG \n in string format.""")

        # --- output location ---------------------------------------------
        if save_name is None:
            raise ValueError("Please include a save_name argument")
        self.save_name = save_name
        if save_dir is None:
            raise ValueError("Please include a save_dir argument")
        self.save_dir = save_dir
| 3,738 | 1,138 |
def swap_columns(M, m, n, i, j):
    """Swap columns i and j of matrix M in place and return M.

    m and n are unused but kept for backward compatibility with existing
    callers. The original returned ``print(M)`` (always None); the matrix
    is still printed, but the mutated matrix is now also returned.
    """
    for row in M:
        row[i], row[j] = row[j], row[i]
    print(M)
    return M
| 133 | 74 |
import pandas as pd
import numpy as np
class AggregateAllEpitopes:
    """Aggregate per-epitope prediction rows into one "best" line per variant.

    Reads a pVACseq/pVACbind/bedpe TSV, picks the best-binding peptide for
    each unique variant, assigns a favorability tier, and writes a sorted
    summary TSV. Uses pd.concat and keyword-style DataFrame.drop so the code
    runs on pandas 2.x (DataFrame.append and positional drop were removed).
    """

    def __init__(self, input_file, output_file, file_type='pVACseq'):
        self.input_file = input_file
        self.output_file = output_file
        self.file_type = file_type

    def get_tier(self, mutation, vaf_clonal):
        """Assign a mutation row to a "Classification" tier based on binding
        affinity, expression, clonality and anchor-residue position.
        """
        # A mutation at an anchor position only "passes" if the WT peptide
        # binds poorly (WT score missing or >= 1000 nM).
        anchor_residue_pass = True
        anchors = [1, 2, len(mutation["MT Epitope Seq"]) - 1, len(mutation["MT Epitope Seq"])]
        if mutation["Mutation Position"] in anchors:
            if mutation["Median WT Score"] == "NA":
                anchor_residue_pass = False
            elif mutation["Median WT Score"] < 1000:
                anchor_residue_pass = False
        # Writing these out as explicitly as possible for ease of understanding.
        if (mutation["Median MT Score"] < 500 and
           mutation["Tumor RNA VAF"] * mutation["Gene Expression"] > 3 and
           mutation["Tumor DNA VAF"] >= (vaf_clonal / 2) and
           anchor_residue_pass):
            return "Pass"
        # Relax MT binding and expression thresholds.
        if (mutation["Median MT Score"] < 1000 and
           mutation["Tumor RNA VAF"] * mutation["Gene Expression"] > 1 and
           mutation["Tumor DNA VAF"] >= (vaf_clonal / 2) and
           anchor_residue_pass):
            return "Relaxed"
        # Anchor-residue failures that otherwise look good.
        if (mutation["Median MT Score"] < 1000 and
           mutation["Tumor RNA VAF"] * mutation["Gene Expression"] > 1 and
           mutation["Tumor DNA VAF"] >= (vaf_clonal / 2) and
           not anchor_residue_pass):
            return "Anchor"
        # Not in the founding clone.
        if (mutation["Median MT Score"] < 1000 and
           mutation["Tumor RNA VAF"] * mutation["Gene Expression"] > 1 and
           mutation["Tumor DNA VAF"] < (vaf_clonal / 2) and
           anchor_residue_pass):
            return "Subclonal"
        # Relax expression: include sites with reasonable VAF but zero
        # measured gene expression.
        lowexpr = False
        if ((mutation["Tumor RNA VAF"] * mutation["Gene Expression"] > 0) or
           (mutation["Gene Expression"] == 0 and
           mutation["Tumor RNA Depth"] > 50 and
           mutation["Tumor RNA VAF"] > 0.10)):
            lowexpr = True
        # If low expression is the only strike against it, label LowExpr
        # (multiple strikes fall through to Poor).
        if (mutation["Median MT Score"] < 1000 and
           lowexpr and
           mutation["Tumor DNA VAF"] >= (vaf_clonal / 2) and
           anchor_residue_pass):
            return "LowExpr"
        # Zero expression.
        if (mutation["Gene Expression"] == 0 or mutation["Tumor RNA VAF"] == 0) and not lowexpr:
            return "NoExpr"
        # Everything else.
        return "Poor"

    def get_best_mut_line(self, df, hla_types, vaf_clonal, max_ic50=1000):
        """Collapse all epitope rows for one variant into a single-row DataFrame."""
        # Order by best median score and take the best (lowest-IC50) peptide.
        # Non-inplace sorts avoid SettingWithCopy issues on the filtered slice.
        if self.file_type == 'pVACbind':
            df = df.sort_values(by=["Median Score"], ascending=True)
        else:
            df = df.sort_values(by=["Median MT Score", "Median WT Score"], ascending=[True, False])
        best = df.iloc[0].copy()  # copy: the aachange assignment below must not alias df
        if self.file_type == 'pVACbind':
            tier = "NA"
        else:
            tier = self.get_tier(best, vaf_clonal)
        # These counts should represent only the "good binders" with ic50 < max
        # for all sites other than tier4 slop.
        if self.file_type == 'pVACbind':
            good_binders = df[df["Median Score"] < max_ic50]
        else:
            good_binders = df[df["Median MT Score"] < max_ic50]
        if len(good_binders) > 0:
            good_binders_hla = set(good_binders["HLA Allele"].unique())
            # Mark each patient HLA allele with 'X' when it has a good binder.
            hla = {allele: ('X' if allele in good_binders_hla else '') for allele in hla_types}
            # Count unique gene/transcript/aa_change combinations and unique
            # passing peptides.
            if self.file_type == 'pVACbind':
                anno_count = "NA"
                peptide_count = len(good_binders["Epitope Seq"].unique())
            else:
                anno_count = len(good_binders[['Transcript', 'Gene Name', 'Mutation', 'Protein Position']].agg('-'.join, axis=1).unique())
                peptide_count = len(good_binders["MT Epitope Seq"].unique())
        else:
            hla = {allele: '' for allele in hla_types}
            anno_count = 0
            peptide_count = 0
        # Human-readable amino-acid change label.
        if self.file_type == 'bedpe':
            best['aachange'] = best['key']
        elif self.file_type == 'pVACbind':
            best['aachange'] = best['Mutation']
        else:
            if best['Variant Type'] == 'FS':
                best['aachange'] = 'FS{}'.format(best['Protein Position'])
            else:
                (wt_aa, mt_aa) = best["Mutation"].split("/")
                best["aachange"] = "".join([wt_aa, best["Protein Position"], mt_aa])
        # Assemble the output line (HLA columns first, then summary columns).
        out_dict = hla
        if self.file_type == 'pVACbind':
            out_dict.update({
                'Gene': ["NA"],
                'AA_change': [best["aachange"]],
                'Num_Transcript': [anno_count],
                'Peptide': [best["Epitope Seq"]],
                'Pos': ["NA"],
                'Num_Peptides': [peptide_count],
                'ic50_MT': [best["Median Score"]],
                'ic50_WT': ["NA"],
                'percentile_MT': [best["Median Percentile"]],
                'percentile_WT': ["NA"],
                'RNA_expr': ["NA"],
                'RNA_VAF': ["NA"],
                'RNA_Depth': ["NA"],
                'DNA_VAF': ["NA"],
                'tier': [tier],
            })
        else:
            out_dict.update({
                'Gene': [best["Gene Name"]],
                'AA_change': [best["aachange"]],
                'Num_Transcript': [anno_count],
                'Peptide': [best["MT Epitope Seq"]],
                'Pos': [best["Mutation Position"]],
                'Num_Peptides': [peptide_count],
                'ic50_MT': [best["Median MT Score"]],
                'ic50_WT': [best["Median WT Score"]],
                'percentile_MT': [best["Median MT Percentile"]],
                'percentile_WT': [best["Median WT Percentile"]],
                'RNA_expr': [best["Gene Expression"]],
                'RNA_VAF': [best["Tumor RNA VAF"]],
                'RNA_Depth': [best["Tumor RNA Depth"]],
                'DNA_VAF': [best["Tumor DNA VAF"]],
                'tier': [tier],
            })
        return pd.DataFrame.from_dict(out_dict)

    def sort_table(self, df):
        """Sort the summary table: best tier first, then combined binding/expression rank."""
        if self.file_type == 'pVACbind':
            df.sort_values(by=["ic50_MT"], inplace=True, ascending=True)
        else:
            # Make sure the tiers sort in the expected order.
            tier_sorter = ["Pass", "Relaxed", "LowExpr", "Anchor", "Subclonal", "Poor", "NoExpr"]
            sorter_index = dict(zip(tier_sorter, range(len(tier_sorter))))
            df["rank_tier"] = df['tier'].map(sorter_index)
            df["rank_ic50"] = df["ic50_MT"].rank(ascending=True, method='dense')
            df["expr"] = df["RNA_expr"] * df["RNA_VAF"]
            df["rank_expr"] = df["expr"].rank(ascending=False, method='dense')
            df["rank"] = df["rank_ic50"] + df["rank_expr"]
            df.sort_values(by=["rank_tier", "rank", "Gene", "AA_change"], inplace=True, ascending=True)
            # Keyword-style drop: positional axis argument was removed in pandas 2.x.
            df.drop(columns=["rank_tier", "rank_ic50", "expr", "rank_expr", "rank"], inplace=True)
        return df

    def execute(self):
        """Read the input TSV, aggregate one line per variant, write the output TSV."""
        df = pd.read_csv(self.input_file, delimiter='\t', float_precision='high', low_memory=False, na_values="NA", keep_default_na=False)
        df.fillna(value={"Tumor RNA Depth": 0, "Tumor RNA VAF": 0, "Tumor DNA VAF": 0, "Gene Expression": 0}, inplace=True)
        # All HLA alleles represented in the input.
        hla_types = df['HLA Allele'].unique()
        # Build a per-variant grouping key.
        if self.file_type == 'pVACbind':
            df["key"] = df["Mutation"]
            vaf_clonal = None
        else:
            for column in ['Chromosome', 'Start', 'Stop', 'Protein Position', 'Mutation']:
                df[column] = df[column].astype(str)
            if self.file_type == 'bedpe':
                df["key"] = df[['Chromosome', 'Start', 'Stop']].agg(' | '.join, axis=1)
            else:
                df["key"] = df[['Chromosome', 'Start', 'Stop', 'Reference', 'Variant']].agg('-'.join, axis=1)
            # Crude clonal-VAF/purity estimate: highest DNA VAF below 0.6.
            # NOTE(review): raises IndexError when every VAF >= 0.6 — confirm
            # inputs always contain a sub-0.6 VAF.
            vafs = np.sort(df['Tumor DNA VAF'].unique())[::-1]
            vaf_clonal = list(filter(lambda vaf: vaf < 0.6, vafs))[0]
        keys = df["key"].unique()
        columns = hla_types.tolist()
        columns.extend(['Gene', 'AA_change', 'Num_Transcript', 'Peptide', 'Pos', 'Num_Peptides', 'ic50_MT', 'ic50_WT', 'percentile_MT', 'percentile_WT', 'RNA_expr', 'RNA_VAF', 'RNA_Depth', 'DNA_VAF', 'tier'])
        best_lines = [
            self.get_best_mut_line(df[df["key"] == key], hla_types, vaf_clonal, 1000)
            for key in keys
        ]
        # pd.concat replaces DataFrame.append (removed in pandas 2.x).
        if best_lines:
            peptide_table = pd.concat(best_lines, ignore_index=True, sort=False).reindex(columns=columns)
        else:
            peptide_table = pd.DataFrame(columns=columns)
        peptide_table = self.sort_table(peptide_table)
        peptide_table.to_csv(self.output_file, sep='\t', na_rep='NA', index=False)
from rest_framework.views import APIView
from rest_framework.response import Response
"""List of HTTP status code"""
from rest_framework import status
from profiles_api import serializer
from rest_framework import viewsets
class HelloApiView(APIView):
    """Test API View demonstrating the basic HTTP verbs."""

    serializer_class = serializer.HelloSerializer

    def get(self, request, format=None):
        """Return a list of APIView features."""
        an_apiview = [
            'Use HTTP methods as function (get, post, patch, put, delete',
            'Is similar to a traditional Django View',
            'Gives you the most control over you application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': an_apiview})

    def post(self, request):
        """Create a greeting message from the posted name."""
        hello_serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not hello_serializer.is_valid():
            return Response(
                hello_serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = hello_serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})

    def put(self, request, pk=None):
        """Handle a full update of an object (pk identifies the target)."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Handle deletion of an object."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Test API ViewSet demonstrating the standard viewset actions."""

    serializer_class = serializer.HelloSerializer

    def list(self, request):
        """Return a hello message."""
        a_viewset = [
            'Uses action (list, create, retrieve, update, partial_update',
            'Teomoney',
            'I want to make a website',
        ]
        return Response({'message': 'Hello', 'a_viewset': a_viewset})

    def create(self, request):
        """Create a new hello message from the posted name."""
        hello_serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not hello_serializer.is_valid():
            return Response(hello_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        name = hello_serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle a full update of an object."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle a partial update of an object."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle deletion of an object."""
        return Response({'http_method': 'DELETE'})
from django.db import models
from django.utils.datetime_safe import datetime
from simple_history.models import HistoricalRecords
class Project(models.Model):
    """
    Used to house information pertaining to a particular project. The below fields are the bare minimum - additional
    relevant fields can be added.
    """
    # Short, required project title.
    name = models.CharField(max_length=100, blank=False)
    # Required free-text summary, capped at 300 characters.
    description = models.TextField(max_length=300, blank=False)
    # Profiles working on the project; optional, reachable as profile.projects.
    users_assigned = models.ManyToManyField(
        "users.Profile", related_name="projects", blank=True
    )
    def __str__(self):
        # Display projects by name in admin/list views.
        return self.name
class Risk(models.Model):
    """
    Used to create records describing particular Project Risks.

    Every change to a Risk is tracked via django-simple-history
    (``change_history``) and surfaced through ``get_change_history``.
    """
    class Background(models.TextChoices):
        # Business area the risk belongs to.
        FINANCE = "1", "Finance"
        OPERATIONS = "2", "Operations"
        STAFFING = "3", "Staffing"
    class Priority(models.TextChoices):
        LOW = "1", "Low"
        MEDIUM = "2", "Medium"
        HIGH = "3", "High"
    class Probability(models.TextChoices):
        # Likelihood in 10% steps; stored values are "0".."10".
        ZERO_PERCENT = "0", "0%"
        TEN_PERCENT = "1", "10%"
        TWENTY_PERCENT = "2", "20%"
        THIRTY_PERCENT = "3", "30%"
        FORTY_PERCENT = "4", "40%"
        FIFTY_PERCENT = "5", "50%"
        SIXTY_PERCENT = "6", "60%"
        SEVENTY_PERCENT = "7", "70%"
        EIGHTY_PERCENT = "8", "80%"
        NINETY_PERCENT = "9", "90%"
        HUNDRED_PERCENT = "10", "100%"
    # Required short name of the risk.
    name = models.CharField(max_length=100, blank=False)
    # Owning project; deleting the project deletes its risks.
    project = models.ForeignKey(
        Project, on_delete=models.CASCADE, related_name="risks", blank=False
    )
    background = models.CharField(
        max_length=50, choices=Background.choices, blank=False
    )
    priority = models.CharField(max_length=2, choices=Priority.choices, blank=False)
    probability_percentage = models.CharField(
        max_length=2, choices=Probability.choices, blank=False
    )
    # Profiles assigned to resolve the risk.
    # NOTE(review): related_name mirrors the field name, so the reverse
    # accessor on Profile is also ``resolvers_assigned`` — confirm intended.
    resolvers_assigned = models.ManyToManyField(
        "users.Profile", related_name="resolvers_assigned", blank=True
    )
    # django-simple-history audit trail of every save.
    change_history = HistoricalRecords()
    def __str__(self):
        return self.name
    @property
    def get_change_history(self) -> list:
        """
        Retrieves information from the historical records of this Risk and
        diffs consecutive revisions.
        :return: a list of dicts, one per changed field per revision, with
                 the field name, old/new values, and change timestamp.
        """
        history = self.change_history.all().values()
        changes_list = list(history)
        # Bookkeeping columns that change on every revision and carry no
        # domain meaning.
        irrelevant_changes = ["history_id", "history_date", "history_type"]
        changes_descriptions = list()
        for index, change in enumerate(changes_list):
            if index != 0:
                for key, value in change.items():
                    # Compare each revision against the previous entry.
                    # assumes newest-first ordering (simple_history default:
                    # index-1 is the newer record) — TODO confirm
                    if changes_list[index - 1][key] != changes_list[index][key]:
                        if key not in irrelevant_changes:
                            new_value = changes_list[index - 1][key]
                            old_value = changes_list[index][key]
                            timestamp = datetime.strftime(
                                changes_list[index]["history_date"],
                                "%d-%m-%Y, %H:%M:%S",
                            )
                            changes_descriptions.append({
                                "change": {
                                    "field_changed": key,
                                    "old_value": old_value,
                                    "new_value": new_value,
                                    "changed_on": timestamp}
                            }
                            )
        return changes_descriptions
| 3,624 | 1,057 |
from mowgli_etl.loader._kg_node_loader import _KgNodeLoader
from mowgli_etl.loader.json._json_loader import _JsonLoader
class JsonNodeLoader(_KgNodeLoader, _JsonLoader):
    """Write KG nodes to a nodes.json file by delegating to _JsonLoader."""
    # File name the _JsonLoader machinery writes to.
    _JSON_FILE_NAME = "nodes.json"
    # Satisfy the _KgNodeLoader interface with _JsonLoader's implementations;
    # presumably _load_model serializes any model object — confirm in _JsonLoader.
    close = _JsonLoader.close
    load_kg_node = _JsonLoader._load_model
    open = _JsonLoader.open
import tensorflow as tf
# Demo of TensorFlow element-wise arithmetic on two 1x3 tensors.
a = tf.ones([1, 3])  # 1x3 tensor of ones
b = tf.fill([1, 3], 3.)  # 1x3 tensor filled with 3.0
print("a:", a)
print("b:", b)
print("a+b:", tf.add(a, b))  # element-wise addition
print("a-b:", tf.subtract(a, b))  # element-wise subtraction
print("a*b:", tf.multiply(a, b))  # element-wise multiplication
print("b/a:", tf.divide(b, a))  # element-wise division
| 234 | 126 |
# -*- coding: utf-8 -*-
"""
基础native方法收集器,完成c/c++native方法收集,并整合到对应的脚本语言中
"""
import re
from os import path as op
from cpputils import *
from c import Cfg
from defs import LangType
class BaseNativeMethodCollector(object):
    """Base collector that scans a directory for C/C++ native-method headers
    and assembles them into one generated methods header.

    Subclasses must implement build().
    """

    def __init__(self, search_path, classname_base=None, filename_base=None):
        # Directory scanned for per-class native-method headers.
        self.search_path = search_path
        self.proj_name = Cfg.getprojname()
        # Default class/file name stem derived from the directory name.
        self.search_base = op.basename(search_path).capitalize()
        self.classname_base = classname_base if classname_base is not None else self.search_base
        self.filename_base = filename_base if filename_base is not None else self.search_base

    def build(self):
        """Build the aggregated methods file (subclass responsibility)."""
        raise NotImplementedError()

    def _buildable(self):
        # Nothing to collect when the search directory does not exist.
        if not op.exists(self.search_path):
            # print() call syntax works on both Python 2 and 3
            # (the original used a Python-2-only print statement).
            print('Path [{0}] not found, skip build methods...'.format(self.search_path))
            return False
        return True

    def _build_filematch_re(self):
        # Matches underscore-separated header names such as `_Foo_Bar.h`.
        return re.compile(r'(_[a-zA-Z]+)+\.h')

    def _build_cpp_cls_name(self):
        # e.g. "MyProj_FooMethods"
        return '{0}_{1}Methods'.format(self.proj_name, self.classname_base)

    def _build_cpp_file(self):
        """Create the generated C++ header wrapper with license head and the
        project's common includes."""
        cpp_file_path = op.join(self.search_path, '_{0}Methods.h'.format(self.filename_base))
        cpp_file = CppFile(
            cpp_file_path, author=Cfg.getauthor(), ver=Cfg.getver(), include_macro_prefix=self.proj_name.upper())
        cpp_file.custom_filehead = Cfg.getlicensehead(LangType.cplusplus) + '\n//!!! This file is auto generated by script tool, do not modify it!!!\n'
        cpp_file.addincl('{0}/common/LibHeader.h'.format(self.proj_name))
        cpp_file.addincl('{0}/common/Macro.h'.format(self.proj_name))
        return cpp_file
| 1,823 | 632 |
from .contexts import (
DID_V1,
SECURITY_V1,
SECURITY_V2,
SECURITY_V3_UNSTABLE,
CREDENTIALS_V1,
EXAMPLES_V1,
BBS_V1,
CITIZENSHIP_V1,
ODRL,
SCHEMA_ORG,
)
from ..ld_proofs.constants import (
SECURITY_CONTEXT_V2_URL,
SECURITY_CONTEXT_V1_URL,
DID_V1_CONTEXT_URL,
SECURITY_CONTEXT_BBS_URL,
CREDENTIALS_CONTEXT_V1_URL,
SECURITY_CONTEXT_V3_URL,
)
from .dids import (
DID_z6Mkgg342Ycpuk263R9d8Aq6MUaxPn1DDeHyGo38EefXmgDL,
DID_zUC72Q7XD4PE4CrMiDVXuvZng3sBvMmaGgNeTUJuzavH2BS7ThbHL9FhsZM9QYY5fqAQ4MB8M9oudz3tfuaX36Ajr97QRW7LBt6WWmrtESe6Bs5NYzFtLWEmeVtvRYVAgjFcJSa,
DID_EXAMPLE_48939859,
DID_SOV_QqEfJxe752NCmWqR5TssZ5,
)
# Static JSON-LD resolution map: DID documents keyed by their DID, plus
# well-known context URLs mapped to bundled context documents. Presumably
# used so custom_document_loader can resolve everything offline — confirm
# against callers.
DOCUMENTS = {
    DID_z6Mkgg342Ycpuk263R9d8Aq6MUaxPn1DDeHyGo38EefXmgDL.get(
        "id"
    ): DID_z6Mkgg342Ycpuk263R9d8Aq6MUaxPn1DDeHyGo38EefXmgDL,
    DID_zUC72Q7XD4PE4CrMiDVXuvZng3sBvMmaGgNeTUJuzavH2BS7ThbHL9FhsZM9QYY5fqAQ4MB8M9oudz3tfuaX36Ajr97QRW7LBt6WWmrtESe6Bs5NYzFtLWEmeVtvRYVAgjFcJSa.get(
        "id"
    ): DID_zUC72Q7XD4PE4CrMiDVXuvZng3sBvMmaGgNeTUJuzavH2BS7ThbHL9FhsZM9QYY5fqAQ4MB8M9oudz3tfuaX36Ajr97QRW7LBt6WWmrtESe6Bs5NYzFtLWEmeVtvRYVAgjFcJSa,
    DID_EXAMPLE_48939859.get("id"): DID_EXAMPLE_48939859,
    DID_SOV_QqEfJxe752NCmWqR5TssZ5.get("id"): DID_SOV_QqEfJxe752NCmWqR5TssZ5,
    SECURITY_CONTEXT_V1_URL: SECURITY_V1,
    SECURITY_CONTEXT_V2_URL: SECURITY_V2,
    SECURITY_CONTEXT_V3_URL: SECURITY_V3_UNSTABLE,
    DID_V1_CONTEXT_URL: DID_V1,
    CREDENTIALS_CONTEXT_V1_URL: CREDENTIALS_V1,
    SECURITY_CONTEXT_BBS_URL: BBS_V1,
    "https://www.w3.org/2018/credentials/examples/v1": EXAMPLES_V1,
    "https://w3id.org/citizenship/v1": CITIZENSHIP_V1,
    "https://www.w3.org/ns/odrl.jsonld": ODRL,
    "http://schema.org/": SCHEMA_ORG,
}
def custom_document_loader(url: str, options: dict):
    """Resolve a JSON-LD document from the static DOCUMENTS map.

    Tries the full url first (including any fragment), then the url with
    its fragment stripped. Raises when neither form is known; removed the
    original's stray debug print before the raise.
    """
    for key in (url, url.split("#")[0]):
        if key in DOCUMENTS:
            return {
                "contentType": "application/ld+json",
                "contextUrl": None,
                "document": DOCUMENTS[key],
                "documentUrl": url,
            }
    raise Exception(f"No custom context support for {url}")
| 2,490 | 1,299 |
from collections import OrderedDict
def parse_codeblock_args(elem):
    """Split a code-block element into its syntax name and option mapping.

    The first class (if any) is taken as the syntax; attribute values
    spelled 'false' or 'no' in any case are coerced to boolean False.
    """
    syntax = elem.classes[0] if elem.classes else ''
    args = OrderedDict(
        (key, False if value.lower() in ('false', 'no') else value)
        for key, value in OrderedDict(elem.attributes).items()
    )
    return syntax, args
| 288 | 90 |
""" openconfig_aaa
This module defines configuration and operational state data
related to authorization, authentication, and accounting (AAA)
management.
Portions of this model reuse data definitions or structure from
RFC 7317 \- A YANG Data Model for System Management
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
from ydk.models.openconfig.openconfig_aaa_types import AAASERVERTYPE
class TACACS(AAASERVERTYPE):
    """Identity for the Terminal Access Controller Access Control System
    (TACACS+) AAA server type."""

    _prefix = 'oc-aaa'
    _revision = '2017-09-18'

    def __init__(self, ns="http://openconfig.net/yang/aaa", pref="openconfig-aaa", tag="openconfig-aaa:TACACS"):
        # Python 2 needs the explicit super(...) spelling; Python 3 can use
        # the zero-argument form.
        if sys.version_info <= (3,):
            super(TACACS, self).__init__(ns, pref, tag)
        else:
            super().__init__(ns, pref, tag)
class RADIUS(AAASERVERTYPE):
    """Identity for the Remote Authentication Dial In User Service (RADIUS)
    AAA server type."""

    _prefix = 'oc-aaa'
    _revision = '2017-09-18'

    def __init__(self, ns="http://openconfig.net/yang/aaa", pref="openconfig-aaa", tag="openconfig-aaa:RADIUS"):
        # Python 2 needs the explicit super(...) spelling; Python 3 can use
        # the zero-argument form.
        if sys.version_info <= (3,):
            super(RADIUS, self).__init__(ns, pref, tag)
        else:
            super().__init__(ns, pref, tag)
| 1,740 | 608 |
#!/usr/local/bin/python3
import atheris
import sys
import os
def TestOneInput(data):
    """Atheris fuzz entry point: raise once the input starts with b'bug'.

    The comparison is deliberately kept one byte per branch so the
    coverage-guided fuzzer can discover the prefix incrementally.
    """
    if len(data) < 3:
        return
    if data[0] != ord('b'):
        return
    if data[1] != ord('u'):
        return
    if data[2] != ord('g'):
        return
    raise Exception("Made it to the bug!")
# Instrument all loaded modules so atheris can collect branch coverage, then
# hand control of the process to the fuzzing loop (argv carries the corpus
# directory and libFuzzer-style options).
atheris.instrument_all()
atheris.Setup(sys.argv, TestOneInput)
atheris.Fuzz()
| 361 | 125 |
from django.conf import urls
from djfw.tinymce import views
# URL namespace used when reversing these routes (e.g. 'djfw.tinymce:index').
app_name = 'djfw.tinymce'
# TinyMCE support endpoints: the smiley picker is served both at the root and
# at the literal path TinyMCE's emotions plugin requests; the remaining routes
# list previously uploaded files and accept new uploads.
urlpatterns = [
    urls.re_path(r'^$', views.Smiles.as_view(), name='index'),
    urls.re_path(
        r'^emotions/emotions.htm$', views.Smiles.as_view(), name='smiles'),
    urls.re_path(
        r'^uploaded_files/$',
        views.Uploaded_files.as_view(),
        name='uploaded_files'),
    urls.re_path(
        r'^upload_file/$', views.Upload_file.as_view(), name='upload_file'),
]
| 496 | 200 |
# -*- coding: utf-8 -*-
# Package metadata, consumed by setup tooling and exposed as
# <package>.__author__ / __email__ / __version__.
__author__ = """Christian Strappazzon"""
__email__ = 'lab@strap.it'
__version__ = '1.0.9'
| 115 | 53 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file copies the logic from GYP to find the MSVC configuration. It's not
# currently used because it is too slow. We will probably build this
# functionality into the C++ code in the future.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
class VisualStudioVersion(object):
  """Information regarding a single version of Visual Studio.

  A value object describing how to generate projects for, and set up the
  build environment of, one VS release (see _CreateVersion for the known
  set of releases).
  """

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    self.short_name = short_name              # e.g. '2010' or '2010e'
    self.description = description            # human-readable name
    self.solution_version = solution_version  # version stamp for .sln files
    self.project_version = project_version    # version stamp for project files
    self.flat_sln = flat_sln                  # express editions use a flat sln
    self.uses_vcxproj = uses_vcxproj          # VS2010+: .vcxproj, older: .vcproj
    self.path = path                          # VS install root (may be None)
    self.sdk_based = sdk_based                # True when detected via the SDK
    self.default_toolset = default_toolset    # msbuild toolset, e.g. 'v110'

  def ShortName(self):
    """Get the short name, e.g. '2010' or '2010e'."""
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    """Whether solutions are generated without nested project folders."""
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    # Conditional expression replaces the fragile `cond and a or b` idiom
    # (which breaks whenever `a` is falsy).
    return '.vcxproj' if self.uses_vcxproj else '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool."""
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment.

    Args:
      target_arch: 'x86' or 'x64'.
    """
    assert target_arch in ('x86', 'x64')
    # When running in an SDK command-line environment, prefer the SDK's own
    # environment script.
    sdk_dir = os.environ.get('WindowsSDKDir')
    if self.sdk_based and sdk_dir:
      return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
              '/' + target_arch]
    else:
      # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
      # vcvars32, which it can only find if VS??COMNTOOLS is set, which it
      # isn't always.
      if target_arch == 'x86':
        return [os.path.normpath(
            os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
      else:
        assert target_arch == 'x64'
        arg = 'x86_amd64'
        if (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          # Use the 64-on-64 compiler if we can.
          arg = 'amd64'
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
  """Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32.  Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.
  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  text = None
  try:
    text = _RegistryQueryBase('Sysnative', key, value)
  # Fixed: `except OSError, e` is Python-2-only syntax; the `as` form works
  # on Python 2.6+ and Python 3.
  except OSError as e:
    if e.errno == errno.ENOENT:
      # Sysnative doesn't exist (pre-Vista, or 64-bit python): use System32.
      text = _RegistryQueryBase('System32', key, value)
    else:
      raise
  return text
def _RegistryGetValue(key, value):
  """Use reg.exe to obtain the value of a registry key.

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.
  """
  output = _RegistryQuery(key, value)
  if output:
    # reg.exe prints e.g. "    name    REG_SZ    C:\\some\\path\r\n";
    # capture everything after the type token up to the carriage return.
    found = re.search(r'REG_\w+\s+([^\r]+)\r\n', output)
    if found:
      return found.group(1)
  return None
def _RegistryKeyExists(key):
  """Use reg.exe to see if a key exists.

  Args:
    key: The registry key to check.
  Return:
    True if the key exists
  """
  # _RegistryQuery returns stdout text on success and None on failure.
  return bool(_RegistryQuery(key))
def _CreateVersion(name, path, sdk_based=False):
  """Sets up MSVS project generation.

  Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions python will throw a error.
  """
  if path:
    path = os.path.normpath(path)
  # Table of known releases keyed by short name; an 'e' suffix denotes an
  # express edition (flat solution, SDK-based setup possible).
  # NOTE(review): '2013'/'2013e' reuse default_toolset='v110' (the VS2012
  # toolset) — looks like a copy-paste from the 2012 entries; confirm against
  # upstream before relying on it.
  versions = {
      '2013': VisualStudioVersion('2013',
                                  'Visual Studio 2013',
                                  solution_version='13.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v110'),
      '2013e': VisualStudioVersion('2013e',
                                   'Visual Studio 2013',
                                   solution_version='13.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based,
                                   default_toolset='v110'),
      '2012': VisualStudioVersion('2012',
                                  'Visual Studio 2012',
                                  solution_version='12.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v110'),
      '2012e': VisualStudioVersion('2012e',
                                   'Visual Studio 2012',
                                   solution_version='12.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based,
                                   default_toolset='v110'),
      '2010': VisualStudioVersion('2010',
                                  'Visual Studio 2010',
                                  solution_version='11.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based),
      '2010e': VisualStudioVersion('2010e',
                                   'Visual Studio 2010',
                                   solution_version='11.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based),
      '2008': VisualStudioVersion('2008',
                                  'Visual Studio 2008',
                                  solution_version='10.00',
                                  project_version='9.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2008e': VisualStudioVersion('2008e',
                                   'Visual Studio 2008',
                                   solution_version='10.00',
                                   project_version='9.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
      '2005': VisualStudioVersion('2005',
                                  'Visual Studio 2005',
                                  solution_version='9.00',
                                  project_version='8.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2005e': VisualStudioVersion('2005e',
                                   'Visual Studio 2005',
                                   solution_version='9.00',
                                   project_version='8.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
  }
  # KeyError here means an unknown GYP_MSVS_VERSION value.
  return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
  """Collect the list of installed visual studio versions.

  Arguments:
    versions_to_check: iterable of registry version strings, e.g. ('10.0',).
    force_express: when True, only express editions are considered even when
        a full edition is installed.
  Returns:
    A list of visual studio versions installed in descending order of
    usage preference.
  Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-10 are considered.
  Possibilities are:
    2005(e) - Visual Studio 2005 (8)
    2008(e) - Visual Studio 2008 (9)
    2010(e) - Visual Studio 2010 (10)
    2012(e) - Visual Studio 2012 (11)
    2013(e) - Visual Studio 2013 (11)
  Where (e) is e for express editions of MSVS and blank otherwise.
  """
  version_to_year = {
      '8.0': '2005',
      '9.0': '2008',
      '10.0': '2010',
      '11.0': '2012',
      '12.0': '2013',
  }
  versions = []
  for version in versions_to_check:
    # Old method of searching for which VS version is installed
    # We don't use the 2010-encouraged-way because we also want to get the
    # path to the binaries, which it doesn't offer.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Microsoft\VCExpress\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
    # Iterate the keys directly rather than via range(len(keys)).
    for key in keys:
      path = _RegistryGetValue(key, 'InstallDir')
      if not path:
        continue
      path = _ConvertToCygpath(path)
      full_path = os.path.join(path, 'devenv.exe')
      express_path = os.path.join(path, 'vcexpress.exe')
      if not force_express and os.path.exists(full_path):
        # Full (non-express) edition found.
        versions.append(_CreateVersion(version_to_year[version],
            os.path.join(path, '..', '..')))
      elif os.path.exists(express_path):
        # Express edition found.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..', '..')))
    # The old method above does not work when only SDK is installed.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
    for key in keys:
      path = _RegistryGetValue(key, version)
      if not path:
        continue
      path = _ConvertToCygpath(path)
      versions.append(_CreateVersion(version_to_year[version] + 'e',
          os.path.join(path, '..'), sdk_based=True))
  return versions
def SelectVisualStudioVersion(version='auto'):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, check environment variable for override.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')
  # Maps a requested version to the registry version strings to probe, in
  # preference order.
  version_map = {
      'auto': ('10.0', '9.0', '8.0', '11.0'),
      '2005': ('8.0',),
      '2005e': ('8.0',),
      '2008': ('9.0',),
      '2008e': ('9.0',),
      '2010': ('10.0',),
      '2010e': ('10.0',),
      '2012': ('11.0',),
      '2012e': ('11.0',),
      '2013': ('12.0',),
      '2013e': ('12.0',),
  }
  # An explicit install-path override is only honored together with an
  # express ('e') version selection.
  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    if not msvs_version or 'e' not in msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to an "e" version (e.g. 2010e)')
    return _CreateVersion(msvs_version, override_path, sdk_based=True)
  version = str(version)
  versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
  if not versions:
    if version == 'auto':
      # Default to 2005 if we couldn't find anything
      return _CreateVersion('2005', None)
    else:
      # Honor the explicit request even though nothing was detected.
      return _CreateVersion(version, None)
  # First detected version is the most preferred one.
  return versions[0]
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, open_out):
  """It's not sufficient to have the absolute path to the compiler, linker,
  etc. on Windows, as those tools rely on .dlls being in the PATH. We also
  need to support both x86 and x64 compilers within the same build (to support
  msvs_target_platform hackery). Different architectures require a different
  compiler binary, and different supporting environment variables (INCLUDE,
  LIB, LIBPATH). So, we extract the environment here, wrap all invocations
  of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
  sets up the environment, and then we do not prefix the compiler with
  an absolute path, instead preferring something like "cl.exe" in the rule
  which will then run whichever the environment setup has put in the path.
  When the following procedure to generate environment files does not
  meet your requirement (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to the gyp to suppress file
  generation and use custom environment files prepared by yourself.

  NOTE(review): a second GenerateEnvironmentFiles without the open_out
  parameter is defined later in this module and shadows this definition at
  import time.
  """
  archs = ('x86', 'x64')
  if generator_flags.get('ninja_use_custom_environment_files', 0):
    # The caller supplies environment files; just refer to cl.exe by name.
    cl_paths = {}
    for arch in archs:
      cl_paths[arch] = 'cl.exe'
    return cl_paths
  vs = GetVSVersion(generator_flags)
  cl_paths = {}
  for arch in archs:
    # Extract environment variables for subprocesses.
    args = vs.SetupScript(arch)
    args.extend(('&&', 'set'))
    popen = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    env = _ExtractImportantEnvironment(variables)
    env_block = _FormatAsEnvironmentBlock(env)
    # Persist the environment block for later use by the build wrapper.
    f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
    f.write(env_block)
    f.close()
    # Find cl.exe location for this architecture.
    args = vs.SetupScript(arch)
    args.extend(('&&',
      'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
    popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
    output, _ = popen.communicate()
    cl_paths[arch] = _ExtractCLPath(output)
  return cl_paths
def OpenOutput(path, mode='w'):
  """Open |path| for writing, creating directories if necessary."""
  try:
    os.makedirs(os.path.dirname(path))
  except OSError:
    # Directory already exists (or dirname is empty); open() below will
    # surface any real problem.
    pass
  return open(path, mode)
# Cached VisualStudioVersion instance, populated lazily by GetVSVersion().
vs_version = None


def GetVSVersion(generator_flags):
  """Return the VisualStudioVersion selected by generator_flags (cached)."""
  global vs_version
  if vs_version is None:
    vs_version = SelectVisualStudioVersion(
        generator_flags.get('msvs_version', 'auto'))
  return vs_version
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags):
  """It's not sufficient to have the absolute path to the compiler, linker,
  etc. on Windows, as those tools rely on .dlls being in the PATH. We also
  need to support both x86 and x64 compilers within the same build (to support
  msvs_target_platform hackery). Different architectures require a different
  compiler binary, and different supporting environment variables (INCLUDE,
  LIB, LIBPATH). So, we extract the environment here, wrap all invocations
  of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
  sets up the environment, and then we do not prefix the compiler with
  an absolute path, instead preferring something like "cl.exe" in the rule
  which will then run whichever the environment setup has put in the path.
  When the following procedure to generate environment files does not
  meet your requirement (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to the gyp to suppress file
  generation and use custom environment files prepared by yourself.

  NOTE(review): this re-definition shadows the earlier
  GenerateEnvironmentFiles(..., open_out) above; it hard-codes OpenOutput
  instead of taking the opener as a parameter.
  """
  archs = ('x86', 'x64')
  if generator_flags.get('ninja_use_custom_environment_files', 0):
    # The caller supplies environment files; just refer to cl.exe by name.
    cl_paths = {}
    for arch in archs:
      cl_paths[arch] = 'cl.exe'
    return cl_paths
  vs = GetVSVersion(generator_flags)
  cl_paths = {}
  for arch in archs:
    # Extract environment variables for subprocesses.
    args = vs.SetupScript(arch)
    args.extend(('&&', 'set'))
    popen = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    variables, _ = popen.communicate()
    env = _ExtractImportantEnvironment(variables)
    env_block = _FormatAsEnvironmentBlock(env)
    # Persist the environment block for later use by the build wrapper.
    f = OpenOutput(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
    f.write(env_block)
    f.close()
    # Find cl.exe location for this architecture.
    args = vs.SetupScript(arch)
    args.extend(('&&',
      'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
    popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
    output, _ = popen.communicate()
    cl_paths[arch] = _ExtractCLPath(output)
  return cl_paths
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
# Ad-hoc debugging scaffolding left at module scope.
# NOTE(review): the live `print` below uses Python-2 statement syntax and runs
# at import time (it triggers full VS detection via GetVSVersion); consider
# removing it.
#print SelectVisualStudioVersion().DefaultToolset()
#GenerateEnvironmentFiles("D:\\src\\src1\\src\\out\\gn\\eraseme", {})
#print '"', GetVSVersion({}).Path(), '"'
print '"', GetVSVersion({}).sdk_based, '"'
#-------------------------------------------------------------------------------
# Extra per-version data not carried by VisualStudioVersion; currently just
# the ATL/MFC include directory for VS2010.
version_info = {
    '2010': {
        'includes': [
            'VC\\atlmfc\\include',
        ],
    },
}
| 22,345 | 6,866 |
import pytest
import dist_zero.ids
from dist_zero.spawners.simulator import SimulatedSpawner
from dist_zero.spawners.docker import DockerSpawner
from dist_zero.spawners.cloud.aws import Ec2Spawner
from dist_zero.system_controller import SystemController
from .demo import demo, cloud_demo
from .common import dz
| 314 | 96 |
"""taskorganizer.config.settings.test ."""
from .base import *
import json
# JSON-based secrets module
# Load test-environment secrets from a JSON file kept out of version control.
with open('test_secrets.json') as f:
    secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
    """Return the named secret from the loaded JSON secrets mapping.

    Args:
        setting: key to look up (e.g. 'SECRET_KEY').
        secrets: mapping to read from; defaults to the module-level dict
            loaded from test_secrets.json.

    Raises:
        ImproperlyConfigured: if the key is missing from the secrets mapping.
    """
    try:
        return secrets[setting]
    except KeyError:
        # Fixed message: the original blamed an environment variable, but
        # these values actually come from the JSON secrets file.
        error_msg = 'Set the {0} key in the test_secrets.json file'.format(setting)
        raise ImproperlyConfigured(error_msg)
# Secret key pulled from the JSON secrets file rather than hard-coded.
SECRET_KEY = get_secret('SECRET_KEY')
# Test settings run with debugging enabled.
DEBUG = True
# Local PostgreSQL database; USER/PASSWORD/PORT fall back to Django defaults.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'andrew',
        'HOST': 'localhost',
    }}
| 661 | 215 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from salina import Agent, TAgent
from brax.envs.to_torch import JaxToTorchWrapper
from salina_examples.rl.LoP.envs import create_gym_env
from salina_examples.rl.LoP.subspace import Linear, Sequential
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.categorical import Categorical
from torch.distributions.uniform import Uniform
from salina.agents.brax import BraxAgent
class CustomBraxAgent(BraxAgent):
    """BraxAgent variant that builds its batched environment through the
    LoP example's create_gym_env helper instead of the stock constructor."""

    def _initialize_envs(self, n_envs):
        # A seed is mandatory so environment batches are reproducible.
        assert self._seed is not None, "[GymAgent] seeds must be specified"
        self.gym_env = create_gym_env(
            self.brax_env_name, batch_size=n_envs, seed=self._seed, **self.args
        )
        # Wrap so the JAX-side environment exchanges torch tensors.
        self.gym_env = JaxToTorchWrapper(self.gym_env)
class AlphaAgent(TAgent):
    """Samples per-episode mixing weights ("alphas") over a policy subspace.

    Alphas are resampled for environments whose episode just ended and
    carried over unchanged otherwise; the result is written to the
    workspace under the "alphas" key.
    """

    def __init__(self, device, n_dim = 2, geometry = "simplex", dist = "flat"):
        super().__init__()
        self.n_dim = n_dim
        self.geometry = geometry
        self.device = device
        assert geometry in ["simplex","bezier"], "geometry must be 'simplex' or 'bezier'"
        if geometry == "bezier":
            # A quadratic Bezier curve has exactly 3 control points.
            assert n_dim == 3, "number of dimensions must be equal to 3 for Bezier subspaces"
        assert dist in ["flat","categorical"], "distribution must be 'flat' or 'categorical'"
        if dist == "flat":
            # Dirichlet(1) is uniform over the simplex; for Bezier a uniform
            # scalar curve parameter in [0, 1] is drawn instead.
            self.dist = Dirichlet(torch.ones(n_dim)) if geometry == "simplex" else Uniform(0,1)
        else:
            # Categorical over vertices (converted to one-hot in forward).
            self.dist = Categorical(torch.ones(n_dim))

    def forward(self, t, replay = False, **args):
        B = self.workspace.batch_size()
        alphas = self.dist.sample(torch.Size([B])).to(self.device)
        if isinstance(self.dist,Categorical):
            # Vertex indices -> one-hot mixing weights.
            alphas = F.one_hot(alphas,num_classes = self.n_dim).float()
        elif self.geometry == "bezier":
            # Map the scalar t to the quadratic Bernstein coefficients
            # ((1-t)^2, 2t(1-t), t^2).
            alphas = torch.stack([(1 - alphas) ** 2, 2 * alphas * (1 - alphas), alphas ** 2],dim = 1)
        if (t > 0) and (not replay):
            # Where done == 1 the freshly sampled alphas are used; where the
            # episode continues, the previous timestep's alphas are kept.
            done = self.get(("env/done", t)).float().unsqueeze(-1)
            alphas_old = self.get(("alphas", t-1))
            alphas = alphas * done + alphas_old * (1 - done)
        self.set(("alphas", t), alphas)
class LoPAgent(TAgent):
    """Policy agent over a subspace of n_models policies (Line of Policies).

    The network is built from subspace Linear layers that hold one weight
    set per anchor model; the per-sample "alphas" read from the workspace
    select the point in the subspace.  Actions are tanh-squashed and the
    log-probabilities include the corresponding tanh correction term.
    """

    def __init__(self, **args):
        super().__init__()
        # A throwaway env instance is built solely to read the
        # observation/action sizes.
        env = JaxToTorchWrapper(create_gym_env(args["env"].env_name))
        input_size = env.observation_space.shape[0]
        num_outputs = env.action_space.shape[0]
        hs = args["hidden_size"]
        self.n_models = args["n_models"]
        n_layers = args["n_layers"]
        # Alternating subspace-Linear/ReLU stack; identity when n_layers == 1.
        hidden_layers = [Linear(self.n_models,hs,hs) if i%2==0 else nn.ReLU() for i in range(2*(n_layers - 1))] if n_layers >1 else [nn.Identity()]
        self.model = Sequential(
            Linear(self.n_models, input_size, hs),
            nn.ReLU(),
            *hidden_layers,
            Linear(self.n_models, hs, num_outputs),
        )

    def cosine_similarity(self,i,j):
        """Squared cosine similarity between the weights of anchors i and j,
        averaged over all parameter tensors."""
        assert (i < self.n_models) and (j < self.n_models), "index higher than n_models"
        cos_sim = torch.Tensor([0.]).to(self.model[0].weight.device)
        n = 0
        for w in self.parameters():
            # max(..., 1e-8) guards the division against zero-norm tensors.
            p = ((w[i] * w[j]).sum() / max(((w[i] ** 2).sum().sqrt() * (w[j] ** 2).sum().sqrt()),1e-8)) ** 2
            cos_sim += p
            n += 1
        return cos_sim / n

    def L2_norm(self,i,j):
        """Mean (over parameter tensors) L2 distance between anchors i and j."""
        assert (i < self.n_models) and (j < self.n_models), "index higher than n_models"
        L2_norm = torch.Tensor([0.]).to(self.model[0].weight.device)
        n = 0
        for w in self.parameters():
            L2_norm += torch.linalg.norm(w[i] - w[j])
            n += 1
        return L2_norm / n

    def forward(self, t, replay, action_std, **args):
        if replay:
            # Replay path: recompute log-probs of the stored actions over the
            # whole trajectory (gradients flow through the policy).
            input = self.get("env/transformed_obs")
            alphas = self.get("alphas")
            mean = self.model(input,alphas)
            std = torch.ones_like(mean) * action_std + 0.000001
            dist = Normal(mean, std)
            action = self.get("real_action")
            logp_pi = dist.log_prob(action).sum(axis=-1)
            # tanh-squashing correction to the Gaussian log-probability.
            logp_pi -= (2 * (np.log(2) - action - F.softplus(-2 * action))).sum(axis=-1)
            self.set("action_logprobs", logp_pi)
        else:
            # Rollout path: sample (or take the mean when action_std == 0)
            # at timestep t, without tracking gradients.
            input = self.get(("env/transformed_obs", t))
            alphas = self.get(("alphas",t))
            with torch.no_grad():
                mean = self.model(input,alphas)
                std = torch.ones_like(mean) * action_std + 0.000001
                dist = Normal(mean, std)
                action = dist.sample() if action_std > 0 else dist.mean
                self.set(("real_action", t), action)
                logp_pi = dist.log_prob(action).sum(axis=-1)
                logp_pi -= (2 * (np.log(2) - action - F.softplus(-2 * action))).sum(axis=-1)
                self.set(("old_action_logprobs", t), logp_pi)
                # The env consumes the squashed action in [-1, 1].
                action = torch.tanh(action)
                self.set(("action", t), action)

    def seed(self,seed):
        # Seeding is handled by the environment agent; nothing to do here.
        pass
class CriticAgent(Agent):
    """Value-function agent: predicts a scalar critic value from the
    (normalized) observation concatenated with the subspace mixing weights
    ("alphas")."""

    def __init__(self, **args):
        super().__init__()
        # A throwaway env instance is built solely to read the observation size.
        env = JaxToTorchWrapper(create_gym_env(args["env"].env_name))
        input_size = env.observation_space.shape[0]
        alpha_size = args["alpha_size"]
        hs = args["hidden_size"]
        n_layers = args["n_layers"]
        # Alternating Linear/ReLU hidden stack; identity when n_layers == 1.
        hidden_layers = [nn.Linear(hs,hs) if i%2==0 else nn.ReLU() for i in range(2*(n_layers - 1))] if n_layers >1 else [nn.Identity()]
        self.model_critic = nn.Sequential(
            nn.Linear(input_size + alpha_size, hs),
            nn.ReLU(),
            *hidden_layers,
            nn.Linear(hs, 1),
        )

    def forward(self, t = None, **args):
        """Write the critic value(s) into the workspace.

        When t is None the whole trajectory is evaluated at once; otherwise
        only timestep t is evaluated.
        """
        # Fixed: `t is None` instead of `t == None` (PEP 8 identity test for
        # the None singleton); the two branches are also deduplicated.
        if t is None:
            obs = self.get("env/transformed_obs")
            alphas = self.get("alphas")
            target = "critic"
        else:
            obs = self.get(("env/transformed_obs", t))
            alphas = self.get(("alphas", t))
            target = ("critic", t)
        x = torch.cat([obs, alphas], dim=-1)
        self.set(target, self.model_critic(x).squeeze(-1))
class Normalizer(TAgent):
    """Online observation normalizer: maintains running mean/variance of the
    raw env observations and writes the normalized result to
    'env/transformed_obs'."""

    def __init__(self, env):
        super().__init__()
        # A throwaway env instance is built solely to read the feature count.
        env = JaxToTorchWrapper(create_gym_env(env.env_name))
        self.n_features = env.observation_space.shape[0]
        # Update counter; created lazily on the first batch so it lands on
        # the observations' device.
        self.n = None
        # Stored as non-trainable Parameters so mean/var travel with the
        # module's state_dict.
        self.mean = nn.Parameter(torch.zeros(self.n_features), requires_grad = False)
        self.mean_diff = torch.zeros(self.n_features)
        self.var = nn.Parameter(torch.ones(self.n_features), requires_grad = False)

    def forward(self, t, update_normalizer=True, **kwargs):
        input = self.get(("env/env_obs", t))
        if update_normalizer:
            self.update(input)
        input = self.normalize(input)
        self.set(("env/transformed_obs", t), input)

    def update(self, x):
        # Lazily move the statistics onto the observations' device.
        if self.n is None:
            device = x.device
            self.n = torch.zeros(self.n_features).to(device)
            self.mean = self.mean.to(device)
            self.mean_diff = self.mean_diff.to(device)
            self.var = self.var.to(device)
        # NOTE(review): this is a Welford-style update applied to per-batch
        # means (each call counts as one sample regardless of batch size) —
        # presumably an intentional approximation; confirm before changing.
        self.n += 1.0
        last_mean = self.mean.clone()
        self.mean += (x - self.mean).mean(dim=0) / self.n
        self.mean_diff += (x - last_mean).mean(dim=0) * (x - self.mean).mean(dim=0)
        # Variance is clamped below at 1e-2 so normalize() never divides by ~0.
        self.var = nn.Parameter(torch.clamp(self.mean_diff / self.n, min=1e-2), requires_grad = False).to(x.device)

    def normalize(self, inputs):
        obs_std = torch.sqrt(self.var)
        return (inputs - self.mean) / obs_std

    def seed(self, seed):
        torch.manual_seed(seed)
# Right-aligned star triangle: row k (1-based) prints k stars padded with
# leading spaces to a total width of `num` lines read from stdin.
num = int(input())
for row in range(num):
    pad_width = num - row - 1
    star_count = row + 1
    for _ in range(pad_width):
        print(" ", sep="", end="")
    for _ in range(star_count):
        print("*", sep="", end="")
    print()
import gpop
# Simple REPL: read a nickname from stdin and print the profile statistics
# returned by the gpop client, forever (terminate with Ctrl-C / EOF).
while True:
    nick = input()
    data = gpop.get_data(nick)
    print(f'''
{nick} stats:
Level: {data.level}
Time played: {data.time}
Levels played: {data.played}
Levels created: {data.created}
Views: {data.views}
G-Coins: {data.coins}
GBobs: {data.gbobs}
''')
| 275 | 111 |
# -*- coding=utf-8 -*-
#Copyright 2012 Daniel Osvaldo Mondaca Seguel
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
import os
from setuptools import setup
# Python-2-only hack: reload(sys) re-exposes setdefaultencoding (hidden by
# site.py) so the process-wide default codec can be forced to UTF-8.  Both
# reload() as a builtin and setdefaultencoding were removed in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
# Shell-style glob listing the data files shipped with the package.
files = ["pyscrap/*"]
def read(fname):
    """Return the contents of *fname*, resolved relative to this setup.py.

    Uses a context manager so the file handle is closed promptly instead of
    being leaked until garbage collection (the original never closed it).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package definition for the pyscrap micro web-scraping framework; installs
# the `wscrap` console script and the `pyscrap` package.
setup(
    scripts=["bin/wscrap"],
    name="pyscrap",
    version="0.0.9",
    author="Daniel Mondaca",
    author_email="daniel@analitic.cl",
    description=("micro framework for web scraping"),
    license = "Apache 2.0 License",
    keywords = "web scraping",
    url = "http://github.com/Nievous/pyscrap",
    packages=["pyscrap"],
    install_requires = ["lxml", "simplejson"],
    long_description=read("README.txt"),
    package_data = {"package": files},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Topic :: Software Development",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
    ],
)
| 1,541 | 500 |
#!/usr/bin/env python
#####
# simple implementation of "Densely Connected Convolutional Networks, CVPR 2017"
#####
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import numpy as np
class DenseBlock(nn.Module):
    """Dense block from "Densely Connected Convolutional Networks" (CVPR 2017).

    Layer i receives the concatenation of the block input and all previous
    layer outputs; the block returns only the final layer's k feature maps.
    """

    def __init__(self, k_0, k, L, use_dropout=False):
        """
        Args:
            k_0 (int): number of input channels to dense block
            k (int): growth rate described in paper
            L (int): number of layers in dense block
            use_dropout (bool): whether use dropout after conv layer
        """
        super(DenseBlock, self).__init__()
        self.L = L
        self.use_dropout = use_dropout
        for i in range(L):
            # TODO: per-dimension batchnormalization is provided in torch.nn
            # however, in the original batchnormalization paper,
            # per feature map batchnormalization is applied for convolutional layer
            n_in = k_0 if i == 0 else (k_0 + i * k)  # channels after i concatenations
            self.add_module('bn' + str(i), nn.BatchNorm2d(n_in))
            self.add_module('conv' + str(i), nn.Conv2d(n_in, k, 3, padding=1))

    def forward(self, x):
        # Bug fix: the original used `children.next()`, which is
        # Python-2-only and crashes under Python 3.  Looking the submodules
        # up by name also avoids relying on child registration order.
        for i in range(self.L):
            bn = getattr(self, 'bn' + str(i))
            conv = getattr(self, 'conv' + str(i))
            y = conv(F.relu(bn(x)))
            if self.use_dropout:
                y = F.dropout(y, p=0.2, training=self.training)
            if (i + 1) == self.L:
                x = y  # the block's output is only the last conv's features
            else:
                x = torch.cat((x, y), 1)  # dense connectivity: concat channels
        return x
class TransitionLayer(nn.Module):
    """Transition layer between dense blocks: a 1x1 conv for channel
    reduction, optional dropout, then 2x2 average pooling that halves the
    spatial resolution."""

    def __init__(self, n_in, n_out, use_dropout=False):
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            use_dropout (bool): whether use dropout after conv layer
        """
        super(TransitionLayer, self).__init__()
        self.conv1x1 = nn.Conv2d(n_in, n_out, 1)
        self.use_dropout = use_dropout

    def forward(self, x):
        out = self.conv1x1(x)
        if self.use_dropout:
            out = F.dropout(out, p=0.2, training=self.training)
        return F.avg_pool2d(out, 2)
def init_weights(m):
    """
    TODO: initialization
    """
    # Placeholder: intended to hold custom weight initialization for module
    # `m`; currently a deliberate no-op.
    pass
class DenseNet(nn.Module):
    """DenseNet for 32x32 color images: an initial 3x3 conv, three dense
    blocks joined by transition layers, global average pooling, and a
    final linear classifier."""

    def __init__(self, k, L, C, use_dropout=False):
        """
        Args:
            k (int): growth rate for denseblocks
            L (int): number of layers for denseblocks
            C (int): number of classes
            use_dropout (bool): whether use dropout after conv layer
        """
        super(DenseNet, self).__init__()
        # First conv applied to the raw RGB input.
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # Dense blocks connected by transition layers.
        self.db1 = DenseBlock(16, k, L, use_dropout)
        self.trl1 = TransitionLayer(k, k, use_dropout)
        self.db2 = DenseBlock(k, k, L, use_dropout)
        self.trl2 = TransitionLayer(k, k, use_dropout)
        self.db3 = DenseBlock(k, k, L, use_dropout)
        # Classifier over globally pooled features.
        self.fc = nn.Linear(k, C)

    def forward(self, x):
        out = self.db1(self.conv1(x))
        out = self.db2(self.trl1(out))
        out = self.db3(self.trl2(out))
        # Global average pooling over the remaining 8x8 spatial grid.
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.num_flat_features(out))
        return self.fc(out)

    def num_flat_features(self, x):
        """Number of features per sample (product of all non-batch dims)."""
        count = 1
        for dim in x.size()[1:]:  # skip the batch dimension
            count *= dim
        return count
def split_train(dataset, valid_percentage):
    """Randomly split a training set into training and validation indices.

    Args:
        dataset (Dataset): original training dataset
        valid_percentage (float): fraction of samples held out for validation

    Returns:
        tuple: (train_idx, valid_idx) arrays of disjoint sample indices
    """
    total = len(dataset)
    valid_count = int(np.floor(total * valid_percentage))
    shuffled = np.random.permutation(total)
    split_point = total - valid_count
    return (shuffled[:split_point], shuffled[split_point:])
if __name__ == '__main__':
    #######
    # test on CIFAR10 dataset
    #######
    # NOTE(review): this script targets a pre-0.4 PyTorch API
    # (Variable, volatile=True, loss.data[0], .train_data) — confirm the
    # installed torch/torchvision versions before running.
    # compute CIFAR10 mean and variance
    #data_dir = '../data/cifar10/'
    data_dir = './cifar10/'
    # Per-channel statistics over the raw training images, used for Normalize.
    data = torchvision.datasets.CIFAR10(root = data_dir, train = True, download = True).train_data
    data = data.astype(np.float32) / 255.
    cifar_mean = np.mean(data, axis = (0, 1, 2))
    cifar_std = np.std(data, axis = (0, 1, 2))
    cifar_mean = torch.from_numpy(cifar_mean).float()
    cifar_std = torch.from_numpy(cifar_std).float()
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(cifar_mean, cifar_std)])
    augment = None # no data augmentation
    #augment = transforms.Compose([]) # data augmentation
    valid_transform = transform
    if augment is None:
        train_transform = transform
    else:
        train_transform = transforms.Compose([augment, transform])
    use_cuda = True
    use_cuda = use_cuda and torch.cuda.is_available()
    kwargs = {'num_workers' : 1, 'pin_memory': True} if use_cuda else {}
    # simply duplicate dataset
    # (two CIFAR10 instances so train/validation can use different transforms;
    # split_train keeps their sample indices disjoint)
    cifar10_train = torchvision.datasets.CIFAR10(root = data_dir, train = True,
                                                 download = True, transform = train_transform)
    train_idx, valid_idx = split_train(cifar10_train, 0.1) # 5000 validation samples
    cifar10_valid = torchvision.datasets.CIFAR10(root = data_dir, train = True,
                                                 download = True, transform = valid_transform)
    batch_sz = 64
    train_loader = torch.utils.data.DataLoader(cifar10_train, batch_size = batch_sz,
            sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idx),
            **kwargs)
    valid_loader = torch.utils.data.DataLoader(cifar10_valid, batch_size = batch_sz,
            sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_idx),
            **kwargs)
    # last epoch use the whole training dataset
    whole_train_loader = torch.utils.data.DataLoader(cifar10_train, batch_size = batch_sz, **kwargs)
    #######
    # training stage
    ######
    #net = DenseNet(5, 3, 10, True) # just for CPU test
    cuda_device = 2 # avoid Tensors on different GPU
    net = DenseNet(12, 25, 10, use_dropout = True)
    if use_cuda:
        net.cuda(cuda_device)
    criterion = nn.CrossEntropyLoss()
    init_lr = 0.1
    weight_decay = 1e-4
    momentum = 0.9
    optimizer = optim.SGD(net.parameters(), lr = init_lr, weight_decay = weight_decay,
                          momentum = momentum, nesterov = True)
    n_epoch = 300
    valid_freq = 50 # validation frequency (in mini-batches, not epochs)
    net.train() # training mode for dropout and batch normalization layer
    for epoch in range(n_epoch):
        running_loss = 0.0
        if epoch + 1 == n_epoch: ## use the whole training in last epoch
            data_loader = whole_train_loader
        else:
            data_loader = train_loader
        # divide lr by 10 after 50% and 75% epochs
        if epoch + 1 == .5 * n_epoch:
            for param_group in optimizer.param_groups:
                param_group['lr'] = init_lr * 0.1
        if epoch + 1 == .75 * n_epoch:
            for param_group in optimizer.param_groups:
                param_group['lr'] = init_lr * 0.01
        for i, data in enumerate(data_loader, 0):
            inputs, labels = data
            # wrap in variable
            if use_cuda:
                inputs, labels = Variable(inputs.cuda(cuda_device)), Variable(labels.cuda(cuda_device))
            else:
                inputs, labels = Variable(inputs), Variable(labels)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.data[0]
            # print statistics (and run a full validation pass) every valid_freq batches
            if (i + 1) % valid_freq == 0:
                print('[%d, %d] training loss: %.4f' %(epoch + 1,
                    i + 1, running_loss / valid_freq))
                running_loss = 0.
                # validation loss
                valid_loss_val = 0.0
                net.eval() # eval mode
                for inputs, labels in valid_loader:
                    if use_cuda:
                        inputs = Variable(inputs.cuda(cuda_device), volatile = True)
                        labels = Variable(labels.cuda(cuda_device))
                    else:
                        inputs, labels = Variable(inputs, volatile = True), Variable(labels)
                    outputs = net(inputs)
                    valid_loss = criterion(outputs, labels)
                    valid_loss_val += valid_loss.data[0]
                print('\t\t validation loss: %.4f' %(valid_loss_val / len(valid_loader)))
                net.train()
    print('Finished Training')
    #######
    # testing stage
    #######
    test_dataset = torchvision.datasets.CIFAR10(root = data_dir, train = False,
                                                download = True, transform = valid_transform)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size = batch_sz,
                                              shuffle = False, **kwargs)
    net.eval()
    correct = 0
    total = 0
    for inputs, labels in test_loader:
        if use_cuda:
            inputs = Variable(inputs.cuda(cuda_device), volatile = True)
            #labels = Variable(labels.cuda(cuda_device))
        else:
            #inputs, labels = Variable(inputs, volatile = True), Variable(labels)
            inputs = Variable(inputs, volatile = True)
        outputs = net(inputs)
        # predicted class = argmax over the class dimension
        _, predicted = torch.max(outputs.data, 1)
        correct += (predicted.cpu() == labels).squeeze().sum()
        total += labels.size(0)
    print ("Test accuracy : %f %% " % (correct * 100.0 / total))
| 10,615 | 3,411 |
def double_it(num):
    """Return twice the given value."""
    return num * 2


# Double the starting value six times: 3 -> 6 -> 12 -> 24 -> 48 -> 96 -> 192.
start = 3
for _ in range(6):
    start = double_it(start)
| 97 | 40 |
from .solver1d import *
# p coefficient functions
class Solver2d(Solver1d):
    """2-D finite-difference / semi-discrete conservation-law solvers.

    Extends Solver1d with y-direction flux and spectral-radius callbacks and
    provides three update schemes: a staggered two-step fully-discrete scheme
    (``fd2``), a 2nd-order semi-discrete scheme (``sd2``) and a 3rd-order
    WENO-style semi-discrete scheme (``sd3``).

    NOTE(review): helpers such as ``limiter_x``/``limiter_y``,
    ``px_coefs``/``py_coefs``/``pdx_coefs``/``pdy_coefs``, the index slices
    ``j0``/``jm``/``jp`` and the constant ``eps`` are expected to come from
    the star-import of ``solver1d`` — confirm against that module.
    """

    def __init__(self, equation):
        super().__init__(equation)
        # Equation dependent functions (y-direction counterparts of the
        # x-direction callbacks set up by Solver1d)
        self.flux_y = equation.flux_y
        self.spectral_radius_y = equation.spectral_radius_y

    def fd2(self, u):
        """One fully-discrete, second-order staggered-grid step, in place.

        Alternates between the two staggered grids via ``self.odd``; each call
        flips the flag. Returns the updated array ``u``.
        NOTE(review): looks like a 2-D Nessyahu–Tadmor-type central scheme —
        confirm against the reference the code was derived from.
        """
        u_star = np.ones(u.shape)
        un_half = np.ones(u.shape)
        u_prime_x = np.ones(u.shape)
        u_prime_y = np.ones(u.shape)
        # Limited slopes on the interior cells only; boundary cells keep the
        # placeholder value 1 and are overwritten by boundary_conditions below.
        u_prime_x[1:-1, 1:-1] = limiter_x(u)
        u_prime_y[1:-1, 1:-1] = limiter_y(u)
        if self.odd:
            # Staggered cell averages (4-cell average plus slope corrections).
            un_half[1:-2, 1:-2] = 0.25 * (
                (u[1:-2, 1:-2] + u[2:-1, 1:-2] + u[1:-2, 2:-1] + u[2:-1, 2:-1])
                + 0.25
                * (
                    (u_prime_x[1:-2, 1:-2] - u_prime_x[2:-1, 1:-2])
                    + (u_prime_x[1:-2, 2:-1] - u_prime_x[2:-1, 2:-1])
                    + (u_prime_y[1:-2, 1:-2] - u_prime_y[1:-2, 2:-1])
                    + (u_prime_y[2:-1, 1:-2] - u_prime_y[2:-1, 2:-1])
                )
            )
        else:
            # Same stencil shifted by one cell for the opposite staggering.
            un_half[2:-1, 2:-1] = 0.25 * (
                (u[1:-2, 1:-2] + u[2:-1, 1:-2] + u[1:-2, 2:-1] + u[2:-1, 2:-1])
                + 0.25
                * (
                    (u_prime_x[1:-2, 1:-2] - u_prime_x[2:-1, 1:-2])
                    + (u_prime_x[1:-2, 2:-1] - u_prime_x[2:-1, 2:-1])
                    + (u_prime_y[1:-2, 1:-2] - u_prime_y[1:-2, 2:-1])
                    + (u_prime_y[2:-1, 1:-2] - u_prime_y[2:-1, 2:-1])
                )
            )
        # Predictor: advance u half a time step using limited flux slopes.
        f = self.flux_x(u)
        g = self.flux_y(u)
        f_prime_x = limiter_x(f)
        g_prime_y = limiter_y(g)
        u_star[1:-1, 1:-1] = u[1:-1, 1:-1] - 0.5 * self.dt * (
            f_prime_x / self.dx + g_prime_y / self.dy
        )
        self.boundary_conditions(u_star)
        # Corrector: flux differences evaluated at the predicted mid-step state.
        f_star = self.flux_x(u_star)
        g_star = self.flux_y(u_star)
        if self.odd:
            u[1:-2, 1:-2] = (
                un_half[1:-2, 1:-2]
                - 0.5
                * self.dt
                / self.dx
                * (
                    (f_star[2:-1, 1:-2] - f_star[1:-2, 1:-2])
                    + (f_star[2:-1, 2:-1] - f_star[1:-2, 2:-1])
                )
                - 0.5
                * self.dt
                / self.dy
                * (
                    (g_star[1:-2, 2:-1] - g_star[1:-2, 1:-2])
                    + (g_star[2:-1, 2:-1] - g_star[2:-1, 1:-2])
                )
            )
        else:
            u[2:-1, 2:-1] = (
                un_half[2:-1, 2:-1]
                - 0.5
                * self.dt
                / self.dx
                * (
                    (f_star[2:-1, 1:-2] - f_star[1:-2, 1:-2])
                    + (f_star[2:-1, 2:-1] - f_star[1:-2, 2:-1])
                )
                - 0.5
                * self.dt
                / self.dy
                * (
                    (g_star[1:-2, 2:-1] - g_star[1:-2, 1:-2])
                    + (g_star[2:-1, 2:-1] - g_star[2:-1, 1:-2])
                )
            )
        self.boundary_conditions(u)
        # Flip staggering so the next call uses the other grid.
        self.odd = not self.odd
        return u

    #################
    # SD2
    #################
    def reconstruction_sd2(self, u):
        """Piecewise-linear (MUSCL-type) face reconstructions N/S/E/W of u."""
        u_N, u_S, u_E, u_W = np.ones((4,) + u.shape)
        ux = limiter_x(u[1:-1, 1:-1])
        uy = limiter_y(u[1:-1, 1:-1])
        # Cell-centre value +/- half the limited slope gives face values.
        u_N[j0, j0], u_S[j0, j0], u_E[j0, j0], u_W[j0, j0] = u[None, j0, j0] + np.array(
            [0.5 * uy, -0.5 * uy, 0.5 * ux, -0.5 * ux]
        )
        list(map(self.boundary_conditions, [u_N, u_S, u_E, u_W]))
        return u_N, u_S, u_E, u_W

    def Hx_flux_sd2(self, u_E, u_W):
        """Local Lax-Friedrichs numerical flux in x from east/west face states."""
        a = np.maximum(self.spectral_radius_x(u_E), self.spectral_radius_x(u_W))
        f_E = self.flux_x(u_E)
        f_W = self.flux_x(u_W)
        if u_W.shape == a.shape:
            return 0.5 * (f_W + f_E) - 0.5 * a * (u_W - u_E) # scalar
        else:
            # systems of equations: broadcast the scalar wave speed over the
            # trailing component axis
            return 0.5 * (f_W + f_E) - 0.5 * np.multiply(
                a[:, :, None], (u_W - u_E)
            ) # systems

    def Hy_flux_sd2(self, u_E, u_W):
        """Local Lax-Friedrichs numerical flux in y (mirror of Hx_flux_sd2)."""
        a = np.maximum(self.spectral_radius_y(u_E), self.spectral_radius_y(u_W))
        f_E = self.flux_y(u_E)
        f_W = self.flux_y(u_W)
        if u_W.shape == a.shape:
            return 0.5 * (f_W + f_E) - 0.5 * a * (u_W - u_E) # scalar
        else:
            return 0.5 * (f_W + f_E) - 0.5 * np.multiply(
                a[:, :, None], (u_W - u_E)
            ) # systems

    def c_flux_sd2(self, u_N, u_S, u_E, u_W):
        """Net flux-difference increment -dt/dx*(Hx+ - Hx-) - dt/dy*(Hy+ - Hy-)."""
        Hx_halfm = self.Hx_flux_sd2(u_E[jm, j0], u_W[j0, j0])
        Hx_halfp = self.Hx_flux_sd2(u_E[j0, j0], u_W[jp, j0])
        Hy_halfm = self.Hy_flux_sd2(u_N[j0, jm], u_S[j0, j0])
        Hy_halfp = self.Hy_flux_sd2(u_N[j0, j0], u_S[j0, jp])
        return -self.dt / self.dx * (Hx_halfp - Hx_halfm) - self.dt / self.dy * (
            Hy_halfp - Hy_halfm
        )

    def sd2(self, u):
        """Second-order semi-discrete step (two-stage Runge–Kutta), in place."""
        self.boundary_conditions(u)
        u_N, u_S, u_E, u_W = self.reconstruction_sd2(u)
        C0 = self.c_flux_sd2(u_N, u_S, u_E, u_W)
        u[j0, j0] += C0
        self.boundary_conditions(u)
        u_N, u_S, u_E, u_W = self.reconstruction_sd2(u)
        C1 = self.c_flux_sd2(u_N, u_S, u_E, u_W)
        # Heun-style correction: average of the two stage increments.
        u[j0, j0] += 0.5 * (C1 - C0)
        self.boundary_conditions(u)
        return u

    #################
    # SD3
    #################
    # indicators: indicators_2d_sd3, indicators_diag_2d_sd3
    def indicators_sd3(self, u):
        """Axis-aligned smoothness indicators for the third-order WENO weights."""
        u_norm = np.sqrt(self.dx * self.dy) * np.linalg.norm(u[j0, j0])
        pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = px_coefs(u)
        ISl = pl1 ** 2 / (u_norm + eps)
        IScx = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcx2 ** 2 + pcx1 ** 2)
        ISr = pr1 ** 2 / (u_norm + eps)
        pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = py_coefs(u)
        ISb = pb1 ** 2 / (u_norm + eps)
        IScy = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcy2 ** 2 + pcy1 ** 2)
        ISt = pt1 ** 2 / (u_norm + eps)
        return ISl, IScx, ISr, ISb, IScy, ISt

    def indicators_diag_sd3(self, u):
        """Diagonal-stencil smoothness indicators (same shape as indicators_sd3)."""
        u_norm = np.sqrt(self.dx * self.dy) * np.linalg.norm(u[j0, j0])
        pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = pdx_coefs(u)
        dISl = pl1 ** 2 / (u_norm + eps)
        dIScx = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcx2 ** 2 + pcx1 ** 2)
        dISr = pr1 ** 2 / (u_norm + eps)
        pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = pdy_coefs(u)
        dISb = pb1 ** 2 / (u_norm + eps)
        dIScy = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcy2 ** 2 + pcy1 ** 2)
        dISt = pt1 ** 2 / (u_norm + eps)
        return dISl, dIScx, dISr, dISb, dIScy, dISt

    # reconstruction: reconstruction_2d_sd3, reconstruction_diag_2d_sd3
    def reconstruction_sd3(self, u, ISl, IScx, ISr, ISb, IScy, ISt):
        """WENO-weighted third-order face reconstructions N/S/E/W of u."""
        u_N, u_S, u_E, u_W = np.ones((4,) + u.shape)
        # Linear (optimal) weights for the left/centre/right sub-stencils.
        cl = 0.25
        ccx = 0.5
        cr = 0.25
        cb = 0.25
        ccy = 0.5
        ct = 0.25
        pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = px_coefs(u)
        # Nonlinear weights: linear weights damped by the smoothness indicators.
        alpl = cl / ((eps + ISl) ** 2)
        alpcx = ccx / ((eps + IScx) ** 2)
        alpr = cr / ((eps + ISr) ** 2)
        alp_sum = alpl + alpcx + alpr
        wl = alpl / alp_sum
        wcx = alpcx / alp_sum
        wr = alpr / alp_sum
        pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = py_coefs(u)
        alpb = cb / ((eps + ISb) ** 2)
        alpcy = ccy / ((eps + IScy) ** 2)
        alpt = ct / ((eps + ISt) ** 2)
        alp_sum = alpb + alpcy + alpt
        wb = alpb / alp_sum
        wcy = alpcy / alp_sum
        wt = alpt / alp_sum
        # Face values: weighted sub-stencil polynomials evaluated at +/- 1/2.
        u_N[j0, j0] = (
            wb * (pb0 + 0.5 * pb1)
            + wcy * (pcy0 + 0.5 * pcy1 + 0.25 * pcy2)
            + wt * (pt0 + 0.5 * pt1)
        )
        u_S[j0, j0] = (
            wb * (pb0 - 0.5 * pb1)
            + wcy * (pcy0 - 0.5 * pcy1 + 0.25 * pcy2)
            + wt * (pt0 - 0.5 * pt1)
        )
        u_E[j0, j0] = (
            wl * (pl0 + 0.5 * pl1)
            + wcx * (pcx0 + 0.5 * pcx1 + 0.25 * pcx2)
            + wr * (pr0 + 0.5 * pr1)
        )
        u_W[j0, j0] = (
            wl * (pl0 - 0.5 * pl1)
            + wcx * (pcx0 - 0.5 * pcx1 + 0.25 * pcx2)
            + wr * (pr0 - 0.5 * pr1)
        )
        return u_N, u_S, u_E, u_W

    def reconstruction_diag_sd3(self, u, dISl, dIScx, dISr, dISb, dIScy, dISt):
        """WENO-weighted corner reconstructions NE/SE/NW/SW (diagonal stencils)."""
        u_NE, u_SE, u_NW, u_SW = np.ones((4,) + u.shape)
        cl = 0.25
        ccx = 0.5
        cr = 0.25
        cb = 0.25
        ccy = 0.5
        ct = 0.25
        pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = pdx_coefs(u)
        alpl = cl / (eps + dISl) ** 2
        alpcx = ccx / (eps + dIScx) ** 2
        alpr = cr / (eps + dISr) ** 2
        alp_sum = alpl + alpcx + alpr
        wl = alpl / alp_sum
        wcx = alpcx / alp_sum
        wr = alpr / alp_sum
        pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = pdy_coefs(u)
        alpb = cb / (eps + dISb) ** 2
        alpcy = ccy / (eps + dIScy) ** 2
        alpt = ct / (eps + dISt) ** 2
        alp_sum = alpb + alpcy + alpt
        wb = alpb / alp_sum
        wcy = alpcy / alp_sum
        wt = alpt / alp_sum
        u_NW[j0, j0] = (
            wb * (pb0 + 0.5 * pb1)
            + wcy * (pcy0 + 0.5 * pcy1 + 0.25 * pcy2)
            + wt * (pt0 + 0.5 * pt1)
        )
        u_SE[j0, j0] = (
            wb * (pb0 - 0.5 * pb1)
            + wcy * (pcy0 - 0.5 * pcy1 + 0.25 * pcy2)
            + wt * (pt0 - 0.5 * pt1)
        )
        u_NE[j0, j0] = (
            wl * (pl0 + 0.5 * pl1)
            + wcx * (pcx0 + 0.5 * pcx1 + 0.25 * pcx2)
            + wr * (pr0 + 0.5 * pr1)
        )
        u_SW[j0, j0] = (
            wl * (pl0 - 0.5 * pl1)
            + wcx * (pcx0 - 0.5 * pcx1 + 0.25 * pcx2)
            + wr * (pr0 - 0.5 * pr1)
        )
        return u_NW, u_SE, u_NE, u_SW

    # numerical fluxes: Hx_flux_2d_sd3, Hy_flux_2d_sd3, c_flux_2d_sd3
    def Hx_flux_sd3(self, u_NW, u_W, u_SW, u_NE, u_E, u_SE):
        """Third-order x-flux: Simpson-weighted (1:4:1) corner/face states."""
        a = np.maximum(self.spectral_radius_x(u_E), self.spectral_radius_x(u_W))
        f_E = self.flux_x(u_E)
        f_W = self.flux_x(u_W)
        f_NE = self.flux_x(u_NE)
        f_NW = self.flux_x(u_NW)
        f_SE = self.flux_x(u_SE)
        f_SW = self.flux_x(u_SW)
        Hx = (
            1.0
            / 12.0
            * (
                (f_NW + f_NE + 4.0 * (f_W + f_E) + f_SW + f_SE)
                - a * (u_NW - u_NE + 4.0 * (u_W - u_E) + u_SW - u_SE)
            )
        )
        return Hx

    def Hy_flux_sd3(self, u_SW, u_S, u_SE, u_NE, u_N, u_NW):
        """Third-order y-flux (mirror of Hx_flux_sd3)."""
        b = np.maximum(self.spectral_radius_y(u_N), self.spectral_radius_y(u_S))
        g_N = self.flux_y(u_N)
        g_S = self.flux_y(u_S)
        g_NE = self.flux_y(u_NE)
        g_NW = self.flux_y(u_NW)
        g_SE = self.flux_y(u_SE)
        g_SW = self.flux_y(u_SW)
        Hy = (
            1.0
            / 12.0
            * (
                (g_SW + g_NW + 4.0 * (g_S + g_N) + g_SE + g_NE)
                - b * (u_SW - u_NW + 4.0 * (u_S - u_N) + u_SE - u_NE)
            )
        )
        return Hy

    def c_flux_sd3(self, u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW):
        """Net third-order flux-difference increment for one stage."""
        Hx_fluxm = self.Hx_flux_sd3(
            u_NW[j0, j0],
            u_W[j0, j0],
            u_SW[j0, j0],
            u_NE[jm, j0],
            u_E[jm, j0],
            u_SE[jm, j0],
        )
        Hx_fluxp = self.Hx_flux_sd3(
            u_NW[jp, j0],
            u_W[jp, j0],
            u_SW[jp, j0],
            u_NE[j0, j0],
            u_E[j0, j0],
            u_SE[j0, j0],
        )
        Hy_fluxm = self.Hy_flux_sd3(
            u_SW[j0, j0],
            u_S[j0, j0],
            u_SE[j0, j0],
            u_NE[j0, jm],
            u_N[j0, jm],
            u_NW[j0, jm],
        )
        Hy_fluxp = self.Hy_flux_sd3(
            u_SW[j0, jp],
            u_S[j0, jp],
            u_SE[j0, jp],
            u_NE[j0, j0],
            u_N[j0, j0],
            u_NW[j0, j0],
        )
        return -self.dt / self.dx * (Hx_fluxp - Hx_fluxm) - self.dt / self.dy * (
            Hy_fluxp - Hy_fluxm
        )

    # final scheme sd3_2d
    def sd3(self, u):
        """Third-order semi-discrete step (three-stage SSP Runge–Kutta), in place.

        NOTE(review): the smoothness indicators are computed once from the
        initial state and reused for all three stages — confirm this is the
        intended "frozen weights" variant.
        """
        self.boundary_conditions(u)
        ISl, IScx, ISr, ISb, IScy, ISt = self.indicators_sd3(u)
        u_N, u_S, u_E, u_W = self.reconstruction_sd3(u, ISl, IScx, ISr, ISb, IScy, ISt)
        dISl, dIScx, dISr, dISb, dIScy, dISt = self.indicators_diag_sd3(u)
        u_NW, u_SE, u_NE, u_SW = self.reconstruction_diag_sd3(
            u, dISl, dIScx, dISr, dISb, dIScy, dISt
        )
        list(
            map(self.boundary_conditions, [u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW])
        )
        C0 = self.c_flux_sd3(u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW)
        u[j0, j0] += C0
        self.boundary_conditions(u)
        u_N, u_S, u_E, u_W = self.reconstruction_sd3(u, ISl, IScx, ISr, ISb, IScy, ISt)
        u_NW, u_SE, u_NE, u_SW = self.reconstruction_diag_sd3(
            u, dISl, dIScx, dISr, dISb, dIScy, dISt
        )
        list(
            map(self.boundary_conditions, [u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW])
        )
        C1 = self.c_flux_sd3(u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW)
        u[j0, j0] += 0.25 * (C1 - 3.0 * C0)
        self.boundary_conditions(u)
        u_N, u_S, u_E, u_W = self.reconstruction_sd3(u, ISl, IScx, ISr, ISb, IScy, ISt)
        u_NW, u_SE, u_NE, u_SW = self.reconstruction_diag_sd3(
            u, dISl, dIScx, dISr, dISb, dIScy, dISt
        )
        list(
            map(self.boundary_conditions, [u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW])
        )
        C2 = self.c_flux_sd3(u_N, u_S, u_E, u_W, u_NE, u_SE, u_SW, u_NW)
        u[j0, j0] += +1.0 / 12.0 * (8.0 * C2 - C1 - C0)
        self.boundary_conditions(u)
        return u

    def set_dt(self):
        """CFL-limited time step from the current x/y maximal wave speeds."""
        r_max_x = np.max(self.spectral_radius_x(self.u))
        r_max_y = np.max(self.spectral_radius_y(self.u))
        dt = self.cfl / np.sqrt((r_max_x / self.dx) ** 2 + (r_max_y / self.dy) ** 2)
        return dt
| 13,871 | 6,682 |
from sadie.cluster import Cluster
from sadie.airr import AirrTable, LinkedAirrTable
def test_cluster(heavy_catnap_airrtable, light_catnap_airrtable):
    """Clustering works on plain AirrTables and on a linked heavy/light table."""
    # Each single-chain table clusters on its own and keeps its AirrTable type.
    for airr_table in (heavy_catnap_airrtable, light_catnap_airrtable):
        clustered = Cluster(airr_table).cluster(10)
        assert "cluster" in clustered.columns
        assert isinstance(clustered, AirrTable)
    # Heavy/light chains merged on cell id cluster through LinkedAirrTable.
    merged = heavy_catnap_airrtable.merge(light_catnap_airrtable, on="cellid", suffixes=["_heavy", "_light"])
    linked = LinkedAirrTable(merged, key_column="cellid")
    linked_cluster = Cluster(
        linked,
        groupby=["v_call_top_heavy", "v_call_top_light"],
        lookup=["cdr1_aa_heavy", "cdr2_aa_heavy", "cdr3_aa_heavy", "cdr1_aa_light", "cdr2_aa_light", "cdr3_aa_light"],
    )
    linked_result = linked_cluster.cluster(10)
    assert isinstance(linked_result, LinkedAirrTable)
    assert "cluster" in linked_result.columns
| 942 | 342 |
import os
import re
import sys
from collections import Counter
from optparse import OptionParser
import numpy as np
import pandas as pd
from util import file_handling as fh
def main():
    """Build paired-city Yelp review datasets.

    Usage: ``%prog input_dir output_dir``

    Reads ``business.json`` and ``review.json`` from *input_dir* and, for a
    fixed list of city pairs, writes into *output_dir*:
    a per-pair ``.jsonlist`` of reviews, a one-hot ``.labels.csv`` and a
    ``.years.csv``, then prints per-year review counts.
    """
    usage = "%prog input_dir output_dir"
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args()
    input_dir = args[0]
    output_dir = args[1]

    if not os.path.exists(output_dir):
        sys.exit("Error: Output directory does not exist")

    # business_id -> city lookup built from the business file.
    city_lookup = dict()
    print("Reading in business data")
    lines = fh.read_jsonlist(os.path.join(input_dir, 'business.json'))
    for line in lines:
        city = line['city']
        business_id = line['business_id']
        city_lookup[business_id] = city

    city_counts = Counter()
    print("Reading in review data")
    # The full review list is loaded once and re-scanned for every city pair.
    lines = fh.read_jsonlist(os.path.join(input_dir, 'review.json'))
    pairs = [('Las Vegas', 'Phoenix'), ('Toronto', 'Scottsdale'), ('Charlotte', 'Pittsburgh'), ('Tempe', 'Henderson')]
    for pair in pairs:
        text_lines = []
        labels = []
        years = []
        year_counts = Counter()
        count = 0
        city1, city2 = pair
        for i, line in enumerate(lines):
            if i % 100000 == 0:
                # progress: reviews scanned, reviews kept for this pair
                print(i, count)
            review_id = line['review_id']
            text = line['text']
            date = line['date']
            year = date.split('-')[0]
            funny = int(line['funny'])
            useful = int(line['useful'])
            cool = int(line['cool'])
            business_id = line['business_id']
            if business_id in city_lookup:
                city = city_lookup[business_id]
                # NOTE(review): city_counts accumulates across all pairs and is
                # never reported — kept for backward compatibility.
                city_counts.update([city])
                # One-hot label: [1, 0] for city1, [0, 1] for city2.
                label = None
                if city == city1:
                    label = [1, 0]
                elif city == city2:
                    label = [0, 1]
                if label is not None:
                    text_lines.append({'text': text, 'city': city, 'year': year, 'id': count, 'review_id': review_id, 'label': label, 'funny': funny, 'useful': useful, 'cool': cool})
                    labels.append(label)
                    years.append(year)
                    year_counts.update([year])
                    count += 1
        n_reviews = len(text_lines)
        print(pair)
        print("Found {:d} reviews".format(n_reviews))
        # Fix: use a raw string for the regex (plain '\s' is an invalid escape
        # sequence and a SyntaxWarning on modern Python).
        name = '_'.join([re.sub(r'\s', '_', city) for city in pair])
        fh.write_jsonlist(text_lines, os.path.join(output_dir, name + '.jsonlist'))
        labels_df = pd.DataFrame(np.vstack(labels), index=np.arange(n_reviews), columns=[city1, city2])
        labels_df.to_csv(os.path.join(output_dir, name + '.labels.csv'))
        years_df = pd.DataFrame(years, index=np.arange(n_reviews), columns=['year'])
        years_df.to_csv(os.path.join(output_dir, name + '.years.csv'))
        print("Year counts")
        keys = list(year_counts.keys())
        keys.sort()
        for k in keys:
            print(k, year_counts[k])


if __name__ == '__main__':
    main()
| 3,313 | 1,040 |
import socket

# Simple UDP echo server: every datagram received is sent straight back
# to whoever sent it.
server_address = ''   # bind to all interfaces
server_port = 10001
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((server_address, server_port))
print("Listening on " + server_address + ":" + str(server_port))
while True:
    payload, client_address = sock.recvfrom(1024)
    print("Echoing data back to " + str(client_address))
    sock.sendto(payload, client_address)
version https://git-lfs.github.com/spec/v1
oid sha256:0b658b7b289160eaf9545f85d87f9c8baa0c04e58f265cf125e952a2fb821816
size 1379
| 129 | 93 |
# logger.py
# App logger initialization function
# r1
# TODO: review how logger.propagate is used for the configured loggers
import logging
from logging.handlers import RotatingFileHandler
from helpers.config import Config
def logger_setup(log_file, loggers=None, touch_root=False):
    """Initialize application logging.

    Creates a rotating file handler (rolled over immediately so each run
    starts with a fresh log file) and attaches it to every logger named in
    *loggers*. Console output is added when APP_CAN_OUTPUT is the string
    'True'; when *touch_root* is set the root logger is configured as well.
    """
    log_formatter = logging.Formatter(Config.get('APP_LOG_FORMAT'), datefmt='%Y/%m/%d %H:%M:%S')
    work_mode = Config.get('APP_WORK_MODE')
    full_debug = Config.get('APP_CAN_OUTPUT')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(log_formatter)

    # Map the configured work mode onto a log level; anything unknown -> ERROR.
    log_level = {'dev': logging.DEBUG, 'info': logging.INFO}.get(work_mode, logging.ERROR)

    root = logging.getLogger()
    if touch_root:
        root.setLevel(log_level)
        root.addHandler(logging.NullHandler())
        if full_debug == 'True':
            root.addHandler(console)
            root.info('Console output enabled')

    handler = RotatingFileHandler(log_file, backupCount=1, encoding='utf-8')
    handler.setLevel(log_level)
    handler.setFormatter(log_formatter)
    handler.doRollover()  # rotate now so this run writes to a fresh file

    if not loggers:
        root.warning('Empty loggers list')
        return
    for logger_name in loggers:
        app_logger = logging.getLogger(logger_name)
        app_logger.setLevel(log_level)
        app_logger.addHandler(handler)
        app_logger.propagate = False
        if full_debug == 'True':
            app_logger.addHandler(console)
| 1,496 | 453 |
import tensorflow as tf
from .quickshift import quickshift
class QuickshiftTest(tf.test.TestCase):
    """Smoke test for the quickshift segmentation op."""

    def test_quickshift(self):
        # 4x4 RGB checkerboard made of four 2x2 black/white squares.
        image = tf.constant([
            [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],
            [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0]],
            [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255]],
            [[0, 0, 0], [0, 0, 0], [255, 255, 255], [255, 255, 255]],
        ])
        # Segment labels we would expect for the checkerboard above.
        expected = [
            [0, 0, 1, 1],
            [0, 0, 1, 1],
            [1, 1, 0, 0],
            [1, 1, 0, 0],
        ]
        with self.test_session() as sess:
            # Currently only checks that the op builds/runs without error.
            segmentation = quickshift(image)
            # TODO Quickshift is stateful, so we get a different result every
            # time we ran it.
            # self.assertAllEqual(segmentation.eval(), expected)
| 852 | 381 |
# Micropython Http Server
# Erni Tron ernitron@gmail.com
# Copyright (c) 2016
# Content Callback functions.
# They should receive parameters and return a HTML formatted string
# By convention they start with cb_
import gc
import time
from config import save_config, set_config, get_config
# Content Functions
def cb_index(title):
    """Return the index page content as a list of lines.

    The original version had an unreachable ``return []`` after the
    ``with`` block; make that fallback real by returning an empty list
    when the file cannot be read.
    """
    try:
        with open('index.txt', 'r') as f:
            return f.readlines()
    except OSError:
        return []
def cb_status():
    """Render an HTML status page: device id, network info, memory and files."""
    uptime = time.time()  # NOTE(review): this is epoch time, not real uptime — confirm intent
    import os
    filesystem = os.listdir()
    chipid = get_config('chipid')
    macaddr = get_config('mac')
    address = get_config('address')
    status_page = '<h2>Device %s</h2>' \
                  '<p>MacAddr: %s' \
                  '<p>Address: %s' \
                  '<p>Free Mem: %d (alloc %d)' \
                  '<p>Files: %s' \
                  '<p>Uptime: %d"</div>' % (chipid, macaddr, address, gc.mem_free(), gc.mem_alloc(), filesystem, uptime)
    return status_page
def cb_help():
    """Return the help page content as a list of lines.

    As in cb_index, the trailing ``return []`` was unreachable; return an
    empty list when the file cannot be read instead.
    """
    try:
        with open('help.txt', 'r') as f:
            return f.readlines()
    except OSError:
        return []
def cb_setplace(place):
    """Persist the device 'place' setting and confirm the new value."""
    set_config('place', place)
    save_config()
    message = 'Place set to %s' % place
    return message
def cb_setparam(param, value):
    """Set an arbitrary configuration parameter.

    When called without a parameter name, returns an HTML form for entering
    one; otherwise stores the value, persists the config and confirms.
    """
    if param is None:  # PEP 8: compare to None with `is`, not `==`
        return '<p>Set configuration parameter<form action="/conf">' \
               'Param <input type="text" name="param"> ' \
               'Value <input type="text" name="value"> ' \
               '<input type="submit" value="Submit">' \
               '</form></p></div>'
    else:
        set_config(param, value)
        save_config()
        return 'Param set to %s' % value
def cb_setwifi(ssid, pwd):
    """Store new WiFi credentials after a minimal length check."""
    # Reject obviously invalid credentials before touching the config.
    if len(ssid) < 3 or len(pwd) < 8:
        return '<h2>WiFi too short, try again</h2>'
    set_config('ssid', ssid)
    set_config('pwd', pwd)
    save_config()
    return '<h2>WiFi set to %s %s</h2>' % (ssid, pwd)
| 1,777 | 618 |
__version__ = "1.0.0"
__all__ = ["full_compare"]
import re
from functools import reduce
from itertools import permutations, product, islice, zip_longest
from operator import mul
from Levenshtein import jaro, jaro_winkler
def _smart_jaro(a, b, func=jaro):
    """Compare two name chunks, tolerating a missing leading character.

    Returns True when the chunks match almost exactly after dropping the
    first character of either side; otherwise returns a similarity score,
    penalized by 0.2 when the lengths differ by 3 or more.
    """
    if func(a[1:], b[1:]) > 0.99:
        return True
    if func(a, b[1:]) > 0.99:
        return True
    if func(a[1:], b) > 0.99:
        return True
    # was max([func(a, b)]) — a max over a single-element list, i.e. func(a, b)
    chunk_distance = func(a, b)
    if abs(len(a) - len(b)) >= 3:
        chunk_distance -= 0.2
    return chunk_distance
def _compare_two_names(
    name1, name2, max_splits=7, straight_limit=0.70, smart_limit=0.96
):
    """Decide whether two full names likely refer to the same person.

    A very high whole-string Jaro similarity is accepted immediately; a
    moderate one triggers a chunk-by-chunk comparison where even the worst
    chunk pair must score above 0.88.
    """
    whole_similarity = jaro(name1, name2)
    if whole_similarity > smart_limit:
        return True
    if whole_similarity <= straight_limit:
        return False
    worst_pair = 1
    for chunk1, chunk2 in zip_longest(name1.split(" "), name2.split(" ")):
        if chunk1 is not None and chunk2 is not None:
            worst_pair = min(_smart_jaro(chunk1, chunk2, func=jaro_winkler), worst_pair)
    return worst_pair > 0.88
def _normalize_name(s):
return (
re.sub(r"\s+", " ", s.strip().replace("-", " "))
.replace(".", "")
.replace(",", "")
.replace('"', "")
.replace("'", "")
.replace("’", "")
.replace("є", "е")
.replace("i", "и")
.replace("і", "и")
.replace("ь", "")
.replace("'", "")
.replace('"', "")
.replace("`", "")
.replace("конст", "кост")
.replace("’", "")
.replace("ʼ", "")
)
def _slugify_name(s):
s = s.replace(" ", "")
return re.sub(r"\d+", "", s)
def _thorough_compare(name1, name2, max_splits=7):
    """Try word-order permutations of name2 against name1.

    The number of permutations considered is capped at max_splits! so the
    search stays bounded for names with many words.
    """
    words = name2.split(" ")
    cap = reduce(mul, range(1, max_splits + 1))  # max_splits factorial
    return any(
        _compare_two_names(name1, " ".join(candidate))
        for candidate in islice(permutations(words), cap)
    )
def full_compare(name1, name2):
    """Full pipeline deciding whether two names refer to the same person.

    Normalizes and slugifies both names, then escalates through cheaper
    checks (exact slug match, prefix/suffix containment, slug similarity,
    whole-name comparison) before the expensive permutation search.
    """
    name1 = _normalize_name(name1)
    name2 = _normalize_name(name2)
    slug1 = _slugify_name(name1)
    slug2 = _slugify_name(name2)
    if slug1 == slug2:
        return True
    # A long-enough slug contained at either end of the other counts as a match.
    if len(slug2) >= 10 and (slug1.startswith(slug2) or slug1.endswith(slug2)):
        return True
    if len(slug1) >= 10 and (slug2.startswith(slug1) or slug2.endswith(slug1)):
        return True
    if jaro(slug1, slug2) > 0.95 or jaro(slug2, slug1) > 0.95:
        return True
    if _compare_two_names(name1, name2) or _compare_two_names(name2, name1):
        return True
    return _thorough_compare(name1, name2) or _thorough_compare(name2, name1)
def test_file(csv_file, debug):
    """Evaluate full_compare against a labelled CSV and print P/R/F1.

    The CSV must have columns ``name1``, ``name2`` and ``ground truth``;
    with *debug* set, every mismatching pair is printed.
    """
    import csv
    from veryprettytable import VeryPrettyTable
    table = VeryPrettyTable([" ", "Positive", "Negative"])
    # confusion[predicted][expected] counts
    confusion = {True: {True: 0, False: 0}, False: {True: 0, False: 0}}
    with open(csv_file, "r") as fp:
        for row in csv.DictReader(fp):
            expected = row["ground truth"].lower() in ["true", "1", "on"]
            predicted = full_compare(row["name1"], row["name2"])
            if predicted != expected and debug:
                print(predicted, expected, row["name1"], row["name2"])
            confusion[predicted][expected] += 1
    for predicted in [True, False]:
        table.add_row(
            [
                "Predicted positive" if predicted else "Predicted negative",
                confusion[predicted][True],
                confusion[predicted][False],
            ]
        )
    precision = confusion[True][True] / (confusion[True][True] + confusion[True][False])
    recall = confusion[True][True] / (confusion[True][True] + confusion[False][True])
    f1 = 2 * precision * recall / (precision + recall)
    print(table)
    print("Precision: {:5.2f}".format(precision))
    print("Recall: {:5.2f}".format(recall))
    print("F1 score: {:5.2f}".format(f1))
if __name__ == "__main__":
    import sys
    # CLI: argv[1] = labelled CSV; any extra argument turns on debug output.
    if len(sys.argv) > 1:
        test_file(sys.argv[1], len(sys.argv) > 2)
    else:
        print(
            "Supply .csv file with ground truth data to calculate precision/recall/f1 metrics"
        )
# Parse input:
# line 1 = call_numbers separated by ,
# Matrices separated by blank row
# Store input:
# Bingo Cards: Dictionary of Dictionaires of lists? (card - column - numbers)
# call_numbers: list
# function to mark numbers with 'x' as they are called
# function to check if card(s) won
# function to add all unmarked numbers on winning board (if card won)
# store unmarked numbers as dictionary of lists (card - numbers)
# sum unmarked numbers in each list
# multiply sum of each list by called number and determine highest score
import collections
# Mutable module-level state shared by the functions below.
number_list = []     # bingo numbers in the order they are called
cards = {}           # card index -> {'B'|'I'|'N'|'G'|'O': list of 5 ints}
numbers_called = 0
card_winners = []    # cards that have already won (skipped on later checks)

with open('data.txt') as data:
    # numbers to call
    numbers = data.readline().strip().split(',')
    for number in numbers:
        number = int(number)
        number_list.append(number)
    # bingo cards
    card_number = 0
    for line in data:
        line.strip()  # NOTE(review): result discarded — this call is a no-op
        if line == "\n":
            # blank row separates consecutive cards
            card_number += 1
        elif line != "\n":
            # parse line: split on spaces and drop the empty strings that
            # column-aligned input produces
            y = line.rstrip().split(" ")
            while("" in y):
                y.remove("")
            if card_number not in cards:
                # first row of a new card: create its five columns
                cards[card_number] = {}
                cards[card_number]['B'] = [int(y[0])]
                cards[card_number]['I'] = [int(y[1])]
                cards[card_number]['N'] = [int(y[2])]
                cards[card_number]['G'] = [int(y[3])]
                cards[card_number]['O'] = [int(y[4])]
            elif card_number in cards:
                # subsequent rows append to the existing columns
                cards[card_number]['B'].append(int(y[0]))
                cards[card_number]['I'].append(int(y[1]))
                cards[card_number]['N'].append(int(y[2]))
                cards[card_number]['G'].append(int(y[3]))
                cards[card_number]['O'].append(int(y[4]))
            else:
                # unreachable: the two branches above are exhaustive
                "crymost"
# sample data testing
# number_list = [7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1]
# cardz = {
# 1: {
# 'B': [22,8,21,6,1],
# 'I': [13,2,9,10,12],
# 'N': [17,23,14,3,20],
# 'G': [11,4,16,18,15],
# 'O': [0,24,7,5,19]
# },
# 2: {
# 'B': [3,9,19,20,14],
# 'I': [15,18,8,11,21],
# 'N': [0,13,7,10,16],
# 'G': [2,17,25,24,12],
# 'O': [22,5,23,4,6]
# },
# 3: {
# 'B': [14,10,18,22,2],
# 'I': [21,16,8,11,0],
# 'N': [17,15,23,13,12],
# 'G': [24,9,26,6,3],
# 'O': [4,19,20,5,7]
# }
# }
# print(f'{cardz[1]}\n{cardz[2]}\n{cardz[3]}')
# exit()
def check_number(last_called):
    """Mark last_called with 'x' on every card that contains it.

    Mirrors the original elif chain: in each row at most one column
    (the first match, in B-I-N-G-O order) is marked.
    """
    for card in cards:
        for row in range(5):
            for col in ('B', 'I', 'N', 'G', 'O'):
                if cards[card][col][row] == last_called:
                    cards[card][col][row] = 'x'
                    break
def check_winner(last_called):
    """Check every card for a completed (fully 'x'-marked) row or column.

    For each newly winning card: sums its unmarked numbers, records the
    score, and adds the card to the global ``card_winners`` list so it is
    skipped on later calls. Returns ``compare_scores(...)`` when at least
    one card won this round, otherwise None (implicitly).
    """
    winning_scores = []
    winners = False
    for card in cards:
        for y in range(5):
            if card in card_winners:
                # card already won (possibly earlier in this same y loop) — skip
                break
            elif card not in card_winners:
                # check horizontal & vertical win for all cards
                # NOTE(review): the column checks below do not depend on y, so
                # they are re-evaluated identically on every row iteration.
                if ((cards[card]['B'][y] == "x" and cards[card]['I'][y] == 'x' and cards[card]['N'][y] == 'x' and cards[card]['G'][y] == 'x' and cards[card]['O'][y] == 'x')
                        or cards[card]['B'][0] == 'x' and cards[card]['B'][1] == 'x' and cards[card]['B'][2] == 'x' and cards[card]['B'][3] == 'x' and cards[card]['B'][4] == 'x'
                        or cards[card]['I'][0] == 'x' and cards[card]['I'][1] == 'x' and cards[card]['I'][2] == 'x' and cards[card]['I'][3] == 'x' and cards[card]['I'][4] == 'x'
                        or cards[card]['N'][0] == 'x' and cards[card]['N'][1] == 'x' and cards[card]['N'][2] == 'x' and cards[card]['N'][3] == 'x' and cards[card]['N'][4] == 'x'
                        or cards[card]['G'][0] == 'x' and cards[card]['G'][1] == 'x' and cards[card]['G'][2] == 'x' and cards[card]['G'][3] == 'x' and cards[card]['G'][4] == 'x'
                        or cards[card]['O'][0] == 'x' and cards[card]['O'][1] == 'x' and cards[card]['O'][2] == 'x' and cards[card]['O'][3] == 'x' and cards[card]['O'][4] == 'x'):
                    winners = True
                    # add all unmarked numbers for this card
                    add_number = 0
                    for z in range(5):
                        if cards[card]['B'][z] != 'x':
                            add_number += cards[card]['B'][z]
                        if cards[card]['I'][z] != 'x':
                            add_number += cards[card]['I'][z]
                        if cards[card]['N'][z] != 'x':
                            add_number += cards[card]['N'][z]
                        if cards[card]['G'][z] != 'x':
                            add_number += cards[card]['G'][z]
                        if cards[card]['O'][z] != 'x':
                            add_number += cards[card]['O'][z]
                    winning_scores.append(add_number)
                    card_winners.append(card)
    # After all cards checked
    if winners:
        final = compare_scores(winning_scores, last_called)
        return final
def compare_scores(winning_scores, last_called):
    """Return the best winning score multiplied by the called number.

    The original loop set ``last_score = score`` just before comparing, so
    every comparison was ``score <= score`` and the function effectively
    returned the *last* score in the list; per the header comment the intent
    is the highest score, so use max(). An empty list still yields 0, as the
    original did.
    """
    if not winning_scores:
        return 0
    return max(winning_scores) * last_called
# Play the game: call numbers one by one; a win needs at least five marks,
# so winner checks only start after the fifth call.
for last_called in number_list:
    numbers_called += 1
    check_number(last_called)
    if numbers_called > 4:
        winning = check_winner(last_called)
        if winning:
            print(winning)
| 6,134 | 2,194 |
"""Plot test-set PSNR/SSIM of several CT reconstructors vs. training-data size."""
import matplotlib.pyplot as plt
import numpy as np

from dliplib.utils.helper import set_use_latex

plt.style.use('seaborn-whitegrid')
set_use_latex()

# Fractions of the full training set used for each experiment.
sizes = [0.0001, 0.001, 0.01, 0.10, 1.00]
ticks = range(len(sizes))

# performance on the different data sizes
learnedgd = {'PSNR [db]': [29.87, 31.28, 31.83, 32.7, 32.7],
             'SSIM': [0.7151, 0.7473, 0.7602, 0.7802, 0.7802]}
learnedpd = {'PSNR [db]': [29.65, 32.48, 33.21, 33.53, 33.64],
             'SSIM': [0.7343, 0.7771, 0.7929, 0.799, 0.8020]}
fbpunet = {'PSNR [db]': [29.33, 31.58, 32.6, 33.19, 33.55],
           'SSIM': [0.7143, 0.7616, 0.7818, 0.7931, 0.7994]}
iradonmap = {'PSNR [db]': [14.61, 18.77, 24.63, 31.27, 32.45],
             'SSIM': [0.3529, 0.4492, 0.6031, 0.7569, 0.7781]}

# Data-size-independent baselines.
tv = {'PSNR [db]': 30.89,
      'SSIM': 0.7563}
# psnr: 28.38, ssim: 0.6492
fbp = {'PSNR [db]': 28.38,
      'SSIM': 0.6492}
diptv = {'PSNR [db]': 32.51,
         'SSIM': 0.7803}
# LearnedPD+DIP was only run on the three smallest sizes.
learnedpd_dip = {'PSNR [db]': [32.52, 32.78, 33.21],
                 'SSIM': [0.7822, 0.7821, 0.7929]}

fig, ax = plt.subplots(1, 2, figsize=(8, 4.0))
for i, measure in enumerate(['PSNR [db]', 'SSIM']):
    ax[i].axhline(fbp[measure], ticks[0], ticks[-1], label='FBP', color='tab:gray',
                  linestyle=':', linewidth=1.5)
    ax[i].axhline(tv[measure], ticks[0], ticks[-1], label='TV', color='tab:orange',
                  linestyle='--', linewidth=1.5)
    ax[i].axhline(diptv[measure], ticks[0], ticks[-1], label='DIP+TV', color='tab:brown',
                  linestyle='-.', linewidth=1.5)
    ax[i].plot(ticks, iradonmap[measure], label='iRadonMap', color='tab:green',
               linewidth=1.5, marker='o')
    ax[i].plot(ticks, fbpunet[measure], label='FBP+U-Net', color='tab:blue',
               linewidth=1.5, marker='o')
    ax[i].plot(ticks, learnedgd[measure], label='LearnedGD', color='tab:red',
               linewidth=1.5, marker='o')
    ax[i].plot(ticks, learnedpd[measure], label='LearnedPD', color='tab:purple',
               linewidth=1.5, marker='o')
    ax[i].plot(ticks[:3], learnedpd_dip[measure], label='LearnedPD + DIP', color='tab:purple',
               linewidth=1.5, marker='o', markerfacecolor='white')
    ax[i].set_xticks(ticks)
    ax[i].set_xticklabels(np.array(sizes) * 100, rotation=45)
    # Raw string: '\%' is a LaTeX escape, not a Python escape sequence.
    ax[i].set_xlabel(r'Data size [$\%$]')
    ax[i].set_ylabel(measure)
    ax[i].set_title('LoDoPaB (200) - Test error')

ax[0].set_ylim([24.0, 35.0])
ax[1].set_ylim([0.58, 0.82])

# Shrink the axes to leave room for the legends below each panel.
for i in range(2):
    box = ax[i].get_position()
    ax[i].set_position([box.x0, box.y0, box.width, box.height * 0.6])

h, l = ax[0].get_legend_handles_labels()
ax[0].legend([h[3], h[4], h[5], h[6]], [l[3], l[4], l[5], l[6]], bbox_to_anchor=(0.0, -0.45, 1., 0.5), loc=3,
             ncol=2, mode="expand", frameon=False)
h, l = ax[1].get_legend_handles_labels()
ax[1].legend([h[2], h[7], h[0], h[1]], [l[2], l[7], l[0], l[1]], bbox_to_anchor=(0.0, -0.45, 1., 0.5), loc=3,
             ncol=2, mode="expand", frameon=False)

# A single tight_layout() suffices (the original called it three times).
plt.tight_layout()
plt.savefig('lodopab-200-performance.pdf')
plt.show()
| 3,120 | 1,580 |
# Advent of Code 2020 day 16 (part 2): ticket translation.
# Parse field rules, discard invalid nearby tickets, deduce which field
# occupies each of the 20 positions, then multiply together the values
# of the "departure" fields on our own ticket.
s = open('input.txt','r').read()
s = [k for k in s.split("\n")]
di = {}            # field name -> set of all values valid for it
ok = False         # becomes True once the "nearby tickets" header is seen
ans = 0
f = [set() for i in range(20)]   # f[i]: candidate field names for position i
nums = set()       # union of every valid value across all fields
cnt = 0
for line in s:
    # Rule lines look like "field: a-b or c-d"; collect both ranges.
    for each in line.split(" "):
        if "-" in each:
            x, y = each.split("-")
            x, y = int(x), int(y)
            if line.split(":")[0] not in di:
                di[line.split(":")[0]] = set()
            key = line.split(":")[0]
            for i in range(x,y+1):
                nums.add(i)
                di[key].add(i)
            # Every field starts as a candidate for every position.
            for i in range(20):
                f[i].add(key)
    if "nearby tickets" in line:
        ok = True
    elif ok:
        # A nearby ticket: drop it if any value is valid for no field.
        x = list(map(int, line.split(",")))
        pos=True
        for i in x:
            if i not in nums:
                pos=False
                break
        if not pos:continue
        # Narrow each position to field names whose set contains x[i].
        for i in range(20):
            st = set()
            for key in di:
                if x[i] in di[key]:
                    st.add(key)
            #print(st)
            f[i] = f[i].intersection(st)
    elif not ok and len(line.split(",")) == 20:
        # The 20-value line before "nearby tickets" is our own ticket.
        my = list(map(int, line.split(",")))
    # Constraint propagation (re-run after every input line): lock in any
    # position with exactly one candidate and remove that field elsewhere.
    st = set()
    ind = {}           # field name -> resolved position index
    for j in range(20):
        for i in range(20):
            f[i] = f[i].difference(st)
            if len(f[i]) == 1:
                for k in f[i]:
                    ind[k] = i
                    st.add(k)
                break
    ans = 1
    for title in ind:
        if "departure" in title:
            ans *= my[ind[title]]
print(ans)
| 1,448 | 518 |
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.db.models import Q
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions, generics
from rest_framework_jwt.settings import api_settings
from accounts.api.user.serializers import UserDetailSerializer
from .serializers import AccountSerializer
from .permission import AnonymousPermission
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class AuthAPIView(APIView):
    """
    Username/email + password login endpoint returning a JWT.

    POST body: {"username": <username or email>, "password": <password>}
    On success responds with the payload produced by the configured
    ``jwt_response_payload_handler`` (token + user data).
    """
    permission_classes = [AnonymousPermission]

    def post(self, request, *args, **kwargs):
        # An already-authenticated user must not log in again.
        if request.user.is_authenticated:
            return Response({"detail": "User is already authenticated"}, status=400)
        data = request.data
        # SECURITY: never print/log `data` here -- it contains the
        # plain-text password (the original did `print(data)`).
        username = data.get("username")
        password = data.get("password")
        # Accept either the username or the e-mail address as identifier.
        qs = User.objects.filter(
            Q(username__iexact=username) |
            Q(email__iexact=username)
        ).distinct()
        if qs.count() == 1:
            user = qs.first()
            if user.check_password(password):
                payload = jwt_payload_handler(user)
                token = jwt_encode_handler(payload)
                response = jwt_response_payload_handler(
                    token, user, request=request)
                return Response(response)
        # Unknown/ambiguous identifier or wrong password: signal failure
        # explicitly instead of returning 200 OK.
        return Response({"detail": "Invalid credentials"}, status=401)
class RegisterAPIView(generics.CreateAPIView):
    """
    User registration endpoint.

    Creation logic lives in ``AccountSerializer``; only anonymous
    (not yet authenticated) clients may register.
    """
    serializer_class = AccountSerializer
    queryset = User.objects.all()
    permission_classes = [AnonymousPermission]

    def get_serializer_context(self, *args, **kwargs):
        # Expose the request to the serializer (e.g. for absolute URLs).
        return {"request": self.request}
# class RegisterAPIView(APIView):
# permission_classes = [permissions.AllowAny]
# def post(self, request, *args, **kwargs):
# request.user
# if request.user.is_authenticated:
# return Response({"detail": "You is already registered"})
# data = request.data
# username = data.get("username")
# email = data.get("email")
# password = data.get("password")
# password2 = data.get("password2")
# if password != password2:
# return Response({"password":"Both password does not match"})
# qs = User.objects.filter(
# Q(username__iexact=username)|
# Q(email__iexact=email)
# )
# if qs.exists():
# return Response({"detail": "User already exist"})
# else:
# user = User.objects.create(username=username, email=email)
# user.set_password(password)
# user.save()
# payload = jwt_payload_handler(user)
# token = jwt_encode_handler(payload)
# response = jwt_response_payload_handler(token, user, request=request)
# return Response(response)
| 3,089 | 849 |
from __future__ import annotations
import sys
from pathlib import Path
from unittest.mock import MagicMock, create_autospec
import pytest
from _pytest.monkeypatch import MonkeyPatch
from pypi_changes._cli import Options
from tests import MakeDist
@pytest.fixture(autouse=True)
def _no_index(monkeypatch: MonkeyPatch) -> None:
    """Run every test without a custom pip index configured."""
    monkeypatch.delenv("PIP_INDEX_URL", raising=False)
@pytest.fixture(autouse=True)
def _no_proxy(monkeypatch: MonkeyPatch) -> None:
    """Strip any proxy configuration from the test environment."""
    for variable in ("https_proxy", "http_proxy", "no_proxy"):
        monkeypatch.delenv(variable, raising=False)
@pytest.fixture()
def option_simple(tmp_path: Path) -> Options:
    """CLI options pointing at a throw-away cache with a tiny lifetime."""
    return Options(cache_path=tmp_path / "a.sqlite", jobs=1, cache_duration=0.01)
@pytest.fixture()
def make_dist() -> MakeDist:
    """Factory fixture producing mock ``PathDistribution`` objects."""

    def _factory(path: Path, name: str, version: str) -> MagicMock:
        # importlib.metadata moved into the stdlib in Python 3.8.
        module = "importlib.metadata" if sys.version_info >= (3, 8) else "importlib_metadata"
        mock_dist: MagicMock = create_autospec(f"{module}.PathDistribution")
        mock_dist.metadata = {"Name": name}
        mock_dist._path = path / "dist"
        mock_dist.version = version
        return mock_dist

    return _factory
| 1,176 | 408 |
def treegenerator(region, cleancountryco2):
    """
    @Author Haoyang Ding
    Build one region node of the treemap data:

    treedata = {"value": total CO2 emission of the region,
                "name": region's name (first entry of *region*),
                "children": [{"name": country, "value": share}, ...]}

    Countries absent from *cleancountryco2* are skipped.  After summing,
    each child's value is normalised to its fraction of the region total.

    :param region: list of country names; region[0] doubles as the label
    :param cleancountryco2: dict {"country": list of yearly CO2 values}
    :return: treedata dictionary
    """
    assert isinstance(region, list)
    assert isinstance(cleancountryco2, dict)
    treedata = {"value": 0, "name": region[0], "children": []}
    for country in region:
        # Direct membership test (the original built list(d.keys())).
        if country in cleancountryco2:
            child = {"name": country, "value": sum(cleancountryco2[country])}
            treedata["value"] += child["value"]
            treedata["children"].append(child)
    # Normalise children to fractions of the regional total; guard the
    # degenerate case (no matching countries / all-zero data) that made
    # the original raise ZeroDivisionError.
    total = treedata["value"]
    if total:
        for child in treedata["children"]:
            child["value"] = child["value"] / total
    return treedata
def treemapdata(africa, asia, europe, na, sa, oceania):
    """
    @Author Haoyang Ding
    generate a txt file
    tree is a list [region1, region2...]
    region: a dictionary in a special format which is generated by treegenerator
    :param: six lists. Each list contains the countries' name in a specific region
    :return: None
    """
    assert isinstance(africa, list)
    assert isinstance(asia, list)
    assert isinstance(europe, list)
    assert isinstance(na, list)
    assert isinstance(sa, list)
    assert isinstance(oceania, list)
    import os
    file_address = os.path.abspath(os.path.join(os.getcwd(), "../../.."))
    # Raw strings keep the Windows-style backslashes literal.
    co2address = os.path.abspath(os.path.join(file_address, r"Data\VisualizationData\CO2_GDP", "cleanglobalco2.txt"))
    with open(co2address, 'r') as f:
        # NOTE(review): eval() on file contents is only acceptable because
        # the file is produced by our own pipeline; never point this at
        # untrusted data.
        globalco2 = eval(f.read())
    cleancountryco2 = globalco2.copy()  # 2008-2017 co2 emission data by country
    for country in list(cleancountryco2.keys()):
        temp = cleancountryco2[country]
        years = list(temp.keys())
        if years[-1] != 2017.0 or years[-10] != 2008.0:
            # Incomplete 2008-2017 coverage: drop the country entirely.
            # (BUG FIX: the original popped the key and then immediately
            # re-added it below, making this filter a no-op.)
            cleancountryco2.pop(country)
            continue
        tempvalue = list(temp.values())
        # NOTE(review): [-11:-1] excludes the most recent year; confirm
        # whether [-10:] (2008-2017) was intended.
        cleancountryco2[country] = tempvalue[-11:-1]
    tree = []
    for countries in [africa, asia, europe, na, sa, oceania]:
        tree.append(treegenerator(countries, cleancountryco2))
    treeaddress = os.path.abspath(os.path.join(file_address, r"Data\VisualizationData\CO2_GDP", "tree.txt"))
    with open(treeaddress, 'w') as f:
        f.write(str(tree))
| 2,578 | 881 |
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import List, Optional
from aws_cdk.aws_events import EventPattern, IRuleTarget
from constructs import Construct
class Stage(Construct):
    """
    Class that represents a stage within a data pipeline.
    To create a Stage, inherit from this class, add infrastructure required by the stage, and implement
    get_event_pattern and get_targets methods. For example:
    .. code-block:: python
        class MyStage(Stage):
            def __init__(
                self,
                scope: Construct,
                id: str,
                environment_id: str,
            ) -> None:
                # Define stage infrastructure, for example a queue
                self._queue = SQSFactory.queue(
                    self,
                    id,
                    environment_id,
                )
            @property
            def queue(self) -> Queue:
                '''
                Return: Queue
                The SQS queue
                '''
                return self._queue
            def get_event_pattern(self) -> Optional[EventPattern]:
                return EventPattern(
                    detail_type=["my-detail-type"],
                )
            def get_targets(self) -> Optional[List[IRuleTarget]]:
                return [SqsQueue(self._queue)]
    """
    def __init__(
        self,
        scope: Construct,
        id: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
    ) -> None:
        """
        Create a stage.
        Parameters
        ----------
        scope : Construct
            Scope within which this construct is defined
        id : str
            Identifier of the stage
        name : Optional[str]
            Name of the stage
        description : Optional[str]
            Description of the stage
        """
        super().__init__(scope, id)
        # Stored verbatim for use by subclasses and pipeline wiring.
        self.id: str = id
        self.name: Optional[str] = name
        self.description: Optional[str] = description
    @abstractmethod
    def get_targets(self) -> Optional[List[IRuleTarget]]:
        """
        Get input targets of the stage.
        Targets are used by Event Rules to describe what should be invoked when a rule matches an event.
        Returns
        -------
        targets : Optional[List[IRuleTarget]]
            List of targets
        """
        pass
    @abstractmethod
    def get_event_pattern(self) -> Optional[EventPattern]:
        """
        Get output event pattern of the stage.
        Event pattern describes the structure of output event(s) produced by this stage. Event Rules use
        event patterns to select events and route them to targets.
        Returns
        -------
        event_pattern : Optional[EventPattern]
            Event pattern
        """
        pass
| 3,472 | 874 |
import datetime,threading,random
from termcolor import cprint, colored
import colorama
class Logger():
    """Coloured, timestamped console logger (one line per message)."""

    def __init__(self):
        # Enables ANSI colour handling on Windows terminals.
        colorama.init()

    def __timestamp(self):
        """Return a '[HH:MM:SS.ffffff][Task-N]' prefix for log lines."""
        now = str(datetime.datetime.now())
        now = now.split(' ')[1]
        # FIX: threading.currentThread()/getName() are deprecated aliases
        # (removed in recent Python); use the modern spelling.
        threadname = threading.current_thread().name
        threadname = str(threadname).replace('Thread', 'Task')
        now = '[' + str(now) + ']' + '[' + str(threadname) + ']'
        return now

    def log(self, text):
        """Plain (uncoloured) message."""
        print("{} {}".format(self.__timestamp(), text))
        return

    def success(self, text):
        """Green message for successful operations."""
        print("{} {}".format(self.__timestamp(), colored(text, "green")))
        return

    def warn(self, text):
        """Yellow warning message."""
        print("{} {}".format(self.__timestamp(), colored(text, "yellow")))
        return

    def error(self, text):
        """Red error message."""
        print("{} {}".format(self.__timestamp(), colored(text, "red")))
        return

    def status(self, text):
        """Magenta status/progress message."""
        print("{} {}".format(self.__timestamp(), colored(text, "magenta")))
        return
class ProxyManager():
    """
    Loads proxies from 'proxy.txt' (one per line) and hands out a random
    one per request.

    Supported line formats:
        host:port
        host:port:user:password
    Lines in any other format are ignored.
    """

    def __init__(self):
        self.proxies = []
        # The with-statement closes the file; the original also called
        # f.close() inside the block, which is redundant.
        with open('proxy.txt') as f:
            for line in f.read().splitlines():
                if line == '':
                    continue
                parts = line.split(":")
                if len(parts) == 4:
                    host, port, user, password = parts
                    self.proxies.append({
                        'http': 'http://{}:{}@{}:{}'.format(user, password, host, port),
                        'https': 'https://{}:{}@{}:{}'.format(user, password, host, port)
                    })
                elif len(parts) == 2:
                    host, port = parts
                    self.proxies.append({
                        'http': 'http://{}:{}'.format(host, port),
                        'https': 'https://{}:{}'.format(host, port)
                    })

    def get_proxy(self):
        """Return a random proxy dict suitable for requests' proxies=."""
        return random.choice(self.proxies)
| 1,640 | 685 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
from scipy import signal
import timeit
from numba import jit
import datashader as ds
import xarray as xr
from datashader import transfer_functions as tf
#1. Define the boundary conditions
# Needed: surface temperature forcing (sin wave at the surce), temperature profile (intinal conditions), bottom boundary
# condition, time step, grid size, thermal conductivity, n (number of vertical grid cells)
# The two triple-quoted blocks below are disabled alternative parameter
# sets kept for reference; only the assignments after them take effect.
# NOTE(review): the first block's time_steps = 84600*7 looks like a typo
# for 86400*7 (seconds per day) -- confirm before re-enabling it.
"""
# Define set up of the parameters of the model
n = 1500 # number of vertical grids (includes top and bottom)
n_coeffs = n-2 # number of coefficients for the tridiag solver
dz = 0.001266667 # vertical grid spacing (in meters)
dt = 1 # time step in seconds
depth = dz * n # the depth of the soil modeled
kap = 8e-7 # soil diffusivity (m2 s-1)
la = (dt*kap)/(dz**2) # la as defined with dt*kappa/dz^2 (unitless)
time_steps = 84600*7 # number of time steps to calculate
T_bar = 20. # Average temperature of bottom layer
A = 10. # Amplitude of sine wave for surface layer
"""
"""
## Set of parameters we used with a decent looking output
## (uncomment by taking away triple quotes)
# Define set up of the parameters of the model
n = 30 # number of vertical grids (includes top and bottom)
n_coeffs = n-2 # number of coefficients for the tridiag solver
dz = 0.05 # vertical grid spacing (in meters)
dt = 3600 # time step in seconds
depth = dz * n # the depth of the soil modeled
kap = 8e-7 # soil diffusivity (m2 s-1)
la = (dt*kap)/(dz**2) # la as defined with dt*kappa/dz^2 (unitless)
time_steps = 200 # number of time steps to calculate
T_bar = 20. # Average temperature of bottom layer
A = 10. # Amplitude of sine wave for surface layer
"""
## Set of parameters we used with a decent looking output
## (uncomment by taking away triple quotes)
# Define set up of the parameters of the model
n = 150 # number of vertical grids (includes top and bottom)
n_coeffs = n-2 # number of coefficients for the tridiag solver
dz = 0.01 # vertical grid spacing (in meters)
dt = 1800 # time step in seconds
depth = dz * n # the depth of the soil modeled
kap = 8e-7 # soil diffusivity (m2 s-1)
la = (dt*kap)/(dz**2) # la as defined with dt*kappa/dz^2 (unitless)
time_steps = 400 # number of time steps to calculate
T_bar = 20. # Average temperature of bottom layer
A = 10. # Amplitude of sine wave for surface layer
# Sanity prints: la is the (dimensionless) diffusion number.
print(f"la: {la}")
print(f"dt/(dz^2): {dt / (dz**2)}")
## Tri Diagonal Matrix Algorithm(a.k.a Thomas algorithm) solver
# https://gist.github.com/cbellei/8ab3ab8551b8dfc8b081c518ccd9ada9
# Modified to take in coefficient array
def TDMAsolver_no_vec(coeffs):
    """
    Thomas (tridiagonal matrix) solver, pure-Python reference version.

    *coeffs* is an (n, 4) array whose columns are the sub-diagonal a,
    diagonal b, super-diagonal c and right-hand side d (a[0] and c[-1]
    are unused).

    refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
    and to http://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)

    :return: solution vector x with len(x) == coeffs.shape[0]
    """
    a = coeffs[1:, 0]
    b = coeffs[:, 1]
    c = coeffs[:-1, 2]
    d = coeffs[:, 3]
    nf = len(d)  # number of equations
    ac, bc, cc, dc = map(np.array, (a, b, c, d))  # copy arrays
    # Forward elimination.
    for it in range(1, nf):
        mc = ac[it-1]/bc[it-1]
        bc[it] = bc[it] - mc*cc[it-1]
        dc[it] = dc[it] - mc*dc[it-1]
    xc = bc
    xc[-1] = dc[-1]/bc[-1]
    # Back substitution must run all the way down to row 0.
    # BUG FIX: the original used range(nf-2, 1, -1), which stops at row 2
    # and leaves x[0] and x[1] holding eliminated diagonal values.
    for il in range(nf-2, -1, -1):
        xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
    return xc
# https://stackoverflow.com/questions/8733015/tridiagonal-matrix-algorithm-tdma-aka-thomas-algorithm-using-python-with-nump
@jit
def TDMAsolver(coeff):
    """
    Thomas algorithm for a tridiagonal system, numba-accelerated.

    *coeff* is an (n, 4) array with columns (a, b, c, d): sub-diagonal,
    diagonal, super-diagonal and right-hand side (a[0], c[-1] unused).
    Returns the solution vector of length n.
    """
    sub = coeff[1:, 0]
    diag = coeff[:, 1]
    sup = coeff[:-1, 2]
    rhs = coeff[:, 3]
    m = len(rhs)

    w = np.zeros(m-1)
    g = np.zeros(m)
    x = np.zeros(m)

    # Forward sweep: eliminate the sub-diagonal.
    w[0] = sup[0]/diag[0]
    g[0] = rhs[0]/diag[0]
    for i in range(1, m-1):
        w[i] = sup[i]/(diag[i] - sub[i-1]*w[i-1])
    for i in range(1, m):
        g[i] = (rhs[i] - sub[i-1]*g[i-1])/(diag[i] - sub[i-1]*w[i-1])

    # Back substitution.
    x[m-1] = g[m-1]
    for i in range(m-1, 0, -1):
        x[i-1] = g[i-1] - w[i-1]*x[i]
    return x
## Define boundary conditions
# Initialize temperature, time, and depth arrays
Temps = np.full((n, time_steps), np.nan)  # Temps[depth_index, time_index]
tao = np.array([t * dt for t in np.arange(time_steps)])  # elapsed time [s]
depths = np.array([-d * dz for d in np.arange(n)])  # depth below surface [m], negative downward
def temp_surface(tao, T_bar, A):
    """
    Diurnal surface-temperature forcing.

    T(t) = T_bar + A * sin(2*pi*t / 86400): a sine wave with a one-day
    period oscillating around the mean temperature.

    :param tao: time(s) in seconds (scalar or array)
    :param T_bar: mean surface temperature
    :param A: amplitude of the daily cycle
    :return: temperature(s) at the given time(s)
    """
    seconds_per_day = 86400
    omega = 2.0 * np.pi / seconds_per_day
    return T_bar + A * np.sin(omega * tao)  # + np.pi/2 for time offset
# Initialize boundary conditions in Temps array
Temps[0, :] = temp_surface(tao, T_bar, A) # Surface temperature
Temps[-1, :] = T_bar # Temperature at lower boundary
Temps[:, 0] = T_bar # Temperature at tau=0
print(Temps)
# Some initial tries of tao=0 boundary coundition
# Linear
# Temps[:, 0] = np.linspace(T_bar+10, T_bar, n) # Lowest depth = T_bar
# Diverging from tmax in center
# gauss = signal.gaussian(n, std=3)
# Temps[:,0] = gauss
# Coefficient matrix for tridiagonal solver: each row holds
# (a, b, c, d) = sub-diagonal, diagonal, super-diagonal, right-hand side
# for one interior grid point.
coeffs = np.full((n_coeffs, 4), 0.)
## 2. Finding the coefficents for a, b, c, d
# NOTE(review): enumerate(tao[1:-1]) never fills the final time column,
# so Temps[1:-1, -1] stays NaN -- confirm whether tao[1:] was intended.
for i, t in enumerate(tao[1:-1]):
    # Index in temperature array
    Temp_idx = i + 1
    # depth = 1 (first interior point; couples to the surface boundary)
    coeffs[0, 1] = 1 + 2 * la
    coeffs[0, 2] = - la
    coeffs[0, 3] = Temps[1, Temp_idx - 1] + la * Temps[0, Temp_idx]
    # depth = bottom (last interior point; couples to the bottom boundary)
    coeffs[-1, 0] = -la
    coeffs[-1, 1] = 1 + 2 * la
    coeffs[-1, 3] = Temps[-2, Temp_idx - 1] + la * Temps[-1, Temp_idx]
    # Loop through
    # NOTE(review): row `depth` of coeffs corresponds to grid point
    # depth + 1, so the RHS here may need Temps[depth + 1, Temp_idx - 1]
    # instead of Temps[depth, ...] -- confirm against the scheme.
    for depth in np.arange(coeffs.shape[0])[1:-1]:
        coeffs[depth, 0] = -la
        coeffs[depth, 1] = 1 + 2 * la
        coeffs[depth, 2] = -la
        coeffs[depth, 3] = Temps[depth, Temp_idx - 1]
    #print(coeffs)
    Temps[1:-1, Temp_idx] = TDMAsolver(coeffs)
## Some initial tests to make sure the tridiag solver was working
# Tridiag solver from github
def test_tridiag(test_coeff, tridiag_func, print_output=False):
v = tridiag_func(test_coeff)
if print_output:
print(F"Function: {str(tridiag_func)},\n Solution: {v}")
# Known 3x3 system used to sanity-check both solvers, plus a timeit
# comparison of the pure-Python and numba implementations.
test_coeff = np.array([[0, 2, -.5, 35],
                   [-.5, 2, -.5, 20],
                   [-.5, 2, 0, 30]])
time_novec = timeit.timeit('test_tridiag(test_coeff, TDMAsolver_no_vec)',
              'from __main__ import test_tridiag, test_coeff, TDMAsolver_no_vec')
time_vec = timeit.timeit('test_tridiag(test_coeff, TDMAsolver)',
              'from __main__ import test_tridiag, test_coeff, TDMAsolver')
print(f"No vectorization: {time_novec},\n Vectorized: {time_vec}")
## Save output (in case of large file)
# Create grid to plot on (time is in hours)
x, y = np.meshgrid(tao, depths)
print(x, y)
# Store the solution as an xarray Dataset with one variable, 'temp'.
da = xr.DataArray(Temps, coords=[('depth',depths), ('tau',tao)]).to_dataset(name='temp')
da.to_netcdf(f'data/dt_{dt}_dz_{dz}_data.nc')
## Sample output plot
# NOTE: does not work for large (e.g. 1 billion) points, need to use
# a different plotting package like datashade
fig, (ax1, ax2) = plt.subplots(nrows=2, **{'figsize':(10,10)})
# Plot temperatures
try:
    # temp_plt = ax.pcolormesh(x, y, Temps)
    # temp_plt = ax.contourf(x, y, Temps) # Contour plot
    temp_plt = ax1.pcolormesh(Temps[:30,:])
    # plt2 = ax2.pcolormesh(x, y, Temps)
    ax1.set_xlabel('Time [hr]')
    ax1.set_ylabel('Depth [m]')
    #fig.colorbar(temp_plt)
    plt.savefig(f"figures/dt_{dt}_{dz}_output_xy_subset_2.png", dpi=300)
except Exception as e:
    print(e)
# BUG FIX: the dataset variable is named 'temp' (see to_dataset above);
# the original indexed da['Temps'], which raises KeyError.
tf.shade(ds.Canvas(plot_height=400, plot_width=1200).raster(da['temp']))
| 7,486 | 3,033 |
#!/usr/bin/python
import os
import sys
import glob
import argparse
from scai_utils import *
# FSL major.minor version this pipeline was validated against.
REQ_FSL_VER = "5.0"
# Project STUT locations (defaults; overridden below when --RHY is given).
DATA_DIR = "/users/cais/STUT/DATA"
DTIPREP_DIR = "/users/cais/STUT/analysis/dti"
DTI_BASE = "/users/cais/STUT/analysis/dti2"
# External helper scripts invoked by this driver.
EDDY_CORRECT_SPLINE_BIN = "/users/cais/STUT/scripts/eddy_correct_spline"
#FDT_ROTATE_BVECS_BIN = "/users/cais/STUT/scripts/fdt_rotate_bvecs"
FDT_ROTATE_BVECS_BIN = "/users/cais/STUT/scripts/rotate_bvecs.py"
TRACULA2_BIN = "/users/cais/STUT/scripts/tracula2.py"
BEDP_SCRIPT = "/users/cais/STUT/scripts/trac-all_bedp_3.sh"
# Project RHY locations (used with the --RHY flag).
DATA_DIR_RHY = "/users/cais/RHY/DATA"
DTIPREP_DIR_RHY = "/users/cais/RHY/analysis/dwi"
DTI_BASE_RHY = "/users/cais/RHY/analysis/dwi"
if __name__ == "__main__":
ap = argparse.ArgumentParser(description="Run DWI analysis on qced 4d data (see remove_dtiprep_bad_frames.py")
ap.add_argument("sID", help="Subject ID")
ap.add_argument("--rerun", dest="bForceRerun", action="store_true", \
help="Force rerun finished steps")
ap.add_argument("--RHY", dest="bRHY", action="store_true", \
help="Project RHY, instead of the default project STUT")
if len(sys.argv) == 1:
ap.print_help()
sys.exit(0)
args = ap.parse_args()
sID = args.sID
bForceRerun = args.bForceRerun
if args.bRHY:
DATA_DIR = DATA_DIR_RHY
DTIPREP_DIR = DTIPREP_DIR_RHY
DTI_BASE = DTI_BASE_RHY
# Check the version of FSL
env = os.environ
if env["FSLDIR"].count(REQ_FSL_VER) == 0:
raise Exception, "It appears that a version of FSL other than %s is being used" % REQ_FSL_VER
# Locate the original 4d input and accompanying bvals and bvecs files
qcedDir = os.path.join(DATA_DIR, sID, "diffusion", "qced");
check_dir(qcedDir)
if not args.bRHY:
d0 = glob.glob(os.path.join(qcedDir, "%s_run??_??_qced.nii.gz" % sID))
else:
d0 = glob.glob(os.path.join(qcedDir, \
"%s_diffusion_*_qced.nii.gz" % sID))
if len(d0) != 1:
raise Exception, "Not exactly one 4D series found in directory: %s" % \
qcedDir
dwi4d = d0[0];
if not args.bRHY:
bvalsFN = dwi4d.replace(".nii.gz", ".mghdti.bvals")
bvecsFN = dwi4d.replace(".nii.gz", ".mghdti.bvecs")
else:
bvalsFN = dwi4d.replace("_qced.nii.gz", ".qced.bval")
bvecsFN = dwi4d.replace("_qced.nii.gz", ".qced.bvec")
check_file(bvalsFN)
check_file(bvecsFN)
# Execute eddy_correct_spline
outBaseDir = os.path.join(DTI_BASE, sID)
check_dir(outBaseDir, bCreate=True)
ec4d = os.path.join(outBaseDir, "ecdwi.nii.gz")
ec_cmd = "%s %s %s 0" % (EDDY_CORRECT_SPLINE_BIN, \
dwi4d, ec4d)
ecclog = os.path.join(outBaseDir, "ecdwi.ecclog")
b_ecDone = os.path.isfile(ec4d) and os.path.isfile(ecclog)
if not b_ecDone or bForceRerun:
saydo("rm -f %s" % ecclog)
saydo(ec_cmd)
check_file(ec4d)
check_file(ecclog)
else:
if not bForceRerun:
print("Skipping step eddy_correct_splint (already done)")
# Execute fdt_rotate_bvecs
rotBvecsFN = os.path.join(outBaseDir, "rotated.bvecs")
rotBvalsFN = os.path.join(outBaseDir, "rotated.bvals")
rot_cmd = "%s %s %s %s -v" % (FDT_ROTATE_BVECS_BIN, \
bvecsFN, rotBvecsFN, ecclog)
b_rotDone = os.path.isfile(rotBvecsFN) and os.path.isfile(rotBvalsFN)
if not b_rotDone or bForceRerun:
saydo(rot_cmd)
check_file(rotBvecsFN)
saydo("cp %s %s" % (bvalsFN, rotBvalsFN))
check_file(rotBvalsFN)
else:
if not bForceRerun:
print("Skipping step fdt_rotate_bvecs (already done)")
# Execute tracula2.py
tracula2_cmd = "%s %s prep" % (TRACULA2_BIN, sID)
if args.bRHY:
tracula2_cmd += " --RHY"
# Determine if tracula prep has already finished
tracBase = os.path.join(DTI_BASE, "tracula")
sTracDir = os.path.join(tracBase, sID)
bTraculaPrepDone = \
os.path.isfile(os.path.join(sTracDir, "dmri", "bvals")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "bvecs")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_FA.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_L1.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_L2.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_L3.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_MD.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_MO.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_S0.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_V1.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_V2.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dtifit_V3.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "dwi.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "lowb.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "lowb_brain.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "brain_anat_mni.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dmri", "brain_anat.nii.gz")) and \
os.path.isfile(os.path.join(sTracDir, "dlabel", "diff", "lowb_brain_mask.nii.gz"))
if not bTraculaPrepDone or bForceRerun:
saydo(tracula2_cmd)
else:
print("Skipping step tracula2.py prep (already done)")
# Run bedpostx (trac-all_bedp_3.sh)
check_file(BEDP_SCRIPT)
bedp_cmd = "%s %s" % (BEDP_SCRIPT, sID)
# Determine if bedp is already done
# bvecs1 =
| 5,901 | 2,568 |
# MIT License
# Copyright (c) 2020 me is me
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#Define dependencies of the bot and import them
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions
#Define the class and initialize the bot
class Moderation(commands.Cog):
    """Moderation commands: mute/unmute members via a 'Muted' role."""

    def __init__(self, client):
        # BUG FIX: this was spelled `_init_` (single underscores), so it
        # never ran and `self.client` was never set.
        self.client = client

    #Define the commands
    @commands.command()
    @has_permissions(manage_roles=True)
    async def mute(self, ctx, member : discord.Member, *, reason = None):
        """Give *member* the 'Muted' role, creating the role (and the
        per-channel send-deny overwrites) on first use."""
        role = discord.utils.get(ctx.guild.roles, name="Muted")
        perms = discord.PermissionOverwrite()
        perms.send_messages = False
        perms.read_messages = True
        if role:
            await member.add_roles(role)
        else:
            # First mute on this guild: create the role and deny sending
            # in every channel.
            role = await ctx.guild.create_role(name='Muted', permissions=discord.Permissions(0))
            for channel in ctx.guild.channels:
                await channel.set_permissions(role, overwrite=perms)
            await member.add_roles(role)
        embedVar = discord.Embed(title="Muted", description=f"{member.mention} was muted for {reason}.", color=0x35a64f)
        await ctx.message.delete()
        await ctx.send(embed=embedVar)

    @commands.command()
    @has_permissions(administrator=True)
    async def unmute(self, ctx, member : discord.Member):
        """Remove the 'Muted' role from *member*."""
        role = discord.utils.get(ctx.guild.roles, name="Muted")
        await member.remove_roles(role)
        embedVar = discord.Embed(title=
        "Unmuted", description=f"{member.mention} was unmuted.", color=0x35a64f)
        await ctx.message.delete()
        await ctx.send(embed=embedVar)
#Connect the cog to the main bot
def setup(client):
    """Entry point used by discord.py's extension loader."""
    client.add_cog(Moderation(client))
from django.apps import AppConfig
class GmenuConfig(AppConfig):
    """Django application configuration for the 'gmenu' app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'gmenu'
| 142 | 47 |
import numpy as np
from pyadept.strutil import vec_to_str
# Wire-protocol line terminator, and the pre-encoded 'break' message
# appended after motion commands when break_move is requested.
DELIMITER = b'\r\n'
BREAK_CMD = b'break' + DELIMITER
def join_commands(*commands):
    """Combine several RobotCommand objects into a single JoinedCommand."""
    return JoinedCommand(commands)
def create_motion_command(template, vec, break_move=True):
    """
    Render a motion-command template with *vec* into wire messages.

    Returns a 1-tuple with the encoded command, or a 2-tuple that also
    carries the pre-encoded break message when *break_move* is set.
    """
    payload = template.format(vec_to_str(vec)).encode() + DELIMITER
    if break_move:
        return payload, BREAK_CMD
    return (payload,)
class RobotCommand(object):
    """Base class for commands sent to the robot over the wire."""

    def get_messages(self):
        """Return the tuple of byte messages this command produces."""
        return (DELIMITER,)

    def __repr__(self):
        return '{}[{}]'.format(type(self).__name__, self._repr_args())

    def _repr_args(self):
        """Hook for subclasses: text shown inside the repr brackets."""
        return ''

    def __len__(self):
        """Total byte length of all messages."""
        return sum(len(message) for message in self.get_messages())
class JoinedCommand(RobotCommand):
    """Concatenates the messages of several commands into one command."""

    def __init__(self, commands):
        self._messages = tuple(
            message for command in commands for message in command.get_messages()
        )
        self._commands_str = ','.join(str(command) for command in commands)

    def get_messages(self):
        return self._messages

    def _repr_args(self):
        return self._commands_str
class DirectCommand(RobotCommand):
    """Sends a raw command string verbatim (plus the line delimiter)."""

    def __init__(self, cmd):
        self._cmd = cmd

    def get_messages(self):
        return (self._cmd.encode() + DELIMITER,)

    def _repr_args(self):
        return '"{}"'.format(self._cmd)
class SetSpeed(RobotCommand):
    """Command that sets the robot's speed factor (an integer)."""

    def __init__(self, speed_factor):
        self._speed_factor = speed_factor

    def get_messages(self):
        message = 'set_speed:{:d}'.format(self._speed_factor).encode()
        return (message + DELIMITER,)

    def _repr_args(self):
        return '{:d}'.format(self._speed_factor)
class MotionCommand(RobotCommand):
    """Base class for commands that move the robot using a 6-vector."""

    def __init__(self, template, vec, break_move=True):
        # Poses and joint configurations are always 6 components.
        assert len(vec) == 6
        self._template = template
        self._vec = vec
        self._break = break_move

    def get_messages(self):
        return create_motion_command(self._template, self._vec, self._break)

    def _repr_args(self):
        return '{}, break={}'.format(vec_to_str(self._vec), self._break)
class MoveToPose(MotionCommand):
    """Absolute motion to a world-frame pose."""

    def __init__(self, pose, break_move=True):
        super(MoveToPose, self).__init__('move_to:{:s}', pose, break_move)
class MoveRelWorld(MotionCommand):
    """Relative motion expressed in the world frame."""

    def __init__(self, pose, break_move=True):
        super(MoveRelWorld, self).__init__('move_rel_world:{:s}', pose, break_move)
class MoveJoints(MotionCommand):
    """Absolute motion to a joint configuration."""

    def __init__(self, jconf, break_move=True):
        super(MoveJoints, self).__init__('move_joints:{:s}', jconf, break_move)
class MoveRelJoints(MotionCommand):
    """Relative motion in joint space."""

    def __init__(self, jconf, break_move=True):
        super(MoveRelJoints, self).__init__('move_rel_joints:{:s}', jconf, break_move)
class MoveRelTool(MotionCommand):
    """Relative motion expressed in the tool frame."""

    def __init__(self, pose, break_move=True):
        super(MoveRelTool, self).__init__('move_rel_tool:{:s}', pose, break_move)
class MoveToolZ(MoveRelTool):
    """Relative motion along the tool's Z axis only."""

    def __init__(self, z, break_move=True):
        self._z = z
        super(MoveToolZ, self).__init__(np.array([0, 0, z, 0, 0, 0]), break_move)

    def _repr_args(self):
        return 'z={:.3f}, break={}'.format(self._z, self._break)
class MinMaxScalerTranspiler(object):
    """
    Transpiles a fitted scikit-learn MinMaxScaler into standalone C code.

    Expects *model* to expose the fitted attributes ``min_`` and
    ``scale_`` (numpy arrays of equal length); the emitted transform()
    computes sample[i] * scale[i] + min[i], matching sklearn's transform.
    """

    def __init__(self, model):
        self.model = model
        self.n_features = len(model.min_)
        # Comma-separated C array initialisers.
        self.min = ','.join(model.min_.astype(str))
        self.std = ','.join(model.scale_.astype(str))

    def generate_code(self):
        """Return the C source implementing transform(sample) in place."""
        return """
/*
The following code was generated using Clara.Transpiler. For more information please visit: https://github.com/asergiobranco/clara
*/
#define N_FEATURES %d
double min[N_FEATURES] = {%s};
double scale[N_FEATURES] = {%s};
double * transform(double * sample){
    unsigned int i = 0;
    for(i = 0; i < N_FEATURES; i++){
        sample[i] *= scale[i];
    }
    for(i = 0; i < N_FEATURES; i++){
        sample[i] += min[i];
    }
    return sample;
}
""" % (self.n_features, self.min, self.std)
| 949 | 311 |
import base64
from datetime import datetime, timedelta
import re
import os
from django.core.mail import EmailMultiAlternatives
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, Http404, HttpResponseBadRequest
from django.urls import reverse
from django.core.validators import validate_email
from django import forms
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib import messages
from django.template.loader import get_template
from django.contrib.auth import authenticate, login, logout
from .models import Person, Group, Event, Participation, EmailSchedule, Token, EventOption
# Loose e-mail sanity check: something@something.tld (not RFC-complete).
RE_EMAIL = re.compile(r'^[^@]+@[^@]+\.[^@.]+')
# Timestamp format used by the HTML datetime-local form inputs.
TS_FORMAT = '%Y-%m-%dT%H:%M'
def index(req):
    """Landing page: list the logged-in organiser's events, newest first."""
    if not req.user.is_authenticated:
        return HttpResponseRedirect(reverse('login'))
    organiser = Person.objects.get(email=req.user.email)
    events = Event.objects.filter(organiser=organiser).order_by('-date')
    ctx = {'events': [{'id': e.id, 'name': e.name, 'date': e.date} for e in events]}
    return render(req, 'sova/index.html', ctx)
def vlogin(req):
    """Log a staff user in.

    GET renders the login form; POST authenticates and, on failure or for
    non-staff accounts, re-renders the form with an error message.
    """
    ctx = {}
    if req.method == 'GET':
        return render(req, 'sova/login.html', ctx)
    user = authenticate(username=req.POST['username'], password=req.POST['password'])
    # Fix: identity check (`is None`) instead of `== None` -- authenticate()
    # returns None on failure.
    if user is None:
        ctx['error'] = 'Invalid login'
        return render(req, 'sova/login.html', ctx)
    if not user.is_staff:
        ctx['error'] = 'Access denied'
        return render(req, 'sova/login.html', ctx)
    login(req, user)
    return HttpResponseRedirect(reverse('index'))
def vlogout(req):
    """Terminate the current session and bounce back to the index page."""
    logout(req)
    return HttpResponseRedirect(reverse('index'))
def about(req):
    """Static 'about' page."""
    return render(req, 'sova/about.html', {})
def newevent(req):
    """Create an Event plus its four standard e-mail schedules.

    GET renders the form pre-filled with dates relative to now; POST
    persists the event and its invitation / reminder / details / thank-you
    schedules, then redirects to the index. The four near-identical
    EmailSchedule constructions are factored into a nested helper.
    """
    if not (req.user.is_authenticated and req.user.is_staff):
        return HttpResponseRedirect(reverse('index'))
    organiser = Person.objects.get(email=req.user.email)
    if req.method == 'GET':
        now = datetime.now()
        ctx = {
            'groups': [{"id": g.id, "name": g.name} for g in Group.objects.order_by('name')],
            'eventDate': (now + timedelta(days=7)).strftime(TS_FORMAT),
            'invitationDate': (now + timedelta(days=3)).strftime(TS_FORMAT),
            'acceptDate': (now + timedelta(days=6, hours=12)).strftime(TS_FORMAT),
            'reminderDate': (now + timedelta(days=5)).strftime(TS_FORMAT),
            'detailsDate': (now + timedelta(days=6)).strftime(TS_FORMAT),
            'thanksDate': (now + timedelta(days=8)).strftime(TS_FORMAT),
        }
        return render(req, 'sova/newevent.html', ctx)
    dateEvent = datetime.strptime(req.POST['dateEvent'], TS_FORMAT)
    dateAccept = datetime.strptime(req.POST['dateAccept'], TS_FORMAT)
    maxPeople = int(req.POST['maxPeople']) if req.POST['maxPeople'] != '' else None
    group = Group.objects.get(id=int(req.POST['group']))
    e = Event(name=req.POST['name'], hype_text=req.POST['hypeText'],
              mail_prefix=req.POST['slug'], organiser=organiser,
              header=mark_safe(req.POST['header']), footer=mark_safe(req.POST['footer']),
              date=dateEvent, deadline_for_joining=dateAccept, max_people=maxPeople)
    e.save()

    def _schedule(name, target, es_type, date_field, subject, text_field):
        # Persist one EmailSchedule tied to the new event/group; dates and
        # message bodies come from the POSTed form fields.
        EmailSchedule(name=name, group=group, target=target, event=e, type=es_type,
                      date=datetime.strptime(req.POST[date_field], TS_FORMAT),
                      subject=subject, message=mark_safe(req.POST[text_field])).save()

    _schedule('Pozivnica: %s' % e.name, EmailSchedule.SEND_EVERYONE,
              EmailSchedule.TYPE_INVITATION, 'invitationDate',
              'Pozivnica: %s' % e.name, 'invitationText')
    _schedule('Pozivnica: %s (podsjetnik)' % e.name, EmailSchedule.SEND_NOT_ACCEPTED,
              EmailSchedule.TYPE_INVITATION, 'reminderDate',
              'Pozivnica: %s (podsjetnik)' % e.name, 'invitationText')
    _schedule('Detalji: %s' % e.name, EmailSchedule.SEND_ACCEPTED,
              EmailSchedule.TYPE_MESSAGE, 'detailsDate',
              '%s' % e.name, 'detailsText')
    _schedule('Zahvalnica: %s' % e.name, EmailSchedule.SEND_ACCEPTED,
              EmailSchedule.TYPE_EXIT_POLL, 'thanksDate',
              'Zahvalnica: %s' % e.name, 'thanksText')
    return HttpResponseRedirect(reverse('index'))
def join(req, event, person):
    """Show the join page for a person/event pair, with any existing RSVP."""
    person = get_object_or_404(Person, pk=int(person))
    event = get_object_or_404(Event, pk=int(event))
    try:
        participation = Participation.objects.get(person=person, event=event)
    except Participation.DoesNotExist:
        participation = None
    return render(req, 'sova/join.html',
                  {'person': person, 'event': event, 'participation': participation})
def vote(req, event, person):
    """Record a person's yes/no choice for an event, then redirect to join.

    Fixes: a missing 'choice' field used to raise KeyError (HTTP 500)
    instead of redisplaying the form, and the raw 'True'/'False' string was
    stored where a boolean is expected.

    NOTE(review): the redirect passes args=(person.pk, event.pk) while the
    `join` view signature is (req, event, person) -- the order looks
    swapped; verify against urls.py before changing it.
    """
    person = get_object_or_404(Person, pk=int(person))
    event = get_object_or_404(Event, pk=int(event))
    choice = req.POST.get('choice')
    if choice not in ('True', 'False'):
        # redisplay the form
        return render(req, 'sova/join.html', {
            'person': person,
            'event': event,
            'error_message': "You didn't select a choice.",
        })
    # Store a real boolean rather than the posted string.
    participation = Participation(person=person, event=event, accepted=(choice == 'True'))
    participation.save()
    return HttpResponseRedirect(reverse('join', args=(person.pk, event.pk,)))
def accept(req, schedule, person):
    """
    Shows event info and allows the user to accept.
    """
    schedule = get_object_or_404(EmailSchedule, pk=int(schedule))
    person = get_object_or_404(Person, pk=int(person))
    # Current accepted head-count and, when a capacity is set, how full
    # the event already is (percentage of max_people).
    people_count = Participation.objects.filter(event=schedule.event, accepted=True).count()
    people_percent = int((people_count / schedule.event.max_people) * 100) if schedule.event.max_people else 0
    try:
        participation = Participation.objects.get(person=person, event=schedule.event)
        if participation.accepted:
            # Already accepted: show the cancellation page instead.
            return render(req, 'sova/unaccept.html', { 'person': person, 'schedule': schedule, 'people_count': people_count, 'people_percent': people_percent })
        options = EventOption.objects.filter(event_id = schedule.event_id)
    except Participation.DoesNotExist:
        # NOTE(review): a person with no prior Participation gets an empty
        # options list, while an existing non-accepted one gets the event's
        # options -- confirm this asymmetry is intended.
        options = []
    # Full house: no room left for another acceptance.
    if schedule.event.max_people and people_count >= schedule.event.max_people:
        return render(req, 'sova/noroom.html', { 'person': person, 'schedule': schedule })
    # Event already happened, or the joining deadline has passed.
    if timezone.now() > schedule.event.date or (schedule.event.deadline_for_joining and timezone.now() > schedule.event.deadline_for_joining):
        return render(req, 'sova/toolate.html', { 'person': person, 'schedule': schedule })
    return render(req, 'sova/accept.html', { 'person': person, 'schedule': schedule, 'people_count': people_count, 'people_percent': people_percent, 'options': options })
def confirm(req, schedule, person):
    """Mark attendance as accepted and send a confirmation e-mail."""
    schedule = get_object_or_404(EmailSchedule, pk=int(schedule))
    person = get_object_or_404(Person, pk=int(person))
    try:
        participation = Participation.objects.get(person=person, event=schedule.event)
        participation.accepted = True
    except Participation.DoesNotExist:
        participation = Participation(person=person, event=schedule.event, accepted=True)
    participation.save()
    # Render the HTML confirmation mail and attach a plain-text fallback.
    ctx = {'person': person, 'schedule': schedule, 'participation': participation,
           'email_admin': settings.EMAIL_ADMIN}
    body_html = get_template('sova/confirmemail.html').render(ctx)
    mail = EmailMultiAlternatives(
        "[%s] %s - potvrda!" % (schedule.event.mail_prefix, schedule.name),
        strip_tags(body_html), settings.EMAIL_FROM, [person.email])
    mail.attach_alternative(body_html, "text/html")
    mail.send()
    return render(req, 'sova/confirm.html',
                  {'person': person, 'schedule': schedule, 'participation': participation})
def unaccept(req, schedule, person):
    """Mark attendance as cancelled and send a cancellation e-mail."""
    schedule = get_object_or_404(EmailSchedule, pk=int(schedule))
    person = get_object_or_404(Person, pk=int(person))
    try:
        participation = Participation.objects.get(person=person, event=schedule.event)
        participation.accepted = False
    except Participation.DoesNotExist:
        participation = Participation(person=person, event=schedule.event, accepted=False)
    participation.save()
    # Render the HTML cancellation mail and attach a plain-text fallback.
    ctx = {'person': person, 'schedule': schedule, 'participation': participation,
           'email_admin': settings.EMAIL_ADMIN}
    body_html = get_template('sova/unacceptemail.html').render(ctx)
    mail = EmailMultiAlternatives(
        "[%s] %s - otkazivanje" % (schedule.event.mail_prefix, schedule.name),
        strip_tags(body_html), settings.EMAIL_FROM, [person.email])
    mail.attach_alternative(body_html, "text/html")
    mail.send()
    return render(req, 'sova/unacceptconfirm.html',
                  {'person': person, 'schedule': schedule, 'participation': participation})
def exitpoll(req, schedule, person):
    """Show the exit poll for a participant of the schedule's event."""
    schedule = get_object_or_404(EmailSchedule, pk=int(schedule))
    person = get_object_or_404(Person, pk=int(person))
    participation = get_object_or_404(Participation, person=person, event=schedule.event)
    # Head-count of people who accepted AND actually showed up.
    people_count = Participation.objects.filter(
        event=schedule.event, accepted=True, participated=True).count()
    if schedule.event.max_people:
        people_percent = int((people_count / schedule.event.max_people) * 100)
    else:
        people_percent = 0
    return render(req, 'sova/exitpoll.html',
                  {'person': person, 'schedule': schedule, 'people_count': people_count,
                   'people_percent': people_percent, 'participation': participation})
def exitpollsave(req, schedule, person):
    """
    Saves the exit poll results.
    """
    schedule = get_object_or_404(EmailSchedule, pk=int(schedule))
    person = get_object_or_404(Person, pk=int(person))
    participation = get_object_or_404(Participation, person=person, event=schedule.event)
    # NOTE(review): int() raises ValueError (-> 500) on a non-numeric grade;
    # presumably the form constrains it -- confirm.
    participation.poll_grade = int(req.POST['grade'])
    participation.poll_best = req.POST['best']
    participation.poll_worst = req.POST['worst']
    # Simplified from `True if ... else False`; .get() also collapses the
    # membership test and the lookup into one step.
    participation.poll_futureorg = req.POST.get('futureorg') == '1'
    participation.poll_change = req.POST['change']
    participation.poll_note = req.POST['note']
    participation.save()
    return render(req, 'sova/exitpollthanks.html', { 'person': person, 'schedule': schedule })
def unsubscribe(req, person):
    """
    Shows the unsubscribe form to the user.
    """
    return render(req, 'sova/unsubscribe.html',
                  {'person': get_object_or_404(Person, pk=int(person))})
def unsubscribesave(req, person):
    """Apply the user's unsubscribe choice.

    Fix: the Person is resolved up front, so the "no" branch renders its
    template with an actual Person object instead of the raw pk string, and
    an invalid pk 404s consistently on both branches.
    """
    person = get_object_or_404(Person, pk=int(person))
    if req.POST['unsubscribe'] == '1':
        person.email_enabled = False
        person.save()
        return render(req, 'sova/unsubscribe_yes.html', { 'person': person })
    else:
        return render(req, 'sova/unsubscribe_no.html', { 'person': person })
def subscribe(req):
    """Subscription form; 404 when the feature is disabled in settings."""
    if not settings.SUBSCRIBE_ENABLED:
        raise Http404("Subscribing not enabled")
    return render(req, 'sova/subscribe.html',
                  {'org_title': settings.ORG_TITLE, 'cfg': settings.CFG})
def subscribesave(req):
    # NOTE(review): when the e-mail matches, this view falls through and
    # returns None, which Django rejects at runtime -- the reCAPTCHA
    # verification and success response sketched below are unimplemented.
    if not settings.SUBSCRIBE_ENABLED:
        raise Http404("Subscribing not enabled")
    m = RE_EMAIL.match(req.POST['email'])
    if m == None:
        return subscribe(req)
    """
    When your users submit the form where you integrated reCAPTCHA, you'll get as part of the payload a string with the name "g-recaptcha-response". In order to check whether Google has verified that user, send a POST request with these parameters:
    URL: https://www.google.com/recaptcha/api/siteverify
    secret (required) <REDACTED -- a live reCAPTCHA secret key was committed here; rotate it and load it from settings/environment, never source control>
    response (required) The value of 'g-recaptcha-response'.
    remoteip The end user's ip address.
    The reCAPTCHA documentation site describes more details and advanced configurations.
    """
def subscribeconfirm(req):
    # NOTE(review): stub -- past the guard this view returns None, which
    # Django rejects; the confirmation flow appears unimplemented.
    if not settings.SUBSCRIBE_ENABLED:
        raise Http404("Subscribing not enabled")
def contact(req):
    """Static contact page."""
    ctx = {}
    return render(req, 'sova/contact.html', ctx)
def get_profile_token(req, person=0):
    """Show the newest unexpired profile token for a person.

    Fix: the original indexed the queryset with ``[0]``, which raises
    IndexError -- not Token.DoesNotExist -- when no unexpired token exists,
    so the except clause never fired and the view crashed with a 500.
    ``QuerySet.first()`` returns None instead.
    """
    try:
        person = Person.objects.get(pk=int(person))
        cutoff = timezone.now() - timezone.timedelta(minutes=settings.TOKEN_EXPIRY_TIME)
        # None when the person has no unexpired token.
        token = Token.objects.filter(person=person.id,
                                     date_created__gte=cutoff).order_by('-id').first()
    except Person.DoesNotExist:
        person = None
        token = None
    return render(req, 'sova/getprofiletoken.html', {
        'person': person,
        'token' : token,
    })
def send_profile_token(req):
    """Create a fresh profile token for the e-mail address posted by the user.

    Fix: ``base64.urlsafe_b64encode`` returns *bytes*; saved through a text
    field that becomes the string "b'...'", so the token later looked up from
    the URL in ``edit_user_profile`` could never match. The value is now
    decoded to str before storing.
    """
    profile = req.POST.get('profile_email', False)
    # otherwise, validate it and retrieve the Person
    try:
        validate_email(profile)
        person = get_object_or_404(Person, email=str(profile))
        token_value = base64.urlsafe_b64encode(os.urandom(12)).decode('ascii')
        token = Token(token=token_value, person=person)
        token.save()
        # send email, something along these lines
        # subject, from_email, to = 'Token', settings.EMAIL_FROM, person.email
        # text_content = 'Your token is ' + token.token
        # html_content = '<a href="{% url edituserprofile token.token %} ">'
        # msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        # msg.attach_alternative(html_content, "text/html")
        # msg.send()
        return HttpResponseRedirect(reverse('getprofiletoken', args=(person.id,)))
    except forms.ValidationError:
        messages.error(req, "You've entered an invalid e-mail address")
        return render(req, 'sova/getprofiletoken.html')
def edit_user_profile(req, token=''):
    """Render the profile editor when given a valid, unexpired token."""
    cutoff = timezone.now() - timezone.timedelta(minutes=settings.TOKEN_EXPIRY_TIME)
    try:
        token = Token.objects.get(token=token, date_created__gte=cutoff)
    except Token.DoesNotExist:
        # either no token or token has expired
        token = None
    return render(req, 'sova/edituserprofile.html', {'token': token})
def save_user_profile(req, token=''):
    """Persist profile edits authorised by an unexpired token.

    On success, redirects back to the editor; on an expired/unknown token or
    a validation error, re-renders the editor with an error message.
    """
    try:
        retrieved_token = Token.objects.get(token=token, date_created__gte=timezone.now() - timezone.timedelta(
            minutes=settings.TOKEN_EXPIRY_TIME))
        retrieved_token.person.name = req.POST.get('username', retrieved_token.person.name)
        # Checkboxes are absent from POST when unticked, hence the .get() probe.
        if req.POST.get('email_enabled', False):
            retrieved_token.person.email_enabled = True
        else:
            retrieved_token.person.email_enabled = False
        retrieved_token.person.phone = req.POST.get('phone', retrieved_token.person.phone)
        if req.POST.get('phone_enabled', False):
            retrieved_token.person.phone_enabled = True
        else:
            retrieved_token.person.phone_enabled = False
        retrieved_token.person.save()
        messages.success(req, 'Successfull edit')
    except Token.DoesNotExist:
        token = None
        messages.error(req, 'Token expired :(')
        return render(req, 'sova/edituserprofile.html', {
            'token': token,
        })
    except forms.ValidationError:
        # NOTE(review): Model.save() does not run field validation, so it is
        # unclear what would raise ValidationError here -- confirm reachable.
        messages.error(req, 'Form validation problem :(')
        return render(req, 'sova/edituserprofile.html', {
            'token': token,
        })
    return HttpResponseRedirect(reverse('edituserprofile', args=(retrieved_token.token,)))
| 15,839 | 5,083 |