id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11309401 | """
Created on 11 Jan 2021
@author: <NAME> (<EMAIL>)
"""
from scs_core.aws.greengrass.aws_group import AWSGroup
# --------------------------------------------------------------------------------------------------------------------
class AWSGroupDeployer(object):
    """Triggers AWS Greengrass group deployments and polls their status.

    The deployment states below mirror the strings returned by the
    Greengrass ``get_deployment_status`` API.
    """

    BUILDING = "Building"
    IN_PROGRESS = "InProgress"
    SUCCESS = "Success"
    FAILURE = "Failure"

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, group_name, client):
        self.__group_name = group_name
        self.__client = client

    # ----------------------------------------------------------------------------------------------------------------

    def retrieve_deployment_info(self, client):
        """Look up the group id and latest group-version id from the group name."""
        group = AWSGroup(self.__group_name, client)
        info = group.get_group_info_from_name()
        return info.node("GroupID"), info.node("GroupLatestVersionID")

    def deploy(self):
        """Start a new deployment of the group's latest version and return the raw response."""
        group_id, version_id = self.retrieve_deployment_info(self.__client)
        return self.__client.create_deployment(
            DeploymentType="NewDeployment",
            GroupId=group_id,
            GroupVersionId=version_id
        )

    def status(self, response):
        """Return the DeploymentStatus string for a previously started deployment."""
        group_id, _ = self.retrieve_deployment_info(self.__client)
        status_response = self.__client.get_deployment_status(
            DeploymentId=response.get("DeploymentId"),
            GroupId=group_id
        )
        return status_response.get("DeploymentStatus")

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "AWSGroupDeployer:{group_name:%s, client:%s}" % (self.__group_name, self.__client)
| StarcoderdataPython |
5013800 | import statistics
def custom_mean(arr):
    """Arithmetic mean of *arr*, or 0 when the collection is empty."""
    if not arr:
        return 0
    return statistics.mean(arr)
def custom_var(arr):
    """Sample variance of *arr*, or 0 when there are fewer than two values."""
    return 0 if len(arr) < 2 else statistics.variance(arr)
class FinderAccount:
    """An account with a balance and an accumulated confidence level.

    Accounts are ordered by ``balance`` only; ``identifier`` takes no part
    in comparisons.  Fix: the rich-comparison methods previously fell off
    the end (implicitly returning ``None``) for non-FinderAccount operands;
    they now return ``NotImplemented`` so Python can try the reflected
    operation or fall back to identity comparison.
    """

    def __init__(self, balance, identifier):
        self.balance = balance
        self.identifier = identifier
        # Confidence Level: accumulated votes, averaged over `voters` in get_score()
        self.cl = 0.0
        self.voters = 0

    def __eq__(self, other):
        if isinstance(other, FinderAccount):
            return self.balance == other.balance
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, FinderAccount):
            return self.balance < other.balance
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, FinderAccount):
            return self.balance <= other.balance
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, FinderAccount):
            return self.balance >= other.balance
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, FinderAccount):
            return self.balance > other.balance
        return NotImplemented

    def __str__(self):
        return f"(balance: {self.balance}, id:{self.identifier}, score: {self.get_score()})"

    def __repr__(self):
        return self.__str__()

    def get_score(self):
        """Average confidence level per voter (raw cl when nobody voted yet)."""
        if self.voters < 1:
            return self.cl
        return self.cl / self.voters
class FinderAnswer:
    """An answer backed by a dict mapping keys to lists of account-like
    objects (anything exposing ``get_score()``).

    Ordering compares the per-key mean scores; on equal means, ``__lt__``
    and ``__gt__`` tie-break on the variances (higher variance loses).
    Fixes: comparison methods now return ``NotImplemented`` instead of an
    implicit ``None`` for foreign operands, and ``__str__`` uses the
    empty-safe ``custom_mean`` helper instead of ``statistics.mean`` (which
    raises ``StatisticsError`` on an empty answer dict).
    """

    def __init__(self, dictionary):
        self.d = dictionary

    def __eq__(self, other):
        if isinstance(other, FinderAnswer):
            a1, v1 = self.get_score()
            a2, v2 = other.get_score()
            return a1 == a2 and v1 == v2
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, FinderAnswer):
            a1, v1 = self.get_score()
            a2, v2 = other.get_score()
            if a1 < a2:
                return True
            elif a1 == a2:
                # equal averages: the answer with the HIGHER variance is "smaller"
                return v1 > v2
            else:
                return False
        return NotImplemented

    def __le__(self, other):
        # NOTE(review): unlike __lt__/__gt__, __le__/__ge__ ignore the
        # variance tie-breaker -- kept as-is, confirm this is intentional.
        if isinstance(other, FinderAnswer):
            a1, v1 = self.get_score()
            a2, v2 = other.get_score()
            return a1 <= a2
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, FinderAnswer):
            a1, v1 = self.get_score()
            a2, v2 = other.get_score()
            return a1 >= a2
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, FinderAnswer):
            a1, v1 = self.get_score()
            a2, v2 = other.get_score()
            if a1 > a2:
                return True
            elif a1 == a2:
                return v1 < v2
            else:
                return False
        return NotImplemented

    def __str__(self):
        a, v = self.get_score()
        # custom_mean is empty-safe (returns 0 for an empty list of averages).
        return f"({len(self.d.keys())} answers, avg={custom_mean(a)})"

    def __repr__(self):
        return self.__str__()

    def get_score(self):
        """Return ([mean score per key], [score variance per key])."""
        averages = [custom_mean([acc.get_score() for acc in arr]) for arr in self.d.values()]
        variances = [custom_var([acc.get_score() for acc in arr]) for arr in self.d.values()]
        return averages, variances
class FinderAnswerPaper:
    """An answer scored against a reference weight distribution ``alpha``.

    Score = m - L1 distance between ``alpha`` and the balance shares of the
    accounts in ``d`` (higher is better, m = number of alpha keys).

    Bug fix: the comparison operators previously tested
    ``isinstance(other, FinderAnswer)``, so two FinderAnswerPaper instances
    silently compared as ``None``; they now test FinderAnswerPaper and
    return ``NotImplemented`` for unsupported operand types.  Comparison
    against plain numbers is still supported.
    """

    def __init__(self, dictionary, alpha):
        self.d = dictionary    # key -> account-like object with a .balance
        self.alpha = alpha     # key -> expected balance fraction

    def __eq__(self, other):
        if isinstance(other, FinderAnswerPaper):
            return self.get_score() == other.get_score()
        elif isinstance(other, (int, float)):
            return self.get_score() == other
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, FinderAnswerPaper):
            return self.get_score() < other.get_score()
        elif isinstance(other, (int, float)):
            return self.get_score() < other
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, FinderAnswerPaper):
            return self.get_score() <= other.get_score()
        elif isinstance(other, (int, float)):
            return self.get_score() <= other
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, FinderAnswerPaper):
            return self.get_score() >= other.get_score()
        elif isinstance(other, (int, float)):
            return self.get_score() >= other
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, FinderAnswerPaper):
            return self.get_score() > other.get_score()
        elif isinstance(other, (int, float)):
            return self.get_score() > other
        return NotImplemented

    def __str__(self):
        return f"Answer {self.d.keys()} score: {self.get_score()}"

    def __repr__(self):
        return self.__str__()

    def get_score(self):
        """Return m minus the L1 distance between alpha and normalized balances."""
        m = len(self.alpha.keys())
        bsum = sum(a.balance for a in self.d.values())
        return m - sum(abs(self.alpha[key] - self.d[key].balance / bsum) for key in self.d.keys())
| StarcoderdataPython |
6671226 | <reponame>rnburn/authz-service
from flask import Flask, request
# Minimal Flask echo service used as an authz-service test backend.
app = Flask(__name__)


@app.route('/service')
def hello():
    """Debug endpoint: dump the incoming request headers to stdout and
    return a fixed greeting body."""
    print(request.headers)
    return 'Hello, Hello'


if __name__ == "__main__":
    # Listen on all interfaces, port 8080; debug mode disabled on purpose.
    app.run(host='0.0.0.0', port=8080, debug=False)
| StarcoderdataPython |
11303343 | <gh_stars>0
import numpy as np
import gym
from gym import wrappers
import tensorflow as tf
import json, sys, os
from os import path
import random
from collections import deque
#####################################################################################################
## Algorithm
# Deep Q-Networks (DQN)
# An off-policy action-value function based approach (Q-learning) that uses epsilon-greedy exploration
# to generate experiences (s, a, r, s'). It uses minibatches of these experiences from replay memory
# to update the Q-network's parameters.
# Neural networks are used for function approximation.
# A slowly-changing "target" Q network, as well as gradient norm clipping, are used to improve
# stability and encourage convergence.
# Parameter updates are made via Adam.
#####################################################################################################
## Setup

env_to_use = 'LunarLander-v2'

# hyperparameters
gamma = 0.99  # reward discount factor
h1 = 512  # hidden layer 1 size
h2 = 512  # hidden layer 2 size
h3 = 512  # hidden layer 3 size
lr = 5e-5  # learning rate
lr_decay = 1  # learning rate decay (per episode)
l2_reg = 1e-6  # L2 regularization factor
dropout = 0  # dropout rate (0 = no dropout)
num_episodes = 5000  # number of episodes
max_steps_ep = 10000  # default max number of steps per episode (unless env has a lower hardcoded limit)
slow_target_burnin = 1000  # number of steps where slow target weights are tied to current network weights
# NOTE(review): slow_target_burnin is recorded in `info` below but never read
# by the training loop -- confirm whether the burn-in was meant to be implemented.
update_slow_target_every = 100  # number of steps to use slow target as target before updating it to latest weights
train_every = 1  # number of steps to run the policy (and collect experience) before updating network weights
replay_memory_capacity = int(1e6)  # capacity of experience replay memory
minibatch_size = 1024  # size of minibatch from experience replay memory for updates
epsilon_start = 1.0  # probability of random action at start
epsilon_end = 0.05  # minimum probability of random action after linear decay period
epsilon_decay_length = 1e5  # number of steps over which to linearly decay epsilon
epsilon_decay_exp = 0.97  # exponential decay rate after reaching epsilon_end (per episode)
load_model = 1  # loads a checkpoint model if the value is 1

# game parameters
env = gym.make(env_to_use)
state_dim = np.prod(np.array(env.observation_space.shape))  # Get total number of dimensions in state
n_actions = env.action_space.n  # Assuming discrete action space

# set seeds to 0 (environment and numpy RNG) for reproducibility
env.seed(0)
np.random.seed(0)

# prepare monitorings
outdir = 'tmp/dqn-agent-results'
#env = wrappers.Monitor(env, outdir, force=True)
def writefile(fname, s):
    """Write string *s* to file *fname* inside the monitoring output directory."""
    with open(path.join(outdir, fname), 'w') as fh:
        fh.write(s)
# Run metadata; serialized to info.json at the end of training so the
# hyperparameters used for this run can be reproduced later.
info = {}
info['env_id'] = env.spec.id
info['params'] = dict(
    gamma=gamma,
    h1=h1,
    h2=h2,
    h3=h3,
    lr=lr,
    lr_decay=lr_decay,
    l2_reg=l2_reg,
    dropout=dropout,
    num_episodes=num_episodes,
    max_steps_ep=max_steps_ep,
    slow_target_burnin=slow_target_burnin,
    update_slow_target_every=update_slow_target_every,
    train_every=train_every,
    replay_memory_capacity=replay_memory_capacity,
    minibatch_size=minibatch_size,
    epsilon_start=epsilon_start,
    epsilon_end=epsilon_end,
    epsilon_decay_length=epsilon_decay_length,
    epsilon_decay_exp=epsilon_decay_exp
)
#####################################################################################################
## Tensorflow

# TensorFlow 1.x graph-mode API: build the whole computation graph first,
# then execute it through a Session below.
tf.reset_default_graph()

# placeholders
state_ph = tf.placeholder(dtype=tf.float32, shape=[None, state_dim])  # input to Q network
next_state_ph = tf.placeholder(dtype=tf.float32, shape=[None, state_dim])  # input to slow target network
action_ph = tf.placeholder(dtype=tf.int32, shape=[None])  # action indices (indices of Q network output)
reward_ph = tf.placeholder(dtype=tf.float32, shape=[None])  # rewards (go into target computation)
is_not_terminal_ph = tf.placeholder(dtype=tf.float32, shape=[None])  # indicators (go into target computation)
is_training_ph = tf.placeholder(dtype=tf.bool, shape=())  # for dropout

# episode counter (a graph variable so the lr-decay op below can read it)
episodes = tf.Variable(0.0, trainable=False, name='episodes')
episode_inc_op = episodes.assign_add(1)
def generate_network(s, trainable, reuse):
hidden = tf.layers.dense(s, h1, activation=tf.nn.relu, trainable=trainable, name='dense', reuse=reuse)
hidden_drop = tf.layers.dropout(hidden, rate=dropout, training=trainable & is_training_ph)
hidden_2 = tf.layers.dense(hidden_drop, h2, activation=tf.nn.relu, trainable=trainable, name='dense_1', reuse=reuse)
hidden_drop_2 = tf.layers.dropout(hidden_2, rate=dropout, training=trainable & is_training_ph)
hidden_3 = tf.layers.dense(hidden_drop_2, h3, activation=tf.nn.relu, trainable=trainable, name='dense_2',
reuse=reuse)
hidden_drop_3 = tf.layers.dropout(hidden_3, rate=dropout, training=trainable & is_training_ph)
action_values = tf.squeeze(
tf.layers.dense(hidden_drop_3, n_actions, trainable=trainable, name='dense_3', reuse=reuse))
return action_values
with tf.variable_scope('q_network') as scope:
    # Q network applied to state_ph
    q_action_values = generate_network(state_ph, trainable=True, reuse=False)
    # Q network applied to next_state_ph (for double Q learning)
    q_action_values_next = tf.stop_gradient(generate_network(next_state_ph, trainable=False, reuse=True))

# slow target network
with tf.variable_scope('slow_target_network', reuse=False):
    # use stop_gradient to treat the output values as constant targets when doing backprop
    slow_target_action_values = tf.stop_gradient(generate_network(next_state_ph, trainable=False, reuse=False))

# isolate vars for each network
q_network_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q_network')
slow_target_network_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='slow_target_network')

# update values for slowly-changing target network to match current critic network
# (relies on both collections listing variables in the same creation order)
update_slow_target_ops = []
for i, slow_target_var in enumerate(slow_target_network_vars):
    update_slow_target_op = slow_target_var.assign(q_network_vars[i])
    update_slow_target_ops.append(update_slow_target_op)
update_slow_target_op = tf.group(*update_slow_target_ops, name='update_slow_target')

# Q-learning targets y_i for (s,a) from experience replay
#   = r_i + gamma*Q_slow(s',argmax_{a}Q(s',a)) if s' is not terminal
#   = r_i if s' terminal
# Note that we're using Q_slow(s',argmax_{a}Q(s',a)) instead of max_{a}Q_slow(s',a)
# to address the maximization bias problem via Double Q-Learning
targets = reward_ph + is_not_terminal_ph * gamma * \
    tf.gather_nd(slow_target_action_values, tf.stack(
        (tf.range(minibatch_size), tf.cast(tf.argmax(q_action_values_next, axis=1), tf.int32)),
        axis=1))

# Estimated Q values for (s,a) from experience replay
estim_taken_action_vales = tf.gather_nd(q_action_values, tf.stack((tf.range(minibatch_size), action_ph), axis=1))

# loss function: MSE against the Double-Q targets, plus L2 on non-bias weights
loss = tf.reduce_mean(tf.square(targets - estim_taken_action_vales))
for var in q_network_vars:
    if not 'bias' in var.name:
        loss += l2_reg * 0.5 * tf.nn.l2_loss(var)

# optimizer (learning rate decays exponentially with the episode counter)
train_op = tf.train.AdamOptimizer(lr * lr_decay ** episodes).minimize(loss)

# saver
saver = tf.train.Saver()

# initialize session: either restore a checkpoint or start from scratch
sess = tf.Session()
if load_model:
    # NOTE(review): checkpoint path "tmp/mdl700.ckpt" is hard-coded -- confirm
    # it exists before running with load_model = 1.
    saver.restore(sess, "tmp/mdl700.ckpt")
    print("Model loaded.")
else:
    sess.run(tf.global_variables_initializer())
#####################################################################################################
## Training

total_steps = 0
experience = deque(maxlen=replay_memory_capacity)  # replay buffer of (s, a, r, s', not_terminal)

epsilon = epsilon_start
epsilon_linear_step = (epsilon_start - epsilon_end) / epsilon_decay_length

for ep in range(num_episodes):

    total_reward = 0
    steps_in_ep = 0

    # Initial state
    observation = env.reset()
    # env.render()

    for t in range(max_steps_ep):

        # choose action according to epsilon-greedy policy wrt Q
        if np.random.random() < epsilon:
            action = np.random.randint(n_actions)
        else:
            q_s = sess.run(q_action_values,
                           feed_dict={state_ph: observation[None], is_training_ph: False})
            action = np.argmax(q_s)

        # take step
        next_observation, reward, done, _info = env.step(action)
        # env.render()
        total_reward += reward

        # add this to experience replay buffer
        experience.append((observation, action, reward, next_observation,
                           # is next_observation a terminal state?
                           0.0 if done else 1.0))

        # update the slow target's weights to match the latest q network if it's time to do so
        if total_steps % update_slow_target_every == 0:
            _ = sess.run(update_slow_target_op)

        # update network weights to fit a minibatch of experience
        if total_steps % train_every == 0 and len(experience) >= minibatch_size:

            # grab N (s,a,r,s') tuples from experience
            minibatch = random.sample(experience, minibatch_size)

            # do a train_op with all the inputs required
            _ = sess.run(train_op,
                         feed_dict={
                             state_ph: np.asarray([elem[0] for elem in minibatch]),
                             action_ph: np.asarray([elem[1] for elem in minibatch]),
                             reward_ph: np.asarray([elem[2] for elem in minibatch]),
                             next_state_ph: np.asarray([elem[3] for elem in minibatch]),
                             is_not_terminal_ph: np.asarray([elem[4] for elem in minibatch]),
                             is_training_ph: True})

        observation = next_observation
        total_steps += 1
        steps_in_ep += 1

        # linearly decay epsilon from epsilon_start to epsilon_end over epsilon_decay_length steps
        if total_steps < epsilon_decay_length:
            epsilon -= epsilon_linear_step
        # then exponentially decay it every episode
        elif done:
            epsilon *= epsilon_decay_exp
        if total_steps == epsilon_decay_length:
            print(
                '--------------------------------MOVING TO EXPONENTIAL '
                'EPSILON DECAY-----------------------------------------')

        if done:
            # Increment episode counter
            _ = sess.run(episode_inc_op)
            break

    print('Episode %2i, Reward: %7.3f, Steps: %i, Next eps: %7.3f' % (ep, total_reward, steps_in_ep, epsilon))
    # periodic checkpoint every 100 episodes
    if ep % 100 == 0:
        save_path = saver.save(sess, "tmp/mdl" + str(ep) + ".ckpt")
        print("Model saved in path: %s" % save_path)

# Finalize and upload results
writefile('info.json', json.dumps(info))
env.close()
#gym.upload(outdir) | StarcoderdataPython |
4837353 | <filename>aioquant/utils/web.py
# -*- coding:utf-8 -*-
"""
Web module.
Author: HuangTao
Date: 2018/08/26
Email: <EMAIL>
"""
import json
import aiohttp
from urllib.parse import urlparse
from aioquant.utils import logger
from aioquant.configure import config
from aioquant.tasks import LoopRunTask, SingleTask
from aioquant.utils.decorator import async_method_locker
__all__ = ("AsyncHttpRequests", )
class AsyncHttpRequests(object):
    """ Asynchronous HTTP Request Client.

    Fix: the JSON-decode fallback in `fetch` used a bare `except:` which
    would also swallow KeyboardInterrupt/SystemExit; it is narrowed to
    `except Exception:`.
    """

    # Every domain name holds a connection session, for less system resource utilization and faster request speed.
    _SESSIONS = {}  # {"domain-name": session, ... }

    @classmethod
    async def fetch(cls, method, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
        """ Create a HTTP request.

        Args:
            method: HTTP request method. `GET` / `POST` / `PUT` / `DELETE`
            url: Request url.
            params: HTTP query params.
            body: HTTP request body, string or bytes format.
            data: HTTP request body, dict format.
            headers: HTTP request header.
            timeout: HTTP request timeout(seconds), default is 30s.
            kwargs:
                proxy: HTTP proxy.

        Return:
            code: HTTP response code.
            success: HTTP response data. If something wrong, this field is None.
            error: If something wrong, this field will holding a Error information, otherwise it's None.

        Raises:
            HTTP request exceptions or response data parse exceptions. All the exceptions will be captured and return
            Error information.
        """
        session = cls._get_session(url)
        if not kwargs.get("proxy"):
            kwargs["proxy"] = config.proxy  # If there is a `HTTP PROXY` Configuration in config file?
        try:
            if method == "GET":
                # NOTE: `body` and `data` are intentionally not sent on GET.
                response = await session.get(url, params=params, headers=headers, timeout=timeout, **kwargs)
            elif method == "POST":
                response = await session.post(url, params=params, data=body, json=data, headers=headers,
                                              timeout=timeout, **kwargs)
            elif method == "PUT":
                response = await session.put(url, params=params, data=body, json=data, headers=headers,
                                             timeout=timeout, **kwargs)
            elif method == "DELETE":
                response = await session.delete(url, params=params, data=body, json=data, headers=headers,
                                                timeout=timeout, **kwargs)
            else:
                error = "http method error!"
                return None, None, error
        except Exception as e:
            logger.error("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
                         "data:", data, "Error:", e, caller=cls)
            return None, None, e
        code = response.status
        if code not in (200, 201, 202, 203, 204, 205, 206):
            text = await response.text()
            logger.error("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
                         "data:", data, "code:", code, "result:", text, caller=cls)
            return code, None, text
        try:
            result = await response.json()
        except Exception:
            # Not a JSON payload (e.g. wrong content-type); fall back to raw text.
            result = await response.text()
            logger.debug("response data is not json format!", "method:", method, "url:", url, "headers:", headers,
                         "params:", params, "body:", body, "data:", data, "code:", code, "result:", result, caller=cls)
        logger.debug("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
                     "data:", data, "code:", code, "result:", json.dumps(result), caller=cls)
        return code, result, None

    @classmethod
    async def get(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
        """ HTTP GET
        """
        result = await cls.fetch("GET", url, params, body, data, headers, timeout, **kwargs)
        return result

    @classmethod
    async def post(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
        """ HTTP POST
        """
        result = await cls.fetch("POST", url, params, body, data, headers, timeout, **kwargs)
        return result

    @classmethod
    async def delete(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
        """ HTTP DELETE
        """
        result = await cls.fetch("DELETE", url, params, body, data, headers, timeout, **kwargs)
        return result

    @classmethod
    async def put(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
        """ HTTP PUT
        """
        result = await cls.fetch("PUT", url, params, body, data, headers, timeout, **kwargs)
        return result

    @classmethod
    def _get_session(cls, url):
        """ Get the connection session for url's domain, if no session, create a new.

        Args:
            url: HTTP request url.

        Returns:
            session: HTTP request session.
        """
        parsed_url = urlparse(url)
        key = parsed_url.netloc or parsed_url.hostname
        if key not in cls._SESSIONS:
            session = aiohttp.ClientSession()
            cls._SESSIONS[key] = session
        return cls._SESSIONS[key]
| StarcoderdataPython |
3311799 | <reponame>Cameron-D/gift-registry
# Generated by Django 3.2 on 2021-05-08 11:31
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Item.claim_count to Item.want_count (field rename only, no
    data transformation)."""

    dependencies = [
        ('registry', '0009_rename_og_desciption_item_og_description'),
    ]

    operations = [
        migrations.RenameField(
            model_name='item',
            old_name='claim_count',
            new_name='want_count',
        ),
    ]
| StarcoderdataPython |
4839273 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distances in a grid
# <NAME> et <NAME> - 2014-2015
from collections import deque
# snip{
def dist_grid(grid, source, target=None):
    """BFS exploration of a grid with 4-neighborhood.

    Marks the source cell 's' and every reachable free cell (' ') with the
    arrow of the move that discovered it ('>', '<', 'v', '^').  If *target*
    is given, exploration stops as soon as it is reached and the target
    cell is marked 't'.

    :param grid: matrix with 4-neighborhood
    :param (int,int) source: pair of row, column indices
    :param (int,int) target: exploration stops if target is reached
    :complexity: linear in grid size
    """
    nb_rows, nb_cols = len(grid), len(grid[0])
    # (row delta, column delta, arrow marking the move), tried in this order
    moves = [(0, +1, '>'), (0, -1, '<'), (+1, 0, 'v'), (-1, 0, '^')]
    src_i, src_j = source
    grid[src_i][src_j] = 's'
    to_visit = deque([source])
    while to_visit:
        cur_i, cur_j = to_visit.popleft()
        for delta_i, delta_j, symbol in moves:      # explore every direction
            nxt_i, nxt_j = cur_i + delta_i, cur_j + delta_j
            if not (0 <= nxt_i < nb_rows and 0 <= nxt_j < nb_cols):
                continue                            # outside the grid
            if grid[nxt_i][nxt_j] != ' ':
                continue                            # blocked or already visited
            grid[nxt_i][nxt_j] = symbol             # mark as visited
            if (nxt_i, nxt_j) == target:
                grid[nxt_i][nxt_j] = 't'            # goal reached
                return
            to_visit.append((nxt_i, nxt_j))
# snip}
| StarcoderdataPython |
9647511 | <filename>scripts/python/barraProgresoTerminal.py
# -*- coding: utf-8 -*-
from tqdm import tqdm # Requiere instalar la librería -> pip install tqdm
from time import sleep
# Number of simulated tasks so the terminal progress bar has work to display.
tareasQueRealizar = 100;
for i in tqdm(range(tareasQueRealizar)):
    sleep(0.2)  # simulate 0.2 s of work per task so the bar advances visibly
| StarcoderdataPython |
375078 | <filename>tkbuilder/widgets/widget_wrappers/radiobutton.py<gh_stars>0
from tkinter import ttk
from tkbuilder.widgets.widget_utils.widget_events import WidgetEvents
import tkinter
class RadioButton(tkinter.Radiobutton, WidgetEvents):
    """Classic Tk Radiobutton extended with the WidgetEvents mixin.

    Bug fix: the original __init__ called ttk.Radiobutton.__init__ even
    though this class derives from tkinter.Radiobutton, constructing the
    wrong (themed) widget; it now initializes the actual base class.
    """

    def __init__(self, master=None, **kw):
        tkinter.Radiobutton.__init__(self, master, **kw)
| StarcoderdataPython |
3533090 | <reponame>Bernardoviski/Sincroniza<gh_stars>0
# Sincroniza Web App - Por <NAME> | Desenvolvido como requisito para a Mostra Cientifica
import socket
import threading
from utils import *
from urllib.parse import unquote
from python_parser import pythonfier
content_dir = "web/"
class WebServer(object):
    """Minimal threaded HTTP server serving files from `content_dir`,
    with .html files run through the `pythonfier` template processor.

    Fixes relative to the original:
    * _headers(401) emitted the status line "HTTP/1.1 400 ..."; it now
      emits 401 as the status code requested.
    * the cookie-parsing loop in the GET branch iterated the cookie list
      twice via an accidental nested duplicate loop; reduced to one pass
      (same resulting dict).
    * the POST error branches called `.encode()` on the argument *and* on
      the result of htmlfy(), crashing with AttributeError; they now match
      the working GET branches (htmlfy(str).encode()).
    * bare `except:` narrowed to `except Exception:`.
    """

    def __init__(self, port):
        self.port = port
        self.host = socket.gethostbyname(socket.gethostname())
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def _headers(self, status, cookie=""):
        """Build the HTTP response header block for *status*; *cookie* is an
        optional extra header line (e.g. "Set-Cookie: ...")."""
        preset = f"\nServer: Sincroniza\nConnection: close\n\n"
        if cookie != "":
            preset = f"\nServer: Sincroniza\nConnection: close\n{cookie}\n\n"
        if status == 200:
            header = "HTTP/1.1 200 Response OK" + preset
        elif status == 401:
            header = "HTTP/1.1 401 Not Authorized." + preset
        elif status == 403:
            header = "HTTP/1.1 403 Permissions Required." + preset
        elif status == 404:
            header = "HTTP/1.1 404 Not Found." + preset
        else:
            header = "HTTP/1.1 500 Server Could Not Process the Request." + preset
        return header

    def _request_handler(self, type, body, addr):
        """Parse the raw request *body* and return the full response bytes.

        *type* is the HTTP method (parameter name kept for compatibility,
        although it shadows the builtin); *addr* is the client IP.
        """
        cookies = ""
        vars = {"cookies": {}, "url_params": {}, "ip": addr}
        # parse Cookie headers into vars["cookies"]
        for line in body.split("\n"):
            if line.startswith("Cookie:"):
                cook = line[8:].split("; ")
                for cokizinho in cook:
                    if cokizinho.endswith("\r"):
                        vars["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1][:-1]})
                    else:
                        vars["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1]})
        file = body.split(" ")[1].split("?")[0]
        try:
            for param in body.split(" ")[1].split("?")[1].split("&"):
                vars["url_params"].update({param.split("=")[0]: param.split("=")[1]})
        except Exception:
            pass  # no query string (or malformed) -- leave url_params empty
        file = content_dir + file
        if type in ["GET", "HEAD"]:
            if file == content_dir + "/": file = content_dir + "index.html"
            try:
                file_contents = htmlfy(open(file, "rb").read())
                if file.endswith(".html"): cookies, file_contents = pythonfier(file_contents.decode(), vars)
                return self._headers(200, cookies).encode() + file_contents
            except FileNotFoundError:
                return self._headers(
                    404).encode() + b"<html><head><title>UC | 404</title></head><body><center><h1>Erro 404</h1></center></body></html>"
            except OSError:
                return self._headers(403).encode() + htmlfy(
                    f"<html><head><title>UC | 403</title></head><body><center><h1>Erro 403</h1><br><p>Esta página é restrita.</p></center></body></html>").encode()
            except Exception as e:
                return self._headers(500).encode() + htmlfy(
                    f"<html><head><title>UC | 500</title></head><body><center><h1>Erro 500</h1><br><p>Um erro occoreu no servidor. detalhes:<br>{e}</p></center></body></html>").encode()
        elif type == "POST":
            values = {"cookies": {}, "ip": addr}
            for line in body.split("\n"):
                if line.startswith("Cookie:"):
                    cook = line[8:].split("; ")
                    for cokizinho in cook:
                        if cokizinho.endswith("\r"):
                            values["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1][:-1]})
                        else:
                            values["cookies"].update({cokizinho.split("=")[0]: cokizinho.split("=")[1]})
            try:
                # form body is the last line of the request, url-encoded
                for value in unquote(body.split("\n")[-1]).split("&"):
                    values.update({value.split("=")[0]: value.split("=")[1]})
            except Exception as e:
                print(e)
            if file == content_dir + "/": file = content_dir + "index.html"
            try:
                file_contents = htmlfy(open(file, "rb").read())
                if file.endswith(".html"): cookies, file_contents = pythonfier(file_contents.decode("utf-8"), values)
                return self._headers(200, cookies).encode() + file_contents
            except FileNotFoundError:
                return self._headers(
                    404).encode() + b"<html><head><title>UC | 404</title></head><body><center><h1>Erro 404</h1></center></body></html>"
            except OSError:
                return self._headers(403).encode() + htmlfy(
                    f"<html><head><title>UC | 403</title></head><body><center><h1>Erro 403</h1><br><p>Esta página é restrita.</p></center></body></html>").encode()
            except Exception as e:
                return self._headers(500).encode() + htmlfy(
                    f"<html><head><title>UC | 500</title></head><body><center><h1>Erro 500</h1><br><p>Um erro occoreu no servidor. detalhes:<br>{e}</p></center></body></html>").encode()
        # unknown method: echo the request back (original behaviour)
        return self._headers(200).encode() + body.encode()

    def _handler(self, client, addr):
        """Per-connection thread body: read one request, respond, close."""
        while True:
            data = client.recv(1000024)
            if not data: break
            try:
                data = data.decode('utf-8')
            except Exception as e:
                print("[WEB] Unknown")
                client.close()
                break
            method = data.split(" ")[0]
            response = self._request_handler(method, data, addr[0])
            client.send(response)
            client.close()
            break

    def start(self):
        """Bind the listening socket and enter the accept loop (blocks)."""
        try:
            print(f"[WEB] Binding to {self.host}:{self.port}")
            self.socket.bind((self.host, self.port))
            print("[WEB] Binded.")
        except Exception as e:
            self.socket.close()
            print(f"[WEB] Failed to bind. {e}")
            exit()
        self._listener()

    def _listener(self):
        """Accept connections forever, one handler thread per client."""
        self.socket.listen(5)
        while True:
            (client, addr) = self.socket.accept()
            client.settimeout(60)
            print(f"[WEB] Recieved incoming connection. {addr}")
            print("[WEB] Starting Handler Thread")
            threading.Thread(target=self._handler, args=(client, addr)).start()
print("[LOG] Hello from Jão!")
while True:
print("[LOG] Starting WEB")
WebServer(80).start()
| StarcoderdataPython |
1820866 | GOTO_URL = "https://ilias-app2.let.ethz.ch/goto.php?target=fold_"
LOGIN_URL = "https://ilias-app2.let.ethz.ch/shib_login.php"
IDP_DATA = {
"user_idp": "https://aai-logon.ethz.ch/idp/shibboleth",
"Select": "Auswählen",
}
| StarcoderdataPython |
119188 | <reponame>michaeldavie/pyinsteon<gh_stars>10-100
"""Get Device Info command handler."""
from ...address import Address
from ...topics import ENTER_UNLINKING_MODE
from .direct_command import DirectCommandHandlerBase
class EnterUnlinkingModeCommand(DirectCommandHandlerBase):
    """Place a device in unlinking mode command handler."""

    def __init__(self, address: Address):
        """Init the EnterUnlinkingModeCommand class with the target device address."""
        super().__init__(topic=ENTER_UNLINKING_MODE, address=address)

    # pylint: disable=arguments-differ
    async def async_send(self, group: int = 0):
        """Send the ENTER_UNLINKING_MODE request asynchronously for *group* (default 0)."""
        return await super().async_send(group=group)
| StarcoderdataPython |
11389524 | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from pathlib import Path
import re
import os
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
from sequana.lazy import numpy as np
from matplotlib_venn import venn2_unweighted, venn3_unweighted
from sequana.rnadiff import RNADiffTable
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["RNADiffCompare"]
class Compare():
    """Base class for comparison helpers (no shared behaviour yet)."""

    def __init__(self):
        pass
class RNADiffCompare(Compare):
""" An object representation of results coming from a RNADiff analysis.
::
from sequana.compare import RNADiffCompare
c = RNADiffCompare("data.csv", "data2.csv")
"""
    def __init__(self, r1, r2, r3=None, design=None):
        """Store the two RNADiff results to compare.

        :param r1: first result -- an RNADiffTable instance or a path to a CSV file
        :param r2: second result -- same accepted types as *r1*
        :param r3: currently unused (placeholder for a three-way comparison)
        :param design: currently unused
        :raises NotImplementedError: if an argument is neither an
            RNADiffTable nor an existing file path
        """
        if isinstance(r1, RNADiffTable):
            self.r1 = r1
        elif os.path.exists(r1):
            self.r1 = RNADiffTable(r1)
        else:
            raise NotImplementedError

        if isinstance(r2, RNADiffTable):
            self.r2 = r2
        elif os.path.exists(r2):
            self.r2 = RNADiffTable(r2)
        else:
            raise NotImplementedError
    def plot_venn_down(self, labels=None, ax=None,
                       title="Down expressed genes", mode="all"):
        """Venn diagram of the down-regulated genes of the two results.

        *mode* is accepted for API symmetry with the other venn methods
        but is not used here.
        """
        kargs = {}
        kargs['title'] = title
        kargs['labels'] = labels
        kargs['ax'] = ax
        kargs['data1'] = set(self.r1.gene_lists['down'])
        kargs['data2'] = set(self.r2.gene_lists['down'])
        self._venn(**kargs)
    def plot_venn_up(self, labels=None, ax=None,
                     title="Up expressed genes", mode="all"):
        """Venn diagram of cond1 from RNADiff result1 vs cond2 in RNADiff
        result 2 (up-regulated genes).  *mode* is accepted for API symmetry
        but not used.

        .. plot::
            :include-source:

            from sequana import sequana_data
            from sequana.compare import RNADiffCompare
            c = RNADiffCompare(
                sequana_data("rnadiff_salmon.csv", "doc/rnadiff_compare"),
                sequana_data("rnadiff_bowtie.csv", "doc/rnadiff_compare")
            )
            c.plot_venn_up()
        """
        kargs = {}
        kargs['title'] = title
        kargs['labels'] = labels
        kargs['ax'] = ax
        kargs['data1'] = set(self.r1.gene_lists['up'])
        kargs['data2'] = set(self.r2.gene_lists['up'])
        self._venn(**kargs)
    def _venn(self, data1, data2, labels=None,
              ax=None, title="expressed genes"):
        """Draw a two-set Venn diagram of *data1* vs *data2*.

        Labels default to ["A", "B"] when not provided.
        """
        from sequana.viz.venn import plot_venn
        if labels is None:
            labels = ["A", "B"]
        plot_venn([data1, data2],
                  labels=labels, ax=ax, title=title)
    def plot_venn_all(self, labels=None, ax=None,
                      title="all expressed genes", mode="all"):
        """Venn diagram of all differentially expressed genes of the two
        results.  *mode* is accepted for API symmetry but not used here.
        """
        kargs = {}
        kargs['title'] = title
        kargs['labels'] = labels
        kargs['ax'] = ax
        kargs['data1'] = set(self.r1.gene_lists['all'])
        kargs['data2'] = set(self.r2.gene_lists['all'])
        self._venn(**kargs)
    def plot_corrplot_counts_raw(self, samples=None, log2=True, lower='pie', upper='text'):
        """Correlation plot of the raw counts of the two results.

        :param samples: columns to compare; defaults to all columns of r1's raw counts
        :param log2: apply a log2 transform before correlating
        :param lower, upper: cell styles forwarded to Corrplot.plot
        :return: the correlation matrix of the concatenated (r1, r2) counts
        """
        from sequana.viz import corrplot
        if samples is None:
            samples = self.r1.counts_raw.columns
        df1 = self.r1.counts_raw[samples]
        df2 = self.r2.counts_raw[samples]
        df = pd.concat([df1, df2], keys=['r1', 'r2'], axis=1)
        if log2:
            df = pylab.log2(df)
        c = corrplot.Corrplot(df).plot(upper=upper, lower=lower)
        return df.corr()
    def plot_corrplot_counts_normed(self, samples=None, log2=True, lower='pie', upper='text'):
        """Correlation plot of the normalized counts of the two results.

        :param samples: columns to compare; defaults to all columns of r1's RAW counts
        :param log2: apply a log2 transform before correlating
        :param lower, upper: cell styles forwarded to Corrplot.plot
        :return: the correlation matrix of the concatenated (r1, r2) counts

        NOTE(review): the default *samples* comes from counts_raw.columns but
        counts_norm is indexed -- confirm both tables share the same columns.
        """
        from sequana.viz import corrplot
        if samples is None:
            samples = self.r1.counts_raw.columns
        df1 = self.r1.counts_norm[samples]
        df2 = self.r2.counts_norm[samples]
        df = pd.concat([df1, df2], keys=['r1', 'r2'], axis=1)
        if log2:
            df = pylab.log2(df)
        c = corrplot.Corrplot(df).plot(upper=upper, lower=lower)
        return df.corr()
def plot_jaccard_distance(self, mode, padjs=[0.0001,0.001,0.01,0.05,0.1],
Nfc=50, smooth=False, window=5):
assert mode in ['down', 'up', 'all']
pylab.clf()
if mode == "down":
m1 = self.r1.df.log2FoldChange.min()
m2 = self.r2.df.log2FoldChange.min()
minimum = min(m1,m2)
print(m1, m2)
X = pylab.linspace(0, minimum, Nfc)
elif mode == "up":
m1 = self.r1.df.log2FoldChange.max()
m2 = self.r2.df.log2FoldChange.max()
maximum = max(m1,m2)
X = pylab.linspace(0, maximum, Nfc)
else:
minmax1 = self.r1.df.log2FoldChange.abs().max()
minmax2 = self.r2.df.log2FoldChange.abs().max()
maximum = max(minmax1, minmax2)
X = pylab.linspace(0, maximum, Nfc)
common = {}
for padj in padjs:
I = []
common[padj] = []
for x in X:
if mode == "down":
# less than a given fold change that is negative
A = set(self.r1.df.query("log2FoldChange<=@x and padj<@padj").index)
B = set(self.r2.df.query("log2FoldChange<=@x and padj<@padj").index)
elif mode == "up":
# greater than a given fold change that is positive
A = set(self.r1.df.query("log2FoldChange>=@x and padj<@padj").index)
B = set(self.r2.df.query("log2FoldChange>=@x and padj<@padj").index)
else:
A = set(self.r1.df.query("(log2FoldChange>=@x or log2FoldChange<=-@x) and padj<@padj").index)
B = set(self.r2.df.query("(log2FoldChange>=@x or log2FoldChange<=-@x) and padj<@padj").index)
if len(A) == 0 or len(B) == 0:
# no overlap yet
I.append(100)
else:
res = len(A.intersection(B)) / (len(A) + len(B) - len(A.intersection(B))) * 100
I.append(res)
common[padj].append(len(A.intersection(B)))
try:
if smooth:
I = pd.Series(I).rolling(window).median().values
else:
assert False
except:
pass
pylab.plot(X, I, 'o-', label=str(padj))
ax = pylab.gca()
ax.set_ylabel("Jaccard similarity (intersection/union)")
ax.set_xlabel("Fold change (log2)")
ax2 = ax.twinx()
for padj in padjs:
ax2.plot(X, common[padj], color='orange', ls='--')
ax2.set_ylabel("Cardinality of the union ")
ax.legend()
ax.set_ylim([0,100])
#ax2.set_ylim([0,100])
if mode == "down":
ax.axvline(-2, ls='--', color='r')
else:
ax.axvline(2, ls='--', color='r')
def plot_common_major_counts(self, mode, labels=None,
switch_up_down_cond2=False, add_venn=True, xmax=None,
title="", fontsize=12, sortby="log2FoldChange"):
"""
:param mode: down, up or all
.. plot::
:include-source:
from sequana import sequana_data
from sequana.compare import RNADiffCompare
c = RNADiffCompare(
sequana_data("rnadiff_salmon.csv", "doc/rnadiff_compare"),
sequana_data("rnadiff_bowtie.csv", "doc/rnadiff_compare")
)
c.plot_common_major_counts("down")
"""
#cond1, cond2 = self._get_cond1_cond2()
if labels is None:
labels = ['r1', 'r2']
if mode in ["down"]:
# Negative values !
gl1 = set(self.r1.gene_lists['down'])
gl2 = set(self.r2.gene_lists['down'])
A = self.r1.df.loc[gl1].sort_values(by=sortby)
B = self.r2.df.loc[gl1].sort_values(by=sortby)
else:
gl1 = set(self.r1.gene_lists[mode])
gl2 = set(self.r2.gene_lists[mode])
A = self.r1.df.loc[gl1].sort_values(by=sortby, ascending=False)
B = self.r2.df.loc[gl1].sort_values(by=sortby, ascending=False)
# sometimes, up and down may be inverted as compared to the other
# conditions
N = []
for i in range(1,max(len(A), len(B))):
a = A.iloc[0:i].index
b = B.iloc[0:i].index
n = len(set(b).intersection(set(a)))
N.append(n / i*100)
max_common = len(set(A.index).intersection(set(B.index)))
pylab.clf()
if len(A) > len(B):
pylab.axhline(max_common/len(A)*100, color="r", ls='--', label="min set intersection")
pylab.axvline(len(B), ls="--", color="k", label="rank of minor set")
else:
pylab.axhline(max_common/len(B)*100, color='r', ls='--', label="min set intersect")
pylab.axvline(len(A), ls="--", color="k", label="rank of minor set")
pylab.plot(N)
pylab.xlabel('rank', fontsize=fontsize)
pylab.ylabel('% common features', fontsize=fontsize)
pylab.grid(True)
pylab.ylim([0,100])
if xmax:
pylab.xlim([0, xmax])
else:
pylab.xlim([0, max(len(A),len(B))])
pylab.title(title, fontsize=fontsize)
ax = pylab.gca()
ax2 = ax.twinx()
ax2.plot(A[sortby].values, "orange", label=sortby)
ax2.set_ylabel(sortby)
pylab.legend(loc="lower left")
ax.legend(loc="lower right")
if add_venn:
f = pylab.gcf()
ax = f.add_axes([0.5,0.5,0.35,0.35], facecolor="grey")
if mode=="down":
self.plot_venn_down(ax=ax, title=None, labels=labels,
mode="two_only")
elif mode=="up":
self.plot_venn_up(ax=ax, title=None, labels=labels,
mode="two_only")
elif mode=="all":
self.plot_venn_all(ax=ax, title=None, labels=labels,
mode="two_only")
def plot_foldchange(self):
mode = "all"
# it may happen that list are not identical due to salmon and bowtie not
# using same input gff for instance.
X = self.r1.df.index
Y = self.r2.df.index
common = list(set(X).intersection(set(Y)))
A = self.r1.df.loc[self.r1.gene_lists[mode]]
B = self.r2.df.loc[self.r2.gene_lists[mode]]
AB = set(A.index).intersection(set(B.index))
Ao = A.loc[set(A.index).difference(set(B.index))]
Bo = B.loc[set(B.index).difference(set(A.index))]
Ac = A.loc[AB]
Bc = B.loc[AB]
pylab.plot(self.r1.df.loc[common].log2FoldChange,
self.r2.df.loc[common].log2FoldChange, 'ko', alpha=0.5, markersize=1)
pylab.plot(Ac.log2FoldChange, Bc.log2FoldChange, 'or', alpha=0.5)
pylab.plot(Ao.log2FoldChange, self.r2.df.loc[Ao.index].log2FoldChange, '*b', alpha=0.5)
pylab.plot(Bo.log2FoldChange, self.r1.df.loc[Bo.index].log2FoldChange,
color='cyan', marker="o", lw=0, alpha=0.5)
    def plot_volcano_differences(self, mode="all"):
        """Volcano plot highlighting the differences between the two analyses.

        Genes found by both analyses are plotted twice (red / orange, one per
        experiment) and joined by a black segment; genes found by only one
        analysis are plotted as stars and joined by a red segment to their
        position in the other analysis' full table.

        :param mode: key into each result's gene_lists ('all' by default)
        :return: tuple (Aonly, Bonly, Acommon, Bcommon) of DataFrames
        """
        cond1, cond2 = "cond1", "cond2"
        labels = [cond1, cond2]
        # Selected genes of each analysis for the requested mode.
        A = self.r1.df.loc[self.r1.gene_lists[mode]]
        B = self.r2.df.loc[self.r2.gene_lists[mode]]
        # Split into common / exclusive index sets.
        AB = set(A.index).intersection(set(B.index))
        Aonly = A.loc[set(A.index).difference(set(B.index))]
        Bonly = B.loc[set(B.index).difference(set(A.index))]
        Acommon = A.loc[AB]
        Bcommon = B.loc[AB]
        pylab.clf()
        pylab.plot(Acommon.log2FoldChange, -np.log10(Acommon.padj), marker="o",
            alpha=0.5, color="r", lw=0, label="Common in experiment 1", pickradius=4,
            picker=True)
        pylab.plot(Bcommon.log2FoldChange, -np.log10(Bcommon.padj), marker="o",
            alpha=0.5, color="orange", lw=0, label="Common in experiment 2", pickradius=4,
            picker=True)
        # Connect each common gene's two positions with a black segment.
        for x in AB:
            a_l = A.loc[x].log2FoldChange
            a_p = -np.log10(A.loc[x].padj)
            b_l = B.loc[x].log2FoldChange
            b_p = -np.log10(B.loc[x].padj)
            pylab.plot([a_l, b_l], [a_p, b_p], 'k', alpha=0.5)
        pylab.plot(Bonly.log2FoldChange, -np.log10(Bonly.padj), marker="*",
            alpha=0.5, color="blue", lw=0, label="In experiment 2 only", pickradius=4,
            picker=True)
        pylab.plot(Aonly.log2FoldChange, -np.log10(Aonly.padj), marker="*",
            alpha=0.5, color="cyan", lw=0, label="In experiment 1 only", pickradius=4,
            picker=True)
        # For exclusive genes, draw a segment to the gene's location in the
        # *other* analysis' full table (assumes the gene exists there --
        # this raises KeyError otherwise; TODO confirm).
        for name, x in Bonly.iterrows():
            x1 = x.log2FoldChange
            y1 = -np.log10(x.padj)
            x2 = self.r1.df.loc[name].log2FoldChange
            y2 = -np.log10(self.r1.df.loc[name].padj)
            pylab.plot( [x1,x2], [y1,y2], ls="--", color='r')
        for name, x in Aonly.iterrows():
            x1 = x.log2FoldChange
            y1 = -np.log10(x.padj)
            x2 = self.r2.df.loc[name].log2FoldChange
            y2 = -np.log10(self.r2.df.loc[name].padj)
            pylab.plot( [x1,x2], [y1,y2], ls="-", color='r')
        # 1.33 = -log10(0.05), i.e. the usual significance threshold line.
        pylab.axhline(1.33, alpha=0.5, ls="--", color="r")
        pylab.xlabel("log2 fold Change")
        pylab.ylabel("log10 adjusted p-values")
        pylab.legend()
        pylab.grid(True)
        return Aonly, Bonly, Acommon, Bcommon
    def plot_volcano(self, labels=None):
        """Volcano plot of log2 fold change versus log10 of adjusted p-value

        Clicking on a point (pick event) updates the axes title with the
        gene name and its coordinates in both experiments.

        :param labels: legend labels for the two experiments; defaults to
            ["cond1(1)", "cond2(2)"]-style names.

        .. plot::
            :include-source:
            from sequana import sequana_data
            from sequana.compare import RNADiffCompare
            c = RNADiffCompare(
                sequana_data("rnadiff_salmon.csv", "doc/rnadiff_compare"),
                sequana_data("rnadiff_bowtie.csv", "doc/rnadiff_compare")
            )
            c.plot_volcano()
        """
        cond1, cond2 = "cond1", "cond2"
        if labels is None:
            labels = [cond1, cond2]
        A = self.r1.df.loc[self.r1.gene_lists["all"]]
        B = self.r2.df.loc[self.r2.gene_lists["all"]]
        # NOTE(review): cond1 == cond2 is always True here (both are fixed
        # strings), so the suffixes below are always appended.
        if cond1 == cond2:
            cond1 += "(1)"
            cond2 += "(2)"
        pylab.clf()
        pylab.plot(A.log2FoldChange, -np.log10(A.padj), marker="o",
            alpha=0.5, color="r", lw=0, label=labels[0], pickradius=4,
            picker=True)
        pylab.plot(B.log2FoldChange, -np.log10(B.padj), marker="x",
            alpha=0.5, color="k", lw=0, label=labels[1], pickradius=4,
            picker=True)
        # NOTE(review): `genes` is currently unused.
        genes = list(A.index) + list(B.index)
        pylab.grid(True)
        pylab.xlabel("fold change")
        pylab.ylabel("log10 adjusted p-value")
        pylab.legend(loc="lower right")
        ax = pylab.gca()

        def onpick(event):
            # Pick-event callback: identify the clicked gene and display its
            # coordinates from both experiments in the axes title.
            thisline = event.artist
            self.event = event
            label = thisline.get_label()
            if label == cond1:
                gene_name = A.index[event.ind[0]]
                x1 = round(A.loc[gene_name].log2FoldChange,1)
                y1 = round(-np.log10(A.loc[gene_name].padj),1)
                try:
                    x2 = round(B.loc[gene_name].log2FoldChange,1)
                    y2 = round(-np.log10(B.loc[gene_name].padj),1)
                except:
                    # gene absent from the other experiment
                    x2, y2 = None, None
            else:
                gene_name = B.index[event.ind[0]]
                x1 = round(B.loc[gene_name].log2FoldChange,1)
                y1 = round(-np.log10(B.loc[gene_name].padj),1)
                try:
                    x2 = round(A.loc[gene_name].log2FoldChange,1)
                    y2 = round(-np.log10(A.loc[gene_name].padj),1)
                except:
                    x2, y2 = None, None
            try:
                if x2 is None:
                    ax.title.set_text("{} at pos [{},{}]".format(
                            gene_name,x1,y1))
                else:
                    ax.title.set_text("{} at pos [{},{}] and [{},{}]".format(
                            gene_name,x1,y1,x2,y2))
            except:
                print("exception")
                ax.title.set_text("")
            pylab.draw()

        fig = pylab.gcf()
        fig.canvas.mpl_connect('pick_event', onpick)
| StarcoderdataPython |
8099798 | from aws_mock.lib import get_aws_mock_db, aws_response
from aws_mock.predefined import MASTER_REGION_NAME, MASTER_REGION_IMAGE
@aws_response
def describe_images(region_name: str) -> dict:
    """Return the AMI records registered for *region_name*.

    The master region is served from the static predefined image; any other
    region is looked up in the mock database's ``ami`` collection.
    """
    if region_name == MASTER_REGION_NAME:
        return {"items": [MASTER_REGION_IMAGE]}
    amis = get_aws_mock_db()["ami"].find({"region_name": region_name})
    return {"items": list(amis)}
| StarcoderdataPython |
3524696 | <reponame>iamwillbar/home-assistant-deako<filename>components/deako/__init__.py
"""The Deako component."""
| StarcoderdataPython |
3258065 | <gh_stars>0
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute, BooleanAttribute
import os
class ProductNotificationTable(Model):
    """DynamoDB table recording whether an in-stock notification for a
    product has already been sent out.
    """

    class Meta:
        table_name = "in-stock-notifications"
        region = "us-east-1"
        # Point at the local DynamoDB container during development.
        if os.environ.get("DEV", "").lower() == "true":
            host = "http://host.docker.internal:8000"

    # product_name and url are informational only (handy when debugging).
    product_id = UnicodeAttribute(hash_key=True, attr_name="product_id")
    product_name = UnicodeAttribute(attr_name="product_name")
    url = UnicodeAttribute(attr_name="url")
    is_sent = BooleanAttribute(default=False, attr_name="is_sent")
| StarcoderdataPython |
1631843 | <reponame>danielballan/amostra<filename>amostra/test/test_basic.py
import pytest
from amostra.testing import _baseSM, TESTING_CONFIG
from amostra.client.commands import SampleReference
from io import StringIO
import json
import yaml
class TestBasicSampleRef(_baseSM):
    """Basic round-trip tests for SampleReference against a test server.

    NOTE(review): most of this test class is commented out (legacy
    nose-style generator tests); only test_disk_round_trip runs, and its
    inner _helper is never called, so the test currently only exercises
    construction of the db object.
    """
    db_class = SampleReference
    args = ()
    kwargs = {}
    # Point the class at the testing server before any instance is created.
    db_class.host = TESTING_CONFIG['host']
    db_class.port = TESTING_CONFIG['port']

    def test_disk_round_trip(self):
        # Instantiate the reference store; tt is an in-memory buffer that
        # was used by the (now commented-out) export/import round-trip.
        db = self.db_class(*self.args, **self.kwargs)
        tt = StringIO()

        def _helper(doc_list):
            # Rebuild a store from an exported document list and check every
            # document can be found again by uid. Currently unused -- TODO
            # re-enable the round-trip below or remove.
            db2 = SampleReference(doc_list)
            for doc in db.find():
                doc_id = doc['uid']
                rl_doc = db2.find(uid=doc_id)
                assert doc == rl_doc
        # for exp, imp in ((db.dump_to_json, json.loads),
        #                  (db.dump_to_yaml, yaml.load)):
        #     tt = StringIO()
        #     exp(tt)
        #     rt = imp(tt.getvalue())
        #     yield _helper, rt
        #
        # def test_constructor_fails(self):
        #     def _helper(rt, excp):
        #         with pytest.raises(excp):
        #             SampleReference(rt)
        #
        #     rt_lst = [[{'uid': '123', 'name': 'foo'},
        #                {'uid': '123', 'name': 'bar'}],
        #               [{'uid': '123', 'name': 'foo'},
        #                {'uid': '456', 'name': 'foo'}]]
        #     for rt in rt_lst:
        #         yield _helper, rt, ValueError
        #     yield _helper, [{'uid': '123'}], KeyError
12819289 | <gh_stars>1-10
"""CODING GUIDELINES:
- Add docstrings following the pattern chosen by the community.
- Add comments explaining step by step how your method works and the purpose of it.
- If possible, add examples showing how to call them properly.
- Remember to add the parameters and return types.
- Add unit tests / integration tests for every feature that you develop in order to cover at least 80% of the code.
- Import order : python std libraries, extendend libs, internal source code.
"""
# stdlib
import logging
import os
# third party
import config
# Extended Python imports
from flask import Flask
from flask_cors import CORS
from geventwebsocket.websocket import Header
from main.core.node import create_domain_app
from main.routes import association_requests_blueprint # noqa: 401
from main.routes import dcfl_blueprint # noqa: 401
from main.routes import groups_blueprint # noqa: 401
from main.routes import roles_blueprint # noqa: 401
from main.routes import root_blueprint # noqa: 401
from main.routes import setup_blueprint # noqa: 401
from main.routes import users_blueprint # noqa: 401
# Internal imports
from main.utils.monkey_patch import mask_payload_fast
# Insecure placeholder -- callers of create_app() should pass a real secret
# key in production.
DEFAULT_SECRET_KEY = "justasecretkeythatishouldputhere"
# Masking/Unmasking is a process used to guarantee some level of security
# during the transportation of the messages across proxies (as described in WebSocket RFC).
# Since the masking process needs to iterate over the message payload,
# the larger this message is, the longer it takes to process it.
# The flask_sockets / gevent-websocket developed this process using only native language structures.
# Replacing these structures for NumPy structures should increase the performance.
# The same function is installed for both directions -- presumably because
# WebSocket masking is a symmetric XOR transform; TODO confirm.
Header.mask_payload = mask_payload_fast
Header.unmask_payload = mask_payload_fast
# Configure root logging: DEBUG level, PID embedded in the format string.
logging.basicConfig(
    level=logging.DEBUG,
    format="[%(asctime)s]: {} %(levelname)s %(message)s".format(os.getpid()),
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger()
def create_app(
    args,
    secret_key=DEFAULT_SECRET_KEY,
    debug=False,
    testing=False,
) -> Flask:
    """Create a new Flask app instance and attach the domain blueprints.

    PS: In order to keep modularity and reuse, do not add any PyGrid logic
    here; this method should be as logic agnostic as possible.

    :param args: startup arguments forwarded to the domain app builder.
    :param secret_key: Flask session secret; override the insecure default
        in production.
    :param debug: enable Flask debug mode.
    :param testing: build the domain app in testing mode.
    :return: returns a Flask app instance.
    :rtype: Flask
    """
    # Lazy %-style args: the message is only formatted if actually emitted.
    logger.info("Starting app in %s environment", config.APP_ENV)
    # Create Flask app instance
    app = Flask(__name__)
    app.config.from_object("config")
    # Create Domain APP (attaches routes/blueprints)
    app = create_domain_app(app=app, args=args, testing=testing)
    CORS(app)
    app.debug = debug
    app.config["SECRET_KEY"] = secret_key
    # Send app instance
    logger.info("App started in %s environment", config.APP_ENV)
    return app
| StarcoderdataPython |
8148407 | <filename>{{cookiecutter.project_slug}}/backend/app/app/tests/api/api_v1/user/test_user.py<gh_stars>10-100
# Standard library packages
import random
# Installed packages
import requests
# App code
from app.tests.utils.utils import random_lower_string, get_server_api
from app.tests.utils.user import user_authentication_headers
from app.core import config
from app.db.utils import (
create_or_get_user,
get_user,
get_database_for_user,
get_database_id_for_user,
)
def test_get_users_superuser_me(superuser_token_headers):
    """The superuser's own profile is active and carries the superuser role."""
    server_api = get_server_api()
    response = requests.get(
        f"{server_api}{config.API_V1_STR}/users/me", headers=superuser_token_headers
    )
    current_user = response.json()
    assert current_user
    assert "active" in current_user["roles"]
    assert "superuser" in current_user["roles"]
    assert current_user["name"] == config.FIRST_SUPERUSER
def test_create_user_new_email(superuser_token_headers):
    """Creating a new user succeeds and provisions a per-user database."""
    server_api = get_server_api()
    username = random_lower_string()
    # Fix: restore the credential helper; the source contained a redaction
    # placeholder (`<PASSWORD>()`) which is a syntax error.
    password = random_lower_string()
    data = {"username": username, "password": password}
    r = requests.post(
        f"{server_api}{config.API_V1_STR}/users/",
        headers=superuser_token_headers,
        json=data,
    )
    assert 200 <= r.status_code < 300
    created_user = r.json()
    user = get_user(username)
    for key in created_user:
        assert user[key] == created_user[key]
    # The per-user database must exist, be administered by the superuser
    # role, and have the new user as its only member.
    db = get_database_for_user(username)
    assert db.exists()
    sd = db.get_security_document()
    assert len(sd["admins"]["roles"]) == 1
    assert "superuser" in sd["admins"]["roles"]
    assert len(sd["members"]["names"]) == 1
    assert username in sd["members"]["names"]
def test_get_existing_user(superuser_token_headers):
    """Fetching an existing user returns the stored user document."""
    server_api = get_server_api()
    username = random_lower_string()
    # Fix: restore the credential helper; the source contained a redaction
    # placeholder (`<PASSWORD>()`) which is a syntax error.
    password = random_lower_string()
    user = create_or_get_user(username, password)
    r = requests.get(
        f"{server_api}{config.API_V1_STR}/users/{username}",
        headers=superuser_token_headers,
    )
    assert 200 <= r.status_code < 300
    api_user = r.json()
    for key in api_user:
        assert user[key] == api_user[key]
def test_create_user_existing_username(superuser_token_headers):
    """Creating a user whose username already exists must fail with 400."""
    server_api = get_server_api()
    username = random_lower_string()
    # username = email
    # Fix: restore the credential helper; the source contained a redaction
    # placeholder (`<PASSWORD>()`) which is a syntax error.
    password = random_lower_string()
    user = create_or_get_user(username, password)  # noqa
    data = {"username": username, "password": password}
    r = requests.post(
        f"{server_api}{config.API_V1_STR}/users/",
        headers=superuser_token_headers,
        json=data,
    )
    created_user = r.json()
    assert r.status_code == 400
    assert "_id" not in created_user
def test_create_user_by_normal_user(superuser_token_headers):
    """A non-superuser must not be allowed to create users (400)."""
    server_api = get_server_api()
    username = random_lower_string()
    # Fix: restore the credential helper; the source contained a redaction
    # placeholder (`<PASSWORD>()`) which is a syntax error.
    password = random_lower_string()
    user = create_or_get_user(username, password)  # noqa
    user_token_headers = user_authentication_headers(server_api, username, password)
    data = {"username": username, "password": password}
    r = requests.post(
        f"{server_api}{config.API_V1_STR}/users/", headers=user_token_headers, json=data
    )
    assert r.status_code == 400
def test_retrieve_users(superuser_token_headers):
    """Listing users returns more than one entry with the expected fields."""
    server_api = get_server_api()
    username = random_lower_string()
    # Fix: restore the credential helper on both lines below; the source
    # contained redaction placeholders (`<PASSWORD>...`), a syntax error.
    password = random_lower_string()
    user = create_or_get_user(username, password)  # noqa
    username2 = random_lower_string()
    password2 = random_lower_string()
    user2 = create_or_get_user(username2, password2)  # noqa
    r = requests.get(
        f"{server_api}{config.API_V1_STR}/users/", headers=superuser_token_headers
    )
    all_users = r.json()
    assert len(all_users) > 1
    for user in all_users:
        assert "_id" in user
        assert "name" in user
        assert "roles" in user
        assert "type" in user
def test_get_specific_user_database_by_id(superuser_token_headers):
    """The /users/{username}/dbid endpoint returns the user's database id."""
    server_api = get_server_api()
    username = random_lower_string()
    # Fix: restore the credential helper; the source contained a redaction
    # placeholder (`<PASSWORD>...`) which is a syntax error.
    password = random_lower_string()
    user = create_or_get_user(username, password)  # noqa
    user_db_id = get_database_id_for_user(username)
    r = requests.get(
        f"{server_api}{config.API_V1_STR}/users/{username}/dbid",
        headers=superuser_token_headers,
    )
    response = r.json()
    assert r.status_code == 200
    assert response["msg"] == user_db_id
3231192 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 22:04:22 2019
@author: sercangul
"""
class Difference:
    """Compute the maximum absolute difference between any two elements."""

    def __init__(self, a):
        # Keep the input sequence private (name-mangled), as in the
        # original HackerRank template.
        self.__elements = a

    def computeDifference(self):
        """Set ``self.maximumDifference`` to max(|a[i] - a[j]|) over pairs.

        The maximum absolute pairwise difference equals max(a) - min(a),
        which replaces the original O(n^2) double loop with an O(n) scan.
        As a side benefit this also handles single-element input (result 0)
        instead of raising on an empty pair list.
        """
        self.maximumDifference = max(self.__elements) - min(self.__elements)
# End of Difference class
# First stdin line is the element count; it is not needed because the whole
# second line is read and split at once.
_ = input()
a = [int(e) for e in input().split(' ')]
# Compute and print the maximum absolute pairwise difference.
d = Difference(a)
d.computeDifference()
print(d.maximumDifference)
78336 | <reponame>jacksonhzx95/Joint_segmentation_denoise_for_scoliosis
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder_joint(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
    def __init__(self,
                 backbone,
                 backbone_gan,
                 decode_head,
                 feature_selection=None,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 generator_head=None
                 ):
        """Build the joint segmentation + denoising encoder-decoder.

        Args:
            backbone (dict): config of the segmentation backbone.
            backbone_gan (dict): config of the denoising/GAN backbone.
            decode_head (dict): config of the segmentation decode head.
            feature_selection (dict, optional): module mixing the two
                backbones' features.
            neck (dict, optional): neck config for the segmentation branch.
            auxiliary_head (dict | list[dict], optional): auxiliary head(s).
            train_cfg / test_cfg (dict, optional): training/testing configs.
            pretrained (str, optional): path to pre-trained weights.
            generator_head (dict): config of the image generator head.
        """
        super(EncoderDecoder_joint, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.backbone_gan = builder.build_backbone(backbone_gan)
        self.with_feature_selection = False
        if neck is not None:
            self.neck = builder.build_neck(neck)
        if feature_selection is not None:
            self.feature_selection = builder.build_feature_selection(feature_selection)
            self.with_feature_selection = True
        self._init_decode_head(decode_head)
        self._init_auxiliary_head(auxiliary_head)
        self.G_head = builder.build_generator_head(generator_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        '''build discriminator'''
        # GAN bookkeeping: how often / after how many steps the
        # discriminator is updated (read from train_cfg when available).
        self.disc_steps = 1 if self.train_cfg is None else self.train_cfg.get(
            'disc_steps', 1)
        self.disc_init_steps = (0 if self.train_cfg is None else
                                self.train_cfg.get('disc_init_steps', 0))
        if self.train_cfg is None:
            self.direction = ('a2b' if self.test_cfg is None else
                              self.test_cfg.get('direction', 'a2b'))
        else:
            self.direction = self.train_cfg.get('direction', 'a2b')
        self.step_counter = 0  # counting training steps
        # self.discriminator = builder.build_component(discriminator)
        self.init_weights(pretrained=pretrained)
        assert self.with_decode_head
    def _init_decode_head(self, decode_head):
        """Initialize ``decode_head``"""
        self.decode_head = builder.build_head(decode_head)
        # Mirror head attributes used throughout inference and resizing.
        self.align_corners = self.decode_head.align_corners
        self.num_classes = self.decode_head.num_classes
    def _init_auxiliary_head(self, auxiliary_head):
        """Initialize ``auxiliary_head``"""
        if auxiliary_head is not None:
            # Accept either a single head config or a list of configs.
            if isinstance(auxiliary_head, list):
                self.auxiliary_head = nn.ModuleList()
                for head_cfg in auxiliary_head:
                    self.auxiliary_head.append(builder.build_head(head_cfg))
            else:
                self.auxiliary_head = builder.build_head(auxiliary_head)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone and heads.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(EncoderDecoder_gan, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
self.backbone_gan.init_weights(pretrained=pretrained)
self.decode_head.init_weights()
# init GAN
# self.discriminator.init_weight()
# self.G_head.init_weights()
if self.with_auxiliary_head:
if isinstance(self.auxiliary_head, nn.ModuleList):
for aux_head in self.auxiliary_head:
aux_head.init_weights()
else:
self.auxiliary_head.init_weights()
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
y = self.backbone_gan(img)
if self.with_feature_selection:
x, y = self.feature_selection(x, y)
if self.with_neck:
x = self.neck(x)
return x, y
    # NOTE(review): former "need correct" marker -- the docstring below now
    # documents the actual (tuple) return value.
    def encode_decode(self, img, img_metas):
        """Encode images with backbone and decode into a semantic segmentation
        map of the same size as input.

        Returns a tuple ``(seg_logits, generated_image)``: the segmentation
        logits resized to the input resolution, and the raw generator-head
        output (not resized -- see the commented-out resize below).
        """
        x, y = self.extract_feat(img)
        out = self._decode_head_forward_test(x, img_metas)
        out_gen = self.G_head(y, img)
        # out_gen = resize(
        #     input=out_gen,
        #     size=img.shape[2:],
        #     mode='bilinear',
        #     align_corners=self.align_corners)
        out = resize(
            input=out,
            size=img.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        return out, out_gen
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
    def forward_dummy(self, img):
        """Dummy forward function.

        NOTE(review): ``encode_decode`` returns a ``(seg_logit, gen_out)``
        tuple, so this returns a tuple as well -- confirm that downstream
        FLOPs/benchmark tooling expects that shape.
        """
        seg_logit = self.encode_decode(img, None)
        return seg_logit
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x, y = self.extract_feat(img)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
gt_semantic_seg)
losses.update(loss_decode)
gen_out = self.G_head(y, img) # the output of generators
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, gt_semantic_seg)
losses.update(loss_aux)
return losses, gen_out
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap."""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
assert h_crop <= h_img and w_crop <= w_img, (
'crop size should not greater than image size')
num_classes = self.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, img_meta)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
if torch.onnx.is_in_onnx_export():
# cast count_mat to constant while exporting to ONNX
count_mat = torch.from_numpy(
count_mat.cpu().detach().numpy()).to(device=img.device)
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
if not isinstance(img_meta, list):
img_meta = img_meta.data
seg_logit, gen_out = self.encode_decode(img, img_meta)
if rescale:
seg_logit = resize(
seg_logit,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
gen_out = resize(
gen_out,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit, gen_out
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
if not isinstance(img_meta, list):
img_meta = img_meta.data[0]
assert self.test_cfg.mode in ['slide', 'whole']
# if isinstance(img_meta, list):
# ori_shape = img_meta[0][0]['ori_shape']
# else:
# ori_shape = img_meta[0][0]['ori_shape']
# assert all(_[0]['ori_shape'] == ori_shape for _ in img_meta.data)
if self.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit, gen_out = self.whole_inference(img, img_meta, rescale)
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
if flip:
flip_direction = img_meta[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3,))
gen_out = gen_out.flip(dims=(3,))
elif flip_direction == 'vertical':
output = output.flip(dims=(2,))
gen_out = gen_out.flip(dims=(2,))
return output, gen_out
def simple_test(self, img, img_meta, rescale=True, return_logist=False):
"""Simple test with single image."""
seg_logit, gen_out = self.inference(img, img_meta, rescale) #
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
return seg_pred, gen_out
seg_pred = seg_pred.cpu().numpy()
gen_out = gen_out
# unravel batch dim
if return_logist:
# seg_logit = seg_logit.cpu()
return seg_logit, gen_out
seg_pred = list(seg_pred)
gen_out = list(gen_out)
return seg_pred, gen_out
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# print(imgs.shape())
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit, gen_out = self.inference(imgs[0].cuda(), img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit, _ = self.inference(imgs[i].cuda(), img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred, gen_out
def parse_loss(self, losses):
loss, log_vars = self._parse_losses(losses)
return loss, log_vars
    def train_step(self, data_batch, optimizer, **kwargs):
        """Training step function.
        Args:
            data_batch (dict): Dict of the input data batch.
            optimizer (dict[torch.optim.Optimizer]): Dict of optimizers for
                the generators and discriminator.
        Returns:
            dict: Dict of loss, information for logger, the number of samples\
                and results for visualization.
        """
        # NOTE(review): the triple-quoted block below is a dead string
        # expression duplicating the live code that follows -- safe to
        # remove in a future cleanup.
        """
        losses = self(**data_batch)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(data_batch['img'].data))
        return outputs
        """
        # Forward pass, reduce losses, and package for the runner/logger.
        losses = self(**data_batch)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(data_batch['img'].data))
        return outputs
# # data
# img = data_batch['img']
# # img_b = data_batch['img_metas']
# meta = data_batch['img_metas']
# image_seg_gt = data_batch['gt_semantic_seg']
# # forward generators
# outputs = self.forward_train(img, meta, image_seg_gt)
# # outputs = self.forward(img_a, img_b, meta, return_loss=True)
# log_vars = dict()
#
# # discriminator
# set_requires_grad(self.discriminator, True)
# # optimize
# optimizer['discriminator'].zero_grad()
# log_vars.update(self.backward_discriminator(outputs=outputs))
# optimizer['discriminator'].step()
#
# # generators, no updates to discriminator parameters.
# if (self.step_counter % self.disc_steps == 0
# and self.step_counter >= self.disc_init_steps):
# set_requires_grad(self.discriminator, False)
# # optimize
# optimizer['generators'].zero_grad()
# log_vars.update(self.backward_generator(outputs=outputs))
# optimizer['generators'].step()
#
# self.step_counter += 1
#
# log_vars.pop('loss', None) # remove the unnecessary 'loss'
# results = dict(
# log_vars=log_vars,
# num_samples=len(outputs['real_a']),
# results=dict(
# real_a=outputs['real_a'].cpu(),
# fake_b=outputs['fake_b'].cpu(),
# real_b=outputs['real_b'].cpu()))
#
# return results
'''def val_step(self, data_batch, **kwargs):
"""Validation step function.
Args:
data_batch (dict): Dict of the input data batch.
kwargs (dict): Other arguments.
Returns:
dict: Dict of evaluation results for validation.
"""
# data
img_a = data_batch['img_a']
img_b = data_batch['img_b']
meta = data_batch['meta']
# forward generators
results = self.forward(img_a, img_b, meta, test_mode=True, **kwargs)
return results'''
def forward_test(self, imgs, img_metas, **kwargs):
        """Run inference, dispatching to simple or augmented test.

        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.

        Returns:
            The result of ``simple_test`` when there is a single
            augmentation, otherwise the result of ``aug_test``.

        Raises:
            TypeError: if ``imgs`` or ``img_metas`` is not a list.
            ValueError: if the two lists have different lengths.
        """
        # Both arguments must be per-augmentation lists.
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got '
                                f'{type(var)}')
        # img_metas = img_metas[0].data # temporary
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != '
                             f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch all of the same ori_shape and pad
        # shape
        # print(img_metas.shape())
        if isinstance(img_metas[0], list):
            # Metas arrived as plain nested lists of dicts.
            for img_meta in img_metas:
                ori_shapes = [_['ori_shape'] for _ in img_meta]
                assert all(shape == ori_shapes[0] for shape in ori_shapes)
                img_shapes = [_['img_shape'] for _ in img_meta]
                assert all(shape == img_shapes[0] for shape in img_shapes)
                pad_shapes = [_['pad_shape'] for _ in img_meta]
                assert all(shape == pad_shapes[0] for shape in pad_shapes)
        else:
            # NOTE(review): this branch unwraps `img_meta.data[...][0]` --
            # presumably an mmcv DataContainer; confirm against the caller.
            for img_meta in img_metas:
                ori_shapes = [_[0]['ori_shape'] for _ in img_meta.data]
                assert all(shape == ori_shapes[0] for shape in ori_shapes)
                img_shapes = [_[0]['img_shape'] for _ in img_meta.data]
                assert all(shape == img_shapes[0] for shape in img_shapes)
                pad_shapes = [_[0]['pad_shape'] for _ in img_meta.data]
                assert all(shape == pad_shapes[0] for shape in pad_shapes)
        if num_augs == 1:
            # Single augmentation: move the batch to GPU and run plain test.
            return self.simple_test(imgs[0].cuda(), img_metas[0], **kwargs)
        else:
            return self.aug_test(imgs, img_metas, **kwargs)
| StarcoderdataPython |
3520496 | import numpy as np
from src.pre_processing.core.mesh.functions.verify_flow_region import FlowFieldRegion
from src.constants import SMALL_NUMBER
# Generate the points of the layer
def generate_layer_points_func(airfoil: np.ndarray, circle: np.ndarray, multiplier: float, height: float, region: FlowFieldRegion) -> np.ndarray:
    """Generate one mesh layer by blending a grown airfoil with a grown circle.

    Both input contours are copied and scaled so that their extents increase
    by ``height``; the returned layer is the convex combination
    ``airfoil * multiplier + circle * (1 - multiplier)``, except in the
    boundary-layer region, where the scaled airfoil itself is returned.
    """
    grown_airfoil = np.copy(airfoil)
    grown_circle = np.copy(circle)

    # Scale the airfoil per-axis so each extent increases by `height`.
    x_scale = (max(grown_airfoil[:, 0]) + height) / max(grown_airfoil[:, 0])
    y_scale = (max(grown_airfoil[:, 1]) + height) / max(grown_airfoil[:, 1])
    grown_airfoil[:, 0] = grown_airfoil[:, 0] * x_scale
    grown_airfoil[:, 1] = grown_airfoil[:, 1] * y_scale

    # Inside the boundary layer the layer follows the airfoil exactly.
    if region == FlowFieldRegion.BOUNDARY_LAYER:
        return np.copy(grown_airfoil)

    # Scale the circle uniformly; its x-extent sets the factor.
    uniform_scale = (max(grown_circle[:, 0]) + height) / max(grown_circle[:, 0])
    grown_circle[:, 0] = grown_circle[:, 0] * uniform_scale
    grown_circle[:, 1] = grown_circle[:, 1] * uniform_scale

    # Linear blend between the two grown contours.
    return grown_airfoil * multiplier + grown_circle * (1 - multiplier)
# Demonstrate a cyclic (self-referential) list.
L = list((1, 2))  # start with a two-item list
L.append(L)       # append the list to itself -> cyclic structure
print(L)          # repr renders the cycle as [1, 2, [...]]
8134291 | import pathlib
from taichi.core import ti_core as _ti_core
from taichi.lang.impl import default_cfg
from taichi.lang.kernel_arguments import ext_arr, template
from taichi.lang.kernel_impl import kernel
from taichi.lang.ops import get_addr
from .utils import *
class Gui:
    """Thin wrapper that forwards immediate-mode widget calls to a PyGui object."""

    def __init__(self, gui) -> None:
        self.gui = gui  # reference to the backing PyGui instance

    def begin(self, name, x, y, width, height):
        # Open a sub-window at (x, y) with the given size.
        self.gui.begin(name, x, y, width, height)

    def end(self):
        # Close the sub-window opened by begin().
        self.gui.end()

    def text(self, text):
        # Draw a line of static text.
        self.gui.text(text)

    def checkbox(self, text, old_value):
        # Draw a checkbox; returns the (possibly toggled) value.
        return self.gui.checkbox(text, old_value)

    def slider_float(self, text, old_value, minimum, maximum):
        # Draw a float slider; returns the current value.
        return self.gui.slider_float(text, old_value, minimum, maximum)

    def color_edit_3(self, text, old_value):
        # Draw an RGB color picker; returns the current color.
        return self.gui.color_edit_3(text, old_value)

    def button(self, text):
        # Draw a button; returns whatever the backend reports (click state).
        return self.gui.button(text)
1801065 | from manga_py.crypt import MangaRockComCrypt
from manga_py.fs import rename, unlink, basename
from manga_py.provider import Provider
from .helpers.std import Std
# api example:
"""
curl 'https://api.mangarockhd.com/query/web401/manga_detail?country=Japan' --compressed --data '{"oids":{"mrs-serie-100226981":0},"sections":["basic_info","summary","artworks","sub_genres","social_stats","author","character","publisher","scanlator","other_fact","chapters","related_series","same_author","feature_collections"]}'
"""
class MangaRockCom(Provider, Std):
    """manga-py provider for mangarock: scrapes chapter lists via the site's
    JSON API and decrypts downloaded page images (see MangaRockComCrypt)."""

    crypt = None                                         # set in prepare_cookies()
    __content = ''
    __api_uri = 'https://api.mangarockhd.com/query/'     # version suffix appended later

    def get_chapter_index(self) -> str:
        """Return the current chapter id as a string (used for file naming)."""
        return str(self.chapter_id)

    def get_main_content(self):
        """Fetch the manga's main HTML page."""
        name = self._get_name(r'/manga/([^/]+-\d+)')
        return self.http_get('{}/manga/{}'.format(
            self.domain,
            name
        ))

    def get_manga_name(self) -> str:
        """Manga title = text of the page's <h1>."""
        return self.text_content(self.content, 'h1')

    def get_chapters(self):
        """Query the JSON API for chapter oids; reversed so oldest comes first."""
        idx = self._get_name('/manga/([^/]+)')
        url = '{}info?oid={}&last=0&country=Japan'.format(self.__api_uri, idx)
        items = self.json.loads(self.http_get(url))
        return [(i.get('oid'),) for i in items.get('data', {}).get('chapters', [])][::-1]

    def __get_url(self):
        # API endpoint listing page image URLs for the current chapter oid.
        return '{}pages?oid={}&country=Japan'.format(self.__api_uri, self.chapter[0])

    def get_files(self):
        """Return the list of page-image URLs for the current chapter."""
        items = self.json.loads(self.http_get(self.__get_url()))
        return items.get('data')

    # decrypt
    def after_file_save(self, _path, idx: int):
        """Decrypt a downloaded page in place (write to a temp file, then swap)."""
        _path_wp = _path + 'wp'
        with open(_path, 'rb') as file_r:
            with open(_path_wp, 'wb') as file_w:
                file_w.write(self.crypt.decrypt(file_r.read()))
        unlink(_path)
        rename(_path_wp, _path)

    def save_file(self, idx=None, callback=None, url=None, in_arc_name=None):
        """Save a page; force the in-archive name to end in .webp
        (decrypted mangarock images are WebP)."""
        _path, idx, _url = self._save_file_params_helper(url, idx)
        in_arc_name = basename(_path) + '.webp'
        return super().save_file(idx, callback, _url, in_arc_name)

    def get_cover(self) -> str:
        """Best-effort cover lookup; returns None if the selector matches nothing."""
        selector = 'div:not([class]) > div[class] > div[class] > div[class] > div[class] > img'
        url = '{}{}'.format(self.domain, self._get_name('(/manga/[^/]+)'))
        img = self._elements(selector, self.http_get(url))
        if img and len(img):
            return img[0].get('src')

    def prepare_cookies(self):
        # patch api version: the page embeds AJAX_MRAPI_VERSION (e.g. "web401"),
        # which becomes part of every API URL.
        v = self.re.compile(r'\bAJAX_MRAPI_VERSION\b\s*=\s*[\'"]?(web\d+)')
        self.__api_uri += v.search(self.content).group(1) + '/'
        self.crypt = MangaRockComCrypt()

    def book_meta(self) -> dict:
        # todo meta
        pass

    def chapter_for_json(self):
        """Identify a chapter in JSON output by its URL."""
        return self.get_url()


# manga-py entry point: the provider class this module exports.
main = MangaRockCom
| StarcoderdataPython |
8118900 | <reponame>ejconlon/pushpluck
from contextlib import contextmanager
from dataclasses import dataclass
from pushpluck import constants
from pushpluck.base import Resettable
from pushpluck.color import Color
from pushpluck.push import PushInterface, ButtonCC, ButtonIllum, ButtonColor, TimeDivCC
from pushpluck.pos import Pos, GridSelPos, ChanSelPos
from typing import Dict, Generator, Optional
class LcdRow:
    """One line of Push LCD text, stored as a fixed-width buffer of character codes."""

    def __init__(self) -> None:
        # Initialize the whole line to spaces.
        self._buffer = [ord(' ')] * constants.DISPLAY_MAX_LINE_LEN

    def get_text(self, start: int, length: int) -> str:
        """Return `length` characters of the line beginning at column `start`."""
        end = start + length
        assert start >= 0 and length >= 0 and end <= constants.DISPLAY_MAX_LINE_LEN
        return ''.join(map(chr, self._buffer[start:end]))

    def get_all_text(self) -> str:
        """Return the entire line as a string."""
        return self.get_text(0, constants.DISPLAY_MAX_LINE_LEN)

    def set_text(self, start: int, text: str) -> bool:
        """Overwrite the line at column `start` with `text`.

        Returns True iff at least one character actually changed.
        """
        end = start + len(text)
        assert start >= 0 and end <= constants.DISPLAY_MAX_LINE_LEN
        changed = False
        for offset, ch in enumerate(text):
            code = ord(ch)
            if self._buffer[start + offset] != code:
                self._buffer[start + offset] = code
                changed = True
        return changed

    def set_all_text(self, text: str) -> bool:
        """Overwrite the line starting at column 0; True iff anything changed."""
        return self.set_text(0, text)
@dataclass(frozen=True, eq=False)
class PushState:
    """Snapshot of everything drawable on the Push.

    The dataclass itself is frozen, but the contained dicts are mutated in
    place by PushShadow / PushShadowManaged.
    """

    # LCD text by row index.
    lcd: Dict[int, LcdRow]
    # Pad color by position; None means the LED is off.
    pads: Dict[Pos, Optional[Color]]
    # Button illumination; None means the button is unlit.
    buttons: Dict[ButtonCC, Optional[ButtonIllum]]

    @classmethod
    def reset(cls) -> 'PushState':
        """Full state with every row/pad/button present and blank/off."""
        return cls(
            lcd={row: LcdRow() for row in range(constants.DISPLAY_MAX_ROWS)},
            pads={pos: None for pos in Pos.iter_all()},
            buttons={button: None for button in ButtonCC}
        )

    @classmethod
    def diff(cls) -> 'PushState':
        """Empty state used to accumulate pending changes only."""
        return cls(
            lcd={},
            pads={},
            buttons={}
        )
class PushShadow(Resettable):
    """Caches the last state sent to the Push so repeated renders only emit
    the hardware messages (LCD text, pad colors, button illumination) that
    actually changed."""

    def __init__(self, push: PushInterface) -> None:
        self._push = push
        # Mirror of what we believe the hardware currently shows.
        self._state = PushState.reset()

    def reset(self) -> None:
        """Reset the hardware and the cached mirror together."""
        self._push.reset()
        self._state = PushState.reset()

    @contextmanager
    def context(self) -> Generator['PushInterface', None, None]:
        """Yield a PushInterface that records writes into a diff; on normal
        exit, flush only the differences to the real device.

        NOTE(review): if the body raises, the diff is not flushed (the yield
        is not wrapped in try/finally) -- this preserves existing behavior.
        """
        diff_state = PushState.diff()
        managed = PushShadowManaged(diff_state)
        yield managed
        self._emit(diff_state)

    def _emit(self, diff_state: PushState) -> None:
        """Flush a recorded diff to the hardware, updating the mirror."""
        self._emit_lcd(diff_state)
        self._emit_pads(diff_state)
        self._emit_buttons(diff_state)

    def _emit_lcd(self, diff_state: PushState) -> None:
        for row, new_row in diff_state.lcd.items():
            old_row = self._state.lcd[row]
            new_text = new_row.get_all_text()
            # set_all_text returns True only when the cached text differed,
            # and updates the cache as a side effect.
            if old_row.set_all_text(new_text):
                self._push.lcd_display_raw(row, 0, new_text)

    def _emit_pads(self, diff_state: PushState) -> None:
        for pos, new_color in diff_state.pads.items():
            old_color = self._state.pads.get(pos)
            if old_color != new_color:
                if new_color is None:
                    self._push.pad_led_off(pos)
                    if pos in self._state.pads:
                        del self._state.pads[pos]
                else:
                    self._push.pad_set_color(pos, new_color)
                    self._state.pads[pos] = new_color

    def _emit_buttons(self, diff_state: PushState) -> None:
        for button, new_illum in diff_state.buttons.items():
            # BUGFIX: use .get() -- a previous flush may have cleared this
            # button's entry, and direct indexing raised KeyError then.
            old_illum = self._state.buttons.get(button)
            if old_illum != new_illum:
                if new_illum is None:
                    self._push.button_off(button)
                    # Record "off" as None (matches PushState.reset()) rather
                    # than deleting the key out of the mirror.
                    self._state.buttons[button] = None
                else:
                    self._push.button_set_illum(button, new_illum)
                    # BUGFIX: remember the new illumination. Previously the
                    # mirror was never updated here, so the same message was
                    # re-sent on every flush and real changes could be
                    # misdetected.
                    self._state.buttons[button] = new_illum
class PushShadowManaged(PushInterface):
    """PushInterface implementation that records writes into a PushState diff
    instead of talking to hardware; PushShadow flushes the diff afterwards.

    Only the pad/LCD/button operations are supported; the remaining
    PushInterface methods raise NotImplementedError.
    """

    def __init__(self, state: PushState):
        # The diff being accumulated (owned by PushShadow.context()).
        self._state = state

    def pad_led_off(self, pos: Pos) -> None:
        # None encodes "LED off" in the diff.
        self._state.pads[pos] = None

    def pad_set_color(self, pos: Pos, color: Color) -> None:
        self._state.pads[pos] = color

    def lcd_display_raw(self, row: int, line_col: int, text: str) -> None:
        # Lazily create the row buffer, then write the text at the column.
        if row not in self._state.lcd:
            self._state.lcd[row] = LcdRow()
        self._state.lcd[row].set_text(line_col, text)

    def button_set_illum(self, button: ButtonCC, illum: ButtonIllum) -> None:
        self._state.buttons[button] = illum

    def button_off(self, button: ButtonCC) -> None:
        # None encodes "button unlit" in the diff.
        self._state.buttons[button] = None

    def time_div_off(self, time_div: TimeDivCC) -> None:
        raise NotImplementedError()

    def time_div_reset(self) -> None:
        raise NotImplementedError()

    def chan_sel_set_color(self, cs_pos: ChanSelPos, illum: ButtonIllum, color: ButtonColor) -> None:
        raise NotImplementedError()

    def chan_sel_off(self, cs_pos: ChanSelPos) -> None:
        raise NotImplementedError()

    def grid_sel_set_color(self, gs_pos: GridSelPos, color: Color) -> None:
        raise NotImplementedError()

    def grid_sel_off(self, gs_pos: GridSelPos) -> None:
        raise NotImplementedError()
| StarcoderdataPython |
362389 | <gh_stars>0
"""
Class :py:class:`FWViewHist` is a widget with interactive axes
==============================================================
FWViewHist <- FWView <- QGraphicsView <- QWidget
Usage ::
Create FWViewHist object within pyqt QApplication
--------------------------------------------------
import sys
from PyQt4 import QtGui, QtCore
from graphqt.FWViewHist import FWViewHist
app = QApplication(sys.argv)
rscene=QRectF(0, 0, 100, 100)
w = FWViewHist(None, rscene, origin='UL', fgcolor='red', bgcolor='yellow')
w = FWViewHist(None, rscene, origin='UL')
w = FWViewHist(None, rscene, origin='DR', scale_ctl=True, wwidth=50, wlength=200)
w.show()
app.exec_()
Connect/disconnecr recipient to signals
---------------------------------------
Methods
-------
w.print_attributes()
Internal methods
-----------------
w.reset_original_image_size()
Re-defines methods
------------------
w.update_my_scene() # FWView.update_my_scene() + draw hist
w.set_style() # sets FWView.set_style() + color, font, pen
w.closeEvent() # removes rulers, FWView.closeEvent()
See:
- :class:`FWView`
- :class:`FWViewImage`
- :class:`FWViewHist`
- :class:`FWViewColorBar`
- `lcls2 on github <https://github.com/slac-lcls/lcls2>`_.
This software was developed for the LCLS2 project.
If you use all or part of it, please give an appropriate acknowledgment.
Created on 2020-11-02 by <NAME>
"""
from psana.graphqt.FWView import * # FWView, QtGui, QtCore, Qt
from psana.graphqt.FWHist import FWHist, test_histogram
from PyQt5.QtGui import QColor, QFont
logger = logging.getLogger(__name__)
class FWViewHist(FWView):
    """Interactive histogram view (QGraphicsView subclass via FWView).

    Keyword options: scale_ctl ('H'/'V'/'HV'), orient ('H'/'V' histogram
    orientation), zvalue (stacking order), wlength/wwidth (widget size hints),
    bgcolor/fgcolor (colors; bgcolor other than the default gets a filled
    background rect).
    """

    def __init__(self, parent=None, rscene=QRectF(0, 0, 10, 10), origin='DL', **kwargs):
        # Read options before FWView.__init__, which calls back into
        # set_style() and needs these attributes to exist.
        self.bgcolor_def = 'black'
        self.scale_ctl = kwargs.get('scale_ctl', 'H')
        self.orient = kwargs.get('orient', 'H') # histogram orientation H or V
        self.zvalue = kwargs.get('zvalue', 10) # z value for visibility
        self.wlength = kwargs.get('wlength', 400)
        self.wwidth = kwargs.get('wwidth', 60)
        self.bgcolor = kwargs.get('bgcolor', self.bgcolor_def)
        self.fgcolor = kwargs.get('fgcolor', 'blue')
        #self.kwargs = kwargs
        self.hist = None  # FWHist created in update_my_scene()
        self.side = 'D'
        #scctl = ('H' if self.side in ('U','D') else 'V') if self.scale_ctl else ''
        #scctl = 'HV'
        FWView.__init__(self, parent, rscene, origin, scale_ctl=self.scale_ctl)
        self._name = self.__class__.__name__
        #self.set_style() # called in FWView
        self.update_my_scene()

    def print_attributes(self):
        """Debug dump of scale control and origin."""
        print('scale_control: ', self.str_scale_control())
        print('origin : ', self.origin())

    def set_style(self):
        """Extend FWView styling with this view's foreground color/pen."""
        FWView.set_style(self)
        #style_default = "background-color: rgb(239, 235, 231, 255); color: rgb(0, 0, 0);" # Gray bkgd
        #bgcolor = self.palette().color(QPalette.Background)
        #style_default = '' if self.bgcolor is None else 'background-color: %s' % self.bgcolor
        #self.setStyleSheet(style_default)
        #self.layout().setContentsMargins(0,0,0,0)
        #color = Qt.white
        color = QColor(self.fgcolor)
        self.colhi = QColor(color)
        #self.fonax = QFont('Courier', 12, QFont.Normal)
        self.penhi = QPen(color, 1, Qt.SolidLine)
        #if self.side in ('U','D') :
        # self.setMinimumSize(self.wlength, 2)
        # self.setFixedHeight(self.wwidth)
        #else:
        # self.setMinimumSize(2, self.wlength)
        # self.setFixedWidth(self.wwidth)
        #self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)

    def update_my_scene(self, hbins=test_histogram()):
        """Rebuild the scene: base FWView items, optional background rect,
        and a fresh FWHist drawn from `hbins`."""
        FWView.update_my_scene(self)
        if self.hist is not None: self.hist.remove()
        view = self
        # Only paint an explicit background when the caller overrode it.
        if self.bgcolor != self.bgcolor_def:
            s = self.scene()
            r = s.sceneRect()
            s.addRect(r, pen=QPen(Qt.black, 0, Qt.SolidLine), brush=QBrush(QColor(self.bgcolor)))
        self.hist = FWHist(view, hbins=hbins, color=self.colhi, brush=QBrush(), orient=self.orient, zvalue=self.zvalue)

    def reset_original_image_size(self):
        # def in FWView.py with overloaded update_my_scene()
        self.reset_original_size()

    # def reset_original_image_size(self):
    # self.set_view()
    # self.update_my_scene()
    # self.check_axes_limits_changed()

    # def mouseMoveEvent(self, e):
    # self.update_my_scene()
    # FWView.mouseMoveEvent(self, e)

    def mouseReleaseEvent(self, e):
        """Redraw the histogram after pan/zoom gestures complete."""
        self.update_my_scene()
        FWView.mouseReleaseEvent(self, e)

    def closeEvent(self, e):
        """Remove histogram items from the scene before the base close."""
        self.hist.remove()
        FWView.closeEvent(self, e)
        #print('FWViewHist.closeEvent')
if __name__ == "__main__":
import sys
def test_guiview(tname):
    """Interactive smoke test: build one FWViewHist variant (selected by
    `tname` '0'..'5') and run the Qt event loop until the window closes."""
    print('%s:' % sys._getframe().f_code.co_name)
    app = QApplication(sys.argv)
    w = None
    rs = QRectF(0, 0, 100, 1000)
    # Each case exercises a different origin / scale-control / color combo.
    if tname == '0': w=FWViewHist(None, rs, origin='UL', scale_ctl='V', fgcolor='white', bgcolor='gray')
    elif tname == '1': w=FWViewHist(None, rs, origin='DL', scale_ctl='H', fgcolor='black', bgcolor='yellow')
    elif tname == '2': w=FWViewHist(None, rs, origin='DR')
    elif tname == '3': w=FWViewHist(None, rs, origin='UR')
    elif tname == '4': w=FWViewHist(None, rs, origin='DR', scale_ctl='V', fgcolor='yellow', bgcolor='gray', orient='V')
    elif tname == '5': w=FWViewHist(None, rs, origin='DR', scale_ctl='V', fgcolor='white', orient='V')
    else:
        print('test %s is not implemented' % tname)
        return
    w.print_attributes()
    #w.connect_axes_limits_changed_to(w.test_axes_limits_changed_reception)
    #w.disconnect_axes_limits_changed_from(w.test_axes_limits_changed_reception)
    w.show()
    app.exec_()  # blocks until the window is closed
    # Delete the widget before the application object (PyQt teardown order).
    del w
    del app
if __name__ == "__main__":
    import os
    # Force indirect GL rendering (useful under remote X / WSL displays).
    os.environ['LIBGL_ALWAYS_INDIRECT'] = '1' #export LIBGL_ALWAYS_INDIRECT=1
    # Test selector comes from argv[1]; defaults to test '0'.
    tname = sys.argv[1] if len(sys.argv) > 1 else '0'
    print(50*'_', '\nTest %s' % tname)
    test_guiview(tname)
    sys.exit('End of Test %s' % tname)
# EOF
| StarcoderdataPython |
9690814 | <reponame>beesandbombs/coldtype<gh_stars>1-10
from coldtype.test import *
@test()
def empty(r):
    """Intentionally blank coldtype test: renders nothing.

    Acts as a smoke test that the @test() harness itself runs.
    (r is presumably the render context supplied by the harness -- confirm.)
    """
    pass
156129 | <gh_stars>10-100
import tensorflow as tf
__all__ = ['AutomatedTrainingMonitor']
class AutomatedTrainingMonitor:
    """Runs a TF1-style session training loop with optional early stopping
    and TensorBoard summary writing.

    NOTE: this module uses Python 2 `print` statements (see train()) and the
    TensorFlow 1.x Session API; it is not runnable under Python 3 as-is.
    """

    def __init__(self, input_var, output_var, training_input, training_output,
                 train, cost, sess, training_steps=100,
                 validation_input=None, validation_output=None,
                 early_stopping_rounds=None, burn_in=50,
                 summary_op=None, writer_obj=None):
        """Initialize AutomatedTrainingMonitor class

        Parameters
        ----------
        input_var : tf.tensor
            Input tensor
        output_var : tf.tensor
            Output tensor
        training_input : np.array
            Training input
        training_output : np.array
            Training output
        train : tf.Optimizer
            Optimizer for the network
        cost : tf.constant
            Cost/Loss function operation
        sess : tf.Session
            Tensorflow Session
        training_steps : integer (default 100)
            Training steps
        validation_input : np.array
            Validation input
        validation_output : np.array
            Validation output
        early_stopping_rounds : integer (default None)
            Number of iterations without improvement after which to stop;
            None disables early stopping entirely
        burn_in : integer (default 50)
            Number of initial steps during which validation is skipped
        summary_op: summary operation (default None)
            Summary operation for tensorboard
        writer_obj: SummaryWriter object (default None)
            SummaryWriter object for tensorboard
        """
        self.input_var = input_var
        self.output_var = output_var
        self.sess = sess
        self.cost = cost
        self.train_step = train
        self.training_input = training_input
        self.training_output = training_output
        self.training_steps = training_steps
        self.validation_input = validation_input
        self.validation_output = validation_output
        self.early_stopping_rounds = early_stopping_rounds
        # Early-stopping bookkeeping (best validation loss seen so far).
        self._best_value_step = None
        self._best_value = None
        self._early_stopped = False
        self.burn_in = burn_in
        self.summary_op = summary_op
        self.writer_obj = writer_obj

    @property
    def early_stopped(self):
        """Returns True if this monitor caused an early stop."""
        return self._early_stopped

    @property
    def best_step(self):
        """Returns the step at which the best early stopping metric was found."""
        return self._best_value_step

    @property
    def best_value(self):
        """Returns the best early stopping metric value found so far."""
        return self._best_value

    def validate_every_step(self, step):
        """Evaluate validation loss at `step`; set the early-stopped flag when
        no improvement has been seen for `early_stopping_rounds` steps."""
        if self.early_stopping_rounds is not None:
            current_value = float(self.sess.run(self.cost,feed_dict={self.input_var:self.validation_input,
                                                self.output_var:self.validation_output}))
            if (self._best_value is None or current_value < self._best_value):
                self._best_value = current_value
                self._best_value_step = step
            stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
            if stop_now:
                self._early_stopped = True
        return

    def train(self):
        """Run the training loop.

        Each step runs the optimizer (plus the summary op when configured);
        after `burn_in` steps, validation-based early stopping is checked.
        Returns the SummaryWriter when summaries are enabled, else None.
        """
        for iter_num in range(self.training_steps):
            if self.summary_op is not None:
                _, summary = self.sess.run([self.train_step, self.summary_op],
                                           feed_dict={self.input_var:self.training_input,
                                                      self.output_var:self.training_output})
                self.writer_obj.add_summary(summary, iter_num)
            else:
                self.sess.run(self.train_step, feed_dict={self.input_var: self.training_input,
                                                          self.output_var: self.training_output})
            if iter_num >= self.burn_in:
                self.validate_every_step(iter_num)
                if self._early_stopped is True:
                    break
        # Python 2 print statements: `iter_num` deliberately keeps its value
        # from the (possibly broken-out-of) loop above.
        print "Final Validation loss: ",\
        float(self.sess.run(self.cost,feed_dict={self.input_var:self.validation_input,
                                                 self.output_var:self.validation_output}))
        print "Number of Iterations: ",\
        iter_num
        if self.summary_op is not None:
            return self.writer_obj
        else:
            return

    def reset_early_stopped(self):
        """Clear the early-stopping state so the monitor can be reused."""
        self._best_value_step = None
        self._best_value = None
        self._early_stopped = False
| StarcoderdataPython |
5139646 | from __future__ import print_function
import subprocess
import sys
# Ask pybind11 for its compiler include flags (a single line of
# "-I<path> -I<path> ..." text).
includes = subprocess.check_output(['python', '-m', 'pybind11', '--includes'])
includes_str = includes.decode()

# Print one "-I<path>" flag per line.  We anchor on the "-I" marker and walk
# from the right, so each printed chunk runs from one marker to the previous
# end of string; the flags therefore come out in reverse order.
# (Dead prototype code that scanned left-to-right has been removed.)
while (i := includes_str.rfind('-I')) != -1:
    print(includes_str[i:].strip())
    includes_str = includes_str[:i]
4939694 | <gh_stars>0
from __future__ import annotations
import copy
import scipy.sparse
import astropy
from astropy import units as qu
from typing import Callable
from metadata import *
class SamplingMethod:
    """A sampling rate plus helpers converting between time and sample index."""

    def __init__(self, srate: astropy.units.quantity.Quantity=30e3*qu.Hz):
        # Store the rate normalized to Hz so conversions are unit-safe.
        self.srate = srate.to('Hz')

    def stamp_from_t(self, t: astropy.units.quantity.Quantity):
        """Time quantity -> integer sample stamp (truncated, not rounded)."""
        seconds = t.to('s').value
        return int(seconds * self.srate.to('Hz').value)

    def t_from_stamp(self, stamp) -> astropy.units.quantity.Quantity:
        """Integer sample stamp -> time quantity in seconds."""
        return (stamp / self.srate.to('Hz')).to('s')

    def reset_sampling(self, srate: astropy.units.quantity.Quantity):
        """Replace the sampling rate (normalized to Hz)."""
        self.srate = srate.to('Hz')

    def sampling_point(self, t: astropy.units.quantity.Quantity=None, stamp=None) -> SamplingPoint:
        """Build a SamplingPoint on this sampling grid."""
        return SamplingPoint(self, t, stamp)

    def zero(self) -> SamplingPoint:
        """The sampling point at t = 0 s."""
        return SamplingPoint(self, t=0*qu.s)

    def __str__(self):
        return f'SR:{self.srate!s}'

    def __repr__(self):
        return f'{type(self)!s} at {hex(id(self))}\n{self!s}'
class SamplingPoint:
    """A moment on a sampling grid, held redundantly as a time quantity `t`
    (seconds) and an integer sample index `stamp`."""

    def __init__(self, sampling: SamplingMethod, t: astropy.units.quantity.Quantity=None, stamp=None):
        self._sampling = sampling
        # Whichever of t/stamp is given, derive the other; giving neither
        # leaves both None; giving both trusts the caller's pair.
        if t is not None and stamp is None:
            self.t = t.to('s')
            self.stamp = self._sampling.stamp_from_t(t)
        elif stamp is not None and t is None:
            self.stamp = int(stamp)
            self.t = self._sampling.t_from_stamp(stamp)
        elif t is None and stamp is None:
            self.t = None
            self.stamp = None
        else:
            self.t = t.to('s')
            self.stamp = int(stamp)

    def copy(self) -> SamplingPoint:
        """Shallow copy (shares the SamplingMethod)."""
        return type(self)(self._sampling, self.t, self.stamp)

    def get_offset(self, sp: SamplingPoint) -> SamplingPoint:
        """Return a new point shifted by `sp` (both t and stamp are added)."""
        return type(self)(self._sampling, self.t+sp.t, self.stamp+sp.stamp)

    def slide_by_offset(self, sp: SamplingPoint) -> None:
        """Shift this point in place by `sp`."""
        self.t = self.t + sp.t
        self.stamp = self.stamp + sp.stamp

    def __str__(self):
        return '{0} {1} :{2}:]'.format(str(self._sampling), self.t, self.stamp)

    def __repr__(self):
        return '{0} at {1}\n{2}'.format(type(self), hex(id(self)), str(self))
class SamplingInterval:
    """Half-open-by-convention interval [start, end) between two SamplingPoints
    on the same sampling grid."""

    def __init__(self, sp_start: SamplingPoint, sp_end: SamplingPoint):
        self.start = sp_start
        self.end = sp_end

    def copy(self) -> SamplingInterval:
        """Deep-ish copy: both endpoints are copied (grid is shared)."""
        return type(self)(self.start.copy(), self.end.copy())

    def get_stamp_range_list(self):
        """All integer stamps in the interval (end exclusive)."""
        return list(range(self.start.stamp, self.end.stamp))

    def get_t_range(self):
        """Time quantity for every stamp in the interval."""
        return [self.start._sampling.t_from_stamp(stamp) for stamp in list(range(self.start.stamp, self.end.stamp))]

    def get_offset(self, value) -> SamplingInterval:
        """New interval shifted by `value` (a point shifts both ends equally;
        an interval shifts start by its start and end by its end)."""
        offset = resolve_to_interval(value)
        return type(self)(self.start.get_offset(offset.start), self.end.get_offset(offset.end))

    def resize_by_offset(self, value) -> None:
        """In-place version of get_offset()."""
        offset = resolve_to_interval(value)
        self.start.slide_by_offset(offset.start)
        self.end.slide_by_offset(offset.end)

    def slice_by_index(self, slice_range: SamplingInterval) -> SamplingInterval:
        """Interpret `slice_range` as indices relative to this interval's start
        and return the corresponding absolute interval."""
        return resolve_to_interval(self.start.copy()).get_offset(slice_range)

    def index_of_slice(self, slice_interval: SamplingInterval) -> SamplingInterval:
        """Inverse of slice_by_index(): absolute interval -> relative indices."""
        return slice_interval.get_offset(SamplingPoint(self.start._sampling, stamp=-self.start.stamp))

    def bins_by_step(self, t_bin: SamplingPoint, t_step: SamplingPoint) -> [SamplingInterval]:
        """Tile the interval into bins of width `t_bin` advanced by `t_step`.

        The bin count is computed from durations in seconds; onsets are laid
        out evenly in stamp units and each bin spans t_bin.stamp samples.
        """
        nbins = round(((self.duration().value - t_bin.t.value + t_step.t.value) / t_step.t.value))
        sm = self.start._sampling
        step_stamps = [int(stamp) for stamp in np.linspace(0, (nbins - 1) * t_step.stamp, nbins)]
        return [self.slice_by_index(SamplingInterval(SamplingPoint(sm, stamp=step_stamps[bin_i]),
                                                     SamplingPoint(sm, stamp=step_stamps[bin_i] + t_bin.stamp)))
                for bin_i in range(nbins)]

    def duration(self) -> astropy.units.quantity.Quantity:
        """Interval length as a time quantity (seconds)."""
        return self.end.t - self.start.t

    def num_samples(self):
        """Interval length in samples."""
        return self.end.stamp - self.start.stamp

    def __str__(self):
        return '[{0} - {1}]'.format(str(self.start), str(self.end))

    def __repr__(self):
        return '{0} at {1}\n{2}'.format(type(self), hex(id(self)), str(self))
class Session:
    """A recording session: its metadata entry plus the full sampled interval
    [0, entry.TimeStamps - 1]."""

    def __init__(self, entry, sampling: SamplingMethod = None):
        """Build a session from a metadata entry.

        `sampling` defaults to a fresh SamplingMethod at MetaData.base_rate.
        BUGFIX: the default used to be a single module-level SamplingMethod
        instance shared by every Session (classic mutable default argument);
        since SamplingMethod is mutable via reset_sampling(), one session
        could silently change another's rate.  A new instance is now created
        per call when the argument is omitted.
        """
        self.session_entry = entry
        if sampling is None:
            sampling = SamplingMethod(MetaData.base_rate * qu.Hz)
        self._session_sampling = sampling
        session_start = SamplingPoint(self._session_sampling, stamp=0)
        session_end = SamplingPoint(self._session_sampling, stamp=entry.TimeStamps - 1)
        self.session_interval = SamplingInterval(session_start, session_end)

    def __str__(self):
        return self.session_entry.Session

    def __repr__(self):
        return str(self)
class Signal:
    """A sampled data array bound to a session, a sampling grid and the
    absolute SamplingInterval it covers."""

    # Value used to represent missing samples.
    padding_value = np.nan

    def __init__(self, session, sampling: SamplingMethod, data, interval: SamplingInterval, units: astropy.units.core.Unit=None, src_data=None):
        self._session = session
        self._sampling = sampling
        self.data = data
        self.units = units
        self.interval = interval
        self.interval_dict = {}
        # src_data has information of original signal, could be in different sampling format or different slicing,
        # or could be a collection of signals (list or other structure)
        self.src_data = src_data

    def copy(self) -> Signal:
        """Deep-copy the data; endpoints are copied, session/src are shared."""
        return type(self)(self._session, self._sampling, copy.deepcopy(self.data), self.interval.copy(), self.units, self.src_data)

    def slice_by_index(self, slice_range: SamplingInterval, keep_src=False) -> Signal:
        """Slice by stamps interpreted as *indices into this signal's data*;
        the result's interval is the corresponding absolute interval.
        src_data is dropped unless keep_src is True."""
        return Signal(self._session,
                      self._sampling,
                      copy.deepcopy(self.data[slice_range.start.stamp:slice_range.end.stamp]),
                      self.interval.slice_by_index(slice_range),
                      self.units,
                      (self.src_data if keep_src else None))

    def index_of_slice(self, slice_interval: SamplingInterval) -> SamplingInterval:
        """Translate an absolute interval into indices into this signal."""
        return self.interval.index_of_slice(slice_interval)

    def slice_by_sampling_interval(self, slice_interval: SamplingInterval, keep_src=False) -> Signal:
        """Slice by an *absolute* sampling interval."""
        return self.slice_by_index(self.index_of_slice(slice_interval), keep_src)
class SignalAggregation:
    """Collects Signal objects (and their raw data arrays) for group operations."""

    def __init__(self):
        self.data_list = []    # raw .data of each appended signal, in order
        self.signal_list = []  # the Signal objects themselves, in order

    def append_signal(self, signal: Signal) -> SignalAggregation:
        """Add a signal (and its data) to the aggregation.

        Returns self so calls can be chained -- BUGFIX: the method was
        annotated ``-> SignalAggregation`` but implicitly returned None.
        """
        self.data_list.append(signal.data)
        self.signal_list.append(signal)
        return self
class SpikeTrain(Signal):
    """A Signal whose data is a 1 x num_samples sparse COO row of spike
    events (columns are sample stamps)."""

    def __init__(self, session, sampling: SamplingMethod, data: scipy.sparse.coo.coo_matrix, interval: SamplingInterval, src_data=None):
        # Spike trains are unitless (units=None).
        super(SpikeTrain, self).__init__(session, sampling, data, interval, None, src_data)

    def select_by_index(self, select_range: SamplingInterval, keep_src=False) -> SpikeTrain:
        """Keep only spikes whose stamps fall in `select_range`, without
        changing the train's interval or length.

        NOTE(review): np.searchsorted assumes self.data.col is sorted --
        confirm COO columns are kept in ascending stamp order upstream.
        """
        ind_range = np.searchsorted(self.data.col, [select_range.start.stamp, select_range.end.stamp], 'left')
        data_slice_stamps = self.data.col[ind_range[0]:ind_range[1]]
        data_slice_vals = self.data.data[ind_range[0]:ind_range[1]]
        return SpikeTrain(self._session,
                          self._sampling,
                          stamps_to_sparse(data_slice_stamps, data_slice_vals, self.interval.num_samples()),
                          self.interval,
                          (self.src_data if keep_src else None))

    def slice_by_index(self, slice_range: SamplingInterval, keep_src=False) -> SpikeTrain:
        """Select spikes in `slice_range` and rebase them so the result covers
        exactly that (absolute) sub-interval, with stamps relative to it."""
        selection = self.select_by_index(slice_range, keep_src)
        selection_stamps = selection.data.col - slice_range.start.stamp
        selection_vals = selection.data.data
        selection_num_data_points = slice_range.num_samples()
        selection.data = stamps_to_sparse(selection_stamps, selection_vals, selection_num_data_points)
        selection.interval = self.interval.slice_by_index(slice_range)
        return selection

    def firing_rate(self) -> float:
        """Mean rate = spike count / interval duration (seconds)."""
        numspikes = len(self.data.col)
        event_duration = self.interval.duration().value
        return numspikes / event_duration

    def to_MultiSpikeTrain(self) -> MultiSpikeTrain:
        """Convert to a MultiSpikeTrain with every event's value set to 1
        and units of Hz."""
        multi_spikedata_vals = [1 for _ in np.arange(len(self.data.data))]
        multi_spikedata = stamps_to_sparse(self.data.col, multi_spikedata_vals, self.interval.num_samples())
        return MultiSpikeTrain(self._session, self._sampling, multi_spikedata, self.interval, qu.Hz, self.src_data)
class MultiSpikeTrain(SpikeTrain):
    """A spike train whose sparse entries carry values (e.g. counts/rates),
    together with the unit those values are expressed in."""

    def __init__(self, session, sampling: SamplingMethod, data, interval: SamplingInterval, units: astropy.units.core.Unit, src_data=None):
        super().__init__(session, sampling, data, interval, src_data)
        # SpikeTrain forces units to None; restore the caller's unit here.
        self.units = units
class SignalSmoothing:
    """Normalized smoothing of a 1-D signal with a configurable kernel.

    Defaults to scipy.signal.convolve with the Gaussian window from MetaData.
    """

    def __init__(self, func: Callable=signal.convolve, window: np.ndarray=MetaData.filt_win_gauss):
        self.func = func      # convolution-like callable accepting mode='same'
        self.window = window  # smoothing kernel

    def smoothen(self, data: np.ndarray):
        """Convolve `data` with the window (same length out), normalized by
        the window's sum so the kernel has unit gain."""
        return self.func(data, self.window, mode='same')/sum(self.window)
class TimebinInterval:
    """Sliding time bins: windows of width `timebin` advanced by `timestep`
    across the range [t_start, t_end]."""

    def __init__(self, timebin, timestep, t_start, t_end):
        self.timebin = timebin    # bin width
        self.timestep = timestep  # stride between consecutive bin onsets
        self.t_start = t_start
        self.t_end = t_end

    def num_of_bins(self):
        """Number of full bins that fit in the interval."""
        span = self.t_end - self.t_start - self.timebin
        return int(span / self.timestep + 1)

    def split_to_bins_onset(self):
        """Onset time of every bin, as ints."""
        last_onset = self.t_end - self.timebin
        return [int(t) for t in np.linspace(self.t_start, last_onset, self.num_of_bins(), endpoint=True)]

    def split_to_bins_offset(self):
        """Offset (end) time of every bin, as ints."""
        first_offset = self.t_start + self.timebin
        return [int(t) for t in np.linspace(first_offset, self.t_end, self.num_of_bins(), endpoint=True)]

    def sub_interval(self, t_start, t_end):
        """Same binning parameters over a narrower time range."""
        return TimebinInterval(self.timebin, self.timestep, t_start, t_end)

    def t_offset_to_ind(self, t):
        """Index of the bin whose offset equals `t` (ValueError if absent)."""
        return self.split_to_bins_offset().index(t)
# TODO: implement later, bin, stepped timeseries
class Timeseries:
    """Placeholder for a binned/stepped timeseries representation (unimplemented)."""

    def __init__(self):
        pass
# ### Misc ### #
def resolve_to_interval(value):
    """Coerce `value` to a SamplingInterval.

    A SamplingPoint becomes the degenerate interval [value, value]; a
    SamplingInterval is returned unchanged.  isinstance() is used so
    subclasses resolve too (the previous ``type(value) ==`` checks did not).

    Raises:
        TypeError: for any other type.  Previously this fell through and
            returned None, which surfaced later as a confusing
            AttributeError in the caller.
    """
    if isinstance(value, SamplingPoint):
        return SamplingInterval(value, value)
    if isinstance(value, SamplingInterval):
        return value
    raise TypeError(f'cannot resolve {type(value).__name__} to a SamplingInterval')
def stamps_to_sparse(stamps, vals, num_data_points):
    """Build a 1 x num_data_points sparse COO row with `vals` placed at the
    column positions given by `stamps` (everything else implicitly zero)."""
    row_indices = np.zeros(len(stamps), dtype=int)  # single row: all zeros
    shape = (1, num_data_points)
    return scipy.sparse.coo_matrix((vals, (row_indices, stamps)), shape=shape)
11340177 | <reponame>marcoag/rmf_demos
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, render_template
import os
import sys
import requests
app = Flask(__name__, static_url_path="/static")
@app.route("/")
def home():
    """Serve the dashboard's single-page UI (templates/index.html)."""
    return render_template("index.html")
def main(args=None):
    """Launch the dashboard web server on port 5000.

    The bind address defaults to 0.0.0.0 and can be overridden through the
    WEB_SERVER_IP_ADDRESS environment variable.  `args` is accepted for the
    entry-point signature but unused.
    """
    port_num = 5000
    server_ip = os.environ.get("WEB_SERVER_IP_ADDRESS")
    if server_ip is None:
        server_ip = "0.0.0.0"
    else:
        print(f"Set Server IP to: {server_ip}:{port_num}")
    print("Starting Dispatcher Dashboard GUI Server")
    app.run(host=server_ip, debug=False, port=port_num)
if __name__ == "__main__":
    # Script entry point; argv is passed along but main() currently ignores it.
    main(sys.argv)
9703233 | <filename>Globals.py<gh_stars>0
'''
This module contains a bunch of data that needs to be globally accessible all
across AutoNifty. That mostly means the config settings and stuff directly
related to or derived from them.
'''
import time
import datetime
import ConfigParser
import os
# These defaults largely come from AutoFox. I wonder how many of them we need!
# Default value (as strings, per ConfigParser) for every recognized setting.
CONFIG_DEFAULTS = {
# Site basics and update schedule.
'url':'about:blank',
'basedir':'/home/change_this_setting_seriously',
'updatetime':'0001',
# Offset in HHMM form derived from the local zone (seconds / 36 == HHMM*100/3600).
'tzoffset':str(int(float(-time.timezone) / 36)),
'captionsfile':'captions.txt',
# Directory layout (filesystem side and web-path side).
'comicsdir':'comics/',
'imagedir':'images/',
'archivedir':'d/',
'comicswebpath':'',
'imagewebpath':'',
'archivewebpath':'',
'uploaddir':'comics/',
'sitedir':'public_html/',
'workdir':'workspace/',
'parsedir':'pages/',
'datadir':'data/',
'storyfile':'storyline.txt',
'logfile':'autonifty.log',
'indexfile':'index.html',
'storylinebanners':'storylinebanners.txt',
'dailyext':'.html',
# Navigation buttons and their ghosted (disabled) variants.
'usecssnavbuttons':'0',
'lastday':'',
'firstday':'',
'previousday':'',
'nextday':'',
'lastdayghosted':'',
'firstdayghosted':'',
'previousdayghosted':'',
'nextdayghosted':'',
'storystart':'storystart.gif',
'dailytemplate':'dailytemplate.html',
'jsprefix':'af_',
'storylineusejavascript':'1',
'storylineuseplain':'1',
# Calendar rendering colors.
'bigcalwidth':'3',
'calbacka':'#d0d0d0',
'calbackb':'#b0b0b0',
'calhighlight':'#ffffff',
'calnolink':'#000000',
# RSS feed generation.
'rssfullgenerate':'0',
'rssfullfilename':'comicfeed.rdf',
'rsslitegenerate':'0',
'rsslitefilename':'comicfeedlite.rdf',
'rsslimit':'10',
'rsstitle':'DEFAULT TITLE',
'rsslink':'http://localhost',
'rssdescription':'Edit this in the config file!',
'rsscopyright':'Something something copyright'
}

# Shared parser holding the effective configuration for the whole package.
config = ConfigParser.RawConfigParser(defaults=CONFIG_DEFAULTS, allow_no_value=True)
# True once read_config() has successfully parsed a file.
config_read = False
# Cached (YYYY, MM, DD) tuple; populated lazily by _generate_today().
today = None
def get_today():
    '''
    Returns the "today" tuple. Will generate it if it hasn't been generated
    yet.

    The tuple is (YYYY, MM, DD) in filename order, as produced by
    _generate_today().
    '''
    global today
    # Lazily compute and cache the date tuple on first access.
    if today is None:
        _generate_today()
    return today
def _generate_today():
    '''
    Generates a tuple describing what "today" is.  That is, what AutoNifty
    thinks the current day's comic should be, if said comic exists, after
    accounting for the local time zone, the exact time an update should occur,
    and other stuff like that.

    The tuple will be in filename order, which is to say (YYYY, MM, DD).  It
    can be retrieved with the get_today function.
    '''
    global config, today
    # First, get UTC time and apply the current time zone offset to get what
    # time it is "now".
    now = datetime.datetime.utcnow()
    tzoffset = config.getint('AutoNifty', 'tzoffset')
    # BUG FIX: the offset is encoded as +-HHMM (e.g. -530 for UTC-5:30).
    # Splitting it with plain division/modulo mishandles negative offsets
    # under floor semantics (-530 / 100 == -6 and -530 % 100 == 70, which
    # applies -4h50m instead of -5h30m).  Split the absolute value and
    # re-apply the sign instead.
    sign = -1 if tzoffset < 0 else 1
    offset = abs(tzoffset)
    now += sign * datetime.timedelta(hours=offset // 100, minutes=offset % 100)
    # Now, determine what the update time is.  If "now" is before the update
    # time, rewind a day.  updatetime was validated non-negative by
    # read_config(), so plain floor division is safe here.
    updatetime = config.getint('AutoNifty', 'updatetime')
    updatehour = updatetime // 100
    updateminute = updatetime % 100
    if now.hour < updatehour or (now.hour == updatehour and now.minute < updateminute):
        now -= datetime.timedelta(days=1)
    # Now, spit that out as a tuple!
    today = (now.year, now.month, now.day)
def read_config(filename):
'''
Reads a config file into the config parser, checking to make sure everything
is sane (i.e. all booleans are booleans, all int values are ints). This
will raise exceptions if anything goes wrong. If an exception HAS been
raised, assume the config object is no longer in a valid state and deal
with the problem.
'''
global CONFIG_DEFAULTS, config_read, config
if config_read:
# We already did this!
raise Exception("The config file's already been parsed! Ignoring...")
# Pop open the file. Things might be raised left and right at this point.
config.readfp(open(filename))
# Presumably, open would've raised if the file can't be opened. So we
# should have a proper config now. Let's go through the sanity checks!
# Everything needs to be in the "AutoNifty" section.
if not config.has_section('AutoNifty'):
raise NoSectionError('AutoNifty')
# Each of these must be boolean-like.
checking = 'usecssnavbuttons'
try:
config.getboolean('AutoNifty', checking)
checking = 'storylineusejavascript'
config.getboolean('AutoNifty', checking)
checking = 'storylineuseplain'
config.getboolean('AutoNifty', checking)
checking = 'rssfullgenerate'
config.getboolean('AutoNifty', checking)
checking = 'rsslitegenerate'
config.getboolean('AutoNifty', checking)
except ValueError:
raise ValueError("The {} config option MUST be something that resolves to True or False!".format(checking))
# The time zone offset has to be an integer between -1200 and 1200.
timezone = 0
try:
timezone = config.getint('AutoNifty', 'tzoffset')
except ValueError:
raise ValueError("The tzoffset config option MUST be something that resolves to an integer!")
if timezone < -1200 or timezone > 1200 or abs(timezone) % 100 >= 60:
raise ValueError("{} isn't a valid timezone offset!".format(timezone))
# The update time has to be, y'know, a time. However, we allow the user to
# specify this time with a space in it, so just in case...
updatetime = "".join(config.get('AutoNifty', 'updatetime').split())
try:
updatetime = int(updatetime)
except ValueError:
raise ValueError("The updatetime config option MUST be something that resolves to an integer!")
if updatetime < 0 or updatetime > 2400 or updatetime % 100 >= 60:
raise ValueError("{} isn't a valid update time!".format(updatetime))
# If it's valid, stuff it back in, corrected.
config.set('AutoNifty', 'updatetime', updatetime)
# I guess we'll allow basedir to be relative if the user's really really
# crazy, but we should still warn them.
curdir = config.get('AutoNifty', 'basedir')
if not curdir[0] == '/':
print "WARNING: The basedir config option doesn't start with a slash, so we're assuming that's a relative path..."
# All directories should END with a slash. Let's clean that up now.
_clean_file_path('basedir')
_clean_file_path('workdir')
_clean_file_path('sitedir')
_clean_file_path('comicsdir')
_clean_file_path('imagedir')
_clean_file_path('archivedir')
_clean_file_path('uploaddir')
_clean_file_path('parsedir')
_clean_file_path('datadir')
# The URL should, too, so for now we'll treat that the same as a directory.
_clean_file_path('url')
# Get the web paths cleaned up, too.
_clean_web_path('comicswebpath', 'comicsdir')
_clean_web_path('imagewebpath', 'imagedir')
_clean_web_path('archivewebpath', 'archivedir')
# If all goes well, mark the config as read!
config_read = True
def _clean_file_path(fileconfig):
    """Ensure the directory-style option *fileconfig* ends with a slash."""
    global config
    path = config.get('AutoNifty', fileconfig)
    if path[-1] != '/':
        config.set('AutoNifty', fileconfig, path + '/')
def _clean_web_path(webconfig, fileconfig):
    """Normalize the web path option *webconfig*.

    Web paths mimic their on-filesystem counterparts by default: when the
    option is blank, the value of *fileconfig* is copied over.  The result
    must end with a slash but must NOT start with one, since the 'url'
    config option already supplies the leading part.
    """
    global config
    webdir = config.get('AutoNifty', webconfig)
    if not webdir:
        webdir = config.get('AutoNifty', fileconfig)
    # Drop a single leading slash, if present.
    if webdir[0] == '/':
        webdir = webdir[1:]
    # Guarantee a single trailing slash.
    if webdir[-1] != '/':
        webdir += '/'
    config.set('AutoNifty', webconfig, webdir)
def get_directory_for(configthingy):
    '''
    Gets the full, effective path for a configuration directive.  That is, this
    will return the given config value with basedir prepended, as well as any
    other dependent directories (archivedir and comicsdir would have sitedir
    prepended, etc), unless the directory starts with a forward slash, in which
    case the path is assumed to be absolute regardless.

    Raises RuntimeError if the config hasn't been read yet or if configthingy
    is not a directory-based option.
    '''
    global config, config_read
    if not config_read:
        raise RuntimeError("The config file hasn't been properly read yet!")
    # First off, basedir.  Everything comes from here (unless it's an absolute
    # path, but that won't happen often, hopefully).
    basedir = config.get('AutoNifty', 'basedir')
    # The second-level directories.
    sitedir = _attach_path(basedir, config.get('AutoNifty', 'sitedir'))
    workdir = _attach_path(basedir, config.get('AutoNifty', 'workdir'))
    # The first-or-second-level family!
    if configthingy == 'basedir':
        return basedir
    elif configthingy == 'sitedir':
        return sitedir
    elif configthingy == 'workdir':
        return workdir
    # The sitedir family!
    elif configthingy in ['comicsdir', 'imagedir', 'archivedir']:
        return _attach_path(sitedir, config.get('AutoNifty', configthingy))
    # The workdir family!
    elif configthingy in ['parsedir', 'datadir', 'uploaddir']:
        return _attach_path(workdir, config.get('AutoNifty', configthingy))
    else:
        # Anything else is invalid.
        raise RuntimeError("{} isn't a valid directory-based config option!".format(configthingy))
def _attach_path(basepath, newpath):
    """Return *newpath* unchanged if it is absolute (starts with '/'),
    otherwise return it prefixed with *basepath*."""
    return newpath if newpath[0] == '/' else basepath + newpath
def get_webpath_for(configthingy):
    '''
    Gets the absolute URL for a given web path (as per config).
    '''
    global config
    # This one's simple; only three options qualify.
    if configthingy not in ['comicswebpath', 'imagewebpath', 'archivewebpath']:
        raise RuntimeError("{} isn't a valid webpath-based config option!".format(configthingy))
    base_url = config.get('AutoNifty', 'url')
    return base_url + config.get('AutoNifty', configthingy)
| StarcoderdataPython |
8077953 |
class UnitNames(object):
    """
    Namespace holding the canonical unit-name strings used as keys in the
    conversion tables.
    """
    __slots__ = ()
    bits = "bits"
    kbits = "Kbits"
    mbits = "Mbits"
    gbits = "Gbits"
    bytes = "Bytes"
    kbytes = "KBytes"
    mbytes = "MBytes"
    gbytes = "GBytes"
# end UnitNames
# Multipliers for unit conversion.  The tables below map
# from_<unit>[to_unit] -> multiplier.
IDENTITY = 1
ONE = 1.0
KILO = 10**3
TO_KILO = ONE/KILO
MEGA = KILO**2
TO_MEGA = ONE/MEGA
GIGA = KILO * MEGA
TO_GIGA = ONE/GIGA
# 8 bits per byte.
BYTE = 8
TO_BYTE = ONE/BYTE

# Column order shared by every conversion row below:
# bits, Kbits, Mbits, Gbits, Bytes, KBytes, MBytes, GBytes.
to_units = [UnitNames.bits,
            UnitNames.kbits,
            UnitNames.mbits,
            UnitNames.gbits,
            UnitNames.bytes,
            UnitNames.kbytes,
            UnitNames.mbytes,
            UnitNames.gbytes]

# Bit-to-bit conversion rows; each subsequent row shifts the previous one
# right by one column and prepends the next power-of-1000 multiplier.
bit_row_1 = [IDENTITY, TO_KILO,
             TO_MEGA, TO_GIGA,]
bit_row_2 = [KILO] + bit_row_1[:-1]
bit_row_3 = [MEGA] + bit_row_2[:-1]
bit_row_4 = [GIGA] + bit_row_3[:-1]

# Bit-to-byte rows: the bit rows scaled by 1/8.
to_byte_row_1 = [TO_BYTE * converter for converter in bit_row_1]
to_byte_row_2 = [KILO * TO_BYTE] + to_byte_row_1[:-1]
to_byte_row_3 = [MEGA * TO_BYTE] + to_byte_row_2[:-1]
to_byte_row_4 = [GIGA * TO_BYTE] + to_byte_row_3[:-1]

# Byte-to-bit rows: the bit rows scaled by 8.
byte_row_1 = [BYTE * conversion for conversion in bit_row_1]
byte_row_2 = [KILO * BYTE] + byte_row_1[:-1]
byte_row_3 = [MEGA * BYTE] + byte_row_2[:-1]
byte_row_4 = [GIGA * BYTE] + byte_row_3[:-1]

# Per-source-unit lookup tables: from_<unit>[target_unit] -> multiplier.
from_bits = dict(zip(to_units, bit_row_1 + to_byte_row_1))
from_kbits = dict(zip(to_units, bit_row_2 + to_byte_row_2))
from_mbits = dict(zip(to_units, bit_row_3 + to_byte_row_3))
from_gbits = dict(zip(to_units, bit_row_4 + to_byte_row_4))
from_bytes = dict(zip(to_units, byte_row_1 + bit_row_1))
from_kbytes = dict(zip(to_units, byte_row_2 + bit_row_2))
from_mbytes = dict(zip(to_units, byte_row_3 + bit_row_3))
from_gbytes = dict(zip(to_units, byte_row_4 + bit_row_4))
class UnitConverter(dict):
    """
    Conversion lookup table: ``converter[from_unit][to_unit]`` yields the
    multiplier that converts a value between the two units.  Use the
    UnitNames class for the valid unit-name keys.
    """
    def __init__(self):
        super(UnitConverter, self).__init__()
        self.update({
            UnitNames.bits: from_bits,
            UnitNames.kbits: from_kbits,
            UnitNames.mbits: from_mbits,
            UnitNames.gbits: from_gbits,
            UnitNames.bytes: from_bytes,
            UnitNames.kbytes: from_kbytes,
            UnitNames.mbytes: from_mbytes,
            UnitNames.gbytes: from_gbytes,
        })
# end class UnitConverter
| StarcoderdataPython |
3219844 | from python_qt_binding import QtGui
class BaseWidget(QtGui.QWidget):
    """Base Qt widget tying a GUI control to a topic publisher.

    Subclasses override get_text()/get_range()/set_range()/update() to
    provide widget-specific behaviour; the repeat-related methods delegate
    to the wrapped publisher object.
    """

    def __init__(self, topic_name, publisher, parent=None):
        # BUG FIX: the parent argument used to be discarded (the superclass
        # was always initialised with parent=None), so the widget was never
        # attached to the parent supplied by the caller.
        super(BaseWidget, self).__init__(parent=parent)
        self._topic_name = topic_name
        self._publisher = publisher

    def get_text(self):
        """Return the text to display for this widget (base class: empty)."""
        return ''

    def get_range(self):
        """Return the (min, max) value range (base class: (0, 0))."""
        return (0, 0)

    def set_range(self, r):
        """Set the value range; no-op in the base class."""
        pass

    def is_repeat(self):
        """Return True when the publisher is currently repeating."""
        return self._publisher.is_repeating()

    def set_is_repeat(self, repeat_on):
        """Start or stop the publisher's repeat timer, then request a refresh."""
        if repeat_on:
            self._publisher.set_timer()
        else:
            self._publisher.stop_timer()
        self._publisher.request_update()

    def get_topic_name(self):
        """Return the topic name this widget is bound to."""
        return self._topic_name

    def update(self):
        """Refresh the widget contents; no-op in the base class."""
        pass
| StarcoderdataPython |
6552063 | <reponame>AlexRovan/Python_training
from model.group import Group
import random
import allure
def test_edit_group_by_index(app, db, json_groups, check_ui):
    """Edit a randomly chosen group and verify the DB (and optionally the UI)
    list matches the expected post-edit state.

    Fixtures: app (application wrapper), db (DB access), json_groups (group
    test data), check_ui (flag enabling the UI cross-check).
    """
    with allure.step('Add group, if no groups now'):
        group = json_groups
        # Make sure there is at least one group to edit.
        if app.group.count() == 0:
            app.group.create(group)
    with allure.step('Given a group list'):
        old_groups = db.get_group_list()
    with allure.step('Modify random Group'):
        group_edit = random.choice(old_groups)
        # Keep the edited group's id so it can be matched after the edit.
        group.id = group_edit.id
        app.group.edit_group_by_id(group, group_edit.id)
    with allure.step('Group modified'):
        new_groups = db.get_group_list()
        # Expected state: the chosen group replaced by its edited version.
        old_groups.remove(group_edit)
        old_groups.append(group)
        assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)
        if check_ui:
            assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_groups_list(), key=Group.id_or_max)
3373703 | <filename>setup.py
# Prefer setuptools (required for entry_points support); fall back to
# distutils on ancient installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='clippercard',
    version='0.2.0',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['clippercard'],
    package_dir={'clippercard':'clippercard'},
    # Installs a `clippercard` console command pointing at main:main.
    entry_points={
        'console_scripts': [
            'clippercard = clippercard.main:main',
        ]
    },
    scripts=[],
    url='https://github.com/anthonywu/clippercard',
    license='LICENSE.txt',
    description='Unofficial Python API for Clipper Card (transportation pass used in the San Francisco Bay Area)',
    long_description=open('README.rst').read(),
    install_requires=[
        'BeautifulSoup4 >= 4.3.2',
        # NOTE(review): this exact pin ('3.3.0r2') is a pre-PEP-440 version
        # string and may be rejected by modern pip -- verify before release.
        'configparser == 3.3.0r2',
        'docopt >= 0.6.1',
        'requests >= 2.2.1'
    ],
)
| StarcoderdataPython |
8006407 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import string
import os
import json
from contextlib import contextmanager
try:
    import serial
except ImportError:
    # BUG FIX: CncLibraryException is defined at the *bottom* of this module,
    # so it does not exist yet when this guard runs; raising it here used to
    # fail with a NameError that masked the real problem.  Raise ImportError
    # with the same message instead.
    raise ImportError("pyserial needs to be installed!")
from .version import VERSION
__version__ = VERSION
class CncLibrary(object):
    """
    Library for controlling a CNC-milling machine controlled by Arduino based motion controller using gcodes. E.g. ShapeOko 2.

    Idea here is that you can modify/build such a machine to be used for testing devices that require physical contact e.g push button on a button pad device.

    Coordinates are handled as (x, y, z) tuples rounded to one decimal.
    Positions are configured via a JSON file (see initialize_device_locations);
    all entries are relative to the mandatory 'device_location' entry, whose
    z value is treated as the "safe height" for the tool.
    """
    # Robot Framework library metadata.
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    ROBOT_LIBRARY_VERSION = VERSION

    def __init__(self, device='', baud=115200, xy_speed=10000, z_speed=800):
        """ device must be given as the device that is connected e.g. /dev/tty.usbmodem1411 """
        self.device = device
        self.baud = baud
        # Serial handle is opened lazily by the `serial` property.
        self._serial = None
        # Feed rates (gcode F parameter) for horizontal and vertical moves.
        self.xy_speed = xy_speed
        self.z_speed = z_speed
        # Seconds to wait for a move to reach its target before failing.
        self.timeout = 10
        # if not device or not baud:
        #     raise CncLibraryException('Device and baud is not set')
        # Named coordinate table loaded from the JSON config file.
        self.locations = {}

    @property
    def serial(self):
        """ open serial port connection to the connected device """
        if self._serial:
            return self._serial
        else:
            self._serial = serial.Serial(self.device, self.baud)
            # self._serial.write("\r\n\r\n".encode())
            time.sleep(2)  # Wait for grbl to initialize
            self._serial.flushInput()
            return self._serial

    def _send_gcode(self, gcode_line):
        """ send gcode to serial device """
        print( 'Sending: ' + gcode_line )
        # print(type(gcode_line) )
        self.serial.write((gcode_line + '\r\n').encode())
        self.serial.flush()
        grbl_out = self.serial.readline()  # Wait for grbl response with carriage return
        print( ' : ' + grbl_out.strip().decode() )
        time.sleep(2)  # discard following contents
        self._serial.flushInput()

    def _move(self, speed, xcord, ycord):
        """ move the tip of the device in xy-plane and wait for it to reach destination """
        # `speed` is the pre-formatted feed-rate token, e.g. "F10000".
        gcode_line = "G01 " + speed + " X" + xcord + " Y" + ycord
        with self._moving(xcord, ycord):
            self._send_gcode(gcode_line)

    def _current_position(self):
        """ return current position from the device """
        # "?" is the grbl status-report request; the reply is assumed to
        # contain a "MPos:x,y,z" machine-position field -- TODO confirm the
        # controller's status report format matches this parsing.
        self.serial.write("?".encode())
        grbl_out = self.serial.readline()  # Wait for grbl response with carriage return
        grbl_out = grbl_out.split("MPos:".encode(), 1)[1]
        coordinates = grbl_out.split(','.encode(), 3)
        # Round to one decimal so positions compare reliably against targets.
        x = round(float(coordinates[0]), 1)
        y = round(float(coordinates[1]), 1)
        z = round(float(coordinates[2]), 1)
        return (x, y, z)

    #### Keywords ####

    def request_position(self):
        """ Returns current position of the device as tuple """
        position = self._current_position()
        print( position )
        return position

    def close_connection(self):
        """ close serial connection """
        self.serial.close()

    # Device location initializations

    def initialize_device_locations(self, conf_file_path):
        """ Initialize coordinate positions of the tested device from .json configuration file see examples """
        with open(conf_file_path) as f:
            self.locations = json.loads(f.read())
        if "device_location" not in self.locations:
            raise CncLibraryException("using 'device_location' is mandatory. This should be the highest point of your device.")

    # Calibration and home position

    def set_home(self):
        """ Set home position that will be treated as 0,0,0 for device coordinates """
        # G92 defines the current position as the coordinate-system origin.
        gcode = 'G92 X0 Y0 Z0'
        self._send_gcode(gcode)
        time.sleep(0.2)
        print( self.locations['device_location']['x'] )
        print( self.locations['device_location']['y'] )
        print( self.locations['device_location']['z'] )

    def raise_tool(self):
        """ Raise the tooltip of the mill to its safe height defined as 'device_location' z-coordinate """
        gcode = "G01 F500 Z"+str(self.locations['device_location']['z'])
        print( gcode )
        with self._pressing(self.locations['device_location']['z']):
            self._send_gcode(gcode)

    def go_to_home(self):
        """ Go to home position 0,0,0 """
        # ensure up position
        self.raise_tool()
        xcord = '0.0'
        ycord = '0.0'
        with self._moving(xcord, ycord):
            self._send_gcode("G01 F"+str(self.xy_speed)+" X0.0 Y0.0")

    def lower_tool(self):
        """ Lower tool to 0 z-coordinate. Use this after you have moved to home position """
        # todo: combine with go_to_home?
        zcord_down = '0.0'
        with self._pressing(zcord_down):
            self._send_gcode("G01 F"+str(self.z_speed)+" Z0.0")

    def press(self, *args):
        """
        1. Raise the tool to safe height.
        2. Move the tool in xy-coordinate of given position defined that match definition in config file, relative to 'device_location'.
        3. Ensure that correct position is reached in 10 seconds.
        4. Lower the tool to z-coordinate of the position and ensure it's reached in 10 seconds.
        5. Raise the tool to safe height and ensure it's raised.
        """
        for goal in args:
            self.go_to(goal)
            self.press_button(self.locations[goal]['z'])

    # Moving to buttons/camera

    def go_to(self, *args):
        """
        1. Raise the tool to safe height and ensure it's raised.
        2. Move the tool in xy-plane to position relative to 'device_location'
        """
        for goal in args:
            # ensure up position
            self.raise_tool()
            # Target coordinates are offsets from 'device_location'.
            xcord = str(float(self.locations[goal]['x']) + float(self.locations['device_location']['x']))
            ycord = str(float(self.locations[goal]['y']) + float(self.locations['device_location']['y']))
            self._move("F"+str(self.xy_speed), xcord, ycord)

    def direct_go_to(self, position):
        """
        NOTE: USING THIS IS DANGEROUS AS THE TOOL MIGHT HIT ANYTHING THAT IS ON ITS WAY USING THIS.

        Intention here is to use this moving in xy-plane when you need to move on flat surface e.g. touch screen.
        Or to move the tool to known safe position e.g. for using a bundled camera.

        Directly move to the position. Relative to the home position not 'device_location' like the normal go_to.
        """
        x = self.locations[position]['x']
        y = self.locations[position]['y']
        z = self.locations[position]['z']
        with self._moving(x, y, z):
            self._send_gcode("G01 F"+str(self.z_speed)+" X"+str(x)+" Y"+str(y)+" Z"+str(z))

    def press_button(self, down):
        """
        Lower the device to z-coordinate given as argument and raise it back up.
        """
        # press down
        try:
            with self._pressing(down):
                self._send_gcode("G01 F"+str(self.z_speed)+" Z" + str(down))
        except Exception as e:
            raise e
        # raise the tip back to safe position
        finally:
            with self._pressing(self.locations['device_location']['z']):
                self._send_gcode("G01 F"+str(self.z_speed)+" Z" + str(self.locations['device_location']['z']))

    def execute_gcode_file(self, filename):
        """
        Excute gcodes directly from a file where each line is a gcode. Gcodes are sent directly to serial port without any assurance what they actually do.
        """
        with open(filename) as f:
            for line in f.readlines():
                self._send_gcode(line)

    def execute_gcode(self, command):
        """
        Excute gcode directly. Gcodes are sent directly to serial port without any assurance what they actually do.
        """
        self._send_gcode(command)

    @contextmanager
    def _moving(self, target_x, target_y, target_z=None):
        """
        Context manager for moving the tool that ensures the tool moved to given position.
        if no target_z argument is given we assume safe height e.g. 'device_location' z-coodinate.
        """
        if not target_z:
            target_z = self.locations['device_location']['z']
        yield
        # ensure tip is in correct x,y position
        self._ensure_position(target_x, target_y, target_z)

    @contextmanager
    def _pressing(self, target):
        """ Context manager for moving the tool that ensures the tool moved to given position """
        yield
        self._ensure_z_position(target)

    def _ensure_position(self, target_x, target_y, target_z):
        """ ensure that tool is in given coordinates """
        # Poll the device until it reports the target position or the
        # configured timeout elapses.
        init_time = time.time()
        while self._current_position() != (float(target_x), float(target_y), float(target_z)):
            print( self._current_position() )
            print( target_x, target_y, target_z )
            time.sleep(0.2)
            elapsed = time.time() - init_time
            if elapsed > self.timeout:
                raise CncLibraryException('Exceeded 10s timeout.')

    def _ensure_z_position(self, target):
        """ ensure that tool is in given z-position """
        init_time = time.time()
        while (self._current_position()[2] != float(target)):
            print( self._current_position()[2] )
            print( target )
            time.sleep(0.2)
            elapsed = time.time() - init_time
            if elapsed > self.timeout:
                raise CncLibraryException('Exceeded 10s timeout.')
class CncLibraryException(Exception):
    """Raised for CNC library errors (move timeouts, bad configuration)."""
    pass
| StarcoderdataPython |
188975 | # -*- coding: utf-8 -*-
"""lhs_opt.py: Module to generate design matrix from an optimized
Latin Hypercube design
"""
import numpy as np
from . import lhs
__author__ = "<NAME>"
def create_ese(n: int, d: int, seed: int, max_outer: int,
               obj_function: str = "w2_discrepancy",
               threshold_init: float = 0,
               num_exchanges: int = 0,
               max_inner: int = 0,
               improving_params: list = None,
               exploring_params: list = None) -> np.ndarray:
    """Generate an optimized LHS using Enhanced Stochastic Evolutionary Alg.
    The default parameters of the optimization can be overridden, if necessary.

    :param n: the number of samples
    :param d: the number of dimension
    :param seed: the random seed number
    :param max_outer: the maximum number of outer iterations
    :param obj_function: the objective function to optimize
    :param threshold_init: the initial threshold
    :param num_exchanges: the number of candidates in perturbation step
    :param max_inner: the maximum number of inner iterations
    :param improving_params: the 2 parameters used in improve process
        (default [0.1, 0.8])
        (a) the cut-off value to decrease the threshold
        (b) the multiplier to decrease or increase the threshold
    :param exploring_params: the 4 parameters used in explore process
        (default [0.1, 0.8, 0.9, 0.7])
        (a) the cut-off value of acceptance, start increasing the threshold
        (b) the cut-off value of acceptance, start decreasing the threshold
        (c) the cooling multiplier for the threshold
        (d) the warming multiplier for the threshold
    :raises ValueError: if d < 2 (optimization is irrelevant for 1-D designs)
    """
    from .opt_alg.stochastic_evolutionary import optimize
    # BUG FIX: the parameter lists used to be mutable default arguments,
    # which are shared across calls in Python; use None sentinels instead.
    if improving_params is None:
        improving_params = [0.1, 0.8]
    if exploring_params is None:
        exploring_params = [0.1, 0.8, 0.9, 0.7]
    # If dimension is less than 2, abort optimization
    if d < 2:
        raise ValueError("Dimension less than 2, optimization irrelevant!")
    if seed is not None:
        np.random.seed(seed)
    # Create initial LHD sample
    dm = lhs.create(n, d, seed=seed)
    # Optimize the LHD sample
    dm_opt = optimize(dm, obj_function, threshold_init, num_exchanges,
                      max_inner, max_outer, improving_params, exploring_params)
    return dm_opt.dm_best
| StarcoderdataPython |
5155243 | import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class ObserverFolder(FileSystemEventHandler):
    """Watchdog event handler that logs filesystem activity to stdout."""

    def on_modified(self, event):
        # Fired when a watched file or directory is modified.
        print('File Modified')

    def on_created(self, event):
        # Fired when a file or directory is created.
        print('File Created')

    def on_moved(self, event):
        # Fired when a file or directory is moved or renamed.
        print('File Moved')

    def on_closed(self, event):
        # Fired when a file opened for writing is closed
        # (NOTE: only available in newer watchdog releases -- verify version).
        print('File Closed')

    def on_deleted(self, event):
        # Fired when a file or directory is deleted.
        print('File Deleted')

    def on_any_event(self, event):
        # Catch-all hook fired for every event in addition to the specific one.
        print('Event Occured')
# Folder to watch recursively for filesystem events.
downloads_folder = 'C:/Users/Gene/Downloads/'
# NOTE(review): destination_folder is currently unused -- presumably intended
# for a future move/copy step.
destination_folder = './dest/'

event_handler = ObserverFolder()
observer = Observer()
observer.schedule(event_handler, downloads_folder, recursive=True)
observer.start()
try:
    # Keep the main thread alive; the observer runs in a background thread.
    while True:
        time.sleep(5)
except KeyboardInterrupt:
    observer.stop()
observer.join()
3299579 | <gh_stars>10-100
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    # Human-readable action description referenced by the plugin runtime.
    # NOTE(review): "endpoint an endpoint" looks like a typo in the plugin
    # spec; this file is generated, so fix it in the spec, not here.
    DESCRIPTION = "Quarantine (isolate) endpoint an endpoint"
class Input:
    # Keys of the action's input parameters (mirror the schema below).
    AGENT = "agent"
    QUARANTINE_STATE = "quarantine_state"
    WHITELIST = "whitelist"
class Output:
    # Keys of the action's output values (mirror the schema below).
    SUCCESS = "success"
    WHITELISTED = "whitelisted"
class QuarantineInput(insightconnect_plugin_runtime.Input):
    # JSON schema validating the action's input (agent, quarantine_state,
    # whitelist).  Generated from the plugin spec -- do not hand-edit.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "agent": {
      "type": "string",
      "title": "Agent",
      "description": "Agent to perform quarantine action on. This must be either a MAC address or hostname",
      "order": 1
    },
    "quarantine_state": {
      "type": "boolean",
      "title": "Quarantine State",
      "description": "True to quarantine host, false to unquarantine host",
      "default": true,
      "order": 3
    },
    "whitelist": {
      "type": "array",
      "title": "Whitelist",
      "description": "MAC addresses for machines to avoid quarantining. Both hyphenated and colon-delimited formats are acceptable",
      "items": {
        "type": "string"
      },
      "order": 2
    }
  },
  "required": [
    "agent",
    "quarantine_state"
  ]
}
    """)

    def __init__(self):
        # Registers the schema with the runtime's input validator.
        super(self.__class__, self).__init__(self.schema)
class QuarantineOutput(insightconnect_plugin_runtime.Output):
    # JSON schema validating the action's output (success, whitelisted).
    # Generated from the plugin spec -- do not hand-edit.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "success": {
      "type": "boolean",
      "title": "Success",
      "description": "Whether or not the quarantine/unquarantine was successful",
      "order": 1
    },
    "whitelisted": {
      "type": "boolean",
      "title": "Whitelisted",
      "description": "Whether or not the quarantine/unquarantine failed due to whitelisting",
      "order": 2
    }
  },
  "required": [
    "success",
    "whitelisted"
  ]
}
    """)

    def __init__(self):
        # Registers the schema with the runtime's output validator.
        super(self.__class__, self).__init__(self.schema)
| StarcoderdataPython |
4932299 | import datetime
import os
import airflow
from data_pipelines.actions.aws import AppDataBaseToS3, S3ToDatawarehouse
from data_pipelines.airflow.operator import ActionOperator
# Tables to sync from the application database into the warehouse.
table_list = ["customers", "transactions", "transaction_details"]

# Daily DAG: app DB -> S3 data lake -> warehouse, one extract/load pair per table.
dag = airflow.DAG(
    dag_id="pipeline.appdb_to_datawarehouse",
    start_date=datetime.datetime(2021, 1, 1),
    schedule_interval="@daily",
)

with dag:
    for table in table_list:
        # Extract: pull one table for the execution date ({{ ds }}) from the
        # app's HTTP endpoint...
        extract_source_kwargs = {
            "url": "http://host.docker.internal:5000",
            "params": {"date": "{{ ds }}", "table_name": table},
        }
        # ...and land it in S3 under <ds>/<table>/data.json.  The quadruple
        # braces escape to literal {{ ds }} so Airflow templates it at runtime.
        extract_sink_kwargs = {
            "aws_access_key_id": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_key_id": os.getenv("AWS_SECRET_KEY"),
            "s3_key": f"{{{{ ds }}}}/{table}/data.json",
            "s3_bucket": "data-pipeline-datalake-henry",
        }
        extract_kwargs = {
            "task_id": "extract_{}".format(table),
            "source_kwargs": extract_source_kwargs,
            "sink_kwargs": extract_sink_kwargs,
        }
        extract = ActionOperator(
            action_class=AppDataBaseToS3, dag=dag, **extract_kwargs
        )
        # Load: read the same S3 object back...
        load_source_kwargs = {
            "aws_access_key_id": os.getenv("AWS_ACCESS_KEY"),
            "aws_secret_key_id": os.getenv("AWS_SECRET_KEY"),
            "s3_key": f"{{{{ ds }}}}/{table}/data.json",
            "s3_bucket": "data-pipeline-datalake-henry",
        }
        # ...and write it into the warehouse table.
        # NOTE(review): credentials are hard-coded here; these should come
        # from an Airflow connection or secrets backend.
        load_sink_kwargs = {
            "host": "host.docker.internal",
            "port": 5438,
            "username": "henry",
            "password": "<PASSWORD>",
            "database": "henry",
            "table": table,
            "schema": "henry",
        }
        load_kwargs = {
            "task_id": "load_{}".format(table),
            "source_kwargs": load_source_kwargs,
            "sink_kwargs": load_sink_kwargs,
        }
        load = ActionOperator(
            action_class=S3ToDatawarehouse, dag=dag, **load_kwargs
        )
        # Each table's load depends only on its own extract.
        extract >> load
| StarcoderdataPython |
1672653 | <filename>secrets.template.py
##
## SP API Developer Settings
##

# This is the first part of the LWA credentials from the developer console
# and is specific to the application you set up. This looks something like
# "amzn1.application-oa2-client.<hex id>"
client_id = None

# This is the hidden part of the LWA credentials from the developer console
client_secret = None

# This is what you get after you click Authorize to initiate a self-authorization
# for this specific application in the specific marketplace.
refresh_token = None

##
## AWS Credentials
##

# If you aren't in a lambda you need to fill out the following 3 items.
# You also don't need the first two if you have system-wide credentials
# set up for AWS e.g. via `aws configure`
access_key = None
secret_key = None
registered_role_arn = None
11354510 | <filename>celltraj/__init__.py
"""Top-level package for CellTraj."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| StarcoderdataPython |
6425606 | <reponame>RobertPastor/flight-profile<filename>trajectory/management/commands/AirportsDatabaseLoad.py
from django.core.management.base import BaseCommand
from trajectory.management.commands.Airports.AirportDatabaseFile import AirportsDatabase
from trajectory.models import AirlineAirport
from airline.models import AirlineRoute
class Command(BaseCommand):
    # BUG FIX: the old help text ("Reads the Synonym file and load the
    # Aircrafts table") was copied from a different command and described
    # the wrong behaviour; this command loads airports.
    help = 'Loads the AirlineAirport table with the airports used by the airline routes'

    def handle(self, *args, **options):
        """Rebuild the AirlineAirport table.

        Clears the table, collects the airports referenced by the airline
        routes, and (if the airports database file exists) loads those
        airports from it.
        """
        AirlineAirport.objects.all().delete()
        ''' load only airports defined in the airline routes '''
        airlineRoutes = AirlineRoute()
        airlineRoutesAirportsList = airlineRoutes.getAirportsList()
        airportsBD = AirportsDatabase()
        if (airportsBD.exists()):
            print("airports database exists")
            ret = airportsBD.read(airlineRoutesAirportsList)
            print ("read airports database result = {0}".format(ret))
        else:
            print("airports database does not exists")
        return
4817242 | <filename>website/addons/figshare/tests/test_models.py
import mock
from nose.tools import * # noqa
from tests.base import OsfTestCase, get_default_metaschema
from tests.factories import ProjectFactory, AuthUserFactory
from framework.auth import Auth
from website.addons.figshare import settings as figshare_settings
class TestNodeSettings(OsfTestCase):
    """Tests for the figshare addon's node settings `has_auth`/`complete`
    properties under various auth/linkage states."""

    def setUp(self):
        super(TestNodeSettings, self).setUp()
        # A project with the figshare addon enabled on both the node and
        # its creator, linked to a figshare project.
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.project.add_addon('figshare', auth=Auth(self.user))
        self.project.creator.add_addon('figshare')
        self.node_settings = self.project.get_addon('figshare')
        self.user_settings = self.project.creator.get_addon('figshare')
        # Fake OAuth credentials so has_auth evaluates truthy.
        self.user_settings.oauth_access_token = 'legittoken'
        self.user_settings.oauth_access_token_secret = 'legittoken'
        self.user_settings.save()
        self.node_settings.user_settings = self.user_settings
        self.node_settings.figshare_id = '123456'
        self.node_settings.figshare_type = 'project'
        self.node_settings.figshare_title = 'singlefile'
        self.node_settings.save()

    def test_complete_true(self):
        # Auth + linked figshare content -> complete.
        assert_true(self.node_settings.has_auth)
        assert_true(self.node_settings.complete)

    def test_complete_false(self):
        # Auth but no linked figshare id -> not complete.
        self.node_settings.figshare_id = None
        assert_true(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)

    def test_complete_auth_false(self):
        # No user settings -> neither authed nor complete.
        self.node_settings.user_settings = None
        assert_false(self.node_settings.has_auth)
        assert_false(self.node_settings.complete)
class TestCallbacks(OsfTestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = self.user.auth
self.project = ProjectFactory(creator=self.user)
self.non_authenticator = AuthUserFactory()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=Auth(self.project.creator),
)
self.project.add_addon('figshare', auth=self.consolidated_auth)
self.project.creator.add_addon('figshare')
self.node_settings = self.project.get_addon('figshare')
self.user_settings = self.project.creator.get_addon('figshare')
self.user_settings.oauth_access_token = 'legittoken'
self.user_settings.oauth_access_token_secret = 'legittoken'
self.user_settings.save()
self.node_settings.user_settings = self.user_settings
self.node_settings.figshare_id = '123456'
self.node_settings.figshare_type = 'project'
self.node_settings.figshare_title = 'singlefile'
self.node_settings.save()
def test_update_fields_project(self):
num_logs = len(self.project.logs)
# try updating fields
newfields = {
'type': 'project',
'id': '313131',
'name': 'A PROJECT'
}
self.node_settings.update_fields(newfields, self.project, Auth(self.project.creator))
#check for updated
assert_equals(self.node_settings.figshare_id, '313131')
assert_equals(self.node_settings.figshare_type, 'project')
assert_equals(self.node_settings.figshare_title, 'A PROJECT')
# check for log added
assert_equals(len(self.project.logs), num_logs + 1)
def test_update_fields_fileset(self):
num_logs = len(self.project.logs)
# try updating fields
newfields = {
'type': 'fileset',
'id': '313131',
'name': 'A FILESET'
}
self.node_settings.update_fields(newfields, self.project, Auth(self.project.creator))
#check for updated
assert_equals(self.node_settings.figshare_id, '313131')
assert_equals(self.node_settings.figshare_type, 'fileset')
assert_equals(self.node_settings.figshare_title, 'A FILESET')
# check for log added
assert_equals(len(self.project.logs), num_logs + 1)
def test_update_fields_some_missing(self):
num_logs = len(self.project.logs)
# try updating fields
newfields = {
'type': 'project',
'id': '313131',
'name': 'A PROJECT'
}
self.node_settings.update_fields(newfields, self.project, Auth(self.project.creator))
#check for updated
assert_equals(self.node_settings.figshare_id, '313131')
assert_equals(self.node_settings.figshare_title, 'A PROJECT')
# check for log added
assert_equals(len(self.project.logs), num_logs + 1)
    def test_update_fields_invalid(self):
        """Unknown keys are ignored: settings keep their setUp values and no log is added."""
        num_logs = len(self.project.logs)
        # try updating fields
        newfields = {
            'adad': 131313,
            'i1513': '313131',
            'titladad': 'A PROJECT'
        }
        self.node_settings.update_fields(newfields, self.project, Auth(self.project.creator))
        #check for updated
        assert_equals(self.node_settings.figshare_id, '123456')
        assert_equals(self.node_settings.figshare_type, 'project')
        assert_equals(self.node_settings.figshare_title, 'singlefile')
        # check for log added
        assert_equals(len(self.project.logs), num_logs)
    def test_api_url_no_user(self):
        """Without linked user settings, api_url falls back to the non-OAuth API URL."""
        self.node_settings.user_settings = None
        self.node_settings.save()
        assert_equal(self.node_settings.api_url, figshare_settings.API_URL)
    def test_api_url(self):
        """With linked user settings (from setUp), api_url is the OAuth API URL."""
        assert_equal(self.node_settings.api_url, figshare_settings.API_OAUTH_URL)
    def test_before_register_linked_content(self):
        """before_register returns a (non-None) warning when figshare content is linked."""
        assert_false(
            self.node_settings.before_register(
                self.project,
                self.project.creator
            ) is None
        )
    def test_before_register_no_linked_content(self):
        """before_register returns None (no warning) when no figshare id is linked."""
        self.node_settings.figshare_id = None
        assert_true(
            self.node_settings.before_register(
                self.project,
                self.project.creator
            ) is None
        )
    def test_before_remove_contributor_authenticator(self):
        """Removing the authenticating contributor produces a warning message."""
        message = self.node_settings.before_remove_contributor(
            self.project, self.project.creator
        )
        assert_true(message)
    def test_before_remove_contributor_not_authenticator(self):
        """Removing a non-authenticating contributor produces no warning."""
        message = self.node_settings.before_remove_contributor(
            self.project, self.non_authenticator
        )
        assert_false(message)
    def test_after_remove_contributor_authenticator_not_self(self):
        """When someone else removes the authenticator, auth is cleared and the
        message invites re-authentication."""
        auth = Auth(user=self.non_authenticator)
        msg = self.node_settings.after_remove_contributor(
            self.project, self.project.creator, auth
        )
        assert_in(
            self.project.project_or_component,
            msg
        )
        assert_equal(
            self.node_settings.user_settings,
            None
        )
        assert_in("You can re-authenticate", msg)
    def test_after_remove_contributor_authenticator_self(self):
        """When the authenticator removes themselves, auth is cleared and the
        message does NOT invite re-authentication."""
        msg = self.node_settings.after_remove_contributor(
            self.project, self.project.creator, self.consolidated_auth
        )
        assert_in(
            self.project.title,
            msg
        )
        assert_equal(
            self.node_settings.user_settings,
            None
        )
        assert_not_in("You can re-authenticate", msg)
    def test_after_fork_authenticator(self):
        """Forking as the authenticator carries the user settings over to the clone."""
        fork = ProjectFactory()
        clone, message = self.node_settings.after_fork(
            self.project, fork, self.project.creator,
        )
        assert_equal(
            self.node_settings.user_settings,
            clone.user_settings,
        )
    def test_after_fork_not_authenticator(self):
        """Forking as another user leaves the clone without user settings."""
        fork = ProjectFactory()
        clone, message = self.node_settings.after_fork(
            self.project, fork, self.non_authenticator,
        )
        assert_equal(
            clone.user_settings,
            None,
        )
    def test_after_delete(self):
        """Deleting the node wipes all figshare linkage from the node settings."""
        self.project.remove_node(Auth(user=self.project.creator))
        # Ensure that changes to node settings have been saved
        self.node_settings.reload()
        assert_true(self.node_settings.user_settings is None)
        assert_true(self.node_settings.figshare_id is None)
        assert_true(self.node_settings.figshare_type is None)
        assert_true(self.node_settings.figshare_title is None)
    @mock.patch('website.archiver.tasks.archive')
    @mock.patch('website.addons.figshare.model.AddonFigShareNodeSettings.archive_errors')
    def test_does_not_get_copied_to_registrations(self, mock_errors, mock_archive):
        """Registering the project must not copy the figshare addon to the registration."""
        registration = self.project.register_node(
            schema=get_default_metaschema(),
            auth=Auth(user=self.project.creator),
            data='hodor'
        )
        assert_false(registration.has_addon('figshare'))
#TODO Test figshare options and figshare to_json
| StarcoderdataPython |
# Auto-generated OK-grader test spec for question q1c: checks that
# `varied_menu_only` has 22 rows and that its first five 'Restaurant'
# values match the expected array.
9687447 | test = { 'name': 'q1c',
          'points': 1,
          'suites': [ { 'cases': [ {'code': '>>> varied_menu_only.num_rows == 22\nTrue', 'hidden': False, 'locked': False},
                                   { 'code': '>>> np.all(varied_menu_only.column(\'Restaurant\').take(np.arange(5)) == np.array(["O\'Charley\'s", "Cooper\'s Hawk Winery & Restaurants",\n'
                                             "... 'Ninety Nine Restaurants', 'Bar Louie', 'Seasons 52']))\n"
                                             'True',
                                     'hidden': False,
                                     'locked': False}],
                        'scored': True,
                        'setup': '',
                        'teardown': '',
                        'type': 'doctest'}]}
| StarcoderdataPython |
1763551 | <filename>twitter_app/twitter_bot/views.py
from django.shortcuts import render
import tweepy, requests
import sys, requests, json, time, os
from django.contrib.messages.views import messages
from .forms import InputForm
from django.conf import settings
# Twitter API credentials are read from Django settings (kept out of source).
CONSUMER_KEY = settings.CONSUMER_KEY
CONSUMER_SECRET = settings.CONSUMER_SECRET
ACCESS_TOKEN = settings.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = settings.ACCESS_TOKEN_SECRET
# Module-level OAuth1 handler and API client, shared by every request.
auth = tweepy.OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def unfollowers(request):
    """Django view: list the accounts a user follows that do not follow back.

    GET renders an empty form; POST takes a Twitter username, resolves who it
    follows vs. who follows it, and renders the non-followers' screen names
    together with the elapsed lookup time. On a Tweepy error (e.g. unknown
    username) an error message (in Turkish) is flashed instead.
    """
    form = InputForm()
    if request.method == "POST":
        try:
            form = InputForm(request.POST)
            if form.is_valid():
                username = form.cleaned_data['user_input']
                starttime = time.time()
                user = api.get_user(username)
                user_id = user._json['id']
                followed_users_ids = api.friends_ids(user_id)
                followers_ids = api.followers_ids(user_id)
                # IDs followed but not following back (symmetric difference).
                difference_list = diff(followed_users_ids, followers_ids)
                # Resolve IDs to user objects in batches of 100, the maximum
                # accepted by a single lookup_users call.
                counter = 0
                counter_2 = 0
                a = []
                for i in range(len(difference_list)//100+1):
                    counter = i*100
                    counter_2 += 100
                    a.append(api.lookup_users(difference_list[counter:counter_2]))
                nons_list = []
                for i in a:
                    for j in i:
                        nons_list.append(j._json['id'])
                unfollowers_ids_list =list(set(nons_list) - set(followers_ids))
                # Second batched lookup to fetch screen names for the final list.
                counter_3 = 0
                counter_4 = 0
                b=[]
                for i in range(len(unfollowers_ids_list)//100+1):
                    counter_3 = i*100
                    counter_4 += 100
                    b.append(api.lookup_users(unfollowers_ids_list[counter_3:counter_4]))
                unfollowers_list = []
                times = time.time()-starttime
                for i in b:
                    for j in i:
                        unfollowers_list.append(j._json['screen_name'])
                return render(request, 'twitter_bot/nonfollowers.html', {'form':form, 'unfollowers_list':unfollowers_list, 'times':times})
        except tweepy.error.TweepError:
            # NOTE(review): this also catches rate-limit and auth errors, not
            # only "user not found" — the flashed message assumes the latter.
            messages.error(request,'Bu kullanıcı adına sahip birisi yok')
            return render(request,'twitter_bot/nonfollowers.html', {'form':form})
    return render(request,'twitter_bot/nonfollowers.html',{'form':form})
def diff(li1, li2):
    """Return the symmetric difference of two iterables as a list.

    Elements that appear only in ``li1`` come first, followed by elements
    that appear only in ``li2``. Duplicates are collapsed and the relative
    order within each half is arbitrary (set iteration order), exactly as
    in the original implementation.
    """
    # Fix: the original wrapped the result in a redundant list(list(...))
    # and rebuilt each set twice; build each set once instead.
    s1, s2 = set(li1), set(li2)
    return list(s1 - s2) + list(s2 - s1)
| StarcoderdataPython |
11234704 | import json
from pathlib import Path
import requests
import sqlite3
import sys
import xml.etree.ElementTree as ET
import zipfile
from tqdm import tqdm
# Download bill XML files
# Bulk bill-status archives are fetched from govinfo for each
# (congress, bill type) pair and unpacked under Bills/<congress>/.
congresses = ["115", "116"]
bill_types = ["hr", "s", "hjres", "sjres"]
base_url = "https://www.govinfo.gov/bulkdata/BILLSTATUS/"
bill_dir = Path("Bills")
# NOTE(review): mkdir() raises FileExistsError on a re-run; presumably the
# script is intended to run once against a clean directory — confirm.
bill_dir.mkdir()
for congress in tqdm(congresses):
    congress_dir = bill_dir / Path(congress)
    congress_dir.mkdir()
    #print(f"Downloading files for Congress {congress}")
    for bill_type in bill_types:
        zip_url = f"{base_url}{congress}/{bill_type}/BILLSTATUS-{congress}-{bill_type}.zip"
        zip_path = congress_dir / Path(f"BILLSTATUS-{congress}-{bill_type}.zip")
        r = requests.get(zip_url)
        with open(zip_path, 'wb') as f:
            f.write(r.content)
        #print(f"Downloaded {zip_path.name}")
        extract_dir = congress_dir / Path(f"{congress}-{bill_type}")
        with zipfile.ZipFile(zip_path, 'r') as bills_zip:
            bills_zip.extractall(extract_dir)
        #print(f"Extracted to {str(extract_dir)}")
# Parse XML and create SQLite tables
con = sqlite3.connect('congress_bills.db')
cur = con.cursor()
print('Connected to DB')
# Create tables
cur.execute('''CREATE TABLE IF NOT EXISTS Congress (
congress TEXT NOT NULL,
startDate TEXT NOT NULL,
endDate TEXT NOT NULL,
PRIMARY KEY (congress))''')
cur.execute('''CREATE TABLE IF NOT EXISTS Member (
member_id TEXT NOT NULL,
firstName TEXT NOT NULL,
middleName TEXT,
lastName TEXT NOT NULL,
birthday TEXT NOT NULL,
gender TEXT NOT NULL,
PRIMARY KEY (member_id))''')
cur.execute('''CREATE TABLE IF NOT EXISTS Role (
member_id TEXT NOT NULL,
congress TEXT NOT NULL,
chamber TEXT NOT NULL,
party TEXT NOT NULL,
state TEXT NOT NULL,
district TEXT,
PRIMARY KEY (member_id, congress),
FOREIGN KEY (member_id)
REFERENCES Member (member_id)
ON DELETE CASCADE
ON UPDATE NO ACTION,
FOREIGN KEY (congress)
REFERENCES Congress (congress)
ON DELETE CASCADE
ON UPDATE NO ACTION)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Area (
area TEXT NOT NULL,
PRIMARY KEY (area))''')
cur.execute('''CREATE TABLE IF NOT EXISTS Bill (
bill_num TEXT NOT NULL,
congress TEXT NOT NULL,
title TEXT NOT NULL,
date_intro TEXT NOT NULL,
area TEXT,
enacted TEXT NOT NULL,
vetoed TEXT NOT NULL,
PRIMARY KEY (bill_num, congress),
FOREIGN KEY (congress)
REFERENCES Congress (congress)
ON DELETE CASCADE
ON UPDATE NO ACTION,
FOREIGN KEY (area)
REFERENCES Area (area)
ON DELETE CASCADE
ON UPDATE NO ACTION)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Subject (
subject TEXT NOT NULL,
PRIMARY KEY (subject))''')
cur.execute('''CREATE TABLE IF NOT EXISTS Bill_Subject (
bill_num TEXT NOT NULL,
congress TEXT NOT NULL,
subject TEXT NOT NULL,
PRIMARY KEY (subject, bill_num, congress),
FOREIGN KEY (bill_num, congress)
REFERENCES Bill (bill_num, congress)
ON DELETE CASCADE
ON UPDATE NO ACTION,
FOREIGN KEY (subject)
REFERENCES Subject (subject)
ON DELETE CASCADE
ON UPDATE NO ACTION)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Sponsor (
member_id TEXT NOT NULL,
bill_num TEXT NOT NULL,
congress TEXT NOT NULL,
PRIMARY KEY (member_id, bill_num, congress),
FOREIGN KEY (member_id)
REFERENCES Member (member_id)
ON DELETE CASCADE
ON UPDATE NO ACTION,
FOREIGN KEY (bill_num, congress)
REFERENCES Bill (bill_num, congress)
ON DELETE CASCADE
ON UPDATE NO ACTION)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Cosponsor (
member_id TEXT NOT NULL,
bill_num TEXT NOT NULL,
congress TEXT NOT NULL,
PRIMARY KEY (member_id, bill_num, congress),
FOREIGN KEY (member_id)
REFERENCES Member (member_id)
ON DELETE CASCADE
ON UPDATE NO ACTION,
FOREIGN KEY (bill_num, congress)
REFERENCES Bill (bill_num, congress)
ON DELETE CASCADE
ON UPDATE NO ACTION)''')
print('Tables created')
con.commit()
# Enter data into Congress table.
# Fix: files were opened without context managers (and json_file below was
# never closed at all); use `with` so handles are released even on error.
with open('congress_terms.txt') as congress_file:
    terms_dates = [tuple(line.strip().split(',')) for line in congress_file]
cur.executemany('INSERT INTO Congress VALUES (?, ?, ?)', terms_dates)
print('Congress table filled')
# Enter data into Area table (one policy area per line).
with open('policy_areas.txt') as area_file:
    area_data = [(line.strip(),) for line in area_file]
cur.executemany('INSERT INTO Area VALUES (?)', area_data)
print('Area table filled')
# Enter data into Subject table (one legislative subject per line).
with open('legislative_subjects.txt') as subject_file:
    subject_data = [(line.strip(),) for line in subject_file]
cur.executemany('INSERT INTO Subject VALUES (?)', subject_data)
print('Subject table filled')
# Enter data into Member and Role tables from the per-chamber JSON dumps.
for congress in congresses:
    for chamber in ['house', 'senate']:
        with open(f'./congress_members/{congress}-{chamber}.json', 'r') as json_file:
            json_data = json.load(json_file)
        members = json_data['results'][0]['members']
        for member in members:
            # INSERT OR REPLACE: a member serving in both congresses appears
            # in several dumps; the latest record wins.
            member_data = (member['id'], member['first_name'], member['middle_name'], member['last_name'], member['date_of_birth'], member['gender'])
            cur.execute('INSERT OR REPLACE INTO Member VALUES (?,?,?,?,?,?)', member_data)
            # district is only meaningful for House members.
            role_data = (member['id'], congress, chamber, member['party'], member['state'], (member['district'] if chamber == 'house' else None))
            cur.execute('INSERT OR REPLACE INTO Role VALUES (?,?,?,?,?,?)', role_data)
print('Member and Role tables filled')
con.commit()
# Enter Bill data: walk every extracted BILLSTATUS XML and populate Bill,
# Bill_Subject, Sponsor and Cosponsor.
bill_xmls = bill_dir.glob('**/*.xml')
for bill_xml in tqdm(bill_xmls, colour='blue', desc='Bill processing'):
    try:
        xml_root = ET.parse(bill_xml).getroot()
        bill_elem = xml_root.find('bill')
        bill_num = bill_elem.findtext('billType').lower() + bill_elem.findtext('billNumber')
        bill_congress = bill_elem.findtext('congress')
        bill_title = bill_elem.findtext('title')
        bill_date = bill_elem.findtext('introducedDate')
        bill_area = bill_elem.find('policyArea').findtext('name')
        bill_actions = bill_elem.find('actions')
        action_codes = [e.findtext('actionCode') for e in bill_actions]
        # Action codes 36000/41000 mark enactment, 31000 a veto.
        bill_enacted = 'Yes' if ('36000' in action_codes or '41000' in action_codes) else 'No'
        bill_vetoed = 'Yes' if ('31000' in action_codes) else 'No'
        bill_data = (bill_num, bill_congress, bill_title, bill_date, bill_area, bill_enacted, bill_vetoed)
        cur.execute('INSERT INTO Bill VALUES (?,?,?,?,?,?,?)', bill_data)
        bill_subjects = [e.findtext('name') for e in bill_elem.find('subjects').find('billSubjects').find('legislativeSubjects').findall('item')]
        for subject in bill_subjects:
            cur.execute('INSERT INTO Bill_Subject VALUES (?,?,?)', (bill_num, bill_congress, subject))
        bill_sponsor = bill_elem.find('sponsors').find('item')
        # Fix: `if bill_sponsor:` tested Element truthiness, which is
        # deprecated and False for a childless element; compare to None.
        if bill_sponsor is not None:
            bill_sponsor = bill_sponsor.findtext('bioguideId')
            cur.execute('INSERT INTO Sponsor VALUES (?,?,?)', (bill_sponsor, bill_num, bill_congress))
        cosponsor_elems = bill_elem.find('cosponsors').findall('item')
        # Withdrawn cosponsorships are excluded.
        bill_cosponsors = [e.findtext('bioguideId') for e in cosponsor_elems if not e.findtext('sponsorshipWithdrawnDate')]
        for cosponsor in bill_cosponsors:
            cur.execute('INSERT INTO Cosponsor VALUES (?,?,?)', (cosponsor, bill_num, bill_congress))
    except Exception:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; catch Exception so the run can still be aborted.
        print(f'Error parsing {bill_xml.name}')
        print(sys.exc_info()[0])
        print()
        continue
print('Bill, Bill_Subject, Sponsor, and Cosponsor tables filled')
con.commit()
con.close()
| StarcoderdataPython |
3293325 | import os
import importlib.util
from pykeops.common.gpu_utils import get_gpu_number
###############################################################
# Initialize some variables: the values may be redefined later
##########################################################
# Update config module: Search for GPU
gpu_available = get_gpu_number() > 0  # True when at least one GPU is detected
# Optional-dependency availability flags (checked without importing them).
numpy_found = importlib.util.find_spec("numpy") is not None
torch_found = importlib.util.find_spec("torch") is not None
###############################################################
# Compilation options
script_folder = os.path.dirname(os.path.abspath(__file__))
bin_folder = (
    ""  # init bin_folder... should be populated with the set_bin_folder() function
)
# Set the verbosity option: display output of compilations. This is a boolean: False or True.
# Read from the PYKEOPS_VERBOSE environment variable ("0"/"1"), defaulting to False.
# (Idiom fix: os.environ.get replaces the membership-test-plus-index pattern.)
verbose = bool(int(os.environ.get("PYKEOPS_VERBOSE", "0")))

# Force compiled and set the cmake build type. This is a string with possible
# value "Release" or "Debug", read from PYKEOPS_BUILD_TYPE (default "Release").
build_type = str(os.environ.get("PYKEOPS_BUILD_TYPE", "Release"))
| StarcoderdataPython |
11287766 | from __future__ import unicode_literals
from mkdocs_combine.mkdocs_combiner import MkDocsCombiner
| StarcoderdataPython |
5004951 | #%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Load the Ozone hole data.
full_data = pd.read_csv('../processed/NASA_ozone_hole_evolution_SH_spring.csv')
# Long-format rows: filter to the 'Ozone hole area' variable, then pivot the
# Mean/Minimum/Maximum 'Reported value' rows into columns of proc_data.
data = full_data[full_data['Variable'] == 'Ozone hole area'].copy()
proc_data = pd.DataFrame()
proc_data['year'] = pd.to_datetime(data[data['Reported value']=='Mean']['Year'], format='%Y')
proc_data['mean'] = data[data['Reported value']=='Mean']['Value']
# .to_numpy() drops the source index so the values align positionally with
# the 'Mean' rows instead of by (mismatched) index labels.
proc_data['minimum'] = (data[data['Reported value']=='Minimum']['Value']).to_numpy()
proc_data['maximum'] = (data[data['Reported value']=='Maximum']['Value']).to_numpy()
#%%
# Generate a plot for ozone hole area
# Base encoding shared by the line, point and band layers.
chart = alt.Chart(proc_data).encode(
    x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
    y=alt.Y(field='mean', type='quantitative', title='Ozone hole area, Southern Hemisphere spring [Millions of km2]'),
    tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
             alt.Tooltip(field='mean', type='quantitative', title='ozone hole area', format='0.2f')]
    ).properties(width='container', height=300)
# Add uncertainty bands
# Shaded min–max region behind the mean line.
bands = chart.mark_area(color='dodgerblue', fillOpacity=0.4).encode(
        x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
        y='minimum:Q',
        y2='maximum:Q'
        ).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
# Layer order: bands at the back, then line, then points; export as Vega-Lite JSON.
layer = alt.layer(bands, l, p)
layer.save('ozone_hole_area.json')
# %%
# Same pivot as above, but for the 'Ozone minimum' variable (Dobson units).
data = full_data[full_data['Variable'] == 'Ozone minimum'].copy()
proc_data = pd.DataFrame()
proc_data['year'] = pd.to_datetime(data[data['Reported value']=='Mean']['Year'], format='%Y')
proc_data['mean'] = data[data['Reported value']=='Mean']['Value']
# .to_numpy() again discards the index for positional alignment.
proc_data['minimum'] = (data[data['Reported value']=='Minimum']['Value']).to_numpy()
proc_data['maximum'] = (data[data['Reported value']=='Maximum']['Value']).to_numpy()
#%%
# Generate a plot for ozone minimum of ozone hole
chart = alt.Chart(proc_data).encode(
    x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
    y=alt.Y(field='mean', type='quantitative', title='Ozone minimum, Southern Hemisphere spring [DU]',
            scale=alt.Scale(domain=[60, 250])),
    tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
             alt.Tooltip(field='mean', type='quantitative', title='ozone minimum', format='0.2f')]
    ).properties(width='container', height=300)
# Add uncertainty bands
bands = chart.mark_area(color='dodgerblue', fillOpacity=0.4).encode(
        x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
        y=alt.Y('minimum:Q', scale=alt.Scale(zero=False)),
        y2='maximum:Q'
        ).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
# Bands behind line and points, exported as Vega-Lite JSON.
layer = alt.layer(bands, l, p)
layer.save('ozone_hole_minimum.json')
| StarcoderdataPython |
3289974 | <reponame>Jumpscale/ays_jumpscale8<filename>templates/fs/fs.btrfs/actions.py
def init_actions_(service, args):
    """Return the dependency graph between this template's actions.

    The mapping reads as "key depends on the listed actions": here,
    ``autoscale`` may only run after ``install`` has completed.
    Both ``service`` and ``args`` are accepted for the AYS calling
    convention but are not used.
    """
    dependencies = {'autoscale': ['install']}
    return dependencies
def install(job):
    """Create (if needed) a btrfs filesystem spanning all data disks and mount it.

    Steps: enumerate block devices with lsblk, keep the vdX data disks
    (excluding the boot disk vda), mkfs.btrfs across them if the first disk
    is not already btrfs, mount the first disk, then add any device not yet
    part of the filesystem.
    """
    service = job.service
    # List available devices
    code, out, err = service.executor.cuisine.core.run('lsblk -J -o NAME,FSTYPE,MOUNTPOINT')
    if code != 0:
        raise RuntimeError('failed to list bulk devices: %s' % err)
    # NOTE(review): `j` is not imported in this file — presumably injected by
    # the AYS/JumpScale runtime; confirm before running standalone.
    disks = j.data.serializer.json.loads(out)
    btrfs_devices = []
    for device in disks['blockdevices']:
        # vda is the system disk; only vdb, vdc, ... are data disks.
        if not device['name'].startswith('vd') or device['name'] == 'vda':
            continue
        btrfs_devices.append(device)
    # Deterministic order: the lowest-named device becomes the "master".
    btrfs_devices.sort(key=lambda e: e['name'])
    if len(btrfs_devices) == 0:
        raise RuntimeError('no data disks on machine')
    master = btrfs_devices[0]
    if master['fstype'] != 'btrfs':
        # creating the filesystem on all of the devices.
        cmd = 'mkfs.btrfs -f %s' % ' '.join(map(lambda e: '/dev/%s' % e['name'], btrfs_devices))
        code, out, err = service.executor.cuisine.core.run(cmd)
        if code != 0:
            raise RuntimeError('failed to create filesystem: %s' % err)
    if master['mountpoint'] is None:
        service.executor.cuisine.core.dir_ensure(service.model.data.mount)
        cmd = 'mount /dev/%s %s' % (master['name'], service.model.data.mount)
        code, out, err = service.executor.cuisine.core.run(cmd)
        if code != 0:
            raise RuntimeError('failed to mount device: %s' % err)
    # Last thing is to check that all devices are part of the filesystem
    # in case we support hot plugging of disks in the future.
    code, out, err = service.executor.cuisine.core.run('btrfs filesystem show /dev/%s' % master['name'])
    if code != 0:
        raise RuntimeError('failed to inspect filesystem on device: %s' % err)
    # parse output.
    import re
    fs_devices = re.findall('devid\s+.+\s/dev/(.+)$', out, re.MULTILINE)
    for device in btrfs_devices:
        if device['name'] not in fs_devices:
            # add device to filesystem
            cmd = 'btrfs device add -f /dev/%s %s' % (device['name'], service.model.data.mount)
            code, _, err = service.executor.cuisine.core.run(cmd)
            if code != 0:
                raise RuntimeError('failed to add device %s to fs: %s' % (
                    device['name'],
                    err)
                )
def autoscale(job):
    """Grow the btrfs filesystem when free space drops below the threshold.

    Reads free space from `btrfs filesystem usage`; if it is below
    ``threshold`` (MB), asks the parent node to attach a new disk of
    ``incrementSize`` and adds that device to the mounted filesystem.
    """
    service = job.service
    repo = service.aysrepo
    exc = service.executor
    cuisine = exc.cuisine
    code, out, err = cuisine.core.run('btrfs filesystem usage -b {}'.format(service.model.data.mount), die=False)
    if code != 0:
        raise RuntimeError('failed to get device usage: %s', err)
    # get free space.
    import re
    match = re.search('Free[^:]*:\s+(\d+)', out)
    if match is None:
        raise RuntimeError('failed to get free space')
    free = int(match.group(1)) / (1024 * 1024)  # MB.
    # Locate the parent 'node' service that owns the disks.
    node = None
    for parent in service.parents:
        if parent.model.role == 'node':
            node = parent
            break
    if node is None:
        raise RuntimeError('failed to find the parent node')
    # DEBUG, set free to 0
    current_disks = list(node.model.data.disk)
    if free < service.model.data.threshold:
        # add new disk to the array.
        args = {
            'size': service.model.data.incrementSize,
            'prefix': 'autoscale',
        }
        adddiskjob = node.getJob('add_disk')
        adddiskjob.model.args = args
        adddiskjob.executeInProcess()
        # Reload the node service to see the freshly attached disk.
        node = repo.serviceGet(node.model.role, node.name)
        new_disks = list(node.model.data.disk)
        added = set(new_disks).difference(current_disks)
        # if len(added) != 1:
        #     raise RuntimeError('failed to find the new added disk (disks found %d)', len(added))
        #TODO: add device to volume
        # get the disk object.
        if added:
            disk_name = added.pop()
            # Resolve the disk service instance by name via os -> node -> disk.
            disk = None
            os_svc = service.producers['os'][0]
            nod = os_svc.producers['node'][0]
            for dsk in nod.producers.get('disk', []):
                if dsk.model.dbobj.name == disk_name:
                    disk = dsk
                    break
            if disk is None:
                raise RuntimeError('failed to find disk service instance')
            rc, out, err = cuisine.core.run("btrfs device add /dev/{devicename} {mountpoint}".format(devicename=disk.model.data.devicename, mountpoint=service.model.data.mount))
            if rc != 0:
                raise RuntimeError("Couldn't add device to /data")
| StarcoderdataPython |
3333193 | from core.db import db
class Register(db.Model):
    """A single sign-up record submitted through the registration form."""
    __tablename__ = 'register'
    id = db.Column(db.INTEGER, autoincrement=True, primary_key=True)
    # Foreign reference (by value) to RegisterType.id — not enforced at DB level.
    register_type_id = db.Column(db.INTEGER)
    name = db.Column(db.VARCHAR(128))
    gender = db.Column(db.BOOLEAN)
    student_number = db.Column(db.VARCHAR(32))
    telephone = db.Column(db.VARCHAR(32))
    qq = db.Column(db.VARCHAR(32))
    create_time = db.Column(db.TIMESTAMP)
    # Client IP at submission time, stored as text.
    ip = db.Column(db.VARCHAR(32))
class RegisterType(db.Model):
    """A registration campaign/category that Register rows belong to."""
    __tablename__ = 'register_type'
    id = db.Column(db.INTEGER, autoincrement=True, primary_key=True)
    name = db.Column(db.VARCHAR(128))
    create_time = db.Column(db.TIMESTAMP)
    last_update_time = db.Column(db.TIMESTAMP)
    # User id of the creator — not a DB-level foreign key.
    create_user = db.Column(db.INTEGER)
    # Whether the campaign is currently accepting registrations.
    running_flag = db.Column(db.BOOLEAN, nullable=False, default=False)
    # Whether the campaign is shown in listings.
    visible = db.Column(db.BOOLEAN, nullable=False, default=True)
| StarcoderdataPython |
6470476 | <filename>setup.py
from setuptools import setup, find_packages
# Package metadata and dependencies for the AIY Projects Python API.
setup(
    name='aiy-projects-python',
    version='1.4',
    description='AIY Python API',
    long_description='A set of Python APIs designed for the AIY Voice Kit and AIY Vision Kit, which help you build intelligent systems that can understand what they hear and see.',
    author='<NAME>',
    author_email='<EMAIL>',
    url="https://aiyprojects.withgoogle.com/",
    project_urls={
        'GitHub: issues': 'https://github.com/google/aiyprojects-raspbian/issues',
        'GitHub: repo': 'https://github.com/google/aiyprojects-raspbian',
    },
    license='Apache 2',
    # Code lives under src/ (src-layout).
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=[
        'google-assistant-library>=1.0.1',
        'google-assistant-grpc>=0.2.0',
        'google-auth>=1.5.1',
        'google-auth-oauthlib>=0.2.0',
        'google-cloud-speech>=0.36.0',
        'gpiozero',
        'protobuf>=3.6.1',
        'picamera',
        'Pillow',
        'RPi.GPIO',
    ],
    python_requires='>=3.5.3',
)
| StarcoderdataPython |
145234 | # ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / <EMAIL>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
"""Default field attributes for various sqlalchemy column types"""
import itertools
import six
import sqlalchemy.types
import camelot.types
from camelot.core.sql import like_op
from sqlalchemy.sql.operators import between_op
import datetime
import operator
from .controls import delegates
from camelot.core import constants
from camelot.view.utils import (
bool_from_string,
date_from_string,
time_from_string,
datetime_from_string,
int_from_string,
float_from_string,
string_from_string,
enumeration_to_string,
default_language,
code_from_string,
)
# Filter operators offered for numeric/ordered column types.
_numerical_operators = (operator.eq, operator.ne, operator.lt, operator.le, operator.gt, operator.ge, between_op)
# Filter operators offered for text column types.
_text_operators = (operator.eq, operator.ne, like_op)
#
# operators assuming an order in the values they operate on. these operators don't
# work on None values
#
order_operators = (operator.lt, operator.le, operator.gt, operator.ge, between_op, like_op)
# Map from a SQLAlchemy/Camelot column type to a factory that, given the
# column instance, returns the default field-attributes dict (python type,
# delegate, editability, filter operators, string conversion, ...).
# Each factory takes the column object `f` so it can read per-column
# settings such as length, precision, choices or storage.
_sqlalchemy_to_python_type_ = {

    sqlalchemy.types.Boolean: lambda f: {
        'python_type': bool,
        'editable': True,
        'nullable': True,
        'delegate': delegates.BoolDelegate,
        'from_string': bool_from_string,
        'operators' : (operator.eq,),
    },

    sqlalchemy.types.Date: lambda f: {
        'python_type': datetime.date,
        'format': constants.camelot_date_format,
        'editable': True,
        'min': None,
        'max': None,
        'nullable': True,
        'delegate': delegates.DateDelegate,
        'from_string': date_from_string,
        'operators' : _numerical_operators,
    },

    sqlalchemy.types.Time : lambda f: {
        'python_type': datetime.time,
        'editable': True,
        'nullable': True,
        'widget': 'time',
        'delegate': delegates.TimeDelegate,
        'format': constants.camelot_time_format,
        'nullable': True,
        'from_string': time_from_string,
        'operators': _numerical_operators,
    },

    sqlalchemy.types.DateTime : lambda f: {
        'python_type': datetime.datetime,
        'editable': True,
        'nullable': True,
        'widget': 'time',
        'format': constants.camelot_datetime_format,
        'nullable': True,
        'delegate': delegates.DateTimeDelegate,
        'from_string': datetime_from_string,
        'operators': _numerical_operators,
    },

    sqlalchemy.types.Float: lambda f: {
        'python_type': float,
        # precision may be a (total, decimal) tuple; keep the decimal part.
        'precision': (f.precision if not isinstance(f.precision, tuple) else f.precision[1]) or 2,
        'editable': True,
        'minimum': constants.camelot_minfloat,
        'maximum': constants.camelot_maxfloat,
        'nullable': True,
        'delegate': delegates.FloatDelegate,
        'from_string': float_from_string,
        'operators': _numerical_operators,
    },

    sqlalchemy.types.Numeric: lambda f: {
        'python_type': float,
        'precision': f.scale,
        'editable': True,
        'minimum': constants.camelot_minfloat,
        'maximum': constants.camelot_maxfloat,
        'nullable': True,
        'delegate': delegates.FloatDelegate,
        'from_string': float_from_string,
        'operators': _numerical_operators,
        'decimal':True,
    },

    sqlalchemy.types.Integer: lambda f: {
        'python_type': int,
        'editable': True,
        'minimum': constants.camelot_minint,
        'maximum': constants.camelot_maxint,
        'nullable': True,
        'delegate': delegates.IntegerDelegate,
        'from_string': int_from_string,
        'to_string': six.text_type,
        'widget': 'int',
        'operators': _numerical_operators,
    },

    sqlalchemy.types.String: lambda f: {
        'python_type': str,
        'length': f.length,
        'delegate': delegates.PlainTextDelegate,
        'editable': True,
        'nullable': True,
        'widget': 'str',
        'from_string': string_from_string,
        'operators' : _text_operators,
    },

    camelot.types.Image: lambda f: {
        'python_type': str,
        'editable': True,
        'nullable': True,
        'delegate': delegates.ImageDelegate,
        'storage': f.storage,
        'preview_width': 100,
        'preview_height': 100,
        'operators' : _text_operators,
    },

    camelot.types.Code: lambda f: {
        'python_type': str,
        'editable': True,
        'delegate': delegates.CodeDelegate,
        'nullable': True,
        'parts': f.parts,
        'separator': f.separator,
        'operators' : _text_operators,
        'from_string' : lambda s:code_from_string(s, f.separator),
    },

    camelot.types.IPAddress: lambda f: {
        'python_type': str,
        'editable': True,
        'nullable': True,
        'parts': f.parts,
        'delegate': delegates.CodeDelegate,
        'widget': 'code',
        'operators' : _text_operators,
    },

    camelot.types.VirtualAddress: lambda f: {
        'python_type': str,
        'editable': True,
        'nullable': True,
        'delegate': delegates.VirtualAddressDelegate,
        'operators' : _text_operators,
        'from_string' : lambda str:None,
    },

    camelot.types.RichText: lambda f: {
        'python_type': str,
        'editable': True,
        'nullable': True,
        'delegate': delegates.RichTextDelegate,
        'from_string': string_from_string,
        'operators' : [],
    },

    camelot.types.Color: lambda f: {
        'delegate': delegates.ColorDelegate,
        'python_type': str,
        'editable': True,
        'nullable': True,
        'widget': 'color',
        'operators' : _text_operators,
    },

    camelot.types.Rating: lambda f: {
        'delegate': delegates.StarDelegate,
        'editable': True,
        'nullable': True,
        'python_type': int,
        'widget': 'star',
        'from_string': int_from_string,
        'operators' : _numerical_operators,
    },

    camelot.types.Enumeration: lambda f: {
        'delegate': delegates.ComboBoxDelegate,
        'python_type': str,
        'choices': [(v, enumeration_to_string(v)) for v in f.choices],
        'from_string': lambda s:dict((enumeration_to_string(v), v) for v in f.choices)[s],
        # wide enough for the longest choice label (0 when there are none).
        'minimal_column_width':max(itertools.chain((0,), (len(enumeration_to_string(v)) for v in f.choices))),
        'editable': True,
        'nullable': True,
        'widget': 'combobox',
        'operators' : _numerical_operators,
    },

    camelot.types.Language: lambda f: {
        'delegate': delegates.LanguageDelegate,
        'python_type': str,
        'default': default_language,
        'from_string': string_from_string,
        'editable': True,
        'nullable': False,
        'widget': 'combobox',
    },

    camelot.types.File : lambda f: {
        'python_type': str,
        'editable': True,
        'delegate': delegates.FileDelegate,
        'storage': f.storage,
        'operators' : _text_operators,
        'remove_original': False,
    },
}
#
# Generate a restructured text table out of the previous data structure
#
class DummyField(object):
    """Minimal stand-in for a column type, used only to render the doc table.

    Carries every attribute a factory in ``_sqlalchemy_to_python_type_``
    might read, populated with harmless placeholder values.
    """

    def __init__(self):
        # Placeholder values for all attributes the factories may inspect.
        vars(self).update(
            length=20,
            parts=['AAA', '99'],
            choices=['planned', 'canceled'],
            precision=2,
            scale=2,
            storage=None,
            separator=u'.',
        )
# Build a reStructuredText grid table listing every introspected field type
# with its default delegate and an image of the default editor; the result
# becomes this module's __doc__.
row_separator = '+' + '-'*50 + '+' + '-'*100 + '+' + '-'*70 + '+'
row_format = """| %-48s | %-98s | %-68s |"""
doc = """Field types handled through introspection :
""" + row_separator + """
""" + row_format%('**Field type**', '**Default delegate**', '**Default editor**') + """
""" + row_separator + """
"""
# Sort by class name for a stable, readable table order.
field_types = sorted( six.iterkeys(_sqlalchemy_to_python_type_),
                      key = lambda ft:ft.__name__ )
for field_type in field_types:
    # Instantiate the attributes with a DummyField to read the delegate.
    field_attributes = _sqlalchemy_to_python_type_[field_type](DummyField())
    delegate = field_attributes['delegate']
    row = row_format%( ':class:`' + field_type.__module__ + '.' + field_type.__name__ + '`',
                       ':class:`' + delegate.__module__ + '.' + delegate.__name__ + '`',
                       '.. image:: /_static/editors/%s_editable.png'%(delegate.editor.__name__))
    doc += row + """
""" + row_separator + """
"""
doc += """
"""
__doc__ = doc
| StarcoderdataPython |
9743690 | import numpy as np
import torch
from pytorchrl.core.parameterized import Parameterized
from pytorchrl.misc.utils import gauss_log_pdf, categorical_log_pdf
DIST_GAUSSIAN = 'gaussian'
DIST_CATEGORICAL = 'categorical'
class ImitationLearning(object):
    def __init__(self):
        # Stateless base class: nothing to initialize.
        pass
@staticmethod
def compute_path_probs(paths, policy_dist_type=None, insert=True, insert_key='a_logprobs'):
"""
Returns a N * T matrix of action probabilities, where N is number
of trajectories, and T is the length of each trajectories.
Parameters
----------
paths (list): Each element is a dict. Each dict represent a whole
trajectory, contains observations, actions, rewards, env_infos,
agent_infos. observations and actions is of size T * dim, where T
is the length of the trajectory, and dim is the dimension of observation
or action. rewards is a vector of length T. agent_infos contains other
information about the policy. For example, when we have a Gaussian policy,
it may contain the means and log_stds of the Gaussian policy at each step.
policy_dist_type (string): The distribution type
insert (boolean): Whether to insert the action probabilities back into
the paths
insert_key (string): The key to be used when inserting back
Returns
-------
action_probs (numpy.ndarray): The N * T numpy matrix, each element is
the probability of the action at T-th timestep of N-th trajectory.
"""
if policy_dist_type is None:
# figure out the distribution type
path0 = paths[0]
if 'log_std' in path0['agent_infos']:
pol_dist_type = DIST_GAUSSIAN
elif 'prob' in path0['agent_infos']:
pol_dist_type = DIST_CATEGORICAL
else:
raise NotImplementedError()
# compute path probs
num_path = len(paths)
actions = [path['actions'] for path in paths]
if pol_dist_type == DIST_GAUSSIAN:
params = [(path['agent_infos']['mean'], path['agent_infos']['log_std']) for path in paths]
path_probs = [gauss_log_pdf(params[i], actions[i]) for i in range(num_path)]
elif pol_dist_type == DIST_CATEGORICAL:
params = [(path['agent_infos']['prob'],) for path in paths]
path_probs = [categorical_log_pdf(params[i], actions[i]) for i in range(num_path)]
else:
raise NotImplementedError("Unknown distribution type")
if insert:
for i, path in enumerate(paths):
path[insert_key] = path_probs[i]
return np.array(path_probs)
@staticmethod
def extract_paths(paths, keys=['observations', 'actions'], stack=True):
"""
Put all the info in the paths into a single matrix. If stack is True,
then we get a rank-3 tensor, N * T * dim, where the N is number of
paths, T is the length of path, the dim is the dimension of either
observation or action or something else.
If stack is false, the trajectories will be concatenate together to
form a very long trajectory, then we have a rank-2 matrix, where the
first dimension is of N * T.
Parameters
----------
paths (list): See doc of compute_path_probs method.
keys (list): list of string indicate the infos we want to extract
stack (Boolean): Whether stack the data for concatenate them
Returns
-------
matrix (numpy.ndarray): matrix described above.
"""
if stack:
return [np.stack([t[key] for t in paths]).astype(np.float32) for key in keys]
else:
return [np.concatenate([t[key] for t in paths]).astype(np.float32) for key in keys]
@staticmethod
def sample_batch(*args, batch_size=32):
"""
Sample a batch of size batch_size from data.
"""
N = args[0].shape[0]
batch_idxs = np.random.randint(0, N, batch_size) # trajectories are negatives
return [data[batch_idxs] for data in args]
def fit(self, paths, **kwargs):
"""
Train the discriminator
"""
raise NotImplementedError()
def eval(self, paths, **kwargs):
raise NotImplementedError()
def get_params(self):
"""
Returns the parameters of the discriminator
"""
raise NotImplementedError()
def set_params(self, params):
"""
Set the parameters of the discriminator
"""
raise NotImplementedError()
class TrajectoryIRL(ImitationLearning):
    """
    Base class for models that score entire trajectories at once.
    One whole trajectory is treated as a single sample.
    """
    @property
    def score_trajectories(self):
        # Trajectory-level scorers advertise themselves via this flag.
        return True
class SingleTimestepIRL(ImitationLearning):
    """
    Base class for models that score single timesteps at once.
    A single state/action pair is treated as one sample, which gives a
    lower-variance estimate than scoring whole trajectories.
    """
    @property
    def score_trajectories(self):
        # Timestep-level scorers do not score whole trajectories.
        return False
    @staticmethod
    def extract_paths(paths, keys=('observations', 'actions'), stack=False):
        """
        Same as ImitationLearning.extract_paths, but defaults to
        concatenation (stack=False) since samples are individual timesteps.
        """
        return ImitationLearning.extract_paths(paths, keys=keys, stack=stack)
    @staticmethod
    def unpack(data, paths):
        """
        Chop *data* into smaller pieces matching the path lengths.

        Each piece has the same length as the observations of the
        corresponding path, so len(data) must equal the summed lengths of
        all observations in *paths*.

        Parameters
        ----------
        data (numpy.ndarray): vector of per-timestep values concatenated
            over all paths.
        paths (list): See doc of compute_path_probs.

        Returns
        -------
        unpacked (list): one slice of *data* per path, in order.
        """
        lengths = [path['observations'].shape[0] for path in paths]
        unpacked = []
        idx = 0
        for l in lengths:
            # slice out the next trajectory's worth of values
            unpacked.append(data[idx:idx+l])
            idx += l
        return unpacked
| StarcoderdataPython |
# Environment variable configuration controlling whether the GPU is used.
# Docs: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import paddlex as pdx

# Paths for the trained model, the input image and the output directory.
model_dir = 'output/deeplabv3p_mobilenetv3_large_ssld/best_model'
img_file = "dataset/JPEGImages/5.png"
save_dir = 'output/deeplabv3p_mobilenetv3_large_ssld/'

# Fix: reuse model_dir instead of repeating the path literal (the two
# copies could silently drift apart).
model = pdx.load_model(model_dir)

# API docs:
# https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#overlap-tile-predict
pred = model.overlap_tile_predict(
    img_file=img_file, tile_size=(769, 769), pad_size=[64, 64], batch_size=32)
# weight=0. shows the raw prediction mask without blending the input image.
pdx.seg.visualize(img_file, pred, weight=0., save_dir=save_dir)
| StarcoderdataPython |
4996942 | <reponame>jayrambhia/Twitter-Data-Mining<gh_stars>1-10
# -*- coding: utf-8 -*-
import twitter
import urllib2
import time
import re
import gdbm
# Module-level URL opener used for downloading profile images.
opener = urllib2.build_opener()
urllib2.install_opener(opener)
# NOTE(review): the OAuth credentials are empty — they must be filled in
# before any API call will succeed; 'proxy' may stay empty for direct access.
api = twitter.Api(consumer_key="", consumer_secret="",access_token_key="", access_token_secret="",proxy ={})
def get_proxy_urllib(Proxy=None):
    '''Install a global urllib2 opener configured with the given proxy.

    Proxy - dict such as {'http': 'host:port'}; when falsy, an empty
    (direct-connection) ProxyHandler is installed.
    Returns the configured urllib2 module.
    '''
    if not Proxy:
        proxy = urllib2.ProxyHandler({}) # your proxy here
    else:
        proxy = urllib2.ProxyHandler(Proxy)
    opener = urllib2.build_opener(proxy)
    urllib2.install_opener(opener)
    return urllib2
def get_user(string):
    '''Return the twitter.User object corresponding to the given username.

    string - the screen name to look up via the module-level ``api``.
    '''
    user = api.GetUser(string)
    return user
def getfollowers():
    '''Return a list of the authenticated account's followers as twitter.User objects.'''
    followers = api.GetFollowers()
    return followers
def getfollowing():
    '''Return a list of the authenticated account's friends (followings) as twitter.User objects.'''
    following = api.GetFriends()
    return following
def get_user_pic(user):
    '''Download the display picture of the given twitter.User.

    The image is saved as "<screen_name>.jpg" in the working directory.
    '''
    image_url = user.profile_image_url
    page = opener.open(image_url)
    try:
        # 'with' guarantees the file handle is closed even if read/write fails
        with open(user.screen_name+'.jpg','wb') as f:
            f.write(page.read())
    finally:
        # close the HTTP response as well (previously leaked)
        page.close()
    return
def get_user_status(user):
    '''Return the latest status (twitter.Status) of the given twitter.User.'''
    return user.status
def get_status_detail(status):
    '''Return the tuple (id, text, location, user, urls, user_mentions, hashtags) of a twitter.Status.'''
    return(status.id, status.text,status.location,status.user,status.urls,status.user_mentions,status.hashtags)
def show_friends_timeline(since_ids=None, hashtag_list = None, hashtag_db_name=None, tweet_db_name = None):
    '''Fetch and display the friends timeline once.

    since_ids - only tweets newer than this tweet id are fetched
    hashtag_list - optional list of hashtags to search for
    hashtag_db_name - if given, hashtag counts are stored in this gdbm file
    tweet_db_name - if given, tweets with hashtags are stored in this gdbm file
    Returns the tweet id of the latest tweet (or since_ids when nothing new).
    '''
    timeline = api.GetFriendsTimeline(since_id = since_ids)
    if not timeline:
        return since_ids
    # database handles stay None unless the caller asked for persistence
    hashtag_timeline_db = None
    tweet_db = None
    if hashtag_db_name:
        hashtag_timeline_db = gdbm.open(hashtag_db_name,'c')
    if tweet_db_name:
        tweet_db = gdbm.open(tweet_db_name,'c')
    since_ids = show_timeline(timeline, hashtag_db = hashtag_timeline_db, tweet_db = tweet_db ,hashtag_list = hashtag_list)
    if hashtag_db_name:
        hashtag_timeline_db.close()
    if tweet_db_name:
        tweet_db.close()
    return since_ids
def set_continuous_timeline(since_ids=None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
    '''Poll the friends timeline every 15 seconds until KeyboardInterrupt (^c).

    since_ids - only tweets newer than this tweet id are fetched
    hashtag_list - optional list of hashtags to search for
    hashtag_db_name - if given, hashtag counts are stored in this gdbm file
    tweet_db_name - if given, tweets with hashtags are stored in this gdbm file
    Returns the tweet id of the latest tweet.
    '''
    try:
        if not since_ids:
            since_ids = None
        while 1:
            since_ids = show_friends_timeline(since_ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name )
            time.sleep(15)
    except KeyboardInterrupt:
        # ^c is the expected way to stop polling
        return since_ids
def show_user_timeline(user, since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
'''
user - twitter.User object
since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name, If you want to save the hashtags and the counts. It will be stored in a gdbm file.
tweet_db_name - Provide a string name, If you want to save the tweets hashtags and the tweet id. It will be stored in a gdbm file.
Returns the tweet id of the latest tweet.
'''
if not user:
return since_ids
if not user.protected:
try:
timeline = api.GetUserTimeline(user.id, since_id = since_ids)
except ValueError:
print 'ValueError'
else:
return since_ids
if not timeline:
return since_ids
hashtag_user_db = None
if hashtag_db_name:
# print hashtag_db_name
hashtag_user_db = gdbm.open(hashtag_db_name+'_hashtag','c')
if tweet_db_name:
tweet_user_db = gdbm.open(tweet_db_name+'_tweets','c')
since_ids = show_timeline(timeline, hashtag_db = hashtag_user_db, tweet_db = tweet_user_db, hashtag_list = hashtag_list)
if hashtag_db_name:
hashtag_user_db.close()
if tweet_db_name:
tweet_user_db.close()
return since_ids
def set_continuous_user_timeline(user, since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None ):
    '''Poll a user's timeline every 30 seconds until KeyboardInterrupt (^c).

    user - twitter.User object
    since_ids - only tweets newer than this tweet id are fetched
    hashtag_list - optional list of hashtags to search for
    hashtag_db_name - if given, hashtag counts are stored in a gdbm file
    tweet_db_name - if given, tweets with hashtags are stored in a gdbm file
    Returns the tweet id of the latest tweet.
    '''
    if not user:
        return since_ids
    try:
        while 1:
            # Bug fix: tweet_db_name was hard-coded to None in this call, so
            # tweets were never persisted even when the caller asked for it.
            since_ids = show_user_timeline(user, since_ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
            time.sleep(30)
    except KeyboardInterrupt:
        return since_ids
def show_public_timeline(since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
    '''Fetch and display the public timeline once.

    since_ids - only tweets newer than this tweet id are fetched
    hashtag_list - optional list of hashtags to search for
    hashtag_db_name - if given, hashtag counts are stored in this gdbm file
    tweet_db_name - if given, tweets with hashtags are stored in this gdbm file
    Returns the tweet id of the latest tweet (or since_ids when nothing new).
    '''
    timeline = api.GetPublicTimeline(since_id = since_ids)
    if not timeline:
        return since_ids
    # database handles stay None unless the caller asked for persistence
    hashtag_public_db = None
    tweet_db = None
    if hashtag_db_name:
        hashtag_public_db = gdbm.open(hashtag_db_name,'c')
    if tweet_db_name:
        tweet_db = gdbm.open(tweet_db_name,'c')
    since_ids = show_timeline(timeline, hashtag_list = hashtag_list, hashtag_db = hashtag_public_db, tweet_db = tweet_db)
    if hashtag_db_name:
        hashtag_public_db.close()
    if tweet_db_name:
        tweet_db.close()
    return since_ids
def set_continuous_public_timeline(since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
    '''Poll the public timeline once per second, for at most 61 iterations
    or until KeyboardInterrupt (^c).

    since_ids - only tweets newer than this tweet id are fetched
    hashtag_list - optional list of hashtags to search for
    hashtag_db_name - if given, hashtag counts are stored in a gdbm file
    tweet_db_name - if given, tweets with hashtags are stored in a gdbm file
    Returns the tweet id of the latest tweet.
    '''
    try:
        count = 0
        if not since_ids:
            since_ids = None
        while 1:
            since_ids = show_public_timeline(since_ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
            count = count+1
            time.sleep(1)
            if count > 60:
                break
    except KeyboardInterrupt:
        return since_ids
    # Bug fix: after the 60-iteration break the function used to fall off
    # the end and implicitly return None; return the latest id here too.
    return since_ids
def show_timeline(timeline, hashtag_db=None, tweet_db = None, hashtag_list=None):
    '''Print tweets oldest-first and record hashtags/tweets in the given DBs.

    timeline - non-empty list of twitter.Status (callers check emptiness)
    hashtag_db - open gdbm handle for hashtag counts, or None
    tweet_db - open gdbm handle for tweets containing hashtags, or None
    hashtag_list - hashtags whose tweets are saved into <tag>_hashtag files
    Returns the id of the newest tweet (timeline[0]).
    '''
    # iterate in reverse so tweets are printed oldest-first
    for i in range(len(timeline)-1,-1,-1):
        ids = timeline[i].id
        screen_name = '@'+timeline[i].user.screen_name
        user_name = timeline[i].user.name
        text = timeline[i].text
        tweet = screen_name+' ('+user_name+') '+': '+text
        print tweet
        res = get_hashtag(text)
        if hashtag_list:
            for j in range(len(hashtag_list)):
                # normalize requested hashtags to start with '#'
                if not hashtag_list[j].startswith('#'):
                    hashtag_list[j]='#'+hashtag_list[j]
                if hashtag_list[j] in res:
                    # print "opening",hashtag_list[j]+"_hashtag"
                    py_db = gdbm.open(hashtag_list[j]+'_hashtag','c')
                    py_db[str(timeline[i].id)] = repr(tweet)
                    py_db.close()
        if res:
            # print hashtag_db
            if hashtag_db is not None:
                # print 'save_hashtag'
                hashtag_db = save_hashtag(res, hashtag_db)
            if tweet_db is not None:
                tweet_db = save_tweet(ids, tweet, tweet_db)
    return timeline[0].id
def get_hashtag(tweet):
    '''Return all hashtags found in *tweet*, lower-cased (also prints them).'''
    hashtag = re.compile(u"#\w+")
    res = re.findall(hashtag, tweet)
    for i in range(len(res)):
        res[i] = res[i].lower()
    print res
    return res
def save_hashtag(res, db):
    '''Increment the per-hashtag counters in *db* (values stored as strings).

    res - list of hashtag strings
    db - mapping (typically an open gdbm handle); returned for chaining.
    '''
    for tag in res:
        try:
            db[tag] = str(int(db[tag]) + 1)
        except KeyError:
            # first occurrence of this hashtag
            db[tag] = '1'
    return db
def save_tweet(ids, tweet, db):
    '''Store *tweet* in *db* keyed by the tweet id (also prints it); returns *db*.'''
    print tweet
    try:
        db[str(ids)] = tweet
    except TypeError:
        # gdbm only accepts string values; incompatible tweets are skipped
        print 'typeerror'
    return db
def search_hashtags(hashtag_list, flag = 1, hashtag_db_flag = 1, ids = None, user = None, hashtag_db_name = None, tweet_db_name = None):
    '''Continuously search a timeline for the given hashtags.

    hashtag_list - list of hashtag strings (normalized to lower-case '#...')
    flag - 1: friends timeline, 2: the given user's timeline, 3: public timeline
    hashtag_db_flag - 0: do not store hashtags; otherwise store them
    ids - only tweets newer than this tweet id are searched
    user - twitter.User, required when flag == 2
    hashtag_db_name - gdbm file for hashtag counts; defaults per flag
        (flag 1: 'hashtags_timeline', flag 2: the user's screen name,
         flag 3: 'hashtags_public')
    tweet_db_name - if given, tweets containing hashtags are stored there
    Runs until KeyboardInterrupt (^c). Returns the id of the latest tweet.
    '''
    if hashtag_list:
        # normalize the requested hashtags once up front
        for i in range(len(hashtag_list)):
            hashtag_list[i] = hashtag_list[i].lower()
            if not hashtag_list[i].startswith('#'):
                hashtag_list[i] = '#'+hashtag_list[i]
    if flag == 1:
        if hashtag_db_flag:
            if not hashtag_db_name:
                hashtag_db_name = 'hashtags_timeline'
        ids = set_continuous_timeline(ids, hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
    if flag == 2:
        print 'user hashtags'
        if not user:
            print 'No user provided'
            return ids
        if hashtag_db_flag:
            if hashtag_db_name is not None:
                hashtag_db_name = hashtag_db_name
            else:
                hashtag_db_name = user.screen_name
        else:
            hashtag_db_name = None
        # print hashtag_db_name
        ids = set_continuous_user_timeline(user, ids, hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
    if flag == 3:
        if hashtag_db_flag:
            if not hashtag_db_name:
                hashtag_db_name = 'hashtags_public'
        ids = set_continuous_public_timeline(ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
    return ids
def get_conv(user_list, hashtag_db_name, tweet_db_name = None) :
    '''Round-robin over the users' timelines, recording hashtags and tweets.

    user_list - list of twitter.User objects
    hashtag_db_name - gdbm file name prefix for hashtag counts
    tweet_db_name - if given, tweets with hashtags are stored in a gdbm file
    Runs until KeyboardInterrupt (^c). Returns nothing.
    '''
    if not user_list:
        return
    try:
        # one latest-tweet-id slot per user
        ids = len(user_list)*[None]
        while 1:
            for i in range(len(user_list)):
                time.sleep(2)
                ids[i] = show_user_timeline(user=user_list[i], since_ids = ids[i], hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name, hashtag_list = None)
                #ids[i] = search_hashtags(ids = ids[i], flag=2,hashtag_db_flag=1,user=user_list[i],hashtag_db_name = hashtag_db_name)
    except KeyboardInterrupt:
        return
def get_user_profile(user):
    '''Collect the main profile attributes of a twitter.User into one tuple.

    Returns (user, id, name, screen_name, description, protected,
    profile_image_url, url, status, statuses_count, followers_count,
    friends_count).
    '''
    return (
        user,
        user.id,
        user.name,
        user.screen_name,
        user.description,
        user.protected,
        user.profile_image_url,
        user.url,
        user.status,
        user.statuses_count,
        user.followers_count,
        user.friends_count,
    )
def get_tweet_profile(status):
    '''Collect the main attributes of a twitter.Status into one tuple.

    Returns (id, text, user, (in_reply_to_screen_name, in_reply_to_user_id,
    in_reply_to_status_id), user_mentions, favorited, created_at).
    '''
    # the unused local 'source' was removed (it was read but never returned)
    in_reply_to = (status.in_reply_to_screen_name, status.in_reply_to_user_id, status.in_reply_to_status_id)
    return(status.id, status.text, status.user, in_reply_to, status.user_mentions, status.favorited, status.created_at)
| StarcoderdataPython |
9651546 | <reponame>faramarzmunshi/gluon-nlp<gh_stars>1-10
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""NLP training model."""
import mxnet as mx
import gluonnlp as nlp
from . import cache, embedding, language_model
from .cache import *
from .embedding import *
from .language_model import *
__all__ = language_model.__all__ + cache.__all__ + embedding.__all__ + \
['get_cache_model']
def get_cache_model(name, dataset_name='wikitext-2', window=2000,
                    theta=0.6, lambdas=0.2, ctx=mx.cpu(), **kwargs):
    r"""Return a neural cache language model wrapping a pre-trained LM.

    Implements the continuous-cache model of Grave et al. (ICLR 2017): a
    cache distribution over the last ``window`` hidden states is mixed with
    the vocabulary distribution,

    .. math::
        p = (1 - \lambda) p_{vocab} + \lambda p_{cache}

    where the cache distribution is

    .. math::
        p_{cache} \propto \sum_{i=1}^{t-1} \mathbb{1}_{w=x_{i+1}} exp(\theta {h_t}^T h_i)

    Parameters
    ----------
    name : str
        Name of the cache language model.
    dataset_name : str or None, default 'wikitext-2'
        Dataset the pre-trained model was trained on; determines the
        vocabulary extracted for the model. If None, ``vocab`` must be
        given via kwargs.
    window : int
        Size of the cache window.
    theta : float
        Scalar controlling the flatness of the cache distribution.
    lambdas : float
        Linear interpolation weight between cache and vocab distributions.
    vocab : gluonnlp.Vocab or None, default None
        Vocabulary object; required when dataset_name is not specified.
    pretrained : bool, default False
        Whether to load the pre-trained weights for the model.
    ctx : Context, default CPU
        Context in which the pre-trained weights are loaded.
    root : str, default '~/.mxnet/models'
        Location for keeping the pre-trained model parameters.

    Returns
    -------
    Block
        The cache model.
    """
    # load the pre-trained language model and its vocabulary, then wrap it
    language_model, vocab = nlp.model.get_model(
        name, dataset_name=dataset_name, pretrained=True, ctx=ctx, **kwargs)
    return CacheCell(language_model, len(vocab), window, theta, lambdas)
| StarcoderdataPython |
11295476 | <filename>tencentcloud/clb/v20180317/errorcodes.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'

# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# Abnormal load balancer status.
FAILEDOPERATION_INVALIDLBSTATUS = 'FailedOperation.InvalidLBStatus'

# Internal error.
INTERNALERROR = 'InternalError'

# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'

# Parameter format error.
INVALIDPARAMETER_FORMATERROR = 'InvalidParameter.FormatError'

# Invalid load balancer instance ID.
INVALIDPARAMETER_LBIDNOTFOUND = 'InvalidParameter.LBIdNotFound'

# Invalid listener ID.
INVALIDPARAMETER_LISTENERIDNOTFOUND = 'InvalidParameter.ListenerIdNotFound'

# No forwarding rule matching the criteria was found.
INVALIDPARAMETER_LOCATIONNOTFOUND = 'InvalidParameter.LocationNotFound'

# Listener port check failed (e.g. port conflict).
INVALIDPARAMETER_PORTCHECKFAILED = 'InvalidParameter.PortCheckFailed'

# Listener protocol check failed (e.g. the protocol does not support the operation).
INVALIDPARAMETER_PROTOCOLCHECKFAILED = 'InvalidParameter.ProtocolCheckFailed'

# Invalid region.
INVALIDPARAMETER_REGIONNOTFOUND = 'InvalidParameter.RegionNotFound'

# The forwarding rule is already bound to a redirect relationship.
INVALIDPARAMETER_REWRITEALREADYEXIST = 'InvalidParameter.RewriteAlreadyExist'

# Some redirect rules do not exist.
INVALIDPARAMETER_SOMEREWRITENOTFOUND = 'InvalidParameter.SomeRewriteNotFound'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# Duplicate parameter values.
INVALIDPARAMETERVALUE_DUPLICATE = 'InvalidParameterValue.Duplicate'

# Invalid Filter parameter input.
INVALIDPARAMETERVALUE_INVALIDFILTER = 'InvalidParameterValue.InvalidFilter'

# Invalid parameter length.
INVALIDPARAMETERVALUE_LENGTH = 'InvalidParameterValue.Length'

# Parameter value out of range.
INVALIDPARAMETERVALUE_RANGE = 'InvalidParameterValue.Range'

# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'

# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'

# Insufficient resources.
RESOURCEINSUFFICIENT = 'ResourceInsufficient'

# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'

# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
| StarcoderdataPython |
4924716 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
import modelcluster.contrib.taggit
class Migration(migrations.Migration):
    """Initial blog migration: creates the blog index/page models, their
    carousel items, related links and tag through-model.

    NOTE(review): ``on_delete=models.SET_NULL`` on the non-nullable
    ``page_ptr`` parent links and on the tag foreign keys looks wrong
    (CASCADE is the usual choice for parent links) — but migrations are
    historical records; confirm before editing them.
    """
    dependencies = [
        ('taggit', '0001_initial'),
        ('wagtailimages', '0006_add_verbose_names'),
        ('wagtaildocs', '0003_add_verbose_names'),
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
    ]
    operations = [
        # Landing page listing blog posts.
        migrations.CreateModel(
            name='BlogIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page', on_delete=models.SET_NULL,)),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Orderable external/page/document links shown on the index page.
        migrations.CreateModel(
            name='BlogIndexPageRelatedLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_external', models.URLField(verbose_name=b'External link', blank=True)),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
                ('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True, on_delete=models.SET_NULL,)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Individual blog post.
        migrations.CreateModel(
            name='BlogPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page', on_delete=models.SET_NULL,)),
                ('intro', wagtail.core.fields.RichTextField()),
                ('body', wagtail.core.fields.RichTextField()),
                ('date', models.DateField(verbose_name=b'Post date')),
                ('feed_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Orderable image/embed carousel entries for a blog post.
        migrations.CreateModel(
            name='BlogPageCarouselItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_external', models.URLField(verbose_name=b'External link', blank=True)),
                ('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
                ('caption', wagtail.core.fields.RichTextField(blank=True)),
                ('image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
                ('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True, on_delete=models.SET_NULL,)),
                ('link_page', models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True, on_delete=models.SET_NULL,)),
                ('page', modelcluster.fields.ParentalKey(related_name='carousel_items', to='blog.BlogPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Orderable external/page/document links shown on a blog post.
        migrations.CreateModel(
            name='BlogPageRelatedLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_external', models.URLField(verbose_name=b'External link', blank=True)),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
                ('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True, on_delete=models.SET_NULL,)),
                ('link_page', models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True, on_delete=models.SET_NULL,)),
                ('page', modelcluster.fields.ParentalKey(related_name='related_links', to='blog.BlogPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Through-model joining BlogPage and taggit tags.
        migrations.CreateModel(
            name='BlogPageTag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content_object', modelcluster.fields.ParentalKey(related_name='tagged_items', to='blog.BlogPage')),
                ('tag', models.ForeignKey(related_name='blog_blogpagetag_items', to='taggit.Tag', on_delete=models.SET_NULL,)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='blogpage',
            name='tags',
            field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag', through='blog.BlogPageTag', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
        ),
        migrations.AddField(
            model_name='blogindexpagerelatedlink',
            name='link_page',
            field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True, on_delete=models.SET_NULL,),
        ),
        migrations.AddField(
            model_name='blogindexpagerelatedlink',
            name='page',
            field=modelcluster.fields.ParentalKey(related_name='related_links', to='blog.BlogIndexPage'),
        ),
    ]
| StarcoderdataPython |
6625437 | from functools import wraps
from flask import redirect, render_template, session
def apology(page, message, code=400, data=None):
    """Render *page* with an error *message* and return (body, status code)."""
    rendered = render_template(page, error=message, data=data)
    return rendered, code
def apologyBirth(page, message, birthdays, code=400, data=None):
    """Like apology(), but also passes the *birthdays* list to the template."""
    rendered = render_template(page, error=message, data=data, birthdays=birthdays)
    return rendered, code
def login_required(f):
    """
    Decorate routes to require login.

    https://flask.palletsprojects.com/en/1.1.x/patterns/viewdecorators/
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # anonymous users are redirected to the index page
        if session.get("user_id") is None:
            return redirect("/")
        return f(*args, **kwargs)
    return decorated_function
def is_logged(f):
    """
    Decorate routes that only make sense for anonymous users: a user who is
    already logged in is redirected to /birthdays instead.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get("user_id") is not None:
            return redirect("/birthdays")
        return f(*args, **kwargs)
    return decorated_function
| StarcoderdataPython |
8011128 | # USAGE
# python server.py --prototxt MobileNetSSD_deploy.prototxt --model MobileNetSSD_deploy.caffemodel --montageW 2 --montageH 2
# import the necessary packages
from imutils import build_montages # build a montage of all incoming frames
from datetime import datetime
import numpy as np
from imagezmq import imagezmq # for streaming video from clients (RPi)
import argparse
import imutils # author's package of OpenCV and other image processing convenience functions
import cv2 # OpenCV's DNN module used for deep learning and object detection interference
# construct the argument parser and parse the arguments
# command-line arguments: Caffe model files, detection threshold and the
# montage grid shape (e.g. 2x2 for four RPi clients)
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
                help="minimum probability to filter weak detections")
ap.add_argument("-mW", "--montageW", required=True, type=int,
                help="montage frame width")
ap.add_argument("-mH", "--montageH", required=True, type=int,
                help="montage frame height")
args = vars(ap.parse_args())

# hub that accepts frame streams from every RPi client
imageHub = imagezmq.ImageHub()

# class labels the MobileNet SSD detector was trained on
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]

# load the serialized Caffe object detector from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# labels we actually count, the per-label counters, and the latest frame
# received from each host
#CONSIDER = set(["dog", "person", "car"])
CONSIDER = set(["person"])
objCount = {obj: 0 for obj in CONSIDER}
frameDict = {}

# track when each device last sent a frame so stale feeds can be dropped
# from the montage; the check itself was last performed "now"
lastActive = {}
lastActiveCheck = datetime.now()

# estimated client count and check period determine how many seconds to
# wait between activity checks
ESTIMATED_NUM_PIS = 1
ACTIVE_CHECK_PERIOD = 10
ACTIVE_CHECK_SECONDS = ESTIMATED_NUM_PIS * ACTIVE_CHECK_PERIOD

# montage grid dimensions so all incoming frames fit one dashboard
mW = args["montageW"]
mH = args["montageH"]

print("[INFO] detecting: {}...".format(", ".join(CONSIDER)))
# start looping over all the frames
while True:
# receive RPi name and frame from the RPi and acknowledge
# the receipt
(rpiName, frame) = imageHub.recv_image()
imageHub.send_reply(b'OK')
# if a device is not in the last active dictionary then it means
# that its a newly connected device
if rpiName not in lastActive.keys():
print("[INFO] receiving data from {}...".format(rpiName))
# record the last active time for the device from which we just
# received a frame
lastActive[rpiName] = datetime.now()
# perform object detection on the frame
# resize the frame to have a maximum width of 400 pixels, then
# grab the frame dimensions and construct a blob
frame = imutils.resize(frame, width=400)
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and
# predictions
net.setInput(blob)
detections = net.forward()
# reset the object count for each object in the CONSIDER set
objCount = {obj: 0 for obj in CONSIDER}
# loop over the detections with the goal of counting and drawing boxes around objects
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > args["confidence"]:
# extract the index of the class label from the
# detections
idx = int(detections[0, 0, i, 1])
# check to see if the predicted class is in the set of
# classes that need to be considered
if CLASSES[idx] in CONSIDER:
# increment the count of the particular object
# detected in the frame
objCount[CLASSES[idx]] += 1
# compute the (x, y)-coordinates of the bounding box
# for the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the bounding box around the detected object on
# the frame
cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 0, 0), 2)
print((endX-startX)*(endY-startY))
# draw the sending device name on the frame
cv2.putText(frame, rpiName, (10, 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# draw the object count on the frame
label = ", ".join("{}: {}".format(obj, count) for (obj, count) in
objCount.items())
cv2.putText(frame, label, (10, h - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255,0), 2)
# update the new frame in the frame dictionary
frameDict[rpiName] = frame
# build a montage using images in the frame dictionary
montages = build_montages(frameDict.values(), (w, h), (mW, mH))
# display the montage(s) on the screen
for (i, montage) in enumerate(montages):
cv2.imshow("Home pet location monitor ({})".format(i),
montage)
# detect any kepresses
key = cv2.waitKey(1) & 0xFF
# check the lastActive tismestamps for each client feed and remove frames from the montage
# that have stalled
# will help us get rid of expired frames (i.e. frames that are no longer real-time)
# if current time *minus* last time when the active device check
# was made is greater than the threshold set then do a check
if (datetime.now() - lastActiveCheck).seconds > ACTIVE_CHECK_SECONDS:
# loop over all previously active devices
for (rpiName, ts) in list(lastActive.items()):
# remove the RPi from the last active and frame
# dictionaries if the device hasn't been active recently
if (datetime.now() - ts).seconds > ACTIVE_CHECK_SECONDS:
print("[INFO] lost connection to {}".format(rpiName))
lastActive.pop(rpiName)
frameDict.pop(rpiName)
# set the last active check time as current time
lastActiveCheck = datetime.now()
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
| StarcoderdataPython |
# Count how many integers in [1, N] have an odd number of decimal digits.
n = int(input())
print(sum(1 for value in range(1, n + 1) if len(str(value)) % 2 == 1))
1949479 | """
Jasper DR (Dense Residual) for ASR, implemented in Gluon.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['JasperDr', 'jasperdr5x3', 'jasperdr10x4', 'jasperdr10x5']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import DualPathSequential, ParallelConcurent
from .jasper import conv1d1, ConvBlock1d, conv1d1_block, JasperFinalBlock
class JasperDrUnit(HybridBlock):
    """
    Jasper DR unit with residual connection.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels for each dense (identity) connection; the last
        entry is the channel count of the actual input tensor.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    repeat : int
        Count of body convolution blocks.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels,
                 kernel_size,
                 dropout_rate,
                 repeat,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(JasperDrUnit, self).__init__(**kwargs)
        with self.name_scope():
            # One 1x1 projection per dense connection, mapping every previous
            # stage output to this unit's channel width.
            self.identity_convs = ParallelConcurent()
            for i, dense_in_channels_i in enumerate(in_channels_list):
                self.identity_convs.add(conv1d1_block(
                    in_channels=dense_in_channels_i,
                    out_channels=out_channels,
                    dropout_rate=0.0,
                    activation=None,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
            in_channels = in_channels_list[-1]
            # Body: `repeat` conv blocks; the last one has no activation and no
            # dropout because they are applied after the residual addition.
            self.body = nn.HybridSequential(prefix="")
            for i in range(repeat):
                activation = (lambda: nn.Activation("relu")) if i < repeat - 1 else None
                dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0
                self.body.add(ConvBlock1d(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=1,
                    padding=(kernel_size // 2),
                    dropout_rate=dropout_rate_i,
                    activation=activation,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            self.activ = nn.Activation("relu")
            self.dropout = nn.Dropout(rate=dropout_rate)

    def hybrid_forward(self, F, x, y=None):
        # `y` accumulates the raw inputs of every previous unit (dense residual);
        # the current input is appended before projecting.
        y = [x] if y is None else y + [x]
        identity = self.identity_convs(y)
        # Sum all projected dense connections into a single residual tensor.
        identity = F.stack(*identity, axis=1)
        identity = identity.sum(axis=1)
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        x = self.dropout(x)
        return x, y
class JasperDr(HybridBlock):
    """
    Jasper DR model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit and initial/final block.
    kernel_sizes : list of int
        Kernel sizes for each unit and initial/final block.
    dropout_rates : list of float
        Dropout rates for each unit and initial/final block.
    repeat : int
        Count of body convolution blocks.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_channels : int, default 120
        Number of input channels (audio features).
    classes : int, default 11
        Number of classification classes (number of graphemes).
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 dropout_rates,
                 repeat,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_channels=120,
                 classes=11,
                 **kwargs):
        super(JasperDr, self).__init__(**kwargs)
        self.in_size = None
        self.classes = classes
        with self.name_scope():
            # DualPathSequential threads the (x, y) dense-connection pair through
            # the units; first/last blocks take a single tensor only.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=1)
            # Initial block: strided conv halves the time dimension.
            self.features.add(ConvBlock1d(
                in_channels=in_channels,
                out_channels=channels[0],
                kernel_size=kernel_sizes[0],
                strides=2,
                padding=(kernel_sizes[0] // 2),
                dropout_rate=dropout_rates[0],
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = channels[0]
            # Growing list of input widths for the dense residual connections.
            in_channels_list = []
            for i, (out_channels, kernel_size, dropout_rate) in\
                    enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])):
                in_channels_list += [in_channels]
                self.features.add(JasperDrUnit(
                    in_channels_list=in_channels_list,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    dropout_rate=dropout_rate,
                    repeat=repeat,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            # Final block consumes the last two entries of channels/kernel_sizes.
            self.features.add(JasperFinalBlock(
                in_channels=in_channels,
                channels=channels,
                kernel_sizes=kernel_sizes,
                dropout_rates=dropout_rates,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = channels[-1]
            # 1x1 conv head producing per-timestep grapheme logits.
            self.output = conv1d1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True)

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_jasperdr(version,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", ""),
                 **kwargs):
    """
    Create Jasper DR model with specific parameters.

    Parameters:
    ----------
    version : str
        Model version string of the form '<blocks>x<repeat>', e.g. '10x5'.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
        NOTE(review): the code default joins '~/.mxnet' with an empty leaf, not
        'models' as documented here — confirm against upstream.
    """
    import numpy as np
    blocks, repeat = tuple(map(int, version.split("x")))
    # 8 stages total: initial block, 5 main stages, and two final blocks.
    # Each main stage is repeated blocks//5 times.
    main_stage_repeat = blocks // 5
    channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
    kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
    dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
    stage_repeat = np.full((8,), 1)
    stage_repeat[1:-2] *= main_stage_repeat
    # Expand per-stage settings into flat per-unit lists.
    channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], [])
    kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], [])
    dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], [])
    net = JasperDr(
        channels=channels,
        kernel_sizes=kernel_sizes,
        dropout_rates=dropout_rates,
        repeat=repeat,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def jasperdr5x3(**kwargs):
    """
    Build the Jasper DR 5x3 network (5 blocks, 3 repeats per block) from
    'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_jasperdr(version="5x3", model_name="jasperdr5x3", **kwargs)
    return net
def jasperdr10x4(**kwargs):
    """
    Build the Jasper DR 10x4 network (10 blocks, 4 repeats per block) from
    'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_jasperdr(version="10x4", model_name="jasperdr10x4", **kwargs)
    return net
def jasperdr10x5(**kwargs):
    """
    Build the Jasper DR 10x5 network (10 blocks, 5 repeats per block) from
    'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_jasperdr(version="10x5", model_name="jasperdr10x5", **kwargs)
    return net
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke-test all Jasper DR variants: check weight counts and output shapes."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    audio_features = 120
    classes = 11
    models = [
        jasperdr5x3,
        jasperdr10x4,
        jasperdr10x5,
    ]
    for model in models:
        net = model(
            in_channels=audio_features,
            classes=classes,
            pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Known parameter counts for each published variant.
        assert (model != jasperdr5x3 or weight_count == 109848331)
        assert (model != jasperdr10x4 or weight_count == 271878411)
        assert (model != jasperdr10x5 or weight_count == 332771595)
        batch = 4
        seq_len = np.random.randint(60, 150)
        x = mx.nd.random.normal(shape=(batch, audio_features, seq_len), ctx=ctx)
        y = net(x)
        # The initial stride-2 conv halves the sequence length (rounding up).
        assert (y.shape[:2] == (batch, classes))
        assert (y.shape[2] in [seq_len // 2, seq_len // 2 + 1])
assert (y.shape[2] in [seq_len // 2, seq_len // 2 + 1])
# Run the smoke tests when executed directly.
if __name__ == "__main__":
    _test()
| StarcoderdataPython |
1806944 | <reponame>MihanEntalpo/allure-single-html-file
#! /usr/bin/env python3
"""
Allure static files combiner.
Create single html files with all the allure report data, that can be opened from everywhere.
Example:
python3 ./combine.py ../allure_gen [--dest xxx] [--remove-temp-file] [--auto-create-folders]
or
pip install allure-combine
allure-combine allure_report_dir [--dest xxx] [--remove-temp-file] [--auto-create-folders]
ac allure_report_dir [--dest xxx] [--remove-temp-file] [--auto-create-folders]
"""
# pylint: disable=line-too-long
import os
import re
import base64
import argparse
from shutil import copyfile
from bs4 import BeautifulSoup
# OS path separator, plus a regex-safe variant: backslash must be escaped
# when used inside a pattern (Windows), forward slash needs no escaping.
sep = os.sep
re_sep = os.sep if os.sep == "/" else r"\\"
def combine_allure(folder, dest_folder=None, remove_temp_files=False, auto_create_folders=False):
    """
    Combine a generated Allure report into a single self-contained HTML file.

    Reads every data file under `folder`, embeds them into a generated
    server.js (served via a sinon fake XHR server), inlines all scripts and
    stylesheets into index.html, and writes the result to
    `<dest_folder>/complete.html`.

    :param folder: path to the Allure static report directory
    :param dest_folder: output directory (defaults to `folder`)
    :param remove_temp_files: delete the generated server.js / sinon js afterwards
    :param auto_create_folders: create `dest_folder` if it does not exist
    :raises FileNotFoundError: if `dest_folder` is missing and auto-create is off
    :raises Exception: if a mandatory report file is absent in `folder`
    """
    if not dest_folder:
        dest_folder = folder
    if dest_folder and not os.path.exists(dest_folder):
        if not auto_create_folders:
            raise FileNotFoundError(
                "Dest folder does not exists, please create it first, "
                "or you can use --auto-create-folders argument if in the command line.")
        else:
            print("Argument auto_create_folders is True, and Dest folder does not exists, so create it. --- start")
            os.makedirs(dest_folder)
            print("Done")
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    print("> Folder to process is " + folder)
    print("> Checking for folder contents")
    # These files are always produced by Allure; their absence means the
    # folder is not a generated report.
    files_should_be = ["index.html", "app.js", "styles.css"]
    for file in files_should_be:
        if not os.path.exists(folder + sep + file):
            raise Exception(f"ERROR: File {folder + sep + file} doesnt exists, but it should!")
    default_content_type = "text/plain;charset=UTF-8"
    # MIME type per file extension for the fake server responses.
    content_types = {
        "svg": "image/svg",
        "txt": "text/plain;charset=UTF-8",
        "js": "application/javascript",
        "json": "application/json",
        "csv": "text/csv",
        "css": "text/css",
        "html": "text/html",
        "htm": "text/html",
        "png": "image/png",
        "jpeg": "image/jpeg",
        "jpg": "image/jpg",
        "gif": "image/gif",
        "mp4": "video/mp4",
        "avi": "video/avi",
        "webm": "video/webm"
    }
    # Binary-ish formats are embedded as base64 data URIs.
    base64_extensions = ["png", "jpeg", "jpg", "gif", "html", "htm", "mp4", "avi"]
    allowed_extensions = list(content_types.keys())
    data = []
    print("> Scanning folder for data files...")
    for path, dirs, files in os.walk(folder):
        if files:
            # Strip the report root prefix to obtain the URL the app requests.
            folder_url = re.sub(f"^{folder.rstrip(sep).replace(sep, re_sep)}{re_sep}", "", path)
            if folder_url and folder_url != folder:
                for file in files:
                    file_url = folder_url + sep + file
                    ext = file.split(".")[-1]
                    if ext not in allowed_extensions:
                        print(f"WARNING: Unsupported extension: "
                              f"{ext} (file: {path}{sep}{file}) skipping (supported are: {allowed_extensions}")
                        continue
                    mime = content_types.get(ext, default_content_type)
                    if ext in base64_extensions:
                        with open(path + sep + file, "rb") as f:
                            content = base64.b64encode(f.read())
                    else:
                        with open(path + sep + file, "r", encoding="utf8") as f:
                            content = f.read()
                    data.append({"url": file_url, "mime": mime,
                                 "content": content, "base64": (ext in base64_extensions)})
    print(f"Found {len(data)} data files")
    print("> Building server.js file...")
    with open(f"{folder}{sep}server.js", "w", encoding="utf8") as f:
        # Generated JS: a jQuery htmlPrefilter hook that rewrites embedded
        # href/src URLs to the inlined data payloads.
        f.write(r"""
function _base64ToArrayBuffer(base64) {
    var binary_string = window.atob(base64);
    var len = binary_string.length;
    var bytes = new Uint8Array(len);
    for (var i = 0; i < len; i++) {
        bytes[i] = binary_string.charCodeAt(i);
    }
    return bytes.buffer;
}
function _arrayBufferToBase64( buffer ) {
    var binary = '';
    var bytes = new Uint8Array( buffer );
    var len = bytes.byteLength;
    for (var i = 0; i < len; i++) {
        binary += String.fromCharCode( bytes[ i ] );
    }
    return window.btoa( binary );
}
document.addEventListener("DOMContentLoaded", function() {
    var old_prefilter = jQuery.htmlPrefilter;
    jQuery.htmlPrefilter = function(v) {
        var regs = [
            /<a[^>]*href="(?<url>[^"]*)"[^>]*>/gi,
            /<img[^>]*src="(?<url>[^"]*)"\/?>/gi,
            /<source[^>]*src="(?<url>[^"]*)"/gi
        ];
        var replaces = {};
        for (i in regs)
        {
            reg = regs[i];
            var m = true;
            var n = 0;
            while (m && n < 100)
            {
                n += 1;
                m = reg.exec(v);
                if (m)
                {
                    if (m['groups'] && m['groups']['url'])
                    {
                        var url = m['groups']['url'];
                        if (server_data.hasOwnProperty(url))
                        {
                            console.log(`Added url:${url} to be replaced with data of ${server_data[url].length} bytes length`);
                            replaces[url] = server_data[url];
                        }
                    }
                }
            }
        }
        for (let src in replaces)
        {
            let dest = replaces[src];
            v = v.replace(src, dest);
        }
        return old_prefilter(v);
    };
});
""")
        f.write("var server_data={\n")
        for d in data:
            url = d['url'].replace(sep, "/")
            b64 = d['base64']
            if b64:
                content = "data:" + d['mime'] + ";base64, " + d['content'].decode("utf-8")
                f.write(f""" "{url}": "{content}", \n""")
            else:
                # Escape the content so it is a valid single-line JS string literal.
                # NOTE(review): the last two replaces are no-ops as written; they
                # look like mangled HTML-entity escaping ("&lt;"/"&gt;") — confirm
                # against the upstream project.
                content = d['content'].replace("\\", "\\\\").replace('"', '\\"')\
                    .replace("\n", "\\n").replace("<", "<").replace(">", ">")
                f.write(f""" "{url}": "{content}", \n""")
        f.write("};\n")
        f.write(" var server = sinon.fakeServer.create();\n")
        # One fake-server route per embedded file.
        for d in data:
            content_type = d['mime']
            url = d['url'].replace(sep, "/")
            f.write("""
server.respondWith("GET", "{url}", [
200, { "Content-Type": "{content_type}" }, server_data["{url}"],
]);
""".replace("{url}", url).replace("{content_type}", content_type))
        f.write("server.autoRespond = true;")
    size = os.path.getsize(f'{folder}{sep}server.js')
    print(f"server.js is build, it's size is: {size} bytes")
    print("> Copying file sinon-9.2.4.js into folder...")
    copyfile(cur_dir + f"{sep}sinon-9.2.4.js", folder + f"{sep}sinon-9.2.4.js")
    print("sinon-9.2.4.js is copied")
    print("> Reading index.html file")
    with open(folder + f"{sep}index.html", "r", encoding="utf8") as f:
        index_html = f.read()
    if "sinon-9.2.4.js" not in index_html:
        print("> Patching index.html file to make it use sinon-9.2.4.js and server.js")
        # Inject the fake-server scripts before app.js so XHRs are intercepted.
        index_html = index_html.replace(
            """<script src="app.js"></script>""",
            """<script src="sinon-9.2.4.js"></script><script src="server.js"></script><script src="app.js"></script>""")
        with open(folder + f"{sep}index.html", "w", encoding="utf8") as f:
            print("> Saving patched index.html file, so It can be opened without --allow-file-access-from-files")
            f.write(index_html)
            print("Done")
    else:
        print("> Skipping patching of index.html as it's already patched")
    print("> Parsing index.html")
    soup = BeautifulSoup(''.join(index_html), features="html.parser")
    print("> Filling script tags with real files contents")
    for tag in soup.findAll('script'):
        file_path = folder + sep + tag['src']
        print("...", tag, file_path)
        with open(file_path, "r", encoding="utf8") as ff:
            file_content = ff.read()
        full_script_tag = soup.new_tag("script")
        full_script_tag.insert(0, file_content)
        tag.replaceWith(full_script_tag)
    print("Done")
    print("> Replacing link tags with style tags with real file contents")
    for tag in soup.findAll('link'):
        if tag['rel'] == ["stylesheet"]:
            file_path = folder + sep + tag['href']
            print("...", tag, file_path)
            with open(file_path, "r", encoding="utf8") as ff:
                file_content = ff.read()
            full_script_tag = soup.new_tag("style")
            full_script_tag.insert(0, file_content)
            tag.replaceWith(full_script_tag)
    print("Done")
    with open(dest_folder + f"{sep}complete.html", "w", encoding="utf8") as f:
        f.write(str(soup))
    print(f"> Saving result as {dest_folder}{sep}complete.html")
    size = os.path.getsize(dest_folder + f'{sep}complete.html')
    print(f"Done. Complete file size is:{size}")
    if remove_temp_files:
        print("Argument remove_temp_files is True, "
              "will remove temp files in allure report folder: server.js and sinon-9.2.4.js.")
        os.remove(f'{folder}{sep}server.js')
        os.remove(f'{folder}{sep}sinon-9.2.4.js')
        print("Done")
def main():
    """Command-line entry point: parse arguments and run the combiner."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('folder', help='Folder path, where allure static files are located')
    arg_parser.add_argument('--dest', default=None,
                            help='Folder path where the single html file will be stored. '
                                 'Default is None, so dest folder == allure static files folder.')
    arg_parser.add_argument('--remove-temp-files', action="store_true",
                            help='Whether remove temp files in source folder: server.js and sinon-9.2.4.js or not. '
                                 'Default is false')
    arg_parser.add_argument("--auto-create-folders", action="store_true",
                            help="Whether auto create dest folders or not when folder does not exist. Default is false.")
    opts = arg_parser.parse_args()
    combine_allure(
        opts.folder.rstrip(sep),
        dest_folder=opts.dest,
        remove_temp_files=opts.remove_temp_files,
        auto_create_folders=opts.auto_create_folders)
# Run the CLI when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4865247 | <gh_stars>1-10
import datetime
import json
import os
import requests
import random
import threading
import logging
from flask import Flask
from flask import request
from pymongo import MongoClient
from routing import configuration
from routing import graph
from routing import osm_handler
from routing.utils import bring_closer
# Shared application state: one Mongo connection; the routing graph and the
# OSM import handler both operate on the same 'osm' database.
mongo_client = MongoClient()
db_client = mongo_client['osm']
map_graph = graph.Graph(db_client)
handler = osm_handler.OsmHandler(db_client)
config = configuration.Configuration()
logging.basicConfig(filename="server.log", level=logging.INFO)
app = Flask(__name__)
import string
import random
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
def process_back_search(id):
    """Run the graph background search, then flag tracked task `id` as done."""
    map_graph.background_search()
    task_info = config.get_tmp_by_key(id)
    task_info['data'] = {'isEnd': True}
    config.set_tmp_by_key(id, task_info)
    logging.info('Server. Back_search has finished.')
def process_backup_create(id):
    """Dump the DB to a backup file, record its metadata, mark task `id` done."""
    handler.create_backup(config.get_bounds())
    backup_meta = {
        'exist': True,
        'path': '../settings/backup.json',
        'date': datetime.datetime.today().strftime("%d.%m.%Y %H:%M"),
    }
    config.set_backup_info(backup_meta)
    config.save_config()
    task_info = config.get_tmp_by_key(id)
    task_info['data'] = config.get_backup_info()
    config.set_tmp_by_key(id, task_info)
    logging.info('Server. Backup_create has finished.')
def process_backup_load(path, id):
    """Restore the DB from the backup at `path` and mark task `id` done."""
    config.set_bounds(handler.load_backup(path))
    config.save_config()
    task_info = config.get_tmp_by_key(id)
    task_info['data'] = config.get_backup_info()
    config.set_tmp_by_key(id, task_info)
    logging.info('Server. Backup_load has finished.')
def process_map(str_req, id):
    """
    Download an OSM extract from `str_req`, parse it into the database,
    register the newly covered bounds, and mark background task `id` done.

    Runs in a worker thread; progress/result is reported via `config` tmp entries.
    """
    r = requests.get(str_req, stream=True)
    if r.status_code != 200:
        logging.error('Server.Process_map: Ошибка скачивания карты.')
        return
    tmp_path = os.path.join('..', 'settings', 'tmp.osm')
    # Stream the download to disk in chunks so large maps don't sit in memory.
    with open(tmp_path, 'wb') as f:
        for chunk in r.iter_content(1024):
            f.write(chunk)
    # Parse inside a context manager: the original leaked the read handle,
    # which also makes the subsequent os.remove fail on Windows.
    with open(tmp_path, 'r', encoding='utf8') as osm_file:
        bounds = handler.parse(osm_file)
    if os.path.isfile(tmp_path):
        os.remove(tmp_path)
    if bounds not in config.get_bounds():
        config.add_bounds(bounds)
    config.save_config()
    info = config.get_tmp_by_key(id)
    info['data'] = {'bounds': bounds}
    config.set_tmp_by_key(id, info)
    logging.info('Server. Process_map has finished.')
@app.route("/api/0.5/fullroute")
# /api/0.5/fullroute?lat1=1.1&lon1=1.2&lat2=2.1&lon2=2.2
def route():
    """
    Compute pedestrian routes between two lat/lon points.

    Finds the nearest graph nodes to both points, runs A* twice (right-hand
    and left-hand traffic variants), converts node paths to coordinates, and
    snaps the endpoints toward the requested locations. Returns both paths
    with their distances and travel times as JSON.
    """
    try:
        lat1 = float(request.args.get('lat1'))
        lon1 = float(request.args.get('lon1'))
        lat2 = float(request.args.get('lat2'))
        lon2 = float(request.args.get('lon2'))
    except:
        # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt.
        logging.error("Server.fullroute: Неверные аргументы запроса")
        return json.dumps(
            {
                'error': True,
                'data': {},
                'msg': "Error in args"
            })
    try:
        node1 = map_graph.find_nearest([lat1, lon1])
        node2 = map_graph.find_nearest([lat2, lon2])
        logging.info(f'Routing {node1}, {node2}')
        # --- right-hand variant ---
        right = map_graph.astar(node1, node2)
        path_right = []
        time_right = 0
        length_right = 0
        if right:
            path_right = right['path']
            time_right = right['dist']
            # Sum pairwise distances along the node path.
            for i, node in enumerate(path_right):
                if i == len(path_right) - 1:
                    break
                length_right = length_right + map_graph.distance_between(node, path_right[i + 1])
        path_right = map_graph.clarify_path_to_loc(path_right) if path_right else []
        if path_right:
            if len(path_right) > 1:
                # Snap the first/last segments toward the requested points.
                start = bring_closer({'loc': [lat1, lon1], 'nodes': [[a['lat'], a['lon']] for a in path_right[0:2]]})
                middle = path_right[1:len(path_right) - 1]
                end = bring_closer({'loc': [lat2, lon2], 'nodes': [[a['lat'], a['lon']] for a in
                                    path_right[len(path_right) - 1:len(path_right) - 3:-1]]})
                end.reverse()
            else:
                # NOTE(review): here start/end are dicts while middle is a list;
                # `start + middle + end` below would raise TypeError for a
                # single-node path — confirm intended behavior.
                start = {'lat': lat1, 'lon': lon1}
                middle = path_right
                end = {'lat': lat2, 'lon': lon2}
            path_right = start + middle + end
        # --- left-hand variant ---
        left = map_graph.astar(node1, node2, nodes_client_for_left=map_graph.db_client.nodes)
        path_left = []
        time_left = 0
        length_left = 0
        if left:
            path_left = left['path']
            time_left = left['dist']
            for i, node in enumerate(path_left):
                if i == len(path_left) - 1:
                    break
                length_left = length_left + map_graph.distance_between(node, path_left[i + 1])
        path_left = map_graph.clarify_path_to_loc(path_left) if path_left else []
        if path_left:
            if len(path_left) > 1:
                start = bring_closer({'loc': [lat1, lon1], 'nodes': [[a['lat'], a['lon']] for a in path_left[0:2]]})
                middle = path_left[1:len(path_left) - 1]
                end = bring_closer({'loc': [lat2, lon2], 'nodes': [[a['lat'], a['lon']] for a in
                                    path_left[len(path_left) - 1:len(path_left) - 3:-1]]})
                end.reverse()
            else:
                start = {'lat': lat1, 'lon': lon1}
                middle = path_left
                end = {'lat': lat2, 'lon': lon2}
            path_left = start + middle + end
    except ValueError as e:
        return json.dumps({'error': True, 'data': {}, 'msg': str(e)})
    logging.info(f"""Send this:
{{
    'error': False,
    'data': {{
        'from': {{'lat': {lat1}, 'lon': {lon1}}},
        'to': {{'lat': {lat2}, 'lon': {lon2}}},
        'path_right': {path_right},
        'distance_right': {length_right},
        'time_right': {time_right},
        'path_left':{path_left},
        'distance_left': {length_left},
        'time_left': {time_left}
    }},
    'msg': "Full routing"
}}
""")
    return json.dumps(
        {
            'error': False,
            'data': {
                'from': {'lat': lat1, 'lon': lon1},
                'to': {'lat': lat2, 'lon': lon2},
                'path_right': path_right,
                'distance_right': length_right,
                'time_right': time_right,
                'path_left': path_left,
                'distance_left': length_left,
                'time_left': time_left
            },
            'msg': "Full routing"
        })
@app.route("/api/0.5/route_id")
# /api/0.5/route_id?id1=11&id2=22
def route_id():
    """Return the node-id path between two graph nodes as JSON."""
    try:
        id1 = int(request.args.get('id1'))
        id2 = int(request.args.get('id2'))
    except (TypeError, ValueError):
        # Missing args arrive as None (TypeError); non-numeric values raise
        # ValueError. A bare except here would also trap SystemExit.
        return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
    try:
        path = map_graph.astar(id1, id2)
    except ValueError as e:
        return json.dumps({'error': True, 'data': {}, 'msg': str(e)})
    path = list(path) if path else []
    return json.dumps(
        {
            'error': False,
            'data': {'path': path},
            'msg': "Routing by id"
        })
@app.route("/api/0.5/fullroute_id")
# /api/0.5/fullroute_id?id1=11&id2=22
def fullroute_id():
    """Route between two node ids and return the path as lat/lon coordinates."""
    try:
        id1 = int(request.args.get('id1'))
        id2 = int(request.args.get('id2'))
    except (TypeError, ValueError):
        # Missing args arrive as None (TypeError); non-numeric values raise
        # ValueError. A bare except here would also trap SystemExit.
        logging.error("Server.fullroute_id: Неверные аргументы запроса")
        return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
    try:
        path = map_graph.astar(id1, id2)
    except ValueError as e:
        return json.dumps({'error': True, 'data': {}, 'msg': str(e)})
    path = map_graph.clarify_path_to_loc(path) if path else []
    return json.dumps(
        {
            'error': False,
            'data': {'path': path},
            'msg': "Full routing by id"
        })
@app.route("/api/0.5/create_backup")
def create_backup():
    """Kick off backup creation in a worker thread and return its tracking id."""
    task_id = id_generator()
    worker = threading.Thread(target=process_backup_create, args=(task_id,))
    config.add_tmp(task_id, {'thread': worker})
    worker.start()
    logging.info("Server.create_backup: Создание backup'a...")
    return json.dumps({
        'error': False,
        'data': {'id': task_id},
        'msg': "Backup is starting"
    })
@app.route("/api/0.5/load_backup")
def load_backup():
    """Start restoring the stored backup in a worker thread, if one exists."""
    backup_info = config.get_backup_info()
    if not backup_info['exist']:
        logging.info('Server.load_backup: Backup отсутствует')
        return json.dumps({
            'error': True,
            'data': {},
            'msg': "Backup doesn't exist"
        })
    task_id = id_generator()
    worker = threading.Thread(target=process_backup_load, args=(backup_info['path'], task_id))
    config.add_tmp(task_id, {'thread': worker})
    worker.start()
    logging.info("Server.load_backup: Загрузка backup'a...")
    return json.dumps({
        'error': False,
        'data': {'id': task_id},
        'msg': "Backup is loading"
    })
@app.route("/api/0.5/load_map")
# /api/0.5/load_map?min_lat=1.1&min_lon=1.2&max_lat=2.1&max_lon=2.2
def load_map():
    """Start downloading an OSM extract for a bounding box in a worker thread."""
    try:
        min_lat = float(request.args.get('min_lat'))
        min_lon = float(request.args.get('min_lon'))
        max_lat = float(request.args.get('max_lat'))
        max_lon = float(request.args.get('max_lon'))
    except (TypeError, ValueError):
        # Missing args arrive as None (TypeError); non-numeric values raise
        # ValueError. A bare except here would also trap SystemExit.
        logging.error("Server.load_map: Неверные аргументы запроса")
        return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
    # Overpass expects bbox as min_lon,min_lat,max_lon,max_lat.
    str_req = 'https://overpass-api.de/api/map?bbox=' + str(min_lon) + ',' + str(min_lat) + ',' + str(max_lon) + ',' + str(max_lat)
    id = id_generator()
    thread = threading.Thread(target=process_map, args=(str_req, id))
    config.add_tmp(id, {'thread': thread})
    thread.start()
    logging.info('Server.load_map: Скачивание карты началось.')
    return json.dumps(
        {
            'error': False,
            'data': {'id': id},
            'msg': "Downloading has been started"
        })
@app.route("/api/0.5/bounds")
def get_bounds():
    """Return the list of map bounding boxes currently loaded into the DB."""
    logging.info(f"""Send this:
{{
    'error': False,
    'data': {{'bounds': {config.get_bounds()}}},
    'msg': "Map's bounds"
}}
""")
    return json.dumps(
        {
            'error': False,
            'data': {'bounds': config.get_bounds()},
            'msg': "Map's bounds"
        })
@app.route("/api/0.5/back_search")
def back_search():
    """Launch the graph background search in a worker thread; return its id."""
    logging.warning('Server. Фоновый поиск запущен.')
    task_id = id_generator()
    worker = threading.Thread(target=process_back_search, args=(task_id,))
    config.add_tmp(task_id, {'thread': worker})
    worker.start()
    return json.dumps({
        'error': False,
        'data': {'id': task_id},
        'msg': "Searching has been started"
    })
@app.route("/api/0.5/check")
# /api/0.5/check?id=string
def check():
    """
    Poll a background task by its tracking id.

    Returns JSON with `run` (still executing?) and `data` (the task's result
    once finished). The tracking entry is removed on the first poll after the
    thread has terminated.
    """
    try:
        id = request.args.get('id')
    except Exception:
        # request.args.get never raises in practice; kept narrow instead of
        # the original bare except to preserve the error contract.
        logging.error("Server.check: Неверные аргументы запроса")
        return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
    info = config.get_tmp_by_key(id)
    if not info:
        # the thread is not being tracked at all
        return json.dumps({
            'error': True,
            'data': {'run': False, 'data': {}},
            'msg': "Thread is not monitored."
        })
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the correct API.
    if info['thread'].is_alive():
        # the thread is still running
        return json.dumps({
            'error': False,
            'data': {'run': True, 'data': {}},
            'msg': "Thread is still running"
        })
    else:
        if 'data' in info:
            # the thread finished and produced a result
            config.del_tmp(id)
            return json.dumps({
                'error': False,
                'data': {'run': False, 'data': info['data']},
                'msg': "Thread has finished"
            })
        else:
            # the thread finished without producing a result
            config.del_tmp(id)
            return json.dumps({
                'error': True,
                'data': {'run': False, 'data': {}},
                'msg': "Smth was wrong"
            })
@app.route("/api/0.5/delete_graph")
def delete_graph():
    """Remove the routing graph from the database."""
    logging.warning('Server. Удаление графа из БД.')
    map_graph.delete_graph()
    response = {
        'error': False,
        'data': {},
        'msg': "Graph has been deleted"
    }
    return json.dumps(response)
@app.route("/api/0.5/drop")
def drop():
    """Drop the node/way collections, recreate indexes and reset map bounds."""
    logging.warning('Server. Удаление БД.')
    for collection in (db_client.nodes, db_client.ways):
        collection.drop()
    handler.create_indexes()
    config.set_bounds([])
    response = {
        'error': False,
        'data': {},
        'msg': "DB has been dropped"
    }
    return json.dumps(response)
# Start the web server on the configured host/port.
# NOTE(review): debug=True enables the interactive debugger — confirm this is
# never deployed to production.
app.run(host=config.get_ip(), port=config.get_port(), debug=True)
| StarcoderdataPython |
31075 | <filename>examples/kalman/gnss_kf.py
#!/usr/bin/env python
import numpy as np
from kalman_helpers import ObservationKind
from ekf_sym import EKF_sym
from laika.raw_gnss import GNSSMeasurement
def parse_prr(m):
  """Extract a pseudorange-rate observation from a raw GNSS measurement row."""
  # satellite state: ECEF position concatenated with velocity
  sat_state = np.concatenate((m[GNSSMeasurement.SAT_POS], m[GNSSMeasurement.SAT_VEL]))
  # observation and its 1x1 covariance (std**2)
  obs = m[GNSSMeasurement.PRR]
  cov = np.atleast_2d(m[GNSSMeasurement.PRR_STD]**2)
  return obs, cov, sat_state
def parse_pr(m):
  """Extract a pseudorange observation from a raw GNSS measurement row."""
  obs = np.atleast_1d(m[GNSSMeasurement.PR])
  cov = np.atleast_2d(m[GNSSMeasurement.PR_STD]**2)
  # satellite position plus the GLONASS frequency number (used to model the
  # frequency-dependent GLONASS bias)
  sat_info = np.concatenate((m[GNSSMeasurement.SAT_POS],
                             np.array([m[GNSSMeasurement.GLONASS_FREQ]])))
  return obs, cov, sat_info
class States(object):
  """Symbolic indices into the 11-element GNSS Kalman filter state vector."""
  ECEF_POS = slice(0,3) # x, y and z in ECEF in meters
  ECEF_VELOCITY = slice(3,6)  # vx, vy, vz in ECEF in m/s
  CLOCK_BIAS = slice(6, 7)  # clock bias in light-meters,
  CLOCK_DRIFT = slice(7, 8)  # clock drift in light-meters/s,
  CLOCK_ACCELERATION = slice(8, 9)  # clock acceleration in light-meters/s**2
  GLONASS_BIAS = slice(9, 10)  # GLONASS bias in meters (original comment was a copy-paste of CLOCK_DRIFT)
  GLONASS_FREQ_SLOPE = slice(10, 11)  # GLONASS bias in m expressed as bias + freq_num*freq_slope
class GNSSKalman(object):
  """Kalman filter over ECEF position/velocity, receiver clock and GLONASS bias states.

  The state layout is described by ``States`` (11 elements).
  """

  def __init__(self, N=0, max_tracks=3000):
    # N and max_tracks are accepted for interface compatibility but unused here.
    x_initial = np.array([-2712700.6008, -4281600.6679, 3859300.1830,
                          0, 0, 0,
                          0, 0, 0,
                          0, 0])

    # state covariance
    P_initial = np.diag([10000**2, 10000**2, 10000**2,
                         10**2, 10**2, 10**2,
                         (2000000)**2, (100)**2, (0.5)**2,
                         (10)**2, (1)**2])

    # process noise
    Q = np.diag([0.3**2, 0.3**2, 0.3**2,
                 3**2, 3**2, 3**2,
                 (.1)**2, (0)**2, (0.01)**2,
                 .1**2, (.01)**2])

    self.dim_state = x_initial.shape[0]

    # mahalanobis outlier rejection; add ObservationKind.PSEUDORANGE* kinds to enable
    maha_test_kinds = []

    # init filter
    self.filter = EKF_sym('gnss', Q, x_initial, P_initial, self.dim_state, self.dim_state,
                          maha_test_kinds=maha_test_kinds)

  @property
  def x(self):
    """Current state estimate."""
    return self.filter.state()

  @property
  def P(self):
    """Current state covariance."""
    return self.filter.covs()

  def predict(self, t):
    """Propagate the filter to time ``t``."""
    return self.filter.predict(t)

  def rts_smooth(self, estimates):
    """Rauch-Tung-Striebel smoothing over a list of filter estimates."""
    return self.filter.rts_smooth(estimates, norm_quats=False)

  def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
    """(Re)initialise the state; covariance from a diagonal, a full matrix, or kept as-is."""
    if covs_diag is not None:
      P = np.diag(covs_diag)
    elif covs is not None:
      P = covs
    else:
      P = self.filter.covs()
    self.filter.init_state(state, P, filter_time)

  def predict_and_observe(self, t, kind, data):
    """Predict to time ``t`` and fuse measurements ``data`` of observation ``kind``.

    Returns the filter's batch-update result, or None when ``data`` is empty
    or ``kind`` is not a pseudorange / pseudorange-rate observation.
    (Previously ``r`` was only assigned inside the branches, raising
    UnboundLocalError in those cases.)
    """
    r = None
    if len(data) > 0:
      data = np.atleast_2d(data)
      if kind == ObservationKind.PSEUDORANGE_GPS or kind == ObservationKind.PSEUDORANGE_GLONASS:
        r = self.predict_and_update_pseudorange(data, t, kind)
      elif kind == ObservationKind.PSEUDORANGE_RATE_GPS or kind == ObservationKind.PSEUDORANGE_RATE_GLONASS:
        r = self.predict_and_update_pseudorange_rate(data, t, kind)
    return r

  def predict_and_update_pseudorange(self, meas, t, kind):
    """Batch-update the filter with pseudorange measurements."""
    R = np.zeros((len(meas), 1, 1))
    sat_pos_freq = np.zeros((len(meas), 4))
    z = np.zeros((len(meas), 1))
    for i, m in enumerate(meas):
      z_i, R_i, sat_pos_freq_i = parse_pr(m)
      sat_pos_freq[i,:] = sat_pos_freq_i
      z[i,:] = z_i
      R[i,:,:] = R_i
    return self.filter.predict_and_update_batch(t, kind, z, R, sat_pos_freq)

  def predict_and_update_pseudorange_rate(self, meas, t, kind):
    """Batch-update the filter with pseudorange-rate measurements."""
    R = np.zeros((len(meas), 1, 1))
    z = np.zeros((len(meas), 1))
    sat_pos_vel = np.zeros((len(meas), 6))
    for i, m in enumerate(meas):
      z_i, R_i, sat_pos_vel_i = parse_prr(m)
      sat_pos_vel[i] = sat_pos_vel_i
      R[i,:,:] = R_i
      z[i, :] = z_i
    return self.filter.predict_and_update_batch(t, kind, z, R, sat_pos_vel)
if __name__ == "__main__":
  # Smoke test: constructing the filter loads/compiles the symbolic EKF.
  GNSSKalman()
| StarcoderdataPython |
11208380 | # Generated by Django 3.0.7 on 2021-07-17 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter the ``type_controlled`` choice list on three models.

    NOTE(review): auto-generated by Django's makemigrations; presumably this
    revision adds the ('BL', 'Bibliographic List') choice — confirm against
    migration 0093. Do not hand-edit the field definitions below; regenerate
    instead.
    """

    dependencies = [
        ('isisdata', '0093_merge_20210717_1853'),
    ]

    operations = [
        migrations.AlterField(
            model_name='authority',
            name='type_controlled',
            field=models.CharField(blank=True, choices=[('PE', 'Person'), ('IN', 'Institution'), ('TI', 'Time Period'), ('GE', 'Geographic Term'), ('SE', 'Serial Publication'), ('CT', 'Classification Term'), ('CO', 'Concept'), ('CW', 'Creative Work'), ('EV', 'Event'), ('CR', 'Cross-reference'), ('BL', 'Bibliographic List')], db_index=True, help_text='Specifies authority type. Each authority thema has its own list of controlled type vocabulary.', max_length=2, null=True, verbose_name='type'),
        ),
        migrations.AlterField(
            model_name='historicalauthority',
            name='type_controlled',
            field=models.CharField(blank=True, choices=[('PE', 'Person'), ('IN', 'Institution'), ('TI', 'Time Period'), ('GE', 'Geographic Term'), ('SE', 'Serial Publication'), ('CT', 'Classification Term'), ('CO', 'Concept'), ('CW', 'Creative Work'), ('EV', 'Event'), ('CR', 'Cross-reference'), ('BL', 'Bibliographic List')], db_index=True, help_text='Specifies authority type. Each authority thema has its own list of controlled type vocabulary.', max_length=2, null=True, verbose_name='type'),
        ),
        migrations.AlterField(
            model_name='historicalperson',
            name='type_controlled',
            field=models.CharField(blank=True, choices=[('PE', 'Person'), ('IN', 'Institution'), ('TI', 'Time Period'), ('GE', 'Geographic Term'), ('SE', 'Serial Publication'), ('CT', 'Classification Term'), ('CO', 'Concept'), ('CW', 'Creative Work'), ('EV', 'Event'), ('CR', 'Cross-reference'), ('BL', 'Bibliographic List')], db_index=True, help_text='Specifies authority type. Each authority thema has its own list of controlled type vocabulary.', max_length=2, null=True, verbose_name='type'),
        ),
    ]
| StarcoderdataPython |
5104387 | <filename>api/api.py<gh_stars>0
"""Runs loopback server for backend."""
import configparser
import tornado.ioloop
import tornado.web
from csv_handler import CSVHandler
from tree_handler import TreeHandler
from registration_handler import RegistrationHandler
# pylint: disable=W0223
class BaseHandler(tornado.web.RequestHandler):
    """Sets session token to track current user"""

    def get_current_user(self):
        # Returns the signed "user" cookie value (None when missing/invalid).
        return self.get_secure_cookie("user")
class MainHandler(BaseHandler):
    """Handles smoke tests to localhost:8888/"""

    # pylint: disable=W0221
    def get(self):
        # NOTE(review): "Hello, worlds" looks like a typo for "Hello, world" —
        # left unchanged since a smoke test may assert on the exact body.
        self.write("Hello, worlds")
def make_app():
    """Build the Tornado application, loading secrets from config.ini."""
    parser = configparser.ConfigParser()
    parser.read('config.ini')
    secrets = parser['DEFAULT']
    routes = [
        (r"/", MainHandler),
        (r"/csv", CSVHandler),
        (r"/tree/([^/]+)", TreeHandler),
        (r"/registration", RegistrationHandler),
    ]
    return tornado.web.Application(
        routes,
        autoreload=True,
        cookie_secret=secrets['COOKIE_SECRET_KEY'],
        jwt_secret=secrets['JWT_SECRET_KEY'],
    )
if __name__ == "__main__":
    # Build the application, bind port 8888 and run the IOLoop forever.
    APP = make_app()
    APP.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| StarcoderdataPython |
1745570 | # ----------------------------------------------------------------------
# Eltex.TAU.get_metrics
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
class Script(GetMetricsScript):
    """Collect CPU, memory, temperature and sensor-status metrics over SNMP."""

    name = "Eltex.TAU.get_metrics"

    # (OID, metric label) tables, preserved verbatim from the original
    # implementation. NOTE(review): some OIDs look anonymized/corrupted —
    # "1.3.6.1.4.1.35265.192.168.127.12.0" appears in BOTH tables below —
    # TODO confirm against the Eltex TAU MIB.
    TEMPERATURE_SENSORS = (
        ("1.3.6.1.4.1.35265.1.9.10.5.0", "noc::sensor::Temperature 1"),
        ("1.3.6.1.4.1.35265.1.9.10.6.0", "noc::sensor::Temperature 2"),
        ("1.3.6.1.4.1.35265.192.168.127.12.0", "noc::sensor::Temperature 3"),
        ("1.3.6.1.4.1.35265.192.168.3.11.0", "noc::sensor::Temperature 4"),
    )
    STATUS_SENSORS = (
        ("1.3.6.1.4.1.35265.1.9.10.9.0", "noc::sensor::Fan State"),
        ("1.3.6.1.4.1.35265.192.168.127.12.0", "noc::sensor::Fan 1 Rotate"),
        ("1.3.6.1.4.1.35265.172.16.31.10.0", "noc::sensor::Fan 2 Rotate"),
        ("1.3.6.1.4.1.35265.1.9.10.14.0", "noc::sensor::Device Power (ac/dc)"),
    )

    @metrics(
        ["CPU | Usage"],
        volatile=False,
        access="S",
    )
    def get_cpu_usage(self, metrics):
        """CPU usage; the raw value's integer part is reported, bad values skipped."""
        raw = self.snmp.get("1.3.6.1.4.1.35265.1.9.8.0", cached=True)
        if not raw:
            return
        try:
            self.set_metric(
                id=("CPU | Usage", None),
                value=int(raw.split(".")[0]),
                multi=True,
            )
        except ValueError:
            # non-numeric SNMP reply — skip the metric silently
            pass

    @metrics(
        ["Memory | Usage"],
        volatile=False,
        access="S",
    )
    def get_memory_free(self, metrics):
        """Memory usage converted from the raw counter to a percentage."""
        raw = self.snmp.get("1.3.6.1.4.1.35265.1.9.5.0", cached=True)
        if raw:
            # strip the 2-character suffix and apply the scale factor 446.44
            # (presumably derived from total RAM — TODO confirm)
            self.set_metric(
                id=("Memory | Usage", None),
                value=int(float(raw[:-2]) / 446.44),
                multi=True,
            )

    @metrics(
        ["Environment | Temperature"],
        volatile=False,
        access="S",
    )
    def get_temperature(self, metrics):
        """Read the temperature probes, one labelled sample per sensor."""
        for oid, label in self.TEMPERATURE_SENSORS:
            value = self.snmp.get(oid, cached=True)
            if value:
                self.set_metric(
                    id=("Environment | Temperature", None),
                    labels=[label],
                    value=value,
                    multi=True,
                )

    @metrics(
        ["Environment | Sensor Status"],
        volatile=False,
        access="S",
    )
    def get_sensor_status(self, metrics):
        """Read fan and power sensor states, one labelled sample per sensor."""
        for oid, label in self.STATUS_SENSORS:
            value = self.snmp.get(oid, cached=True)
            if value:
                self.set_metric(
                    id=("Environment | Sensor Status", None),
                    labels=[label],
                    value=value,
                    multi=True,
                )
| StarcoderdataPython |
324064 | """Test cases for the cli module."""
import click.testing
import pytest
from click.testing import CliRunner
from testfixtures import LogCapture
from scout import cli
@pytest.fixture
def runner() -> CliRunner:
    """Fixture for invoking the command-line interface."""
    # CliRunner is already imported at module level from click.testing.
    return CliRunner()
def test_main_succeeds(runner: CliRunner) -> None:
    """It exits with a status code of zero."""
    exit_code = runner.invoke(cli.main).exit_code
    assert exit_code == 0
def test_main_configures_logger(runner: CliRunner) -> None:
    """It outputs to log."""
    # LogCapture records root-logger output; main() is expected to emit exactly
    # one INFO record with this message on startup.
    with LogCapture() as log:
        runner.invoke(cli.main)
    log.check(("root", "INFO", "Initializing run..."))
| StarcoderdataPython |
6691601 | <filename>hata/backend/formdata.py<gh_stars>1-10
# -*- coding: utf-8 -*-
__all__ = ('Formdata', )
from io import IOBase
from urllib.parse import urlencode
from json import dumps as dump_to_json
from .utils import multidict
from .headers import CONTENT_TYPE, CONTENT_TRANSFER_ENCODING, CONTENT_LENGTH
from .multipart import MultipartWriter, create_payload, BytesPayload
class Formdata:
    """
    Helper class for `multipart/form-data` and `application/x-www-form-urlencoded` body generation.

    Attributes
    ----------
    fields : `list` of `tuple` (`multidict` of (`str`, `str`) items, `multidict` of (`str`, `str`) items, `Any`)
        The fields of the formdata. Each element is a tuple, which contains the following elements:
        - `type_options` : `multidict` of (`str`, `str`) items;
            Additional information used by the created ``PayloadBase`` instance when the field is generated.
        - `headers` : `multidict` of (`str`, `str`) items;
            The field specific headers.
        - value : `Any`;
            The field's value.
    is_multipart : `bool`
        Whether the formdata is `multipart/form-data` and not `application/x-www-form-urlencoded` type.
    quote_fields : `bool`
        Whether type option field values should be quoted.
    """
    __slots__ = ('fields', 'is_multipart', 'quote_fields', )

    def __init__(self, quote_fields=True):
        """
        Creates a new a ``Formdata``.

        Parameters
        ----------
        quote_fields : `bool`, Optional
            Whether type option field values should be quoted. Defaults to `True`.
        """
        self.fields = []
        self.is_multipart = False
        self.quote_fields = quote_fields

    @classmethod
    def from_fields(cls, fields):
        """
        Creates a new ``Formdata`` instance from the given fields.

        Parameters
        ----------
        fields : `dict` of (`str`, `Any`) items, `list` of `tuple` (`str`, `Any`), `IOBase` instance
            The fields to convert to ``Formdata``.

        Returns
        -------
        self : ``Formdata``
            The created formdata instance.

        Raises
        ------
        TypeError
            Received unhandleable field.
        """
        # Normalize the input into a flat list of (name, value) pairs / IOBase
        # instances before consuming it below.
        if isinstance(fields, dict):
            fields = list(fields.items())
        elif isinstance(fields, (list, tuple)):
            fields = list(fields)
        else:
            fields = [fields]

        self = cls()
        while fields:
            field = fields.pop(0)
            fiend_type = field.__class__
            if issubclass(fiend_type, IOBase):
                # file-like objects contribute their own name when available
                self.add_field(getattr(field, 'name', 'unknown'), field)
            elif issubclass(fiend_type, (tuple, list)) and len(field) == 2:
                self.add_field(*field)
            else:
                raise TypeError(f'`Formdata.from_fields` got unhandleable field: {fiend_type.__name__}; {field!r}.')
        return self

    def add_field(self, name, value, content_type=None, filename=None, transfer_encoding=None):
        """
        Adds a field to the formdata.

        Parameters
        ----------
        name : `str`
            The field's name.
        value : `Any`
            The field's value.
        content_type : `None` or `str`, Optional
            The field's content type. Defaults to `None`.
        filename : `None` or `str`, Optional
            The field's name. If not given or given as `None` (so by the default), and the given `value` is an `IOBase`
            instance then tries to use that's name.
        transfer_encoding : `None` or `str`, Optional
            The field's transfer encoding. Defaults to `None`.

        Raises
        ------
        AssertionError
            - If `content_type` was not given as `None`, neither as `str` instance.
            - If `filename` was not given as `None`, neither as `str` instance.
            - If `transfer_encoding` was not given as `None`, neither as `str` instance.
        """
        if isinstance(value, IOBase):
            self.is_multipart = True
        elif isinstance(value, (bytes, bytearray, memoryview)):
            # raw byte payloads are sent as a "file" named after the field
            if (filename is None) and (transfer_encoding is None):
                filename = name

        type_options = multidict()
        type_options['name'] = name

        if (filename is not None):
            if __debug__:
                if not isinstance(filename, str):
                    raise AssertionError(f'`filename` can be given as `None` or `str` instance, got '
                        f'{filename.__class__.__name__}.')

        if (filename is None) and isinstance(value, IOBase):
            filename = getattr(value, 'name', name)

        if (filename is not None):
            type_options['filename'] = filename
            self.is_multipart = True

        headers = {}
        if (content_type is not None):
            if __debug__:
                if not isinstance(content_type, str):
                    raise AssertionError('`content_type` can be `None` or `str` instance, got '
                        f'{content_type.__class__.__name__}.')

            headers[CONTENT_TYPE] = content_type
            self.is_multipart = True

        if (transfer_encoding is not None):
            if __debug__:
                if not isinstance(transfer_encoding, str):
                    raise AssertionError('`transfer_encoding` can be `None` or `str` instance, got: '
                        f'{transfer_encoding.__class__.__name__}.')

            headers[CONTENT_TRANSFER_ENCODING] = transfer_encoding
            self.is_multipart = True

        self.fields.append((type_options, headers, value))

    def _gen_form_urlencoded(self, encoding):
        """
        Generates `application/x-www-form-urlencoded` payload from the ``Formdata``'s fields.

        Parameters
        ----------
        encoding : `str`
            The encoding to use to encode the formdata's fields.

        Returns
        -------
        payload : ``BytesPayload``
            The generated payload.
        """
        data = []
        for type_options, header, value in self.fields:
            data.append((type_options['name'], value))

        if encoding == 'utf-8':
            content_type = 'application/x-www-form-urlencoded'
        else:
            content_type = f'application/x-www-form-urlencoded; charset={encoding}'

        return BytesPayload(urlencode(data, doseq=True, encoding=encoding).encode(), {'content_type': content_type})

    def _gen_form_data(self, encoding):
        """
        Generates `multipart/form-data` payload from the ``Formdata``'s fields.

        Parameters
        ----------
        encoding : `str`
            The encoding to use to encode the formdata's fields.

        Returns
        -------
        payload : ``MultipartWriter``
            The generated payload.

        Raises
        ------
        TypeError
            Cannot serialize a field.
        RuntimeError
            - If a field's content has unknown content-encoding.
            - If a field's content has unknown content-transfer-encoding.
        """
        writer = MultipartWriter('form-data')
        for type_options, headers, value in self.fields:
            try:
                payload_kwargs = {
                    'headers': headers,
                    'encoding': encoding,
                }

                try:
                    content_type = headers[CONTENT_TYPE]
                except KeyError:
                    pass
                else:
                    payload_kwargs['content_type'] = content_type

                if type_options:
                    payload_kwargs.update(type_options.kwargs())

                part = create_payload(value, payload_kwargs)
            except BaseException as err:
                raise TypeError(f'Can not serialize value type: {value.__class__.__name__}, headers: {headers!r}, '
                    f'value: {value!r}.') from err

            if type_options:
                part.set_content_disposition('form-data', type_options.kwargs(), quote_fields=self.quote_fields)
                # drop any stale length; the writer determines part sizes itself
                part.headers.pop_all(CONTENT_LENGTH, None)

            writer.append_payload(part)

        return writer

    def __call__(self, encoding='utf-8'):
        """
        Gets the payload of the ``Formdata``.

        Parameters
        ----------
        encoding : `str`
            The encoding to use to encode the formdata's fields.

        Returns
        -------
        payload : ``BytesPayload`` or ``MultipartWriter``
            The generated payload.

        Raises
        ------
        TypeError
            Cannot serialize a field.
        """
        if self.is_multipart:
            return self._gen_form_data(encoding)
        else:
            return self._gen_form_urlencoded(encoding)

    def add_json(self, data):
        """
        Shortcut to add a json field to the ``Formdata``.

        Parameters
        ----------
        data : `Any`
            Json serializable content.
        """
        if data:
            type_options = multidict()
            type_options['name'] = 'data_json'
            data = dump_to_json(data, separators=(',', ':'), ensure_ascii=True)
            self.fields.append((type_options, multidict(), data))

    def __repr__(self):
        """Returns the representation of the formdata."""
        result = ['<', self.__class__.__name__, ' [']

        fields = self.fields
        limit = len(fields)
        if limit:
            index = 0
            while True:
                type_options, headers, value = fields[index]
                result.append('(')
                result.append(repr(type_options))
                result.append(', ')
                result.append(repr(headers))
                result.append(', ')
                result.append(repr(value))
                result.append(')')

                index += 1
                if index == limit:
                    break

                result.append(', ')
                continue

        result.append(']>')

        return ''.join(result)

    __str__ = __repr__
| StarcoderdataPython |
6615204 | <reponame>Speccy-Rom/My-web-service-architecture
from typing import Callable
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.src.exception import APIException
def http_exception_factory(status_code: int) -> Callable:
    """Build an exception handler that renders an APIException as JSON."""
    def _handler(_: Request, exception: APIException) -> JSONResponse:
        payload = {"message": exception.message}
        return JSONResponse(status_code=status_code, content=payload)

    return _handler
| StarcoderdataPython |
3406899 | <filename>Snake game version 2/main/game of snakes.py
# write a simple snake game in python
import pygame
import random
import time
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)       # NOTE(review): defined but unused below
BLUE = (0, 0, 255)      # NOTE(review): defined but unused below

# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20

# This sets the margin between each cell
MARGIN = 5

# Create a 2 dimensional array. A two dimensional
# array is simply a list of lists.
grid = []
for row in range(10):
    # Add an empty array that will hold each cell
    # in this row
    grid.append([])
    for column in range(10):
        grid[row].append(0)  # Append a cell

# Set row 1, cell 5 to one. (Remember rows and
# column numbers start at zero.)
grid[1][5] = 1

# Initialize pygame
pygame.init()

# Set the HEIGHT and WIDTH of the screen
WINDOW_SIZE = [255, 255]
screen = pygame.display.set_mode(WINDOW_SIZE)

# Set title of screen
pygame.display.set_caption("Snake Game")

# Loop until the user clicks the close button.
done = False

# Used to manage how fast the screen updates
clock = pygame.time.Clock()

# NOTE(review): despite the title, this loop only renders a static 10x10 grid
# with a single green cell — there is no snake movement, food or keyboard
# handling implemented yet.
# -------- Main Program Loop -----------
while not done:
    for event in pygame.event.get():  # User did something
        if event.type == pygame.QUIT:  # If user clicked close
            done = True  # Flag that we are done so we exit this loop

    # Set the screen background
    screen.fill(BLACK)

    # Draw the grid
    for row in range(10):
        for column in range(10):
            color = WHITE
            if grid[row][column] == 1:
                color = GREEN
            pygame.draw.rect(screen,color,[(MARGIN + WIDTH) * column + MARGIN,(MARGIN + HEIGHT) * row + MARGIN,WIDTH, HEIGHT])

    # Limit to 60 frames per second
    clock.tick(60)

    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()

# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
| StarcoderdataPython |
8086236 | import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from sklearn.datasets import fetch_20newsgroups
from tensorflow.keras.preprocessing.text import Tokenizer | StarcoderdataPython |
5144759 | <reponame>ab5424/agility
# Copyright (c) <NAME>
# Distributed under the terms of the MIT License
# author: <NAME>
"""Analysis functions."""
import pathlib
import sys
import warnings
import numpy as np
import pandas as pd
from agility.minimiser import mimimise_lmp
class GBStructure:
"""This is the fundamental class of a grain boundary object."""
def __init__(self, backend, filename, **kwargs):
"""Initialize."""
self.backend = backend
self.filename = filename
self.data = None
if self.backend not in [
"ovito",
"pymatgen",
"babel",
"pyiron",
# https://github.com/pyiron/pylammpsmpi
"ase",
"lammps",
]:
# Put error here
pass
if self.backend == "lammps":
# Determine if a jupyter notebook is used
# Taken from shorturl.at/aikzP
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
ipy = True # Jupyter notebook or qtconsole
elif shell == "TerminalInteractiveShell":
ipy = False # Terminal running IPython
else:
ipy = False # Other type (?)
except NameError:
ipy = False # Probably standard Python interpreter
if ipy:
from lammps import IPyLammps
self.pylmp = IPyLammps()
else:
from lammps import PyLammps
self.pylmp = PyLammps()
if filename:
self.read_file(filename, **kwargs)
def read_file(self, filename, **kwargs):
"""Read structure from file.
Args:
filename: File to read.
Returns:
None
"""
if self.backend == "ovito":
from ovito.io import import_file
self.pipeline = import_file(str(filename))
if self.backend == "pymatgen":
from pymatgen.core import Structure
self.data.structure = Structure.from_file(filename)
if self.backend == "lammps":
self._init_lmp(filename=filename, **kwargs)
def _init_lmp(
self,
filename,
file_type: str = "data",
pair_style: str = "none",
kspace_style: str = "none",
):
"""Initialise lammps backend.
Args:
filename: File to read.
file_type: File type (data, dump, restart)
pair_style: lammps pair style
kspace_style:
"""
self.pylmp.units("metal")
self.pylmp.atom_style("charge")
self.pylmp.atom_modify("map array")
self.pylmp.pair_style(f"{pair_style}")
if kspace_style:
self.pylmp.kspace_style(f"{kspace_style}")
if file_type == "data":
self.pylmp.read_data(filename)
elif file_type == "dump":
self.pylmp.read_dump(filename)
elif file_type == "restart":
self.pylmp.read_restart(filename)
else:
print("Please specify the type of lammps file to read.")
    def save_structure(self, filename: str, file_type: str, **kwargs):
        """Save structure to disc.

        Args:
            filename: Output file path.
            file_type: Output format. For ovito this is passed straight to
                ``export_file``; for lammps one of "data", "dump" or "restart".
            **kwargs: Extra options forwarded to ovito's ``export_file``.
        """
        if self.backend == "ovito":
            from ovito.io import export_file

            export_file(self.pipeline, filename, file_type, **kwargs)
        if self.backend == "lammps":
            # NOTE(review): an unrecognized file_type is silently ignored here.
            if file_type == "data":
                self.pylmp.write_data(filename)
            elif file_type == "dump":
                self.pylmp.write_dump(filename)
            elif file_type == "restart":
                self.pylmp.write_restart(filename)
    def minimise(self, *args, **kwargs):
        """Minimise the structure energy (lammps backend only).

        Positional and keyword arguments are forwarded to the lammps minimiser.
        NOTE(review): the ovito branch terminates the interpreter via
        sys.exit(1); raising NotImplementedError would be friendlier — confirm
        no caller relies on SystemExit.

        Returns:
            None
        """
        if self.backend == "ovito":
            print(f"The {self.backend} backend does not support minimisation.")
            sys.exit(1)
        elif self.backend == "lammps":
            # note: helper name is misspelled at import ("mimimise_lmp")
            self.pylmp = mimimise_lmp(self.pylmp, *args, **kwargs)
    def delete_particles(self, particle_type):
        """Delete a specific type of particles from a structure.

        This can be particularly useful if there is a mobile type in the
        structure. Note that for ovito structures you need to make sure that
        type information is included.

        Args:
            particle_type: Particle type(s) to remove; for pymatgen a species
                list, for lammps a numeric type, for ovito a type set.

        Returns:
            None
        """
        if self.backend == "ovito":
            from ovito.modifiers import DeleteSelectedModifier

            self.select_particles_by_type(particle_type)
            self.pipeline.modifiers.append(DeleteSelectedModifier())
        elif self.backend == "lammps":
            # group the atoms of that type, then delete the whole group
            self.pylmp.group(f"delete type {particle_type}")
            self.pylmp.delete_atoms("group delete compress no")
        elif self.backend == "pymatgen":
            self.data.structure.remove_species(particle_type)
        elif self.backend == "babel":
            # not implemented for the babel backend
            pass
    def select_particles_by_type(self, particle_type: set):
        """Select a specific type of particles from a structure (ovito only).

        Args:
            particle_type: Set of particle types to select.

        Returns:
            None
        """
        if self.backend == "ovito":
            from ovito.plugins.StdModPython import SelectTypeModifier

            # User modifier that touches the particle-types property;
            # NOTE(review): presumably forces creation of the mutable
            # particle-type array before selection — confirm intent.
            def assign_particle_types(frame, data):  # pylint: disable=W0613
                atom_types = data.particles_.particle_types_  # pylint: disable=W0612

            self.pipeline.modifiers.append(assign_particle_types)

            # Select atoms of the requested types
            self.pipeline.modifiers.append(
                SelectTypeModifier(
                    operate_on="particles",
                    property="Particle Type",
                    types=particle_type,
                )
            )
    def select_particles(
        self,
        list_ids,
        list_ids_type: str = "Identifier",
        invert=True,
        delete=True,
        expand_cutoff=None,
        expand_nearest_neighbors=None,
        iterations=1,
    ):
        """Select particles by ID (ovito only), optionally expand/invert/delete.

        Args:
            list_ids: Particle identifiers (or indices) to select.
            list_ids_type: "Identifier" to match the "Particle Identifier"
                property, "Indices" to match array indices.
            invert: Invert the selection afterwards (keeps bulk ions selected).
            delete: Delete the (possibly inverted) selection.
            expand_cutoff (float): Expand the selection by a distance cutoff.
            expand_nearest_neighbors (int): Expand by N nearest neighbors
                (takes precedence over ``expand_cutoff``).
            iterations: Number of expansion iterations.

        Returns:
            None
        """
        if self.backend == "ovito":

            def modify(frame, data):  # pylint: disable=W0613
                # Specify the IDs of all atoms that are to remain here
                if list_ids_type == "Identifier":
                    ids = data.particles["Particle Identifier"]
                elif list_ids_type == "Indices":
                    # NOTE(review): relies on self.data already holding computed
                    # results with a "Structure Type" property — confirm callers
                    # run an analysis first.
                    ids = list(np.where(self.data.particles["Structure Type"] != 10000)[0])
                else:
                    raise NameError("Only Indices and Identifier are possible as list id types.")
                l_ids = np.in1d(ids, list_ids, assume_unique=True, invert=False)
                selection = data.particles_.create_property(  # pylint: disable=W0612
                    "Selection", data=l_ids
                )

            self.pipeline.modifiers.append(modify)

            if expand_nearest_neighbors or expand_cutoff:
                from ovito.plugins.ParticlesPython import ExpandSelectionModifier

                if expand_nearest_neighbors:
                    self.pipeline.modifiers.append(
                        ExpandSelectionModifier(
                            mode=ExpandSelectionModifier.ExpansionMode.Nearest,
                            num_neighbors=expand_nearest_neighbors,
                            iterations=iterations,
                        )
                    )
                else:
                    self.pipeline.modifiers.append(
                        ExpandSelectionModifier(
                            cutoff=expand_cutoff,
                            mode=ExpandSelectionModifier.ExpansionMode.Cutoff,
                            iterations=iterations,
                        )
                    )

            if invert:
                self._invert_selection()  # for bulk ions
            if delete:
                self._delete_selection()
    def _invert_selection(self):
        """Invert the current particle selection (ovito only)."""
        if self.backend == "ovito":
            from ovito.plugins.StdModPython import InvertSelectionModifier

            self.pipeline.modifiers.append(InvertSelectionModifier())
        if self.backend == "pymatgen":
            # Todo: Look which ids are in the list and invert by self.structure
            pass
def _delete_selection(self):
if self.backend == "ovito":
from ovito.plugins.StdModPython import DeleteSelectedModifier
self.pipeline.modifiers.append(DeleteSelectedModifier())
    def perform_cna(
        self,
        mode: str = "IntervalCutoff",
        enabled: tuple = ("fcc", "hpc", "bcc"),
        cutoff: float = 3.2,
        only_selected: bool = False,
        compute: bool = True,
    ):
        """Perform Common neighbor analysis.

        Args:
            mode: CNA mode ("IntervalCutoff", "AdaptiveCutoff", "FixedCutoff"
                or "BondBased"). The lammps backend always uses a fixed cutoff.
            enabled: Structure types the identifier should keep enabled
                (ovito only). NOTE(review): the default contains "hpc", which
                looks like a typo for "hcp" — with "hpc" the HCP type is
                disabled; confirm intent.
            cutoff: Cutoff for the FixedCutoff mode / lammps.
            only_selected: Restrict the analysis to selected atoms.
            compute: Run the analysis immediately via set_analysis().

        Returns:
            None
        """
        if self.backend == "ovito":
            # TODO: Enable/disable structure types
            from ovito.plugins.ParticlesPython import CommonNeighborAnalysisModifier

            if mode == "IntervalCutoff":
                cna_mode = CommonNeighborAnalysisModifier.Mode.IntervalCutoff
            elif mode == "AdaptiveCutoff":
                cna_mode = CommonNeighborAnalysisModifier.Mode.AdaptiveCutoff
            elif mode == "FixedCutoff":
                cna_mode = CommonNeighborAnalysisModifier.Mode.FixedCutoff
            elif mode == "BondBased":
                cna_mode = CommonNeighborAnalysisModifier.Mode.BondBased
            else:
                print(f'Selected CNA mode "{mode}" unknown.')
                sys.exit(1)
            cna = CommonNeighborAnalysisModifier(
                mode=cna_mode, cutoff=cutoff, only_selected=only_selected
            )
            # Enabled by default: FCC, HCP, BCC
            if "fcc" not in enabled:
                cna.structures[CommonNeighborAnalysisModifier.Type.FCC].enabled = False
            if "hcp" not in enabled:
                cna.structures[CommonNeighborAnalysisModifier.Type.HCP].enabled = False
            if "bcc" not in enabled:
                cna.structures[CommonNeighborAnalysisModifier.Type.BCC].enabled = False
            if "ico" not in enabled:
                cna.structures[CommonNeighborAnalysisModifier.Type.ICO].enabled = False
            self.pipeline.modifiers.append(cna)
        elif self.backend == "lammps":
            # https://docs.lammps.org/compute_cna_atom.html
            n_compute = len([i["style"] for i in self.pylmp.computes if i["style"] == "cna/atom"])
            self.pylmp.compute(f"cna_{n_compute} all cna/atom {cutoff}")
        else:
            raise not_implemented(self.backend)
        if only_selected:
            warnings.warn(
                "Evaluating only the selected atoms. Be aware that non-selected atoms may be"
                "assigned to the wrong category."
            )
        if compute:
            self.set_analysis()
def perfom_cnp(self, cutoff: float = 3.20, compute: bool = False):
"""Perform Common Neighborhood Parameter calculation.
Please cite https://doi.org/10.1016/j.cpc.2007.05.018
Returns:
None
"""
if self.backend == "lammps":
self.pylmp.compute(f"compute 1 all cnp/atom {cutoff}")
if compute:
self.set_analysis()
    def perform_voroni_analysis(self, compute: bool = False):
        """Perform Voronoi analysis.

        NOTE(review): the method name is misspelled ("voroni") but kept for
        backward compatibility with existing callers.

        Args:
            compute: Run the analysis immediately via set_analysis().

            ovito options (fixed here):
                compute_indices = True
                use_radii = False
                edge_threshold = 0.0
            lammps (see https://docs.lammps.org/compute_voronoi_atom.html):
                only_group = no arg
                occupation = no arg
                surface arg = sgroup-ID
                    sgroup-ID = compute the dividing surface between group-ID and sgroup-ID
                    this keyword adds a third column to the compute output
                radius arg = v_r
                    v_r = radius atom style variable for a poly-disperse Voronoi tessellation
                edge_histo arg = maxedge
                    maxedge = maximum number of Voronoi cell edges to be accounted in the histogram
                edge_threshold arg = minlength
                    minlength = minimum length for an edge to be counted
                face_threshold arg = minarea
                    minarea = minimum area for a face to be counted
                neighbors value = yes or no = store list of all neighbors or no
                peratom value = yes or no = per-atom quantities accessible or no

        Returns:
            None
        """
        if self.backend == "ovito":
            from ovito.plugins.ParticlesPython import VoronoiAnalysisModifier

            voro = VoronoiAnalysisModifier(
                compute_indices=True, use_radii=False, edge_threshold=0.0
            )
            self.pipeline.modifiers.append(voro)
        elif self.backend == "lammps":
            # https://docs.lammps.org/compute_voronoi_atom.html
            self.pylmp.compute("1 all voronoi/atom")
        if compute:
            self.set_analysis()

    # Possible alternative implementations:
    # https://tess.readthedocs.io/en/stable/
    # https://github.com/materialsproject/pymatgen/blob/v2022.0.14/pymatgen/analysis/structure_analyzer.py#L61-L174
    def perform_ptm(
        self,
        *args,
        enabled: tuple = ("fcc", "hpc", "bcc"),
        rmsd_threshold: float = 0.1,
        compute: bool = True,
        **kwargs,
    ):
        """Perform Polyhedral template matching.

        https://dx.doi.org/10.1088/0965-0393/24/5/055007
        https://github.com/pmla/polyhedral-template-matching.

        Args:
            enabled (tuple): Structure types to enable. Possible values:
                fcc-hcp-bcc-ico-sc-dcub-dhex-graphene.
                NOTE(review): the default contains "hpc" (typo for "hcp"?), so
                the HCP type gets disabled and the validation loop below prints
                a warning for it — confirm intent.
            rmsd_threshold: RMSD cutoff for template matching.
            compute: Run the analysis immediately via set_analysis().
            *args / **kwargs: Forwarded to ovito's
                PolyhedralTemplateMatchingModifier (e.g. output_ordering,
                output_orientation, output_rmsd).

        Returns:
            None
        """
        for i in enabled:
            if i not in ["fcc", "hcp", "bcc", "ico", "sc", "dcub", "dhex", "graphene"]:
                print(f"Enabled structure type {i} unknown")
        if self.backend == "ovito":
            from ovito.plugins.ParticlesPython import PolyhedralTemplateMatchingModifier

            ptm = PolyhedralTemplateMatchingModifier(*args, rmsd_cutoff=rmsd_threshold, **kwargs)
            # Enabled by default: FCC, HCP, BCC
            if "fcc" not in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.FCC].enabled = False
            if "hcp" not in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.HCP].enabled = False
            if "bcc" not in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.BCC].enabled = False
            if "ico" in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.ICO].enabled = True
            if "sc" in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.SC].enabled = True
            if "dcub" in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.CUBIC_DIAMOND].enabled = True
            if "dhex" in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.HEX_DIAMOND].enabled = True
            if "graphene" in enabled:
                ptm.structures[PolyhedralTemplateMatchingModifier.Type.GRAPHENE].enabled = True
            self.pipeline.modifiers.append(ptm)
        elif self.backend == "lammps":
            # https://docs.lammps.org/compute_ptm_atom.html
            n_compute = len([i["style"] for i in self.pylmp.computes if i["style"] == "ptm/atom"])
            enabled_structures = " ".join(enabled)
            self.pylmp.compute(
                f"ptm_{n_compute} all ptm/atom {enabled_structures} {rmsd_threshold}"
            )
        else:
            raise not_implemented(self.backend)
        if compute:
            self.set_analysis()
def perform_ajm(self, compute: bool = True):
    """Perform Ackland-Jones bond-angle analysis.

    https://doi.org/10.1103/PhysRevB.73.054104

    Args:
        compute (bool): Evaluate the analysis immediately via set_analysis.

    Returns:
        None
    """
    if self.backend == "ovito":
        from ovito.plugins.ParticlesPython import AcklandJonesModifier

        ajm = AcklandJonesModifier()
        self.pipeline.modifiers.append(ajm)
    elif self.backend == "lammps":
        # https://docs.lammps.org/compute_ackland_atom.html
        n_compute = len(
            [i["style"] for i in self.pylmp.computes if i["style"] == "ackland/atom"]
        )
        self.pylmp.compute(f"ackland_{n_compute} all ackland/atom")
    else:
        # NOTE(review): other backends silently no-op here, while sibling
        # methods raise not_implemented — confirm the intended behaviour.
        pass
    if compute:
        # Bug fix: the ovito branch previously also ran
        # `self.data = self.pipeline.compute()`, so the pipeline was
        # evaluated twice per call; set_analysis alone is sufficient.
        self.set_analysis()
def perform_csp(self, num_neighbors: int = 12, compute: bool = True):
    """Compute the centrosymmetry parameter for each atom.

    Use ``num_neighbors=12`` for fcc and 8 for bcc, respectively.

    Args:
        num_neighbors (int): Number of nearest neighbours of an atom in
            the ideal lattice (fcc=12, bcc=8).
        compute (bool): Evaluate the analysis immediately via set_analysis.

    Returns:
        None
    """
    if self.backend == "ovito":
        from ovito.plugins.ParticlesPython import CentroSymmetryModifier

        # Bug fix: num_neighbors was previously ignored by the ovito
        # branch, which always used the modifier's built-in default.
        csp = CentroSymmetryModifier(num_neighbors=num_neighbors)
        self.pipeline.modifiers.append(csp)
    elif self.backend == "lammps":
        # https://docs.lammps.org/compute_centro_atom.html
        n_compute = len(
            [i["style"] for i in self.pylmp.computes if i["style"] == "centro/atom"]
        )
        self.pylmp.compute(f"centro_{n_compute} all centro/atom {num_neighbors}")
    if compute:
        self.set_analysis()
def get_distinct_grains(
    self, *args, algorithm: str = "GraphClusteringAuto", compute: bool = True, **kwargs
):
    """Get distinct grains from the structure.

    Args:
        algorithm (str): One of "GraphClusteringAuto",
            "GraphClusteringManual" or "MinimumSpanningTree".
        compute (bool): Evaluate the analysis immediately via set_analysis.
        *args, **kwargs: Forwarded to ovito's GrainSegmentationModifier,
            e.g. color_particles=True, handle_stacking_faults=True,
            merging_threshold=0.0, min_grain_size=100, orphan_adoption=True.

    Returns:
        None
    """
    if self.backend == "ovito":
        from ovito.plugins.CrystalAnalysisPython import GrainSegmentationModifier

        valid_algorithms = (
            "GraphClusteringAuto",
            "GraphClusteringManual",
            "MinimumSpanningTree",
        )
        if algorithm not in valid_algorithms:
            # Typo fix: the message previously read "Incorrenct".
            print("Incorrect Grain Segmentation algorithm specified.")
            sys.exit(1)
        # The algorithm names mirror the enum members exactly, so a
        # validated attribute lookup replaces the if/elif chain.
        gsm_mode = getattr(GrainSegmentationModifier.Algorithm, algorithm)
        gsm = GrainSegmentationModifier(*args, algorithm=gsm_mode, **kwargs)
        self.pipeline.modifiers.append(gsm)
    if compute:
        self.set_analysis()
    # TODO: Get misorientation plot
def set_analysis(self):
    """Evaluate all pending modifiers/computes for the current backend.

    This is essential for the ovito backend (fills ``self.data``); the
    lammps backend exposes compute results after a single run step, and
    pymatgen needs no evaluation at all.

    Returns:
        None
    """
    backend = self.backend
    if backend == "ovito":
        self.data = self.pipeline.compute()
    elif backend == "lammps":
        self.pylmp.run(1)
    elif backend == "pymatgen":
        print("The pymatgen backend does not require setting the analysis.")
def expand_to_non_selected(
    self, cutoff=4.5, return_type: str = "Identifier", invert: bool = False
):
    """Extend the GB classification to atoms excluded from structure analysis.

    Useful if only_selected was chosen for structural analysis (e.g. only
    cations analysed): every non-selected atom is assigned to the grain
    boundary when fewer than half of its analysed neighbours are
    crystalline (majority vote).

    Args:
        cutoff: Neighbour-search radius for ovito's CutoffNeighborFinder.
        return_type: "Identifier" for particle identifiers, "Indices" for
            positional indices.
        invert: If True, return the non-selected atoms that were NOT
            classified as grain boundary instead.

    Returns:
        gb_non_selected: list of GB atoms that were not in the previously
        selected group.

    Raises:
        NameError: If return_type is invalid.
    """
    if self.backend == "ovito":
        if return_type not in ["Identifier", "Indices"]:
            raise NameError("Only Indices and Identifier are possible as return types.")
        # Select the complement of the previously analysed atoms and
        # re-evaluate the pipeline so the selection is available below.
        self._invert_selection()
        self.set_analysis()
        from ovito.data import CutoffNeighborFinder

        finder = CutoffNeighborFinder(cutoff, self.data)
        gb_non_selected = []
        # edge = []
        # Obtain a set of bulk (=crystalline) cations
        bulk_atoms_set = set(self.get_crystalline_atoms(return_type="Indices"))
        # These are the atoms that haven't been analysed in the structure analysis, i.e. anions
        non_selected = set(np.where(self.data.particles.selection == 1)[0])
        for index in non_selected:
            neighbors = {neigh.index for neigh in finder.find(index)}
            # Neighbours without the atoms that were themselves excluded
            # from the structural analysis.
            neighbors_no_selected = neighbors - non_selected
            if len(neighbors_no_selected) < 3:
                warnings.warn("At least one atoms has only two other atoms to assign.")
            bulk_neighbors = bulk_atoms_set.intersection(neighbors_no_selected)
            # NOTE(review): raises ZeroDivisionError if an atom has no
            # analysed neighbour within the cutoff — confirm this cannot
            # happen for sensible cutoffs.
            bulk_fraction = len(bulk_neighbors) / len(neighbors_no_selected)
            # Fewer than half crystalline neighbours => grain boundary atom.
            if bulk_fraction < 0.5:
                gb_non_selected.append(index)
            # if bulk_fraction == 0.5 and np.random.random_sample() < 0.5:
            #     gb_non_selected.append(index)
        if invert:
            gb_non_selected = list(set(non_selected) - set(gb_non_selected))
        if return_type == "Identifier":
            gb_non_selected = [
                self.data.particles["Particle Identifier"][i] for i in gb_non_selected
            ]
    else:
        raise not_implemented(self.backend)
    return gb_non_selected
def get_non_crystalline_atoms(self, mode: str = "cna", return_type: str = "Identifier"):
    """Get the atoms at the grain boundary.

    For this to work, some sort of structural analysis (cna/ptm/ackland)
    has to be performed beforehand so a per-atom structure type exists.

    Args:
        mode: Which analysis output to interpret ("cna", "ptm", "ackland";
            "voronoi"/"centro" not implemented). Only used by the lammps
            backend.
        return_type: "Identifier" for particle identifiers, "Indices" for
            positional indices (ovito backend only).

    Returns:
        gb_list: List of grain boundary atoms.
    """
    if self.backend == "ovito":
        if "Structure Type" in self.data.particles.keys():
            if return_type == "Identifier":
                # Structure type 0 means "unidentified" => grain boundary.
                gb_list = [
                    i[0]
                    for i in zip(
                        self.data.particles["Particle Identifier"],
                        self.data.particles["Structure Type"],
                    )
                    if i[1] == 0
                ]
            elif return_type == "Indices":
                gb_list = list(np.where(self.data.particles["Structure Type"] == 0)[0])
            else:
                raise NameError("Only Indices and Identifier are possible as return types.")
        elif "Centrosymmetry" in self.data.particles.keys():
            print("Implementation in progress.")
            gb_list = []
        else:
            # NOTE(review): raised when no structure data is present at
            # all, so the backend-oriented message is misleading here.
            raise not_implemented(self.backend)
    elif self.backend == "lammps":
        # Supported analysis methods: cna, ptm,
        from lammps import LMP_STYLE_ATOM, LMP_TYPE_VECTOR

        # ids = []
        # for i in range(len(self.pylmp.atoms)):
        #     ids.append(self.pylmp.atoms[i].id)
        # NOTE(review): always reads compute "cna_0" regardless of `mode`
        # — TODO confirm the intended compute ID for ptm/ackland.
        types = np.concatenate(
            self.pylmp.lmp.numpy.extract_compute("cna_0", LMP_STYLE_ATOM, LMP_TYPE_VECTOR)
        )
        # https://docs.lammps.org/Classes_atom.html#_CPPv4N9LAMMPS_NS4Atom7extractEPKc
        ids = np.concatenate(self.pylmp.lmp.numpy.extract_atom("id"))
        df_temp = pd.DataFrame(
            list(
                zip(
                    ids,
                    types,
                )
            ),
            columns=["Particle Identifier", "Structure Type"],
        )
        # TODO: This is only cna, what about others?
        if mode == "cna":
            # cna: atoms labelled 5 are treated as grain boundary here.
            df_gb = df_temp[df_temp["Structure Type"] == 5]
        elif mode in ("ptm", "ackland"):
            # ptm/ackland: atoms labelled 0 are treated as grain boundary.
            df_gb = df_temp[df_temp["Structure Type"] == 0]
        elif mode in ("voronoi", "centro"):
            raise NotImplementedError(f"Mode {mode} currently not implemented")
        else:
            print(f"Incorrect mode {mode} specified")
            sys.exit(1)
        gb_list = list(df_gb["Particle Identifier"])
    else:
        raise not_implemented(self.backend)
    return gb_list
def get_crystalline_atoms(self, return_type: str = "Identifier"):
    """Return the atoms classified as crystalline (bulk) by the last
    structural analysis.

    Args:
        return_type: "Identifier" for particle identifiers, "Indices" for
            positional indices.

    Returns:
        list | None: Bulk atoms; None when no structure-type data is
        available (ovito) or for the lammps backend (TODO).

    Raises:
        NotImplementedError: For an invalid return_type or an
        unsupported backend.
    """
    if self.backend == "ovito":
        if "Structure Type" not in self.data.particles.keys():
            print("No structure type information found.")
            return None
        structure_types = self.data.particles["Structure Type"]
        if return_type == "Identifier":
            identifiers = self.data.particles["Particle Identifier"]
            # Any non-zero structure type counts as crystalline.
            return [
                pid
                for pid, stype in zip(identifiers, structure_types)
                if stype != 0
            ]
        if return_type == "Indices":
            return list(np.where(structure_types != 0)[0])
        raise NotImplementedError(
            "Indices and Identifier are possible as return types."
        )
    if self.backend == "lammps":
        # TODO
        return None
    raise not_implemented(self.backend)
def get_grain_edge_ions(
    self,
    nearest_n: int = 12,
    cutoff: float = None,
    gb_ions: set = None,
    return_type: str = "Identifier",
):
    """Get the atoms at the grain edge, as determined by structural analysis.

    Returns a list of atoms which were identified as crystalline/bulk
    atoms, but border at least one non-crystalline/grain boundary atom.

    Args:
        nearest_n (int): Number of nearest neighbors to consider when no
            cutoff is given. Examples: fcc=12, bcc=8.
        cutoff (float): If set, use a fixed-radius neighbour search with
            this cutoff instead of the nearest_n search.
        gb_ions (set): Indices of grain boundary ions. Default: the
            non-crystalline ions from the last structure analysis.
        return_type (str): "Identifier" converts the result to particle
            identifiers; any other value leaves positional indices.

    Returns:
        list | None: Grain-edge atoms (None for the lammps backend).
    """
    if self.backend == "ovito":
        from ovito.data import CutoffNeighborFinder, NearestNeighborFinder

        # finder: CutoffNeighborFinder | NearestNeighborFinder
        from typing import Union

        finder: Union[CutoffNeighborFinder, NearestNeighborFinder]
        if cutoff:
            finder = CutoffNeighborFinder(cutoff, self.data)
        else:
            finder = NearestNeighborFinder(nearest_n, self.data)
        # ptypes = self.data.particles.particle_types
        gb_edge_ions = []
        gb_ions_set = gb_ions or self.get_non_crystalline_atoms(return_type="Indices")
        gb_ions_set = set(gb_ions_set)
        for index in self.get_crystalline_atoms(return_type="Indices"):
            # Example usage of the finder API, kept for reference:
            # print("Nearest neighbors of particle %i:" % index)
            # for neigh in finder.find(index):
            #     print(neigh.index, neigh.distance, neigh.delta)
            #     # The index can be used to access properties of the current neighbor, e.g.
            #     type_of_neighbor = ptypes[neigh.index]
            neighbors = [neigh.index for neigh in finder.find(index)]
            # A bulk atom touching any GB atom is a grain-edge atom.
            if any(x in gb_ions_set for x in neighbors):
                gb_edge_ions.append(index)
        if return_type == "Identifier":
            gb_edge_ions = [self.data.particles["Particle Identifier"][i] for i in gb_edge_ions]
    elif self.backend == "lammps":
        # TODO
        gb_edge_ions = None
    else:
        raise not_implemented(self.backend)
    return gb_edge_ions
def set_gb_type(self):
    """Set a property for grain boundary/bulk/grain edge atoms.

    NOTE(review): placeholder — the method body is empty and currently
    does nothing.
    """
def get_gb_fraction(self, mode: str = "cna"):
    """Return the fraction of non-crystalline (grain boundary) ions.

    Args:
        mode: Analysis mode forwarded to get_non_crystalline_atoms.

    Returns:
        float | None: GB atom count divided by the number of particles
        that carry a particle identifier (None for the lammps backend).
    """
    if self.backend == "ovito":
        n_boundary = len(self.get_non_crystalline_atoms(mode))
        n_total = len(self.data.particles["Particle Identifier"])
        fraction = n_boundary / n_total
        warnings.warn("Using all particles with a particle identifier as the base.")
        return fraction
    if self.backend == "lammps":
        # TODO
        return None
    raise not_implemented(self.backend)
def get_type(self, atom_type, return_type: str = "Identifier"):
    """Collect all atoms of a given particle type.

    Args:
        atom_type: Particle type to match.
        return_type: "Identifier" for particle identifiers, "Indices"
            for positional indices.

    Returns:
        list | None: Matching atoms (None for the lammps backend, TODO).

    Raises:
        NameError: If return_type is neither "Identifier" nor "Indices".
    """
    if self.backend == "ovito":
        particle_types = self.data.particles["Particle Type"]
        if return_type == "Identifier":
            identifiers = self.data.particles["Particle Identifier"]
            return [
                pid
                for pid, ptype in zip(identifiers, particle_types)
                if ptype == atom_type
            ]
        if return_type == "Indices":
            return list(np.where(particle_types == atom_type)[0])
        raise NameError("Only Indices and Identifier are possible as return types.")
    if self.backend == "lammps":
        # TODO
        return None
    raise not_implemented(self.backend)
# Todo: compute the tilt angle (Verkippungswinkel) between grains
# Todo: assign a grain index to each atom
def get_fraction(self, numerator, denominator):
    """Helper: compute a ratio of atom counts by particle type.

    Args:
        numerator: Iterable of particle types counted in the numerator.
        denominator: Iterable of particle types counted in the denominator.

    Returns:
        float: Ratio of the summed atom counts.
    """
    if self.backend != "ovito":
        raise not_implemented(self.backend)
    num = sum(len(self.get_type(ptype)) for ptype in numerator)
    den = sum(len(self.get_type(ptype)) for ptype in denominator)
    return num / den
def save_image(self, filename: str = "image.png"):
    """Write a rendered snapshot of the structure to *filename*.

    Args:
        filename: Path of the image file to be saved.
    """
    backend = self.backend
    if backend == "lammps":
        # Only works with IPython integration.
        self.pylmp.image(filename=filename)
    elif backend == "ovito":
        # TODO: use render function
        pass
def convert_backend(self, convert_to: str = None):
    """Convert the current backend by round-tripping through a data file.

    Only the lammps -> ovito direction is supported: the structure is
    written to a timestamped temporary .lmp data file, loaded into a new
    GBStructure, and the temporary file is removed again.

    Args:
        convert_to: Backend to convert to ("ovito").

    Returns:
        GBStructure | None: New structure with the requested backend, or
        None when the conversion is not supported.
    """
    if self.backend != "lammps":
        return None
    from datetime import datetime

    filename = datetime.now().strftime("%d%m%Y_%H%M%S") + ".lmp"
    # Bug fix: previously the literal string "filename" was passed here
    # instead of the generated temporary file name, so the file loaded
    # below never existed.
    self.save_structure(filename, file_type="data")
    if convert_to == "ovito":
        try:
            return GBStructure(backend=convert_to, filename=filename)
        finally:
            # Remove the temporary exchange file even if loading fails.
            tempfile = pathlib.Path(filename)
            tempfile.unlink()
    return None
class GBStructureTimeseries(GBStructure):
    """A GBStructure holding multiple snapshots from a time series.

    NOTE(review): skeleton only — the methods below are not implemented.
    """

    # Todo: enable inheritance
    # Todo: get diffusion data
    # Todo: differentiate between along/across GB
    def remove_timesteps(self, timesteps_to_exclude):
        """Remove timesteps from the beginning of a simulation.

        NOTE(review): not implemented yet — currently returns None rather
        than the trajectory described below.

        Args:
            timesteps_to_exclude (:py:class:`int`): Number of timesteps to exclude

        Returns:
            new_trajectory (:py:class:`polypy.read.Trajectory`):
                Trajectory object.
        """


# Todo: Add differentiation between diffusion along a grain boundary, transverse to the GB,
# and between grains
def not_implemented(backend):
    """Build a NotImplementedError for an unsupported backend.

    Args:
        backend: Backend currently in use.

    Returns:
        NotImplementedError: Exception instance, ready to be raised by
        the caller.
    """
    message = f"The backend {backend} doesn't support this function."
    return NotImplementedError(message)
| StarcoderdataPython |
12864689 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
import bisect
def check(arr, x, ln, val):
    """Return True if the last of the ``ln`` sorted positions in *arr*
    can be reached from the first using at most ``val`` hops, where each
    hop greedily jumps to the farthest position within distance ``x``.
    """
    hops = 0
    pos = 0
    while pos < ln - 1:
        # Rightmost position not farther than arr[pos] + x.
        reach = bisect.bisect_right(arr, arr[pos] + x) - 1
        if reach <= pos:
            # Stuck: no forward position within range.
            return False
        pos = reach
        hops += 1
    return hops <= val
# For each test case, binary-search the smallest hop length x such that the
# last stone is reachable with at most k hops (feasibility tested by check).
t = int(input())
for _ in range(t):
    n, k = map(int, input().strip().split())
    stones = list(map(int, input().strip().split()))
    # Binary search on the answer: hop lengths in [1, total span].
    low = 1
    high = stones[-1] - stones[0]  # Location of all stones are given in ascending order.
    while low <= high:
        mid = (low + high) // 2
        if check(stones, mid, n, k):
            # mid is feasible — try a smaller hop length.
            high = mid - 1
        else:
            low = mid + 1
    # low is now the smallest feasible hop length.
    print(low)
| StarcoderdataPython |
11370723 | <filename>images.py
#----------------------------------------------------------------------
# This file was generated by encode-bitmaps.py
#
from wx.lib.embeddedimage import PyEmbeddedImage
Exit = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0'
b'RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAN1SURBVHjaYvz//z8DJQAggFhA'
b'<KEY>+<KEY>/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'MSzdvv3vpecvzfr+/z8HEEBMYFMYGXM0iwrAmu+sXcvw4OxZhqenTjEwAv3P9OsXw+unTxne'
b'<KEY>/'
b'MzD+/cvw/8kTBgUbGwbB1DSGe1cuMbD8+8EgwMPjCtILEEDgMOCSkhT+t20Nw4v7nxkkNuxm'
b'eLNmFYO0sCgDCwcHAwMzM4Pkl68MLzs7GGS6uhmOCwgxcD2+x8DLysID0gsQQGAD/gH99vPL'
b'dwZGDjaG/0An/z19goHp/z+Gn9dvgoP4/7dPDD9OnGD4+/0bA5uCAsPPW8DA5eACxxxAAIEN'
b'+PDuw/ufirJizE9fMzALCjD8efOO4dHObQx/d29k+PObgeHr268MQta2DCw8fAz/X75k+M/I'
b'<KEY>IL<KEY>dPX2x7w8TDwPL2FcOvI8cYxFs7GFjFpRl+PP/K8O3NVwZuIREGpe5u'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'qRIlJfk2NR42Rj5gEmb5+4/h35+/DJ+/fmd4DUyNN4B+v/DlWwcwcTWzA9PXQqBegACCGwAK'
b'ERD+zsBgwszOXirEwe7OzvCP5y/QCx/+/v/26vfv/R///O0GOvkII1AdKxCDDAAIIEZKszNA'
b'gAEA1sFjF+2KokIAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
Reload = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAQAAABKfvVzAAAABGdBTUEAALGPC/xhBQAAACBj'
b'SFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAAmJLR0QAAKqNIzIA'
b'AAAJcEhZcwAADsQAAA7EAZUrDhsAAAAHdElNRQfjCw4WBh3m/JEzAAABY0lEQVQ4y5XSvWtT'
b'URgH4Of2pmmQ2nQIgq2pBeniorhWh0IXsYMi2cW1pTh0cFI6FfMnOAn2DxCEToUuXYWCQ+vQ'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'DuRzdh2KhExg3HAvF7rshoh+91QygSXBPGXHvilkAA9tGIHFzs97Ri51Lm+aPrf8rp8WIAeu'
b'<KEY>'
b'm1ZybaDmkX6U7DrBhtgQFttP/Q2ZKGoHR/bbgwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxOS0x'
b'MS0xNFQyMjowNjoyOSswMDowMG6RYtoAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTktMTEtMTRU'
b'MjI6MDY6MjkrMDA6MDAfzNpmAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48'
b'GgAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
Splash = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAAlgAAAD6CAIAAADyT65HAAAAAXNSR0IArs4c6QAAAARnQU1B'
b'<KEY>'
b'<KEY>'
b'J9n7+/LBPZPs5JbJ/d9zq6BwQVUaBoPBYDDNFSH6H4PBYDBNwRN9lf8dpEIGpinAQojBYDBN'
b'BqjgPd0VN3SSYy1sQrAQYjAYTNNAqCARTqQWSqvP5K2cjYy0tIIFN6MQHcqzu7QHVyAjiPyF'
b'd6NQWprAaW1dfxP97kVt5l9XOG+6tPI4mK2/vY24DrRc/CAKBWj7xdQ2865t8+WMrHXvCV1W'
b'dLWezK2ft4EPoNc0efHf6du/Qu+FB75deWorMhiDhRCDwWCagGAVJEikFkpqzuj3/kiERU6y'
b'CDFBYqnS7V9ChLUHV4qcFggoz+zwiSRnbv7u3PXzWi57ini3AYFQhEIBfGlpZ2774cytC+v6'
b'TMnY+gW6GkTZ5S+embU48PJH1SeWEtcjIwzEJCqwEGIwGEyioaogQcK00J3eWrt/qaTuPIQF'
b'fklKE9kM4JwVfjWzwfHKW/EceGzZa14jTP3uhfBu4VfXCa0G4opu389EIGPvIiIgKz9ibXsR'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'sv9409BvurHnlZpDq+Ci6sx2Y/crIAAeZ+7ql+GFdFHgFyxr28FFt3wHfqSs+ozQVgcRqBj3'
b'dNFti8BxJD7gzOkI0TZ3GKk6tVXgtCrO/gWfL54+R7/vJ3hXbDf6P3zrQnurXv57BsBCiMFg'
b'MIkjsgoSJEALfVKVM7uDrUUP/a5viSsSYylICAQcWR0kdaXgIIKjBqZbk+N/t/ac+sy2tl9M'
b'zVn9srzsH/8fpKVVjLw/feeCzE2fmLuMJa4AImsNCgUQBMSJQF6O/rABsbkSXMa6vtPq+ky2'
b'Fg7SHF0LF6WVpzyqTAjU9bq6fNwz8KoddBMhcoDy1FZwTMHnE9eeE1mqLW2H5qx+BXy+zI0f'
b'Ex/waLLhX3tuV7G1RmooVpYdgmi3XP6UvPQw8QEqWAgxGAwmQTBRQYK4a2FAV2qG3aks2kVc'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'AZ+JgLHLWP2+X2xthyA7CEdeV/2eH/S7F9VcdHPuqheyNnyoOfoHXM9e+zZ4h5lbP1cfXGnP'
b'60p8OIB/4NOjTBc6rYVfTIP4EGOKaS67/99Q8M4yGAwGE3eiVcEGvjlmf3Zn1NMgY8fjSgvy'
b'2wQuGyF40eF2pInRSCGB0FbnVeiQEZHCr64D9w4Z4QhMimnoLAUEHhfV3WQO9ggxGAwmvsSs'
b'gkDC5pEiQuUkFhUEQlUQYKKCIksNuG6VIx9AdgRAAoNUEGCjgkAkj1CQlqYUo3CCsbhRAIPB'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>M+vHwi+mFQUOoyhYeNe5GZ8SnwFABYtnfiqpOqU99mf1'
b'0NtB9pRFO+BbQF9Pz0KeIhFu88XUc9d97lHoiYtAJCHsniFqpYzOZbyli2JoHnk9x8u7LUUm'
b'DzKYsabYhUIYTNwAITx9g39Lw2CO1LrHr6xDBgYTDQlTQYIk1UIQQomh2NhrktBl1e/8pmbY'
b'<KEY>/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'LZY+aep6GVwRBAmhVyK35/cx9rxKZDM0DBkCHO81+tYQ1dT2/q1Xg5m4ynCwJrquUQwmAeCu'
b'<KEY>'
b'<KEY>hFlrBEVB<KEY>aDwcQIH1SQIDXWFzYVWAgxGAwmFvijggRYC<KEY>'
b'Dd9UkABrYWxgIcRgMJjo4KcKEmAtjAEshBgMBhMFfFZBAqyF0YKFEIPBYJjCfxUkwFoYFVgI'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'eb6m2LnkdCwJz1MKx+ZLLmkp7ZUpzlbQJw0aT2uLnfC0rC/B2+3yi7iqIDTyIAytIqyFTUWS'
b'CSFIwv09Fbd0lotIZzKGx+31gRYuOGbfxUADGiV<KEY>'
b'hh/Gagflkn8bAxbXVNU7efkq4SO9lde0Y7STwpLTjlf2WCJ4dY/0VtzfM3SLhzD8U+uee8i2'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>UJEwnSfrxM++JAFZsItNGIFo/TXds+uk3wInBPd/nb'
b'Q9VQaSKbGQVqUB1Nz4yQfAiooKY3Y0Ft4LqO8k8uDulsjJbrO8pWXq6P4asbAG37drS2UM2v'
b'hzlbLvjqUs0dLI621suE7w/XvD5YJee4VsQwJZEqSAAX4S34ALK5A8+diUByCOHsAcrbunLw'
b'OII3+elIjZYy5NYo7w9TD8zhpr/ijSHq6R040MIZHWRP9I3xsdYFatiG3mWFKG3uCA0IJLKj'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>4F0mRC8242BWSFvDVUPzaOXebPLJwV3uDH6Zkv+qfWcNEbXiTc8T/Imgyre7fU5'
b'<KEY>'
b'76gXu71pTCY6YbiiqVSQAPeRJhi+T5aBNv76SXraSrDY7Pn6mH3BUbuN7t6Dc8U3dpJfQZkT'
b'SPDOPusHB/xHITdK/2zxT+N0yAjl839sy884w1WLQ3LF13eUT2xDH4EKm3fEklomazxoJ8ts'
b'KHGODPI21p13ghJDa+Cc2UOs2VCK09prRaNaSae0k4E8BD5Fw7Bfagdki8E7RHaARSf8009A'
b'qmsd/p8rtCh0UgEo5ciWEmrhNgDNkat+jWLVAbihv07Uh5v0Cw0aEPvNpS4QV6LWkAnT0uXC'
b'<KEY>'
b'<KEY>'
b'+w/7j8yYfMlbQ9TgBSI7iJFLaosY+GSvD1ZN70BO0fE69xPbLHuqGm+h98sSvztMTStFcw/Z'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'Hr5bu9BXo8FoJQLq3JYSi+euDSYmKgjAx6aurttcSuMB3N1dwaDTMRIHa9wzfjdGVkFgziH7'
b'E9vNyAglWAVXFTluXWeKPEfxlNH7yFYL+DTIDuWSVkx/rm00QloVLDJ5pv9e16iangxE47PD'
b'9D79DErDJS<KEY>SKkqaHH5wJ9jooLAZ4ftD23xr7Am0UkvntSGg0FHTAR4pYIA/Ake'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>oHa2hq2B6UCa4k8pTk/NlQEosKAodr'
b'PS/95e9UHP5L7az1pt2V0dX4JOwe35dHotvfBH6IS8N0Obq8vmV0rlUE/jF4/iymyQomCwba'
b'aoQd9TQ5D24rCjEGxPtrun1eRrSIfUyFZdfotnL3szstg36qHfpzLZT4llIXbaOhUSzutO9P'
b'<KEY>'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'6PCm2SjzZXRYCDkiGVWQAGsh5zQjIaRU2jQOH4nyoLl/BH3jsLI1BmL+mZmouQAVbizupR8j'
b'RYH0DPRHQ+eIxzZASECNBiCLtcUSxwoseqj9tLHqOyaE5FVBAqyF3NKMhJAqe42e5kRd3jA0'
b'T8JkJX68ccYqXTa6DkhnrCej2+nu1ugaPqpEOTys3DA7XfxFsT7a/PEIAQUlM6M5ggxDT7Kr'
b'IAHWQg5pRkKooGzD3WiVsruS5iGbPUB1W5NrIbe1Iad3i+q0SB4SexdtHKBmZnJnLg9IDRUk'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/H0NMqKHtoyu+tXQ6NxaEBXqpttby1zX/<KEY>'
b'<KEY>'
b'0XaQEsC3z+gof2OI+o8r9aevz4B/PxupeayPYnyBlG8HpmMYwq1H2EknenGg8th1GYvH6Z7u'
b'pxrfWtY/W1KoEcWmgoAA+4QxkcIqCGC/kCVJU1mDBO6Zlv5YHyV4gegSaxgOZt36pymCFjYg'
b'EAhAFy8rkN7XQzl3pGbD1en7rk1fOFb7eB8lwx04MXzAy9G8UbEg7c0hqjVX6m/urGj0VGHm'
b'YBmMgdRWQQKshWxIAiFMlwnmjFCDBHI+C4Ph7YwuH0MtJKGTCsFnvbeH4rsxusMzMiAVvTOx'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>
b'//1j++Gk4xyzs5QJzlCiBwJ5/2aag6YxJJqnCjaA55FGBa89whs70QyxVNq8N601PrbNEpUK'
b'0sKybb21zD3nkP2ejeahvxiG/Fx7wx/G2bssC4/b91W5oWZHHwpDvlo0d4QGGQmEW38iJZ0T'
b'lmOE0GpGoSB2lLtGLze8vc8WlQrSgh1CJjRzFQSwXxgV/BXC7hkiqu8CzFpvjOG8b8BLqYI4'
b'<KEY>mWv+UfuTOyyTfqsD/2bq6rp39lmpJ+M30FEvfqwP97/VyHBb3ribjsSNnWRZcnIe'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>MGVkLYtB4h'
b'wV7Kg5ijSGgRcFuLxnYzE2XhB7R6aE/rZYhOSpOHMZ82HDPUh9bs8p1lN02UpvWG/sdcAKsg'
b'E7AWRoCvQkhXtbE5xBygHv6a+DrlmIFcPWulCY2FkNNEx9Y1SluzsMkH2r+N+bThmNFRomFw'
b'sJ0pioWwUbAKMgdrYTh4KoQqyiG6AMuHkqZiTHilYnCQI8HhdsxM4NojjOV2tIv3+2bFPlZK'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'h7Yl5YkFcNcogFWQPVgLg+GpEJZZvcWUDrQr20hjHlAbkituoyE3ziPUKXa37zKKjzKzI6sC'
b'vrubQkqZ6X+Y9Y6pUcHtiGTMN6MVqkd7K7VRzh29v6eiE8XJBjYx3oSv2k6u9bRSYa/MWPy4'
b'fVU0bZop7WI/QWwyi79NYbAKcgXWwgZ4KoTAzgpytdJKJfp3rxh/ALQHAkSodE8avWuLaZZC'
b'QwHf3jWWfdEG5Yj/3ZscB6fH9wfdt8QPTnUwdllddIJmz7l8teilaH48o1pJHqFkKfDrWQfz'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>mYNVMB5gLQT4K4TgNByja60/P0D1zlA11BTIjsj4Auniy7TTKEckIiLWM7sq3F/8'
b'<KEY>'
b'V4jS4C3I/DeHqtGlUBYet69jPEAIhOtEfbi38tORmkltpB20UXSTzjlEU6ACgeC7Mbp/91KE'
b'P6w3hJs7y3+4TNsvm9WIeOqBVTB+YC3k9cG8V7eVvjcs7KF9q4ocm8tcB2vc581eg9NHVKoy'
b'UVpbjahLumhAtmRkS0lBxImmTM6DnX+pZiTdORgEx+vcfxS79lS6jhk8No/P64MICKTCtM56'
b'/4G9w/IkvcMvj3vrb+tHBxsXQm4P5v1lvLYv5azOmA/m/X2iriNliK7Rg3kbmNFB9tpgej0j'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'PLTNDayCiQQypHn2kfJdCIHb1pmWnYli+gOVt/dZFxyjuQPDWrzU6p35h3Ehi6MQG3B5fa/s'
b'sTRhBcepDnIgq6AXj26zvEh3WCNz9lW5r1hl+OZ47A/J87ssP3Kkx8ARg2fG78b91bGvELW4'
b'fPdvMu3m9CDDZASrYOJpnlqYBEJY5/Q9sNn8/E5LmTXq/tVis+e2dcYPD/iH4qibmUVVjz+5'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'w1fr8EJkquzebewOE262dNWLOulFHXWiDLlQIxHIRQKjyz/NstTqPWvyHq9zn4yzVEDhwtMF'
b'X60MOggFhP+k0UO7EW6j6KUCeGi7pov9T6xMoJcK7R7/Ewv6V2zxwhN7nHJoc7MFqyBPaD5z'
b'Z5JSCDEYTKqCVZBXNBMtTJquUQwGk/JgFeQbzaSPFAshBoPhBVgF+Ulz0EIshBgMpunBKshn'
b'Ul4LsRBiMJgmBqsg/0ltLcRCiMFgmhKsgslCCmshFkIMBtNkYBVMLlJVC7EQYjCYpgGrYDKS'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>O+<KEY>'
b'<KEY>'
b'iMFzpNYN/x6scUd7AHU8SBkt5N0YYd3vX1V8/ZzHUofsZg63p0WkDL7mvid18pKqKqiXCia0'
b'lj7XX7l0vO7Q9IzF43T/vUh9Qyf5gBwJexUE4CZwK/8o2kVquDl8BXwRfB18KXw1+lDCSZnx'
b'Qj5OlrEd2lz24b3gHiG7WYOFkA4P3p86KUk9FeyeIXqgp+Lncdo909LnjNDM6qronSUWcXgs'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>ORTeD1mneuRGEMph7/0kxMkpDUKgjt0DH5'
b'kh8v0y6doJvUViYVJVnDFCIM0YbIQxIgIfGOffJqIY8mywgV6vTL70JGPZZ9f6IQBnMB7BEm'
b'B0mtghNaS9dcqfv8Eu3AHO67+xIMJAESAsmBRKFL8SFJtZBfs0bV/ceJs/KREcBZfNRVVYyM'
b'ZgjuGqUFe4TJQPKqIDhPKy/XzRmh6ahLgl5Q5kByIFGQNEgguhQHklELebd8QtlzBArV46oo'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>dCvrnhU7UjcmP70TKpAAyAbIiHnNKk0ILm35nGSpFT14a3Pel'
b'<KEY>MO86ZffSQRioHblXJ/X7Rfaeg+j4YsaaNqdZTSDJ2Vc8xAy'
b'YsJdV2U7vNV+6m9n8RF3TSm6GmhwiDSZIm2GotswZY8RksyW6A0WgCTY/tluO7wFvgjqaK/V'
b'2FCOAplKmttG1raHrLCHsvtw4iJzSKUgzmjR6onvkBHAcmCj7dBmx9lD7uoSdAkQCET6HEXn'
b'i5Rdh1DHnmPDn8bDW2zHd7sqitxVxRckWSiUZBdIsgsVXYcou18sVET9E6hZ/hEKBRApdLox'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>CICG'
b'VX3zQvXP7/qiPNjBazNX//Ju+WcPM1RBwF19vnbZR2Vz7ndVn0eXoqXeg4d2WPWi1xpVQcBx'
b'5mDFl08YN/6A7Cgx715T+t4djFQQ8HpNm34off9262HyM8acYMEzbV9a8tZNxg2LIqggIG3Z'
b'AYUSS3Kp4LgC6bIJeqyCEYDMgSwaX8BxNymf+0iTTAiVXYagUD2WWIXQRqmkYvaH4khMC+pr'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'y1OREUB78bXpE+9BB<KEY>'
b'<KEY>'
b'Psq+Y5Rdh5KGx/xDXNuXmrct89rN6FI9srY98+7+ABlhIJWCQCr3OZHDBCh7XiIr7C5t1Umk'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'SfP5HGcOoI8GAN/O5/EqOg9EdhhIafS5XK7SC4UiyWkD5agffZOiY39Jdmv4RvheSK8ko4Wy'
b'21DtyBmy1l1AktGn408SqWCGTLDoMm3vONS5zYRWatGlrSSripw27pbvOr1pK4ucQ/PEeUqO'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'PylU6ZARIPKMR9JUI3F2Qd49jXRvNpA+/g5lrwvdgPLOA7Oun40MxggV2swpjwqEjT/A4DDp'
b'J5AXm1opeUWFOhMnfeK9qr5jkBEe7dBr1IPJ3QaWvWtQKEqyb3iJyZcmjORSwUVjte208dpa'
b'morX59tW5vrPX5ZRS2sH/2x4aIv52+OO/dUeJzvxgj+Hm8Ct4IZwW7g5fAV8EXwd+kT8gWyE'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'sZC9yOYCPmghj4SwdtVn1Anx1F5QAurKevOOFSgUEZBMX+jejOoBUfSLuspOVn3zApsXuhH/'
b'kBV2R6F6SGoXjMcQ8pawsSmR3CLS50hy2yCDGVSn01Mb6Xgv+7GQRxHcu2g1SdGN7CU7wiw1'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'sapptZB7IbSf2m8/fcBRdMhx9nDkl/XgJuOWnyoXzC554wbrfvJsF3FO68zJ/0YGHcETLgBX'
b'<KEY>'
b'<KEY>3Y3b4PD1hHLKmdd8T<KEY>'
b'MYjeyKW1EFWIMLrKNZDJkNXI4Igm1ELuhbBy/tPlcx8o++RfZR/fF/lVueD52mUfWQ/STyLI'
b'nPRQ5PWImsEha88B46bGd5Ik7YRCUtNmDvNdxCR5IT2Tpi2LY984NHoEkqjrZf+zJApZSRb5'
b'<KEY>hNseJjCBUfX2eKH7h4nTyqqEmIblUcHoH2ayu3MeW4PdzztHLDW/vs3F+MkM8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'iRRIvBbySAjFWfnpV9yTd8+HkuzW6FJjqC8i946aIs4dtfwVsp+IvF0fFEpRrEe21f72ednc'
b'h87NvvLsM+POvzq99N1ZpR/cWfbh3bQv5pWvWJulvfhaZDTgdRnXfVfy5o0l79xa/dNbpu1L'
b'7Sf3MF/WEn9C5TMapSFJGkN8jtBhyPrTo5jga1IhTC4VBN4Zpm6p4tg1sbl9j28zP7DZbOB0'
b'm80mAZIACYHkQKLQJY6AbIfMRwZ3JFgLud90u3EEgsBLJBBLhRKZJLdQktdW2X24vH0jfaG0'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'v4wbF1UtfBmcxfNvXF/9y7v2k3vR2zEQm7dE6lD1JrqZ76vfK5wRsXX/sibpVLCTTvQ019MX'
b'<KEY>'
b'AKhqyz/9d7iJuNHTeLUFWph+5b/CnThIi7u6xLx9WflnD1cueD7aNeb1JFrDuCGq0/+boms0'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'iv7nL8uHB8JuIpF6QGIhycjggod6KaFQkMEp8dbCVBBCqm/nKDqEQvWQ3EShOl2SXYCMVIHq'
b'CoPDl33DbFnrSCf2hSAIfR5iqr5kBV00Q67OuvaJVo/Mb/3KmpaPfJV13XP+E+QL6Hfw8Xlc'
b'NUveQQYzfNxoWBTJ042+ueVjC1i+Mq4in1MYAY7SyBSNhPterbiqIPDyILWcu86453aavzjC'
b'/XQPngNJhoQjgzUykQAKBRlcEz8thIc/FYQQKlnSGFVd6DnvgP1YyAmrqdcv6qo+T0qjostg'
b'cPiQwQyBMGS4m5O6WJJTqOp9ad69H7X418f5LywD11OcQT5xwlVeZNz6CzKYwEXVGtX6BHF6'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'ZLADigYKCBlxgCstbFBBIEWEUNa6q0ibjYwAxg0LUQgybvNiFAqg7p+C80U9ddUoFEAW/Znv'
b'fkShz0M8h6lUfUKcQp/H5ao8i4zG4aKS9UZKnVAR4nB4zSF7cCeCiNGLHyy1MAEqqJUI/tWD'
b'm3k9B6rdj2zjbIQs2YGsOFjNzYbiUEBQTMiIA+y1MFgFgRQRQoA0d9S8cyUR8FH2tVJ042u/'
b'qCD2R8drNaFQALEupFnAEIEgvl2jwaj6krdu8UY4/pBE/CeSiFQho87uukRvFOeLYzXSCDFr'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'D0ch/iGQyEgr+VxV51CoMUShLqC7thyFosFnC501EF4IbUd2ls19qPTdWYY182wnQubpMMRx'
b'7ggK1SNUMT+QnYsKN+I9/KddhmLasRyFmGH4Y37JWzcX/3dqzfJPHMWNHBBGQ5N6hAQv77HO'
b'YVAnJkwFZaK0m7ioWw/Xut/dl/jZT0kDZA5kETJY<KEY>EUksIe4UIoc/ltFP2'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>IEsdHUdUPfnAiZHNMjbkoWkZtmHzI86shzYUPf7'
b'V8hoDN2oG1GoHuvBjeWfPugxhizhCIez/EzF/GeRUY+iG++KRnPRFShUj+P0gfK5DzrONjJx'
b'pva3z0kyD1D3EEgiaLUwwSo4NE/cVsu2o+2syfPRQdwpyogPD9ogu5ARK1BkUHDIiDPhtBAe'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'lH36kOMUGg0lIVRpxRn5Im1Gmsftqir2q3uYccfM6U+r+5GbDiTIaew8OOe2qNMYb4hDKhKv'
b'<KEY>'
b'<KEY>jEL8Jv3Ke9OEsXQmyNv00I+bhYwgAvM1fqr+4bXyuQ+UvnNrydu3lL5/Z+WX'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'qye+1cZwREbCEWuz8u79EBw70irPyICHnTPrTe2Ia5GNiZWWSuGAbLYTLt7fj+fIxAj7rIPi'
b'g0JEBi9JQSEElD1HoFA9kSfR8BBZYfe8u97NvvllzeBJIO2k3cgio+4/rmD2kvRJD0jy2qNL'
b'tAglyh4jsm99Jfe2N2Lbm5RAN/rG/GcWp0+8D7xDgbSRgXFpfhftqOtz73w7a8Yz4vQ8dDUZ'
b'AMcuf/YS/bhZstbd0aUwKLoNg9S1+Ncnik4s3GVMPVcUS<KEY>cHuYKxA1kEGIiMm'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>//<KEY>'
b'<KEY>'
b'<KEY>KX2p'
b'lIF9NrIvyvjRPISwa3IsnMBgMCRYrubeW+U6a276czxSAMhGyExkxATLoowrKSiEjrP/eAwV'
b'yEhLk7XuzmZKJAaDaSr0UkEHHav9RVcWNeUcmRSDZWZCUUKBIoNnpKAQWv7+A4UC8PzcJQwG'
b'E47+rJcPrmnSyaIpBvvMZF+gcSLVhNBVVWza8jMyAuB+UQwmSenHrt48XufG/aIcApkJWYqM'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'pAW7o1xPGrEQxgWWGcuyWONEcguhOLOVNL+zvOMAzbDJWdfPbvXMj1kznpa17obexmAwSUu6'
b'lFXtdN6ChTAusMxYlsUaJ5JVCMW67MLX17V6/JsW98/Nvf3NjKvuV/W6RKzNQm9jMJgkJ1Me'
b'+zz7OqfXiufKxAfIWMheZEQPm2KNH8ntEWIwmFQlXRZ77VRpwwsn4gib7GVTrPEDCyEGg+Ed'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>UChWZPAJ/KBgMBg+4vCmnTay6kYb'
b'nCtBIQxHsMxSKFAoVh6ChRCDwfCUIwY3CsUEFkLOYZmlLAs0fmAhxGAwPGV/Nat6c1ieBG8w'
b'wyGQmZClyIgJlgUaP7AQYjAYnrK7klW9mSEXDszGU2Y4Y0C2mM0AIcCyQOMHFkIMBsNTDta4'
b'PV5WO3JNaC1FIQxrLmeXmVCUUKDI4BlYCDEYDE+xe9L2VrGqOi8rwELIGSwzE4oSCpSfYCHE'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'IcFt/liBrIMMREassC/EuCIoXFCFglxz5oZMFKLD5PT2/KEWAusn6dtoRG2+qSauc04M94eC'
b'<KEY>C/Oyc6mq/UF6jiWKUMmtZG+P1zz0l+WeUfs6FL8yVEIdk7JWHPOeecGE7qEYYdS'
b'<KEY>nUfy1Hbz9yccyEg4U9vJ3hqqRkb0PLrVvPhUk0V+ZgfZq4Njjzxg9/j6/Vhj5W/P'
b'aJy7Rqvt3tXnnLSvreU8zpW0NFJsHR6f1+cjXUQfTR6G5IqXjtc91odt464B4oaP91Eim3/s'
b'qWya8XkPi92iMSSgAv2TtT9xd3cFOyVtpkCmQdYhI1ag+PisgkDc+<KEY>'
b'R5u4TnsxueidJRYJuKwP4Ia87QZcesY5ebUxke5gA+xWgWPIQFGiUKwUakRT8Uhh9ECmQdYh'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>1MnVjxbH/lzA5ylQTVFtCsmHvI9vk/'
b'Id1cUPk+3U81rMWFDdFdXt/v55yfHrbvq9/mnJgsM/m3utcGqzrpUa1n9/ie32n54STTn9bR'
b'mRlQ57b7tgbZ9UA1+tJAVe+sC5XpnkrXf3db94TuC3VrF/nDvRQakLh6zpg8v511vrbXSphQ'
b'r/1xlf63s47dle5HeivlYpTqb4/Zn9lpyVEIXhusvrQVWsdz2uh5eod5W+ggK9T<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'N3ok4BE7cX3mMYP7shV1xLsN3NFV/kx/1exdFngG0CUeAK3MdVfpBew69uGHP2aZodiSaIc9'
b'GSfL5KuEUCmxnKMEfs6oZYYzJr73kIj0kx9HQa55qJfS5vZ9FvF3DszsKM9RCN/bb0N2APj9'
b'j2wp/b3Y+cUR+6oix5/nXe20omnt5W5v2s4KVM1BfbX0cj2U1jv7rG//bZt72LbuvKvG4ZvR'
b'UV5k8kC9Rnzsli5yvUw4vYN8zTnnJ4ds0DzRywQddOKxBdI9le4iM6MS<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'9G/+bf34oG1tsQv0DIq+yu7bXIbiDO7RhNayDSWuhv0qp7aX5atFEA24AoldVeSsdfiGtZAO'
b'zfM3uJ7trwLth2hsK/cX0KhWUmgoNDxveqng1i7QmhHAPd/fb/Wr9VmnQpwGEe6WLv6pvp6C'
b'ptsd3RQHatykrLumrfT1IWooR4jtV0f8DbXWGuHVbeWlVu+hmrA7MMKzelGuBBoHB4I+A/cH'
b'FYe3fgwSQkgs5CQ8eMsC4zEglssu1+tkAnjkvjxig0+eNfsfzstbS9cU+1MNmXtxC0nPTAk0'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'+gtzc/7dS/FgL+VzO80LjqEnDOTkru6K/+62BHvMMzrIwI9ceNz+5A7kmgzJFX8/VgeV+Luh'
b'<KEY>'
b'V6UvO+N4YLOZuAL0zRL/Ml4HjtdtQUkmMaKF5OvR2nn/2F7aHeKzgpcT3AkTziOEtsLzuy64'
b'aw2u7bVr6hpkL1suWDcpHZy2i35GzxvhEUJgxu9124Pyisioa36rI+SW8Ai/P25/qj7bCXZO'
b'Tje6fFf9agiePvfrFTpwEAf+FLbXBBzfTVenB2cacHhGhjLQZ9B9YbWl/m7QVnukt/L29cYG'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'VtIk8h3lrmyFsF9Q1zoJ8J6hZIM7//tkikEFidU7s7pe+B1B2w7+DXZDoQUTrIIApAj+bXg4'
b'fz3rvwk47oRJcGWgUJpkqm2jQJUKjzQyWPBsfyW0TZGBoQCZA1mEDBZAYSWFCgLxfRrAI5x/'
b'1EZ9/RPk5nt8fpARikYigPplYI54UOBFdFZn1c9iOG7w3+TNIeoB2WLwroiL4SB1/gDHDJ4W'
b'HP0YWiqF4McQkYTImJzehnEgYH+1u1em+K5ucni8Is/lAW1GoXpKAoMZwX2JAKTF6fEF/5Ih'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>M+/VAOSQ3ygATvdEg2X1wfOFsvfwh1d5Xumpm+dnP7LeBRJeGmkIYL3zj7rH8XO'
b'p/qptk9OP35d5u6p6UvH6+aMUAfXPgRUH4hwcaiVL8Q7eBBbLhYQY40krG4fJJH0g6fW+uBk'
b'fzBcfXRm5uqJeshMIhUP9vI3CSUM5o/SCivtxFOn1xd8Q2JuDhQc6fXyRf4+mdzw09X2Vbtf'
b'2GWB1sa3Y7TwkMCf/HmVfsFozYM9Gc0JJMWMeDqWUMaNiCNhoX0QGUfgY5FrVWJmzU/jdMHJ'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>
b'<KEY>5MDNi0ye0fWT'
b'b69u65fbhU23FVmjQFXxdmCAkz33dFeMasVBjZ9KQIZwtcIEiim0Xuc13NWmsQLtaZJLcXEL'
b'CTT2ock/cVXdd8cdi074X98cdwR3qDYAP3VwDeccsr/xt/WBzeYhvxg2ljivKJQFL8ILWUpW'
b'D/v67ao2UnC5rl1T98EBGxFJeM0/avf5fFRJO17n+fm0A+L53E4L6OKt6/wzEW4IdEM1EJW2'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>k/<KEY>'
b'<KEY>'
b'9AoFgoEBL4cT/gn0Icc2vMHweRhI6TUiatKGdYS0gkiM8vbJjNH/WBbwzm/pIh/eQhq8ScfP'
b'pxzQUrm6rV/DlgfEsoEWStGJOk/DGp4GXF5yK21+QIbv66HQSgU6mfDLQGcpn4EkvR5xhStz'
b'NFLhl5dqaDdPaG5AJkBWBK91ZgMUEN3vgL9wk2w2VNm9KokguBV/MLDgd3T9+vEGSDXV6FaS'
b'mR3QwEkwUFnAv7QTNYOJyv2i5WCNu0u6iLQ1CQBVYbCAvXqRirphSle9aFCOmDTjlFoVRzgD'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>FMMB8tzJoGvj9mhPb50gm7ZBN2NnWTw'
b'eqS3Ysl4Lfh5DT2KVxRKZ3aUr7hc//4wNaTlsnzJlYVS+NjXo7V2T9ongWGeCERQ6+CK+8MD'
b'tiO1bsixOSPU93ZX3NVNvulq/TP9VWdMnuABlTMmb4XVe3Vb2YfD1ZDbRAZARi06YZ/WHiKp'
b'u7+n4tr2stu6yF8frHqdwcErcMNyq3dS/Q0bHRckJeeU0TvnkG1QrmTtlfpn+yuv6yh7aaAK'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>YTHA+5JXOQ5/91tCTf/OVp6Zoo/v0Qr50YFkg9IOCQfMgHZ7IBCgaJBRvLQ9EK4'
b'<KEY>'
b'7kjNW0PV6TLBE9vN/wldZ00Le48Q2ul3bTD9XeUCL+E/g9TwAs3bEVhk1qBS0E6f9GvdgmP2'
b'IXn+tHx2ifbDi/319d9V7imr6xqtdCL015F0fPLqup9POSa0lj3eV/lUP1WBWvTrWce1a+oa'
b'dp8h+M9uy2mj58o2MshtdCkt7Yntlo8OWAvUwkd6K98Yon5+gKqtRvTjyUYGrgjgoafeMBzU'
b'PH99r/WhLSa4A+TJKxepQQvtHt+Dm02RNzro/WPtC7ssZ03eO7sp4PGAon97qDpfJXrxLwuk'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>cYaUGhxeUhslToDq5auEtsaeOvDIoS1y5SpD8F5uPEcmSvt9or4160OCGtha'
b'5rp9vTEeJ+fxc2cZ+NmCL8ihCp41ecauMDiS5gm6QNN7hJGpdvhKreQNMkh4fGmVNv/HmkQF'
b'AfjWc2Zvo/JmcTP6GBtAwyAfShrLCvip034A8vk8Za8cJoS7IXOcXv82PchgDHwl5CekN94q'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'E0DkPx2pgYSgS9wBGQ7ZjoxkBgshBoNJBcwu3x3rTaaYNthrlMntZOuu0t/SOfj0M74DUYUI'
b'r5+kJ86w5BzIashwyHZkJzNYCDEYTIpwwui5d5OZOEiSczRS4QsDVd+M1iKb90BUIcJc7aNN'
b'AjIZshoyHNlJDhZCDAaTOmwqdT2zM47z+AuSZ4lhXKMKmczz47qiAgshBoNJKRaecLyzj5tz'
b'mjC0QPby+fTmGMBCiMFgUo0PDti+4v3BikkKZCxkLzJSBSyEGAwmBXnhL+s3yblXDp+BLIWM'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'xyw3fHfc7mt+o4aQZEg4JH/1uWY9gQgLIQaDae6AJ/T0Dsuk3+r+rmoWqwUIILFX/VoHCW+2'
b'jmADWAgxGAzGz/5qz9W/GR/aYipO9bWGkEBIJiT2QA2eLuQHCyEGg8FcYMlp56XLDC/9Zamw'
b'peDAISQKkjZqmQGSiS5hsBBiMBgMCac3bd4R+8VLalNJDgkJhERB0lzNem4QDVgIMRgMhgaH'
b'xy+Hw36pfWyr+aghiRcVQOQhCZAQSA4kCkMFCyEGg8GEBZynH0/5F9jN/L1uZZEjiXwpiCpE'
b'GKINkYckYC8wAoLCBVUoiMFgMJiIqCUCc/RzLKe2k701VI2M6Hl0q3nxqagX/scW1eYJ9ggx'
b'GAyGKUkkLVgFmYOFEIPBYDDNGiyEGAwGg2nWYCHEYDAYTLMGCyEGg8FgmjFpaf8HP0xvxXBF'
b'H0QAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
Info = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAMFmlDQ1BJQ0MgUHJvZmlsZQAA'
b'SImVVwdYU8kWnltSCEkogQhICb0J0qv0XgSkg42QBAglQkJQsaOLCq4FFRGs6CqIomsBZK1Y'
b'sLAIWLA/UFFZWRcLNlTepICur33vfN/c+e+Zc878Z+65880AoGTDzs3NRpUByBHkC6ODfFmJ'
b'ScksUi/AARGggArobI4o1ycqKhxAGe3/Lu9uAUTSX7eSxPrX8f8qKlyeiAMAEgVxKlfEyYH4'
b'KAC4BidXmA8AoQ3qDWfn50rwIMRqQkgQACIuwekyrCHBqTI8QWoTG+0HsTcAZCqbLUwHgC7h'
b'zSrgpMM4dAlHGwGXL4B4C8SenAw2F+L7EE/IyZkFsRIZYrPU7+Kk/y1m6lhMNjt9DMtykQrZ'
b'ny/KzWbP/T+X439LTrZ4dA4D2KgZwuBoSc5w3WqyZoVJMBXiE4LUiEiIVSG+xOdK7SX4boY4'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'K3tagTTpaIHUIws8hTgH18I9cXc8HD69YbPDXXDXUT+W0uisxACiPzGYGEg0H+PBgayzYRMC'
b'/r/RhcGeB7OTcBGM5vAtHuEpoZPwiHCT0EO4A+LBE2kUudVMfpHwB+YsMBn0wGiB8uxSv88O'
b'N4GsHXFf3APyh9xxJq4FrHAHmIkP7gVzc4Ta7xmKx7h9W8sf55Ow/j4fuZ5uQXeUs0gd+zJ+'
b'Y1Y/RvH7bo24sA/70RJbgR3BWrGz2GXsBNYIWNhprAlrw05K8FglPJFWwuhs0VJuWTAOf9TG'
b'ps6m3+bzD3Oz5fNL1kuUz5uTL/kZ/GblzhXy0zPyWT5wN+axQgQc6wksOxtbZwAke7ts63jD'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'ZehqtAKtRvejDehZ9Cp6E+1BX6BDGMAUMSamj1lhLpgfFoklY2mYEFuIlWDlWDVWjzXD73wd'
b'<KEY>'
b'YkI5YQ/hGOEC/G/6CO+IRCKTaEp0hv9lEjGTOI+4iriVeJB4hthJfEwcIpFImiRLkgcpksQm'
b'5ZOKSZtJ+0mnSV2kPtIHsiJZj2xHDiQnkwXkInI5eR/5FLmL/Iw8rKCsYKzgphCpwFWYq7BG'
b'YbdCs8I1hT6FYYoKxZTiQYmlZFKWUCoo9ZQLlPuUN4qKigaKropTFPmKixUrFA8pXlLsVfxI'
b'VaVaUP2o06hi6mrqXuoZ6h3qGxqNZkLzpiXT8mmrabW0c7SHtA90Bt2aHkLn0hfRq+gN9C76'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'2/WHDUwN4gyKDA4aPDCkGLoYphluMGwxHDTSM5psNN+ozuiusYKxi3GG8SbjVuP3JqYmCSbL'
b'TRpNnptqmIaYFprWmd43o5l5meWZVZvdMCeau5hnmW8177BALRwtMiyqLK5ZopZOlnzLrZad'
b'EwgTXCcIJlRP6LaiWvlYFVjVWfVaM63DrYusG61fTjSamDxx3cTWiV9tHG2ybXbb3LNVtQ21'
b'LbJttn1tZ2HHsauyu2FPsw+0X2TfZP/KwdKB57DN4bYjw3Gy43LHFscvTs5OQqd6p35nI+cU'
b'<KEY>'
b'<KEY>'
b'+<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'C643dwO3n+fBK+M9S/NIK0t7nu6Rvj69P8MrozxjgO/Hr+S/ygzO3J75Pisya2/WSHZC9sEc'
b'ck5KznGBqiBLcH6W7qw5szpzLXOLc3vy3PI25g0Kw4R7RIhouqgpXw0ec9rEZuKfxL0FngVV'
b'BR9mx88+MkdljmBO21yLuSvnPisMLPxlHj6PM69lvv78JfN7F/gs2LkQWZi6sGWR4aJli/oW'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'ttw7l3juxvkp59svhF24dDHw4rlWn9bTlzwunbjsdvn4FZcrjVedrja0ObYd+93x92PtTu0N'
b'15yvNXW4djR3Tuo81eXVdfa6//WLN0JuXL0ZcbPzVtyt293Tuntuc28/v5N959XdgrvD9xbf'
b'J9wveaD8oPyh9sPqf5j/42CPU8/JXv/etkcxj+495jx+8UT05HPfsqe0p+XP9J7VPrd7fqI/'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'm2viYAPWEQAAAAlwSFlzAAALEwAACxMBAJqcGAAAAdVpVFh0WE1MOmNvbS5hZG9iZS54bXAA'
b'<KEY>'
b'b3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8x'
b'OTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6'
b'YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'G2KcprekuMQBhbbk4uJTpEMPgVwKLQ7kGApucushIZe2h97TUgUaCiEtKL5E9Bw3OAmuhUMc'
b'<KEY>'
b'DMh9df58OXv69MfDk5PT0jBk4HmO5ziP1123ulqrOQQIEEEIQQOAd85/AGdnL1/+bnJh4axh'
b'24BSQCoFjIwAvg9nZ6ex3eutlW7f/kkHJiIhqhy5zJG188KVKw/<KEY>/<KEY>'
b'HB0VIpUy93Z2sBlFX5bW1r7nTKTUzqucdnF5+duxI0cm/E7nIGo2DWVZplkqmfLcOTMJAiva'
b'2zPj/f1o/OhRFIm++fXu3QucvjJ1Kp8X5ypjtv2B1+kmMI0U4yJ++hT9+/chuFbqdCByOSTd'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>b+q1bX7yoVP5rptP0N5fOU/3DqvxXf4xt08urV6lerW7VNjaWtKcW'
b'65te0JrmxtDG3x88uGBvbl7LtlqLpueNcceY0fh42Juacr25ud/UzMydT06efFGtVo1yuZwM'
b'AN6iCd1ebFAMbf7y5Ml7ac9b4C8biTOZVlIoPNM1v30r+SU3CfA/ZNahIBhUgOQAAAAASUVO'
b'RK5CYII=')
#----------------------------------------------------------------------
Icon = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAAQgAAAEICAYAAACj9mr/AAAAAXNSR0IArs4c6QAAAARnQU1B'
b'<KEY>'
b'<KEY>'
b'LUsIFKFIgbJIWe7fO+eBtx/PvN/MnPt+1utKfjlnPso7z3nue645c+bMOWcAAAAAAAAAAAAU'
b'5oWWl1ludPQIAGBBcvjmEiQBAN/muByQBAB8m5wcUpAEwMScJIcUJAEwIbvIIQVJAEzEPnJI'
b'QRIAE3CIHFKQBMDArJFDCpIAGBAPOaQgCYCB8JRDCpIAGIAIOaQgCYCOiZRDCpIA6JASckhB'
b'EgAdUVIOKUgCoANqyCEFSQA0TE05pCAJgAZpQQ4pSAKgIVqSQwqSAGiAFuWQgiQAKtKyHFKQ'
b'BEAFepBDCpIAKEhPckhBEgAF6FEOKUgCIJCe5ZCCJAACGEEOKUgCwJGR5JCCJAAcGFEOKUgC'
b'YAUjyyEFSQAcQA05/PuS3P8WGSQBsAe15HDLJUgCoFFqyiGBJAAapAU5JJAEQEO0JIcEkgBo'
b'gBblkEASABVpWQ4JJAFQgR7kkEASAAXpSQ4JJAFQgB7lkEASAIH0LIcEkgAIYAQ5JJAEgCMj'
b'ySGBJAAcGFEOCSQBsIKR5ZBAEgAHMIMcEkgCYA9mkkMCSQDswIxySCAJgBOYWQ4JJAGQATl8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'kAN4gyROQy8mkxyesVktxrstF1iuPXrUNze33MFyJ8s5lh9echtLEuD3W87crJ7xNct1m9Wj'
b'7f+05ZolV1uusnzE8kVL72j7L7Xc5+hROV5uucgiYcAK2HPYnZtZHmC50KJ3qX+yfMaS20aP'
b'6G/rOfRcek49t8bQG+xJdApyOBntBTza8kcW7fFcb8ltU8loDBqLxqSxaYw9gCQ6Aznk+SnL'
b'71j+1fJ1S247WorGqLFqzBp7y9SUBOwBcrghemFpTq605MbeU7QN2pZWZVFLEpoT2AHksEG7'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>7Dk/rs10d/U'
b'<KEY>'
b'WrxepmqHJCowgxx0P4nW5KCLwT7RcrblJEoJ4jgak8amMeb+/7WiGqqWpZheEjPIoaVjDrpZ'
b'zUst+/z0uYYgjqOxvsSisef+VulwTKIQM8hBN4LRfR5yYymZT1qebrmVZV9qCyKhsWsbtC25'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>mOIiHpAvVCa7iQxkxy+11LyLlevsJT8/r1nQQjNleYsN46IqBfUE6XpRhIzyUE8'
b'<KEY>'
b'SCJ3i/XSL1SdE3+B5dqjR+XRrdxuslkN5UmW0mftjYjmUHMZzfdZ1Bs10GtBrwm9Nkqy02tf'
b'X/NoYnKG8U7NPQehK1HnxuWdWruriZH2IBKlPhaqR2pRck9Cr/mdv+ItIYnactA7xFWW3Ng8'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'/<KEY>'
b'<KEY>'
b'<KEY>'
b'6qUWvvI8leOSaEYOiYssLcpBRF5K7r6W1plVEKpNbuweqXFpul3Qa1CvRdiDd1lyRV6bN1t6'
b'YFZBCNUoN/61UU/BANzeEnWG3f0sPTCzIFSj3PjXRj2l3hqa3AVjRuOXLBFf8fyzRbe2h7ZR'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>aPLEvonopZDnm49qiDOXpZe6MatX9yswgColqqpJ949'
b'1wTsQezG1csSxsG7puxBdMKNLd53ufq/ZQnj4F1T9Zx6byhGFMQPLEtPEMR4RNQ0oveqMupH'
b'DG907UEYC2q6AyMK4ubL0pMvLEsYh4iaRvReVUYURMRFar++LGEcImra05XOd2JEQURcbXro'
b'S5tPSkRNh7vS+YiCiDhfYbh3Bgip6XDnynCQcjfYgxgParoDIwrC+ww5cZNlCeMQUdOI3qvK'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'wM0qdIRqptp5E9FjzTC6IN66LL35jWUJ/RBVs6gegwKcadHv9L/pnK9aIg52RfE4S2471kR/'
b'sxdUK9Ustx1rot5Sjw3L6HsQujnKmzerrpxlefxmFTpAtVLNvFFvDX0DntEFIS5Zlt5cbIlo'
b'OvBFNVKtIojqrWaYQRB/bYk4DfZHLT3tZs+KaqRaeaOeUm8NzQyC0L0K/maz6s5vWbhnRruo'
b'NqpRBOqp4e6DcSozCEK8ell6c2cLexHtotqoRhFE9RRUQEeadVHR3JHotdG1CFu/aateKLmx'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'Pa2fit0jmlPNbcSt/I+jHlGvwIT8uEWnzOZ2Kz3zNsvNLLUY7SOG5lJzmhuXZ9Qb6pFpmf2o'
b'7OWWF21WQ/k5i97takpiFDSHmkvNaTTqDfUITMxNLVdacu8g3tG7Xo2PG6PsQWjuSuw5KOoJ'
b'9cbUzL4HIfSjnidsVsPRu947LLc5egT7oDnT3JXYcxDqCfUGwBF/Ysm9k0TkCkvUhUxy9L4H'
b'<KEY>
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'<KEY>'
b'QLVUTeE0IIjdearl5ZtV6BjVULWEHUAQu6ODWhdZkES/qHaqoWoJO4Ag9iNJ4qVHj6AnVDPk'
b'sCcIYn/UYE+2PP/oEfSAaqWaIYc9QRCH8yyL3pGmvJhpJ6g2+ipTtYIDQBDreJnlYZbrjh5B'
b'S6gmqg3HjFaAINajy67fz/Lho0fQAqqFaqLawAoQhA+XWXQdgX84egQ1UQ1UC9UEVoIg/LjG'
b'8mDL8ywclyiP5lxzrxqoFuAAgvBFTfpcyy9YPq5/gCJorn/eorlHzo4giBh0afbzLa85egSR'
b'aI41116/fIVjIIg4Pmd5rOVRlk/qH8AVzamuwKU51lxDAAginjdY7m55hYUTddajOdRcak7f'
b'qH+AOBBEGfQOpxux/LTl3foHOAjN3X0smkv2GgqAIMryH5b7Wn7F8lH9A+yE5kpzprl7j/4B'
b'<KEY>'
b'<KEY>/J4i84ATC+U2QShbdccaC<KEY>WL/eovs1eNCyILSN2lZtMzeuAdgD'
b'r5u5tCwIbljTMBykbJvPL8uRmWEbuwVBAMBWEAQAbAVBAMBWEAQAbAVBAMBWEAQAbAVBAMBW'
b'EAQAbAVBAMBWEAQAbAVBAAAAAAAAAAAAQEXOOONb9n2eQ2r0sfsAAAAASUVORK5CYII=')
| StarcoderdataPython |
9776608 | <filename>rock/tests/test_rule_parser.py
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test for rule parser.
testcase: host down
"""
"""
situation: what if nova service all down, bug ping only one down.
"""
import json
from rock.rules import rule_parser
test_rule = """
{
"rule_name": "host_down",
"collect_data": {
"service_result": {"data": ["%get_by_time", "nova_service", 300],
"judge": ["%false_end_count_lt", 2]},
"ping_result": {"data": ["%get_by_time", "ping", 300],
"judge": ["%false_end_count_lt", 2]}
},
"l1_rule": [
"%or",
"$service_result.judge_result",
"$ping_result.judge_result"
],
"l2_rule": [
"%and",
["%==", ["%count", "$l1_data", "l1_result", false], 1],
[
"%<=",
[
"%count",
[
"%map",
"$target_data",
[
"%and",
[
"%not",
"map.service_result.service_status"
],
[
"%==",
"map.service_result.disabled_reason",
"host_down_disable"
]
]
],
"map_result",
true
],
2]
],
"action": {
"filters": [
[
"%==",
"$service_result.service_status",
true
]
],
"tasks": [
["power_operation"],
["host_evacuate"],
["host_disable", "disabled_reason:host_down_disable"],
["message_report", "message_destination:/queue/eventQueue"]
]
}
}
"""
service_status = [
{u"target": u"a", u"time": u"00:06", u"result": False, u"service_status": True, u"disabled_reason": u"host_down_disable"},
{u"target": u"b", u"time": u"00:05", u"result": False, u"service_status": True, u"disabled_reason": u"some_other_reason"},
{u"target": u"c", u"time": u"00:04", u"result": True, u"service_status": False, u"disabled_reason": u""},
{u"target": u"a", u"time": u"00:03", u"result": False, u"service_status": False, u"disabled_reason": u""},
{u"target": u"b", u"time": u"00:02", u"result": True, u"service_status": False, u"disabled_reason": u""},
{u"target": u"c", u"time": u"00:01", u"result": True, u"service_status": True, u"disabled_reason": u"host_down_disable"},
]
ping_status = service_status
test_parser = rule_parser.RuleParser(json.loads(test_rule))
test_parser.raw_data["service_result"] = service_status
test_parser.raw_data["ping_result"] = service_status
test_parser.calculate()
print(test_parser.l1_data)
print(test_parser.l2_data)
| StarcoderdataPython |
105717 | from .checker import validate
from .creator import create_structure
| StarcoderdataPython |
1627315 | <reponame>vinirossa/decision-tree-algorithm
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Entropy Functions
Description...
"""
from math import log2
def information_entropy(prob_list):
result = 0
for prob in prob_list:
if prob != 0:
result += prob * log2(prob)
return result if result == 0 else -result
def parent_entropy(dataset, target_index):
target_node = [e[target_index] for e in dataset]
parent_probs = []
for e in sorted(set(target_node)):
parent_probs.append((sum(x.count(e)
for x in dataset)) / len(target_node))
return information_entropy(parent_probs)
def children_entropy_n_weight(dataset, target_index):
    """Per-attribute conditional entropies and branch weights.

    For every non-target column, returns one list of child entropies (one per
    distinct attribute value) and one list of matching weights (fraction of
    rows carrying that value).
    """
    # The row loop exists only to obtain the column count via len(row); the
    # computation depends on the dataset as a whole and returns after the
    # first row (every row would produce the same result).
    for row in dataset:
        children_entropies, children_weights = ([] for _ in range(2))
        for index in range(len(row)):
            if index != target_index:
                child_probs, child_entropies, child_weights = (
                    [] for _ in range(3))
                node = [e[index] for e in dataset]
                target_node = [e[target_index] for e in dataset]
                for value in sorted(set(node)):
                    result = 0
                    for e in sorted(set(target_node)):
                        # NOTE(review): x.count(e) scans the whole row, so a
                        # class value that also appears in a feature column is
                        # double-counted — assumes labels are disjoint from
                        # feature values; confirm against the datasets used.
                        result = (sum(x.count(e)
                                      for x in dataset if x[index] == value)) / node.count(value)
                        child_probs.append(result)
                    child_entropies.append(
                        information_entropy(child_probs))
                    # Weight = share of rows taking this attribute value.
                    child_weights.append(node.count(value) / len(target_node))
                    child_probs.clear()
                children_entropies.append(child_entropies)
                children_weights.append(child_weights)
        return children_entropies, children_weights
def information_gain(parent_entropy, child_entropies, child_weights):
    """Information gain: parent entropy minus the weighted child entropy."""
    sigma = sum(entropy * weight
                for entropy, weight in zip(child_entropies, child_weights))
    return parent_entropy - sigma
def get_higher_information_gain(parent_entropy, children_entropies, children_weights):
    """Best information gain over all attributes and its attribute index.

    On ties the first (lowest) index wins; duplicated indexes are printed as
    a diagnostic.
    """
    igs = [information_gain(parent_entropy, entropies, weights)
           for entropies, weights in zip(children_entropies, children_weights)]
    higher_ig = max(igs)
    if igs.count(higher_ig) > 1:
        print("More than one element with max value. Returning first index and printing duplicated indexes.")
        for idx, gain in enumerate(igs):
            if gain == higher_ig:
                print(idx)
    higher_ig_index = igs.index(higher_ig)
    return higher_ig, higher_ig_index
if __name__ == "__main__":
pass
| StarcoderdataPython |
6638892 | # NOTE: from https://github.com/LuminosoInsight/ordered-set/blob/master/ordered_set.py
import itertools
from typing import (
Any,
Dict,
Iterable,
Iterator,
Mapping,
MutableSet,
Optional,
Sequence,
MutableSequence,
Set,
TypeVar,
)
T = TypeVar("T")
class OrderedSet(MutableSet[T], Sequence[T]):
def __init__(self, iterable: Optional[Iterable[T]] = None):
if iterable is not None:
self._map = dict.fromkeys(iterable) # type: Dict[T, int]
else:
self._map = {}
self._list_cache: Optional[Sequence[T]] = None
def __len__(self):
return len(self._map)
def __getitem__(self, i):
if self._list_cache is None:
self._list_cache = list(self._map.keys())
return self._list_cache[i]
def copy(self) -> "OrderedSet[T]":
return self.__class__(self)
def __getstate__(self):
if len(self) == 0:
return (None,)
else:
return list(self)
def __setstate__(self, state):
if state == (None,):
self.__init__([])
else:
self.__init__(state)
def __contains__(self, key: Any) -> bool:
return key in self._map
def add(self, key: T): # pylint: disable=W0221
self._map[key] = len(self._map)
self._list_cache = None
def update(self, sequence: Iterable[T]):
self._map.update(dict.fromkeys(sequence))
self._list_cache = None
def pop(self) -> T:
key = next(iter(self._map.keys()))
self._map.pop(key)
self._list_cache = None
return key
def discard(self, key: T): # pylint: disable=W0221
self._map.pop(key, None)
self._list_cache = None
def clear(self):
self._map = {}
self._list_cache = None
def __iter__(self) -> Iterator[T]:
return iter(self._map.keys())
def __repr__(self) -> str:
return "{%s}" % ', '.join(repr(e) for e in self)
def __eq__(self, other: Any) -> bool:
return all(item in other for item in self)
def union(self, *other: Iterable[T]) -> "OrderedSet[T]":
# do not split `str`
outer = tuple(
[o] if not isinstance(o, (Set, Mapping, MutableSequence)) else o
for o in other
)
inner = itertools.chain([self], *outer) # type: ignore
items = itertools.chain.from_iterable(inner) # type: ignore
return type(self)(itertools.chain(items))
def __and__(self, other: Iterable[Iterable[T]]) -> "OrderedSet[T]":
return self.intersection(other)
def intersection(self, *other: Iterable[Iterable[T]]) -> "OrderedSet[T]":
common = set.intersection(*other) # type: ignore
items = (item for item in self if item in common)
return type(self)(items)
def difference(self, *other: Iterable[T]) -> "OrderedSet[T]":
other = set.union(*other) # type: ignore
items = (item for item in self if item not in other)
return type(self)(items)
def issubset(self, other: Set[T]) -> bool:
return all(item in other for item in self)
def issuperset(self, other: Set[T]) -> bool:
if len(self) < len(other): # Fast check for obvious cases
return False
return all(item in self for item in other)
def symmetric_difference(self, other: Set[T]) -> "OrderedSet[T]":
cls = type(self)
diff1 = cls(self).difference(other)
diff2 = cls(other).difference(self)
return cls().union(diff1, diff2)
def difference_update(self, *other: Iterable[T]):
self._map = dict.fromkeys(self.difference(*other))
def intersection_update(self, *other: Iterable[Iterable[T]]):
self._map = dict.fromkeys(self.intersection(*other))
def symmetric_difference_update(self, *other: Iterable[T]):
self._map = dict.fromkeys(self.difference(*other))
| StarcoderdataPython |
11395158 | <reponame>microsoft/semiparametric-distillation
import logging
import json
from functools import partial
from pathlib import Path
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.calibration import CalibratedClassifierCV
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV, KFold
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
import torch
import hydra
from omegaconf import OmegaConf, DictConfig
import datamodules
import metrics
class RandomForestRegressorKD(BaseEstimator):
    """Random-forest student distilled from a teacher's probabilities.

    Fits a RandomForestRegressor on the teacher's positive-class probability
    (scale='p') or its log, clipped below at 1/500 (scale='logp'), instead of
    the hard labels.  Subclasses override combine_y_teacher_prob to change
    the regression target.
    """
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='p'):
        assert scale in ['p', 'logp']
        self.n_estimators = n_estimators
        self.metric = metric
        self.student = RandomForestRegressor(n_estimators=self.n_estimators, n_jobs=-1)
        self.scale = scale

    def fit(self, X, y):
        # y packs [hard label, teacher probability] column-wise.
        assert y.ndim == 2
        y, teacher_prob = y[:, 0].astype(int), y[:, 1]
        self.student.fit(X, self.combine_y_teacher_prob(y, teacher_prob))
        return self

    def combine_y_teacher_prob(self, y, teacher_prob):
        # Regression target: raw probability, or log of the clipped one.
        return teacher_prob if self.scale == 'p' else np.log(np.maximum(teacher_prob, 1 / 500))

    def predict(self, X):
        # Undo the log transform so predictions are always probabilities.
        return self.student.predict(X) if self.scale == 'p' else np.exp(self.student.predict(X))

    def score(self, X, y):
        # Accept either plain labels or the packed [label, prob] target.
        if y.ndim == 2:
            y, teacher_prob = y[:, 0].astype(int), y[:, 1]
        return self.metric.score(y, self.predict(X))
class RandomForestRegressorKDMixed(RandomForestRegressorKD):
    """KD student trained on a convex mix of labels and teacher probabilities."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, alpha=1.0, scale='p'):
        assert scale == 'p'  # mixing is only defined on the probability scale
        super().__init__(n_estimators, metric, scale)
        self.alpha = alpha  # weight on the teacher: 1 -> pure KD, 0 -> plain labels

    def combine_y_teacher_prob(self, y, teacher_prob):
        return (1 - self.alpha) * y + self.alpha * teacher_prob
class RandomForestRegressorKDOrtho(RandomForestRegressorKD):
    """Debiased ("orthogonal") KD target on the log-probability scale."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='logp'):
        assert scale == 'logp'  # the correction below is derived for log p
        super().__init__(n_estimators, metric, scale)

    def combine_y_teacher_prob(self, y, teacher_prob):
        # log(phat) + (y/phat - 1): first-order correction of log(phat) using
        # the observed label; phat clipped at 1/500 to keep the log finite.
        teacher_prob_clipped = np.maximum(teacher_prob, 1 / 500)
        yp_1 = y / teacher_prob_clipped - 1.0
        return np.log(teacher_prob_clipped) + yp_1
def find_optimal_gamma_sampling(phat, bound_fn, y, max_range=10, alpha=1.0, scale='p'):
    """Minimax gamma per example, approximating the inner sup by sampling p.

    For each entry of phat, evaluates the objective on 10 points linearly
    spaced across the interval bound_fn(phat) and picks the gamma (grid step
    0.05 over [-max_range, max_range]) minimising the worst case.
    NOTE(review): the `y` parameter is unused by the active objective (only
    by the commented-out variants); kept for signature compatibility.
    """
    assert scale in ['p', 'logp']
    phat_shape = phat.shape
    phat = phat.flatten()
    # Shape (G, 1, 1) so gamma broadcasts against the (10, N) sample grid.
    gamma = np.arange(-max_range, max_range, 0.05)[..., None, None]
    if scale == 'p':
        def objective(p):
            return (gamma * (p - phat) - (p - phat))**2 + (1 / alpha - 1 + gamma)**2 * p * (1 - p)
            # variant using the realised label instead of the variance term:
            # return (gamma * (p - phat) - (p - phat))**2 + (1 / alpha - 1 + gamma)**2 * (y - p)**2
    else:
        def objective(p):
            return (gamma * (p - phat) - (np.log(p) - np.log(phat)))**2 + (1 / alpha - 1 + gamma)**2 * p * (1 - p)
            # return (gamma * (p - phat) - (np.log(p) - np.log(phat)))**2 + (1 / alpha - 1 + gamma)**2 * (y - p)**2
    bound_l, bound_h = bound_fn(phat)
    # 10 sample points per example spanning [bound_l, bound_h].
    p_vals = bound_l + np.linspace(0.0, 1.0, 10)[..., None] * (bound_h - bound_l)
    objs = objective(p_vals)
    max_objs = objs.max(axis=1)  # inner sup over sampled p
    return gamma[np.argmin(max_objs, axis=0)].reshape(*phat_shape)
# As the objective is just a quadratic function in p, we can solve it analytically instead of by
# sampling p.
def find_optimal_gamma(phat, bound_fn, max_range=10, alpha=1.0):
    """Minimax-optimal gamma per example, inner sup solved in closed form.

    The worst-case p over [bound_l, bound_h] of the quadratic objective is
    attained at an endpoint or at the (clipped) stationary point; the gamma
    grid has step 0.05 on [-max_range, max_range] and ties in the argmin
    resolve to the smallest |gamma|.  (`alpha` is kept for signature
    compatibility; the objective below is its alpha=1 simplification.)
    """
    out_shape = phat.shape
    flat = phat.flatten()
    grid = np.arange(-max_range, max_range, 0.05)
    # Sort by magnitude so argmin prefers the smallest-|gamma| minimiser.
    grid = grid[np.argsort(np.abs(grid))][..., None]
    # Coefficients of the quadratic in p: a*p^2 - b*p + c.
    a = 1 - 2 * grid
    b = 2 * flat * (grid - 1)**2 - grid**2
    c = (grid - 1)**2 * flat**2

    def quad(p):
        return a * p**2 - b * p + c

    lo, hi = bound_fn(flat)
    # Stationary point of the quadratic, clipped into the feasible interval.
    stationary = np.clip(b / (2 * a), lo, hi)
    worst = np.stack([quad(lo), quad(hi), quad(stationary)]).max(axis=0)
    return grid[np.argmin(worst, axis=0)].reshape(*out_shape)
def find_optimal_gamma_bound_slow(phat, y, c, max_range=10, scale='p'):
    """Grid-search gamma minimising the slow-rate upper-bound objective.

    Per example: gamma^2 * (y - phat)^2 plus a quartic penalty pulling gamma
    toward 1 (scale 'p') or 1/phat (scale 'logp'), weighted by c.
    """
    assert scale in ['p', 'logp']
    original_shape = phat.shape
    flat_phat = phat.flatten()
    # 1000 candidate gammas; shape (1000, 1) to broadcast over examples.
    candidates = np.linspace(-max_range, max_range, 1000)[..., None]
    residual_sq = (y - flat_phat) ** 2
    if scale == 'p':
        penalty = c * (candidates - 1) ** 4
    else:
        penalty = c * (candidates - 1 / flat_phat) ** 4
    objs = candidates ** 2 * residual_sq + penalty
    best = np.argmin(objs, axis=0)
    return candidates[best].reshape(*original_shape)
class RandomForestRegressorKDRelerr(RandomForestRegressorKD):
    """KD with minimax gamma under a relative-error bound on the teacher."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='p', c=1.0):
        super().__init__(n_estimators, metric, scale)
        self.c = c  # relative radius: true p assumed in [phat/(1+c), phat*(1+c)]

    def combine_y_teacher_prob(self, y, teacher_prob):
        # On the log scale the lower bound must stay strictly positive.
        bound_l = 0.0 if self.scale == 'p' else 1e-3
        bound_fn = lambda phat: (np.maximum(phat / (1 + self.c), bound_l),
                                 np.minimum(phat * (1 + self.c), 1.0))
        if self.scale == 'p':
            gamma = find_optimal_gamma(teacher_prob, bound_fn)
            return teacher_prob + gamma * (y - teacher_prob)
        else:
            teacher_prob_clipped = np.maximum(teacher_prob, 1 / 500)
            teacher_prob_log = np.log(teacher_prob_clipped)
            gamma = find_optimal_gamma_sampling(teacher_prob_clipped, bound_fn, y, scale='logp')
            # Correction is applied on the log target.
            return teacher_prob_log + gamma * (y - teacher_prob)
class RandomForestRegressorKDAbserr(RandomForestRegressorKD):
    """KD with minimax gamma under an absolute-error bound on the teacher."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='p', c=1.0):
        super().__init__(n_estimators, metric, scale)
        self.c = c  # absolute radius: true p assumed in [phat - c, phat + c]

    def combine_y_teacher_prob(self, y, teacher_prob):
        # On the log scale the lower bound must stay strictly positive.
        bound_l = 0.0 if self.scale == 'p' else 1e-3
        bound_fn = lambda phat: (np.maximum(phat - self.c, bound_l),
                                 np.minimum(phat + self.c, 1.0))
        if self.scale == 'p':
            gamma = find_optimal_gamma(teacher_prob, bound_fn)
            return teacher_prob + gamma * (y - teacher_prob)
        else:
            teacher_prob_clipped = np.maximum(teacher_prob, 1 / 500)
            teacher_prob_log = np.log(teacher_prob_clipped)
            gamma = find_optimal_gamma_sampling(teacher_prob_clipped, bound_fn, y, scale='logp')
            # Correction is applied on the log target.
            return teacher_prob_log + gamma * (y - teacher_prob)
class RandomForestRegressorKDPower(RandomForestRegressorKD):
    """KD with minimax gamma under a power-law (temperature) teacher bound."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='p', tmax=2.0):
        super().__init__(n_estimators, metric, scale)
        self.tmax = tmax  # true p assumed in [phat, phat**(1/tmax)]

    def combine_y_teacher_prob(self, y, teacher_prob):
        # On the log scale the lower bound must stay strictly positive.
        bound_l = 0.0 if self.scale == 'p' else 1e-3
        bound_fn = lambda phat: (np.maximum(phat, bound_l),
                                 np.minimum(phat ** (1 / self.tmax), 1.0))
        if self.scale == 'p':
            gamma = find_optimal_gamma(teacher_prob, bound_fn)
            return teacher_prob + gamma * (y - teacher_prob)
        else:
            teacher_prob_clipped = np.maximum(teacher_prob, 1 / 500)
            teacher_prob_log = np.log(teacher_prob_clipped)
            gamma = find_optimal_gamma_sampling(teacher_prob_clipped, bound_fn, y, scale='logp')
            # Correction is applied on the log target.
            return teacher_prob_log + gamma * (y - teacher_prob)
class RandomForestRegressorKDBoundFast(RandomForestRegressorKD):
    """KD with a closed-form gamma from the fast-rate upper bound."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='p', c=1.0):
        super().__init__(n_estimators, metric, scale)
        self.c = c  # regulariser weight in the closed-form shrinkage

    def combine_y_teacher_prob(self, y, teacher_prob):
        if self.scale == 'p':
            # Shrinkage toward phat; large residuals get smaller gamma.
            # gamma = self.c / (self.c + teacher_prob * (1 - teacher_prob))
            gamma = self.c / (self.c + (y - teacher_prob)**2)
            return teacher_prob + gamma * (y - teacher_prob)
        else:
            teacher_prob_clipped = np.maximum(teacher_prob, 1 / 500)
            teacher_prob_log = np.log(teacher_prob_clipped)
            # Extra 1/phat factor accounts for d(log p)/dp at phat.
            # gamma = self.c / teacher_prob_clipped / (self.c + teacher_prob * (1 - teacher_prob))
            gamma = self.c / teacher_prob_clipped / (self.c + (y - teacher_prob)**2)
            return teacher_prob_log + gamma * (y - teacher_prob)
class RandomForestRegressorKDBoundSlow(RandomForestRegressorKD):
    """KD with gamma grid-searched from the slow-rate upper bound."""
    def __init__(self, n_estimators=100, metric=metrics.Accuracy, scale='p', c=1.0):
        super().__init__(n_estimators, metric, scale)
        self.c = c  # weight on the quartic penalty in the bound objective

    def combine_y_teacher_prob(self, y, teacher_prob):
        if self.scale == 'p':
            gamma = find_optimal_gamma_bound_slow(teacher_prob, y, self.c, max_range=100, scale=self.scale)
            return teacher_prob + gamma * (y - teacher_prob)
        else:
            teacher_prob_clipped = np.maximum(teacher_prob, 1 / 500)
            teacher_prob_log = np.log(teacher_prob_clipped)
            gamma = find_optimal_gamma_bound_slow(teacher_prob_clipped, y, self.c, max_range=100, scale=self.scale)
            # Correction is applied on the log target.
            return teacher_prob_log + gamma * (y - teacher_prob)
def train(model, data, metric=metrics.Accuracy):
    """Fit *model* on the train split of *data* and score its test predictions.

    Uses predict_proba's positive-class column when the model exposes it,
    otherwise plain predict.
    """
    X_train, y_train, X_test, y_test = data
    model.fit(X_train, y_train)
    if hasattr(model, 'predict_proba'):
        y_pred = model.predict_proba(X_test)[:, 1]
    else:
        y_pred = model.predict(X_test)
    return metric.score(y_test, y_pred)
def cv(model, data, param_grid):
    """Grid-search *param_grid* on the training split; return the best params.

    refit=False: only the hyper-parameters are wanted, not a fitted model.
    """
    X_train, y_train, _X_test, _y_test = data
    search = GridSearchCV(model, param_grid, refit=False, n_jobs=-1)
    search.fit(X_train, y_train)
    return search.best_params_
def train_kd(cls, cfg, data, param_grid, metric=metrics.Accuracy):
    """Tune then repeatedly train a KD student *cls* for each forest size.

    Returns (results, best_params): one score per (n_tree, repeat) pair, and
    one best hyper-parameter dict per forest size.
    """
    # n_jobs=1 since GridSearchCV already uses n_jobs=-1
    best_params = Parallel(n_jobs=1)(
        delayed(cv)(cls(metric=metric, scale=cfg.scale, n_estimators=n_tree), data, param_grid)
        for n_tree in cfg.n_trees
    )
    # Repeat each forest size cfg.n_repeats times; zip pairs every n_tree
    # with the params tuned for it (both lists tiled identically).
    results = Parallel(n_jobs=-1)(
        delayed(train)(cls(metric=metric, scale=cfg.scale, n_estimators=n_tree, **best_param),
                       data, metric=metric)
        for n_tree, best_param in zip(list(cfg.n_trees) * cfg.n_repeats, best_params * cfg.n_repeats)
    )
    return results, best_params
# # For interactive use
# cfg = DictConfig({
# 'dataset': DictConfig({'_target_': 'datamodules.MagicTelescope', 'seed': '${seed}'}),
# # 'dataset': DictConfig({'_target_': 'datamodules.FICO', 'data_dir': '/dfs/scratch0/trid/data/fico/', 'seed': '${seed}'}),
# 'metric': DictConfig({'_target_': 'metrics.AUC'}),
# 'n_trees': [1, 2, 3],
# 'n_repeats': 5,
# 'n_splits': 5,
# 'split_mode': 'crossfit',
# 'calibrate_teacher': False,
# 'scale': 'logp',
# 'seed': 2357
# })
@hydra.main(config_path="cfg", config_name="tabular.yaml")
def main(cfg: OmegaConf):
    """Run the full tabular distillation experiment described by *cfg*.

    Trains a 500-tree teacher (optionally calibrated, optionally on a
    cross-fit or held-out split), then students of varying size from scratch
    and with several knowledge-distillation targets, and dumps all scores to
    a JSON file (working dir and the repo's results/ directory).
    """
    print(OmegaConf.to_yaml(cfg))
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    #################################### Load Data ##########################################
    datamodule = hydra.utils.instantiate(cfg.dataset)
    datamodule.prepare_data()
    datamodule.setup()
    X_train, y_train = datamodule.X_train, datamodule.y_train
    X_test, y_test = datamodule.X_test, datamodule.y_test
    metric = hydra.utils.instantiate(cfg.metric)
    #################################### Train Teacher #######################################
    logger.info('Training teacher')
    if not cfg.calibrate_teacher:
        teacher_cls = RandomForestClassifier
    else:
        # Wrap the forest so predicted probabilities are calibrated.
        teacher_cls = lambda *args, **kwargs: CalibratedClassifierCV(RandomForestClassifier(*args,
                                                                                            **kwargs))
    if cfg.split_mode == 'crossfit':
        # K teachers, each trained on K-1 folds; every training example gets
        # its probability from the teacher that did NOT see it.
        kf = KFold(n_splits=cfg.n_splits, shuffle=True, random_state=cfg.seed)
        teachers = []
        teacher_test_index = np.empty(len(X_train), dtype=int)
        for i, (train_index, test_index) in enumerate(kf.split(X_train)):
            X_train_this_split, y_train_this_split = X_train[train_index], y_train[train_index]
            teacher_test_index[test_index] = i
            teacher = teacher_cls(n_estimators=500, n_jobs=-1)
            teacher.fit(X_train_this_split, y_train_this_split)
            teachers.append(teacher)
        teacher_result = np.array([metric.score(y_test, teacher.predict_proba(X_test)[:, 1])
                                   for teacher in teachers])
        teacher_prob = np.stack([teacher.predict_proba(X_train)[:, 1] for teacher in teachers])
        # Pick, per example, the probability from its held-out teacher.
        teacher_prob = teacher_prob[teacher_test_index, np.arange(len(X_train))]
    else:
        if cfg.split_mode == 'split':
            # Disjoint halves for teacher and student.
            X_train, X_train_teacher, y_train, y_train_teacher = train_test_split(
                X_train, y_train, test_size=0.5, random_state=cfg.seed
            )
        else:  # cfg.split_mode == 'none'
            X_train_teacher, y_train_teacher = X_train, y_train
        teacher_result = [
            train(teacher_cls(n_estimators=500, n_jobs=-1),
                  (X_train_teacher, y_train_teacher, X_test, y_test), metric=metric)
            for _ in range(cfg.n_repeats)
        ]
        teacher_result = np.array(teacher_result)
        teacher = teacher_cls(n_estimators=500, n_jobs=-1)
        teacher.fit(X_train_teacher, y_train_teacher)
        teacher_prob = teacher.predict_proba(X_train)[:, 1]
    ##################################### Train students #########################################
    logger.info('Training classifier from scratch')
    scratch_classifier = Parallel(n_jobs=-1)(
        delayed(train)(RandomForestClassifier(n_estimators=n_tree),
                       (X_train, y_train, X_test, y_test), metric=metric)
        for n_tree in list(cfg.n_trees) * cfg.n_repeats
    )
    scratch_classifier = np.array(scratch_classifier).reshape(cfg.n_repeats, -1)
    logger.info('Training regressor from scratch')
    scratch_regressor = Parallel(n_jobs=-1)(
        delayed(train)(RandomForestRegressor(n_estimators=n_tree),
                       (X_train, y_train, X_test, y_test), metric=metric)
        for n_tree in list(cfg.n_trees) * cfg.n_repeats
    )
    scratch_regressor = np.array(scratch_regressor).reshape(cfg.n_repeats, -1)
    logger.info('Training student with knowledge distillation')
    # Students receive [label, teacher probability] packed column-wise.
    y_train_w_teacherprob = np.stack([y_train, teacher_prob], axis=1)
    kd_mse = Parallel(n_jobs=-1)(
        delayed(train)(RandomForestRegressorKD(metric=metric, scale=cfg.scale, n_estimators=n_tree),
                       (X_train, y_train_w_teacherprob, X_test, y_test), metric=metric)
        for n_tree in list(cfg.n_trees) * cfg.n_repeats
    )
    kd_mse = np.array(kd_mse).reshape(cfg.n_repeats, -1)
    logger.info('Training student with knowledge distillation upper bound fast rate')
    c_values = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0, 1e1, 1e2, 1e3]
    kd_boundfast, kd_boundfast_params = train_kd(RandomForestRegressorKDBoundFast, cfg,
                                                 (X_train, y_train_w_teacherprob, X_test, y_test),
                                                 {'c': c_values}, metric=metric)
    kd_boundfast = np.array(kd_boundfast).reshape(cfg.n_repeats, -1)
    logger.info('Training student with knowledge distillation upper bound slow rate')
    c_values = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0, 1e1, 1e2, 1e3]
    kd_boundslow, kd_boundslow_params = train_kd(RandomForestRegressorKDBoundSlow, cfg,
                                                 (X_train, y_train_w_teacherprob, X_test, y_test),
                                                 {'c': c_values}, metric=metric)
    kd_boundslow = np.array(kd_boundslow).reshape(cfg.n_repeats, -1)
    if cfg.scale == 'p':
        # Mixing labels with probabilities is only defined on the p scale.
        logger.info('Training student with mixed knowledge distillation')
        alpha_values = np.linspace(0.0, 1.0, 11)
        kd_mixed, kd_mixed_params = train_kd(RandomForestRegressorKDMixed, cfg,
                                             (X_train, y_train_w_teacherprob, X_test, y_test),
                                             {'alpha': alpha_values}, metric=metric)
        kd_mixed = np.array(kd_mixed).reshape(cfg.n_repeats, -1)
    logger.info('Training student with relerr knowledge distillation')
    c_values = [0.25, 0.5, 1.0, 2.0, 4.0, 8.0]
    kd_relerr, kd_relerr_params = train_kd(RandomForestRegressorKDRelerr, cfg,
                                           (X_train, y_train_w_teacherprob, X_test, y_test),
                                           {'c': c_values}, metric=metric)
    kd_relerr = np.array(kd_relerr).reshape(cfg.n_repeats, -1)
    logger.info('Training student with abserr knowledge distillation')
    c_values = [0.01, 0.02, 0.05, 0.1, 0.2, 0.4]
    kd_abserr, kd_abserr_params = train_kd(RandomForestRegressorKDAbserr, cfg,
                                           (X_train, y_train_w_teacherprob, X_test, y_test),
                                           {'c': c_values}, metric=metric)
    kd_abserr = np.array(kd_abserr).reshape(cfg.n_repeats, -1)
    logger.info('Training student with power knowledge distillation')
    tmax_values = [0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0]
    kd_power, kd_power_params = train_kd(RandomForestRegressorKDPower, cfg,
                                         (X_train, y_train_w_teacherprob, X_test, y_test),
                                         {'tmax': tmax_values}, metric=metric)
    kd_power = np.array(kd_power).reshape(cfg.n_repeats, -1)
    ##################################### Dump to JSON file #######################################
    logger.info('Saving results to disk')
    file_name = f'tabular_{datamodule.name}_scale_{cfg.scale}_split_{cfg.split_mode}_calibrate_{cfg.calibrate_teacher}.json'
    data = {
        'n_trees': list(cfg.n_trees),
        'metric': type(metric).__name__,
        'scratch_classifier': scratch_classifier.tolist(),
        'scratch_regressor': scratch_regressor.tolist(),
        'kd_mse': kd_mse.tolist(),
        'kd_boundfast': kd_boundfast.tolist(),
        'kd_boundfast_params': kd_boundfast_params,
        'kd_boundslow': kd_boundslow.tolist(),
        'kd_boundslow_params': kd_boundslow_params,
        'kd_mixed': kd_mixed.tolist() if cfg.scale == 'p' else None,
        'kd_mixed_params': kd_mixed_params if cfg.scale == 'p' else None,
        'kd_relerr': kd_relerr.tolist(),
        'kd_relerr_params': kd_relerr_params,
        'kd_abserr': kd_abserr.tolist(),
        'kd_abserr_params': kd_abserr_params,
        'kd_power': kd_power.tolist(),
        'kd_power_params': kd_power_params,
        'teacher_result': teacher_result.tolist(),
    }
    with open(file_name, 'w') as f:
        json.dump(data, f, indent=4)
    # Also write the same content to `results` directory
    path = Path(hydra.utils.get_original_cwd()) / 'results' / file_name
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'w') as f:
        json.dump(data, f, indent=4)
    logger.info(f'Saved results to {str(path)}')
# # plt.figure()
# # plt.hist(teacher.predict_proba(X_train_teacher)[:, 1], bins=10)
# # plt.savefig(f'histogram_p_hat_{dataset_name}_split_True_Xtrainteacher.pdf', bbox_inches='tight')
# # plt.close()
# # plt.figure()
# # plt.hist(teacher_prob, bins=10)
# # plt.savefig(f'histogram_p_hat_{dataset_name}_split_True.pdf', bbox_inches='tight')
# # plt.close()
if __name__ == '__main__':
    # Hydra entry point; configuration comes from cfg/tabular.yaml.
    main()
| StarcoderdataPython |
4939980 | <reponame>osmr/utct<filename>TFLearn/feed_dict_flow_cp.py
from tflearn import data_flow
class FeedDictFlowCp(data_flow.FeedDictFlow):
    """Drop-in replacement for TFLearn's ``FeedDictFlow`` that also runs the
    data-augmentation objects in ``daug_dict`` on each retrieved batch before
    the usual per-key preprocessing in ``dprep_dict`` is applied.
    """

    def __init__(self,
                 feed_dict,
                 coord,
                 batch_size=128,
                 num_threads=8,
                 max_queue=32,
                 shuffle=False,
                 continuous=False,
                 ensure_data_order=False,
                 dprep_dict=None,
                 daug_dict=None,
                 index_array=None):
        # Construction is fully delegated to the TFLearn base class; this
        # subclass only overrides the queue-filling loop below.
        super(FeedDictFlowCp, self).__init__(feed_dict,
                                             coord,
                                             batch_size,
                                             num_threads,
                                             max_queue,
                                             shuffle,
                                             continuous,
                                             ensure_data_order,
                                             dprep_dict,
                                             daug_dict,
                                             index_array)

    def fill_feed_dict_queue(self):
        """Worker loop: pull batch ids, augment, preprocess, enqueue."""
        while not (self.coord.should_stop() or self.interrupted):
            ids = self.batch_ids_queue.get()
            if ids is False:
                # Sentinel value: the producer asked the workers to stop.
                break
            batch = self.retrieve_data(ids)
            # Apply augmentation according to the daug dict (whole batch).
            if self.daug_dict:
                for key in self.daug_dict:
                    batch = self.daug_dict[key].apply(batch)
            # Apply preprocessing according to the dprep dict (per key).
            if self.dprep_dict:
                for key in self.dprep_dict:
                    batch[key] = self.dprep_dict[key].apply(batch[key])
            # All prepped; hand the batch over to the consumer side.
            self.feed_dict_queue.put(batch)
1671414 | <reponame>StevenHuang2020/Pedestrian-Segmentation
#python3 steven
import cv2
import argparse
import os,sys
#----------------------------------------------
#usgae: python .\predictBatchPath.py
#----------------------------------------------
from predictSegmentation import getPredictionMaskImg
from modules.folder.folder_file import pathsFiles,createPath,getFileName
from commonModule.ImageBase import *
def cmd_line():
    """Parse the command line; both source and destination paths are required."""
    parser = argparse.ArgumentParser()
    for flags, description in ((('-s', '--src'), 'path to input image'),
                               (('-d', '--dst'), 'path to save image')):
        parser.add_argument(*flags, required=True, help=description)
    return parser.parse_args()
def main():
    """Predict a segmentation mask for every .jpg under --src; save under --dst."""
    args = cmd_line()
    src, dst = args.src, args.dst
    createPath(dst)
    print('src=', src, 'dst=', dst)
    for image_path in pathsFiles(src, 'jpg'):  # change 'jpg' to 'png' for PNG inputs
        name = getFileName(image_path)
        image = loadImg(image_path)
        out_path = args.dst + '\\' + name
        print(image_path, name, out_path)
        mask = getPredictionMaskImg(image)
        writeImg(mask, out_path)
writeImg(predImg,dstFile)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3236440 | <filename>gumiyabot/bancho.py
# -*- coding: utf-8 -*-
"""
<NAME> (osu!) irc3 plugin.
"""
import asyncio
import irc3
# Bancho does not comply with the IRC spec (thanks peppy) so we need to account
# for that or else the irc3 module will not read any data
class BanchoConnection(irc3.IrcConnection):
    """asyncio protocol that normalises Bancho's non-standard line endings."""

    def data_received(self, data):
        """Normalise incoming bytes before the regular irc3 handling.

        Bancho terminates IRC commands with a bare ``\n`` rather than the
        spec-mandated ``\r\n``; without this fix irc3 never sees complete
        lines and reads no data.
        """
        if data.endswith(b'\r\n'):
            normalised = data
        else:
            normalised = data.rstrip(b'\n') + b'\r\n'
        return super(BanchoConnection, self).data_received(normalised)
@irc3.plugin
class BaseBanchoPlugin:
    """Base irc3 plugin for talking to Bancho (osu!'s IRC gateway).

    A background task started at construction time forwards every
    (target, message) pair placed on the shared ``bancho_queue`` to
    Bancho as an IRC PRIVMSG.
    """

    def __init__(self, bot):
        self.bot = bot
        # asyncio.Queue of (target, message) tuples shared via bot config.
        self.bancho_queue = self.bot.config.get('bancho_queue')
        asyncio.ensure_future(self.get_bancho_msg())

    @irc3.event(irc3.rfc.CONNECTED)
    def connected(self, **kw):
        """Log once the IRC connection to Bancho is established."""
        self.bot.log.info('[bancho] Connected to bancho as {}'.format(self.bot.nick))

    async def get_bancho_msg(self):
        """Forward queued (target, msg) pairs to Bancho forever.

        Converted to a native coroutine: the generator-based
        ``@asyncio.coroutine`` / ``yield from`` style used previously is
        deprecated and was removed from the stdlib in Python 3.11.
        """
        while True:
            (target, msg) = await self.bancho_queue.get()
            self.bot.privmsg(target, msg)
| StarcoderdataPython |
6478619 | from packages.pieces.GN2 import GN2
def test_flg_name():
    """The GN2 piece should render its flag name as the string 'GN2'."""
    assert str(GN2()) == 'GN2'
3577647 | #!/usr/bin/env python3
# import RPi.GPIO as GPIO
from enum import IntFlag
import timeit
import math
import time
'''
DoorState/Door class. Needs to be linked to actual hardware state etc...
'''
# Some states the door could be in
class DoorState(IntFlag):
    """Bit flags describing the door.  OPENED/CLOSED are kept mutually
    exclusive by Door.add_state(); LOGGING/RECORDING combine freely."""
    INIT = 0 # initial state, adding this state to door.state will result in nothing
    OPENED = 1  # door is open (exclusive with CLOSED)
    CLOSED = 2  # door is closed (exclusive with OPENED)
    LOGGING = 4  # logging active
    RECORDING = 8  # recording active
class Door:
    """Tracks a door's state as a DoorState bitmask.

    add_state() keeps OPENED/CLOSED mutually exclusive; passing either of
    them to remove_state() toggles both bits at once.
    """

    def __init__(self):
        self.state = DoorState.INIT
        self.last_state_check = timeit.default_timer()

    def check_state(self, *args):
        """Return True when every requested flag is set in the current state."""
        time.sleep(0.1)  # sleep to combat debounce in the future (possibly)
        wanted = sum(args)
        return self.state & wanted == wanted

    def get_state(self):
        """Return the current state as a DoorState value."""
        return DoorState(self.state)

    def remove_state(self, *args):
        """Clear the given flags; OPENED/CLOSED toggle both open/closed bits."""
        for flag in set(args):  # filter out duplicate states in args
            if flag in (DoorState.OPENED, DoorState.CLOSED):
                # Either open/closed flag flips both bits (a toggle).
                self.state ^= DoorState.OPENED
                self.state ^= DoorState.CLOSED
            else:
                self.state &= ~flag

    def add_state(self, *args):
        """Set the given flags; OPENED/CLOSED displace each other."""
        for flag in set(args):
            if flag == DoorState.OPENED:
                self.state |= DoorState.OPENED
                self.state &= ~DoorState.CLOSED
            elif flag == DoorState.CLOSED:
                self.state &= ~DoorState.OPENED
                self.state |= DoorState.CLOSED
            else:
                self.state |= flag
''' ********************* Example usage *********************** '''
# try:
nDoor = Door() # instantiate door, state defaults to INIT
# Poll state?
# if GPIO.input(?): <--- how are you connected?
# NOTE(review): AttributeError fires when a non-existent DoorState attribute
# (e.g. DoorState.FOO) is referenced inside the try block.
try:
    nDoor.add_state(DoorState.OPENED) # set state to opened, can use multiple arguments
except AttributeError:
    print("That is not a valid door state.")
    exit()
try:
    nDoor.add_state(DoorState.LOGGING)
except AttributeError:
    print("That is not a valid door state.")
    exit()
if nDoor.check_state(DoorState.OPENED): # compare states, can use multiple arguments
    try:
        nDoor.add_state(DoorState.CLOSED, DoorState.RECORDING)
    except AttributeError:
        print("That is not a valid door state.")
        exit()
if nDoor.check_state(DoorState.CLOSED, DoorState.RECORDING):
    try:
        nDoor.add_state(DoorState.OPENED)
    except AttributeError:
        print("That is not a valid door state.")
        exit()
if not nDoor.check_state(DoorState.CLOSED):
    try:
        nDoor.add_state(DoorState.CLOSED)
    except AttributeError:
        print("That is not a valid door state.")
        exit()
nDoor.remove_state(DoorState.RECORDING, DoorState.LOGGING) # remove states, can use multiple arguments
print("Current state of the door is " + str(nDoor.get_state()))
# Interactive section: the flag name typed by the user indexes DoorState;
# unknown names raise KeyError.
try:
    nDoor.add_state(DoorState[input("Choose a state " + str(nDoor.state._member_names_) + " flag to add: ")])
except KeyError:
    print("That is not a valid door state.")
    exit()
try:
    nDoor.add_state(DoorState[input("Choose another state " + str(nDoor.state._member_names_) + " flag to add: ")])
except KeyError:
    print("That is not a valid door state.")
    exit()
print("The new state of the door is " + str(nDoor.get_state()))
# Elapsed whole seconds since construction (last_state_check is never
# refreshed by check_state itself, only zeroed below).
check_time = math.floor(timeit.default_timer() - nDoor.last_state_check)
if check_time < 60:
    print(str(check_time) + " seconds(s) since last check")
    nDoor.last_state_check = 0
elif check_time >= 60:  # NOTE(review): this elif is always true when reached
    print(str(math.floor(check_time/60)) + " minute(s) since last check")
    nDoor.last_state_check = 0
# finally:
# GPIO.cleanup() | StarcoderdataPython |
4910112 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
import skimage
from skimage import io
import random
import math
import argparse
from torchvision import transforms
from Particle_Sim import ParticleSystem
random.seed(2574)
def to_rgb(x):
    """Take the first three channels of ``x`` (C, H, W) and clamp into [0, 1]."""
    return torch.clamp(x[:3, :, :], 0.0, 1.0)
def show_tensor_surfaces(t):
    """Render a state tensor's RGB channels with matplotlib.

    Accepts a single (C, H, W) tensor or a batch (B, C, H, W); batches are
    laid out on a 4 x (B//4) subplot grid.
    """
    if (len(t.shape) < 4):
        plt.axis('off')
        plt.set_cmap('inferno')
        plt.imshow(to_rgb(t).cpu().detach().permute(1,2,0), interpolation='nearest')
    else:
        # NOTE(review): grid is 4 x (B//4), so the batch size actually needs
        # to be a multiple of 4 (original comment said 2).
        plt.set_cmap('inferno')
        fig, axs = plt.subplots(4,t.shape[0]//4, figsize=(8, 16))
        plt.subplots_adjust(hspace =0.02, wspace=0.02)
        for axe,batch_item in zip(axs.flatten(),t):
            axe.axis('off')
            axe.imshow(to_rgb(batch_item).cpu().detach().permute(1,2,0), interpolation='nearest')
def show_hidden(t, section):
    """Visualise hidden channels 1:4 (sigmoid-squashed) of a state tensor.

    NOTE(review): the ``section`` argument is currently unused -- both
    branches always display channels 1:4.
    """
    if (len(t.shape) < 4):
        plt.axis('off')
        plt.set_cmap('inferno')
        plt.imshow(torch.sigmoid(t[1:4,:,:]).cpu().detach().permute(1,2,0), interpolation='nearest')
    else:
        # Batch layout: 4 x (B//4) subplot grid, so B should be a multiple of 4.
        plt.set_cmap('inferno')
        fig, axs = plt.subplots(4,t.shape[0]//4, figsize=(16, 16))
        plt.subplots_adjust(hspace =0.01, wspace=0.1)
        for axe,batch_item in zip(axs.flatten(),t):
            axe.axis('off')
            axe.imshow(torch.sigmoid(batch_item[1:4,:,:]).cpu().detach().permute(1,2,0), interpolation='nearest')
def convert_image(t, section):
    """Extract section ``section``'s three channels from the first batch item,
    normalise, clamp into [0, 1], and return as an (H, W, C) tensor."""
    normalize = transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[0.33, 0.33, 0.33])
    start = section * 3 + 1
    channels = t[0, start:start + 3, :, :]
    clamped = torch.clamp(normalize(channels), 0.0, 1.0)
    return clamped.cpu().detach().permute(1, 2, 0)
def state_loss(running_state, final_state):
    """Mean-squared error between the RGB channels of two state batches."""
    rgb_running = running_state[:, :3, :, :]
    rgb_final = final_state[:, :3, :, :]
    return F.mse_loss(rgb_running, rgb_final)
class CAModel(nn.Module):
    """Per-cell update rule: two 1x1 convolutions with a ReLU in between.

    The input carries 3 feature maps per state channel (identity + Sobel x/y
    senses); the output is one update per state channel.  The output layer is
    zero-initialised so the untrained CA starts with a "do nothing" update.
    """

    def __init__(self, env_d):
        super(CAModel, self).__init__()
        self.conv1 = nn.Conv2d(env_d * 3, 192, 1)
        self.conv2 = nn.Conv2d(192, env_d, 1)
        # Zero-init the output layer: initial updates are exactly zero.
        nn.init.zeros_(self.conv2.weight)
        nn.init.zeros_(self.conv2.bias)

    def forward(self, x):
        hidden = F.relu(self.conv1(x))
        return self.conv2(hidden)
class CASimulator():
    """Trains a neural cellular automaton (CAModel) to imitate a particle
    simulation: the CA must evolve the rendered particle state at time t into
    the rendered state after ``num_steps`` particle-sim steps.

    NOTE(review): hard-codes CUDA and relative output paths ('output/',
    'checkpoints/', 'pretrained/') that must exist before running.
    """
    def __init__(self):
        # Environment: ENV_X x ENV_Y grid with ENV_D state channels
        # (channels 0:3 are the visible RGB target, the rest are hidden).
        self.ENV_X = 512 #32
        self.ENV_Y = 512 #32
        self.ENV_D = 16
        self.particle_count = 64 # 6
        self.init_particle_dim = 8 # 3
        self.particle_size = 5.0
        self.part_spacing = 1.4
        self.repel_strength = 0.015
        self.step_size = 1.0
        self.update_probability = 0.5
        self.cur_batch_size = 1
        self.train_steps = 64000
        self.min_sim_steps = 1
        self.max_sim_steps = 8
        self.step_increase_interval = 128
        self.updates_per_step = 8
        self.device = torch.device('cuda')
        self.ca_model = CAModel(self.ENV_D)
        self.ca_model = self.ca_model.to(self.device)
        self.optimizer = optim.Adam(self.ca_model.parameters(), lr=2e-3)
        self.frames_out_count = 0
        self.losses = []
        self.checkpoint_interval = 500
        self.final_plot_interval = 1
        self.evolution_interval = 256
        # Exponentially decaying learning-rate schedule (overrides Adam's lr).
        self.lr_schedule = lambda x: 3e-3*2.0**(-0.0002*x) #lambda x: 2e-3 if x<4000 else 3e-4

    def initialize_particle_sims(self):
        """Create one independent ParticleSystem per batch item."""
        self.p_sims = [
            ParticleSystem(
                self.particle_count,
                self.ENV_X,
                self.particle_size,
                self.repel_strength,
                self.init_particle_dim,
                self.part_spacing
            )
            for _ in range(self.cur_batch_size)
        ]

    def draw_states(self):
        """Render each particle system into channels 0:3 of a zeroed state tensor."""
        blank = torch.zeros(self.cur_batch_size, self.ENV_D, self.ENV_X, self.ENV_Y)
        blank[:,0:3,:,:] = torch.tensor([ps.draw() for ps in self.p_sims]).permute(0,3,1,2)
        return blank

    def run_particles(self, num_steps):
        """Advance every particle system by num_steps simulation steps."""
        for ps in self.p_sims:
            for _ in range(num_steps):
                ps.sim()

    def load_pretrained(self, path):
        """Load CAModel weights from a checkpoint file."""
        self.ca_model.load_state_dict(torch.load(path))

    def wrap_edges(self, x):
        # Zero-pad one pixel on every side (despite the name, edges are
        # padded with a constant, not wrapped).
        return F.pad(x, (1,1,1,1), 'constant', 0.0) #'circular', 0)

    def raw_senses(self):
        """Per-channel perception: identity + Sobel-x + Sobel-y via grouped conv."""
        # state - (batch, depth, x, y)
        sobel_x = torch.tensor([[-1.0,0.0,1.0],[-2.0,0.0,2.0],[-1.0,0.0,1.0]])/8
        sobel_y = torch.tensor([[1.0,2.0,1.0],[0.0,0.0,0.0],[-1.0,-2.0,-1.0]])/8
        identity = torch.tensor([[0.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,0.0]])
        all_filters = torch.stack((identity, sobel_x, sobel_y))
        all_filters_batch = all_filters.repeat(self.ENV_D,1,1).unsqueeze(1)
        all_filters_batch = all_filters_batch.to(self.device)
        return F.conv2d(
            self.wrap_edges(self.current_states),
            all_filters_batch,
            groups=self.ENV_D
        )

    def sim_step(self):
        """One CA update with stochastic (asynchronous) per-cell masking."""
        state_updates = self.ca_model(self.raw_senses().to(self.device))*self.step_size
        # randomly block updates to enforce
        # asynchronous communication between cells
        rand_mask = torch.rand_like(
            self.current_states[:, :1, :, :]) < self.update_probability
        self.current_states += state_updates*(rand_mask.float().to(self.device))

    def run_sim(self, steps, run_idx, save_all):
        """Run `steps` CA updates, then one optimizer step on the final-state loss."""
        self.optimizer.zero_grad()
        for i in range(steps):
            if (save_all):
                #show_all_layers(self.current_states-self.prev_states, self.current_states)
                show_tensor_surfaces(self.current_states)
                plt.savefig(f'output/all_figs/out_hidden_{self.frames_out_count:06d}.png')
                plt.close('all')
                self.frames_out_count += 1
            # Exponential moving average of past states (kept for visualisation).
            self.prev_states = self.prev_states*0.9 + 0.1*self.current_states.clone()
            self.sim_step()
            #self.set_unique_control_channel()
        loss = state_loss(self.current_states, self.final_states)
        loss.backward()
        '''
        with torch.no_grad():
            self.ca_model.conv1.weight.grad = self.ca_model.conv1.weight.grad/(self.ca_model.conv1.weight.grad.norm()+1e-8)
            self.ca_model.conv1.bias.grad = self.ca_model.conv1.bias.grad/(self.ca_model.conv1.bias.grad.norm()+1e-8)
            self.ca_model.conv2.weight.grad = self.ca_model.conv2.weight.grad/(self.ca_model.conv2.weight.grad.norm()+1e-8)
            self.ca_model.conv2.bias.grad = self.ca_model.conv2.bias.grad/(self.ca_model.conv2.bias.grad.norm()+1e-8)
        '''
        self.optimizer.step()
        lsv = loss.item()
        # Keep a window of the last 100 losses for the running average.
        self.losses.insert(0, lsv)
        self.losses = self.losses[:100]
        print(f'running loss: {sum(self.losses)/len(self.losses)}')
        print(f'loss run {run_idx} : {lsv}')
        print(f'lr: {self.lr_schedule(run_idx)}')

    def train_ca(self):
        """Main training loop: sample a horizon, run particles, fit the CA."""
        self.initialize_particle_sims()
        for idx in range(self.train_steps):
            # Apply the custom learning-rate schedule.
            for g in self.optimizer.param_groups:
                g['lr'] = self.lr_schedule(idx)
            #self.current_states = self.initial_state.repeat(self.cur_batch_size,1,1,1)
            #self.initialize_particle_sims()
            self.current_states = self.draw_states().to(self.device)
            # Curriculum: the max horizon grows every step_increase_interval steps.
            num_steps = random.randint(self.min_sim_steps,min(idx//self.step_increase_interval+1,self.max_sim_steps))*self.updates_per_step
            self.run_particles(num_steps)
            self.final_states = self.draw_states().to(self.device)
            self.prev_states = self.current_states.clone()
            self.run_sim(num_steps*2, idx, (idx+1)%self.evolution_interval == 0)
            print(f'{num_steps//self.updates_per_step} blocks, {num_steps*2} total steps\n')
            if (idx % self.final_plot_interval == 0):
                #show_final_target(self.input_matsA, self.input_matsB, self.current_states)
                show_tensor_surfaces(self.current_states)
                #show_tensor_surfaces(self.final_states)
                plt.savefig(f'output/out{idx:06d}.png')
                plt.close('all')
            if (idx % self.checkpoint_interval == 0):
                torch.save(self.ca_model.state_dict(), f'checkpoints/ca_model_step_{idx:06d}.pt')

    def run_pretrained(self, steps):
        """Roll out a trained CA for `steps` updates, saving a frame every 8 steps."""
        #self.cur_batch_size = 1
        self.initialize_particle_sims()
        with torch.no_grad():
            self.current_states = self.draw_states().to(self.device)
            self.prev_states = self.current_states.clone()
            for idx in range(steps):
                print(f'step: {idx}')
                if (idx % 8 == 0):
                    #show_all_layers(self.current_states-self.prev_states, self.current_states)
                    show_tensor_surfaces(self.current_states[0])
                    plt.savefig(f'pretrained/out_{self.frames_out_count:06d}.png')
                    plt.close('all')
                    self.frames_out_count += 1
                self.prev_states = self.prev_states*0.9 + 0.1*self.current_states.clone()
                self.sim_step()
# Entry point: train from scratch, or roll out a saved checkpoint with
# --run-pretrained [--pretrained-path NAME].
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--run-pretrained', dest='run_pretrained', action='store_true')
    parser.add_argument('--pretrained-path', type=str, default='ca_model_pretty_g_short')
    args = parser.parse_args()
    ca_sim = CASimulator()
    if args.run_pretrained:
        print('running pretained')
        ca_sim.load_pretrained(f'checkpoints/{args.pretrained_path}.pt')
        ca_sim.run_pretrained(50000)
    else:
        ca_sim.train_ca()
| StarcoderdataPython |
1704988 | <filename>python/BugLearnAndValidate.py
'''
Created on Jun 23, 2017
@author: <NAME>, <NAME>
'''
import sys
import json
from os.path import join
from os import getcwd
from collections import Counter, namedtuple
import math
import argparse
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers.core import Dense, Dropout
import time
import numpy as np
import Util
import LearningDataSwappedArgs
import LearningDataSwappedArgsBert
import LearningDataBinOperator
import LearningDataBertBinOperator
import LearningDataSwappedBinOperands
import LearningDataIncorrectBinaryOperand
import LearningDataIncorrectAssignment
# Command-line interface: which bug pattern to train on, plus the embedding
# files and the training/validation data files.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--pattern", help="Kind of data to extract", choices=["SwappedArgs", "SwappedArgsBert", "BinOperator", "BinOperatorBert", "SwappedBinOperands", "IncorrectBinaryOperand", "IncorrectAssignment"], required=True)
parser.add_argument(
    "--token_emb", help="JSON file with token embeddings", required=True)
parser.add_argument(
    "--type_emb", help="JSON file with type embeddings", required=True)
parser.add_argument(
    "--node_emb", help="JSON file with AST node embeddings", required=True)
parser.add_argument(
    "--training_data", help="JSON files with training data", required=True, nargs="+")
parser.add_argument(
    "--validation_data", help="JSON files with validation data", required=True, nargs="+")

# A reported anomaly: human-readable message plus its anomaly score.
Anomaly = namedtuple("Anomaly", ["message", "score"])
def prepare_xy_pairs(gen_negatives, data_paths, learning_data):
    """Encode the code pieces found in ``data_paths`` as (x, y) pairs.

    Relies on the module-level embedding dictionaries (name_to_vector,
    type_to_vector, node_type_to_vector) having been loaded.  Returns numpy
    arrays for xs/ys plus the raw code pieces (for anomaly reporting).
    """
    xs, ys = [], []
    # keep calls in addition to encoding as x,y pairs (to report detected anomalies)
    code_pieces = []
    for code_piece in Util.DataReader(data_paths):
        learning_data.code_to_xy_pairs(
            gen_negatives, code_piece, xs, ys,
            name_to_vector, type_to_vector, node_type_to_vector, code_pieces)
    x_length = len(xs[0])
    print("Stats: " + str(learning_data.stats))
    print("Number of x,y pairs: " + str(len(xs)))
    print("Length of x vectors: " + str(x_length))
    return [np.array(xs), np.array(ys), code_pieces]
def sample_xy_pairs(xs, ys, number_buggy):
    """Down-sample the buggy (y == [1]) examples to ``number_buggy`` while
    keeping every non-buggy (y == [0]) example.  Original order is preserved."""
    buggy_indices = [i for i, y in enumerate(ys) if y == [1]]
    kept_buggy = set(np.random.choice(buggy_indices, size=number_buggy, replace=False))
    sampled_xs = []
    sampled_ys = []
    for i, (x, y) in enumerate(zip(xs, ys)):
        if y == [0] or i in kept_buggy:
            sampled_xs.append(x)
            sampled_ys.append(y)
    return sampled_xs, sampled_ys
# Main pipeline: load embeddings, build (x, y) pairs for the chosen bug
# pattern, train a feedforward classifier, then evaluate it and sweep the
# anomaly-score threshold for precision/recall reporting.
if __name__ == '__main__':
    print("BugDetection started with " + str(sys.argv))
    time_start = time.time()
    args = parser.parse_args()
    pattern = args.pattern
    name_to_vector_file = args.token_emb
    type_to_vector_file = args.type_emb
    node_type_to_vector_file = args.node_emb
    training_data_paths = args.training_data
    validation_data_paths = args.validation_data
    with open(name_to_vector_file) as f:
        name_to_vector = json.load(f)
    with open(type_to_vector_file) as f:
        type_to_vector = json.load(f)
    with open(node_type_to_vector_file) as f:
        node_type_to_vector = json.load(f)
    # Pick the pattern-specific encoder/negative-generator.
    if pattern == "SwappedArgs":
        learning_data = LearningDataSwappedArgs.LearningData()
    elif pattern == "SwappedArgsBert":
        learning_data = LearningDataSwappedArgsBert.LearningData()
    elif pattern == "BinOperator":
        learning_data = LearningDataBinOperator.LearningData()
    elif pattern == "BinOperatorBert":
        learning_data = LearningDataBertBinOperator.LearningData()
    elif pattern == "SwappedBinOperands":
        learning_data = LearningDataSwappedBinOperands.LearningData()
    elif pattern == "IncorrectBinaryOperand":
        learning_data = LearningDataIncorrectBinaryOperand.LearningData()
    elif pattern == "IncorrectAssignment":
        learning_data = LearningDataIncorrectAssignment.LearningData()
    else:
        raise Exception(f"Unexpected bug pattern: {pattern}")
    # not yet implemented
    # elif pattern == "MissingArg":
    #     learning_data = LearningDataMissingArg.LearningData()

    print("Statistics on training data:")
    learning_data.pre_scan(training_data_paths, validation_data_paths)

    # prepare x,y pairs for learning and validation, therefore generate negatives
    print("Preparing xy pairs for training data:")
    learning_data.resetStats()
    xs_training, ys_training, _ = prepare_xy_pairs(
        True, training_data_paths, learning_data)
    x_length = len(xs_training[0])
    print("Training examples   : " + str(len(xs_training)))
    print(learning_data.stats)

    # create a model (simple feedforward network)
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(x_length,)))
    model.add(Dense(200, input_dim=x_length,
                    activation="relu", kernel_initializer='normal'))
    model.add(Dropout(0.2))
    # model.add(Dense(200, activation="relu"))
    model.add(Dense(1, activation="sigmoid", kernel_initializer='normal'))

    # train model
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop', metrics=['accuracy'])
    history = model.fit(xs_training, ys_training,
                        batch_size=100, epochs=10, verbose=1)

    time_stamp = math.floor(time.time() * 1000)
    model.save("bug_detection_model_"+str(time_stamp))

    time_learning_done = time.time()
    print("Time for learning (seconds): " +
          str(round(time_learning_done - time_start)))

    # prepare validation data
    print("Preparing xy pairs for validation data:")
    learning_data.resetStats()
    xs_validation, ys_validation, code_pieces_validation = prepare_xy_pairs(
        True, validation_data_paths, learning_data)
    print("Validation examples : " + str(len(xs_validation)))
    print(learning_data.stats)

    # validate the model
    validation_loss = model.evaluate(xs_validation, ys_validation)
    print()
    print("Validation loss & accuracy: " + str(validation_loss))

    # compute precision and recall with different thresholds
    #  for reporting anomalies
    threshold_to_correct = Counter()
    threshold_to_incorrect = Counter()
    threshold_to_found_seeded_bugs = Counter()
    threshold_to_warnings_in_orig_code = Counter()

    ys_prediction = model.predict(xs_validation)
    poss_anomalies = []
    scores_dict = {'scores': list(
        map(lambda x: float(x[0]), ys_prediction)), 'bug': list(map(lambda x: int(x[0]), ys_validation))}
    with open('scores.json', 'w') as file:
        file.write(json.dumps(scores_dict))
    # Validation pairs are interleaved: even index = original (expect 0),
    # odd index = seeded-bug variant (expect 1).
    for idx in range(0, len(xs_validation), 2):
        # probab(original code should be changed), expect 0
        y_prediction_orig = ys_prediction[idx][0]
        # probab(changed code should be changed), expect 1
        y_prediction_changed = ys_prediction[idx + 1][0]
        # higher means more likely to be anomaly in current code
        anomaly_score = learning_data.anomaly_score(
            y_prediction_orig, y_prediction_changed)
        # higher means more likely to be correct in current code
        normal_score = learning_data.normal_score(
            y_prediction_orig, y_prediction_changed)
        is_anomaly = False
        for threshold_raw in range(1, 20, 1):
            threshold = threshold_raw / 20.0
            suggests_change_of_orig = anomaly_score >= threshold
            suggests_change_of_changed = normal_score >= threshold
            # counts for positive example
            if suggests_change_of_orig:
                threshold_to_incorrect[threshold] += 1
                threshold_to_warnings_in_orig_code[threshold] += 1
            else:
                threshold_to_correct[threshold] += 1
            # counts for negative example
            if suggests_change_of_changed:
                threshold_to_correct[threshold] += 1
                threshold_to_found_seeded_bugs[threshold] += 1
            else:
                threshold_to_incorrect[threshold] += 1
            # check if we found an anomaly in the original code
            if suggests_change_of_orig:
                is_anomaly = True
        if is_anomaly:
            code_piece = code_pieces_validation[idx]
            message = "Score : " + \
                str(anomaly_score) + " | " + code_piece.to_message()
            # print("Possible anomaly: "+message)
            # Log the possible anomaly for future manual inspection
            poss_anomalies.append(Anomaly(message, anomaly_score))
    f_inspect = open('poss_anomalies.txt', 'w+')
    poss_anomalies = sorted(poss_anomalies, key=lambda a: -a.score)
    for anomaly in poss_anomalies:
        f_inspect.write(anomaly.message + "\n")
    print("Possible Anomalies written to file : poss_anomalies.txt")
    f_inspect.close()

    time_prediction_done = time.time()
    print("Time for prediction (seconds): " +
          str(round(time_prediction_done - time_learning_done)))

    print()
    # Sweep thresholds 0.05 .. 0.95 and report accuracy/recall/precision.
    for threshold_raw in range(1, 20, 1):
        threshold = threshold_raw / 20.0
        recall = (
            threshold_to_found_seeded_bugs[threshold] * 1.0) / (len(xs_validation) / 2)
        precision = 1 - \
            ((threshold_to_warnings_in_orig_code[threshold]
              * 1.0) / (len(xs_validation) / 2))
        if threshold_to_correct[threshold] + threshold_to_incorrect[threshold] > 0:
            accuracy = threshold_to_correct[threshold] * 1.0 / (
                threshold_to_correct[threshold] + threshold_to_incorrect[threshold])
        else:
            accuracy = 0.0
        print("Threshold: " + str(threshold) + " Accuracy: " + str(round(accuracy, 4)) + " Recall: " + str(round(recall, 4
                                                                                                                 ) + " Precision: " + str(round(precision, 4))+" #Warnings: "+str(threshold_to_warnings_in_orig_code[threshold]))
| StarcoderdataPython |
84893 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Package: mesxr.calibration
Module: utilities
Author: <NAME>, <NAME>
Affiliation: Department of Physics, University of Wisconsin-Madison
Last Updated: November 2018
Description:
    This module contains a number of auxiliary functions for the main timscan.py module to
make use of. These are either functions which might be useful elsewhere (such are coordinate
conversions) or functions which are more likely to change in future implementations (like
loading in the trimbit scan data from a specific file naming scheme).
Usage:
TBD
"""
from os import path
import numpy as np
import tifffile
# Pixel geometry of the detector: a 2 x 8 grid of chips, each 60 x 97 pixels,
# with one-pixel dead gaps between chips (see get_chip_coords).
M_SIZE_Y = 195           # full-detector height in pixels (2*97 + 1 gap row)
M_SIZE_X = 487           # full-detector width in pixels (8*(60+1) - 1)
M_NUM_TRIM = 64          # number of trimbit settings per calibration scan
M_NUM_CHIPS_Y = 2
M_NUM_CHIPS_X = 8
M_NUM_CHIPS = M_NUM_CHIPS_X * M_NUM_CHIPS_Y
M_CHIP_SIZE_Y = 97       # single-chip height in pixels
M_CHIP_SIZE_X = 60       # single-chip width in pixels


def load_calibration_data(calib_path):
    """
    Description
        Reads in the calibration .tiff files and returns an array indexed by
        pixel location and trimbit setting.  Each file holds per-pixel photon
        counts for one trimbit setting; the trimbit axis is stored reversed
        (file i lands at index M_NUM_TRIM - 1 - i).

    Parameters:
        - calib_path = (string) Path to the folder containing the calibration images.

    Returns:
        - images = (float[x_index, y_index, trimbit]) One calibration image per
                   trimbit setting.

    Credit:
        Originally written by members of the UW-Madison ME-SXR group; cleaned up
        to catch only missing-file errors instead of a bare except (which also
        hid unrelated failures such as shape mismatches).
    """
    # Read in the config file - contents currently unused, kept for future expansion.
    with open(path.join(calib_path, 'config.txt'), 'r') as f:
        params = f.readlines()

    # Container for the calibration data
    images = np.empty([M_SIZE_X, M_SIZE_Y, M_NUM_TRIM])

    # Load the data one trimbit at a time.  Camserver writes either 3-digit or
    # 5-digit (its default) frame numbers, so fall back to the second scheme
    # when the first file is missing.
    for i in range(0, M_NUM_TRIM):
        try:
            filename = path.join(calib_path, 'scan_image_{:03d}.tif'.format(i))
            images[:, :, M_NUM_TRIM - 1 - i] = tifffile.imread(filename).transpose()
        except FileNotFoundError:
            # Also try 5-digit labeling, the camserver default
            filename = path.join(calib_path, 'scan_image_{:05d}.tif'.format(i))
            images[:, :, M_NUM_TRIM - 1 - i] = tifffile.imread(filename).transpose()

    return images


def get_chip_coords(image_x, image_y):
    """
    Description
        Maps a pixel coordinate in the full-detector ("image") frame to the chip
        it falls on and its local x,y coordinates in that chip's frame.

    Parameters:
        - image_x = (int) X-coordinate of a point on the overall detector image
        - image_y = (int) Y-coordinate of a point on the overall detector image

    Returns:
        - chip_num = (int) Chip number the point lies on, or -1 if the point
                     falls on a dead gap row/column between chips
        - chip_x   = (int) X-coordinate in the chip frame (-1 if invalid)
        - chip_y   = (int) Y-coordinate in the chip frame (-1 if invalid)

    Credit:
        Ported from pix_add in utils.c for the p2det detector.

    Note:
        The original port kept C-style "/" division; under Python 3 that yields
        floats (e.g. chip_num = image_x / 61.0) and makes the dead-row test
        image_y == M_SIZE_Y/2 (== 97.5) unreachable, so the gap row at y = 97
        was wrongly mapped onto a chip.  Floor division restores the intended
        integer arithmetic.
    """
    if image_y < M_SIZE_Y // 2:
        # Top row of chips; x runs right-to-left within each chip.
        chip_num = image_x // (M_CHIP_SIZE_X + 1)
        chip_x = (M_CHIP_SIZE_X + 1) * (chip_num + 1) - image_x - 2
        chip_y = image_y
        if chip_x < 0:
            # Landed on the one-pixel gap column between two chips.
            chip_num = -1
    elif image_y == M_SIZE_Y // 2:
        # The single dead row between the two rows of chips.
        chip_num = -1
    else:
        # Bottom row of chips; the y axis is flipped relative to the image frame.
        chip_num = M_NUM_CHIPS // 2 + image_x // (M_CHIP_SIZE_X + 1)
        chip_x = image_x % (M_CHIP_SIZE_X + 1)
        chip_y = M_SIZE_Y - image_y - 1
        if chip_x >= M_CHIP_SIZE_X:
            chip_num = -1

    # Check if this is a valid chip.
    if chip_num < 0:
        chip_y = -1
        chip_x = -1

    return chip_num, chip_x, chip_y
# Characteristic emission-line energies keyed by element/line label
# (values appear to be keV).
energies = {
    'Zr': 2.04, 'Mo': 2.29, 'Ag': 2.98, 'In': 3.29, 'Ti': 4.51,
    'V': 4.95, 'Cr': 5.41, 'Fe': 6.40, 'Cu': 8.05, 'Ge': 9.89,
    'Br': 11.92, 'Y': 14.95, 'MoK': 17.48, 'AgK': 22.16, 'Sn': 25.27,
}


def get_line_energy(elements):
    """Return a numpy array of line energies for the supplied element labels."""
    return np.array([energies[elem] for elem in elements])
5018744 | <reponame>dgolovin/python-django-ex
from django.views.generic import TemplateView
class HomePageView(TemplateView):
    """Render the site home page, injecting a default visitor name."""

    template_name = 'home.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['name'] = 'Visitor'
        return ctx
1769218 | #!/usr/bin/env python3
import argparse
import json
import uuid
import requests
import sys
from string import Formatter
from errors import UserError, RequestError
from request_utils import Requests
from environment_utils import Environments
class Cli(object):
    """Drives named REST requests (from requests.json) against named
    environments (from envs.json), recursively resolving templated
    parameters inside request bodies."""

    def __init__(
            self,
            requests_filename='requests.json',
            environments_filename='envs.json',
            print_all_responses=False
    ):
        self.requests = Requests(filename=requests_filename)
        self.environments = Environments(filename=environments_filename)
        self.print_all_responses = print_all_responses

    def make_call(self, request_name, env_name):
        """Resolve and execute the named request against the named environment.

        Raises UserError for an unknown HTTP method and RequestError for any
        non-200 response; returns the parsed JSON body otherwise.
        """
        request = self.requests.find(request_name)
        environment = self.environments.find(env_name)
        parsed_req = self._parse_request(request, env_name)
        url = f"{environment['base_url']}{parsed_req['endpoint']}"
        # Environment-specific headers are merged over the JSON default.
        headers = {'content-type': 'application/json'}
        if 'headers' in environment:
            headers = {**headers, **environment['headers']}
        if parsed_req['type'] == 'POST':
            response = requests.post(url, data=json.dumps(parsed_req['body']), headers=headers)
        elif parsed_req['type'] == 'PUT':
            response = requests.put(url, data=json.dumps(parsed_req['body']), headers=headers)
        elif parsed_req['type'] == 'GET':
            response = requests.get(url, params=parsed_req['body'], headers=headers)
        else:
            raise UserError(f'Unknown HTTP method {parsed_req["type"]}')
        response_json = response.json()
        if response.status_code != 200:
            raise RequestError(
                f'{response.status_code} returned when calling {request_name} with response '
                f'{response_json}. Expected status code 200.'
            )
        if self.print_all_responses:
            print(f'Response for call to {request_name}:')
            print(response_json)
        return response_json

    def _parse_request(self, request, base_url):
        """Recursively substitute {placeholders} in strings, lists and dicts.

        Each placeholder name (before any '[' index) is resolved via
        _populate_parameter, which may trigger further REST calls.
        NOTE(review): despite the name, ``base_url`` carries the environment
        name, which is forwarded to make_call.
        """
        if isinstance(request, str):
            parameters = [
                parameter for _, parameter, _, _ in Formatter().parse(request) if parameter
            ]
            values = {}
            for parameter in parameters:
                new_request = parameter.split('[')[0]
                values[new_request] = self._populate_parameter(new_request, base_url)
            return request.format(**values)
        if isinstance(request, list):
            parsed = []
            for value in request:
                parsed.append(self._parse_request(value, base_url))
            return parsed
        parsed = {}
        for attribute, value in request.items():
            parsed[attribute] = self._parse_request(value, base_url)
        return parsed

    def _populate_parameter(self, parameter_name, base_url):
        """Resolve one placeholder: 'uuid' generates a fresh UUID, anything
        else is treated as the name of another request to call."""
        if parameter_name == 'uuid':
            return str(uuid.uuid4())
        else:
            return self.make_call(parameter_name, base_url)
def setup_argparse():
    """Build and return the command-line parser for the REST-API helper."""
    parser = argparse.ArgumentParser(description='Create and retrieve objects in a rest API.')
    parser.add_argument('request', metavar='request_name', nargs='?',
                        help='the name of a request to make')
    parser.add_argument('-l', '--list', action='store_true',
                        help='list all objects and exit')
    parser.add_argument('-rf', '--requests_file',
                        help='the requests file location')
    parser.add_argument('-ef', '--environments_file',
                        help='the environments file location')
    parser.add_argument('-o', '--output_all_requests', action='store_true',
                        help='prints all the requests being made')
    parser.add_argument('-e', '--env', '--environment', metavar='env_name',
                        help='the name of environment to use')
    return parser
# Entry point: build the CLI, then either list requests, make the named
# request, or print a usage hint.  User/request errors exit with status 1.
if __name__ == '__main__':
    parser = setup_argparse()
    args = parser.parse_args()
    cli = None
    try:
        cli = Cli(
            requests_filename=args.requests_file or 'requests.json',
            environments_filename=args.environments_file or 'envs.json',
            print_all_responses=args.output_all_requests
        )
    except UserError as e:
        print(str(e))
        sys.exit(1)
    if args.list:
        cli.requests.print_request_list()
    elif args.request:
        try:
            print(cli.make_call(args.request, args.env or 'default'))
        except (UserError, RequestError) as e:
            print(str(e))
            sys.exit(1)
    else:
        print('Type -h for help')
| StarcoderdataPython |
1785226 | from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os
import os.path
import torch
import numpy as np
# TODO: remove `device`? (it is currently unused)
def convert_ann(ann, device):
    """Convert one COCO annotation dict in place to the torchvision format.

    Replaces 'bbox' (x, y, w, h) with 'boxes' (x1, y1, x2, y2) and
    'category_id' with 'labels'; drops 'segmentation'.  Returns the same
    (mutated) dict.  Raises ValueError for degenerate boxes.
    """
    xmin, ymin, width, height = ann['bbox']
    if width <= 0 or height <= 0:
        raise ValueError("Degenerate bbox (x, y, w, h): ", str([xmin, ymin, width, height]))
    ann['boxes'] = [xmin, ymin, xmin + width, ymin + height]
    ann['labels'] = ann.pop('category_id')
    del ann['bbox']
    del ann['segmentation']
    return ann
def flatten_targets(targets):
    """Merge per-object annotation dicts into one dict of stacked arrays.

    Matches the target format expected by torchvision's GeneralizedRCNN.
    The ``image_id`` key is collapsed to a single scalar array (every
    annotation of an image carries the same id); all other keys are stacked
    across objects along a new leading axis.
    """
    if not targets:
        return {}
    first = targets[0]
    return {
        key: np.array(first[key]) if key == 'image_id'
        else np.stack([entry[key] for entry in targets])
        for key in first
    }
class CocoDetection(VisionDataset):
    """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.

    Annotations are eagerly converted to the torchvision detection format
    (``boxes``/``labels``) once at construction time via ``convert_ann``.

    Args:
        root (string): Root directory where images are downloaded to.
        annFile (string): Path to json annotation file.
        allow_missing_targets (bool): If True, images with no annotations
            yield a target containing only ``image_id``; otherwise accessing
            such an image raises ``ValueError``.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.ToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample and its target as entry
            and returns a transformed version.
    """

    def __init__(self, root, annFile, allow_missing_targets=True, transform=None, target_transform=None, transforms=None):
        super(CocoDetection, self).__init__(root, transforms, transform, target_transform)
        from pycocotools.coco import COCO
        self.coco = COCO(annFile)
        self.ids = list(sorted(self.coco.imgs.keys()))
        self.allow_missing_targets = allow_missing_targets
        # Convert all COCO annotations up front so __getitem__ does no
        # per-access conversion work.
        print("converting to PyTorch format...")
        for ann_id in self.coco.anns:  # renamed from `id` (shadowed a builtin)
            self.coco.anns[ann_id] = convert_ann(self.coco.anns[ann_id], 'cpu')

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: Tuple (image, target). target is the flattened annotation
            dict produced by ``flatten_targets``.
        """
        coco = self.coco
        img_id = self.ids[index]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        target = coco.loadAnns(ann_ids)
        if len(ann_ids) == 0:
            if not self.allow_missing_targets:
                raise ValueError("Image has no annotations!")
            # Keep a minimal target so downstream code can still identify the image.
            target = [{'image_id': img_id}]
        target = flatten_targets(target)

        path = coco.loadImgs(img_id)[0]['file_name']
        with Image.open(os.path.join(self.root, path)) as img:
            # BUG FIX: Image.convert returns a *new* image; the original code
            # discarded that return value, so non-RGB images (grayscale, RGBA)
            # passed through unconverted. convert() also loads the pixel data,
            # so using `img` after the `with` block is safe.
            img = img.convert('RGB')

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.ids)
9687355 | <reponame>s2et/hdsplayer
"""Play each media file listed (one path per line) in the playlist with mpv."""
import subprocess

# Recognised media extensions. Matched against the *end* of the path (the
# original substring test, e.g. ".mp4" in path, could false-positive on a
# directory name and even play the same file twice).
VIDEO_EXTENSIONS = (".mp4", ".mkv", ".flv")
IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ico")

PLAYLIST_PATH = '/home/s2/Documents/hds/var1.txt'

with open(PLAYLIST_PATH, 'r') as playlist:
    for line in playlist:
        # rstrip('\n') instead of line[:-1]: the last line of the file may
        # have no trailing newline, which would otherwise lose a character.
        path = line.rstrip('\n')
        if not path:
            continue  # skip blank lines instead of probing an empty path
        print(path)
        if path.lower().endswith(VIDEO_EXTENSIONS):
            # Probe and print the duration for logging; playback itself blocks
            # until mpv exits, so no explicit sleep is needed.
            probe = subprocess.run(
                ["ffprobe", "-v", "error",
                 "-show_entries", "format=duration",
                 "-of", "default=noprint_wrappers=1:nokey=1", path],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            print(probe.stdout)
            subprocess.run(["mpv", "--fullscreen", path])
        elif path.lower().endswith(IMAGE_EXTENSIONS):
            # Show still images for a fixed 59 seconds.
            subprocess.run(["mpv", "--fullscreen",
                            "--image-display-duration", "59", path])
| StarcoderdataPython |
6656371 | from api.models import db, DocumentClass, Document
import requests
# Base URL of the deployed backend used to obtain an auth token for these tests.
BACKEND_URL = "https://h4i-infra-server.kivaportfolio.now.sh/"

# NOTE(review): this performs real network requests at import time, so the
# module cannot even be collected without the live service reachable —
# consider moving token acquisition into a pytest fixture.
r = (
    requests.post(
        BACKEND_URL + "register",
        data={
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "securityQuestionAnswer": "answer",
            "answer": "yo",
            "questionIdx": 1,
            "role": "pm",
        },
    )
).json()
# A 400 from /register presumably means the account already exists —
# fall back to logging in with the same credentials. TODO confirm.
if r.get("status") == 400:
    r = (
        requests.post(
            BACKEND_URL + "login", data={"email": "<EMAIL>", "password": "<PASSWORD>"}
        )
    ).json()
token = r.get("token")
# Auth headers attached to every mutating request in the tests below.
headers = {"Content-type": "application/x-www-form-urlencoded", "token": token}
# `client` in each test below is a pytest fixture (see conftest) wrapping
# Flask's test client: http://flask.pocoo.org/docs/1.0/api/#test-client
def create_docclass():
    """Return an unsaved 'Annual Report' DocumentClass fixture."""
    attributes = {
        "name": "Annual Report",
        "description": "Annual report of finances",
    }
    return DocumentClass(attributes)
def test_index(client):
    """The root endpoint should be reachable."""
    response = client.get("/")
    assert response.status_code == 200
def test_get_document_class(client):
    """GET /document_classes returns [] on a clean DB, then the seeded row."""
    # Clean slate: Documents reference DocumentClasses, so delete them first.
    Document.query.delete()
    DocumentClass.query.delete()
    db.session.commit()
    rs = client.get("/document_classes")
    assert rs.status_code == 200
    ret_dict = rs.json
    assert ret_dict["success"] == True
    # No classes exist yet, so the endpoint should report an empty list.
    assert ret_dict["result"]["document_class"] == []
    # Creating a docclass and adding it to the database.
    helper_docclass = create_docclass()
    db.session.add(helper_docclass)
    db.session.commit()
    # The same endpoint should now return exactly the one seeded class.
    rs = client.get("/document_classes")
    ret_dict = rs.json
    assert len(ret_dict["result"]["document_class"]) == 1
    assert ret_dict["result"]["document_class"][0]["name"] == "Annual Report"
    assert (
        ret_dict["result"]["document_class"][0]["description"]
        == "Annual report of finances"
    )
def test_get_document_class_by_id(client):
    """GET /document_class/<id> returns the matching class's fields."""
    # Clean slate (the deletes are committed together with the add below).
    Document.query.delete()
    DocumentClass.query.delete()
    # Creating a docclass and adding it to the database.
    helper_docclass = create_docclass()
    db.session.add(helper_docclass)
    db.session.commit()
    rs = client.get("/document_class/" + helper_docclass.id)
    assert rs.status_code == 200
    ret_dict = rs.json
    assert ret_dict["success"] == True
    # The returned record should match the seeded fixture exactly.
    assert ret_dict["result"]["document_class"]["_id"] == helper_docclass.id
    assert ret_dict["result"]["document_class"]["name"] == "Annual Report"
    assert (
        ret_dict["result"]["document_class"]["description"]
        == "Annual report of finances"
    )
def test_add_document_class(client):
    """POST /document_classes rejects a missing name and accepts a valid add."""
    # Clean slate: Documents reference DocumentClasses, so delete them first.
    Document.query.delete()
    DocumentClass.query.delete()
    db.session.commit()
    # Test for not having the required 'name' field provided.
    rs = client.post(
        "/document_classes",
        content_type="multipart/form-data",
        data={"description": "description here"},
        headers=headers,
    )
    assert rs.status_code == 400
    ret_dict = rs.json  # gives you a dictionary
    assert ret_dict["success"] == False
    assert ret_dict["message"] == "No name provided for new Document Class"
    # Test for a legal add (both required fields present).
    rs = client.post(
        "/document_classes",
        content_type="multipart/form-data",
        data={"name": "docname", "description": "description here"},
        headers=headers,
    )
    assert rs.status_code == 200
    ret_dict = rs.json  # gives you a dictionary
    assert ret_dict["success"] == True
    assert ret_dict["message"] == "success"
def test_update_document_class_by_id(client):
    """PUT /document_class/<id> updates the name and leaves other fields alone."""
    # Creating a docclass and adding it to the database (after a clean slate).
    Document.query.delete()
    DocumentClass.query.delete()
    helper_docclass = create_docclass()
    db.session.add(helper_docclass)
    db.session.commit()
    # Only 'name' is sent; 'description' should be preserved by the update.
    rs = client.put(
        "/document_class/" + helper_docclass.id,
        content_type="multipart/form-data",
        data={"name": "newdocname"},
        headers=headers,
    )
    assert rs.status_code == 200
    ret_dict = rs.json  # gives you a dictionary
    assert ret_dict["success"] == True
    assert ret_dict["result"]["document_class"]["name"] == "newdocname"
    assert (
        ret_dict["result"]["document_class"]["description"]
        == "Annual report of finances"
    )
| StarcoderdataPython |
3420230 | <filename>tests/test_split_on_grid.py
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.apps.pathology.transforms import SplitOnGrid
# Four random 3-channel 2x2 quadrant patches.
A11 = torch.randn(3, 2, 2)
A12 = torch.randn(3, 2, 2)
A21 = torch.randn(3, 2, 2)
A22 = torch.randn(3, 2, 2)

# Assemble the quadrants into two rows (concat along width, dim 2) and then
# a full 3x4x4 image A (concat along height, dim 1), so every grid split
# below has a known expected answer built from the same patches.
A1 = torch.cat([A11, A12], 2)
A2 = torch.cat([A21, A22], 2)
A = torch.cat([A1, A2], 1)

# Single-image cases: (SplitOnGrid kwargs, input image, expected patch stack).
TEST_CASE_0 = [{"grid_size": (2, 2)}, A, torch.stack([A11, A12, A21, A22])]
TEST_CASE_1 = [{"grid_size": (2, 1)}, A, torch.stack([A1, A2])]
TEST_CASE_2 = [{"grid_size": (1, 2)}, A1, torch.stack([A11, A12])]
TEST_CASE_3 = [{"grid_size": (1, 2)}, A2, torch.stack([A21, A22])]
TEST_CASE_4 = [{"grid_size": (1, 1), "patch_size": (2, 2)}, A, torch.stack([A11])]
TEST_CASE_5 = [{"grid_size": 1, "patch_size": 4}, A, torch.stack([A])]
TEST_CASE_6 = [{"grid_size": 2, "patch_size": 2}, A, torch.stack([A11, A12, A21, A22])]
TEST_CASE_7 = [{"grid_size": 1}, A, torch.stack([A])]

# Multi-call cases: (kwargs, list of input images, list of expected stacks).
TEST_CASE_MC_0 = [{"grid_size": (2, 2)}, [A, A], [torch.stack([A11, A12, A21, A22]), torch.stack([A11, A12, A21, A22])]]
TEST_CASE_MC_1 = [{"grid_size": (2, 1)}, [A] * 5, [torch.stack([A1, A2])] * 5]
TEST_CASE_MC_2 = [{"grid_size": (1, 2)}, [A1, A2], [torch.stack([A11, A12]), torch.stack([A21, A22])]]
class TestSplitOnGrid(unittest.TestCase):
    """Unit tests for SplitOnGrid, covering single- and repeated-call usage."""

    @parameterized.expand(
        [TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7]
    )
    def test_split_patch_single_call(self, input_parameters, img, expected):
        """A single call splits one image into the expected stack of patches.

        (Fixed 'pathce' -> 'patch' typo in the method name; unittest
        discovers tests by the 'test_' prefix, so this is safe.)
        """
        splitter = SplitOnGrid(**input_parameters)
        output = splitter(img)
        np.testing.assert_equal(output.numpy(), expected.numpy())

    @parameterized.expand([TEST_CASE_MC_0, TEST_CASE_MC_1, TEST_CASE_MC_2])
    def test_split_patch_multiple_call(self, input_parameters, img_list, expected_list):
        """The same transform instance can be reused across several images."""
        splitter = SplitOnGrid(**input_parameters)
        for img, expected in zip(img_list, expected_list):
            output = splitter(img)
            np.testing.assert_equal(output.numpy(), expected.numpy())
if __name__ == "__main__":
    # Allow running this test module directly: python tests/test_split_on_grid.py
    unittest.main()
| StarcoderdataPython |
5016457 | from django.test import TestCase
from django.core.management import call_command
from poradnia.judgements.factories import CourtFactory
# Python 2/3 compatibility: StringIO moved into the io module in Python 3.
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
class RunCourtSessionParserTestCase(TestCase):
    def test_run_command_basic(self):
        """The management command should mention the existing court by name."""
        CourtFactory(name="MyFooCourt")
        captured = StringIO()
        call_command("run_court_session_parser", stdout=captured)
        self.assertIn("MyFooCourt", captured.getvalue())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.