id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3303019 | # Generated by Django 2.2.16 on 2020-09-23 18:08
from django.db import migrations
import django_jsonfield_backport.models
class Migration(migrations.Migration):
    """Auto-generated migration: switch both WebextPermission JSON columns to
    the django-jsonfield-backport field class (no data migration needed — the
    default stays an empty dict and only the field type changes)."""

    dependencies = [
        ('files', '0003_webextpermission_optional_permissions'),
    ]

    operations = [
        migrations.AlterField(
            model_name='webextpermission',
            name='optional_permissions',
            field=django_jsonfield_backport.models.JSONField(default=dict),
        ),
        migrations.AlterField(
            model_name='webextpermission',
            name='permissions',
            field=django_jsonfield_backport.models.JSONField(default=dict),
        ),
    ]
| StarcoderdataPython |
3330130 | from itertools import combinations
from typing import List
import numpy as np
from numpy.typing import ArrayLike
from dexp.utils import xpArray
from dexp.utils.backends import Backend
__all__ = [
"first_derivative_func",
"first_derivative_kernels",
"second_derivative_func",
"second_derivative_kernels",
"derivative_axes",
]
def line_derivative_kernels(dim: int, template: ArrayLike) -> List[xpArray]:
    """Build one axis-aligned derivative kernel per dimension.

    Each kernel has extent 3 along exactly one axis and extent 1 along every
    other axis; ``template`` (a length-3 stencil) is written along that axis.
    """
    result = []
    for current_axis in range(dim):
        extents = np.ones(dim, dtype=int)
        extents[current_axis] = 3
        kernel = np.zeros(extents, dtype=np.float32)
        index = tuple(
            slice(None) if axis == current_axis else 0 for axis in range(dim)
        )
        kernel[index] = template
        result.append(kernel)
    return result
def diagonal_derivative_kernels(dim: int, template: ArrayLike) -> List[xpArray]:
    """Build one derivative kernel per unordered pair of axes.

    Each kernel has extent 3 along the two chosen axes and extent 1 along
    every other axis; ``template`` (a 3x3 stencil) is written into that plane.
    """
    result = []
    for axis_pair in combinations(range(dim), 2):
        extents = np.ones(dim, dtype=int)
        extents[list(axis_pair)] = 3
        kernel = np.zeros(extents, dtype=np.float32)
        index = tuple(
            slice(None) if axis in axis_pair else 0 for axis in range(dim)
        )
        kernel[index] = template
        result.append(kernel)
    return result
def first_derivative_kernels(dim: int) -> List[xpArray]:
    """Forward first-derivative kernels: one per axis, then one per pair of
    axes, in the same order as :func:`derivative_axes`."""
    line_stencil = np.array([1, -1, 0])
    diagonal_stencil = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 0]])
    return (
        line_derivative_kernels(dim, line_stencil)
        + diagonal_derivative_kernels(dim, diagonal_stencil)
    )
def second_derivative_kernels(dim: int) -> List[xpArray]:
    """Second-derivative kernels: one per axis, then one per pair of axes,
    in the same order as :func:`derivative_axes`."""
    line_stencil = np.array([1, -2, 1])
    diagonal_stencil = np.array([[1, 0, 0], [0, -2, 0], [0, 0, 1]])
    return (
        line_derivative_kernels(dim, line_stencil)
        + diagonal_derivative_kernels(dim, diagonal_stencil)
    )
def derivative_axes(dim: int) -> list:
    """Return the axis (or axis pair) associated with each derivative kernel.

    The list holds every single axis index (matching the line kernels)
    followed by every unordered 2-tuple of axes (matching the diagonal
    kernels).  The previous ``List[int]`` annotation was incorrect: the tail
    of the returned list contains tuples, not ints.
    """
    # Must be kept in the same order as the other derivative functions
    return list(range(dim)) + list(combinations(range(dim), 2))
def first_derivative_func(array: xpArray, axes: int, transpose: bool) -> xpArray:
    """Finite (forward) difference of ``array`` along ``axes``.

    ``axes`` may be a single axis index or a tuple of indices; a tuple shifts
    along all listed axes at once (a diagonal difference).  When ``transpose``
    is True the adjoint operator is applied instead.  Entries with no shifted
    neighbour are left at zero.
    """
    xp = Backend.get_xp_module()

    if isinstance(axes, int):
        axes = (axes,)

    lower = [slice(None)] * array.ndim
    upper = [slice(None)] * array.ndim
    for axis in axes:
        lower[axis] = slice(None, -1)
        upper[axis] = slice(1, None)

    if transpose:
        lower, upper = upper, lower

    lower, upper = tuple(lower), tuple(upper)
    result = xp.zeros_like(array)
    result[upper] = array[lower] - array[upper]
    return result
def second_derivative_func(array: xpArray, axes: int, transpose: bool) -> xpArray:
    """Second difference along ``axes``: the negated composition of the first
    difference with its adjoint."""
    forward = first_derivative_func(array, axes, transpose)
    return -1 * first_derivative_func(forward, axes, not transpose)
| StarcoderdataPython |
9713 | <gh_stars>10-100
"""
Base pipeline class. Main rule generator classes inherit from this one.
"""
from copy import deepcopy
from typing import List, Tuple, Union, Dict
from iguanas.pipeline.class_accessor import ClassAccessor
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
import iguanas.utils.utils as utils
from iguanas.exceptions import DataFrameSizeError
class _BasePipeline:
    """
    Base pipeline class. Main pipeline classes inherit from this one.

    Parameters
    ----------
    steps : List[Tuple[str, object]]
        The steps to be applied as part of the pipeline.
    verbose : int, optional
        Controls the verbosity - the higher, the more messages. >0 : gives
        the overall progress of the training of the pipeline; >1 : shows the
        current step being trained.

    Attributes
    ----------
    steps_ : List[Tuple[str, object]]
        The steps corresponding to the fitted pipeline.
    rules : Rules
        The Rules object containing the rules produced from fitting the
        pipeline.
    """

    def __init__(self,
                 steps: List[Tuple[str, object]],
                 verbose: int) -> None:
        self.steps = steps
        self.verbose = verbose
        # Populated by `fit` in subclasses; None until the pipeline is fitted.
        self.steps_ = None
        self.rules = None

    def get_params(self) -> dict:
        """
        Returns the parameters of each step in the pipeline.

        Returns
        -------
        dict
            The parameters of each step in the pipeline.
        """
        pipeline_params = {}
        # Prefer the fitted steps (steps_) when they exist.
        steps_ = self.steps if self.steps_ is None else self.steps_
        for step_tag, step in steps_:
            # deepcopy so callers cannot mutate the live step through the
            # returned mapping.
            step_param_dict = deepcopy(step.__dict__)
            pipeline_params[step_tag] = step_param_dict
            # If step inherits from _BasePipeline, call its get_params to get
            # the parameters each class in the pipeline
            if issubclass(step.__class__, _BasePipeline):
                step_param_dict = step.get_params()
                # Nested step tags are flattened into the top-level mapping.
                pipeline_params.update(step_param_dict)
        return pipeline_params

    def _update_kwargs(self,
                       params: dict) -> None:
        """
        Updates the given parameters of the given steps in the pipeline.

        Parameters
        ----------
        params : dict
            A dictionary where each key corresponds to the tag used for the
            pipeline step. Each value should be a dictionary of the parameters
            (keys) and their new values (values).
        """
        for step_tag, step in self.steps:
            # If step inherits from _BasePipeline, call its _update_kwargs
            if issubclass(step.__class__, _BasePipeline):
                step._update_kwargs(params)
            if step_tag in params.keys():
                # If a parameter in `params` is not in the keyword arguments
                # of the class (excl when kwargs is present), raise exception
                for param in params[step_tag].keys():
                    if param not in step.__dict__.keys() and 'kwargs' not in step.__dict__.keys():
                        raise ValueError(
                            f'Parameter `{param}` not found in keyword arguments for class in step `{step_tag}`'
                        )
                step.__dict__.update(params[step_tag])

    def _pipeline_fit(self,
                      step_tag: str,
                      step: object,
                      X: Union[PandasDataFrameType, dict],
                      y: Union[PandasSeriesType, dict],
                      sample_weight: Union[PandasSeriesType, dict]) -> None:
        """
        Runs the following before applying the `fit` method of `step`:

        1. Checks the parameters of `step` for `ClassAccessor` objects. If a
        `ClassAccessor` object is found, the parameter in `step` is updated
        with the class attribute denoted by the `ClassAccessor` object.
        2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
        then the dataset aligned to `step_tag` is extracted.

        Parameters
        ----------
        step_tag : str
            The tag corresponding to the step.
        step : object
            The step in the pipeline.
        X : Union[PandasDataFrameType, dict]
            The dataset or dictionary of datasets for each pipeline step.
        y : Union[PandasSeriesType, dict]
            The binary target column or dictionary of binary target columns
            for each pipeline step.
        sample_weight : Union[PandasSeriesType, dict], optional
            Row-wise weights or dictionary of row-wise weights for each
            pipeline step. Defaults to None.
        """
        step = self._check_accessor(step)
        X, y, sample_weight = [
            utils.return_dataset_if_dict(
                step_tag=step_tag, df=df
            ) for df in (X, y, sample_weight)
        ]
        step.fit(X, y, sample_weight)

    def _pipeline_transform(self,
                            step_tag: str,
                            step: object,
                            X: Union[PandasDataFrameType, dict]) -> PandasDataFrameType:
        """
        Runs the following before applying the `transform` method of `step`:

        1. Checks the parameters of `step` for `ClassAccessor` objects. If a
        `ClassAccessor` object is found, the parameter in `step` is updated
        with the class attribute denoted by the `ClassAccessor` object.
        2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
        then the dataset aligned to `step_tag` is extracted.

        Parameters
        ----------
        step_tag : str
            The tag corresponding to the step.
        step : object
            The step in the pipeline.
        X : Union[PandasDataFrameType, dict]
            The dataset or dictionary of datasets for each pipeline step.

        Returns
        -------
        PandasDataFrameType
            The transformed dataset.
        """
        step = self._check_accessor(step)
        X = utils.return_dataset_if_dict(step_tag=step_tag, df=X)
        X = step.transform(X)
        # Guard against a step filtering away every column.
        self._exception_if_no_cols_in_X(X, step_tag)
        return X

    def _pipeline_predict(self,
                          step: object,
                          X: Union[PandasDataFrameType, dict]) -> PandasSeriesType:
        """
        Runs the following before applying the `predict` method of `step`:

        1. Checks the parameters of `step` for `ClassAccessor` objects. If a
        `ClassAccessor` object is found, the parameter in `step` is updated
        with the class attribute denoted by the `ClassAccessor` object.

        Parameters
        ----------
        step : object
            The step in the pipeline.
        X : Union[PandasDataFrameType, dict]
            The dataset or dictionary of datasets for each pipeline step.

        Returns
        -------
        PandasSeriesType
            The prediction of the final step.
        """
        step = self._check_accessor(step)
        return step.predict(X)

    def _pipeline_fit_transform(self,
                                step_tag: str,
                                step: object,
                                X: Union[PandasDataFrameType, dict],
                                y: Union[PandasSeriesType, dict],
                                sample_weight: Union[PandasSeriesType, dict]) -> PandasDataFrameType:
        """
        Runs the following before applying the `fit_transform` method of `step`:

        1. Checks the parameters of `step` for `ClassAccessor` objects. If a
        `ClassAccessor` object is found, the parameter in `step` is updated
        with the class attribute denoted by the `ClassAccessor` object.
        2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
        then the dataset aligned to `step_tag` is extracted.

        Parameters
        ----------
        step_tag : str
            The tag corresponding to the step.
        step : object
            The step in the pipeline.
        X : Union[PandasDataFrameType, dict]
            The dataset or dictionary of datasets for each pipeline step.
        y : Union[PandasSeriesType, dict]
            The binary target column or dictionary of binary target columns
            for each pipeline step.
        sample_weight : Union[PandasSeriesType, dict], optional
            Row-wise weights or dictionary of row-wise weights for each
            pipeline step. Defaults to None.

        Returns
        -------
        PandasDataFrameType
            The transformed dataset.
        """
        step = self._check_accessor(step)
        X, y, sample_weight = [
            utils.return_dataset_if_dict(
                step_tag=step_tag, df=df
            ) for df in (X, y, sample_weight)
        ]
        X = step.fit_transform(X, y, sample_weight)
        self._exception_if_no_cols_in_X(X, step_tag)
        return X

    def _check_accessor(self,
                        step: object) -> object:
        """
        Checks whether any of the parameters in the given `step` is of type
        ClassAccessor. If so, then it runs the ClassAccessor's `get` method,
        which extracts the given attribute from the given step in the pipeline,
        and injects it into the parameter.
        """

        def _check_accessor_iterable(iterable: Union[list, tuple],
                                     pipeline_params: Dict[str, dict]) -> None:
            """
            Iterates through an iterable - if the element is another iterable,
            _check_accessor_iterable is called again. If the the element is a
            CheckAccessor, its `get` method is called (which extracts the given
            attribute from the given step in the pipeline) - this attribute is
            then assigned in place of the original element.
            """
            for idx, value in enumerate(iterable):
                if isinstance(value, (list, tuple)):
                    _check_accessor_iterable(value, pipeline_params)
                elif isinstance(value, ClassAccessor):
                    try:
                        iterable[idx] = value.get(pipeline_params)
                    except TypeError:
                        # Tuples do not support item assignment.
                        raise TypeError(
                            '`ClassAccessor` object must be within a mutable iterable.'
                        )

        step_param_dict = step.__dict__
        for param, value in step_param_dict.items():
            # If parameter value is an instantiated class, but not a
            # ClassAccessor, call _check_accessor again
            if hasattr(value, '__dict__') and value.__dict__ and not isinstance(value, ClassAccessor):
                self._check_accessor(value)
            # If parameter value is a list or tuple, call
            # _check_accessor_iterable
            elif isinstance(value, (list, tuple)):
                pipeline_params = self.get_params()
                _check_accessor_iterable(value, pipeline_params)
            # If the parameter value is a ClassAccessor, call its get method
            elif isinstance(value, ClassAccessor):
                pipeline_params = self.get_params()
                step.__dict__[param] = value.get(pipeline_params)
        return step

    @staticmethod
    def _exception_if_no_cols_in_X(X: PandasDataFrameType,
                                   step_tag: str) -> Union[None, DataFrameSizeError]:
        """Raises an exception if `X` has no columns."""
        if X.shape[1] == 0:
            raise DataFrameSizeError(
                f'`X` has been reduced to zero columns after the `{step_tag}` step in the pipeline.'
            )
| StarcoderdataPython |
3336358 | <filename>DartDeep/hptf/ppo.py
import tensorflow as tf
import numpy as np
from DartDeep.dart_env_v2_1 import HpDartEnv
import pydart2
from itertools import count
from collections import deque
from random import random, sample
from multiprocessing import Process, Pipe
from copy import deepcopy
class Replay(deque):
    """A replay buffer: a deque that can draw a random mini-batch."""

    def sample(self, batch_size):
        """Return ``batch_size`` distinct elements drawn uniformly at random
        (without replacement) from the buffer."""
        indices = sample(range(len(self)), batch_size)
        return [self[index] for index in indices]
class Episode(list):
    """A single rollout: an ordered list of per-step transition tuples
    (state, action, reward, value, logprob)."""
    pass
class HpPPO(object):
    """PPO trainer for the HpDartEnv humanoid environment (TF1 graph API).

    Builds a critic, a Gaussian actor and a frozen copy of the actor for the
    clipped-surrogate objective, and provides rollout collection, GAE
    computation and (partially implemented) optimization.
    """

    def __init__(self, session, env_name='walk', num_slaves=1):
        self.sess = session
        self.env = HpDartEnv(env_name)
        self.num_slaves = num_slaves
        self.num_state = self.env.observation_space.shape[0]
        self.num_action = self.env.action_space.shape[0]
        self.action_bound = [self.env.action_space.low, self.env.action_space.high]
        self.num_train = 0
        # hyper-parameters
        self.layer_size = [128, 64]
        self.error_mag = 0.1
        self.num_epoches = 10
        # self.sample_size = 256
        self.sample_size = 2048
        self.batch_size = 128
        self.gamma = 0.95
        self.td_lambda = 0.95
        self.clip_ratio = 0.2
        # set memory and episodes
        self.replay_buffer = Replay()
        self.total_episodes = list()  # type: list[Episode]
        # set variables (graph placeholders)
        with tf.variable_scope('state'):
            self.state = tf.placeholder(tf.float32, shape=[None, self.num_state])
        with tf.variable_scope('action'):
            self.action = tf.placeholder(tf.float32, shape=[None, self.num_action])
        with tf.variable_scope('target_value'):
            self.y = tf.placeholder(tf.float32, shape=[None, 1])
        with tf.variable_scope('advantages'):
            self.advantages = tf.placeholder(tf.float32, shape=[None, 1])
        # build networks
        self.value = self.build_value_net()
        self.actor, self.actor_param = self.build_actor_net('actor_net', trainable=True)
        self.actor_old, self.actor_old_param = self.build_actor_net('actor_old', trainable=False)
        # op that copies current policy weights into the frozen "old" policy
        self.syn_old_pi = [oldp.assign(p) for p, oldp in zip(self.actor_param, self.actor_old_param)]
        self.sample_op = tf.clip_by_value(tf.squeeze(self.actor.sample(1), axis=0), self.action_bound[0], self.action_bound[1])
        # set loss function
        with tf.variable_scope('critic_loss'):
            self.adv = self.y - self.value
            self.critic_loss = tf.reduce_mean(tf.square(self.adv))
        with tf.variable_scope('actor_loss'):
            ratio = self.actor.prob(self.action) / self.actor_old.prob(self.action)
            # NOTE(review): this is not the standard PPO clipped surrogate —
            # the clipped term is missing a `* self.advantages` factor, and the
            # surrogate is usually negated before being minimized. Confirm.
            self.actor_loss = tf.reduce_mean(tf.minimum(ratio * self.advantages, tf.clip_by_value(ratio, 1. - self.clip_ratio, 1. + self.clip_ratio)))
        # set optimizer
        self.value_step_size = 1e-2
        self.value_optimizer = tf.train.AdamOptimizer(self.value_step_size)
        self.train_critic = self.value_optimizer.minimize(self.critic_loss)
        self.policy_step_size = 1e-4
        self.policy_optimizer = tf.train.AdamOptimizer(self.policy_step_size)
        # NOTE(review): this uses `value_optimizer` (lr 1e-2); presumably
        # `self.policy_optimizer` (lr 1e-4) was intended — verify.
        self.train_policy = self.value_optimizer.minimize(self.actor_loss)
        # for evaluation
        self.num_eval = 0
        # for multiprocessing (one pipe per slave, per channel)
        self.state_sender = []  # type: list[Connection]
        self.result_sender = []  # type: list[Connection]
        self.state_receiver = []  # type: list[Connection]
        self.result_receiver = []  # type: list[Connection]
        self.action_sender = []  # type: list[Connection]
        self.reset_sender = []  # type: list[Connection]
        self.motion_sender = []  # type: list[Connection]
        self.envs = []  # type: list[Process]

    def init_envs(self):
        """Spawn one worker process per slave, wired up with fresh pipes."""
        for slave_idx in range(self.num_slaves):
            s_s, s_r = Pipe()
            r_s, r_r = Pipe()
            a_s, a_r = Pipe()
            reset_s, reset_r = Pipe()
            motion_s, motion_r = Pipe()
            # NOTE(review): `self.rnn_len` is never defined on this class, and
            # `worker` expects the env name as its first argument — this call
            # would raise AttributeError if used. Confirm intended argument.
            p = Process(target=worker, args=(self.rnn_len, slave_idx, s_s, r_s, a_r, reset_r, motion_r))
            self.state_sender.append(s_s)
            self.result_sender.append(r_s)
            self.state_receiver.append(s_r)
            self.result_receiver.append(r_r)
            self.action_sender.append(a_s)
            self.reset_sender.append(reset_s)
            self.motion_sender.append(motion_s)
            self.envs.append(p)
            p.start()

    def envs_get_states(self, terminated):
        """Collect one state per slave; terminated slaves get a zero state."""
        states = []
        for recv_idx in range(len(self.state_receiver)):
            if terminated[recv_idx]:
                states.append([0.] * self.num_state)
            else:
                states.append(self.state_receiver[recv_idx].recv())
        return states

    def envs_send_actions(self, actions, terminated):
        """Send each non-terminated slave its action."""
        for i in range(len(self.action_sender)):
            if not terminated[i]:
                self.action_sender[i].send(actions[i])

    def envs_get_status(self, terminated):
        """Collect (reward, done) per slave; terminated slaves report (0, True).

        Returns two parallel iterables: rewards and done flags.
        """
        status = []
        for recv_idx in range(len(self.result_receiver)):
            if terminated[recv_idx]:
                status.append((0., True))
            else:
                status.append(self.result_receiver[recv_idx].recv())
        return zip(*status)

    def envs_resets(self, reset_flag):
        """Broadcast the same reset flag to every worker."""
        for i in range(len(self.reset_sender)):
            self.reset_sender[i].send(reset_flag)

    def envs_reset(self, i, reset_flag):
        """Send a reset flag to worker ``i`` only."""
        self.reset_sender[i].send(reset_flag)

    def build_value_net(self):
        """Critic: two hidden ReLU layers and a scalar linear output."""
        # build networks
        with tf.variable_scope('value_net'):
            value_dl1 = tf.contrib.layers.fully_connected(inputs=self.state,
                                                          num_outputs=self.layer_size[0],
                                                          activation_fn=tf.nn.relu,
                                                          scope='value_dl1')
            value_dl2 = tf.contrib.layers.fully_connected(inputs=value_dl1,
                                                          num_outputs=self.layer_size[1],
                                                          activation_fn=tf.nn.relu,
                                                          scope='value_dl2')
            value = tf.contrib.layers.fully_connected(inputs=value_dl2,
                                                      num_outputs=1,
                                                      activation_fn=None,
                                                      scope='value')
        return value

    def build_actor_net(self, scope, trainable):
        """Gaussian policy head: shared trunk, linear mu, softplus sigma.

        Returns the Normal distribution and the scope's variable collection.
        """
        with tf.variable_scope(scope):
            actor_dl1 = tf.contrib.layers.fully_connected(inputs=self.state,
                                                          num_outputs=self.layer_size[0],
                                                          activation_fn=tf.nn.relu,
                                                          trainable=trainable,
                                                          scope='dl1')
            actor_dl2 = tf.contrib.layers.fully_connected(inputs=actor_dl1,
                                                          num_outputs=self.layer_size[1],
                                                          activation_fn=tf.nn.relu,
                                                          trainable=trainable,
                                                          scope='dl2')
            mu = tf.contrib.layers.fully_connected(inputs=actor_dl2,
                                                   num_outputs=self.num_action,
                                                   activation_fn=None,
                                                   trainable=trainable,
                                                   scope='mu')
            sigma = tf.contrib.layers.fully_connected(inputs=actor_dl2,
                                                      num_outputs=self.num_action,
                                                      activation_fn=tf.nn.softplus,
                                                      trainable=trainable,
                                                      scope='sigma')
            # sigma = tf.convert_to_tensor(0.1 * np.ones(self.num_action), dtype=np.float32)
            actor_dist = tf.contrib.distributions.Normal(loc=mu, scale=sigma)
            param = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
        return actor_dist, param

    def get_action(self, s):
        """Sample a clipped action for a single state ``s``."""
        return self.sess.run(self.sample_op, feed_dict={self.state: s[np.newaxis, :]})

    def get_v(self, s):
        """Return the critic's scalar value estimate for state ``s``."""
        if s.ndim < 2:
            s = s[np.newaxis, :]
        return self.sess.run(self.value, feed_dict={self.state: s})[0, 0]

    def train(self):
        """One training iteration: collect rollouts, then optimize."""
        self.generate_transitions()
        self.optimize_model()
        self.num_train += 1

    def generate_transitions(self):
        """Roll out the environment, splitting transitions into episodes."""
        del self.total_episodes[:]
        episodes = [Episode() for _ in range(self.num_slaves)]
        terminated = [False for _ in range(self.num_slaves)]
        self.env.Resets(True)
        # self.envs_resets(1)
        local_step = 0
        # NOTE(review): `local_step` is never incremented inside this loop, so
        # the exit condition below can never be met — confirm the intended
        # step counting before using this method.
        while True:
            states = self.env.GetStates()
            # states = self.envs_get_states(terminated)
            actions = np.asarray(self.get_action(states))
            values = self.get_v(states)
            # NOTE(review): this is a TF graph op, not evaluated
            # probabilities — presumably `sess.run` was intended. Verify.
            logprobs = self.actor.prob(actions)
            # self.envs_send_actions(actions, terminated)
            # rewards, is_done = self.envs_get_status(terminated)
            __, reward, is_done, info = self.env.step(actions.flatten())
            rewards = [reward]
            is_dones = [is_done]
            for j in range(self.num_slaves):
                if terminated[j]:
                    continue
                nan_occur = np.any(np.isnan(states[j])) or np.any(np.isnan(actions[j]))
                if not nan_occur:
                    episodes[j].append((states[j], actions[j], rewards[j], values[j], logprobs[j]))
                if is_dones[j] or nan_occur:
                    # episode ended (or went NaN): archive it and restart
                    self.total_episodes.append(deepcopy(episodes[j]))
                    if local_step < self.sample_size:
                        episodes[j] = Episode()
                        # self.envs_reset(j, 1)
                        self.env.reset()
                    else:
                        terminated[j] = True
                else:
                    # self.envs_reset(j, 0)
                    pass
            if local_step >= self.sample_size and all(terminated):
                break

    def optimize_model(self):
        """Fill the replay buffer with TD/GAE targets and run update epochs."""
        self.compute_td_gae()
        for _ in range(self.num_epoches):
            transitions = self.replay_buffer.sample(self.batch_size)
            batch = list(zip(*transitions))
            # NOTE(review): `td` is unused and `update_value`/`update_policy`
            # are stubs, so no optimization actually happens yet.
            td = batch[3]
            self.update_value()
            self.update_policy()

    def compute_td_gae(self):
        """Compute GAE(lambda) advantages and TD targets for each episode,
        appending (state, action, logprob, TD, advantage) to the buffer."""
        for epi in self.total_episodes:
            len_epi = len(epi)
            states, actions, rewards, values, logprobs = zip(*epi)
            # append a terminal bootstrap value of zero
            values = np.concatenate((values, np.zeros(1)), axis=0)
            advantages = np.zeros(len_epi)
            ad_t = 0
            # standard backward GAE recursion
            for i in reversed(range(len_epi)):
                delta = rewards[i] + values[i + 1] * self.gamma - values[i]
                ad_t = delta + self.gamma * self.td_lambda * ad_t
                advantages[i] = ad_t
            TD = values[:len_epi] + advantages
            for i in range(len_epi):
                self.replay_buffer.append((states[i], actions[i], logprobs[i], TD[i], advantages[i]))

    def update_value(self):
        # TODO: not implemented — `self.train_critic` is never run.
        pass

    def update_policy(self):
        # TODO: not implemented — `self.train_policy` is never run.
        pass

    def evaluate(self):
        """Run one deterministic (mean-action) episode and report rewards."""
        self.num_eval += 1
        total_reward = 0
        total_step = 0
        self.env.Reset(False, 0)
        state = self.env.GetState(0)
        for t in count():
            # NOTE(review): references the module-level `ppo` instead of
            # `self` in the feed dict — works only for the global instance.
            action = np.asarray(self.actor.mean().eval(feed_dict={ppo.state: [state]})).flatten()
            state, reward, is_done, info = self.env.step(action)
            if is_done:
                break
            else:
                total_step += 1
                total_reward += reward
        # print('noise: {:.3f}'.format(self.actor.stddev().eval(feed_dict={ppo.state: [state]})))
        print('noise: ', self.actor.stddev().eval(feed_dict={ppo.state: [state]}))
        if total_step > 0:
            print('Epi reward : {:.2f}, Step reward : {:.2f} Total step : {}'
                  .format(total_reward, total_reward / total_step, total_step))
        else:
            print('bad')
        return total_reward, total_step
def worker(env_name, proc_num, state_sender, result_sender, action_receiver, reset_receiver, motion_receiver):
    """
    Environment loop run in a child process: each cycle it waits for a reset
    flag, optionally resets (and possibly retargets) the environment, sends
    the current state, receives an action, steps, and returns (reward, done).

    :type env_name: str
    :type proc_num: int
    :type result_sender: Connection
    :type action_receiver: Connection
    :return:
    """
    # reset variable
    # 0 : go on (no reset)
    # 1 : soft reset ( w/o motion change )
    # 2 : hard reset ( with motion change )
    env = HpDartEnv(env_name)
    state = None
    while True:
        reset_flag = reset_receiver.recv()
        if reset_flag == 1:
            state = env.reset()
        elif reset_flag == 2:
            # hard reset: receive a new target motion first
            goals, qs = motion_receiver.recv()
            env.update_target(goals, qs)
            state = env.reset()
        state_sender.send(state)
        action = action_receiver.recv()
        state, reward, is_done, _ = env.step(action)
        result_sender.send((reward, is_done))
# Entry point: run one training iteration followed by an evaluation rollout.
if __name__ == '__main__':
    pydart2.init()
    with tf.Session() as sess:
        ppo = HpPPO(sess, 'walk')
        sess.run(tf.global_variables_initializer())
        # print(sigma.eval())
        # print(actor_dist.sample(1).eval(feed_dict={state: np.zeros((1, num_state))}))
        # print(ppo.actor.mean().eval(feed_dict={ppo.state: np.zeros((1, ppo.num_state))}))
        ppo.train()
        ppo.evaluate()
| StarcoderdataPython |
126410 | <filename>passwords/type7.py
import sys
# Cisco "type 7" XOR keystream: the bytes of the fixed key string
# "dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv98732254k;fg87".
V = [0x64, 0x73, 0x66, 0x64, 0x3b, 0x6b, 0x66, 0x6f, 0x41, 0x2c, 0x2e,
     0x69, 0x79, 0x65, 0x77, 0x72, 0x6b, 0x6c, 0x64, 0x4a, 0x4b, 0x44,
     0x48, 0x53, 0x55, 0x42, 0x73, 0x67, 0x76, 0x63, 0x61, 0x36, 0x39,
     0x38, 0x33, 0x34, 0x6e, 0x63, 0x78, 0x76, 0x39, 0x38, 0x37, 0x33,
     0x32, 0x35, 0x34, 0x6b, 0x3b, 0x66, 0x67, 0x38, 0x37]


def decode_type7(encoded):
    """Decode a Cisco "type 7" obfuscated password.

    The first two hex digits are the starting offset into the key stream
    ``V``; every following pair of hex digits is one ciphertext byte, XORed
    with successive key bytes (wrapping modulo 53).

    Fixes over the original script:
    - the loop previously stopped at ``len(hash) - 2`` and silently dropped
      the final plaintext character;
    - Python 2 ``print r`` replaced with a Python 3 call;
    - the builtin name ``hash`` is no longer shadowed.
    """
    key_index = int(encoded[:2], 16)
    plaintext = ""
    for j in range(2, len(encoded), 2):
        cipher_byte = int(encoded[j:j + 2], 16)
        plaintext += chr(cipher_byte ^ V[key_index])
        key_index = (key_index + 1) % 53
    return plaintext


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: type7.py hash")
        sys.exit(0)
    print(decode_type7(sys.argv[1]))
| StarcoderdataPython |
1722569 | <reponame>jcalcutt/notes<gh_stars>0
from django.shortcuts import render, redirect
def welcome(request):
    """Landing page: signed-in users are redirected to their home page;
    anonymous visitors see the public welcome template."""
    if request.user.is_authenticated:
        return redirect('user_home')
    return render(request, 'notes/welcome.html')
| StarcoderdataPython |
1650879 | """
Testshot script for getting GPI equipment ready while still at MIT.
Usage :
python testgpi_mit.py 1180227500
<NAME>, Feb 27, 2018
"""
from MDSplus import *
from MitDevices.acq132 import ACQ132
from MitDevices.acq196 import ACQ196
from MitDevices.acq196ao import ACQ196AO
import numpy as np
import sys
import time
import matplotlib.pyplot as plt
# Shot number to build, taken from the command line.
s = int(sys.argv[1])

# Create the per-shot tree from the model tree, then reopen it for this shot.
myTree = Tree("spectroscopy", -1)
myTree.createPulse(s)  # Copies the model tree
myTree = Tree("spectroscopy", s)
myDIO2 = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:DIO2")

# Program the same bias voltage on all eight HV channels.
HV_prog_i = 4.0
for i in range(1, 9):
    myTree.getNode("GPI_TCV.APD_ARRAY.CONTROL.HV_PROG_" + str(i)).putData(myTree.tdiCompile(str(HV_prog_i)))

# Initialize DIO2 through TCL command, since there is no working python command for DIO2
# DIO2_ENCDEC does not work for this, neither does DIO4
myTree.tcl('do /meth ' + myDIO2.getFullPath() + ' init')
print("Initialized DIO2")

# Take node of each digitizer, and initialize them
myACQ132_1 = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_1")
inst_ACQ132_1 = ACQ132(myACQ132_1)
inst_ACQ132_1.initftp()
print("Initialized ACQ132_1")
myACQ132_2 = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_2")
inst_ACQ132_2 = ACQ132(myACQ132_2)
inst_ACQ132_2.initftp()
print("Initialized ACQ132_2")
myACQ132_3 = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_3")
inst_ACQ132_3 = ACQ132(myACQ132_3)
inst_ACQ132_3.initftp()
print("Initialized ACQ132_3")
myACQ132_4 = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_4")
inst_ACQ132_4 = ACQ132(myACQ132_4)
inst_ACQ132_4.initftp()
print("Initialized ACQ132_4")
myACQ196 = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ196")
inst_ACQ196 = ACQ196(myACQ196)
inst_ACQ196.initftp()
print("Initialized ACQ196")
myACQ196AO = myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ196AO")
inst_ACQ196AO = ACQ196AO(myACQ196AO)
inst_ACQ196AO.init()
print("Initialized ACQ196AO")

# Wait for the initialization
time.sleep(7)

# Trigger DIO2 in order to start the data acquisition
myTree.tcl('do /meth ' + myDIO2.getFullPath() + ' trigger')
# myTree.getNode('GPI.APD_ARRAY.HARDWARE:eng_encoder').doMethod("set_event","SPECTROSCOPY_START") #Should work with Trig.mode=event in the device setup of DIO2 - put a spectroscopy start MDSplus event on the CPCI network
print("Triggered DIO2")

# Wait for shot to end
time.sleep(7)

# Store data to the MDSplus tree
inst_ACQ132_1.store()
print("Stored data on ACQ132_1")
inst_ACQ132_2.store()
print("Stored data on ACQ132_2")
inst_ACQ132_3.store()
print("Stored data on ACQ132_3")
inst_ACQ132_4.store()
print("Stored data on ACQ132_4")
inst_ACQ196.store()
print("Stored data on ACQ196")

# Sanity check: print the mean of every acquired channel.
for i in range(1, 5):
    for j in range(1, 33):
        if j < 10:
            sig = myTree.getNode('gpi_tcv.apd_array.hardware.acq132_' + str(i) + '.input_0' + str(j)).getData().data()
            # t=myTree.getNode('gpi_tcv.apd_array.hardware.dt132_'+str(i)+'.input_0'+str(j)).dim_of().data()
        else:
            sig = myTree.getNode('gpi_tcv.apd_array.hardware.acq132_' + str(i) + '.input_' + str(j)).getData().data()
            # t=myTree.getNode('gpi_tcv.apd_array.hardware.dt132_'+str(i)+'.input_'+str(j)).dim_of().data()
        print("ACQ132_" + str(i) + ", Input " + str(j) + ": " + str(np.mean(sig)))
"""
for i in range (1,17):
if i < 10:
node_HV_prog=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196AO.OUTPUT_0"+str(i))
node_HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_0"+str(i))
else:
node_HV_prog=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196AO.OUTPUT_"+str(i))
node_HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i))
HV_prog=max(node_HV_prog.getData().data())
HV_meas=np.mean(node_HV_meas.getData().data())
print("HV_prog for output "+str(i)+" : "+str(HV_prog))
print("HV_meas for input "+str(i)+" : "+str(HV_meas))
for i in range (17,33):
node_HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i))
HV_meas=np.mean(node_HV_meas.getData().data())
print("HV_meas for input "+str(i)+" : "+str(HV_meas))
"""
"""
for i in range (1,33):
if i<10:
HV_meas=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_0"+str(i)).getData().data()
t=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_0"+str(i)).dim_of().data()
else:
HV_meas=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_"+str(i)).getData().data()
t=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_"+str(i)).dim_of().data()
# plt.plot(t,HV_meas)
print('Input_'+str(i)+' : '+str(np.mean(HV_meas)))
"""
"""
for i in range (1,33):
if i<10:
HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_0"+str(i)).getData().data()
t=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_0"+str(i)).dim_of().data()
else:
HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i)).getData().data()
t=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i)).dim_of().data()
# plt.plot(t,HV_meas)
print('Input_'+str(i)+' : '+str(np.mean(HV_meas)))
"""
#plt.xlabel('Time (sec)')
#plt.ylabel('HV_meas (V)')
#plt.ylim(0.,5.)
#plt.show()
#for i in range (1,2):
# if i < 10:
# node_sig=myTree.getNode("GPI.APD_ARRAY.HARDWARE:DT132_3.INPUT_0"+str(i))
# else:
# node_sig=myTree.getNode("GPI.APD_ARRAY.HARDWARE:DT132_3.INPUT_"+str(i))
# sig=np.mean(node_sig.getData().data())
# print("Input "+str(i)+": "+str(sig))
# signal=node_sig.getData().data()
# t=node_sig.dim_of().data()
# plt.plot(t,signal)
# plt.xlabel('Time (sec)')
# plt.ylabel('Signal (V)')
"""
plt.xlabel('Time (sec)')
plt.ylabel('Signal (V)')
line=[]
for i in range (1,5):
if i<4:
node_sig=myTree.getNode("GPI.APD_ARRAY.HARDWARE:DT132_"+str(i)+".INPUT_01")
else:
node_sig=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_"+str(i)+".INPUT_01")
signal=node_sig.getData().data()
t=node_sig.dim_of().data()
line.append(plt.plot(t,signal,label="DT132_"+str(i)))
plt.legend(line,('DT132_1','DT132_2','DT132_3','DT132_4'))
plt.xlim([0.05,0.05003])
plt.show()
"""
| StarcoderdataPython |
43187 | from selenium import webdriver
def main():
    """Open the local search page, request all results in one page, and print
    the list of country names found in the results pane.

    Fixes over the original: Python 2 ``print countries`` (a syntax error
    under Python 3) replaced with a call, and the browser is now closed even
    if any Selenium call raises.
    """
    driver = webdriver.Chrome()
    try:
        driver.get('http://127.0.0.1:8000/places/default/search')
        # Match-everything search term.
        driver.find_element_by_id('search_term').send_keys('.')
        # Rewrite the page-size option so a single page holds all results.
        driver.execute_script("document.getElementById('page_size').options[1].text = '1000'")
        driver.find_element_by_id('search').click()
        driver.implicitly_wait(30)
        links = driver.find_elements_by_css_selector('#results a')
        countries = [link.text for link in links]
    finally:
        driver.close()
    print(countries)
# Run only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1778146 | import bpy
import os
# Derive the export location from the currently open .blend file: the OBJ is
# written next to it inside a "compressed" subdirectory, same base name.
blend_file_path = bpy.data.filepath
directory = os.path.dirname(blend_file_path)
file_name = os.path.splitext(os.path.basename(blend_file_path))[0]

# Export the whole scene as a triangulated OBJ (Y-up / Z-forward); materials,
# MTL output, NURBS and vertex groups are all disabled.
bpy.ops.export_scene.obj(
    filepath=os.path.join(directory, 'compressed', file_name + '.obj'),
    check_existing=True,
    axis_forward='Z',
    axis_up='Y',
    filter_glob="*.obj;*.mtl",
    use_selection=False,
    use_animation=False,
    use_mesh_modifiers=True,
    use_edges=False,
    use_smooth_groups=False,
    use_smooth_groups_bitflags=False,
    use_normals=True,
    use_uvs=True,
    use_materials=False,
    use_triangles=True,
    use_nurbs=False,
    use_vertex_groups=False,
    use_blen_objects=True,
    group_by_object=False,
    group_by_material=False,
    keep_vertex_order=False,
    global_scale=1,
    path_mode='AUTO'
)
| StarcoderdataPython |
145303 |
from foundations_spec import *
class TestBucketPipelineArchive(Spec):
    """Spec for BucketPipelineArchive: verifies that archive operations are
    forwarded to the underlying bucket with the configured prefix."""

    @let
    def bucket_klass(self):
        # Fake bucket class: returns `self.bucket` only when constructed with
        # the exact args/kwargs the archive was configured with.
        klass = ConditionalReturn()
        klass.return_when(self.bucket, *self.constructor_args, **self.constructor_kwargs)
        return klass

    bucket = let_mock()

    @let
    def constructor_args(self):
        return self.faker.words()

    @let
    def constructor_kwargs(self):
        return self.faker.pydict()

    @let
    def object_name(self):
        return self.faker.name()

    @let
    def random_data(self):
        return self.faker.uuid4()

    @let
    def random_prefix(self):
        return self.faker.uuid4()

    @let
    def file_listing_with_prefix(self):
        return [f'{self.random_prefix}/{file}' for file in self.faker.words()]

    @let
    def blob_exists(self):
        return self.faker.boolean()

    @let
    def archive(self):
        from foundations_contrib.bucket_pipeline_archive import BucketPipelineArchive
        return BucketPipelineArchive(self.bucket_klass, *self.constructor_args, **self.constructor_kwargs)

    def test_append_binary_uploads_string_to_bucket(self):
        self.archive.append_binary(self.object_name, self.random_data, self.random_prefix)
        self.bucket.upload_from_string.assert_called_with(f'{self.random_prefix}/{self.object_name}', self.random_data)

    def test_list_files_returns_list_of_files(self):
        self.bucket.list_files = ConditionalReturn()
        self.bucket.list_files.return_when(self.file_listing_with_prefix, f'{self.random_prefix}/*')
        result = self.archive.list_files('*', self.random_prefix)
        self.assertEqual(self.file_listing_with_prefix, result)

    def test_exists_forwards_to_underlying_bucket(self):
        self.bucket.exists = ConditionalReturn()
        self.bucket.exists.return_when(self.blob_exists, f'{self.object_name}')
        self.assertEqual(self.blob_exists, self.archive.exists(self.object_name))

    def test_exists_forwards_to_underlying_bucket_with_prefix(self):
        self.bucket.exists = ConditionalReturn()
        self.bucket.exists.return_when(self.blob_exists, f'{self.random_prefix}/{self.object_name}')
        self.assertEqual(self.blob_exists, self.archive.exists(self.object_name, prefix=self.random_prefix))
3326260 | from office365.actions.upload_session_query import UploadSessionQuery
from office365.outlook.mail.attachment_item import AttachmentItem
from office365.outlook.mail.attachment_type import AttachmentType
class AttachmentUploadQuery(UploadSessionQuery):
    """Create an upload session to allow your app to upload attachments up to the maximum file size. An upload session
    allows your app to upload ranges of the file in sequential API requests, which allows the transfer to be resumed
    if a connection is dropped while the upload is in progress. """
    def create_upload_session(self):
        # Describe the pending file (type, name, size) and ask the bound
        # collection to open a resumable upload session for it.
        # NOTE(review): file_name/file_size/binding_type are presumably
        # provided by UploadSessionQuery — confirm against the base class.
        attachment_item = AttachmentItem(attachment_type=AttachmentType.file, name=self.file_name, size=self.file_size)
        return self.binding_type.create_upload_session(attachment_item)
| StarcoderdataPython |
137368 | import bokeh
import pandas as pd
import numpy as np
import os
from bokeh import events
from bokeh.io import show
from bokeh.plotting import figure, output_file, ColumnDataSource
from bokeh.models.widgets import ColorPicker, Select, Toggle, Dropdown, DataTable, NumberFormatter, TableColumn,TextInput, Button, TextAreaInput, Slider, Div, RangeSlider, HTMLTemplateFormatter
from bokeh.layouts import Column, Row, gridplot, Spacer
from bokeh.models import HoverTool, CustomJS, BoxSelectTool, Panel, Tabs, Span, AjaxDataSource, ToolbarBox, Toolbar, Legend, LegendItem, PanTool, TapTool
from bokeh.models.glyphs import MultiLine
from bokeh.events import ButtonClick, SelectionGeometry, Press, Tap
from bokeh.util.compiler import JavaScript
import imageio as iio
output_file('index.html', title='NaViA v 1.0')
# Usual format 1366x768
plot_canvas = figure(plot_width=1366, plot_height=int(768/1), output_backend='canvas',
x_axis_label='m/z ', y_axis_label='Rel. Abundance [%]',
tools=['box_zoom, reset, pan, wheel_zoom'],hidpi=True,
toolbar_location='right')
plot_canvas.background_fill_color=None
plot_canvas.border_fill_color=None
svg_canvas = figure(
plot_width=1366, plot_height=768,
output_backend='svg',
x_axis_label='m/z ', y_axis_label='Rel. Abundance [%]',
tools=['save'],
hidpi=False, visible=True
)
highres_canvas = figure(plot_width=3840, plot_height=int(2160/1), output_backend='canvas',
x_axis_label='m/z ', y_axis_label='Rel. Abundance [%]',
tools=['save'],
toolbar_location='right', visible=True)
highres_canvas.axis.axis_label_text_font_size='40px'
highres_canvas.axis.major_label_text_font_size='32px'
highres_canvas.axis.axis_line_width=3
highres_canvas.axis.axis_line_width=3
highres_canvas.axis.major_tick_line_width=3
highres_canvas.axis.minor_tick_line_width=3
highres_canvas.axis.major_tick_out=14
highres_canvas.axis.minor_tick_out=10
highres_canvas.grid.grid_line_width=2
highres_canvas.background_fill_color=None
highres_canvas.border_fill_color=None
legend = Legend(items=[])
plot_canvas.add_layout(legend)
# Defining colours
n_series = 20
series_colours = [ bokeh.palettes.Category20_20[int((2*i%20+np.floor(i/10)))] for i in range(20)]
series_cols = {}
series_data = {}
series_sele = {}
series_mz = {}
series_mz4k = {}
series_mzsvg= {}
peak_mz = {}
peak_mz4k = {}
peak_mzsvg = {}
series_names = ['Series {:d}'.format(i_series + 1) for i_series in range(n_series)]
for i_series in range(len(series_names)):
series_cols[series_names[i_series]] = series_colours[i_series]
series_cols['Background']='#000000'
for i_series in series_names:
series_data[i_series] = AjaxDataSource(data=dict(x_low=[], x_upp=[], x_max=[],max_int=[], charge=[]))#
series_sele[i_series] = AjaxDataSource(data=dict(i_low=[], i_upp=[], i_max=[]))
series_mz[i_series] = AjaxDataSource(data=dict(Intensity=[], mz=[]))
series_mz4k[i_series] = AjaxDataSource(data=dict(Intensity=[], mz=[]))
series_mzsvg[i_series]= AjaxDataSource(data=dict(Intensity=[], mz=[]))
peak_mz[i_series] = AjaxDataSource(data=dict(xs=[], ys=[]))
peak_mz4k[i_series] = AjaxDataSource(data=dict(xs=[], ys=[]))
peak_mzsvg[i_series] = AjaxDataSource(data=dict(xs=[], ys=[]))
series_masses = AjaxDataSource(data=dict(Series=[], Mass=[], Uncertainty=[], Colour=[]))
aser_data = AjaxDataSource(data=dict(x_low=[], x_upp=[], x_max=[],max_int=[], charge=[]))
series_dict = AjaxDataSource(data=dict(series=series_names, names=series_names))
groel_data=pd.read_csv('Testdata/siyun_groel.txt', skiprows=10, delimiter='\t')
# groel_data.columns.values=[ 'Intensity']
groel_data.rename(columns={'1980.151514':'mz', '0.000000':'Intensity'}, inplace=True)
# print(groel_data.columns.values)
GroEL_mz = AjaxDataSource(data=groel_data)
raw_mz = AjaxDataSource(data=dict(Intensity=[], mz=[]))
proc_mz = AjaxDataSource(data=dict(Intensity=[], mz=[]))
bg_mz = AjaxDataSource(data=dict(Intensity=[], mz=[]))
bg_mz4k = AjaxDataSource(data=dict(Intensity=[], mz=[]))
bg_mzsvg = AjaxDataSource(data=dict(Intensity=[], mz=[]))
# Define Dataprocessing parameters in DS to make them easier to access etc.
DataProcessingParameters= AjaxDataSource(data=dict(mz_low=[0.0], mz_upp=[20000.0], gau_sigma=[0.0], gau_rep=[1], intensity_threshold=[0.0], sub_mode=['Substract Minimum'], sub_value=[0.0]))
series_colours_DS= AjaxDataSource(data=dict(series=[x for x in series_cols], colour=[series_cols[x] for x in series_cols]))
sel_lines={}
sel_lines4k={}
sel_linessvg={}
peak_lines={}
peak_lines4k={}
peak_linessvg={}
for i_series in series_cols.keys():
sel_lines[i_series] = MultiLine(xs='mz', ys='Intensity', line_color=series_cols[i_series], name=i_series)
sel_lines4k[i_series] = MultiLine(xs='mz', ys='Intensity', line_color=series_cols[i_series], name=i_series, line_width=4)
sel_linessvg[i_series] = MultiLine(xs='mz', ys='Intensity', line_color=series_cols[i_series], name=i_series)
peak_lines[i_series] = MultiLine(xs='xs', ys='ys', line_color=series_cols[i_series], line_alpha=0.5)
peak_lines4k[i_series] = MultiLine(xs='xs', ys='ys', line_color=series_cols[i_series], line_width=4, line_alpha=0.5)
peak_linessvg[i_series] = MultiLine(xs='xs', ys='ys', line_color=series_cols[i_series], line_alpha=0.5)
sel_lines['Background'] = MultiLine(xs='mz', ys='Intensity', line_color=series_cols['Background'], name='Background')
sel_lines4k['Background'] = MultiLine(xs='mz', ys='Intensity', line_color=series_cols['Background'], name='Background', line_width=4)
sel_linessvg['Background'] = MultiLine(xs='mz', ys='Intensity', line_color=series_cols['Background'], name='Background')
# Peak prediction peaks
pp_mean_data = AjaxDataSource(data=dict(xs=[], ys=[]))
pp_std_data = AjaxDataSource(data=dict(xs=[], ys=[]))
pp_mean = MultiLine(xs="xs", ys="ys", name='pp_mean', line_color=bokeh.palettes.Category20_20[0], line_width=2, line_alpha=0.5)
pp_std = MultiLine(xs="xs", ys="ys", name='pp_std', line_color=bokeh.palettes.Category20_20[0], line_dash='dashed',line_width=2, line_alpha=0.5)
# Selection and show of colour of Active Series
ser_act_menu=[('Background (Eraser)', 'Background'), ('New Series', 'New Series')]
# ser_act = Select(value='Background', options=['Background', 'New Series'], width=150, height=30)
ser_act = Dropdown(label="Create Series", value='Background', menu=ser_act_menu, width=140, height=30)
col_cb = CustomJS(args=dict(proc_mz=proc_mz, series_data=series_data, pp_mean_data=pp_mean_data, pp_std_data=pp_std_data, plot_canvas=plot_canvas, peak_lines=peak_lines, peak_lines4k=peak_lines4k, series_colours_DS=series_colours_DS, series_names=series_names, sel_lines=sel_lines, ser_act=ser_act, pp_mean= pp_mean, pp_std=pp_std, series_masses=series_masses)
, code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read() + open(os.path.join(os.getcwd(), 'JS_Functions', "col_cb.js")).read())
col = ColorPicker(color='black', width=50, height=30, disabled=False, callback=col_cb)
ser_match = Select(value='', options=[], title='Stoich. Calculation Series:', width=150, height=45)
ser_callback = CustomJS(args=dict(proc_mz=proc_mz,series_colours_DS=series_colours_DS, series_names=series_names, sel_lines=sel_lines, sel_lines4k=sel_lines4k, peak_lines=peak_lines,peak_lines4k=peak_lines4k, col=col, aser_data=aser_data, ser_match=ser_match,
series_data=series_data, pp_mean= pp_mean, pp_std=pp_std, pp_mean_data=pp_mean_data, pp_std_data=pp_std_data, series_masses=series_masses, plot_canvas=plot_canvas),
code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read() + open(os.path.join(os.getcwd(), 'JS_Functions', "ser_cb.js")).read())
# Additional Buttons
menu = [("Load spectrum (txt file)", "load_file"), ("Load spectrum (clipboard)", "load_clip"), ("Load example (GroEL)", "load_groel"), None, ("Save session", "save_sess"), ("Load session", "load_sess"), None, ("Save Peaks Table", "save_peaks"), ("Save Masses Table", "save_masses"),None, ("Load Subunit table", "load_SU"), ("Save Subunit table", "save_SU"), None, ("Save Stoichiometry Table", "save_mm")]#, None, ("Load subunit table", "load_suta")]
# Further options to implement ("Export SVG","expo_svg"), ("Export high-res PNG","expo_svg")]
ser_act.js_on_change('value', ser_callback)
posneg_menu=[('Positive Mode','Positive'), ('Negative Mode', 'Negative')]
posneg = Dropdown(menu=posneg_menu, value='Positive', label='Instrument Mode: +' , width=160, height=30)
graph_opt = Toggle(label='Figure', width=110, height=30)
help_alert = CustomJS(args=dict(), code=open(os.path.join(os.getcwd(), 'JS_Functions', "help_cb.js")).read())
mass_finder = Toggle(label='Mass Finder', width=110, height=30)
mass_match = Toggle(label='Complex Stoichiometry', width=150, height=30)
dt_button = Toggle(label='Data Processing', width=110, height=30)
help_button = Button(label='Help', width=90, height=30)
help_button.js_on_event(ButtonClick, help_alert)
sele_cb = CustomJS(args=dict(peak_mz=peak_mz, bg_mz=bg_mz, proc_mz=proc_mz, ser_act=ser_act, series_sele=series_sele, series_data=series_data, series_mz=series_mz,
series_names=series_names, series_masses=series_masses, pp_mean_data=pp_mean_data, pp_std_data=pp_std_data, aser_data=aser_data, posneg=posneg, series_colours_DS=series_colours_DS), code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read()
+ open(os.path.join(os.getcwd(), 'JS_Functions', "sele_cb.js")).read())
correct_cb = CustomJS(args=dict(series_colours_DS=series_colours_DS, peak_mz=peak_mz, bg_mz=bg_mz, proc_mz=proc_mz, ser_act=ser_act, series_sele=series_sele, series_data=series_data, series_mz=series_mz,
series_names=series_names, series_masses=series_masses, pp_mean_data=pp_mean_data, pp_std_data=pp_std_data, aser_data=aser_data, posneg=posneg), code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read()
+ open(os.path.join(os.getcwd(), 'JS_Functions', "correct_cb.js")).read())
posneg_cb = CustomJS(args=dict(series_colours_DS=series_colours_DS, peak_mz=peak_mz, bg_mz=bg_mz, proc_mz=proc_mz, ser_act=ser_act, series_sele=series_sele, series_data=series_data, series_mz=series_mz,
series_names=series_names, series_masses=series_masses, pp_mean_data=pp_mean_data, pp_std_data=pp_std_data, aser_data=aser_data, posneg=posneg), code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read()
+ open(os.path.join(os.getcwd(), 'JS_Functions', "posneg_cb.js")).read())
posneg.js_on_change('value', posneg_cb)
class DQTapTool(TapTool):
    # TapTool subclass whose only change is the toolbar tooltip: it
    # documents the tap gesture used to correct a peak's m/z value.
    # The implementation is supplied as raw BokehJS source.
    # language=JavaScript
    __implementation__ = JavaScript("""
import {TapTool} from "models/tools";
export class DQTapTool extends TapTool {
static __name__ = 'DQTapTool'
get tooltip() {
return 'Correct Peak. \\n (Tap on data point to set value for mass calculation of respective to tapped m/z.)';
}
}
""")
class RenamedBoxSelectTool(BoxSelectTool):
    # BoxSelectTool subclass that only overrides the toolbar tooltip to
    # describe the drag gesture for marking a peak range.
    # language=JavaScript
    __implementation__ = JavaScript("""
import {BoxSelectTool} from "models/tools";
export class RenamedBoxSelectTool extends BoxSelectTool {
static __name__ = 'RenamedBoxSelectTool'
get tooltip() {
return 'Mark Peak \\n (Press left mouse button and hold to select range.)';
}
}
""")
box_selectdq = RenamedBoxSelectTool(dimensions='width', callback=sele_cb, name='Select Peak')
tapdq = DQTapTool(name='Select Peak', callback=correct_cb)
# tapdq.js_on_event(Tap, correct_cb)
# Add lines to graph
plot_canvas.add_glyph(bg_mz,sel_lines['Background'])
highres_canvas.add_glyph(bg_mz4k,sel_lines4k['Background'])
svg_canvas.add_glyph(bg_mzsvg,sel_linessvg['Background'])
ppml = plot_canvas.add_glyph(pp_mean_data, pp_mean, visible=False)
ppsl = plot_canvas.add_glyph(pp_std_data , pp_std , visible=False)
plot_canvas.toolbar.active_tap=None
plot_canvas.toolbar.active_drag=box_selectdq
# Toggle that shows/hides the predicted-peak overlay (mean and std lines);
# label typo fixed ("adjecent" -> "adjacent").
pp = Toggle(label="Predict adjacent peaks", width=150, height=30)
pp.js_link('active', ppml, 'visible')
pp.js_link('active', ppsl, 'visible')
# pl.js_link('active', plpc, 'visible')
# pl.js_link('active', pl4k, 'visible')
# Defining Mass Finder
mass_finder_line= MultiLine(xs="xs", ys="ys", line_color="#002147", line_width=2, line_alpha=0.5)
mass_finder_data=AjaxDataSource(data=dict(xs=[], ys=[]))
mfl = plot_canvas.add_glyph(mass_finder_data, mass_finder_line, visible=False)
# Define Hovertool
hover = HoverTool()
hover.tooltips = [("m/z", "$x{5.2f}"), ("Abundance", "$y %")]
plot_canvas.add_tools(hover)
plot_canvas.add_tools(box_selectdq)
plot_canvas.add_tools(tapdq)
columns = [
TableColumn(field="x_low", title="Lower m/z", formatter=NumberFormatter(format='0,0.00')),
TableColumn(field="x_upp", title="Upper m/z", formatter=NumberFormatter(format='0,0.00')),
TableColumn(field="x_max", title="mz of Max", formatter=NumberFormatter(format='0,0.00')),
TableColumn(field="max_int", title="Intensity of Maximum", formatter=NumberFormatter(format='0,0.00')),
TableColumn(field="charge", title="Charge"),
]
showtab = DataTable(source=aser_data, name=i_series, columns=columns, width=580, height=300, editable=False, index_position=None)
# show_tab_cb=CustomJS(args=dict(peak_mz=peak_mz, bg_mz=bg_mz, proc_mz=proc_mz, ser_act=ser_act, series_sele=series_sele, series_data=series_data, series_mz=series_mz,
# series_names=series_names, series_masses=series_masses, pp_mean_data=pp_mean_data, pp_std_data=pp_std_data, aser_data=aser_data, posneg=posneg), code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read()
# + open(os.path.join(os.getcwd(), 'JS_Functions', "showtab_cb.js")).read())
# aser_data.js_on_change('patching', show_tab_cb)
template="""
<div style="background:<%=
(function colorfromint(){
return(Colour)
}()) %>;
color: <%=
(function colorfromint(){
return(Colour)
}()) %>;">
<%= value %>
</font>
</div>
"""
formatter = HTMLTemplateFormatter(template=template)
columns_mass_table = [
TableColumn(field="Colour", title="Colour", formatter=formatter, width=120),
TableColumn(field="Series", title="Series"),
TableColumn(field="Mass", title="Mass", formatter=NumberFormatter(format='0,0.00')),
TableColumn(field="Uncertainty", title="Uncertainty", formatter=NumberFormatter(format='0,0.00')),
]
masses_table = DataTable(source=series_masses, name='Mass table', columns=columns_mass_table, width=370, height=300, index_position=None, editable=False)
n_complexes = 10
SU_act = AjaxDataSource(dict(name=[],mass=[],min=[],max=[], stride=[]))
columns_complex_table = [
TableColumn(field="name", title="Subunit", width=250),
TableColumn(field="mass", title="Mass", formatter=NumberFormatter(format='0,0.00'), width=100),
TableColumn(field="min", title="Min. #", formatter=NumberFormatter(format='0,0'), width=50),
TableColumn(field="max", title="Max. #", formatter=NumberFormatter(format='0,0'), width=50),
TableColumn(field="stride", title="Stride", formatter=NumberFormatter(format='0,0'), width=50),
]
complex_table = DataTable(source=SU_act, name='Complex table', columns=columns_complex_table, width=450, height=280, index_position=None, editable=True)
# Define buttons for adding subunits
Add_SU_cb=CustomJS(args=dict(SU_act=SU_act), code=open(os.path.join(os.getcwd(), 'JS_Functions', "Add_SU_cb.js")).read() )
Del_SU_cb=CustomJS(args=dict(SU_act=SU_act), code=open(os.path.join(os.getcwd(), 'JS_Functions', "del_SU_cb.js")).read() )
SU_add_button=Button(label='Add subunit', width=120, height=30, button_type='success')
SU_del_button=Button(label='Delete subunit', width=120, height=30, button_type='danger')
SU_add_button.js_on_event(ButtonClick, Add_SU_cb)
SU_del_button.js_on_event(ButtonClick, Del_SU_cb)
comment_window = TextAreaInput(value="Add comments here", width = 340, height= 280)
stoich = AjaxDataSource(dict(stoichiometry=[], mass=[], mass_diff=[]))
diff_match = TextInput(value= str(1000.0), title='Allowed ΔMass', disabled=False, width=150, height=50)
columns_match_table = [
TableColumn(field="stoichiometry", title="Stoichiometry"),
TableColumn(field="mass", title="Mass of Combination", formatter=NumberFormatter(format='0,0.00')),
TableColumn(field="mass_diff", title="Difference to Measured Mass", formatter=NumberFormatter(format='0,0.00')),
]
# Further options to implement ("Export SVG","expo_svg"), ("Export high-res PNG","expo_svg")]
match_table = DataTable(source=stoich, name='Complex table', columns=columns_match_table, width=534, height=280, index_position=None)
mass_match_cb=CustomJS(args=dict(ser_match=ser_match, diff_match=diff_match,series_masses=series_masses, stoich=stoich, SU_act=SU_act), code=open(os.path.join(os.getcwd(), 'JS_Functions', "MassMatching_cb.js")).read())
mass_match_button=Button(label='Stoichiometry', width=150, height=30, button_type='success')
mass_match_button.js_on_event(ButtonClick, mass_match_cb)
cropping_slider = RangeSlider(start=0.0, end=100000.0, value=(0.0,100000.0), name='cropping_slider', step=100, width= 150, height=30)
gaussian_smooth = Slider(value=0.0, start=0, end=50, step=1, name='gau_sigma', width=150, height=30) #TextInput(value=str(0.0), disabled=False, width=100, height=30)
n_smooth = Slider(value=1, start=0, end=10, step=1, name='gau_rep', width=150, height=30)
intensity_threshold = Slider(value=0.0, start=0, end=100, step=0.1, name='intensity_threshold', width=150, height=30)#TextInput(value=str(0.0), disabled=False, width=100, height=30)
substract = Slider(value=0.0, start=0, end=100, step=1, name='sub_value', width=150, height=30) #TextInput(value=str(0.0), disabled=False, width=100, height=30)
# adduct_mass = TextInput(value=str(0.0), disabled=False, width=100, height=30)
# data_reduction = TextInput(value=str(0.0), disabled=False, width=100, height=30)
toggle_cb=CustomJS(code=''' if (cb_obj.active == true) {cb_obj.label='on'} else {cb_obj.label='off'} ''')
### MASS FINDER ###
mass_finder_header = Div(text= " <h2>Mass Finder</h2>", height=45, width=400 )
# mass_finder_range_text = Div(text= " Range mz:", width= 150, height=30 )
mass_finder_range_slider = RangeSlider(start=1.0, end=500.0, value=(1.0,50.0), title='Charge range:',name='mass_finder_range_slider', step=1, width= 250, height=30)
# mass_finder_mass_text = Div(text= " Mass of Complex (kDa):", width= 150, height=30 )
mass_finder_mass = Slider(value=100, start=0.0, end=1000.0, step=10.0, title='Mass of Complex (kDa)',name='gau_sigma', width=250, height=30)
mass_finder_exact_mass_text = Div(text= "Enter exact Mass (Da)", width= 150, height=30 )
mass_finder_exact_mass_sele = TextInput(value=str(mass_finder_mass.value*1000), disabled=False, width=100, height=30)
mass_finder_line_text = Div(text= "Show mz prediction", width= 150, height=30 )
mass_finder_line_sele = Toggle(label='off', active=False, width=100, height=30, callback=toggle_cb)
mass_finder_cb =CustomJS(args=dict(mass_finder_line_sele=mass_finder_line_sele, raw_mz=raw_mz, mass_finder_data=mass_finder_data, mass_finder_exact_mass_sele=mass_finder_exact_mass_sele, mass_finder_mass=mass_finder_mass, mass_finder_range_slider=mass_finder_range_slider, mfl=mfl), code=open(os.path.join(os.getcwd(), 'JS_Functions', "mass_finder_cb.js")).read())
mass_finder_exact_cb =CustomJS(args=dict(mass_finder_line_sele=mass_finder_line_sele, mass_finder_exact_mass_sele=mass_finder_exact_mass_sele, mass_finder_mass=mass_finder_mass), code=open(os.path.join(os.getcwd(), 'JS_Functions', "mass_finder_exact_cb.js")).read())
mass_finder_exact_mass_sele.js_on_change('value', mass_finder_exact_cb)
mass_finder_column=Column(mass_finder_header,mass_finder_mass, mass_finder_range_slider, Row(mass_finder_exact_mass_text,mass_finder_exact_mass_sele), Row(mass_finder_line_text, mass_finder_line_sele), visible=False)
mass_finder.js_link('active', mass_finder_column, 'visible')
mass_finder_line_sele.js_link('active', mfl, 'visible')
mass_finder_mass.js_on_change('value', mass_finder_cb)
mass_finder_line_sele.js_on_change('active', mass_finder_cb)
mass_finder_range_slider.js_on_change('value',mass_finder_cb)
### DATA PROCESSING ###
cropping = Div(text= " Range mz:", width= 150, height=30 )
# crop_max = Div(text= " ", width= 150, height=30 )
gau_name = Div(text= " Gaussian Smoothing:", width= 150, height=30 )
n_smooth_name = Div(text= " Repeats of Smoothing:", width= 150, height=30 )
# bin_name = Div(text= " Bin Every:", width= 150, height=30 )
int_name = Div(text= " Intensity Threshold (%)", width= 150, height=30 )
sub_name = Select(options=['Substract Minimum', 'Substract Line', 'Substract Curved'], name='sub_mode', value='Substract Minimum', width= 150, height=30 )
# add_name = Div(text= " Adduct Mass (Da)", width= 150, height=30 )
# dat_name = Div(text= " Data Reduction (%)", width= 150, height=30 )
#pro_name = Div(text= " bla", width= 150, height=30 )
dt_name = Div(text= " <h2>Data Processing</h2>", height=45 )
dtp_update=CustomJS(args=dict(DataProcessingParameters=DataProcessingParameters), code=open(os.path.join(os.getcwd(), 'JS_Functions', "DataProcessingParameters_update_cb.js")).read())
cropping_slider.js_on_change('value', dtp_update)
n_smooth.js_on_change('value', dtp_update)
gaussian_smooth.js_on_change('value', dtp_update)
intensity_threshold.js_on_change('value', dtp_update)
substract.js_on_change('value', dtp_update)
sub_name.js_on_change('value', dtp_update)
processing_cb=CustomJS(args=dict(peak_mz=peak_mz, bg_mz=bg_mz, raw_mz=raw_mz, proc_mz=proc_mz, DataProcessingParameters=DataProcessingParameters, series_data=series_data, series_sele=series_sele, series_names=series_names, series_mz=series_mz), code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read() + open(os.path.join(os.getcwd(), 'JS_Functions', "data_processing_cb.js")).read())
process_data=Button(label='Process Data', width=100, height=30, button_type='success', callback=processing_cb)
reset_cb=CustomJS(args=dict(process_data=process_data, peak_mz=peak_mz, bg_mz=bg_mz, raw_mz=raw_mz, proc_mz=proc_mz, series_data=series_data, series_sele=series_sele, series_names=series_names, series_mz=series_mz), code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read() + open(os.path.join(os.getcwd(), 'JS_Functions', "reset_cb.js")).read())
reset_data=Button(label='Reset Processing', width=80, height=30, button_type='danger', callback=reset_cb)
dt_names = Column(cropping, gau_name, n_smooth_name ,int_name, sub_name, reset_data)#, pro_name)
dt_inp=Column(cropping_slider, gaussian_smooth, n_smooth, intensity_threshold, substract, process_data)
grid_text = Div(text= " Show Grid", width= 150, height=30 )
grid_sele = Toggle(label='on', active=True, width=100, height=30, callback=toggle_cb)
for x_grid_line in plot_canvas.xgrid:
grid_sele.js_link('active', x_grid_line, 'visible')
for y_grid_line in plot_canvas.ygrid:
grid_sele.js_link('active', y_grid_line, 'visible')
for x_grid_line in highres_canvas.xgrid:
grid_sele.js_link('active', x_grid_line, 'visible')
for y_grid_line in highres_canvas.ygrid:
grid_sele.js_link('active', y_grid_line, 'visible')
grid_sele.js_link('active', plot_canvas, 'outline_line_alpha')
grid_sele.js_link('active', highres_canvas, 'outline_line_alpha')
labels_text = Div(text= " Show labels", width= 150, height=30 )
labels_sele = Toggle(label='on', active=True, width=100, height=30, callback=toggle_cb)
ticks_text = Div(text= " Show Ticks", width= 150, height=30 )
ticks_sele = Toggle(label='on', active=True, width=100, height=30, callback=toggle_cb)
axes_text = Div(text= " Show Axes", width= 150, height=30 )
axes_sele = Toggle(label='on', active=True, width=100, height=30, callback=toggle_cb)
for item in plot_canvas.axis + highres_canvas.axis + svg_canvas.axis:
labels_sele.js_link('active', item ,'axis_label_text_alpha')
labels_sele.js_link('active', item ,'major_label_text_alpha')
ticks_sele.js_link('active', item ,'major_tick_line_alpha')
ticks_sele.js_link('active', item ,'minor_tick_line_alpha')
for item in plot_canvas.axis + highres_canvas.axis + svg_canvas.axis:
axes_sele.js_link('active', item ,'axis_line_alpha')
fig_text = Div(text= "Note: 4K/SVG figure creation on Safari on MacOS is slow and might crash. Please save high-resolution PNG figures in a different browser. SVG creation takes a few seconds.\n", width= 300, height=70 )
peakmz_text = Div(text= " Show mz of Peaks", width= 150, height=30 )
peakmz_sele = Toggle(label='off', active=False, width=100, height=30, callback=toggle_cb)
plpc={}
pl4k={}
plsvg={}
# Attach each series' spectrum trace to all three canvases, plus the
# per-peak marker lines (hidden until the "Show mz of Peaks" toggle is on).
for i_series in series_names:
    plot_canvas.add_glyph(series_mz[i_series], sel_lines[i_series])
    highres_canvas.add_glyph(series_mz4k[i_series], sel_lines4k[i_series])
    svg_canvas.add_glyph(series_mzsvg[i_series], sel_linessvg[i_series])
    plpc[i_series] = plot_canvas.add_glyph(peak_mz[i_series], peak_lines[i_series], visible=False)
    pl4k[i_series] = highres_canvas.add_glyph(peak_mz4k[i_series], peak_lines4k[i_series], visible=False)
    # Bug fix: the SVG canvas previously reused peak_lines[i_series] (the
    # screen-canvas glyph); it must use the dedicated peak_linessvg glyph,
    # which was built alongside the others but never referenced.
    plsvg[i_series] = svg_canvas.add_glyph(peak_mzsvg[i_series], peak_linessvg[i_series], visible=False)
    peakmz_sele.js_link('active', plpc[i_series], 'visible')
    peakmz_sele.js_link('active', pl4k[i_series], 'visible')
    peakmz_sele.js_link('active', plsvg[i_series], 'visible')
save4k_text= Button(label= " Create 4K PNG figure: ", width= 150, height=30, button_type='success')
# save4k_text.js_link('active', highres_canvas, 'visible')
savesvg_text= Button(label= " Create SVG figure: ", width= 150, height=30, button_type='success' )
# savesvg_text.js_link('active', svg_canvas, 'visible')
linew_text= Div(text= " Line width ", width= 150, height=30 )
linew_inp = TextInput(value=str(sel_lines['Background'].line_width), disabled=False, width=100, height=30)
save4k = ToolbarBox()
save4k.toolbar = Toolbar(tools=highres_canvas.tools, logo=None)
save4k.toolbar_location ='above'
savesvg = ToolbarBox()
savesvg.toolbar = Toolbar(tools=svg_canvas.tools, logo=None)
savesvg.toolbar_location ='above'
graph_text=Column(grid_text, labels_text, ticks_text, axes_text, peakmz_text, linew_text)
graph_act=Column(grid_sele, labels_sele, ticks_sele, axes_sele, peakmz_sele, linew_inp)
savesvg.visible=False
save4k.visible=False
posneg_text = Div(text= " Ion Charge", width= 150, height=30 )
data_header = Div(text= " <h2>Data Processing</h2>", height=45, width=400 )
posneg_header = Div(text= " <h2>Instrument Mode</h2>", height=45, width=400 )
# save4k.toolbar_options={'logo':None}
data_text = Div(text= "Note: The data processing might take a couple of seconds. Please stay patient and refrain from pressing the Process Data button repeatedly. \n", width= 300, height=70 )
data_tools= Column(data_header, Row(dt_names, dt_inp), data_text, visible=False, name='Data Processing')
row_graphic=Row(graph_text, graph_act, width=150)
graph_header = Div(text= " <h2>Graphics options</h2>", height=45, width=400 )
window_header = Div(text= " <h2>Window Range</h2>", height=45, width=400 )
range_spacer=Div(text= "", width= 100, height=20 )
range_min=Div(text= "<b>Min</b>", width= 70, height=20 )
range_max=Div(text= "<b>Max</b>", width= 70, height=20 )
x_range_text=Div(text= " <b>X-Range (m/z)</b>", width= 100, height=30 )
y_range_text=Div(text= " <b>Y-Range (%)</b>", width= 100, height=30 )
x_range_min=TextInput(value=str(1000.0), disabled=False, width=70, height=30)
x_range_max=TextInput(value=str(20000.0), disabled=False, width=70, height=30)
y_range_min=TextInput(value=str(0.0), disabled=False, width=70, height=30)
y_range_max=TextInput(value=str(100.0), disabled=False, width=70, height=30)
range_text=Column(range_spacer,x_range_text,y_range_text)
range_min=Column(range_min, x_range_min,y_range_min)
range_max=Column(range_max, x_range_max,y_range_max)
range_row=Row(range_text, range_min, range_max)
range_cb=CustomJS(args=dict(plot_canvas=plot_canvas, x_range_min=x_range_min, x_range_max=x_range_max, y_range_min=y_range_min, y_range_max=y_range_max), code=open(os.path.join(os.getcwd(), 'JS_Functions', "range_cb.js")).read())
set_spacer=Div(text= "", width= 100, height=30)
set_range=Button(label='Set range', width=150, height=30, button_type='success', callback=range_cb)
set_range.js_on_event(ButtonClick, range_cb)
graph_layout= Column(graph_header, row_graphic, Row(save4k_text, save4k), Row(savesvg_text, savesvg), fig_text, window_header, range_row, Row(set_spacer, set_range), visible=False)
graph_opt.js_link('active', graph_layout, 'visible')
fig4k_cb=CustomJS(args=dict(save4k=save4k, plot_canvas=plot_canvas, svg_canvas=svg_canvas, highres_canvas=highres_canvas , series_names=series_names,\
bg_mz=bg_mz, bg_mz4k=bg_mz4k, bg_mzsvg=bg_mzsvg, series_mz4k=series_mz4k, series_mzsvg=series_mzsvg, series_mz=series_mz,save4k_text=save4k_text), code=open(os.path.join(os.getcwd(), 'JS_Functions', "fig4k_cb.js")).read())
figsvg_cb=CustomJS(args=dict(savesvg=savesvg, plot_canvas=plot_canvas, svg_canvas=svg_canvas, highres_canvas=highres_canvas , series_names=series_names,\
bg_mz=bg_mz, bg_mz4k=bg_mz4k, bg_mzsvg=bg_mzsvg, series_mz4k=series_mz4k, series_mzsvg=series_mzsvg, series_mz=series_mz,savesvg_text=savesvg_text), code=open(os.path.join(os.getcwd(), 'JS_Functions', "figsvg_cb.js")).read())
save4k_text.js_on_event(ButtonClick, fig4k_cb)
savesvg_text.js_on_event(ButtonClick, figsvg_cb)
graphics_cb=CustomJS(args=dict(linew_txt=linew_inp, ticks_sele=ticks_sele, labels_sele=labels_sele, grid_sele=grid_sele, \
plot_canvas=plot_canvas, highres_canvas=highres_canvas, sel_lines=sel_lines, sel_linessvg=sel_linessvg, sel_lines4k=sel_lines4k, series_names=series_names), code=open(os.path.join(os.getcwd(), 'JS_Functions', "graphics_cb.js")).read())
linew_inp.js_on_change('value', graphics_cb)
drop_cb = CustomJS(args=dict(GroEL_mz=GroEL_mz, peak_lines=peak_lines, peak_lines4k=peak_lines4k, col=col, sel_lines=sel_lines, sel_lines4k=sel_lines4k, cropping_slider=cropping_slider, gaussian_smooth=gaussian_smooth, \
n_smooth=n_smooth, intensity_threshold=intensity_threshold, process_data=process_data, substract=substract, sub_name=sub_name, ser_match=ser_match, \
ser_act=ser_act, comment_window=comment_window, stoich=stoich, SU_act=SU_act, posneg=posneg, series_colours_DS=series_colours_DS, series_dict=series_dict, \
DataProcessingParameters=DataProcessingParameters, series_names=series_names, series_masses=series_masses, aser_data=aser_data, series_data=series_data, \
series_sele=series_sele, series_mz=series_mz, raw_mz=raw_mz, proc_mz=proc_mz, bg_mz=bg_mz, peak_mz=peak_mz, plot_canvas=plot_canvas, \
pp_mean_data=pp_mean_data, pp_std_data=pp_std_data), \
code=open(os.path.join(os.getcwd(), 'JS_Functions', "navia_functions.js")).read() + open(os.path.join(os.getcwd(), 'JS_Functions', "drop_cb.js")).read())
dropdown = Dropdown(label="File", menu=menu, width=150, height=30, callback=drop_cb)
topleftspacer=Spacer(height=30, width=23)
row1 = Row(topleftspacer, dropdown, ser_act, col, posneg, mass_match, pp, mass_finder, dt_button, graph_opt, help_button)
row2 = Row(plot_canvas,Column(mass_finder_column, data_tools, graph_layout), height=768)
row3 = Row(topleftspacer,showtab, masses_table, comment_window)#, row_graphic)
SU_header = Div(text= " <h2>Subunits</h2>", height=35, width=580 )
MM_header = Div(text= " <h2>Stoichiometry</h2>", height=35, width=684 )
SU_text = Div(text= "Note: Edit new subunits in the table on the left. Delete Subunits deletes the highlighted subunit.\n", width= 120 )
SU_column=Column(Spacer(height=30), SU_add_button, SU_del_button, SU_text)
MM_column=Column(Spacer(height=30), ser_match, diff_match, Spacer(height=10), Row(mass_match_button))
mass_match_and_comment = Row(topleftspacer, Column(Row(SU_header, MM_header), Row(complex_table, SU_column, match_table, MM_column)), visible=False)
# collumnasd =[row1, p]
# layout=gridplot(collumnasd, ncols=1)
# Logos arrive top-down from imageio, but image_rgba draws rows bottom-up,
# so flip them vertically once at load time. np.flipud replaces the manual
# row-reversal loops; ascontiguousarray keeps the flipped view serializable.
left_logo_img = np.ascontiguousarray(np.flipud(iio.imread(os.path.join(os.getcwd(), 'Logo', 'navia_logo.png'))))
right_logo_img = np.ascontiguousarray(np.flipud(iio.imread(os.path.join(os.getcwd(), 'Logo', 'oxford_rect.png'))))
p = figure(width=300, height=100, toolbar_location=None)
p.x_range.range_padding = p.y_range.range_padding = 0
# must give a vector of images; previously this implicitly reused the
# leftover `new_img` scratch buffer (the right logo) — now passed explicitly.
p.image_rgba(image=[right_logo_img], x=0, y=0, dw=10, dh=10)
left_logo = figure(width=300, height=100, toolbar_location=None, tools='')
left_logo.x_range.range_padding = left_logo.y_range.range_padding = 0
left_logo.image_rgba(image=[left_logo_img], x=0, y=0, dw=10, dh=10)
# left_logo.image_url(url=['navia_logo.png'], x=0, y=1, w=1, h=1)
left_logo.xgrid.grid_line_color = None
left_logo.ygrid.grid_line_color = None
left_logo.outline_line_alpha=0.0
left_logo.axis.minor_tick_line_alpha=0.0
left_logo.axis.major_label_text_alpha=0.0
left_logo.axis.major_tick_line_alpha=0.0
left_logo.axis.axis_line_alpha=0.0
left_logo.axis.axis_label_text_alpha=0.0
right_logo = figure(width=276, height=100, toolbar_location=None, tools='')
right_logo.x_range.range_padding = right_logo.y_range.range_padding = 0
right_logo.image_rgba(image=[right_logo_img], x=0, y=0, dw=10, dh=10)
# right_logo.image_url(url=['oxford_rect.png'], x=0, y=1, w=1, h=1)
right_logo.xgrid.grid_line_color = None
right_logo.ygrid.grid_line_color = None
right_logo.outline_line_alpha=0.0
right_logo.axis.minor_tick_line_alpha=0.0
right_logo.axis.major_label_text_alpha=0.0
right_logo.axis.major_tick_line_alpha=0.0
right_logo.axis.axis_line_alpha=0.0
right_logo.axis.axis_label_text_alpha=0.0
row0=Row(left_logo, Spacer(width=160), Div(text= "<h> <b>Na</b>tive <b>Vi</b>sual <b>A</b>nalyser </h>", width= 596, height=70 , style={'font-size': '42px', 'color':'#002147'}, align='center'), right_logo)
row2b=Row(topleftspacer,Div(text= " <h2>Peaks of Active Series (m/z)</h2>", width= 580, height=35 ),Div(text= " <h2>Masses</h2>", width= 370, height=35 ), Div(text= " <h2>Notes on Spectrum</h2>", width= 280, height=35 ))
layout = Column(row0, row1, row2, row2b, row3, mass_match_and_comment)
dt_button.js_link('active', data_tools, 'visible')
mass_match.js_link('active', mass_match_and_comment, 'visible')
highres_text = Div(text= "<b> This is an empty panel for the 4k plot. </b>", width= 300, height=70)
svg_text = Div(text= "<b> This is an empty panel for the SVG plot. </b>", width= 300, height=70)
tab=Tabs(tabs=[
Panel(child=layout, title='Plot'),
Panel(child=Column(highres_text,highres_canvas), title='4k Plot'),
Panel(child=Column(svg_text,svg_canvas), title='SVG Plot')
], tabs_location='below')
show(tab)
| StarcoderdataPython |
3349522 | <reponame>lffloyd/OCTIS<filename>octis/models/ETM.py
from __future__ import print_function
from octis.models.early_stopping.pytorchtools import EarlyStopping
import torch
import numpy as np
from octis.models.ETM_model import data
from sklearn.feature_extraction.text import CountVectorizer
from torch import nn, optim
from octis.models.ETM_model import etm
from octis.models.base_etm import BaseETM
import gensim
import pickle as pkl
class ETM(BaseETM):
    """OCTIS wrapper around the Embedded Topic Model (ETM).

    Builds vocabulary/bag-of-words inputs from an OCTIS dataset, trains the
    ``octis.models.ETM_model.etm.ETM`` network, and returns the standard OCTIS
    result dictionary (topics, topic-word matrix, topic-document matrix).
    """

    def __init__(self, num_topics=10, num_epochs=100, t_hidden_size=800, rho_size=300, embedding_size=300,
                 activation='relu', dropout=0.5, lr=0.005, optimizer='adam', batch_size=128, clip=0.0,
                 wdecay=1.2e-6, bow_norm=1, device='cpu', top_word=10, train_embeddings=True, embeddings_path=None,
                 use_partitions=True):
        """Record hyperparameters only; the network itself is built in ``set_model``."""
        super(ETM, self).__init__()

        self.hyperparameters = dict()
        self.hyperparameters['num_topics'] = int(num_topics)
        self.hyperparameters['num_epochs'] = int(num_epochs)
        self.hyperparameters['t_hidden_size'] = int(t_hidden_size)
        self.hyperparameters['rho_size'] = int(rho_size)
        self.hyperparameters['embedding_size'] = int(embedding_size)
        self.hyperparameters['activation'] = activation
        self.hyperparameters['dropout'] = float(dropout)
        self.hyperparameters['lr'] = float(lr)
        self.hyperparameters['optimizer'] = optimizer
        self.hyperparameters['batch_size'] = int(batch_size)
        self.hyperparameters['clip'] = float(clip)
        self.hyperparameters['wdecay'] = float(wdecay)
        self.hyperparameters['bow_norm'] = int(bow_norm)
        self.hyperparameters['train_embeddings'] = bool(train_embeddings)
        self.hyperparameters['embeddings_path'] = embeddings_path
        self.top_word = top_word
        self.early_stopping = None
        self.device = device
        # BoW inputs per split; populated by set_model()/preprocess().
        self.test_tokens, self.test_counts = None, None
        self.valid_tokens, self.valid_counts = None, None
        self.train_tokens, self.train_counts, self.vocab = None, None, None
        self.use_partitions = use_partitions

        self.model = None
        self.optimizer = None
        self.embeddings = None

    def train_model(self, dataset, hyperparameters=None, top_words=10):
        """Train on *dataset* and return the OCTIS result dictionary.

        :param dataset: OCTIS dataset object
        :param hyperparameters: optional dict overriding constructor values
        :param top_words: number of words kept per topic in the output
        """
        if hyperparameters is None:
            hyperparameters = {}
        self.set_model(dataset, hyperparameters)
        self.top_word = top_words
        self.early_stopping = EarlyStopping(patience=5, verbose=True)
        for epoch in range(0, self.hyperparameters['num_epochs']):
            # _train_epoch returns False on NaN loss or early stopping.
            continue_training = self._train_epoch(epoch)
            if not continue_training:
                break

        # load the last checkpoint with the best model
        # self.model.load_state_dict(torch.load('etm_checkpoint.pt'))

        if self.use_partitions:
            result = self.inference()
        else:
            result = self.get_info()
        return result

    def set_model(self, dataset, hyperparameters):
        """Prepare BoW data, resolve hyperparameters and build model + optimizer."""
        if self.use_partitions:
            train_data, validation_data, testing_data = dataset.get_partitioned_corpus(use_validation=True)

            data_corpus_train = [' '.join(i) for i in train_data]
            data_corpus_test = [' '.join(i) for i in testing_data]
            data_corpus_val = [' '.join(i) for i in validation_data]

            vocab = dataset.get_vocabulary()
            self.vocab = {i: w for i, w in enumerate(vocab)}
            vocab2id = {w: i for i, w in enumerate(vocab)}

            self.train_tokens, self.train_counts, self.test_tokens, self.test_counts, self.valid_tokens, \
            self.valid_counts = self.preprocess(vocab2id, data_corpus_train, data_corpus_test, data_corpus_val)
        else:
            data_corpus = [' '.join(i) for i in dataset.get_corpus()]
            vocab = dataset.get_vocabulary()
            self.vocab = {i: w for i, w in enumerate(vocab)}
            vocab2id = {w: i for i, w in enumerate(vocab)}

            self.train_tokens, self.train_counts = self.preprocess(vocab2id, data_corpus, None)

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.set_default_hyperparameters(hyperparameters)
        self.load_embeddings()
        ## define model and optimizer
        self.model = etm.ETM(num_topics=self.hyperparameters['num_topics'], vocab_size=len(self.vocab.keys()),
                             t_hidden_size=int(self.hyperparameters['t_hidden_size']),
                             rho_size=int(self.hyperparameters['rho_size']),
                             emb_size=int(self.hyperparameters['embedding_size']),
                             theta_act=self.hyperparameters['activation'],
                             embeddings=self.embeddings,
                             train_embeddings=self.hyperparameters['train_embeddings'],
                             enc_drop=self.hyperparameters['dropout']).to(self.device)
        print('model: {}'.format(self.model))

        self.optimizer = self.set_optimizer()

    def _train_epoch(self, epoch):
        """Run one training epoch (plus a validation pass when available).

        :return: True to continue training, False to stop (NaN validation
                 loss or early stopping triggered).
        """
        self.data_list = []
        self.model.train()
        acc_loss = 0
        acc_kl_theta_loss = 0
        cnt = 0
        indices = torch.arange(0, len(self.train_tokens))
        indices = torch.split(indices, self.hyperparameters['batch_size'])

        for idx, ind in enumerate(indices):
            self.optimizer.zero_grad()
            self.model.zero_grad()

            data_batch = data.get_batch(self.train_tokens, self.train_counts, ind, len(self.vocab.keys()),
                                        self.hyperparameters['embedding_size'], self.device)
            sums = data_batch.sum(1).unsqueeze(1)
            if self.hyperparameters['bow_norm']:
                normalized_data_batch = data_batch / sums
            else:
                normalized_data_batch = data_batch
            recon_loss, kld_theta = self.model(data_batch, normalized_data_batch)
            total_loss = recon_loss + kld_theta
            total_loss.backward()

            if self.hyperparameters["clip"] > 0:
                torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                               self.hyperparameters["clip"])
            self.optimizer.step()

            acc_loss += torch.sum(recon_loss).item()
            acc_kl_theta_loss += torch.sum(kld_theta).item()
            cnt += 1
            log_interval = 20
            if idx % log_interval == 0 and idx > 0:
                cur_loss = round(acc_loss / cnt, 2)
                cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
                cur_real_loss = round(cur_loss + cur_kl_theta, 2)
                print('Epoch: {} .. batch: {}/{} .. LR: {} .. KL_theta: {} .. Rec_loss: {}'
                      ' .. NELBO: {}'.format(epoch + 1, idx, len(indices),
                                             self.optimizer.param_groups[0]['lr'],
                                             cur_kl_theta, cur_loss, cur_real_loss))
            # Keep the normalized batches around: get_info() re-infers theta from them.
            self.data_list.append(normalized_data_batch)

        cur_loss = round(acc_loss / cnt, 2)
        cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
        cur_real_loss = round(cur_loss + cur_kl_theta, 2)
        print('*' * 100)
        print('Epoch----->{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
            epoch + 1, self.optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss,
            cur_real_loss))
        print('*' * 100)

        # VALIDATION ###
        if self.valid_tokens is None:
            return True
        else:
            model = self.model.to(self.device)
            model.eval()
            with torch.no_grad():
                val_acc_loss = 0
                val_acc_kl_theta_loss = 0
                val_cnt = 0
                indices = torch.arange(0, len(self.valid_tokens))
                indices = torch.split(indices, self.hyperparameters['batch_size'])
                for idx, ind in enumerate(indices):
                    self.optimizer.zero_grad()
                    self.model.zero_grad()

                    val_data_batch = data.get_batch(self.valid_tokens, self.valid_counts,
                                                    ind, len(self.vocab.keys()),
                                                    self.hyperparameters['embedding_size'], self.device)
                    sums = val_data_batch.sum(1).unsqueeze(1)
                    if self.hyperparameters['bow_norm']:
                        val_normalized_data_batch = val_data_batch / sums
                    else:
                        val_normalized_data_batch = val_data_batch
                    val_recon_loss, val_kld_theta = self.model(val_data_batch,
                                                               val_normalized_data_batch)

                    val_acc_loss += torch.sum(val_recon_loss).item()
                    val_acc_kl_theta_loss += torch.sum(val_kld_theta).item()
                    val_cnt += 1

                    val_total_loss = val_recon_loss + val_kld_theta

                # BUG FIX: these averages previously divided by the *training*
                # batch counter ``cnt`` (still in scope from the loop above)
                # instead of ``val_cnt``, which skewed the reported validation
                # loss whenever train and validation batch counts differed.
                val_cur_loss = round(val_acc_loss / val_cnt, 2)
                val_cur_kl_theta = round(val_acc_kl_theta_loss / val_cnt, 2)
                val_cur_real_loss = round(val_cur_loss + val_cur_kl_theta, 2)
                print('*' * 100)
                print('VALIDATION .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(
                    self.optimizer.param_groups[0]['lr'], val_cur_kl_theta, val_cur_loss,
                    val_cur_real_loss))
                print('*' * 100)
                if np.isnan(val_cur_real_loss):
                    return False
                else:
                    # Early stopping tracks the last batch's total validation loss.
                    self.early_stopping(val_total_loss, model)
                    if self.early_stopping.early_stop:
                        print("Early stopping")
                        return False
                    else:
                        return True

    def get_info(self):
        """Return topics, topic-word matrix and topic-document matrix (train data)."""
        topic_w = []
        self.model.eval()
        info = {}
        with torch.no_grad():
            theta, _ = self.model.get_theta(torch.cat(self.data_list))
            gammas = self.model.get_beta().cpu().numpy()
            for k in range(self.hyperparameters['num_topics']):
                if np.isnan(gammas[k]).any():
                    # to deal with nan matrices
                    topic_w = None
                    break
                else:
                    top_words = list(gammas[k].argsort()[-self.top_word:][::-1])
                    topic_words = [self.vocab[a] for a in top_words]
                    topic_w.append(topic_words)

        info['topic-word-matrix'] = gammas
        info['topic-document-matrix'] = theta.cpu().detach().numpy().T
        info['topics'] = topic_w
        return info

    def inference(self):
        """Return get_info() plus the topic-document matrix of the test split."""
        assert isinstance(self.use_partitions, bool) and self.use_partitions
        topic_d = []
        self.model.eval()
        indices = torch.arange(0, len(self.test_tokens))
        indices = torch.split(indices, self.hyperparameters['batch_size'])
        for idx, ind in enumerate(indices):
            data_batch = data.get_batch(self.test_tokens, self.test_counts,
                                        ind, len(self.vocab.keys()),
                                        self.hyperparameters['embedding_size'], self.device)
            sums = data_batch.sum(1).unsqueeze(1)
            if self.hyperparameters['bow_norm']:
                normalized_data_batch = data_batch / sums
            else:
                normalized_data_batch = data_batch
            theta, _ = self.model.get_theta(normalized_data_batch)
            topic_d.append(theta.cpu().detach().numpy())

        info = self.get_info()
        emp_array = np.empty((0, self.hyperparameters['num_topics']))
        topic_doc = np.asarray(topic_d)
        length = topic_doc.shape[0]
        # batch concatenation
        for i in range(length):
            emp_array = np.concatenate([emp_array, topic_doc[i]])
        info['test-topic-document-matrix'] = emp_array.T
        return info

    def set_default_hyperparameters(self, hyperparameters):
        """Override stored hyperparameters with any recognised keys in *hyperparameters*."""
        for k in hyperparameters.keys():
            if k in self.hyperparameters.keys():
                self.hyperparameters[k] = hyperparameters.get(k, self.hyperparameters[k])

    def partitioning(self, use_partitions=False):
        """Enable/disable train/validation/test partitioning."""
        self.use_partitions = use_partitions

    @staticmethod
    def preprocess(vocab2id, train_corpus, test_corpus=None, validation_corpus=None):
        """Vectorize the corpora to (token-indices, counts) pairs per document.

        Returns 2, 4 or 6 lists depending on which optional corpora are given:
        train (+ test) (+ validation), each as (tokens, counts).
        """
        def split_bow(bow_in, n_docs):
            indices = [[w for w in bow_in[doc, :].indices] for doc in range(n_docs)]
            counts = [[c for c in bow_in[doc, :].data] for doc in range(n_docs)]
            return indices, counts

        vec = CountVectorizer(
            vocabulary=vocab2id, token_pattern=r'(?u)\b\w+\b')
        # Fit on the union of all splits so the vocabulary mapping is shared.
        dataset = train_corpus.copy()
        if test_corpus is not None:
            dataset.extend(test_corpus)
        if validation_corpus is not None:
            dataset.extend(validation_corpus)
        vec.fit(dataset)
        idx2token = {v: k for (k, v) in vec.vocabulary_.items()}

        x_train = vec.transform(train_corpus)
        x_train_tokens, x_train_count = split_bow(x_train, x_train.shape[0])

        if test_corpus is not None:
            x_test = vec.transform(test_corpus)
            x_test_tokens, x_test_count = split_bow(x_test, x_test.shape[0])
            if validation_corpus is not None:
                x_validation = vec.transform(validation_corpus)
                x_val_tokens, x_val_count = split_bow(x_validation, x_validation.shape[0])
                return x_train_tokens, x_train_count, x_test_tokens, x_test_count, x_val_tokens, x_val_count
            else:
                return x_train_tokens, x_train_count, x_test_tokens, x_test_count
        else:
            if validation_corpus is not None:
                x_validation = vec.transform(validation_corpus)
                x_val_tokens, x_val_count = split_bow(x_validation, x_validation.shape[0])
                return x_train_tokens, x_train_count, x_val_tokens, x_val_count
            else:
                return x_train_tokens, x_train_count
| StarcoderdataPython |
1622864 | <gh_stars>0
from .base import BaseTest
from posthog.models import Event, Person, Element, Action, ActionStep
from freezegun import freeze_time # type: ignore
class TestEvents(BaseTest):
    """API tests for the /api/event endpoints (filtering, live actions, names,
    property names/values, and before/after time filters)."""
    TESTS_API = True
    ENDPOINT = 'event'

    def test_filter_events(self):
        """Events filtered by distinct_id include the matched person and elements."""
        person = Person.objects.create(properties={'email': '<EMAIL>'}, team=self.team, distinct_ids=["2", 'some-random-uid'])
        event1 = Event.objects.create(team=self.team, distinct_id="2", ip='8.8.8.8', elements=[
            Element(tag_name='button', text='something')
        ])
        Event.objects.create(team=self.team, distinct_id='some-random-uid', ip='8.8.8.8')
        Event.objects.create(team=self.team, distinct_id='some-other-one', ip='8.8.8.8')

        # Guard against query-count regressions on this endpoint.
        with self.assertNumQueries(10):
            response = self.client.get('/api/event/?distinct_id=2').json()
        self.assertEqual(response['results'][0]['person'], '<EMAIL>')
        self.assertEqual(response['results'][0]['elements'][0]['tag_name'], 'button')

    def test_filter_by_person(self):
        """person_id filter returns every event of all of the person's distinct_ids."""
        person = Person.objects.create(properties={'email': '<EMAIL>'}, distinct_ids=["2", 'some-random-uid'], team=self.team)

        Event.objects.create(team=self.team, distinct_id="2", ip='8.8.8.8')
        Event.objects.create(team=self.team, distinct_id='some-random-uid', ip='8.8.8.8')
        Event.objects.create(team=self.team, distinct_id='some-other-one', ip='8.8.8.8')

        response = self.client.get('/api/event/?person_id=%s' % person.pk).json()
        self.assertEqual(len(response['results']), 2)
        self.assertEqual(response['results'][0]['elements'], [])

    def _signup_event(self, distinct_id: str):
        # Helper: event whose element matches the "signed up" action step.
        sign_up = Event.objects.create(distinct_id=distinct_id, team=self.team, elements=[
            Element(tag_name='button', text='Sign up!')
        ])
        return sign_up

    def _pay_event(self, distinct_id: str):
        # Helper: event matching the "paid" action; second element intentionally
        # resembles the signup step to check events aren't double-matched.
        sign_up = Event.objects.create(distinct_id=distinct_id, team=self.team, elements=[
            Element(tag_name='button', text='Pay $10'),
            # check we're not duplicating
            Element(tag_name='div', text='Sign up!')
        ])
        return sign_up

    def _movie_event(self, distinct_id: str):
        # Helper: event matching the "watch movie" selector-based action step.
        # NOTE(review): unlike the other helpers this does not return the event.
        sign_up = Event.objects.create(distinct_id=distinct_id, team=self.team, elements=[
            Element(tag_name='a', attr_class=['watch_movie', 'play'], text='Watch now', attr_id='something', href='/movie', order=0),
            Element(tag_name='div', href='/movie', order=1)
        ])

    def test_live_action_events(self):
        """/api/event/actions/ matches events to actions (newest first) and skips
        deleted actions and non-matching events."""
        action_sign_up = Action.objects.create(team=self.team, name='signed up')
        ActionStep.objects.create(action=action_sign_up, tag_name='button', text='Sign up!')
        # 2 steps that match same element might trip stuff up
        ActionStep.objects.create(action=action_sign_up, tag_name='button', text='Sign up!')

        action_credit_card = Action.objects.create(team=self.team, name='paid')
        ActionStep.objects.create(action=action_credit_card, tag_name='button', text='Pay $10')

        action_watch_movie = Action.objects.create(team=self.team, name='watch movie')
        ActionStep.objects.create(action=action_watch_movie, text='Watch now', selector="div > a.watch_movie")

        # events
        person_stopped_after_signup = Person.objects.create(distinct_ids=["stopped_after_signup"], team=self.team)
        event_sign_up_1 = self._signup_event('stopped_after_signup')

        person_stopped_after_pay = Person.objects.create(distinct_ids=["stopped_after_pay"], team=self.team)
        self._signup_event('stopped_after_pay')
        self._pay_event('stopped_after_pay')
        self._movie_event('stopped_after_pay')

        # Test filtering of deleted actions
        deleted_action_watch_movie = Action.objects.create(team=self.team, name='watch movie', deleted=True)
        ActionStep.objects.create(action=deleted_action_watch_movie, text='Watch now', selector="div > a.watch_movie")

        # non matching events
        non_matching = Event.objects.create(distinct_id='stopped_after_pay', properties={'$current_url': 'http://whatever.com'}, team=self.team, elements=[
            Element(tag_name='blabla', href='/moviedd', order=0),
            Element(tag_name='blabla', href='/moviedd', order=1)
        ])
        Event.objects.create(distinct_id='stopped_after_pay', properties={'$current_url': 'http://whatever.com'}, team=self.team)

        # with self.assertNumQueries(8):
        response = self.client.get('/api/event/actions/').json()
        self.assertEqual(len(response['results']), 4)
        # Results are ordered newest-first, so the first signup is last.
        self.assertEqual(response['results'][3]['event']['id'], event_sign_up_1.pk)
        self.assertEqual(response['results'][3]['action']['id'], action_sign_up.pk)
        self.assertEqual(response['results'][3]['action']['name'], 'signed up')

        self.assertEqual(response['results'][2]['action']['id'], action_sign_up.pk)

        self.assertEqual(response['results'][1]['action']['id'], action_credit_card.pk)
        self.assertEqual(response['results'][0]['action']['id'], action_watch_movie.pk)

    def test_event_names(self):
        """/api/event/names/ aggregates event names by count, most frequent first."""
        Event.objects.create(team=self.team, event='user login')
        Event.objects.create(team=self.team, event='user sign up')
        Event.objects.create(team=self.team, event='user sign up')

        response = self.client.get('/api/event/names/').json()
        self.assertEqual(response[0]['name'], 'user sign up')
        self.assertEqual(response[0]['count'], 2)
        self.assertEqual(response[1]['name'], 'user login')
        self.assertEqual(response[1]['count'], 1)

    def test_event_property_names(self):
        """/api/event/properties/ lists distinct property keys."""
        Event.objects.create(team=self.team, properties={'$browser': 'whatever', '$os': 'Mac OS X'})
        Event.objects.create(team=self.team, properties={'random_prop': 'asdf'})
        Event.objects.create(team=self.team, properties={'random_prop': 'asdf'})

        response = self.client.get('/api/event/properties/').json()
        self.assertEqual(response[0]['name'], '$browser')
        self.assertEqual(response[1]['name'], '$os')
        self.assertEqual(response[2]['name'], 'random_prop')

    def test_event_property_values(self):
        """/api/event/values/ counts values of a key and supports prefix search."""
        Event.objects.create(team=self.team, properties={'random_prop': 'asdf', 'some other prop': 'with some text'})
        Event.objects.create(team=self.team, properties={'random_prop': 'asdf'})
        Event.objects.create(team=self.team, properties={'random_prop': 'qwerty'})
        Event.objects.create(team=self.team, properties={'something_else': 'qwerty'})
        response = self.client.get('/api/event/values/?key=random_prop').json()
        self.assertEqual(response[0]['name'], 'asdf')
        self.assertEqual(response[0]['count'], 2)
        self.assertEqual(response[1]['name'], 'qwerty')
        self.assertEqual(response[1]['count'], 1)

        # Partial-value search on the same key.
        response = self.client.get('/api/event/values/?key=random_prop&value=qw').json()
        self.assertEqual(response[0]['name'], 'qwerty')
        self.assertEqual(response[0]['count'], 1)

    def test_before_and_after(self):
        """before/after timestamp filters work both with and without an action filter."""
        user = self._create_user('tim')
        self.client.force_login(user)
        person = Person.objects.create(properties={'email': '<EMAIL>'}, team=self.team, distinct_ids=["2", 'some-random-uid'])
        # Freeze time so the two events straddle the 2020-01-09 cutoff.
        with freeze_time("2020-01-10"):
            event1 = Event.objects.create(team=self.team, event='sign up', distinct_id="2")
        with freeze_time("2020-01-8"):
            event2 = Event.objects.create(team=self.team, event='sign up', distinct_id="2")

        action = Action.objects.create(team=self.team)
        ActionStep.objects.create(action=action, event='sign up')

        response = self.client.get('/api/event/?after=2020-01-09&action_id=%s' % action.pk).json()
        self.assertEqual(len(response['results']), 1)
        self.assertEqual(response['results'][0]['id'], event1.pk)

        response = self.client.get('/api/event/?before=2020-01-09&action_id=%s' % action.pk).json()
        self.assertEqual(len(response['results']), 1)
        self.assertEqual(response['results'][0]['id'], event2.pk)

        # without action
        response = self.client.get('/api/event/?after=2020-01-09').json()
        self.assertEqual(len(response['results']), 1)
        self.assertEqual(response['results'][0]['id'], event1.pk)

        response = self.client.get('/api/event/?before=2020-01-09').json()
        self.assertEqual(len(response['results']), 1)
self.assertEqual(response['results'][0]['id'], event2.pk) | StarcoderdataPython |
3345200 | <filename>gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/contractBetweenVertices.py<gh_stars>10-100
#!/usr/bin/python
#ef.py
from Gralog import *
from bfs import populateBFS
# Contract the BFS path between two user-chosen vertices into a single edge.
graph = Graph("directed")
graph.generateRandomGraph(15)
source = graph.requestVertex()
target = graph.requestVertex()

# BFS from the source annotates every reachable vertex with a "prev" property.
populateBFS(graph, source)

# Walk the predecessor chain from target back towards source, collecting the
# intermediate vertices that will be removed.
intermediates = []
node = target.getProperty("prev")
while node != source:
    intermediates.append(node)
    node = node.getProperty("prev")

# Replace the whole path with one direct edge, then drop the inner vertices.
source.connect(target)
for vertex in intermediates:
    vertex.delete()
| StarcoderdataPython |
61431 | # Generated by Django 3.2.7 on 2021-10-03 15:09
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename ``Seller.description`` to ``Seller.descr``."""

    dependencies = [
        ('sellers', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='seller',
            old_name='description',
            new_name='descr',
        ),
    ]
| StarcoderdataPython |
3336569 | #!/usr/bin/env python3
# <https://glom.readthedocs.io/en/latest/>
import glom
# Dotted path specs reach into nested dicts.
nested = {"a": {"b": {"c": "d"}}}
value = glom.glom(nested, spec="a.b.c")
print(value)  # prints: d

# Missing paths raise PathAccessError with a target/spec trace.
try:
    glom.glom(nested, spec='a.nope')
except glom.core.PathAccessError as e:
    print(e)
# glom.core.PathAccessError: error raised while processing, details below.
#  Target-spec trace (most recent last):
#  - Target: {'a': {'b': {'c': 'd'}}}
#  - Spec: 'a.nope'
# glom.core.PathAccessError: could not access 'nope', part 1 of Path('a', 'nope'), got error: KeyError('nope')

# Same dotted access on a deeper, domain-flavoured structure.
dutch_specialisms = {
    "doctor": {
        "internist": {
            "sub-specialty": "allergologie"
        }
    }
}
result = glom.glom(dutch_specialisms, spec="doctor.internist.sub-specialty")
print(result)  # prints: allergologie

# A tuple spec chains steps; the inner list spec maps over a list of dicts.
dutch_specialisms = {
    "doctor": {
        "internist": [
            {"sub-specialty": "allergologie"},
            {"sub-specialty": "bloedtransfusiegeneeskunde"},
            {"sub-specialty": "oncologie"},
        ]
    }
}
result = glom.glom(dutch_specialisms, spec=("doctor.internist", ["sub-specialty"]))
print(result)  # prints: ['allergologie', 'bloedtransfusiegeneeskunde', 'oncologie']
| StarcoderdataPython |
3206227 | <filename>DQM/TrackingMonitor/python/LogMessageMonitor_cff.py
import FWCore.ParameterSet.Config as cms
import DQM.TrackingMonitor.LogMessageMonitor_cfi
# Monitor log messages emitted by the local-reconstruction (digi/cluster) modules.
LocalRecoLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
LocalRecoLogMessageMon.pluginsMonName = cms.string ( 'LocalReco' )
LocalRecoLogMessageMon.modules       = cms.vstring( 'siPixelDigis', 'siStripDigis', 'siPixelClusters', 'siStripClusters' ) # siPixelDigis : SiPixelRawToDigi, siStripDigis : SiStripRawToDigi (SiStripRawToDigiUnpacker), siPixelClusters : SiPixelClusterProducer, siStripClusters : SiStripClusterizer
LocalRecoLogMessageMon.categories    = cms.vstring( 'SiPixelRawToDigi', 'TooManyErrors', 'TooManyClusters' )

# Monitor the per-step cluster-remover modules.
# apparentely there are not LogError in RecoLocalTracker/SubCollectionProducers/src/TrackClusterRemover.cc
ClusterizerLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
ClusterizerLogMessageMon.pluginsMonName = cms.string ( 'TrackClusterRemover' )
ClusterizerLogMessageMon.modules       = cms.vstring( 'detachedTripletStepClusters', 'lowPtTripletStepClusters', 'pixelPairStepClusters', 'mixedTripletStepClusters', 'pixelLessStepClusters', 'tobTecStepClusters' ) # TrackClusterRemover
ClusterizerLogMessageMon.categories    = cms.vstring( )

# Monitor all seed-producer modules of the iterative-tracking steps.
# initialStepSeeds,lowPtTripletStepSeeds, pixelPairStepSeeds, detachedTripletStepSeeds, : TooManyClusters (SeedGeneratorFromRegionHitsEDProducer),
# photonConvTrajSeedFromSingleLeg : (PhotonConversionTrajectorySeedProducerFromSingleLeg)
SeedingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
SeedingLogMessageMon.pluginsMonName = cms.string ( 'Seeding' )
SeedingLogMessageMon.modules       = cms.vstring( 'initialStepSeedsPreSplitting', 'initialStepSeeds', 'detachedTripletStepSeeds', 'lowPtTripletStepSeeds', 'pixelPairStepSeeds', 'mixedTripletStepSeedsA', 'mixedTripletStepSeedsB', 'pixelLessStepSeeds', 'tobTecStepSeeds', 'jetCoreRegionalStepSeeds', 'muonSeededSeedsOutIn', 'muonSeededSeedsInOut', 'photonConvTrajSeedFromSingleLeg')
SeedingLogMessageMon.categories    = cms.vstring( 'TooManyClusters', 'TooManyPairs', 'TooManyTriplets', 'TooManySeeds' )

# Monitor the track-candidate producers of every iteration.
# RecoTracker/CkfPattern/src/CkfTrackCandidateMakerBase.cc
TrackCandidateLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
TrackCandidateLogMessageMon.pluginsMonName = cms.string ( 'TrackCandidate' )
TrackCandidateLogMessageMon.modules       = cms.vstring( 'initialStepTrackCandidatesPreSplitting', 'initialStepTrackCandidates', 'detachedTripletStepTrackCandidates', 'lowPtTripletStepTrackCandidates', 'pixelPairStepTrackCandidates', 'mixedTripletStepTrackCandidates', 'pixelLessStepTrackCandidates', 'tobTecStepTrackCandidates', 'jetCoreRegionalStepTrackCandidates', 'muonSeededTrackCandidatesInOut', 'muonSeededTrackCandidatesOutIn', 'convTrackCandidates' )
TrackCandidateLogMessageMon.categories    = cms.vstring( 'TooManySeeds' )

# Monitor the final track producers (per step and merged generalTracks).
# TrackProducer:FailedPropagation
TrackFinderLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
TrackFinderLogMessageMon.pluginsMonName = cms.string ( 'TrackFinder' )
TrackFinderLogMessageMon.modules       = cms.vstring( 'pixelTracks', 'initialStepTracks', 'lowPtTripletStepTracks', 'pixelPairStepTracks', 'detachedTripletStepTracks', 'mixedTripletStepTracks', 'pixelLessStepTracks', 'tobTecStepTracks', 'jetCoreRegionalStepTracks', 'muonSeededTracksOutIn', 'muonSeededTracksInOut', 'convStepTracks', 'generalTracks' )
TrackFinderLogMessageMon.categories    = cms.vstring(
    'FailedPropagation', 'RKPropagatorInS'
)

# Combined monitor over the full iterative-tracking chain, including the
# photon-conversion seeding/candidate/track modules.
FullIterTrackingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
FullIterTrackingLogMessageMon.pluginsMonName = cms.string ( 'FullIterTracking' )
FullIterTrackingLogMessageMon.modules = cms.vstring(
    'initialStepSeeds_iter0',
    'initialStepTrackCandidates_iter0',
    'initialStepTracks_iter0',
    'lowPtTripletStepSeeds_iter1',
    'lowPtTripletStepTrackCandidates_iter1',
    'lowPtTripletStepTracks_iter1',
    'pixelPairStepSeeds_iter2',
    'pixelPairStepTrackCandidates_iter2',
    'pixelPairStepTracks_iter2',
    'detachedTripletStepSeeds_iter3',
    'detachedTripletStepTrackCandidates_iter3',
    'detachedTripletStepTracks_iter3',
    'mixedTripletStepSeedsA_iter4',
    'mixedTripletStepSeedsB_iter4',
    'mixedTripletStepTrackCandidates_iter4',
    'mixedTripletStepTracks_iter4',
    'pixelLessStepSeeds_iter5',
    'pixelLessStepTrackCandidates_iter5',
    'pixelLessStepTracks_iter5',
    'tobTecStepSeeds_iter6',
    'tobTecStepTrackCandidates_iter6',
    'tobTecStepTracks_iter6',
    'photonConvTrajSeedFromSingleLeg',
    'convTrackCandidates',
    'convStepTracks',
)
FullIterTrackingLogMessageMon.categories = cms.vstring(
    'TooManyClusters',
    'TooManyPairs',
    'TooManyTriplets',
    'TooManySeeds',
)

# Same as FullIterTracking but without the conversion-specific modules.
IterTrackingLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
IterTrackingLogMessageMon.pluginsMonName = cms.string ( 'IterTracking' )
IterTrackingLogMessageMon.modules = cms.vstring(
    'initialStepSeeds_iter0',
    'initialStepTrackCandidates_iter0',
    'initialStepTracks_iter0',
    'lowPtTripletStepSeeds_iter1',
    'lowPtTripletStepTrackCandidates_iter1',
    'lowPtTripletStepTracks_iter1',
    'pixelPairStepSeeds_iter2',
    'pixelPairStepTrackCandidates_iter2',
    'pixelPairStepTracks_iter2',
    'detachedTripletStepSeeds_iter3',
    'detachedTripletStepTrackCandidates_iter3',
    'detachedTripletStepTracks_iter3',
    'mixedTripletStepSeedsA_iter4',
    'mixedTripletStepSeedsB_iter4',
    'mixedTripletStepTrackCandidates_iter4',
    'mixedTripletStepTracks_iter4',
    'pixelLessStepSeeds_iter5',
    'pixelLessStepTrackCandidates_iter5',
    'pixelLessStepTracks_iter5',
    'tobTecStepSeeds_iter6',
    'tobTecStepTrackCandidates_iter6',
    'tobTecStepTracks_iter6',
)
IterTrackingLogMessageMon.categories = cms.vstring(
    'TooManyClusters',
    'TooManyPairs',
    'TooManyTriplets',
    'TooManySeeds',
)

# Monitor only the photon-conversion reconstruction modules.
ConversionLogMessageMon = DQM.TrackingMonitor.LogMessageMonitor_cfi.LogMessageMon.clone()
ConversionLogMessageMon.pluginsMonName = cms.string ( 'Conversion' )
ConversionLogMessageMon.modules = cms.vstring(
    'photonConvTrajSeedFromSingleLeg',
    'convTrackCandidates',
    'convStepTracks',
)
ConversionLogMessageMon.categories = cms.vstring(
    'TooManyClusters',
    'TooManyPairs',
    'TooManyTriplets',
    'TooManySeeds',
)
| StarcoderdataPython |
176744 | <filename>olaf/user/tests.py<gh_stars>0
from django.test import TestCase, Client
from django.shortcuts import reverse
from django.db.models import signals
from django.contrib.auth import get_user_model
from olaf.tasks import send_sms_to
import json
User = get_user_model()
class SignalTests(TestCase):
    """Check that creating a user triggers the signals that attach related objects."""
    def setUp(self):
        self.user = User.objects.create_user(phone_number='994508874380', password='<PASSWORD>')

    # NOTE(review): the name mentions an order list view, but the body checks
    # that the post-save signals created profile/notifications -- consider renaming.
    def test_order_list_view_GET(self):
        self.assertIsNotNone(self.user.profile)
        self.assertIsNotNone(self.user.notifications)
class TFATests(TestCase):
    """Tests for the two-factor-authentication request endpoint."""
    def setUp(self):
        self.user = User.objects.create_user(phone_number='994508874380', password='<PASSWORD>')
        self.client.login(phone_number='994508874380', password='<PASSWORD>')

    def test_request_tfa(self):
        # Placeholder: sending a real SMS is disabled in tests.
        pass
        # send_sms_to('508874380')

    def test_request_tfa_view(self):
        """POSTing a phone number to the request-tfa endpoint returns 200."""
        data = {
            'phone_number': '508874380'
        }
        url = reverse('user:request-tfa')
        response = self.client.post(url, data=json.dumps(data), content_type="application/json")
        self.assertEqual(response.status_code, 200)
| StarcoderdataPython |
1632060 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from uncertainties import ufloat
import uncertainties
from uncertainties.unumpy import uarray
from scipy.optimize import curve_fit
import os
# print("Cwd:", os.getcwd())
# print("Using matplotlibrc from ", mpl.matplotlib_fname())
# Module-level figure/axes shared by autoplot() below.
fig = plt.figure()
# NOTE(review): this CALLS plt.close() immediately (closing the current figure)
# and binds its return value (None) to `clear`; an alias `clear = plt.close`
# was probably intended -- confirm before changing.
clear = plt.close()
ax = fig.add_subplot(111)
def gimmeTHATcolumn(array, k):
    """Extract column *k* of a 2D array-like.

    :param array: sequence of rows (each row indexable)
    :param k: column index to extract
    :return: list with the k-th element of every row
    """
    # Comprehension replaces the original index-loop-and-append.
    return [row[k] for row in array]
def meanDistance(x):
    """Sum the consecutive differences of *x*, each scaled by 1/len(x),
    then divide by len(x) again.

    NOTE(review): the result equals ``(x[-1] - x[0]) / len(x)**2``; the usual
    mean spacing would be ``(x[-1] - x[0]) / (len(x) - 1)``. Confirm the
    double division is intended before relying on this value.
    """
    x = np.array(x)
    n = len(x)
    total = 0
    for lo, hi in zip(x, x[1:]):
        total += (hi - lo) / n
    return total / n
"""Return a subplot object.
:param errorbars=True: Plots error bars when true.
:param yScale: e.g. 'log', 'dec'
"""
xValues = np.array(xValues)
yValues = np.array(yValues)
errX = None
errY = None
if type(xValues[0]) == uncertainties.Variable or type(xValues[0]) == uncertainties.AffineScalarFunc:
x = [item.nominal_value for item in xValues]
errX = [item.std_dev for item in xValues]
else:
x = xValues
if type(yValues[0]) == uncertainties.Variable or type(yValues[0]) == uncertainties.AffineScalarFunc:
y = [item.nominal_value for item in yValues]
errY = [item.std_dev for item in yValues]
else:
y = yValues
ax.set_yscale(yScale)
x_offset = (max(x) - min(x)) * 0.015
ax.set_xlim(min(x) - x_offset, max(x) + x_offset)
if yScale != 'log':
y_offset = (max(y) - min(y)) * 0.015
ax.set_ylim(min(y) - y_offset, max(y) + y_offset)
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.legend(loc='best')
if errorbars:
if errX != None and errY != None:
plt.errorbar(x, y, xerr=errX, yerr=errY, fmt=errorStyle)
elif errY != None:
plt.errorbar(x, y, yerr=errY, fmt=errorStyle)
print(errY)
elif errX != None:
plt.errorbar(x, y, xerr=errX, fmt=errorStyle)
else:
raise "Should draw errorbars, but x, y are not ufloats!"
ax.plot(x, y, plotStyle, label=plotLabel, **furtherPlotArgs)
fig.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
return fig
def linearFit(x, a, b):
    """Straight line ``a*x + b`` -- model function for curve fitting."""
    return b + a * x
def isUfloat(var):
    """Return True if *var* is an uncertainties number (Variable or AffineScalarFunc)."""
    return type(var) in (uncertainties.core.Variable, uncertainties.core.AffineScalarFunc)
maxfev = 1000000
def autofit(x, y, fitFunction, p0=None):
    """Fit *fitFunction* to the data and return its parameters as a ufloat array.

    If the y values carry uncertainties, their standard deviations are used
    as absolute sigmas for the fit.
    """
    if isUfloat(y[0]):
        nominal = [v.nominal_value for v in y]
        sigma = [v.std_dev for v in y]
        popt, pcov = curve_fit(fitFunction, x, nominal, sigma=sigma,
                               absolute_sigma=True, p0=p0, maxfev=maxfev)
    else:
        popt, pcov = curve_fit(fitFunction, x, y, p0=p0, maxfev=maxfev)
    # Parameter errors are the square roots of the covariance diagonal.
    return uarray(popt, np.sqrt(np.diag(pcov)))
def array(values, offset, magnitude):
    """Build a float numpy array as ``(values + offset) * 10**magnitude``.

    :param offset: added to every item
    :param magnitude: power of ten every item is scaled by
    """
    shifted = np.array(values).astype(float) + offset
    return shifted * 10 ** magnitude
def mean(values):
    """Return the arithmetic mean of *values*."""
    arr = np.array(values)
    return sum(arr) / len(arr)
def stdDev(values):
    """Return the estimated (sample) standard deviation of *values*."""
    values = np.array(values)
    # Arithmetic mean, inlined (same computation as the module's mean()).
    center = sum(values) / len(values)
    squared = sum((v - center) ** 2 for v in values)
    return np.sqrt(1 / (len(values) - 1) * squared)
def stdDevOfMean(values):
    """Return estimated standard deviation of the mean (the important one!)"""
    # Standard error: sample standard deviation divided by sqrt(N).
    return stdDev(values) / np.sqrt(len(values))
def errorString(value):
    """Render a ufloat as ``nominal±stddev`` for pretty printing."""
    return f"{value.nominal_value}±{value.std_dev}"
def abweichung(value, lit):
    """Return the relative deviation of an experimental *value* from a
    literature value *lit*, formatted as a percent string (three decimals)."""
    relative = (lit - value.nominal_value) / lit * 100
    return f"{relative:.3f}" + "%"
def modifiyItems(dic, keyFunction, valueFunction):
    """Return a new dict with keyFunction(key, value) / valueFunction(key, value)
    applied to every entry of *dic*.

    (Name kept as-is for compatibility, despite the typo.)
    """
    result = {}
    for key, value in dic.items():
        result[keyFunction(key, value)] = valueFunction(key, value)
    return result
# find peaks
import sys
from numpy import NaN, Inf, arange, isscalar, asarray, array
def peakdet(v, delta, x=None):
    """
    Converted from MATLAB script at http://billauer.co.il/peakdet.html
    
    Returns two arrays
    
    function [maxtab, mintab]=peakdet(v, delta, x)
    %PEAKDET Detect peaks in a vector
    %        [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
    %        maxima and minima ("peaks") in the vector V.
    %        MAXTAB and MINTAB consists of two columns. Column 1
    %        contains indices in V, and column 2 the found values.
    %      
    %        With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
    %        in MAXTAB and MINTAB are replaced with the corresponding
    %        X-values.
    %
    %        A point is considered a maximum peak if it has the maximal
    %        value, and was preceded (to the left) by a value lower by
    %        DELTA.
    
    % <NAME>, 3.4.05 (Explicitly not copyrighted).
    % This function is released to the public domain; Any use is allowed.
    
    """
    maxtab = []
    mintab = []
       
    if x is None:
        # Default x axis: plain sample indices.
        x = arange(len(v))
    
    v = asarray(v)
    
    # NOTE(review): sys.exit() in a library function terminates the whole
    # interpreter; raising ValueError would be friendlier to callers -- confirm
    # before changing, as existing scripts may rely on the exit behavior.
    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')
    
    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')
    
    if delta <= 0:
        sys.exit('Input argument delta must be positive')
    
    # Running extrema (value and position) seen since the last detected peak.
    mn, mx = Inf, -Inf
    mnpos, mxpos = NaN, NaN
    
    # State flag: True while searching for a maximum, False for a minimum.
    lookformax = True
    
    for i in arange(len(v)):
        this = v[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]
        
        if lookformax:
            # A drop of more than delta below the running max confirms a peak.
            if this < mx - delta:
                maxtab.append((mxpos, mx))
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            # A rise of more than delta above the running min confirms a trough.
            if this > mn + delta:
                mintab.append((mnpos, mn))
                mx = this
                mxpos = x[i]
                lookformax = True

    return array(maxtab), array(mintab)
# if __name__=="__main__":
# from matplotlib.pyplot import plot, scatter, show
# series = [0,0,0,2,0,0,0,-2,0,0,0,2,0,0,0,-2,0]
# maxtab, mintab = peakdet(series,.3)
# plot(series)
# scatter(array(maxtab)[:,0], array(maxtab)[:,1], color='blue')
# scatter(array(mintab)[:,0], array(mintab)[:,1], color='red')
# show()
def getPeakVal(peaksmax):
    """Split a list of (x, y) peak pairs into separate x and y value lists."""
    xs = [pair[0] for pair in peaksmax]
    ys = [pair[1] for pair in peaksmax]
    return xs, ys
def get_noms(values):
    """Extract nominal values of ufloat-like objects into a numpy array."""
    noms = [item.nominal_value for item in values]
    return array(noms)
def get_std_dev(values):
    """Extract standard deviations of ufloat-like objects into a numpy array."""
    deviations = [item.std_dev for item in values]
    return array(deviations)
| StarcoderdataPython |
1601321 | from .root import root
| StarcoderdataPython |
67566 | <gh_stars>0
# (c) 2015, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.plugins.lookup import LookupBase
try:
    # Reuse the CLI's global Display object when Ansible set one on __main__.
    from __main__ import display
except ImportError:
    # No CLI display available (e.g. imported outside ansible's entry point):
    # create a private Display instance instead.
    from ansible.utils.display import Display
    display = Display()
class LookupModule(LookupBase):
    """Lookup plugin: fetch each URL in *terms* and return its content as text."""

    def run(self, terms, variables=None, **kwargs):
        # validate_certs defaults to True; split_lines controls whether each
        # response is returned line-by-line or as one string.
        validate_certs = kwargs.get('validate_certs', True)
        split_lines = kwargs.get('split_lines', True)

        ret = []
        for term in terms:
            display.vvvv("url lookup connecting to %s" % term)
            try:
                response = open_url(term, validate_certs=validate_certs)
            except HTTPError as e:
                raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
            except URLError as e:
                raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
            except SSLValidationError as e:
                raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
            except ConnectionError as e:
                raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
            body = response.read()
            if split_lines:
                ret.extend(to_text(line) for line in body.splitlines())
            else:
                ret.append(to_text(body))
        return ret
| StarcoderdataPython |
3238650 | <reponame>jordiae/DeepLearning-MAI
import torch
from typing import Tuple
import torch.nn.functional as F
import argparse
import os
import logging
from torch import nn
def load_arch(device: torch.device, args: argparse.Namespace) -> Tuple[torch.nn.Module, torch.nn.Module]:
    """
    Returns initialized encoder and decoder, to be used jointly as a Seq2seq model.
    Notice that if bidirectional is set to True, the hidden_size of the decoder will be multiplied by 2.
    :param device: device
    :param args: Arguments from argparse.
    :return: Initialized (encoder, decoder) pair
    :raises NotImplementedError: for unknown architectures, or 'jordan' with PyTorch RNNs.
    """
    from rnn.models import VanillaRNN, LSTM, GRU, Decoder, PyTorchBaseRNN

    # The decoder consumes the encoder's state, which doubles in width when
    # the encoder is bidirectional.
    decoder_hidden = args.hidden_size * (2 if args.bidirectional else 1)

    embeddings = None
    if args.share_embeddings:
        # One embedding table shared between encoder and decoder.
        embeddings = nn.Embedding(args.vocab_size, args.embedding_size)

    # Keyword arguments common to every RNN constructor below; collapsing the
    # original per-architecture duplication into a single spot.
    common = dict(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
                  n_layers=args.n_layers, dropout=args.dropout, embeddings=embeddings)

    if args.no_pytorch_rnn:
        # Custom (hand-written) RNN implementations.
        if args.arch in ('elman', 'jordan'):
            encoder = VanillaRNN(hidden_features=args.hidden_size, mode=args.arch,
                                 bidirectional=args.bidirectional, **common)
            decoder_rnn = VanillaRNN(hidden_features=decoder_hidden, mode=args.arch,
                                     bidirectional=False, **common)
        elif args.arch in ('lstm', 'gru'):
            rnn_cls = LSTM if args.arch == 'lstm' else GRU
            encoder = rnn_cls(hidden_features=args.hidden_size,
                              bidirectional=args.bidirectional, **common)
            decoder_rnn = rnn_cls(hidden_features=decoder_hidden, bidirectional=False, **common)
        else:
            raise NotImplementedError()
    else:
        # PyTorch built-in RNNs wrapped by PyTorchBaseRNN.
        if args.arch == 'jordan':
            # No PyTorch built-in implements a Jordan network.
            raise NotImplementedError()
        elif args.arch in ('elman', 'lstm', 'gru'):
            encoder = PyTorchBaseRNN(hidden_features=args.hidden_size, arch=args.arch,
                                     bidirectional=args.bidirectional, **common)
            decoder_rnn = PyTorchBaseRNN(hidden_features=decoder_hidden, arch=args.arch,
                                         bidirectional=False, **common)
        else:
            raise NotImplementedError()

    return encoder, Decoder(decoder_rnn, args.vocab_size)
def pack_right_padded_seq(seqs: torch.Tensor, lengths: torch.Tensor, device: str) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Pack a right-(zero-)padded batch, inspired by the functionality of
    torch.nn.utils.rnn.pack_padded_sequence, but relying on the zero padding
    itself instead of a lengths parameter.

    Flattens the batch time-step first ([first token of every sequence,
    second token of every sequence, ...]) with padding removed, and returns
    the effective batch size at each time step. *lengths* is only used to
    verify the batch is ordered by non-decreasing length; otherwise an
    exception is thrown.

    :param seqs: [batch, right-padded tokens]
    :param lengths: [batch]
    :param device: device
    :return: ([packed tokens], [effective batch sizes])
    """
    previous = lengths[0]
    for length in lengths:
        if length < previous:
            raise Exception('Unsorted batches!')
        previous = length

    # Number of non-pad tokens at each time-step position.
    effective_batch_sizes = (seqs != 0).sum(dim=0)

    # Append a padding column, then flatten time-step first and drop zeros.
    pad_column = torch.zeros(seqs.shape[0], 1).to(device).long()
    seqs = torch.cat((seqs, pad_column), dim=-1)
    flat = seqs.permute(-1, 0).reshape(seqs.shape[0] * seqs.shape[1])
    keep = (flat != 0).nonzero().flatten()
    return flat[keep], effective_batch_sizes
def init_train_logging():
    """Sets logging such that the output is both saved in 'train.log' and echoed to stdout."""
    log_path = 'train.log'
    config = {'filename': log_path, 'level': logging.INFO}
    if os.path.exists('checkpoint_last.pt'):
        # Resuming from a checkpoint: append to the existing log.
        config['filemode'] = 'a'
    logging.basicConfig(**config)
    logging.getLogger('').addHandler(logging.StreamHandler())
def init_eval_logging(set_: str):
    """Sets logging such that the output is saved in 'eval-<set_>.log' and echoed to stdout."""
    # TODO: Refactor (duplicates init_train_logging)
    log_path = f'eval-{set_}.log'
    config = {'filename': log_path, 'level': logging.INFO}
    if os.path.exists('checkpoint_last.pt'):
        # Resuming from a checkpoint: append to the existing log.
        config['filemode'] = 'a'
    logging.basicConfig(**config)
    logging.getLogger('').addHandler(logging.StreamHandler())
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy loss with label smoothing.

    With smoothing == 0 this reduces to standard cross-entropy; otherwise the
    one-hot target gets 1 - smoothing on the true class and spreads the
    remaining mass evenly over the other classes.
    """

    def __init__(self, smoothing=0.0):
        super(LabelSmoothingLoss, self).__init__()
        self.smoothing = smoothing  # probability mass moved off the true class

    @staticmethod
    def smooth_one_hot(target, classes, smoothing=0.0):
        """Return a [batch, classes] smoothed one-hot encoding of *target*.

        Made a staticmethod: the original took a ``self`` it never used and
        was invoked as ``LabelSmoothingLoss.smooth_one_hot(self, ...)``.
        Bound calls (``inst.smooth_one_hot(t, c, s)``) keep the same signature.
        """
        assert 0 <= smoothing < 1
        shape = (target.size(0), classes)
        with torch.no_grad():
            smoothed = torch.empty(size=shape, device=target.device) \
                .fill_(smoothing / (classes - 1)) \
                .scatter_(1, target.data.unsqueeze(1), 1. - smoothing)
        return smoothed

    def forward(self, input, target):
        # Soft targets, then the usual cross-entropy: mean of -sum(p * log q).
        target = self.smooth_one_hot(target, input.size(-1), self.smoothing)
        lsm = F.log_softmax(input, -1)
        loss = -(target * lsm).sum(-1)
        loss = loss.mean()
        return loss
| StarcoderdataPython |
3318374 | import os
import sys
from nose.exc import SkipTest
try:
    from pkg_resources import EntryPoint
except ImportError:
    # Without setuptools there is nothing to test: skip this whole module.
    raise SkipTest("No setuptools available; skipping")
# Paths to the test fixture: an egg-info directory under support/ep.
here = os.path.dirname(__file__)
support = os.path.join(here, 'support')
ep = os.path.join(support, 'ep')
def test_plugin_entrypoint_is_loadable():
    """The fixture's entry_points.txt must parse as a valid entry-point map."""
    ep_path = os.path.join(ep, 'Some_plugin.egg-info', 'entry_points.txt')
    # Context manager guarantees the handle is closed even if readlines raises
    # (the original leaked the handle on exception).
    with open(ep_path, 'r') as ep_file:
        lines = ep_file.readlines()
    assert EntryPoint.parse_map(lines)
| StarcoderdataPython |
3221336 | import json
from rumps import application_support
def reset_default_statistics():
    """Called if stats file is missing and then writes statistics.json in the App Support folder"""
    stats_path = f"{application_support('Duolingo Pomodoro')}/statistics.json"
    # Read the bundled defaults and re-serialize them into the user's stats file.
    with open("./static/default_stats.json", "r") as default_file, open(stats_path, "w+") as file:
        json.dump(json.load(default_file), file)
| StarcoderdataPython |
1706438 | <filename>tests/mokcam/halerror.py<gh_stars>0
# test hal errors
import pyb
# Open I2C bus 2 in master mode (pyb = MicroPython pyboard module).
i2c = pyb.I2C(2, pyb.I2C.MASTER)
try:
    # recv(1 byte, addr 1): presumably no device answers at address 1, so this
    # should raise and exercise the HAL error path — TODO confirm on hardware.
    i2c.recv(1, 1)
except OSError as e:
    # repr() shows the exception type and errno reported by the HAL.
    print(repr(e))
| StarcoderdataPython |
4807146 |
# © Copyright 2021, PRISMA’s Authors
import numpy as np
from prisma.spectrum import Spectrum
BATTINFO_ID = '0XH81'
def trimming(spectrum, within):
    """Trim raw spectrum.

    * within [float, float]: lower and upper limits of the range to be studied.

    If no data point falls strictly inside *within*, the spectrum is returned
    untrimmed. The metadata's 'Trim interval' stores the hull of the requested
    interval and the data range (min of lower bounds, max of upper bounds).
    """
    new_metadata = {'Process': 'Trimming', 'Process ID': BATTINFO_ID}
    # Boolean mask of points strictly inside the requested window.
    idxs_within = np.where((spectrum.indexes > within[0]) & (spectrum.indexes < within[1]), True, False)

    if np.all(~idxs_within):
        # Trimming interval lies outside spectrum.indexes: keep everything.
        new_indexes = spectrum.indexes
        new_counts = spectrum.counts
    else:
        new_indexes = spectrum.indexes[idxs_within]
        new_counts = spectrum.counts[idxs_within]

    # Fix: a dead assignment (new_metadata['Trim interval'] = within) that was
    # immediately overwritten has been removed; the value below is what the
    # original actually stored.
    # NOTE(review): min/max here compute the *union* hull of window and data
    # range — if the intent was to clip the window to the data, the calls
    # should be swapped (max for the lower bound, min for the upper). Confirm.
    new_metadata['Trim interval'] = [min(within[0], np.amin(spectrum.indexes)),
                                     max(within[1], np.amax(spectrum.indexes))]
    return Spectrum(indexes=new_indexes, counts=new_counts, parent=spectrum, metadata=new_metadata)
def smooth_assymlsq(spectrum, parameters):
    """Smooth *spectrum* with asymmetric least squares. Not yet implemented."""
    #To be implemented
    pass
def reject_outliers_std_method(spectrum, parameters):
    """Reject outlier points by a standard-deviation criterion. Not yet implemented."""
    #To be implemented
    pass
| StarcoderdataPython |
3336231 | # coding: utf-8
import re
def preg_split(pattern, subject):
    """PHP-style preg_split: split *subject* on regex *pattern*."""
    return re.compile(pattern).split(subject)
if __name__ == '__main__':
    # Demo: split on runs of whitespace and/or commas.
    keywords = preg_split(r'[\s,]+', 'hypertext language, programming')
    print(keywords)
| StarcoderdataPython |
3283899 | <filename>c10.py
from tkinter import *
# Conduit materials offered in the first drop-down menu.
conduitType = ["Heavy duty rigid UPVC conduit", "Corflo conduit",
"Medium duty corrugated", "Medium duty rigid UPVC conduit"]
# Cable sizes for the second drop-down; "-" is the unselected placeholder.
# NOTE(review): values look like conductor cross-sections in mm² per Table C10 — confirm.
CableType = ["-", "1", "1.5", "2.5", "4" , "6" ,"10" ,"16", "25", "35", "50", "70" , "95" ,"120" ,"150","185","240","300",
"400","500","630"]
class Application(Frame):
def __init__(self, master):
""" Initialise the Frame. """
super(Application, self).__init__(master)
self.UserIn = IntVar()
self.grid()
self.create_widgets()
    def create_widgets(self):
        """Build the conduit-calculator UI: input selectors, result labels and buttons.

        NOTE(review): most widgets are parented to the global ``master`` (the
        Tk root) rather than to ``self`` — confirm this is intentional, since
        it bypasses this Frame as a container.
        """
        # --- Input column: conduit type selector ---
        self.conduitLbl = Label (self, text = "Type of Conduit", height=2, width=20)#Label
        self.conduitLbl.grid(row=0, column = 0)
        self.conduit = StringVar(master) ### OPTION MENU FOR CONIOT TYPE
        self.conduit.set("Heavy duty rigid UPVC conduit") # default value
        self.conduitOptions = OptionMenu(master, self.conduit, *conduitType)
        self.conduitOptions.config(width=28)
        self.conduitOptions.grid(row=0, column=1)
        # --- Input column: cable type selector ---
        self.PVCLabel = Label (master, text = "Cable Type", height=2, width=20)#Label
        self.PVCLabel.grid(row=1, column = 0)
        self.cable = StringVar(master)
        self.cable.set("-") # default value
        self.PVCom = OptionMenu(master, self.cable, *CableType, )
        self.PVCom.config(width=15)
        self.PVCom.grid(row=1, column=1)
        # --- Input column: number-of-circuits entry ---
        self.circuitLbl = Label (master, text = "Number of Circuits:", height=1, width=20) #Label
        self.circuitLbl.grid(row=2, column = 0)
        # NOTE(review): this StringVar is dead — immediately overwritten by the Entry below.
        self.getCircuit = StringVar()
        self.getCircuit = Entry (master) ######## ENTRY BOX
        self.getCircuit.grid(row=2, column=1)
        self.btn = Button(master, text="Calculate", bg="light grey", command=self.onButtonClick)
        self.btn.grid(row = 3,column=1)
        # --- Output column: echo of the selected inputs and the result ---
        self.conduitTypeResult = Label (master, text = "Conduit Type: ", height=1, width=40) #Label
        self.conduitTypeResult.grid(row=0, column =2)
        self.PVCResult = Label (master, text = "Cable Type: ", height=2, width=25) #Label
        self.PVCResult.grid(row=1, column =2)
        self.circuitNo = Label (master, text = "Number of Circuits: ", height=2, width=25) #Label
        self.circuitNo.grid(row=2, column =2)
        self.conduitResult = Label (master, text = "-", height=2, width=40, font='Helvetica 9 bold') #Label
        self.conduitResult.grid(row=3, column =2)
        # Disclaimer text shown under the main grid.
        self.disclaimerText = Label (master, text = """DISCLAIMER\n Please refer to Table C10 (can be viewed by clicking Open Table button)
        to confirm the results before practically applying the Number Of Conduits. Each output has not been tested thus
        caution should be taken when using this program.\n
        REFERENCE: AS/NZ 3000:2018 Electrical Installations (known as the Australian/New Zealand Wiring Rules)"""
        ,font='Helvetica 9 bold') #Label
        self.disclaimerText.grid(row=6, rowspan=2, column=0, columnspan=3, sticky=W)
        self.close = Button(master, text="Close", bg="light grey", command=master.destroy)
        self.close.grid(row = 4,column=0)
        # Small logo canvas in the bottom-right corner.
        # NOTE(review): absolute, user-specific image paths below will break on
        # any other machine — consider paths relative to the script location.
        self.canvas = Canvas(master, width=99, height=29)
        self.canvas.grid(row=4, column=2)
        self.logo = PhotoImage(file='C:\\Users\\Aditya.Verma\\Documents\\GitHub\\Table-c10---max-single-core-sheathed-cables\\Lucid Logo.PNG')
        self.canvas.create_image(0, 0, image = self.logo, anchor = NW)
        # Keep a reference on the canvas so the image is not garbage-collected.
        self.canvas.logo = self.logo
        def openImage(): ### opens table
            # Pop up a window showing the scanned Table C10 image.
            control = Toplevel()
            canvas = Canvas(control, width=1172, height=704)
            canvas.pack(expand = YES, fill = BOTH)
            png1 = PhotoImage(file='C:\\Users\\Aditya.Verma\\Documents\\GitHub\\Table-c10---max-single-core-sheathed-cables\\Capture.PNG')
            canvas.create_image(0, 0, image = png1, anchor = NW)
            canvas.png1 = png1
        self.openImage = Button(master, text="Open Table", bg="light grey", command=openImage)#image open button
        self.openImage.grid(row=4, column = 1)
        def reset():
            # Restore all selectors and result labels to their initial state.
            self.PVCResult.configure(text="" )
            self.conduitTypeResult.configure(text="-" )
            self.PVCResult.configure(text="-" )
            self.conduit.set("Heavy duty rigid UPVC conduit")
            self.cable.set("-")
            self.circuitNo.configure(text="-")
            self.conduitResult.configure(text="-", bg='gray85', borderwidth=2, relief='flat')
        self.tableview = Button(master, text="Reset", bg="light grey", command=reset)
        self.tableview.grid(row = 3,column=0)
        # Disable Calculate while no cable type is chosen (always true at build time).
        if (self.cable.get()=='-'):
            self.btn.config(state=DISABLED)
        # NOTE(review): the condition below is a tuple, which is always truthy,
        # so the button is unconditionally re-enabled. The intended check was
        # probably `self.cable.get() in ("1", "1.5", ...)`.
        if (self.cable.get()=="1", "1.5", "2.5", "4" , "6" ,"10" ,"16", "25", "35", "50", "70" , "95" ,"120" ,"150","185","240","300",
        "400","500","630"):
            self.btn.config(state=NORMAL)
def onButtonClick(self):
#get values
def getConduitType(self): #type of conduit
self.x = self.conduit.get()
return self.x
def getCable(self):
self.x = self.cable.get()
return self.x
def getCircuitState(self):
self.x = self.getCircuit.get()
return int(self.x)
if not self.getCircuit.get():
self.conduitResult.configure(text="Error: Missing Values", bg='orange' )
self.conduitTypeResult.configure(text="Conduit Type: " + self.conduit.get(), font='Helvetica 9 bold')
self.PVCResult.configure(text="CableType: " + self.cable.get(),font='Helvetica 9 bold' )
self.circuitNo.configure(text="Number of Circuits: "+ self.getCircuit.get(), font='Helvetica 9 bold')
def circuitNo(self):
if (getConduitType(self)=="Heavy duty rigid UPVC conduit"):
if(getCable(self)=="1" and getCircuitState(self) <= int("5")):
return "20"
if(getCable(self)=="1" and getCircuitState(self)<= int("9")):
return "25"
if(getCable(self)=="1" and getCircuitState(self)<= int("16")):
return "32"
if(getCable(self)=="1" and getCircuitState(self)<= int("26")):
return "40"
if(getCable(self)=="1" and getCircuitState(self)<= int("43")):
return "50"
if(getCable(self)=="1" and getCircuitState(self)<= int("71")):
return "63"
if(getCable(self)=="1" and getCircuitState(self) >= int("100")):
return "80(NZ), 80(AUS), 100(NZ), 100(AUS), 125 or 150"
if ((getCable(self)=="25" or getCable(self)=="35" or getCable(self)=="50" )
and getCircuitState(self)<= int("0")):
return '20'
if ((getCable(self)=="70" or getCable(self)=="95") and getCircuitState(self)<= int("0")):
return "20 or 25"
if ((getCable(self)=="120" or getCable(self)=="150") and getCircuitState(self)<= int("0")):
return "20, 25 or 32"
if ((getCable(self)=="185" or
getCable(self)=="240" or getCable(self)=="300") and getCircuitState(self)<= int("0")):
return "20, 25, 32 or 40"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("0")):
return "20, 25, 32, 40 or 50"
if ((getCable(self)=="630") and getCircuitState(self)<= int("0")):
return "20, 25, 32, 40, 50 or 63"
if ((getCable(self)=="25" or getCable(self)== "35")
and getCircuitState(self)<= int("1")):
return '25 or 32'
if ((getCable(self)=="50") and getCircuitState(self)<= int("1")):
return "25, 32 or 40"
if ((getCable(self)=="70") and getCircuitState(self)<= int("1")):
return "25, 32, 40 or 50"
if ((getCable(self)=="95") and getCircuitState(self)<= int("1")):
return "32 or 40"
if ((getCable(self)=="120" or getCable(self)=="150") and getCircuitState(self)<= int("1")):
return "40 or 50"
if ((getCable(self)=="185" or
getCable(self)=="240") and getCircuitState(self)<= int("1")):
return "50 or 63"
if ((getCable(self)=="300") and getCircuitState(self)<= int("1")):
return "50, 63 or 80(NZ)"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("1")):
return "63 or 80(NZ) or 80(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("1")):
return "80(NZ), 80(AUS) or 100(NZ)"
if ((getCable(self)=="35") and getCircuitState(self)<= int(int("2"))):
return "40"
if ((getCable(self)=="70") and getCircuitState(self)<= int(int("2"))):
return "50"
if ((getCable(self)=="120") and getCircuitState(self)<= int("2")):
return "63"
if ((getCable(self)=="150") and getCircuitState(self)<= int("2")):
return "150"
if ((getCable(self)=="240") and getCircuitState(self)<= int("2")):
return "80(NZ)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("2")):
return "80(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("2")):
return "100(AUS)"
if ((getCable(self)=="25") and getCircuitState(self)<= int("3")):
return "40"
if ((getCable(self)=="50") and getCircuitState(self)<= int("3")):
return "50"
if ((getCable(self)=="95") and getCircuitState(self)<= int("3")):
return "63"
if ((getCable(self)=="185") and getCircuitState(self)<= int("3")):
return "80(NZ)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("3")):
return "80(AUS)"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("3")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("3")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("4")):
return "50"
if ((getCable(self)=="50") and getCircuitState(self)<= int("4")):
return "63"
if ((getCable(self)=="150") and getCircuitState(self)<= int("4")):
return "80(NZ)"
if ((getCable(self)=="185") and getCircuitState(self)<= int("4")):
return "80(AUS)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("4")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="500") and getCircuitState(self)<= int("4")):
return "125"
if ((getCable(self)=="630") and getCircuitState(self)<= int("4")):
return "150"
if ((getCable(self)=="25") and getCircuitState(self)<= int("5")):
return "50"
if ((getCable(self)=="120") and getCircuitState(self)<= int("5")):
return "80(NZ)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("5")):
return "80(AUS)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("5")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="400") and getCircuitState(self)<= int("5")):
return "125"
if ((getCable(self)=="50") and getCircuitState(self)<= int("6")):
return "63"
if ((getCable(self)=="95") and getCircuitState(self)<= int("6")):
return "80(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("6")):
return "80(AUS)"
if ((getCable(self)=="185") and getCircuitState(self)<= int("6")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("6")):
return "125"
if ((getCable(self)=="500") and getCircuitState(self)<= int("6")):
return "150"
#7
if ((getCable(self)=="35") and getCircuitState(self)<= int("7")):
return "63"
if ((getCable(self)=="95") and getCircuitState(self)<= int("7")):
return "80(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("7")):
return "100(NZ)"
if ((getCable(self)=="400") and getCircuitState(self)<= int("7")):
return "150"
#8
if ((getCable(self)=="70") and getCircuitState(self)<= int("8")):
return "80(NZ)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("8")):
return "100(AUS)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("8")):
return "125"
if ((getCable(self)=="300") and getCircuitState(self)<= int("8")):
return "150"
#9
if ((getCable(self)=="25") and getCircuitState(self)<= int("9")):
return "63"
if ((getCable(self)=="70") and getCircuitState(self)<= int("9")):
return "80(AUS)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("9")):
return "100(NZ)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("10")):
return "80(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("10")):
return "100(AUS)"
if ((getCable(self)=="185") and getCircuitState(self)<= int("10")):
return "125"
if ((getCable(self)=="240") and getCircuitState(self)<= int("10")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("11")):
return "100(NZ)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("12")):
return "80(AUS)"
if ((getCable(self)=="95") and getCircuitState(self)<= int("12")):
return "100(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("12")):
return "125"
if ((getCable(self)=="35") and getCircuitState(self)<= int("15")):
return "80(AUS)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("15")):
return "100(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("15")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("16")):
return "80(NZ)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("16")):
return "100(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("16")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("18")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("19")):
return "80(AUS"
if ((getCable(self)=="50") and getCircuitState(self)<= int("19")):
return "100(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("20")):
return "150"
if ((getCable(self)=="50") and getCircuitState(self)<= int("21")):
return "100(AUS)"
if ((getCable(self)=="35") and getCircuitState(self)<= int("24")):
return "100(NZ)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("24")):
return "125"
if ((getCable(self)=="95") and getCircuitState(self)<= int("24")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("26")):
return "100(AUS)"
if ((getCable(self)=="25") and getCircuitState(self)<= int("29")):
return "100(NZ)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("31")):
return "125"
if ((getCable(self)=="70") and getCircuitState(self)<= int("31")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("39")):
return "125"
if ((getCable(self)=="50") and getCircuitState(self)<= int("41")):
return "150"
if ((getCable(self)=="25") and getCircuitState(self)<= int("48")):
return "125"
if ((getCable(self)=="35") and getCircuitState(self)<= int("52")):
return "150"
if ((getCable(self)=="25") and getCircuitState(self)<= int("62")):
return "150"
#CableType AND HEAVY
#1
if ((getCable(self)=="4") and getCircuitState(self)<= int("1")):
return "20"
if ((getCable(self)=="6") and getCircuitState(self)<= int("1")):
return "20"
if ((getCable(self)=="10" or getCable(self)=="16") and getCircuitState(self)<= int("1")):
return "20 or 25"
#3
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("3")):
return "20"
if ((getCable(self)=="4" or getCable(self)=="6") and getCircuitState(self)<= int("3")):
return "25"
if ((getCable(self)=="16") and getCircuitState(self)<= int("3")):
return "32"
#4
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("4")):
return "20"
if ((getCable(self)=="10") and getCircuitState(self)<= int("4")):
return "32"
#5
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("5")):
return "25"
if ((getCable(self)=="16") and getCircuitState(self)<= int("5")):
return "40"
#6
if ((getCable(self)=="6") and getCircuitState(self)<= int("6")):
return "32"
if ((getCable(self)=="10") and getCircuitState(self)<= int("6")):
return "40"
#7
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("7")):
return "25"
if ((getCable(self)=="4") and getCircuitState(self)<= int("7")):
return "32"
#8
if ((getCable(self)=="16") and getCircuitState(self)<= int("8")):
return "50"
if ((getCable(self)=="6") and getCircuitState(self)<= int("9")):
return "40"
#10
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("10")):
return "32"
#11
if ((getCable(self)=="4") and getCircuitState(self)<= int("11")):
return "40"
if ((getCable(self)=="10") and getCircuitState(self)<= int("11")):
return "50"
#13
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("13")):
return "32"
if ((getCable(self)=="16") and getCircuitState(self)<= int("13")):
return "63"
#16
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("16")):
return "40"
if ((getCable(self)=="6") and getCircuitState(self)<= int("16")):
return "50"
#18
if ((getCable(self)=="10") and getCircuitState(self)<= int("18")):
return "63"
#19
if ((getCable(self)=="4") and getCircuitState(self)<= int("19")):
return "50"
#19
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("21")):
return "40"
#24
if ((getCable(self)=="16") and getCircuitState(self)<= int("24")):
return "80(NZ)"
#24
if ((getCable(self)=="6") and getCircuitState(self)<= int("26")):
return "63"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("27")):
return "50"
if ((getCable(self)=="16") and getCircuitState(self)<= int("28")):
return "82(AUS)"
if ((getCable(self)=="4") and getCircuitState(self)<= int("31")):
return "63"
if ((getCable(self)=="10") and getCircuitState(self)<= int("32")):
return "80(NZ)"
if ((getCable(self)=="10") and getCircuitState(self)<= int("32")):
return "80(NZ)"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("36")):
return "50"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("36")):
return "50"
if ((getCable(self)=="10") and getCircuitState(self)<= int("38")):
return "80(AUS)"
if ((getCable(self)=="16") and getCircuitState(self)<= int("43")):
return "100(NZ)"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("44")):
return "63"
if ((getCable(self)=="16") and getCircuitState(self) <= int("46")):
return "100(AUS)"
if ((getCable(self)=="6") and getCircuitState(self)<= int("48")):
return "80(NZ)"
if ((getCable(self)=="6") and getCircuitState(self)<= int("55")):
return "80(AUS)"
if ((getCable(self)=="4") and getCircuitState(self)<= int("56")):
return "80(NZ)"
if ((getCable(self)=="10") and getCircuitState(self)<= int("58")):
return "100(NZ)"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("59")):
return "63"
if ((getCable(self)=="10") and getCircuitState(self)<= int("63")):
return "100(AUS)"
if ((getCable(self)=="4") and getCircuitState(self)<= int("64")):
return "80(AUS)"
if ((getCable(self)=="4") and getCircuitState(self)<= int("64")):
return "80(AUS)"
if ((getCable(self)=="16") and getCircuitState(self) <= int("70")):
return "125"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("79")):
return "80(NZ)"
if ((getCable(self)=="6") and getCircuitState(self)<= int("85")):
return "100(NZ)"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("92")):
return "80(AUS)"
if ((getCable(self)=="6") and getCircuitState(self)<= int("92")):
return "100(AUS)"
if ((getCable(self)=="16") and getCircuitState(self) <= int("92")):
return "150"
if ((getCable(self)=="10") and getCircuitState(self)<= int("95")):
return "125"
if ((getCable(self)=="4") and getCircuitState(self)<= int("99")):
return "100(NZ)"
if ((getCable(self)=="1.5") and getCircuitState(self) >= int("100")):
return "80(NZ), 80(AUS), 100(NZ), 100(AUS), 125 or 150"
if ((getCable(self)=="2.5") and getCircuitState(self) >= int("100")):
return "100(NZ), 100(AUS), 125 or 150"
if ((getCable(self)=="4") and getCircuitState(self) >= int("100")):
return "100(AUS), 125 or 150"
if ((getCable(self)=="6") and getCircuitState(self) >= int("100")):
return "125 or 150"
if ((getCable(self)=="10") and getCircuitState(self) >= int("100")):
return "150"
if (getConduitType(self)=="Corflo conduit"):
if ((getCable(self)=="16") and getCircuitState(self)<= int("43")):
return "100(NZ)"
if ((getCable(self)=="16") and getCircuitState(self)<= int("45")):
return "100(AUS)"
if ((getCable(self)=="10") and getCircuitState(self)<= int("60")):
return "100(AUS)"
if ((getCable(self)=="6") and getCircuitState(self)<= int("89")):
return "100(AUS)"
if ((getCable(self)=="10") and getCircuitState(self)<= int("58")):
return "100(NZ)"
if ((getCable(self)=="6") and getCircuitState(self)<= int("85")):
return "100(NZ)"
if ((getCable(self)=="4") and getCircuitState(self)>= int("100")):
return "100(NZ)"
if ((getCable(self)=="4") and getCircuitState(self)>= int("100")):
return "100(NZ)"
if ((getCable(self)=="16") and getCircuitState(self)<= int("67")):
return "125"
if ((getCable(self)=="10") and getCircuitState(self)<= int("97")):
return "125"
if ((getCable(self)=="16") and getCircuitState(self)<= int("88")):
return "150"
if (((getCable(self)=="1.5" or (getCable(self)=="2.5"))) and getCircuitState(self) >= int("100")):
return "100(NZ), 100(AUS), 125 or 150"
if ((getCable(self)=="4") and getCircuitState(self) >= int("100")):
return "100(AUS), 125 or 150"
if ((getCable(self)=="6") and getCircuitState(self) >= int("100")):
return "125 or 150"
if ((getCable(self)=="10") and getCircuitState(self) >= int("100")):
return "150"
if ((getCable(self)=="630") and getCircuitState(self)<= int("1")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="400" or getCable(self)=='500') and getCircuitState(self)<= int("3")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="630") and getCircuitState(self)<= int("3")):
return "125"
if ((getCable(self)=="300") and getCircuitState(self)<= int("4")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="500") and getCircuitState(self)<= int("4")):
return "125"
if ((getCable(self)=="630") and getCircuitState(self)<= int("4")):
return "150"
if ((getCable(self)=="240") and getCircuitState(self)<= int("5")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="400") and getCircuitState(self)<= int("5")):
return "125"
if ((getCable(self)=="185") and getCircuitState(self)<= int("6")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("6")):
return "125"
if ((getCable(self)=="400" or getCable(self)=="500") and getCircuitState(self)<= int("6")):
return "150"
if ((getCable(self)=="150") and getCircuitState(self)<= int("7")):
return "100(NZ)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("7")):
return "125"
if ((getCable(self)=="150") and getCircuitState(self)<= int("8")):
return "100(AUS)"
if ((getCable(self)=="300") and getCircuitState(self)<= int("8")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("11")):
return "100(NZ)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("10")):
return "100(AUS)"
if ((getCable(self)=="240") and getCircuitState(self)<= int("10")):
return "125"
if ((getCable(self)=="300") and getCircuitState(self)<= int("10")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("12")):
return "100(AUS)"
if ((getCable(self)=="150") and getCircuitState(self)<= int("12")):
return "125"
if ((getCable(self)=="185") and getCircuitState(self)<= int("13")):
return "150"
if ((getCable(self)=="120") and getCircuitState(self)<= int("14")):
return "125"
if ((getCable(self)=="150") and getCircuitState(self)<= int("16")):
return "150"
if ((getCable(self)=="95") and getCircuitState(self)<= int("17")):
return "125"
if ((getCable(self)=="70") and getCircuitState(self)<= int("15")):
return "100(NZ) or 100(AUS)"
if ((getCable(self)=="120") and getCircuitState(self)<= int("19")):
return "150"
if ((getCable(self)=="50") and getCircuitState(self)<= int("19")):
return "100(NZ)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("20")):
return "125"
if ((getCable(self)=="70") and getCircuitState(self)<= int("23")):
return "125"
if ((getCable(self)=="95") and getCircuitState(self)<= int("23")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("24")):
return "100(NZ)"
if ((getCable(self)=="25") and getCircuitState(self)<= int("29")):
return "100(NZ)"
if ((getCable(self)=="25") and getCircuitState(self)<= int("30")):
return "100(AUS)"
if ((getCable(self)=="70") and getCircuitState(self)<= int("30")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("25")):
return "100(AUS)"
if ((getCable(self)=="50") and getCircuitState(self)<= int("30")):
return "125"
if ((getCable(self)=="35") and getCircuitState(self)<= int("38")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("45")):
return "125"
if ((getCable(self)=="25") and getCircuitState(self)<= int("45")):
return "125"
if ((getCable(self)=="50") and getCircuitState(self)<= int("40")):
return "150"
if ((getCable(self)=="25") and getCircuitState(self)<= int("60")):
return "150"
if ((getCable(self)=="35") and getCircuitState(self)<= int("50")):
return "150"
if (getConduitType(self)=="Medium duty corrugated"):
if ((getCable(self)=="4" or getCable(self)=="6" or getCable(self)=="10" or getCable(self)=="16") and getCircuitState(self)<= int("1")):
return "20"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("2")):
return "20"
if ((getCable(self)=="6") and getCircuitState(self)<= int("2")):
return "25"
if ((getCable(self)=="16") and getCircuitState(self)<= int("2")):
return "32"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("3")):
return "20"
if ((getCable(self)=="4") and getCircuitState(self)<= int("3")):
return "25"
if ((getCable(self)=="16") and getCircuitState(self)<= int("3")):
return "32"
if ((getCable(self)=="1") and getCircuitState(self)<= int("4")):
return "20"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("4")):
return "25"
if ((getCable(self)=="16") and getCircuitState(self)<= int("4")):
return "40"
if ((getCable(self)=="6") and getCircuitState(self)<= int("5")):
return "32"
if ((getCable(self)=="10") and getCircuitState(self)<= int("5")):
return "40"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("6")):
return "25"
if ((getCable(self)=="6") and getCircuitState(self)<= int("6")):
return "32"
if ((getCable(self)=="1") and getCircuitState(self)<= int("7")):
return "25"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("8")):
return "32"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("11")):
return "32"
if ((getCable(self)=="1") and getCircuitState(self)<= int("14")):
return "32"
if ((getCable(self)=="6") and getCircuitState(self)<= int("8")):
return "40"
if ((getCable(self)=="1") and getCircuitState(self)<= int("23")):
return "40"
if ((getCable(self)=="1.5") and getCircuitState(self)<= int("19")):
return "40"
if ((getCable(self)=="2.5") and getCircuitState(self)<= int("14")):
return "40"
if ((getCable(self)=="6") and getCircuitState(self)<= int("10")):
return "40"
if (getConduitType(self)=="Medium duty rigid UPVC conduit"):
if ((getCable(self)=="16") and getCircuitState(self)<= int("0")):
return "16"
if ((getCable(self)=="2.5" or getCable(self)=="4" or getCable(self)=="6" or getCable(self)=="10") and getCircuitState(self)<= int("1")):
return "16"
if ((getCable(self)=="6" or getCable(self)=="10" or getCable(self)=="16") and getCircuitState(self)<= int("1")):
return "20"
if ((getCable(self)=="16") and getCircuitState(self)<= int("1")):
return "35"
if ((getCable(self)=="4") and getCircuitState(self)<= int("2")):
return "20"
if ((getCable(self)=="10") and getCircuitState(self)<= int("2")):
return "25"
if ((getCable(self)=="1" or getCable(self)=="1.5") and getCircuitState(self)<= int("3")):
return "16"
if ((getCable(self)=="2.5" ) and getCircuitState(self)<= int("3")):
return "20"
if ((getCable(self)=="6" ) and getCircuitState(self)<= int("3")):
return "25"
if ((getCable(self)=="16" ) and getCircuitState(self)<= int("3")):
return "32"
if ((getCable(self)=="1.5" ) and getCircuitState(self)<= int("5")):
return "20"
if ((getCable(self)=="1" ) and getCircuitState(self)<= int("6")):
return "20"
if ((getCable(self)=="4" ) and getCircuitState(self)<= int("4")):
return "25"
if ((getCable(self)=="2.5" ) and getCircuitState(self)<= int("6")):
return "25"
if ((getCable(self)=="1.5" ) and getCircuitState(self)<= int("8")):
return "25"
if ((getCable(self)=="1" ) and getCircuitState(self)<= int("10")):
return "25"
if ((getCable(self)=="10" ) and getCircuitState(self)<= int("4")):
return "32"
if ((getCable(self)=="6" ) and getCircuitState(self)<= int("6")):
return "32"
if ((getCable(self)=="4" ) and getCircuitState(self)<= int("7")):
return "32"
if ((getCable(self)=="2.5" ) and getCircuitState(self)<= int("11")):
return "32"
if ((getCable(self)=="1.5" ) and getCircuitState(self)<= int("14")):
return "32"
if ((getCable(self)=="1" ) and getCircuitState(self)<= int("17")):
return "32"
if ((getCable(self)=="16" ) and getCircuitState(self)<= int("5")):
return "40"
if ((getCable(self)=="10" ) and getCircuitState(self)<= int("7")):
return "40"
if ((getCable(self)=="6" ) and getCircuitState(self)<= int("10")):
return "40"
if ((getCable(self)=="4" ) and getCircuitState(self)<= int("12")):
return "40"
if ((getCable(self)=="2.5" ) and getCircuitState(self)<= int("17")):
return "40"
if ((getCable(self)=="1.5" ) and getCircuitState(self)<= int("23")):
return "40"
if ((getCable(self)=="1" ) and getCircuitState(self)<= int("28")):
return "40"
if ((getCable(self)=="1" ) and getCircuitState(self)<= int("45")):
return "50"
if ((getCable(self)=="1.5" ) and getCircuitState(self)<= int("38")):
return "50"
if ((getCable(self)=="2.5" ) and getCircuitState(self)<= int("28")):
return "50"
if ((getCable(self)=="4" ) and getCircuitState(self)<= int("20")):
return "50"
if ((getCable(self)=="6" ) and getCircuitState(self)<= int("17")):
return "50"
if ((getCable(self)=="10" ) and getCircuitState(self)<= int("11")):
return "50"
if ((getCable(self)=="16" ) and getCircuitState(self)<= int("8")):
return "50"
else:
return "Invalid input, please check again"
self.conduitResult.configure(text="Number of Conduits: \n" + circuitNo(self), bg='green2', borderwidth="1", relief="raised")
if (circuitNo(self)=="Invalid input, please check again"):
self.conduitResult.configure(bg='firebrick1', borderwidth="2", relief="sunken")
self.circuitNo.configure(text="Number of Circuits: - "+ self.getCircuit.get(), font='Helvetica 9 bold')
# Build the Tk root window for the conduit-sizing guide, fix its size,
# mount the Application widget, and enter the blocking event loop.
# NOTE(review): "Sheather Calbes" in the title looks like a typo for
# "Sheathed Cables" -- confirm the intended wording before changing it.
master = Tk()
master.title("Guide to Max No. of Single-Core Sheather Calbes Installed in Conduit. Table C10")
master.geometry("700x275")
app = Application(master)
master.mainloop()
| StarcoderdataPython |
1625263 | """
Mimic the Dataverse urls for testing
- get user info
- get DDI
- download file
"""
from django.urls import path, re_path
from opendp_apps.dataverses.views import manifest_test_params_view
from opendp_apps.dataverses import mock_dv_views
# Prefix/version segments used to mimic the real Dataverse API URL layout.
MOCK_API_PREFIX = 'api'
MOCK_API_VERSION = 'v1'

# NOTE: literals with no '{...}' placeholders previously carried a pointless
# f-prefix (flake8 F541); they are plain (or raw, for the regex) strings now.
urlpatterns = [
    # Debug view: render the manifest test parameters of an object as a dict.
    re_path(r'dv-info/as-dict/(?P<object_id>[0-9a-f-]+)',
            manifest_test_params_view,
            name='view_as_dict'),
    # Simple POST target used to exercise outgoing requests in tests.
    path('test-dv-post',
         mock_dv_views.view_test_dv_post,
         name='view_test_dv_post'),
    path('dataverse/incoming-test-1',
         mock_dv_views.view_dataverse_incoming_1,
         name='view_dataverse_incoming_1'),
    path('dataverse/incoming-test-2',
         mock_dv_views.view_dataverse_incoming_2,
         name='view_dataverse_incoming_2'),
    # Dataverse version and build numbers
    path(f'{MOCK_API_PREFIX}/{MOCK_API_VERSION}/info/version',
         mock_dv_views.view_get_info_version,
         name='view_get_info_version'),
    # Dataverse server url
    path(f'{MOCK_API_PREFIX}/{MOCK_API_VERSION}/info/server',
         mock_dv_views.view_get_info_server,
         name='view_get_info_server'),
    # Get DDI or schema.org dataset information
    path(f'{MOCK_API_PREFIX}/{MOCK_API_VERSION}/datasets/export',
         mock_dv_views.view_get_dataset_export,
         name='view_get_dataset_export'),
    # Get user information
    path(f'{MOCK_API_PREFIX}/{MOCK_API_VERSION}/users/:me',
         mock_dv_views.view_get_user_info,
         name='view_get_user_info'),
]
"""
https://dataverse.harvard.edu/api/v1/datasets/export?persistentId=doi:10.7910/DVN/OLD7MB&exporter=schema.org
schema.org
""" | StarcoderdataPython |
1721092 | <filename>Proj1_HDR/task2_03_radiance_map.py
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
def read_images(directory, extension='png'):
    """Load all images with the given extension plus their exposure times.

    :param directory: directory holding the exposure images and a list.txt
    :param extension: filename suffix to match (any length, e.g. 'png', 'jpeg')
    :return: (images, exposures) where images is a list of BGR arrays loaded
             by OpenCV and exposures is a float32 array of times in seconds
    """
    # Collect matching files in sorted order so they line up with list.txt.
    # endswith() works for extensions of any length; the previous
    # f[-3:] == extension check silently failed for e.g. 'jpeg'.
    files = sorted(
        os.path.join(directory, f)
        for f in os.listdir(directory)
        if f.endswith(extension)
    )
    images = list([cv2.imread(f) for f in files])
    # list.txt holds "<filename> <value>" pairs, one per line; the stored
    # value is inverted, so the exposure in seconds is its reciprocal.
    # NOTE(review): presumably the stored value is the shutter-speed
    # denominator (e.g. 250 for 1/250 s) -- confirm against the data files.
    exposures = list()
    with open(os.path.join(directory, 'list.txt'), 'r') as f:
        for line in f.readlines():
            fname, exposure = line.strip('\n').split(' ')
            exposures.append(1. / float(exposure))
    exposures = np.float32(exposures)
    return images, exposures
def compute_weights():
    """
    Compute the per-intensity weight used by the HDR algorithm for each of
    the 256 possible pixel values: the triangle ("hat") function
    w(z) = min(z, 255 - z), so the extremes 0 and 255 get weight 0 and the
    mid-range values get the highest weight.

    :return: weights: a list of 256 weights, indexed by pixel intensity.
    """
    print('== Computing weights ==')
    # Single comprehension; the original built an identity list first and
    # then mapped it, which was redundant.
    return [min(z, 255 - z) for z in range(256)]
def sample_rgb_images(images):
    """
    Sample pixel locations across all exposures to construct the Z matrices
    needed by gsolve, one matrix per color channel.
    Z is an NxP matrix, where N is the number of sampled pixels and P is the
    number of input images (exposures). All images must be the same size.

    :param images: a list of images, one per exposure.
    :return: z_red is the Z matrix for the red channel
             z_green is the Z matrix for the green channel
             z_blue is the Z matrix for the blue channel
    """
    print('== Sampling images to construct the Z matrices for each channel == ')
    num_exposures = len(images)  # Value of P.
    '''
    Number of samples should satisfy: N(P-1) > Z_max - Z_min
    This means that we should have: N > (Z_max - Z_min) / (P-1).
    We will use: N = 255 / (P-1) * 2, which satisfies the equation
    '''
    num_samples = round(255 / (num_exposures - 1) * 2)  # Value of N.

    # Evenly spaced flat indices over one image. Using the builtin int (the
    # deprecated np.int alias was removed in NumPy 1.24 and raised
    # AttributeError here).
    # NOTE: assumes img_pixels > num_samples so that exactly num_samples
    # indices are produced; tiny images would make the column assignments
    # below fail.
    img_pixels = images[0].shape[0] * images[0].shape[1]
    step = img_pixels / num_samples
    sample_indices = np.arange(0, img_pixels - 1, step).astype(int)

    # Preallocate space for results.
    z_red = np.zeros((num_samples, num_exposures))
    z_green = np.zeros((num_samples, num_exposures))
    z_blue = np.zeros((num_samples, num_exposures))

    # Column j of each Z matrix holds the samples taken from exposure j.
    for j in range(num_exposures):
        sampled_red, sampled_green, sampled_blue = sample_exposure(images[j], sample_indices)
        z_red[:, j] = sampled_red
        z_green[:, j] = sampled_green
        z_blue[:, j] = sampled_blue
    return z_red, z_green, z_blue
def sample_exposure(image, sample_indices):
    """
    Sample one exposure's three color channels at the given flat pixel indices.

    :param image: a single RGB image (H x W x 3 array)
    :param sample_indices: an array of length N with flat indices to sample at
    :return: sampled_red is an array of the length N with the sample from the red channel
             sampled_green is an array of the length N with the sample from the green channel
             sampled_blue is an array of the length N with the sample from the blue channel
    """
    # Flatten each channel once, then gather the requested pixels from it.
    flat_channels = [image[:, :, channel].flatten() for channel in range(3)]
    sampled_red, sampled_green, sampled_blue = (
        [flat[idx] for idx in sample_indices] for flat in flat_channels
    )
    return sampled_red, sampled_green, sampled_blue
def gsolve(Z,B,l,w):
    """
    Solve for the imaging system response function.
    Given a set of pixel values observed for several pixels in several images with different exposure times,
    this function recovers the camera's log response curve g by solving a
    least-squares system combining data-fitting terms with a smoothness penalty.
    :param Z: Z(i,j) is the pixel values of pixel location number i in image j
    :param B: B(j) is the log delta t, or log shutter speed, for image j
    :param l: l is lambda, the constant that determines the amount of smoothness
    :param w: w(z) is the weighting function value for pixel value z
    :return: g(z) is the log exposure corresponding to pixel value z.
             NOTE(review): unlike the docstring of the original version, only g
             is returned here -- the per-pixel log irradiance lE is discarded.
    """
    n = 256
    # One row per data-fitting equation, plus one anchor row and n-2 smoothness
    # rows; the unknowns are the 256 g values followed by the N log irradiances.
    A = np.zeros((Z.shape[0] * Z.shape[1] + n + 1, n + Z.shape[0]))
    b = np.zeros((A.shape[0], 1))
    # Include the data-fitting equations
    # NOTE(review): k starts at 1 and the g column uses index Z+1, which looks
    # like a carry-over from the 1-indexed MATLAB gsolve; row 0 of A stays all
    # zeros. Behavior is preserved as-is -- confirm before "fixing".
    k = 1
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            wij = w[int(Z[i][j])]
            A[k][int(Z[i][j]) + 1] = wij
            A[k][n + i] = -wij
            b[k][0] = wij * B[j]
            k = k + 1
    # Fix the curve by setting its middle value to 0
    A[k][129] = 1
    k = k + 1
    # Include the smoothness equations: l * w(z) * (g(z-1) - 2*g(z) + g(z+1)) = 0
    for i in range(n-2):
        A[k][i] = l * w[i + 1]
        A[k][i + 1] = -2 * l * w[i + 1]
        A[k][i + 2] = l * w[i + 1]
        k = k + 1
    # Solve the overdetermined system in the least-squares sense (lstsq uses
    # SVD internally); keep only the first n unknowns, i.e. the g curve.
    x = np.linalg.lstsq(A, b)[0]
    g = x.flatten()[:n]
    return g
def nesting(weight, array):
    """
    Look up a per-intensity weight for every element of an image array.

    Each element of ``array`` is truncated to an integer and used as an index
    into ``weight`` (a 256-entry table such as the triangle weights or a
    recovered response curve g).

    :param weight: indexable table of 256 values (Python list or 1-D ndarray)
    :param array: 2-D (H, W) or 3-D (H, W, C) array of intensities in [0, 255]
    :return: float ndarray of the same shape with the looked-up values
    """
    # Vectorized table lookup replaces the original per-pixel Python loops
    # (same results, orders of magnitude faster). astype(int) truncates
    # toward zero for the non-negative intensities, matching int().
    lut = np.asarray(weight, dtype=float)
    return lut[np.asarray(array).astype(int)]
def plot_radiance_map(rmap):
    """
    Display and save a normalized 8-bit visualization of the radiance map.
    Scales the map so its maximum becomes 255, clamps to [0, 255], shows the
    result in an OpenCV window (blocking until a key press) and writes
    radiance_map.jpg to the working directory. The caller passes the *log*
    HDR map, so negative values are possible and are clamped to 0.
    :param rmap: an (H, W, C) float array (the log radiance map)
    """
    height, width, num_channels = rmap.shape
    # Earlier normalization experiments, kept for reference:
    # thres = (np.max(rmap) - np.min(rmap)) * 0.001
    # rmap = (rmap + np.abs(np.min(rmap))) / thres * 255
    # rmap = (rmap - np.min(rmap)) / (np.max(rmap) - np.min(rmap)) * 255
    # Scale so the global maximum maps to 255 (rebinds a scaled copy; the
    # caller's array is not modified).
    rmap = rmap / np.max(rmap) * 255
    # Clamp every channel value into the displayable [0, 255] range.
    for i in range(height):
        for j in range(width):
            for k in range(num_channels):
                rmap[i][j][k] = 0 if rmap[i][j][k] < 0 else rmap[i][j][k]
                rmap[i][j][k] = 255 if rmap[i][j][k] > 255 else rmap[i][j][k]
    # Disabled intensity-histogram diagnostic, kept for reference:
    # intensity = []
    # for i in range(height):
    #     for j in range(width):
    #         intensity.append(np.mean(rmap[i][j]))
    # plt.hist(intensity, bins=400, normed=0, facecolor="blue", edgecolor="black", alpha=0.7)
    # plt.show()
    rmap = rmap.astype(np.uint8)
    cv2.imshow('radiance map', rmap)
    cv2.imwrite('radiance_map.jpg', rmap)
    cv2.waitKey(0)
def compute_hdr_map(images, g_red, g_green, g_blue, weights, ln_dt):
    """
    A helper function which creates the HDR radiance map (Debevec & Malik).
    Also displays/saves a visualization of the log map via plot_radiance_map.
    :param images: a list of the input images, one per exposure, same size
    :param g_red: the camera response for the red channel
    :param g_green: the camera response for the green channel
    :param g_blue: the camera response for the blue channel
    :param weights: the per-intensity weight vector to use
    :param ln_dt: the log of the exposure times
    :return: hdr_map: the HDR radiance map (exp of the weighted log average)
    """
    print("Computing HDR map")
    num_exposures = len(images)
    height, width, num_channels = images[0].shape  # Assume all images are the same size.
    numerator = np.zeros((height, width, num_channels), dtype=float)
    denominator = np.zeros((height, width, num_channels), dtype=float)
    curr_num = np.zeros((height, width, num_channels), dtype=float)
    for i in range(num_exposures):
        # Grab the current image we are processing and split into channels.
        # Uses the builtin float: the deprecated np.float alias was removed
        # in NumPy 1.24 and raised AttributeError here.
        curr_image = images[i].astype(float) + 1e-5  # Add 1e-5 to get rid of zeros.
        curr_red = curr_image[:, :, 0]
        curr_green = curr_image[:, :, 1]
        curr_blue = curr_image[:, :, 2]
        """
        Compute the numerator and denominator for this exposure. Add to cumulative total.
                 sum_{j=1}^{P} (w(Z_ij)[g(Z_ij) - ln dt_j])
        ln E_i = ------------------------------------------
                 sum_{j=1}^{P} (w(Z_ij))
        """
        curr_weight = nesting(weights, curr_image)
        curr_num[:, :, 0] = curr_weight[:, :, 0] * (nesting(g_red, curr_red) - ln_dt[i])
        curr_num[:, :, 1] = curr_weight[:, :, 1] * (nesting(g_green, curr_green) - ln_dt[i])
        curr_num[:, :, 2] = curr_weight[:, :, 2] * (nesting(g_blue, curr_blue) - ln_dt[i])
        # Sum into the numerator and denominator.
        numerator = numerator + curr_num
        denominator = denominator + curr_weight
    ln_hdr_map = numerator / denominator
    hdr_map = np.exp(ln_hdr_map)
    # Plot radiance map (side effect: opens a window and writes a jpg).
    plot_radiance_map(ln_hdr_map)
    return hdr_map
def plot_responses(g_red, g_green, g_blue):
    """
    Plot the recovered camera response curve of each color channel: the
    x-axis is log exposure g(z), the y-axis is the pixel value z in 0..255.
    Blocks until the matplotlib window is closed.
    """
    pixel_values = list(range(256))
    # One curve per channel, labelled by its color name.
    for response_curve, channel_name in ((g_red, 'red'), (g_green, 'green'), (g_blue, 'blue')):
        plt.plot(response_curve, pixel_values, label=channel_name)
    plt.title('response to intensity')
    plt.xlabel('Log Exposure X')
    plt.ylabel('Pixel Value Z')
    plt.show()
def create_hdr_map(lamb, directory, extension):
    """
    A helper function which creates the HDR image from the provided directory of images.
    Pipeline: load exposures -> sample Z matrices -> compute weights ->
    recover per-channel response curves (gsolve) -> build the radiance map.
    Side effects: plots the response curves and the radiance map.
    :param lamb: the smoothing factor (lambda) passed to gsolve
    :param directory: the directory which contains the images and list.txt
    :param extension: the file extension of the images, e.g. 'png'
    :return: the HDR radiance map produced by compute_hdr_map
    """
    print('## Creating HDR image: ', directory)
    # Read in images and exposure times from directory. Take the log of exposure time.
    images, exposure_times = read_images(directory, extension)
    ln_dt = np.log(exposure_times)
    # Sample the images appropriately, per color channel.
    z_red, z_green, z_blue = sample_rgb_images(images)
    # Compute the weighting function needed.
    weights = compute_weights()
    # Solve for the camera response for each color channel.
    print('== Computing camera response for each channel ==')
    g_red = gsolve(z_red, ln_dt, lamb, weights)
    g_green = gsolve(z_green, ln_dt, lamb, weights)
    g_blue = gsolve(z_blue, ln_dt, lamb, weights)
    # Plot response.
    plot_responses(g_red, g_green, g_blue)
    # Compute the HDR radiance map.
    hdr_map = compute_hdr_map(images, g_red, g_green, g_blue, weights, ln_dt)
    return hdr_map
if __name__ == '__main__':
    # Build the HDR radiance map from the bracketed PNG exposures expected in
    # ./src_images (alongside a list.txt of exposure data; see read_images).
    # 50 is the smoothing lambda handed down to gsolve.
    directory = './src_images'
    radiance_map = create_hdr_map(50, directory, 'png')
| StarcoderdataPython |
1715996 | # Generated by Django 3.2.6 on 2021-09-04 03:07
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adjusting the pwa.manifest_url field."""
    dependencies = [
        ('pwas', '0054_alter_pwa_manifest_json'),
    ]
    operations = [
        # Redefine manifest_url as a nullable 250-char field with a
        # 5-character minimum (enforced by MinLengthValidator at the
        # application level, not in the database).
        migrations.AlterField(
            model_name='pwa',
            name='manifest_url',
            field=models.CharField(max_length=250, null=True, validators=[django.core.validators.MinLengthValidator(5)]),
        ),
    ]
| StarcoderdataPython |
3357307 | #!/usr/bin/env python3
import cgi
import cgitb
cgitb.enable()
from templates import login_page, secret_page, after_login_incorrect
import secret
import os
from http.cookies import SimpleCookie
# Parse the submitted login form (fields are None when absent).
# NOTE(review): the original text had dataset-redacted "<PASSWORD>"
# placeholders (syntax errors); they are restored here as secret.password,
# matching the surrounding secret.username comparisons.
form = cgi.FieldStorage()
username = form.getfirst("username")
password = form.getfirst("password")

# True only when the form itself carried the correct credentials.
form_ok = username == secret.username and password == secret.password

# Pull any previously issued credential cookie out of the CGI environment.
# os.environ.get avoids a KeyError when no Cookie header is present.
cookie = SimpleCookie(os.environ.get("HTTP_COOKIE", ""))
cookie_username = cookie["username"].value if "username" in cookie else None
cookie_password = cookie["password"].value if "password" in cookie else None

# A valid cookie authenticates the visitor without re-submitting the form.
cookie_ok = (cookie_username == secret.username
             and cookie_password == secret.password)
if cookie_ok:
    username = cookie_username
    password = cookie_password

# Emit the HTTP headers exactly once, before any body output (the original
# printed a second Content-Type header after the body had already started).
print("Content-Type: text/html")
if form_ok:
    # Persist the verified credentials for subsequent visits.
    # SECURITY NOTE: storing a plaintext password in a cookie is unsafe;
    # a signed session token would be preferable.
    print(f"Set-Cookie: username={username}")
    print(f"Set-Cookie: password={password}")
print()

# Render the page matching the authentication state.
if not username and not password:
    print(login_page())
elif form_ok or cookie_ok:
    print(secret_page(username, password))
else:
    print(after_login_incorrect())
4822573 | <filename>graph_generation/config_params/connection_params.py
class ConnectionParams:
    """Holds a system's read/write connection counts and their combined total.

    dataset_system_connection_count is simply the sum of the write and read
    connection counts.
    """

    def __init__(self, system_write_connection_count, system_read_connection_count):
        total_connections = system_write_connection_count + system_read_connection_count
        self.system_write_connection_count = system_write_connection_count
        self.system_read_connection_count = system_read_connection_count
        self.dataset_system_connection_count = total_connections
| StarcoderdataPython |
3306019 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from d2go.export.api import PredictorExportConfig
from d2go.utils.export_utils import (
D2Caffe2MetaArchPreprocessFunc,
D2Caffe2MetaArchPostprocessFunc,
D2RCNNTracingWrapper,
)
from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP
from mobile_cv.predictor.api import FuncInfo
logger = logging.getLogger(__name__)
def d2_meta_arch_prepare_for_export(self, cfg, inputs, predictor_type):
    """Build the PredictorExportConfig for exporting a D2 meta-arch model.

    Two export paths are handled:
    * torchscript tracing (predictor_type contains both "torchscript" and
      "@tracing"): wrap the model in D2RCNNTracingWrapper.
    * Caffe2, when cfg.MODEL.META_ARCHITECTURE has a registered Caffe2 export
      type: wrap the model in its Caffe2-compatible counterpart, together
      with serializable pre/post-processing function descriptions.
    Raises NotImplementedError for any other predictor_type.
    """
    if "torchscript" in predictor_type and "@tracing" in predictor_type:
        return PredictorExportConfig(
            model=D2RCNNTracingWrapper(self),
            data_generator=D2RCNNTracingWrapper.generator_trace_inputs,
            run_func_info=FuncInfo.gen_func_info(
                D2RCNNTracingWrapper.RunFunc, params={}
            ),
        )
    if cfg.MODEL.META_ARCHITECTURE in META_ARCH_CAFFE2_EXPORT_TYPE_MAP:
        # Wrap the trained model in its Caffe2-compatible meta-arch class.
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
        c2_compatible_model = C2MetaArch(cfg, self)
        # Serializable descriptions of the pre/post-processing callables.
        preprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPreprocessFunc,
            params=D2Caffe2MetaArchPreprocessFunc.get_params(cfg, c2_compatible_model),
        )
        postprocess_info = FuncInfo.gen_func_info(
            D2Caffe2MetaArchPostprocessFunc,
            params=D2Caffe2MetaArchPostprocessFunc.get_params(cfg, c2_compatible_model),
        )
        preprocess_func = preprocess_info.instantiate()
        return PredictorExportConfig(
            model=c2_compatible_model,
            # Caffe2MetaArch takes a single tuple as input (which is the return of
            # preprocess_func), data_generator requires all positional args as a tuple.
            data_generator=lambda x: (preprocess_func(x),),
            preprocess_info=preprocess_info,
            postprocess_info=postprocess_info,
        )
    raise NotImplementedError("Can't determine prepare_for_tracing!")
| StarcoderdataPython |
85527 | <reponame>amexias/code-vault
import json
import datetime
class fileop:
    """Thin JSON-on-disk persistence helper for a piece of text.

    Keeps the text and its length; can dump it to settings.txt or to a
    numbered '<filenum>.txt' file, and read settings.txt back.
    """

    def __init__(self, Text):
        self.text = Text
        # Cache the length of the stored text for callers.
        self.tlength = len(Text)

    def writetext(self):
        """Serialize the stored text to settings.txt as JSON."""
        with open('settings.txt', 'w') as handle:
            json.dump(self.text, handle)

    def readtext(self):
        """Load and return whatever JSON value settings.txt currently holds."""
        with open('settings.txt') as handle:
            return json.load(handle)

    def writeorder(self, filenum):
        """Serialize the stored text to '<filenum>.txt' as JSON."""
        with open('{0}.txt'.format(filenum), 'w') as handle:
            json.dump(self.text, handle)
class order:
def __init__(self):
pass
class settings:
def __init__(self,settingsText):
self.setTxt = settingsText
p=fileop(self.setTxt)
p.writetext()
| StarcoderdataPython |
3285322 | from __future__ import (
absolute_import,
unicode_literals,
)
import codecs
import sys
from setuptools import (
find_packages,
setup,
)
from pysoa import __version__
def readme():
    """Return the contents of README.rst decoded as UTF-8 text."""
    with open('README.rst', 'rb') as readme_file:
        return readme_file.read().decode('utf8')
# Runtime dependencies; environment markers gate legacy backports to the
# Python versions that need them.
install_requires = [
    'attrs>=18.2,<20',
    'conformity~=1.26',
    'currint>=1.6,<3',
    'enum34;python_version<"3.4"',
    'msgpack~=0.6,>=0.6.2',
    'pymetrics~=1.0',
    'pytz>=2019.1',
    'redis~=2.10',
    'six~=1.10',
    'typing~=3.7.4;python_version<"3.5"',
    'typing-extensions~=3.7.4;python_version<"3.8"',
    # For context, see the comment in pysoa.common.compatibility. Due to the peculiarities of the patching detailed
    # there, we pin these dependencies to hard versions, or else things might break when they update. When new versions
    # come out, we'll bump and adjust our patching or, hopefully, relax and remove our patching.
    'contextvars==2.4;python_version>"3.4" and python_version<"3.7"',
    'aiocontextvars==0.2.2;python_version>"3.4" and python_version<"3.7"',
]

# Extra dependencies for the exported test helpers (see extras_require below).
test_helper_requirements = [
    'mock>=2.0;python_version<"3.3"',
]

# Test-plan support builds on the helper requirements.
test_plan_requirements = test_helper_requirements + [
    'pyparsing~=2.2',
    'pytest>4.2,<6',
    'pytest-asyncio;python_version>"3.4"',
]

# Everything needed to run the project's own test suite.
tests_require = [
    'coverage~=4.5',
    'factory_boy~=2.11.1',
    'freezegun~=0.3',
    'lunatic-python-universal~=2.1',
    'mockredispy~=2.9',
    'mypy~=0.740;python_version>"3.4"',
    'parameterized~=0.7',
] + test_plan_requirements
# Package metadata and build configuration for PySOA.
setup(
    name='pysoa',
    version=__version__,
    author='Eventbrite, Inc.',
    author_email='<EMAIL>',
    description='A Python library for writing (micro)services and their clients',
    long_description=readme(),
    url='http://github.com/eventbrite/pysoa',
    packages=list(map(str, find_packages(include=['pysoa', 'pysoa.*']))),
    package_data={
        str('pysoa'): [str('py.typed')], # PEP 561,
    },
    zip_safe=False, # PEP 561
    include_package_data=True,
    install_requires=install_requires,
    tests_require=tests_require,
    # Only pull in pytest-runner when a test-related command is being invoked.
    setup_requires=['pytest-runner'] if {'pytest', 'test', 'ptr'}.intersection(sys.argv) else [],
    test_suite='tests',
    extras_require={
        'docs': [
            'conformity[docs]~=1.26,>=1.26.4',
            'django~=1.11',
            'sphinx~=2.2;python_version>="3.6"',
        ] + test_plan_requirements,
        'testing': tests_require,
        'test_helpers': test_helper_requirements,
        'test_plans': test_plan_requirements,
    },
    entry_points={
        # Expose the PySOA pytest plugins (test plans and fixtures).
        'pytest11': [
            'pysoa_test_plan=pysoa.test.plugins.pytest.plans',
            'pysoa_test_fixtures=pysoa.test.plugins.pytest.fixtures',
        ]
    },
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development',
    ],
    project_urls={
        'Documentation': 'https://pysoa.readthedocs.io',
        'Issues': 'https://github.com/eventbrite/pysoa/issues',
        'CI': 'https://travis-ci.org/eventbrite/pysoa/',
    },
)
1740989 | <reponame>mickahell/websites
import streamlit as st
def app():
    """Render the Online Quantum Lab page: intro, access links, available
    libraries, GitHub/DockerHub badges, citation and terms of use."""
    # Intro banner with links to the three hosted lab environments.
    header = """
    <div align="center">
    <h1>Online Quantum Lab</h1>
    Here an online version of my Docker images for <b>Quantum development</b> and <b>Quantum experiments</b>.
    This platform is only for prototype, testing experiments and science <b>vulgarisation/demo</b>.
    <h2>Access</h2>
    <table>
    <tbody>
    <tr>
    <td align="center"><a href="http://qiskit.xtraorbitals.xyz"><b>Qiskit</b></a></td>
    <td align="center"><a href="http://pennylane.xtraorbitals.xyz"><b>Pennylane</b></a></td>
    <td align="center"><a href="http://cirq.xtraorbitals.xyz"><b>Cirq</b></a></td>
    </tr>
    </tbody>
    </table>
    </div>
    """
    st.markdown(header, unsafe_allow_html=True)
    lib = """
    <br /><br />
    <h2>Libs available</h2>
    """
    st.markdown(lib, unsafe_allow_html=True)
    # One expander per lab listing its preinstalled packages.
    col0, col1, col2, col3 = st.columns(4)
    lib_commun = col0.expander("Common")
    lib_commun.write("```python3.8, networkx, numpy, matplotlib, notebook, pandas, scipy, tk```")
    libs_qiskit = col1.expander("Qiskit")
    libs_qiskit.write("```qiskit, qiskit[visualization], qiskit-nature```")
    libs_qml = col2.expander("Pennylane / QML")
    libs_qml.write("```autograd, pennylane, pennylane-sf, pennylane-qiskit```")
    libs_cirq = col3.expander("Cirq")
    libs_cirq.write("```cirq, cirq-core[contrib], texlive-latex-base, latexmk```")
    # Project badges and citation information.
    body = """
    ## GitHub
    [](https://github.com/mickahell/quantum_lab/actions/workflows/docker-image.yml)
    [](https://github.com/mickahell/quantum_lab/actions/workflows/docker-tag.yml)
    [](https://github.com/mickahell/quantum_lab/releases)
    ## DockerHub
    [](https://hub.docker.com/r/mickahell/quantum_lab_qiskit)
    [](https://hub.docker.com/r/mickahell/quantum_lab_qml)
    [](https://hub.docker.com/r/mickahell/quantum_lab_qsharp)
    [](https://hub.docker.com/r/mickahell/quantum_lab_myqlm)
    [](https://hub.docker.com/r/mickahell/quantum_lab_simulaqron)
    [](https://hub.docker.com/r/mickahell/quantum_lab_cirq)
    ## Citation
    If you use my work, please cite as : <pre>Quantum Lab: Docker image for quantum laboratory, <NAME>, 2021, DOI: <a href=https://doi.org/10.5281/zenodo.4664195>10.5281/zenodo.4664195</a></pre>
    """
    st.markdown(body, unsafe_allow_html=True)
    # DOI badge sits in a narrow right-hand column next to the citation.
    doi_col0, doi_col1 = st.columns([4, 1])
    doi = """
    [](https://zenodo.org/badge/latestdoi/343446026)
    """
    doi_col1.markdown(doi)
    footer = """
    ### Bug / Feature
    If you have an idea of features do not hesitate and create an **[issue](https://github.com/mickahell/quantum_lab/issues/new)**.
    """
    st.markdown(footer, unsafe_allow_html=True)
    termofuse = """
    ## Term of use
    Anyone can use these quantum laboratories freely without any limitation, the ony limits are the one of the system. Max memory for a program is limit to 500MB.
    It doesn't have the purpose to be as evolve as the <a href="https://quantum-computing.ibm.com/">IBM QC platform</a> or the <a href="https://quantumai.google/cirq">Google Cirq colab platform</a>.
    These laboratories doesn't provide private access, means every notebook created will stay publicly available until someone delete it. If you don't want to share your work, feel free to delete your files.
    Owner of the application is not be responsable of any data produice or loose and he doesn't own any of the data produice. Indeed all the data available in the laboratories are in the opensource field.
    Anyone can import work and share it inside the application, if you want to share tutorial and keep them protect from deleting feel free to reach an issue on the [GitHUb repository](https://github.com/mickahell/websites/issues/new) or to send an email to my [GitHub email](https://github.com/mickahell).
    """
    st.markdown(termofuse, unsafe_allow_html=True)
| StarcoderdataPython |
81272 | import psycopg2
import sys
from handHud import *
import time
from psycopg2.extensions import AsIs
import copy
from multiprocessing import Pool
import os
def connection():
    """Open a connection to the local ``new`` PostgreSQL database.

    Returns a (connection, cursor) pair.  If the connection cannot be
    established the traceback info is printed and the whole process exits.
    """
    try:
        # NOTE(review): credentials are hard-coded; consider reading them
        # from the environment or a config file instead.
        conn = psycopg2.connect(database="new", user="postgres",
                                password="<PASSWORD>", host='localhost',
                                port='5432')
        # print('super')
        curs = conn.cursor()
    except psycopg2.OperationalError:
        print(sys.exc_info())
        print('connection failed')
        sys.exit()
    return conn, curs
def drop_tables(cur, con):
    """Drop every table created by create_schema (testing helper used to
    empty the database when needed)."""
    statements = (
        ''' DROP TABLE IF EXISTS public.hands_info CASCADE''',
        ''' DROP TABLE IF EXISTS public.hands CASCADE''',
        ''' DROP TABLE IF EXISTS public.players CASCADE''',
        ''' DROP TABLE IF EXISTS public.positions CASCADE''',
        ''' DROP TABLE IF EXISTS public.player_hand CASCADE''',
        ''' DROP TABLE IF EXISTS public.hud CASCADE''',
    )
    for statement in statements:
        cur.execute(statement)
    con.commit()
def create_schema(cur, con):
    """Create the tracker's tables: hands, players, positions, player_hand,
    hands_info and hud.

    Stat columns for player_hand, hands_info and hud are generated from the
    ``hud_types()`` classmethods of the parser classes imported from handHud.
    NOTE(review): the CREATE statements have no IF NOT EXISTS, so running
    this against an already-initialized database raises -- confirm whether
    the caller relies on dropping first.
    """
    def hands(c):
        # Raw hand texts; the PokerStars hand number is unique per hand.
        c.execute('''
        CREATE TABLE public.hands (
        id SERIAL PRIMARY KEY UNIQUE,
        number BIGINT UNIQUE,
        txt TEXT
        )''')
    def players(c):
        # One row per distinct player name seen in the histories.
        c.execute('''
        CREATE TABLE public.players (
        id SERIAL PRIMARY KEY UNIQUE,
        name VARCHAR(30) UNIQUE
        )''')
    def positions(c):
        # Fixed lookup table of 6-max table positions, pre-populated here.
        c.execute('''
        CREATE TABLE public.positions (
        id SMALLINT PRIMARY KEY UNIQUE,
        name CHAR(3)
        )''')
        c.execute('''
        INSERT INTO public.positions (id, name) VALUES (1, 'BU');
        INSERT INTO public.positions (id, name) VALUES (2, 'BB');
        INSERT INTO public.positions (id, name) VALUES (3, 'SB');
        INSERT INTO public.positions (id, name) VALUES (4, 'CO');
        INSERT INTO public.positions (id, name) VALUES (5, 'MP');
        INSERT INTO public.positions (id, name) VALUES (6, 'UTG');
        ''')
    def player_hand(c):
        # Generated column list ends with a trailing ', ', which the
        # PRIMARY KEY clause that follows keeps syntactically valid.
        types = PlayerGeneral.hud_types()
        columns = ''
        for k, v in types.items():
            columns += k + ' ' + v + ', '
        c.execute('''
        CREATE TABLE public.player_hand (
        players_id INTEGER,
        hands_id INTEGER,
        %s
        PRIMARY KEY (players_id, hands_id),
        FOREIGN KEY (players_id) REFERENCES players (id),
        FOREIGN KEY (hands_id) REFERENCES hands (id),
        FOREIGN KEY (pos_nr) REFERENCES positions (id)
        )''' % columns)
    def hands_info(c):
        # Per-hand aggregate columns; trailing ', ' is stripped because no
        # clause follows the generated list here.
        types = HandGeneral.hud_types()
        columns = ''
        for k, v in types.items():
            columns += k + ' ' + v + ', '
        columns = columns[:-2]
        c.execute('''
        CREATE TABLE public.hands_info (
        hands_id INTEGER PRIMARY KEY UNIQUE REFERENCES hands (id),
        %s
        )''' % columns)
    def hud(c):
        # Postflop stat classes get one column per street (suffixes
        # _f/_t/_r = flop/turn/river); preflop stats get a single column.
        classes_post = [Bet, Db, Cb, BVsMcb, Float, Aggression]
        classes2 = [Preflop]
        post = ['_f', '_t', '_r']
        columns = ''
        for cls in classes2:
            types = cls.hud_types()
            for k, v in types.items():
                columns += k + ' ' + v + ', '
        for cls in classes_post:
            types = cls.hud_types()
            for k, v in types.items():
                for el in post:
                    columns += k + el + ' ' + v + ', '
        c.execute('''
        CREATE TABLE public.hud (
        players_id INTEGER,
        hands_id INTEGER,
        %s
        PRIMARY KEY (players_id, hands_id),
        FOREIGN KEY (players_id) REFERENCES players (id),
        FOREIGN KEY (hands_id) REFERENCES hands (id)
        )''' % columns)
    hands(cur)
    players(cur)
    positions(cur)
    player_hand(cur)
    hands_info(cur)
    hud(cur)
    con.commit()
def fill_tables(cur, con, hand_huds):
    """Insert a batch of parsed HandHud objects into the database.

    For each non-None hand the helpers write to hands, players, hands_info,
    player_hand and hud (in that order); the whole batch is committed once
    at the end.
    NOTE(review): the inner helpers accept a cursor parameter ``c`` but
    mostly use the enclosing ``cur`` closure instead; player names are
    %-interpolated straight into SQL (injection/quoting risk -- prefer
    psycopg2 parameter binding); and the bare ``except:`` blocks hide the
    real error before exiting the process.
    """
    def hands(c, hand_hud):
        # Insert the raw hand row; duplicate hand numbers are skipped.
        dct = hand_hud.hands
        try:
            columns = dct.keys()
            values = [dct[col] for col in columns]
            ins_stment = 'INSERT INTO hands (%s) VALUES %s' \
                         ' ON CONFLICT DO NOTHING'
            cur.execute(ins_stment, (AsIs(','.join(columns)), tuple(values)))
        except:
            print('KONIEC')
            sys.exit()
    def players(c, hand_hud):
        # Make sure every player seen in this hand exists in players.
        dct = hand_hud.players
        for k, v in dct.items():
            column, player = tuple(v.items())[0]
            # NOTE(review): ``player`` is interpolated without quotes here
            # but with quotes in the SELECTs below -- presumably the stored
            # value already carries its own quoting; confirm.
            cur.execute('''
            INSERT INTO players (name) VALUES (%s)
            ON CONFLICT DO NOTHING
            ''' % player)
    def hands_info(c, hand_hud):
        # Per-hand aggregate stats, keyed by the hands row id.
        nr = hand_hud.get_number()
        c.execute('''
        SELECT id FROM public.hands WHERE number = %s
        ''' % nr)
        hand_id = c.fetchone()[0]
        dct = hand_hud.hands_info
        dct['hands_id'] = hand_id
        try:
            columns = dct.keys()
            values = [dct[col] for col in columns]
            ins_stment = 'INSERT INTO hands_info (%s) VALUES %s'
            cur.execute(ins_stment, (AsIs(','.join(columns)), tuple(values)))
        except:
            print('KONIEC')
            sys.exit()
    def player_hand(c, hand_hud):
        # Per-player, per-hand stats; resolves foreign keys first.
        nr = hand_hud.get_number()
        c.execute('''
        SELECT id FROM public.hands WHERE number = %s
        ''' % nr)
        hand_id = c.fetchone()[0]
        dct = hand_hud.player_hand
        tmp = copy.copy(dct)
        for player in tmp:
            # plr = player.replace(', '')
            dct[player]['hands_id'] = hand_id
            c.execute('''
            SELECT id FROM public.players WHERE name = '%s'
            ''' % player)
            player_id = c.fetchone()[0]
            dct[player]['players_id'] = player_id
            try:
                columns = dct[player].keys()
                values = [dct[player][col] for col in columns]
                ins_stment = 'INSERT INTO player_hand (%s) VALUES %s'
                cur.execute(ins_stment, (AsIs(','.join(columns)),
                                         tuple(values)))
            except:
                print('KONIEC')
                sys.exit()
    def hud(c, hand_hud):
        # Per-player, per-hand HUD stat columns (same shape as player_hand).
        nr = hand_hud.get_number()
        c.execute('''
        SELECT id FROM public.hands WHERE number = %s
        ''' % nr)
        hand_id = c.fetchone()[0]
        dct = hand_hud.hud
        tmp = copy.copy(dct)
        for player in tmp:
            dct[player]['hands_id'] = hand_id
            c.execute('''
            SELECT id FROM public.players WHERE name = '%s'
            ''' % player)
            player_id = c.fetchone()[0]
            dct[player]['players_id'] = player_id
            try:
                columns = dct[player].keys()
                values = [dct[player][col] for col in columns]
                ins_stment = 'INSERT INTO hud (%s) VALUES %s'
                cur.execute(ins_stment, (AsIs(','.join(columns)),
                                         tuple(values)))
            except:
                print('KONIEC')
                sys.exit()
    # None entries are hands that failed to parse (see make_hud).
    for hnd_hud in hand_huds:
        if hnd_hud is not None:
            hands(cur, hnd_hud)
            players(cur, hnd_hud)
            hands_info(cur, hnd_hud)
            player_hand(cur, hnd_hud)
            hud(cur, hnd_hud)
    con.commit()
def read_hands(data, limit=-1):
    """Split a PokerStars hand-history file into per-hand lists of lines.

    Parameters
    ----------
    data : str
        Path of the history file to read.
    limit : int, optional
        Maximum number of hands to return; any value <= 0 means no limit.

    Returns
    -------
    list[list[str]]
        One list of raw lines per hand, in file order.
    """
    import re
    pat = re.compile(r'^PokerStars Hand #\d+:.*')
    with open(data) as plk:
        lines = plk.readlines()
    # Collect the start index of each hand header.  When a limit is set,
    # scan for one extra header so the end boundary of the last wanted
    # hand is known.  (The previous implementation always dropped the
    # final collected hand once the limit was reached, returning limit-1
    # hands instead of limit.)
    starts = []
    for i, line in enumerate(lines):
        if pat.match(line):
            starts.append(i)
            if limit > 0 and len(starts) > limit:
                break
    wanted = len(starts) if limit <= 0 else min(limit, len(starts))
    hands_lst = []
    for i in range(wanted):
        end = starts[i + 1] if i + 1 < len(starts) else len(lines)
        hands_lst.append(lines[starts[i]:end])
    return hands_lst
def make_hud(hand):
    """Pool worker: build a HandHud for *hand*, or None if parsing fails.

    Downstream code (fill_tables) skips None entries, so failures must not
    raise out of the worker.
    """
    try:
        return HandHud(hand)
    except Exception:
        # NOTE(review): the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; Exception is narrow enough here.
        return None
def make_hand(txt):
    """Pool worker: parse one raw hand (its list of lines) into a Reka object."""
    return Reka(txt)
def folder_to_db(folder):
    """Return the names of files in *folder* whose names end with 'txt'."""
    entries = os.listdir(folder)
    return [entry for entry in entries if entry.endswith('txt')]
if __name__ == '__main__':
    # Import every history file found in ./hands/ into the database and
    # move processed files to ./hands_backup/.
    conn, curs = connection()
    # drop_tables(curs, conn)
    create_schema(curs, conn)
    folder_base = os.getcwd() + '/hands/'
    if not os.path.exists(os.getcwd() + '/hands_backup/'):
        os.makedirs(os.getcwd() + '/hands_backup/')
    folder_back = os.getcwd() + '/hands_backup/'
    # i = files processed, cnt = hands processed (for the rate printout).
    i, cnt = 0, 0
    t = time.time()
    for file in folder_to_db(folder_base):
        dirc = folder_base + file
        try:
            hands_t = read_hands(dirc)
        # NOTE(review): bare except silently skips unreadable files --
        # consider narrowing to OSError and logging the failure.
        except:
            continue
        i += 1
        print('File number: ', i)
        print('Making hands...')
        # Parse hands and build their HUD stats across 4 worker processes.
        with Pool(processes=4) as p:
            hands = p.map(make_hand, hands_t, chunksize=1)
            huds = p.map(make_hud, hands, chunksize=1)
        os.rename(dirc, folder_back + file)
        cnt += len(huds)
        print('Filling...')
        fill_tables(curs, conn, huds)
        print('Filled. Average time: ', round(cnt / (time.time() - t), 2),
              'hands/s')
        print()
| StarcoderdataPython |
1721739 | <reponame>controversial/SocialNPHS
import os
import livejson
# Directory this module lives in; the user database sits next to it.
localdir = os.path.dirname(os.path.abspath(__file__))
dbpath = os.path.join(localdir, "users.json")
# livejson.File keeps the on-disk JSON in sync with every mutation.
students = livejson.File(dbpath, pretty=True, sort_keys=True)
| StarcoderdataPython |
55693 | <gh_stars>10-100
from __future__ import absolute_import, unicode_literals
import unittest
from django.test import override_settings
from wagtaildraftail import blocks, draft_text, forms, widgets
class DraftailTextBlockTestCase(unittest.TestCase):
    """Unit tests for blocks.DraftailTextBlock: default handling, field/widget
    wiring, python conversion, and searchable-content extraction."""
    def test_get_default_with_string_default(self):
        # A plain string Meta.default must be wrapped into a DraftText node.
        class StringDefaultDraftailTextBlock(blocks.DraftailTextBlock):
            class Meta:
                default = '{}'
        block = StringDefaultDraftailTextBlock()
        default = block.get_default()
        expected_default = draft_text.DraftText('{}')
        self.assertIsInstance(default, draft_text.DraftText)
        self.assertEqual(default, expected_default)
    def test_get_default_with_node_default(self):
        # A DraftText Meta.default must be passed through unchanged.
        class NodeDefaultDraftailTextBlock(blocks.DraftailTextBlock):
            class Meta:
                default = draft_text.DraftText('{}')
        block = NodeDefaultDraftailTextBlock()
        default = block.get_default()
        expected_default = draft_text.DraftText('{}')
        self.assertIsInstance(default, draft_text.DraftText)
        self.assertEqual(default, expected_default)
    def test_field_class(self):
        block = blocks.DraftailTextBlock()
        self.assertIsInstance(block.field, forms.SerializedJSONField)
    @override_settings(WAGTAILADMIN_RICH_TEXT_EDITORS={
        'test_editor': {
            'WIDGET': 'wagtaildraftail.widgets.DraftailTextArea',
        }
    })
    def test_field_is_initialized_with_widget(self):
        # The editor name must resolve to the widget configured in settings.
        block = blocks.DraftailTextBlock(editor='test_editor')
        self.assertIsInstance(block.field.widget, widgets.DraftailTextArea)
    def test_field_is_initialized_with_options(self):
        # Keyword options must be forwarded to the underlying form field.
        options = {'required': False, 'help_text': 'weee'}
        block = blocks.DraftailTextBlock(**options)
        self.assertEqual(block.field.required, options['required'])
        self.assertEqual(block.field.help_text, options['help_text'])
    def test_to_python(self):
        value = '{"entityMap": {}, "blocks": [{"entityRanges": [], "inlineStyleRanges": [{"style": "BOLD", "length": 7, "offset": 0}], "type": "unstyled", "text": "Cupcake ipsum dolor sit amet muffin drag\u00e9e cupcake biscuit...", "depth": 0, "key": "en564", "data": {}}]}' # noqa: E501
        python_value = blocks.DraftailTextBlock().to_python(value)
        expected_python_value = draft_text.DraftText(value)
        self.assertIsInstance(python_value, draft_text.DraftText)
        self.assertEqual(python_value, expected_python_value)
    def test_value_from_form(self):
        value = '{"entityMap": {}, "blocks": [{"entityRanges": [], "inlineStyleRanges": [{"style": "BOLD", "length": 7, "offset": 0}], "type": "unstyled", "text": "Cupcake ipsum dolor sit amet muffin drag\u00e9e cupcake biscuit...", "depth": 0, "key": "en564", "data": {}}]}' # noqa: E501
        form_value = blocks.DraftailTextBlock().value_from_form(value)
        expected_form_value = draft_text.DraftText(value)
        self.assertIsInstance(form_value, draft_text.DraftText)
        self.assertEqual(form_value, expected_form_value)
    def test_get_searchable_content_with_string_value(self):
        # Raw ContentState JSON must be rendered to HTML for search indexing.
        value = '{"entityMap": {}, "blocks": [{"entityRanges": [], "inlineStyleRanges": [{"style": "BOLD", "length": 7, "offset": 0}], "type": "unstyled", "text": "Cupcake ipsum dolor sit amet muffin drag\u00e9e cupcake biscuit...", "depth": 0, "key": "en564", "data": {}}]}' # noqa: E501
        searchable_content = blocks.DraftailTextBlock().get_searchable_content(value)
        expected_searchable_content = [
            '<p><strong>Cupcake</strong> ipsum dolor sit amet muffin drag\u00e9e cupcake biscuit...</p>']
        self.assertEqual(searchable_content, expected_searchable_content)
    def test_get_searchable_content_with_node_value(self):
        # Same as above but starting from an already-wrapped DraftText node.
        value = draft_text.DraftText('{"entityMap": {}, "blocks": [{"entityRanges": [], "inlineStyleRanges": [{"style": "BOLD", "length": 7, "offset": 0}], "type": "unstyled", "text": "Cupcake ipsum dolor sit amet muffin drag\u00e9e cupcake biscuit...", "depth": 0, "key": "en564", "data": {}}]}') # noqa: E501
        searchable_content = blocks.DraftailTextBlock().get_searchable_content(value)
        expected_searchable_content = [
            '<p><strong>Cupcake</strong> ipsum dolor sit amet muffin drag\u00e9e cupcake biscuit...</p>']
        self.assertEqual(searchable_content, expected_searchable_content)
| StarcoderdataPython |
1747279 | import machine
import network
import utime
import bme280
import ambient
# Wi-Fi credentials -- placeholders, set real values before flashing.
ssid = "ess-id"
password = "<PASSWORD>"
# BME280 sensor on I2C: SCL on GPIO5, SDA on GPIO4.
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4))
bme = bme280.BME280(i2c=i2c)
# Ambient channel used for uploads.  The original code passed ``ch-id``,
# which Python parses as the subtraction ``ch - id`` and raises NameError
# at runtime; use a real integer channel id here.
channel_id = 0  # TODO: set your Ambient channel id
am = ambient.Ambient(channel_id, 'write-key')
station = network.WLAN(network.STA_IF)
station.active(True)
station.connect(ssid, password)
# Busy-wait until the station has associated with the access point.
while not station.isconnected():
    pass
print(station.ifconfig())
# Read and upload forever, once every 10 minutes.
while True:
    print(bme.values)
    data = bme.read_compensated_data()
    # read_compensated_data() returns scaled integers; presumably
    # (temp*100, pressure*25600, humidity*1024) given the divisors below --
    # confirm against the bme280 driver in use.
    r = am.send({'d1': data[0] / 100.0, 'd2': data[2] / 1024.0, 'd3': data[1] / 25600.0})
    print(r.status_code)
    r.close()
    utime.sleep(600)
| StarcoderdataPython |
import torch
import torch.nn as nn

from transformers import *
class BertForRanking(nn.Module):
    """BERT-based pointwise ranker.

    Scores an encoded text pair with a linear head on top of BERT's pooled
    output, optionally concatenating a precomputed raw score (e.g. a
    retrieval score) as one extra feature.
    """

    def __init__(self):
        super(BertForRanking, self).__init__()
        # NOTE(review): the original assigned the encoder to a local
        # ``model`` variable (via an undefined ``model_class``) while
        # forward() used ``self.bert``; store it on the module so its
        # parameters are registered and forward() works.
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        feature_dim = 768  # hidden size of bert-base
        self.tanh = nn.Tanh()
        # Head over the pooled output alone, and over pooled output + raw score.
        self.dense = nn.Linear(feature_dim, 1)
        self.dense_p = nn.Linear(feature_dim + 1, 1)

    def forward(self, inst, tok, mask, raw_score=None):
        """Return (score, pooled_output).

        ``inst``/``tok``/``mask`` are the BERT input ids, token type ids and
        attention mask; ``raw_score``, when given, is assumed to be a
        per-example feature tensor of shape (batch,).
        """
        output = self.bert(inst, token_type_ids=tok, attention_mask=mask)
        if raw_score is not None:
            # Fixed from the original: the condition tested an undefined
            # name (``score_feature``) and dense_p was fed the bare pooled
            # output (768 dims) instead of the 769-dim concatenation.
            logits = torch.cat([output[1], raw_score.unsqueeze(1)], 1)
            score = self.tanh(self.dense_p(logits)).squeeze(-1)
        else:
            score = self.tanh(self.dense(output[1])).squeeze(-1)
        return score, output[1]
| StarcoderdataPython |
1662600 | import re
import sqlite3
import time
#########################################################################
# Base class for generating a catebot response. This is intended to be a parent to classes that
# implement each type of response with overrides specific to them. The classes that are expected to be
# overridden are:
#
# getResponse(self, requests)
# getContextLink(self, key, httpLocation)
# getOverflowComment(self, keys)
# linkCrossReferences(self, response)
# parsedRequests(self, requests, includeRanges = True)
#
# The initializer is called with a dictionary, a base URL for links, and a Configuration object.
#
# NOTE: The base class is implemented with the methods that are expected to be overriden. In addition to serving
# as an example of how the overrides should be written, they also implement the behavior expected when quoting
# the CCC.
#
#########################################################################
class Response:
    """Base class for generating a catebot reddit response.

    Subclasses override getResponse, getContextLink, getOverflowComment,
    linkCrossReferences and parsedRequests per quotable source; this base
    implementation quotes the Catechism of the Catholic Church (CCC),
    whose dictionary maps paragraph number -> (content, location).
    """

    def __init__(self, dictionary, baseURL, configuration):
        """Store the source dictionary, link base URL and bot Configuration."""
        self._dictionary = dictionary
        self._baseURL = baseURL
        self._configuration = configuration

    def getCharLimit(self):
        """Character budget for one comment (reddit's hard limit is 10,000)."""
        return 9500

    def getCommentFooter(self):
        """Footer with version/links appended to every comment the bot posts."""
        return ('\n***\nCatebot ' + self._configuration.version + ' links: [Source Code](https://github.com/konohitowa/catebot)'
                + ' | [Feedback](https://github.com/konohitowa/catebot/issues)'
                + ' | [Contact Dev](http://www.reddit.com/message/compose/?to=kono_hito_wa)'
                + ' | [FAQ](https://github.com/konohitowa/catebot/blob/master/docs/CateBot%20Info.md#faq)'
                + ' | [Changelog](https://github.com/konohitowa/catebot/blob/master/docs/CHANGELOG.md)')

    def getOverflowHeader(self, singular, plural, number):
        """Header line used when the quoted text exceeds the character limit."""
        noun = singular
        if number > 1:
            noun = plural
        return 'The contents of the ' + noun + ' you quoted exceed the character limit ([' + str(self.getCharLimit()) + '](https://github.com/konohitowa/catebot/blob/master/docs/CateBot%20Info.md#wait-ive-counted-the-characters-and-i-didnt-hit-the-limit) characters). Instead, here are links to the ' + noun + '...\n\n'

    def parsedRequests(self, requests, includeRanges = True):
        """Expand raw request strings (with commas and ranges) into a list of
        paragraph keys present in the dictionary."""
        validRequests = list()
        for request in requests:
            request = re.sub(r"\s+","",request)
            if ',' in request:
                sublist = request.split(',')
            else:
                sublist = [ request ]
            for subrequest in sublist:
                if '-' in subrequest:
                    startingRequest = subrequest.partition('-')[0]
                    if includeRanges:
                        endingRequest = subrequest.partition('-')[2]
                        if int(startingRequest) < int(endingRequest)+1:
                            for key in range(int(startingRequest), int(endingRequest)+1):
                                if str(key) in self._dictionary:
                                    validRequests.append(str(key))
                    else:
                        # NOTE(review): unlike the other branches this does
                        # not check membership in the dictionary -- confirm
                        # whether callers with includeRanges=False rely on it.
                        validRequests.append(startingRequest)
                elif subrequest in self._dictionary:
                    validRequests.append(subrequest)
        return validRequests

    def getResponse(self, requests):
        """Return (success, reddit comment) quoting the requested paragraphs.

        Falls back to a links-only comment when the text exceeds the limit;
        returns (False, "") when no request was valid.
        """
        validRequests = self.parsedRequests(requests)
        if len(validRequests) > 0:
            comment = ''
            for request in validRequests:
                content,location = self._dictionary[request]
                comment += ('[**CCC ' + request + '**](' + self.getContextLink(request, location) + ') ' + content) + '\n\n'
            comment = self.linkCrossReferences(comment)
            if len(comment) > self.getCharLimit():
                comment = self.getOverflowComment(validRequests)
            comment += self.getCommentFooter()
            return True,comment
        else:
            return False,""

    def getContextLink(self, request, location):
        """Context link shown on each paragraph number (scborromeo.org page)."""
        return 'http://www.scborromeo.org/ccc/para/' + request + '.htm'

    def getOverflowComment(self, requests):
        """Links-only comment used when the quoted contents exceed the limit."""
        numberOfRequests = 0
        comment = ''
        for request in requests:
            content,location = self._dictionary[request]
            numberOfRequests += 1
            comment += ('([' + request + '](' + self.getContextLink(request,location) + '))\n')
            if len(comment) > self.getCharLimit():
                comment += "\n\nAnd even when condensing the paragraphs to links, you still exceeded the quota..."
                break
        return self.getOverflowHeader('paragraph','paragraphs',numberOfRequests) + comment

    def linkCrossReferences(self, comment):
        """Turn trailing CCC cross-reference blocks like ``(101, 102)`` at
        line ends into links, processed right-to-left so earlier offsets
        stay valid.

        Fixed from the original: the global ``(?m)`` flag sat at the end of
        the pattern, which is an error on Python >= 3.11; it now leads.
        """
        xrefBlocks = reversed(list(re.finditer(r'(?m)\([\d\,\s\-]+\)$',comment)))
        for xrefBlock in xrefBlocks:
            xrefs = reversed(list(re.finditer(r'\d+',xrefBlock.group(0))))
            for xref in xrefs:
                paragraph = xref.group(0)
                content,location = self._dictionary[paragraph]
                start = xrefBlock.start()+xref.start()
                end = xrefBlock.start()+xref.end()
                comment = comment[:start]+"["+paragraph+"]("+self.getContextLink(paragraph, location)+")"+comment[end:]
        return comment
#########################################################################
#
# Constructs reddit comment response for Balitmore Catechism requests of
# the form [bccd 1], [bccd 1-5], [bccd 1-5,9-10], and the same forms with
# a BCCD book #, such as [bccd #1 1-5, 10-12]. The default book is #2.
#
#########################################################################
class BaltimoreResponse(Response):
    """Builds reddit comment responses for Baltimore Catechism (BCCD) requests.

    Handles the forms [bccd 1], [bccd 1-5], [bccd 1-5,9-10] and the same
    forms with an explicit book, e.g. [bccd #1 1-5,10-12].  Book numbers
    run from 1 to 4; the default (and fallback for invalid numbers) is
    book 2.  Dictionary layout: {book: {question: {'Q': ..., 'A': ...}}}.
    """

    def parsedRequests(self, requests, includeRanges = True):
        """Expand (book, spec) tuples into {'Book', 'Request'} dicts,
        keeping only questions present in the loaded dictionary."""
        validRequests = list()
        for taggedRequest in requests:
            bookNumber = '2'
            bookRequest, request = taggedRequest
            bookRequest = re.sub(r"\s+", "", bookRequest)
            request = re.sub(r"\s+", "", request)
            bookMatch = re.match(r'#(\d+)', bookRequest)
            if bookMatch:
                bookNumber = bookMatch.group(1)
                # Out-of-range book numbers silently fall back to book 2.
                if int(bookNumber) < 1 or int(bookNumber) > 4:
                    bookNumber = '2'
            if ',' in request:
                sublist = request.split(',')
            else:
                sublist = [request]
            for subrequest in sublist:
                if '-' in subrequest:
                    startingRequest = subrequest.partition('-')[0]
                    if includeRanges:
                        endingRequest = subrequest.partition('-')[2]
                        if int(startingRequest) < int(endingRequest) + 1:
                            for key in range(int(startingRequest), int(endingRequest) + 1):
                                if str(key) in self._dictionary[bookNumber]:
                                    validRequests.append({'Book': bookNumber, 'Request': str(key)})
                    elif startingRequest in self._dictionary[bookNumber]:
                        validRequests.append({'Book': bookNumber, 'Request': startingRequest})
                elif subrequest in self._dictionary[bookNumber]:
                    validRequests.append({'Book': bookNumber, 'Request': subrequest})
        return validRequests

    def getResponse(self, requests):
        """Return (success, reddit comment) quoting the requested Q&As."""
        validRequests = self.parsedRequests(requests)
        if len(validRequests) == 0:
            return False, ""
        comment = ''
        for request in validRequests:
            bookNumber = request['Book']
            requestNumber = request['Request']
            qa = self._dictionary[bookNumber][requestNumber]
            comment += ('[**BCCD #' + bookNumber + " Q." + requestNumber + '**](' + self.getContextLink(bookNumber, requestNumber, qa['Q']) + ') ' + qa['Q'] + '\n\nA. ' + qa['A']) + '\n\n'
        comment = self.linkCrossReferences(comment)
        if len(comment) > self.getCharLimit():
            comment = self.getOverflowComment(validRequests)
        comment += self.getCommentFooter()
        return True, comment

    def getContextLink(self, bookNumber, questionNumber, questionText):
        """Build the wiki anchor link for one question.

        The anchor mimics reddit's wiki heading anchors: whitespace becomes
        underscores and ',' / '?' are hex-escaped (.2C / .3F).  Book 4 is
        split across two wiki pages at question 211.
        """
        modifiedQuestionText = re.sub(r'\s', '_', questionText).lower()
        modifiedQuestionText = re.sub(r',', '.2C', modifiedQuestionText)
        modifiedQuestionText = re.sub(r'\?', '.3F', modifiedQuestionText)
        partitionText = ""
        if int(bookNumber) == 4:
            partitionText = "_"
            if int(questionNumber) < 211:
                partitionText += "1"
            else:
                partitionText += "2"
        return 'https://www.reddit.com/r/Catebot/wiki/bccd_' + bookNumber + partitionText + '#wiki_q.' + questionNumber + '_' + modifiedQuestionText

    def getOverflowComment(self, requests):
        """Return a links-only comment when the full text would be too long."""
        numberOfRequests = 0
        comment = ''
        for request in requests:
            numberOfRequests += 1
            bookNumber = request['Book']
            requestNumber = request['Request']
            qa = self._dictionary[bookNumber][requestNumber]
            comment += ('([' + requestNumber + '](' + self.getContextLink(bookNumber, requestNumber, qa['Q']) + '))\n')
            if len(comment) > self.getCharLimit():
                comment += "\n\nAnd even when condensing the requested questions to links, you still exceeded the quota..."
                break
        return self.getOverflowHeader('question', 'questions', numberOfRequests) + comment

    def linkCrossReferences(self, comment):
        """Cross-reference linking is not implemented for the BCCD yet."""
        # NOTE(review): the original carried an unreachable copy of the CCC
        # cross-reference linker (referencing a nonexistent self._Catechism
        # attribute) after this return; it has been removed.
        return comment
#########################################################################
#
# Constructs reddit comment response for Canon requests of form [can 12],
# [can 12s1], [can 12-14], [can 12,15-17].
#
#########################################################################
class CanonResponse(Response):
    """Builds reddit comment responses for Code of Canon Law requests.

    Handles the forms [can 12], [can 12s1] (section 1 of canon 12),
    [can 12-14] and comma-separated mixtures like [can 12,15-17].
    Dictionary values are (isSectioned, content, location) triples where
    ``content`` is a plain string or, for sectioned canons, a
    {section: text} mapping.
    """

    def parsedRequests(self, requests, includeRanges = True):
        """Expand raw request strings into valid canon keys.

        Range endpoints may carry a section suffix ("12s1-14"); the suffix
        is dropped when expanding ranges.  Single requests keep their
        suffix so getResponse can render just that section.
        """
        validRequests = list()
        for request in requests:
            request = re.sub(r"\s+", "", request)
            if ',' in request:
                sublist = request.split(',')
            else:
                sublist = [request]
            for subrequest in sublist:
                if '-' in subrequest:
                    startingRequest = subrequest.partition('-')[0].partition('s')[0]
                    if includeRanges:
                        endingRequest = subrequest.partition('-')[2].partition('s')[0]
                        if int(startingRequest) < int(endingRequest) + 1:
                            for key in range(int(startingRequest), int(endingRequest) + 1):
                                if str(key) in self._dictionary:
                                    validRequests.append(str(key))
                    elif startingRequest in self._dictionary:
                        validRequests.append(startingRequest)
                else:
                    key = subrequest.partition('s')[0]
                    if key in self._dictionary:
                        validRequests.append(subrequest)
        return validRequests

    def getResponse(self, requests):
        """Return (success, reddit comment) quoting the requested canons.

        Sectioned canons render either one requested section (with a
        "doesn't exist" note for bad section numbers) or all sections in
        numeric order.
        """
        validRequests = self.parsedRequests(requests)
        if len(validRequests) == 0:
            return False, ""
        comment = ''
        for request in validRequests:
            key = request.partition('s')[0]
            section = request.partition('s')[2]
            isSectioned, content, location = self._dictionary[key]
            contextLink = self.getContextLink("", location)
            if section and isSectioned:
                try:
                    comment += ('[**Can. ' + key + '**](' + contextLink + ') ' + u"\u00A7" + section + " " + content[section]) + '\n\n'
                except KeyError:
                    comment += '[**Can. ' + key + '**](' + contextLink + ') ' + u"\u00A7" + section + " doesn't exist\n\n"
            elif not section and isSectioned:
                comment += '[**Can. ' + key + '**](' + contextLink + ') '
                for sect in sorted(content.keys(), key=int):
                    comment += u"\u00A7" + sect + " " + content[sect] + "\n\n"
            else:
                comment += '[**Can. ' + key + '**](' + contextLink + ') ' + content
        comment = self.linkCrossReferences(comment)
        if len(comment) > self.getCharLimit():
            comment = self.getOverflowComment(validRequests)
        comment += self.getCommentFooter()
        return True, comment

    def getContextLink(self, dummy, location):
        """Canons link to their document location beneath the base URL."""
        return self._baseURL + location

    def getOverflowComment(self, requests):
        """Return a links-only comment when the full text would be too long."""
        numberOfRequests = 0
        comment = ''
        for request in requests:
            # Fixed from the original: a request may carry a section suffix
            # ("12s1") but the dictionary is keyed by the bare canon number,
            # so looking up the raw request raised KeyError.
            key = request.partition('s')[0]
            isSectioned, content, location = self._dictionary[key]
            numberOfRequests += 1
            comment += ('([' + request + '](' + self.getContextLink("", location) + '))\n')
            if len(comment) > self.getCharLimit():
                comment += "\n\nAnd even when condensing the laws to links, you still exceeded the quota..."
                break
        return self.getOverflowHeader('law', 'laws', numberOfRequests) + comment

    def linkCrossReferences(self, comment):
        """Cross-reference linking is not implemented for canon law yet."""
        # NOTE(review): the original carried an unreachable copy of the CCC
        # cross-reference linker (referencing a nonexistent self._Catechism
        # attribute) after this return; it has been removed.
        return comment
#########################################################################
#
# Constructs reddit comment response for GIRM requests of form [girm n].
#
#########################################################################
class GIRMResponse(Response):
    """Builds reddit comment responses for GIRM requests of the form [girm n].

    Dictionary layout matches the CCC base class: paragraph number ->
    (content, location); only the label and context link differ.
    """

    def getResponse(self, requests):
        """Return (success, reddit comment) quoting the requested paragraphs."""
        validRequests = self.parsedRequests(requests)
        if len(validRequests) == 0:
            return False, ""
        comment = ''
        for request in validRequests:
            content, location = self._dictionary[request]
            comment += ('[**GIRM ' + request + '**](' + self.getContextLink(request, location) + ') ' + content) + '\n\n'
        comment = self.linkCrossReferences(comment)
        if len(comment) > self.getCharLimit():
            comment = self.getOverflowComment(validRequests)
        comment += self.getCommentFooter()
        return True, comment

    def getContextLink(self, request, location):
        """GIRM paragraphs link to their document location under the base URL."""
        return self._baseURL + location

    def linkCrossReferences(self, comment):
        """Cross-reference linking is not implemented for the GIRM yet."""
        # NOTE(review): the original carried an unreachable copy of the CCC
        # cross-reference linker after this return; it has been removed.
        return comment
| StarcoderdataPython |
87705 | <reponame>maslychm/mighty
from PIL import Image, ImageFont, ImageDraw
import requests
from io import BytesIO
import os
MAXPICSIZE = 300
def generate_onjoin_pic(namestr: str, member_id: int, url) -> str:
    """
    Generates a welcome image showing the user's name (truncated to 12
    characters) and their avatar pasted onto the template, and returns the
    path to the saved file -- note this is a path relative to the working
    directory ("temp/w<id>.png"), not an absolute path.
    """
    # Check if dir exists
    if not os.path.isdir(os.getcwd() + "/temp/"):
        os.mkdir(os.getcwd() + "/temp/")
        print("making dir here")
    # Set up return path
    retpath = "temp/w" + str(member_id) + ".png"
    # Open template and font
    im = Image.open("resources/template.PNG")
    font_type = ImageFont.truetype("resources/Ubuntu-Regular.ttf",39)
    # Get and resize user imageBG from url
    response = requests.get(url)
    imageBG = Image.open(BytesIO(response.content))
    imageBG.thumbnail((64,64), Image.ANTIALIAS)
    # Add text user name to image
    draw = ImageDraw.Draw(im)
    draw.text(xy=(185,75),text=namestr[:12],fill=(17,17,19),font=font_type)
    im.paste(imageBG,box=(5,5))
    im.save(retpath)
    return retpath
def generate_hat(member_id, url=None):
    """Composite the hat overlay onto the avatar downloaded from *url*.

    Returns the relative path of the saved PNG ("temp/h<id>.png"), or
    None when no url is provided.  The hat is anchored at the avatar's
    top-right corner; either the hat or the avatar is rescaled so their
    proportions stay roughly 3:2.
    """
    if url is None:  # fixed: was ``url == None``
        return None
    # Ensure the scratch directory exists.
    if not os.path.isdir(os.getcwd() + "/temp/"):
        os.mkdir(os.getcwd() + "/temp/")
        print("making dir here")
    retpath = "temp/h" + str(member_id) + ".png"
    # Download the avatar.
    response = requests.get(url)
    imageBG = Image.open(BytesIO(response.content))
    bg_w, bg_h = imageBG.size
    # Load the hat overlay (with alpha, used as its own paste mask below).
    hatImg = Image.open('resources/Hat.png')
    hat_w, hat_h = hatImg.size
    if bg_w < MAXPICSIZE or bg_h < MAXPICSIZE:
        # Small avatar: shrink the hat to two thirds of the avatar size.
        hatImg.thumbnail((bg_w / 3 * 2, bg_h / 3 * 2), Image.ANTIALIAS)
        hat_w, hat_h = hatImg.size
    else:
        # Large avatar: shrink the avatar to MAXPICSIZE instead --
        # presumably the hat asset is drawn at a matching scale.
        imageBG.thumbnail((MAXPICSIZE, MAXPICSIZE), Image.ANTIALIAS)
        bg_w, bg_h = imageBG.size
    # Anchor the hat flush with the top-right corner.
    offset = ((bg_w - hat_w), 0)
    imageBG.paste(hatImg, offset, mask=hatImg)
    imageBG.save(retpath)
    return retpath
def generate_nuzzle(a, m, a_url, m_url, reversed=False):
    """Compose the "nuzzle" meme image for two users.

    Parameters:
        a, m: author / mentioned display names (truncated to 12 chars).
        a_url, m_url: avatar URLs for the two users.
        reversed: use the mirrored template with swapped avatar slots.
            (Name shadows the builtin but is kept for API compatibility.)

    Returns the path of the saved PNG, relative to the CWD.
    """
    nuzzlestr = a[:12] + " *nuzzles* " + m[:12]
    # Avatar paste positions for the normal (non-mirrored) template.
    box_author = (360, 130)
    box_mentioned = (690, 45)
    # Ensure the scratch directory exists (race-free).
    os.makedirs(os.path.join(os.getcwd(), "temp"), exist_ok=True)
    retpath = "temp/n" + a + "_" + m + ".png"
    # Download both avatars and shrink them to template size.
    response = requests.get(a_url)
    author_image = Image.open(BytesIO(response.content))
    author_image.thumbnail((64, 64), Image.ANTIALIAS)
    response = requests.get(m_url)
    mentioned_image = Image.open(BytesIO(response.content))
    mentioned_image.thumbnail((64, 64), Image.ANTIALIAS)
    # Pick the template; the mirrored one uses different avatar slots.
    impath = "resources/swordfish_template.jpg"
    if reversed:
        impath = "resources/swordfish_template_reversed.jpg"
        box_author = (400, 130)
        box_mentioned = (70, 45)
    im = Image.open(impath)
    font_type = ImageFont.truetype("resources/Ubuntu-Regular.ttf", 40)
    draw = ImageDraw.Draw(im)
    draw.text(xy=(70, 240), text=nuzzlestr, fill=(17, 17, 19), font=font_type)
    im.paste(author_image, box=box_author)
    im.paste(mentioned_image, box=box_mentioned)
    im.save(retpath)
    return retpath
| StarcoderdataPython |
3240733 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import threading
import uuid
from django.conf import settings
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from guacamole.client import GuacamoleClient
logger = logging.getLogger(__name__)
# In-memory registry of open guacd connections, keyed by the per-session
# UUID string issued in _do_connect.
sockets = {}
sockets_lock = threading.RLock()
# Serialise tunnel traffic: only one long-poll reader and one writer at a time.
read_lock = threading.RLock()
write_lock = threading.RLock()
# Set by each new read request so the currently streaming reader notices it
# and yields the read_lock to the newcomer (see _do_read).
pending_read_request = threading.Event()
def index(request):
    # Landing page hosting the Guacamole JavaScript client.
    return render(request, 'core/index.html', {})
@csrf_exempt
def tunnel(request):
    """Guacamole HTTP tunnel endpoint.

    Dispatches on the raw query string: ``connect`` opens a new guacd
    session; ``read:<key>`` / ``write:<key>`` stream data for an existing
    one. Anything else is a 400.
    """
    qs = request.META['QUERY_STRING']
    logger.info('tunnel %s', qs)
    if qs == 'connect':
        return _do_connect(request)
    parts = qs.split(':')
    if len(parts) >= 2:
        op, key = parts[0], parts[1]
        if op == 'read':
            return _do_read(request, key)
        if op == 'write':
            return _do_write(request, key)
    return HttpResponse(status=400)
def _do_connect(request):
    """Open a guacd connection and return its session key to the client.

    Performs the Guacamole handshake for the fixed SSH target configured in
    Django settings, stores the connected client in the module-level
    ``sockets`` registry, and returns the registry key (a UUID string) as
    the response body.
    """
    # Connect to guacd daemon
    client = GuacamoleClient(settings.GUACD_HOST, settings.GUACD_PORT)
    client.handshake(protocol='ssh',
                     hostname=settings.SSH_HOST,
                     port=settings.SSH_PORT,
                     username=settings.SSH_USER,
                     password=settings.SSH_PASSWORD)
    # This key is how subsequent read/write requests find the connection.
    cache_key = str(uuid.uuid4())
    with sockets_lock:
        logger.info('Saving socket with key %s', cache_key)
        sockets[cache_key] = client
    response = HttpResponse(content=cache_key)
    response['Cache-Control'] = 'no-cache'
    return response
def _do_read(request, cache_key):
    """Long-poll endpoint streaming guacd output back to the browser.

    Only one reader may be active at a time: every new read request sets
    ``pending_read_request`` so the currently streaming generator notices
    the flag and stops, releasing ``read_lock`` to the newcomer.
    """
    pending_read_request.set()
    def content():
        with sockets_lock:
            client = sockets[cache_key]
        with read_lock:
            # We now own the read side; clear the takeover flag we set above.
            pending_read_request.clear()
            while True:
                # instruction = '5.mouse,3.400,3.500;'
                instruction = client.receive()
                if instruction:
                    yield instruction
                else:
                    break
                if pending_read_request.is_set():
                    # A newer read request wants the stream; stop gracefully.
                    logger.info('Letting another request take over.')
                    break
            # End-of-instruction marker
            yield '0.;'
    response = StreamingHttpResponse(content(),
                                     content_type='application/octet-stream')
    response['Cache-Control'] = 'no-cache'
    return response
def _do_write(request, cache_key):
    """Forward the request body to the guacd socket for *cache_key*.

    Reads the body in 8 KiB chunks under ``write_lock`` so concurrent
    writers cannot interleave their data on the socket.
    """
    with sockets_lock:
        client = sockets[cache_key]
    with write_lock:
        chunk = request.read(8192)
        while chunk:
            client.send(chunk)
            chunk = request.read(8192)
    response = HttpResponse(content_type='application/octet-stream')
    response['Cache-Control'] = 'no-cache'
    return response
| StarcoderdataPython |
import dico
# Minimal dico (Discord API wrapper) example: one command that posts two
# buttons, and one handler that answers button presses.
# NOTE(review): "YOUR_BOT_TOKEN" is a placeholder; supply a real token.
client = dico.Client("YOUR_BOT_TOKEN")
# Log basic readiness info once the gateway handshake completes.
client.on_ready = lambda ready: print(f"Bot ready, with {len(ready.guilds)} guilds.")
@client.on_message_create
async def on_message_create(message: dico.Message):
    # "!button" -> reply with an action row holding two demo buttons.
    if message.content.startswith("!button"):
        button = dico.Button(style=dico.ButtonStyles.PRIMARY, label="Hello!", custom_id="hello")
        button2 = dico.Button(style=dico.ButtonStyles.DANGER, label="Bye!", custom_id="bye")
        row = dico.ActionRow(button, button2)
        await message.reply("Button!", component=row)
@client.on_interaction_create
async def on_button_response(interaction: dico.Interaction):
    # Ignore any interaction that is not a button press.
    if not interaction.type.message_component or not interaction.data.component_type.button:
        return
    # Answer with an ephemeral message naming the pressed button's custom_id.
    resp = dico.InteractionResponse(callback_type=dico.InteractionCallbackType.CHANNEL_MESSAGE_WITH_SOURCE,
                                    data=dico.InteractionApplicationCommandCallbackData(content=f"Yes, it's {interaction.data.custom_id}.",
                                    flags=dico.InteractionApplicationCommandCallbackDataFlags.EPHEMERAL))
    await interaction.create_response(resp)
client.run()
| StarcoderdataPython |
1722863 | <gh_stars>0
from flask.ext import restful
from db import db
from uuid import uuid4, UUID
from jobs.scrape import scrape
from datetime import datetime, timedelta
from pymongo import DESCENDING
class DogScrape(restful.Resource):
    """REST resource that launches scrape jobs and reports their status."""
    def post(self):
        """Queue a new scrape job, rate-limited to one per 30 minutes.

        If the most recent job was created less than 30 minutes ago and
        finished without error, refuse with an error payload instead of
        queueing another one.
        """
        # Most recent job first.
        jobs_cursor = db.jobs.find({}).sort('date', DESCENDING)
        if jobs_cursor.count() > 0:
            latest_job = jobs_cursor[0]
            latest_date = latest_job['date']
            now = datetime.now()
            delta = now - latest_date
            thirty_delta = timedelta(minutes=30)
            if thirty_delta > delta and len(latest_job['error']) == 0:
                return {'error': 'Please wait {} '.format(thirty_delta-delta)}
        job_id = uuid4()
        job = {'type': 'DogScrape',
               'done': False,
               'percent': 0,
               'job_id': job_id,
               'error': '',
               'date': datetime.now()}
        db.jobs.insert(job)
        # Hand the job to the async worker (celery-style .delay call).
        scrape.delay(job_id)
        # NOTE(review): job_id is a uuid.UUID; flask-restful may not be able
        # to JSON-serialise it -- confirm whether str(job_id) is needed here.
        return {'job_id': job_id}
    def get(self, job_id):
        """Return the stored job document for *job_id*.

        NOTE(review): the raw Mongo document contains UUID/datetime values
        that are not JSON-serialisable as-is -- verify against the client.
        """
        return db.jobs.find({'job_id': UUID(job_id)})[0]
4813210 | <reponame>rileyblackwell/company-growth-rates
def castToFloat(num):
    """Coerce *num* to float, re-prompting on stdin until the value parses.

    Returns the parsed float. Only conversion failures are retried; the
    original bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    """
    while True:
        try:
            return float(num)
        except (TypeError, ValueError):
            print(f"\"{num}\" is not a number\nEnter a number")
            num = input()
def formatGrowthRate(data):
    """Turn a growth multiplier (e.g. 1.5) into a percent-change string ("50.0%")."""
    return f"{round(data * 100 - 100, 2)}%"
def formatOpMargin(data):
    """Turn an operating-margin fraction (e.g. 0.25) into a percent string ("25.0%")."""
    return f"{round(data * 100, 2)}%"
def formatEPS(data):
    """Format a share-count ratio as a percent change with a buyback/increase label.

    A ratio below 1 means the share count shrank (a buyback); at or above 1
    it grew (a share increase).
    """
    text = f"{round((data - 1) * 100, 2)}%"
    if text.startswith("-"):
        return text[1:] + " buyback"
    return text + " share increase"
def formatResults(resultsDict, company, key):
    """Join one company's metric values into a space-separated string.

    Parameters:
        resultsDict: mapping of company name -> result object exposing
            getRevenue/getOperatingIncome/getEPS/getPE accessors.
        company: key into resultsDict.
        key: metric selector -- 'revenue', 'opinc', 'eps' or 'pe'.

    Returns the values joined with spaces (a trailing space is kept, matching
    historical output). Raises ValueError for an unknown key -- the original
    left ``results`` unbound and crashed with a NameError instead.
    """
    accessors = {
        'revenue': 'getRevenue',
        'opinc': 'getOperatingIncome',
        'eps': 'getEPS',
        'pe': 'getPE',
    }
    try:
        method_name = accessors[key]
    except KeyError:
        raise ValueError(f"unknown metric key: {key!r}")
    results = getattr(resultsDict[company], method_name)()
    return ''.join(str(result) + " " for result in results)
3360769 | <reponame>guangbin79/Lua_5.1.5-Android<filename>zziplib-0.13.62/docs/zzipdoc/docbookdocument.py
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from match import Match
class DocbookDocument:
""" binds some xml content page with additional markup - in this
variant we set the rootnode container to 'reference' and the DTD
to the Docbook 4.1.2 version. Modify as you like."""
has_title_child = [ "book", "chapter", "section", "reference" ]
docbook_dtd = (
' PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"'+"\n"+
' "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"')
def __init__(self, o, filename = None):
self.o = o
self.rootnode = "reference"
self.filename = filename
self.title = ""
self.text = []
def add(self, text):
""" add some content """
self.text += [ text ]
return self
def get_title(self):
if self.title: return title
try: return self.text[0].get_title()
except Exception, e: pass
return self.title
def _xml_doctype(self, rootnode):
return "<!DOCTYPE "+rootnode+self.docbook_dtd+">"
def _xml_text(self, xml):
""" accepts adapter objects with .xml_text() """
try: return xml.xml_text()
except Exception, e: print "DocbookDocument/text", e; pass
return str(xml)
def _fetch_rootnode(self, text):
fetch = Match(r"^[^<>]*<(\w+)\b")
if text & fetch: return fetch[1]
return self.rootnode
def _filename(self, filename):
if filename is not None:
self.filename = filename
filename = self.filename
if not filename & Match(r"\.\w+$"):
ext = self.o.docbook
if not ext: ext = "docbook"
filename += "."+ext
return filename
def save(self, filename = None):
filename = self._filename(filename)
print "writing '"+filename+"'"
if len(self.text) > 1:
self.save_all(filename)
else:
self.save_text(filename, self.text[0])
def save_text(self, filename, text):
try:
fd = open(filename, "w")
xml_text = self._xml_text(text)
rootnode = self._fetch_rootnode(xml_text)
doctype = self._xml_doctype(rootnode)
print >>fd, doctype
print >>fd, xml_text
fd.close()
return True
except IOError, e:
print "could not open '"+filename+"'file", e
return False
def save_all(self, filename):
assert len(self.text) > 1
try:
fd = open(filename, "w")
xml_text = self._xml_text(self.text[0])
rootnode = self._fetch_rootnode(xml_text)
if rootnode == self.rootnode:
rootnode = "book"
else:
rootnode = self.rootnode
doctype = self._xml_doctype(rootnode)
print >>fd, doctype
title = self.get_title()
if title and self.rootnode in self.has_title_child:
print >>fd, "<"+self.rootnode+'><title>'+title+'</title>'
elif title:
print >>fd, "<"+self.rootnode+' id="'+title+'">'
else:
print >>fd, "<"+self.rootnode+'>'
for text in self.text:
text = self._xml_text(text)
print >>fd, text
print >>fd, "</"+self.rootnode+">"
fd.close()
return True
except IOError, e:
print "could not open '"+filename+"'file", e
return False
| StarcoderdataPython |
1781963 | import time
import subprocess
file_name = "output.txt"   # ganglia dump: one "ip||idle_cpu_percent" per line
threshold = 30             # utilisation percentage that triggers tracking
vm_list = []               # NOTE(review): unused here; kept for external users
vm_cpuUtilization = {}     # ip -> [latest_util, first_seen, last_seen]
def readGanglia():
    """
    Parse the ganglia dump and track VMs whose CPU use exceeds threshold.

    Each line is "ip||idle_percent"; utilisation is 100 - idle. A VM seen
    for the first time gets a record [utilisation, now]; on later sightings
    the utilisation is refreshed and a second/last-seen timestamp is kept,
    so a record never grows beyond [util, first_seen, last_seen].
    """
    with open(file_name) as f:
        for line in f:
            fields = line.split('||')
            utilization = 100.00 - float(fields[1])
            if utilization <= threshold:
                continue
            record = vm_cpuUtilization.get(fields[0])
            if record is None:
                # First sighting: utilisation plus first-seen timestamp.
                vm_cpuUtilization[fields[0]] = [utilization, time.time()]
            elif len(record) < 3:
                # Second sighting: refresh utilisation, add last-seen stamp.
                record[0] = utilization
                record.append(time.time())
            else:
                # Steady state: refresh utilisation and last-seen stamp.
                record[0] = utilization
                record[2] = time.time()
def scaling():
    """Scale up any VM that has stayed hot for at least two minutes.

    A VM qualifies once its record holds both timestamps and they are at
    least 120 seconds apart; it is then handed to CallScaling and dropped
    from tracking, followed by a 20 second cool-down.
    """
    # Iterate over a snapshot of the keys: the original popped entries from
    # the dict while iterating it, which raises RuntimeError on Python 3.
    for key in list(vm_cpuUtilization):
        record = vm_cpuUtilization[key]
        if len(record) <= 2:
            continue  # not seen often enough yet
        if int(record[2] - record[1]) >= 120:
            CallScaling(key)
            vm_cpuUtilization.pop(key, None)
            time.sleep(20)
def CallScaling(key):
    """Look up *key* (a VM's IP) in VmInfo.txt and report its name and host.

    Each line of VmInfo.txt is "ip|vm_name|host_ip|...". The actual scale
    command is left commented out, as in the original script.
    """
    VMFilePath = "/home/node4_a1/VmInfo.txt"
    with open(VMFilePath) as f:
        for line in f:  # unused enumerate() index removed
            fields = line.split('|')
            if fields[0] == key:
                # scaleCmd = "sh parsevcpu.sh " + fields[1] + " " + fields[2]
                # subprocess.check_call(scaleCmd, shell=True)
                print("VM_NAME" + str(fields[1]) + " " + "host IP " + str(fields[2]))
def scaling_check():
    """Poll forever: refresh the ganglia data, then evaluate scaling actions."""
    while True:
        readGanglia()
        scaling()

# Only start the monitor loop when executed as a script, so the module can
# be imported (e.g. for testing) without blocking forever.
if __name__ == "__main__":
    scaling_check()
| StarcoderdataPython |
3393344 | <gh_stars>1-10
import requests
from bs4 import BeautifulSoup
# ---------------------------------------------------------------------------
# Pandorabots scrapers.  Every ask_* function POSTs the user's text to one
# specific Pandorabots bot and scrapes the bot's reply out of the returned
# HTML.  The per-bot pages use different markup, so each wrapper supplies the
# bot's URL plus a small extractor callable applied to the parsed soup.  This
# refactor removes ~70 copies of the same request/parse boilerplate while
# keeping every public function name, signature, default prompt, URL and
# extraction rule unchanged.
# ---------------------------------------------------------------------------

_WWW = "https://www.pandorabots.com/pandora/talk?botid="
_PB = "https://pandorabots.com/pandora/talk?botid="
_CUST = {"botcust2": "b170873d9e664911"}


def _ask(url, text, extract, extra=None, field="input"):
    """POST *text* to a Pandorabots endpoint and return the extracted reply.

    field   -- form field carrying the text (usually "input").
    extra   -- additional form fields some bots require.
    extract -- callable mapping the parsed BeautifulSoup page to the reply.
    """
    data = {field: text}
    if extra:
        data.update(extra)
    html = requests.post(url, data).text
    return extract(BeautifulSoup(html, 'html.parser'))


def _split_last(sep, strip=True):
    """Extractor factory: last chunk of the page text after *sep*."""
    def extract(soup):
        reply = soup.text.split(sep)[-1]
        return reply.strip() if strip else reply
    return extract


def _color(value, last=False, strip=False):
    """Extractor factory: text of the (first or last) tag with color=*value*."""
    def extract(soup):
        tag = soup.find_all(color=value)[-1] if last else soup.find(color=value)
        return tag.text.strip() if strip else tag.text
    return extract


def _by_id(name):
    """Extractor factory: text of the element with the given HTML id."""
    return lambda soup: soup.find(id=name).text


def _line(index):
    """Extractor factory: the *index*-th non-empty line of soup.text, stripped."""
    def extract(soup):
        return [s for s in soup.text.split("\n") if s.strip()][index].strip()
    return extract


def _first_br(soup):
    """Extractor for bots whose reply is the first non-empty <br> chunk."""
    for tag in soup.find_all("br"):
        if str(tag).strip() == "<br/>":
            continue
        return str(tag).replace("<br>", "").replace("</br>", "").strip()


def ask_anna(text="who are you"):
    return _ask(_WWW + "e6b3d89abe37ba83", text,
                lambda s: s.find(id="output").text.split("Anna:")[-1].strip(), _CUST)

def ask_chomsky(text="who said god is dead"):
    return _ask("http://demo.vhost.pandorabots.com/pandora/talk?botid=b0dafd24ee35a477",
                text, _first_br,
                {"questionstring": text, "submit": "Ask Chomsky",
                 "botcust2": "86347fdb4e66491c"})

def ask_professor(text="want to go to cyberspace"):
    return _ask(_WWW + "935a0a567e34523c", text, _first_br,
                {"questionstring": text, "submit": "Ask The Professor",
                 "botcust2": "<KEY>"})

def ask_clarence(text="who said god is dead"):
    return _ask(_WWW + "e221aa930e345a2c", text, _color("white", last=True),
                {"questionstring": text, "botcust2": "b170873d9e664911"})

def ask_ieinstein(text="who said god is dead"):
    return _ask(_WWW + "ea77c0200e365cfb", text, _by_id("output"), _CUST)

def ask_amy(text="hi"):
    return _ask(_WWW + "878ba74dfe34402c", text, _by_id("output"), _CUST)

def ask_zog(text="hi"):
    return _ask("http://www.pandorabots.com/pandora/talk?botid=c1baddb74e35ebd0", text,
                lambda s: s.find_all(color="#ffffff")[-1].text.replace(
                    "Click on the Flying Saucer.", "").strip(), _CUST)

def ask_glados(text="hi"):
    return _ask(_WWW + "cf7aa84b0e34555c", text, lambda s: s.find_all("p")[-1].text, _CUST)

def ask_alice(text="hi"):
    return _ask(_WWW + "a847934aae3456cb", text, _color("darkred", last=True), _CUST)

def ask_lucifer(text="are you evil"):
    return _ask(_WWW + "d6dd41a29e3649d6", text,
                lambda s: s.find(bgcolor="#333333").text.split("Lucifer: ")[1]
                .split("Human:")[0].strip(), _CUST)

def ask_lauren(text="are you alive"):
    return _ask("http://lauren.vhost.pandorabots.com/pandora/talk?botid=f6d4afd83e34564d&skin=input&speak=true",
                text,
                lambda s: [t for t in s.text.split("\n") if t.strip()][-1]
                .split("LaurenBot:")[-1].strip(), _CUST)

def ask_izar(text="are you alive"):
    # This bot takes the text in a "message" field instead of "input".
    return _ask(_WWW + "996c51e02e345a21", text,
                lambda s: s.find_all(color="blue")[-1].text.replace("Izar::", "").strip(),
                _CUST, field="message")

def ask_cartman_bot(text="are you alive"):
    return _ask(_WWW + "92e4f42e8e3601aa", text,
                lambda s: [t for t in s.text.split("\n") if t.strip()][-1].split(":")[1].replace(
                    "Cartman Bot is a purely fan-created work, and is not affiliated with South Park Studios or with Comedy Central",
                    "").strip())

def ask_ayame(text="are you alive"):
    return _ask(_WWW + "cd44746d1e3755a1", text, _color("blue"))

def ask_bot_god(text="are you dead"):
    return _ask(_WWW + "c5952f7ede34fb28", text, _split_last("God:", strip=False))

def ask_robot_girl(text="are you dead"):
    return _ask(_WWW + "9875cbde2e341077", text, _color("green"))

def ask_axbot(text="yo"):
    return _ask(_WWW + "f8daef0c1e368ea6", text, lambda s: s.find("span").text)

def ask_shadow(text="what is your name"):
    return _ask(_WWW + "cd003aaf5e34b722", text, _color("green"))

def ask_hal(text="what is your name"):
    return _ask(_WWW + "fb6b492bbe347bfb", text, _split_last("HAL: ", strip=False))

def ask_mom(text="hi"):
    return _ask(_WWW + "e87035050e358b0d", text,
                lambda s: s.text.split("MOMbotsaid: ")[-1].split("\n")[0].strip())

def ask_alphonse(text="what is your name"):
    return _ask(_WWW + "a49c4ffc1e35fcb5", text, _line(-2))

def ask_songoku(text="what is your name"):
    return _ask(_WWW + "946dd59a6e36e738", text, _color("green"))

def ask_ALICE(text="who are you"):
    return _ask(_WWW + "b8d616e35e36e881", text,
                lambda s: s.text.split("A.L.I.C.E:")[-1].split("\n")[0].strip())

def ask_lilith(text="who are you"):
    return _ask(_WWW + "b9b96b247e34f4f2", text,
                lambda s: s.find(color="#000000").text.split("Lilith: ")[-1])

def ask_yugi(text="who are you"):
    return _ask(_PB + "b456548d9e3598e8", text, _color("green", last=True))

def ask_satan(text="who are you"):
    return _ask(_PB + "eb25a08afe36a7e8", text, _split_last("Satan: "))

def ask_melon_head(text="who are you"):
    return _ask(_PB + "916edf0d9e357417", text, _color("green"))

def ask_osiris(text="who are you"):
    return _ask(_WWW + "c83fd9a71e34161e", text,
                lambda s: [t for t in s.text.split("\n") if t.strip()][-2].replace("Osiris: ", ""))

def ask_daeron(text="who are you"):
    return _ask(_WWW + "cac20f908e34b88e", text, _split_last("Daeron: "))

def ask_shiny_head(text="who are you"):
    return _ask(_WWW + "fefeb1153e351f12", text, _color("green"))

def ask_pi(text="who are you"):
    return _ask(_WWW + "c7757fa4fe340e87", text, _line(-3))

def ask_monty(text="who are you"):
    return _ask(_WWW + "b87c28624e34f68f", text, _split_last("Monty:"))

def ask_gaara(text="who are you"):
    return _ask(_WWW + "f100661fae3448e9", text, _split_last("Gaara :"))

def ask_tavabot(text="who are you"):
    return _ask(_WWW + "c49a3fa4de3437f8", text, _split_last("TAVABOT:"))

def ask_santas_elf_robot(text="what is your name"):
    return _ask(_WWW + "c39a3375ae34d985", text, _by_id("typing"))

def ask_jesus(text="what is your name"):
    return _ask(_WWW + "c6802be5ae363184", text, _split_last("Jesus: "))

def ask_jarvis(text="what is your name"):
    return _ask(_PB + "f1156095ee345aac", text, _by_id("typing"))

def ask_michael_jackson(text="what is your name"):
    return _ask(_WWW + "ef90e5c1be347e01", text, _split_last("<NAME>:"))

def ask_mr_whore(text="what is your name"):
    return _ask(_WWW + "bad9a9a0be34efcb", text, _split_last("Mr. Whore: "))

def ask_pyxis(text="what is your name"):
    return _ask(_WWW + "fad8d33fee365392", text, _color("#FFCCFF"))

def ask_eren(text="what is your name"):
    return _ask(_WWW + "b88a30282e347163", text, _split_last("Eren: "))

def ask_darkin0ria(text="what is your name"):
    return _ask(_WWW + "ef919e4f6e348667", text, _color("green"))

def ask_ariel(text="what is your name"):
    return _ask(_WWW + "a04b6f529e35047a", text, _color("green"))

def ask_thaladir(text="what is your name"):
    return _ask(_WWW + "ac3f4d012e361625", text, _color("green"))

def ask_pikachu(text="what is your name"):
    return _ask(_PB + "b6ef783abe36c353", text, _split_last("Pikachu:"))

def ask_harry_potter(text="what is your name"):
    return _ask(_PB + "a841d6e81e36b78f", text, _split_last("Harry Potter: "))

def ask_tombot(text="what is your name"):
    return _ask(_WWW + "ec5d51f42e367eb1", text,
                lambda s: s.find_all("p")[-2].text.split(":")[1].strip())

def ask_taylor_swift(text="what is your name"):
    return _ask(_WWW + "f74b85304e347e0e", text, lambda s: s.text.split(":")[3].strip())

def ask_wanlu(text="what is your name"):
    return _ask(_PB + "ff8d05da5e345bf7&skin=wanlu03", text, _line(-3))

def ask_hal9000(text="what is your name"):
    return _ask(_WWW + "be2cf7a28e347800", text, _split_last(":"))

def ask_pinnochion1(text="what is your name"):
    return _ask(_WWW + "f75f2c175e34107a", text, _split_last(":"))

def ask_yoshi(text="what is your name"):
    return _ask(_WWW + "a9e75ae12e363f6b", text,
                lambda s: s.text.split(":")[-2].replace("You", "").strip())

def ask_mayumi(text="what is your name"):
    return _ask(_WWW + "f120c2776e3778c2", text, _color("blue"))

def ask_captain_cultural_policy(text="what is your name"):
    return _ask(_WWW + "c13f66042e3447af", text, _color("#E10019"))

def ask_phillip_bot(text="what is your name"):
    return _ask(_WWW + "b9a032f8be37ba8e", text,
                lambda s: s.text.split(":")[2].replace("Type Here", "").strip())

def ask_itachi(text="what is your name"):
    return _ask(_WWW + "f0962253ee345b71", text, _split_last(":"))

def ask_707(text="what is your name"):
    return _ask(_WWW + "de569d6efe377f23", text, _split_last(":"))

def ask_gabriel(text="what is your name"):
    return _ask(_WWW + "9164ef73de357cb3", text, _split_last(":"))

def ask_edward_cullen(text="what is your name"):
    return _ask(_WWW + "d86a52a91e34517d", text, _split_last(":"))

def ask_nick_jonas(text="what is your name"):
    return _ask(_WWW + "b5cfa30eae376ba5", text, _color("green"))

def ask_clive(text="what is your name"):
    return _ask(_WWW + "effe99d2de376e1d", text, _split_last(":"))

def ask_atton(text="what is your name"):
    return _ask(_WWW + "9b7f16b86e35b16f", text, _color("blue"))

def ask_master_chief(text="what is your name"):
    return _ask(_WWW + "e4ca3cafbe34a91a", text,
                lambda s: s.text.strip().split("\n")[-1].strip())

def ask_mita(text="what is your name"):
    return _ask(_WWW + "9e218aa52e340fe3", text, _color("#339933"))

def ask_hk47(text="what is your name"):
    return _ask(_WWW + "805aba3b5e366915", text, lambda s: s.find_all("center")[2].text)

def ask_trey(text="what is your name"):
    return _ask(_WWW + "9029fa64ee36078e", text,
                lambda s: [t for t in s.text.split("\n") if t.strip()][-1].split(":")[1].replace(
                    "Talk 2 Trey is a purely fan-created work, and is not affiliated with South Park Studios, Trey Parker or with Comedy Central.",
                    "").strip())

def ask_cortana(text="what is your name"):
    return _ask(_WWW + "bdaf7f49be340a49", text, _split_last(":"))

def ask_lissie(text="what is your name"):
    return _ask(_WWW + "b3f4b1d34e36a3a4", text,
                lambda s: s.find("p").text.split(":")[-1].strip())

def ask_carolina(text="what is your name"):
    return _ask(_WWW + "a2535020de36ab54", text,
                lambda s: s.text.split(":")[-1].replace(
                    "Carolina © is developed by <NAME>. and <NAME>.", "").strip())

def ask_laylah(text="what is your name"):
    return _ask(_PB + "973eaff43e35194e&skin=nupage", text, _by_id("typing"))

def ask_amas_lucifer(text="what is your name"):
    return _ask(_WWW + "fd26e1547e36e84f", text,
                lambda s: s.find(color="#2cfcbe").text.split(":")[-1].strip())

def ask_zwatser(text="what is your name"):
    return _ask(_PB + "f6549746fe35938d", text, _color("#3D575D", strip=True))

def ask_leonardo(text="what is your name"):
    return _ask(_PB + "ed5602a31e3426bf", text, _color("#000000", last=True, strip=True))

def ask_hitler(text="what is your name"):
    return _ask(_WWW + "fa5a83c6fe365982", text, _split_last(":"))

def ask_doraemon(text="what is your name"):
    return _ask(_PB + "bf7120c3be345be9", text, _color("black"))

def ask_mario(text="what is your name"):
    return _ask(_PB + "dce5be031e375aa5", text, _split_last(":"))

def ask_cyber_guru(text="what is your name"):
    return _ask(_WWW + "b4bb24af9e36e4ec", text, _color("#ffffff"))

def ask_indra(text="what is your name"):
    return _ask(_WWW + "b6fdf5e8fe357b7b", text, _color("green", strip=True))

def ask_kim_jong_un(text="what is your name"):
    return _ask(_WWW + "cd5b93431e34da59", text,
                lambda s: [a for a in s.text.split("\n") if "김정은: " in a][0].split(":")[1].strip())

def ask_luna(text="what is your name"):
    return _ask(_PB + "a548a1fbbe34687a", text, _split_last(":"))

def ask_ELS(text="what is your name"):
    return _ask(_WWW + "81335b6afe35522e", text, _color("green", strip=True))

def ask_chesse_of_essex(text="what is your name"):
    return _ask(_WWW + "a3ea96d61e3425ca", text, _split_last(":"))
def ask_helpo(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=b4a69d0f7e348453"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip().split("|")[0]
def ask_santa(text="what is your name"):
url = "http://drwallace.vhost.pandorabots.com/pandora/talk?botid=dc7ac68f6e36f134&skin=input&speak=true"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[2].split("You say")[0].strip()
def ask_cherie(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=a7e2ad183e355d6a&skin=chatcher"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_naruto(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=96e0c0f2ae36c421"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_horny_helen(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=ce84a6fe8e3436bf"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_sailor_moon(text="what is your name"):
url = "https://pandorabots.com/pandora/talk?botid=e285c2b23e36ed13"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_grandma_elaine(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=8f02c14a0e34bbe4"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="#0000A0").text.strip()
def ask_witch(text="hi"):
url = "https://www.pandorabots.com/pandora/talk?botid=fb9b8c806e35ce8b"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="black").text.strip()
def ask_kennysbro(text="who are you"):
url = "http://demo.vhost.pandorabots.com/pandora/talk?botid=f58077abee3559c7"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.strip().split(" devoted to his bots.")[1].replace("Tell Kennysbro:", "").strip()
def ask_doroty(text="who are you"):
url = "https://www.pandorabots.com/pandora/talk?botid=9a5e54324e34a414"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find_all(color="#FF0000")[1].text
def ask_eeve(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=80c8a82dfe36395a"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_rosie(text="what is your name"):
url = "https://pandorabots.com/pandora/talk?botid=b16e613a3e341aa4"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_thothbot(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=d71a83785e35dc1c"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[1].split("Come on then")[0].strip()
def ask_kirk(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=c91cd9d5be347e25"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_paris(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=a304db0dae35c70b"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="yellow").text
def ask_ariane(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=85d191437e377304"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[2].replace("Ask Ariane", "").strip()
def ask_espeon3000(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=93e9da494e35b218"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_monster_hunter(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=9de0265bfe35c7de"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_ships_computer(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=970595d99e3645f3"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return " ".join([s.strip() for s in
soup.find(color="yellow").text.split("The Computer says:")[1].split("Instructions:")[0].split("\n")
if s])
def ask_negative7(text="who are you"):
url = "https://www.pandorabots.com/pandora/talk?botid=bad9cde60e354daa"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split("Say:")[0].split("Negative7 Chatbot")[1].strip()
def ask_severus_snape(text="what is your name"):
url = "http://demo.vhost.pandorabots.com/pandora/talk?botid=85bd13e5de34a4fd"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_shagojyo_hotbot(text="who are you"):
url = "https://www.pandorabots.com/pandora/talk?botid=9b288cb0ee36f4cc"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split("say:")[0].split("THANG")[1].strip()
def ask_virtual_hal(text="who are you"):
url = "https://www.pandorabots.com/pandora/talk?botid=89bd78935e350235"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split("say:")[0].split("Conversation:")[1].strip()
def ask_joe_jonas(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=8e2ba9309e36a226"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_dr_ann_neering(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=a0379d77fe369f47&skin=neering"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split("Say:")[1].strip()
def ask_alien_bot(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=ba0cb354be35bb45"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="white").text.strip()
def ask_bakabot(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=e5f85008fe354264"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="#7441D1").text.strip()
def ask_zelda(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=9ff89df2ee375b02"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_adam(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=bf1f94a7fe3429d8"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_mission_vao(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=e7ecc4b5ae3669e7"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split("Say:")[1].split("Please know that there are problems")[0].strip()
def ask_kim_kardashian(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=d7aded4b5e347e5c"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_aleka(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=f0726b22ee349ac0"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="red").text.strip()
def ask_archimedes(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=c34376751e34cf4d"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="green").text.strip()
def ask_abraham_lincoln(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=c6759b896e344a76"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_methos(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=b39c7d4eee34beb8"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(color="white").text.strip()
def ask_virtual_teacher(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=d0372925be35ac2c"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find_all(color="green")[-1].text.replace(":", "").strip()
def ask_evilness(text="who are you"):
url = "https://www.pandorabots.com/pandora/talk?botid=90ccf0f6ae356c2b"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split("say:")[0].split("LISTENING")[1].strip()
def ask_vincent(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=87395fcdfe35bc4d"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.find(id="typing").text.strip()
def ask_monica(text="what is your name"):
url = "https://pandorabots.com/pandora/talk?botid=c89a77f43e34714a"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
def ask_sailor_mercury(text="what is your name"):
url = "https://www.pandorabots.com/pandora/talk?botid=ebdf3864fe34aa5e"
data = {
"input": text}
html = requests.post(url, data).text
soup = BeautifulSoup(html, 'html.parser')
return soup.text.split(":")[-1].strip()
if __name__ == "__main__":
    # Smoke test: query one bot over the network and print its reply.
    print(
        ask_sailor_mercury()
    )
| StarcoderdataPython |
154376 | <reponame>SandyChapman/pyright<filename>packages/pyright-internal/src/tests/samples/literals7.py
# This sample tests the handling of very large integer values used in
# literals.
from typing import Literal
# This should generate an error.
y1: Literal[
    900001231231231456487987456452132130000000000000000000000000000001
] = 900001231231231456487987456452132130000000000000000000000000000000
y2: Literal[
    900001231231231456487987456452132130000000000000000000000000000001
] = 900001231231231456487987456452132130000000000000000000000000000001
# reveal_type is evaluated by the type checker; expected_text pins the
# literal type it must infer for the expression.
reveal_type(
    y2,
    expected_text="Literal[900001231231231456487987456452132130000000000000000000000000000001]",
)
# Arithmetic on literal ints should fold into a new literal type.
y3 = y2 + 1
reveal_type(
    y3,
    expected_text="Literal[900001231231231456487987456452132130000000000000000000000000000002]",
)
# Hex and binary literals beyond 64 bits must be handled exactly as well.
y4 = 0xFFFFFFFFFFF123456789456123456789456123456789456123
reveal_type(
    y4,
    expected_text="Literal[1606938044258905427252460960878516708721138816242982137979171]",
)
y5 = 0b101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
reveal_type(y5, expected_text="Literal[886151997189943915269204706853563050]")
| StarcoderdataPython |
70333 | <gh_stars>0
import logging
import torch
import os
from base_read_data import prepare_data, Sample, domain_slot_list, domain_slot_type_map, SampleDataset
from base_model import BaseModel
from base_config import args, DEVICE, medium_result_template, evaluation_folder, ckpt_template, logger
import pickle
import torch.multiprocessing as mp
from torch import nn
from tqdm import tqdm
from collections import OrderedDict
from base_evaluation import reconstruct_batch_predict_label_train, batch_eval, comprehensive_eval,\
evaluation_test_batch_eval
import torch.distributed as dist
from transformers import get_linear_schedule_with_warmup, AdamW
# Shorthands for frequently-used configuration values from base_config.args.
PROCESS_GLOBAL_NAME = args['process_name']  # tag embedded in checkpoint/eval file names
use_multi_gpu = args['multi_gpu']  # True -> DistributedDataParallel training
overwrite = args['overwrite_cache']  # re-build the cached dataset if True
start_epoch = args['start_epoch']  # steps before this epoch are skipped (resume support)
load_cpkt_path = args['load_cpkt_path']  # checkpoint to restore before training/eval
mode = args['mode']  # 'train' or evaluation-only
def train(model, name, train_loader, dev_loader, test_loader, classify_slot_index_value_dict,
          classify_slot_value_index_dict, local_rank=None):
    """Run the full training loop, evaluating on dev and test after every epoch.

    In multi-GPU mode each rank trains its shard and rank 0 aggregates the
    per-rank pickled results; evaluation always reloads a fresh, non-DDP
    model from the just-saved checkpoint.
    """
    max_step = len(train_loader) * args['epoch']
    num_warmup_steps = int(len(train_loader) * args['epoch'] * args['warmup_proportion'])
    # Standard transformer fine-tuning: no weight decay on biases/LayerNorm.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args['weight_decay']},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args['learning_rate'], eps=args['adam_epsilon'])
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps,
                                                num_training_steps=max_step)
    global_step = 0
    ckpt_path = None
    for epoch in range(args['epoch']):
        logger.info("Epoch :{}".format(epoch))
        if use_multi_gpu:
            # Re-seed the distributed sampler so each epoch shuffles differently.
            train_loader.sampler.set_epoch(epoch)
        epoch_result = []
        # Run the train function
        if mode == 'train':
            model.train()
            full_loss = 0
            for train_batch in tqdm(train_loader):
                global_step += 1
                # Resume support: fast-forward the scheduler through already
                # completed epochs without touching the model.
                if global_step < start_epoch * len(train_loader):
                    scheduler.step()
                    continue
                if not use_multi_gpu:
                    train_batch = data_device_alignment(train_batch)
                predict_gate, predict_dict, referred_dict = model(train_batch)
                loss, train_batch_predict_label_dict = train_compute_loss_and_batch_eval(
                    predict_gate, predict_dict, referred_dict, train_batch, classify_slot_index_value_dict, local_rank)
                optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args['max_grad_norm'])
                optimizer.step()
                scheduler.step()
                full_loss += loss.detach().item()
                epoch_result.append(batch_eval(train_batch_predict_label_dict, train_batch))
                del loss, predict_gate, predict_dict, referred_dict, train_batch  # for possible CUDA out of memory
            logger.info('average loss of epoch: {}: {}'.format(epoch, full_loss / len(train_loader)))
            if use_multi_gpu:
                # Each rank dumps its results; rank 0 merges them after a barrier.
                pickle.dump(epoch_result,
                            open(medium_result_template.format('train', PROCESS_GLOBAL_NAME, epoch, local_rank), 'wb'))
                torch.distributed.barrier()
                if local_rank == 0:
                    result_list = load_result_multi_gpu('train', epoch)
                    result_print(comprehensive_eval(result_list, 'train', PROCESS_GLOBAL_NAME, epoch))
                torch.distributed.barrier()
            else:
                result_print(comprehensive_eval(epoch_result, 'train', PROCESS_GLOBAL_NAME, epoch))
            # save model
            ckpt_path = ckpt_template.format(PROCESS_GLOBAL_NAME, epoch)
            save_model(use_multi_gpu, model, ckpt_path, local_rank)
        # Validation and test. Because the original data must be judged in
        # order, the distributed model cannot be used here, so a fresh
        # single-device model is reloaded from the checkpoint instead.
        # (translated from the original Chinese comment)
        if (use_multi_gpu and local_rank == 0) or not use_multi_gpu:
            if mode != 'train':
                assert ckpt_path is None and load_cpkt_path is not None
                eval_model = BaseModel(name, args['pretrained_model'], classify_slot_value_index_dict)
                eval_model = eval_model.cuda(DEVICE)
                load_model(multi_gpu=False, model=eval_model, ckpt_path=load_cpkt_path)
            else:
                assert ckpt_path is not None
                eval_model = BaseModel(name, args['pretrained_model'], classify_slot_value_index_dict)
                eval_model = eval_model.cuda(DEVICE)
                load_model(multi_gpu=False, model=eval_model, ckpt_path=ckpt_path)
            logger.info('start evaluation in dev dataset, epoch: {}'.format(epoch))
            model_eval(eval_model, dev_loader, 'dev', epoch, classify_slot_index_value_dict, local_rank)
            logger.info('start evaluation in test dataset, epoch: {}'.format(epoch))
            model_eval(eval_model, test_loader, 'test', epoch, classify_slot_index_value_dict, local_rank)
        if use_multi_gpu:
            torch.distributed.barrier()
def save_model(multi_gpu, model, ckpt_path, local_rank=None):
    """Persist the model's state dict; in DDP mode only rank 0 writes."""
    if not multi_gpu:
        torch.save(model.state_dict(), ckpt_path)
    else:
        if local_rank == 0:
            torch.save(model.state_dict(), ckpt_path)
        # All ranks wait until the checkpoint is fully written.
        dist.barrier()
    logger.info('save model success')
def load_model(multi_gpu, model, ckpt_path, local_rank=None):
    """Load a checkpoint into *model*, reconciling DDP's 'module.' key prefix.

    In multi-GPU mode the checkpoint tensors are mapped from the rank-0
    device onto this rank's device and every key is given a 'module.'
    prefix (the DistributedDataParallel wrapper expects it).  In single-GPU
    mode the prefix is stripped instead.
    """
    if multi_gpu:
        map_location = {'cuda:%d' % 0: 'cuda:%d' % local_rank}
        state_dict = torch.load(ckpt_path, map_location=map_location)
        new_state_dict = OrderedDict()
        for key in state_dict:
            if 'module.' in key:
                new_state_dict[key] = state_dict[key]
            else:
                new_state_dict['module.' + key] = state_dict[key]
        # Bug fix: the remapped dict was built but the raw state_dict was
        # loaded, so checkpoints saved without the 'module.' prefix failed
        # to load into DDP-wrapped models.
        model.load_state_dict(new_state_dict)
    else:
        state_dict = torch.load(ckpt_path, map_location=torch.device(DEVICE))
        new_state_dict = OrderedDict()
        for key in state_dict:
            if 'module.' in key:
                new_state_dict[key.replace('module.', '')] = state_dict[key]
            else:
                new_state_dict[key] = state_dict[key]
        model.load_state_dict(new_state_dict)
    logger.info('load model success')
def data_device_alignment(batch):
    """Move the tensor members of a training batch onto the configured device.

    Indices 1, 2, 5 and 9 are plain tensors; indices 6, 7 and 8 are dicts
    keyed by domain-slot whose values are tensors.
    """
    batch = list(batch)
    for idx in (1, 2, 5, 9):
        batch[idx] = batch[idx].to(DEVICE)
    for slot_key in batch[6]:
        for idx in (6, 7, 8):
            batch[idx][slot_key] = batch[idx][slot_key].to(DEVICE)
    return batch
def result_print(comprehensive_result):
    """Write every line of an evaluation summary to the module logger."""
    for entry in comprehensive_result:
        logger.info(entry)
def train_compute_loss_and_batch_eval(predict_gate, predict_dict, referred_dict, train_batch,
                                      classify_slot_index_value_dict, local_rank=None):
    """Compute the weighted multi-task loss and per-batch predicted labels.

    The loss is a weighted sum over all domain-slots of: gate (slot active?),
    referral (copied from another slot?), and either a classification loss or
    a span start/end loss depending on the slot type.  Also reconstructs the
    predicted labels for batch-level evaluation.
    """
    gate_weight = float(args['gate_weight'])
    span_weight = float(args['span_weight'])
    classify_weight = float(args['classify_weight'])
    referral_weight = float(args['referral_weight'])
    batch_predict_label_dict = {}
    # ignore_index=-1 masks padded / not-applicable labels out of the loss.
    cross_entropy = nn.CrossEntropyLoss(ignore_index=-1).to(DEVICE)
    if local_rank is not None:
        cross_entropy = cross_entropy.cuda(local_rank)
    gate_loss, classify_loss, referral_loss, span_loss = 0, 0, 0, 0
    for domain_slot in domain_slot_list:
        # Pick the target device per execution mode (single vs. distributed).
        if not use_multi_gpu:
            predict_hit_type_one_slot = predict_gate[domain_slot].to(DEVICE)
            predict_value_one_slot = predict_dict[domain_slot].to(DEVICE)
            predict_referral_one_slot = referred_dict[domain_slot].to(DEVICE)
            label_hit_type_one_slot = train_batch[7][domain_slot].to(DEVICE)
            label_value_one_slot = train_batch[8][domain_slot].to(DEVICE)
            label_referral_one_slot = train_batch[6][domain_slot].to(DEVICE)
        else:
            predict_hit_type_one_slot = predict_gate[domain_slot].to(local_rank)
            predict_value_one_slot = predict_dict[domain_slot].to(local_rank)
            predict_referral_one_slot = referred_dict[domain_slot].to(local_rank)
            label_hit_type_one_slot = train_batch[7][domain_slot].to(local_rank)
            label_value_one_slot = train_batch[8][domain_slot].to(local_rank)
            label_referral_one_slot = train_batch[6][domain_slot].to(local_rank)
        batch_predict_label_dict[domain_slot] = reconstruct_batch_predict_label_train(
            domain_slot, predict_hit_type_one_slot, predict_value_one_slot,
            predict_referral_one_slot, train_batch, classify_slot_index_value_dict)
        gate_loss += cross_entropy(predict_hit_type_one_slot, label_hit_type_one_slot)
        referral_loss += cross_entropy(predict_referral_one_slot, label_referral_one_slot)
        if domain_slot_type_map[domain_slot] == 'classify':
            classify_loss += cross_entropy(predict_value_one_slot, label_value_one_slot)
        else:
            assert domain_slot_type_map[domain_slot] == 'span'
            # Span slots predict token start/end positions; average both CEs.
            pred_start, pred_end = predict_value_one_slot[:, :, 0], predict_value_one_slot[:, :, 1]
            label_start, label_end = label_value_one_slot[:, 0], label_value_one_slot[:, 1]
            span_loss += (cross_entropy(pred_start, label_start) + cross_entropy(pred_end, label_end)) / 2
    loss = gate_weight*gate_loss + classify_weight*classify_loss + referral_weight*referral_loss + span_weight*span_loss
    return loss, batch_predict_label_dict
def model_eval(model, data_loader, data_type, epoch, classify_slot_index_value_dict, local_rank=None):
    """Evaluate *model* on dev/test data and log an aggregated summary.

    Evaluation depends on the data loader sampling sequentially, because
    dialogue state is carried from one batch to the next.
    (translated from the original Chinese comment)
    """
    model.eval()
    result_list = []
    latest_state, last_sample_id = {domain_slot: 'none' for domain_slot in domain_slot_list}, ''
    with torch.no_grad():
        # Only one process evaluates: rank 0 under DDP, or the sole process.
        if (use_multi_gpu and local_rank == 0) or (not use_multi_gpu):
            for batch in tqdm(data_loader):
                if not use_multi_gpu:
                    batch = data_device_alignment(batch)
                predict_gate, predict_dict, referred_dict = model(batch)
                batch_predict_label_dict, last_sample_id, latest_state = \
                    evaluation_test_batch_eval(predict_gate, predict_dict, referred_dict, batch,
                                               classify_slot_index_value_dict, latest_state, last_sample_id)
                result_list.append(batch_eval(batch_predict_label_dict, batch))
    result_print(comprehensive_eval(result_list, data_type, PROCESS_GLOBAL_NAME, epoch))
    if use_multi_gpu:
        torch.distributed.barrier()
    logger.info('model eval, data: {}, epoch: {} finished'.format(data_type, epoch))
def load_result_multi_gpu(data_type, epoch):
    """Collect the pickled per-GPU evaluation results for one epoch.

    Every rank dumps a file whose name contains ``<type>_<process>_<epoch>``;
    this merges the sample results of all of them into a single flat list.
    """
    key_name = '{}_{}_{}'.format(data_type, PROCESS_GLOBAL_NAME, epoch).strip()
    target_file_list = [name for name in os.listdir(evaluation_folder) if key_name in name]
    # One result file is expected per GPU.
    assert len(target_file_list) == torch.cuda.device_count()
    result_list = []
    for file_name in target_file_list:
        with open(os.path.join(evaluation_folder, file_name), 'rb') as result_file:
            result_list.extend(pickle.load(result_file))
    return result_list
def single_gpu_main(pass_info):
    """Build the model on the single configured device and run training."""
    data, classify_slot_value_index_dict, classify_slot_index_value_dict = prepare_data(overwrite=overwrite)
    train_loader, dev_loader, test_loader = data
    name, pretrained_model = pass_info
    model = BaseModel(name, pretrained_model, classify_slot_value_index_dict)
    model = model.cuda(DEVICE)
    # Optionally resume from a previously saved checkpoint.
    if os.path.exists(load_cpkt_path):
        load_model(use_multi_gpu, model, load_cpkt_path)
    train(model, name, train_loader, dev_loader, test_loader, classify_slot_index_value_dict,
          classify_slot_value_index_dict)
def multi_gpu_main(local_rank, _, pass_info):
    """Per-process entry point spawned once per GPU for DDP training."""
    name, pretrained_model = pass_info
    num_gpu = torch.cuda.device_count()
    logger.info('GPU count: {}'.format(num_gpu))
    # One process group over all local GPUs, rendezvous via localhost TCP.
    torch.distributed.init_process_group(backend="nccl", init_method='tcp://127.0.0.1:23456', world_size=num_gpu,
                                         rank=local_rank)
    data, classify_slot_value_index_dict, classify_slot_index_value_dict = prepare_data(overwrite=overwrite)
    train_loader, dev_loader, test_loader = data
    logger.info('world size: {}'.format(torch.distributed.get_world_size()))
    local_rank = torch.distributed.get_rank()
    logger.info('local rank: {}'.format(local_rank))
    # DEVICE = torch.device("cuda", local_rank)
    torch.cuda.set_device(local_rank)
    model = BaseModel(name, pretrained_model, classify_slot_value_index_dict)
    model = model.cuda(local_rank)  # copy the model onto each GPU (translated)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank,
                                                      find_unused_parameters=True)
    # Optionally resume from a previously saved checkpoint.
    if os.path.exists(load_cpkt_path):
        load_model(use_multi_gpu, model, load_cpkt_path, local_rank)
    train(model, name, train_loader, dev_loader, test_loader, classify_slot_index_value_dict,
          classify_slot_value_index_dict, local_rank)
def main():
    """Entry point: dispatch to single- or multi-GPU training."""
    pass_info = (args['name'], args['pretrained_model'])
    logger.info('start training')
    if not use_multi_gpu:
        single_gpu_main(pass_info)
    else:
        # Spawn one worker process per visible GPU.
        num_gpu = torch.cuda.device_count()
        mp.spawn(multi_gpu_main, nprocs=num_gpu, args=(num_gpu, pass_info))
if __name__ == '__main__':
    # Log the effective configuration before training starts.
    # Consistency fix: use the module's configured `logger` (as everywhere
    # else in this file) instead of the root `logging` module.
    for item in args:
        logger.info('{} value: {}'.format(item, args[item]))
    main()
| StarcoderdataPython |
3293106 | <reponame>bytecrash/iromoozik<gh_stars>0
import os
import pymongo
from motor.motor_asyncio import AsyncIOMotorClient
# Bug fix: os.environ.get() takes the *name* of an environment variable, but
# the connection string itself was being passed as the key, so the lookup
# always returned None and the client silently fell back to its default host.
# Read the URI from the MONGODB_URI environment variable, keeping the original
# connection string as the fallback default.
client = AsyncIOMotorClient(
    host=os.environ.get(
        "MONGODB_URI",
        "mongodb://userWXA:UOQwEVhSarlqOdcW@mongodb/sampledb",
    )
)
# Database handle used by the query helpers below.
db = client.music
def text_search(query):
    """Full-text search over the tracks collection, ranked by text score."""
    projection = {'score': {'$meta': 'textScore'}}
    cursor = db.tracks.find({'$text': {'$search': query}}, projection)
    return cursor.sort([('score', {'$meta': 'textScore'})])
async def prepare_index():
    """Create the text and lookup indexes the search helpers rely on."""
    text_index = [
        ("title", pymongo.TEXT),
        ("performer", pymongo.TEXT),
    ]
    await db.tracks.create_index(text_index)
    await db.tracks.create_index([("file_id", pymongo.ASCENDING)])
    await db.users.create_index("id")
| StarcoderdataPython |
185707 | <gh_stars>100-1000
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006,2007,2008,2009 <NAME> <<EMAIL>>
from twisted.python import failure
from twisted.python.util import OrderedDict
from coherence import log
class Argument:
    """A single UPnP action argument: name, direction, related state variable."""

    def __init__(self, name, direction, state_variable):
        self.name = name
        self.direction = direction  # 'in' or 'out'
        self.state_variable = state_variable

    def get_name(self):
        return self.name

    def get_direction(self):
        return self.direction

    def get_state_variable(self):
        return self.state_variable

    def __repr__(self):
        # Bug fix: the format string was missing the closing parenthesis
        # of "Argument(...)".
        return ("Argument(%(name)r, %(direction)r, %(state_variable)r)"
                % vars(self))

    def as_tuples(self):
        """Return the argument as ordered (label, value) pairs for display."""
        r = [
            ('Name', self.name),
            ('Direction', self.direction),
            ('Related State Variable', self.state_variable)
        ]
        return r

    def as_dict(self):
        """Return the argument as a plain dict (e.g. for JSON serialization)."""
        return {
            'name': self.name,
            'direction': self.direction,
            'related_state_variable': self.state_variable
        }
class Action(log.Loggable):
    """Client-side proxy for a UPnP service action.

    Validates keyword arguments against the declared 'in' arguments, applies
    any per-device overlay actions/headers, performs the SOAP call via the
    service's client, and writes returned 'out' arguments back into the
    service's state variables.
    """
    logCategory = 'action'

    def __init__(self, service, name, implementation, arguments_list):
        log.Loggable.__init__(self)
        self.service = service
        self.name = name
        self.implementation = implementation
        self.arguments_list = arguments_list
        self.callback = None

    def _get_client(self):
        client = self.service._get_client(self.name)
        return client

    def get_name(self):
        return self.name

    def get_implementation(self):
        return self.implementation

    def get_arguments_list(self):
        return self.arguments_list

    def get_in_arguments(self):
        return [arg for arg in self.arguments_list
                if arg.direction == 'in']

    def get_out_arguments(self):
        return [arg for arg in self.arguments_list
                if arg.direction == 'out']

    def get_service(self):
        return self.service

    def set_callback(self, callback):
        self.callback = callback

    def get_callback(self):
        return self.callback

    def call(self, *args, **kwargs):
        """Invoke the remote action; returns a Deferred with the results."""
        self.info("calling %s", self.name)
        in_arguments = self.get_in_arguments()
        self.info("in arguments %s", [a.get_name() for a in in_arguments])
        instance_id = kwargs.get('InstanceID', 0)
        # check for missing or extraneous arguments
        passed_args = set(kwargs)
        expected_args = set(a.get_name() for a in in_arguments)
        if passed_args - expected_args:
            self.error("arguments %s not valid for action %s",
                       list(passed_args - expected_args), self.name)
            return
        elif expected_args - passed_args:
            self.error("argument %s missing for action %s",
                       list(expected_args - passed_args), self.name)
            return
        action_name = self.name
        device_client = self.service.device.client
        # Devices may remap an action name and its arguments via overlays.
        if self.name in getattr(device_client, 'overlay_actions', {}):
            self.info("we have an overlay method %r for action %r",
                      device_client.overlay_actions[self.name], self.name)
            action_name, kwargs = device_client.overlay_actions[self.name](**kwargs)
            self.info("changing action to %r %r", action_name, kwargs)
        if hasattr(device_client, 'overlay_headers'):
            # Bug fix: dict.has_key() was removed in Python 3; use 'in'.
            self.info("action call has headers %r", 'headers' in kwargs)
            if 'headers' in kwargs:
                kwargs['headers'].update(device_client.overlay_headers)
            else:
                kwargs['headers'] = device_client.overlay_headers
            self.info("action call with new/updated headers %r", kwargs['headers'])
        # SOAP arguments must keep the declared order.
        ordered_arguments = OrderedDict()
        for argument in self.get_in_arguments():
            ordered_arguments[argument.name] = kwargs[argument.name]
        if 'headers' in kwargs:
            ordered_arguments['headers'] = kwargs['headers']
        client = self._get_client()
        d = client.callRemote(action_name, ordered_arguments)
        d.addCallback(self._got_results, instance_id=instance_id,
                      name=action_name)
        d.addErrback(self._got_error)
        return d

    def _got_error(self, failure):
        self.warning("error on %s request with %s %s",
                     self.name, self.service.service_type,
                     self.service.control_url)
        self.info(failure)
        return failure

    def _got_results(self, results, instance_id, name):
        instance_id = int(instance_id)
        out_arguments = self.get_out_arguments()
        self.info("call %s (instance %d) returns %d arguments: %r",
                  name, instance_id, len(out_arguments), results)
        # Update state-variables from the result. NB: This silently
        # ignores missing and extraneous result values. I'm not sure
        # if this is according to the DLNA specs. :todo: check the DLNS-specs
        for outarg in out_arguments:
            if outarg.get_name() in results:
                var = self.service.get_state_variable(
                    outarg.get_state_variable(), instance_id)
                var.update(results[outarg.get_name()])
        return results

    def __repr__(self):
        # Bug fix: the format string was malformed ("(%arguments_list)r" and
        # a missing closing parenthesis) and raised on repr().
        return ("Action(%(name)r, %(implementation)r, %(arguments_list)r)"
                % vars(self))

    def as_tuples(self):
        r = [
            ('Name', self.name),
            ("Number of 'in' arguments", len(self.get_in_arguments())),
            ("Number of 'out' arguments", len(self.get_out_arguments())),
        ]
        return r

    def as_dict(self):
        return {
            'name': self.name,
            'arguments': [a.as_dict() for a in self.arguments_list]
        }
| StarcoderdataPython |
3268642 | <reponame>martinfleis/pysal
import esda
import giddy
import inequality
import pointpats
import spaghetti
import segregation
| StarcoderdataPython |
1612140 | import jinja2
import flask_themes2
def get_global_theme_template(cache):
    """Build a Jinja2 global that resolves a template path for the active theme.

    Returns a context function usable inside templates; *cache* memoizes
    per-(theme, template, fallback) path lookups.
    """
    @cache.memoize()
    def _get_templatepath(theme, templatename, fallback):
        # Themed templates live under the '_themes/<theme>/' prefix.
        templatepath = '_themes/{}/{}'.format(theme, templatename)
        if (not fallback) or flask_themes2.template_exists(templatepath):
            return templatepath
        # Fall back to the unthemed template when the theme lacks it.
        return templatename

    @jinja2.contextfunction
    def global_theme_template(ctx, templatename, fallback=True):
        # The active theme is determined from the rendering context.
        theme = flask_themes2.active_theme(ctx)
        return _get_templatepath(theme, templatename, fallback)
    return global_theme_template
| StarcoderdataPython |
3200157 | # -*- coding: utf-8 -*-
"""
.. module:: test_login_view
"""
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APITestCase
from apps.volontulo.factories import UserFactory
ENDPOINT_URL = reverse('api_logout')
class TestLogoutViewAuthenticated(APITestCase, TestCase):
    """An authenticated user can log out and receives HTTP 200."""

    def test_logout(self):
        user = UserFactory()
        self.client.force_login(user)
        response = self.client.post(ENDPOINT_URL)
        self.assertEqual(response.status_code, 200)
class TestLogoutViewNotAuthenticated(APITestCase, TestCase):
    """Logging out without an active session is rejected with HTTP 400."""

    def test_logout(self):
        response = self.client.post(ENDPOINT_URL)
        self.assertEqual(response.status_code, 400)
| StarcoderdataPython |
3269228 | <filename>dags/example_all.py
"""
Example DAG where rekcurd_airflow plugins are used
"""
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.bash_operator import BashOperator
from rekcurd_airflow.operators import EvaluationUploadOperator, \
ModelDeleteOperator, ModelEvaluateOperator, ModelSwitchOperator, ModelUploadOperator
from datetime import timedelta
# Default task arguments applied to every operator in the DAG defined below.
default_args = {
    'owner': 'rekcurd-airflow',
    'depends_on_past': False,
    'start_date': airflow.utils.dates.days_ago(2),
    'email': [],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(seconds=5),
}
# Local path of the throw-away evaluation fixture created/removed by tasks.
EVAL_PATH = '/tmp/tmp_rekcurd_eval.txt'
def write_eval_file(**kwargs):
    """Write a tiny tab-separated evaluation fixture to EVAL_PATH."""
    with open(EVAL_PATH, 'w') as eval_file:
        eval_file.write('1\t0\t2\t3')
def train_func(**kwargs):
    """Fake training task: publish a dummy model and description via XCom."""
    ti = kwargs['ti']
    pushes = (
        (ModelUploadOperator.MODEL_KEY, 'dummy model content'),
        (ModelUploadOperator.MODEL_DESCRIPTION_KEY, 'dummy description'),
    )
    for key, value in pushes:
        ti.xcom_push(key=key, value=value)
def is_good_evaluation_result(**kwargs):
    """Branch on evaluated accuracy: keep the candidate model or delete it.

    Returns the task_id that BranchPythonOperator should follow next.
    """
    result = kwargs['ti'].xcom_pull(task_ids='evaluate_model')
    threshold = 0.7
    below_threshold = result['accuracy'] < threshold
    return 'delete_model' if below_threshold else 'switch_development_model'
def output_metrics(**kwargs):
    """Print evaluation metrics pulled from the 'evaluate_model' task.

    Output should be like:
        accuracy: 0.78573
        precision: 0.79797 0.54825 0.79828
        recall: 0.79797 0.54825 0.79828
        fvalue: 0.79797 0.54825 0.79828
    """
    result = kwargs['ti'].xcom_pull(task_ids='evaluate_model')
    for metric in ('accuracy', 'precision', 'recall', 'fvalue'):
        if metric == 'accuracy':
            # Accuracy is a scalar; the other metrics are per-class sequences.
            print(metric + ':', '{:.5f}'.format(result[metric]))
        else:
            print(metric + ':', ' '.join('{:.5f}'.format(v) for v in result[metric]))
# Wire the pipeline: train -> upload -> switch sandbox -> wait -> evaluate,
# then branch on the metrics to either promote the model or delete it.
with DAG('example_all', default_args=default_args, schedule_interval="@once") as dag:
    train = PythonOperator(task_id='train', python_callable=train_func, provide_context=True)
    # Identifiers of the target Rekcurd project / application / services.
    project_id = 1
    application_id = 'sample_app'
    sandbox_service_id = 10
    dev_service_id = 11
    upload_model = ModelUploadOperator(task_id='upload_model',
                                       project_id=project_id,
                                       app_id=application_id,
                                       model_provide_task_id='train')
    switch_sandbox_model = ModelSwitchOperator(task_id='switch_sandbox_model',
                                               project_id=project_id,
                                               app_id=application_id,
                                               service_id=sandbox_service_id,
                                               model_provide_task_id='upload_model')
    # wait until kubernetes cluster finishes rolling update.
    wait = BashOperator(task_id='wait_updating', bash_command='sleep 800')
    save_eval_file = PythonOperator(task_id='write_eval_file',
                                    python_callable=write_eval_file,
                                    provide_context=True)
    upload_evaluation_file = EvaluationUploadOperator(task_id='upload_eval_file',
                                                      project_id=project_id,
                                                      app_id=application_id,
                                                      evaluation_file_path=EVAL_PATH,
                                                      description='sample file')
    # 'all_done' ensures the local fixture is removed even when upstream fails.
    remove_eval_file = BashOperator(task_id='remove_local_eval_file',
                                    bash_command='rm {}'.format(EVAL_PATH),
                                    trigger_rule='all_done')
    evaluate_model = ModelEvaluateOperator(task_id='evaluate_model',
                                           project_id=project_id,
                                           app_id=application_id,
                                           model_provide_task_id='upload_model',
                                           evaluation_provide_task_id='upload_eval_file')
    output_metrics = PythonOperator(task_id='output_metrics',
                                    python_callable=output_metrics,
                                    provide_context=True)
    judge_metrics = BranchPythonOperator(task_id='judge_metrics',
                                         python_callable=is_good_evaluation_result,
                                         provide_context=True)
    delete_model = ModelDeleteOperator(task_id='delete_model',
                                       project_id=project_id,
                                       app_id=application_id,
                                       model_provide_task_id='upload_model')
    switch_dev_model = ModelSwitchOperator(task_id='switch_development_model',
                                           project_id=project_id,
                                           app_id=application_id,
                                           service_id=dev_service_id,
                                           model_provide_task_id='upload_model')
    """
    After completing switching model and uploading evaluation data, evaluate the new model.
    If the new model has low accuracy, delete the model.
    If the new model has high accuracy, use the model in development environment as well.
    """
    train >> upload_model >> switch_sandbox_model >> wait >> evaluate_model
    save_eval_file >> upload_evaluation_file >> remove_eval_file >> evaluate_model
    evaluate_model >> output_metrics >> judge_metrics >> switch_dev_model
    judge_metrics >> delete_model
| StarcoderdataPython |
3247953 | from requests import Session
def run():
    """Entry point: print a greeting to stdout."""
    print("Hello world!")
if __name__ == '__main__':
run() | StarcoderdataPython |
68720 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 1.1.5
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, <NAME>
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json, re, urllib, webapp2
# Upload-demo configuration: size limits, accepted MIME types and blob TTL.
WEBSITE = 'http://blueimp.github.com/jQuery-File-Upload/'
MIN_FILE_SIZE = 1  # bytes
MAX_FILE_SIZE = 5000000  # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80'  # max width / height
EXPIRATION_TIME = 300  # seconds
def cleanup(blob_keys):
    """Deferred task: delete the uploaded blobs once EXPIRATION_TIME has passed."""
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    """Handle jQuery-File-Upload POSTs on App Engine: validate, store blobs,
    and answer with a JSON description of each uploaded file.

    NOTE(review): this is legacy Python 2 / App Engine code (``unicode``,
    the deprecated ``files`` API); keep it compatible when editing.
    """
    def initialize(self, request, response):
        super(UploadHandler, self).initialize(request, response)
        # Permissive CORS headers so the demo can be called cross-origin.
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
    def validate(self, file):
        # Populate file['error'] and return False on any constraint violation.
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False
    def get_file_size(self, file):
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size
    def write_blob(self, data, info):
        # Store the raw bytes in the blobstore and return the resulting key.
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)
    def handle_upload(self):
        """Process every file field of the POST; returns per-file result dicts."""
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            # Plain (unicode) form fields are not uploads; skip them.
            if type(fieldStorage) is unicode:
                continue
            result = {}
            # Strip any Windows-style path prefix from the client filename.
            result['name'] = re.sub(r'^.*\\', '',
                                    fieldStorage.filename)
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['delete_type'] = 'DELETE'
                result['delete_url'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url\
                            .startswith('https')
                        )
                        result['thumbnail_url'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    # NOTE(review): bare except deliberately swallows any
                    # failure here; narrowing it would change best-effort
                    # behaviour — confirm before tightening.
                    except:  # Could not get an image serving url
                        pass
                if not 'url' in result:
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # Schedule automatic deletion of the demo blobs after the TTL.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results
    def options(self):
        pass
    def head(self):
        pass
    def get(self):
        self.redirect(WEBSITE)
    def post(self):
        # Browsers that can't send DELETE tunnel it through a form field.
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        s = json.dumps(self.handle_upload(), separators=(',',':'))
        redirect = self.request.get('redirect')
        if redirect:
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)
    def delete(self):
        blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serve an uploaded blob by key, with a short client-side cache window."""

    def get(self, key, filename):
        if blobstore.get(key):
            # Cache for the expiration time:
            self.response.headers['Cache-Control'] =\
                'public,max-age=%d' % EXPIRATION_TIME
            self.send_blob(key, save_as=filename)
        else:
            self.error(404)
app = webapp2.WSGIApplication(
[
('/', UploadHandler),
('/([^/]+)/([^/]+)', DownloadHandler)
],
debug=True
) | StarcoderdataPython |
3212484 | from collections import namedtuple
Colour = namedtuple("Colour", "r, g, b, a")
| StarcoderdataPython |
17525 | <gh_stars>1-10
import os.path
import pytest
import py
from pynpact.steps import extract
def test_binfile_exists():
    """The extract step must point at an existing compiled binary."""
    assert extract.BIN
    assert os.path.exists(extract.BIN)
def test_plan(gbkconfig, executor):
    """Synchronous extract.plan run produces the expected output file."""
    extract.plan(gbkconfig, executor)
    filename = gbkconfig[extract.OUTPUTKEY]
    assert filename
    p = py.path.local(filename)
    assert p.exists()
    # based on how many genes are in testgbk
    assert 3 == len(p.readlines())
def test_plan_async(gbkconfig, async_executor):
    """Asynchronous extract.plan run, waiting on the executor for the result."""
    extract.plan(gbkconfig, async_executor)
    filename = gbkconfig[extract.OUTPUTKEY]
    assert filename
    # Block (with a 1-unit timeout) until the async job has produced the file.
    async_executor.result(filename, 1)
    p = py.path.local(filename)
    assert p.exists()
    # based on how many genes are in testgbk
    assert 3 == len(p.readlines())
| StarcoderdataPython |
3264895 | #!/usr/bin/env python
import os
from glob import glob
from distutils.core import setup
# Distutils packaging metadata for the ceres time-series database.
# NOTE(review): '<NAME>' / '<EMAIL>' are placeholders left by dataset
# anonymization, not real values.
setup(
    name='ceres',
    version='0.10.0',
    url='https://github.com/graphite-project/ceres',
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache Software License 2.0',
    description='Distributable time-series database',
    py_modules=['ceres'],
    scripts=glob('bin/*')
)
| StarcoderdataPython |
111879 | <gh_stars>0
from unittest import TestCase
from uhlive.stream.conversation import Conversation, Ok, ProtocolError
from .conversation_events import join_successful
class TestConnection(TestCase):
    """Protocol-level tests for the uhlive Conversation client state machine."""
    def test_join(self):
        """join() emits the phx_join frame; streaming/leaving before the join
        is acknowledged must raise ProtocolError."""
        client = Conversation("customerid", "myconv", "john_test")
        frame = client.join(model="en", country="us")
        self.assertEqual(
            frame,
            r'["1","1","conversation:customerid@myconv","phx_join",{"readonly":false,"speaker":"john_test","model":"en","country":"us","interim_results":true,"rescoring":true,"origin":0,"audio_codec":"linear"}]',
        )
        with self.assertRaises(ProtocolError):
            client.send_audio_chunk(bytes(60))
        with self.assertRaises(ProtocolError):
            client.leave()
    def test_joined(self):
        """After a successful join ack: no double-join, streaming and leave OK."""
        client = Conversation("customerid", "myconv", "john_test")
        client.join()
        event = client.receive(join_successful)
        self.assertIsInstance(event, Ok)
        # Can't join twice
        with self.assertRaises(ProtocolError):
            client.join()
        # Can stream
        client.send_audio_chunk(bytes(60))
        # Can leave
        client.leave()
    def test_leave(self):
        """leave() after an acknowledged join emits the phx_leave frame."""
        client = Conversation("customerid", "myconv", "john_test")
        client.join()
        client.receive(join_successful)
        frame = client.leave()
        self.assertEqual(
            frame, r'["1","2","conversation:customerid@myconv","phx_leave",{}]'
        )
    # NOTE(review): this method lacks the 'test_' prefix, so unittest never
    # runs it — confirm whether that is intentional before renaming.
    def receive_wrong_topic(self):
        client = Conversation("customerid", "unrelated_topic", "john_test")
        with self.assertRaises(AssertionError):
            client.receive(join_successful)
| StarcoderdataPython |
3218136 | #$ -S /netapp/home/xingjiepan/.local/bin/python3
'''
Empty script for debugging on SGE
'''
import os
import sys; sys.path.append(os.getcwd())
import benchmark_constructor as BC
if __name__ == '__main__':
    # Make the user's local bin visible so benchmark tools resolve on SGE nodes.
    os.environ['PATH'] = ':'.join(['/netapp/home/xingjiepan/.local/bin',
                                   os.environ['PATH']])
    # Dump interpreter/environment details to the job log for debugging.
    print(sys.version)
    print(sys.path)
    print(os.getcwd())
    print(os.environ['PATH'])
    print("I'm doing nothing :P")
| StarcoderdataPython |
1713070 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Camino2Trackvis top level namespace
"""
from .convert import Camino2Trackvis, Trackvis2Camino
| StarcoderdataPython |
3372371 | #In this script I will try to assess any possible difference
#in treatment outcome across patients of different groups
#(isolated with hierarchical clustering on snps genotypes)
import pandas as pd
from scipy import stats
# Import the table with treatment/therapy timing information per patient.
df_rr = pd.read_csv('./checks/delta_pl_ther.tsv', sep = '\t')
# Drop patients where the gap between PL and the first day of the first
# treatment exceeds 90 days.
df = df_rr[(abs(df_rr['delta_pl']) < 91)]
def getGroups(file, stops):
    """Split the ordered patient ids read from ``file`` into groups.

    ``file`` contains one integer id per line, in cluster order. Every id
    listed in ``stops`` closes the running group and opens a new one that
    starts with the stop id itself.
    """
    with open(file) as handle:
        ordered_ids = [int(line.strip()) for line in handle]
    groups = []
    current = []
    for patient_id in ordered_ids:
        if patient_id in stops:
            # Close the running group; the stop id begins the next group.
            groups.append(current)
            current = [patient_id]
        else:
            current.append(patient_id)
    groups.append(current)
    return groups
# Extract the three patient groups produced by hierarchical clustering on
# SNP genotypes; 862 and 826 are the ids where the dendrogram was cut.
snp_stops = [862, 826]
snp_groups = getGroups('./data/cluster_groups/ordered_rr_snps.txt', snp_stops)
group1 = snp_groups[0]
group2 = snp_groups[1]
group3 = snp_groups[2]
# Per-group slices of the filtered treatment table (sizes noted inline).
df1 = df[df['patient_id'].isin(group1)] #18
df2 = df[df['patient_id'].isin(group2)] #22
df3 = df[df['patient_id'].isin(group3)] #23
#defining function to get contingency table:
def getContTab(s1, s2):
    """Build a 2x2 contingency table [[successes], [failures]] from two
    binary (0/1) outcome sequences."""
    succ1, succ2 = sum(s1), sum(s2)
    return [[succ1, succ2], [len(s1) - succ1, len(s2) - succ2]]
def writeStat(matrix, filename):
    """Write a 3x3 p-value matrix as a TSV with g1..g3 row/column labels."""
    out_path = './data/thertest/{}.tsv'.format(filename)
    with open(out_path, 'w') as out:
        out.write('\tg1\tg2\tg3\n')
        for index, row in enumerate(matrix, start=1):
            out.write('g{}\t'.format(index) + '\t'.join(row) + '\n')
#testing if patients of different groups have a different response
#to therapy (without distinction of FL vs SL)
def getInterruptionCause(df):
    """Map the therapy-end code to a binary outcome list: 'P' -> 1,
    missing values (treated as 'A') and 'A' -> 0."""
    coded = df['re_end1'].fillna('A').replace({'P': 1, 'A': 0})
    return list(coded.dropna())
# Pairwise Fisher exact tests on therapy-interruption outcome between the
# three SNP-derived groups (no distinction of first vs second line).
s1 = getInterruptionCause(df1)
s2 = getInterruptionCause(df2)
s3 = getInterruptionCause(df3)
liste_a = [s1, s2, s3]
liste_b = [s1, s2, s3]
matrix = []
for lista1 in liste_a:
    pvals = []
    for lista2 in liste_b:
        table = getContTab(lista1, lista2)
        print(table)
        # Keep only the p-value (index 1) of the Fisher exact test result.
        pvals.append(str(stats.fisher_exact(table)[1]))
    matrix.append(pvals)
writeStat(matrix, 'snps_ther1_end')
#testing if patients of different groups have been assigned
#to different therapy classes (first or second line)
def getTherapyLine(df):
    """Map therapy line to a binary list: 'FL' -> 1, 'SL' -> 0; missing
    entries are dropped."""
    coded = df['TL'].dropna().replace({'FL': 1, 'SL': 0})
    return list(coded.dropna())
# Pairwise Fisher exact tests on therapy-line assignment (FL vs SL)
# between the three SNP-derived groups.
s1 = getTherapyLine(df1)
s2 = getTherapyLine(df2)
s3 = getTherapyLine(df3)
liste_a = [s1, s2, s3]
liste_b = [s1, s2, s3]
matrix = []
for lista1 in liste_a:
    pvals = []
    for lista2 in liste_b:
        table = getContTab(lista1, lista2)
        print(table)
        # Keep only the p-value (index 1) of the Fisher exact test result.
        pvals.append(str(stats.fisher_exact(table)[1]))
    matrix.append(pvals)
writeStat(matrix, 'snps_ther_line') | StarcoderdataPython |
3303792 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2021 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Workflow-Engine-Snakemake command line interface."""
import logging
import os
from reana_commons.config import (
REANA_LOG_FORMAT,
REANA_LOG_LEVEL,
REANA_WORKFLOW_UMASK,
)
from reana_commons.workflow_engine import create_workflow_engine_command
from reana_workflow_engine_snakemake.config import LOGGING_MODULE
from reana_workflow_engine_snakemake.executor import run_jobs
# Configure root logging per REANA conventions and create the module logger.
logging.basicConfig(level=REANA_LOG_LEVEL, format=REANA_LOG_FORMAT)
log = logging.getLogger(LOGGING_MODULE)
def run_snakemake_workflow_engine_adapter(
    publisher,
    rjc_api_client,
    workflow_uuid=None,
    workflow_workspace=None,
    workflow_file=None,
    workflow_parameters=None,
    operational_options={},
    **kwargs,
):
    """Run a ``snakemake`` workflow and publish its status transitions."""
    # REANA workflow status codes: 1=running, 2=finished, 3=failed.
    running_status = 1
    finished_status = 2
    failed_status = 3
    # use some shared object between tasks.
    os.environ["workflow_uuid"] = workflow_uuid
    os.environ["workflow_workspace"] = workflow_workspace
    os.umask(REANA_WORKFLOW_UMASK)
    log.info("Snakemake workflows are not yet supported. Skipping...")
    log.info(f"Workflow spec received: {workflow_file}")
    publisher.publish_workflow_status(workflow_uuid, running_status)
    success = run_jobs(
        rjc_api_client,
        publisher,
        workflow_workspace,
        workflow_file,
        workflow_parameters,
    )
    if not success:
        publisher.publish_workflow_status(
            workflow_uuid, failed_status, logs="Workflow exited unexpectedly."
        )
        return
    publisher.publish_workflow_status(workflow_uuid, finished_status)
# CLI entry point: wraps the adapter above in the shared REANA helper so it
# can be invoked as the "snakemake" workflow-engine command.
run_snakemake_workflow = create_workflow_engine_command(
    run_snakemake_workflow_engine_adapter, engine_type="snakemake"
)
| StarcoderdataPython |
3359086 | # -*- coding: utf-8 -*-
from .atomic_permutation import AtomicPermutation
from .atomic_permutation_ase import AtomicPermutationASE
from .coordinate_perturation_ordered import CoordinateOrderedPerturbation
from .coordinate_perturbation_ase import CoordinatePerturbationASE
from .coordinate_perturbation import CoordinatePerturbation
from .heredity_mutation_ase import HeredityASE
from .lattice_strain_ase import LatticeStrainASE
from .lattice_strain import LatticeStrain
from .mirror_mutation_ase import MirrorMutationASE
from .rotational_mutation_ase import RotationalMutationASE
from .soft_mutation_ase import SoftMutationASE
| StarcoderdataPython |
122261 | <filename>main.py<gh_stars>0
#!/usr/bin/env python
# If you keep OpenSCAD in an unusual location, uncomment the following line of code and
# set it to the full path to the openscad executable.
# Note: Windows/python now support forward-slash characters in paths, so please use
# those instead of backslashes which create a lot of confusion in code strings.
# OPENSCAD_PATH = "C:/Program Files/OpenSCAD/openscad"
# do not edit below unless you know what you are doing!
import os
import configparser
import platform
from shutil import copy, rmtree
import shlex
import random as rd
import time
import numpy as np
import math
import re
from PIL import Image
import subprocess as sp
halt = -1  # debug: terminate skipping this shell (0 to n to enable)
# Make sure we have a fresh random seed
rd.seed()
USE_SCAD_THREAD_TRAVERSAL = False  # toggled at runtime after probing OpenSCAD
STL_DIR = "stl_files"  # output directory for generated shell STLs
PREV_DIR = "prev"  # output directory for maze preview PNGs
def openscad():
    """Return the OpenSCAD executable path/command for this environment.

    Resolution order: the OPENSCAD_PATH module constant (if defined and
    truthy), then the OPENSCAD_PATH environment variable, then a
    platform-specific default.
    """
    try:
        if OPENSCAD_PATH:
            return OPENSCAD_PATH
    except NameError:
        pass
    env_path = os.getenv("OPENSCAD_PATH")
    if env_path:
        return env_path
    system = platform.system()
    if system == "Darwin":
        return "/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
    if system == "Windows":
        # Note: Windows allows forward slashes now
        return '"C:/Program Files/OpenSCAD/openscad"'
    # Default to linux-friendly CLI program name
    return "openscad"
def prepwd():
    """Recreate the STL and preview output directories from scratch."""
    # Linux and other systems that use PATH variables don't need an absolute
    # path configured, so the executable itself is not validated here.
    for directory in (STL_DIR, PREV_DIR):
        if os.path.exists(directory):
            rmtree(directory)
        os.mkdir(directory)  # Default perms: world-writable
def has_scad_threading():
    """Return True when OpenSCAD's help lists the thread-traversal feature."""
    # Note: help comes on stderr
    out = sp.check_output([openscad(), "--help"], stderr=sp.STDOUT,
                          universal_newlines=True)
    m = re.search(r"enable experimental features:\s(.+?)\n\s*\n", out, flags=re.DOTALL)
    if not m:
        return False
    features = re.split(r"\s*\|\s*", m[1])
    return "thread-traversal" in features
def scad_version():
    """Return the installed OpenSCAD version as a (year, month, day) int tuple.

    Returns an empty tuple when the version string cannot be parsed.
    """
    cmd = [openscad(), "--version"]
    # Note: version comes on stderr
    out = sp.check_output(cmd, stderr=sp.STDOUT, universal_newlines=True)
    # Removed: a leftover re.search for the "experimental features" block
    # (copy-pasted from has_scad_threading) whose result was immediately
    # overwritten by the match below — dead code with no effect.
    m = re.match(r"^\s*OpenSCAD version (\d{4})\.(\d\d)\.(\d\d)\s*$", out)
    return (int(m[1]), int(m[2]), int(m[3])) if m else ()
def execscad(threadid=0):
    """Invoke OpenSCAD to render the current shell into stl_files/<n>.stl."""
    print("Executing OpenSCAD script...")
    cmd = [openscad()]
    if USE_SCAD_THREAD_TRAVERSAL:
        cmd.append("--enable=thread-traversal")
    output_path = os.path.join(os.getcwd(), STL_DIR, str(shell + 1) + ".stl")
    script_path = os.path.join(os.getcwd(), "make_shells.scad")
    cmd += ["-o", output_path, script_path]
    print(cmd)
    sp.run(cmd)
def udnbers(n, vi, nc, mw, mh, stag):
    # Update the neighbour tables in place for the maze generator.
    #   n:  (mw x mh x 4) flags, set where the adjacent cell is unvisited;
    #       neighbour order is L, R, U, D.
    #   vi: (mw x mh) visited matrix; nc: per-cell count of unvisited neighbours.
    #   stag: per-row horizontal shift (produced by gen()); shifted rows wrap
    #       horizontally, unshifted rows clamp at the borders.
    for y in range(0, mh):
        for x in range(0, mw):
            # Effective column after applying this row's shift.
            x3 = int((x + stag[y]) % mw)
            x2 = [x - 1, x + 1, x, x]
            y2 = [y, y, y - 1, y + 1]
            for i in range(0, 4):
                if stag[y] % mw > 0:
                    # Shifted row: wrap around horizontally.
                    x2[i] = int((x2[i] + mw) % mw)
                else:
                    # Unshifted row: clamp at left/right borders.
                    if x2[i] < 0:
                        x2[i] = 0
                    if x2[i] > mw - 1:
                        x2[i] = mw - 1
                # Exclude L at the seam column and the symmetric R edge.
                # NOTE(review): x3 is a column index but is compared against
                # mh - 1 here — looks suspicious; confirm against mw - 1.
                if (
                    not ((x3 == 0 and i == 0) or (x3 == mh - 1 and i == 1))
                    and y2[i] > -1
                    and y2[i] < mh
                ):
                    n[x, y, i] = vi[int(x2[i]), int(y2[i])] == 0
                else:
                    n[x, y, i] = 0
            nc[x, y] = len(np.argwhere(n[x, y].astype("int")))
def genmaze(mw, mh, stag, st, ex):
    # Carve a randomized maze of mw x mh cells (growing-tree style expansion)
    # and return the wall matrix (mw x mh x 4, order L/R/U/D; 0 = open).
    # st/ex are the entry/exit columns used only for the preview image.
    # Relies on module globals: hbias, i, shell, shells, tpp, PREV_DIR.
    im = Image.new("L", [2 * mw + 1, 2 * mh + 1], 0)
    visited = np.zeros(mw * mh)
    nbercount = np.zeros(mw * mh)
    nbers = np.ones(mw * mh * 4)
    walls = np.ones(mw * mh * 4)
    # Seed the carve from the middle cell.
    r = int((mw * mh) / 2)
    vcount = 1
    visited[r] = 1
    visited = visited.reshape([mw, mh])
    nbers = nbers.reshape([mw, mh, 4])
    nbercount = nbercount.reshape([mw, mh])
    walls = walls.reshape([mw, mh, 4])
    udnbers(nbers, visited, nbercount, mw, mh, stag)
    while vcount < (mw * mh):
        # Cells already visited that still have unvisited neighbours.
        v = np.transpose(np.nonzero(np.logical_and(visited == 1, nbercount > 0)))
        # choose branch
        r = rd.randint(0, len(v) - 1)
        c = v[r]
        # choose wall to break
        if nbers[c[0], c[1]][0] == 1 or nbers[c[0], c[1]][1] == 1:
            # horizontal bias when possible
            r = rd.randint(0, nbercount[c[0], c[1]] - 1 + hbias)
            if r > nbercount[c[0], c[1]] - 1:
                # Biased roll overflowed: redirect to a horizontal neighbour.
                r = int(r - (nbercount[c[0], c[1]]))
                if nbers[c[0], c[1]][0] == 1 and nbers[c[0], c[1]][1] == 1:
                    r = int(r % 2)
                else:
                    r = 0
        else:
            # otherwise just vertical
            r = rd.randint(0, nbercount[c[0], c[1]] - 1)
        n = np.argwhere(nbers[c[0], c[1]])[r]
        # break wall
        walls[c[0], c[1], n] = 0
        c2 = c
        # walls: 0=L 1=R 2=U 3=D
        if n == 0:
            n2 = 1
            c2[0] = c[0] - 1
        elif n == 1:
            n2 = 0
            c2[0] = c[0] + 1
        elif n == 2:
            n2 = 3
            c2[1] = c[1] - 1
        else:
            n2 = 2
            c2[1] = c[1] + 1
        # Wrap horizontally, mark the neighbour visited and open its wall too.
        c2[0] = int((c2[0] + mw) % mw)
        visited[c2[0], c2[1]] = 1
        walls[c2[0], c2[1], n2] = 0
        udnbers(nbers, visited, nbercount, mw, mh, stag)
        vcount = vcount + 1
    # preview
    if ((i == 0 and shell < shells - 1) or (i == 1 and shell > 0)) and tpp != 1:
        # Mark exit (top) and start (bottom) openings, then draw each visited
        # cell and its broken walls at 2x scale.
        im.putpixel((1 + ex * 2, 0), 255)
        im.putpixel((1 + st * 2, mh * 2), 255)
        for y in range(0, mh):
            for x in range(0, mw):
                imx = 1 + x * 2
                imy = 1 + y * 2
                imnx = [imx - 1, imx + 1, imx, imx]
                imny = [imy, imy, imy - 1, imy + 1]
                if visited[x, y] == 1:
                    im.putpixel((imx, imy), 255)
                    for idx in range(0, 4):
                        if walls[x, y, idx] == 0:
                            im.putpixel((imnx[idx], imny[idx]), 255)
        if tpp == 2:
            im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + "a.png"))
        else:
            im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + ".png"))
    return walls
def gen():
    # Generate one shell of the nested maze puzzle: compute its diameter and
    # maze grid size, carve the maze, write maze.scad/config.scad, and invoke
    # OpenSCAD. Returns False while shells remain, True when all are done.
    # Driven almost entirely by module-level state (shell, d2, mh, mw, i, tpp,
    # plus the config values read in __main__).
    global shell
    global d2
    global mh
    global mw
    global i
    global tpp
    if shell < shells:
        # Debug hook: abort just before the configured shell.
        if shell == halt:
            exit()
        # Entering the transition shell: flag it with tpp = -1.
        if shell + 1 > 0 and shell + 1 < shells and shell + 1 == tp and tpp < 1:
            tpp = -1
        if tpp < 1:
            print("part: " + str(shell + 1))
        wt = mwt
        if tpp < 1:
            # Derive this shell's inner diameter from the previous one (d2).
            if shell == 0:
                d = (mw * us * p) / np.pi + wt - marge * 2
            else:
                if shell == tp:
                    d = d2
                else:
                    d = d2 + us + wt + marge * 2
            if i == 0:
                mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
                if shell == (shells - 2):
                    mh += 1
            else:
                if shell == (shells - 1):
                    mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
                else:
                    mw = int(math.ceil((d2 / p + us) * np.pi / 2 / us))
                mh += 1
        else:
            # Second pass of a transition shell.
            d = d2 + us + wt + marge * 2
            mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
            mh += 1
        # stag/shift: per-row horizontal offsets depending on the shift mode.
        stag = np.zeros(mh)
        if stagmode in (1, 2):
            for y in range(0, mh):
                if y == 0 or stagmode == 1:
                    stag[y] = rd.randint(0, mh - 1)
                else:
                    stag[y] = stag[y - 1] + rd.randint(0, mh - 1)
        elif stagmode == 3:
            stag = np.multiply(np.arange(0, mh), stagconst).astype("int")
        # maze: random start/exit columns, then carve.
        st = rd.randint(0, mw - 1)
        ex = rd.randint(0, mw - 1)
        marr = genmaze(int(mw), int(mh), stag, st, ex)
        # Encode each cell's open walls (R=1, U=2, both=3) into an OpenSCAD
        # 2D array literal, repeating the maze p times around the cylinder.
        matrix = []
        for y in range(0, mh):
            row = []
            for x in range(0, mw * p):
                x2 = x % mw
                r = marr[x2, y, 1] == 0
                u = marr[x2, y, 3] == 0
                if u and r:
                    row.append("3")
                elif u:
                    row.append("2")
                elif r:
                    row.append("1")
                else:
                    row.append("0")
            matrix.append(f"[{','.join(row)}]")
        s = f"[{','.join(matrix)}];"
        # First maze overwrites maze.scad; a transition's second maze appends.
        if tpp < 1:
            maze_num = 1
            open_mode = "w"
        else:
            maze_num = 2
            open_mode = "a+"
        with open("maze.scad", open_mode) as maze:
            maze.write(f"maze{maze_num}=")
            maze.write(
                "\n".join(
                    [
                        s,
                        f"h{maze_num}={mh};",
                        f"w{maze_num}={mw * p};",
                        f"st{maze_num}={st};",
                        f"ex{maze_num}={ex};",
                    ]
                )
            )
        # Outermost shell gets a lid instead of a base.
        base = 1
        lid = 0
        if shell == shells - 1:
            lid = 1
            base = 0
        if shell > shells - 2:
            mos = 0
        else:
            mos = shells - shell - 2
        # Emit the per-shell OpenSCAD parameters.
        with open("config.scad", "w+") as cfg:
            cfg.write(
                "\n".join(
                    [
                        f"p={p};",
                        f"tpp={tpp};",
                        f"is={shell};",
                        f"os={mos};",
                        f"lid={lid};",
                        f"base={base};",
                        f"iw={wt};",
                        f"id={d};",
                        f"s={us};",
                        f"i={i};",
                        f"bd={d + wt * 2 + us * 2};",
                        f"m={marge};",
                    ]
                )
            )
        if shell < shells - 2:
            d2 = d
        # Transition-shell bookkeeping: flip maze side / schedule second pass.
        if shell > 0 and shell < shells and shell == tp and tpp < 1:
            if i == 0:  # double nub transition
                tpp = 1
                i = 1
            else:  # double maze transition
                tpp = 2
                i = 0
        else:
            tpp = 0
        # Render unless we are mid-transition (second pass pending).
        if tpp < 1:
            execscad()
        shell = shell + 1
        return False
    else:
        return True
if __name__ == "__main__":
try:
prepwd()
# get scad version:
if has_scad_threading():
USE_SCAD_THREAD_TRAVERSAL = (
input("multi-threading available. use it(y/n)?").lower() == "y"
)
version = scad_version()
if version[0] < 2015:
input("ERROR: invalid scad version. must be at least 2015.xx.xx .")
exit(1)
except FileNotFoundError:
input("ERROR: Could not find OpenSCAD: " + openscad())
exit(1)
d2 = 0
shell = 0
# make parts:
p = abs(int(input("nub count (0=2 nubs,1=3 nubs,2=4 nubs, ...):"))) + 2
tpp = 0
hbias = abs(
int(input("difficulty (hbias); 0=none >0= bias; larger= more difficult:"))
)
stagconst = 0
stagmode = int(input("shift mode (0=none 1=random 2=random change 3=twist):"))
if stagmode == 3:
stagconst = abs(int(input("twist amount:")))
config = configparser.ConfigParser()
config.read("opt.ini")
if "DEFAULT" not in config:
input("ERROR: No DEFAULT section in opt.ini")
exit(1)
config = config["DEFAULT"]
shells = config.getint("levels") + 1 # levels
marge = config.getfloat("tolerance")
i = int(config.getboolean("maze_inside"))
tp = config.getint("transition_shell")
if tp >= shells:
tp = 0
us = config.getfloat("spacing")
mh = config.getint("units_tall")
mw = config.getint("units_wide")
mwt = config.getfloat("wall_thickness")
while not gen():
continue
print("done!")
| StarcoderdataPython |
1618095 | <reponame>b0bac/ApolloScanner<filename>Configuration/migrations/0013_serviceslog_method_serviceslog_status_and_more.py
# Generated by Django 4.0.1 on 2022-03-10 12:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add non-editable ``method`` and ``status``
    columns to ServicesLog and make ``name`` a non-editable CharField."""
    dependencies = [
        ('Configuration', '0012_alter_serviceslog_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='serviceslog',
            name='method',
            field=models.CharField(default='GET', editable=False, max_length=20, verbose_name='请求类型'),
        ),
        migrations.AddField(
            model_name='serviceslog',
            name='status',
            field=models.CharField(default='200', editable=False, max_length=20, verbose_name='响应码'),
        ),
        migrations.AlterField(
            model_name='serviceslog',
            name='name',
            field=models.CharField(editable=False, max_length=20, verbose_name='日志类型'),
        ),
    ]
| StarcoderdataPython |
3383160 | import arcpy, os
# Path to the AGRC facility view inside the eqedocsp SDE connection file,
# resolved relative to this script's own directory.
db = os.path.join(os.path.dirname(os.path.abspath(__file__)), r'eqedocsp.sde\AGRC.VW_DSHW_FACILITY')
# Smoke test: report whether the dataset is reachable through the connection.
if arcpy.Exists(db):
    print('pass')
else:
    print('fail')
| StarcoderdataPython |
187249 | import os, csv
class BaseConfig(object):
    """Base application configuration."""
    # When True the application should serve a maintenance page instead of content.
    MAINTENANCE = False
| StarcoderdataPython |
3202373 | import unittest
import os # noqa: F401
import json # noqa: F401
import time
import requests
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from biokbase.workspace.client import Workspace as workspaceService
from kb_diamond.kb_diamondImpl import kb_diamond
from kb_diamond.kb_diamondServer import MethodContext
from kb_diamond.authclient import KBaseAuth as _KBaseAuth
from AssemblyUtil.AssemblyUtilClient import AssemblyUtil
from kb_diamond.util.diamond import FastaException
class kb_diamondTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_diamond'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'kb_diamond',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL)
cls.serviceImpl = kb_diamond(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_example1_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
# NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
def load_fasta_file(self, filename, obj_name, contents):
f = open(filename, 'w')
f.write(contents)
f.close()
assemblyUtil = AssemblyUtil(self.callback_url)
assembly_ref = assemblyUtil.save_assembly_from_fasta({'file': {'path': filename},
'workspace_name': self.getWsName(),
'assembly_name': obj_name
})
return assembly_ref
def Xtest_input_sequence(self):
# First load a test FASTA file as an KBase Assembly
fasta_content = '>seq1 something soemthing asdf\n' \
'agcttttcat\n' \
'>seq2\n' \
'agctt\n' \
'>seq3\n' \
'agcttttcatgg'
params = {'workspace_name': self.getWsName(),
'input_one_sequence': fasta_content,
'scratch': self.scratch,
'context': self.ctx
}
# Second, call your implementation
output = self.getImpl().Diamond_Blastp_Search(self.ctx, params)
pprint(output)
self.assertEquals(1, 1)
def test_input_ref(self):
query_seq = ">ATCG00500.1 pacid=19637947 transcript=ATCG00500.1 locus=ATCG00500 ID=ATCG00500.1.TAIR10 annot-version=TAIR10\n\
MEKSWFNFMFSKGELEYRGELSKAMDSFAPGEKTTISQDRFIYDMDKNFYGWDERSSYSSSYSNNVDLLVSSKDIRNFIS\
DDTFFVRDSNKNSYSIFFDKKKKIFEIDNDFSDLEKFFYSYCSSSYLNNRSKGDNDLHYDPYIKDTKYNCTNHINSCIDS\
YFRSYICIDNNFLIDSNNFNESYIYNFICSESGKIRESKNYKIRTNRNRSNLISSKDFDITQNYNQLWIQCDNCYGLMYK\
KVKMNVCEQCGHYLKMSSSERIELSIDPGTWNPMDEDMVSADPIKFHSKEEPYKNRIDSAQKTTGLTDAVQTGTGQLNGI\
PVALGVMDFRFMGGSMGSVVGEKITRLIEYATNQCLPLILVCSSGGARMQEGSLSLMQMAKISSVLCDYQSSKKLFYISI\
LTSPTTGGVTASFGMLGDIIIAEPYAYIAFAGKRVIEQTLKKAVPEGSQAAESLLRKGLLDAIVPRNLLKGVLSELFQLH\
AFFPLNTN*" + "\n" + \
">ATCG00510.1 pacid=19637948 transcript=ATCG00510.1 locus=ATCG00510 ID=ATCG00510.1.TAIR10 annot-version=TAIR10 \
MTTFNNLPSIFVPLVGLVFPAIAMASLFLHIQKNKIF*" + "\n"
params = {'workspace_name': self.getWsName(),
'evalue' : 0.1,
'input_query_string': query_seq,
'target_object_ref': '12588/9/1',
'scratch': self.scratch,
'context': self.ctx
}
# Second, call your implementation
output = self.getImpl().Diamond_Blast_Search(self.ctx, params)
pprint(output)
self.assertEquals(1, 1)
    def test_input_sequence_with_repeating_ids(self):
        """Placeholder: handling of repeated sequence IDs is not covered yet."""
        pass
# # NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
# def XtestX_filter_contigs_ok(self):
#
# # First load a test FASTA file as an KBase Assembly
# fasta_content = '>seq1 something soemthing asdf\n' \
# 'agcttttcat\n' \
# '>seq2\n' \
# 'agctt\n' \
# '>seq3\n' \
# 'agcttttcatgg'
#
# assembly_ref = self.load_fasta_file(os.path.join(self.scratch, 'test1.fasta'),
# 'TestAssembly',
# fasta_content)
# params = {'workspace_name': self.getWsName(),
# 'assembly_input_ref': assembly_ref,
# 'min_length': 10
# }
# # Second, call your implementation
# output = self.getImpl().Diamond_Blastp_Search(self.ctx, params)
# pprint(output)
#
# self.assertEquals(1,1)
# # Validate the returned data
# self.assertEqual(ret[0]['n_initial_contigs'], 3)
# self.assertEqual(ret[0]['n_contigs_removed'], 1)
# self.assertEqual(ret[0]['n_contigs_remaining'], 2)
# def test_filter_contigs_err1(self):
# with self.assertRaises(ValueError) as errorContext:
# self.getImpl().filter_contigs(self.getContext(),
# {'workspace_name': self.getWsName(),
# 'assembly_input_ref': '1/fake/3',
# 'min_length': '-10'})
# self.assertIn('min_length parameter cannot be negative', str(errorContext.exception))
#
# def test_filter_contigs_err2(self):
# with self.assertRaises(ValueError) as errorContext:
# self.getImpl().filter_contigs(self.getContext(),
# {'workspace_name': self.getWsName(),
# 'assembly_input_ref': '1/fake/3',
# 'min_length': 'ten'})
# self.assertIn('Cannot parse integer from min_length parameter', str(errorContext.exception))
| StarcoderdataPython |
1641879 | # encoding: utf-8
from .nhindex_agent import NHIndexAgent
nh_agent = NHIndexAgent()
def get_index_list():
    """Return the list of available NH indexes (delegates to the module-level agent)."""
    return nh_agent.get_index_list()
def get_index_daily(index_code):
    """Return daily data for the index identified by *index_code* (delegated)."""
    return nh_agent.get_index_daily(index_code)
def get_index_snapshot():
    """Return a snapshot of all NH indexes (delegates to the module-level agent)."""
    return nh_agent.get_index_snapshot()
def get_index_weight():
    """Return index weight data (delegates to the module-level agent)."""
    return nh_agent.get_index_weight()
| StarcoderdataPython |
# Read a distance and a speed from the user, then print the travel time t = s / v.
s = float(input('Skriv inn strekningen s [m]: '))
v = float(input('Skriv inn farta v [m/s]: '))
print(s / v)
1684913 | <filename>examples/object_mgmt.py<gh_stars>10-100
"""This is the demo script to show how administrator can manage folders, objects
and their dependencies.
This script will not work without replacing parameters with real values.
Its basic goal is to present what can be done with this module and to
ease its usage.
"""
from mstrio.connection import Connection
from mstrio.object_management import (Folder, list_folders, get_predefined_folder_contents,
get_my_personal_objects_contents, PredefinedFolders, Object,
list_objects, SearchObject, get_search_suggestions,
quick_search, quick_search_from_object, full_search,
start_full_search, get_search_results, SearchPattern,
SearchResultsFormat)
from mstrio.project_objects.dossier import Dossier
from mstrio.utils.entity import Rights
from mstrio.types import ObjectSubTypes, ObjectTypes
# Connection details for the demo environment; replace with real values.
base_url = "https://<>/MicroStrategyLibrary/api"
username = "some_username"
password = "<PASSWORD>"
# login_mode=1 selects standard (username/password) authentication.
conn = Connection(base_url, username, password, project_name="MicroStrategy Tutorial",
                  login_mode=1)
# --- Folder management ---
# list folders from a particular project
list_folders(conn, project_id="project_id")

# list configuration-level folders
list_folders(conn)

# get contents of My Personal Objects in a specific project
get_my_personal_objects_contents(conn, project_id="project_id")

# get contents of a pre-defined folder in a specific project
get_predefined_folder_contents(conn, folder_type=PredefinedFolders.PUBLIC_REPORTS,
                               project_id="project_id")

# create new folder in a particular folder
new_folder = Folder.create(conn, name="New Folder", parent="parent_folder_id",
                           description="Description of New Folder")

# get folder with a given id
folder = Folder(conn, id="folder_id")

# get contents of a folder (optionally as a dictionary)
contents_objs = folder.get_contents()
contents_dict = folder.get_contents(to_dictionary=True)

# alter name and description of a folder
folder.alter(name="New name of folder", description="Folder with a new name")

# add ACL right to the folder for the user and propagate to children of folder
folder.acl_add(Rights.EXECUTE, trustees="user_id", denied=False, inheritable=True,
               propagate_to_children=True)

# remove ACL from the folder for the user
folder.acl_remove(Rights.EXECUTE, trustees="user_id", denied=False, inheritable=True)

# Delete folder. When argument `force` is set to `False` (default value), then
# deletion must be approved interactively.
folder.delete(force=True)
# --- Generic object management ---
# list objects of a given type
reports = list_objects(conn, ObjectTypes.REPORT_DEFINITION)
documents = list_objects(conn, ObjectTypes.DOCUMENT_DEFINITION)

# initialize an object of a given type (in this case `FOLDER`) with a given id
# NOTE(review): the name `object` shadows the builtin; acceptable for a demo
# script, but avoid it in library code.
object = Object(conn, ObjectTypes.FOLDER, "object_id")

# alter name and description of an object
object.alter(name="New name", description="New description")

# certify object
Object(conn, ObjectTypes.REPORT_DEFINITION, "object_id").certify()

# decertify object
Object(conn, ObjectTypes.REPORT_DEFINITION, "object_id").decertify()

# get object properties as a dictionary
object.to_dict()

# Delete objects of a given types (in this case `REPORT` and 'DOCUMENT)
# and given ids. When argument `force` is set to `False` (default value), then
# deletion must be approved.
Object(conn, ObjectTypes.REPORT_DEFINITION, "report id").delete()
Object(conn, ObjectTypes.DOCUMENT_DEFINITION, "document id").delete(force=True)
# --- Search ---
# initialize SearchObject and synchronize with server
search_object = SearchObject(conn, id="search_object_id")

# get search suggestions with the pattern set performed in all unique projects
# across the cluster (it takes effect only in I-Server with cluster nodes)
suggestions = get_search_suggestions(conn, project="project_id", key="search pattern",
                                     max_results=20, cross_cluster=True)

# use the stored results of the Quick Search engine to return search results
# and display them as a list (in this particular case all reports whose name
# begins with 'A')
objects = quick_search(conn, "project_id", name='A', pattern=SearchPattern.BEGIN_WITH,
                       object_types=[ObjectTypes.REPORT_DEFINITION])

# perform quick search based on a predefined Search Object (include ancestors
# and acl of returned objects)
objects = quick_search_from_object(conn, "project_id", search_object, include_ancestors=True,
                                   include_acl=True)

# search the metadata for objects in a specific project that match specific
# search criteria (e.g. super cubes whose name ends with `cube`) and save the
# results in I-Server memory
start_dict = start_full_search(conn, "project_id", name="cube", pattern=SearchPattern.END_WITH,
                               object_types=ObjectSubTypes.SUPER_CUBE)

# Retrieve the results of a full metadata search in a tree format which were
# previously obtained by `start_full_search`
results = get_search_results(conn, search_id=start_dict['id'],
                             results_format=SearchResultsFormat.TREE)

# perform full search in one call (instead of the two steps presented above).
# Get all documents whose name contains `document` and return in a list format.
# Perform search only in the root folder and its subfolders.
results = full_search(conn, "project_id", name="document", pattern=SearchPattern.CONTAINS,
                      object_types=ObjectTypes.DOCUMENT_DEFINITION, root="folder_id")
# return cubes that are used by the given dossier (it can be performed with the
# function `full_search`, method `get_connected_cubes` from `Dossier`, or
# method `list_dependencies` from `Entity`)
# NOTE: the first assignment previously bound a name spelled with a Cyrillic
# 'с' ('сubes'), silently creating a dead homoglyph variable; it is now the
# plain ASCII `cubes` like the assignments that follow.
cubes = Dossier(conn, id="dossier_id").get_connected_cubes()
cubes = Dossier(conn, id="dossier_id").list_dependencies(
    project="project_id", object_types=[ObjectSubTypes.OLAP_CUBE, ObjectSubTypes.SUPER_CUBE])
cubes = full_search(conn, project="project_id",
                    object_types=[ObjectSubTypes.OLAP_CUBE,
                                  ObjectSubTypes.SUPER_CUBE], used_by_object_id="dossier_id",
                    used_by_object_type=ObjectTypes.DOCUMENT_DEFINITION)

# we can also list cubes which use the given dossier
cubes_using_dossier = Dossier(conn, id="dossier_id").list_dependents(
    project="project_id", object_types=[ObjectSubTypes.OLAP_CUBE, ObjectSubTypes.SUPER_CUBE])

# get all objects which use (also recursively) the metric with the given id
objects = full_search(conn, project="project_id", uses_object_id="metric_id",
                      uses_object_type=ObjectTypes.METRIC, uses_recursive=True)
| StarcoderdataPython |
3208874 | """
Collection of database access functions.
"""
import os
import re
import json
import logging
from pathlib import Path
import tempfile
from contextlib import contextmanager
from collections import defaultdict
import numpy as np
import pandas as pd
import sh
from tqdm import tqdm
from sqlalchemy import create_engine, select, bindparam, and_
from sqlalchemy.orm import sessionmaker
# for create_database:
import MySQLdb
from sqlalchemy_utils import database_exists
from sqlalchemy_utils.functions import drop_database
from ticclat.ticclat_schema import Base, Wordform, Lexicon, Anahash, \
lexical_source_wordform, WordformLink, WordformLinkSource, \
MorphologicalParadigm, WordformFrequencies
from ticclat.utils import chunk_df, anahash_df, write_json_lines, \
read_json_lines, get_temp_file, json_line, split_component_code, \
morph_iterator, preprocess_wordforms
from ticclat.sacoreutils import bulk_add_anahashes_core, sql_query_batches, sql_insert_batches
LOGGER = logging.getLogger(__name__)
# source: https://docs.sqlalchemy.org/en/latest/orm/session_basics.html
@contextmanager
def session_scope(session_maker):
    """Yield a session from *session_maker* wrapped in a transactional scope.

    Commits on normal exit, rolls back (and re-raises) on any exception,
    and always closes the session.
    """
    session = session_maker()
    try:
        yield session
        # Commit inside the try-block so a failing commit also triggers
        # the rollback below.
        session.commit()
    except BaseException:
        session.rollback()
        raise
    finally:
        session.close()
def get_engine(without_database=False):
    """Create an sqlalchemy engine from the DATABASE_URL environment variable.

    When *without_database* is True, the database name is stripped from the
    URL, yielding a server-level connection (used e.g. for CREATE DATABASE).
    """
    connection_url = os.environ.get('DATABASE_URL')
    if without_database:
        connection_url = connection_url.replace(get_db_name(), "")
    return create_engine(connection_url)
def get_session_maker():
    """Build an sqlalchemy sessionmaker bound to the engine from get_engine()."""
    engine = get_engine()
    return sessionmaker(bind=engine)
def get_session():
    """Open and return a fresh sqlalchemy session."""
    make_session = get_session_maker()
    return make_session()
def get_db_name():
    """Extract the database name from the DATABASE_URL environment variable.

    The name is the path segment after the last '/', excluding any trailing
    query string ('?...').
    """
    database_url = os.environ.get('DATABASE_URL')
    # Greedy '.*/' anchors on the last slash; the lazy named group then
    # captures up to the end of string or the start of the query string.
    match = re.match(r'.*/(?P<name>.*?)($|(\?.*$))', database_url)
    return match.group('name')
def get_or_create_wordform(session, wordform, has_analysis=False, wordform_id=None):
    """Return the Wordform object for *wordform*, creating it if necessary.

    The Wordform object is an sqlalchemy entity defined in the ticclat schema,
    coupled to the entry of the given wordform in the wordforms database table.
    When *wordform_id* is given, the lookup is skipped and a new object is
    created with that id.
    """
    existing = None
    if wordform_id is None:
        # Only search the table when no explicit id was supplied.
        existing = (session.query(Wordform)
                    .filter(Wordform.wordform == wordform)
                    .first())
    # first() may return None, so the existence check is still required.
    if existing is not None:
        return existing
    created = Wordform(wordform_id=wordform_id,
                       wordform=wordform,
                       has_analysis=has_analysis,
                       wordform_lowercase=wordform.lower())
    session.add(created)
    return created
def bulk_add_wordforms(session, wfs, preprocess_wfs=True):
    """Bulk-insert the wordforms in *wfs* into the wordforms table.

    wfs is pandas DataFrame with the same column names as the database table,
    in this case just "wordform". Empty and duplicate wordforms are dropped
    before loading. Returns the number of rows inserted.

    NOTE(review): to_csv dumps every column of wfs while the LOAD DATA
    statement expects exactly (wordform, wordform_lowercase); callers appear
    to pass single-column frames — confirm for new call sites.
    """
    LOGGER.info('Bulk adding wordforms.')
    if preprocess_wfs:
        wfs = preprocess_wordforms(wfs)
    # remove empty entries
    wfs['wordform'].replace('', np.nan, inplace=True)
    wfs.dropna(subset=['wordform'], inplace=True)
    if not wfs['wordform'].is_unique:
        LOGGER.info('The wordform-column contains duplicate entries. '
                    'Removing duplicates.')
        wfs = wfs.drop_duplicates(subset='wordform')
    wfs['wordform_lowercase'] = wfs['wordform'].apply(lambda x: x.lower())
    # Dump to a temporary TSV and bulk-load it; much faster than per-row
    # ORM inserts.
    file_handler, file_name = tempfile.mkstemp()
    os.close(file_handler)
    try:
        wfs.to_csv(file_name, header=False, index=False, sep='\t')
        # Plain string (the original used a pointless f-prefix): :file_name is
        # a bind parameter resolved by the driver, not an f-string field.
        query = """
    LOAD DATA LOCAL INFILE :file_name INTO TABLE wordforms (wordform, wordform_lowercase);
    """
        result = session.execute(query, {'file_name': file_name})
    finally:
        # Always remove the temp file, even if to_csv or LOAD DATA fails.
        os.unlink(file_name)
    LOGGER.info('%s wordforms have been added.', result.rowcount)
    return result.rowcount
def add_lexicon(session, lexicon_name, vocabulary, wfs, preprocess_wfs=True):
    """Create a Lexicon record and attach all wordforms in *wfs* to it.

    wfs is pandas DataFrame with the same column names as the database table,
    in this case just "wordform". Returns the new Lexicon object.
    """
    LOGGER.info('Adding lexicon.')
    bulk_add_wordforms(session, wfs, preprocess_wfs=preprocess_wfs)
    lexicon = Lexicon(lexicon_name=lexicon_name, vocabulary=vocabulary)
    session.add(lexicon)
    # Flush so the database assigns lexicon_id before it is referenced below.
    session.flush()
    lexicon_id = lexicon.lexicon_id
    LOGGER.debug('Lexicon id: %s', lexicon.lexicon_id)
    # Look up the database ids of all wordforms belonging to this lexicon.
    wordforms = list(wfs['wordform'])
    select_statement = select([Wordform]).where(Wordform.wordform.in_(wordforms))
    result = session.execute(select_statement).fetchall()
    LOGGER.info('Adding %s wordforms to the lexicon.', len(result))
    # Populate the lexicon <-> wordform association table in one bulk insert.
    session.execute(
        lexical_source_wordform.insert(),  # noqa pylint: disable=E1120
        # this is a known pylint/sqlalchemy issue, see
        # https://github.com/sqlalchemy/sqlalchemy/issues/4656
        [{'lexicon_id': lexicon_id,
          'wordform_id': wf['wordform_id']} for wf in result]
    )
    LOGGER.info('Lexicon was added.')
    return lexicon
def get_word_frequency_df(session, add_ids=False):
    """Can be used as input for ticcl-anahash.

    Args:
        session: SQLAlchemy session object.
        add_ids: when True, also include the wordform_id column.

    Returns:
        Pandas DataFrame containing wordforms as index and a frequency value as
        column, or None if all wordforms in the database already are
        connected to an anahash value
    """
    LOGGER.info('Selecting wordforms without anahash value.')
    # `== None` (not `is None`) is required: SQLAlchemy overloads `==` to
    # generate the SQL `IS NULL` clause.
    query = session.query(Wordform).filter(Wordform.anahash == None)  # noqa E711 pylint: disable=singleton-comparison
    if add_ids:
        query = query.with_entities(Wordform.wordform, Wordform.wordform_id)
    else:
        query = query.with_entities(Wordform.wordform)
    df = pd.read_sql(query.statement, query.session.bind)
    if df.empty:
        df = None
    else:
        df = df.set_index('wordform')
        # ticcl-anahash requires a frequency column; the actual value is
        # irrelevant here, so every wordform gets frequency 1.
        df['frequency'] = 1
    return df
def get_wf_mapping(session, lexicon=None, lexicon_id=None):
    """
    Create a dictionary with a mapping of wordforms to wordform_id.

    The keys of the dictionary are wordforms, the values are the IDs
    of those wordforms in the database wordforms table. Either *lexicon*
    or *lexicon_id* must be given; raises ValueError otherwise.
    """
    msg = 'Getting the wordform mapping of lexicon "{}"'
    if lexicon is not None:
        if lexicon.lexicon_id is None:
            raise ValueError('The lexicon does not (yet) have an ID. Please'
                             ' make sure the ID of the lexicon is set.')
        lexicon_id = lexicon.lexicon_id
        msg = msg.format(lexicon)
    elif lexicon_id is not None:
        msg = msg.format('lexicon id {}'.format(lexicon_id))
        # Add option lexicon name and get the lexicon_id from the database
    else:
        raise ValueError('Please specify the lexicon.')
    LOGGER.info(msg)
    select_statement = select([lexical_source_wordform.join(Lexicon).join(Wordform)]) \
        .where(Lexicon.lexicon_id == lexicon_id)
    LOGGER.debug(select_statement)
    result = session.execute(select_statement).fetchall()
    wf_mapping = defaultdict(int)
    for row in result:
        wf_mapping[row['wordform']] = row[lexical_source_wordform.c.wordform_id]
    # Make sure a KeyError is raised, if we try to look up a word that is not
    # in the database (because we preprocessed it)
    wf_mapping.default_factory = None
    return wf_mapping
def bulk_add_anahashes(session, anahashes, tqdm_factory=None, batch_size=10000):
    """Insert anahash values that are not yet in the database.

    anahashes is pandas dataframe with the column wordform (index), anahash.
    Returns the number of new anahash values added.
    """
    LOGGER.info('Adding anahashes.')
    # Remove duplicate anahashes
    unique_hashes = anahashes.copy().drop_duplicates(subset='anahash')
    LOGGER.debug('The input data contains %s wordform/anahash pairs.', anahashes.shape[0])
    LOGGER.debug('There are %s unique anahash values.', unique_hashes.shape[0])
    count_added = 0
    # Spool the new values to a temp file first, then bulk-insert them.
    with get_temp_file() as anahashes_to_add_file:
        if tqdm_factory is not None:
            pbar = tqdm_factory(total=unique_hashes.shape[0])
        for chunk in chunk_df(unique_hashes, batch_size=batch_size):
            # Find out which anahashes are not yet in the database.
            ahs = set(list(chunk['anahash']))
            select_statement = select([Anahash]).where(Anahash.anahash.in_(ahs))
            result = session.execute(select_statement).fetchall()
            existing_ahs = {row[1] for row in result}
            for non_existing_ah in ahs.difference(existing_ahs):
                anahashes_to_add_file.write(json.dumps({'anahash': non_existing_ah}))
                anahashes_to_add_file.write('\n')
                count_added += 1
            if tqdm_factory is not None:
                pbar.update(chunk.shape[0])
        if tqdm_factory is not None:
            pbar.close()
        bulk_add_anahashes_core(session, read_json_lines(anahashes_to_add_file))
    LOGGER.info('Added %s anahashes.', count_added)
    return count_added
def get_anahashes(session, anahashes, wf_mapping, batch_size=50000):
    """
    Generator of dictionaries with anahash ID and wordform ID pairs.

    Given `anahashes`, a dataframe with wordforms and corresponding anahashes,
    yield dictionaries containing two entries each: key 'a_id' has the value
    of the anahash ID in the database, key 'wf_id' has the value of the
    wordform ID in the database.
    """
    unique_hashes = anahashes.copy().drop_duplicates(subset='anahash')
    with tqdm(total=unique_hashes.shape[0], mininterval=2.0) as pbar:
        ah_mapping = {}
        for chunk in chunk_df(unique_hashes, batch_size=batch_size):
            # Build a mapping from anahash value to its database id.
            ahs = set(list(chunk['anahash']))
            select_statement = select([Anahash]).where(Anahash.anahash.in_(ahs))
            result = session.execute(select_statement).fetchall()
            for row in result:
                ah_mapping[row[1]] = row[0]
            pbar.update(chunk.shape[0])
    with tqdm(total=anahashes.shape[0], mininterval=2.0) as pbar:
        for wordform, row in anahashes.iterrows():
            # SQLAlchemy doesn't allow the use of column names in update
            # statements, so we use something else.
            yield {'a_id': ah_mapping[row['anahash']], 'wf_id': wf_mapping[wordform]}
            pbar.update(1)
def connect_anahashes_to_wordforms(session, anahashes, df, batch_size=50000):
    """
    Create the relation between wordforms and anahashes in the database.

    Given `anahashes`, a dataframe with wordforms and corresponding anahashes,
    create the relations between the two in the wordforms and anahashes tables
    by setting the anahash_id foreign key in the wordforms table.

    NOTE(review): despite its name, the `df` parameter receives the
    wordform -> wordform_id mapping (see the call in update_anahashes, which
    passes wf_mapping); it is forwarded straight to get_anahashes.
    """
    LOGGER.info('Connecting anahashes to wordforms.')
    LOGGER.debug('Getting wordform/anahash_id pairs.')
    with get_temp_file() as anahash_to_wf_file:
        total_lines_written = write_json_lines(anahash_to_wf_file,
                                               get_anahashes(session, anahashes, df))
        # bindparam names ('wf_id'/'a_id') must match the keys written above.
        update_statement = Wordform.__table__.update(). \
            where(Wordform.wordform_id == bindparam('wf_id')). \
            values(anahash_id=bindparam('a_id'))
        LOGGER.debug('Adding the connections wordform -> anahash_id.')
        sql_query_batches(session, update_statement, read_json_lines(anahash_to_wf_file),
                          total_lines_written, batch_size)
    LOGGER.info('Added the anahash of %s wordforms.', total_lines_written)
    return total_lines_written
def update_anahashes_new(session, alphabet_file):
    """
    Add anahashes for all wordforms that do not have an anahash value yet.

    Requires ticcl to be installed! The MySQL server must be able to write
    to the temp path used below (SELECT ... INTO OUTFILE runs server-side).

    Inputs:
        session: SQLAlchemy session object.
        alphabet_file (str): the path to the alphabet file for ticcl.
    """
    tmp_file_path = str(Path(tempfile.tempdir) / 'mysql/wordforms.csv')
    LOGGER.info("Exporting wordforms to file")
    # INTO OUTFILE refuses to overwrite; remove any stale export first.
    if os.path.exists(tmp_file_path):
        os.remove(tmp_file_path)
    session.execute(f"""
        SELECT wordform, 1 INTO OUTFILE '{tmp_file_path}'
        FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n'
        FROM wordforms
        WHERE anahash_id IS NULL;
    """)
    LOGGER.info("Generating anahashes")
    try:
        sh.TICCL_anahash(['--list', '--alph', alphabet_file, tmp_file_path])
    except sh.ErrorReturnCode as exception:
        raise ValueError('Running TICCL-anahash failed: {}'.format(exception.stdout))
    # TICCL-anahash writes its output next to the input with a '.list' suffix.
    ticcled_file_path = tmp_file_path + '.list'
    # drop old table if it's there
    session.execute("DROP TABLE IF EXISTS ticcl_import")
    # create temp table
    session.execute("""
        CREATE TEMPORARY TABLE ticcl_import (
            wordform VARCHAR(255),
            anahash BIGINT
        );
    """)
    LOGGER.info("Loading ticcled file into temp table")
    session.execute("""
        LOAD DATA LOCAL INFILE :file_path INTO TABLE ticcl_import
        FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n'
        (wordform, anahash)
        """, {'file_path': ticcled_file_path})
    if os.path.exists(tmp_file_path):
        os.remove(tmp_file_path)
    LOGGER.info("Storing new anahashes")
    # INSERT IGNORE skips anahash values that already exist.
    session.execute("""INSERT IGNORE INTO anahashes(anahash) SELECT anahash FROM ticcl_import""")
    LOGGER.info("Setting wordform anahash_ids")
    session.execute("""
        UPDATE ticcl_import
        LEFT JOIN wordforms ON ticcl_import.wordform = wordforms.wordform
        LEFT JOIN anahashes ON ticcl_import.anahash = anahashes.anahash
        SET wordforms.anahash_id = anahashes.anahash_id WHERE 1
    """)
def update_anahashes(session, alphabet_file, tqdm_factory=None, batch_size=50000):
    """Add anahashes for all wordforms that do not have an anahash value yet.

    Requires ticcl to be installed!

    Inputs:
        session: SQLAlchemy session object.
        alphabet_file (str): the path to the alphabet file for ticcl.
        tqdm_factory: optional progress bar constructor (e.g. tqdm).
        batch_size: number of rows per database batch.
    """
    LOGGER.info('Adding anahash values to wordforms without anahash.')
    df = get_word_frequency_df(session, add_ids=True)
    # None means every wordform already has an anahash; nothing to do.
    if df is None:
        LOGGER.info('All wordforms have an anahash value.')
        return
    wf_mapping = df['wordform_id'].to_dict(defaultdict(int))
    anahashes = anahash_df(df[['frequency']], alphabet_file)
    bulk_add_anahashes(session, anahashes, tqdm_factory=tqdm_factory, batch_size=batch_size)
    connect_anahashes_to_wordforms(session, anahashes, wf_mapping, batch_size)
def write_wf_links_data(session, wf_mapping, links_df, wf_from_name,
                        wf_to_name, lexicon_id, wf_from_correct, wf_to_correct,
                        links_file, sources_file, add_columns=None):
    """
    Write wordform links (obtained from lexica) to JSON files for later processing.

    Two JSON files will be written to: `links_file` and `sources_file`. The links
    file contains only the wordform links and corresponds to the wordform_links
    database table. The sources file contains the source lexicon of each link and
    also whether either wordform is considered a "correct" form or not, which is
    defined by the lexicon (whether it is a "dictionary" with only correct words
    or a correction list with correct words in one column and incorrect ones in the
    other).

    Returns a (num_wf_links, num_wf_link_sources) tuple; both counts include
    the two directions of every pair.
    """
    if add_columns is None:
        add_columns = []
    num_wf_links = 0
    num_wf_link_sources = 0
    # Tracks pairs already emitted during this run (both directions).
    wf_links = defaultdict(bool)
    for _, row in tqdm(links_df.iterrows(), total=links_df.shape[0]):
        wf_from = wf_mapping[row[wf_from_name]]
        wf_to = wf_mapping[row[wf_to_name]]
        # Don't add links to self! and keep track of what was added,
        # because duplicates may occur
        if wf_from != wf_to and (wf_from, wf_to) not in wf_links:
            select_statement = select([WordformLink]). \
                where(and_(WordformLink.wordform_from == wf_from, WordformLink.wordform_to == wf_to))
            result = session.execute(select_statement).fetchone()
            if result is None:
                # Both directions of the relationship need to be added.
                links_file.write(json_line({'wordform_from': wf_from,
                                            'wordform_to': wf_to}))
                links_file.write(json_line({'wordform_from': wf_to,
                                            'wordform_to': wf_from}))
                num_wf_links += 2
            # The wordform link sources (in both directions) need to be
            # written regardless of the existence of the wordform links.
            link_source = {'wordform_from': wf_from,
                           'wordform_to': wf_to,
                           'lexicon_id': lexicon_id,
                           'wordform_from_correct': wf_from_correct,
                           'wordform_to_correct': wf_to_correct}
            for column in add_columns:
                link_source[column] = row[column]
            line = json_line(link_source)
            sources_file.write(line)
            # Reverse direction: the correctness flags are swapped with it.
            link_source = {'wordform_from': wf_to,
                           'wordform_to': wf_from,
                           'lexicon_id': lexicon_id,
                           'wordform_from_correct': wf_to_correct,
                           'wordform_to_correct': wf_from_correct}
            for column in add_columns:
                link_source[column] = row[column]
            line = json_line(link_source)
            sources_file.write(line)
            num_wf_link_sources += 2
            wf_links[(wf_from, wf_to)] = True
            wf_links[(wf_to, wf_from)] = True
    return num_wf_links, num_wf_link_sources
def add_lexicon_with_links(session, lexicon_name, vocabulary, wfs, from_column,
                           to_column, from_correct, to_correct,
                           batch_size=50000, preprocess_wfs=True, to_add=None):
    """
    Add wordforms from a lexicon with links to the database.

    Lexica with links contain wordform pairs that are linked. The `wfs`
    dataframe must contain two columns: the `from_column` and the `to_column`,
    which contains the two words of each pair (per row). Using the arguments
    `from_correct` and `to_correct`, you can indicate whether the columns of
    this dataframe contain correct words or not (boolean). Typically, there
    are two types of linked lexica: True + True, meaning it links correct
    wordforms (e.g. morphological variants) or True + False, meaning it links
    correct wordforms to incorrect ones (e.g. a spelling correction list).
    """
    LOGGER.info('Adding lexicon with links between wordforms.')
    if to_add is None:
        to_add = []
    # Make a dataframe containing all wordforms in the lexicon
    # NOTE(review): Series.append is deprecated in newer pandas (use
    # pd.concat) — fine for the version this project pins; confirm on upgrade.
    wordforms = pd.DataFrame()
    wordforms['wordform'] = wfs[from_column].append(wfs[to_column],
                                                    ignore_index=True)
    wordforms = wordforms.drop_duplicates(subset='wordform')
    # Create the lexicon (with all the wordforms)
    lexicon = add_lexicon(session, lexicon_name, vocabulary, wordforms,
                          preprocess_wfs=preprocess_wfs)
    wf_mapping = get_wf_mapping(session, lexicon_id=lexicon.lexicon_id)
    # Preprocess the pair columns the same way add_lexicon preprocessed the
    # stored wordforms, so the mapping lookups below succeed.
    if preprocess_wfs:
        wfs = preprocess_wordforms(wfs, columns=[from_column, to_column])
    with get_temp_file() as wfl_file:
        LOGGER.debug('Writing wordform links to add to (possibly unnamed) temporary file.')
        with get_temp_file() as wfls_file:
            LOGGER.debug('Writing wordform link sources to add to (possibly unnamed) temporary file.')
            num_l, num_s = write_wf_links_data(session, wf_mapping, wfs,
                                               from_column, to_column,
                                               lexicon.lexicon_id,
                                               from_correct, to_correct,
                                               wfl_file, wfls_file,
                                               add_columns=to_add)
            LOGGER.info('Inserting %s wordform links.', num_l)
            sql_insert_batches(session, WordformLink, read_json_lines(wfl_file),
                               batch_size=batch_size)
            LOGGER.info('Inserting %s wordform link sources.', num_s)
            sql_insert_batches(session, WordformLinkSource,
                               read_json_lines(wfls_file), batch_size=batch_size)
    return lexicon
def add_morphological_paradigms(session, in_file):
    """
    Add morphological paradigms to database from CSV file.

    The input is a tab-separated file whose columns are listed in `names`
    below; the wordforms are stored first, then the paradigm records.
    """
    data = pd.read_csv(in_file, sep='\t', index_col=False,
                       names=['wordform', 'corpus_freq', 'component_codes',
                              'human_readable_c_code', 'first_year', 'last_year',
                              'dict_ids', 'pos_tags', 'int_ids'])
    # drop first row (contains empty wordform)
    data = data.drop([0])
    # store wordforms for in database
    wfs = data[['wordform']].copy()
    bulk_add_wordforms(session, wfs)
    # get the morphological variants from the pandas dataframe
    LOGGER.info('extracting morphological variants')
    morph_paradigms_per_wordform = defaultdict(list)
    with tqdm(total=data.shape[0]) as pbar:
        for row in data.iterrows():
            # Multiple component codes per wordform are '#'-separated.
            codes = row[1]['component_codes'].split('#')
            wordform = row[1]['wordform']
            for code in codes:
                morph_paradigms_per_wordform[wordform].append(split_component_code(code, wordform))
            pbar.update()
    LOGGER.info('Looking up wordform ids.')
    select_statement = select([Wordform]).where(Wordform.wordform.in_(wfs['wordform']))
    mapping = session.execute(select_statement).fetchall()
    LOGGER.info('Writing morphological variants to file.')
    # Spool the records to a temp file, then insert them in batches.
    with get_temp_file() as mp_file:
        total_lines_written = write_json_lines(mp_file, morph_iterator(morph_paradigms_per_wordform, mapping))
        LOGGER.info('Wrote %s morphological variants.', total_lines_written)
        LOGGER.info('Inserting morphological variants to the database.')
        sql_insert_batches(session, MorphologicalParadigm,
                           read_json_lines(mp_file), batch_size=50000)
def create_ticclat_database(delete_existing=False):
    """
    Create the TICCLAT database.

    Sets the proper encoding settings and uses the schema to create tables.

    Args:
        delete_existing: when True, an already-existing database with the same
            name is dropped and recreated; when False (default), an existing
            database makes this function raise.
    """
    # db = MySQLdb.connect(user=user, passwd=passwd, host=host)
    # engine = create_engine(f"mysql://{user}:{passwd}@{host}/{dbname}?charset=utf8mb4")
    engine = get_engine(without_database=True)
    connection = engine.connect()
    db_name = get_db_name()
    try:
        connection.execute(f"CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;")
    except MySQLdb.ProgrammingError as exception:
        # CREATE DATABASE failed: if it failed because the database already
        # exists, optionally drop and recreate it; otherwise re-raise.
        if database_exists(engine.url):
            if not delete_existing:
                raise Exception(f"Database `{db_name}` already exists, delete it first before recreating.")
            drop_database(engine.url)
            connection.execute(f"CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;")
        else:
            raise exception
    connection.close()
    # Reconnect with the database name included, then create all tables.
    engine = get_engine()
    # create tables
    Base.metadata.create_all(engine)
def empty_table(session, table_class):
    """Remove all rows from a database table by dropping and recreating it.

    Args:
        session: SQLAlchemy session object.
        table_class: the ticclat_schema class corresponding to the table.
    """
    # Cheap emptiness probe: fetch at most one row.
    if session.query(table_class).first() is None:
        return
    LOGGER.info('Table "%s" is not empty.', table_class.__table__.name)
    LOGGER.info('Deleting rows...')
    bind = session.get_bind()
    Base.metadata.drop_all(bind=bind, tables=[table_class.__table__])
    Base.metadata.create_all(bind=bind, tables=[table_class.__table__])
def create_wf_frequencies_table(session):
    """
    Create wordform_frequencies table in the database.

    The text_attestations frequencies are summed and stored in this table.
    This can be used to save time when needing total-database frequencies.
    """
    LOGGER.info('Creating wordform_frequencies table.')
    # Make sure the wordform_frequency table exists (create it if it doesn't)
    Base.metadata.create_all(session.get_bind(),
                             tables=[WordformFrequencies.__table__])
    # Start from a clean table so re-running does not double-count the sums.
    empty_table(session, WordformFrequencies)
    session.execute("""
        INSERT INTO wordform_frequency
        SELECT
            wordforms.wordform_id,
            wordforms.wordform,
            SUM(frequency) AS frequency
        FROM
            wordforms LEFT JOIN text_attestations ta ON wordforms.wordform_id = ta.wordform_id
        GROUP BY wordforms.wordform, wordforms.wordform_id
    """)
def add_ticcl_variants(session, name, df, **kwargs):
    """Store TICCL correction candidates as a linked lexicon.

    Each row of *df* links an OCR variant (incorrect) to a correction
    candidate (correct); the Levenshtein-distance column 'ld' is kept as
    extra link metadata. Remaining keyword arguments are forwarded to
    ``add_lexicon_with_links``.
    """
    return add_lexicon_with_links(session,
                                  lexicon_name=name,
                                  vocabulary=False,
                                  wfs=df,
                                  from_column='ocr_variant',
                                  to_column='correction_candidate',
                                  from_correct=False,
                                  to_correct=True,
                                  preprocess_wfs=False,
                                  to_add=['ld'],
                                  **kwargs)
| StarcoderdataPython |
1760067 | import unittest
import pep8
class TestCodeFormat(unittest.TestCase):
    """Style-conformance checks for the project sources."""

    def test_pep8_conformance(self):
        """all packages, tests, and cookbook conform to PEP8"""
        checked_paths = ['fatiando', 'test', 'setup.py', 'cookbook']
        # quiet=True suppresses per-violation printing; only counts are kept.
        checker = pep8.StyleGuide(quiet=True, exclude=['_version.py'])
        report = checker.check_files(checked_paths)
        self.assertEqual(report.total_errors, 0,
                         "Found code style errors (and warnings).")
| StarcoderdataPython |
110620 | #
#
# Copyright 2019 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Functions for describing type definitions for generating macros.
Implements the functions for describing and parsing the type definitions. Allows
emitting macros which can be read directly by a C/C++ program, to evaluate the
unresolved values in such macros and then generate include directives, constant
definitions and conversion functions that allow system constants to be converted
from the enclave C library implementation used by Asylo to target host
implementation on the untrusted side (typically libc).
For each type definition (eg. define_constants, define_structs), a definition
and getter methods are provided. The definition methods accept a type definition
one at a time, while the get methods return all the type definitions under a
single macro.
Finally, a write_output() method is provided, which emits all the type
definitions recorded so far in the definitions file (types.py).
"""
from __future__ import print_function
import collections
import re
import sys
# Stores system header includes as a set. Only header file names are expected,
# with or without the .h extension and without the '#include' directive
# prefixed.
# We include stdbool.h by default so that the generated output (as .inc file) is
# also readable by a C program.
_includes = {'stdbool.h'}
# Map from enum names to dictionary of enum properties and their values.
_enum_map = collections.defaultdict(dict)
# Map from struct names to dictionary of struct properties and their members.
_struct_map = collections.defaultdict(dict)
# Declare the prefix to be used for C enum declarations and conversion
# functions. This prefix should be used for direct conversions between enclave
# C library and host library, ones which do not involve an intermediate bridge.
_klinux_prefix = 'kLinux'
def set_klinux_prefix(prefix):
  """Sets the prefix used for constants definitions and conversion functions.

  Args:
    prefix: Name of the prefix to be applied to a kernel based constant
      definition or conversion function name (e.g. 'kLinux').
  """
  global _klinux_prefix
  _klinux_prefix = prefix
def define_constants(name,
                     values,
                     include_header_file,
                     multi_valued=False,
                     skip_conversions=False,
                     wrap_macros_with_if_defined=False,
                     data_type='int'):
  """Records a collection of related constants/macros and their properties.

  Args:
    name: Name of the collection of constants.
    values: Constant names provided as a list of strings.
    include_header_file: System header (included as #include <filename>) used
      both to resolve the constants' values at compile time and, at runtime,
      by the generated conversion functions that translate constants between
      the enclave C library and the target host C library.
    multi_valued: True if the constant values may be combined with bitwise OR.
    skip_conversions: True to emit only constant definitions and skip the
      conversion functions (useful when those are complex and hand-written).
    wrap_macros_with_if_defined: True to wrap each constant inside a
      #if defined(value) ... #endif when generating the conversion functions,
      so macros missing on a particular platform or architecture are safely
      tolerated. Intended only for constants that are C/C++ macros.
    data_type: String specifying the type of the constants, if not int.

  Raises:
    ValueError: Invalid include_header_file format provided.
  """
  # Each constant is written twice: once as a quoted string literal (its
  # name) and once bare, which the conversion generator compiles directly as
  # the constant's integer value — e.g. ['CONST_VAL1'] becomes
  # {"CONST_VAL1", CONST_VAL1}. Casting string to enum value is non-trivial
  # in C++, hence the duplication.
  value_pairs = ['{{"{}", {}}}'.format(val, val) for val in values]
  properties = _enum_map[name]
  properties['values'] = ', '.join(value_pairs)
  properties['multi_valued'] = multi_valued
  properties['skip_conversions'] = skip_conversions
  properties['wrap_macros_with_if_defined'] = wrap_macros_with_if_defined
  properties['data_type'] = '"{}"'.format(data_type)
  add_include_header_file(include_header_file)
def add_include_header_file(include_header_file):
  """Adds a system header file to the set of includes to be generated.

  Args:
    include_header_file: Name of the system header file, in the format
      'filename.h'. Do not use <> or "" to wrap the filename, and do not
      prefix it with an '#include' directive.

  Raises:
    ValueError: The filename is wrapped in <>/"" or carries an '#include'
      prefix.
  """
  # Reject names wrapped in <...> or "...". (The previous character classes
  # were written as [<,"] / [>,"], accidentally including ',' and therefore
  # mis-rejecting any filename that merely contained a comma.)
  if re.match(r'[<"].*?[>"]', include_header_file):
    raise ValueError(
        'Invalid include format for filename "%s". Please provide the include '
        'file without enclosing pointy brackets <> or quotes "".' %
        include_header_file)
  if re.match('#include', include_header_file, re.IGNORECASE):
    raise ValueError(
        'Invalid include format for filename "%s". Please provide the filename '
        'without the prefixing #include directive.' % include_header_file)
  _includes.add(include_header_file)
def define_struct(name,
                  values,
                  include_header_file,
                  pack_attributes=True,
                  skip_conversions=False):
  """Records a kernel struct and its generation properties.

  Args:
    name: Struct name, matching the one used by both the enclave C library
      and the host C library for system calls (e.g. 'stat', 'timeval').
    values: List of (member_type, member_name) tuples; member names must
      match the corresponding members of the struct in the enclave C library
      and libc, e.g. [("int64_t", "st_dev"), ("int64_t", "st_ino")].
    include_header_file: Kernel header to include so |name| resolves as a
      valid kernel struct in the generated conversion functions.
    pack_attributes: True to prevent the compiler from padding the generated
      kernel struct members away from their natural alignment.
    skip_conversions: True to emit only the kernel struct definitions and
      skip the conversion functions (useful when those are hand-written).
  """
  member_pairs = ['{{"{}", "{}"}}'.format(member_name, member_type)
                  for member_type, member_name in values]
  properties = _struct_map[name]
  properties['values'] = ', '.join(member_pairs)
  properties['pack_attributes'] = pack_attributes
  properties['skip_conversions'] = skip_conversions
  add_include_header_file(include_header_file)
def get_klinux_prefix():
  """Gets the prefix for generated C enums and conversion functions."""
  return 'const char klinux_prefix[] = "%s";\n' % _klinux_prefix
def get_includes_as_include_macros():
  """Returns all recorded includes as line-separated #include directives.

  The types-conversions generator compiles these at build time to resolve
  constant values for the target host implementation.
  """
  include_lines = ['#include <%s>\n' % filename
                   for filename in sorted(_includes)]
  return ''.join(include_lines)
def get_includes_in_define_macro():
  """Returns all recorded includes under a single #define INCLUDES macro.

  Consumers expand this macro to generate their own #include directives.
  """
  quoted = ('"%s"' % include_file for include_file in sorted(_includes))
  return '#define INCLUDES ' + ', \\\n'.join(quoted)
def get_constants():
  r"""Returns a macro describing every recorded constants collection.

  The macro initializes the conversion generator's enum description table
  (enum_properties_table), mapping enum names to an EnumProperties struct.
  Each row has the pattern -
  {"EnumName", {multi_valued, skip_conversions, wrap_macros_with_if_defined,
  data_type, {{"const_val1", const_val1}, {"const_val2", const_val2}}}}, \
  """
  def as_bool(flag):
    # Render a Python bool as a C/C++ boolean literal.
    return 'true' if flag else 'false'
  rows = []
  for enum_name, properties in sorted(_enum_map.items()):
    flags = ', '.join([as_bool(properties['multi_valued']),
                       as_bool(properties['skip_conversions']),
                       as_bool(properties['wrap_macros_with_if_defined'])])
    rows.append('{"%s", {%s, %s, {%s}}}' %
                (enum_name, flags, properties['data_type'],
                 properties['values']))
  return '#define ENUMS_INIT \\\n{}\n'.format(', \\\n'.join(rows))
def get_structs():
  r"""Returns a macro describing every recorded struct.

  The macro initializes the conversion generator's struct description table
  (struct_properties_table), mapping struct names to a StructProperties
  struct. Each row has the pattern -
  {"struct_name", {pack_attributes, skip_conversions, \
  {{"member_name1", "member_type1"}, {"member_name2", "member_type2"}}}}
  """
  def as_bool(flag):
    # Render a Python bool as a C/C++ boolean literal.
    return 'true' if flag else 'false'
  rows = []
  for struct_name, properties in sorted(_struct_map.items()):
    rows.append('{"%s", {%s, %s, {%s}}}' %
                (struct_name, as_bool(properties['pack_attributes']),
                 as_bool(properties['skip_conversions']),
                 properties['values']))
  return '#define STRUCTS_INIT \\\n{}\n'.format(', \\\n'.join(rows))
def write_output(stream=sys.stdout):
  """Writes every generated macro section to *stream* (stdout by default)."""
  sections = (get_includes_as_include_macros(),
              get_includes_in_define_macro(),
              get_klinux_prefix(),
              get_constants(),
              get_structs())
  for section in sections:
    print(section, file=stream)
| StarcoderdataPython |
187310 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Window Transformers '''
from __future__ import print_function, division, absolute_import, unicode_literals
import io
import numpy as np
import pandas as pd
import re
import six
from PIL import Image
def _bgr2rgb(pil_image):
    """Return a copy of *pil_image* with its color channels reversed."""
    channels_reversed = np.asarray(pil_image)[:, :, ::-1]
    return Image.fromarray(channels_reversed)
def bgr2rgb(data, columns=None):
    '''
    Convert BGR images to RGB

    Parameters
    ----------
    data : PIL.Image or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image`
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    if not hasattr(data, 'columns'):
        # Scalar case: convert a single image; pass anything else through.
        if Image.isImageType(data):
            return _bgr2rgb(data)
        return data
    if len(data):
        if not columns:
            columns = list(data.columns)
        elif isinstance(columns, six.string_types):
            columns = [columns]
        for col in columns:
            # Only touch columns whose cells actually hold PIL images.
            if Image.isImageType(data[col].iloc[0]):
                data[col] = data[col].apply(_bgr2rgb)
    return data
def rgb2bgr(data, columns=None):
    '''
    Convert RGB images to BGR

    Parameters
    ----------
    data : PIL.Image or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image`
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    # Reversing the channel order is its own inverse, so delegate.
    return bgr2rgb(data, columns)
def _bytes2image(data):
    """Decode raw image bytes into a PIL.Image."""
    buffer = io.BytesIO(data)
    return Image.open(buffer)
def bytes2image(data, columns=None):
    '''
    Convert bytes to PIL.Image objects

    Parameters
    ----------
    data : bytes or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image`
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    if not hasattr(data, 'columns'):
        # Scalar case: decode a single byte string; pass anything else through.
        if isinstance(data, six.binary_type):
            return _bytes2image(data)
        return data
    if len(data):
        if not columns:
            columns = list(data.columns)
        elif isinstance(columns, six.string_types):
            columns = [columns]
        for col in columns:
            # Only touch columns whose cells actually hold raw bytes.
            if isinstance(data[col].iloc[0], bytes):
                data[col] = data[col].apply(_bytes2image)
    return data
| StarcoderdataPython |
4814623 | from gibson2.core.render.mesh_renderer import MeshRendererContext
from gibson2.core.render.mesh_renderer.get_available_devices import get_available_devices
def test_device():
    # Sanity check: at least one rendering-capable device must be visible.
    assert len(get_available_devices()) > 0
def test_binding():
    # Smoke test of the native renderer bindings: create a 256x256 context on
    # the first available device, initialize it, then release it again.
    r = MeshRendererContext.MeshRendererContext(256, 256, get_available_devices()[0])
    r.init()
    r.release()
| StarcoderdataPython |
78117 | <gh_stars>0
import argparse
from base64 import b64decode
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
class Document:
    """A single crawled page together with its spam label."""

    def __init__(self, id=0, url='', html='', mark=False):
        # `mark` is True for documents labelled as spam.
        self.id = id
        self.mark = mark
        self.url = url
        self.html = html
def read_dataset(dataset_file):
    """Reads a TSV dataset of labelled documents.

    Each data line holds four tab-separated fields: document id, 0/1 spam
    mark, url, and the base64-encoded page HTML. The first (header) line is
    skipped. Removes the stray debug print of the filename that the previous
    version emitted on every call.

    Args:
        dataset_file: path to the TSV file.

    Returns:
        list of Document objects parsed from the file.
    """
    dataset = []
    with open(dataset_file, "r", encoding="utf-8") as input_file:
        input_file.readline()  # skip the header row
        for i, line in enumerate(input_file):
            if i % 1000 == 0:
                # Coarse progress logging for large crawls.
                print("processed document %d\n" % i)
            parts = line.strip().split('\t')
            url_id = int(parts[0])
            mark = bool(int(parts[1]))
            url = parts[2]
            page_b64 = parts[3]
            # Pages may contain invalid UTF-8; replace rather than crash.
            html = b64decode(page_b64).decode("utf-8", errors="replace")
            dataset.append(Document(id=url_id, url=url, html=html, mark=mark))
    return dataset
def fit_vectorizer(vectorizer, dataset):
    """Fit *vectorizer* on the documents' raw HTML.

    Returns the corpus (list of HTML strings) so callers can reuse it for
    transform() without rebuilding it.
    """
    print("... fit vectorizer ...")
    corpus = [document.html for document in dataset]
    vectorizer.fit(corpus)
    return corpus
def get_vectorized_dataset(vectorizer, dataset, corpus):
    """Vectorize *corpus* with an already-fitted vectorizer.

    Returns (X, y): the transformed feature matrix and the spam labels.
    """
    print("... vectorizing ...")
    labels = [document.mark for document in dataset]
    features = vectorizer.transform(corpus)
    return features, labels
def main():
    """Train a spam classifier on the train split and write predictions."""
    parser = argparse.ArgumentParser(description='Homework 3: Antispam')
    parser.add_argument('--train_set', required=True, help='kaggle_train_data_tab.csv')
    parser.add_argument('--test_set', required=True, help='kaggle_test_data_tab.csv')
    parser.add_argument('--submission_file', required=True, help='submission.txt')
    args = parser.parse_args()
    train = read_dataset(args.train_set)
    test = read_dataset(args.test_set)
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), sublinear_tf=True)
    # Fit the vocabulary/IDF statistics on the TRAIN split only. The previous
    # version re-fit the vectorizer on the test corpus afterwards, which both
    # leaked test data and transformed the train set with test-derived
    # statistics.
    train_corpus = fit_vectorizer(vectorizer, train)
    test_corpus = [doc.html for doc in test]
    X_train, y_train = get_vectorized_dataset(vectorizer, train, train_corpus)
    X_test, y_test = get_vectorized_dataset(vectorizer, test, test_corpus)
    clf = SGDClassifier(verbose=True, epsilon=1e-5, class_weight='balanced', penalty='elasticnet')
    print("... fitting model ...")
    clf.fit(X_train, y_train)
    print("... predicting ...")
    y_predicted = clf.predict(X_test)
    print("... writing answer ...")
    # Kaggle submission format: one "Id,Prediction" row per test document.
    with open(args.submission_file, "w") as output_file:
        output_file.write("Id,Prediction\n")
        for doc, mark in zip(test, y_predicted):
            output_file.write("%d,%d\n" % (doc.id, mark))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
152136 | <filename>echobot.py
# -*- coding: UTF-8 -*-
from fbchat import log, Client
# Subclass fbchat.Client and override required methods
class EchoBot(Client):
    """fbchat client that echoes back every message it did not send itself."""

    def onMessage(self, author_id, message, thread_id, thread_type, **kwargs):
        self.markAsDelivered(author_id, thread_id)
        self.markAsRead(author_id)
        log.info("Message from {} in {} ({}): {}".format(author_id, thread_id, thread_type.name, message))
        # Never echo our own messages, otherwise we'd loop forever.
        if author_id == self.uid:
            return
        self.sendMessage(message, thread_id=thread_id, thread_type=thread_type)
# Replace the placeholders with real Facebook credentials; listen() blocks
# forever, dispatching incoming events to EchoBot.onMessage.
client = EchoBot("<email>", "<password>")
client.listen()
| StarcoderdataPython |
4801965 | <filename>python/formalizeua.py
#!/usr/bin/python
import sys
import os
# Normalize the raw user-agent column (field 8) of a TSV log into a compact
# "<os>_<browser>" token, rewriting the input file in place.
if len(sys.argv) < 2:
    print('Usage: input')
    sys.exit(-1)

# Candidate substrings, checked in order; first hit wins, otherwise "other".
oses = ["windows", "ios", "mac", "android", "linux"]
browsers = ["chrome", "sogou", "maxthon", "safari", "firefox", "theworld", "opera", "ie"]

outname = sys.argv[1] + ".fmua"
# `with` closes both handles; the previous version leaked the input handle.
with open(sys.argv[1], 'r') as fi, open(outname, 'w') as fo:
    first = True
    for l in fi:
        if first:
            # Copy the header row through unchanged.
            fo.write(l)
            first = False
            continue
        s = l.split('\t')
        ua = s[7].lower()
        operation = "other"
        browser = "other"
        for o in oses:
            if o in ua:
                operation = o
                break
        for b in browsers:
            if b in ua:
                browser = b
                break
        fmua = operation + "_" + browser
        output = s[0]
        for i in range(1, len(s)):
            if i == 7:
                output = output + '\t' + fmua
            else:
                # Keep empty fields visible downstream by marking them "null".
                if len(s[i]) == 0 or s[i] == '\n':
                    s[i] = "null" + s[i]
                output = output + '\t' + s[i]
        fo.write(output)
# os.replace overwrites atomically and, unlike os.rename, also succeeds when
# the destination already exists on Windows.
os.replace(outname, sys.argv[1])
| StarcoderdataPython |
1634359 | import inspect
import platform, re
from enum import Enum
from subprocess import run
from pathlib import Path
from coldtype.geometry import Rect
from coldtype.color import normalize_color
from coldtype.animation import Timeable, Frame
from coldtype.animation.timeline import Timeline
from coldtype.text.reader import normalize_font_prefix
class Action(Enum):
    # Commands the renderer/viewer can be asked to perform; the values are
    # the serialized names used when actions are passed around as strings.
    Initial = "initial"
    Resave = "resave"
    RenderAll = "render_all"
    RenderWorkarea = "render_workarea"
    RenderIndices = "render_indices"
    PreviewStoryboard = "preview_storyboard"
    PreviewIndices = "preview_indices"
    PreviewStoryboardNext = "preview_storyboard_next"
    PreviewStoryboardPrev = "preview_storyboard_prev"
    ArbitraryTyping = "arbitrary_typing"
    ArbitraryCommand = "arbitrary_command"
    UICallback = "ui_callback"
class RenderPass():
    """One unit of rendering work.

    Bundles the owning renderable, a suffix used to name the output file,
    and the positional arguments its render function will receive. `path`
    is filled in later by the render pipeline.
    """

    def __init__(self, render, suffix, args):
        self.render = render
        self.suffix = suffix
        self.args = args
        self.fn = render.func
        self.path = None
class renderable():
    """Base class for anything coldtype can render.

    Used decorator-style: calling an instance with a function stores it on
    ``self.func`` (see ``__call__``) and returns the instance.
    """
    # NOTE(review): `watch=[]` and `layers=[]` are mutable defaults shared
    # across instances — safe only while never mutated in place.
    def __init__(self, rect=(1080, 1080), bg="whitesmoke", hide=False, fmt="png", rasterizer=None, prefix=None, dst=None, custom_folder=None, postfn=None, watch=[], layers=[], ui_callback=None):
        self.hide = hide
        self.rect = Rect(rect)
        self.bg = normalize_color(bg)
        self.fmt = fmt
        self.prefix = prefix
        # Explicit destination directory (expanded/resolved), or None.
        self.dst = Path(dst).expanduser().resolve() if dst else None
        self.custom_folder = custom_folder
        self.postfn = postfn
        self.ui_callback = ui_callback
        # Files whose changes should trigger a re-render.
        self.watch = [Path(w).expanduser().resolve() for w in watch]
        self.rasterizer = rasterizer
        self.layers = layers
        if not rasterizer:
            # Default rasterizer: svg output needs the svg backend, macOS
            # uses drawbot, everything else falls back to cairo.
            if self.fmt == "svg":
                self.rasterizer = "svg"
            else:
                system = platform.system()
                if system == "Darwin":
                    self.rasterizer = "drawbot"
                else:
                    self.rasterizer = "cairo"
    def __call__(self, func):
        # Decorator protocol: capture the render function, return self.
        self.func = func
        return self
    def folder(self, filepath):
        """Output subfolder for this renderable; '' means none."""
        return ""
    def layer_folder(self, filepath, layer):
        """Output subfolder for a specific layer; '' means none."""
        return ""
    def passes(self, action, layers, indices=[]):
        """Return the RenderPasses to execute for *action* (one by default)."""
        return [RenderPass(self, self.func.__name__, [self.rect])]
    def package(self, filepath, output_folder):
        """Hook for post-render packaging of outputs; no-op by default."""
        pass
    async def run(self, render_pass):
        # Render functions may be sync or async; await only when necessary.
        if inspect.iscoroutinefunction(render_pass.fn):
            return await render_pass.fn(*render_pass.args)
        else:
            return render_pass.fn(*render_pass.args)
    async def runpost(self, result, render_pass):
        # Give postfn (if any) a chance to transform the rendered result.
        if self.postfn:
            return self.postfn(self, result)
        else:
            return result
class svgicon(renderable):
    """Renderable that always emits SVG into a folder named after the
    source file's stem."""

    def __init__(self, **kwargs):
        super().__init__(fmt="svg", **kwargs)

    def folder(self, filepath):
        return filepath.stem
class glyph(renderable):
    """Renders a single named glyph centered in a fixed body rect."""
    def __init__(self, glyphName, width=500, **kwargs):
        # Canvas defaults to 1000x1000 unless the caller passes rect=.
        r = Rect(kwargs.get("rect", Rect(1000, 1000)))
        kwargs.pop("rect", None)
        self.width = width
        # Body: 750 tall, `width` wide, centered in the canvas.
        self.body = r.take(750, "mdy").take(self.width, "mdx")
        self.glyphName = glyphName
        super().__init__(rect=r, **kwargs)
    def passes(self, action, layers, indices=[]):
        # Single pass named after the glyph; the render fn receives no args.
        return [RenderPass(self, self.glyphName, [])]
class fontpreview(renderable):
    """Renders one preview per font file matching a regex in a directory."""
    def __init__(self, font_dir, font_re, rect=(1200, 150), limit=25, **kwargs):
        super().__init__(rect=rect, **kwargs)
        self.dir = normalize_font_prefix(font_dir)
        self.re = font_re
        self.matches = []
        # Collect at most `limit` matching font paths, then sort so the
        # generated passes come out in a deterministic order.
        for font in self.dir.iterdir():
            if re.search(self.re, str(font)):
                if len(self.matches) < limit:
                    self.matches.append(font)
        self.matches.sort()
    def passes(self, action, layers, indices=[]):
        # One pass per matched font; the render fn gets (rect, font_path).
        return [RenderPass(self, "{:s}".format(m.name), [self.rect, m]) for m in self.matches]
class iconset(renderable):
    """Renders app icons at multiple sizes and packages .icns/.ico files."""
    # Sizes accepted by macOS .iconset bundles / Windows .ico files.
    valid_sizes = [16, 32, 64, 128, 256, 512, 1024]
    def __init__(self, sizes=[128, 1024], **kwargs):
        # NOTE(review): mutable default list — shared across instances.
        super().__init__(**kwargs)
        self.sizes = sizes
    def folder(self, filepath):
        return f"{filepath.stem}_source"
    def passes(self, action, layers, indices=[]): # TODO could use the indices here
        sizes = self.sizes
        if action == Action.RenderAll:
            # A full render emits every size needed for packaging.
            sizes = self.valid_sizes
        return [RenderPass(self, str(size), [self.rect, size]) for size in sizes]
    def package(self, filepath, output_folder):
        # inspired by https://retifrav.github.io/blog/2018/10/09/macos-convert-png-to-icns/
        iconset = output_folder.parent / f"{filepath.stem}.iconset"
        iconset.mkdir(parents=True, exist_ok=True)
        system = platform.system()
        if system == "Darwin":
            # Build the .iconset folder (1x and @2x variants) and convert it
            # to .icns with Apple's sips/iconutil command-line tools.
            for png in output_folder.glob("*.png"):
                d = int(png.stem.split("_")[1])
                for x in [1, 2]:
                    if x == 2 and d == 16:
                        continue
                    elif x == 1:
                        fn = f"icon_{d}x{d}.png"
                    elif x == 2:
                        fn = f"icon_{int(d/2)}x{int(d/2)}@2x.png"
                    print(fn)
                    run(["sips", "-z", str(d), str(d), str(png), "--out", str(iconset / fn)])
            run(["iconutil", "-c", "icns", str(iconset)])
        if True: # can be done windows or mac
            # Windows .ico: embed all sizes, downscaled from the 1024 render.
            from PIL import Image
            output = output_folder.parent / f"{filepath.stem}.ico"
            largest = list(output_folder.glob("*_1024.png"))[0]
            img = Image.open(str(largest))
            icon_sizes = [(x, x) for x in self.valid_sizes]
            img.save(str(output), sizes=icon_sizes)
class animation(renderable, Timeable):
    """A renderable with a time dimension: one RenderPass per frame."""
    def __init__(self, rect=(1080, 1080), duration=10, storyboard=[0], timeline:Timeline=None, **kwargs):
        # NOTE(review): `storyboard=[0]` is a mutable default — shared.
        super().__init__(**kwargs)
        self.rect = Rect(rect)
        self.r = self.rect
        self.start = 0
        self.end = duration
        self.duration = duration
        self.storyboard = storyboard
        if timeline:
            # A timeline overrides start/end/duration; its storyboard wins
            # unless it is the default [0] while ours was customized.
            self.timeline = timeline
            self.t = timeline
            self.start = timeline.start
            self.end = timeline.end
            self.duration = timeline.duration
            if self.storyboard != [0] and timeline.storyboard == [0]:
                pass
            else:
                self.storyboard = timeline.storyboard
        else:
            self.timeline = None
    def folder(self, filepath):
        return filepath.stem # TODO necessary?
    def layer_folder(self, filepath, layer):
        return layer
    def all_frames(self):
        """Every frame index in [0, duration)."""
        return list(range(0, self.duration))
    def passes(self, action, layers, indices=[]):
        # Choose which frames to render: the storyboard by default, every
        # frame for a full render, explicit indices, or the timeline's
        # workarea when it can provide one.
        frames = self.storyboard
        if action == Action.RenderAll:
            frames = self.all_frames()
        elif action in [Action.PreviewIndices, Action.RenderIndices]:
            frames = indices
        elif action in [Action.RenderWorkarea]:
            if self.timeline:
                if hasattr(self.timeline, "find_workarea"):
                    frames = self.timeline.find_workarea()
        return [RenderPass(self, "{:04d}".format(i), [Frame(i, self, layers)]) for i in frames]
1604868 | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from models import Net
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging every args.log_interval batches."""
    model.train()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        predictions = model(inputs)
        # Summed BCE over the batch, matching the evaluation loss.
        loss = F.binary_cross_entropy(predictions, labels, reduction='sum')
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            progress = 100. * batch_idx / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(inputs), len(train_loader.dataset),
                progress, loss.item()))
def test(args, model, device, test_loader):
    """Evaluate the model on test_loader and print the averaged metrics.

    `args` is accepted for signature symmetry with train() but is unused.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.binary_cross_entropy(output, target, reduction='sum').item() # sum up batch loss
            # NOTE(review): despite its name, `correct` accumulates a
            # smooth-L1 loss tensor rather than a count of correct
            # predictions — confirm whether an accuracy metric was intended.
            correct += F.smooth_l1_loss(output, target)
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, ({:.4f})\n'.format(
        test_loss,
        correct / len(test_loader.dataset)))
def main():
    """Parse CLI flags, build the text->RGB datasets, and train the model."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch Vec2Color Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1500, metavar='N',
                        help='number of epochs to train (default: 1500)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # Inputs: one embedding CSV per casing variant; targets: the RGB table
    # repeated once per variant so rows stay aligned with the inputs.
    file_names = ('capitalize', 'lower', 'upper', 'title')
    x_df = pd.concat([pd.read_csv('doc2color/data/{}.csv'.format(file_name)) for file_name in file_names])
    y_df = pd.concat([pd.read_csv('doc2color/data/rgb.csv')] * len(file_names))
    tensor_x = torch.stack([torch.from_numpy(np.array(i)) for i in x_df.values.astype(np.float32)])
    # Scale RGB targets from [0, 255] to [0, 1] for binary cross-entropy.
    tensor_y = torch.stack([torch.from_numpy(np.array(i)) for i in y_df.values.astype(np.float32) / 255.0])
    x_train, x_test, y_train, y_test = train_test_split(
        tensor_x, tensor_y, test_size=0.01, random_state=args.seed)
    train_dataset = torch.utils.data.TensorDataset(x_train, y_train)
    train_loader = torch.utils.data.DataLoader(train_dataset,
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_dataset = torch.utils.data.TensorDataset(x_test, y_test)
    test_loader = torch.utils.data.DataLoader(test_dataset,
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    # Decay the learning rate by `gamma` every epoch.
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), "doc2color/pt_objects/vec2color.pt")


if __name__ == '__main__':
    main()
3292934 | <gh_stars>100-1000
"""
for numbers a, b returns x, y such as x * a + y * b = gcd(a,b)
"""
def extendedEuclidean(a, b):
    """Return (x, y) such that x * a + y * b == gcd(a, b).

    Iterative extended Euclidean algorithm. The algorithm runs internally on
    (max(a, b), min(a, b)); the result pair is swapped back to match the
    caller's (a, b) argument order.
    """
    a_old, b_old = a, b
    a, b = max(a, b), min(a, b)
    # Invariant: old_x * max + old_y * min == a  (the current remainder).
    old_x, x = 1, 0
    old_y, y = 0, 1
    while b != 0:
        quotient = a // b
        a, b = b, a - quotient * b
        # The previous version swapped the roles of x and old_x in this
        # update, yielding wrong coefficients whenever the loop ran an odd
        # number of iterations (e.g. (5, 3) -> (0, -1) instead of (-1, 2)).
        old_x, x = x, old_x - quotient * x
        old_y, y = y, old_y - quotient * y
    if a_old > b_old:
        return old_x, old_y
    return old_y, old_x
| StarcoderdataPython |
1742931 | <reponame>punk95/Continual-Learning-With-Curiosity
import torch
from model import Discrete_Q_Function_CNN_NN
from parameters import NN_Paramters, Algo_Param, Save_Paths, Load_Paths
import numpy as np
from algorithms.epsilon_greedy import epsilon_greedy
from util.replay_buffer import Replay_Memory
from util.reservoir_with_fifo_replay_buffer_flow_through import Half_Reservoir_with_FIFO_Flow_Through_Replay_Buffer
from util.new_replay_buffers.gradual.custom_hrf import Custom_HRF
class Q_learning():
def __init__(self, env, q_nn_param, algo_param, max_episodes =100, memory_capacity =50000,
batch_size=512, save_path = Save_Paths(), load_path= Load_Paths(),
buffer_type= "FIFO", fifo_frac=0.34, change_at = [100000, 350000]):
self.state_dim = q_nn_param.state_dim
self.action_dim = q_nn_param.action_dim
self.q_nn_param = q_nn_param
self.algo_param = algo_param
self.max_episodes = max_episodes
self.save_path = Save_Paths()
self.load_path = Load_Paths()
self.inital_state = None
self.time_step = 0
self.Q = Discrete_Q_Function_CNN_NN(nn_params=q_nn_param,
save_path= self.save_path.q_path, load_path=self.load_path.q_path)
self.Target_Q = Discrete_Q_Function_CNN_NN(nn_params=q_nn_param,
save_path= self.save_path.q_path, load_path=self.load_path.q_path)
self.Target_Q.load_state_dict(self.Q.state_dict())
#self.loss_function = torch.nn.functional.smooth_l1_loss
self.loss_function = torch.nn.functional.mse_loss
self.Q_optim = torch.optim.Adam(self.Q.parameters(), self.q_nn_param.l_r)
self.buffer_type = buffer_type
self.t = 0
self.c = 0
self.change_at = change_at
if buffer_type == "FIFO":
self.replay_buffer = Replay_Memory(capacity=memory_capacity)
elif buffer_type == "Half_Reservior_FIFO_with_FT":
self.replay_buffer = Half_Reservoir_with_FIFO_Flow_Through_Replay_Buffer(capacity=memory_capacity, fifo_fac=fifo_frac)
elif buffer_type == "Custom":
self.replay_buffer = Custom_HRF(capacity=memory_capacity, fifo_fac=fifo_frac, change_at = change_at)
self.memory_capacity = memory_capacity
self.batch_size = batch_size
self.env = env
def save(self, q_path, target_q_path):
self.Q.save(q_path)
self.Target_Q.save(target_q_path)
def load(self, q_path, target_q_path):
self.Q.load(q_path)
self.Target_Q.load(target_q_path)
def step(self, state, random = None):
#since step is done on the basis of single states and not as a batch
batch_size = 1
q_values = self.Target_Q.get_value(state, format="numpy")
action, self.steps_done, self.epsilon = epsilon_greedy(q_values, self.steps_done, self.epsilon, self.action_dim)
next_state, reward, done, _ = self.env.step(action)
#converting the action for buffer as one hot vector
sample_hot_vec = np.array([0.0 for i in range(self.q_nn_param.action_dim)])
sample_hot_vec[action] = 1
action = sample_hot_vec
self.time_step += 1
if done:
next_state = None
self.replay_buffer.push(state, action, None, reward, next_state, self.time_step)
state = self.env.reset()
self.inital_state = state
self.time_step = 0
return state
if self.time_step == self.max_episodes:
self.replay_buffer.push(state, action, None, reward, next_state, self.time_step)
state = self.env.reset()
self.inital_state = state
self.time_step = 0
return state
self.replay_buffer.push(state, action, None, reward, next_state, self.time_step)
return next_state
def get_action(self, state, evaluate=True):
q_values = self.Q.get_value(state, format="numpy")
action_scaler = np.argmax(q_values)
return action_scaler
def update(self):
    """Run one DQN optimisation step on a batch from the replay buffer.

    Does nothing until the buffer holds at least ``batch_size``
    transitions.  Terminal next-states are stored as ``None`` and are
    masked out so their bootstrap value is zero; the target network
    provides the bootstrap estimate.
    """
    self.t += 1
    if self.buffer_type == "Custom":
        # Reset exploration each time a scheduled change point is hit.
        if self.t == self.change_at[self.c]:
            print(self.epsilon)
            self.epsilon = 1.0
            if self.c <= len(self.change_at) - 2:
                self.c += 1
    batch_size = self.batch_size
    if len(self.replay_buffer) < batch_size:
        return
    batch = self.replay_buffer.sample(batch_size)
    state = batch.state
    action = torch.Tensor(batch.action).to(self.q_nn_param.device)
    # Actions are stored one-hot; argmax turns them back into indices
    # usable by gather().
    action_scaler = action.max(1)[1].unsqueeze(1).to(self.q_nn_param.device)
    reward = torch.Tensor(batch.reward).to(self.q_nn_param.device)
    next_state = batch.next_state
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                            batch.next_state)), device=self.q_nn_param.device, dtype=torch.bool).to(self.q_nn_param.device)
    non_final_next_states = torch.Tensor([s for s in next_state if s is not None]).to(self.q_nn_param.device)
    # Get only the Q-value relevant to the action actually taken.
    state_action_values = self.Q.get_value(state).gather(1, action_scaler)
    with torch.no_grad():
        next_state_action_values = torch.zeros(batch_size, device=self.q_nn_param.device).to(self.q_nn_param.device)
        next_state_action_values[non_final_mask] = self.Target_Q.get_value(non_final_next_states).max(1)[0]
    # Entries stay zero for terminal states, i.e. no bootstrap term.
    expected_state_action_values = (self.algo_param.gamma*next_state_action_values).unsqueeze(1) + reward.unsqueeze(1)
    loss = self.loss_function( state_action_values, expected_state_action_values)
    self.Q_optim.zero_grad()
    loss.backward()
    self.Q_optim.step()
def hard_update(self):
    """Copy the online network's weights into the target network."""
    self.Target_Q.load_state_dict(self.Q.state_dict())
def initalize(self):
    """Warm up the replay buffer with ``batch_size`` environment steps.

    Resets exploration state, then returns the state reached after the
    warm-up so training can continue from it.  Name kept as-is
    (``initalize`` [sic]) because external callers may rely on it.
    """
    # Initial phase: train after this by continuing with step(), training
    # each iteration and hard-updating at the update interval.
    self.steps_done = 0
    self.epsilon = 0.9
    state = self.env.reset()
    self.inital_state = state
    for i in range(self.batch_size):
        state = self.step(state)
    return state
| StarcoderdataPython |
3262767 | <filename>game.py<gh_stars>1-10
import pygame
import sys
import random
import networkx as nx
from edges import edges
class snake_game:
    """Pygame snake environment with graph-based food-reachability helpers."""

    def __init__(self):
        self.w = 600
        self.h = 600
        self.snake_size = 20
        self.fps = 40
        self.screen = pygame.display.set_mode([self.w,self.h])
        self.clock = pygame.time.Clock()
        self.framIter = 0
        self.snake = Player([290,290])
        self.snakeGroup = pygame.sprite.Group()
        self.snakeGroup.add(self.snake)
        self.foods = pygame.sprite.Group()
        # Grid connectivity graph used for reachability queries.
        self.graph = nx.Graph()
        self.graph.add_edges_from(edges)

    def reset(self):
        """Restore the initial game state and spawn the first food."""
        self.snake.rect.center = (290,290)
        self.snake.direction = pygame.K_UP
        self.snake.length = 0
        self.snake.tails = []
        self.snake.tailsObject = pygame.sprite.Group()
        self.foods = pygame.sprite.Group()
        self.screen = pygame.display.set_mode([self.w,self.h])
        self.framIter = 0
        self.update_screen()
        self.generate_food()

    def crash(self):
        """True when the head leaves the board or hits its own tail.

        NOTE(review): the y coordinate is range-checked against ``self.w``;
        this is only harmless while the board stays square (w == h).
        """
        if self.snake.rect.center[0] not in range(0,self.w) or self.snake.rect.center[1] not in range(0,self.w):
            return True
        elif pygame.sprite.spritecollide(self.snake, self.snake.tailsObject, True):
            return True
        else: return False

    def eat(self):
        """Consume food under the head: grow the snake and respawn food."""
        if pygame.sprite.spritecollide(self.snake, self.foods, True):
            self.snake.create_tail()
            self.generate_food()
            self.snake.length += 1
            return True
        else: return False

    def generate_food(self):
        """Place a new food on a random free cell; return its [x, y]."""
        while True:
            check = 1
            x = random.randint(0,29)*20 + 10
            y = random.randint(0,29)*20 + 10
            if (x,y) == self.snake.rect.center:
                check = 0
            for tail in self.snake.tails:
                if tail.rect.center == (x,y):
                    check = 0
            if check == 1:
                break
        food = Food([x,y])
        food.image.fill((110, 215, 45))
        self.foods.add(food)
        self.currentfood = food
        return [x,y]

    def lock_move(self,old_direction,lock_list):
        """True when the chosen direction was flagged blocked in *lock_list*."""
        relativD = ['forward','left','right']
        for i in range(3):
            if lock_list[i] == 0:
                if self.snake.direction == relativ_to_absolute(relativD[i],old_direction):
                    return True
        return False

    def one_step(self,direction,lock_list):
        """Advance one frame; return (reward, done, score)."""
        done = False
        reward = 0
        self.framIter += 1
        old_direction = self.snake.direction
        self.snake.direction = direction
        self.snake.run()
        if self.crash(): # check crash into wall or tails
            reward = -10
            done = True
        elif self.lock_move(old_direction,lock_list):
            # Penalise moving into a direction flagged as blocked.
            reward = -10
            # done = True
            # print('thats wrong')
            # print (self.currentfood.rect.center)
            # print('----')
            # if 1 not in lock_list:
            # print(lock_list)
            # print(self.snake.rect.center)
            # for tail in self.snake.tails:
            # print(tail.rect.center)
        else:
            done = False
            if self.eat(): reward = +10 # check eat foods
        return reward, done, self.snake.length

    def update_screen(self):
        """Redraw board, score, snake, tails and food."""
        score_font = pygame.font.SysFont("comicsansms", 25)
        value = score_font.render("Score: " + str(self.snake.length), True, (255, 255, 102))
        self.screen.fill((50, 153, 213))
        self.screen.blit(value, [0, 0])
        self.snakeGroup.draw(self.screen)
        self.foods.draw(self.screen)
        self.snake.tailsObject.draw(self.screen)
        pygame.display.update()
        pygame.display.flip()

    def get_next_move_pos(self,direction):
        """Return the head's grid position after a relative *direction* move."""
        abs_direction = relativ_to_absolute(direction,self.snake.direction)
        current_point = self.snake.rect.center
        for i in range(2):
            if abs_direction == self.snake.move[i]:
                current_point = (current_point[0] + self.snake.vx * [-1, 1][i] ,current_point[1])
            elif abs_direction == self.snake.move[2:4][i]:
                current_point = (current_point[0] ,current_point[1] + self.snake.vy * [-1, 1][i])
        return current_point

    def check_into_ground(self,point):
        """True when *point* lies strictly inside the 600x600 board."""
        if point[0] < 600 and point[0] > 0 and point[1] > 0 and point[1] < 600:
            return True
        else: return False

    def check_path(self):
        """Report food reachability after each relative move.

        Works on a copy of the grid graph with snake-occupied nodes
        removed.  Returns (forward_path, left_path, right_path) booleans;
        any graph lookup failure counts as unreachable.
        """
        graph = self.graph.copy()
        graph = self.remove_tails_graph(graph)
        forward_point = self.get_next_move_pos('forward')
        try:
            if self.check_into_ground(forward_point):
                forward_path = nx.has_path(graph, self.get_graph_node(forward_point), self.get_graph_node(self.currentfood.rect.center))
            else: forward_path = False
        except : forward_path = False
        try:
            left_point = self.get_next_move_pos('left')
            if self.check_into_ground(left_point):
                left_path = nx.has_path(graph, self.get_graph_node(left_point), self.get_graph_node(self.currentfood.rect.center))
            else: left_path = False
        except : left_path = False
        try:
            right_point = self.get_next_move_pos('right')
            if self.check_into_ground(right_point):
                right_path = nx.has_path(graph, self.get_graph_node(right_point), self.get_graph_node(self.currentfood.rect.center))
            else: right_path = False
        except: right_path = False
        del graph
        return (forward_path, left_path, right_path )

    def get_graph_node(self,point):
        """Map an (x, y) grid point to its integer graph-node id.

        NOTE(review): ids are built by string concatenation, so distinct
        points can collide (e.g. (110, 10) vs (1, 1010)) -- confirm the
        edge list in ``edges`` uses the same encoding.
        """
        node = int(str(point[0]) + str(point[1]))
        return node

    def remove_tails_graph(self,graph):
        """Remove nodes occupied by the snake (head plus leading tails)."""
        nodes = []
        for tail in range(self.snake.length-1):
            nodes.append(self.get_graph_node(self.snake.tails[tail].rect.center))
        if self.check_into_ground(self.snake.rect.center):
            nodes.append(self.get_graph_node(self.snake.rect.center))
        graph.remove_nodes_from(nodes)
        return graph
def relativ_to_absolute(relativeDirect, curent_ABSdirection):
    """Translate a snake-relative direction into an absolute pygame key.

    'forward' keeps the heading, 'left'/'right' rotate it one step
    counter-/clockwise on the UP->RIGHT->DOWN->LEFT ring.
    """
    offsets = {'forward': 0, 'left': 1, 'right': -1}
    clockwise = [pygame.K_UP, pygame.K_RIGHT, pygame.K_DOWN, pygame.K_LEFT]
    here = clockwise.index(curent_ABSdirection)
    return clockwise[(here - offsets[relativeDirect]) % 4]
def absolute_to_relative(curentDirect,Directions):
    """Reorder a 4-tuple of per-direction values into (forward, left, right).

    NOTE(review): assumes ``Directions`` is indexed clockwise starting at
    UP, matching ``relativ_to_absolute`` -- confirm with callers.
    """
    direction_Index = {pygame.K_UP : 0 ,pygame.K_RIGHT : 1 ,pygame.K_DOWN : 2 ,pygame.K_LEFT : 3 }
    return (Directions[(direction_Index[curentDirect]%4)], Directions[(direction_Index[curentDirect]-1)%4], Directions[(direction_Index[curentDirect]+1)%4])
class Player(pygame.sprite.Sprite):
    """The snake's head sprite; owns and moves the tail segments."""

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([20, 20])
        self.rect = self.image.get_rect()
        self.image.fill((155, 0, 155))
        self.move = [pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP, pygame.K_DOWN]
        self.vx = 20
        self.vy = 20
        self.rect.center = pos
        self.direction = pygame.K_UP
        self.length = 0
        self.tails = []
        self.tailsObject = pygame.sprite.Group()

    def run(self):
        """Move the head one cell in the current direction, then the tails."""
        if self.direction != None:
            for i in range(2):
                if self.direction == self.move[i]:
                    self.rect.x += self.vx * [-1, 1][i]
                elif self.direction == self.move[2:4][i]:
                    self.rect.y += self.vy * [-1, 1][i]
            self.tail_run()

    def tail_run(self):
        """Shift every tail segment into its predecessor's previous cell."""
        temp_direction1 = None
        temp_direction2 = None
        temp_pos1 = None
        temp_pos2 = None
        firstCheck = 1
        for tail in self.tails:
            if firstCheck == 1:
                # First segment snaps to the cell directly behind the head.
                temp_pos1 = tail.rect.center
                for i in range(2):
                    if self.direction == self.move[i]:
                        tail.rect.center = (self.rect.center[0] - 20 * [-1, 1][i],self.rect.center[1])
                    elif self.direction == self.move[2:4][i]:
                        tail.rect.center = (self.rect.center[0],self.rect.center[1] - 20 * [-1, 1][i])
                temp_direction1 = tail.direction
                tail.direction = self.direction
                firstCheck = 0
            else:
                # Each later segment inherits the previous segment's old
                # position and direction.
                temp_pos2 = tail.rect.center
                tail.rect.center = temp_pos1
                temp_pos1 = temp_pos2
                temp_direction2 = tail.direction
                tail.direction = temp_direction1
                temp_direction1 = temp_direction2

    def create_tail(self):
        """Append a new tail segment directly behind the head."""
        for i in range(2):
            if self.direction == self.move[i]:
                pos = (self.rect.center[0] - 20 * [-1, 1][i],self.rect.center[1])
            elif self.direction == self.move[2:4][i]:
                pos = (self.rect.center[0],self.rect.center[1] - 20 * [-1, 1][i])
        tail = Tail(pos)
        tail.direction = self.direction
        self.tailsObject.add(tail)
        self.tails.append(tail)

    def get_danger(self):
        """Distance from the head to the nearest obstacle per direction.

        Obstacles are the walls (board assumed 600x600 here) and any tail
        cell aligned with the head.  Returns minima in the order
        (up, right, left, down) -- note left/right are swapped relative
        to the local variable declaration order.
        """
        up_danger = [abs(0 - self.rect.center[1])]
        right_danger = [abs(600 - self.rect.center[0])]
        down_danger = [abs(600 - self.rect.center[1])]
        left_danger = [abs(0 - self.rect.center[0])]
        for tail in self.tails:
            if tail.rect.center[1] == self.rect.center[1]:
                if tail.rect.center[0] > self.rect.center[0]:
                    right_danger.append(tail.rect.center[0] - self.rect.center[0])
                else:
                    left_danger.append(self.rect.center[0] - tail.rect.center[0])
            if tail.rect.center[0] == self.rect.center[0]:
                if tail.rect.center[1] > self.rect.center[1]:
                    down_danger.append(tail.rect.center[1] - self.rect.center[1])
                else:
                    up_danger.append(self.rect.center[1] - tail.rect.center[1])
        return min(up_danger), min(right_danger), min(left_danger), min(down_danger)
class Tail(pygame.sprite.Sprite):
    """One body segment trailing behind the snake's head."""

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self)
        surface = pygame.Surface([20, 20])
        surface.fill((255, 100, 255))
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.center = pos
        self.direction = None
        self.move = [pygame.K_LEFT, pygame.K_RIGHT, pygame.K_UP, pygame.K_DOWN]
        self.vx = 20
        self.vy = 20
class Food(pygame.sprite.Sprite):
    """A food pellet placed on the board for the snake to eat."""

    def __init__(self, pos):
        pygame.sprite.Sprite.__init__(self)
        surface = pygame.Surface([20, 20])
        surface.fill((110, 215, 45))
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.center = pos
if __name__ == '__main__':
pass | StarcoderdataPython |
3381417 | <gh_stars>0
"""
Test serializers' validators
"""
from api.enums.vs_blueprint import VsComponentType, SliceServiceType
from api.tests.utils import catch_exception, error_catcher, mixer
from api.serializers.requests import VsBlueprintRequestSerializer
from api.serializers.vs_blueprint import VsdNsdTranslationRuleSerializer, VsdParameterValueRangeSerializer, \
VsBlueprintParameterSerializer, VsBlueprintSerializer, VsComponentSerializer, VsbForwardingPathEndPointSerializer, \
VsbEndpointSerializer
from api.serializers.vnf import OnBoardVnfPackageRequestSerializer
from api.serializers.vs_descriptor import VsDescriptorSerializer
def generate_data(cls, remove_fields=None):
    """Build a fake payload for serializer class *cls* via mixer.

    ``mixer.blend`` can return ``None`` for some classes, so blending is
    retried up to 100 times.  Fields listed in *remove_fields* are then
    stripped from the result to simulate missing input.
    """
    if remove_fields is None:
        remove_fields = []
    data = None
    # Retry: mixer.blend occasionally yields None; keep the first real value.
    for _ in range(100):
        data = mixer.blend(cls)
        if data is not None:
            break
    for field in remove_fields:
        data.pop(field, None)
    return data
# VsdParameterValueRangeSerializer
@catch_exception
def test_vsd_parameter_value_range_serializer_invalid_parameter_id(error_catcher):
field = "parameter_id"
data = generate_data(VsdParameterValueRangeSerializer, remove_fields=[field])
errors = VsdParameterValueRangeSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSD parameter value range without ID."
# VsdNsdTranslationRuleSerializer
@catch_exception
def test_vsd_nsd_translation_rule_serializer_invalid_input_none(error_catcher):
field = "input"
data = generate_data(VsdNsdTranslationRuleSerializer, remove_fields=[field])
errors = VsdNsdTranslationRuleSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSD NSD translation rule without matching conditions"
@catch_exception
def test_vsd_nsd_translation_rule_serializer_invalid_input_empty(error_catcher):
field = "input"
data = generate_data(VsdNsdTranslationRuleSerializer)
data[field] = []
errors = VsdNsdTranslationRuleSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSD NSD translation rule without matching conditions"
@catch_exception
def test_vsd_nsd_translation_rule_serializer_invalid_nst_id_and_nsd_id(error_catcher):
fields = ["nst_id", "nsd_id"]
data = generate_data(VsdNsdTranslationRuleSerializer, remove_fields=fields)
errors = VsdNsdTranslationRuleSerializer().validate(data)
assert len(errors) > 0 and errors.get(" & ".join(fields))[0] == "VSD NSD translation rule without NSD ID/NST ID"
@catch_exception
def test_vsd_nsd_translation_rule_serializer_invalid_nsd_id_and_nsd_version(error_catcher):
field = "nsd_version"
data = generate_data(VsdNsdTranslationRuleSerializer, remove_fields=[field])
errors = VsdNsdTranslationRuleSerializer().validate(data)
assert len(errors) > 0 and errors.get(f'nsd_id & {field}')[0] == "VSD NSD translation rule without NSD version"
# OnBoardVnfPackageRequestSerializer
@catch_exception
def test_on_board_vnf_package_request_serializer_invalid_name(error_catcher):
field = "name"
data = generate_data(OnBoardVnfPackageRequestSerializer, remove_fields=[field])
errors = OnBoardVnfPackageRequestSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "On board VNF package request without name"
@catch_exception
def test_on_board_vnf_package_request_serializer_invalid_version(error_catcher):
field = "version"
data = generate_data(OnBoardVnfPackageRequestSerializer, remove_fields=[field])
errors = OnBoardVnfPackageRequestSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "On board VNF package request without version"
@catch_exception
def test_on_board_vnf_package_request_serializer_invalid_provider(error_catcher):
    """Request without a provider must fail validation.

    Renamed from a duplicated ``..._invalid_version`` (three tests shared
    that name, so only the last one was collected by pytest).
    """
    field = "provider"
    data = generate_data(OnBoardVnfPackageRequestSerializer, remove_fields=[field])
    errors = OnBoardVnfPackageRequestSerializer().validate(data)
    assert len(errors) > 0 and errors.get(field)[0] == "On board VNF package request without provider"
@catch_exception
def test_on_board_vnf_package_request_serializer_invalid_checksum(error_catcher):
    """Request without a checksum must fail validation.

    Renamed from a duplicated ``..._invalid_version`` (name collision
    shadowed this test, so pytest never ran it).
    """
    field = "checksum"
    data = generate_data(OnBoardVnfPackageRequestSerializer, remove_fields=[field])
    errors = OnBoardVnfPackageRequestSerializer().validate(data)
    assert len(errors) > 0 and errors.get(field)[0] == "On board VNF package request without checksum"
@catch_exception
def test_on_board_vnf_package_request_serializer_invalid_vnf_package_path(error_catcher):
    """Request without a package path must fail validation.

    Renamed from a duplicated ``..._invalid_version`` (name collision
    shadowed the earlier definitions).
    """
    field = "vnf_package_path"
    data = generate_data(OnBoardVnfPackageRequestSerializer, remove_fields=[field])
    errors = OnBoardVnfPackageRequestSerializer().validate(data)
    assert len(errors) > 0 and errors.get(field)[0] == "On board VNF package request without package path"
# VsBlueprintRequestSerializer
@catch_exception
def test_vs_blueprint_request_serializer_invalid_vs_blueprint(error_catcher):
field = "vs_blueprint"
data = generate_data(VsBlueprintRequestSerializer, remove_fields=[field])
errors = VsBlueprintRequestSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "Onboard VS blueprint request without VS blueprint"
# VsBlueprintParameterSerializer
@catch_exception
def test_vs_blueprint_parameter_serializer_invalid_parameter_id(error_catcher):
field = "parameter_id"
data = generate_data(VsBlueprintParameterSerializer, remove_fields=[field])
errors = VsBlueprintParameterSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VS blueprint parameter without ID"
# VsComponentSerializer
@catch_exception
def test_vs_component_serializer_invalid_component_id(error_catcher):
field = "component_id"
data = generate_data(VsComponentSerializer, remove_fields=[field])
errors = VsComponentSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSB atomic component without ID."
@catch_exception
def test_vs_component_serializer_invalid_type_and_associated_vsb_id(error_catcher):
field = "associated_vsb_id"
data = generate_data(VsComponentSerializer, remove_fields=[field])
data['type'] = VsComponentType.SERVICE.value
errors = VsComponentSerializer().validate(data)
assert len(errors) > 0 and errors.get(f'type & {field}')[0] == "Component of type service without associated VSB id"
# VsbForwardingPathEndPointSerializer
@catch_exception
def test_vsb_forwarding_path_end_point_serializer_invalid_vs_component_id(error_catcher):
field = "vs_component_id"
data = generate_data(VsbForwardingPathEndPointSerializer, remove_fields=[field])
errors = VsbForwardingPathEndPointSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VS Forwarding Graph element without VS component"
@catch_exception
def test_vsb_forwarding_path_end_point_serializer_invalid_end_point_id(error_catcher):
    """Forwarding-graph element without an end point must fail validation.

    Renamed: it duplicated ``..._invalid_vs_component_id`` (defined just
    above) although it actually removes ``end_point_id``, so the first
    test was shadowed and never collected.
    """
    field = "end_point_id"
    data = generate_data(VsbForwardingPathEndPointSerializer, remove_fields=[field])
    errors = VsbForwardingPathEndPointSerializer().validate(data)
    assert len(errors) > 0 and errors.get(field)[0] == "VS Forwarding Graph element without end point"
# VsbEndpointSerializer
@catch_exception
def test_vsb_endpoint_serializer_invalid_end_point_id(error_catcher):
field = "end_point_id"
data = generate_data(VsbEndpointSerializer, remove_fields=[field])
errors = VsbEndpointSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSB end point without ID"
# VsBlueprintSerializer
@catch_exception
def test_vs_blueprint_serializer_invalid_version(error_catcher):
field = "version"
data = generate_data(VsBlueprintSerializer, remove_fields=[field])
errors = VsBlueprintSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VS blueprint without version"
@catch_exception
def test_vs_blueprint_serializer_invalid_name(error_catcher):
    """VS blueprint without a name must fail validation.

    Renamed: it duplicated ``test_vs_blueprint_serializer_invalid_version``
    (defined just above) although it removes ``name``, so the version
    test was shadowed and never collected.
    """
    field = "name"
    data = generate_data(VsBlueprintSerializer, remove_fields=[field])
    errors = VsBlueprintSerializer().validate(data)
    assert len(errors) > 0 and errors.get(field)[0] == "VS blueprint without name"
@catch_exception
def test_vs_blueprint_serializer_invalid_slice_service_type_and_embb_service_category(error_catcher):
field = "embb_service_category"
data = generate_data(VsBlueprintSerializer, remove_fields=[field])
data['slice_service_type'] = SliceServiceType.EMBB.value
errors = VsBlueprintSerializer().validate(data)
assert len(errors) > 0 and errors.get(f"slice_service_type & {field}")[0] == "VSB without slice service category"
@catch_exception
def test_vs_blueprint_serializer_invalid_slice_service_type_and_embb_urllc_service_category(error_catcher):
field = "urllc_service_category"
data = generate_data(VsBlueprintSerializer, remove_fields=[field])
data['slice_service_type'] = SliceServiceType.URLLC.value
errors = VsBlueprintSerializer().validate(data)
assert len(errors) > 0 and errors.get(f"slice_service_type & {field}")[0] == "VSB without slice service category"
# VsDescriptorSerializer
@catch_exception
def test_vs_descriptor_invalid_name(error_catcher):
field = "name"
data = generate_data(VsDescriptorSerializer, remove_fields=[field])
errors = VsDescriptorSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSD without name"
@catch_exception
def test_vs_descriptor_invalid_version(error_catcher):
field = "version"
data = generate_data(VsDescriptorSerializer, remove_fields=[field])
errors = VsDescriptorSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSD without version"
@catch_exception
def test_vs_descriptor_invalid_vs_blueprint_id(error_catcher):
field = "vs_blueprint_id"
data = generate_data(VsDescriptorSerializer, remove_fields=[field])
errors = VsDescriptorSerializer().validate(data)
assert len(errors) > 0 and errors.get(field)[0] == "VSD without VS blueprint ID"
| StarcoderdataPython |
194786 | <reponame>Neiron07/07033084-5cfd-4812-90a4-e4d24ffb6e3d
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import os
from flask import Flask, render_template, request, redirect, url_for, flash, send_from_directory, request
from werkzeug.utils import secure_filename
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
UPLOAD_FOLDER = 'uploads'
UPLOAD_FOLDER = os.path.abspath(UPLOAD_FOLDER)
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///shop.db'
db = SQLAlchemy(app)
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(50), nullable=False, unique=True)
password_hash = db.Column(db.String(100), nullable=False)
created_on = db.Column(db.DateTime(), default=datetime.utcnow)
updated_on = db.Column(db.DateTime(), default=datetime.utcnow, onupdate=datetime.utcnow)
def __repr__(self):
return "<{}:{}>".format(self.id, self.username)
class FileLink(db.Model):
__tablename__ = 'files'
id = db.Column(db.Integer(), primary_key=True)
link = db.Column(db.String(50), nullable=False, unique=True)
created_on = db.Column(db.DateTime(), default=datetime.utcnow)
updated_on = db.Column(db.DateTime(), default=datetime.utcnow, onupdate=datetime.utcnow)
def __repr__(self):
return "<{}:{}>".format(self.id, self.link)
@app.route('/', methods=['POST', 'GET'])
def login():
    """Create a user from the submitted form and go to the upload page.

    NOTE(review): despite its name this handler *registers* a user on
    every POST and never verifies credentials; the password is stored
    as-is in ``password_hash`` -- hash it before production use.
    """
    if request.method == "POST":
        username = request.form['name']
        password = request.form['pass']
        article = User(username=username, password_hash=password)
        try:
            db.session.add(article)
            db.session.commit()
            return redirect('/create')
        except Exception:
            # Most likely a UNIQUE violation on username; roll back so
            # the session stays usable for later requests.
            db.session.rollback()
            return "Ошибка"
    else:
        return render_template("login.html")
@app.route('/create', methods=['GET', 'POST'])
def upload_file():
    """Accept a file upload, save it to disk and record its download link."""
    if request.method == 'POST':
        # Check that the POST request actually contains a file part.
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # If the user does not select a file, the browser may submit an
        # empty part without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file:
            # secure_filename() strips non-ASCII characters, so files with
            # non-Latin names end up with mangled (possibly empty) names.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # url_for('uploaded_file', filename=filename) yields the
            # download path /files/<filename>.
            link = url_for('uploaded_file', filename=filename)
            art = FileLink(link=link)
            try:
                db.session.add(art)
                db.session.commit()
                return redirect(url_for('uploaded_file', filename=filename))
            except Exception:
                # Likely a UNIQUE violation on link; keep session usable.
                db.session.rollback()
                return "Ошибка"
    return render_template("index.html")
@app.route('/all')
def user():
    """Render the page listing every registered user."""
    return render_template("all.html", articles=User.query.all())
# Example handler returning files from app.config['UPLOAD_FOLDER'];
# Flask does not require a special function name to serve a file.
# @app.route('/uploads/<filename>')
@app.route('/files/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from the upload folder."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == '__main__':
# Localhost
app.debug = True
# Включение поддержки множества подключений
app.run(threaded=True) | StarcoderdataPython |
1720585 | '''
module docstring for density of states
'''
from numpy import exp,sqrt,pi
from semic.constants.constants import value
def density_of_states(m_star=0,energy=0,conduction_band_energy=0):
    '''
    3D density of quantum states as a function of energy.

    m_star: effective mass of a carrier in an energy band.
    energy: energy of the particle/quantum state.
    conduction_band_energy: conduction band edge energy (potential
        energy for electrons, including the electrostatic potential).

    D(E) = m_star*sqrt(2*m_star*(energy-conduction_band_energy))/(pi**2 * h_bar**3)
    '''
    # Fixed constant key: every sibling function uses the CODATA name
    # 'reduced Planck constant in eV s'; the old key
    # 'reduced Planck in eV s' does not match it.
    h_bar = value('reduced Planck constant in eV s')
    d_e = m_star*sqrt(2*m_star*(energy-conduction_band_energy)) / ((pi**2) * (h_bar**3))
    return d_e
def density_of_states_abm(m1_star=0,m2_star=0,m3_star=0,energy=0,conduction_band_energy=0):
    '''
    3D density of quantum states for an anisotropic band minimum.

    m1_star, m2_star, m3_star: effective masses along the three
        principal axes of the band minimum.
    energy: energy of the particle/quantum state.
    conduction_band_energy: conduction band edge energy.

    D(E) = sqrt(2*m1_star*m2_star*m3_star*(energy-conduction_band_energy))/(pi**2 * h_bar**3)
    '''
    # Fixed constant key to match the CODATA name used by every sibling
    # function ('reduced Planck constant in eV s').
    h_bar = value('reduced Planck constant in eV s')
    d_e = sqrt(2*m1_star*m2_star*m3_star*(energy-conduction_band_energy)) / ((pi**2) * (h_bar**3))
    return d_e
def density_of_states_non_parabolic(m_star=0,energy=0,conduction_band_energy=0,alpha=0):
    '''
    Density of quantum states for a non-parabolic energy band.

    m_star: effective mass of a carrier in an energy band.
    energy: energy of the particle/quantum state.
    conduction_band_energy: conduction band edge energy (potential
        energy for electrons, including the electrostatic potential).
    alpha: non-parabolicity coefficient (alpha = 0 recovers the
        parabolic-band result).

    D(E) = m_star*sqrt(2*m_star*(energy-conduction_band_energy)
           *[1+alpha*(energy-conduction_band_energy)])
           *[1+2*alpha*(energy-conduction_band_energy)]/(pi**2 * h_bar**3)
    '''
    h_bar = value('reduced Planck constant in eV s')
    energy_sub = energy-conduction_band_energy
    pi_h_product = (pi**2) * (h_bar**3)
    sqrt_product = sqrt(2*m_star*(energy_sub)*(1+alpha*(energy_sub)))
    d_e = m_star*sqrt_product*(1+2*alpha*energy_sub) / pi_h_product
    return d_e
def density_of_states_two_d(m_star=0):
    '''
    2D density of quantum states (energy independent).

    m_star: effective mass of a carrier in an energy band.

    D_2d = m_star/(pi*h_bar**2)
    '''
    h_bar = value('reduced Planck constant in eV s')
    return m_star / (pi * h_bar ** 2)
def density_of_states_one_d(m_star=0,energy=0,conduction_band_energy=0):
    '''
    1D density of quantum states.

    m_star: effective mass of a carrier in an energy band.
    energy: energy of the particle/quantum state.
    conduction_band_energy: conduction band edge energy.

    D_1d = 1/(pi*h_bar) * sqrt(m_star/2(energy-conduction_band_energy))
    '''
    h_bar = value('reduced Planck constant in eV s')
    kinetic = energy - conduction_band_energy
    return sqrt(m_star / (2 * kinetic)) / (pi * h_bar)
def density_of_states_photon(omega=0,speed_of_light=0,refractive_index=1):
    '''
    3D photon density of states.

    omega: angular frequency in rad/s.
    speed_of_light: speed of light in the medium.
    refractive_index: refractive index of the medium.

    d_photon3d = omega**2 * refractive_index**3 / (pi**2 * speed_of_light**3)
    '''
    numerator = omega ** 2 * refractive_index ** 3
    denominator = pi ** 2 * speed_of_light ** 3
    return numerator / denominator
def density_of_states_photon1d(refractive_index=1,speed_of_light=1):
    '''
    1D photon density of states.

    refractive_index: refractive index of the medium.
    speed_of_light: speed of light in the medium.

    d_photon1d = refractive_index / (pi * speed_of_light)
    '''
    return refractive_index / (pi * speed_of_light)
def equilibrium_energy_density(omega=0,speed_of_light=1,temp=1):
    '''
    Equilibrium (Planck) spectral energy density of an electromagnetic field.

    omega: angular frequency in rad/s.
    speed_of_light: speed of light in the medium.
    temp: temperature in kelvin.

    u_w = (h_bar * omega**3) / (pi**2 * c**3) * 1/(exp(h_bar*omega/(k_B*T)) - 1)
    '''
    h_bar = value('reduced Planck constant in eV s')
    kb_t = value('Boltzmann constant in eV/K') * temp
    # Bose-Einstein occupation factor.
    exponential = 1/(exp(h_bar*omega/kb_t) - 1)
    u_w = ((h_bar * omega**3) / ((pi**2)*(speed_of_light**3))) * exponential
    return u_w
def intensity_thermal_radiation(omega=0,speed_of_light=1,temp=1):
    '''
    Spectral intensity (flux) of thermal radiation.

    omega: angular frequency in rad/s.
    speed_of_light: speed of light in the medium.
    temp: temperature in kelvin.

    I_w = (h_bar * omega**3) / (4*pi**3 * c**2) * 1/(exp(h_bar*omega/(k_B*T)) - 1)
    '''
    h_bar = value('reduced Planck constant in eV s')
    kb_t = value('Boltzmann constant in eV/K') * temp
    # Bose-Einstein occupation factor.
    exponential = 1/(exp(h_bar*omega/(kb_t)) - 1)
    i_w = ((h_bar * omega**3) / ((4*(pi**3))*(speed_of_light**2))) * exponential
    return i_w
| StarcoderdataPython |
11371 | <filename>test/HPE3ParClient_base.py
# (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test base class of 3PAR Client."""
import os
import sys
import unittest
import subprocess
import time
import inspect
from pytest_testconfig import config
import datetime
from functools import wraps
from hpe3parclient import client, file_client
TIME = datetime.datetime.now().strftime('%H%M%S')
try:
# For Python 3.0 and later
from urllib.parse import urlparse
except ImportError:
# Fall back to Python 2's urllib2
from urlparse import urlparse
class HPE3ParClientBaseTestCase(unittest.TestCase):
user = config['TEST']['user']
password = config['<PASSWORD>']['<PASSWORD>']
flask_url = config['TEST']['flask_url']
url_3par = config['TEST']['3par_url']
debug = config['TEST']['debug'].lower() == 'true'
unitTest = config['TEST']['unit'].lower() == 'true'
port = None
remote_copy = config['TEST']['run_remote_copy'].lower() == 'true'
run_remote_copy = remote_copy and not unitTest
if run_remote_copy:
secondary_user = config['TEST_REMOTE_COPY']['user']
secondary_password = config['TEST_REMOTE_COPY']['pass']
secondary_url_3par = config['TEST_REMOTE_COPY']['3par_url']
secondary_target_name = config['TEST_REMOTE_COPY']['target_name']
ssh_port = None
if 'ssh_port' in config['TEST']:
ssh_port = int(config['TEST']['ssh_port'])
elif unitTest:
ssh_port = 2200
else:
ssh_port = 22
# Don't setup SSH unless needed. It slows things down.
withSSH = False
if 'domain' in config['TEST']:
DOMAIN = config['TEST']['domain']
else:
DOMAIN = 'UNIT_TEST_DOMAIN'
if 'cpg_ldlayout_ha' in config['TEST']:
CPG_LDLAYOUT_HA = int(config['TEST']['cpg_ldlayout_ha'])
if 'disk_type' in config['TEST']:
DISK_TYPE = int(config['TEST']['disk_type'])
CPG_OPTIONS = {'domain': DOMAIN,
'LDLayout': {'HA': CPG_LDLAYOUT_HA,
'diskPatterns': [{'diskType':
DISK_TYPE}]}}
else:
CPG_OPTIONS = {'domain': DOMAIN,
'LDLayout': {'HA': CPG_LDLAYOUT_HA}}
else:
CPG_LDLAYOUT_HA = None
CPG_OPTIONS = {'domain': DOMAIN}
if 'known_hosts_file' in config['TEST']:
known_hosts_file = config['TEST']['known_hosts_file']
else:
known_hosts_file = None
if 'missing_key_policy' in config['TEST']:
missing_key_policy = config['TEST']['missing_key_policy']
else:
missing_key_policy = None
def setUp(self, withSSH=False, withFilePersona=False):
self.withSSH = withSSH
self.withFilePersona = withFilePersona
cwd = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
if self.unitTest:
self.printHeader('Using flask ' + self.flask_url)
parsed_url = urlparse(self.flask_url)
userArg = '-user=%s' % self.user
passwordArg = <PASSWORD>' % self.password
portArg = '-port=%s' % parsed_url.port
script = 'HPE3ParMockServer_flask.py'
path = "%s/%s" % (cwd, script)
try:
self.mockServer = subprocess.Popen([sys.executable,
path,
userArg,
passwordArg,
portArg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE
)
except Exception:
pass
time.sleep(1)
if self.withFilePersona:
self.cl = file_client.HPE3ParFilePersonaClient(self.flask_url)
else:
self.cl = client.HPE3ParClient(self.flask_url)
if self.withSSH:
self.printHeader('Using paramiko SSH server on port %s' %
self.ssh_port)
ssh_script = 'HPE3ParMockServer_ssh.py'
ssh_path = "%s/%s" % (cwd, ssh_script)
self.mockSshServer = subprocess.Popen([sys.executable,
ssh_path,
str(self.ssh_port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
time.sleep(1)
else:
if withFilePersona:
self.printHeader('Using 3PAR %s with File Persona' %
self.url_3par)
self.cl = file_client.HPE3ParFilePersonaClient(self.url_3par)
else:
self.printHeader('Using 3PAR ' + self.url_3par)
self.cl = client.HPE3ParClient(self.url_3par)
if self.withSSH:
# This seems to slow down the test cases, so only use this when
# requested
if self.unitTest:
# The mock SSH server can be accessed at 0.0.0.0.
ip = '0.0.0.0'
else:
parsed_3par_url = urlparse(self.url_3par)
ip = parsed_3par_url.hostname.split(':').pop()
try:
# Now that we don't do keep-alive, the conn_timeout needs to
# be set high enough to avoid sometimes slow response in
# the File Persona tests.
self.cl.setSSHOptions(
ip,
self.user,
self.password,
port=self.ssh_port,
conn_timeout=500,
known_hosts_file=self.known_hosts_file,
missing_key_policy=self.missing_key_policy)
except Exception as ex:
print(ex)
self.fail("failed to start ssh client")
# Setup remote copy target
if self.run_remote_copy:
parsed_3par_url = urlparse(self.secondary_url_3par)
ip = parsed_3par_url.hostname.split(':').pop()
self.secondary_cl = client.HPE3ParClient(self.secondary_url_3par)
try:
self.secondary_cl.setSSHOptions(
ip,
self.secondary_user,
self.secondary_password,
port=self.ssh_port,
conn_timeout=500,
known_hosts_file=self.known_hosts_file,
missing_key_policy=self.missing_key_policy)
except Exception as ex:
print(ex)
self.fail("failed to start ssh client")
self.secondary_cl.login(self.secondary_user,
self.secondary_password)
if self.debug:
self.cl.debug_rest(True)
self.cl.login(self.user, self.password)
if not self.port:
ports = self.cl.getPorts()
ports = [p for p in ports['members']
if p['linkState'] == 4 and # Ready
('device' not in p or not p['device']) and
p['mode'] == self.cl.PORT_MODE_TARGET]
self.port = ports[0]['portPos']
def tearDown(self):
    """Log out of all clients and shut down any mock servers from setUp."""
    self.cl.logout()
    if self.run_remote_copy:
        self.secondary_cl.logout()
    if not self.unitTest:
        return
    # Mock servers exist only when running against the local test doubles.
    self.mockServer.kill()
    if self.withSSH:
        self.mockSshServer.kill()
def print_header_and_footer(func):
    """Decorator that brackets a test method with header/footer banners."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # args[0] is the TestCase instance the decorated method is bound to.
        test_case = args[0]
        test_name = unittest.TestCase.id(test_case)
        test_case.printHeader(test_name)
        outcome = func(*args, **kwargs)
        test_case.printFooter(test_name)
        return outcome
    return wrapper
def printHeader(self, name):
    """Emit the banner that marks the start of the named test."""
    banner = "\n##Start testing '%s'" % name
    print(banner)
def printFooter(self, name):
    """Emit the banner that marks the end of the named test.

    Fixes the banner format string, which was missing the closing quote
    around the test name ("'%s\n" instead of "'%s'\n"), so the footer now
    matches printHeader's quoting.
    """
    print("##Completed testing '%s'\n" % name)
def findInDict(self, dic, key, value):
    """Return True if any mapping in *dic* maps *key* to *value*.

    Fixes the previous implementation, which fell off the end and returned
    an implicit None when no entry matched; it now always returns a proper
    bool (backward compatible, since None and False are both falsy).
    """
    return any(key in entry and entry[key] == value for entry in dic)
| StarcoderdataPython |
3268830 | <filename>cartography/intel/aws/ec2/elastic_ip_addresses.py
import logging
from typing import Dict
from typing import List
import boto3
import neo4j
from botocore.exceptions import ClientError
from .util import get_botocore_config
from cartography.util import aws_handle_regions
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
@aws_handle_regions
def get_elastic_ip_addresses(boto3_session: boto3.session.Session, region: str) -> List[Dict]:
    """Fetch all Elastic IP address descriptions for *region* via the EC2 API.

    Re-raises any ClientError after logging it, so aws_handle_regions can
    decide how to treat region-level failures.
    """
    ec2 = boto3_session.client('ec2', region_name=region, config=get_botocore_config())
    try:
        return ec2.describe_addresses()['Addresses']
    except ClientError as e:
        logger.warning(f"Failed retrieve address for region - {region}. Error - {e}")
        raise
@timeit
def load_elastic_ip_addresses(
    neo4j_session: neo4j.Session, elastic_ip_addresses: List[Dict], region: str,
    current_aws_account_id: str, update_tag: int,
) -> None:
    """Ingest Elastic IP addresses into the graph.

    Creates or refreshes:
        (:ElasticIpAddress) nodes,
        (:ElasticIpAddress)-[:RESOURCE]->(:AWSAccount),
        (:EC2Instance)-[:ELASTIC_IP_ADDRESS]->(:ElasticIpAddress),
        (:NetworkInterface)-[:ELASTIC_IP_ADDRESS]->(:ElasticIpAddress)
    """
    logger.info(f"Loading {len(elastic_ip_addresses)} Elastic IP Addresses in {region}.")
    query = """
    UNWIND {elastic_ip_addresses} as eia
    MERGE (address: ElasticIPAddress{id: eia.AllocationId})
    ON CREATE SET address.firstseen = timestamp()
    SET address.instance_id = eia.InstanceId, address.public_ip = eia.PublicIp,
    address.allocation_id = eia.AllocationId, address.association_id = eia.AssociationId,
    address.domain = eia.Domain, address.network_interface_id = eia.NetworkInterfaceId,
    address.network_interface_owner_id = eia.NetworkInterfaceOwnerId,
    address.private_ip_address = eia.PrivateIpAddress, address.public_ipv4_pool = eia.PublicIpv4Pool,
    address.network_border_group = eia.NetworkBorderGroup, address.customer_owned_ip = eia.CustomerOwnedIp,
    address.customer_owned_ipv4_pool = eia.CustomerOwnedIpv4Pool, address.carrier_ip = eia.CarrierIp,
    address.region = {Region}, address.lastupdated = {update_tag}
    WITH address
    MATCH (account:AWSAccount{id: {aws_account_id}})
    MERGE (account)-[r:RESOURCE]->(address)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {update_tag}
    WITH address
    MATCH (instance:EC2Instance) WHERE instance.id = address.instance_id
    MERGE (instance)-[r:ELASTIC_IP_ADDRESS]->(address)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {update_tag}
    WITH address
    MATCH (ni:NetworkInterface{id: address.network_interface_id})
    MERGE (ni)-[r:ELASTIC_IP_ADDRESS]->(address)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = {update_tag}
    """
    neo4j_session.run(
        query,
        elastic_ip_addresses=elastic_ip_addresses,
        Region=region,
        aws_account_id=current_aws_account_id,
        update_tag=update_tag,
    )
@timeit
def cleanup_elastic_ip_addresses(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
    """Delete ElasticIPAddress nodes/relationships whose update tag is stale."""
    cleanup_job = 'aws_import_elastic_ip_addresses_cleanup.json'
    run_cleanup_job(cleanup_job, neo4j_session, common_job_parameters)
@timeit
def sync_elastic_ip_addresses(
    neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str],
    current_aws_account_id: str, update_tag: int, common_job_parameters: Dict,
) -> None:
    """Fetch, load, and then clean up Elastic IPs for every requested region."""
    for region in regions:
        logger.info(f"Syncing Elastic IP Addresses for region {region} in account {current_aws_account_id}.")
        region_addresses = get_elastic_ip_addresses(boto3_session, region)
        load_elastic_ip_addresses(neo4j_session, region_addresses, region, current_aws_account_id, update_tag)
    cleanup_elastic_ip_addresses(neo4j_session, common_job_parameters)
| StarcoderdataPython |
188752 | <filename>drawer/__init__.py
from .drawer import drawer | StarcoderdataPython |
4827547 | from ..utils import _CurrentSiteCommand, SiteManager
class LogsCommand(_CurrentSiteCommand):
    """
    Get the logs of the running site
    """

    def run(self, **kwargs):
        # Delegate straight to the site manager; extra kwargs are ignored.
        manager = SiteManager()
        manager.logs()
| StarcoderdataPython |
3246327 | <reponame>ADBI-george2/AnomalyDetection
from __future__ import print_function, division
from igraph import *
import numpy as np
import numpy.linalg as la
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
__author__ = 'panzer'
FEATURES = ["degree", "clustering_coefficient", "ego_net_edges"]
def say(*lst):
    """Print *lst* inline (no trailing newline), flushing so progress shows immediately."""
    print(*lst, end="", flush=True)
def list_files(folder):
    """
    List all .txt files in a folder
    :param folder: Name of the folder
    :return: list of "<folder>/<name>" paths for every .txt entry
    """
    paths = []
    for entry in os.listdir(folder):
        if entry.endswith(".txt"):
            paths.append("%s/%s" % (folder, entry))
    return paths
def make_graph(file_name):
    """
    Build an igraph Graph (with per-vertex features) from an edge-list file.

    File format: first line "<node_count> <edge_count>", then one
    "<src> <dst>" pair per line. The declared edge count is not used;
    all remaining lines are treated as edges.
    :param file_name: path to the snapshot file
    :return: Graph with degree/clustering/ego-net features on each vertex
    """
    with open(file_name, 'r') as handle:
        lines = handle.readlines()
    node_count, edge_count = map(int, lines[0].strip().split())
    edge_list = [map(int, line.strip().split()) for line in lines[1:]]
    g = Graph()
    g.add_vertices(node_count)
    g.add_edges(edge_list)
    for v in g.vs:
        assign_attributes(v, g)
    return g
def assign_attributes(vertex, graph):
    """
    Attach the tracked features to *vertex* in place.

    Features: degree, local clustering coefficient (NaN coerced to 0 for
    vertices with fewer than two neighbors), and the edge count of the
    vertex's 1-hop ego network.
    :param vertex: Vertex to be assigned attributes
    :param graph: Graph the vertex belongs to
    """
    neighborhood = graph.neighbors(vertex.index)
    ego = graph.subgraph([vertex.index] + neighborhood)
    vertex["degree"] = vertex.degree()
    coeff = graph.transitivity_local_undirected([vertex.index])[0]
    vertex["clustering_coefficient"] = coeff if not np.isnan(coeff) else 0
    vertex["ego_net_edges"] = len(ego.es)
def get_feature_vector(graphs, vertex_id, feature):
    """Time series of *feature* for vertex *vertex_id* across *graphs*."""
    series = []
    for g in graphs:
        series.append(g.vs[vertex_id][feature])
    return series
def pearson_rho(x_vector, y_vector):
    """Pearson correlation of two series, with NaN (e.g. constant input) coerced to 0."""
    rho = pearsonr(x_vector, y_vector)[0]
    if np.isnan(rho):
        return 0
    return rho
def get_principal_eigen_vector(matrix):
    """Return the eigenvector associated with the largest (real) eigenvalue.

    Fixes the previous implementation, which returned ``v[0]`` — the first
    *row* of the eigenvector matrix. ``numpy.linalg.eig`` returns the
    eigenvectors as *columns* of ``v``, in no particular order, so the
    principal eigenvector is the column selected by the largest eigenvalue.
    """
    w, v = la.eig(matrix)
    return v[:, np.argmax(w.real)]
def construct_correlation_matrix(all_graphs, feature, start, window=7):
    """Vertex-by-vertex Pearson correlation matrix of *feature* time series,
    computed over the *window* graph snapshots starting at index *start*."""
    window_graphs = all_graphs[start:start + window]
    n_vertices = len(window_graphs[0].vs)
    matrix = []
    for x in range(n_vertices):
        x_series = get_feature_vector(window_graphs, x, feature)
        row = [
            pearson_rho(x_series, get_feature_vector(window_graphs, y, feature))
            for y in range(n_vertices)
        ]
        matrix.append(row)
    return matrix
def vector_average(vectors):
    """Element-wise mean of a non-empty sequence of (numpy) vectors."""
    total = vectors[0]
    for extra in vectors[1:]:
        total = total + extra
    return total / len(vectors)
def construct_correlation_matrices(all_graphs, window=7):
    """For each tracked feature, build the sliding-window correlation
    matrices and their principal eigenvectors.

    :return: {feature: {"matrices": [...], "eigens": [...]}}
    """
    feature_info = {}
    for feature in FEATURES:
        matrices, eigens = [], []
        for start in range(len(all_graphs) - window):
            say(".")  # progress marker, one dot per window
            corr = construct_correlation_matrix(all_graphs, feature, start, window)
            matrices.append(corr)
            eigens.append(get_principal_eigen_vector(corr))
        feature_info[feature] = {"matrices": matrices, "eigens": eigens}
        print("%s completed" % feature)
    return feature_info
def compute_eigen_behaviour(feature_info, window=7):
    """Z score per time step: dot product of each eigenvector with the
    average of the preceding *window* eigenvectors, real part rounded to
    two decimals."""
    eigen_behaviours = {}
    for feature in FEATURES:
        eigens = feature_info[feature]["eigens"]
        scores = []
        for start in range(len(eigens) - window):
            current = eigens[start + window]
            typical = vector_average(eigens[start:start + window])
            scores.append(round(np.dot(current, typical).real, 2))
        eigen_behaviours[feature] = scores
    return eigen_behaviours
def save_eigen_behaviours(eigen_behaviours, file_name):
    """Write the per-feature Z-score series as whitespace-separated columns,
    one feature per column, with a header row of feature names."""
    rows = zip(*(eigen_behaviours[feature] for feature in FEATURES))
    lines = [" ".join(FEATURES) + "\n"]
    for row in rows:
        lines.append(" ".join(map(str, row)) + "\n")
    with open(file_name, 'w') as out:
        out.writelines(lines)
def plot_eigen_behaviours(eigen_behaviours, file_name, window=7):
    """Plot each feature's Z-score series on its own subplot and save a PNG.

    Fixes a Python 3 incompatibility: ``dict.values()[0]`` fails because
    dict views are not subscriptable; the series length is now taken with
    ``next(iter(...))``, which works on both Python 2 and 3.
    """
    series_length = len(next(iter(eigen_behaviours.values())))
    xs = range(window, series_length + window)
    colors = ["r", "g", "b"]
    f, axis_arr = plt.subplots(3, sharex=True)
    for i, feature in enumerate(FEATURES):
        ys = eigen_behaviours[feature]
        axis_arr[i].plot(xs, ys, "%s-" % colors[i])
        axis_arr[i].set_ylabel("Z Score")
    plt.xlabel("Time")
    plt.xlim(0, xs[-1] + 2)
    plt.savefig(file_name)
    plt.clf()
def _main(folder):
    """Run the full anomaly-detection pipeline over the snapshots in *folder*:
    build graphs, compute correlation matrices and eigen-behaviour scores,
    then save the time series as text and as a plot."""
    graphs = [make_graph(path) for path in list_files(folder)]
    print("Graphs Processed")
    feature_info = construct_correlation_matrices(graphs)
    eigen_behaviours = compute_eigen_behaviour(feature_info)
    # Output files are named after the last path component of the folder.
    dataset = folder.split("/")[-1]
    save_eigen_behaviours(eigen_behaviours, "%s_time_series.txt" % dataset)
    plot_eigen_behaviours(eigen_behaviours, "%s_time_series.png" % dataset)
if __name__ == "__main__":
    # Expect exactly one argument: the folder containing graph snapshots.
    if len(sys.argv) != 2:
        print("USE THE COMMAND : python anomaly.py <data folder>")
        exit()
    _main(sys.argv[1])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.