seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35636565941 | import arviz as az
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pymc as pm
import pytensor
import pytensor.tensor as pt
print(f"Running on PyMC v{pm.__version__}")
def my_model(theta, x):
    """Straight-line model: y = m * x + c for parameters theta = (m, c)."""
    slope, intercept = theta
    return slope * x + intercept
def my_loglike(theta, x, data, sigma):
    """Gaussian log-likelihood (up to an additive constant) of the line model.

    Parameters
    ----------
    theta : sequence of two floats
        Model parameters (m, c) forwarded to `my_model`.
    x : array-like
        Independent variable.
    data : array-like
        Observed values.
    sigma : float
        Noise standard deviation.

    Returns
    -------
    float
        Twice the Gaussian log-likelihood term.  NOTE(review): the original
        summed two byte-identical likelihood terms (and evaluated the model a
        second time into an unused variable `model_2`); the redundant work is
        removed here while the doubling is preserved exactly (x + x == 2*x in
        IEEE arithmetic) -- confirm whether a single term was intended.
    """
    model = my_model(theta, x)
    lh = -(0.5 / sigma**2) * np.sum((data - model) ** 2)
    return 2.0 * lh
# PyTensor Op wrapping the Python log-likelihood so PyMC can call it.
class LogLike(pt.Op):
    """Black-box log-likelihood Op.

    Called with a vector of parameter values (dvector) and returns a single
    scalar (dscalar): the log-likelihood of the stored data under those
    parameters.
    """

    itypes = [pt.dvector]  # input: vector of parameter values
    otypes = [pt.dscalar]  # output: scalar log-likelihood

    def __init__(self, loglike, data, x, sigma):
        """Capture the likelihood callable and the fixed quantities it needs.

        Parameters
        ----------
        loglike : callable
            Log-likelihood function with signature (theta, x, data, sigma).
        data : array-like
            The "observed" data the likelihood is evaluated against.
        x : array-like
            The independent variable the model is evaluated at.
        sigma : float
            Noise standard deviation.
        """
        self.likelihood = loglike
        self.data = data
        self.x = x
        self.sigma = sigma

    def perform(self, node, inputs, outputs):
        # Invoked by PyTensor with concrete numeric inputs: unpack the
        # parameter vector, evaluate the Python likelihood, store the scalar.
        (theta,) = inputs
        result = self.likelihood(theta, self.x, self.data, self.sigma)
        outputs[0][0] = np.array(result)
# set up our data
N = 10 # number of data points
sigma = 1.0 # standard deviation of noise
x = np.linspace(0.0, 9.0, N)
mtrue = 0.4 # true gradient
ctrue = 3.0 # true y-intercept
truemodel = my_model([mtrue, ctrue], x)
# make data: noisy observations of the true line, seeded for reproducibility
rng = np.random.default_rng(716743)
data = sigma * rng.normal(size=N) + truemodel
# create our Op
logl = LogLike(my_loglike, data, x, sigma)
# use PyMC to sample from the log-likelihood
# NOTE(review): this loop runs exactly once (range(0, 1)); presumably left
# over from running several chains/output files -- confirm before removing.
for j in range(0, 1):
with pm.Model():
# uniform priors on m and c
#m = pm.Uniform("m", lower=-10.0, upper=10.0)
#c = pm.Uniform("c", lower=-10.0, upper=10.0)
#print(vars(m))
# NOTE(review): the comprehension variable `x` shadows the data array `x`
# defined above; harmless in Python 3 (comprehensions have their own scope).
v=[pm.Uniform(x, lower=-10, upper=10) for x in ["m"]]
q=[pm.Uniform(x, lower=-10, upper=10) for x in ["c"]]
# convert m and c to a tensor vector
theta = pt.as_tensor_variable(v+q)
save_str= "/home/henney/Documents/Oxford/General_electrochemistry/heuristics/file_{0}.nc".format(j)
# use a Potential to "call" the Op and include it in the logp computation
pm.Potential("likelihood", logl(theta))
# Use custom number of draws to replace the HMC based defaults
idata_mh = pm.sample(3000, tune=1000)
# persist the trace, then plot it against the known true parameter values
idata_mh.to_netcdf(save_str)
# plot the traces
az.plot_trace(idata_mh, lines=[("m", {}, mtrue), ("c", {}, ctrue)])
| HOLL95/General_electrochemistry | heuristics/testing_pymc.py | testing_pymc.py | py | 3,438 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pymc.__version__",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pytensor.tensor.Op",
"l... |
19562145539 | """ This is a modified version of
https://github.com/ifeherva/optimizer-benchmark/blob/master/optimizers/__init__.py """
import argparse
import torch.optim as optim
import math
from .coolmom_pytorch import Coolmomentum
__all__ = ['parse_optimizer', 'supported_optimizers']
# Registry: CLI optimizer name -> (constructor, display name, default kwargs).
optimizer_defaults = {
'coolmomentum': (Coolmomentum, 'Coolmomentum', {
'lr': 0.01,
'momentum': 0.99,
'weight_decay': 5e-4,
# Cooling rate so momentum decays from 0.99 over 200 epochs of
# math.ceil(50000/128) iterations each (presumably CIFAR-sized data,
# 50000 samples at batch 128 -- confirm).
'beta': (1 - 0.99)**(1/(200*math.ceil(50000/128))),
'dropout': 0.0,
})
}
def supported_optimizers():
    """Return the names of every optimizer this package can construct."""
    return [name for name in optimizer_defaults]
def required_length(nargs):
    """Build an argparse Action class that enforces exactly `nargs` values.

    Used for tuple-valued defaults, where '+' alone would accept any count.
    """
    class RequiredLength(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if len(values) != nargs:
                raise argparse.ArgumentTypeError(
                    'argument "{}" requires exactly {} arguments'.format(self.dest, nargs))
            setattr(args, self.dest, values)
    return RequiredLength
def parse_optim_args(args, default_args):
    """Parse optimizer CLI options against their defaults.

    Builds one ``--<key>`` option per entry in `default_args` (bools become
    flags, lists/tuples become '+'-nargs options, tuples additionally enforce
    their exact length) and returns ``(namespace, suffix)`` where `suffix`
    names every option whose parsed value differs from its default, e.g.
    ``'_lr_0.1'``; it is empty when everything is left at the defaults.
    """
    parser = argparse.ArgumentParser(description='Optimizer parser')
    for key, default in default_args.items():
        if type(default) == bool:
            kwargs = {'action': 'store_false' if default else 'store_true'}
        elif type(default) == list:
            kwargs = {'type': type(default[0]), 'nargs': '+', 'default': default}
        elif type(default) == tuple:
            kwargs = {'type': type(default[0]), 'nargs': '+',
                      'action': required_length(len(default)), 'default': default}
        else:
            kwargs = {'type': type(default), 'default': default}
        parser.add_argument('--{}'.format(key), **kwargs)
    opt = parser.parse_args(args)
    name_parts = []
    for key, default in default_args.items():
        value = getattr(opt, key)
        if value != default:
            suffix = '' if type(default) == bool else '_{}'.format(value)
            name_parts.append('_{}{}'.format(key, suffix))
    return opt, ''.join(name_parts)
def parse_optimizer(optimizer, optim_args, model_params):
    """Look up `optimizer` by name, parse its CLI args, and instantiate it.

    Returns ``(optimizer_instance, display_name)`` where the display name is
    the registered name plus a suffix for every non-default option.
    Raises RuntimeError for unregistered optimizer names.
    """
    if optimizer not in optimizer_defaults:
        raise RuntimeError('Optimizer {} is not supported'.format(optimizer))
    optim_func, base_name, def_params = optimizer_defaults[optimizer]
    optim_opts, suffix = parse_optim_args(optim_args, def_params)
    full_name = '{}{}'.format(base_name, suffix)
    return optim_func(model_params, **vars(optim_opts)), full_name
| borbysh/coolmomentum | optimizers/__init__.py | __init__.py | py | 2,377 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "coolmom_pytorch.Coolmomentum",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "math.ceil",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.Action",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "argparse.... |
10989384624 | import sys
from towhee.runtime import register, pipe, ops, accelerate, AutoConfig, AutoPipes
from towhee.data_loader import DataLoader
from towhee.serve.triton import triton_client
from towhee.utils.lazy_import import LazyImport
# Legacy towhee._types
from towhee import types
_types = types # pylint: disable=protected-access
# Keep `import towhee._types` resolving for old callers by aliasing it.
sys.modules['towhee._types'] = sys.modules['towhee.types']
# Heavy submodules are loaded lazily so `import towhee` stays fast.
datacollection = LazyImport('datacollection', globals(), 'towhee.datacollection')
server_builder = LazyImport('server_builder', globals(), 'towhee.serve.server_builder')
api_service = LazyImport('api_service', globals(), 'towhee.serve.api_service')
# Public API of the towhee package.
# NOTE(review): the original listed 'AutoConfig' twice; the duplicate entry
# has been removed (the set of exported names is unchanged).
__all__ = [
    'dataset',
    'pipe',
    'triton_client',
    'AutoConfig',
    'build_docker_image',
    'build_pipeline_model',
    'AutoPipes',
    'DataLoader'
]
__import__('pkg_resources').declare_namespace(__name__)
def build_docker_image(
dc_pipeline: 'towhee.RuntimePipeline',
image_name: str,
cuda_version: str,
format_priority: list,
parallelism: int = 8,
inference_server: str = 'triton',
):
"""
Wrapper for lazy import build_docker_image.
Args:
dc_pipeline ('towhee.RuntimePipeline'):
The pipeline to build as a model in the docker image.
image_name (`str`):
The name of the docker image.
cuda_version (`str`):
Cuda version.
format_priority (`list`):
The priority order of the model format.
parallelism (`int`):
The parallel number.
inference_server (`str`):
The inference server.
Examples:
>>> import towhee
>>> from towhee import pipe, ops
>>> p = (
...     pipe.input('url')
...     .map('url', 'image', ops.image_decode.cv2_rgb())
...     .map('image', 'vec', ops.image_embedding.timm(model_name='resnet50'))
...     .output('vec')
... )
>>> towhee.build_docker_image(
...     dc_pipeline=p,
...     image_name='clip:v1',
...     cuda_version='11.7',
...     format_priority=['onnx'],
...     parallelism=4,
...     inference_server='triton'
... )
"""
# Delegate to the lazily imported server_builder module.
return server_builder.build_docker_image(dc_pipeline, image_name, cuda_version, format_priority, parallelism, inference_server)
def build_pipeline_model(
dc_pipeline: 'towhee.RuntimePipeline',
model_root: str,
format_priority: list,
parallelism: int = 8,
server: str = 'triton'
):
"""
Wrapper for lazy import build_pipeline_model.
Args:
dc_pipeline ('towhee.RuntimePipeline'):
The pipeline to build as a model.
model_root (`str`):
The model root path.
format_priority (`list`):
The priority order of the model format.
parallelism (`int`):
The parallel number.
server (`str`):
The server type.
Examples:
>>> import towhee
>>> from towhee import pipe, ops
>>> p = (
...     pipe.input('url')
...     .map('url', 'image', ops.image_decode.cv2_rgb())
...     .map('image', 'vec', ops.image_embedding.timm(model_name='resnet50'))
...     .output('vec')
... )
>>> towhee.build_pipeline_model(
...     dc_pipeline=p,
...     model_root='models',
...     format_priority=['onnx'],
...     parallelism=4,
...     server='triton'
... )
"""
# Delegate to the lazily imported server_builder module.
return server_builder.build_pipeline_model(dc_pipeline, model_root, format_priority, parallelism, server)
def DataCollection(data): # pylint: disable=invalid-name
"""
Wrapper for lazy import DataCollection
DataCollection is a pythonic computation and processing framework for unstructured
data in machine learning and data science. It allows a data scientist or researcher
to assemble data processing pipelines and do their model work (embedding,
transforming, or classification) with a method-chaining style API.
Args:
data ('towhee.runtime.DataQueue'):
The data to be stored in DataCollection in the form of DataQueue.
"""
# Delegate to the lazily imported datacollection module.
return datacollection.DataCollection(data)
def dataset(name: str, *args, **kwargs) -> 'TorchDataSet':
"""Get a dataset by name, and pass into the custom params.
Args:
name (str): Name of a dataset.
*args (any): Arguments of the dataset construct method.
**kwargs (any): Keyword arguments of the dataset construct method.
Returns:
TorchDataSet: The corresponding `TorchDataSet`.
Examples:
>>> from towhee import dataset
>>> type(dataset('fake', size=10))
<class 'towhee.data.dataset.dataset.TorchDataSet'>
"""
# torchvision is imported lazily so `import towhee` does not require torch.
from torchvision import datasets
from towhee.data.dataset.dataset import TorchDataSet
# Registry of supported dataset names; an unknown name raises KeyError.
dataset_construct_map = {
'mnist': datasets.MNIST, 'cifar10': datasets.cifar.CIFAR10, 'fake': datasets.FakeData
# 'imdb': IMDB # ,()
}
torch_dataset = dataset_construct_map[name](*args, **kwargs)
return TorchDataSet(torch_dataset)
| towhee-io/towhee | towhee/__init__.py | __init__.py | py | 5,238 | python | en | code | 2,843 | github-code | 1 | [
{
"api_name": "towhee.types",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sys.modules",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "towhee.utils.lazy_import.LazyImport",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tow... |
18057701170 | import tensorflow as tf
from tf_agents.networks import network
from tf_agents.agents.dqn import dqn_agent
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from sudoku_env import SudokuEnvironment
from utils import load_dataset
if __name__ == "__main__":
# Load train/eval sudoku puzzles and wrap them as TF-Agents TF environments.
ds_train, ds_eval = load_dataset("data/sudoku.csv")
train_py_env = SudokuEnvironment(ds_train)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_py_env = SudokuEnvironment(ds_eval)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
# Training hyper-parameters.
num_iterations = 20000
initial_collect_steps = 1000
collect_steps_per_iteration = 1
replay_buffer_max_length = 100000
batch_size = 64
learning_rate = 1e-3
log_interval = 200
num_eval_episodes = 30
eval_interval = 1000
# Q-network: small Conv2D feature extractor over the 9x9 board followed by a
# dense layer producing one logit per discrete action.
class MyQNetwork(network.Network):
def __init__(self, input_tensor_spec, action_spec, name="MyQnet"):
super(MyQNetwork, self).__init__(
input_tensor_spec=input_tensor_spec, state_spec=(), name=name
)
action_spec = tf.nest.flatten(action_spec)[0]
# Number of discrete actions spanned by the inclusive action range.
num_actions = action_spec.maximum - action_spec.minimum + 1
self._forward = tf.keras.Sequential(
[
# NOTE(review): assumes the observation reshapes to 9x9 cells with 9
# channels (presumably one-hot digit planes) -- confirm against
# SudokuEnvironment's observation spec.
tf.keras.layers.Reshape((9, 9, 9)),
tf.keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation="relu",
padding="same",
),
tf.keras.layers.BatchNormalization(),
# tf.keras.layers.Conv2D(
#     64,
#     kernel_size=(3, 3),
#     activation="relu",
#     padding="same",
# ),
# tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(
128,
kernel_size=(1, 1),
activation="relu",
padding="same",
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(num_actions),
]
)
def call(self, observation, step_type=None, network_state=()):
# Cast to float for the conv stack; return logits plus unchanged state.
observation = tf.cast(observation, dtype=tf.float32)
logits = self._forward(observation)
return logits, network_state
q_net = MyQNetwork(train_env.observation_spec(), train_env.action_spec())
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter = tf.Variable(0)
# Standard DQN agent trained with element-wise squared TD error.
agent = dqn_agent.DqnAgent(
train_env.time_step_spec(),
train_env.action_spec(),
q_network=q_net,
optimizer=optimizer,
td_errors_loss_fn=common.element_wise_squared_loss,
train_step_counter=train_step_counter,
)
agent.initialize()
# Uniform-random policy used only to seed the replay buffer.
random_policy = random_tf_policy.RandomTFPolicy(
train_env.time_step_spec(), train_env.action_spec()
)
def compute_avg_return(environment, policy, num_episodes=10):
    """Run `policy` for `num_episodes` full episodes; return the mean episode return."""
    total_return = 0.0
    for _ in range(num_episodes):
        step = environment.reset()
        episode_return = 0.0
        # Accumulate rewards until the episode terminates.
        while not step.is_last():
            action = policy.action(step)
            step = environment.step(action.action)
            episode_return += step.reward
        total_return += episode_return
    return (total_return / num_episodes).numpy()[0]
# Uniformly-sampled FIFO replay buffer holding collected transitions.
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_max_length,
)
def collect_step(environment, policy, buffer):
    """Take a single environment step with `policy` and record the transition."""
    current = environment.current_time_step()
    action = policy.action(current)
    nxt = environment.step(action.action)
    # Add trajectory to the replay buffer
    buffer.add_batch(trajectory.from_transition(current, action, nxt))
def collect_data(env, policy, buffer, steps):
    """Collect `steps` transitions from `env` into the replay buffer."""
    for _step in range(steps):
        collect_step(env, policy, buffer)
# Seed the buffer with random-policy experience before training.
# NOTE(review): 100 steps are collected here although initial_collect_steps =
# 1000 is defined above -- confirm which value is intended.
collect_data(train_env, random_policy, replay_buffer, steps=100)
# num_steps=2 yields adjacent (s, s') transition pairs for TD learning.
dataset = replay_buffer.as_dataset(
num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2
).prefetch(3)
iterator = iter(dataset)
# Wrap train() as a tf.function for graph-mode speed.
agent.train = common.function(agent.train)
# Reset the train step
agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, agent.policy, num_eval_episodes)
returns = [avg_return]
for _ in range(num_iterations):
# Collect a few steps using collect_policy and save to the replay buffer.
for _ in range(collect_steps_per_iteration):
collect_step(train_env, agent.collect_policy, replay_buffer)
# Sample a batch of data from the buffer and update the agent's network.
experience, unused_info = next(iterator)
train_loss = agent.train(experience).loss
step = agent.train_step_counter.numpy()
if step % log_interval == 0:
print("step = {0}: loss = {1}".format(step, train_loss))
if step % eval_interval == 0:
avg_return = compute_avg_return(
eval_env, agent.policy, num_eval_episodes
)
print("step = {0}: Average Return = {1}".format(step, avg_return))
returns.append(avg_return)
| emla2805/rl-sudoku | train.py | train.py | py | 5,909 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "utils.load_dataset",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sudoku_env.SudokuEnvironment",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tf_agents.environments.tf_py_environment.TFPyEnvironment",
"line_number": 18,
"usage_type... |
32023888041 | import pandas as pd
import plotly as py
import numpy as np
from plotly.graph_objs import *
from os import path
# One Choropleth trace per FERC natural-gas region.  Each trace paints its
# member states a single flat colour by pinning z to 1 on a two-colour scale
# (zmin=0, zmax=1, autocolorscale off).
# NOTE(review): the z lists do not match the length of the corresponding
# `locations` lists (e.g. trace1 has 11 z values for 7 states), and
# trace2/trace4/trace5 request hoverinfo='text' without defining `text`, so
# their hover labels are empty -- confirm both are intentional.
trace1 = Choropleth(
z=['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
showlegend=True,
autocolorscale=False,
colorscale=[[0, 'rgb(255,255,255)'], [1, '#a0db8e']],
hoverinfo='text',
locationmode='USA-states',
locations=['AL','FL','GA','MS','NC','SC','TN'],
name='Southeast',
text='Southeast',
showscale=False,
zauto=False,
zmax=1,
zmin=0,
marker=dict(line=dict(color='white')),
)
# Gulf region (orange).
trace2 = Choropleth(
z=['1', '1'],
autocolorscale=False,
colorscale=[[0, 'rgb(255,255,255)'], [1, 'rgb(255,167,0)']],
hoverinfo='text',
locationmode='USA-states',
locations=['LA','TX'],
name='Gulf',
showscale=False,
zauto=False,
zmax=1,
zmin=0,
marker=dict(line=dict(color='white'))
)
# Midwest region (brown).
trace3 = Choropleth(
z=['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1','1','1','1'],
autocolorscale=False,
colorscale=[[0, 'rgb(255,255,255)'], [1, 'rgb(141,85,36)']],
hoverinfo='text',
locationmode='USA-states',
locations=['AR','IL','IN','IA','KS','MI','MN','MO','NE','ND','OK','SD','WI'],
name='Midwest',
text='Midwest',
showscale=False,
zauto=False,
zmax=1,
zmin=0,
marker=dict(line=dict(color='white'))
)
# Northeast region (tan).
trace4 = Choropleth(
z=['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1','1','1','1','1','1'],
autocolorscale=False,
colorscale=[[0, 'rgb(255,255,255)'], [1, 'rgb(241,194,125)']],
hoverinfo='text',
locationmode='USA-states',
locations=['CT','KY','ME','MA','NH','NJ','NY','OH','PA','RI','VT','DE','MD','VA','WV'],
name='Northeast',
legendgroup='Northeast',
showscale=False,
zauto=False,
zmax=1,
zmin=0,
marker=dict(line=dict(color='white'))
)
# Western region (light blue).
trace5 = Choropleth(
z=['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
autocolorscale=False,
colorscale=[[0, 'rgb(255,255,255)'], [1, '#7aceeb']],
hoverinfo='text',
locationmode='USA-states',
locations=['AZ','CA','CO','ID','MT','NV','NM','OR','UT','WA','WY'],
name='Western',
showscale=False,
zauto=False,
zmax=1,
zmin=0,
marker=dict(line=dict(color='white')),
showlegend=True
)
# Albers-USA projection restricted to the US; legend order is reversed so the
# regions appear in trace order.
layout = Layout(
geo=dict(
countrycolor='rgb(102, 102, 102)',
countrywidth=0.1,
lakecolor='rgb(255, 255, 255)',
landcolor='rgba(255, 255, 255, 0.28)',
lonaxis=dict(
gridwidth=1.5999999999999999,
range=[-180, -50],
showgrid=False
),
projection=dict(
type='albers usa'
),
scope='usa',
showland=True,
showrivers=False,
showsubunits=True,
),
showlegend=True,
title='Federal Energy Regulatory Commission (FERC) Natural Gas Market Classification',
legend = dict(
traceorder = 'reversed'
)
)
fig = dict( data=([trace1, trace2, trace3, trace4, trace5]), layout=layout )
# py.plotly.image.save_as(fig, filename='US_map.png')
# Offline render: writes an interactive HTML file named after this script.
py.offline.plot(fig, validate=False, filename=path.basename(__file__)+".html")
| nshahr/Data-Visualization | ngas-ovr-map.py | ngas-ovr-map.py | py | 3,195 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "plotly.offline.plot",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "plotly.offline",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "os.path",... |
19659832895 | import math
import logging
from datetime import datetime
from drive_controller import DrivingController
# 제한 속도
SPEED_LIMIT = 100
logging.basicConfig(filename='{}.log'.format(datetime.now().strftime('%Y-%m-%d-%H-%M')), level=logging.DEBUG)
# Rule-based driving client: recovers from collisions, dodges obstacles, and
# otherwise steers by blending the track angle with a road-edge offset.
class DrivingClient(DrivingController):
def __init__(self):
# =========================================================== #
# Area for member variables =============================== #
# =========================================================== #
# Editing area starts from here
#
self.is_debug = False
# True while the car is recovering from an obstacle collision.
self.collision_flag = False
#
# Editing area ends
# ==========================================================#
super().__init__()
def control_driving(self, car_controls, sensing_info):
# =========================================================== #
# Area for writing code about driving rule ================= #
# =========================================================== #
# Editing area starts from here
#
logging.debug("=========================================================")
logging.debug("to middle: {}".format(sensing_info.to_middle))
logging.debug("collided: {}".format(sensing_info.collided))
logging.debug("car speed: {} km/h".format(sensing_info.speed))
logging.debug("is moving forward: {}".format(sensing_info.moving_forward))
logging.debug("moving angle: {}".format(sensing_info.moving_angle))
logging.debug("lap_progress: {}".format(sensing_info.lap_progress))
logging.debug("track_forward_angles: {}".format(sensing_info.track_forward_angles))
logging.debug("track_forward_obstacles: {}".format(sensing_info.track_forward_obstacles))
logging.debug("opponent_cars_info: {}".format(sensing_info.opponent_cars_info))
logging.debug("=========================================================")
###########################################################################
emergency_break = False
# 1. Check collision state
# 1-1. Collided with an obstacle: reverse, steering opposite the dodge direction
if is_collided(self, sensing_info):
car_controls.steering = -1 * avoid_obstacles(self, sensing_info)
car_controls.throttle = -1
# 1-2. Crashed into the fence off the road (no special handling yet)
else:
# 2. Check whether something must be avoided
# 2-1. Is there an obstacle to avoid?
if is_avoid_obstacles(sensing_info):
car_controls.steering = avoid_obstacles(self, sensing_info)
# 2-2. Is there an opponent car to avoid? (not implemented)
# 3. Straight vs. cornering
else:
# Flag an emergency if a sharp corner (>20 degrees) appears within
# the next two track segments.
for i in range(2):
if abs(sensing_info.track_forward_angles[i]) > 20:
emergency_break = True
break
angle = -(sensing_info.moving_angle - sensing_info.track_forward_angles[0]) / 110
middle = -sensing_info.to_middle / 50 if abs(sensing_info.to_middle) > self.half_road_limit - 3 else 0
# Originally: pick the larger of the angle- and middle-based steering
# values; the active line sums them instead (capped at 1).
# car_controls.steering = angle if abs(angle) > abs(middle) else middle
car_controls.steering = angle + middle if angle + middle < 1 else 1
if emergency_break:
# Steer harder into the corner in proportion to current speed.
car_controls.steering = car_controls.steering + (sensing_info.speed / 250) if car_controls.steering > 0 else car_controls.steering - (sensing_info.speed / 250)
# 4. If an opponent car is ahead, decide whether overtaking is possible (not implemented)
# Cut the throttle above the speed limit.
car_controls.throttle = 0 if sensing_info.speed > SPEED_LIMIT else 1
car_controls.brake = 0
if emergency_break and car_controls.throttle == 1 and sensing_info.speed > 60:
car_controls.throttle = 0
logging.debug("steering:{}, throttle:{}, brake:{}".format(car_controls.steering, car_controls.throttle, car_controls.brake))
#
# Editing area ends
# ==========================================================#
return car_controls
# ============================
# If you have NOT changed the <settings.json> file
# ===> player_name = ""
#
# If you changed the <settings.json> file
# ===> player_name = "My car name" (specified in the json file) ex) Car1
# ============================
def set_player_name(self):
player_name = ""
return player_name
# 1-1. Detect (and keep detecting) a collision with an obstacle.
def is_collided(self, sensing_info):
    """Report whether the car has hit, or is still stuck on, an obstacle.

    `self.collision_flag` stays set while the car remains within 10 units of
    the nearest obstacle at under 10 km/h, so the recovery manoeuvre in
    control_driving keeps running until the car has backed clear.
    """
    obstacles = sensing_info.track_forward_obstacles
    if obstacles:
        if self.collision_flag and obstacles[0]['dist'] < 10 and sensing_info.speed < 10:
            return True
        if sensing_info.collided:
            self.collision_flag = True
            return True
    self.collision_flag = False
    return False
# Check whether the nearest forward obstacle requires evasive steering.
def is_avoid_obstacles(sensing_info):
    """Return True when the nearest obstacle lies on the car's projected path.

    Projects the car's lateral position forward to the obstacle's distance
    using the current moving angle; anything more than 3 units off that
    projected line can be ignored.
    """
    obstacles = sensing_info.track_forward_obstacles
    if not obstacles:
        return False
    nearest = obstacles[0]
    # Only react while the obstacle is inside the speed-scaled look-ahead window.
    if not (5 < nearest['dist'] < sensing_info.speed * 0.8):
        return False
    logging.debug("장애물 발견")
    print("장애물 발견")
    tangent = math.tan(math.radians(sensing_info.moving_angle))
    projected = nearest['dist'] * tangent
    if abs(sensing_info.to_middle + projected - nearest['to_middle']) > 3:
        logging.debug("그냥 가세요")
        print("그냥 가세요")
        return False
    logging.debug("피해라")
    logging.debug("temp_to_middle:{}".format(projected))
    print("피해라")
    return True
# Steer away from the nearest obstacle: normally to the side opposite it; when
# the obstacle blocks the centre of the track, head for a road-edge corner.
def avoid_obstacles(self, sensing_info):
    """Return a steering value that moves the car clear of the nearest obstacle."""
    nearest_offset = sensing_info.track_forward_obstacles[0]['to_middle']
    if abs(nearest_offset) < 2.5:
        # Obstacle near the centre line: aim one third of the half-road width
        # past the car's current side of the road.
        edge = self.half_road_limit / 3
        if sensing_info.to_middle > 0:
            return (sensing_info.to_middle + edge) / 60
        return (sensing_info.to_middle - edge) / 60
    return -nearest_offset / 50
# Script entry point: instantiate the client and start the simulator loop.
if __name__ == '__main__':
client = DrivingClient()
client.run()
| holy-water/self-driving | driving_client.py | driving_client.py | py | 6,808 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "logging.D... |
20029740083 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
# Signup form extending Django's UserCreationForm with a mandatory,
# unique email address.
class SignupForm(UserCreationForm):
email = forms.EmailField(label="Email address",
help_text="A valid email address is required.",
error_messages={'invalid':"Please supply a valid email address"} )
class Meta:
model = User
fields = ('username', 'email')
def clean_email(self):
"""Validate that no existing user already registered this email."""
email = self.cleaned_data['email']
try:
User.objects.get(email=email)
except User.DoesNotExist:
# No user with this address: the email is available.
return email
raise forms.ValidationError("A user with that email address already exists.")
def save(self, commit=True):
"""Save the new user, copying the validated email onto the model."""
user = super(SignupForm, self).save(commit=False)
user.email = self.cleaned_data['email']
if commit:
user.save()
return user
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.EmailField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_... |
29455914344 | # Import the necessary modules.
import tkinter as tk
import tkinter.messagebox
import pyaudio
import wave
import os
import threading
class RecAUD:
    """Tkinter GUI for recording one WAV file per sentence, per subject.

    Each subject directory ``data/<subject>`` must contain ``data.txt`` whose
    first line is a source URL and whose remaining lines are sentences to
    read aloud.  Recordings are saved as
    ``output/<subject>/<subject>-<index>.wav`` and, after the last sentence
    has been visited, a manifest ``output/<subject>/output.txt`` is written.
    """

    def __init__(self, topic_names, chunk=3024, frmat=pyaudio.paInt16, channels=2, rate=44100):
        # --- session state ---
        self.topic_names = topic_names
        self.sentences = []         # sentences of the currently selected subject
        self.audio_name = None
        self.file_output = None     # open handle to the subject's output.txt
        self.url = None             # first line of the subject's data.txt
        self.cur_sentence = -1      # index of the sentence on screen (-1 = none)

        # --- main window ---
        self.main = tk.Tk()
        self.main.geometry('600x300')
        self.main.title('Voice Recording')

        # --- audio parameters ---
        self.CHUNK = chunk
        self.FORMAT = frmat
        self.CHANNELS = channels
        self.RATE = rate
        self.frames = []
        self.state = 0              # 0 -> idle, 1 -> recording
        self.playing_theard = None
        self.stream = pyaudio.PyAudio().open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)

        # --- layout frames ---
        self.TopFrame = tk.Frame(self.main)
        self.MidFrame = tk.Frame(self.main)
        self.BottomFrame = tk.Frame(self.main)
        self.TopFrame.pack()
        self.MidFrame.pack()
        self.BottomFrame.pack()

        # Subject selector; changetopic fires whenever the selection changes.
        self.topic_var = tk.StringVar(self.main)
        self.topic_var.set('Pick subject')
        self.topic_var.trace('w', self.changetopic)
        self.topicPopup = tk.OptionMenu(self.TopFrame, self.topic_var, *topic_names)

        # Sentence display.
        self.sentence_title = tk.Label(self.TopFrame, text="Sentence:")
        self.sentence_label = tk.Label(self.TopFrame, text="------------------", wraplength=600)
        self.topicPopup.grid(row=0, column=0, padx=50, pady=5)
        self.sentence_title.grid(row=1, column=0, columnspan=1)
        self.sentence_label.grid(row=2, column=0, columnspan=1, pady=5)

        # Control buttons.
        self.next = tk.Button(self.MidFrame, width=10, text='Next ->', command=lambda: self.nextSentence())
        self.pre = tk.Button(self.MidFrame, width=10, text='<- Previous', command=lambda: self.preSentence())
        self.strt_rec = tk.Button(self.MidFrame, width=10, text='Start Record', command=lambda: self.start_record())
        self.stop_rec = tk.Button(self.MidFrame, width=10, text='Stop Record', command=lambda: self.stop_record())
        self.pre.grid(row=1, column=0, pady=5, padx=5)
        self.next.grid(row=1, column=4, pady=5, padx=5)
        self.strt_rec.grid(row=1, column=1, pady=5, padx=5)
        self.stop_rec.grid(row=1, column=2, pady=5, padx=5)

        # Status line.
        self.status_title = tk.Label(self.BottomFrame, text="State:")
        self.status_label = tk.Label(self.BottomFrame, text="")
        self.status_title.grid(row=0, column=0, pady=5)
        self.status_label.grid(row=1, column=0, pady=5)

        tk.mainloop()

    def changetopic(self, *args):
        """Load the newly selected subject's sentences and open its manifest."""
        # BUGFIX: the original created a brand-new Label here without ever
        # grid()-ing it, so all later sentence-text updates went to an
        # invisible widget.  Reset the existing, gridded label instead.
        self.sentence_label['text'] = "------------------"
        topic_name = self.topic_var.get()
        fin = open("/".join(["data", topic_name, "data.txt"]), "r", encoding="utf-8")
        self.url = fin.readline()
        self.sentences = fin.readlines()
        # One flag per sentence: has it been recorded yet?
        self.record_tags = [False for _ in range(len(self.sentences))]
        self.cur_sentence = -1
        fin.close()
        # Close any previous subject's manifest, ensure the output folder exists.
        if self.file_output:
            self.file_output.close()
        output_folder = "/".join(["output", topic_name])
        if not os.path.exists(output_folder):
            os.makedirs(output_folder, exist_ok=True)
        # Open the manifest for the new subject.
        self.file_output = open("/".join(["output", topic_name, "output.txt"]), "w", encoding="utf-8")
        self.status_label['text'] = 'Current Subject: ' + topic_name

    def nextSentence(self):
        """Advance to the next sentence; after the last one, write the manifest."""
        topic_name = self.topic_var.get()
        if topic_name == 'Pick subject':
            return
        if self.cur_sentence >= len(self.sentences) - 1:
            # Every sentence visited: write the manifest exactly once.
            if self.file_output.closed:
                return
            self.file_output.write(self.url)
            # BUGFIX: the original used sentences.index(sentence), which is
            # O(n^2) and returns the wrong index for duplicate sentences.
            for index, sentence in enumerate(self.sentences):
                if self.record_tags[index]:
                    audio_name = topic_name + "-" + str(index) + ".wav"
                else:
                    # NOTE(review): unrecorded sentences are listed with the
                    # bare subject name (no index/extension) -- confirm this
                    # is the intended manifest format.
                    audio_name = topic_name + ""
                self.file_output.write(audio_name + "\n")
                self.file_output.write(sentence)
            self.file_output.close()
            self.status_label['text'] = 'Finish subject: ' + topic_name
            return
        # Show the next sentence.
        self.cur_sentence += 1
        # BUGFIX: the original probed "<index>.wav", but recordings are saved
        # as "<subject>-<index>.wav" (see start_record/preSentence), so the
        # "Recorded" status never appeared here.
        file_path = "/".join(["output", topic_name, topic_name + "-" + str(self.cur_sentence) + ".wav"])
        status = 'Sentence: ' + str(self.cur_sentence) + "/" + str(len(self.sentences) - 1)
        if os.path.exists(file_path):
            self.record_tags[self.cur_sentence] = True
            status += " Recorded"
        self.status_label['text'] = status
        self.sentence_label['text'] = self.sentences[self.cur_sentence]

    def preSentence(self):
        """Step back to the previous sentence, refreshing the recorded status."""
        # BUGFIX: the original compared against 'Pick Subject' (capital S),
        # which never matches the actual placeholder 'Pick subject', so
        # pressing Previous before choosing a subject crashed indexing the
        # empty sentence list.
        if self.topic_var.get() == 'Pick subject':
            return
        if self.cur_sentence > 0:
            self.cur_sentence -= 1
            self.sentence_label['text'] = self.sentences[self.cur_sentence]
        topic_name = self.topic_var.get()
        file_path = "/".join(["output", topic_name, topic_name + "-" + str(self.cur_sentence) + ".wav"])
        status = 'Sentence: ' + str(self.cur_sentence) + "/" + str(len(self.sentences) - 1)
        if os.path.exists(file_path):
            self.record_tags[self.cur_sentence] = True
            status += " Recorded"
        self.status_label['text'] = status
        self.sentence_label['text'] = self.sentences[self.cur_sentence]

    def start_record(self):
        """Record audio for the current sentence until stop_record() is called."""
        if self.cur_sentence == -1:
            return
        self.status_label['text'] = 'Recording line: ' + str(self.cur_sentence) + "/" + str(len(self.sentences) - 1)
        self.state = 1
        self.frames = []
        stream = pyaudio.PyAudio().open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
        # Pump the Tk event loop between chunks so the Stop button stays responsive.
        while self.state == 1:
            data = stream.read(self.CHUNK)
            self.frames.append(data)
            self.main.update()
        stream.close()
        topic_name = self.topic_var.get()
        # Dump the captured frames as <subject>-<index>.wav.
        wf = wave.open("/".join(["output", topic_name, topic_name + "-" + str(self.cur_sentence) + ".wav"]), 'wb')
        wf.setnchannels(self.CHANNELS)
        wf.setsampwidth(pyaudio.PyAudio().get_sample_size(self.FORMAT))
        wf.setframerate(self.RATE)
        wf.writeframes(b''.join(self.frames))
        wf.close()

    def stop_record(self):
        """Stop an in-progress recording and mark the sentence as recorded."""
        # BUGFIX: the original tested `self.st`, an attribute that is never
        # assigned anywhere, raising AttributeError every time Stop was
        # pressed.  The recording state lives in `self.state`.
        if self.state == 0:
            return
        self.state = 0
        self.record_tags[self.cur_sentence] = True
        self.status_label['text'] = 'Recorded line: ' + str(self.cur_sentence) + "/" + str(len(self.sentences) - 1)
# Gather every directory under data/ (at any nesting level) as a subject
# name, then launch the recording GUI.
topic_names = []
for _root, dirnames, _files in os.walk("data/."):
    topic_names.extend(dirnames)
guiAUD = RecAUD(topic_names)
{
"api_name": "pyaudio.paInt16",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Tk",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"... |
11481471531 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import re
try:
import jpype
except ImportError:
pass
from .. import jvm
from .. import utils
__all__ = ['Kkma']
class Kkma():
    """Wrapper for `Kkma <http://kkma.snu.ac.kr>`_.

    Kkma is a morphological analyzer and natural language processing system
    written in Java, developed by the Intelligent Data Systems (IDS)
    Laboratory at `SNU <http://snu.ac.kr>`_.  All analysis calls are
    delegated to a ``KkmaInterface`` Java object through JPype.

    :param jvmpath: The path of the JVM passed to :py:func:`.init_jvm`.
    """
    def nouns(self, phrase):
        """Noun extractor."""
        phrase = utils.preprocess(phrase)
        found = self.jki.extractNoun(phrase)
        if not found:
            return []
        return [found.get(i).getString() for i in range(found.size())]
    def pos(self, phrase):
        """POS tagger: returns a list of (surface form, tag) pairs."""
        phrase = utils.preprocess(phrase)
        analyzed = self.jki.morphAnalyzer(phrase)
        tagged = []
        if not analyzed:
            return tagged
        # Walk the Java result hierarchy: sentences -> eojeols -> morphemes.
        for s_idx in range(analyzed.size()):
            sent = analyzed.get(s_idx)
            for e_idx in range(sent.size()):
                eojeol = sent.get(e_idx)
                for m_idx in range(eojeol.size()):
                    morph = eojeol.get(m_idx)
                    tagged.append((morph.getString(), morph.getTag()))
        return tagged
    def sentences(self, phrase):
        """Sentence detection."""
        phrase = utils.preprocess(phrase)
        analyzed = self.jki.morphAnalyzer(phrase)
        if not analyzed:
            return []
        return [analyzed.get(i).getSentence() for i in range(analyzed.size())]
    def __init__(self, jvmpath=None):
        # Boot the JVM on first use, then instantiate the Java-side interface.
        if not jpype.isJVMStarted():
            jvm.init_jvm(jvmpath)
        package = jpype.JPackage('kr.lucypark.kkma')
        self.jki = package.KkmaInterface()  # Java instance
| kanghyojun/konlpy | konlpy/tag/_kkma.py | _kkma.py | py | 2,284 | python | en | code | null | github-code | 1 | [
{
"api_name": "jpype.isJVMStarted",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "jpype.JPackage",
"line_number": 73,
"usage_type": "call"
}
] |
8692823944 | import mysql.connector
import cv2
import pyttsx3
import pickle
import PySimpleGUI as sg
import time
"""
This is the gui program for face recognition.
verify() should be used with a subprocess and the childConn is one end of the pipe.
If unable to recognize face for a period longer than TIMEOUT, the program will terminate.
"""
TIMEOUT = 20
def verify(childConn):
    """Run webcam face recognition behind a PySimpleGUI window.

    On a successful match, sends ``(customerID, name)`` through the
    ``childConn`` pipe end and closes it; if nothing is recognized before
    TIMEOUT elapses (or the user cancels), sends ``(False, False)``.
    """
    # 1 Create database connection
    myconn = mysql.connector.connect(host="localhost", user="root", passwd="12345", database="facerecognition")
    cursor = myconn.cursor()
    #2 Load recognize and read label from model
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("train.yml")
    labels = {"person_name": 1}
    with open("labels.pickle", "rb") as f:
        labels = pickle.load(f)
        # invert the mapping: numeric id -> label string
        labels = {v: k for k, v in labels.items()}
    # create text to speech
    engine = pyttsx3.init()
    rate = engine.getProperty("rate")
    engine.setProperty("rate", 175)
    # Define camera and detect face
    face_cascade = cv2.CascadeClassifier('haarcascade/haarcascade_frontalface_default.xml')
    cap = cv2.VideoCapture(0)
    CONFIDENCE = 60
    TRIGGER = True
    sg.theme('DarkGray 3')
    # 3 Define pysimplegui setting -- confirmation dialog before recognition starts
    layout = [[sg.Text('Press OK to proceed to the Face Recognition System.')], [sg.OK(), sg.Cancel()]]
    win = sg.Window('Authentication System',
                    text_justification='center',
                    auto_size_text=False).Layout(layout)
    event, values = win.Read()
    if event is None or event =='Cancel':
        TRIGGER = False
    gui_confidence = CONFIDENCE
    win_started = False
    # 4 Open the camera and start face recognition
    t_end = time.time() + TIMEOUT
    success = False
    while TRIGGER:
        if t_end:
            break
        # NOTE(review): cap.read() can return frame=None if the camera fails;
        # cvtColor would then raise -- confirm acceptable for this tool.
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=3)
        end = False
        for (x, y, w, h) in faces:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            # predict the id and confidence for faces
            id_, conf = recognizer.predict(roi_gray)
            # 4.1 If the face is recognized
            if conf >= gui_confidence:
                font = cv2.QT_FONT_NORMAL
                # NOTE(review): `id` shadows the builtin and is never used below.
                id = 0
                id += 1
                customerID = labels[id_]
                color = (255, 0, 0)
                stroke = 2
                cv2.putText(frame, customerID, (x, y), font, 1, color, stroke, cv2.LINE_AA)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), (2))
                # Find the customer information in the database.
                # NOTE(review): customerID is string-interpolated into SQL; use a
                # parameterized query ("... WHERE customerID=%s", (customerID,))
                # to avoid SQL injection from a crafted label.
                select = "SELECT name FROM Customer WHERE customerID='%s'" % (customerID)
                name = cursor.execute(select)
                result = cursor.fetchall()
                data = "error"
                # NOTE(review): this loop variable clobbers the face-box `x`
                # above; harmless today only because `x` is not reused afterwards.
                for x in result:
                    data = x
                # If the customer's information is not found in the database
                if data == "error":
                    # the customer's data is not in the database
                    print("Customer with customerID", customerID, "is NOT FOUND in the database.")
                    end = True
                # If the customer's information is found in the database
                else:
                    """
                    Implement useful functions here.
                    """
                    print("Face Recognition Success")
                    print(result)
                    # Report the match to the parent process and stop looping.
                    childConn.send((customerID, result[0][0]))
                    childConn.close()
                    end = True
                    success = True
            # 4.2 If the face is unrecognized
            else:
                color = (255, 0, 0)
                stroke = 2
                font = cv2.QT_FONT_NORMAL
                cv2.putText(frame, "UNKNOWN", (x, y), font, 1, color, stroke, cv2.LINE_AA)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), (2))
                hello = ("Your face is not recognized")
                print(hello)
                engine.say(hello)
                # engine.runAndWait()
        if end:
            break
        # GUI: show the live camera frame in an embedded Image element
        imgbytes = cv2.imencode('.png', frame)[1].tobytes()
        if not win_started:
            win_started = True
            layout = [
                [sg.Text(f"Please hold still...", size=(30,1))],
                [sg.Image(data=imgbytes, key='_IMAGE_')],
                [sg.Exit()]
            ]
            win = sg.Window("Authentication System",
                            text_justification='left',
                            auto_size_text=False).Layout(layout).Finalize()
            image_elem = win.FindElement('_IMAGE_')
        else:
            image_elem.Update(data=imgbytes)
        event, values = win.Read(timeout=20)
        if event is None or event == 'Exit':
            break
    if not success:
        # Nothing recognized (timeout / cancel / exit): report failure.
        childConn.send((False, False))
        childConn.close()
    win.Close()
    cap.release()
| hongming-wong/COMP3278-Group-Project | back-end/faces_gui.py | faces_gui.py | py | 5,134 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 18,
"usage_type": "name"
},
{
"... |
15393129745 | from itertools import product
from useful_functions import converged
# import Gurobi but don't crash if it wasn't loaded
import warnings
warnings.formatwarning = lambda msg, *args: "warning: " + str(msg) + "\n"
try:
import gurobipy as G
except ImportError:
warnings.warn("Gurobi is required to solve MDPs by linear programming.")
def exact_primal_LP(mdp):
    """
    Construct an exponentially large LP to solve the MDP with Gurobi.
    This LP follows the standard construction given, for example, on
    p.25 of 'Competitive Markov Decision Processes' by Filar & Vrieze.
    The solution to this LP is the value of the initial state. The
    value of any other state can be extracted from the var.x of the
    state's lp variable. A list of these lp variables can be
    retrieved using lp.getVars().
    """
    lp = G.Model() # Throws a NameError if gurobipy wasn't loaded
    state_vars = {}
    # add a variable to the LP to represent the value of each state
    for s in mdp.reachable_states:
        state_vars[s] = lp.addVar(name=str(s), lb=-float("inf"))
    lp.update()
    # objective is the value of the initial state
    lp.setObjective(state_vars[mdp.initial])
    # can always cash out: each state's value is at least its terminal reward
    for s,v in state_vars.items():
        lp.addConstr(v >= mdp.terminal_reward(s))
    # backpropagation: a state's value dominates the one-step lookahead value
    # of every action applicable in it
    for state,action in product(mdp.reachable_states, mdp.actions):
        if action.prereq <= state and action.can_change(state, mdp.variables):
            # constant part: expected terminal reward if the action stops,
            # minus the action's cost
            const = action.stop_prob * mdp.terminal_reward(state)
            const -= action.cost
            expr = G.LinExpr(float(const))
            # plus the probability-weighted values of all successor states
            for out,prob in action.outcome_probs.items():
                lp_var = state_vars[out.transition(state)]
                expr += prob * lp_var
            lp.addConstr(state_vars[state] >= expr)
    lp.update()
    lp.optimize()
    return lp, state_vars
def exact_dual_LP(mdp):
    """
    Construct an exponentially large LP to solve the MDP with Gurobi.
    This LP follows the standard construction given, for example, on
    p.25 of 'Competitive Markov Decision Processes' by Filar & Vrieze.
    The solution to this LP is the value of the initial state. After
    optimize() has been called, the variables of the LP indicate the
    optimal policy as follows: if the variable v has v.name=s_a, then
    action a is optimal in state s iff v.x > 0.
    """
    lp = G.Model() # Throws a NameError if gurobipy wasn't loaded
    # one (state, action) occupancy variable per applicable action, plus an
    # explicit "STOP" pseudo-action in every state
    sa_vars = G.tuplelist()
    for s in mdp.reachable_states:
        sa_vars.append((s, "STOP", lp.addVar(name=str(s)+"_STOP", lb=0)))
        for a in mdp.actions:
            if a.prereq <= s:
                sa_vars.append((s, a, lp.addVar(name=str(s)+"_"+a.name, lb=0)))
    lp.update()
    # set objective: expected reward collected, weighted by occupancy
    obj = G.LinExpr()
    for s,a,var in sa_vars:
        rew = mdp.terminal_reward(s)
        if a == "STOP":
            obj += rew * var
        else:
            obj += (a.stop_prob * rew - a.cost) * var
    lp.setObjective(obj, G.GRB.MAXIMIZE)
    # set constraints: flow conservation -- occupancy out of a state equals
    # occupancy flowing in from its parents (plus 1 for the initial state)
    for s in mdp.reachable_states:
        constr = G.quicksum([v for _,__,v in sa_vars.select(s)])
        for parent,action in mdp.reachable_states[s]:
            prob = action.trans_prob(parent, s, mdp.variables)
            var = sa_vars.select(parent,action)[0][2]
            constr -= prob * var
        if s == mdp.initial:
            lp.addConstr(constr, G.GRB.EQUAL, G.LinExpr(1))
        else:
            lp.addConstr(constr, G.GRB.EQUAL, G.LinExpr(0))
    lp.update()
    lp.optimize()
    return lp, sa_vars
def action_value(mdp, state, action, values):
"""
Expected next-state value of perfrming action in state.
"""
value = -action.cost
for outcome,prob in action.outcome_probs.items():
next_state = outcome.transition(state)
value += prob * values[next_state]
value += action.stop_prob * mdp.terminal_reward(state)
return value
def state_values(mdp, policy, values, iters=1000, cnvrg_thresh=1e-6):
    """
    Expected value estimate for each state when following policy.

    An accurate estimate requires convergence, which may require a
    a large number of iterations. For modified policy iteration, iters
    can be set relatively low to return before convergence.

    policy maps each state to an action, with None meaning "stop";
    returns a dict mapping each reachable state to its value estimate.
    """
    # BUG FIX: with iters <= 0 the original returned an unbound name
    # (NameError); start from a copy of the current estimates instead.
    new_values = dict(values)
    for _i in range(iters):
        new_values = {}
        for state in mdp.reachable_states:
            action = policy[state]
            if action is None:
                # "stop" -- the state's value is just its terminal reward
                new_values[state] = mdp.terminal_reward(state)
            else:
                new_values[state] = action_value(mdp, state, action, values)
        if converged(values, new_values, cnvrg_thresh):
            break
        values = new_values
    return new_values
def greedy_policy(mdp, values):
    """Return a state->action map that is one-step optimal w.r.t. `values`.

    For each reachable state, picks the applicable action whose one-step
    lookahead value strictly beats the terminal reward of stopping;
    None means "stop here".
    """
    policy = {}
    for state in mdp.reachable_states:
        choice = None
        choice_val = mdp.terminal_reward(state)
        for act in (a for a in mdp.actions if a.prereq <= state):
            val = action_value(mdp, state, act, values)
            if val > choice_val:
                choice, choice_val = act, val
        policy[state] = choice
    return policy
def policy_iteration(mdp, policy_iters=1000, value_iters=100, \
                        cnvrg_thresh=1e-6):
    """
    Computes optimal policy and value functions for the MDP.

    This algorithm represents the full state space and therefore
    requires time and space exponential in the size of the factored MDP.

    If policy_iters is reached, the algorithm has not converged and the
    results may be sub-optimal. For true policy iteration, value_iters
    should be set very high; for modified policy iteration, it can be
    relatively small.

    Returns a (policy, values) pair.
    """
    values = {s: 0 for s in mdp.reachable_states}
    policy = {s: None for s in mdp.reachable_states}
    for _i in range(policy_iters):
        old_values = values
        policy = greedy_policy(mdp, values)
        values = state_values(mdp, policy, values, value_iters, cnvrg_thresh)
        if converged(old_values, values, cnvrg_thresh):
            break
    # BUG FIX: the original returned (policy, values) only from inside the
    # convergence branch and fell off the end (returning None) when
    # policy_iters was exhausted, despite the docstring promising possibly
    # sub-optimal results; it also set a dead `values_changed` flag.
    return policy, values
| btwied/MDP_interdiction | exact_solvers.py | exact_solvers.py | py | 5,531 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.formatwarning",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "warnings.warn",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gurobipy.Model",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "itertools.pro... |
72799928675 | import django.forms as forms
from django_utils.form_helpers import DivForm, FormValidator, RecaptchaForm
import django_utils.form_widgets as form_widgets
def build_flag_form(actions, reasons):
"""
Generates a DivForm to be used for submitting content flags.
"""
base_fields = {'action' : forms.ChoiceField(choices = actions, required = True),
'reason' : forms.ChoiceField(choices = reasons, required = True),
'details' : forms.CharField(max_length = 500,
min_length = 1,
required = False,
widget = form_widgets.StandardTextarea(attrs={'class':'full_width'}),
label = 'Additional info.')
}
FlagContentForm = type('FlagForm', (DivForm, ), base_fields)
return FlagContentForm | genghisu/eruditio | eruditio/shared_apps/django_moderation/forms.py | forms.py | py | 958 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ChoiceField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.ChoiceField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django... |
30277225234 | #!/usr/bin/env python
from __future__ import absolute_import
import cProfile
import logging
import sys
import time
import rosgraph
import roslaunch
import rospy
from pyros import PyrosROS
roscore_process = None
# BROKEN ?? start roscore beofre running this...
# if not rosgraph.masterapi.is_online():
# # Trying to solve this : http://answers.ros.org/question/215600/how-can-i-run-roscore-from-python/
# def ros_core_launch():
# roslaunch.main(['roscore', '--core']) # same as rostest_main implementation
#
# roscore_process = multiprocessing.Process(target=ros_core_launch)
# roscore_process.start()
#
# while not roscore_process.is_alive():
# time.sleep(0.2) # waiting for roscore to be born
#
# assert roscore_process.is_alive()
# A roscore must already be running (see the commented-out launcher above).
assert rosgraph.masterapi.is_online()
# Start roslaunch
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
# starting connection cache is available
rospy.set_param('/connection_cache/spin_freq', 2)  # 2 Hz
connection_cache_node = roslaunch.core.Node('rocon_python_comms', 'connection_cache.py', name='connection_cache',
                                            remap_args=[('~list', '/pyros_ros/connections_list'),
                                                        ('~diff', '/pyros_ros/connections_diff'),
                                                        ])
try:
    connection_cache_proc = launch.launch(connection_cache_node)
except roslaunch.RLException as rlexc:
    # NOTE(review): swallowing this leaves `connection_cache_proc` unbound,
    # which would raise NameError at the later `is_alive()` call -- confirm.
    pass  # ignore
time.sleep(2)
pub_proc = []
def start_pub_node(pubnum):
    """Launch one pyros_test string publisher node named after `pubnum`
    and record its process handle in the module-level `pub_proc` list."""
    node_name = 'string_pub_node_' + str(pubnum)
    # per-node parameters read by string_pub_node.py
    rospy.set_param('/' + node_name + '/topic_name', 'pub_' + str(pubnum))
    rospy.set_param('/' + node_name + '/test_message', 'msg_' + str(pubnum))
    node = roslaunch.core.Node('pyros_test', 'string_pub_node.py', name=node_name)
    try:
        pub_proc.append(launch.launch(node))
    except roslaunch.RLException as rlexc:
        logging.error(
            "pyros_test is needed to run this. Please verify that it is installed in your ROS environment")
        raise
# TODO : make MANY node / services / params to simulate complex robot and make profiling more realistic.
time.sleep(2)  # waiting for node to be up
# Instantiate the PyROS node under profile; enable the connection cache only
# if the cache process actually came up.
rosn = PyrosROS()
rosn.setup(
    services=['/test/empsrv', '/test/trgsrv'],
    params=['/test/confirm_param'],
    enable_cache=connection_cache_proc.is_alive()
)
print("Module LOADED")
def update_loop():
    """Drive rosn.update() in a tight loop while ramping publisher nodes
    up (first half of the loop) and back down LIFO (second half), printing
    a percentage progress indicator.  Intended to run under cProfile."""
    total_count = 1024*1024*255
    count = 0
    start = time.time()
    pct = 0
    last_pct = -1
    max_pubnodes = 42
    node_step = 0
    last_node_step = -1
    while count < total_count:
        # time is ticking
        now = time.time()
        timedelta = now - start
        start = now
        # feed the elapsed wall time into the interface update under profile
        rosn.update(timedelta)
        count += 1
        # creating and removing node while looping
        node_step = count * max_pubnodes * 2/ total_count
        if node_step != last_node_step:
            last_node_step = node_step
            if count < total_count/2:
                # adding node
                print("adding node {0}".format(node_step))
                start_pub_node(node_step)
            elif pub_proc:
                # stopping node LIFO
                print("stopping node {0}".format(len(pub_proc)-1))
                pub_proc.pop().stop()
        # progress indicator, printed only when the percentage changes
        pct = count * 100 / total_count
        if pct != last_pct:
            last_pct = pct
            sys.stdout.write("\r" + str(last_pct) + "%")
            sys.stdout.flush()
# In case you want to run kernprof here
#update_loop()
# Profile the whole update loop and print cumulative timings.
cProfile.run('update_loop()', sort='cumulative')
# ensuring all process are finished
for p in pub_proc:
    p.stop()
if connection_cache_proc is not None:
    connection_cache_proc.stop()
rospy.signal_shutdown('test complete')
if roscore_process is not None:
roscore_process.terminate() # make sure everything is stopped | pyros-dev/pyros | tests/test_pyros/profile_pyros_ros.py | profile_pyros_ros.py | py | 3,954 | python | en | code | 24 | github-code | 1 | [
{
"api_name": "rosgraph.masterapi.is_online",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rosgraph.masterapi",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "roslaunch.scriptapi.ROSLaunch",
"line_number": 34,
"usage_type": "call"
},
{
... |
11645362565 | # -*- coding: utf-8 -*-
import scrapy
import sqlite3
from ..items import IndexarticlesItem
class IndexarticleSpider(scrapy.Spider):
    """Spider that re-crawls index.hu article URLs stored in a local SQLite
    database and extracts each article's text, outbound links, and tags.

    NOTE: the database query below runs at class-definition (import) time.
    """
    name = 'indexarticle'
    allowed_domains = ['index.hu']
    conn = sqlite3.connect(r'C:\Users\Athan\OneDrive\Documents\Dissertation\Python\webscraperorigo\url.db')
    curr = conn.cursor()
    urls = []
    # Select article URLs from the sampled date ranges (2014-05 .. 2018-05).
    curr.execute("""SELECT DISTINCT * FROM 'indexUrl_tb' WHERE
                 url LIKE "%2018/01%" OR
                 url LIKE "%2018/02%" OR
                 url LIKE "%2018/03%" OR
                 url LIKE "%2018/04%" OR
                 url LIKE "%2018/05%" OR
                 url LIKE "%2017%" OR
                 url LIKE "%2016%" OR
                 url LIKE "%2015%" OR
                 url LIKE "%2014/05%" or
                 url LIKE "%2014/06%" or
                 url LIKE "%2014/07%" or
                 url LIKE "%2014/08%" or
                 url LIKE "%2014/09%" or
                 url LIKE "%2014/10%" or
                 url LIKE "%2014/11%" or
                 url LIKE "%2014/12%" ORDER BY url""")
    for row in curr.fetchall():
        # BUG FIX: the original stringified the whole row tuple and stripped
        # parentheses/quotes/commas, which corrupts any URL containing those
        # characters; take the url column directly instead.
        # (Assumes the table's first column is the url -- matches the
        # single-column usage implied by the original mangling.)
        urls.append(row[0])
    start_urls = urls

    def parse(self, response):
        """Extract body text, outbound link hrefs, and tags from one article
        page and yield them as a single IndexarticlesItem."""
        items = IndexarticlesItem()
        # Single-element lists: each field accumulates into one joined string.
        text = ['']
        connections = ['']
        tags = ['']
        start_url = ['']
        paragraphs = response.css(".cikk-torzs li::text , .cikk-torzs p ::text, .lead::text").extract()
        hrefs = response.css("p a").xpath("@href").extract()
        tag_labels = response.css(".cikk-cimkek .cimke::text").extract()
        start_url[0] = response.request.url
        for paragraph in paragraphs:
            text[0] += " " + paragraph
        for href in hrefs:
            connections[0] += " " + href
        for label in tag_labels:
            tags[0] += " " + label
        items['paragaph'] = text
        items['tags'] = tags
        items['connections'] = connections
        items['start_url'] = start_url
        yield items
| AJszabo/dissertation | indexarticles/indexarticles/spiders/indexarticle.py | indexarticle.py | py | 2,149 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "items.IndexarticlesItem",
"line_number": 39,
"usage_type": "call"
}
] |
29004478715 | # %% [markdown]
# # Question 2.
# Implement the Principal Component Analysis algorithm for reducing the dimensionality of the points
# given in the datasets: https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.
# data. Each point of this dataset is a 4-dimensional vector (d = 4) given in the first column of the datafile.
# Reduce the dimensionality to 2 (k = 2). This dataset contains 3 clusters. Ground-truth cluster IDs are
# given as the fifth column of the data file. In order to evaluate the performance of the PCA algorithm,
# perform clustering (in 3 clusters) before and after dimensionality reduction using the Spectral Clustering
# algorithm and then find the percentage of points for which the estimated cluster label is correct. Report
# the accuracy of the Spectral Clustering algorithm before and after the dimensionality reduction. Report
# the reconstruction error for k = 1, 2, 3. [15 Marks]
# 1
#
# %%
import sys
import numpy as np
import numpy.linalg as la
import pandas as pd
from sklearn.preprocessing import StandardScaler, LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'axes.facecolor': 'lightblue', 'figure.facecolor': 'lightblue'})
# %% [markdown]
# - KMeans Cluster From Scratch
# %%
class KMeans:
    """Plain k-means clustering (Lloyd's algorithm), implemented from scratch.

    Centroids are seeded with the first `n_clusters` rows of the data; the
    algorithm then alternates assignment and centroid-update steps until the
    relative centroid movement drops below `tollerance` or `max_iter` sweeps
    have run.
    """
    def __init__(self, n_clusters=2, tollerance=0.001, max_iter=10):
        self.k = n_clusters
        self.tollerance = tollerance
        self.max_iter = max_iter
    def fit_predict(self, data):
        """Cluster `data` (shape (n_samples, n_features)) and return an
        array of cluster labels, one per sample."""
        # Seed: centroid i starts at the i-th data point.
        self.centroids = {c: data[c] for c in range(self.k)}
        for _sweep in range(self.max_iter):
            # Assignment step: bucket sample indices by nearest centroid.
            self.classifications = {c: [] for c in range(self.k)}
            for idx, sample in enumerate(data):
                dists = [la.norm(sample - self.centroids[c])
                         for c in self.centroids]
                self.classifications[dists.index(min(dists))].append(idx)
            # Update step: move each centroid to the mean of its members.
            previous = dict(self.centroids)
            for c in self.classifications:
                self.centroids[c] = np.average(
                    data[self.classifications[c]], axis=0)
            # Stop once every centroid's relative movement is within tolerance.
            moved = False
            for c in self.centroids:
                shift = np.sum(
                    (self.centroids[c] - previous[c]) / previous[c] * 100.0)
                if shift > self.tollerance:
                    moved = True
            if not moved:
                break
        labels = np.empty([len(data)])
        for c in self.classifications:
            labels[self.classifications[c]] = c
        return labels
# %% [markdown]
# - Utility functions for Spectral Clustering from scratch
# %%
def pairwise_distances(X, Y):
    """Return the (len(X), len(Y)) matrix of Euclidean distances between
    every row of X and every row of Y."""
    out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
    for r in range(X.shape[0]):
        for c in range(Y.shape[0]):
            out[r, c] = la.norm(X[r] - Y[c])
    return out
def nearest_neighbor_graph(X):
    '''
    Calculates nearest neighbor adjacency graph.
    https://en.wikipedia.org/wiki/Nearest_neighbor_graph

    Returns a symmetric 0/1 matrix W with W[i, j] = 1 iff j is among the
    n nearest neighbors of i, or i is among the n nearest neighbors of j.
    '''
    X = np.array(X)
    # for smaller datasets use sqrt(#samples) as n_neighbors. max n_neighbors = 10
    n_neighbors = min(int(np.sqrt(X.shape[0])), 10)
    # calculate pairwise distances
    A = pairwise_distances(X, X)
    # sort each row by the distance and obtain the sorted indexes
    sorted_rows_ix_by_dist = np.argsort(A, axis=1)
    # pick up first n_neighbors for each point (i.e. each row),
    # skipping column 0 because that is the point itself
    nearest_neighbor_index = sorted_rows_ix_by_dist[:, 1:n_neighbors+1]
    # initialize an nxn zero matrix
    W = np.zeros(A.shape)
    # for each row, set the entries corresponding to n_neighbors to 1
    for row in range(W.shape[0]):
        W[row, nearest_neighbor_index[row]] = 1
    # Symmetrize: keep an edge whenever either endpoint lists the other as a
    # neighbor.  (Replaces the original O(n^2) Python double loop with an
    # equivalent vectorized element-wise maximum over 0/1 entries.)
    W = np.maximum(W, W.T)
    return W
def compute_laplacian(W):
    '''
    Unnormalized graph Laplacian, L = D - W, where D is the diagonal
    degree matrix built from the row sums of W.
    Reference: https://en.wikipedia.org/wiki/Laplacian_matrix
    '''
    degrees = W.sum(axis=1)
    return np.diag(degrees) - W
def get_eigvecs(L, k):
    '''
    Return the eigenvectors of L corresponding to its k smallest
    eigenvalues.

    Uses numpy's general eigendecomposition; the eigenvalues and vectors
    can be complex when W comes from a NearestNeighbor adjacency matrix.
    '''
    eigvals, eigvecs = la.eig(L)
    # indices of the k smallest eigenvalues
    smallest_k = np.argsort(eigvals)[:k]
    return eigvecs[:, smallest_k]
# %% [markdown]
# - Spectral Clustering from scratch
# %%
def spectral_clustering(X, k):
    """Cluster the rows of X into k groups via unnormalized spectral
    clustering: build a nearest-neighbor graph, embed the points with the
    k smallest Laplacian eigenvectors, then run k-means on the embedding."""
    adjacency = nearest_neighbor_graph(X)
    laplacian = compute_laplacian(adjacency)
    embedding = get_eigvecs(laplacian, k)
    labels = KMeans(n_clusters=k).fit_predict(embedding)
    return np.ndarray.tolist(labels)
# %% [markdown]
# - Utility function for confusion Matrix And Accuracy Report
# %%
def confusion_matrix(actual, pred):
    """Build an (n_classes, n_classes) confusion matrix: rows are actual
    labels, columns are predicted labels, both indexed by the sorted unique
    labels of `actual`."""
    classes = np.unique(actual)
    # map each label to its class index
    actual_ix = np.array([np.where(classes == label)[0][0] for label in actual])
    pred_ix = np.array([np.where(classes == label)[0][0] for label in pred])
    cm = np.zeros((len(classes), len(classes)))
    for i in range(len(actual_ix)):
        cm[actual_ix[i]][pred_ix[i]] += 1
    return cm
def confusionMatrixAndAccuracyReport(Y_test, Y_pred, title):
    """Plot a confusion-matrix heatmap and print overall and per-class
    accuracy for the given true/predicted labels."""
    cm = confusion_matrix(Y_test, Y_pred)
    # overall accuracy = trace / total number of samples
    overallAccuracy = np.trace(cm)/sum(cm.flatten())
    # per-class one-vs-rest accuracy: (TP + TN) / total for each class n
    classwiseAccuracy = np.zeros(len(cm))
    for n in range(len(cm)):
        for i in range(len(cm)):
            for j in range(len(cm)):
                if (i != n and j != n) or (i == n and j == n):
                    classwiseAccuracy[n] += cm[i][j]
    classwiseAccuracy /= sum(cm.flatten())
    plt.figure(figsize=(6, 6))
    plt.title('{0} Accuracy Score: {1:3.3f}'.format(
        title, overallAccuracy), size=12)
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
    sns.heatmap(data=cm, annot=True, square=True, cmap='Blues')
    plt.show()
    print('Overall Accuracy Score: {0:3.3f}'.format(overallAccuracy))
    print('Classwise Accuracy Score: {0}'.format(classwiseAccuracy))
# %% [markdown]
# - Data load
# %%
# Load the iris dataset; path may be overridden on the command line.
data_path = sys.argv[1] if len(sys.argv) > 1 else 'data-ques-2/iris.data'
# NOTE(review): ' Petal.Length' has a leading space; harmless only because
# the same spelling is reused in `features` below -- consider normalizing.
dataset = pd.read_csv(data_path, names=[
    'Sepal.Length', 'Sepal.Width', ' Petal.Length', 'Petal.Width', 'Class'])
#print (sys.argv)
dataset.head()
# %%
# Split into the 4-dimensional feature matrix and the class labels.
features = ['Sepal.Length', 'Sepal.Width', ' Petal.Length', 'Petal.Width']
X = dataset[features].values
Y = dataset['Class'].values
# %%
# Standardize features and integer-encode the class names.
X = StandardScaler().fit_transform(X)
Y_bin = LabelEncoder().fit_transform(Y)
# %% [markdown]
# - PCA Decomposition from scratch
# %%
class PCA:
    """Principal Component Analysis from scratch: eigendecompose the
    covariance matrix and keep the `n_components` leading components."""
    def __init__(self, n_components=2):
        self.n_components = n_components
    def fit_transform(self, X_data):
        """Fit on X_data and return its projection onto the first
        n_components principal axes."""
        # center the data around its column means
        self.X_mean = np.mean(X_data, axis=0)
        centered = X_data - self.X_mean
        # eigendecompose the covariance matrix
        eigenvals, eigenvecs = la.eig(np.cov(centered.T))
        # order components by decreasing eigenvalue
        order = np.argsort(eigenvals)[::-1]
        self.eigenvecs = eigenvecs[:, order]
        eigenvals = eigenvals[order]
        # projection matrix: the leading n_components eigenvectors
        self.X_evecs_n = self.eigenvecs[:, :self.n_components]
        return np.dot(X_data - self.X_mean, self.X_evecs_n)
    def inverse_transform(self, data):
        """Map projected data back into the original feature space."""
        return np.dot(data, self.X_evecs_n.T) + self.X_mean
# %%
#from sklearn.decomposition import PCA
# Reduce the standardized features to 2 principal components for plotting.
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(X)
principalDf = pd.DataFrame(data=principalComponents, columns=['PC 1', 'PC 2'])
# %%
sns.scatterplot(data=principalDf, x='PC 1',
                y='PC 2', hue=Y, palette='rocket_r')
# %%
# NOTE(review): k-means cluster ids are arbitrary, so this "accuracy" against
# ground-truth labels is only meaningful up to a label permutation.
print('\nBefore Dimensionality Reduction:\n')
pred = spectral_clustering(X, 3)
confusionMatrixAndAccuracyReport(Y_bin, pred,'\nBefore Dimensionality Reduction:\n')
# %%
print('\nAfter Dimensionality Reduction:\n')
predPca = spectral_clustering(principalDf, 3)
confusionMatrixAndAccuracyReport(Y_bin, predPca,'\nAfter Dimensionality Reduction:\n')
print()
# %%
def reconstructionError(X_train, X_projected):
    """Mean per-sample squared L2 reconstruction error, rounded to 3 decimals."""
    per_sample = np.sum((X_train - X_projected) ** 2, axis=1)
    return np.round(per_sample.mean(), 3)
# %%
# Report the PCA reconstruction error for k = 1, 2, 3 retained components.
# BUG FIX: the original iterated range(3) (fitting n_components = 0, 1, 2)
# while printing k+1, so the reported "k" never matched the fitted model.
for k in range(1, 4):
    pca_k = PCA(n_components=k)
    pc_x_train = pca_k.fit_transform(X)
    pc_x_projected = pca_k.inverse_transform(pc_x_train)
    print(
        f'The reconstruction error for k = {k} is :: {reconstructionError(X,pc_x_projected)}')
| debonil/ml-assignments | Assignment3/M21AIE225_PA1_2.py | M21AIE225_PA1_2.py | py | 9,673 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "seaborn.set",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "numpy.average",
"line_... |
28986345346 | from django.shortcuts import render, redirect
from django.views.decorators.clickjacking import xframe_options_exempt
import json
import sys
if '/God' not in sys.path:
sys.path.append('/God')
import Twitter
import Github
import datetime
import NatureLang
import Sitemap
repo = "twitter_network"
information_page_link = "about.html"
title = "twitter network analytics"
description = "twitter上でアカウント同士の人脈ネットワークを可視化するツールです。"
img = "http://fanstatic.short-tips.info/static/fanstatic/sample.png"
favicon = "https://raw.githubusercontent.com/kawadasatoshi/minegishirei/main/img/beaver.png"
def index(request):
    """Render the landing page listing every cached analysis page."""
    context = {
        "information_page_link": information_page_link,
        "title": title,
        "repo": repo,
        "page_list": Github.seach_page_list(repo),
        "favicon": favicon,
        "description": description,
        "img": img
    }
    return render(request, "fanstatic/dashboard/twitter_network_index.html", context)
@xframe_options_exempt
def page(request, htmlname):
    """Render the network-analysis page for one twitter account (htmlname),
    regenerating the cached graph JSON when needed or on ?reload."""
    if htmlname=="about.html":
        return about(request)
    # BUG FIX: `params` used to be defined only after the try/except below,
    # so hitting Twitter.MyTwitterException raised NameError in the handler.
    params = {
        "information_page_link" :information_page_link,
        "acount_name" : htmlname,
        "title" : htmlname + " " +title,
        "repo":repo,
        "favicon" : favicon,
        "description" : description,
        "img" : img
    }
    if "reload" in request.GET:
        # drop the cached page so the graph JSON gets rebuilt
        Github.delete_page(repo, htmlname)
    try:
        upload_network_json(htmlname)
    except Twitter.MyTwitterException:
        # Twitter rate-limited / unavailable: show the "busy" page instead.
        return render(request, "fanstatic/dashboard/twitter_network_busy.html", params)
    return render(request, "fanstatic/dashboard/twitter_network.html", params)
def about(request):
    """Render the Q&A page describing the tool."""
    context = {
        "title": "twitter network analytics Q&A",
        "favicon": favicon,
        "description": "twitterアカウントの人脈可視化ツール「twitter network analytics」についてのQ&Aページです。",
        "img": img
    }
    return render(request, "fanstatic/dashboard/twitter_network_about.html", context)
def upload_network_json(htmlname):
    """Build and upload the network JSON for htmlname unless a cached copy
    already exists in the GitHub-backed store."""
    if Github.has_already_created(repo, htmlname):
        return
    payload = create_network_json(htmlname)
    Github.upload(repo, htmlname, json.dumps(payload, ensure_ascii=False, indent=4))
def create_network_json(root_name):
    """Crawl twitter outward from root_name and build a node/link graph
    (d3-force style) of accounts appearing in each other's recent tweets.

    Returns ``{"nodes": [...], "links": [...]}``.
    """
    root_name = "@" + root_name.replace("@", "")
    link_list = []
    acount_list = []
    acount_set = set()
    myTwitterAction = Twitter.MyTwitterAction()

    def grep_node_info(tweet):
        # Node payload for one tweet's author.
        return {
            "name": "@" + tweet["user"]["screen_name"],
            "img": tweet["user"]["profile_image_url"],
            "text": tweet["text"],
            "group": 1
        }

    def get_dub_acount(tweet_list, acount_set):
        # Partition tweets into (already-seen authors, new authors).
        dub_tweet_list = []
        non_dub_tweet_list = []
        for tweet in tweet_list:
            acount_name = "@" + tweet["user"]["screen_name"]
            if acount_name in acount_set:
                dub_tweet_list.append(tweet)
            else:
                non_dub_tweet_list.append(tweet)
        return dub_tweet_list, non_dub_tweet_list

    def visit(tweet, parent_name, depth, node_num):
        # Add the tweet's author as a node linked to parent_name and recurse,
        # unless the author has already been seen.  (Factored out of the two
        # identical copy-pasted loop bodies in the original.)
        acount_name = "@" + tweet["user"]["screen_name"]
        if acount_name in acount_set:
            return
        acount_set.add(acount_name)
        acount_list.append(grep_node_info(tweet))
        link_list.append({
            "target": acount_name,
            "source": parent_name,
            "value": 1
        })
        induction_json(acount_name, depth - 1, int(node_num / 2))

    def induction_json(parent_name, depth, node_num):
        if depth < 0:
            return
        tweet_list = myTwitterAction.search_tweet_list(parent_name, amount=100)
        dub_tweet_list, non_dub_tweet_list = get_dub_acount(tweet_list, acount_set)
        # NOTE(review): every author in dub_tweet_list is already in
        # acount_set, so this first pass always falls through in visit();
        # it is kept because len(dub_tweet_list) still shapes the slice below.
        for tweet in dub_tweet_list:
            visit(tweet, parent_name, depth, node_num)
        for tweet in non_dub_tweet_list[:max(0, node_num - len(dub_tweet_list))]:
            visit(tweet, parent_name, depth, node_num)

    induction_json(root_name, 1, 50)
    # Ensure the root account itself always appears as a node.
    if root_name not in acount_set:
        acount_list.append({
            "name": root_name,
            "img": "https://cdn.icon-icons.com/icons2/1144/PNG/512/twitterlogo1_80940.png",
            "text": "本人",
            "group": 1
        })
    return {
        "nodes": acount_list,
        "links": link_list
    }
| minegishirei/flamevalue | trashbox/django3/app/fanstatic/twitter_views.py | twitter_views.py | py | 5,034 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "Github.seach_page_list",
... |
27577939256 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from tickets.models import Ticket
@login_required
def dashboard(request):
#from django.apps.apps import get_model
#t = get_model('openticketing', 'Ticket')
from django.db import connection
with connection.cursor() as cr:
cr.execute("select count(id) no_of_items, strftime('%Y-%m', create_date) [month] "
"from ot_ticket group by strftime('%Y-%m', create_date) "
"order by strftime('%Y-%m', create_date) desc limit 10 offset 0")
rows = cr.fetchall()
print(rows)
my_tickets = Ticket.objects.filter(assigned_to__id=request.user.id).order_by('-create_date')
return render(request, 'openticketing/dashboard.html', context=dict(tickets=my_tickets)) | majidasadish/OpenTicketing | OpenTicketing/tickets/app_views/pages/dashboard.py | dashboard.py | py | 826 | python | en | code | null | github-code | 1 | [
{
"api_name": "django.db.connection.cursor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.connection",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "tickets.models.Ticket.objects.filter",
"line_number": 17,
"usage_type": "call"
},
{
... |
11020158140 | from django.shortcuts import render
from app01 import models
from utils import mypage
# Create your views here.
def book_list(request):
# 查找到所有的书籍
books = models.Book.objects.all()
# 拿到总数据量
total_count = books.count()
# 从url拿到page参数
current_page = request.GET.get("page", None)
page_obj = mypage.MyPage(current_page, total_count, url_prefix="book_list", max_show=7)
# 对总数据进行切片,拿到页面显示需要的数据
data = books[page_obj.start:page_obj.end]
page_html = page_obj.page_html()
return render(request, "book_list.html", {"books": data, "page_html": page_html})
| xyw324/DemoPaging | app01/views.py | views.py | py | 674 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "app01.models.Book.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app01.models.Book",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "app01.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "uti... |
8818476440 | import cv2, math
from math import *
import numpy as np
import sys
from decimal import *
sys.path.append("../")
from libs.configs import cfgs
IMG_LOW = 1100
black = (0,0,0)
red = (0, 0, 255)
def convert_rect_origin(rect):
if rect[4] == 90 or rect[4] == -90:
new_rect = [rect[0],rect[1],rect[3],rect[2], 0]
elif rect[4] > 0:
new_rect = [rect[0], rect[1], rect[3], rect[2], -90 + rect[4]]
# new_rect = (rect[0], (rect[1][1], rect[1][0]), -90 + rect[2])
elif rect[4] < 0:
new_rect = [rect[0],rect[1], rect[3], rect[2], 90 + rect[4]]
# new_rect = (rect[0], (rect[1][1], rect[1][0]), 90 + rect[2])
else: #rect[2] == 0
new_rect = [rect[0],rect[1], rect[3], rect[2], 0]
# new_rect = (rect[0], (rect[1][1], rect[1][0]), 0)
return new_rect
#boxの向きを写真の下までの直線の点を得る
def check_rect_line(img, rect):
# print(img.shape)
bb = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])
# print(rect)
# print(img.shape)
if bb[2] > 0:
flag = 1
else:
flag = -1
x = bb[0][0]
yoko, tate = hanbun(bb)
h = img.shape[0]-rect[1]
if bb[2] == 0 or abs(bb[2]) == 90:
a = x - yoko
b = x + yoko
else:
a = x + flag * h*cos(radians(abs(bb[2])))/sin(radians(abs(bb[2])))
b = x + flag * -1 * yoko
if a < b:
xmin = a
xmax = b
else:
xmin = b
xmax = a
# img = cv2.line(img,(bb[0][0],bb[0][1]),(int(a),924),(0,255,0),5)
return img, a
#この関数に投げたら、適したboxに変更してくれる
def check_rect(img, rect):
rect2 = convert_rect_origin(rect)
img, a = check_rect_line(img, rect)
img, a2 = check_rect_line(img, rect2)
diff = abs(a - rect[0])
diff2 = abs(a2 - rect[0])
if diff > diff2:
return img, rect2
else:
return img, rect
def calc_x_range(img, rect):
# print(img.shape)
x_min_maxs = []
for i in range(cfgs.STRIDE_NUM):
img, rect = check_rect(img, rect)
bb = ((rect[0] + (i-2) * cfgs.STRIDE , rect[1]), (rect[2], rect[3]), rect[4])
# print(rect)
# print(img.shape)
if bb[2] > 0:
flag = 1
else:
flag = -1
x = bb[0][0]
yoko, tate = hanbun(bb)
h = IMG_LOW-rect[1]
thre = 950
if h > thre:
h = thre
if bb[2] == 0 or abs(bb[2]) == 90:
a = x - yoko
b = x + yoko
else:
# a = x + flag * (img.shape[0]-rect[1])*cos(radians(abs(bb[2])))/sin(radians(abs(bb[2])))
a = x + flag * h *cos(radians(abs(bb[2])))/sin(radians(abs(bb[2])))
b = x + flag * -1 * yoko
if a < b:
xmin = a
xmax = b
else:
xmin = b
xmax = a
x_min_maxs.append([xmin, xmax])
img = cv2.line(img,(bb[0][0],bb[0][1]),(int(a),950),(0,255,0),5)
return img, x_min_maxs
def hanbun(rect):
if rect[2] == 0:
yoko_hanbun = 0.5*rect[1][0]
tate_hanbun = 0.5*rect[1][1]
elif rect[2] == 90 or rect[2] == -90:
yoko_hanbun = 0.5*rect[1][1]
tate_hanbun = 0.5*rect[1][0]
else:
yoko_hanbun = 0.5*(rect[1][0]*cos(radians(abs(rect[2]))) + rect[1][1]*sin(radians(abs(rect[2]))))
tate_hanbun = 0.5*(rect[1][0]*sin(radians(abs(rect[2]))) + rect[1][1]*cos(radians(abs(rect[2]))))
# elif rect[2] < 0:
# yoko_hanbun = 0.5*(rect[1][0]*cos(radians(abs(rect[2]))) + rect[1][1]*sin(radians(abs(rect[2]))))
# tate_hanbun = 0.5*(rect[1][0]*sin(radians(abs(rect[2]))) + rect[1][1]*cos(radians(abs(rect[2]))))
# else:
# yoko_hanbun = 0.5*(rect[1][1]*sin(radians(abs(rect[2]))) + rect[1][0]*sin(radians(abs(rect[2]))))
# tate_hanbun = 0.5*(rect[1][1]*cos(radians(abs(rect[2]))) + rect[1][0]*cos(radians(abs(rect[2]))))
return [yoko_hanbun, tate_hanbun]
def convert_rect(rect):
if rect[2] == 90 or rect[2] == -90:
new_rect = (rect[0], (rect[1][1], rect[1][0]), 0)
elif rect[2] > 0:
new_rect = (rect[0], (rect[1][1], rect[1][0]), -90 + rect[2])
elif rect[2] < 0:
new_rect = (rect[0], (rect[1][1], rect[1][0]), 90 + rect[2])
else: #rect[2] == 0
new_rect = (rect[0], (rect[1][1], rect[1][0]), 0)
return new_rect
def calc_book_range(img, rect):
return_boxes = []
for i in range(cfgs.STRIDE_NUM):
print(rect)
img, rect = check_rect(img, rect)
print(rect)
bb = ((rect[0] + (i-2) * cfgs.STRIDE , rect[1]), (rect[2], rect[3]), rect[4])
if bb[2] > 0:
flag = 1
else:
flag = -1
x,y = bb[0][0], bb[0][1]
yoko, tate = hanbun(bb)
print("tate : " + str(tate))
print("yoko : " + str(yoko))
h2 = IMG_LOW-rect[1]
thre = img.shape[0]
print("tres : " + str(thre))
if h2 > thre:
h2 = thre
if bb[2] == 0 or abs(bb[2]) == 90:
if bb[2] == 0:
bb = (bb[0], (bb[1][1], bb[1][0]), 90)
h1 = tate
line_h = h1 + h2
line_w = 1000000
cnt_x = bb[0][0]
cnt_y = bb[0][1] + line_h/2-h1
line_norm = line_h
else:
h1 = yoko * tan(radians(abs(bb[2])))
line_h = h1 + h2
line_w = line_h / tan(radians(abs(bb[2])))
line_norm = line_h / sin(radians(abs(bb[2])))
cnt_x = x + flag * (-yoko + line_w /2)
cnt_y = y - h1 + line_h /2
print("cnt_x : " + str(cnt_x))
print("cnt_y : " + str(cnt_y))
print("line_h : " + str(line_h))
print("line_w : " + str(line_w))
print("line_norm : " + str(line_norm))
img = cv2.line(img, (0, int(cnt_y + line_h/2)), (800, int(cnt_y + line_h/2)), (255,0,0), 5)
img = cv2.circle(img,(int(cnt_x), int(cnt_y)), 6, (0,255,0), -1)
box1 = ((cnt_x, cnt_y), (line_norm, bb[1][1]), bb[2])
# box1 = [cnt_x, cnt_y, bb[1][1], line_norm,bb[2]]
# img, box1 = check_rect(img, box1)
# box1 = ((box1[0], box1[1]), (box1[2], box1[3]), box1[4])
box = cv2.boxPoints(box1)
box = np.int0(box)
img = cv2.drawContours(img,[box],-1,black,2)
return_boxes.append(box1)
# img = cv2.line(img,(bb[0][0],bb[0][1]),(int(a),950),(0,255,0),5)
return img, return_boxes
# rect = [500, 500, 50, 100, 80]
# rect1= [503, 800, 100, 50, 10]
# rect2 = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])
# rect1 = ((rect1[0], rect1[1]), (rect1[2], rect1[3]), rect1[4])
#
# img = cv2.imread("hon.jpg")
# img = cv2.circle(img,(int(rect1[0][0]), int(rect1[0][1])), 3, (0,255,0), -1)
# img,x = calc_x_range(img, rect)
# box = cv2.boxPoints(rect2)
# box = np.int0(box)
# img = cv2.drawContours(img,[box],-1,red,2)
# box = cv2.boxPoints(rect1)
# box = np.int0(box)
# img = cv2.drawContours(img,[box],-1,red,2)
# img, boxes = calc_book_range(img, rect)
# area_bb2 = rect1[1][0] * rect1[1][1]
#
# for i in range(len(boxes)):
# int_pts = cv2.rotatedRectangleIntersection(boxes[i], rect1)[1]
# inter = 0.0
# if int_pts is not None:
# #convexhull は 凸法を計算
# order_pts = cv2.convexHull(int_pts, returnPoints=True)
# #order_ptsの面積を計算
# int_area = cv2.contourArea(order_pts)
# inter = int_area * 1.0 / area_bb2
# print(inter)
# # cv2.imshow("te3", img)
# cv2.imwrite("res.jpg", img)
# cv2.waitKey(0)
# rect = np.array([500, 500, 50, 100, 80])
# rect1= np.array([503, 800, 100, 50, 10])
rect = np.array([1,1])
rect1= np.array([1,1])
rects = np.array([rect, rect1])
print(np.var(rects))
| anegawa/book_detection | tools/test.py | test.py | py | 7,650 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "libs.configs.cfgs.STRIDE_NUM",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "libs.co... |
31064789985 | #!/usr/bin/env python
import os
import sys
import pdb
import numpy as np
from scipy.interpolate import interp1d
from scipy.constants import pi
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from astropy.constants import h, k_B, c, G
from astropy import units as u
from astropy.cosmology import FlatLambdaCDM as LCDM
from astropy.coordinates import SkyCoord
from astropy.table import Table
from uncertainties import ufloat
# from BCES import run_MC_BCES
from potential import Hogan
class Cluster(object):
def __init__(self, z, infile):
self.z = z
self.profiles = Table.read(infile, format='csv')
@classmethod
def from_files(cls, z, infile, centroid=None, mass_file=None, potential=None):
c = cls(z, infile)
if centroid is not None:
c.centroid_from_file(centroid)
if mass_file is not None:
c.potential = Hogan.from_file(mass_file)
# c.set_mass_profile(mass_file)
if potential is not None:
if mass_file is not None:
raise Warning("Potential is overwriting the mass profile")
c.potential = potential
return c
@property
def centroid(self):
return self._centroid
@centroid.setter
def centroid(self, coord):
self._centroid = coord
# def centroid(self, ra, dec, unit=(u.hourangle, u.deg)):
# self._centroid = SkyCoord(ra, dec, unit=unit)
def centroid_from_file(self, centroid_file):
with open(centroid_file) as f:
line = f.readlines()[-1]
trim = line.split(")")[0].split("(")[1]
toks = trim.split(",")
ra, dec = toks[:2]
self.centroid = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
def interpolate(self, key, value, xkey='R', return_error=False):
xx = self.profiles[xkey]
yy = self.profiles[key]
if isinstance(xx, u.Quantity):
xx = xx.value
xu = xx.unit
if isinstance(yy, u.Quantity):
yy = yy.value
yu = yy.unit
if isinstance(value, u.Quantity):
try:
value = value.to(xu).value
except NameError:
value = value.value
if value < xx[0]:
print("Warning: Attempting to extrapolate.")
print("Returning innermost data point.")
return yy[0]
x = np.log10(xx)
y = np.log10(yy)
interp_fxn = interp1d(x, y, kind='linear')
log_yp = interp_fxn(np.log10(value))
try:
yp = 10**log_yp * yu
except NameError:
yp = 10**log_yp
if return_error:
# Find one or two closest points along x-axis
# Average the fractional uncertainties
left_ind = np.argwhere(xx <= value).max()
right_ind = np.argwhere(xx > value).min()
left_unc_p = self.profiles[f"{key}_p"][left_ind] / self.profiles[key][left_ind]
left_unc_m = self.profiles[f"{key}_m"][left_ind] / self.profiles[key][left_ind]
right_unc_p = self.profiles[f"{key}_p"][right_ind] / self.profiles[key][right_ind]
right_unc_m = self.profiles[f"{key}_m"][right_ind] / self.profiles[key][right_ind]
unc_p = yp * np.mean([left_unc_p, right_unc_p])
unc_m = yp * np.mean([left_unc_m, right_unc_m])
return (yp, unc_p, unc_m)
return yp
def fit_powerlaw(self, key):
raise NotImplementedError
def plot_profile(self, key, Rkey="R", xlims=None, ylims=None,
outfile=None, ax=None, **mpl_kwargs):
ylabels = dict(density = r'Density (cm$^{-3}$)',
kT = r'Temperature (keV)',
Z = r'Abundance (Z$_{\odot}$)',
pressure = r'Pressure (erg cm$^{-3}$)',
entropy = r'Entropy (kev cm$^{2}$)',
Lx = r'L$_X$ (erg s$^{-1}$)',
tcool = r'$t_{\rm cool}$ (yr)',
M = r'Enclosed Mass ($M_{\odot}$)',
Mgas = r'Gas Mass ($M_{\odot}$)',
tcool_tff = r"$t_{\rm cool}/t_{\rm ff}$")
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(7,6), constrained_layout=True)
ax = plt.gca()
xerr = self.profiles[f'{Rkey}_pm']
yerr = (abs(self.profiles[key+'_m']), self.profiles[key+'_p'])
ax.errorbar(self.profiles[Rkey], self.profiles[key], yerr, xerr,
**mpl_kwargs)
ax.set_xlabel(r'R (kpc)', fontsize=16)
ax.set_ylabel(ylabels[key], fontsize=16)
if xlims is not None:
ax.set_xlim(xlims)
else:
ax.set_xlim(xmin=1)
if ylims is not None:
ax.set_ylim(ylims)
ax.set_xscale('log')
if key in ['kT', 'Z']:
ax.set_yscale('linear')
else:
ax.set_yscale('log')
ax.xaxis.set_major_formatter(FormatStrFormatter('%g'))
ax.tick_params(axis='both', which='major', labelsize=12)
if outfile is not None:
# plt.tight_layout()
# plt.axes().set_aspect('equal')
plt.savefig(outfile, facecolor='white', transparent=True)
plt.clf()
| avantyghem/Cluster | Cluster.py | Cluster.py | py | 5,318 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "astropy.table.Table.read",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "astropy.table.Table",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "astropy.constants.c",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "astr... |
23550603160 | import logging
import os
import time
from pathlib import Path
import tomllib
IS_DEVELOPMENT = bool(os.environ.get("DEVELOPMENT", False))
parsed_toml = tomllib.load(open("config.toml", "rb"))
SECRET_KEY = parsed_toml.get("SECRET_KEY", "S$cR3t_K3y")
API_HOST = parsed_toml.get("API_HOST")
API_PORT = int(parsed_toml.get("API_PORT", 8080))
API_URL = parsed_toml.get("API_URL", "https://server1.getwvkeys.cc")
API_SECRETS = parsed_toml.get("API_SECRETS", [])
CONSOLE_LOG_LEVEL = logging.DEBUG
FILE_LOG_LEVEL = logging.DEBUG
LOG_FORMAT = parsed_toml.get("LOG_FORMAT", "[%(asctime)s] [%(name)s] [%(funcName)s:%(lineno)d] %(levelname)s: %(message)s")
LOG_DATE_FORMAT = parsed_toml.get("LOG_DATE_FORMAT", "%I:%M:%S")
LOG_FILE_PATH = Path(os.getcwd(), "logs", f"{time.strftime('%Y-%m-%d')}.log")
CHALLENGES_DIR_PATH = Path(os.getcwd(), "challenges")
CHALLENGES_DIR_PATH.mkdir(exist_ok=True, parents=True)
WV_CVT = Path(os.getcwd(), "pywidevine", "wv_cvt.exe")
if not WV_CVT.exists():
raise FileNotFoundError("wv_cvt.exe is missing")
DISABLE_INFO_ROUTE = bool(parsed_toml.get("DISABLE_INFO_ROUTE", False))
| GetWVKeys/wv_cdm_api | api/config.py | config.py | py | 1,107 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tomllib.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_nu... |
25163443774 | import time
import pyautogui
from pykeyboard import PyKeyboard
from pymouse import PyMouse
from positions import POSITION
from roles import Role
from scenes.common import CommonScene
from tools import loading, locate
# 场景3:游戏界面
class GameScene(CommonScene):
@staticmethod
def goto_association():
"""
步行至冒险家协会
:return:
"""
k = PyKeyboard()
time.sleep(1)
k.press_key('w')
time.sleep(10)
k.press_key('a')
time.sleep(0.5)
k.release_key('a')
while 1:
if locate('game_catch_1_item.jpg'):
k.release_key('w')
break
time.sleep(0.5)
@staticmethod
def receive_daily_prizes():
pass
@staticmethod
def receive_discovery_prizes():
k = PyKeyboard()
m = PyMouse()
time.sleep(1)
k.press_key('f')
time.sleep(1)
m.click(*POSITION['game']['skip_dialog'])
time.sleep(1.5)
m.move(*POSITION['game']['discovery_prizes_button'])
time.sleep(0.3)
m.click(*POSITION['game']['discovery_prizes_button'])
time.sleep(3)
# 进入探索派遣界面
def _tag(name):
m.move(*POSITION['discovery'][name])
time.sleep(0.3)
m.click(*POSITION['discovery'][name])
time.sleep(0.3)
def _set(place='place_monde_2', role='role_2'):
m.click(*POSITION['discovery'][place])
time.sleep(0.3)
# 判断是否完成探索 未完成则跳过流程
if not locate('discovery_recall_button.png'):
m.click(*POSITION['discovery']['confirm']) # 领取奖励
time.sleep(0.3)
m.click(*POSITION['discovery']['confirm']) # 确认奖励
time.sleep(0.3)
m.click(*POSITION['discovery']['confirm']) # 选择角色
time.sleep(0.3)
m.click(*POSITION['discovery'][role]) # 选择角色1
time.sleep(0.3)
def _receive():
while 1:
time.sleep(0.5)
position = locate('expedition/dispatch/mark.png', threshold=0.85)
if not position:
break
pyautogui.click(*position)
time.sleep(0.3)
pyautogui.click(*POSITION['discovery']['confirm']) # 领取奖励
time.sleep(0.3)
pyautogui.click(*POSITION['discovery']['confirm']) # 确认奖励
time.sleep(0.3)
_tag('tag_monde')
_receive()
_set('place_monde_1', 'role_1') # 蒙德1
_set('place_monde_2', 'role_2') # 蒙德2
_tag('tag_liyue')
_receive()
_set('place_liyue_1', 'role_3') # 璃月1 角色3-申鹤
_set('place_liyue_2', 'role_1') # 璃月2
_tag('tag_inazuma')
_receive()
_set('place_inazuma_1', 'role_1') # 稻妻1
k.tap_key(k.escape_key)
time.sleep(1)
@classmethod
def into_pot_scene(cls):
"""
进入尘歌壶
:return:
"""
k = PyKeyboard()
m = PyMouse()
time.sleep(1)
k.press_key(k.alt_l_key)
time.sleep(1)
m.move(*POSITION['game']['backpack_button'])
time.sleep(0.3)
m.click(*POSITION['game']['backpack_button'])
time.sleep(0.5)
k.release_key(k.alt_l_key)
time.sleep(1)
m.move(*POSITION['backpack']['category_6'])
time.sleep(0.3)
m.click(*POSITION['backpack']['category_6'])
time.sleep(0.3)
m.click(*POSITION['backpack']['item_1'])
time.sleep(0.3)
m.click(*POSITION['backpack']['confirm'])
time.sleep(1)
while 1:
if locate('game_catch_1_item.jpg'):
break
k.tap_key('f')
time.sleep(8)
@classmethod
def into_log_scene(cls):
"""
进入纪行界面
:return:
"""
k = PyKeyboard()
k.press_key(k.alt_l_key)
time.sleep(0.5)
# todo 会出现纪行期限结束 没有纪行图标存在的情况
pyautogui.click(*locate('game_log_button.png', once=False, threshold=0.8))
time.sleep(0.1)
k.release_key(k.alt_l_key)
time.sleep(0.5)
@staticmethod
def receive_blessing():
is_received = False
time.sleep(1)
while 1:
if loading(POSITION['game']['role_button'], 'game_role_button', once=True, threshold=0.8):
is_received = True
break
if loading(POSITION['game']['receive_blessing_button'], 'game_receive_blessing_button', once=True, threshold=0.8):
break
if not is_received: # 已领取空月祝福的场合直接跳过领取流程
time.sleep(1)
pyautogui.click(*POSITION['game']['receive_blessing_button'][0:2])
time.sleep(0.3)
loading(POSITION['game']['receive_blessing_confirm'], 'game_receive_blessing_confirm', threshold=0.8)
time.sleep(1)
pyautogui.click(*POSITION['game']['receive_blessing_confirm'][0:2])
time.sleep(2.5)
GameScene.switch_role(1)
time.sleep(0.5)
@staticmethod
def switch_role(number=1):
# todo 诺埃尔图标
time.sleep(1)
pyautogui.press(str(number))
time.sleep(0.5)
@classmethod
def hit_tree_1(cls):
"""
蒙德城砍树
:return:
"""
with pyautogui.hold('s'):
time.sleep(3)
with pyautogui.hold('d'):
time.sleep(2)
Role.hit_3()
time.sleep(0.5)
with pyautogui.hold('a'):
time.sleep(0.5)
time.sleep(0.5)
with pyautogui.hold('w'):
time.sleep(0.5)
time.sleep(0.5)
Role.hit_3()
time.sleep(0.5)
with pyautogui.hold('s'):
time.sleep(0.15)
time.sleep(0.5)
Role.hit_3()
time.sleep(2)
@classmethod
def hit_tree_2(cls):
"""
蒙德城外砍树
:return:
"""
with pyautogui.hold('a'):
with pyautogui.hold('w'):
time.sleep(4)
with pyautogui.hold('w'):
time.sleep(1)
Role.hit_3()
time.sleep(0.5)
with pyautogui.hold('d'):
with pyautogui.hold('s'):
time.sleep(1)
Role.hit_3()
time.sleep(2)
@classmethod
def hit_tree_3(cls):
"""
蒙德城外砍树
:return:
"""
Role.go_back(3.5)
Role.hit_3()
time.sleep(0.5)
pyautogui.press('e')
Role.go_back(0.1, left=True)
Role.hit_3()
Role.go_right(1)
time.sleep(0.5)
Role.go_front(0.1, right=True)
Role.hit_3()
Role.go_front(1.5, right=True)
Role.go_front(0.1)
Role.hit_3()
time.sleep(0.5)
Role.go_front(1.5, left=True)
Role.go_front(0.1)
Role.hit_3()
time.sleep(1)
@classmethod
def hit_tree_4(cls):
"""
庆云顶砍树
:return:
"""
Role.go_back(2.5, right=True)
Role.hit_1()
time.sleep(4)
Role.go_left(0.5)
with pyautogui.hold('d'):
Role.hit_4()
Role.go_left(2)
with pyautogui.hold('w'):
Role.hit_4()
@classmethod
def load_complete(cls):
loading(POSITION['game']['role_button'], 'game_role_button', threshold=0.7)
time.sleep(0.3)
| huiyaoren/genshin_test_tools | scenes/game.py | game.py | py | 7,704 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "scenes.common.CommonScene",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pykeyboard.PyKeyboard",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.sleep... |
5071621268 |
# coding:utf-8
# 卷积核尺寸,卷积核个数,池化层尺寸,全连接层的节点数,学习率,权重,偏置
import copy
import random
import time
import matplotlib.pyplot as plt
import numpy as np
from public.public_function import *
from public.cnn_single_keras_tensorflow import *
# from multi_part.first_part import *
from keras.models import load_model
from _1_first_part.first_part import Constraints,control
class Multi_Individual():
# x:包含个体全部信息,第一二卷积层卷积核尺寸以及卷积核个数,第一二层池化层尺寸和步长,全连接层节点数以及学习率
# train_average_y_list:保存每一代模型在训练集上的输出矩阵
# net_num:该代的第几个个体,用于确认保存的位置以及保存的模型名称
def __init__(self, x, net_num,initial_p=None):
print('开始创建个体')
if initial_p == None:
# 种群的个体信息
self.x = x
# 用于保存个体对应的两个目标函数值
self.f = []
# 记录该个体是此代中第几个个体
self.net_num = net_num
# 模型的保存位置以及保存的模型名称
self.model_save_path = temppath
self.model_name = 'model_' + str(self.net_num) + '.h5'
# 根据个体,模型保存位置和模型名称构建网络
# 返回来三个结果:
# 1、训练集上的准确度
# 2、训练集得到的分类矩阵
print("即将进入CNN")
c = cnn(self.x, model_save_path=self.model_save_path, model_name=self.model_name)
acc, y_prediction =c.cnn_run(1)
if acc > 0.3:
acc, y_prediction = c.cnn_run(10)
print("网络训练完毕")
print('验证集集准确率:', acc)
self.y_pre = y_prediction
# 将训练误差作为第一个目标函数
f1 = 1 - acc
self.f.append(f1)
# 初始化的第二个目标函数,这里先用100占位
f2 = 100
self.f.append(f2)
with open(os.path.join(self.model_save_path, "second.txt"), 'w')as f:
f.write(str(self.f[1]) + ",")
else:
# 种群的个体信息
self.x = initial_p.x
# 用于保存个体对应的两个目标函数值
self.f = []
# 记录该个体是此代中第几个个体
self.net_num = net_num
# 模型的保存位置以及保存的模型名称
self.model_save_path = os.path.join(cluster_path,str(self.net_num))
self.model_name = 'model_' + str(self.net_num) + '.h5'
self.acc=initial_p.acc
self.y_pre=initial_p.y_pre
# 将训练误差作为第一个目标函数
f1 = 1 - self.acc
self.f.append(f1)
# 初始化的第二个目标函数,这里先用0占位
f2 = 100
self.f.append(f2)
with open(os.path.join(self.model_save_path,"second.txt"),'w')as f:
f.write(str(self.f[1])+",")
def matrix_to_number(x):
x_num=np.argmax(x,axis=1)
return x_num
def calculate_P(p):
p_l = []
for i in range(len(p)):
p_l.append(p[i].y_pre)
p_sum = 0
for i in range(len(p_l)):
p_sum += p_l[i]
P=matrix_to_number(p_sum)
return P
def firstPart_single_or_semeble_SecondFunction(p, k=None, y=None):
# 非变异个体的第二个目标函数的计算。。包括一代每个个体的第二个目标函数计算,以及对邻域个体的第二个目标函数的重新计算
if y==None:
P = calculate_P(p)
p_l = []
for i in range(len(p)):
p_l.append(matrix_to_number(p[i].y_pre))
# 计算一个集合中所有个体的第二个目标函数
if k == None:
for i in range(len(p_l)):
pi = np.sum(p_l[i] == P) / len(P)
pj_sum = 0
for j in range(len(p_l)):
if i == j:
continue
else:
pj_sum += np.sum(p_l[j] == P) / len(P)
pj_sum_average = pj_sum
p[i].f[1] = pi*pj_sum_average
return p
# 计算单个个体的第二个目标函数
else:
pk = np.sum(p_l[k] == P) / len(P)
pj_sum = 0
for j in range(len(p_l)):
if j==k:
continue
pj_sum += np.sum(p_l[j] == P) / len(P)
pj_sum_average = pj_sum
return pk * pj_sum_average
# 计算处于第一阶段的变异个体的第二个目标函数
else:
p[k] = y
P = calculate_P(p)
p_l = []
for i in range(len(p)):
p_l.append(matrix_to_number(p[i].y_pre))
pk = np.sum(p_l[k] == P) / len(P)
pj_sum = 0
for j in range(len(p_l)):
if j == k:
continue
pj_sum += np.sum(p_l[j] == P) / len(P)
pj_sum_average = pj_sum
return pk * pj_sum_average
# 初始化种群及对应的权重向量
# 输入 N:种群个体数量
# 返回:种群p,和对应的权重向量Lamb
def Initial(N,initial_p):
# p用来保存种群个体
p = []
# Lamb保存的是对应的权重向量
Lamb = []
for i in range(N):
temp = []
# 卷积核尺寸,卷积核个数,池化层尺寸,全连接层的节点数,学习率,权重,偏置
l = initial_p[i].x
net_num = i
# 清空模型保存的一手文件夹
# 所有模型都是先保存在该文件夹中,然后根据一定规则将该文件夹中的模型文件复制到指定文件夹,后清空该文件夹。等待接受下一个模型。
if os.path.exists(temppath):
deletefile(temppath)
else:
os.makedirs(temppath)
# # 根据个体信息,创建对应的CNN
net_i = Multi_Individual(l,net_num,copy.deepcopy(initial_p[i])) # 初始化第一代网络
print_l(net_i.x)
p.append(net_i)
# 为个体随机创建权重向量,向量里包含元素的个数与目标函数个数相同,此处为2个。
mmm = i/N
temp.append(mmm)
temp.append(1.0 - mmm)
# 用列表Lamb保存权重向量,个体的坐标与对应的权重向量坐标是对应的。
Lamb.append(temp)
print('第一代网络模型保存位置')
for i in range(len(p)):
print(p[i].model_save_path)
return p, Lamb #返回种群,及对应的权重向量
def rndG(a,b):
max_ = max(a,b)
min_ = min(a,b)
return np.random.normal(0, (1 / 20) * (max_-min_))
def mutation_Gaussian(l):
conv1_size_Gaussian = rndG(28/2,1)
conv1_numbers_Gaussian = rndG(2,64)
pool1_size_Gaussian = rndG(2,28/2)
pool1_stride_Gaussian = rndG(2,l[2])
p1 = pictureSize_afterPool(28,l[3])
conv2_size_Gaussian = rndG(2,int(p1/2))
conv2_numbers_Gaussian = rndG(l[1],128)
pool2_size_Gaussian = rndG(2,int(p1/2))
pool2_stride_Gaussian = rndG(2,l[6])
p2=pictureSize_afterPool(p1,l[7])
fullconnection_Gaussian = rndG(10,p2*p2*l[5])
learning_rate_Gaussian=rndG(0,1)
dropout_Gaussian=rndG(0,1)
return np.array([conv1_size_Gaussian,conv1_numbers_Gaussian,pool1_size_Gaussian,pool1_stride_Gaussian,conv2_size_Gaussian,conv2_numbers_Gaussian,pool2_size_Gaussian,pool2_stride_Gaussian,fullconnection_Gaussian,learning_rate_Gaussian,dropout_Gaussian])
# 变异获得新的个体
# c为原有个体,a,b为它邻域里面的两个个体,
def GeneticOperation(a, b, c, k):
# 此处F设置为0.5
F = 0.5
# 在0,1范围内生成一个服从均匀分布的随机数
j = np.random.uniform(0, 1)
print("变异控制参数"+str(j))
# 如果随机变量小于等于0.5,在原有个体加上控制参数乘以邻域个体的差,再加上后面的随机变量e
a_array=np.array(a.x)
b_array=np.array(b.x)
c_array=np.array(c.x)
print("邻域个体1:",a_array)
print("邻域个体2:",b_array)
print("本体:",c_array)
l_array = c_array + F *(a_array - b_array)
if j <= 0.5:
l_array = Constraints(l_array,a_array,b_array,c_array)
Gaussian = mutation_Gaussian(l_array)
l_array+=Gaussian
# 卷积核尺寸,卷积核个数,池化层尺寸,全连接层的节点数,学习率,权重,偏置
l_array=Constraints(l_array,a_array,b_array,c_array)
print_l(l_array)
return Multi_Individual(l_array,k)
# 计算邻域
# 输入:权重向量Lamb,邻域个数T
# 返回:距离每个向量最近的T个向量的索引的列表
def Neighbor(Lamb, T):
#为每个权重向量,计算对应的T个邻域
B = []
for i in range(len(Lamb)):
temp = []
for j in range(len(Lamb)):
distance = np.sqrt((Lamb[i][0]-Lamb[j][0])**2+(Lamb[i][1]-Lamb[j][1])**2)
temp.append(distance)
# temp中存放的是种群中第i个个体与其他个体之间的距离
# 对距离进行排序,并且将其对应个体的坐标存放在l列表中
l = np.argsort(temp)
# 取前T个个体 B中存放的是距离每个个体最近的T个个体的坐标
B.append(l[:T])
return B #得到每个权重向量的T个邻域
def min_distance(p, l):
d = []
for i in range(len(p)):
d.append(p[i].f[0]*l[0]+p[i].f[1]*l[1])
return np.argmin(d)
def BestValue(p):
# 获取每个目标函数的最优值
best = []
for i in range(len(p[0].f)):
best.append(p[0].f[i])
for i in range(1, len(p)):
for j in range(len(p[i].f)):
if p[i].f[j] < best[j]:
best[j] = p[i].f[j]
return best
def max_rPlace(l, z_1, z):
l_1 = l[0] * np.abs(z_1[0] - z[0])
l_2 = l[1] * np.abs(z_1[1] - z[1])
return max(l_1, l_2)
def update_bestvalue(z,y,min_value):
# step 2.7) Updating
# 更新到目前为止的两个目标函数的最优值
flag = False
if (1-y.f[0]) > min_value:
for j in range(len(z)):
if y.f[j] < z[j]:
z[j] = y.f[j]
if flag == False:
flag = True
else:
pass
return flag
# 输入为N,T,G
# N 种群个体数量
# T 邻域个数
# G 进化的代数
def MOEAD(N, T, G, initial_p, path, min_value): # 种群数量和邻域个数
# step 1)
# step 1.1)
# 初始化种群及对应的权重向量,以及“候选集成模型个体集合”
p, Lamb = Initial(N,initial_p)
print('种群数量', len(p))
print('种群初始化完毕')
# 计算初代所有个体第二个目标函数值
p=firstPart_single_or_semeble_SecondFunction(copy.deepcopy(p))
update_second(p)
functions_print(p)
# step 1.2)
# 获取当前两个目标函数的最小值,参考点
z = BestValue(p)
with open(os.path.join(path,"bestvalue.txt"),'w')as f:
f.write(str(z)+",")
print('当前BestValue:', z)
# step 1.3)
# 根据权重向量计算对应的T个邻域
B = Neighbor(Lamb, T)
# step 1.4) 标准化部分 没有
# step 2)
# 进化G代
t = 0
while (t < G):
# step 2.1) 标准化部分 没有
t += 1
for i in range(len(p)):
if update_bestvalue(z, p[i],min_value):
print("参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新"
"参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新")
with open(os.path.join(path,"bestvalue.txt"),'a')as f:
f.write(str(z)+",")
for i in range(N):
# step 2.2) Reproduction
# step 2.3) Repairing
# step 2.4) Evaluation 这三部分包含在i中
# 为个体i在其邻域随机选取两个个体
k = random.randint(0, T - 1)
l = random.randint(0, T - 1)
print('从第'+str(t)+"代" + str(i) + '个个体选取邻域为:' + str(k) + ', ' + str(l))
# 根据原有个体i,以及随机选取的它邻域的两个个体变异出一个新的个体
y = GeneticOperation(p[B[i][k]], p[B[i][l]], p[i], i)
y.f[1] = firstPart_single_or_semeble_SecondFunction(copy.deepcopy(p), i, y)
with open(os.path.join(y.model_save_path, 'second.txt'), 'a')as f:
f.write(str(y.f[1]) + ",")
bianyigeti_print(y, t, i)
print('变异结束')
# step 2.5)标准化部分 没有
print(B[i])
# step 2.6) Replacement
# 此处进行replacement,对个体的邻域的每个个体,如果变异出来的个体满足条件,则用变异个体将邻域个体进行替换
for j in B[i]:
# 获取邻域元素的权重向量
Lamb_j = Lamb[j]
# 重新计算邻域中待更新个体的第二个目标函数
p[j].f[1] = firstPart_single_or_semeble_SecondFunction(copy.deepcopy(p),j)
# 用变异个体代替待更新个体,计算变异个体的第二个目标函数
# 计算变异个体与除待更新个体外的其他个体之间的联系
y.f[1] = firstPart_single_or_semeble_SecondFunction(copy.deepcopy(p), j, y)
# 获取变异个体两个目标函数距离最优值的最大值
y_ = max_rPlace(Lamb_j, y.f, z)
# 获取当前 邻域个体两个目标函数距离最优值的最大值
j_ = max_rPlace(Lamb_j, p[j].f, z)
# 如果变异个体小,则对邻域个体进行替换
if y_ <= j_ and (1-y.f[0]) > min_value:
# 用变异个体模型文件对原有文件进行替换
deletefile(p[j].model_save_path)
print("权重向量",Lamb_j)
print("变异个体目标函数值",y.f)
print("邻域目标函数值", p[j].f)
print("===================邻域个体替换=============================="
"===================邻域个体替换=============================="
"===================邻域个体替换=============================="
"===================邻域个体替换=============================="
"===================邻域个体替换=============================="
"===================邻域个体替换=============================="
"===================邻域个体替换=============================="
"===================邻域个体替换==============================")
print('第' +str(t)+"代第"+ str(i) + '个个体的邻域:第' + str(j) + '个个体 模型文件删除成功')
movefiles(y.model_save_path, p[j].model_save_path)
print('第' +str(t)+'代用变异个体模型文件对第' + str(i) + '个个体的邻域:第' + str(j) + '个个体 模型文件替换成功')
# 用变异个体对原有个体的邻域个体进行替换,但是模型的保存位置不变
# 将保存种群个体的列表进行同步更新,将对应位置的个体替换为变异个体,并且修改变异个体的model_save_path属性
p[j] = copy.deepcopy(y)
p[j].model_save_path = path + str(j) + "\\"
else:
print('第' +str(t)+'代' + str(j) + '个体不满足替换要求')
if update_bestvalue(z,y,min_value):
print('第' +str(t)+"代参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新"
"参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新参考点更新")
with open(os.path.join(path,"bestvalue.txt"),'a')as f:
f.write(str(z)+",")
# 变异个体操作全部完成,则将变异个体保存的模型文件进行删除
deletefile(y.model_save_path)
print('变异个体文件删除==================================')
# step 2.8)在以上步骤中包含,并没有保存每一代的每一个个体,都是在第一代中不断进行替换,直到最后一代
return p
def move_first_part_to_multi_part(multi_path, first_path):
for i in os.listdir(first_path):
old_path=os.path.join(first_path,str(i))
new_path=os.path.join(multi_path,str(i))
if os.path.exists(new_path):
os.rmdir(new_path)
shutil.copytree(old_path,new_path)
print("文件复制完毕")
class p():
def __init__(self, i):
path = os.path.join(cluster_path, str(i))
model_path = os.path.join(path, getFilename(path))
model = load_model(model_path)
last_layer = Model(inputs=model.input, outputs=model.get_layer("fc2").output)
y_prediction = last_layer.predict(validation_x_all)
self.y_pre = y_prediction
self.net_num = i
l = os.listdir(path)
# print(l)
with open(os.path.join(path, l[1]), 'r')as f:
acc = float(f.read().strip())
self.acc = acc
with open(os.path.join(path, l[2]), 'r')as f:
l_content = f.read().strip()[1:-1].split(",")
for i in range(len(l_content) - 2):
l_content[i] = int(l_content[i])
l_content[-2] = float(l_content[-2])
l_content[-1] = float(l_content[-1])
self.x = l_content
def read_first_part():
l_p=[]
for i in range(20):
p_i=p(i)
l_p.append(p_i)
return l_p
if __name__ == "__main__":
start = time.clock()
print(cluster_path)
folder_create_or_clear(cluster_path)
# 将文件复制过来
move_first_part_to_multi_part(cluster_path, "E:\pc_model\\new\\"+date_str+"\population")
single_p=read_first_part()
print("第一阶段单目标完毕")
# (1)训练程序从此处进入
# 进入MOEAD多目标程序 位于train_moea_cnn的MOEAD
# 第一个参数:种群个体数,第二个参数:邻域数量,第三个参数:进化代数,第四个参数:模型保存位置,外部存储集合的模型保存位置
p = MOEAD(20, 6, 3, single_p, cluster_path ,0.98)
end = time.clock()
multi_time = str(end-start)
with open(cluster_path+"time.txt", 'w')as f:
f.write(multi_time)
with open(cluster_path+"all_outep_variables.txt", "w")as f:
for i in range(len(p)):
if i==(len(p)-1):
f.write(str(p[i].x))
else:
f.write(str(p[i].x)+"$$$")
print('多目标训练部分耗时' + multi_time + '秒') | githubzhch/ensemble-learning-grf | _2_multi_moead/multi_moead_cluster.py | multi_moead_cluster.py | py | 19,841 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "numpy.argmax",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 131... |
22653630816 | from PIL import Image
import os
import codecs
include_extension = ['bmp']
def P2A(image, name):
    """Dump an RGB image's pixels to a comma-separated UTF-8 text file.

    One line per image column (index i); each pixel is written as "r,g,b,".

    :param image: an RGB image object exposing .size and .getpixel((x, y))
    :param name: output file path
    """
    x, y = image.size
    print(f"x={x},y={y}\n")
    # Bug fix: the original called `file.close` without parentheses, so the
    # file was never explicitly closed. A context manager closes it reliably
    # even if a write fails.
    with codecs.open(name, 'w', 'utf-8') as file:
        for i in range(x):
            for j in range(y):
                r, g, b = image.getpixel((i, j))
                file.write(f"{r},{g},{b},")
            file.write('\r\n')
file.close
read_path = "./in"
# Keep only files whose extension matches include_extension (bmp images).
filedir = os.listdir(read_path)
filenames = [fn for fn in filedir if any(fn.endswith(ext) for ext in include_extension)]
file_num = 0
for file in filenames:
    file_num += 1
    print(file)
    im = Image.open(f"./in/{file}")
    # Force RGB so getpixel always yields (r, g, b) triples.
    im_rgb = im.convert('RGB')
    # NOTE(review): assumes the ./Array output directory already exists.
    P2A(im_rgb, f"./Array/{file}_array.txt")
print(f"file_num={file_num}\n")
| Dalminham/CV_Processing | General_format/Evaluation/Pic2Array.py | Pic2Array.py | py | 728 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "codecs.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": ... |
36500206584 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 15:07:43 2020
@author: Pooya Poolad
A poisson event generator.
"""
try:
import numpy as np
import numpy.random as rnd
#import matplotlib.pyplot as plt
from numba import jit,cuda
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32
except ModuleNotFoundError: # means you're probably trying to run partial_hist test code on its own
import os
import sys
sys.path.append(os.path.abspath('..')) # add directory above to path
import numpy as np
import numpy.random as rnd
#import matplotlib.pyplot as plt
#from numba import jit,cuda
#from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32
from pdb import set_trace;
import torch
#import cupy as cp
# #@torch.no_grad()
# @torch.jit.script
def pytorch_generator(lambda_bg, lambda_sum, n_spad, sim_res, tof_echo, echo_split):
    """Generate Poisson photon-detection events on the GPU.

    lambda_sum[0]/lambda_sum[1] are the primary and secondary echo signal
    rates; lambda_bg is the background rate; sim_res is the time-bin width.
    When tof_echo is set, the SPADs of a SiPM are split between the two
    echoes according to echo_split.

    Returns (times, spads): numpy arrays of time-bin indices and SPAD indices
    at which a detection fired.
    """
    # Defaults to GPU 0 // you might want to change this.
    torch.cuda.set_device('cuda:0')
    # NOTE(review): torch.no_grad() used as a bare statement has no effect;
    # it needs to be a context manager or decorator -- confirm intent.
    torch.no_grad()
    cuda1 = torch.device('cuda:0')
    # Copy the primary-return rate to the GPU.
    signal_lambdax = torch.tensor(lambda_sum[0], device=cuda1, dtype=float)
    # Per-bin detection probability = (signal + background) * time resolution.
    signal_lambda = (signal_lambdax + lambda_bg) * sim_res
    # Secondary reflections.
    signal_lambda_secx = torch.tensor(lambda_sum[1], device=cuda1, dtype=float)
    signal_lambda_sec = (signal_lambda_secx + lambda_bg) * sim_res
    n_spad_per_sipm = n_spad
    if tof_echo:
        lambda_sum_org = torch.zeros([len(signal_lambda), n_spad_per_sipm], device=cuda1)
        split_spad = np.rint(np.array(echo_split) * n_spad_per_sipm).astype(int)
        # TODO: Only 2 echoes supported here for now.
        lambda_sum_org[:, 0:split_spad[0]] = torch.tile(signal_lambda.reshape(signal_lambda.size()[0], 1), [1, split_spad[0]])
        lambda_sum_org[:, split_spad[0]:split_spad[0]+split_spad[1]] = torch.tile(signal_lambda_sec.reshape(signal_lambda_sec.size()[0], 1), [1, split_spad[1]])
    else:
        # NOTE(review): torch.tile() takes no `device` keyword -- this branch
        # would raise TypeError if reached; confirm it is ever exercised.
        lambda_sum_org = torch.tile((lambda_bg + signal_lambda) * sim_res, [n_spad_per_sipm, 1], device=cuda1)
    # Draw uniform randoms and threshold against the per-bin probabilities.
    rnd_list = torch.rand(lambda_sum_org.shape, device=cuda1)  # torch.cuda.FloatTensor(lambda_sum_org.shape).uniform_()
    # Event list: (time-bin, spad) indices where the draw fell below the rate.
    ev_list = torch.where(rnd_list < lambda_sum_org)
    times, spads = ev_list[0].cpu().numpy(), ev_list[1].cpu().numpy()
    return (times, spads)
def rnd_gen_vect_random(p_tile):
    """Vectorised Bernoulli sampling over a probability array.

    :param p_tile: array of per-cell detection probabilities
    :return: np.where-style tuple of index arrays for the cells where a
        uniform [0, 1) draw fell below the cell's probability
    """
    # (Removed a no-op `p_tile = p_tile` self-assignment.)
    rnd_list = rnd.random(p_tile.shape)
    return np.where(rnd_list < p_tile)
def rnd_gen_vect_dotile(lambda_bg, lambda_sum, n_spad, sim_res, tof_echo, echo_split):
    """Build the per-SPAD probability matrix and sample detection events.

    lambda_sum[0] is the primary-echo signal rate and lambda_sum[1] the
    secondary-echo rate; both are forwarded to create_lambda_t.
    """
    p_tile = create_lambda_t(lambda_bg, lambda_sum[0], n_spad, sim_res, tof_echo, lambda_sum[1], echo_split)
    return rnd_gen_vect_random(p_tile)
def create_lambda_t(lambda_bg, signal_lambda, n_spad_per_sipm, sim_res, tof_echo=0, signal_lambda_sec = None, echo_split= [0.8, 0.2]):
    """Tile per-time-bin detection probabilities across the SPADs of a SiPM.

    Without echoes every SPAD is i.i.d. and shares one probability vector.
    With tof_echo, SPADs are partitioned between echoes per echo_split and
    each chunk receives the corresponding echo's probability vector.

    NOTE(review): echo_split is a mutable default argument (read-only here,
    so currently harmless). For echo >= 1 the code indexes
    signal_lambda_sec[echo] -- confirm the expected shape/indexing of the
    secondary-echo argument (index 0 is never used).
    """
    if tof_echo:
        # lambda_sum_org_untiled = np.zeros([len(time_steps)])
        lambda_sum_org = np.zeros([len(signal_lambda), n_spad_per_sipm])
        # Number of SPADs assigned to each echo.
        split_spad = np.rint(np.array(echo_split) * n_spad_per_sipm).astype(int)
        start_chunk = 0
        for echo, chunks in enumerate(split_spad):
            # first chunk is primary
            if echo == 0:
                signal_gamma = (signal_lambda + lambda_bg) * sim_res
            else:
                signal_gamma = (signal_lambda_sec[echo] + lambda_bg) * sim_res
            # Tile the lambda because we have n_spad_per_sipm independent
            # spads (some with a different distribution).
            lambda_sum_org[:, start_chunk:start_chunk+chunks] = np.tile(signal_gamma.reshape(signal_gamma.size, 1), [1, chunks])
            start_chunk += chunks
    else:
        # Tile the lambda because we have n_spad_per_sipm i.i.d. spads.
        lambda_sum_org = np.tile((lambda_bg + signal_lambda) * sim_res, [n_spad_per_sipm, 1])
    return lambda_sum_org
def rnd_gen_vect_pretiled(lambda_sum, n_spad, sim_res):
    """Sample events from an already-tiled probability matrix.

    Use this if you have tiled lambdas in the main.
    NOTE(review): n_spad and sim_res are unused here; kept only for a call
    signature matching the other generators.
    """
    return rnd_gen_vect_random(lambda_sum)
def dd_time_gen(dd_avg, std_dev, const, num_out=False):
    """Sample a Gaussian detection time and convert it to a time-bin index.

    :param dd_avg: mean of the Gaussian (seconds)
    :param std_dev: standard deviation of the Gaussian
    :param const: object exposing sim_res, the simulation time-bin width
    :param num_out: when True also return the raw (un-binned) sample
    :return: int bin index, or (int bin index, float sample) if num_out
    """
    sample = rnd.normal(loc=dd_avg, scale=std_dev)
    bin_index = int(sample // const.sim_res)
    return (bin_index, sample) if num_out else bin_index
# For testing this module independently.
if __name__ == "__main__":
    # The CUDA launcher demo below is kept for reference but disabled, so
    # direct invocation is a no-op.
    pass
    # Test program
    # launching cuda core
    # n_spad = 4
    # tof_echo = 1
    # echo_split = [0.8, 0.2]
    # lambda_sum = [1e12]
    # lambda_sig = np.zeros((600,1))
    # lambda_sig[100:120] += 4e12
    # lambda_sum.append(lambda_sig)
    # lambda_sig_sec = np.zeros((600,1))
    # lambda_sig_sec[200:220] += 5e11
    # lambda_sum.append(lambda_sig_sec)
    # sim_res = 10e-12
    # cuda_generator_launcher(lambda_sum, n_spad, sim_res, tof_echo, echo_split)
| ppoolad/MonteCarlo_ToF | Utility/event_generator.py | event_generator.py | py | 4,924 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
2158776369 | import torch
import open_clip
from pathlib import Path
import pandas as pd
from PIL import Image
model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion400m_e32')
def vectorize(img):
    """Encode a PIL image into a CLIP embedding, returned as a plain list."""
    with torch.no_grad():
        batch = preprocess(img).unsqueeze(0)
        features = model.encode_image(batch)
        embedding = features.cpu().detach().numpy()[0]
    return embedding.tolist()
# Build the image-embedding index once; skip if base.csv already exists.
if not(Path('base.csv').exists()):
    paths = []
    vecs = []
    for file in Path('base').glob('*'):
        paths.append(file)
        vecs.append(vectorize(Image.open(str(file))))
    df = pd.DataFrame(data={'Path': paths, 'Vec': vecs})
    # Expose the positional index as an explicit Id column.
    df = df.reset_index()
    df = df.rename(columns={'index': 'Id'})
    df.to_csv('base.csv', index=False)
| wyttnik/SimilarImageSearchNeuron | base_creation.py | base_creation.py | py | 733 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "open_clip.create_model_and_transforms",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathl... |
25009901167 | # -*- coding: utf-8 -*-
from fastapi import APIRouter, Path
from .controllers import PostCtrl
post_router = APIRouter(prefix='/posts')
@post_router.get('', summary="获取文章列表")
async def get_posts_list(page: int = 1, per_page: int = 10):
    """Return one page of posts together with pagination metadata."""
    pagination = PostCtrl().get_posts_paginate(page=page, per_page=per_page)
    payload = {
        "items": pagination.items,
        "page": pagination.page,
        "per_page": pagination.per_page,
        "total_page": pagination.pages,
    }
    return {"r": payload, "msg": "", "code": 0}
@post_router.get('/{post_id}', summary="查看文章详情")
async def get_post_detail(post_id: int = Path(...)):
    """Return a single post by id, or an empty object when not found."""
    post = PostCtrl().get_one_post(post_id)
    return {"r": post if post else {}, "msg": "", "code": 0}
| zxins/fast-lofter | services/post/apis.py | apis.py | py | 816 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "controllers.PostCtrl",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fastapi.Path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "controllers.PostCt... |
6135984234 | import sympy
def perturbed_quants(terms, order):
    """Expand each symbol in `terms` as a power series in epsilon.

    For a term `x`, creates real symbols x0..x<order> and builds the
    substitution x -> sum_i x_i * epsilon**i.

    :param terms: iterable of sympy symbols to perturb
    :param order: highest epsilon power (>= 0)
    :return: (replacements, new_vars) where replacements is a list of
        (original_symbol, series_expression) pairs suitable for expr.subs(),
        and new_vars holds the tuple of coefficient symbols per term.
    """
    ep = sympy.symbols('epsilon', real=True)
    replacements = []
    new_vars = []
    for term in terms:
        raw = str(term)
        expanded = [raw + '%d' % expand for expand in range(order + 1)]
        # Bug fix: seq=True guarantees a tuple even when order == 0, where
        # sympy.symbols('x0') would otherwise return a bare Symbol and the
        # indexing below would raise TypeError.
        symbs = sympy.symbols(' '.join(expanded), real=True, seq=True)
        total = sum(symbs[ind] * ep**ind for ind in range(order + 1))
        replacements.append((term, total))
        new_vars.append(symbs)
    return replacements, new_vars
def main():
    """Symbolic perturbation analysis of a particle in a power-law central
    potential: H = (p_theta^2 / r^2 + p_r^2) / (2m) + alpha * r^n."""
    pth, pr, th, r = sympy.symbols('p_theta p_r theta r', real=True)
    alph, m, n, ep = sympy.symbols('alpha m n epsilon', real=True)
    coords = [th, r]
    momenta = [pth, pr]
    sympy.pprint(momenta)
    sympy.pprint(coords)
    # Hamiltonian in polar coordinates.
    ham = (pth**2*r**(-2)+pr**2)/(2*m)+alph*r**n
    sympy.pprint(ham)
    # Radial equation of motion: p_r_dot = -dH/dr.
    pr_dot = -ham.diff(r)
    sympy.pprint(pr_dot)
    # Angular momentum of the circular orbit (p_r_dot == 0); [1] selects one
    # of the two roots -- presumably the positive branch; verify ordering.
    pth0 = sympy.solve(pr_dot, pth)[1]
    # Perturb r to second order in epsilon; evaluate pth0 at the
    # zeroth-order radius r0.
    perturbing, new_vars = perturbed_quants([r], 2)
    pth0 = pth0.subs(r, new_vars[0][0])
    sympy.pprint(pth0)
    pr_dot1 = pr_dot.subs(pth, pth0).expand().simplify()
    sympy.pprint(pr_dot1)
    # sympy.pprint(pr_dot1.expand().simplify())
    sympy.pprint(perturbing)
    pr_dot2 = pr_dot1.subs(perturbing)
    sympy.pprint(pr_dot2)
    # Collect by powers of epsilon to read off the perturbation equations.
    sympy.pprint(pr_dot2.expand().simplify().collect(ep))
# Run the symbolic derivation when invoked as a script.
if __name__ == '__main__':
    main()
'''
austin shenans
laser tag?
http://www.blazertag.com/
esther's follies?
https://www.esthersfollies.com/
http://www.austincityguide.com/listings/south-congress-avenue-shopping
escape rooms?
AR adventure?
https://www.thearadventureaustin.com/
transcendental brunch
http://moonshinegrill.com/menus/
'''
| wolfram74/worked_problems | docs/summer_19/week_2019_07_08/scratch2.py | scratch2.py | py | 1,660 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sympy.symbols",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sympy.symbols",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sympy.symbols",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sympy.symbols",
"line_nu... |
1656456443 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/9/8 12:39
# @Author : JJkinging
# @File : utils.py
from torch.utils.data import Dataset, DataLoader
from data.code.predict.test_utils import load_vocab, collate_to_max_length
class CCFDataset(Dataset):
    """Joint intent / slot-filling dataset: pairs token sequences with intent
    labels, slot tag sequences, and slot-none labels, pre-indexed via the
    supplied vocabularies."""

    def __init__(self, filename, intent_filename, slot_filename, slot_none_filename, vocab, intent_dict,
                 slot_none_dict, slot_dict, max_length=512):
        '''
        :param filename: data file to read, e.g. train_seq_in.txt
        :param intent_filename: train_intent_label.txt or dev_intent_label.txt
        :param slot_filename: train_seq_out.txt
        :param slot_none_filename: train_slot_none.txt or dev_slot_none.txt
        :param slot_none_dict: slot_none vocabulary (label -> id)
        :param slot_dict: slot label vocabulary (label -> id)
        :param vocab: token vocabulary, e.g. BERT's vocab.txt
        :param intent_dict: intent -> id mapping
        :param max_length: maximum sentence length
        '''
        self.filename = filename
        self.intent_filename = intent_filename
        self.slot_filename = slot_filename
        self.slot_none_filename = slot_none_filename
        self.vocab = vocab
        self.intent_dict = intent_dict
        self.slot_none_dict = slot_none_dict
        self.slot_dict = slot_dict
        self.max_length = max_length
        self.result = []
        # Read the input sentences.
        with open(self.filename, 'r', encoding='utf-8') as fp:
            sen_data = fp.readlines()
            sen_data = [item.strip('\n') for item in sen_data]  # drop trailing newlines
        # Read intents and map them to ids.
        with open(self.intent_filename, 'r', encoding='utf-8') as fp:
            intent_data = fp.readlines()
            intent_data = [item.strip('\n') for item in intent_data]
            intent_ids = [intent_dict[item] for item in intent_data]
        # Read slot_none labels (space-separated, possibly several per line).
        with open(self.slot_none_filename, 'r', encoding='utf-8') as fp:
            slot_none_data = fp.readlines()
            # Drop trailing spaces/newlines, then split into label tokens.
            slot_none_data = [item.strip('\n').strip(' ').split(' ') for item in slot_none_data]
            # Convert slot_none labels to ids.
            slot_none_ids = [[self.slot_none_dict[ite] for ite in item] for item in slot_none_data]
        # Read the slot tag sequences.
        with open(self.slot_filename, 'r', encoding='utf-8') as fp:
            slot_data = fp.readlines()
            slot_data = [item.strip('\n') for item in slot_data]
            # slot_ids = [self.slot_dict[item] for item in slot_data]
        idx = 0
        for utterance in sen_data:
            utterance = utterance.split(' ')  # str -> token list
            slot_utterence = slot_data[idx].split(' ')
            # Truncate over-long sentences.
            # NOTE(review): the check uses max_length-2 but the slice uses
            # max_length, so after adding [CLS]/[SEP] the sequence can reach
            # max_length+2 -- confirm whether [:max_length-2] was intended.
            if len(utterance) > self.max_length-2:
                utterance = utterance[:max_length]
                slot_utterence = slot_utterence[:max_length]
            # input_ids with special tokens.
            utterance = ['[CLS]'] + utterance + ['[SEP]']
            input_ids = [int(self.vocab[i]) for i in utterance]
            length = len(input_ids)  # NOTE(review): currently unused
            # slot_ids with boundary tags.
            slot_utterence = ['[START]'] + slot_utterence + ['[EOS]']
            slot_ids = [int(self.slot_dict[i]) for i in slot_utterence]
            # Attention mask: all real tokens.
            input_mask = [1] * len(input_ids)
            intent_id = intent_ids[idx]
            slot_none_id = slot_none_ids[idx]  # list of slot_none ids
            idx += 1
            self.result.append((input_ids, slot_ids, input_mask, intent_id, slot_none_id))

    def __len__(self):
        return len(self.result)

    def __getitem__(self, index):
        input_ids, slot_ids, input_mask, intent_id, slot_none_id = self.result[index]
        return input_ids, slot_ids, input_mask, intent_id, slot_none_id
if __name__ == "__main__":
    # Smoke test: build the dataset from the training files and print one batch.
    filename = '../dataset/final_data/train_seq_in.txt'
    vocab_file = '../dataset/pretrained_model/erine/vocab.txt'
    intent_filename = '../dataset/final_data/train_intent_label.txt'
    slot_filename = '../dataset/final_data/train_seq_out.txt'
    slot_none_filename = '../dataset/final_data/train_slot_none.txt'
    intent_label = '../dataset/final_data/intent_label.txt'
    slot_label = '../dataset/final_data/slot_label.txt'
    slot_none_vocab = '../dataset/final_data/slot_none_vocab.txt'
    intent_dict = load_vocab(intent_label)
    slot_dict = load_vocab(slot_label)
    slot_none_dict = load_vocab(slot_none_vocab)
    vocab = load_vocab(vocab_file)
    dataset = CCFDataset(filename, intent_filename, slot_filename, slot_none_filename, vocab, intent_dict,
                         slot_none_dict, slot_dict)
    dataloader = DataLoader(dataset, shuffle=False, batch_size=8, collate_fn=collate_to_max_length)
    for batch in dataloader:
        print(batch)
        break
| SCU-JJkinging/CCIR-Cup | data/code/scripts/dataset.py | dataset.py | py | 4,968 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "data.code.predict.test_utils.load_vocab",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "data.code.predict.test_utils.load_vocab",
"line_number": 113,
"usage_ty... |
40378963481 | from locust import HttpUser, task
from urllib3.exceptions import InsecureRequestWarning
import urllib3
# Suppress self-signed-certificate warnings for the test environment.
urllib3.disable_warnings(InsecureRequestWarning)

__version__ = "1"

# Canned query-parameter presets for the search endpoint, keyed by scenario.
params = {}
params["all"] = {
    "types[0]": "software-catalog",
}
params["all_components"] = {
    "types[0]": "software-catalog",
    "filters[kind]": "Component",
}
params["not_found"] = {
    "types[0]": "software-catalog",
    "term": "n/a"
}
params["components_by_lifecycle"] = {
    "types[0]": "software-catalog",
    "filters[kind]": "Component",
    "filters[lifecycle][0]": "experimental",
}

base_path = "/api/search/query"
class SearchCatalogTest(HttpUser):
    """Locust user that exercises the catalog search endpoint with the fixed
    query presets defined in `params`."""

    def on_start(self):
        # Accept self-signed certificates for every request in this session.
        self.client.verify = False

    def search(self, query="all") -> None:
        """Issue one search request using the named parameter preset."""
        preset = params[query]
        self.client.get(base_path, verify=False, params=preset)

    @task
    def searchAll(self) -> None:
        self.search("all")

    @task
    def searchAllComponents(self) -> None:
        self.search("all_components")

    @task
    def searchNotFound(self) -> None:
        self.search("not_found")

    @task
    def searchComponentsByLifecycle(self) -> None:
        self.search("components_by_lifecycle")
| redhat-performance/backstage-performance | scenarios/search-catalog.py | search-catalog.py | py | 1,228 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions.InsecureRequestWarning",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "locust.HttpUser",
"line_number": 34,
"usage_type": "name"
},
{
... |
736500735 | import bs4
import requests
import json
from io import StringIO
import gzip
import csv
import codecs
from bs4 import BeautifulSoup
import sys
import io
import StringIO
# Python 2 only: reload re-exposes setdefaultencoding so implicit
# str<->unicode conversions use UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
linker = []  # de-duplicated absolute links harvested from crawl records
myTopics = ["football","basketball","nba","mls","nfl","nhl","cricket","soccer"]
def GetRecords():
    """Query the Common Crawl index for espn.com captures.

    :return: list of parsed JSON index records (one per capture), or an
        empty list when the index request does not return HTTP 200.
    """
    ccUrl = "http://index.commoncrawl.org/CC-MAIN-2019-13-index?url=https://www.espn.com/&matchType=domain&output=json"
    response = requests.get(ccUrl)
    if response.status_code != 200:
        return []
    return [json.loads(line) for line in response.content.splitlines()]
def GetData(recordList):
    """Download WARC captures for the given index records, harvesting unique
    absolute http links into the global `linker` (up to ~800), and write them
    to hrefs_espn19.txt. Returns the last parsed HTTP response body.

    NOTE(review): Python 2 code (StringIO.StringIO, str.encode usage).
    """
    count = 0
    for record in recordList:
        if count > 800:
            break;
        # Byte range of this capture inside the gzipped WARC archive.
        offset, length = int(record['offset']), int(record['length'])
        offset_end = offset + length - 1
        prefix = "https://commoncrawl.s3.amazonaws.com/"
        response = requests.get(prefix + record['filename'], headers={'Range': 'bytes={}-{}'.format(offset, offset_end)})
        # Inflate the ranged download in memory.
        raw_data = StringIO.StringIO(response.content)
        f = gzip.GzipFile(fileobj=raw_data)
        data = f.read()
        response = ""
        if(len(data)):
            # A capture is WARC header + HTTP header + body, CRLF-separated.
            warc, header, response = data.strip().split('\r\n', 2)
        parser = BeautifulSoup(response, 'html.parser')
        links = parser.find_all("a")
        if links:
            for link in links:
                if isinstance(link, str):
                    continue
                href = link.attrs.get("href")
                if href is not None:
                    # Keep only new, absolute links.
                    if href.encode('utf-8') not in linker and href.startswith("http"):
                        linker.append(href.encode('utf-8'))
                        count = count + 1
                        print(str(count))
                        if count > 800:
                            break
    # Persist everything collected so far.
    with open("hrefs_espn19.txt", 'w+') as file:
        for link in linker:
            file.write(link.encode("utf-8"))
            file.write("\n")
    return response
def ScrapData(filename):
    """For every URL listed in `filename`, fetch the page, concatenate the
    text of its <p> tags, and save it lower-cased to '<i>_espn19.txt'.

    NOTE(review): the bare except hides all errors with a generic message;
    textFile.close() sits outside the try and only closes the *last* file --
    it raises NameError if no file was ever opened, and files opened inside
    the loop are never closed individually. Flagged, not changed.
    """
    with open(filename) as f:
        try:
            i = 0
            urls = f.read().split()
            for htmlLink in urls:
                print(htmlLink)
                page = requests.get(htmlLink)
                soup = BeautifulSoup(page.text, 'html.parser')
                text = ""
                for para in soup.find_all('p'):
                    text += para.get_text()
                if text != "":
                    text = text.lower()
                    textFile = open(str(i) + "_espn19.txt", "w+")
                    textFile.write(text.encode('utf-8'))
                    i = i + 1
        except:
            print("Something went wrong...")
        textFile.close()
if __name__ == '__main__':
    # Reference - https://www.bellingcat.com/resources/2015/08/13/using-python-to-mine-common-crawl/
    # Stage 1 (disabled): harvest links from Common Crawl index records.
    # recordList = GetRecords()
    # GetData(recordList)
    # Stage 2: scrape article text from the previously harvested links.
    ScrapData("hrefs_espn19.txt")
{
"api_name": "sys.setdefaultencoding",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.get",
"li... |
74525949792 | import argparse
import collections
import glob
import json
import math
import numpy as np
import random
from ordered_set import OrderedSet
import os
import pickle
import shutil
from sklearn.metrics import average_precision_score
import sys
import termcolor
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn
import torch.optim as optim
from tqdm import tqdm
from NAACL import vocabulary
from NAACL import settings
from NAACL import util
WORD_VEC_FILE = 'wordvec/PubMed-and-PMC-w2v.txt'  # pre-trained biomedical word2vec
WORD_VEC_NUM_LINES = 4087447  # line count of WORD_VEC_FILE, for tqdm progress

EMB_SZIE = 200  # size of word embeddings (sic: misspelled name kept for compatibility)
PARA_EMB_SIZE = 100  # size of paragraph index embeddings
PARA_EMB_MAX_SPAN = 1000  # wavelength base for sinusoidal paragraph encodings
MAX_ENTITIES_PER_TYPE = 200  # placeholder pool size per entity type
MAX_NUM_PARAGRAPHS = 200
MAX_NUM_CANDIDATES = 10000
ALL_ENTITY_TYPES = ['drug', 'gene', 'variant']
ALL_ENTITY_TYPES_PAIRS = [('drug', 'gene'), ('drug', 'variant'), ('gene', 'variant')]
MAX_PARAGRAPH_LENGTH = 800
CLIP_THRESH = 5  # Gradient clipping (on L2 norm)

JAX_DEV_PMIDS_FILE = 'jax/jax_dev_pmids.txt'
JAX_TEST_PMIDS_FILE = 'jax/jax_test_pmids.txt'

# Set externally (e.g. by main) to mirror log() output to a file.
log_file = None
def log(msg):
    """Write msg to stderr and, when the module-level log_file is set, to it too."""
    streams = [sys.stderr]
    if log_file:
        streams.append(log_file)
    for stream in streams:
        print(msg, file=stream)
# A single entity mention inside one paragraph: token span [start, end),
# entity type ('drug' / 'gene' / 'variant'), and canonical entity name.
ParaMention = collections.namedtuple(
    'ParaMention', ['start', 'end', 'type', 'name'])
class Candidate(object):
    """A (drug, gene, variant) relation candidate with an optional label.

    Any entity slot may be None, in which case the candidate represents a
    pair (or single entity) rather than a full triple. Candidates are
    hashable and compared by (drug, gene, variant, label).
    """

    def __init__(self, drug=None, gene=None, variant=None, label=None):
        self.drug = drug
        self.gene = gene
        self.variant = variant
        self.label = label

    def remove_entity(self, i, new_label=None):
        '''
        Return a new Candidate with entity slot |i| blanked out.

        Slot order is (drug, gene, variant); the label is replaced by
        new_label rather than copied.
        '''
        entities = list(self.get_entities())
        entities[i] = None
        return Candidate(*entities, label=new_label)

    def get_entities(self):
        """Return the (drug, gene, variant) tuple."""
        return (self.drug, self.gene, self.variant)

    def is_triple(self):
        """Truthy iff all three entity slots are filled."""
        drug, gene, variant = self.get_entities()
        return drug and gene and variant

    def get_types(self):
        """Tuple of type names for the filled slots, in canonical order."""
        labeled = zip(('drug', 'gene', 'variant'), self.get_entities())
        return tuple(name for name, value in labeled if value)

    def __key(self):
        return (self.drug, self.gene, self.variant, self.label)

    def __eq__(self, other):
        return self.__key() == other.__key()

    def __hash__(self):
        return hash(self.__key())
class Example(object):
    """One document (PMID) with its paragraphs, mentions, and relation
    candidates (both full triples and typed pairs)."""

    def __init__(self, pmid, paragraphs, mentions, triple_candidates, pair_candidates):
        self.pmid = pmid
        self.paragraphs = paragraphs
        self.mentions = mentions
        self.triple_candidates = triple_candidates
        self.pair_candidates = pair_candidates
        # Index every mentioned entity name by type, preserving first-seen order.
        self.entities = collections.defaultdict(OrderedSet)
        for m_list in mentions:
            for m in m_list:
                self.entities[m.type].add(m.name)

    @classmethod
    def read_examples_file(cls, example_json_file):
        """Read a whole JSONL file of examples (one JSON object per line).

        Bug fix: this classmethod used to be named read_examples, identical
        to the per-line parser below, so the later definition silently
        shadowed it and this file-level reader was unreachable. Renamed so
        both are callable; the widely-used read_examples keeps its name.
        """
        results = []
        with open(os.path.join(settings.DATA_DIR, example_json_file)) as f:
            for line in f:
                ex = cls.read_examples(line)
                results.append(ex)
        return results

    @classmethod
    def read_examples(cls, example_json_str):
        """Parse a single JSON-encoded example string into an Example."""
        example_json = json.loads(example_json_str)
        mentions = [[ParaMention(**mention) for mention in paragraph_mentions]
                    for paragraph_mentions in example_json['mentions']]
        pair_candidates = {}
        for pair_key in example_json['pair_candidates']:
            # Keys are JSON-encoded type pairs, e.g. '["drug", "gene"]'.
            pair_key_tuple = tuple(json.loads(pair_key))
            pair_candidates[pair_key_tuple] = OrderedSet(
                Candidate(**x) for x in example_json['pair_candidates'][pair_key])
        triple_candidates = [Candidate(**x)
                             for x in example_json['triple_candidates']]
        return cls(example_json['pmid'],
                   example_json['paragraphs'],
                   mentions,
                   triple_candidates,
                   pair_candidates)
class Preprocessor(object):
    """Turns Example objects into model-ready tensors, replacing entity
    mentions with randomly assigned per-type placeholder tokens."""

    def __init__(self, entity_lists, vacab, device):
        # (parameter name 'vacab' kept as-is; it is the vocabulary object)
        self.entity_lists = entity_lists
        self.vocab = vacab
        self.device = device

    def count_labels(self, ex, pair_only=None):
        """Return (num_negative, num_positive) candidate labels for one example."""
        if pair_only:
            candidates = ex.pair_candidates[pair_only]
        else:
            candidates = ex.triple_candidates
        num_pos = sum(c.label for c in candidates)
        num_neg = sum(1 - c.label for c in candidates)
        return num_neg, num_pos

    def shuffle_entities(self, ex):
        """Rewrite an example's paragraphs with each entity replaced by a
        randomly sampled placeholder of the same type; mentions collapse to
        single-token spans. Returns (new_paragraphs, new_mentions)."""
        # Map every (type, name) to a random placeholder of that type.
        entity_map = {}
        for e_type in ex.entities:
            cur_ents = ex.entities[e_type]
            replacements = random.sample(self.entity_lists[e_type], len(cur_ents))
            for e_old, e_new in zip(cur_ents, replacements):
                entity_map[(e_type, e_old)] = e_new
        new_paras = []
        new_mentions = []
        for p, m_list in zip(ex.paragraphs, ex.mentions):
            new_para = []
            new_m_list = []
            mentions_at_loc = collections.defaultdict(list)
            in_mention = [False] * len(p)
            for m in m_list:
                mentions_at_loc[m.start].append((m.type, m.name))
                for i in range(m.start, m.end):
                    in_mention[i] = True
            for i in range(len(p)):
                if mentions_at_loc[i]:
                    # Emit one placeholder token per mention starting here.
                    for e_type, name in mentions_at_loc[i]:
                        e_new = entity_map[(e_type, name)]
                        m = ParaMention(len(new_para), len(new_para)+1, e_type, name)
                        new_m_list.append(m)
                        new_para.append(e_new)
                if not in_mention[i]:
                    # NOTE(review): this appends a single token to the list of
                    # *paragraphs*; new_para.append(p[i]) looks intended --
                    # confirm against the training pipeline before changing.
                    new_paras.append(p[i])
            new_paras.append(new_para)
            new_mentions.append(new_m_list)
        return new_paras, new_mentions

    def preprocess(self, ex, pair_only):
        """Tensorise one example: padded word-id matrix, paragraph position
        encodings, per-paragraph mention lists, unlabeled candidates, and
        label tensors.

        NOTE(review): pair_only is currently unused here -- labels are built
        for all pair types regardless.
        """
        new_paras, new_mentions = self.shuffle_entities(ex)
        para_prep = []
        for para_idx, (para, m_list) in enumerate(zip(new_paras, new_mentions)):
            word_idxs = torch.tensor(self.vocab.indexify_list(para),
                                     dtype=torch.long, device=self.device)
            # Sinusoidal positional features for the paragraph index, measured
            # both from the start and from the end of the document.
            para_from_start = [
                para_idx / math.pow(PARA_EMB_MAX_SPAN, 2*i / (PARA_EMB_SIZE // 4))
                for i in range(PARA_EMB_SIZE // 4)
            ]
            para_from_end = [
                (len(new_paras) - para_idx) / math.pow(PARA_EMB_MAX_SPAN, 2*i / (PARA_EMB_SIZE // 4))
                for i in range(PARA_EMB_SIZE // 4)
            ]
            para_args = torch.cat([torch.tensor(x, dtype=torch.float, device=self.device)
                                   for x in (para_from_start, para_from_end)])
            para_vec = torch.cat([torch.sin(para_args), torch.cos(para_args)])
            para_prep.append((word_idxs, para_vec, m_list))
        # sort for pack_padded_sequence (descending length)
        para_prep.sort(key=lambda x: len(x[0]), reverse=True)
        T, P = len(para_prep[0][0]), len(para_prep)
        # Pad word ids into a (max_len, num_paragraphs) matrix.
        para_mat = torch.zeros((T, P), device=self.device, dtype=torch.long)
        for i, x in enumerate(para_prep):
            cur_words = x[0]
            para_mat[:len(cur_words), i] = cur_words
        lenghts = torch.tensor([len(x[0]) for x in para_prep], device=self.device)
        triple_labels = torch.tensor([c.label for c in ex.triple_candidates],
                                     dtype=torch.float, device=self.device)
        pair_labels = {k: torch.tensor([c.label for c in ex.pair_candidates[k]],
                                       dtype=torch.float, device=self.device)
                       for k in ex.pair_candidates}
        para_vecs = torch.stack([x[1] for x in para_prep], dim=0)
        # Strip labels off the candidates (the model only sees entities).
        # The comprehension variable `ex` shadows the outer example, but is
        # scoped to the comprehension and harmless in Python 3.
        unlabeled_triple_cands = [Candidate(ex.drug, ex.gene, ex.variant)
                                  for ex in ex.triple_candidates]
        unlabeled_pair_cands = {k: [Candidate(ex.drug, ex.gene, ex.variant)
                                    for ex in ex.pair_candidates[k]]
                                for k in ex.pair_candidates}
        return (para_mat, lenghts, para_vecs, [x[2] for x in para_prep],
                unlabeled_triple_cands, unlabeled_pair_cands, triple_labels, pair_labels)
def logsumexp(inputs, dim=None, keepdim=False):
    '''
    Numerically stable log(sum(exp(inputs), dim=dim, keepdim=keepdim)).

    :param inputs: A variable with any shape.
    :param dim: An integer; when None the input is flattened and reduced
        over dimension 0.
    :param keepdim: A boolean.
    '''
    if dim is None:
        inputs, dim = inputs.view(-1), 0
    # Shift by the max so exp() cannot overflow.
    shift, _ = torch.max(inputs, dim=dim, keepdim=True)
    result = shift + (inputs - shift).exp().sum(dim=dim, keepdim=True).log()
    return result if keepdim else result.squeeze(dim)
class BackoffModel(nn.Module):
    '''
    Combine triple and pairwise information.

    Scores candidate (drug, gene, variant) triples and typed entity pairs
    from contextual mention embeddings, backing off to learned vectors for
    pairs/triples that never co-occur within a paragraph.
    '''

    def __init__(self, emb_mat, lstm_size, lstm_layers, device, use_lstm=True,
                 use_position=True, pool_method='max', dropout_prob=0.5, vocab=None,
                 pair_only=None):
        super(BackoffModel, self).__init__()
        self.device = device
        self.use_lstm = use_lstm
        self.use_position = use_position
        # Bug fix: this line previously read `self.pool_method - pool_method`
        # (a subtraction), which raised AttributeError on construction.
        self.pool_method = pool_method
        self.embs = nn.Embedding.from_pretrained(emb_mat, freeze=False)
        self.vocab = vocab
        self.pair_only = pair_only
        self.dropout = nn.Dropout(p=dropout_prob)
        para_emb_size = PARA_EMB_SIZE if use_position else 0
        if use_lstm:
            self.lstm_layers = lstm_layers
            self.lstm = nn.LSTM(EMB_SZIE + para_emb_size, lstm_size,
                                bidirectional=True, num_layers=lstm_layers)
        else:
            self.emb_linear = nn.Linear(EMB_SZIE + para_emb_size, 2 * lstm_size)
        # Per pair-type hidden layer, output head, and back-off vector.
        for t1, t2 in ALL_ENTITY_TYPES_PAIRS:
            setattr(self, 'hidden_%s_%s' %
                    (t1, t2), nn.Linear(4 * lstm_size, 2 * lstm_size))
            setattr(self, 'out_%s_%s' % (t1, t2), nn.Linear(2 * lstm_size, 1))
            setattr(self, 'backoff_%s_%s' % (t1, t2), nn.Parameter(
                torch.zeros(1, 2 * lstm_size)))
        self.hidden_triple = nn.Linear(3 * 2 * lstm_size, 2 * lstm_size)
        self.backoff_triple = nn.Parameter(torch.zeros(1, 2 * lstm_size))
        self.hidden_all = nn.Linear(4 * 2 * lstm_size, 2 * lstm_size)
        self.out_triple = nn.Linear(2 * lstm_size, 1)

    def pool(self, grouped_vecs):
        '''
        Pool each group of vectors down to one vector per group using the
        configured pool_method ('mean' / 'sum' / 'max' / 'softmax').
        '''
        if self.pool_method == 'mean':
            return torch.stack([torch.mean(g, dim=0) for g in grouped_vecs])
        elif self.pool_method == 'sum':
            return torch.stack([torch.sum(g, dim=0) for g in grouped_vecs])
        elif self.pool_method == 'max':
            return torch.stack([torch.max(g, dim=0)[0] for g in grouped_vecs])
        elif self.pool_method == 'softmax':
            return torch.stack([logsumexp(g, dim=0) for g in grouped_vecs])
        raise NotImplementedError

    def forward(self, word_idx_mat, lens, para_vecs, mentions,
                triple_candidates, pair_candidates):
        '''
        :param word_idx_mat: list of word indices, size(T, P)
        :param lens: list of paragraph lengths, size(P)
        :param para_vecs: list of paragraph vectors, size(P, pe)
        :param mentions: list of list of ParaMention
        :param triple_candidates: list of unlabeled Candidate
        :param pair_candidates: dict of type-pair -> list of unlabeled Candidate
        :return: (triple_logits or None, dict of pair logits)
        '''
        T, P = word_idx_mat.shape  # T=num_toks, P=num_paras
        # Organize the candidate pairs and triples
        pair_to_idx = {}
        pair_sets = collections.defaultdict(set)
        for (t1, t2), cands in pair_candidates.items():
            pair_to_idx[(t1, t2)] = {c: i for i, c, in enumerate(cands)}
            for c in cands:
                pair_sets[(t1, t2)].add(c)
        triple_to_idx = {c: i for i, c in enumerate(triple_candidates)}
        # Build local embeddings of each word
        embs = self.embs(word_idx_mat)  # T, P, e
        if self.use_position:
            para_embs = para_vecs.unsqueeze(0).expand(T, -1, -1)  # T, P, pe
            embs = torch.cat([embs, para_embs], dim=2)  # T, P, e + pe
        if self.use_lstm:
            lstm_in = rnn.pack_padded_sequence(embs, lens)  # T, P, e + pe
            lstm_out_packed, _ = self.lstm(lstm_in)
            embs, _ = rnn.pad_packed_sequence(lstm_out_packed)  # T, P, 2*h
        else:
            embs = self.emb_linear(embs)  # T, P, 2*h
        # Gather co-occurring mention pairs and triples
        pair_inputs = {(t1, t2): [[] for i in range(len(cands))]
                       for (t1, t2), cands in pair_candidates.items()}
        triple_inputs = [[] for i in range(len(triple_candidates))]
        for para_idx, m_list in enumerate(mentions):
            typed_mentions = collections.defaultdict(list)
            for m in m_list:
                typed_mentions[m.type].append(m)
            for t1, t2 in ALL_ENTITY_TYPES_PAIRS:
                if self.pair_only and self.pair_only != (t1, t2):
                    continue
                for m1 in typed_mentions[t1]:
                    for m2 in typed_mentions[t2]:
                        query_cand = Candidate(**{t1: m1.name, t2: m2.name})
                        if query_cand in pair_to_idx[(t1, t2)]:
                            idx = pair_to_idx[(t1, t2)][query_cand]
                            cur_vecs = torch.cat([embs[m1.start, para_idx, :],
                                                  embs[m2.start, para_idx, :]])  # 4*h
                            pair_inputs[(t1, t2)][idx].append(cur_vecs)
            if self.pair_only:
                continue
            for m1 in typed_mentions['drug']:
                for m2 in typed_mentions['gene']:
                    for m3 in typed_mentions['variant']:
                        query_cand = Candidate(m1.name, m2.name, m3.name)
                        if query_cand in triple_to_idx:
                            idx = triple_to_idx[query_cand]
                            cur_vecs = torch.cat(
                                [embs[m1.start, para_idx, :],
                                 embs[m2.start, para_idx, :],
                                 embs[m3.start, para_idx, :]])  # 6*h
                            triple_inputs[idx].append(cur_vecs)
        # Compute local mention pair/triple representations
        pair_vecs = {}
        for t1, t2 in ALL_ENTITY_TYPES_PAIRS:
            if self.pair_only and self.pair_only != (t1, t2):
                continue
            cur_group_sizes = [len(vecs) for vecs in pair_inputs[(t1, t2)]]
            if sum(cur_group_sizes) > 0:
                cur_stack = torch.stack([
                    v for vecs in pair_inputs[(t1, t2)] for v in vecs])  # M, 4*h
                cur_m_reps = getattr(self, 'hidden_%s_%s' %
                                     (t1, t2))(cur_stack)  # M, 2*h
                cur_pair_grouped_vecs = list(torch.split(cur_m_reps, cur_group_sizes))
                for i in range(len(cur_pair_grouped_vecs)):
                    if cur_pair_grouped_vecs[i].shape[0] == 0:  # Back off
                        cur_pair_grouped_vecs[i] = getattr(self,
                                                           'backoff_%s_%s' % (t1, t2))
            else:
                cur_pair_grouped_vecs = [getattr(self, 'backoff_%s_%s' % (t1, t2))
                                         for vecs in pair_inputs[(t1, t2)]]
            pair_vecs[(t1, t2)] = torch.tanh(
                self.pool(cur_pair_grouped_vecs))  # P, 2*h
        if not self.pair_only:
            triple_group_sizes = [len(vecs) for vecs in triple_inputs]
            if sum(triple_group_sizes) > 0:
                triple_stack = torch.stack([
                    v for vecs in triple_inputs for v in vecs])  # M, 6*h
                triple_m_reps = self.hidden_triple(triple_stack)  # M, 2*h
                triple_grouped_vecs = list(
                    torch.split(triple_m_reps, triple_group_sizes))
                for i in range(len(triple_grouped_vecs)):
                    if triple_grouped_vecs[i].shape[0] == 0:  # back off
                        triple_grouped_vecs[i] = self.backoff_triple
            else:
                triple_grouped_vecs = [self.backoff_triple for vecs in triple_inputs]
            triple_vecs = torch.tanh(self.pool(triple_grouped_vecs))  # C, 2*h
        # Score candidate pairs
        pair_logits = {}
        for t1, t2 in ALL_ENTITY_TYPES_PAIRS:
            if self.pair_only and self.pair_only != (t1, t2):
                continue
            pair_logits[(t1, t2)] = getattr(self, 'out_%s_%s' % (t1, t2))(
                pair_vecs[(t1, t2)])[:, 0]  # M
        if self.pair_only:
            return None, pair_logits
        # Score candidate triples
        pair_feats_per_triple = [[], [], []]
        for c in triple_candidates:
            for i in range(3):
                pair = c.remove_entity(i)
                t1, t2 = pair.get_types()
                # Bug fix: this used to call the index dict --
                # pair_to_idx[(t1, t2)](pair) -- instead of subscripting it.
                pair_idx = pair_to_idx[(t1, t2)][pair]
                pair_feats_per_triple[i].append(
                    pair_vecs[(t1, t2)][pair_idx, :])  # 2*h
        triple_feats = torch.cat(
            [torch.stack(pair_feats_per_triple[0]),
             torch.stack(pair_feats_per_triple[1]),
             torch.stack(pair_feats_per_triple[2]),
             triple_vecs],
            dim=1)  # C, 8*h
        final_hidden = F.relu(self.hidden_all(triple_feats))  # C, 2*h
        triple_logits = self.out_triple(final_hidden)[:, 0]  # C
        return triple_logits, pair_logits
def get_entity_lists():
    """Build placeholder entity-name lists, one list per entity type.

    Returns a dict mapping each entity type to MAX_ENTITIES_PER_TYPE copies
    of its '__<type>__' placeholder token.
    """
    # Every slot holds the same placeholder, so this could be streamlined to a
    # single token per type.
    return {et: ['__%s__' % et] * MAX_ENTITIES_PER_TYPE
            for et in ALL_ENTITY_TYPES}
def count_labels(name, data, preprocessor, pair_only=None):
    """Sum negative/positive label counts over *data*, log and return them.

    Returns (num_neg, num_pos).
    """
    total_neg = total_pos = 0
    for example in data:
        neg, pos = preprocessor.count_labels(example, pair_only=pair_only)
        total_neg += neg
        total_pos += pos
    log('%s data: +%d, -%d' % (name, total_pos, total_neg))
    return total_neg, total_pos
def print_data_stats(data, name):
    """Print per-split maxima of paragraph and triple-candidate counts."""
    print(name)
    max_paragraphs = max(len(example.paragraphs) for example in data)
    max_candidates = max(len(example.triple_candidates) for example in data)
    print(' Max num paragraphs: %d' % max_paragraphs)
    print(' Max num triple candidates: %d' % max_candidates)
def init_word_vecs(device, vocab, all_zero=False):
    """Initialize the word-embedding matrix, optionally from pre-trained vectors.

    Parameters
    ----------
    device : torch.device
        Device on which to allocate the embedding tensor.
    vocab :
        Vocabulary supporting `in`, len(), and get_index().
    all_zero : bool
        If True, skip reading WORD_VEC_FILE and return all zeros (used when
        weights will be restored from a checkpoint instead).

    Returns
    -------
    torch.Tensor of shape (len(vocab), EMB_SZIE); rows for words found in the
    pre-trained vector file are filled in, the rest stay zero.
    """
    num_pretrained = 0
    embs = torch.zeros((len(vocab), EMB_SZIE), dtype=torch.float, device=device)
    if not all_zero:
        with open(os.path.join(settings.DATA_DIR, WORD_VEC_FILE)) as f:
            for line in tqdm(f, total=WORD_VEC_NUM_LINES):
                toks = line.strip().split(' ')
                # Skip malformed lines (wrong dimensionality).
                if len(toks) != EMB_SZIE + 1:
                    continue
                word = toks[0]
                if word in vocab:
                    idx = vocab.get_index(word)
                    embs[idx, :] = torch.tensor([float(x) for x in toks[1:]],
                                                dtype=torch.float, device=device)
                    num_pretrained += 1
    # BUG FIX: the coverage percentage was computed as 100*0 * ... and was
    # therefore always reported as 0.00%.
    log('Found pre-trained vectors for %d/%d = %.2f%% words' % (
        num_pretrained, len(vocab), 100.0 * num_pretrained / len(vocab)))
    return embs
def train(model, train_data, dev_data, preprocessor, num_epochs, lr, ckpt_iters,
          downsample_to, out_dir, lr_decay=1.0, pos_weight=None, use_pair_loss=True,
          pair_only=None):
    """Train *model* with Adam + exponential LR decay, evaluating on dev.

    Every `ckpt_iters` steps the model is evaluated on `dev_data` and (when
    `out_dir` is set) checkpointed. In pair-only mode the loss uses only the
    requested pair's logits; otherwise the triple loss is used, optionally
    augmented with the per-pair losses (`use_pair_loss`).

    BUG FIXES: `model.paraments()` is not part of the nn.Module API — replaced
    with `model.parameters()`; the long-removed `clip_grad_norm` was replaced
    with the in-place `clip_grad_norm_`.
    """
    model.train()
    if ckpt_iters > len(train_data):
        ckpt_iters = len(train_data)  # Checkpoint at least once per epoch
    loss_func = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.Adam(params, lr=lr)
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
    train_data = list(train_data)  # Copy before shuffling
    num_iters = 0
    best_ap = 0.0  # NOTE: currently unused; checkpoint selection happens offline
    train_loss = 0.0
    for t in range(num_epochs):
        t0 = time.time()
        random.shuffle(train_data)
        if not downsample_to:
            cur_train = tqdm(train_data)
        else:
            cur_train = train_data  # tqdm is annoying on downsampled data
        for ex in cur_train:
            model.zero_grad()
            ex_torch = preprocessor.preprocess(ex, pair_only)
            triple_labels, pair_labels = ex_torch[-2:]
            triple_logits, pair_logits = model(*ex_torch[:-2])
            if pair_only:
                loss = loss_func(pair_logits[pair_only], pair_labels[pair_only])
            else:
                loss = loss_func(triple_logits, triple_labels)
                if use_pair_loss:
                    # Multi-task: add the auxiliary per-pair objectives.
                    for t1, t2 in ALL_ENTITY_TYPES_PAIRS:
                        loss += loss_func(pair_logits[(t1, t2)], pair_labels[(t1, t2)])
            train_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), CLIP_THRESH)
            optimizer.step()
            num_iters += 1
            if num_iters % ckpt_iters == 0:
                model.eval()
                dev_preds, dev_loss = predict(
                    model, dev_data, preprocessor, loss_func=loss_func,
                    use_pair_loss=use_pair_loss, pair_only=pair_only)
                log('Iter %d: train loss = %.6f, dev loss = %.6f' % (
                    num_iters, train_loss / ckpt_iters, dev_loss))
                train_loss = 0.0
                p_doc, r_doc, f1_doc, ap_doc = evaluate(dev_data, dev_preds,
                                                        pair_only=pair_only)
                log(' Document-level : p=%.2f%% r=%.2f%% f1=%.2f%% ap=%.2f%%' % (
                    100 * p_doc, 100 * r_doc, 100 * f1_doc, 100 * ap_doc))
                if out_dir:
                    save_model(model, num_iters, out_dir)
                model.train()
        scheduler.step()
        t1 = time.time()
        log('Epoch %s: took %s' % (str(t).rjust(3), util.secs_to_str(t1 - t0)))
def predict(model, data, preprocessor, loss_func=None, use_pair_loss=True, pair_only=None):
    """Run the model over *data* and return per-example probability lists.

    Returns `preds` (list of per-candidate probabilities per example), or
    `[preds, avg_loss]` when `loss_func` is provided.

    BUG FIX: in triple mode (pair_only is None) the probabilities were read
    from `pair_logits[pair_only]` — a KeyError, since the dict is keyed by
    type pairs — instead of from `triple_logits`. Also removed the unused
    `all_logits` accumulator.
    """
    loss = 0.0
    preds = []
    with torch.no_grad():
        for ex in data:
            ex_torch = preprocessor.preprocess(ex, pair_only)
            triple_labels, pair_labels = ex_torch[-2:]
            triple_logits, pair_logits = model(*ex_torch[:-2])
            if loss_func:
                if pair_only:
                    loss += loss_func(pair_logits[pair_only], pair_labels[pair_only])
                else:
                    loss += loss_func(triple_logits, triple_labels)
                    if use_pair_loss:
                        for t1, t2 in ALL_ENTITY_TYPES_PAIRS:
                            loss += loss_func(pair_logits[(t1, t2)], pair_labels[(t1, t2)])
            logits = pair_logits[pair_only] if pair_only else triple_logits
            # Sigmoid converts each logit to a probability.
            preds.append([1 / (1 + np.exp(-z.item())) for z in logits])
    out = [preds]
    if loss_func:
        out.append(loss / len(data))
    if len(out) == 1:
        return out[0]
    return out
COLORS = {'drug': 'red', 'variant': 'cyan', 'gene': 'green'}
def pprint_example(ex, f=sys.stdout):
    """Pretty-print one example, colorizing mention tokens by entity type."""
    print('PMID %s' % ex.pmid, file=f)
    for para_idx, (paragraph, m_list) in enumerate(zip(ex.paragraphs, ex.mentions)):
        # Map token index -> entity type for every mention span.
        word_to_type = {}
        for mention in m_list:
            for tok_idx in range(mention.start, mention.end):
                word_to_type[tok_idx] = mention.type
        rendered = [
            termcolor.colored(tok, COLORS[word_to_type[tok_idx]])
            if tok_idx in word_to_type else tok
            for tok_idx, tok in enumerate(paragraph)
        ]
        print(' Paragraph %d: %s' % (para_idx, ' '.join(rendered)), file=f)
def evaluate(data, probs, name=None, threshold=0.5, pair_only=None):
    """Compute precision/recall/F1 at *threshold* plus average precision.

    Returns (p, r, f1, ap). When *name* is given, also logs a header with
    candidate counts and the metric summary.
    """
    def get_candidates(ex):
        # Pair mode scores one (t1, t2) candidate list; otherwise triples.
        return ex.pair_candidates[pair_only] if pair_only else ex.triple_candidates

    if name:
        n_candidates = sum(len(get_candidates(ex)) for ex in data)
        n_pos = sum(1 for ex in data for c in get_candidates(ex) if c.label == 1)
        n_neg = sum(1 for ex in data for c in get_candidates(ex) if c.label == 0)
        log('== %s, document-level: %d documents, %d candidates (+%d, -%d) ==' % (
            name, len(data), n_candidates, n_pos, n_neg))
    tp = fp = fn = 0
    y_true, y_pred = [], []
    for ex, prob_list in zip(data, probs):
        for cand, prob in zip(get_candidates(ex), prob_list):
            y_true.append(cand.label)
            y_pred.append(prob)
            predicted_positive = prob > threshold
            if predicted_positive and cand.label == 1:
                tp += 1
            elif predicted_positive:
                fp += 1
            elif cand.label == 1:
                fn += 1
    ap = average_precision_score(y_true, y_pred)
    if name:
        log(util.get_prf(tp, fp, fn, get_str=True))
        log('AvgPrec : %.2f%%' % (100.0 * ap))
    p, r, f = util.get_prf(tp, fp, fn)
    return p, r, f, ap
def predict_write(model, data, preprocessor, out_dir, ckpt, data_name, pair_only):
    """Run prediction over *data*, pretty-print, and optionally write a TSV.

    BUG FIX: `out_path` was only assigned inside the `if out_dir:` branch, so
    the final `if out_path:` raised NameError whenever out_dir was falsy; it
    now defaults to None.
    """
    out_path = None
    if out_dir:
        if ckpt:
            out_path = os.path.join(out_dir, 'pred_%s_%07d.tsv' % (data_name, ckpt))
        else:
            out_path = os.path.join(out_dir, 'pred_%s.tsv' % data_name)
        # Only one pprint necessary
        pprint_out = os.path.join(out_dir, 'dev_pprint.txt')
    else:
        pprint_out = None
    pred = predict(model, tqdm(data), preprocessor, pair_only=pair_only)
    pprint_predictions(data, pred, preprocessor, fn=pprint_out)
    if out_path:
        write_predictions(data, pred, out_path, pair_only=pair_only)
def pprint_predictions(data, preds, preprocessor, threshold=0.5, fn=None):
    """Pretty-print each example followed by its per-candidate predictions.

    Output goes to file *fn* when given, else stdout. `preprocessor` is
    accepted for call-site compatibility but not used here.

    Cleanup: removed the unused locals (`new_paras`, `new_mentions`) and the
    unused enumerate indices from the original.
    """
    f = open(fn, 'w') if fn else sys.stdout
    for ex, pred_list in zip(data, preds):
        pprint_example(ex, f=f)
        for c, pred in zip(ex.triple_candidates, pred_list):
            pred_label = pred > threshold
            print(' (%s, %s, %s): pred=%s (p=%.4f), gold=%s, correct=%s' % (
                c.drug, c.gene, c.variant, pred_label, pred,
                c.label == 1, pred_label == (c.label == 1)), file=f)
        print('', file=f)
    if fn:
        f.close()
def write_predictions(data, preds, fn, pair_only=None):
    """Write one TSV row per candidate: index, pmid, drug, gene, variant, prob."""
    row_idx = 0
    with open(fn, 'w') as out:
        for ex, pred_list in zip(data, preds):
            candidates = (ex.pair_candidates[pair_only] if pair_only
                          else ex.triple_candidates)
            for cand, prob in zip(candidates, pred_list):
                out.write('%d\t%s\t%s\t%s\t%s\t%.6f\n' % (
                    row_idx, ex.pmid, cand.drug, cand.gene, cand.variant, prob))
                row_idx += 1
def make_vocab(train_data, entity_lists, unk_thresh):
    """Build a Vocabulary from non-mention tokens plus entity placeholders.

    Placeholder tokens are added unconditionally ("hard"); ordinary words go
    through add_word and are subject to the UNK threshold. Tokens inside
    mention spans are skipped.
    """
    vocab = vocabulary.Vocabulary(unk_threshold=unk_thresh)
    for placeholder_list in entity_lists.values():
        for placeholder in placeholder_list:
            vocab.add_word_hard(placeholder)
    for ex in tqdm(train_data):
        for paragraph, mention_list in zip(ex.paragraphs, ex.mentions):
            covered = [False] * len(paragraph)
            for mention in mention_list:
                for tok_idx in range(mention.start, mention.end):
                    covered[tok_idx] = True
            for tok_idx, word in enumerate(paragraph):
                if not covered[tok_idx]:
                    vocab.add_word(word)
    return vocab
def save_model(model, num_iters, out_dir):
    """Serialize model weights to <out_dir>/model.<7-digit iter>.pth."""
    ckpt_path = os.path.join(out_dir, 'model.%07d.pth' % num_iters)
    torch.save(model.state_dict(), ckpt_path)
def load_model(model, load_dir, device, load_ckpt):
    """Load weights from <load_dir>/model.<load_ckpt>.pth into *model* in place.

    NOTE(review): the commented-out fallback that read best_model.txt when
    load_ckpt is None is disabled, so load_ckpt must be an int here — passing
    None would break the %07d format below. Confirm callers always supply a
    checkpoint number.
    """
    # if not load_ckpt:
    #   with open(os.path.join(load_dir, 'best_model.txt')) as f:
    #       load_ckpt = int(f.read().strip().split('\t')[0])
    fn = os.path.join(load_dir, 'model.%07d.pth' % load_ckpt)
    log('Loading model from %s' % fn)
    # map_location lets CPU-only runs load checkpoints trained on GPU.
    model.load_state_dict(torch.load(fn, map_location=device))
def predict_write(model, data, preprocessor, out_dir, ckpt, data_name, pair_only):
    """Run prediction over *data*, pretty-print, and optionally write a TSV.

    NOTE(review): this is a byte-for-byte duplicate definition that shadows an
    identical function earlier in the module — consider deleting one copy.

    BUG FIX: `out_path` was only assigned inside the `if out_dir:` branch, so
    the final `if out_path:` raised NameError whenever out_dir was falsy; it
    now defaults to None.
    """
    out_path = None
    if out_dir:
        if ckpt:
            out_path = os.path.join(out_dir, 'pred_%s_%07d.tsv' % (data_name, ckpt))
        else:
            out_path = os.path.join(out_dir, 'pred_%s.tsv' % data_name)
        # Only one pprint necessary
        pprint_out = os.path.join(out_dir, 'dev_pprint.txt')
    else:
        pprint_out = None
    pred = predict(model, tqdm(data), preprocessor, pair_only=pair_only)
    pprint_predictions(data, pred, preprocessor, fn=pprint_out)
    if out_path:
        write_predictions(data, pred, out_path, pair_only=pair_only)
def get_ds_train_dev_pmids(pmid_file):
    """Split PMIDs from *pmid_file* into train (70%) and dev (next 10%) sets.

    PMIDs are sorted before shuffling so the split depends only on the seeded
    RNG state, not on file order. The trailing 20% of the shuffle is unused.
    """
    with open(os.path.join(settings.DATA_DIR, pmid_file)) as f:
        pmids = sorted(line.strip() for line in f if line.strip())
    random.shuffle(pmids)
    train_end = int(round(len(pmids) * 0.7))
    dev_end = int(round(len(pmids) * 0.8))
    return set(pmids[:train_end]), set(pmids[train_end:dev_end])
def parse_args(args):
    """Parse command-line options; print help and exit(1) when run with no args."""
    parser = argparse.ArgumentParser()
    # Required params
    # parser.add_argument('para_file', help='JSON object storing paragraph text')
    # parser.add_argument('mention_file', help='List of mentions for relevant paragraphs')
    parser.add_argument('--ds-train-dev-file', help='Training examples')
    parser.add_argument('--jax-dev-test-file', help='Dev examples')
    # NOTE(review): help text below looks copy-pasted from --jax-dev-test-file.
    parser.add_argument('--init-pmid-file', default='pmid_lists/init_pmid_list.txt',
                        help='Dev examples')
    # Model architecture
    parser.add_argument('--lstm-size', '-c', default=200,
                        type=int, help='LSTM hidden state size.')
    parser.add_argument('--lstm-layers', '-l', default=1,
                        type=int, help='LSTM number of layers.')
    parser.add_argument('--pool', '-p', choices=['softmax', 'max', 'mean', 'sum'], default='softmax',
                        help='How to pool across mentions')
    parser.add_argument('--no-position', action='store_true',
                        help='Ablate paragraph index encodings')
    parser.add_argument('--no-lstm', action='store_true', help='Ablate LSTM')
    # Training
    parser.add_argument('--num-epochs', '-T', type=int,
                        default=10, help='Training epochs')
    parser.add_argument('--learning-rate', '-r', type=float,
                        default=1e-5, help='Learning rate.')
    parser.add_argument('--dropout-prob', '-d', type=float,
                        default=0.5, help='Dropout probability')
    parser.add_argument('--lr-decay', '-g', type=float, default=1.0,
                        help='Decay learning rate by this much each epoch.')
    parser.add_argument('--balanced', '-b', action='store_true',
                        help='Upweight positive examples to balance dataset')
    # NOTE(review): 'postiive' typo in the help string below (user-visible).
    parser.add_argument('--pos-weight', type=float, default=None,
                        help='Upweight postiive examples by this much')
    parser.add_argument('--use-pair-loss', action='store_true',
                        help="Multi-task on pair objective")
    # Data
    #parser.add_argument('--data-cache', default=DEFAULT_CACHE)
    parser.add_argument('--data-cache', default=None)
    parser.add_argument('--rng-seed', default=0, type=int, help='RNG seed')
    parser.add_argument('--torch-seed', default=0,
                        type=int, help='torch RNG seed')
    parser.add_argument('--downsample-to', default=None, type=int,
                        help='Downsample to this many examples per split')
    parser.add_argument('--unk-thresh', '-u', default=5, type=int,
                        help='Treat words with fewer than this many counts as <UNK>.')
    parser.add_argument('--print-dev', action='store_true',
                        help='Test on dev data')
    parser.add_argument('--jax', action='store_true', help='Test on JAX data')
    parser.add_argument('--jax-out', default='pred_jax.tsv')
    parser.add_argument('--text-level', choices=['document', 'paragraph', 'sentence'],
                        default='document', help='Split documents paragraph-wise or sentence-wise')
    parser.add_argument('--pair-only', default=None,
                        help='Comma-separated pair of entities to focus on only')
    # CPU vs. GPU
    parser.add_argument('--cpu-only', action='store_true',
                        help='Run on CPU only')
    parser.add_argument('--gpu-id', type=int, default=0,
                        help='GPU ID (default=0)')
    # Saving and loading
    parser.add_argument('--out-dir', '-o', default=None,
                        help='Where to write all output')
    parser.add_argument('--ckpt-iters', '-i', default=10000, type=int,
                        help='Checkpoint after this many training steps.')
    parser.add_argument(
        '--load', '-L', help='Directory to load model parameters and vocabulary')
    parser.add_argument('--load-ckpt', type=int, default=None,
                        help='Which checkpoint to use (default: use best_model.txt)')
    parser.add_argument('--try-all-checkpoints', action='store_true',
                        help='Make predictions for every checkpoint')
    parser.add_argument('--data-dir', help='root data directory')
    # Other
    parser.add_argument('--no-w2v', action='store_true',
                        help='No pre-trained word vectors')
    # With no CLI arguments at all, show usage instead of running with defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args(args)
def get_all_checkpoints(out_dir):
    """Return sorted iteration numbers of every model.<iter>.pth in *out_dir*."""
    checkpoint_paths = glob.glob(os.path.join(out_dir, 'model.*.pth'))
    iteration_numbers = (int(os.path.basename(p).split('.')[1])
                         for p in checkpoint_paths)
    return sorted(iteration_numbers)
def run(OPTS, device):
    """End-to-end pipeline: load/split data, build vocab + model, train, predict.

    Reads distant-supervision train/dev data and JAX dev/test data, filters
    examples without relevant candidates, trains when num_epochs > 0, then
    writes dev/test predictions (optionally for every saved checkpoint).
    """
    # Process pair-only mode
    pair_only = None
    if OPTS.pair_only:
        pair_only = tuple(OPTS.pair_only.split(','))
        if pair_only not in ALL_ENTITY_TYPES_PAIRS:
            raise ValueError('Bad value for pair_only: %s' % OPTS.pair_only)
    entity_lists = get_entity_lists()
    # Read data
    train_pmids_set, dev_ds_pmids_set = get_ds_train_dev_pmids(
        OPTS.init_pmid_file)
    ds_train_dev_data = Example.read_examples(OPTS.ds_train_dev_file)
    # Filter out examples that doesn't contain pair or triple candidates
    if pair_only:
        ds_train_dev_data = [x for x in ds_train_dev_data if pair_only in
                             x.pair_candidates and x.pair_candidates[pair_only]]
    else:
        ds_train_dev_data = [x for x in ds_train_dev_data if x.triple_candidates]
    train_data = [x for x in ds_train_dev_data if x.pmid in train_pmids_set]
    dev_ds_data = [x for x in ds_train_dev_data if x.pmid in dev_ds_pmids_set]
    random.shuffle(train_data)
    random.shuffle(dev_ds_data)
    jax_dev_test_data = Example.read_examples(OPTS.jax_dev_test_file)
    if pair_only:
        jax_dev_test_data = [x for x in jax_dev_test_data if pair_only in
                             x.pair_candidates and x.pair_candidates[pair_only]]
    else:
        jax_dev_test_data = [x for x in jax_dev_test_data if x.triple_candidates]
    random.shuffle(jax_dev_test_data)
    # JAX data is split into dev/test by fixed PMID lists.
    with open(os.path.join(settings.DATA_DIR, JAX_DEV_PMIDS_FILE)) as f:
        dev_jax_pmids_set = set(x.strip() for x in f if x.strip())
    with open(os.path.join(settings.DATA_DIR, JAX_TEST_PMIDS_FILE)) as f:
        test_pmids_set = set(x.strip() for x in f if x.strip())
    dev_jax_data = [x for x in jax_dev_test_data if x.pmid in dev_jax_pmids_set]
    test_data = [x for x in jax_dev_test_data if x.pmid in test_pmids_set]
    log('Read %d train, %d dev dist sup, %d dev jax, %d test examples' %
        (len(train_data), len(dev_ds_data), len(dev_jax_data), len(test_data)))
    vocab = make_vocab(train_data, entity_lists, OPTS.unk_thresh)
    log('Vocab size = %d.' % len(vocab))
    preprocessor = Preprocessor(entity_lists, vocab, device)
    num_neg, num_pos = count_labels('train', train_data, preprocessor,
                                    pair_only=pair_only)
    # Skip loading pre-trained vectors when restoring from a checkpoint.
    word_vecs = init_word_vecs(device, vocab, all_zero=OPTS.load or OPTS.no_w2v)
    log('Finished reading data.')
    # Run model
    model = BackoffModel(
        word_vecs, OPTS.lstm_size, OPTS.lstm_layers, device,
        use_lstm=not OPTS.no_lstm, use_position=not OPTS.no_position,
        pool_method=OPTS.pool, dropout_prob=OPTS.dropout_prob,
        vocab=vocab, pair_only=pair_only).to(device=device)
    if OPTS.load:
        load_model(model, OPTS.load, device, OPTS.load_ckpt)
    if OPTS.num_epochs > 0:
        log('Starting training.')
        pos_weight = None
        if OPTS.balanced:
            # Upweight positives by the neg/pos ratio to balance the loss.
            pos_weight = torch.tensor(float(num_neg) / num_pos, device=device)
        elif OPTS.pos_weight:
            pos_weight = torch.tensor(OPTS.pos_weight, device=device)
        train(model, train_data, dev_ds_data, preprocessor, OPTS.num_epochs,
              OPTS.learning_rate, OPTS.ckpt_iters, OPTS.downsample_to, OPTS.out_dir,
              pos_weight=pos_weight, lr_decay=OPTS.lr_decay,
              use_pair_loss=OPTS.use_pair_loss, pair_only=pair_only)
        log('Finished training.')
    model.eval()
    if OPTS.try_all_checkpoints:
        ckpts = get_all_checkpoints(OPTS.out_dir)
    else:
        ckpts = [None]
    for ckpt in ckpts:
        if ckpt:
            print('== Checkpoint %s == ' % ckpt, file=sys.stderr)
            load_model(model, OPTS.out_dir, device, ckpt)
        predict_write(model, dev_jax_data, preprocessor,
                      OPTS.out_dir, ckpt, 'dev', pair_only)
        predict_write(model, test_data, preprocessor,
                      OPTS.out_dir, ckpt, 'test', pair_only)
def main(OPTS):
    """Entry point: set up output dir + log file, seed RNGs, pick device, run.

    NOTE(review): `global log_file` implies a module-level log_file exists;
    if it is not initialized elsewhere, the `finally` below would raise
    NameError when --out-dir is unset — confirm against the module header.
    """
    if OPTS.out_dir:
        # Recreate the output directory from scratch on every run.
        if os.path.exists(OPTS.out_dir):
            shutil.rmtree(OPTS.out_dir)
        os.makedirs(OPTS.out_dir)
        global log_file
        log_file = open(os.path.join(OPTS.out_dir, 'log.txt'), 'w')
    log(OPTS)
    random.seed(OPTS.rng_seed)
    torch.manual_seed(OPTS.torch_seed)
    if OPTS.cpu_only:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:%d' % OPTS.gpu_id)
    try:
        run(OPTS, device)
    finally:
        # Always close the log file, even if run() throws.
        if log_file:
            log_file.close()
if __name__ == '__main__':
OPTS = parse_args(sys.argv[1:])
main(OPTS)
| acproject/GNNs | NAACL/backoffnet.py | backoffnet.py | py | 38,944 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sys.stderr",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "order... |
16136952400 | from bson import ObjectId
from fastapi import HTTPException
from starlette import status
from app.api.dto.user import User
from app.repository.entity.user_entity import UserEntity
from app.repository.user_repository import UserRepository
from app.util.logger import logger
class UserDBService:
    """Service layer around UserRepository with HTTP-500 error translation."""

    def __init__(self, user_repository: UserRepository):
        self.user_repository = user_repository

    def create_user(self, user_entity: UserEntity | dict) -> User:
        """Insert a user, re-read it from the store, and return it as a DTO."""
        logger.info("Creating user...")
        insert_result = self.user_repository.create_user(user_entity)
        if not insert_result.acknowledged:
            self._throw_internal_server_error("user creation failed.")
        matches = self.user_repository.read_users(insert_result.inserted_id)
        if len(matches) != 1:
            self._throw_internal_server_error(
                f"user with id {str(insert_result.inserted_id)} not found."
            )
        return self._map_user(matches[0])

    def update_user_ticket(self, user_id: str, new_ticket_id: str) -> User:
        """Append *new_ticket_id* to the user's ticket list and persist it."""
        logger.info("Updating user via adding tickets...")
        user_id = ObjectId(user_id)
        matches = self.user_repository.read_users(user_id)
        if len(matches) != 1:
            self._throw_internal_server_error(f"user with id {str(user_id)} not found.")
        user_entity = matches[0]
        ticket_ids = user_entity["ticket_ids"] if user_entity["ticket_ids"] else []
        ticket_ids.append(new_ticket_id)
        user_entity["ticket_ids"] = ticket_ids
        update_result = self.user_repository.update_user(user_id, user_entity)
        if not update_result.acknowledged:
            self._throw_internal_server_error(
                f"user with id {str(user_id)} not updated."
            )
        return self._map_user(user_entity)

    @staticmethod
    def _map_user(user_entity: UserEntity) -> User:
        # Mongo's "_id" is an ObjectId; expose it on the DTO as a plain string.
        dto = User.parse_obj(user_entity)
        dto.id = str(user_entity["_id"])
        return dto

    @staticmethod
    def _throw_internal_server_error(message: str):
        # Log first, then surface the same message as an HTTP 500.
        logger.error(message)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=message,
        )
| amosproj/amos2023ws01-ticket-chat-ai | Backend/app/service/user_db_service.py | user_db_service.py | py | 2,331 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "app.repository.user_repository.UserRepository",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "app.repository.entity.user_entity.UserEntity",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "app.util.logger.logger.info",
"line_number": 16,
... |
23044885851 | '''
Module untuk membantu dalam 'menjawab' query/request user
Reinaldo Antolis / 13519015
Jeane Mikha / 13519116
Josep Marcello / 13519164
27 April 2021
'''
from datetime import datetime, timedelta
from matching import boyer_moore
import re
def extract_date(msg: str) -> 'list[datetime]':
    """Extract every date occurrence in *msg*, in order, as datetime objects.

    Accepted format: <day><sep><month><sep><year>, where sep is '-', '/' or a
    space, day is 1-2 digits, month is 1-2 digits or an Indonesian month name,
    and year has 2 or 4 digits. Mixed separators are allowed
    (e.g. '27-April/2021').

    Raises
    ------
    ValueError
        If a matched date cannot be parsed (e.g. day out of range).
    """
    month_regex =\
        r'(januari|februari|maret|april|mei|juni|juli|agustus|september|oktober|november|desember)'
    sep = r'(\/|-| )'  # matches `/` or `-` or `space`
    numeric_form = r'\d{1,2}' + sep + r'\d{1,2}' + sep + r'\d{2,4}'
    named_form = r'\d{1,2}' + sep + month_regex + sep + r'\d{2,4}'
    full_pattern = r'(' + numeric_form + r'|' + named_form + r')'
    month_no = {
        'januari': '1',
        'februari': '2',
        'maret': '3',
        'april': '4',
        'mei': '5',
        'juni': '6',
        'juli': '7',
        'agustus': '8',
        'september': '9',
        'oktober': '10',
        'november': '11',
        'desember': '12',
    }
    results = []
    for groups in re.findall(full_pattern, msg, flags=re.IGNORECASE):
        # groups[0] is the outermost capture (the whole date); normalize all
        # separators to '/'.
        normalized = '/'.join(re.findall(r'[^ /\-]+', groups[0]))
        if re.search(named_form, normalized, flags=re.IGNORECASE) is not None:
            # Replace the month name with its number.
            day, month_name, year = normalized.split('/')
            normalized = day + '/' + month_no[month_name.lower()] + '/' + year
        try:
            # Try a 2-digit year first, then fall back to 4 digits.
            results.append(datetime.strptime(normalized, '%d/%m/%y'))
        except ValueError:
            results.append(datetime.strptime(normalized, '%d/%m/%Y'))
    return results
def extract_jenis(msg: str, db) -> str:
    """Return the canonical task type mentioned in *msg*, or '' if none.

    Canonical types come from the database's 'jenis_tugas' keyword list;
    the aliases prak / tugas kecil / tugas besar / quiz are normalized to
    praktikum / tucil / tubes / kuis respectively.

    Parameters
    ----------
    msg : str
        User message to scan.
    db : firestore database
        Source of the canonical keyword list.
    """
    canonical_keywords = load_keywords(db)['jenis_tugas']
    keyword_group = '(' + '|'.join(canonical_keywords) + ')'
    pattern = keyword_group + r'|(prak|tugas kecil|tugas besar|quiz)'
    alias_to_canonical = {
        'prak': 'praktikum',
        'quiz': 'kuis',
        'tugas kecil': 'tucil',
        'tugas besar': 'tubes',
    }
    try:
        first_match = re.findall(pattern, msg, flags=re.IGNORECASE)[0]
    except IndexError:
        return ''
    if first_match[0]:
        return first_match[0]
    # Alias matched case-sensitively maps to canonical; anything else -> ''.
    return alias_to_canonical.get(first_match[1], '')
def extract_course_id(msg: str) -> str:
    """Extract an ITB course code (2 letters + 4 digits) from *msg*.

    Returns the first code found, upper-cased, or None when no code is
    present.

    BUG FIX: on a miss the original still called `res.upper()` on None,
    raising AttributeError instead of returning None.
    """
    matches = re.findall(r'[a-zA-Z]{2}\d{4}', msg, flags=re.IGNORECASE)
    if not matches:
        return None
    return matches[0].upper()
def extract_topic(msg: str) -> str:
    """Return the text of the first double-quoted span in *msg*, or None.

    The surrounding quotes are stripped from the returned topic; when no
    quoted span exists, None is returned (no exception is raised).
    """
    quoted_spans = re.findall(r'"[\w\s:\',.?!><\]\[\}\{=+\-\)\(;]+"', msg)
    if not quoted_spans:
        return None
    return re.sub(r'"', '', quoted_spans[0])
def load_keywords(db) -> 'dict[str, list[str]]':
    """Fetch the keyword lists stored in the 'keywords/keywords' document.

    Parameters
    ----------
    db : firestore database
        Database handle to read from.

    Returns
    -------
    dict[str, list[str]]
        Keyword-category name -> list of keywords.
    """
    keywords_doc = db.collection(u'keywords').document(u'keywords')
    return keywords_doc.get().to_dict()
def lihat_tugas(msg: str, db) -> str:
    '''
    List tasks from the database, optionally restricted to a time period.

    Two period forms are understood (keyword-matched with boyer_moore):
    - an inclusive range: <dari|antara> <date1> <hingga|sampai> <date2>
    - a duration from today: <n> <hari|minggu|bulan|tahun> <ke depan|...>
    When both appear, the explicit range wins. A task-type keyword in *msg*
    (via extract_jenis) further filters the listing.

    Returns the formatted task list, or an error message for malformed
    dates/durations.

    BUG FIX: a stray trailing line-continuation backslash glued the
    pake_tanggal_range assignment onto the following `if`, which made the
    whole module a SyntaxError. Also removed a leftover debug print.
    '''
    tugas_ref = db.collection(u'tugas')
    all_tugas = tugas_ref.stream()
    res = "[Daftar tugas IF'19]\n"
    # Trigger keywords for the two supported period forms.
    trigger_tanggal_satuan = [
        'ke depan',
        'berikutnya',
        'lagi',
        'selanjutnya',
    ]
    trigger_tanggal_range_dari = [
        'dari',
        'antara',
    ]
    trigger_tanggal_range_sampai = [
        'hingga',
        'sampai',
    ]
    pake_tanggal_range = False
    pake_tanggal_satuan = False
    # Locate the range keywords, if any.
    idx_dari = -1
    for trigger_dari in trigger_tanggal_range_dari:
        idx_dari = boyer_moore(text=msg, pattern=trigger_dari)
        if idx_dari != -1:
            break
    idx_sampai = -1
    for trigger_sampai in trigger_tanggal_range_sampai:
        idx_sampai = boyer_moore(text=msg, pattern=trigger_sampai)
        if idx_sampai != -1:
            break
    # Range mode needs both keywords, with 'dari' before 'sampai'.
    pake_tanggal_range = (idx_dari != -1 and idx_sampai != -1
                          and idx_dari <= idx_sampai)
    if not pake_tanggal_range:
        for trigger in trigger_tanggal_satuan:
            if boyer_moore(text=msg, pattern=trigger) != -1:
                pake_tanggal_satuan = True
        trigger_periode = [
            'hari',
            'minggu',
            'bulan',
            'tahun',
        ]
        for trigger in trigger_periode:
            idx_periode = boyer_moore(text=msg, pattern=trigger)
            if idx_periode != -1:
                periode = trigger
                break
        if idx_periode == -1:
            return 'Durasi waktu kamu salah'
        try:
            # The duration number must precede the period word.
            durasi = int(re.findall(r'\d+', msg[:idx_periode])[0])
        except IndexError:
            return 'Durasi waktu kamu salah'
        # Normalize the duration to days.
        if periode == 'minggu':
            durasi *= 7
        elif periode == 'bulan':
            durasi *= 30
        elif periode == 'tahun':
            durasi *= 365
    else:
        try:
            date1, date2, *_ = extract_date(msg)
            if date1 > date2:
                return 'Jarak tanggal kamu salah'
        except ValueError:
            return 'Penulisan tanggal kamu salah'
    # Optional filter on the requested task type ('' means no filter).
    jenis_tugas_permintaan = extract_jenis(msg, db)
    i = 1
    for tugas in all_tugas:
        tugas_dict = tugas.to_dict()
        if len(jenis_tugas_permintaan) != 0\
                and tugas_dict['jenis'] != jenis_tugas_permintaan:
            continue
        # Rebuild a naive datetime from the stored timestamp for comparisons.
        deadline = tugas_dict['deadline']
        deadline = datetime(deadline.year, deadline.month, deadline.day,
                            deadline.hour, deadline.minute, deadline.second)
        deadline_str = deadline.strftime('%Y-%m-%d')
        if pake_tanggal_satuan:
            # Past-due tasks are still shown; only the future horizon is capped.
            now = datetime.now().replace(hour=23, minute=59, second=59,
                                         microsecond=0)
            if deadline > now + timedelta(days=durasi):
                continue
        elif pake_tanggal_range:
            if deadline < date1 or deadline > date2:
                # Deadline outside the requested range.
                continue
        if tugas_dict['jenis'] == 'tubes':
            jenis = 'tugas besar'
        elif tugas_dict['jenis'] == 'tucil':
            jenis = 'tugas kecil'
        else:
            jenis = tugas_dict['jenis']
        space_4 = '    '
        res += f'{i}. ID: {tugas.id}'
        res += f'\n{space_4}Matkul: {tugas_dict["id_matkul"]}'
        res += f'\n{space_4}Deadline (yyyy-mm-dd): {deadline_str}'
        res += f'\n{space_4}{jenis}: {tugas_dict["topik"]}'
        res += '\n\n'
        i += 1
    if i == 1:
        res = 'Ga ada deadline yang akan datang\n'
    return res[:-1]
def lihat_deadline(msg: str, db) -> str:
    """List deadlines of tasks matching the course id and task type in *msg*.

    Parameters
    ----------
    msg : str
        User message containing a course code and a task-type keyword.
    db : firestore database
        Database to read the 'tugas' collection from.

    Returns
    -------
    str
        Bot reply: the numbered deadline list, or an error/empty message.
    """
    id_matkul_request = extract_course_id(msg)
    if id_matkul_request is None:
        return 'ID Matkul ga ada atau salah'
    jenis_tugas_request = extract_jenis(msg, db)
    if jenis_tugas_request == '':
        return 'Jenis tugas salah'
    ret = f'Deadline {jenis_tugas_request} {id_matkul_request}:\n'
    counter = 1
    for tugas in db.collection(u'tugas').stream():
        record = tugas.to_dict()
        wrong_type = record['jenis'] != jenis_tugas_request
        wrong_course = record['id_matkul'].lower() != id_matkul_request.lower()
        if wrong_type or wrong_course:
            continue
        raw_deadline = record['deadline']
        formatted = datetime(
            day=raw_deadline.day,
            month=raw_deadline.month,
            year=raw_deadline.year
        ).strftime('%Y-%m-%d')
        ret += f'{counter}. {formatted}\n'
        counter += 1
    if counter == 1:
        ret = f'Tidak ada deadline untuk {jenis_tugas_request} matkul {id_matkul_request}.'
    return ret
def tambah_tugas(msg: str, db) -> str:
    """Parse a new task from *msg* and store it in the 'tugas' collection.

    The message must contain a date, a course code, a task-type keyword, and
    a double-quoted topic.

    Returns
    -------
    str
        Confirmation message echoing the stored task.

    Raises
    ------
    ValueError
        If the date, course code, task type, or topic is missing/invalid.
        (BUG FIX: a missing date previously raised a bare IndexError,
        contradicting this documented contract.)
    """
    dates = extract_date(msg)
    if not dates:
        raise ValueError('Tanggal salah')
    date = dates[0]
    course_id = extract_course_id(msg)
    jenis = extract_jenis(msg, db)
    topic = extract_topic(msg)
    if course_id is None or jenis == '' or topic is None:
        raise ValueError(f'{"ID Matkul" if course_id is None else "Jenis tugas" if jenis == "" else "Topik tugas"} salah')
    tanggal = date.strftime('%Y-%m-%d')
    data = {
        u'deadline': date,
        u'id_matkul': course_id,
        u'jenis': jenis,
        u'topik': topic
    }
    tugas_ref = db.collection(u'tugas')
    # New id = last streamed id + 1; defaults to '1' for an empty collection.
    id_task = '1'
    for tugas in tugas_ref.stream():
        id_task = str(int(tugas.id) + 1)
    ret = '[Task berhasil dicatat]'
    ret += f'\nID: {id_task}'
    ret += f'\nMatkul: {course_id}'
    ret += f'\nDeadline (yyyy/mm/dd): {tanggal}'
    ret += f'\nJenis: {jenis}'
    ret += f'\nTopik: {topic}'
    db.collection(u'tugas').document(id_task).set(data)
    return ret
def easter_egg():
    """Return the copypasta text, falling back to a table flip on any error."""
    fallback = '┻━┻ ︵ \\\(\'0\')// ︵ ┻━┻ FLIP ALL THE TABLES'
    try:
        with open('view/public/copypasta.txt', 'r') as src:
            return src.read()
    except Exception:
        return fallback
def update_tugas(msg: str, db) -> str:
    """Postpone a task's deadline to the first date mentioned in *msg*.

    The task is located by the first number in *msg* (its id). Returns a
    success message, or an error message when the id/date is missing or the
    task does not exist.
    """
    task_id = extract_task_id(msg)
    date_list = extract_date(msg)
    print(task_id, date_list)
    if task_id is None or len(date_list) == 0:
        return f'{"ID Tugas" if task_id is None else "Tanggal"} kamu salah'
    new_deadline = date_list[0]
    updated = False
    for tugas in db.collection(u'tugas').stream():
        if tugas.id == task_id:
            tugas.reference.update({u'deadline': new_deadline})
            updated = True
            break
    if updated:
        return "Deadline tugas " + task_id + " berhasil diundur"
    return "Tugas " + task_id + " tidak terdapat dalam daftar tugas"
def clear_tugas(msg: str, db) -> str:
    '''
    Mark a task as done by deleting it from the database.

    Parameters
    ----------
    msg : str
        pesan user yang memuat ID tugas
    db : firestore database
        database tempat koleksi 'tugas' disimpan

    Returns
    -------
    str
        status message (success or failure)
    '''
    task_id = extract_task_id(msg)
    # Reject the request when no task ID is present in the message.
    if task_id is None:
        return 'ID tugas kamu salah'
    # Find the first stored task whose document ID matches.
    target = next(
        (tugas for tugas in db.collection(u'tugas').stream() if tugas.id == task_id),
        None,
    )
    if target is None:
        res = "Tugas " + task_id + " tidak terdapat dalam daftar tugas"
    else:
        target.reference.delete()
        res = "Tugas " + task_id + " berhasil ditandai selesai"
    # TODO: Write ke firebase db
    return res
def extract_task_id(msg: str) -> str:
    '''
    Extract the first run of digits from the message as a task ID.

    Parameters
    ----------
    msg : str
        pesan dari user

    Returns
    -------
    str or None
        the first group of consecutive digits, or None when the message
        contains no digit at all
    '''
    # re.search finds only the first match, which is all we ever used;
    # the original's IGNORECASE flag was a no-op on a digits-only pattern.
    match = re.search(r'\d+', msg)
    return match.group(0) if match else None
def handle_bingung():
    """Fallback reply used when the bot cannot understand the user's message."""
    reply = 'Maaf, aku ga paham kamu ngomong apa 😵'
    return reply
def help_msg(db) -> str:
    '''
    Build the bot's help message listing every feature and its keywords.

    Parameters
    ----------
    db : firestore database
        database untuk mendapatkan data

    Returns
    -------
    str
        help message
    '''
    keywords = load_keywords(db)
    parts = [
        "It's dangerous to go alone! Take this.\n",
        '\n',
        '[Fitur]\n',
        '1. Menambahkan tasks baru.\n',
        f'Kata kunci: {keywords["tambah_task"]}\n',
        '2. Melihat daftar tasks yang harus dikerjakan.\n',
        f'Kata kunci: {keywords["lihat_task"]}\n',
        '3. Menampilkan deadline dari suatu task tertentu.\n',
        f'Kata kunci: {keywords["lihat_deadline"]}\n',
        '4. Memperbarui task tertentu.\n',
        f'Kata kunci: {keywords["update_task"]}\n',
        '5. Menandai bahwa suatu task sudah selesai dikerjakan.\n',
        f'Kata kunci: {keywords["nandain_task_selesai"]}\n',
        '6. Menampilkan opsi help yang difasilitasi oleh assistant.\n',
        f'Kata kunci: {keywords["lihat_help"]}\n',
        '7. Mendefinisikan list kata penting terkait tugas.\n',
        '8. Menampilkan pesan error ketika tidak mengenali query user\n',
        '\n',
        '[Kata kunci tugas]\n',
    ]
    # Number each known task type, continuing the message above.
    for nomor, jenis in enumerate(keywords['jenis_tugas'], start=1):
        parts.append(f'{nomor}. {jenis}\n')
    return ''.join(parts)
if __name__ == '__main__':
    # Manual smoke test for date extraction on mixed date formats.
    kalimat = 'Hari ini tanggal 27-April/2021 : 27 04/2021 : 27/04-2021'
    print(extract_date(kalimat))
| jspmarc/BotWangy | src/response.py | response.py | py | 18,205 | python | id | code | 0 | github-code | 1 | [
{
"api_name": "re.findall",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number... |
15226484150 | import time
import os, sys
import traceback
import socket
import hmac
import hashlib
import mensagem_pb2
import threading
from random import randint
import logging
logging.basicConfig(level=logging.INFO, format='%(levelname)s:%(threadName)s:%(message)s')
from random import (
choice, randint
)
from string import (
ascii_uppercase, ascii_letters, digits
)
from communication import (
send_message, recv_message, SocketReadError
)
def recebe_mensagem_do_cliente(cliente, endereco):
    """Per-client loop: read protobuf messages and answer each one.

    Parameters
    ----------
    cliente : connected client socket
    endereco : (host, port) tuple of the peer (unused here; kept for the
        thread-target signature)

    Returns False (and closes the socket) when the peer disconnects.
    """
    while True:
        try:
            mensagem = mensagem_pb2.Mensagem()
            recebe_mensagem = recv_message(cliente)
            mensagem.ParseFromString(recebe_mensagem)
            # NOTE(review): a freshly constructed protobuf message object is
            # always truthy, so this branch can never fire; also `error` is
            # not defined in this module (taking this branch would raise
            # NameError, not the intended communication error) -- confirm.
            if not mensagem:
                raise error('Erro de comunicacao')
            logging.info("[received] sender name: %s, receiver name: %s, message type: %s and thread ID: %s", mensagem.sender_name, mensagem.receiver_name, mensagem_pb2.Mensagem.Msg_type.Name(mensagem.msg_type), str(mensagem.thread_id))
            # Build the reply: 32 random bytes, hex encoded.
            # NOTE(review): bytes.encode("hex") only exists on Python 2.
            mensagem.dados = os.urandom(32).encode("hex")
            mensagem.msg_type = 1
            mensagem.thread_id = threading.current_thread().ident
            # Authenticate (id_cliente + dados) with HMAC-SHA512 under a
            # shared secret key.
            hmac_maker = hmac.new('chave-secreta-2018', '', hashlib.sha512)
            hmac_maker.update(str(mensagem.id_cliente))
            hmac_maker.update(mensagem.dados)
            mensagem.hmac = hmac_maker.hexdigest()
            logging.info("[sending] sender name: %s, receiver name: %s, message type: %s and thread ID: %s", mensagem.sender_name, mensagem.receiver_name, mensagem_pb2.Mensagem.Msg_type.Name(mensagem.msg_type), str(mensagem.thread_id))
            send_message(cliente, mensagem)
        except (SocketReadError):
            # Peer went away: close our end and terminate this thread.
            cliente.close()
            return False
        except:
            # NOTE(review): bare except keeps the loop alive after printing
            # the traceback -- any other error is swallowed.
            traceback.print_exc()
if __name__ == "__main__":
PORTA = 5001
try:
s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s_socket.bind(("0.0.0.0", PORTA))
s_socket.listen(10)
logging.info ("Servidor iniciado na porta %s", str(PORTA))
while True:
(cliente, endereco) = s_socket.accept()
logging.info ("Cliente (%s, %s) conectado" % endereco)
threading.Thread(target = recebe_mensagem_do_cliente,args = (cliente,endereco)).start()
s_socket.close()
except (KeyboardInterrupt, SystemExit):
logging.info("Finalizando a execucacao ...")
pass
except:
traceback.print_exc()
| VictorSCosta/Python-Protobuf-example-client-server | servidor.py | servidor.py | py | 2,582 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "mensagem_pb2.Mensagem",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "communica... |
42272258033 | from astropy.utils.data import get_pkg_data_filename
from ..catalogues import Catalogue
from ..lr import LRMatch
def set_catalogues():
    """Build the primary/secondary test catalogues used by the LR tests."""
    moc_path = get_pkg_data_filename('data/testcat_moc_1.moc')
    primary = Catalogue(
        get_pkg_data_filename('data/testcat_moc_1.fits'),
        area=moc_path,
        name='pcat',
    )
    secondary = Catalogue(
        get_pkg_data_filename('data/testcat_3.fits'),
        area=moc_path,
        name='scat',
        coord_cols=['RA', 'DEC'],
        poserr_cols=['raErr', 'decErr'],
        poserr_type='rcd_dec_ellipse',
        mag_cols=['uMag', 'gMag'],
    )
    # The matcher is exercised with circular positional errors on the
    # secondary catalogue.
    secondary.poserr = secondary.poserr.transform_to(errtype='circle')
    return primary, secondary
def test_lr_rndprior():
    """LR matching with random priors yields sane probabilities/separations."""
    pcat, scat = set_catalogues()
    match = LRMatch(pcat, scat).run(prior_method='random')
    # At least one output row per primary source.
    assert len(match) >= len(pcat)
    # Probability columns must lie within [0, 1].
    for column in ['prob_has_match', 'prob_this_match']:
        assert all(match[column] >= 0)
        assert all(match[column] <= 1)
    # Rows with an actual counterpart have non-negative LR and separation.
    matched = ~match['LR_BEST'].mask
    assert all(match['LR_BEST'][matched] >= 0)
    assert all(match['Separation_pcat_scat'][matched] >= 0)
def test_lr_maskprior():
    """LR matching with mask-based priors yields sane probabilities/separations."""
    pcat, scat = set_catalogues()
    match = LRMatch(pcat, scat).run(prior_method='mask')
    # At least one output row per primary source.
    assert len(match) >= len(pcat)
    # Probability columns must lie within [0, 1].
    for column in ['prob_has_match', 'prob_this_match']:
        assert all(match[column] >= 0)
        assert all(match[column] <= 1)
    # Rows with an actual counterpart have non-negative LR and separation.
    matched = ~match['LR_BEST'].mask
    assert all(match['LR_BEST'][matched] >= 0)
    assert all(match['Separation_pcat_scat'][matched] >= 0)
| ruizca/astromatch | astromatch/tests/test_lr.py | test_lr.py | py | 1,769 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "astropy.utils.data.get_pkg_data_filename",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "astropy.utils.data.get_pkg_data_filename",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "catalogues.Catalogue",
"line_number": 11,
"usage_type": ... |
40818797234 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time

# Open the Turkish Wikipedia main page in a maximised Chrome window.
browser = webdriver.Chrome()
browser.implicitly_wait(5)
browser.maximize_window()
browser.get("https://tr.wikipedia.org/wiki/Anasayfa")

# "Featured article" box: print the full text, then only the part before
# the first comma.
featured_text = browser.find_element(By.ID, "mp-tfa").text
print(featured_text)
featured_text = featured_text.split(",")[0]
print(featured_text)

# "Quality article" box: print only the part before the first comma.
quality_text = browser.find_element(By.ID, "mf-tfp").text
quality_text = quality_text.split(",")[0]
print(quality_text)

time.sleep(3)
browser.quit()
driver.quit() | htcAK/selen-um | çalışma_sayfam.py | çalışma_sayfam.py | py | 608 | python | tr | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
3581159598 | from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
#for rendering templates with flask
#First letter must be uppercase
app = Flask(__name__) #The name of the application name or package
#app.config['SQLALCHEMY_DATABASE_URI']= 'postgresql+psycopg2://postgres:Alucinante123*@localhost/quotes'
#^^For connection to local server
# NOTE(review): live database credentials are hard-coded in source control --
# this URI (and the commented local one above) should come from an
# environment variable or secret store instead.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://xnfwvewfmxmznu:ac00ef66979ddf2b96d08da121684bedf5c51e5c6a81595df1c6f8bf23c0d6fe@ec2-52-72-252-211.compute-1.amazonaws.com:5432/ddrvi3khou5a3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#^^Event notification in SQLALchemy to track modifications
db = SQLAlchemy(app)
class Favquotes(db.Model):
    #Defines class for db and it's data validations
    # ORM model: one stored quote with its author.
    id = db.Column(db.Integer, primary_key = True)  # surrogate primary key
    author = db.Column(db.String(30))               # author name, max 30 chars
    quote = db.Column(db.String(2000))              # quote text, max 2000 chars
# End points
@app.route('/')
def index():
    """Render the home page with every stored quote."""
    all_quotes = Favquotes.query.all()
    return render_template('index.html', result=all_quotes)
@app.route('/quotes')
def quotes():
    """Render the page containing the new-quote submission form."""
    form_page = render_template('quotes.html')
    return form_page
@app.route('/process', methods=['POST'])  # POST so the form data arrives in the request body
def process():
    """Persist a submitted quote, then return to the index page."""
    new_quote = Favquotes(
        author=request.form['author'],
        quote=request.form['quote'],
    )
    db.session.add(new_quote)
    db.session.commit()  # write the new row to the database
    return redirect(url_for('index'))
#Rendering templates. Flask looks for them in a folder named tamplates | kevinnarvaes/fav-quotes-flaskapp | quotes.py | quotes.py | py | 1,787 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.re... |
27823076441 | import cv2
class ShapeDetection():
    """Detects the corners of the largest contour on an image.

    Attributes
    ----------
    corners : list of (x, y) tuples for the most recent detection
    is_displaying : when True, annotate the corner coordinates on the
        image and show it in an OpenCV window until 'q' is pressed
    """

    def __init__(self):
        self.corners = []
        self.is_displaying = False

    def detect_from_picture(self, img):
        """Find the corner coordinates of the biggest shape in *img*.

        Parameters
        ----------
        img : BGR image array as produced by cv2 capture/imread

        Returns
        -------
        list of (x, y) tuples; empty when no contour was found
        """
        # Bug fix: reset previous results.  The original appended to
        # self.corners without clearing it, so repeated calls mixed
        # corners from different images.
        self.corners = []
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, threshold = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # Keep only the contour with the largest area.
        areas = [cv2.contourArea(contour) for contour in contours]
        if areas:
            biggest = contours[areas.index(max(areas))]
            approx = cv2.approxPolyDP(biggest, 0.01 * cv2.arcLength(biggest, True), True)
            # approx.ravel() is a flat [x0, y0, x1, y1, ...] array.
            corner = approx.ravel()
            for i in range(0, len(corner), 2):
                x = corner[i]
                y = corner[i + 1]
                self.corners.append((x, y))
                # Annotate every corner except the first (matches the
                # original behaviour).
                if self.is_displaying and i != 0:
                    cv2.putText(img, str(x) + " " + str(y), (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
        if self.is_displaying:
            # Show the (possibly annotated) frame until 'q' is pressed.
            while 1:
                cv2.imshow('Flux Camera', img)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
        return self.corners
| GregoryMoutote/issou_project | Calibration/ShapeDetection.py | ShapeDetection.py | py | 1,667 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.cvtColor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY"... |
20994349960 | import inspect
import tokenize
from indenter import Indenter
import pylatex
from .utilities import is_list_or_set
from .traits import TraitRegistry, Trait
newlines = "\n\n"
def content_report(fidia_trait_registry):
    # type: (TraitRegistry) -> list
    """Build a complete standalone LaTeX document describing the registry.

    Wraps the body produced by ``trait_report`` in a preamble (document
    class, listings/minted setup, title page) and the closing
    ``end{document}``.  Returns the document as a list of LaTeX fragments.
    """
    assert isinstance(fidia_trait_registry, TraitRegistry)
    # Preamble as a single raw-string fragment.
    latex_lines = [
        r"""\documentclass{awg_report}
\author{Andy Green}
\title{SAMI Traits}
\usepackage{hyperref}
\usepackage{listings}
\lstset{% general command to set parameter(s)
basicstyle=\ttfamily\scriptsize,
showstringspaces=false,
numbers=left, numberstyle=\tiny, stepnumber=5, numbersep=5pt,
breaklines=true,
postbreak=\raisebox{0ex}[0ex][0ex]{\ensuremath{\color{red}\hookrightarrow\space}}}
\lstset{language=Python}
\usepackage{minted}
\begin{document}
\maketitle
"""
    ]
    # Body: one section per trait class.
    latex_lines.extend(trait_report(fidia_trait_registry))
    latex_lines.append("\\end{document}")
    return latex_lines
def schema_hierarchy3(fidia_trait_registry):
    # type: (TraitRegistry) -> str
    """Create a Graphviz 'dot' diagram of the hierarchy of a FIDIA Plugin.

    This produces the best output. -- AWG (Jan 2017)
    """
    assert isinstance(fidia_trait_registry, TraitRegistry)
    nodes = dict()   # trait class name -> record-shaped node label
    sames = dict()   # depth level -> node names (for rank=same constraints)
    links = []       # (parent, child) edges
    def do_level(trait_registry, level, parent):
        # Walk one level of the registry, then recurse into sub-traits.
        if level not in sames:
            sames[level] = []
        for trait in trait_registry._registry:
            trait_classname = trait.__name__
            # All registry keys that resolve to this trait class.
            keys = []
            for tk in trait_registry._trait_lookup:
                if trait_registry._trait_lookup[tk] is trait:
                    keys.append(str(tk))
            properties = []
            for tp in trait._trait_properties(include_hidden=True):
                properties.append(tp.name + ": " + tp.type)
            # Graphviz record label: {name | keys | properties};
            # \l is a left-aligned line break inside a record.
            label = "{" + trait_classname + "|" + "\\l".join(keys) + "|" + "\\l".join(properties) + "}"
            nodes[trait_classname] = label
            sames[level].append(trait_classname)
            if parent is not None:
                links.append((parent, trait_classname))
            if trait.sub_traits is not None and len(trait.sub_traits._registry) > 0:
                do_level(trait.sub_traits, level + 1, trait_classname)
    do_level(fidia_trait_registry, 1, None)
    # Emit the dot source: nodes, then edges, then same-rank groups.
    output = 'digraph "classes_sami_fidia" {\ncharset="utf-8"\nrankdir=TB\n'
    for trait in nodes:
        output += '{id:30s} [label="{label}", shape="record"]; \n'.format(
            id='"' + trait + '"', label=nodes[trait]
        )
    for link in links:
        output += '"{left}" -> "{right}" [arrowhead="empty", arrowtail="none"];\n'.format(
            left=link[0], right=link[1]
        )
    for level in sames:
        output += '{{rank=same; "{nodes}" }}\n'.format(nodes='"; "'.join(sames[level]))
    output += "}\n"
    return output
def schema_hierarchy(fidia_trait_registry):
    # type: (TraitRegistry) -> str
    """Create a Graphviz diagram of the hierarchy of a FIDIA Trait Registry."""
    assert isinstance(fidia_trait_registry, TraitRegistry)
    # Branch/version levels are collapsed; data-only verbosity keeps the
    # schema down to trait properties and sub-traits.
    schema = fidia_trait_registry.schema(include_subtraits=True, data_class='all',
                                         combine_levels=('branch_version', ),
                                         verbosity='data_only')
    from graphviz import Digraph
    graph = Digraph('FIDIA Data Model', filename='tmp.gv')
    graph.body.append('size="12,6"')
    # graph.node_attr.update(color='lightblue2', style='filled')
    graph.node("Archive")
    def graph_from_schema(schema, top, branch_versions=False):
        # Recursively add one HTML-table node per trait, edged to its parent.
        schema_type = schema
        for trait_type in schema_type:
            schema_qualifier = schema_type[trait_type]
            for trait_qualifier in schema_qualifier:
                if trait_qualifier:
                    trait_name = trait_type + "-" + trait_qualifier
                else:
                    trait_name = trait_type
                # HTML-like label: header row with the trait name, then one
                # row (with a PORT) per trait property.
                node_text = "<<TABLE BORDER=\"1\" CELLBORDER=\"0\" CELLSPACING=\"0\">"
                node_text += "<TR><TD><B>{label}</B></TD></TR>".format(
                    label=trait_name
                )
                for trait_property in schema_qualifier[trait_qualifier]['trait_properties']:
                    node_text += "<TR><TD PORT=\"{port}\">{label}</TD></TR>".format(
                        port=top + trait_name + trait_property,
                        label=trait_property
                    )
                node_text += "</TABLE>>"
                graph.node(top + "+" + trait_name, node_text, shape='none')
                graph.edge(top, top + "+" + trait_name)
                sub_trait_schema = schema_qualifier[trait_qualifier]['sub_traits']
                if len(sub_trait_schema) > 0:
                    graph_from_schema(sub_trait_schema, top + "+" + trait_name)
                # if branch_versions:
                #     schema_branch = schema_qualifier[trait_qualifier]['branches']
                #     for branch in schema_branch:
                #         schema_version = schema_branch['versions']
                #         for version in schema_version:
                #             pass
    graph_from_schema(schema, "Archive")
    # graph.render("out.pdf")
    return graph.source
def schema_hierarchy_tikz(fidia_trait_registry):
    # type: (TraitRegistry) -> str
    """Create a tikz-qtree diagram of the hierarchy of a FIDIA Trait Registry."""
    assert isinstance(fidia_trait_registry, TraitRegistry)
    schema = fidia_trait_registry.schema(include_subtraits=True, data_class='all',
                                         combine_levels=('branch_version', ),
                                         verbosity='data_only')
    # Standalone LaTeX preamble; the tree source is appended below.
    latex_lines = r"""
\documentclass{standalone}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{tikz-qtree}
\usetikzlibrary{shadows,trees}
\begin{document}
\tikzset{font=\small,
edge from parent fork down,
level distance=1.75cm,
every node/.style=
{anchor=north,
rectangle,rounded corners,
minimum height=8mm,
draw=blue!75,
very thick,
align=center
},
edge from parent/.style=
{draw=blue!50,
thick
}}
\centering
\begin{tikzpicture}
\Tree """
    class TikZTree:
        """Incrementally builds the bracket source of one tikz-qtree tree.

        Used as a context manager: leaves/sub-trees are added inside the
        ``with`` block; ``__exit__`` closes the tree so that ``get_tex``
        may be called afterwards.
        """
        leaf_close = " ]\n"
        def __init__(self, name, **kwargs):
            self._latex = ""
            self.add_leaf(name, **kwargs)
            # Delete the closing off of the leaf to cause it to be the start of a new tree
            self._latex = self._latex[:-len(self.leaf_close)]
            self._latex += "\n"
            self._ready_for_export = False
        def add_leaf(self, name, escape=True, as_node=None):
            """Append one leaf; *name* may be a string or a list/set of lines."""
            if escape:
                escape = pylatex.utils.escape_latex
            else:
                escape = lambda x: x
            if is_list_or_set(name):
                proc_name = "\\\\".join(map(escape, name))
            else:
                proc_name = escape(name)
            if as_node:
                self._latex += "[ .\\node[" + as_node + "]{" + proc_name + "};" + self.leaf_close
            else:
                self._latex += "[ .{" + proc_name + "}" + self.leaf_close
        def __enter__(self):
            return self
        def __exit__(self, exc_type, exc_val, exc_tb):
            # Close the bracket opened in __init__ and allow export.
            self._latex += "]\n"
            self._ready_for_export = True
        def get_tex(self):
            """Return the indented tree source; only valid after the with-block."""
            if self._ready_for_export:
                ind = Indenter(enforce_spacing=False)
                ind.add_many_lines(self._latex)
                return ind.code
            else:
                raise Exception("TikZTree instance incomplete: cannot create latex.")
        def add_sub_tree(self, sub_tree):
            """Splice a completed sub-tree into this tree."""
            assert isinstance(sub_tree, TikZTree)
            self._latex += sub_tree.get_tex()
    texescape = pylatex.utils.escape_latex
    def graph_from_schema(schema, top, branch_versions=False):
        # Build one TikZTree rooted at *top* from a (sub-)schema.
        with TikZTree(top) as ttree:
            schema_type = schema
            for trait_type in schema_type:
                schema_qualifier = schema_type[trait_type]
                for trait_qualifier in schema_qualifier:
                    if trait_qualifier:
                        trait_name = trait_type + "-" + trait_qualifier
                    else:
                        trait_name = trait_type
                    # Node label: bold trait name, then one line per property.
                    trait_text = "\\textbf{" + texescape(trait_name) + "}"
                    for trait_property in schema_qualifier[trait_qualifier]['trait_properties']:
                        trait_text += "\\\\" + texescape(trait_property)
                    sub_trait_schema = schema_qualifier[trait_qualifier]['sub_traits']
                    if len(sub_trait_schema) > 0:
                        with TikZTree(trait_text, escape=False, as_node="anchor=north") as trait_tree:
                            sub_trait_tree = graph_from_schema(sub_trait_schema, trait_name)
                            trait_tree.add_sub_tree(sub_trait_tree)
                        ttree.add_sub_tree(trait_tree)
                    else:
                        if isinstance(trait_name, str) and is_list_or_set(trait_name):
                            raise Exception()
                        else:
                            ttree.add_leaf(trait_text, escape=False, as_node="anchor=north")
                    # if branch_versions:
                    #     schema_branch = schema_qualifier[trait_qualifier]['branches']
                    #     for branch in schema_branch:
                    #         schema_version = schema_branch['versions']
                    #         for version in schema_version:
                    #             pass
        return ttree
    tree = graph_from_schema(schema, "Archive")
    latex_lines += tree.get_tex()
    latex_lines += r"""\end{tikzpicture}
\end{document}"""
    return latex_lines
def trait_report(fidia_trait_registry):
    # type: (TraitRegistry) -> list
    """Produce the LaTeX body: one section per trait class in the registry.

    Each section lists the trait keys resolving to the class, its init
    code (when overridden), its trait properties, and links to sub-traits.
    """
    assert isinstance(fidia_trait_registry, TraitRegistry)
    latex_lines = []
    # NOTE(review): additional_traits collects sub-trait classes but is never
    # read afterwards -- presumably sections for sub-traits were meant to be
    # emitted too; confirm.
    additional_traits = []
    # Iterate over all Traits in the Registry:
    for trait_type in fidia_trait_registry.get_trait_types():
        for trait_class in fidia_trait_registry.get_trait_classes(trait_type_filter=trait_type):
            assert issubclass(trait_class, Trait)
            latex_lines.append(newlines + r"\section{Trait Class: %s}" % pylatex.utils.escape_latex(trait_class.__name__))
            latex_lines.append(r"\label{sec:%s}" % (trait_class.__name__.replace("_", "-")))
            latex_lines.append(newlines + r"\subsection{Trait Keys Included}")
            # Collect the registry keys that map onto this trait class.
            tk_list = []
            all_keys = fidia_trait_registry.get_all_traitkeys(trait_type_filter=trait_type)
            assert len(all_keys) > 0
            for tk in all_keys:
                class_for_key = fidia_trait_registry.retrieve_with_key(tk)
                assert issubclass(class_for_key, Trait)
                if class_for_key is trait_class:
                    tk_list.append(tk)
            latex_lines.extend(latex_format_trait_key_table(tk_list))
            # Only show init code when the class overrides Trait.init.
            if trait_class.init is not Trait.init:
                latex_lines.append(newlines + r"\subsection{Init Code}")
                latex_lines.extend(latex_format_code_for_object(trait_class.init))
            latex_lines.append(newlines + r"\subsection{Trait Properties}")
            latex_lines.extend(trait_property_report(trait_class))
            if hasattr(trait_class, 'sub_traits'):
                assert isinstance(trait_class.sub_traits, TraitRegistry)
                all_sub_traits = trait_class.sub_traits.get_trait_classes()
                if len(all_sub_traits) > 0:
                    latex_lines.append(newlines + r"\subsection{Sub traits}")
                    latex_lines.append(newlines + r"\begin{itemize}")
                    for sub_trait in all_sub_traits:
                        additional_traits.append(sub_trait)
                        # NOTE(review): the hyperlink text uses the parent
                        # class name rather than the sub-trait's -- confirm
                        # this is intended.
                        latex_lines.append("\\item \\hyperref[{ref}]{{{text}}}".format(
                            ref=sub_trait.__name__.replace("_", "-"),
                            text=pylatex.utils.escape_latex(trait_class.__name__)
                        ))
                    latex_lines.append(r"\end{itemize}")
    assert isinstance(latex_lines, list)
    return latex_lines
def trait_property_report(trait):
    # type: (Trait) -> list
    """One LaTeX subsubsection (name + loader source) per trait property."""
    assert issubclass(trait, Trait)
    latex_lines = []
    for trait_property_name in trait.trait_property_dir():
        trait_property = getattr(trait, trait_property_name)
        latex_lines.append(newlines + r"\subsubsection{Trait Property: %s}" % pylatex.utils.escape_latex(trait_property_name))
        # source_lines = inspect.getsourcelines(trait_property.fload)[0]
        # Render the property's loader function as a code listing.
        latex_lines.extend(latex_format_code_for_object(trait_property.fload))
    assert isinstance(latex_lines, list)
    return latex_lines
def latex_format_trait_key_table(trait_key_list):
    """Render a list of trait keys as a four-column LaTeX tabular.

    Columns: type, qualifier, branch, version (all LaTeX-escaped).
    Returns a list of LaTeX source lines.
    """
    esc = pylatex.utils.escape_latex
    rows = [
        r"{type} & {qual} & {branch} & {version} \\".format(
            type=esc(tk.trait_type),
            qual=esc(tk.trait_qualifier),
            branch=esc(tk.branch),
            version=esc(tk.version))
        for tk in trait_key_list
    ]
    latex_lines = [
        newlines + r"\begin{tabular}{llll}",
        r"Type & Qualifier & Branch & Version \\",
    ]
    latex_lines.extend(rows)
    latex_lines.append(r"\end{tabular}")
    assert isinstance(latex_lines, list)
    return latex_lines
def latex_format_code_for_object(obj, package='listings'):
    # type: (object, str) -> list
    """Return the source code of *obj* as lines of a LaTeX code listing.

    The object's docstring (if any) is stripped from the rendered source.

    Parameters
    ----------
    obj:
        Any object accepted by ``inspect.getsourcelines`` (function,
        method, class, ...).
    package:
        Either 'listings' (default) or 'minted'; selects which LaTeX
        environment wraps the code.

    Returns
    -------
    list of str
    """
    sep = "\n\n"  # paragraph break before the environment
    python_code = inspect.getsourcelines(obj)[0]
    if obj.__doc__:
        # Remove the docstring text from the source.  Bug fix: the
        # original discarded the result of str.replace(), so the
        # docstring was never actually removed.
        code_string = "".join(python_code).replace(obj.__doc__, "")
        python_code = code_string.splitlines()
    if package == 'minted':
        latex_lines = [sep + r"\begin{minted}[linenos,fontsize=\small]{python}"]
    else:
        latex_lines = [sep + r"\begin{lstlisting}"]
    for line in python_code:
        latex_lines.append(line.strip("\n"))
    if package == 'minted':
        latex_lines.append(r"\end{minted}")
    else:
        latex_lines.append(r"\end{lstlisting}")
    assert isinstance(latex_lines, list)
    return latex_lines
{
"api_name": "traits.TraitRegistry",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "traits.TraitRegistry",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "traits.TraitRegistry",
"line_number": 118,
"usage_type": "argument"
},
{
"api_n... |
73111180834 | # -*- coding: utf-8 -*-
import scrapy
import time
from scrapy.http import Request
from loguru import logger
from SafetyInformation.items import SafeInfoItem
from SafetyInformation.settings import SLEEP_TIME, TOTAL_PAGES
class SecUnSpider(scrapy.Spider):
    """Spider for www.sec-un.org: crawls the "all posts" listing pages.

    Yields one ``SafeInfoItem`` per article and follows the numbered
    listing pages up to ``TOTAL_PAGES``, sleeping ``SLEEP_TIME`` seconds
    between pages.
    """
    name = 'sec_un'
    allowed_domains = ['sec-un.org']
    start_urls = ['https://www.sec-un.org/all-posts/']
    page = 1  # current listing page (1-based)
    headers = {
        'Referer': 'https://www.sec-un.org/all-posts/',
        'Host': 'www.sec-un.org'
    }
    source = 'https://www.sec-un.org'  # copied into every item's `source` field
    def parse(self, response):
        """Extract every article on one listing page, then queue the next page."""
        logger.info("==========当前正在抓取第{}页==========".format(self.page))
        # NOTE(review): a single item instance is reused for every article on
        # the page; consider constructing a fresh SafeInfoItem per article so
        # downstream pipelines never see a mutated shared object.
        item = SafeInfoItem()
        info_list = response.xpath('//div[@class="elementor-widget-container"]/div[contains(@class,"elementor-posts--skin-classic")]/article')
        for info in info_list:
            # Per-article fields; extract_first('') yields '' when missing.
            title = info.xpath('./div/h3/a/text()').extract_first('').strip()
            link = info.xpath('./div/h3/a/@href').extract_first('')
            author = info.xpath('./div/div[@class="elementor-post__meta-data"]/span[@class="elementor-post-author"]/text()').extract_first('').strip()
            date = info.xpath('./div/div[@class="elementor-post__meta-data"]/span[@class="elementor-post-date"]/text()').extract_first('').strip()
            source = self.source
            info_type = 'news'
            intro = info.xpath('./div/div[@class="elementor-post__excerpt"]/p/text()').extract_first('')
            item['title'] = title
            item['link'] = link
            item['author'] = author
            item['date'] = date
            item['source'] = source
            item['type'] = info_type
            item['intro'] = intro
            logger.info(item)
            yield item
        # Throttle between listing pages, then follow the next one.
        time.sleep(SLEEP_TIME)
        self.page += 1
        next_url = 'https://www.sec-un.org/all-posts/page/{}/'.format(self.page)
        if self.page <= TOTAL_PAGES:
            yield Request(url=next_url, headers=self.headers, callback=self.parse)
| Silentsoul04/SafetyInformation | SafetyInformation/spiders/sec_un.py | sec_un.py | py | 2,029 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "loguru.logger.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "SafetyInformation... |
41032364306 | import os
import jinja2
import yaml
from pathlib import Path

# Jinja environment rooted at this script's directory; autoescaping is
# enabled for html/xml templates and for templates loaded from strings.
env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    autoescape=jinja2.select_autoescape(
        enabled_extensions=("html", "xml"), default_for_string=True
    ),
)

if __name__ == "__main__":
    # SDK metadata lives two directory levels above this script.
    sdk_metadata = Path(__file__).parent / ".." / ".." / "metadata" / "sdks.yaml"
    with open(sdk_metadata, "r") as file:
        metadata = yaml.safe_load(file)
    # Render one blurb per language, using each entry's "property" field
    # as the short name.
    for language in metadata.keys():
        shortname = metadata[language]["property"]
        template = env.get_template("template.txt")
        print(template.render(language=language, shortname=shortname))
| awsdocs/aws-doc-sdk-examples | .tools/images/render-blurbs.py | render-blurbs.py | py | 689 | python | en | code | 8,378 | github-code | 1 | [
{
"api_name": "jinja2.Environment",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
27421766026 | #!/usr/bin/env python3
# coding: utf-8
# In[2]:
import numpy as np
import keras
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence yielding one-hot encoded character seq2seq batches.

    Each batch is ``([encoder_input, decoder_input], decoder_target)``
    where the decoder target is the decoder input shifted one timestep
    ahead (teacher forcing) and excludes the start character.
    """

    def __init__(self, input_texts, target_texts, input_token_index, target_token_index, max_encoder_seq_length, max_decoder_seq_length, num_encoder_tokens, num_decoder_tokens, batch_size, shuffle=True):
        """Store the corpus, the two vocabularies and batching parameters."""
        self.input_texts = input_texts
        self.target_texts = target_texts
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.input_token_index = input_token_index
        self.target_token_index = target_token_index
        self.max_encoder_seq_length = max_encoder_seq_length
        self.max_decoder_seq_length = max_decoder_seq_length
        self.num_encoder_tokens = num_encoder_tokens
        self.num_decoder_tokens = num_decoder_tokens
        self.on_epoch_end()  # initialise (and optionally shuffle) the index order

    def __len__(self):
        """Number of full batches per epoch (the remainder is dropped)."""
        return int(np.floor(len(self.input_texts) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch number *index*."""
        batch_indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        input_text_sample = [self.input_texts[i] for i in batch_indexes]
        target_text_sample = [self.target_texts[i] for i in batch_indexes]
        return self.__data_generation(input_text_sample, target_text_sample)

    def on_epoch_end(self):
        """Reshuffle the sample order after every epoch (when enabled)."""
        self.indexes = np.arange(len(self.input_texts))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, input_text_sample, target_text_sample):
        """One-hot encode a list of (input, target) text pairs into arrays."""
        encoder_input_data = np.zeros(
            (len(input_text_sample), self.max_encoder_seq_length, self.num_encoder_tokens),
            dtype='float32')
        decoder_input_data = np.zeros(
            (len(input_text_sample), self.max_decoder_seq_length, self.num_decoder_tokens),
            dtype='float32')
        decoder_target_data = np.zeros(
            (len(input_text_sample), self.max_decoder_seq_length, self.num_decoder_tokens),
            dtype='float32')
        for i, (input_text, target_text) in enumerate(zip(input_text_sample, target_text_sample)):
            for t, char in enumerate(input_text):
                encoder_input_data[i, t, self.input_token_index[char]] = 1.
            for t, char in enumerate(target_text):
                # decoder_target_data is decoder_input_data shifted ahead by
                # one timestep and does not include the start character.
                decoder_input_data[i, t, self.target_token_index[char]] = 1.
                if t > 0:
                    # Bug fix: the original indexed the module-level
                    # ``target_token_index`` here instead of the instance's
                    # own vocabulary mapping.
                    decoder_target_data[i, t - 1, self.target_token_index[char]] = 1.
        return [encoder_input_data, decoder_input_data], decoder_target_data
# In[3]:
# from __future__ import print_function
import keras
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import pickle
import matplotlib.pyplot as plt
from keras.models import load_model
# In[4]:
batch_size = 512 # Batch size for training.
epochs = 50 #100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
# Path to the data txt file on disk.
# In[5]:
data_path = 'sarcastics_only.csv'
df = pd.read_csv(data_path, usecols=['parent_comment','comment'])
# df['comment'] = df['comment'].apply(lambda x: x.replace("\n", " ").join(['\t','\n']))
#punctuation spacele split
# In[6]:
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
for idx in range(len(df)):
target_text = df["comment"][idx]
input_text = df["parent_comment"][idx]
target_text = ' '.join(df["comment"][idx].split())[:200]
input_text = ' '.join(df["parent_comment"][idx].split())[:200]
target_text = target_text.join(['\t','\n'])
target_texts.append(target_text)
input_texts.append(input_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
# In[7]:
# Map each character to an integer id.
# NOTE(review): the ids come from iterating the *unsorted* sets, while the
# character lists are only sorted further below -- ids therefore do not follow
# sorted order and depend on set iteration order. They are pickled at the end
# of the script, which keeps inference consistent; confirm this is intended.
input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])
# In[8]:
#input_texts = input_texts[:1000]
#target_texts = target_texts[:1000]
# Hold out 10% of the pairs for validation (fixed seed for reproducibility).
train_input_texts, valid_input_texts, train_target_texts, valid_target_texts = train_test_split(input_texts, target_texts, test_size=0.1, random_state=42)
del input_texts, target_texts, df
# In[9]:
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
# The longest sequence over BOTH splits fixes the padded tensor lengths.
max_encoder_seq_length = max(max([len(txt) for txt in train_input_texts]), max([len(txt) for txt in valid_input_texts]))
max_decoder_seq_length = max(max([len(txt) for txt in train_target_texts]), max([len(txt) for txt in valid_target_texts]))
print('Number of samples:', len(train_input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
# In[10]:
# Batch generators (DataGenerator is defined earlier in this file) that
# one-hot-encode the text pairs on the fly instead of materialising them.
training_generator = DataGenerator(train_input_texts, train_target_texts, input_token_index, target_token_index, max_encoder_seq_length, max_decoder_seq_length, num_encoder_tokens, num_decoder_tokens, batch_size)
validation_generator = DataGenerator(valid_input_texts, valid_target_texts, input_token_index, target_token_index, max_encoder_seq_length, max_decoder_seq_length, num_encoder_tokens, num_decoder_tokens, batch_size)
# In[11]:
# Classic Keras char-level seq2seq: the encoder LSTM's final (h, c) states
# seed the decoder LSTM.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = CuDNNLSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = CuDNNLSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# In[12]:
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# In[13]:
# Stop early / checkpoint the best weights / shrink LR on validation plateaus.
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
mcp_save = ModelCheckpoint('.mdl_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, mode='min')
# NOTE(review): fit_generator is deprecated in newer Keras (model.fit accepts
# generators directly) -- fine for the pinned Keras version used here.
history = model.fit_generator(generator=training_generator,
                              validation_data=validation_generator,
                              epochs=epochs,
                              callbacks=[earlyStopping, mcp_save, reduce_lr_loss],
                              verbose=1)
# In[16]:
# Plot training & validation accuracy values
# try:
#     plt.plot(history.history['acc'])
#     plt.plot(history.history['val_acc'])
#     plt.title('Model accuracy')
#     plt.ylabel('Accuracy')
#     plt.xlabel('Epoch')
#     plt.legend(['Train', 'Test'], loc='upper left')
#     plt.show()
#     plt.savefig('acc.png')
#     plt.savefig('acc.pdf')
# except:
#     print('failed on acc plot')
# Plot training & validation loss values; plotting is best-effort and must
# never abort the run (the trained model still has to be saved below).
try:
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig('loss.png')
    plt.savefig('loss.pdf')
except Exception as exc:
    # A bare 'except:' would also swallow KeyboardInterrupt/SystemExit;
    # catch only real errors and report what went wrong.
    print('failed on loss plot:', exc)
# In[17]:
#save etcd
model.save('s2s.h5')
# In[18]:
# Define sampling models
# For inference the encoder and decoder are split into separate models:
# the encoder produces the initial states, and the decoder is stepped one
# character at a time, feeding its output states back in.
sampler_encoder_model = Model(encoder_inputs, encoder_states)
sampler_decoder_state_input_h = Input(shape=(latent_dim,))
sampler_decoder_state_input_c = Input(shape=(latent_dim,))
sampler_decoder_states_inputs = [sampler_decoder_state_input_h, sampler_decoder_state_input_c]
# Reuses the trained decoder_lstm / decoder_dense layers (shared weights).
sampler_decoder_outputs, sampler_state_h, sampler_state_c = decoder_lstm(
    decoder_inputs, initial_state=sampler_decoder_states_inputs)
sampler_decoder_states = [sampler_state_h, sampler_state_c]
sampler_decoder_outputs = decoder_dense(sampler_decoder_outputs)
sampler_decoder_model = Model(
    [decoder_inputs] + sampler_decoder_states_inputs,
    [sampler_decoder_outputs] + sampler_decoder_states)
# In[19]:
sampler_encoder_model.save('sampler_encoder_model.h5')
sampler_decoder_model.save('sampler_decoder_model.h5')
# In[20]:
# Persist the vocabularies so inference uses the exact same char->id mapping.
with open('target_token_index.pickle', 'wb') as pf:
    pickle.dump(target_token_index, pf, protocol=pickle.HIGHEST_PROTOCOL)
with open('input_token_index.pickle', 'wb') as pf:
    pickle.dump(input_token_index, pf, protocol=pickle.HIGHEST_PROTOCOL)
# num_decoder_tokens,
# In[21]:
# Shape parameters needed to rebuild the one-hot tensors at inference time.
variables = {'num_encoder_tokens':num_encoder_tokens,
             'num_decoder_tokens':num_decoder_tokens,
             'max_encoder_seq_length':max_encoder_seq_length,
             'max_decoder_seq_length':max_decoder_seq_length
            }
with open('variables.pickle', 'wb') as pf:
    pickle.dump(variables, pf, protocol=pickle.HIGHEST_PROTOCOL)
# In[ ]:
print('Done')
| GirayEryilmaz/University-Projects | cmpe597/project/seq2seq.py | seq2seq.py | py | 10,255 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "keras.utils",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.floor",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
... |
15609782422 | import json
class NLG:
    """Natural-language generation: map dialogue action names to canned text.

    The mapping is read from a JSON config file of the form
    ``{"action_name": "text", ...}``.
    """

    def __init__(self, json_path):
        """Load the action->text mapping from *json_path* (UTF-8 JSON)."""
        self.json_path = json_path
        self.get_json_infor()

    def get_json_infor(self):
        """(Re)read the config file into ``action_to_text``/``action_name``."""
        # Context manager guarantees the handle is closed even on error
        # (the original leaked it), and avoids shadowing the builtin 'file'.
        with open(self.json_path, encoding='utf-8') as f:
            self.action_to_text = json.load(f)
        self.action_name = list(self.action_to_text.keys())

    def get_text(self, actions):
        """Concatenate the text for every known action in *actions*.

        Unknown actions and the literal ' ' action are silently skipped.
        """
        text = ''
        for action in actions:
            if action in self.action_name and action != ' ':
                text += self.action_to_text[action]
        return text
| foowaa/bert-dst | nlg.py | nlg.py | py | 635 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
}
] |
24248168494 | from collections import defaultdict
class TrieNode:
    """A single trie node; children are created lazily via defaultdict."""

    def __init__(self):
        self.is_word = False  # True when a stored word ends at this node
        self.children = defaultdict(TrieNode)


class WordDictionary:
    """Trie-backed dictionary supporting '.' single-character wildcards."""

    def __init__(self):
        self.root = TrieNode()

    def add_word(self, word):
        """Insert *word* into the trie."""
        cur_node = self.root
        for ch in word:
            cur_node = cur_node.children[ch]  # defaultdict creates missing child
        cur_node.is_word = True

    def search(self, word):
        """Return True if *word* (optionally containing '.') is stored."""
        return self.search_node(self.root, word)

    def search_node(self, node, word):
        """Recursive helper: match *word* starting at *node*.

        The original wrapped this in a 'for ch in word' loop that always
        returned on its first iteration -- replaced with a direct look at
        the first character, which is what actually happened.
        """
        if not word:
            return node.is_word
        ch, rest = word[0], word[1:]
        if ch == '.':
            # Wildcard: any child may continue the match.
            return any(self.search_node(n, rest) for n in node.children.values())
        if ch in node.children:
            return self.search_node(node.children[ch], rest)
        return False
# Quick demo: store a few words, then query with a '.' wildcard.
dic = WordDictionary()
for word in ('bad', 'good', 'dad'):
    dic.add_word(word)
print(dic.search('.ad'))  # True: '.' matches the 'b' of 'bad' / 'd' of 'dad'
| ekinrf/ProgPuzzles | Cache/python/word_dict.py | word_dict.py | py | 968 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
}
] |
18187474839 | import pytest
from forest.components import tiles
# Each case pairs a tile-source constant with the exact background-tile URL
# template forest.components.tiles is expected to produce for it.
@pytest.mark.parametrize(
    "name,expect",
    [
        (
            tiles.OPEN_STREET_MAP,
            "https://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.STAMEN_TERRAIN,
            "http://tile.stamen.com/terrain-background/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.STAMEN_WATERCOLOR,
            "http://tile.stamen.com/watercolor/{Z}/{X}/{Y}.jpg",
        ),
        (
            tiles.STAMEN_TONER,
            "http://tile.stamen.com/toner-background/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.STAMEN_TONER_LITE,
            "http://tile.stamen.com/toner-lite/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.WIKIMEDIA,
            "https://maps.wikimedia.org/osm-intl/{Z}/{X}/{Y}.png",
        ),
    ],
)
def test_background_url(name, expect):
    """background_url() maps each known tile-set name to its URL template."""
    assert tiles.background_url(name) == expect
# Label overlays only exist for the Stamen tile sets; every non-terrain set
# shares the generic toner-labels overlay.
@pytest.mark.parametrize(
    "name,expect",
    [
        (
            tiles.STAMEN_TERRAIN,
            "http://tile.stamen.com/terrain-labels/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.STAMEN_WATERCOLOR,
            "http://tile.stamen.com/toner-labels/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.STAMEN_TONER,
            "http://tile.stamen.com/toner-labels/{Z}/{X}/{Y}.png",
        ),
        (
            tiles.STAMEN_TONER_LITE,
            "http://tile.stamen.com/toner-labels/{Z}/{X}/{Y}.png",
        ),
    ],
)
def test_labels_url(name, expect):
    """labels_url() maps each Stamen tile set to its label-overlay template."""
    assert tiles.labels_url(name) == expect
# Attribution strings are compared stripped, so surrounding whitespace in the
# module constants does not matter.
@pytest.mark.parametrize(
    "name,expect",
    [
        (tiles.OPEN_STREET_MAP, tiles.OPEN_STREET_MAP_ATTRIBUTION),
        (tiles.STAMEN_TERRAIN, tiles.STAMEN_TONER_AND_TERRAIN_ATTRIBUTION),
        (tiles.STAMEN_TONER, tiles.STAMEN_TONER_AND_TERRAIN_ATTRIBUTION),
        (tiles.STAMEN_TONER_LITE, tiles.STAMEN_TONER_AND_TERRAIN_ATTRIBUTION),
        (tiles.STAMEN_WATERCOLOR, tiles.STAMEN_WATERCOLOR_ATTRIBUTION),
        (tiles.WIKIMEDIA, tiles.WIKIMEDIA_ATTRIBUTION),
    ],
)
def test_attribution(name, expect):
    """attribution() returns the matching copyright/attribution text."""
    assert tiles.attribution(name).strip() == expect.strip()
| MetOffice/forest | test/test_components_tiles.py | test_components_tiles.py | py | 2,153 | python | en | code | 38 | github-code | 1 | [
{
"api_name": "forest.components.tiles.background_url",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "forest.components.tiles",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 5,
"usage_type": "call"
},
{
... |
16895758445 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description: Analyze the reopening type of a user since the beginning of his
# work to now.
# Usage:
# $ python analyze_reopening_reason.py tms-production {user login}
import erppeek
from operator import itemgetter
from collections import OrderedDict
import os
import re
import sys
from trobz.log import init_logger, logger
# --- CLI / connection setup ---
init_logger()
log = logger('analyze.reopening.reason')
# Usage: python analyze_reopening_reason.py {env} {user login}
if len(sys.argv) < 3:
    log.error('Missing argument. Usage '
              '`python analyze_reopening_reason.py {env} {user login}`')
    exit(os.EX_USAGE)
client = erppeek.Client.from_config(sys.argv[1])
User = client.model('res.users')
user_login = sys.argv[2]
target_user = User.browse([('login', '=', user_login)])
if not target_user:
    log.error('Cannot find user with login %s' % user_login)
    exit(os.EX_NOTFOUND)
user_id = target_user.id[0]
# OpenERP model proxies used throughout the analysis.
ForgeTicket = client.model('tms.forge.ticket')
ForgeReopening = client.model('forge.ticket.reopening')
WorkingHour = client.model('tms.working.hour')
# Analyze reopening by types
reopen_types = {}
TYPE_HEADER = ['Reopening Type', 'No. of Tickets']
# Categories to analyze
REOPEN_CATEG_DESCRIPTION = {
    '1-missing_req': 'Missing some requirements',
    '2-misunderstand_req': 'Misunderstanding the requirements',
    '3-not_test_before_commit': 'Not testing before code commit',
    '4-defect_code_completed': 'Defect at status code completed',
    '5-others': 'Other reasons'
}
# Regex patterns (applied case-insensitively to the reopening comment) that
# assign a reopening to one of the first three categories.
CATEG_KEYWORDS = {
    '1-missing_req': '(miss|lack|(all (requirements|points).*not done))',
    '2-misunderstand_req': '(mis[ -]*under(stand|stood)|' +
        '((get|got|under(stand|stood)).*(wrong|(not |in|un)correct)))',
    '3-not_test_before_commit': '(error|error when upgrad|' +
        '(still (not|in|un)correct))'
}
# Per-category accumulators of reopening rows (filled in the loop below).
reopen_categs = {
    '1-missing_req': [],
    '2-misunderstand_req': [],
    '3-not_test_before_commit': [],
    '4-defect_code_completed': [],
    '5-others': []
}
CATEG_HEADER = ['Reopening ID', 'Type', 'Previous Status',
                'TS for Fix before reopening (h)',
                'Ticket', 'Ticket Estimation (h)', 'Time Spent (h)',
                'Time Over Consumed (h)']
# affected tickets
reopened_ticket_ids = []
# Analyze all reopenings (except the invalid ones) performed on tickets this
# user last completed: tally by type, remember the affected tickets, and
# classify each reopening into one of the predefined categories.
reopenings = ForgeReopening.browse(
    [('last_completer_id', '=', user_id),
     ('reopening_type', '!=', 'invalid')])
reopening_total = len(reopenings)
reopening_count = 0
for reopening in reopenings:
    reopening_count += 1
    # In-place progress counter on a single console line.
    sys.stdout.write("Analyzing %5d/%5d reopenings \r" % (reopening_count,
                                                          reopening_total))
    sys.stdout.flush()
    ticket = reopening.name
    # Tally reopenings per type.
    if reopening.reopening_type not in reopen_types:
        reopen_types[reopening.reopening_type] = 0
    reopen_types[reopening.reopening_type] += 1
    # Remember every distinct reopened ticket.
    if ticket.id not in reopened_ticket_ids:
        reopened_ticket_ids.append(ticket.id)
    # Classify the reopening into predefined categories by
    # looking for some keywords in its comment.
    categorized = False
    reopening_data = (
        str(reopening.id), reopening.reopening_type, reopening.pre_state,
        str(reopening.fixing_time_spent), str(ticket.name),
        str(ticket.development_time), str(ticket.time_spent),
        ticket.time_spent - ticket.development_time)
    # .items() works on both Python 2 and 3 (dict.iteritems() is Py2-only).
    for categ, pattern in CATEG_KEYWORDS.items():
        if not re.search(pattern, reopening.comment, re.M | re.I):
            continue
        reopen_categs[categ].append(reopening_data)
        categorized = True
        break
    # If none of the categories matches, add it to category Others.
    if not categorized:
        if reopening.reopening_type == 'defect' and\
                reopening.pre_state == 'code_completed':
            reopen_categs['4-defect_code_completed'].append(reopening_data)
        else:
            reopen_categs['5-others'].append(reopening_data)
log.info("========== ANALYSIS ==========")
# Print one table per category, worst offenders first.
for categ in sorted(reopen_categs.keys()):
    data = reopen_categs[categ]
    log.info('\n%s (%s times)' % (REOPEN_CATEG_DESCRIPTION[categ], len(data)))
    pattern = CATEG_KEYWORDS.get(categ, '')
    if pattern:
        log.info('\t(search with pattern: %s' % pattern)
    if not data:
        log.info('Clean...')
        continue
    # Sort by over time consumed, ticket id desc, reopening id
    # (three chained stable sorts: the last key applied dominates).
    data = sorted(
        sorted(sorted(data, key=itemgetter(0)),
            key=itemgetter(4),
            reverse=True),
        key=itemgetter(7),
        reverse=True)
    log.table(data, CATEG_HEADER)
# Count all tickets which was developed by this user and sum up the time
# estimates vs the hours actually logged (by the user alone, and by everyone).
developed_tickets = ForgeTicket.browse([('developer_id', '=', user_id)])
total_estimate = 0
total_user_spent = 0
total_all_spent = 0
ticket_total = len(developed_tickets)
ticket_count = 0
for ticket in developed_tickets:
    ticket_count += 1
    # In-place progress counter on a single console line.
    sys.stdout.write("Analyzing %5d/%5d tickets \r" % (ticket_count,
                                                       ticket_total))
    sys.stdout.flush()
    total_estimate += ticket.development_time
    total_all_spent += ticket.time_spent
    # Sum only this user's logged hours on the ticket.
    for wh in WorkingHour.browse(
            [('user_id', '=', user_id),
             ('tms_forge_ticket_id', '=', ticket.id)]):
        total_user_spent += wh.duration_hour
log.info("========== GRAND SUMMARY ==========")
# .items() works on both Python 2 and 3 (dict.iteritems() is Py2-only).
log.table([(reopen_type, str(count))
           for reopen_type, count in reopen_types.items()],
          TYPE_HEADER)
reopened_tickets_count = len(reopened_ticket_ids)
summary_reopen_header = [
    'No. of tickets', 'Reopened tickets', 'Reopening times',
    'Reopening rate (%)', 'Reopening times/ticket']
# NOTE(review): these ratios raise ZeroDivisionError when the user has no
# tickets or no reopenings at all -- confirm whether a guard is wanted.
summary_reopen_content = [(
    str(ticket_total),
    str(reopened_tickets_count),
    str(reopening_total),
    '%.2f' % (1.0 * reopened_tickets_count / ticket_total * 100),
    '%.2f' % (1.0 * reopening_total / reopened_tickets_count))]
log.table(summary_reopen_content, summary_reopen_header)
summary_time_header = [
    'Total estimate (h)', 'Total TS - user (h)', 'Total TS - team (h)',
    'User Efficiency (%)', 'Team Efficiency (%)']
summary_time_content = [(
    str(total_estimate), str(total_user_spent), str(total_all_spent),
    '%.2f' % (1.0 * total_estimate / total_user_spent * 100),
    '%.2f' % (1.0 * total_estimate / total_all_spent * 100))]
log.table(summary_time_content, summary_time_header)
| TinPlusIT05/tms | erppeek/analyze_reopening_reason.py | analyze_reopening_reason.py | py | 6,456 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "trobz.log.init_logger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "trobz.log.logger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.EX_USAGE",
... |
2715646635 | import os,glob
from Bio import SeqIO
import statistics
import numpy as np
from Bio.Seq import Seq
# --- Configuration: input/output paths and binding-site selection thresholds ---
input_bs_file = '/scratch/users/anniz44/genomes/donor_species/vcf_round2/BS/binding_results_ccpA.txt'
ref_BS = '/scratch/users/anniz44/genomes/donor_species/vcf_round2/BS/ccpA_BS_RegPrecise_difflength.fa'
vcf_folder = '/scratch/users/anniz44/genomes/donor_species/vcf_round2/merge/details/'
output_folder = '/scratch/users/anniz44/genomes/donor_species/vcf_round2/BS/'
No_BS_pick = 10# top 10 BS
mut_cutoff = 0.1 # 10% -> 5bp
mut_cutoff2 = 5
def find_strains(vcf_file, genomewithSNP):
    """Parse the 'CHR...' header line of *vcf_file* for genome column names.

    *genomewithSNP* holds 1-based column indices as strings; the genomes at
    those positions are reported as the mutated strains.
    Returns [mut_strains, allgenome].
    """
    mut_strains = []
    with open(vcf_file, 'r') as handle:
        for header in handle:
            if header.startswith('CHR'):
                # Genome names start at the 10th tab-separated column.
                allgenome = header.split('\n')[0].split('\t')[9:]
                for position, genome in enumerate(allgenome, start=1):
                    if str(position) in genomewithSNP:
                        mut_strains.append(genome)
                break
    return [mut_strains, allgenome]
# Count SNP differences between two binding-site sequences.
def compare_BS(seq, seq2, mut_cutoff_set=0):
    """Return the number of positions where *seq2* differs from *seq*.

    With a non-zero *mut_cutoff_set* the scan stops early once the count
    exceeds the cutoff (so the return value may be cutoff + 1).
    """
    mismatches = 0
    for position in range(len(seq)):
        if seq2[position] != seq[position]:
            mismatches += 1
            if mut_cutoff_set != 0 and mismatches > mut_cutoff_set:
                break
    return mismatches
def load_genes(input_faa):
    """Map each contig to its genes' [start-1, end-1, gene_id] triples.

    Coordinates are taken from the FASTA description, which is assumed to be
    of the form '... # start # end # ...' (Prodigal-like -- TODO confirm);
    the contig name is the gene id minus its trailing '_<index>' suffix.
    """
    mapping = dict()
    for record in SeqIO.parse(input_faa, 'fasta'):
        gene_id = str(record.id)
        contig = '_'.join(gene_id.split('_')[:-1])
        fields = str(record.description).replace(' ', '').split('#')
        start, end = int(fields[1]) - 1, int(fields[2]) - 1
        mapping.setdefault(contig, []).append([start, end, gene_id])
    return mapping
def load_BS(BS_file, Mapping_loci_all):
    """Parse one genome's fimo.tsv binding-site hits.

    Records each BS sequence in the global ``allBSset`` (wild-type vs mutated
    strain sets), writes the per-genome BS table and the protein sequences of
    the downstream target genes. Relies on the globals: allBSset, genomename,
    mut_strains, select_seq_faa, input_faa, output_file, No_BS_pick.
    """
    allBS = []
    allBS.append('BS\tpvalue\tlocus\tcontig\tstrand\ttargetgane\tlocusgene\n')
    target_gene_list = dict()
    # BUG FIX: 'i' used to be reset to 0 on every line, so the
    # 'i < No_BS_pick' guard never limited anything; initialise it once so
    # only the top No_BS_pick hits are actually kept.
    i = 0
    for lines in open(BS_file, 'r'):
        if not lines.startswith('#') and not lines.startswith('motif_id') and lines != '\n':
            lines_set = lines.split('\n')[0].split('\t')
            if i < No_BS_pick:
                i += 1
                pvalue = lines_set[7]
                contig, locus1, locus2, strand = lines_set[2:6]
                locus1 = int(locus1)
                locus2 = int(locus2)
                targetgene = ''
                locus_target = 0
                # First gene on the contig whose end is at/after the BS end.
                if contig in Mapping_loci_all:
                    for locus in Mapping_loci_all[contig]:
                        locusre1, locusref2, genename = locus
                        if locus2 <= locusref2 and targetgene == '':
                            targetgene = genename
                            locus_target = locusre1
                seq = lines_set[9]
                # allBSset[seq] = [wild-type genome set, mutated genome set]
                allBSset.setdefault(seq, [set(), set()])
                if genomename in mut_strains:
                    allBSset[seq][-1].add(genomename)
                else:
                    allBSset[seq][0].add(genomename)
                if targetgene != '':
                    if strand == '-':
                        # the gene before (minus-strand BS regulates upstream)
                        gene_locus = int(targetgene.split('_')[-1])
                        if gene_locus > 1:
                            targetgene = '_'.join(targetgene.split('_')[0:-1]) + '_%s' % (
                                int(targetgene.split('_')[-1]) - 1)
                        else:
                            targetgene = '%s_1' % (contig)
                    allBS.append('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
                        seq, pvalue, locus1, contig, strand, targetgene, locus_target))
                    target_gene_list.setdefault(targetgene, set())
                    target_gene_list[targetgene].add(seq)
    # De-duplicated per-genome BS table ('targetgane' header kept as-is for
    # compatibility with downstream parsers).
    f1 = open('%s/%s/%s.BS.txt' % (output_file, genomename, genomename), 'w')
    f1.write(''.join(list(set(allBS))))
    f1.close()
    # Emit the protein sequence of every target gene, tagged with the BS seq.
    aa_output = []
    genomename_short = genomename.replace('_BL_', '_S')
    for record in SeqIO.parse(input_faa, 'fasta'):
        record_id = str(record.id)
        if record_id in target_gene_list:
            for seq in target_gene_list[record_id]:
                aa_output.append('>%s_%s_C_%s_G_%s\n%s\n' % (
                    seq, genomename_short, record_id.split('_')[1], record_id.split('_')[-1], str(record.seq)))
                select_seq_faa.setdefault(seq, '>%s_%s_C_%s_G_%s\n%s\n' % (
                    seq, genomename_short, record_id.split('_')[1], record_id.split('_')[-1], str(record.seq)))
    f1 = open('%s/%s/%s.BS.faa' % (output_file, genomename, genomename), 'w')
    f1.write(''.join(aa_output))
    f1.close()
def compareBS():
    """Link every BS sequence in the global ``allBSset`` to its close
    neighbours (<= cutoff mismatches). Returns [BS_diff, alldiff_set]."""
    BS_diff = dict()
    alldiff_set = []
    for seq in allBSset:
        BS_diff.setdefault(seq, set())
        if mut_cutoff2 == 0:
            # set cutoff as 10% top similar -> 5bp
            # NOTE(review): alldiff_set keeps growing across outer
            # iterations, so the 10% quantile drifts as more sequences are
            # processed -- confirm this accumulation is intended.
            for seq2 in allBSset:
                if seq2 != seq:
                    alldiff = compare_BS(seq, seq2)
                    alldiff_set.append(alldiff)
            newmut_cutoff = np.quantile(alldiff_set, [0.1])[0]
        else:
            # preset cutoff
            newmut_cutoff = mut_cutoff2
        # Record every other site within the mismatch cutoff.
        for seq2 in allBSset:
            if seq2 != seq:
                alldiff = compare_BS(seq, seq2, newmut_cutoff)
                if alldiff <= newmut_cutoff:
                    BS_diff[seq].add(seq2)
    return [BS_diff,alldiff_set]
# Decide whether a BS looks mutated: present in some mutated strain, absent
# from most wild-type strains.
def select_BS(list_seq):
    """Return [no_mut, no_wt, selected] for a [wt_set, mut_set] pair.

    Selected when at least one mutated strain carries the site and fewer
    than half of the wild-type strains do (uses globals allgenome /
    mut_strains for the wild-type total).
    """
    no_mut = len(list_seq[-1])
    no_wt = len(list_seq[0])
    wt_total = len(allgenome) - len(mut_strains)
    selected = no_mut > 0 and no_wt < wt_total * 0.5
    return [no_mut, no_wt, selected]
def select_reversecomplement(seq):
    """Return True when *seq* sorts no later than its reverse complement.

    Used to keep exactly one canonical orientation per binding site.
    """
    reverse_comp = str(Seq(seq).reverse_complement())
    return seq == min(seq, reverse_comp)
def find_candidate_mut_BS():
    """Report candidate mutated binding sites and their similar wild-type
    sites, writing a summary table and (if any) a FASTA of target proteins.

    Uses the globals: allBSset, BS_diff, Ref, select_seq_faa, output_file_BS.
    """
    allBS_all = dict()
    allseq = list(allBSset.keys())
    allBS_select = dict()
    for seq in allBSset:
        inref = False
        if seq in Ref:
            inref = True
        no_mut, no_wt, selected = select_BS(allBSset[seq])
        withsim_wt = ''
        if selected:
            if BS_diff[seq] != set():
                for seq2 in BS_diff[seq]:
                    if len(allBSset[seq2][0]) > 0 and not any(mut in allBSset[seq][-1] for mut in allBSset[seq2][-1]):
                        # does not share mut strains, some wt has it
                        # potential mutated BS from wt BS
                        # wt BS similar to mutated BS
                        withsim_wt += '%s;' % (allseq.index(seq2))
                        allBS_select.setdefault(seq, set())
                        allBS_select[seq].add(seq2)
            if withsim_wt == '':
                # no similar wt
                allBS_select[seq] = set()
        # One summary row per BS sequence, keyed by its index in allseq.
        allBS_all.setdefault(seq, ('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
            allseq.index(seq),
            seq, no_wt, no_mut, withsim_wt, selected,
            inref, ';'.join(BS_diff[seq]),
            ';'.join(allBSset[seq][0]), ';'.join(allBSset[seq][-1])
        )))
    # output result
    allBS = []
    allBS.append('SNPdiff\tBS_order\tBS\tNo.wt\tNo.mut\tmut_hit\twithsim_wt\tref\tsimilarseq\twt\tmut\n')
    allseqout = []
    for seq in allBS_select:
        if select_reversecomplement(seq):
            # one orientation
            if allBS_select[seq] != set():
                # Candidate row first (0 mismatches to itself), then every
                # similar wild-type site with its mismatch count.
                allBS.append('%s\t%s' % (0, allBS_all[seq]))
                for seq2 in allBS_select[seq]:
                    alldiff = compare_BS(seq, seq2)
                    allBS.append('%s\t%s' % (alldiff, allBS_all[seq2]))
                    allseqout.append(select_seq_faa.get(seq2, ''))
                allBS.append('\n')
            allseqout.append(select_seq_faa.get(seq,''))
    f1 = open('%s.BS.txt' % (output_file_BS), 'w')
    f1.write(''.join(allBS))
    f1.close()
    # Only annotate when at least one non-empty protein sequence was found.
    if allseqout!=[] and not all(gene =='' for gene in allseqout):
        fasta_output = '%s.BS.faa' % (output_file_BS)
        f1 = open(fasta_output, 'w')
        f1.write(''.join(allseqout))
        f1.close()
        # run eggnog
        annotate(fasta_output)
def annotate(fasta_output):
    """Cluster the target-protein FASTA with usearch, then write a batch
    script that hmmsearches the clusters against the eggNOG HMM databases.

    NOTE(review): commands are built by string interpolation and run via
    os.system / a shell script; paths here are internal, but confirm none
    can contain shell metacharacters.
    """
    # 70% identity clustering to collapse near-identical proteins.
    cutoff = 0.7
    cmd_cluster = ('%s -sort length -cluster_fast %s -id %s -centroids %s.cluster.aa -uc %s.uc -threads %s\n'
                   % ('usearch', fasta_output, cutoff, fasta_output,
                      fasta_output, 40))
    os.system(cmd_cluster)
    fasta_output = fasta_output + '.cluster.aa'
    # E-value threshold for the hmmsearch runs below.
    cutoff = 0.01
    database = '/scratch/users/mit_alm/database/eggnog/xaa.hmm'
    cmds = ('hmmsearch --tblout %s.eggnog.1.txt --cpu 40 -E %s %s %s\n') % (
        fasta_output, cutoff, database, fasta_output)
    database = '/scratch/users/mit_alm/database/eggnog/xab.hmm'
    cmds += ('hmmsearch --tblout %s.eggnog.2.txt --cpu 40 -E %s %s %s\n') % (
        fasta_output, cutoff, database, fasta_output)
    database = '/scratch/users/mit_alm/database/eggnog/xac.hmm'
    cmds += ('hmmsearch --tblout %s.eggnog.3.txt --cpu 40 -E %s %s %s\n') % (
        fasta_output, cutoff, database, fasta_output)
    # The hmmsearch commands are written to a .sh file to be submitted later
    # (see the allanno.sh driver at the bottom of this script).
    f1 = open(output_file_BS + '.eggnog.sh', 'w')
    f1.write(
        '#!/bin/bash\nsource ~/.bashrc\nexport LD_LIBRARY_PATH=/scratch/users/anniz44/bin/pro/lib/gsl-2.6:/scratch/users/anniz44/bin/pro/lib/glibc-2.14-build:/scratch/users/anniz44/bin/pro/lib/:/scratch/users/anniz44/bin/miniconda3/lib:$LD_LIBRARY_PATH\n%s' % (
            cmds))
    f1.close()
# load ref
# Reference BS sequences (used only to flag whether a site is known).
Ref = []
if ref_BS != 'None':
    for record in SeqIO.parse(ref_BS, 'fasta'):
        Ref.append(str(record.seq))
# process each SNP
for lines in open(input_bs_file,'r'):
    if not lines.startswith('AA_POS_ref'):
        lines_set = lines.split('\t')
        lineage = lines_set[4].split('__')[0]
        species = lines_set[4].split('_')[0]
        donor = lines_set[5]
        SNP = lines_set[3]
        # Skip SNPs already handled elsewhere.
        if SNP not in ['K226*','A23V','G12R','A112V']:
            # find genome names
            vcf_file = '%s/%s%s'%(vcf_folder,lineage.replace('CL','clustercluster'),'.all.parsi.fasta.linktrunc.sum.txt')
            print(vcf_file)
            mut_strains, allgenome = find_strains(vcf_file,lines_set[-9].split(';'))
            print(mut_strains)
            # process fino results
            output_file = '%s/%s_%s'%(output_folder,species,donor)
            output_file_BS = '%s/%s_%s_%s'%(output_folder,species,donor,SNP)
            print(output_file_BS)
            # Global state consumed by load_BS/compareBS/find_candidate_mut_BS.
            allBSset = dict()
            # load BS
            select_seq_faa = dict()
            for BS_folder in glob.glob('%s/*' % (output_file)):
                genomename = os.path.split(BS_folder)[-1]
                if genomename in allgenome:
                    # load BS file and target genes
                    BS_file = glob.glob('%s/fimo.tsv' % (BS_folder))[0]
                    input_faa = '%s/%s/%s.faa' % (output_file, genomename,genomename)
                    # load all gene position
                    Mapping_loci_all = load_genes(input_faa)
                    # load BS
                    load_BS(BS_file,Mapping_loci_all)
            # compare BS differences
            BS_diff,alldiff_set = compareBS()
            # find candidate mut BS
            find_candidate_mut_BS()
# Write the driver that submits every generated eggnog annotation script.
f1 = open(os.path.join(output_folder, 'allanno.sh'), 'w')
f1.write('#!/bin/bash\nsource ~/.bashrc\n')
for sub_scripts in glob.glob(os.path.join(output_folder, '*eggnog.sh')):
    f1.write('jobmit %s %s small1\n' % (sub_scripts, os.path.split(sub_scripts)[-1]))
f1.close()
print('Done')
| caozhichongchong/snp_finder | snp_finder/scripts/compareBSold.py | compareBSold.py | py | 11,703 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "Bio.SeqIO.parse",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "Bio.SeqIO.parse",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "Bio.SeqIO",
"line_numbe... |
34841985760 | # Binary Tree Right Side View: https://leetcode.com/problems/binary-tree-right-side-view/
# Given the root of a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
# Initial solution is pretty simple you just do a depth level traversal with a bst and append the last element you see on that level
from collections import deque
# 20 min
# This solution is o(n) time and o(D) space where d is the diameter of the largest level
class Solution:
    """Right side view via a single-queue BFS that tracks each node's level."""

    def rightSideView(self, root):
        """Return the node values visible from the right, top to bottom.

        BFS left-to-right: when the level number increases, the node seen
        just before the change was the rightmost node of the previous level.
        The rightmost node of the deepest level is flushed after the loop.
        O(n) time, O(w) space for the widest level w.

        BUG FIX: the original appended the FIRST node of each new level (and
        the root via an in-loop length check), which actually produced the
        LEFT side view -- e.g. [1, 2, 5] instead of [1, 3, 4] on the tree
        [1, 2, 3, null, 5, null, 4].
        """
        if root is None:
            return []
        result = []
        queue = deque([(root, 0)])
        last_node, last_level = root, 0
        while queue:
            node, level = queue.popleft()
            if level != last_level:
                # First node of a new level: last_node was the rightmost
                # node of the previous level.
                result.append(last_node.val)
            last_node, last_level = node, level
            if node.left:
                queue.append((node.left, level + 1))
            if node.right:
                queue.append((node.right, level + 1))
        # Flush the rightmost node of the deepest level.
        result.append(last_node.val)
        return result
# After reviewing the solution there is actually another solution that I wanted to implement it is similar
# however it uses two queues to figure out the last element basically whenever the first q is empty
# you have reached the end of the level. Then you move the second q to the first
# So after testing this solution it looks like this solution is slower than my initial mainly because it has slightly more computations
# that being said the overall complexity is the same and this is a simpler solution
class Solution2:
    """Right side view via level-by-level BFS with two deques."""

    def rightSideView(self, root):
        """Return the rightmost node value of each level, top to bottom.

        Nodes of the current level are drained from ``curLevel``
        left-to-right while their children are queued into ``nextLevel``;
        only the LAST node drained from each level (its rightmost node) is
        recorded.

        BUG FIX: the original appended every node's value inside the inner
        loop, returning a full level-order traversal instead of one value
        per level.
        """
        result = []
        if root is None:
            return []
        nextLevel = deque()
        nextLevel.appendleft(root)
        while nextLevel:
            curLevel = nextLevel
            nextLevel = deque()
            while curLevel:
                node = curLevel.pop()
                if node.left:
                    nextLevel.appendleft(node.left)
                if node.right:
                    nextLevel.appendleft(node.right)
                if not curLevel:
                    # Last node popped from this level == rightmost node.
                    result.append(node.val)
        return result
# Score Card
# Did I need hints?
# Did you finish within 30 min?
# Was the solution optimal?
# Were there any bugs?
# 4 5 5 3 = 4.25
| KevinKnott/Coding-Review | Month 01/Week 01/Day 06/a.py | a.py | py | 2,545 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 58,
"usage_type": "call"
}
] |
27401851860 | import pytest
from cognite.pygen.utils.text import to_pascal, to_snake
# Each case: (input word, singularize flag, pluralize flag, expected output).
@pytest.mark.parametrize(
    "word, singularize, pluralize, expected",
    [
        ("Actress", True, False, "Actress"),
        ("BestLeadingActress", True, False, "BestLeadingActress"),
        ("Actress", False, True, "Actresses"),
    ],
)
def test_to_pascal(word: str, singularize: bool, pluralize: bool, expected: str):
    """to_pascal should PascalCase the word and honour the plurality flags."""
    # Act
    actual = to_pascal(word, singularize=singularize, pluralize=pluralize)
    # Assert
    assert actual == expected
# Each case: (input word, singularize flag, pluralize flag, expected output).
@pytest.mark.parametrize(
    "word, singularize, pluralize, expected",
    [
        ("APM_Activity", False, True, "apm_activities"),
        ("APM_Material", False, True, "apm_materials"),
    ],
)
def test_to_snake(word: str, singularize: bool, pluralize: bool, expected: str):
    """to_snake should snake_case the word and pluralize the last segment."""
    # Act
    actual = to_snake(word, singularize=singularize, pluralize=pluralize)
    # Assert
    assert actual == expected
| cognitedata/pygen | tests/test_unit/test_generator/test_utils/test_text.py | test_text.py | py | 942 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "cognite.pygen.utils.text.to_pascal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_nam... |
35575043040 | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Article, Lesson, NewUser, Tutorial, Chapter, Book
from .serializers import (ArticleSerializer,
RegisterSerializer,
LoginSerializer,
TutorialSerializer,
LessonSerializer,
BookSerializer,
ChapterSerializer)
from rest_framework import generics
from rest_framework import permissions
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework import status
from .models import update_last_login
from django.contrib.auth import logout
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from rest_framework import authentication
from itertools import chain
# Create your views here.
def index(request):
    """Serve the compiled front-end single-page app's entry point."""
    return render(request, 'build/index.html')
# Register user and generate the token
# csrf_exempt on 'post' lets clients register without a CSRF cookie.
@method_decorator(csrf_exempt, name='post')
class RegisterView(generics.CreateAPIView):
    """Create a NewUser account (anonymous access allowed)."""
    queryset = NewUser.objects.all()
    permission_classes = []  # open endpoint: anyone may register
    serializer_class = RegisterSerializer
class LoginAPIView(APIView):
    """Authenticate a user and return (creating if needed) their DRF token."""
    permission_classes = (permissions.AllowAny,)

    def post(self, request):
        """Validate credentials, refresh last_login, and return the token.

        Raises a DRF ValidationError (400) when the credentials are invalid.
        """
        serializer = LoginSerializer(data=request.data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data['user']
        update_last_login(None, user)
        token, created = Token.objects.get_or_create(user=user)
        # (Removed a debug print(user) that leaked user info to stdout
        # on every login.)
        return Response({"status": status.HTTP_200_OK, "token": token.key, "message": "User Logged In"})
class Search(APIView):
    """Keyword search over articles for anonymous users.

    NOTE(review): authenticated users (and requests without a 'query'
    parameter) always receive an empty result, matching the original
    behaviour -- confirm this is intentional before changing it.
    """
    permission_classes = [IsAuthenticatedOrReadOnly]

    def get(self, request):
        """Return articles whose title or description contains ?query=."""
        query = request.GET.get('query')
        articles = Article.objects.all()
        matches = []
        if not request.user.is_authenticated and query is not None:
            title_hits = articles.filter(title__icontains=query)
            # Description matches, de-duplicated against the title matches;
            # title hits come first in the combined result.
            description_hits = articles.filter(description__icontains=query).exclude(title__icontains=query)
            matches = chain(title_hits, description_hits)
        serializer_data = ArticleSerializer(matches, many=True)
        return Response({"Article": serializer_data.data})
class ArticleView(APIView):
    """Return every content type (articles, lessons, tutorials, chapters,
    books) in one payload.

    Anonymous requests only see objects flagged ``is_public``; authenticated
    (token) requests see everything.
    """
    permission_classes = [IsAuthenticatedOrReadOnly]
    authentication_classes = (authentication.TokenAuthentication,)

    def get(self, request, format=None):
        """Serialize all five content types, filtered by visibility."""
        # (queryset, serializer) per response key -- collapses the original
        # five-fold copy-paste of the authenticated/anonymous branches.
        sources = {
            "Article": (Article.objects.all(), ArticleSerializer),
            "Lesson": (Lesson.objects.all(), LessonSerializer),
            "Tutorial": (Tutorial.objects.all(), TutorialSerializer),
            "Chapter": (Chapter.objects.all(), ChapterSerializer),
            "Book": (Book.objects.all(), BookSerializer),
        }
        payload = {}
        for key, (queryset, serializer_class) in sources.items():
            if not request.user.is_authenticated:
                # Anonymous users only see public content.
                queryset = queryset.exclude(is_public=False)
            payload[key] = serializer_class(queryset, many=True).data
        return Response(payload)
# serializer_article = ArticleSerializer(article_data, many = True)
# serializer_lesson = LessonSerializer(lesson_data, many = True)
# serializer_tutorial = TutorialSerializer(tutorial_data, many = True)
# serializer_chapter = ChapterSerializer(chapter_data, many = True)
# serializer_book = BookSerializer(book_data, many = True)
# serializer_article = ArticleSerializer(queryset1, many = True)
# serializer_lesson = LessonSerializer(queryset2, many = True)
# serializer_tutorial = TutorialSerializer(queryset3, many = True)
# serializer_chapter = ChapterSerializer(queryset4, many = True)
# serializer_book = BookSerializer(queryset5, many = True)
# return Response({"Article": serializer_article.data,
# "Lesson": serializer_lesson.data,
# "Tutorial": serializer_tutorial.data,
# "Chapter": serializer_chapter.data,
# "Book": serializer_book.data})
# article_serializer = self.serializer_class(article_data, many = True)
# return Response(article_serializer.data)
# serializer_article = self.serializer_class()
# article_data = self.queryset.exclude(is_public = False)
# article_serializer = self.serializer_class(article_data, many = True)
# return Response(article_serializer.data)
# class RegisterView(APIView):
# permissions = []
# def post(self, request):
# serializer = RegisterSerializer(data = request.data)
# if not serializer.is_valid():
# return Response({'status': 403, 'error': serializer.error, 'message': "Some Error Occured"})
# serializer.save()
# user = NewUser.objects.get(email = serializer.data['email'])
# token_obj, _ = Token.objects.get_or_create(user=user)
# return Response({'status': 200, 'User': serializer.data, 'token': str(token_obj)})
# @api_view(["POST"])
# @permission_classes([AllowAny])
# def login_user(request):
# data = {}
# reqBody = json.loads(request.body)
# email1 = reqBody['Email_Address']
# print(email1)
# password = reqBody['password']
# try:
# Account = NewUser.objects.get(Email_Address=email1)
# except BaseException as e:
# raise ValidationError({"400": f'{str(e)}'})
# token = Token.objects.get_or_create(user=Account)[0].key
# print(token)
# if not check_password(password, Account.password):
# raise ValidationError({"message": "Incorrect Login credentials"})
# if Account:
# if Account.is_active:
# print(request.user)
# login(request, Account)
# data["message"] = "user logged in"
# data["email_address"] = Account.email
# Res = {"data": data, "token": token}
# return Response(Res)
# else:
# raise ValidationError({"400": f'Account not active'})
# else:
# raise ValidationError({"400": f'Account doesnt exist'})
| devanshsharma416/ReactDjangoApplication | UserModel/views.py | views.py | py | 8,063 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.CreateAPIView",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 31,
"usage_type": "name"
},... |
71994365793 | from django.urls import path
from .views import *
from . import views
app_name = 'eventos'

# Calendar, event detail/attendance routes, plus the admin-panel CRUD views.
urlpatterns = [
    path('calendario-dinamico/', Calendario.as_view(), name='calendario-dinamico'),
    path('evento/', MostrarEvento.as_view(), name='detalle-evento'),
    path('evento/<int:pk>/asistencias', ConfirmarAsistencia.as_view(), name='asistencias'),
    path('evento/<int:pk>/', RedirigirEvento.as_view(), name='evento-especifico'),
    path('evento/filtrado', views.lista_eventos, name='evento-filtrado'),
    path('evento/panel', views.mostrar_panel, name='panel'),
    path('evento/panel-eventos', CrearEvento.as_view(), name='panel-eventos'),
    path('evento/panel-categorias', CrearCategoria.as_view(), name='panel-categorias'),
    path('evento/<int:pk>/panel-actualizar-eventos', EventoUpdateView.as_view(), name='panel-actualizar-eventos'),
    path('evento/<int:pk>/panel-actualizar-categorias', CategoriaUpdateView.as_view(), name='panel-actualizar-categorias'),
    path('evento/<int:pk>/panel-borrar-eventos', EventoDeleteView.as_view(), name='panel-borrar-eventos'),
    path('evento/<int:pk>/panel-borrar-categorias', CategoriaDeleteView.as_view(), name='panel-borrar-categorias'),
]
| lucasppperalta/ONG-WEB-BLOG-INFORMATORIO2 | eventos/urls.py | urls.py | py | 1,195 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
20994653930 | # Increase the chances that this code will work in both Python 2 and Python 3 (however, this is written for Python 3!!!)
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from typing import *
from astropy.io import fits
from tdfdr import aaorun
import logging
# Module-level logger; default to WARNING so importing code stays quiet.
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
class SAMIObservation(object):
    """A single raw SAMI observation file plus metadata read from its FITS header."""

    def __init__(self, raw_filename):
        """Open *raw_filename* and cache its NDF class, plate ID and spectrograph arm.

        Raises FileNotFoundError if the file does not exist.  (The original used
        a duplicated ``assert`` for this check; asserts disappear under
        ``python -O``, so an explicit raise is safer.)
        """
        if not os.path.exists(raw_filename):
            raise FileNotFoundError(raw_filename)
        self.is_reduced = False
        self.raw_filename = raw_filename
        self.tlm_filename = None
        self.provenance_data = {}
        with fits.open(self.raw_filename) as fits_data:
            self.ndf_class = fits_data["STRUCT.MORE.NDF_CLASS"].data[0][0]
            try:
                self.plate_id = fits_data[0].header["PLATEID"]
            except KeyError:
                # Some frames carry no plate identifier.
                self.plate_id = None
            self.spectrograph_arm = fits_data[0].header["SPECTID"]

    @property
    def base_filename(self):
        """Raw filename without directory or extension."""
        filename, extension = os.path.splitext(os.path.basename(self.raw_filename))
        return filename

    @property
    def reduced_filename(self):
        """Filename 2dfdr gives the reduced product for this observation."""
        return self.base_filename + "red.fits"
class SAMIReductionGroup(object):
    """Collect together matched calibrations and science observations"""
    def __init__(self, plate_id, idx_file):
        self.tlm_observation = None  # type: SAMIObservation
        self.arc_observation = None  # type: SAMIObservation
        self.fiber_flat_observation = None  # type: SAMIObservation
        self.science_observation_list = []  # type: List[SAMIObservation]
        self.idx_file = idx_file  # type: str
        self.plate_id = plate_id  # type: str
    def make_tramline_map(self):
        """Run 2dfdr's make_tlm step; records the resulting tramline-map filename."""
        aaorun("make_tlm", self.tlm_observation.raw_filename, self.idx_file)
        self.tlm_observation.tlm_filename = self.tlm_observation.base_filename + "tlm.fits"
    def reduce_arc(self):
        """Reduce the arc frame; requires make_tramline_map to have run first."""
        aaorun("reduce_arc", self.arc_observation.raw_filename, self.idx_file,
               tlm_file=self.tlm_observation.tlm_filename)
        self.arc_observation.is_reduced = True
    def reduce_fiber_flat(self):
        """Reduce the fibre flat; requires the tramline map and reduced arc."""
        aaorun("reduce_fflat", self.fiber_flat_observation.raw_filename, self.idx_file,
               tlm_file=self.tlm_observation.tlm_filename,
               arc_file=self.arc_observation.reduced_filename)
        self.fiber_flat_observation.is_reduced = True
    def reduce_objects(self):
        """Reduce every science frame using the group's reduced calibrations."""
        for science_observation in self.science_observation_list:
            aaorun("reduce_object", science_observation.raw_filename, self.idx_file,
                   arc_file=self.arc_observation.reduced_filename,
                   fiber_flat_file=self.fiber_flat_observation.reduced_filename,
                   tlm_file=self.tlm_observation.tlm_filename)
            science_observation.is_reduced = True
    def reduce(self):
        """Run the full reduction pipeline in dependency order."""
        self.make_tramline_map()
        self.reduce_arc()
        self.reduce_fiber_flat()
        self.reduce_objects()
class SAMIReductionManager(object):
    """Sort incoming observations into per-(plate, arm) reduction groups."""
    def __init__(self):
        self.tramline_observations = []
        self.arc_observations = []
        self.flatfield_observations = []
        self.science_observations = []
        self.reduction_groups = {}  # type: Dict[str, SAMIReductionGroup]
    def all_observations(self):
        """Return the de-duplicated set of every observation seen so far."""
        all_obs = set()
        all_obs.update(self.tramline_observations)
        all_obs.update(self.arc_observations)
        all_obs.update(self.flatfield_observations)
        all_obs.update(self.science_observations)
        return all_obs
    def import_new_observation(self, observation):
        # type: (SAMIObservation) -> None
        """Register an observation (or a path to one) with its reduction group.

        A string argument is treated as a path: the file is copied into the
        current working directory first.
        """
        if isinstance(observation, str):
            shutil.copy(observation, ".")
            observation = SAMIObservation(os.path.basename(observation))
        if observation.ndf_class not in ("MFFFF", "MFARC", "MFOBJECT"):
            log.error("Don't know how to handle observation of class %s, skipped.", observation.ndf_class)
            return
        # Observations are grouped by plate and spectrograph arm.
        grouping_key = (observation.plate_id, observation.spectrograph_arm)
        if grouping_key not in self.reduction_groups:
            self.reduction_groups[grouping_key] = SAMIReductionGroup(observation.plate_id, "sami1000R.idx")
        reduction_group = self.reduction_groups[grouping_key]
        # Classify observation based on NDF CLASS
        if observation.ndf_class == "MFFFF":
            # A fibre flat serves double duty: tramline-map source and flat field.
            # The first one seen becomes the group's calibration frame.
            self.tramline_observations.append(observation)
            if reduction_group.tlm_observation is None:
                reduction_group.tlm_observation = observation
            self.flatfield_observations.append(observation)
            if reduction_group.fiber_flat_observation is None:
                reduction_group.fiber_flat_observation = observation
        elif observation.ndf_class == "MFARC":
            self.arc_observations.append(observation)
            if reduction_group.arc_observation is None:
                reduction_group.arc_observation = observation
        elif observation.ndf_class == "MFOBJECT":
            self.science_observations.append(observation)
            if observation not in reduction_group.science_observation_list:
                reduction_group.science_observation_list.append(observation)
    def reduce_all(self):
        """Reduce every known group."""
        for reduction_group in self.reduction_groups.values():
            reduction_group.reduce()
| astrogreen/obs_techniques_workshop | data_reducer.py | data_reducer.py | py | 5,514 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"... |
15973346603 | # module
import multiprocessing
import pandas as pd
import time
# custom utils
import utils_c
def get_asm_img(file_name):
    """Build a zero-padded 1000-byte image-feature dict for one asm file.

    Returns a dict with the file name under 'hash' and the first 1000 raw
    bytes under 'asm_img_1' .. 'asm_img_1000' (shorter files keep the zeros).
    """
    root_path = '../'
    colnames = ['asm_img_' + str(i+1) for i in range(1000)]
    feature_list = {'hash': file_name}
    for v in colnames:
        feature_list[v] = 0
    file_path = root_path + file_name
    # Close the handle deterministically (the original leaked it).
    with open(file_path, 'rb') as f:
        file_bytes = f.read()
    # Copy up to the first 1000 bytes; slicing replaces the original's
    # bare ``except: pass`` around an IndexError with identical padding.
    for i, byte_value in enumerate(file_bytes[:1000]):
        feature_list['asm_img_' + str(i+1)] = byte_value
    return feature_list
def main():
    """Extract asm-image features from all '../*.asm' files in parallel.

    Prompts for a worker count, maps get_asm_img over the files, and writes
    the resulting feature table to ./result/feature_asm_img.csv.
    """
    # make result directory
    utils_c.make_result_dir()
    # constant
    NUM_OF_PROCESSOR = int(input("please input the number of processor: "))
    # get file name - ext:vir
    file_names = utils_c.get_file_list('../', 'asm')
    print("starting bytes code analysis")
    print("num of asm code: {}".format(len(file_names)))
    print("num of processor: {}".format(NUM_OF_PROCESSOR))
    # job list
    jobs = [get_asm_img]
    # The context manager tears the worker pool down deterministically
    # (the original never closed the pool).
    with multiprocessing.Pool(processes=NUM_OF_PROCESSOR) as pool:
        for job in jobs:
            # start time
            start = time.time()
            feature_lists = pool.map(job, file_names)
            # execution time
            exec_time = int(time.time() - start)
            print("[{}] hour: {}, minute: {}, second: {}".format(job.__name__, exec_time // 3600, exec_time % 3600 // 60, exec_time % 60))
            # dict list to data frame
            data = pd.DataFrame(feature_lists)
            data = data.set_index('hash')
            # to csv
            data.to_csv('./result/feature_asm_img.csv')


if __name__ == '__main__':
    main()
| SONG-WONHO/DataChallenge2018 | module/module_asm_to_img/main.py | main.py | py | 1,639 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils_c.make_result_dir",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils_c.get_file_list",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "ti... |
36861387593 | """Models for storing VCF variant statistics information."""
import enum
import hashlib
import math
import pathlib
import typing
import attr
import cattr
import json
from logzero import logger
import vcfpy
# The TypeVar's string name must match the variable name for typing tools
# (the original said TypeVar("Genotype"), clashing with the enum below).
_TGenotype = typing.TypeVar("_TGenotype")


class Genotype(enum.Enum):
    """Diploid genotype call, normalized to unphased '/' notation."""

    #: Reference homozygous.
    REF = "0/0"
    #: Heterozygous alternative.
    HET = "0/1"
    #: Homozygous alternative.
    HOM = "1/1"

    @classmethod
    def from_value(cls, value: str) -> _TGenotype:
        """Parse a VCF genotype string, treating phased '|' like unphased '/'.

        Raises ValueError for unknown genotype strings.
        """
        val = value.replace("|", "/")
        for genotype in cls:
            if genotype.value == val:
                return genotype
        # Fixed the copy-pasted "release" wording in names and error message.
        raise ValueError("Could not get genotype for value %s" % value)  # pragma: no cover
@attr.s(auto_attribs=True, frozen=True)
class Site:
    """Define a single site."""

    #: The genome release
    genome_release: str
    #: The contig/chromosome name.
    chromosome: str
    #: 1-based position of the site.
    position: int
    #: Reference base string in VCF notation.
    reference: str
    #: Alternative base string in VCF notation.
    alternative: str

    def with_prefix(self, prefix):
        """Return ``Site`` having the ``"chr"`` prefix or not."""
        if prefix and not self.chromosome.startswith("chr"):
            return attr.evolve(self, chromosome="chr" + self.chromosome)
        elif not prefix and self.chromosome.startswith("chr"):
            return attr.evolve(self, chromosome=self.chromosome[3:])
        # Bug fix: the chromosome is already in the requested form — return the
        # site unchanged (the original fell through and returned None).
        return self

    @property
    def short_notation(self) -> str:
        """Return ``release-chrom-pos`` shorthand for the site."""
        return "-".join(map(str, [self.genome_release, self.chromosome, self.position]))
# frozen=True makes instances immutable (and therefore hashable).
@attr.s(auto_attribs=True, frozen=True)
class VariantStats:
    """Statistics of a variant call.

    A missing genotype indicates a "no-call". Missing coverage information indicates that the
    VCF file that this was generated from did not provide that information or a "no-call".
    """

    #: Genotype at the site.
    genotype: typing.Optional[Genotype] = None
    #: Total coverage at the site (ref + alt).
    total_cov: typing.Optional[int] = None
    #: Variant coverage at the site (alt).
    alt_cov: typing.Optional[int] = None
@attr.s(auto_attribs=True, frozen=True)
class SiteStats:
    """Variant statistics at a site."""

    #: The site the statistics refer to.
    site: Site
    #: Variant statistics at that site.
    stats: VariantStats
@attr.s(auto_attribs=True, frozen=True)
class Sample:
    """Information regarding a sample."""

    #: Sample identifier.
    name: str
@attr.s(auto_attribs=True, frozen=True)
class SampleStats:
    """Information about variant statistics per sample."""

    #: The sample information.
    sample: Sample
    #: The site-wise variant statistics.
    site_stats: typing.List[SiteStats]
@attr.s(auto_attribs=True, frozen=True)
class SimilarityPair:
    """Store information about the similarity of a sample pair.

    By convention, the first sample is the lexicographically smaller one.
    """

    #: First sample.
    sample_i: str
    #: Second sample.
    sample_j: str
    #: Number of sites sharing no allele.
    n_ibs0: int
    #: Number of sites sharing one allele.
    n_ibs1: int
    #: Number of sites sharing both alleles.
    n_ibs2: int
    #: Number of sites where sample i is heterozygous.
    het_i: int
    #: Number of sites where sample j is heterozygous.
    het_j: int
    #: Number of sites where both samples are heterozygous.
    het_i_j: int

    @property
    def relatedness(self):
        """Compute peddy relatedness."""
        # NOTE(review): raises ZeroDivisionError when either sample has no
        # heterozygous sites (het_i * het_j == 0) — confirm callers guard this.
        return (self.het_i_j - 2 * self.n_ibs0) / (0.5 * math.sqrt(self.het_i * self.het_j))

    @property
    def key(self):
        """Canonical (sample_i, sample_j) key for this pair."""
        return (self.sample_i, self.sample_j)
def read_sites(
    *,
    path: typing.Optional[typing.Union[str, pathlib.Path]] = None,
    stream: typing.Optional[vcfpy.Reader] = None,
    genome_release: typing.Optional[str] = None,
    max_sites: typing.Optional[int] = None,
) -> typing.List[Site]:
    """Load sites from the given VCF file.

    Exactly one of *path* and *stream* must be given; *genome_release* is
    mandatory.  At most *max_sites* records are read when it is set.
    """
    if not genome_release:
        raise ValueError("genome_release must be given")  # pragma: no cover
    if bool(path) == bool(stream):
        raise ValueError("Exactly one of path and stream must be provided")  # pragma: no cover
    else:
        if path:
            reader = vcfpy.Reader.from_path(path)
        else:
            reader = vcfpy.Reader.from_stream(stream)
    result = []
    # Bug fix: the original wrapped this in a stray outer ``for record in
    # reader`` loop, which silently dropped the first record and restarted the
    # enumerate() count after max_sites was reached.
    for lineno, record in enumerate(reader):
        if max_sites and lineno >= max_sites:
            break  # pragma: no cover
        result.append(
            Site(
                genome_release=genome_release,
                chromosome=record.CHROM,
                position=record.POS,
                reference=record.REF,
                alternative=record.ALT[0].value,
            )
        )
    return result
def hash_sample_id(sample_id: str) -> str:
    """Return the hex SHA-256 digest of the UTF-8 encoded sample identifier."""
    digest = hashlib.sha256(sample_id.encode("utf-8"))
    return digest.hexdigest()


def sample_path(storage_path: str, sample_id: str) -> pathlib.Path:
    """Build the sharded on-disk stats path for *sample_id* under *storage_path*.

    Layout: ``<storage>/<hash[:2]>/<hash[:4]>/<hash>-stats.json``.
    """
    sample_hash = hash_sample_id(sample_id)
    shard, subshard = sample_hash[:2], sample_hash[:4]
    return pathlib.Path(storage_path) / shard / subshard / (sample_hash + "-stats.json")
def write_site_stats(site_stats: typing.List[SiteStats], storage_path: str, sample_id: str) -> str:
    """Write site stats to the storage path and return path to JSON."""
    # NOTE(review): the annotation says ``str`` but a ``pathlib.Path`` is
    # returned — confirm which type callers rely on.
    output_path = sample_path(storage_path, sample_id)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    logger.info("Writing results to %s", output_path)
    with output_path.open("wt") as outputf:
        # cattr.unstructure converts the attrs objects into plain dicts/lists.
        json.dump(cattr.unstructure(site_stats), outputf)
    return output_path
| holtgrewe/clin-qc-tk | qctk/models/vcf.py | vcf.py | py | 5,860 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "attr.evolve",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "attr.evolve",
"line_num... |
674845764 | import sys
if sys.version_info.major == 2:
import mock
else:
from unittest import mock
import json
import numpy as np
import random
import string
import tensorflow as tf
from grpc._cython import cygrpc
from nose.tools import assert_equal
from nose.tools import assert_is_instance
from nose.tools import assert_raises
from nose.tools import assert_set_equal
from nose.tools import assert_true
from nose.tools import assert_tuple_equal
from numpy.testing import assert_array_almost_equal
from parameterized import parameterized
from tensorflow.contrib.util import make_tensor_proto
from tensorflow.python.framework import tensor_util
from .. import segmenter
def test_Segmenter_init_with_defaults():
    """Omitted keyword arguments must fall back to their documented defaults."""
    model = segmenter.Segmenter(
        'localhost',
        8080,
        'model',
        'signature',
        'input',
        'output')

    expected_attributes = {
        'host': 'localhost',
        'port': 8080,
        'model_name': 'model',
        'signature_name': 'signature',
        'input_name': 'input',
        'output_name': 'output',
        'request_timeout': 3.,
        'max_send_message_length': None,
        'max_receive_message_length': None,
    }
    for attribute, value in expected_attributes.items():
        assert_true(
            hasattr(model, attribute),
            msg='expected model to have `{0}` attribute'.format(attribute))
        assert_equal(
            getattr(model, attribute), value,
            msg='incorrect value for `{0}` attribute'.format(attribute))
def test_Segmenter_init_with_provided_arguments():
    """Explicitly supplied keyword arguments must override every default."""
    # Same number and order of random draws as before, keeping RNG use stable.
    request_timeout = random.randint(0, 1000)
    max_send_message_length = random.randint(0, 1000)
    max_receive_message_length = random.randint(0, 1000)
    model = segmenter.Segmenter(
        'localhost',
        8080,
        'model',
        'signature',
        'input',
        'output',
        request_timeout=request_timeout,
        max_send_message_length=max_send_message_length,
        max_receive_message_length=max_receive_message_length)

    expected_attributes = {
        'host': 'localhost',
        'port': 8080,
        'model_name': 'model',
        'signature_name': 'signature',
        'input_name': 'input',
        'output_name': 'output',
        'request_timeout': request_timeout,
        'max_send_message_length': max_send_message_length,
        'max_receive_message_length': max_receive_message_length,
    }
    for attribute, value in expected_attributes.items():
        assert_true(
            hasattr(model, attribute),
            msg='expected model to have `{0}` attribute'.format(attribute))
        assert_equal(
            getattr(model, attribute), value,
            msg='incorrect value for `{0}` attribute'.format(attribute))
# Patch decorators apply bottom-up: grpc.insecure_channel is the first
# injected mock argument, followed by the PredictionServiceStub patch.
@mock.patch(
    'src.models.segmenter.prediction_service_pb2_grpc.PredictionServiceStub',
    autospec=True)
@mock.patch(
    'src.models.segmenter.grpc.insecure_channel',
    autospec=True)
def test_Segmenter_channel_and_stub_creation(
        mock_insecure_channel, mock_PredictionServiceStub):
    """Constructing a Segmenter opens one insecure gRPC channel and wraps it in a stub."""
    # Random host/port make the "host:port" target assertion non-trivial.
    host = ''.join(random.choice(string.ascii_letters) for _ in range(50))
    port = random.randint(0, int(10e6))
    model_name, signature_name = 'model', 'signature'
    input_name, output_name = 'input', 'output'
    _ = segmenter.Segmenter(
        host,
        port,
        model_name,
        signature_name,
        input_name,
        output_name)
    # No message-length options given, so the channel options list is empty.
    mock_insecure_channel.assert_called_once_with(
        target='{host}:{port}'.format(host=host, port=port),
        options=[])
    mock_PredictionServiceStub.assert_called_once_with(
        mock_insecure_channel.return_value)
# Patch decorators apply bottom-up: grpc.insecure_channel is the first
# injected mock argument, followed by the PredictionServiceStub patch.
@mock.patch(
    'src.models.segmenter.prediction_service_pb2_grpc.PredictionServiceStub',
    autospec=True)
@mock.patch(
    'src.models.segmenter.grpc.insecure_channel',
    autospec=True)
def test_Segmenter_channel_creation_with_options(
        mock_insecure_channel, mock_PredictionServiceStub):
    """Message-length limits must be forwarded to the channel as cygrpc options."""
    host, port = 'localhost', 8080
    model_name, signature_name = 'model', 'signature'
    input_name, output_name = 'input', 'output'
    max_send_message_length = random.randint(0, 1000)
    max_receive_message_length = random.randint(0, 1000)
    _ = segmenter.Segmenter(
        host,
        port,
        model_name,
        signature_name,
        input_name,
        output_name,
        max_send_message_length=max_send_message_length,
        max_receive_message_length=max_receive_message_length)
    # The option keys must be the cygrpc channel-arg constants, in this order.
    expected_options = [
        (
            cygrpc.ChannelArgKey.max_send_message_length,
            max_send_message_length,
        ),
        (
            cygrpc.ChannelArgKey.max_receive_message_length,
            max_receive_message_length,
        ),
    ]
    mock_insecure_channel.assert_called_once_with(
        mock.ANY,
        options=expected_options)
# Each case: (name, images array, expected batch/height/width/channel dims).
# Input dtypes vary to confirm the model casts everything to float32.
@parameterized.expand([
    [
        'int32_dtype',
        np.random.randint(0, 256, (32, 200, 200, 3)).astype(np.int32),
        32, 200, 200, 1,
    ],
    [
        'uint8_dtype',
        np.random.randint(0, 256, (16, 512, 512, 3)).astype(np.uint8),
        16, 512, 512, 1,
    ],
    [
        'float64_dtype',
        np.random.rand(100, 227, 227, 3).astype(np.float64),
        100, 227, 227, 1,
    ],
])
def test_Segmenter_call_without_errors(
        name, images, expected_num_images, expected_height, expected_width,
        expected_channels):
    """Calling the model builds a correct Predict request and decodes the response."""
    host, port = 'localhost', 8080
    # Random names ensure the request really carries the configured values.
    model_name = ''.join(
        random.choice(string.ascii_letters) for _ in range(100))
    signature_name = ''.join(
        random.choice(string.ascii_letters) for _ in range(100))
    input_name = ''.join(
        random.choice(string.ascii_letters) for _ in range(100))
    output_name = ''.join(
        random.choice(string.ascii_letters) for _ in range(100))
    request_timeout = random.randint(100, 1000)
    model = segmenter.Segmenter(
        host,
        port,
        model_name,
        signature_name,
        input_name,
        output_name,
        request_timeout=request_timeout)
    # Replace the gRPC stub so no server is needed; wire up a fake future
    # whose result carries the expected output tensor.
    mock_stub = mock.MagicMock(name='mock stub')
    model.stub = mock_stub
    mock_results = np.random.rand(
        expected_num_images,
        expected_height,
        expected_width,
        expected_channels)
    result = mock.MagicMock(
        name='mock future result',
        outputs={output_name: make_tensor_proto(mock_results.ravel())})
    mock_future = mock.MagicMock(name='mock future')
    mock_future.exception.return_value = None
    mock_future.result.return_value = result
    mock_stub.Predict.future.return_value = mock_future
    output = model(images)
    request, request_timeout = mock_stub.Predict.future.call_args[0]
    assert_equal(request.model_spec.name, model_name,
        msg='model name is incorrect')
    assert_equal(request.model_spec.signature_name, signature_name,
        msg='signature name is incorrect')
    input_names = [
        input_name,
    ]
    assert_set_equal(set(request.inputs.keys()), set(input_names),
        msg='expected input keys to be {0}, got {1}'.format(
            json.dumps(sorted(list(set(request.inputs.keys())))),
            json.dumps(sorted(list(set(input_names))))))
    expected_keys_and_values = [
        (
            input_name,
            images,
        ),
    ]
    for (key, value) in expected_keys_and_values:
        assert_array_almost_equal(
            tensor_util.MakeNdarray(request.inputs[key]),
            value,
            err_msg='incorrect value for "{0}"'.format(key))
    # special check for data type enforcement
    assert_equal(request.inputs[input_name].dtype, tf.float32,
        msg='expected the data type for "{0}" to be `tf.float32`')
    assert_is_instance(output, np.ndarray,
        msg='expected return value to be an instance of `numpy.ndarray`')
    assert_array_almost_equal(output, mock_results,
        err_msg='return value is incorrect')
def test_Segmenter_call_with_exception():
    """A future that reports an exception must be re-raised to the caller."""
    host, port = 'localhost', 8080
    model_name, signature_name = 'model', 'signature'
    input_name, output_name = 'input', 'output'
    model = segmenter.Segmenter(
        host,
        port,
        model_name,
        signature_name,
        input_name,
        output_name)
    # Fake stub whose future holds an error instead of a result.
    mock_stub = mock.MagicMock(name='mock stub')
    model.stub = mock_stub
    mock_future = mock.MagicMock(name='mock future')
    mock_error = RuntimeError('mock future error')
    mock_future.exception.return_value = mock_error
    mock_stub.Predict.future.return_value = mock_future
    images = np.random.rand(16, 200, 200, 3)
    # Calling the model must surface the same exception type.
    assert_raises(type(mock_error), model, images)
# Each case: (name, max_size, input dims, expected output dims).  The expected
# dims keep the original aspect ratio while fitting inside max_size.
@parameterized.expand([
    [
        'within_bounds',
        1000,
        (400, 700, 3),
        (400, 700, 3),
    ],
    [
        'width_out_of_bounds',
        400,
        (300, 500, 3),
        (240, 400, 3),
    ],
    [
        'height_out_of_bounds',
        400,
        (800, 300, 3),
        (400, 150, 3),
    ],
    [
        'both_width_and_height_out_of_bounds',
        500,
        ( 600, 800, 3),
        ( 375, 500, 3),
    ],
])
@mock.patch(
    'src.models.segmenter.Segmenter.__init__',
    autospec=False,
    return_value=None)
def test_Segmenter_aspect_aware_resizing(
        name, max_size, input_dims, expected_dims, mock_Segmenter):
    """Resizing preserves aspect ratio and never exceeds max_size in either dimension."""
    # __init__ is patched out so no gRPC channel is created.
    model = segmenter.Segmenter()
    images = np.random.randint(0, 256, input_dims).astype(np.uint8)
    output = model.aspect_aware_resizing(images, max_size)
    assert_is_instance(output, np.ndarray,
        msg='expected return value to be an instance of `numpy.ndarray`')
    assert_tuple_equal(output.shape, expected_dims,
        msg='resized image dimensions are incorrect')
# Decorators apply bottom-up: the patched Segmenter.__init__ is injected
# first, then the patched Image.fromarray.
@mock.patch(
    'src.models.segmenter.Image.fromarray',
    autospec=True)
@mock.patch(
    'src.models.segmenter.Segmenter.__init__',
    autospec=False,
    return_value=None)
def test_Segmenter_aspect_aware_resizing_interpolation(
        mock_Segmenter, mock_fromarray):
    """The chosen interpolation enum must be forwarded to Pillow's resize()."""
    max_size = 100
    interpolation = mock.MagicMock(name='mock interpolation enum')
    mock_image = mock.MagicMock(name='mock image', shape=(256, 256))
    # Fake the Pillow conversion chain: fromarray(...) -> resize(...).
    mock_pillow_image = mock.MagicMock(name='mock Pillow image')
    mock_resized_image = np.random.rand(max_size, max_size)
    mock_pillow_image.resize.return_value = mock_resized_image
    mock_fromarray.return_value = mock_pillow_image
    model = segmenter.Segmenter()
    output = model.aspect_aware_resizing(
        mock_image,
        max_size,
        interpolation=interpolation)
    mock_pillow_image.resize.assert_called_once_with(
        (max_size, max_size),
        resample=interpolation)
| maibrahim2016/background_removal | src/models/tests/test_segmenter.py | test_segmenter.py | py | 10,993 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "nose.tools.assert_true",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_equal",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "r... |
28796011783 | """
Nが200,000もある
2つを選ぶと10**10となり間に合わない
Nの時間計算量で求める必要がある
"""
from collections import Counter

# Count unordered pairs of cards summing to 500: the only combinations from
# the allowed values are (100, 400) and (200, 300).
n = int(input())
card_counts = Counter(map(int, input().split()))

total_pairs = (card_counts[100] * card_counts[400]
               + card_counts[200] * card_counts[300])
print(total_pairs)
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
73214463715 | import os
import json
import torch
from simpletransformers.question_answering import QuestionAnsweringModel
from evaluate import in_eval
def create_parentDir(path, exist_ok=True):
    """Create the parent directory of *path* (``mkdir -p`` on its dirname).

    With exist_ok=True (default) an already-existing directory is not an error.
    """
    head, _ = os.path.split(path)  # the filename component is unused
    os.makedirs(head, exist_ok=exist_ok)
def _load_paragraphs(file):
    """Load a SQuAD-style JSON file and flatten topic paragraphs into one list."""
    # Close the handle deterministically (the original leaked it via
    # ``json.load(open(...))``).
    with open(file, encoding='utf-8') as f:
        content = json.load(f)
    return [item for topic in content['data'] for item in topic['paragraphs']]


def read_data(train_file, dev_file, test_file=None):
    """Read train/dev (and optionally test) SQuAD-style files.

    Returns (train, dev) paragraph lists, or (train, dev, test) when
    *test_file* is given.
    """
    train_data = _load_paragraphs(train_file)
    dev_data = _load_paragraphs(dev_file)
    if test_file:
        return train_data, dev_data, _load_paragraphs(test_file)
    return train_data, dev_data
def save_json(data, file):
    """Serialize *data* as indented JSON to *file*, creating parent dirs first."""
    create_parentDir(file)
    with open(file, 'w', encoding='utf-8') as handle:
        json.dump(data, handle, indent=1)
    print(f'data -> {file}')
def split_preds(preds):
    """Split QA predictions into a best-answer dict and an n-best dict.

    *preds* is a pair of parallel lists: answer-string records and
    probability records, each a dict with matching 'id' keys.  Returns
    ({id: best_answer}, {id: all_answers}).  Assumes both lists have equal
    length and aligned ids.
    """
    submission = {}
    n_submission = {}
    a_str, a_pro = preds
    for answers, probs in zip(a_str, a_pro):
        assert answers['id'] == probs['id']
        qid = answers['id']
        # Pick the highest-probability answer; max() is O(n) versus the
        # original's full sort, and on ties both keep the first occurrence.
        best, _ = max(zip(answers['answer'], probs['probability']),
                      key=lambda pair: pair[1])
        submission[qid] = best
        n_submission[qid] = answers['answer']
    return submission, n_submission
# Base simpletransformers QA configuration; learning_rate, batch sizes,
# epochs, accumulation steps and output_dir are overridden by the grid
# search further down.
train_args = {
    'n_gpu': 2,  # overwritten below with torch.cuda.device_count()
    'learning_rate': 5e-5,
    'max_seq_length': 384,
    'max_answer_length': 30,
    'doc_stride': 128,
    'num_train_epochs': 2,
    'train_batch_size': 24,
    'eval_batch_size': 24,
    'gradient_accumulation_steps': 1,
    'warmup_ratio': 0.0,
    'manual_seed': 42,  # fixed seed for reproducibility
    'do_lower_case': True,  # matches the uncased BERT checkpoint
    'reprocess_input_data': True,
    'output_dir': 'outputs/',
    'save_model_every_epoch': False,
    'save_eval_checkpoints': False,
    'save_optimizer_and_scheduler': True,
    'save_steps': -1, # -1 is disable
    'overwrite_output_dir': True,
    'evaluate_during_training': False,
    'best_model_dir': 'outputs3/best/'
}
# Local path to the pretrained bert-base-uncased checkpoint.
bert_base_uncased_file = '../../pretrained_data/bert-base-uncased'
# Restrict training to two specific GPUs; n_gpu then reflects the visible count.
os.environ['CUDA_VISIBLE_DEVICES']="6,7"
train_args['n_gpu'] = torch.cuda.device_count()
## search train and eval without num ********************************
# Grid search over learning rate and (effective) batch size.  Each run
# trains from scratch, evaluates on dev, predicts on test, and writes
# everything under outputs<lr>_<effective_batch>/.
lrs = [3e-5] # 3e-5, 5e-5, 7e-5
num_epoch = 2
gradient_accumulation_step = 1
batch_sizes = [12] # 6, 12, 24
train_file, dev_file, test_file = 'data/train.json', 'data/dev.json', 'data/test.json'
train_data, dev_data, test_data = read_data(train_file, dev_file, test_file)
for batch_size in batch_sizes:
    for lr in lrs:
        # Skip combinations that were already ruled out.
        if lr == 3e-5 and batch_size == 24:
            continue
        if lr == 7e-5 and batch_size == 6:
            continue
        output_path = f'outputs' + str(lr) + '_' + str(batch_size*gradient_accumulation_step)
        # if os.path.exists(output_path):
        # continue
        train_args['output_dir'] = output_path
        train_args['learning_rate'] = lr
        train_args['num_train_epochs'] = num_epoch
        train_args['gradient_accumulation_steps'] = gradient_accumulation_step
        train_args['train_batch_size'] = batch_size
        # Fresh model per configuration, initialized from the local checkpoint.
        model = QuestionAnsweringModel('bert', bert_base_uncased_file, args=train_args)
        model.train_model(train_data, eval_data=None)
        model.eval_model(dev_data, output_dir=f'{output_path}/eval/')
        # Keep only the top-probability answer per question for the submission.
        preds, n_preds = split_preds(model.predict(test_data))
        os.makedirs(f'{output_path}/pred', exist_ok=True)
        save_json(preds, f'{output_path}/pred/predict.json')
        # save_json(n_preds, f'{output_path}/pred/n_predict.json')
        print(f"lr: {lr}, batch:{batch_size*gradient_accumulation_step}, last eval: {in_eval(dev_file, f'{output_path}/eval/predictions_test.json')}")
        print(f"lr: {lr}, batch:{batch_size*gradient_accumulation_step}, last test: {in_eval(test_file, f'{output_path}/pred/predict.json')}")
## end *************************************
| TingFree/WDA | bert_qa.py | bert_qa.py | py | 4,038 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.split",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 12... |
72357043233 | # This script creates the color-color and rms plots used in state separation
# This is quite messy because of the different ways the rms and coco files are defined
import os
import numpy as np
from matplotlib import pyplot as plt
from math import exp
from math import sqrt
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
# List of pre-bursts
# Burst IDs flagged as PRE (photospheric radius expansion) and/or double
# bursts.  PREbursts keeps the full burst ID (e.g. 10088-01-08-01_3),
# PREbursts2 only the observation ID before the underscore.
PREbursts = []
doublebursts = []
PREbursts2 = []
with open('burst_characteristics.txt', 'r') as pre:
    for line in pre:
        if line.startswith("#"):
            continue
        ad = line.rstrip('\n').split()
        ax = ad[0].split('_')
        # The flag may sit in the second column...
        if len(ad) >= 2:
            if ad[1] == 'pre':
                PREbursts.append(ad[0])
                PREbursts2.append(ax[0])
            elif ad[1] == 'double':
                doublebursts.append(ad[0])
        # ...or in the third column of three-column rows
        if len(ad) == 3:
            if ad[2] == 'pre':
                PREbursts.append(ad[0])
                PREbursts2.append(ax[0])
            elif ad[2] == 'double':
                doublebursts.append(ad[0])
# Read the touchdown fluxes of prebursts and calculate the average of those
# This is used as an eddington flux
tdfluxes = []
with open('burst_properties.txt', 'r') as f:
    for line in f:
        if not line.startswith("#"):
            ae = line.rstrip('\n').split()
            if ae[0] in PREbursts:
                tdfluxes.append(float(ae[7]))
# Mean touchdown flux; the 1e-7 factor converts the tabulated units.
# (sum() replaces the original manual accumulation loop.)
tdflux = 10**(-7)*sum(tdfluxes)/len(tdfluxes)
# Create lists of hard and soft bursts
# hard and soft lists contain the burstids in form 10088-01-08-01_3
# while hard2 and soft2 contain the burstids in form 10088-01-08-01
hard = []
soft = []
hard2 = []
soft2 = []
with open('burst_hardness.txt', 'r') as f:
    for line2 in f:
        if line2.startswith("#"):
            continue
        ac = line2.rstrip('\n').split()
        burst_id = ac[0]
        obs_id = burst_id.split('_')[0]
        # The spectral state is the last column of the row
        if ac[-1] == 'hard':
            hard.append(burst_id)
            hard2.append(obs_id)
        elif ac[-1] == 'soft':
            soft.append(burst_id)
            soft2.append(obs_id)
# Read the fluxes in four different energy ranges
flux1 = []
flux2 = []
flux3 = []
flux4 = []
flux_total = []
with open('1636_coco_nobursts.dat', 'r') as f:
    for line2 in f:
        if not line2.startswith("#"):
            ac = line2.rstrip('\n').split()
            flux1.append(float(ac[1]))
            flux2.append(float(ac[3]))
            flux3.append(float(ac[5]))
            flux4.append(float(ac[7]))
            flux_total.append(float(ac[9]))
# The lists already hold floats, so convert directly to arrays (the
# original re-converted every element with a redundant comprehension).
flux1 = np.array(flux1)
flux2 = np.array(flux2)
flux3 = np.array(flux3)
flux4 = np.array(flux4)
# Persistent flux divided by eddington flux
flux_total = np.array(flux_total)/tdflux
# Hard and soft colours
hard_all = flux4/flux3
soft_all = flux2/flux1
# Find out the hard and soft colours and the persistent flux of each burst,
# binned by spectral state (hard/soft) and by PRE vs. normal burst.
hard_burst_hard = []
hard_burst_soft = []
soft_burst_hard = []
soft_burst_soft = []
hard_burst_hard_pre = []
hard_burst_soft_pre = []
soft_burst_hard_pre = []
soft_burst_soft_pre = []
fluxper_hard = []
fluxper_soft = []
fluxper_hard_pre = []
fluxper_soft_pre = []
with open('burst_hardness.txt', 'r') as f:
    for line2 in f:
        if line2.startswith("#"):
            continue
        ac = line2.rstrip('\n').split()
        in_pre = ac[0] in PREbursts
        # Pick the (hard colour, soft colour, persistent flux) destination
        # lists for this burst; skip bursts that are neither hard nor soft.
        if ac[0] in hard:
            target = (hard_burst_hard_pre, hard_burst_soft_pre, fluxper_hard_pre) \
                if in_pre else (hard_burst_hard, hard_burst_soft, fluxper_hard)
        elif ac[0] in soft:
            target = (soft_burst_hard_pre, soft_burst_soft_pre, fluxper_soft_pre) \
                if in_pre else (soft_burst_hard, soft_burst_soft, fluxper_soft)
        else:
            continue
        target[0].append(float(ac[4]))
        target[1].append(float(ac[6]))
        target[2].append(float(ac[2]) / tdflux)
# Plotting
# Figure layout: (131) colour-colour diagram, (132) hard colour vs persistent
# flux, (133) count rate vs fractional rms (added further below).
# Grey dots = all non-burst observations; black = hard-state bursts,
# blue = soft-state bursts, triangles = PRE bursts.
fig = plt.figure()
ax = fig.add_subplot(131)
ax.minorticks_on()
# Background cloud of every observation
ax.plot(soft_all, hard_all, color='0.75',marker='.',linestyle='none')
#ax.set_xlabel('Soft color (4$-$6.4 keV)/(3$-$4 keV)')
ax.set_ylabel('Hard color (9.7$-$16 keV)/(6.4$-$9.7 keV)')
# NOTE(review): rmshard/rmssoft/... handles are never used afterwards;
# the legend below is built from proxy artists instead.
rmshard,=ax.plot(hard_burst_soft, hard_burst_hard, 'ko')
rmssoft,=ax.plot(soft_burst_soft, soft_burst_hard, 'bo')
rmshard_pre,=ax.plot(hard_burst_soft_pre, hard_burst_hard_pre, 'k^', markeredgecolor='grey')
rmssoft_pre,=ax.plot(soft_burst_soft_pre, soft_burst_hard_pre, 'b^', markeredgecolor='c')
# Middle panel: hard colour against Eddington-normalised persistent flux
ax3 = fig.add_subplot(132)
ax3.minorticks_on()
ax3.set_xscale('log')
ax3.set_xlim(0.01, 0.5)
ax3.plot(flux_total, hard_all, color='0.75',marker='.',linestyle='none')
#ax3.set_xlabel(r'Persistent flux F$_{\mathrm{per}}$/<F$_{\mathrm{td}}$>')
ax3.set_ylabel('Hard color (9.7$-$16 keV)/(6.4$-$9.7 keV)')
ax3.plot(fluxper_hard, hard_burst_hard, 'ko')
ax3.plot(fluxper_soft, soft_burst_hard, 'bo')
ax3.plot(fluxper_hard_pre, hard_burst_hard_pre, 'k^', markeredgecolor='grey')
ax3.plot(fluxper_soft_pre, soft_burst_hard_pre, 'b^', markeredgecolor='c')
# Read the rms-values for each burst
# (fractional rms and its error, keyed by observation ID, split by
# spectral state and PRE vs. normal)
rms_hard = []
rms_soft = []
rms_hard_error = []
rms_soft_error = []
rms_hard_pre = []
rms_soft_pre = []
rms_hard_error_pre = []
rms_soft_error_pre = []
with open('4U1636_rms.dat', 'r') as f:
    for line2 in f:
        if line2.startswith("#"):
            continue
        ac = line2.rstrip('\n').split()
        in_pre = ac[1] in PREbursts2
        # Destination (value, error) lists for this observation
        if ac[1] in hard2:
            target = (rms_hard_pre, rms_hard_error_pre) if in_pre \
                else (rms_hard, rms_hard_error)
        elif ac[1] in soft2:
            target = (rms_soft_pre, rms_soft_error_pre) if in_pre \
                else (rms_soft, rms_soft_error)
        else:
            continue
        target[0].append(float(ac[2]))
        target[1].append(float(ac[3]))
# Read the count rate (not hardness) for each burst
# (columns 6/7 of the same rms file; the variables are called
# "hardness" but hold count rates per the original comment)
hardness_hard = []
hardness_soft = []
hardness_hard_error = []
hardness_soft_error = []
hardness_hard_pre = []
hardness_soft_pre = []
hardness_hard_error_pre = []
hardness_soft_error_pre = []
with open('4U1636_rms.dat', 'r') as f:
    for line2 in f:
        if line2.startswith("#"):
            continue
        ac = line2.rstrip('\n').split()
        in_pre = ac[1] in PREbursts2
        if ac[1] in hard2:
            target = (hardness_hard_pre, hardness_hard_error_pre) if in_pre \
                else (hardness_hard, hardness_hard_error)
        elif ac[1] in soft2:
            target = (hardness_soft_pre, hardness_soft_error_pre) if in_pre \
                else (hardness_soft, hardness_soft_error)
        else:
            continue
        target[0].append(float(ac[6]))
        target[1].append(float(ac[7]))
# Plot count rate vs rms
ax2 = fig.add_subplot(133)
ax2.minorticks_on()
ax2.plot(hardness_hard, rms_hard, marker='o', color='k', linestyle='none')
ax2.plot(hardness_soft, rms_soft, marker='o', color='b', linestyle='none')
ax2.plot(hardness_hard_pre, rms_hard_pre, marker='^', color='k', markeredgecolor='grey', linestyle='none')
ax2.plot(hardness_soft_pre, rms_soft_pre, marker='^', color='b', markeredgecolor='c', linestyle='none')
ax2.set_ylabel(r'Fractional rms')
ax2.set_ylim(0, 0.23)
# Create legend
# Proxy artists describing the colour/marker coding of all three panels
hards = mpatches.Patch(color='k', label='Hard bursts')
softs = mpatches.Patch(color='b', label='Soft bursts')
# NOTE: `pre` reuses the name of the earlier file handle (already closed).
pre = mlines.Line2D([], [], color='k', marker='^', linestyle='none', label='PRE bursts')
norm = mlines.Line2D([], [], color='k', marker='o', linestyle='none', label='Normal bursts')
ax2.legend(handles=[hards, softs, pre, norm], prop={'size':9}, loc=1)
# Typical error bars
ax.errorbar(2.0, 0.65, xerr=0.03, yerr=0.008, fmt='.', color='g')
ax3.errorbar(0.3, 0.65, xerr=0.03, yerr=0.008, fmt='.', color='g')
ax2.errorbar(50, 0.02, xerr=15, yerr=0.005, fmt='.', color='g')
# Set every other tick label invisible
for label in ax.xaxis.get_ticklabels()[::2]:
    label.set_visible(False)
for label in ax2.xaxis.get_ticklabels()[::2]:
    label.set_visible(False)
plt.suptitle('4U 1636-536')
# Type 42 (TrueType) fonts keep the PDF text editable
matplotlib.rcParams['pdf.fonttype'] = 42
plt.subplots_adjust(wspace=0.3)
fig.set_size_inches(13.0, 4.0)
fig.savefig('pdfplots/1636_colcol_rms.pdf', bbox_inches='tight', dpi=200)
plt.close()
| jkuut/dyn-pow-method | colcol_rms.py | colcol_rms.py | py | 8,867 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
23411365980 | #!/usr/bin/python3
"""Program to automatically type strings in application windows upon
authenticating with a RFID or NFC UID. Useful for example to enter your master
password in Mozilla Firefox or Mozilla Thunderbird, which don't integrate with
any third-party keyring manager.
To add a rule for automatic typing, invoke the program with the -w argument,
then focus on the window you want to type the string in, then authenticate
with your RFID or NFC transponder. The windows's unique characteristics and
your custom string will be written in the configuration file.
If you don't want the program to type ENTER at the end of the string, use the
-n argument.
Finally, run the program without any arguments to automatically type the strings
in the windows defined in the configuration file.
This program is a PAM.py NFC client. It requires the PAM.py NFC server to interact
with authenticated RFID / NFC transponders.
"""
# Parameters
from socket import socket, timeout, AF_UNIX, SOCK_STREAM, SOL_SOCKET, \
SO_PASSCRED
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from base64 import b64encode, b64decode
from filelock import FileLock
from getpass import getuser
from psutil import Process
from time import sleep
import Xlib.display
import argparse
import secrets
import json
import sys
import os
import re
# Per-user autotype rules file and the PAM.py NFC server's UNIX socket
default_autotype_definitions_file = "~/.ppnfc_autotype_definitions"
socket_path = "/tmp/ppnfc_server.socket"
# Modules
# Pick the first available synthetic-keyboard backend: xdo, then pynput,
# else None (reported as an error when a string has to be typed)
try:
  from xdo import xdo
  typer = "xdo"
except:
  try:
    from pynput.keyboard import Controller
    typer = "pynput"
  except:
    typer = None
    # NOTE(review): this `pass` is a redundant no-op after the assignment
    pass
# Global variables
autotype_definitions_file = None  # resolved path, set in main()
defsfile_mtime = None  # mtime recorded at the last load attempt
defsfile = []  # validated [app, class, title, encrypted-string] rows
defsfile_lock = None  # module default; main() builds its own FileLock locally
defsfile_locked = False
# Functions
def load_defsfile():
  """Read and verify the content of the definitions file, if it has been
  modified since the last load.  Return True when the file needed no
  reloading or was reloaded successfully, False on any read or format
  error.
  """
  global defsfile_mtime
  global defsfile
  # Stat the file first; an unreadable path counts as a load error
  try:
    mt = os.stat(autotype_definitions_file).st_mtime
  except:
    return False
  # Nothing to do when the file hasn't changed since the last load
  if defsfile_mtime and mt <= defsfile_mtime:
    return True
  if not defsfile_mtime:
    # Quirk kept from the original: the cached mtime is primed before the
    # very first read, even if that read ends up failing below
    defsfile_mtime = mt
  # (Re-)read and parse the JSON content
  try:
    with open(autotype_definitions_file, "r") as f:
      new_defsfile = json.load(f)
  except:
    return False
  # The file must be a list of [app, class, title, encrypted-string] rows
  if not isinstance(new_defsfile, list):
    return False
  for entry in new_defsfile:
    well_formed = (
      isinstance(entry, list)
      and len(entry) == 4
      and all(isinstance(field, str) for field in entry)
    )
    if not well_formed:
      return False
  # Swap in the freshly validated definitions
  defsfile_mtime = mt
  defsfile = new_defsfile
  return True
def write_defsfile(new_defsfile):
  """Serialize the given definitions to the definitions file as pretty
  JSON.  Return True on success, False on any write error.
  """
  try:
    with open(autotype_definitions_file, "w") as handle:
      json.dump(new_defsfile, handle, indent=2)
    return True
  except:
    return False
# Encrypt a plaintext string into an encrypted base64 string
def encrypt(pst, key):
  """Encrypt pst with AES-256-GCM keyed by the transponder UID; return
  the nonce+ciphertext as one base64 string."""
  # AES-256 wants exactly 32 key bytes: tile the ASCII UID and truncate
  aes_key = (key.encode("ascii") * 32)[:32]
  # GCM needs a fresh 12-byte nonce for every single encryption
  nonce = secrets.token_bytes(12)
  ciphertext = AESGCM(aes_key).encrypt(nonce, pst.encode("utf-8"), b"")
  return b64encode(nonce + ciphertext).decode("ascii")
# Decrypt an encrypted base64 string into a plaintext string
def decrypt(bes, key):
  """Reverse of encrypt(): return the plaintext, or None on any failure
  (bad base64, wrong key, tampered ciphertext)."""
  # Derive the same 32-byte AES key from the UID
  aes_key = (key.encode("ascii") * 32)[:32]
  try:
    raw = b64decode(bes)
    # Layout is the 12-byte nonce followed by the GCM ciphertext+tag
    return AESGCM(aes_key).decrypt(raw[:12], raw[12:], b"").decode("utf-8")
  except:
    return None
def main():
  """Main routine: connect to the PAM.py NFC server socket and, on each
  newly authenticated UID, either show the focused window's identity (-s),
  add/remove an autotype rule for it (-w / -r), or type the matching
  decrypted string into the focused X window (default mode).
  """
  global autotype_definitions_file
  # Get the PID of our parent process, to detect if it changes later on
  ppid = Process().parent()
  # Parse the command line arguments if we have parameters
  argparser = argparse.ArgumentParser()
  argparser.add_argument(
    "-d", "--defsfile",
    help="Autotype definitions file (default {})".format(
      default_autotype_definitions_file),
    type=str,
    default=default_autotype_definitions_file
  )
  mutexargs = argparser.add_mutually_exclusive_group()
  mutexargs.add_argument(
    "-s", "--showwininfo",
    # NOTE(review): adjacent string literals concatenate without a space
    # ("...infowhen authenticating") - confirm the intended help text
    help="Don't send any string, just show the current window's info"
    "when authenticating",
    action="store_true",
  )
  mutexargs.add_argument(
    "-w", "--writedefstring",
    help="Add or update a string in the definition file for the "
    "current window",
    type=str,
  )
  mutexargs.add_argument(
    "-r", "--removedefstring",
    help="Remove string in the definition file for the current window",
    action="store_true",
  )
  argparser.add_argument(
    "-n", "--nocr",
    help="Don't add a carriage return at the end of the string",
    action="store_true",
  )
  args = argparser.parse_args()
  autotype_definitions_file = os.path.expanduser(args.defsfile) \
    if args.defsfile \
    else default_autotype_definitions_file
  # Inter-process lock guarding the definitions file (local to main)
  defsfile_lock = FileLock(autotype_definitions_file + ".lock")
  # Get the user's name
  user = getuser()
  # If the definitions file doesn't exist, create it
  if not os.path.isfile(autotype_definitions_file) and not write_defsfile([]):
    print("Error creating the definitions file")
    return(-1)
  # Loop state: server socket, lock state, deferred return code, and the
  # set of UIDs currently authenticated by the server
  sock = None
  defsfile_locked = False
  do_release_defsfile_lock = False
  do_return_status = None
  auth_uids = set()
  firstauth = True
  # Main loop
  while True:
    # If the definitions file lock is locked, release it if we've been told to,
    # if the socket is closed or if we're about to return
    if (do_release_defsfile_lock or not sock or do_return_status != None) \
        and defsfile_locked:
      defsfile_lock.release()
      defsfile_locked = False
      do_release_defsfile_lock = False
    # Do return if we've been told to
    if do_return_status != None:
      return(do_return_status)
    # If our parent process has changed, the session that initially started
    # us up has probably terminated - in which case, we should terminate also
    if Process().parent() != ppid:
      do_return_status = 0
      continue
    if not sock:
      # Open a socket to the auth server
      try:
        sock = socket(AF_UNIX, SOCK_STREAM)
        sock.setsockopt(SOL_SOCKET, SO_PASSCRED, 1)
        sock.connect(socket_path)
        sock.settimeout(5) # Don't get stuck on a closed socket
      except:
        if sock:
          sock.close()
          sock = None
        sleep(1)
        continue
    # NOTE(review): user_authenticated is assigned but never read in main()
    user_authenticated = False
    crecvbuf = ""
    # If we're asked to manipulate the definition file, lock it before
    # the user authenticates, so another instance of the program can't
    # trigger an autotype with an old definition before we've had a
    # chance to change the file
    if args.writedefstring != None or args.removedefstring:
      try:
        defsfile_lock.acquire(timeout=1)
        defsfile_locked = True
      except:
        defsfile_locked = False
        print("Error securing exclusive access to the definitions file")
        print("Maybe delete {} if it's stale?".format(
          autotype_definitions_file + ".lock"))
        do_return_status = -1
        continue
    # Send the request to the server
    try:
      sock.sendall("WAITAUTH {} {}\n".format(user, "0" if firstauth else "1").
            encode("ascii"))
    except:
      sock.close()
      sock = None
      sleep(1)
      continue
    # Get the user's authentication status
    got_waitauth_reply = False
    while not got_waitauth_reply:
      clines = []
      # Get data from the socket
      try:
        b = sock.recv(256).decode("ascii")
      except KeyboardInterrupt:
        sock.close()
        sock = None
        do_return_status = 0
        break
      except:
        sock.close()
        sock = None
        break
      # If we got nothing, the server has closed its end of the socket.
      if len(b) == 0:
        sock.close()
        sock = None
        break
      # Read CR- or LF-terminated lines
      for c in b:
        if c == "\n" or c == "\r":
          clines.append(crecvbuf)
          crecvbuf = ""
        elif len(crecvbuf) < 256 and c.isprintable():
          crecvbuf += c
      # Process the lines
      for l in clines:
        # Retrieve the user's authentication status from the server's reply
        if l[:6] == "AUTHOK":
          got_waitauth_reply = True
          last_auth_uids = auth_uids
          auth_uids = set(l[6:].split())
        elif l == "NOAUTH":
          last_auth_uids = auth_uids
          auth_uids = set()
          got_waitauth_reply = True
    if not sock:
      if do_return_status == None:
        sleep(1)
      continue
    # The first authentication was just to get the current authentication status
    # of the user the first time the program is run, in case they're already
    # authenticated, and we should only consider a new additional UID for
    # automatic typing
    if firstauth:
      print("Waiting for UID - CTRL-C to quit...")
      last_auth_uids = auth_uids
      firstauth = False
    # Do we have new UIDs (meaning either the user has authenticated for the
    # first time, or has authenticated again with one or more another UIDs)?
    if auth_uids > last_auth_uids:
      # Get the first new authentication UID
      auth_uid = list(auth_uids - last_auth_uids)[0]
      # Get the active window
      try:
        display = Xlib.display.Display()
        window = display.get_input_focus().focus
        wmclass = window.get_wm_class()
        wmname = window.get_wm_name()
        if wmname == None:
          window = window.query_tree().parent
          wmname = window.get_wm_name()
          wmclass = window.get_wm_class()
        if wmname == None or wmclass == None or len(wmclass) < 2:
          print("Error getting the window in focus")
          continue
      except:
        print("Error getting the window in focus. Are you running in X?")
        continue
      # Only print the information of the window in focus
      if args.showwininfo:
        print("Window in focus:")
        print(" Application: {}".format(wmclass[1]))
        print(" class: {}".format(wmclass[0]))
        print(" Title: {}".format(wmname))
        do_return_status = 0
        continue
      # Create an entry, replace an existing entry or delete any entries for
      # this window in the
      # definitions file
      elif args.writedefstring != None or args.removedefstring:
        # Load the existing definitions file if one exists
        if not load_defsfile():
          print("Error loading the definitions file")
          do_return_status = -1
          continue
        # Create the contents of the new definitions file
        new_defsfile = []
        defsfile_modified = False
        # NOTE(review): entry_appended is assigned but never used below
        entry_appended = False
        # New entry in plaintext
        newstr = (args.writedefstring if args.writedefstring != None else "") + \
          ("" if args.nocr else "\r")
        # New entry as an encrypted base64 string
        newstr = encrypt(newstr, auth_uid)
        for d in defsfile:
          if d[0] == wmclass[1] and d[1] == wmclass[0] and d[2] == wmname:
            if not defsfile_modified:
              if args.writedefstring != None:
                new_defsfile.append(
                  [wmclass[1], wmclass[0], wmname, newstr])
              defsfile_modified = True
              print("{} existing entry for this window".format(
                "Updated" if args.writedefstring != None else "Removed"))
          else:
            new_defsfile.append(d)
        if not defsfile_modified:
          if args.writedefstring != None:
            new_defsfile.append(
              [wmclass[1], wmclass[0], wmname, newstr])
            defsfile_modified = True
            print("Created entry for this window")
          else:
            print("No entry found for this window")
        do_return_status = 0
        # Save the new definition file
        if defsfile_modified and not write_defsfile(new_defsfile):
          print("Error writing the definitions file")
          do_return_status = -1
        # Sleep a bit before releasing the lockfile and returning, to give
        # another process waiting on a successful authentication to autotype
        # something a chance to choke on the lock, so it won't immediately
        # autotype the new string
        sleep(1)
        continue
      # "Type" string if we find a definition matching the window currently in
      # focus
      else:
        # Acquire the lock to the definitions file. If we can't, quietly pass
        # our turn
        try:
          defsfile_lock.acquire(timeout=0)
          defsfile_locked = True
        except:
          defsfile_locked = False
          continue
        if not load_defsfile():
          print("Error loading the definitions file")
        else:
          # Find a matching window in the definitions file
          for d in defsfile:
            if d[0] == wmclass[1] and d[1] == wmclass[0] and d[2] == wmname:
              # Decrypt the encrypted string to type
              s = decrypt(d[3], auth_uid)
              if s == None:
                print("Error decrypting the string to type. Are you sure "
                  "it was encoded with this UID?")
                break
              # "Type" the corresponding string
              if typer == "xdo":
                try:
                  xdo().enter_text_window(s)
                except:
                  print(
                    "Error typing synthetic keyboard events using xdo")
              elif typer == "pynput":
                try:
                  kbd = Controller()
                  kbd.type(s)
                except:
                  print(
                    "Error typing synthetic keyboard events using pynput")
              else:
                print(
                  "Error: no usable typer module. Install xdo or pynput")
              break
        do_release_defsfile_lock = True
    # If the server has returned a successful authentication but the list of
    # active authenticated UIDs hasn't changed, sleep a bit so we don't run
    # a tight loop as long as the same UIDs are active
    if auth_uids and auth_uids == last_auth_uids:
      sleep(0.2)
# Jump to the main routine
# (exit code is main()'s return value: 0 on success, -1 on error)
if __name__ == "__main__":
  sys.exit(main())
| richardevcom/PAMPy-NFC | bin/scripts/ppnfc_autotype.py | ppnfc_autotype.py | py | 17,131 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.stat",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "secrets.token_bytes",
"line_number":... |
37120203160 | from typing import List
import timm
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from detectron2.layers import ShapeSpec
from detectron2.modeling import Backbone
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
__all__ = ["BiFPN"]
def get_world_size() -> int:
    """Return the distributed world size, or 1 outside a distributed run."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
class DepthwiseSeparableConv2d(nn.Sequential):
    """A depthwise conv (groups=in_channels) followed by a 1x1 pointwise conv.

    NOTE(review): the depthwise stage maps in_channels -> out_channels, so
    out_channels must be a multiple of in_channels; the in-file caller
    (Conv3x3BnReLU) always uses in_channels == out_channels.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        bias=True,
    ):
        # Per-channel spatial filtering; bias lives in the pointwise stage
        depthwise = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=False,
        )
        # 1x1 channel mixing
        pointwise = nn.Conv2d(out_channels, out_channels, kernel_size=1, bias=bias)
        super().__init__(depthwise, pointwise)
class Conv3x3BnReLU(nn.Sequential):
    """3x3 depthwise-separable conv + BatchNorm + ReLU, channel count kept."""

    def __init__(self, in_channels, stride=1):
        conv = DepthwiseSeparableConv2d(
            in_channels,
            in_channels,
            kernel_size=3,
            bias=False,
            padding=1,
            stride=stride,
        )
        # SyncBatchNorm only makes sense (and only works) when running
        # distributed across several processes
        norm_cls = nn.SyncBatchNorm if get_world_size() > 1 else nn.BatchNorm2d
        super().__init__(
            conv,
            norm_cls(in_channels, momentum=0.03),
            nn.ReLU(inplace=True),
        )
class FastNormalizedFusion(nn.Module):
    """EfficientDet-style fast normalized fusion of `in_nodes` feature maps:
    out = sum(w_i * x_i) / (sum(w_i) + eps), with learnable w_i >= 0."""

    def __init__(self, in_nodes):
        super().__init__()
        self.in_nodes = in_nodes
        # One learnable fusion weight per input node, initialised to 1
        self.weight = nn.Parameter(torch.ones(in_nodes, dtype=torch.float32))
        self.register_buffer("eps", torch.tensor(0.0001))

    def forward(self, x: List[torch.Tensor]):
        if len(x) != self.in_nodes:
            raise RuntimeError(
                "Expected to have {} input nodes, but have {}.".format(self.in_nodes, len(x))
            )
        # where wi ≥ 0 is ensured by applying a relu after each wi (paper)
        weight = F.relu(self.weight)
        weighted_sum = sum(xi * wi for xi, wi in zip(x, weight))
        return weighted_sum / (weight.sum() + self.eps)
class BiFPN(Backbone):
    """
    This module implements Feature Pyramid Network.
    It creates pyramid features built on top of some input feature maps.
    (Single BiFPN layer: lateral 1x1 convs, a top-down pass with
    fast-normalized fusion, then a bottom-up pass; p6 comes from top_block.)
    """
    def __init__(self, bottom_up, out_channels, top_block=None):
        # bottom_up: a timm features_only model exposing feature_info;
        # top_block: produces p6 from p5 (e.g. LastLevelMaxPool)
        super().__init__()
        self.bottom_up = bottom_up
        self.top_block = top_block
        # Lateral 1x1 convs mapping each backbone stage to out_channels
        self.l5 = nn.Conv2d(bottom_up.feature_info[4]['num_chs'], out_channels, kernel_size=1)
        self.l4 = nn.Conv2d(bottom_up.feature_info[3]['num_chs'], out_channels, kernel_size=1)
        self.l3 = nn.Conv2d(bottom_up.feature_info[2]['num_chs'], out_channels, kernel_size=1)
        self.l2 = nn.Conv2d(bottom_up.feature_info[1]['num_chs'], out_channels, kernel_size=1)
        # Top-down intermediate nodes
        self.p4_tr = Conv3x3BnReLU(out_channels)
        self.p3_tr = Conv3x3BnReLU(out_channels)
        self.up = nn.Upsample(scale_factor=2, mode="nearest")
        self.fuse_p4_tr = FastNormalizedFusion(in_nodes=2)
        self.fuse_p3_tr = FastNormalizedFusion(in_nodes=2)
        # Bottom-up path: stride-2 convs in place of downsampling
        self.down_p2 = Conv3x3BnReLU(out_channels, stride=2)
        self.down_p3 = Conv3x3BnReLU(out_channels, stride=2)
        self.down_p4 = Conv3x3BnReLU(out_channels, stride=2)
        self.fuse_p5_out = FastNormalizedFusion(in_nodes=2)
        self.fuse_p4_out = FastNormalizedFusion(in_nodes=3)
        self.fuse_p3_out = FastNormalizedFusion(in_nodes=3)
        self.fuse_p2_out = FastNormalizedFusion(in_nodes=2)
        self.p5_out = Conv3x3BnReLU(out_channels)
        self.p4_out = Conv3x3BnReLU(out_channels)
        self.p3_out = Conv3x3BnReLU(out_channels)
        self.p2_out = Conv3x3BnReLU(out_channels)
        # Metadata consumed by detectron2 (see output_shape below)
        self._out_features = ["p2", "p3", "p4", "p5", "p6"]
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        self._size_divisibility = 32
        self._out_feature_strides = {}
        for k, name in enumerate(self._out_features):
            self._out_feature_strides[name] = 2 ** (k + 2)
    @property
    def size_divisibility(self):
        # Inputs must be padded to multiples of this stride
        return self._size_divisibility
    def forward(self, x):
        p2, p3, p4, p5 = self.bottom_up(x)
        if self.training:
            # Zero-valued sum over all backbone parameters keeps every one of
            # them in the autograd graph - presumably to satisfy DDP's
            # unused-parameter detection (TODO confirm).
            _dummy = sum(x.view(-1)[0] for x in self.bottom_up.parameters()) * 0.0
            p5 = p5 + _dummy
        # Lateral projections
        p5 = self.l5(p5)
        p4 = self.l4(p4)
        p3 = self.l3(p3)
        p2 = self.l2(p2)
        # Top-down pass
        p4_tr = self.p4_tr(self.fuse_p4_tr([p4, self.up(p5)]))
        p3_tr = self.p3_tr(self.fuse_p3_tr([p3, self.up(p4_tr)]))
        p2_out = self.p2_out(self.fuse_p2_out([p2, self.up(p3_tr)]))
        # Bottom-up pass
        p3_out = self.p3_out(self.fuse_p3_out([p3, p3_tr, self.down_p2(p2_out)]))
        p4_out = self.p4_out(self.fuse_p4_out([p4, p4_tr, self.down_p3(p3_out)]))
        p5_out = self.p5_out(self.fuse_p5_out([p5, self.down_p4(p4_out)]))
        return {"p2": p2_out, "p3": p3_out, "p4": p4_out, "p5": p5_out, "p6": self.top_block(p5_out)[0]}
    def output_shape(self):
        # detectron2 Backbone contract: per-feature channel/stride description
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
# Smoke test: wrap a timm backbone in the BiFPN and TorchScript-compile it
if __name__ == "__main__":
    # pretrained=True downloads weights on first use
    m = timm.create_model('spnasnet_100', pretrained=True, features_only=True, out_indices=(1, 2, 3, 4))
    x = torch.rand(1, 3, 224, 224)
    m2 = BiFPN(bottom_up=m, out_channels=112, top_block=LastLevelMaxPool())
    # torch.jit.trace(m2, x)
    m2 = torch.jit.script(m2)
    print(m2(x))
| zetyquickly/DensePoseFnL | fpn.py | fpn.py | py | 5,990 | python | en | code | 113 | github-code | 1 | [
{
"api_name": "torch.distributed.is_available",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.distributed",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.distributed.is_initialized",
"line_number": 18,
"usage_type": "call"
},
{
... |
72316114274 | from lxml import html
from lxml.etree import XPath
# XPath selectors compiled once at import time:
# the results table body, the visible cells of an observation row, and
# the cells of the nested per-observation details table.
TBODY_XPATH = XPath('//table[@class="observations"]/tbody')
OBSERVATION_XPATH = XPath('./td//text()[normalize-space()]')
DETAILS_XPATH = XPath('./td/div/table/tbody/tr/td//text()')
def _clean_cell(value):
"""
Removes dashes and strips whitespace from the given value.
"""
return value.replace('\u2014', '').strip()
class WebObsResultsParser(object):
    """
    Parser for WebObs search results page.

    The parser reads an HTML page with search results (presented as a table)
    and parses the table into a list of observations.
    """

    def __init__(self, html_source):
        """
        Creates the parser and feeds it source code of the page.
        """
        # WebObs prints this sentence instead of a table when nothing matched
        self.empty = "There were no results for this search." in html_source
        if not self.empty:
            root = html.fromstring(html_source)
            self.tbody = TBODY_XPATH(root)[0]

    def get_observations(self):
        """
        Parses the HTML table into a list of dictionaries, each of which
        represents a single observation.
        """
        if self.empty:
            return []
        rows = list(self.tbody)
        observations = []
        # Each observation spans two consecutive rows: the main row and a
        # details row holding comparison star, chart, comment codes, notes.
        for main_row, details_row in zip(rows[::2], rows[1::2]):
            cells = OBSERVATION_XPATH(main_row)
            details = DETAILS_XPATH(details_row)
            observations.append({
                'name': _clean_cell(cells[0]),
                'date': _clean_cell(cells[1]),
                'magnitude': _clean_cell(cells[3]),
                'obscode': _clean_cell(cells[6]),
                'comp1': _clean_cell(details[0]),
                'chart': _clean_cell(details[3]).replace('None', ''),
                'comment_code': _clean_cell(details[4]),
                'notes': _clean_cell(details[5]),
            })
        return observations
| zsiciarz/pyaavso | pyaavso/parsers/webobs.py | webobs.py | py | 1,906 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "lxml.etree.XPath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "lxml.etree.XPath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "lxml.etree.XPath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring"... |
73113782435 | import math
from sys import stdin
from collections import defaultdict
class exist_negative_cycle(Exception):
    """Raised by bellman_ford when the graph has a reachable
    negative-weight cycle.

    NOTE(review): PEP 8 would name this NegativeCycleError, but renaming
    would break existing callers.
    """
    pass
# Sentinel distance for vertices not reachable from the source
inf = float('inf')
# Bellman-Ford single-source shortest paths.
# O(V * E)  (the original comment claimed O(E + V), which is incorrect)
def bellman_ford(g, size, start=0):
    """Return the list of shortest distances from ``start`` to every vertex.

    g: adjacency mapping {u: [(v, weight), ...]}
    size: number of vertices (0 .. size-1)
    Unreachable vertices keep distance float('inf').
    Raises exist_negative_cycle when a reachable negative cycle exists.

    BUG FIX: the original inner loop `for v, d in g[u]` shadowed the
    distance list `d` with the edge weight, so every relaxation crashed
    (an int is not subscriptable).
    """
    inf = float('inf')
    dist = [inf] * size
    dist[start] = 0
    # At most size-1 relaxation rounds are needed; stop early once stable.
    for _ in range(size - 1):
        updated = False
        for u in g:
            if dist[u] == inf:
                # inf + w never improves anything; skip unreachable sources
                continue
            for v, w in g[u]:
                if dist[v] > dist[u] + w:
                    dist[v] = dist[u] + w
                    updated = True
        if not updated:
            break
    # One extra pass: any further improvement proves a negative cycle.
    for u in g:
        if dist[u] == inf:
            continue
        for v, w in g[u]:
            if dist[v] > dist[u] + w:
                raise exist_negative_cycle
    return dist
# Read "V E r" (vertex count, edge count, source), then one directed edge
# "s t d" per line, and print the shortest distance to each vertex
# ('INF' for unreachable) or 'NEGATIVE CYCLE'.
v, e, r = map(int, input().split())
g = defaultdict(list)
for i in range(e):
    # BUG FIX: the original called the undefined name ``readline()``;
    # use the imported ``stdin`` instead.
    s, t, w = map(int, stdin.readline().split())
    g[s].append((t, w))
try:
    dists = bellman_ford(g, v, r)
    for di in dists:
        print('INF' if math.isinf(di) else di)
except exist_negative_cycle:
    print('NEGATIVE CYCLE')
| elzup/algo-py | graph/bellmanford.py | bellmanford.py | py | 836 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "math.isinf",
"line_number": 38,
"usage_type": "call"
}
] |
72083112354 | from tkinter import filedialog as fd
from tkinter import messagebox
from PIL import Image
import customtkinter as ctk
import requests
from io import BytesIO
from PIL import Image
import os
def select_file():
filetypes = (
('All files', '*.*'),
('text files', '*.txt'),
)
file_path = fd.askopenfilenames(
title='Open a file',
initialdir='/',
filetypes=filetypes
)
return file_path
def open_ctk_img(file_path, size=None):
# Cria um objeto de imagem
img = Image.open(file_path)
if size == None:
img = ctk.CTkImage(light_image=img)
else:
img = ctk.CTkImage(light_image=img, size=size)
return img
def get_image(image_url, size=(200, 200), resize=True):
response = requests.get(image_url)
img = Image.open(BytesIO(response.content))
if (resize == False):
return img
return img.resize(size)
def saveResults(image_paths):
save_path = fd.askdirectory()
folder_name = "Results_CIICAM"
# Create the full path by joining the downloads path and folder name
parent_folder_path = os.path.join(save_path, folder_name)
# Create the folder
if not os.path.exists(parent_folder_path):
os.makedirs(parent_folder_path)
print("Folder created at:", parent_folder_path)
for i, page in enumerate(image_paths):
# Specify the name of the folder you want to create
folder_name = "Image_"+str(i+1)
# Create the full path by joining the parent folder path and folder name
folder_path = os.path.join(parent_folder_path, folder_name)
# Create the folder
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for j, list_path in enumerate(page):
if (j == 0):
image = Image.open(list_path)
image_name = 'original_'+str(i+1)+'.jpg'
else:
image = get_image(list_path, resize=False)
image_name = 'similarity'+str(j)+'.jpg'
image.save(os.path.join(folder_path, image_name))
print("Images saved in:", parent_folder_path)
def save_images(images,names = [], save_path = '', ask_path = False):
save_path = fd.askdirectory() if ask_path else save_path
if save_path != '':
names = names if len(names)!= 0 else [i for i in range(len(images))]
if not os.path.exists(save_path):
os.makedirs(save_path)
for i in range(len(images)):
caminho_imagem = os.path.join(save_path, f"{names[i]}.png")
images[i].save(caminho_imagem)
else:
messagebox.showinfo("Alerta", "Nenhum diretório foi selecionado!") | gumartinslopes/TI-VI | interface/utils/file_handle.py | file_handle.py | py | 2,691 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.filedialog.askopenfilenames",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": ... |
74732843554 | import configparser
from tkinter import ttk
import datetime
from datetime import timedelta
import tkinter as tk
import os
from distutils.dir_util import copy_tree
from datepicker import Datepicker
# import win32print
class Data(tk.Frame):
    """Notebook tab for choosing the date (or whole month) to process."""
    def __init__(self, parent, controller):
        # NOTE(review): the second positional argument of tk.Frame.__init__
        # is the `cnf` option dict; passing the controller frame there looks
        # unintended - confirm against Frame(master, cnf=...) semantics.
        tk.Frame.__init__(self, parent, controller)
        self.controller = controller
        self.data = datetime.date.today()
        print(self.data - timedelta(days=1))
        # NOTE(review): this shadows the widget's own config() method.
        self.config = self.leggi_file_ini()
        # Italian month name -> month number
        self.mesi_dict = {'Gennaio': 1, 'Febbraio': 2, 'Marzo': 3,
                          'Aprile': 4, 'Maggio': 5, 'Giugno': 6,
                          'Luglio': 7, 'Agosto': 8, 'Settembre': 9,
                          'Ottobre': 10, 'Novembre': 11, 'Dicembre': 12}
        # STRINGVAR holding the selected date as dd-mm-YYYY (defaults to today)
        self.data_scelta = tk.StringVar()
        self.data_scelta.set(self.data.strftime('%d-%m-%Y'))
        # LABELFRAME for the date widgets ("Data da elaborare" = date to process)
        self.lblfrm_intervallo_date = tk.LabelFrame(self,
                                                    text='Data da elaborare',
                                                    labelanchor='n',
                                                    font=(self.config['Font']['font'], 20),
                                                    foreground='blue')
        # DATEPICKER bound to the same StringVar
        self.picker = Datepicker(self.lblfrm_intervallo_date, dateformat='%d-%m-%Y', datevar=self.data_scelta)
        # COMBOBOX for selecting a whole month
        self.cmb_box_mese = ttk.Combobox(self.lblfrm_intervallo_date,
                                         state='readonly',
                                         values=list(self.mesi_dict.keys()))
        self.cmb_box_mese.current(0)
        self.cmb_box_mese.bind('<<ComboboxSelected>>', self.combo_selected)
        # CHECKBUTTON to select yesterday's date ("ieri" = yesterday)
        self.ieri = tk.Checkbutton(self.lblfrm_intervallo_date, text="Ieri", command=self._ieri)
        # LAYOUT
        self.lblfrm_intervallo_date.grid()
        self.picker.grid()
        self.ieri.grid(sticky='w')
        self.cmb_box_mese.grid()
    @staticmethod
    def leggi_file_ini():
        # Load config.ini from the current working directory
        ini = configparser.ConfigParser()
        ini.read('config.ini')
        return ini
    def combo_selected(self, event):
        # NOTE(review): '0' + str(month) yields '010'..'012' for Oct-Dec and
        # the year is hard-coded to 2017 - confirm the intended format.
        self.data_scelta.set('2017-' + '0' + str(self.mesi_dict[self.cmb_box_mese.get()]))
    def _ieri(self):
        # Set the chosen date to yesterday, same dd-mm-YYYY format
        self.data_scelta.set((self.data - timedelta(days=1)).strftime('%d-%m-%Y'))
if __name__ == "__main__":
    # Manual smoke test: show the Data tab inside a notebook, centered on screen.
    root = tk.Tk()
    root.title('PyInsta')
    container = tk.Frame(root)
    center_x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
    center_y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
    root.geometry("600x300+%d+%d" % (center_x, center_y))
    notebook = ttk.Notebook(container)
    tab1 = Data(notebook, container)
    notebook.add(tab1, text='Data', compound='left')
    container.grid()
    notebook.grid()
    root.mainloop()
| AleLuzzi/PyInsta | data.py | data.py | py | 2,921 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Frame",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Frame.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "datetime... |
40894999462 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from django.conf import settings
import os
# Directory where Selenium screenshots are written by the tests below.
SCREEN_DUMP_LOCATION = os.path.join(settings.BASE_DIR, "screendumps")
class StatusViewsTests(StaticLiveServerTestCase):
    """Browser-level smoke tests for the status views, driven by Selenium/Firefox."""
    @classmethod
    def setUpClass(cls):
        # One shared Firefox session for the whole class; implicit waits of
        # up to 10 s cover slow element loads.
        super().setUpClass()
        cls.selenium = WebDriver()
        cls.selenium.implicitly_wait(10)
    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super().tearDownClass()
    def test_landing_page_title(self):
        """The landing page renders with the expected <title>."""
        self.selenium.get(f"{self.live_server_url}")
        # Keep a screenshot around for debugging CI failures.
        self.selenium.save_screenshot(
            f"{SCREEN_DUMP_LOCATION}/test_landing_page_title.png"
        )
        self.assertIn("Sprinkler Controller ][", self.selenium.title)
| why-pengo/sprinkler | status/tests/status_views_test.py | status_views_test.py | py | 829 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.BASE_DIR",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.conf... |
22288570912 | import torch
import torch.nn.functional as F
from torch.distributed.tensor.parallel import (
PairwiseParallel,
parallelize_module,
)
from torch.distributed._tensor import DeviceMesh, distribute_tensor, DTensor
from torch.distributed._tensor.placement_types import _Partial, Replicate, Shard
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
with_comms,
)
class DummyMLP(torch.nn.Module):
    """Small 5 -> 1024 -> 4 MLP test fixture with a deterministic reset.

    ``reset_parameters`` fills every weight/bias with fixed constants so two
    independently-constructed instances become numerically identical.
    """

    def __init__(self, device):
        super().__init__()
        self.net1 = torch.nn.Linear(5, 1024, device=device)
        self.relu = torch.nn.ReLU()
        self.net2 = torch.nn.Linear(1024, 4, device=device)

    def forward(self, x):
        hidden = self.relu(self.net1(x))
        return self.net2(hidden)

    def reset_parameters(self, *args, **kwargs):
        # Constant fills make the module's output reproducible.
        with torch.no_grad():
            for layer, weight_val, bias_val in (
                (self.net1, 0.5, 1.5),
                (self.net2, 1.0, 1.2),
            ):
                layer.weight.fill_(weight_val)
                layer.bias.fill_(bias_val)
class DTensorTest(DTensorTestBase):
    """Unit tests for DTensor construction, local/global conversion and specs.

    Every test runs under the ``with_comms`` distributed harness, so each
    rank executes the body with its own shard of the data.
    """
    @with_comms
    def test_dtensor_constructor(self):
        """DTensor() builds from a local shard; warns on requires_grad mismatch."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        local_tensor = torch.randn(3, 3, requires_grad=True)
        dist_tensor_shape = torch.Size([self.world_size * 3, 3])
        dist_tensor = DTensor(
            local_tensor,
            device_mesh,
            shard_spec,
            size=dist_tensor_shape,
            requires_grad=True,
        )
        self.assertEqual(dist_tensor.size(), torch.Size((self.world_size * 3, 3)))
        # requires_grad on the local tensor but not the DTensor -> warning
        with self.assertWarnsRegex(UserWarning, "To construct"):
            DTensor(local_tensor, device_mesh, shard_spec, size=dist_tensor_shape)
        local_tensor = torch.randn(3, 3, requires_grad=False)
        # requires_grad on the DTensor but not the local tensor -> warning
        with self.assertWarnsRegex(UserWarning, "To construct"):
            dist_tensor = DTensor(
                local_tensor,
                device_mesh,
                shard_spec,
                size=dist_tensor_shape,
                requires_grad=True,
            )
    @with_comms
    def test_meta_dtensor(self):
        """Meta-device tensors can be distributed, then materialized and filled."""
        device_mesh = self.build_device_mesh()
        dist_specs = [[Shard(0)], [Replicate()]]
        meta_tensor = torch.randn(1024, 2048, device="meta")
        for dist_spec in dist_specs:
            # Test distribute_tensor on meta tensor
            meta_dtensor = distribute_tensor(meta_tensor, device_mesh, dist_spec)
            self.assertTrue(meta_dtensor.is_meta)
            meta_dtensor = torch.empty_like(meta_dtensor, device=self.device_type)
            torch.nn.init.constant_(meta_dtensor, 1.2)
            value_tensor = torch.empty_like(meta_dtensor.to_local()).fill_(1.2)
            self.assertFalse(meta_dtensor.is_meta)
            self.assertEqual(meta_dtensor.device.type, self.device_type)
            self.assertEqual(meta_dtensor.to_local(), value_tensor)
            # Test from_local on meta tensor
            meta_dtensor = DTensor.from_local(meta_tensor, device_mesh, dist_spec)
            meta_dtensor = torch.empty_like(meta_dtensor, device=self.device_type)
            torch.nn.init.constant_(meta_dtensor, 1.5)
            self.assertEqual(meta_dtensor.device.type, self.device_type)
            value_tensor = torch.empty_like(meta_dtensor.to_local()).fill_(1.5)
            self.assertEqual(meta_dtensor.to_local(), value_tensor)
    @with_comms
    def test_modules_w_meta_dtensor(self):
        """A meta-initialized TP model matches a regularly-initialized one
        through forward, backward and an optimizer step."""
        model = DummyMLP("meta")
        device_mesh = self.build_device_mesh()
        model_tp = parallelize_module(model, device_mesh, PairwiseParallel())
        model_tp.to_empty(device=self.device_type)
        model_tp.reset_parameters()
        optim = torch.optim.SGD(model_tp.parameters(), lr=0.1)
        model_regular = DummyMLP(self.device_type)
        model_regular_tp = parallelize_module(model_regular, device_mesh, PairwiseParallel())
        optim_regular = torch.optim.SGD(model_regular_tp.parameters(), lr=0.1)
        model_regular_tp.reset_parameters()
        torch.manual_seed(0)
        inp = torch.randn(20, 5, device=self.device_type)
        output = model_tp(inp)
        output_regular = model_regular_tp(inp)
        self.assertEqual(output, output_regular)
        output.sum().backward()
        output_regular.sum().backward()
        optim.step()
        optim_regular.step()
        # After one step both models must still agree on fresh input.
        torch.manual_seed(1)
        inp = torch.randn(20, 5, device=self.device_type)
        self.assertEqual(model_tp(inp), model_regular_tp(inp))
    @with_comms
    def test_dtensor_stride(self):
        """Global stride reflects the sharded dim, including permuted inputs."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard0_spec = [Shard(0)]
        local_tensor = torch.randn(4, 8)
        global_shape = torch.Size([self.world_size * 4, 8])
        dist_tensor = DTensor(local_tensor, device_mesh, shard0_spec, size=global_shape)
        # won't affect stride
        self.assertEqual(dist_tensor.stride(), (8, 1))
        shard1_spec = [Shard(1)]
        local_tensor = torch.randn(8, 4)
        global_shape = torch.Size([8, self.world_size * 4])
        dist_tensor = DTensor(local_tensor, device_mesh, shard1_spec, size=global_shape)
        # will affect stride after DT initialized
        self.assertEqual(dist_tensor.stride(), (4 * self.world_size, 1))
        # if initialized from a transposed mat
        local_tensor = torch.randn(8, 4, 8)
        local_tensor_t = local_tensor.permute(1, 2, 0)
        global_shape = torch.Size([4, self.world_size * 8, 8])
        self.assertEqual(local_tensor_t.stride(), (8, 1, 32))
        dist_tensor = DTensor(
            local_tensor_t, device_mesh, shard1_spec, size=global_shape
        )
        global_stride = (8 * self.world_size, 1, 32 * self.world_size)
        self.assertEqual(dist_tensor.stride(), global_stride)
    @with_comms
    def test_from_local(self):
        """from_local honors shard/replicate/partial placements and autograd
        flows from the DTensor back to the originating torch.Tensor."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        local_tensor = torch.randn(3, 3)
        sharded_tensor = DTensor.from_local(local_tensor, device_mesh, shard_spec)
        self.assertEqual(sharded_tensor.size(), torch.Size([self.world_size * 3, 3]))
        replica_spec = [Replicate()]
        ddp_tensor = DTensor.from_local(local_tensor, device_mesh, replica_spec)
        self.assertEqual(ddp_tensor.size(), local_tensor.size())
        partial_spec = [_Partial()]
        partial_tensor = DTensor.from_local(local_tensor, device_mesh, partial_spec)
        self.assertEqual(partial_tensor.size(), local_tensor.size())
        # test dist tensor works with torch.Tensor during backwards
        local_tensor_with_grad = torch.randn(3, 3, requires_grad=True)
        # do some operations on local tensor
        local_tensor_temp = local_tensor_with_grad * 3
        # create the dist tensor with non leaf local tensor, dist tensor created
        # should also be non leaf node
        dist_tensor = DTensor.from_local(local_tensor_temp, device_mesh, shard_spec)
        self.assertFalse(dist_tensor.is_leaf)
        # do some random operations on dist tensor
        output = dist_tensor * 3
        self.assertIsInstance(output, DTensor)
        # trigger .backward() on dist tensor directly
        local_grad = torch.ones(3, 3)
        grad_output = DTensor.from_local(local_grad, device_mesh, shard_spec)
        # run backward directly on dist tensor
        output.backward(grad_output)
        # check it gradients flow back to original torch.Tensor
        self.assertIsNotNone(local_tensor_with_grad.grad)
        expected_grad = torch.ones(3, 3) * 9
        self.assertEqual(local_tensor_with_grad.grad, expected_grad)
    @with_comms
    def test_to_local(self):
        """to_local returns the local shard and keeps gradients flowing back
        to the DTensor when ops continue on the local view."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        dist_tensor_shape = torch.Size([self.world_size * 3, 3])
        local_tensor_with_grad = torch.randn(
            3, 3, device=self.device_type, requires_grad=True
        )
        sharded_tensor = DTensor(
            local_tensor_with_grad,
            device_mesh,
            shard_spec,
            size=dist_tensor_shape,
            requires_grad=True,
        )
        self.assertEqual(sharded_tensor.size(), dist_tensor_shape)
        self.assertEqual(sharded_tensor.to_local(), local_tensor_with_grad)
        # test dist tensor works with torch.Tensor during backwards
        # dist tensor created is a leaf node, do some operation on dist tensor
        temp_st = sharded_tensor * 3
        # do some operation on local tensor of the dist tensor
        new_tensor_with_grad = torch.randn(
            3, 3, device=self.device_type, requires_grad=True
        )
        res = temp_st.to_local() + new_tensor_with_grad
        # call backward directly on torch.Tensor, and see if it works by
        # propagating through dist tensor
        res.sum().backward()
        self.assertIsNotNone(sharded_tensor.grad)
        self.assertEqual(sharded_tensor.grad.to_local(), torch.ones(3, 3) * 3)
    @with_comms
    def test_from_local_then_to_local(self):
        """Round trip torch.Tensor -> DTensor -> torch.Tensor preserves autograd."""
        # this test ensure end to end from torch.Tensor -> dist tensor -> torch.Tensor works
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        # step 1. construct from construct local tensor
        local_tensor_with_grad = torch.randn(
            3, 3, device=self.device_type, requires_grad=True
        )
        # do some operations on local tensor
        local_tensor_temp = local_tensor_with_grad + 8
        # step 2. create the dist tensor with non leaf local tensor, dist tensor
        # created should also be non leaf node
        dist_tensor = DTensor.from_local(local_tensor_temp, device_mesh, shard_spec)
        self.assertFalse(dist_tensor.is_leaf)
        # do some random operations on dist tensor
        output = dist_tensor * 6
        self.assertIsInstance(output, DTensor)
        # step 3. do some operation on local tensor of the dist tensor
        new_tensor_with_grad = torch.randn(
            3, 3, device=self.device_type, requires_grad=True
        )
        res = output.to_local() + new_tensor_with_grad
        # call backward directly on torch.Tensor, and see if it works by
        # propagating all the way back to the original torch.Tensor
        res.sum().backward()
        self.assertIsNotNone(local_tensor_with_grad.grad)
        expected_grad = torch.ones(3, 3) * 6
        self.assertEqual(local_tensor_with_grad.grad, expected_grad)
    @with_comms
    def test_dtensor_spec_read_only_after_set(self):
        """Mutating the placements list after construction must not leak into
        the already-built DTensor."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        local_tensor = torch.randn(3, 3)
        sharded_tensor = DTensor.from_local(local_tensor, device_mesh, shard_spec)
        # modify shard_spec, and dist_tensor's spec should not be changed
        shard_spec[0] = Replicate()
        self.assertTrue(sharded_tensor.placements is not shard_spec)
        self.assertNotEqual(sharded_tensor.placements, shard_spec)
    @with_comms
    def test_dtensor_spec_hash(self):
        """Spec hashing depends on mesh/placements/tensor properties, not data."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        local_tensor = torch.randn(3, 3)
        local_tensor2 = torch.randn(3, 3)
        sharded_tensor = DTensor.from_local(local_tensor, device_mesh, shard_spec)
        sharded_tensor2 = DTensor.from_local(local_tensor2, device_mesh, shard_spec)
        # note that DTensorSpec without real tensor data, so the hash would be the same
        # as long as the mesh, placements and tensor properties are the same
        self.assertEqual(hash(sharded_tensor._spec), hash(sharded_tensor2._spec))
        # change the placements would change the hash
        local_tensor3 = torch.ones(3, 3)
        replica_spec = [Replicate()]
        replica_tensor = DTensor.from_local(
            local_tensor3, device_mesh, replica_spec, run_check=False
        )
        self.assertNotEqual(hash(sharded_tensor._spec), hash(replica_tensor._spec))
    @with_comms
    def test_dtensor_properties(self):
        """A DTensor reports the device type of its mesh."""
        device_mesh = DeviceMesh(self.device_type, list(range(self.world_size)))
        shard_spec = [Shard(0)]
        local_tensor = torch.randn(3, 3)
        sharded_tensor = DTensor.from_local(local_tensor, device_mesh, shard_spec)
        self.assertEqual(sharded_tensor.device.type, self.device_type)
class DTensorMeshTest(DTensorTestBase):
    """DTensor tests on a larger (8-rank) world and multi-dimensional meshes."""
    @property
    def world_size(self):
        # Fixed at 8 so 2x4 and 2x2x2 meshes can be formed below.
        return 8
    @with_comms
    def test_dtensor_device_mesh_device_conversion(self):
        """CPU local tensors are moved to the mesh's device automatically."""
        # construct a cuda device mesh
        mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
        # construct from a cpu local tensor with cuda device mesh
        # should automatically convert the dist tensor to cuda
        shard_spec = [Shard(0)]
        local_tensor = torch.randn(3, 3)
        dist_tensor = DTensor.from_local(local_tensor, mesh, shard_spec)
        self.assertEqual(dist_tensor.device.type, self.device_type)
        self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
    @with_comms
    def test_dtensor_api_device_mesh_context_manager(self):
        """A DeviceMesh context supplies the default mesh for from_local."""
        with DeviceMesh(self.device_type, list(range(self.world_size))) as mesh:
            shard_spec = [Shard(0)]
            local_tensor = torch.randn(3, 3)
            sharded_tensor = DTensor.from_local(
                local_tensor, device_mesh=mesh, placements=shard_spec
            )
        with DeviceMesh(self.device_type, list(range(self.world_size))):
            shard_spec = [Shard(0)]
            local_tensor = torch.randn(3, 3)
            sharded_tensor = DTensor.from_local(local_tensor, placements=shard_spec)
            replica_spec = [Replicate()]
            replica_tensor = sharded_tensor.redistribute(placements=replica_spec)
            self.assertEqual(
                replica_tensor.size(), torch.Size([3 * self.world_size, 3])
            )
    @with_comms
    def test_dtensor_2d_mesh(self):
        """Sharding across both dims of a 2-D mesh composes the global size."""
        mesh_tensor = torch.arange(self.world_size).reshape(2, 4)
        # construct a cuda device mesh
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        # construct a dist tensor on 2d device mesh and test if works
        shard_spec = [Shard(0), Shard(1)]
        local_tensor = torch.randn(3, 3)
        dist_tensor = DTensor.from_local(local_tensor, mesh, shard_spec)
        self.assertEqual(
            dist_tensor.size(), torch.Size([3 * mesh.size(0), 3 * mesh.size(1)])
        )
        self.assertEqual(dist_tensor.device.type, self.device_type)
        self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
        # if shard on the same tensor dimension
        # we should correctly construct the global tensor size
        shard_same_dim_spec = [Shard(0), Shard(0)]
        local_tensor = torch.randn(3, 3)
        dist_tensor = DTensor.from_local(local_tensor, mesh, shard_same_dim_spec)
        self.assertEqual(dist_tensor.size(), torch.Size([3 * self.world_size, 3]))
    @with_comms
    def test_device_mesh_nd(self):
        """Sharding on a 3-D mesh, including repeated shard dimensions."""
        # construct a cuda device mesh
        mesh_tensor = torch.arange(self.world_size).reshape(2, 2, 2)
        mesh = DeviceMesh(self.device_type, mesh_tensor)
        # construct a dist tensor on 3d device mesh and test if works
        shard_spec = [Shard(0), Shard(1), Shard(2)]
        local_tensor = torch.randn(3, 3, 3)
        dist_tensor = DTensor.from_local(local_tensor, mesh, shard_spec)
        self.assertEqual(dist_tensor.size(), torch.Size([6, 6, 6]))
        self.assertEqual(dist_tensor.device.type, self.device_type)
        self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
        # construct a dist tensor on 3d device mesh with some shards on same dim
        shard_spec = [Shard(0), Shard(0), Shard(2)]
        local_tensor = torch.randn(3, 3, 3)
        dist_tensor = DTensor.from_local(local_tensor, mesh, shard_spec)
        self.assertEqual(dist_tensor.size(), torch.Size([12, 3, 6]))
        self.assertEqual(dist_tensor.device.type, self.device_type)
        self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
    @with_comms
    def test_dtensor_spec_local_shard_offset(self):
        """Per-rank local shard offsets on a 2x4 mesh match the expected layout."""
        device_mesh = DeviceMesh(
            self.device_type, torch.arange(self.world_size).reshape(2, 4)
        )
        tensor_shape = (3 * self.world_size, 3 * self.world_size)
        # sharding specs and its corresponding local shard offsets
        shard_spec_and_offsets = [
            (
                [Shard(0), Replicate()],
                (3 * (self.world_size // 2) * (self.rank // 4), 0),
            ),
            (
                [Shard(1), Replicate()],
                (0, 3 * (self.world_size // 2) * (self.rank // 4)),
            ),
            (
                [Replicate(), Shard(0)],
                (3 * (self.world_size // 4) * (self.rank % 4), 0),
            ),
            (
                [Replicate(), Shard(1)],
                (0, 3 * (self.world_size // 4) * (self.rank % 4)),
            ),
        ]
        # loop through all sharding specs and check local shard offsets
        logical_tensor = torch.randn(tensor_shape)
        for shard_spec, expected_shard_offsets in shard_spec_and_offsets:
            dtensor = distribute_tensor(logical_tensor, device_mesh, shard_spec)
            self.assertEqual(expected_shard_offsets, dtensor._spec.local_offsets)
if __name__ == "__main__":
    # Launch through torch's common test runner so CLI filters/sharding apply.
    run_tests()
| llv22/pytorch-macOS-cuda | test/distributed/_tensor/test_dtensor.py | test_dtensor.py | py | 17,654 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.ReLU",
"line... |
1908888817 | import torch
import torch.nn.functional as F
import math
"""
DISCLAIMER: most of these functions were implemented by me (Vaclav Vavra)
during the MPV course in the Spring semester of 2020, mostly with the help
of the provided template.
"""
def get_gausskernel_size(sigma, force_odd = True):
    """Return the Gaussian kernel size covering roughly +/- 3 sigma.

    ``2 * ceil(3 * sigma) + 1`` is odd by construction, so the historical
    ``force_odd`` adjustment branch was unreachable and has been removed;
    the parameter is kept for backward compatibility.
    """
    ksize = 2 * math.ceil(sigma * 3.0) + 1
    return int(ksize)
def gaussian_filter2d(x: torch.Tensor, sigma: float) -> torch.Tensor:
    r"""Blur a batched image tensor with a separable Gaussian kernel.

    (Originally implemented for the MPV course, Spring 2020.)

    Args:
        x: input of shape :math:`(B, C, H, W)`.
        sigma: standard deviation of the Gaussian.

    Returns:
        Blurred tensor of shape :math:`(B, C, H, W)`.
    """
    ksize = get_gausskernel_size(sigma)
    half = float(ksize // 2)
    coords = torch.linspace(-half, half, ksize)
    row_kernel = gaussian1d(coords, sigma).reshape(1, -1)
    # Separable filtering: one horizontal pass, then one vertical pass.
    blurred_x = filter2d(x, row_kernel)
    return filter2d(blurred_x, row_kernel.t())
def gaussian1d(x: torch.Tensor, sigma: float) -> torch.Tensor:
    """Evaluate a zero-mean 1-D Gaussian pdf with std *sigma* at points *x*.

    (Originally implemented for the MPV course, Spring 2020.)
    """
    norm_const = 1.0 / (math.sqrt(2.0 * math.pi) * sigma)
    return norm_const * torch.exp(-(x ** 2) / (2.0 * sigma ** 2))
def spatial_gradient_first_order(x: torch.Tensor, mask=torch.tensor([[0.5, 0, -0.5]]).float(), smoothed: bool = False, sigma: float = 1.0) -> "tuple[torch.Tensor, torch.Tensor]":
    r"""Compute first-order image derivatives along x and y.

    (Originally implemented for the MPV course, Spring 2020.)

    Args:
        x: input tensor of shape :math:`(B, C, H, W)`.
        mask: single-row derivative kernel; the default is a central-difference
            stencil. The shared default tensor is only read, never mutated.
        smoothed: when True, Gaussian-blur the input with ``sigma`` first.
        sigma: standard deviation of the optional pre-smoothing.

    Returns:
        ``(outx, outy)``: two tensors of shape :math:`(B, C, H, W)` holding the
        horizontal and vertical derivatives. (Doc fix: the original docstring
        wrongly claimed a single :math:`(B, C, 2, H, W)` tensor return.)
    """
    if smoothed:
        filtered_input = gaussian_filter2d(x, sigma)
    else:
        filtered_input = x
    outx = filter2d(filtered_input, mask)
    outy = filter2d(filtered_input, mask.t())
    return outx, outy
def filter2d(x: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
    """Convolve a batched image tensor with a 2-D kernel, per channel.

    Replicate padding keeps the spatial size unchanged for odd kernels.

    (Originally implemented for the MPV course, Spring 2020.)

    Args:
        x: input of shape :math:`(B, C, H, W)`.
        kernel: kernel of shape :math:`(kH, kW)`, applied to every channel.

    Returns:
        Filtered tensor with the same shape and channel count as the input.
    """
    assert kernel.dim() == 2
    assert x.dim() == 4
    num_channels = x.shape[1]
    kh, kw = kernel.shape
    # Lift the kernel to (1, 1, kH, kW) on the input's device/dtype.
    weight = kernel[None, None, ...].to(device=x.device, dtype=x.dtype)
    # F.pad order: (left, right, top, bottom).
    pad = [kw // 2, kw // 2, kh // 2, kh // 2]
    padded: torch.Tensor = F.pad(x, pad, mode='replicate')
    # groups=C applies the same kernel independently to each channel.
    return F.conv2d(padded,
                    weight.expand(num_channels, -1, -1, -1),
                    groups=num_channels,
                    padding=0,
                    stride=1)
| vicsyl/extreme_two_view_matching_research | image_processing.py | image_processing.py | py | 3,933 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.ceil",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.linspace",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "torch.Tensor",
"line_n... |
15870050682 | import gym
import sys
import itertools
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from queue import Queue
from agent import Agent
def is_int(string):
    """Return True iff *string* parses as a strictly positive integer."""
    try:
        return int(string) > 0
    except Exception:
        return False
def process(state):
    """Convert a sequence of RGB frames into one (H, W, len(state)) float array.

    Each frame is converted to grayscale, the pixel block at rows 85-95 /
    columns 0-95 (the on-screen indicator bar) is blanked so the agent
    cannot read it, and values are scaled to [0, 1].
    """
    batch = []
    for frame in state:
        image = Image.fromarray(frame).convert('L')
        matrix = np.array(image)
        # Vectorized blanking; the original zeroed pixels one at a time
        # in a nested Python loop.
        matrix[85:96, :96] = 0
        batch += [matrix / 255]
    return np.array(batch).transpose(1, 2, 0)
# --- CLI parsing: "train N [show]" / "continue N [show]" / "test" ---
args = sys.argv[1:]
if len(args) in [2, 3] and args[0] in ['train', 'continue'] and is_int(args[1]):
    mode = args[0]
    episodes = int(args[1])
    show = 'human' if len(args) == 3 and args[2] == 'show' else 'rgb_array'
elif len(args) == 1 and args[0] == 'test':
    mode = args[0]
    show = 'human'
else:
    print('wrong format')
    exit(1)
# --- Discrete action space: every (steering, gas, brake) combination ---
# NOTE: "breaking" is a misspelling of "braking" kept for compatibility.
steering = [-1, 0, 1]
gas = [0, 1]
breaking = [0, .2]
agent = Agent(
    list(itertools.product(steering, gas, breaking)), 500, 50,
    alpha=.01, gamma=.95, epsilon=1, epsilon_lower=.1, epsilon_decay=.99
)
env = gym.make('CarRacing-v2', render_mode=show)
if mode in ['continue', 'test']:
    agent.load()
# --- Evaluation mode: run the saved policy until the episode ends ---
if mode == 'test':
    should_break = False
    def take_action(action):
        # Repeat the chosen action for 3 env steps and push the newest
        # frame into the 3-frame sliding window (globals mutated on purpose).
        global frames, should_break
        step_reward = 0
        step_game_over = False
        for _ in range(3):
            observation, reward, game_over, _, _ = env.step(action)
            step_reward += reward
            step_game_over |= game_over
        should_break |= step_game_over
        frames.get()
        frames.put(observation)
        return process(frames.queue), step_reward, step_game_over
    frames = Queue(3)
    observation, _ = env.reset()
    frames.put(observation)
    frames.put(observation)
    frames.put(observation)
    # Accelerate through the initial zoom-in frames before handing control over.
    for _ in range(20):
        take_action((0, .5, 0))
    while not should_break:
        agent.step(process(frames.queue), take_action)
    exit(0)
# --- Training loop ---
rewards = []
for episode in range(1, episodes + 1):
    episode_reward = 0
    negative_rewards = 0
    should_break = False
    def take_action(action):
        # Same as the test-mode closure, plus reward bookkeeping used for
        # the early-termination heuristic below.
        global frames, episode_reward, negative_rewards, should_break
        step_reward = 0
        step_game_over = False
        for _ in range(3):
            observation, reward, game_over, _, _ = env.step(action)
            step_reward += reward
            step_game_over |= game_over
        episode_reward += step_reward
        negative_rewards = negative_rewards + 1 if step_reward < 0 else 0
        should_break |= step_game_over
        frames.get()
        frames.put(observation)
        return process(frames.queue), step_reward, step_game_over
    frames = Queue(3)
    observation, _ = env.reset()
    frames.put(observation)
    frames.put(observation)
    frames.put(observation)
    for _ in range(20):
        take_action((0, .5, 0))
    step = 0
    while not should_break:
        agent.step(process(frames.queue), take_action)
        step += 1
        # Grace period grows with the episode; after it, 20 consecutive
        # negative-reward steps end the episode early.
        negative_rewards = 0 if step < 50 + episode else negative_rewards
        should_break |= negative_rewards == 20
    agent.replay()
    # Periodically sync the target network and checkpoint the agent.
    if episode % 5 == 0:
        agent.calibrate()
        agent.save()
    rewards += [episode_reward]
    print(f'episode {episode}/{episodes}: reward {episode_reward}')
plt.plot(range(len(rewards)), rewards)
plt.show()
| gareth618/car-race | main.py | main.py | py | 3,389 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.Image.fromarray",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_num... |
25376879137 | import psutil
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider, ValueObserver
from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
# Install a global MeterProvider and export collected metrics to the
# console every 5 seconds.
metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)
metrics.get_meter_provider().start_pipeline(meter, ConsoleMetricsExporter(), 5)
# Callback to gather cpu usage
def get_cpu_usage_callback(observer):
    """Observer callback: record per-CPU utilization, labeled by CPU index."""
    usages = psutil.cpu_percent(percpu=True)
    for cpu_index, usage in enumerate(usages):
        observer.observe(usage, {"cpu_number": str(cpu_index)})
# Register the per-CPU usage observer; sampled on every export interval.
meter.register_valueobserver(
    callback=get_cpu_usage_callback,
    name="cpu_percent",
    description="per-cpu usage",
    unit="1",
    value_type=float,
)
# Callback to gather RAM memory usage
def get_ram_usage_callback(observer):
    """Observer callback: record the system-wide RAM usage percentage."""
    observer.observe(psutil.virtual_memory().percent, {})
# Register the RAM usage observer; sampled on every export interval.
meter.register_valueobserver(
    callback=get_ram_usage_callback,
    name="ram_percent",
    description="RAM memory usage",
    unit="1",
    value_type=float,
)
# Block until the user presses Enter; the pipeline keeps exporting meanwhile.
input("Metrics will be printed soon. Press a key to finish...\n")
| NathanielRN/clone-opentelemetry-python | docs/examples/basic_meter/observer.py | observer.py | py | 1,140 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "opentelemetry.metrics.set_meter_provider",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "opentelemetry.metrics",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "opentelemetry.sdk.metrics.MeterProvider",
"line_number": 7,
"usage_type": "c... |
70742294433 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
import os
import re
import time
from os.path import dirname, basename, join
from urllib.parse import urlparse
from itemadapter import ItemAdapter
from scrapy.pipelines.files import FilesPipeline
from scrapy.pipelines.images import ImagesPipeline
from . import settings
class RobertocavallihomeinteriorsPipeline:
    """Item pipeline that writes each product's description and source URL
    to per-title text files under settings.FILES_STORE/<title>/."""

    def process_item(self, item, spider):
        print('保存产品描述')
        print(item['desc'])
        desc = item['desc']
        title = item['title']
        page_url = item['page_url']
        print('++++++++++++++++')
        item_dir = os.path.join(settings.FILES_STORE, title)
        # Product description text file.
        with open(os.path.join(item_dir, '{0}_desc.txt'.format(title)), 'w',
                  encoding='utf-8') as f:
            f.write(desc)
        # Source-page URL text file.
        with open(os.path.join(item_dir, '{0}_url.txt'.format(title)), 'w',
                  encoding='utf-8') as f:
            f.write(page_url)
        return item
class ImagePipeline(ImagesPipeline):
    """Scrapy images pipeline: stores downloads in a per-title folder and
    replaces the default SHA1 filename with '<image_name>_<ms timestamp>'."""

    def get_media_requests(self, item, info):
        requests = super().get_media_requests(item, info)
        for req in requests:
            # Attach the item so file_path() can read its title/image names.
            req.item = item
        print('{0}的图片正在下载中.....'.format(item['title']))
        # print(media_requests)
        return requests

    def file_path(self, request, response=None, info=None):
        default_path = super().file_path(request, response, info)
        # Strip characters that are illegal in folder names.
        safe_title = re.sub(r'[\\/:\*\?"<>\|]', "", request.item['title'])
        save_path = default_path.replace("full", safe_title)
        for image in request.item['images']:
            if image['image_url'] == request.url:
                # Swap the 40-hex SHA1 stem for a readable, timestamped name.
                return re.sub(r'\b[0-9a-f]{40}\b',
                              image['image_name'] + '_' + str(int(round(time.time() * 1000))),
                              save_path)
        return save_path
class FileDownloadPipeline(FilesPipeline):
    """Scrapy files pipeline: per-title folders and readable PDF names."""

    def get_media_requests(self, item, info):
        requests = super().get_media_requests(item, info)
        for req in requests:
            # Attach the item so file_path() can read its title/file names.
            req.item = item
        # print('{0}的文件正在下载中.....'.format(item['title']))
        return requests

    def file_path(self, request, response=None, info=None):
        # Default path assigned by scrapy (a SHA1 name under "full/").
        origin_path = super().file_path(request, response, info)
        # Strip characters that are illegal in folder names.
        safe_title = re.sub(r'[\\/:\*\?"<>\|]', "", request.item['title'])
        # Redirect into a folder named after the item title.
        save_path = origin_path.replace("full", safe_title)
        for pdf in request.item['files']:
            if pdf['pdf_url'] == request.url:
                print('{0}的文件{1}正在下载中.....'.format(safe_title, pdf['pdf_name']))
                # Swap the 40-hex SHA1 stem for a readable, timestamped name.
                return re.sub(r'\b[0-9a-f]{40}\b',
                              pdf['pdf_name'] + '_' + str(int(round(time.time() * 1000))),
                              save_path)
        # NOTE: unmatched requests fall back to the DEFAULT path (not the
        # per-title one) — this mirrors the original behavior exactly.
        return origin_path
| huangweiwei99/scrapy | robertocavallihomeinteriors/robertocavallihomeinteriors/pipelines.py | pipelines.py | py | 3,291 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3... |
10379903084 | from selenium import webdriver
from selenium.webdriver.common.by import By
import urllib.parse
from flask import Flask,jsonify, request
from flask_restful import Api, Resource
import time
app = Flask(__name__)
api = Api(app)
# Path handed to webdriver.Chrome below.
# NOTE(review): ".chromedriver.exe" looks like a typo for "./chromedriver.exe"
# (or a plain "chromedriver.exe") — confirm against the deployment layout.
PATH = ".chromedriver.exe"
def get_first_image_url_from_google(delay, search_term):
    """Search Google Images for *search_term* and return the URL of the
    first full-size result, or None when nothing usable is found.

    Parameters
    ----------
    delay : float
        Seconds to sleep after scrolling and after clicking a thumbnail.
    search_term : str
        Query text; URL-encoded before being sent.

    BUG FIX: the original returned from inside the function without calling
    ``wd.quit()``, leaking a Chrome process whenever an image was found.
    The driver is now always closed via try/finally.
    """
    wd = webdriver.Chrome(PATH)
    try:
        def scroll_down(driver):
            # Scroll to the bottom to trigger lazy-loading, then wait.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(delay)
        url = f"https://www.google.com/search?q={urllib.parse.quote(search_term)}&tbm=isch"
        wd.get(url)
        scroll_down(wd)
        thumbnails = wd.find_elements(By.CLASS_NAME, "Q4LuWd")
        if len(thumbnails) > 0:
            try:
                thumbnails[0].click()
                time.sleep(delay)
            except Exception:
                # Best effort: the thumbnail may be stale or obscured.
                pass
        images = wd.find_elements(By.CLASS_NAME, "n3VNCb")
        if len(images) > 0 and images[0].get_attribute('src') and 'http' in images[0].get_attribute('src'):
            return images[0].get_attribute('src')
        return None
    finally:
        wd.quit()
@app.route('/search-image')
def search_image():
    """Flask endpoint: JSON with the first Google Images hit for ?search_term=."""
    search_term = request.args.get('search_term')
    image_url = get_first_image_url_from_google(1, search_term)
    if not image_url:
        return jsonify({'status': 'failed', 'message': 'No related images found on this page'})
    return jsonify({'status': 'success', 'url': image_url})
| Chaitanyarai899/Video-Rendering-Service-backend | scraper.py | scraper.py | py | 1,384 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "selenium.webdr... |
7351651718 | # -*- coding:utf-8 -*-
from flask import Blueprint, render_template, session, request, redirect, flash, url_for, jsonify
from flask_login import login_user, login_required, logout_user, current_user
from app.email import send_confirm_email, send_reset_email
from app.message.models import Message, Pri_letter
from app.message.forms import LetterForm
from app.forum.models import Topic, Reply
from .forms import *
from .models import Member
from app.util.helper import mark_online
member = Blueprint('members', __name__)
captcha_id = "a68de1af20340f49c85a2cd6ba4611e3"
private_key = "7e9a00ea63636ff005afa90ab27ff5af"
@member.before_app_request
def mark_current_user_online():
mark_online(request.remote_addr)
@member.route('/member/<username>', methods=['GET', 'POST'])
def index(username):
form = LetterForm()
user = Member.query.filter_by(username=username).first()
msg = object
letter = object
like_topics = []
if user.collect_topic_num > 0:
for t_id in user.get_collect_topics():
like_topics.append(Topic.query.get(int(t_id)))
if current_user.is_authenticated:
msg = Message.get_user_message(user.id)
letter = Pri_letter.get_user_letter(current_user.id)
topics = Topic.get_user_topic(username)
replies = Reply.get_user_reply(username)
if user is None:
return "No this member"
return render_template('user/index.html', user=user, topics=topics, replies=replies, msg=msg, form=form, letters=letter, like_topics=like_topics)
@member.route('/signup', methods=['GET', 'POST'])
def signup():
if current_user.is_authenticated:
flash('Already sign in', 'info')
return redirect('/')
form = SignupForm()
if form.validate_on_submit():
user = form.save()
send_confirm_email(user, user.set_token())
login_user(user)
flash('Sign up success, do not forget to your mail-box to checking mail', 'success')
return redirect("/")
return render_template('user/signup.html', form=form)
@member.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
flash('Already sign in', "info")
return redirect("/")
form = LoginForm()
if form.validate_on_submit():
user, authonticated = Member.authenticate(form.email.data, form.password.data)
if authonticated:
login_user(user, form.remeber_me.data)
return redirect("/")
else:
flash("email or password not correct", 'warning')
return render_template('user/login.html', form=form)
@member.route('/signout', methods=['GET', 'POST'])
@login_required
def signout():
logout_user()
return redirect("/")
@member.route("/forget", methods=["GET", "POST"])
def forget():
if request.method == "GET":
email = request.args.get("email")
if email is not None:
user = Member.query.filter_by(email=email).first()
if user is None:
return "email error"
try:
send_reset_email(user, user.set_token(), email)
return "sended"
except:
return "failed"
return render_template("user/forget.html")
@member.route('/confirm/<token>', methods=['GET', 'POST'])
@login_required
def confirm(token):
if current_user.is_confirmed:
return "Don't be naughty"
if current_user.confirm(token):
flash('Activate success', 'success')
return redirect("/")
return "failed"
@member.route('/reset_pw/<string:email>/<token>', methods=['GET', 'POST'])
def reset_pw(email, token):
form = ResetpwForm()
user = Member.query.filter_by(email=email).first()
if user.confirm(token):
if form.validate_on_submit():
user.set_pw(form.password.data)
flash('Reset success', 'success')
return redirect(url_for(".login"))
return render_template("user/resetpw.html", form=form, r_token=token, r_email=email)
@member.route('/setting', methods=['GET', 'POST'])
@login_required
def setting():
form = SettingForm()
if form.validate_on_submit():
current_user.gender = form.gender.data
current_user.signature = form.signature.data
avatar_url = form.avatar.data
if avatar_url:
key, info = current_user.set_avatar(avatar_url)
if not key:
flash(info, 'warning')
return redirect(url_for(".setting"))
try:
current_user.save()
flash('Save success', 'success')
except:
print('Save failed.')
return render_template('user/setting.html', form=form)
@member.route('/send_confirm/<int:id>', methods=['GET', 'POST'])
@login_required
def send_confirm(id):
if request.method == "POST":
user = Member.query.get(id)
if user.username == current_user.username:
send_confirm_email(user, user.set_token())
else:
return "Don't be naughty"
return "-_-"
@member.route("/follow", methods=["POST", "GET"])
@login_required
def deal_follow():
if request.method == "GET":
u_id = request.args.get("u_id")
action = request.args.get('action')
if action == "follow":
if current_user.following(u_id):
return 'success'
else:
return "failed"
elif action=="unsubscribe":
if current_user.remove_following(u_id):
return "success"
else:
return "failed"
elif action=="is_followed":
if current_user.is_followed(u_id):
return "yes"
else:
return "no"
return False
@member.route('/check-in', methods=['GET', 'POST'])
@login_required
def check_in():
if request.method == 'GET':
if not current_user.is_check_in:
if current_user.check_in():
d = current_user.continuous_check_in
if d != 0:
return jsonify({"info": "In success, at present continuous sign {} days".format(current_user.continuous_check_in)})
else:
return jsonify({"info": "Sign in success"})
else:
return jsonify({"info": "Sign in failure"})
return False
@member.route('/balance', methods=['GET', 'POST'])
@login_required
def balance():
if request.method == "GET":
page = request.args.get('p')
if page is not None:
pagination = current_user.get_bill(page)
else:
page = 1
pagination = current_user.get_bill(page)
return render_template("user/balance.html", pagination=pagination)
| NilsGuo/0tinn | app/member/views.py | views.py | py | 6,733 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.util.helper.mark_online",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.request.remote_addr",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_na... |
10711671633 | import pandas as pd
import pandas.testing as tm
import numpy as np
from numpy import loadtxt
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import xgboost
from xgboost import XGBClassifier
import hashlib
import json
from time import time
from urllib.parse import urlparse
from uuid import uuid4
import requests
from flask import Flask, jsonify, request
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons
from sklearn.cluster import SpectralClustering
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
order= pd.read_csv("lpetrocelli-czech-financial-dataset-real-anonymized-transactions/order.csv")
account= pd.read_csv("lpetrocelli-czech-financial-dataset-real-anonymized-transactions/account.csv")
transaction= pd.read_csv("lpetrocelli-czech-financial-dataset-real-anonymized-transactions/transaction.csv")
#concatenate three dataframes
X= pd.concat([account,order,transaction], axis=0)
#convert non-numeric to numeric
X = X.apply(pd.to_numeric, errors='coerce', downcast='float')
#replace nan by 0
X = X.replace(np.nan, 0)
X_new= X.copy() #create a copy of your data
#40% for training data
X_train = X_new.sample(frac=0.40, random_state=0)
#rest for test data
X_test = X_new.drop(X_train.index)
#Create a class to store the block chain
class Blockchain:
def __init__(self):
self.current_trans = []
self.chain = []
self.nodes = set()
#Create the genesis block
self.new_block(prev_hash='1', proof=100)
def new_node(self, address):
"""
Add a new node. View the node here:'http://192.168.0.5:5000'
"""
parsed_url = urlparse(address)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL. Please try again.')
def valid_chain(self, chain):
"""
Determine if blockchain is valid.
"""
prev_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
print(f'{prev_block}')
print(f'{block}')
print("\n-----------\n")
#Check that the hash of the block is correct
prev_block_hash = self.hash(prev_block)
if block['prev_hash'] != prev_block_hash:
return False
#Check that the Proof of Work is correct
if not self.valid_proof(prev_block['proof'], block['proof'], prev_block_hash):
return False
prev_block = block
current_index += 1
return True
def conflict_resolution(self):
"""
Resolves conflicts by replacing current chain with the longest one in the network.
"""
neighbours = self.nodes
new_chain = None
#Identifying long chains
max_length = len(self.chain)
#Grab and verify the chains from all the nodes in the network
for node in neighbours:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
#Check if the length is longer and the chain is valid
if length > max_length and self.valid_chain(chain):
max_length = length
new_chain = chain
#Replace chain if a valid longer chain is discovered
if new_chain:
self.chain = new_chain
return True
return False
def new_block(self, proof, prev_hash):
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_trans,
'proof': proof,
'prev_hash': prev_hash or self.hash(self.chain[-1]),
}
#Reset the current list of transactions
self.current_trans = []
self.chain.append(block)
return block
def new_trans(self, sender, recipient, amount):
"""
Creates a new transaction to go into the next mined Block.
"""
self.current_trans.append({
'sender': sender,
'recipient': recipient,
'amount': amount,
})
return self.prev_block['index'] + 1
@property
def prev_block(self):
return self.chain[-1]
@staticmethod
def hash(block):
"""
SHA-256 encryption
"""
#Ensure that dictionary is ordered, to avoid inconsistent hashes.
block_str = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_str).hexdigest()
def proof_of_work(self, prev_block):
#Proof of Work Algorithm:
#- Find a number p' such that hash(pp') contains leading 4 zeroes
#- Where p is the previous proof, and p' is the new proof
prev_proof = prev_block['proof']
prev_hash = self.hash(prev_block)
proof = 0
while self.valid_proof(prev_proof, proof, prev_hash) is False:
proof += 1
return proof
@staticmethod
def valid_proof(prev_proof, proof, prev_hash):
#Validates Proof
guess = f'{prev_proof}{proof}{prev_hash}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
#Instantiate the Node
app = Flask(__name__)
#Generate a globally unique address for this node
node_id = str(uuid4()).replace('-', '')
#Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
#Run the proof of work algorithm to get the next proof...
prev_block = blockchain.prev_block
proof = blockchain.proof_of_work(prev_block)
#Receive a reward for finding the proof.
#The sender is "0" to signify a new transaction.
blockchain.new_trans(
sender="0",
recipient=node_id,
amount=1,
)
#Forge the new Block by adding it to the chain
prev_hash = blockchain.hash(prev_block)
block = blockchain.new_block(proof, prev_hash)
response = {
'message': "New Block Forged",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'prev_hash': block['prev_hash'],
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_trans():
#values = request.get_json()
values = request.args
#Check that the required fields are in the POST'ed data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing values', 400
#Create a new Transaction
index = blockchain.new_trans(values['sender'], values['recipient'], values['amount'])
response = {'message': f'Transaction will be added to Block {index}'}
#Kmeans clustering is implemented on the newly formed chain
#Building the k-means model
#kmeans = KMeans(n_clusters=2)
kmeans = KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
n_clusters=2, n_init=10, n_jobs=1, precompute_distances='auto',
random_state=None, tol=0.0001, verbose=0)
kmeans = kmeans.fit(X_train)
labels = kmeans.labels_
print("silhouette_score =", silhouette_score(X_train, labels, metric = 'euclidean'))
return jsonify(response), 201
#fit model to training data
model = XGBClassifier()
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def new_nodes():
#values = request.get_json()
#nodes = values.get('nodes')
nodes = request.args.get('nodes')
if nodes is None:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
blockchain.new_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.nodes),
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.conflict_resolution()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app.debug = True
app.run(host='0.0.0.0', port=port)
| eelay234/blockchain | progress/Web3/June/fraud_detection_using_kmeans.py | fraud_detection_using_kmeans.py | py | 9,159 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"... |
30331506653 | import os
import datetime
import shutil
import sys
import pathlib
p = pathlib.Path(__file__).resolve().parent
p = p.parent.joinpath("pysrc")
sys.path.insert(0, str(p))
from service import delete_some
"""删除文件测试"""
conf = "D:/"
disk = "E:/"
dst_folder = f"{disk}LT-VIDEO-SS91456-北京蓝天多维"
def get_free():
"""剩余空间"""
free = shutil.disk_usage(disk).free
return free / (2**30)
def setup():
"""创建测试文件"""
if not os.path.exists(disk):
raise SystemError("no disk")
date = datetime.datetime.now()
date_str = datetime.datetime.strftime(date, "%Y-%m-%d")
while get_free() > 6:
folder = os.path.join(dst_folder, date_str)
os.makedirs(folder, exist_ok=True)
files = [
os.path.join(folder, filename)
for filename in ["{}.mp4".format(p) for p in range(1, 7)]
]
for file in files:
with open(file, "wb") as out:
out.truncate(500 * 1024 * 1024)
date = date - datetime.timedelta(days=1)
date_str = datetime.datetime.strftime(date, "%Y-%m-%d")
from service import delete_some, remove_empty_dirs
def t_delete_some():
"""测试删除文件"""
setup()
free1 = shutil.disk_usage(disk).free
delete_some(dst_folder, 4)
free2 = shutil.disk_usage(disk).free
size_deleted = (free2 - free1) / (2**30)
assert 5 > size_deleted > 4
if __name__ == "__main__":
setup()
# delete_some(dst_folder, 4)
# remove_empty_dirs(dst_folder)
| soda92/NVRTool | test/delete_file_test_d.py | delete_file_test_d.py | py | 1,539 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "shutil.disk_usage",
"li... |
1272262655 | import torch
import librosa
import numpy as np
import matplotlib.pyplot as plt
from specAugment.spec_augment_pytorch import spec_augment
# Borrowed from: https://github.com/DemisEom/SpecAugment
if __name__ == "__main__":
# Get example mel
audio, sampling_rate = librosa.load(librosa.util.example_audio_file(), duration = 4, sr = 8000, mono= True)
mel_spectrogram = librosa.feature.melspectrogram(y=audio,
sr=sampling_rate,
n_mels=256,
hop_length=128,
fmax=8000)
# Visualize
librosa.display.specshow(librosa.power_to_db(mel_spectrogram, ref=np.max))
plt.show()
# Do SpecAugment
mel_spectrogram = torch.Tensor(mel_spectrogram).unsqueeze(0)
warped_masked_spectrogram = spec_augment(mel_spectrogram=mel_spectrogram).squeeze().numpy()
# Visualize
librosa.display.specshow(librosa.power_to_db(warped_masked_spectrogram, ref=np.max))
plt.show() | HudsonHuang/yata | yata/spectaug.py | spectaug.py | py | 1,119 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "librosa.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "librosa.util.example_audio_file",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "librosa.util",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "libros... |
42214197070 | import os
import chardet
folder_path = 'txt_data'
stopwords_files = ['baidu_stopwords.txt']
stopwords_list = ["的", "了", "在", "是", "我", "有", "和", "就",
"不", "人", "都", "一", "一个", "上", "也", "很", "到", "说", "要", "去", "你", "会", "着", "没有", "看", "好", "自己", "这",
"罢", "这", '在', '又', '在', '得', '那', '他', '她', '不', '而', '道', '与', '之', '⻅', '却', '问', '可', '但', '没', '啦', '给', '来', '既',
'叫', '只', '中', '么', '便', '听', '为', '跟', '个', '甚', '下', '还', '过', '向', '如此', '已', '位', '对', '如何', '将', '岂', '哪', '似', '以免', '均', '虽然', '即',
'由', '再', '使', '从', '麽', '其实', '阿', '被']
def get_files():
with open(folder_path + '/inf.txt', encoding='utf-8', mode='r') as f:
names = str(f.read())
print(names)
name_list = [folder_path + os.sep + name + '.txt' for name in names.split(',')]
return name_list
if __name__ == '__main__':
get_files()
def import_stopwords():
for sf in stopwords_files:
with open(sf, 'r') as f:
stopwords_list.extend([word.strip('\n') for word in f.readlines()])
print(stopwords_list)
def is_chinese(uchar):
if u'\u4e00' <= uchar <= u'\u9fa5':
return True
else:
return False
def get_texts():
import_stopwords()
corpus_context_dict = {}
id_corpus_dict = {}
id = 0
for file in get_files():
simple_name = str(file).split(os.sep)[1].split('.')[0]
with open(file, 'rb') as f:
context = f.read()
real_encode = chardet.detect(context)['encoding']
context = context.decode(real_encode, errors='ignore')
new_context = ''
for c in context:
if is_chinese(c):
new_context += c
# for sw in stopwords_list:
# new_context = new_context.replace(sw, '')
corpus_context_dict[simple_name] = new_context
id_corpus_dict[id] = simple_name
id += 1
print(id)
return corpus_context_dict, id_corpus_dict
if __name__ == '__main__':
import_stopwords()
get_texts()
| 9aLucky/DL_NLP_2022_HW | HW4/preprocessor.py | preprocessor.py | py | 2,312 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.sep",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "chardet.detect",
"line_number": 49,
"usage_type": "call"
}
] |
36448088007 | import sys
import logging
FORMAT = '%(levelname) - %(asctime)s -AutoClicker %(message)s'
FORMAT = ("%(levelname) %(message)s")
FORMAT = ("%(asctime)s %(name)s %(levelname)s %(message)s")
logger = logging.getLogger('otog')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(FORMAT)
console = logging.StreamHandler(sys.stdout)
console.setFormatter(formatter)
logger.addHandler(console)
file_handler = logging.FileHandler('testtube.log', mode='a')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
#logging.basicConfig(filename='test.log',level=logging.WARNING,format = FORMAT)
try:
for i in range(10,0,-1):
logger.info("{} reps".format(i))
89/i
89/0
except:
logger.warning("hola")
print("Ching") | cnhy-nero-diskard/AutoClicker_quizlet | test_debug.py | test_debug.py | py | 767 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.StreamHa... |
8251155538 | import pandas as pd
from bs4 import BeautifulSoup
import requests
df = pd.read_csv('Lists/2015_jeju_test.tsv', sep='\t',encoding='utf8')
def search(keyword1,keyword2,keyword3, category, city, name):
sd= "20150101"
ed= "20191231"
query= keyword1 +"+"+ "%7c" + "+" +keyword2 + "+" + "%7c"+ "+" +keyword3 + '+"' + city + '"+"' + name +'"'
url= "https://search.naver.com/search.naver?where=post&query={}&date_from={}&date_to={}&date_option=8&qvt=0".format(query,sd,ed)
url1=makeURL(keyword1,sd,ed,city,name)
url2=makeURL(keyword2,sd,ed,city,name)
url3=makeURL(keyword3,sd,ed,city,name)
total= [request(url),request(url1),request(url2),request(url3)]
print(total[0])
return total
def makeURL(keyword,sd,ed,city,name):
query= keyword +'+"' + city + '"+"' + name +'"'
return "https://search.naver.com/search.naver?where=post&query={}&date_from={}&date_to={}&date_option=8&qvt=0".format(query,sd,ed)
def request(url):
req = requests.get(url)
print(url)
# 정상적인 request 확인
if req.ok:
html = req.text
soup = BeautifulSoup(html, 'html.parser')
total= soup.select( 'div.section_head > span.title_num')
try :
total = total[0].text.split(' ')[2]
total = total.replace(',','').strip()
total = int(total[:-1])
print(total)
return total
except:
return 0
return 0
if __name__ == '__main__':
category=[]
a=[]
b=[]
c=[]
keyword0= "편리성(화장실,도로변,주차장)"
keyword1= "화장실"
keyword2= "도로변"
keyword3= "주차장"
for data in df['name']:
if len(data) <=2:
data= "카페 "+data
total = search(keyword1,keyword2,keyword3,keyword0,"제주",data)
category.append(total[0])
a.append(total[1])
b.append(total[2])
c.append(total[3])
df[keyword0] = category
df[keyword1] = a
df[keyword2] = b
df[keyword3] = c
category=[]
a=[]
b=[]
c=[]
keyword0= "기능(시그니처,다양한메뉴,친절한)"
keyword1= "시그니처"
keyword2= "다양한메뉴"
keyword3= "친절한"
for data in df['name']:
if len(data) <=2:
data= "카페 "+data
total = search(keyword1,keyword2,keyword3,keyword0,"제주",data)
category.append(total[0])
a.append(total[1])
b.append(total[2])
c.append(total[3])
df[keyword0] = category
df[keyword1] = a
df[keyword2] = b
df[keyword3] = c
category=[]
a=[]
b=[]
c=[]
keyword0= "여가(오름,바다,빵)"
keyword1= "오름"
keyword2= "바다"
keyword3= "빵"
for data in df['name']:
if len(data) <=2:
data= "카페 "+data
total = search(keyword1,keyword2,keyword3,keyword0,"제주",data)
category.append(total[0])
a.append(total[1])
b.append(total[2])
c.append(total[3])
df[keyword0] = category
df[keyword1] = a
df[keyword2] = b
df[keyword3] = c
category=[]
a=[]
b=[]
c=[]
keyword0= "분위기(예쁜,분위기,음악)"
keyword1= "예쁜"
keyword2= "분위기"
keyword3= "음악"
for data in df['name']:
if len(data) <=2:
data= "카페 "+data
total = search(keyword1,keyword2,keyword3,keyword0,"제주",data)
category.append(total[0])
a.append(total[1])
b.append(total[2])
c.append(total[3])
df[keyword0] = category
df[keyword1] = a
df[keyword2] = b
df[keyword3] = c
df.to_csv("Lists/result_test.csv", encoding='utf-8-sig')
| vyvydkf628/PythonWebCrawler | count blogs/countBlogs.py | countBlogs.py | py | 3,757 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
}
] |
3758809888 | from notion.client import NotionClient
from notion.collection import NotionDate
from datetime import datetime, timedelta
from math import ceil
class todo_list_mgr:
def __init__(self, client, page_to_update):
assert(isinstance(client, NotionClient))
self.target_view = client.get_collection_view(page_to_update)
self.task_mgr = task_mgr()
def fresh_list(self):
print("Current Run Start Time: " + str(datetime.now()))
for row in self.target_view.collection.get_rows():
self.task_mgr.update(row)
class task_mgr:
"""
Used for Notion template
"""
@staticmethod
def __is_valid(row):
try:
_ = row.Interval + str(row.Done) + str(row.Scheduled)
return True
except:
return False
@staticmethod
def __is_recurring(row):
if row.Interval == "":
return False
else:
return True
@staticmethod
def __is_done(row):
return row.Done
@staticmethod
def __is_due(row):
sys_time = datetime.today().date()
due_time = row.Scheduled
if isinstance(due_time.start, datetime):
due_time = due_time.start.date()
else:
due_time = due_time.start
if (due_time - sys_time).days < 0:
return True
else:
return False
@staticmethod
def __update_next_due(row):
interval = int(row.Interval)
last_start = row.Scheduled.start
today = datetime.today().date()
start_to_now_days = (today - last_start).days
next_day = last_start + timedelta(days=ceil(start_to_now_days / interval) * interval)
row.Scheduled = NotionDate(start=next_day)
row.Done = False
def __update_timeline(self, row):
if row.Scheduled == None:
print("No scheduled time set! Skipping...")
return
scheduled_time = row.Scheduled.start
sys_date = datetime.strptime(
str(datetime.today().date()), '%Y-%m-%d')
# new week start from Monday
# delta = timedelta((12 - sys_date.weekday()) % 6)
next_seven_date = (sys_date + timedelta(7)).date()
next_thirty_day_date = (sys_date + timedelta(30)).date()
if isinstance(scheduled_time, datetime):
scheduled_time = scheduled_time.date()
if self.__is_done(row):
result = "Completed"
elif self.__is_due(row):
result = "Delay"
elif (scheduled_time - sys_date.date()).days == 0:
result = "Today"
elif (scheduled_time - next_seven_date).days <= 0:
result = "Next 7 day"
elif (scheduled_time - next_thirty_day_date).days <= 0:
result = "Next 30 day"
else:
result = "later"
if result != row.Timeline:
row.Timeline = result
print("Task moved to " + row.Timeline)
else:
print("Timeline not changed")
def update(self, row):
print("------------------------------------")
print("Processing Task with name : " +
row.get_property("Task Name"))
if self.__is_valid(row) and self.__is_recurring(row):
if not self.__is_due(row):
print("Task should be completed in the future, skipping...")
else:
if self.__is_done(row):
print("Task is done, update the next due time")
self.__update_next_due(row)
else:
print("Task is due but not finished, skipping...")
else:
print("Not recurring task or the task is not valid, skipping")
# refresh task and update timeline info
self.__update_timeline(row)
| hanbo1990/notion_automation | todo_list_mgr.py | todo_list_mgr.py | py | 3,813 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "notion.client.NotionClient",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name":... |
3565853497 | from PyQt5 import QtCore, QtWidgets, QtGui
from sys import exit
import os
import time
import csv
from multiprocessing import Process, Pipe, Queue
import datetime
import BACModbus
from setup import read_setup
from jbdMain import JBD
from ampy import Ui_MainWindow
import serial
import logging # logging
import modbus_tk_ampy.defines as cst
from modbus_tk_ampy import modbus_rtu
#from scipy import integrate
from integrate import simps
#from scipy.stats import linregress
from numpy import mean, isnan, array, prod, abs
import pdb # pdb.set_trace() to add breakpoint
import simple_pid as pid
#DisableForDesktopDebug
#from platform import system as platsys
#if platsys() == 'Linux': # Need better solution for crossplatform dev...
import RPi.GPIO as GPIO
# import psutil # Process = psutil.Process(getpid()) # To get memory use
import sqlite3
import argparse
from number_pad import numberPopup
from options_dialog import optionsDialog
from bms_dialog import bmsDialog
from bmscfg_dialog import bmscfgDialog
# todo: Misc dev.
# A. try a fork with restricted imports/refactors and recheck strace to minimize depsize, include .pyc; e.g.
# QtWidgets.QMainWindow .QApplication .QMessageBox, QSlider... etc
# from scipy.integrate import simps vs. from scipy import integrate >> integrate.simps
# B. Consider serial control of walk mode registers for 'zero RR' riding assist.
# Three RR settings could be derived from trip simulator experimental RR;
# "Coast" e.g. eliminate all RR at x mph, "Normal" to offset hysteresis torque only,
# and "Emulate Cancer Farts" for ICE-like "engine-braking" AKA "plug-braking".
# C.Verify 'Regeneration_battery_current_limit' parameter behavior. Currently controller reports
# calculated battery current braking limit parameter = system voltage * Rated motor power (Race mode PAS power)
# is this the only difference between rated/remote power registers?
# C1. Does assist level affect regeneration current?
# C2. Does PAS power actually control regen current with Throttle bypass assist level = 1?
# C3. Does Race mode PAS power actually scale regen current with Alternate speed/power limit disabled?
# C4. 'Rated battery voltage' is supposed to be peak battery voltage...? I prefer average/shallow DoD...
# C5. Also test the Overload_..._current/time values (Addr 99-104) after ferrofluid mod.
# E. Backup EEPROM in SQL each write, include ComboBox to select priors?
# Or, add a manual save/restore feature with existing JBD backend.
# F. Simulate cruise control by switching to remote throttle, speed type, set current speed? Then back?
# Throttle bypass assist level: 6.015+ Features3 Bit0 enable, to ignore assist level and use 100% throttle input.
# Human Power Assist Standard: If enabled: https://support.accelerated-systems.com/KB/?p=2175
# Full Pedelec Assistance Speed: Point at which foldback on Max Pedelec Assistance Gain terminates.
# Pedelec gain = 0 when speed = Vehicle_Maximum_Speed (for assistance only, addr 1920)
# SOC rundown test, while polling;
# 12:25 AM
# 38.7% SOC (16.2567Ahrem)
# 9:46 AM
# 37.4% SOC (15.7276Ahrem
# 0.134974971 percent SOC per hour
# 0.056689306 AH/H
# 4.32681378 Watt-hours/H
# ~5.14285714 watts idle controller power not detected by shunt-- factor into mileage?
# Self. vars used only for labels should be updated as formatted string, instead of np_float64's.
# ~18750 elements for last 5 minutes of data
# Redirect stdout print to file for debug logs
# orig_stdout = sys.stdout
# f = open('out.txt', 'w')
# sys.stdout = f
# sys.stdout = orig_stdout
# f.close()
class BACProcessEmitter(QtCore.QThread):
bac_msg = QtCore.pyqtSignal(object)
diag_msg = QtCore.pyqtSignal(object)
hack_msg = QtCore.pyqtSignal(object)
def __init__(self, from_bac_process: Pipe):
super().__init__()
self.data_from_bacprocess = from_bac_process
self.bacmsg = None
self.workercmd = 0
self.setup = setup
self.running = True
def run(self):
while True:
try:
self.bacmsg = self.data_from_bacprocess.recv()
if type(self.bacmsg) is tuple:
self.bac_msg.emit(self.bacmsg)
elif type(self.bacmsg) is dict:
self.diag_msg.emit(self.bacmsg)
elif type(self.bacmsg) is list:
self.hack_msg.emit(self.bacmsg)
except EOFError:
pass
class BMSProcessEmitter(QtCore.QThread):
bms_basic_msg = QtCore.pyqtSignal()
bms_eeprom_msg = QtCore.pyqtSignal()
bms_exception = QtCore.pyqtSignal(str)
def __init__(self, from_bms_process: Pipe):
super().__init__()
# BMS Attributes
self.data_from_bmsprocess = from_bms_process
self.bmsmsg = None
self.basicMsg = None
self.eepromMsg = None
# BAC Attributes
# self.msg.connect(callback)
self.workercmd = 0
self.setup = setup
self.newcmd = False
self.running = True
#self.client = modbus_rtu.RtuMaster(serial.Serial(port=BAC.port, baudrate=BAC.baudrate, bytesize=BAC.bytesize,
# parity=BAC.parity, stopbits=BAC.stopbits))
#self.client.set_timeout(1)
def run(self):
while True:
# todo: Check for alternative to try: if you can find a way to use 'if', may improve performance here
try:
self.bmsmsg = self.data_from_bmsprocess.recv()
except EOFError:
break
if self.bmsmsg[0] == 0:
# todo: instead store locally for accessing by Parent. Use signal only to trigger check whether to...
# throw faults, or update bmspopup, etc
self.basicMsg = self.bmsmsg[1:]
self.bms_basic_msg.emit()
elif self.bmsmsg[0] == 1:
self.eepromMsg = self.bmsmsg[1:]
self.bms_eeprom_msg.emit()
else:
print('BMSSerialEmitter message not recognized!')
class BMSSerialProcess(Process):
    """Child process that owns the serial link to the JBD battery-management board.

    Polls cell/basic data continuously and services one-shot EEPROM read/write
    and error-clear commands received via the ``from_window`` queue. Results are
    sent back through ``to_emitter`` as tuples tagged 0 (poll) or 1 (EEPROM).
    """
    def __init__(self, bmsport, to_emitter: Pipe, from_window: Queue):
        #super(BMSSerialProcess, self).__init__(target=self.pickle_wrapper)
        super(BMSSerialProcess, self).__init__()
        #print('BMSSerialProcV2 init begin.')
        self.daemon = True
        self.data_to_emitter = to_emitter      # pipe endpoint back to the GUI process
        self.data_from_window = from_window    # command queue from the GUI
        self.basicData = None                  # latest pack-level reading
        self.cellData = None                   # latest per-cell reading
        self.scanning = True                   # run() loop guard
        self.t1 = time.time_ns() / 1000000000
        self.t2 = None
        self.jbdcmd = 0 # 0 = basic/cell loop, 1 = eeprom read, 2 = eeprom write
        self.bmsport = bmsport
        self.j = JBD(self.bmsport, timeout = 1, debug = False)
    def run(self):
        """Main loop: pick up a queued command if any, then run one bms_loop pass."""
        print('bmsProc runloop begin.')
        while self.scanning:
            try:
                if not self.data_from_window.empty():
                    self.jbdcmd = self.data_from_window.get()
                    print('bmsProc: cmd recvd: ', self.jbdcmd)
                self.bms_loop()
            except Exception as e:
                # Broad catch keeps the poll loop alive across transient serial
                # errors; failures are only logged.
                print('bmsProc: exception: ', e)
    def bms_loop(self):
        """Dispatch on the current command; one-shot commands reset back to polling (0)."""
        if self.jbdcmd == 0:
            #print('bmsProc.loop: ', self.jbdcmd)
            self.poller()
        elif self.jbdcmd == 1:
            #print('bmsProc.loop: ', self.jbdcmd)
            self.eeprom_read()
            self.jbdcmd = 0
        elif self.jbdcmd == 2:
            self.j.clearErrors()
            self.jbdcmd = 0
        # NOTE(review): this branch assumes jbdcmd is a sequence carrying an
        # EEPROM payload in element 0; any unrecognized *int* command would
        # raise TypeError here (caught and logged by run()). Confirm intended.
        elif len(self.jbdcmd[0]) > 1:
            #print('bmsProc::run:serloop; ', self.jbdcmd)
            self.eeprom_write(self.jbdcmd[0])
            self.jbdcmd = 0
    def poller(self):
        """Read cell + basic info, timestamp the pass, and ship it tagged 0."""
        lastTime = self.t1
        self.t1 = time.time_ns() / 1000000000
        self.cellData = self.j.readCellInfo()
        #print('bmsProc: basicPoller: cellData: ', self.cellData)
        self.basicData = self.j.readBasicInfo()
        #print('bmsProc: basicPoller: basicData: ', self.basicData)
        self.t2 = time.time_ns() / 1000000000
        runtime = self.t2 - self.t1        # time spent reading this pass
        looptime = self.t1 - lastTime      # interval since the previous pass
        msg = (0, self.cellData, self.basicData, looptime, runtime)
        self.data_to_emitter.send(msg)
        #print('bmsProc: basicPoller finished')
        #print(self.cellData, '\n', self.basicData, '\n', looptime, runtime)
    def eeprom_read(self):
        """Dump the EEPROM and ship it tagged 1.

        NOTE(review): both branches are identical except that an open port is
        closed first -- presumably readEeprom() reopens it; confirm against the
        JBD driver, otherwise the branch can be collapsed.
        """
        if self.j.s.isOpen():
            self.j.close()
            msg = (1, self.j.readEeprom())
            self.data_to_emitter.send(msg)
        else:
            msg = (1, self.j.readEeprom())
            self.data_to_emitter.send(msg)
    def eeprom_write(self, update_eeprom):
        """Write the supplied EEPROM payload to the BMS."""
        self.j.writeEeprom(update_eeprom)
class BACSerialProcess(Process):
    """Child process that owns the MODBUS-RTU link to the BAC motor controller.

    Commands arrive on ``from_window`` (multiprocessing Queue) as lists whose
    first element is an integer opcode: 0 polls the fault/telemetry block,
    positive values drive the range limiter's current limit, and negative
    values are configuration commands (profiles, assist level, feature bits,
    diagnostics, access-code discovery). Responses go back through
    ``to_emitter`` (Pipe) for the Qt emitter thread to republish to the GUI.
    """
    def __init__(self, setup, to_emitter: Pipe, from_window: Queue, BAC):
        super(BACSerialProcess, self).__init__()
        self.data_to_emitter = to_emitter    # pipe endpoint back to the GUI process
        self.data_from_window = from_window  # command queue from the GUI
        self.BAC = BAC                       # register map + serial port settings
        self.workercmd = 0                   # active opcode; 0 = telemetry poll
        self.lastworkercmd = None            # restored after automatic commands (-32)
        self.setup = setup                   # profile register lists from setup
        self.bmsmsg = None                   # (SOC, temperature) forwarded to the BAC
        self.battamps = 0                    # payload for opcode -30
        self.fluxcommand = 0                 # payload for opcode -31 (field weakening)
        self.newcmd = False
        self.running = True
        self.time1 = time.time_ns() / 1000000000
        self.time2 = self.time1
    def run(self):  # Executed via .start() on the instance, NOT .run()! Method name MUST be run.
        # The serial client is created here so it lives in the child process
        # (it is not picklable across the fork/spawn boundary).
        self.client = modbus_rtu.RtuMaster(self.BAC.port, self.BAC.baudrate, self.BAC.bytesize, self.BAC.parity, self.BAC.stopbits)
        self.client.set_timeout(1)
        while self.running:
            if not self.data_from_window.empty():
                self.lastworkercmd = self.workercmd
                message = self.data_from_window.get()
                if len(message) == 1:  # Opcode only
                    self.workercmd = message[0]
                elif message[0] == -30:  # Opcode + battery current limit payload
                    self.workercmd = message[0]
                    self.battamps = message[1]
                elif message[0] == -31:  # Opcode + field-weakening payload
                    self.workercmd = message[0]
                    # Bug fix: payload was assigned to self.flux, which
                    # run_command() never reads -- opcode -31 always wrote the
                    # stale (initial 0) fluxcommand.
                    self.fluxcommand = message[1]
                elif message[0] == -32:  # Opcode + (SOC, temperature) from the BMS
                    self.workercmd = message[0]
                    self.bmsmsg = (message[1], message[2])
                elif message[0] == -34:  # Opcode + rated motor current payload
                    self.workercmd = message[0]
                    self.motamps = message[1]
            # Run every pass so the opcode-0 telemetry poll stays fast; the
            # dispatch above only happens when a new message is queued.
            self.run_command()
    def run_command(self):
        """Execute the current opcode; opcode 0 (fault/telemetry poll) runs continuously."""
        if self.workercmd == 0:
            # Stream the 9 telemetry registers starting at 'Faults'.
            self.data_to_emitter.send(self.reads('Faults', 9))
        elif self.workercmd > 0:  # Positive ints reserved for simple rangelimiter integration
            print('Rangelimiter received val: ', self.workercmd)
            self.write('Remote_Maximum_Battery_Current_Limit', self.workercmd)  # Enable to limit
            self.data_to_emitter.send(self.reads('Faults', 9))
            # Remain in this opcode while the rangelimiter is enabled.
        elif self.workercmd == -32:  # Update remote battery SOC/temperature for BAC foldbacks.
            self.writes('Remote_Battery_SOC', self.bmsmsg)
            # Periodic/automatic command: resume the prior opcode afterwards.
            self.workercmd = self.lastworkercmd
        elif self.workercmd == -11:  # Set Profile 1
            # Possibly need to scale up Maximum braking torque/current parameters
            # (a % of rated motor current) for consistent regen across profiles.
            for register, value in self.setup['profile1']:
                self.write_scaled(register, value)
            self.workercmd = 0
            print('profile 1 called')
        elif self.workercmd == -12:  # Set Profile 2
            for register, value in self.setup['profile2']:
                self.write_scaled(register, value)
            self.workercmd = 0
            print('profile 2 called')
        elif self.workercmd == -13:  # Set Profile 3
            for register, value in self.setup['profile3']:
                self.write_scaled(register, value)
            self.workercmd = 0
            print('profile 3 called')
        elif self.workercmd < 0 and self.workercmd >= -10:  # -1 to -10 for Assist Levels 1-9
            self.write_scaled('Remote_Assist_Mode', -self.workercmd)  # -(-x) = positive x
            self.workercmd = 0
        elif self.workercmd == -14:  # Clear Fault codes (register 508)
            # Consistency fix: use self.BAC.address (was module-level BAC.address).
            self.client.execute(self.BAC.address, cst.WRITE_MULTIPLE_REGISTERS, 508, output_value=[1])
            self.workercmd = 0
        elif self.workercmd == -15:
            self.write('Remote_Maximum_Battery_Current_Limit', 0)  # Reset range power limiter-- ensure 0 = ignore.
            # Todo: Check if 0 = ignore, then keep track of profile state to choose limit.
            self.workercmd = 0
        elif self.workercmd == -16:  # Antitheft disable: set Features3 bit 3
            bits = self.read('Features3')
            modbyte = bits | (1 << 3)  # equivalent to the old masked expression
            print('Antitheft disable. bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ', "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features3', modbyte)  # 8 = 4th binary bit
            self.workercmd = 0
        elif self.workercmd == -17:  # Antitheft enable: clear Features3 bit 3
            bits = self.read('Features3')
            modbyte = bits & ~(1 << 3)
            print('Antitheft enable. Features3 bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ',
                  "{0:b}".format(bits), 'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features3', modbyte)
            self.workercmd = 0
        elif self.workercmd == -18:  # Reverse (cruise input) enable
            bits = self.read('Features3')
            # NOTE(review): "enable" CLEARS bit 4 (and -19 sets it) -- the bit
            # presumably means "reverse disabled"; confirm against the BAC manual.
            modbyte = bits & ~(1 << 4)
            print('Reverse enable. Features3 bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ', "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features3', modbyte)  # 16 = 5th binary bit
            self.workercmd = 0
        elif self.workercmd == -19:  # Reverse (cruise input) disable: set Features3 bit 4
            bits = self.read('Features3')
            modbyte = bits | (1 << 4)
            print('Reverse disable. Features3 bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ', "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features3', modbyte)
            self.workercmd = 0
        elif self.workercmd == -20:  # Throttle bypass assist level enable: set Features3 bit 0
            bits = self.read('Features3')
            modbyte = bits | (1 << 0)
            print('Throttle bypass assist level enable. Features3 bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ',
                  "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features3', modbyte)
            self.workercmd = 0
        elif self.workercmd == -21:  # Throttle bypass assist level disable: clear Features3 bit 0
            bits = self.read('Features3')
            modbyte = bits & ~(1 << 0)
            print('Throttle bypass assist level disable. Features3 bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ',
                  "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features3', modbyte)
            self.workercmd = 0
        elif self.workercmd == -22:  # Walk mode enable: set Features bit 11
            bits = self.read('Features')
            modbyte = bits | (1 << 11)
            print('Walk mode enable. Features bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ', "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features', modbyte)
            self.workercmd = 0
        elif self.workercmd == -23:  # Walk mode disable: clear Features bit 11
            bits = self.read('Features')
            modbyte = bits & ~(1 << 11)
            print('Walk mode disable. Features bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ',
                  "{0:b}".format(bits), 'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features', modbyte)
            self.workercmd = 0
        elif self.workercmd == -24:  # Engine braking enable: set Features bit 13
            bits = self.read('Features')
            modbyte = bits | (1 << 13)
            print('Engine braking enable. Features bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ',
                  "{0:b}".format(bits),
                  'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features', modbyte)
            self.workercmd = 0
        elif self.workercmd == -25:  # Engine braking disable: clear Features bit 13
            bits = self.read('Features')
            modbyte = bits & ~(1 << 13)
            print('Engine braking disable. Features bits: ', bits, 'modbyte: ', modbyte, '\n', 'bits: ',
                  "{0:b}".format(bits), 'modbyte: ', "{0:b}".format(modbyte))
            self.write('Features', modbyte)
            self.workercmd = 0
        elif self.workercmd == -26:
            print('Motor Position Sensor Type set Hall')
            self.write('Motor_Position_Sensor_Type', 0)
            self.workercmd = 0  # Bug fix: was never reset, so the write repeated every loop pass.
        elif self.workercmd == -27:
            print('Motor Position Sensor Type set Hall start & Sensorless')
            self.write('Motor_Position_Sensor_Type', 1)
            self.workercmd = 0  # Bug fix: was never reset.
        elif self.workercmd == -28:
            print('Motor Position Sensor Type set Sensorless Only')
            self.write('Motor_Position_Sensor_Type', 2)
            self.workercmd = 0  # Bug fix: was never reset.
        elif self.workercmd == -29:  # Diagnostics Mode-- Poller
            # Registers 270..277: Throttle_Voltage, Brake_1_Voltage, Brake_2_Voltage,
            # (273), (274), Analog_BMS_SOC_Voltage, Digital Inputs bitmask, Warnings bitmask.
            input_voltages = self.reads('Throttle_Voltage', 8)
            EbikeFlags = self.read('Ebike_Flags')
            SensorlessState = self.read('Sensorless_State')
            EbikeFlagsLabels = ['Brake', 'Cut out', 'Run Req', 'Pedal', 'Regen', 'Walk', 'Walk Start', 'Throttle',
                                'Reverse Mode', 'Interlock Off', 'Pedal Ramps', 'Gate Req', 'Gate Enabled',
                                'Boost Mode', 'Antitheft', 'Free Wheel']
            DigitalInputsLabels = ['Hall C', 'Hall B', 'Hall A', 'Pedal First Input', 'Cruise Input', 'Brake 1 Input',
                                   'Brake 2 Input', 'HWOC Pin', 'HWOC Latch', 'Remote Brake', 'Remote Pwr Rating Sw',
                                   'Remote Regen1', 'Remote Regen2', 'Remote Spd Rating Sw', 'Throttle Spd Rating Sw', 'N/A']
            WarningsLabels = ['Communication timeout', 'Hall sensor', 'Hall stall', 'Wheel speed sensor', 'CAN Bus',
                              'Hall sector', 'Hall transition', 'VdcLowFLDBK', 'VdcHighFLDBK', 'MotorTempFLDBK',
                              'ControllerTempFLDBK', 'LowSoCFLDBK', 'HiSoCFLDBK', 'I2tFLDBK', 'Reserved',
                              'LIN - BMS communication timeout']
            SensorlessStateEnum = ['Sensorless Idle', 'Sensorless DC-Ramp', 'Sensorless DC-Hold',
                                   'Sensorless FreqRamp', 'Sensorless CloseLoop', 'Sensorless Stall']
            # Bug fix: the old code indexed "{0:b}".format(value), which is
            # MSB-first and unpadded, so labels[i] did not correspond to bit i.
            # _flagged() maps bit i -> labels[i] directly.
            DigitalInputsFlagged = self._flagged(input_voltages[6], DigitalInputsLabels)
            WarningsFlagged = self._flagged(input_voltages[7], WarningsLabels)
            EbikeFlagsFlagged = self._flagged(EbikeFlags, EbikeFlagsLabels)
            outmsg = {'Throttle_Voltage': input_voltages[0]/self.BAC.ObdicScale[self.BAC.ObdicAddress['Throttle_Voltage']],
                      'Brake_1_Voltage': input_voltages[1]/self.BAC.ObdicScale[self.BAC.ObdicAddress['Brake_1_Voltage']],
                      'Brake_2_Voltage': input_voltages[2] / self.BAC.ObdicScale[self.BAC.ObdicAddress['Brake_2_Voltage']],
                      'Analog_BMS_SOC_Voltage': input_voltages[5] / self.BAC.ObdicScale[self.BAC.ObdicAddress['Analog_BMS_SOC_Voltage']],
                      'EbikeFlags': EbikeFlagsFlagged, 'DigitalInputs': DigitalInputsFlagged, 'Warnings': WarningsFlagged,
                      'SensorlessState': SensorlessStateEnum[SensorlessState]}
            self.data_to_emitter.send(outmsg)
            # Register reference (from the BAC register map):
            #   Digital Inputs 276, Throttle_Voltage 270, Brake_1_Voltage 271,
            #   Brake_2_Voltage 272, Analog_BMS_SOC_Voltage 275, Warnings 277,
            #   Warnings2 359, Sensorless State 330, Ebike Flags 327 (bit 0 Brake,
            #   1 Cutout, 2 Run Req, 3 Pedal, 4 Regen, 5 Walk, 6 Walk Start,
            #   7 Throttle, 8 Reverse, 9 Interlock off, 10 Pedal ramp active,
            #   11 Gate enable request, 12 Gate enabled, 13 Boost, 14 Antitheft,
            #   15 Free wheel), Ebike Flags2 488 (0 Regen w/o analog input, 1 Cruise).
        elif self.workercmd == -30:  # Adjust max battery power %
            self.write_scaled('Battery_Current_Limit', self.battamps)
            self.workercmd = 0
        elif self.workercmd == -31:
            self.write_scaled('Maximum_Field_Weakening_Current', self.fluxcommand)
            self.workercmd = 0  # Bug fix: was never reset, so the write repeated every loop pass.
        elif self.workercmd == -33:  # Hack access level code.
            print('Beginning brute-force of BAC User Access Level codes.')
            # Keys believed to be mirrored in spare registers 430-432; try those first.
            code1_spare = self.read('Spare_430')
            self.write('User_Access_Level', code1_spare)
            read = self.read('User_Access_Level')
            if read == 1:
                print('User access code cracked! Level ', read, ' code is #', code1_spare)
                self.data_to_emitter.send([read, code1_spare])
                self.code1 = code1_spare  # Bug fix: stored the level, not the code.
            code2_spare = self.read('Spare_431')
            self.write('User_Access_Level', code2_spare)
            read = self.read('User_Access_Level')
            if read == 2:
                print('User access code cracked! Level ', read, ' code is #', code2_spare)
                self.data_to_emitter.send([read, code2_spare])
                self.code2 = code2_spare  # Bug fix: stored the level, not the code.
            code3_spare = self.read('Spare_432')
            self.write('User_Access_Level', code3_spare)
            read = self.read('User_Access_Level')
            if read == 3:
                print('User access code cracked! Level ', read, ' code is #', code3_spare)
                self.data_to_emitter.send([read, code3_spare])
                self.code3 = code3_spare  # Bug fix: stored the level, not the code.
            # Fallback brute force. Bug fix: the local code1/2/3 flags were
            # never assigned, so the success branch was dead and the loop
            # always ran to the 100000 cap; they now hold the found codes.
            val = 0
            running = True
            code1 = False
            code2 = False
            code3 = False
            while running:
                val += 1
                self.write('Parameter_Access_Code', val)
                read = self.read('User_Access_Level')
                if read == 1:
                    print('User access code cracked! Level ', read, ' code is #', val)
                    self.data_to_emitter.send([read, val])
                    self.code1 = val
                    code1 = val
                elif read == 2:
                    # Bug fix: previously printed `read` (the level) as the code.
                    print('User access code cracked! Level 2 code is #', val)
                    self.data_to_emitter.send([read, val])
                    self.code2 = val
                    code2 = val
                elif read == 3:
                    print('User access code cracked! Level 3 code is #', val)
                    self.data_to_emitter.send([read, val])
                    self.code3 = val
                    code3 = val
                if code1 and code2 and code3:
                    running = False
                    print("Code1:", code1, "Code2:", code2, "Code3:", code3)
                    self.write('Parameter_Access_Code', code3)  # leave highest access active
                elif val > 100000:
                    running = False
            self.workercmd = 0  # Bug fix: was never reset, so the brute force restarted every loop pass.
        elif self.workercmd == -34:
            self.write_scaled('Rated_Motor_Current', self.motamps)
            self.workercmd = 0  # Bug fix: was never reset, so the write repeated every loop pass.
    def _flagged(self, value, labels):
        """Return the labels whose bit is set in `value` (bit i -> labels[i])."""
        return [label for i, label in enumerate(labels) if (value >> i) & 1]
    def read(self, address):
        """Read one holding register by symbolic name; returns the raw (unscaled) value."""
        output = self.client.execute(self.BAC.address, cst.READ_HOLDING_REGISTERS, self.BAC.ObdicAddress[address], 1)
        return output[0]
    def reads(self, address, length):
        """Read `length` consecutive registers starting at symbolic `address`."""
        return self.client.execute(self.BAC.address, cst.READ_HOLDING_REGISTERS, self.BAC.ObdicAddress[address], length)
    def read_scaled(self, address):
        """Read one register and divide by its ObdicScale factor."""
        val = (self.client.execute(self.BAC.address, cst.READ_HOLDING_REGISTERS, self.BAC.ObdicAddress[address], 1))
        scalar = self.BAC.ObdicScale[self.BAC.ObdicAddress[address]]
        output = tuple([x / scalar for x in val])
        return output[0]
    def reads_scaled(self, address, length):
        """Read `length` registers; all values are scaled by the FIRST register's factor."""
        val = (self.client.execute(self.BAC.address, cst.READ_HOLDING_REGISTERS, self.BAC.ObdicAddress[address], length))
        scalar = self.BAC.ObdicScale[self.BAC.ObdicAddress[address]]
        output = tuple([x / scalar for x in val])
        return output
    def write(self, address, value):
        """Write one raw value to the register named `address`."""
        self.client.execute(self.BAC.address, cst.WRITE_MULTIPLE_REGISTERS, self.BAC.ObdicAddress[address], output_value=[value])
    def writes(self, address, value):
        """Write a sequence of raw values starting at the register named `address`."""
        self.client.execute(self.BAC.address, cst.WRITE_MULTIPLE_REGISTERS, self.BAC.ObdicAddress[address], output_value=value)
    def write_scaled(self, address, value):
        """Multiply `value` by the register's ObdicScale factor and write it."""
        # todo: use returned values (register, 1 if written) to check for serial errors?
        write = int(value * self.BAC.ObdicScale[self.BAC.ObdicAddress[address]])
        self.client.execute(self.BAC.address, cst.WRITE_MULTIPLE_REGISTERS, self.BAC.ObdicAddress[address], output_value=[write])
class AmpyDisplay(QtWidgets.QMainWindow):
    """Main application window: renders telemetry and relays rider commands
    to the BAC/BMS serial worker processes."""
    #bacqueue = QtCore.pyqtSignal(int)
    # Qt signals for pushing commands/data toward the serial workers.
    # NOTE(review): several appear unconnected in this file -- confirm which
    # are still in use versus superseded by the bacqueue/bmsqueue Queues.
    powercmd = QtCore.pyqtSignal(int)
    fluxcmd = QtCore.pyqtSignal(float)
    hackaccesscmd = QtCore.pyqtSignal(int)
    bmsmsg_bac = QtCore.pyqtSignal(object)
    bmsbasiccmd = QtCore.pyqtSignal(object)
    bmseepromcmd = QtCore.pyqtSignal(object)
    def __init__(self, setup, bacqueue: Queue, bmsqueue: Queue, processManager: BMSProcessEmitter, *args, **kwargs, ):
        """Build the display.

        Parameters
        ----------
        setup : dict-like config (battery geometry, wheel size, units, pins,
            GPIO profile switch flag) loaded from setup.csv/SQL.
        bacqueue / bmsqueue : command queues feeding the BAC/BMS worker processes.
        processManager : emitter thread relaying BMS worker messages to this GUI.
        """
        self.setup = setup
        self.battseries = setup['battery'][0]
        self.battparallel = setup['battery'][1]
        self.battah = setup['battery'][2]
        self.wheelcircum = setup['wheel'] # In mm
        self.speedparse = True
        self.first_floop = True
        self.lockpin = setup['pin']
        if setup['units'] == 'imperial':
            self.units = False
        elif setup['units'] == 'metric':
            self.units = True
        else:
            print('Setup.csv \"units\" parameter not recognized!')
        #
        super().__init__(*args, **kwargs)
        # DISPLAY AND VEHICLE VARIABLES
        self.bmsqueue = bmsqueue
        self.bacqueue = bacqueue
        self.processEmitter = processManager
        self.processEmitter.daemon = True
        self.processEmitter.start()
        self.bmseeprom_initter = True
        self.bmstemps = (0, 0, 0, 0)
        self.bmscmd = 10 # 0 = Basic Poll, 1 = Read EEPROM, 2 = Write EEPROM, 10 = Poll then EEPROM init
        self.chargestate = False
        self.bmsCall() # Init EEPROM.
        self.message = {}
        self.profile = 0
        self.assist_level = 0
        self.opt_tripRangeValue = None # Check Wh/mi every interval with floop/lastfloop for Range fxn only
        self.opt_throttleAssistBool = None
        self.opt_battaValue = None # todo: update in SQL setup
        self.opt_fluxValue = None
        self.tripReset(True) # To instantiate all floats, lists
        # For lifestats:
        self.lifestat_iter_ID = 0
        # Todo: update profilestate in sql init setup
        self.lifestat_ah_used, self.lifestat_ah_charged, self.lifestat_ahregen, self.lifestat_wh, self.lifestat_whregen, \
        self.lifestat_dist, self.lifestat_Rbatt = \
            float(0), float(0), float(0), float(0), float(0), float(0), float(0)
        # Range limiter PID: gains derived from the dead-time heuristics below.
        self.pid_kp = 0.09375
        self.pid_ki = 0.032
        self.pid_kd = 0.008
        self.pid = pid.PID(self.pid_kp, self.pid_ki, self.pid_kd, setpoint=self.flt_range_limit,
                           sample_time=0.016, output_limits=(0, 1))
        self.pid.auto_mode = False # Don't run PID calculation until enabled. Possibly could replace trip_range_enabled
        # Kp = 1.2 * (width of process 'bump') / (amplitude * dead time)
        ## Kp = 1.2 * 0.1 / (80*0.016
        # Kt = 2*dead time
        ## Kt = 0.032
        # Kd = 0.5*dead time
        #RPi GPIO Brightness for Makerplane 5" display (pin18) conditional for PC development
        #if platsys() == 'Linux':
        #Makerplane Brightness Output
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(18, GPIO.OUT)
        self.pwm = GPIO.PWM(18, 100)
        self.pwm.start(0)
        # Profile Selector Switch
        GPIO.setup([22, 23], GPIO.IN, GPIO.PUD_DOWN)
        GPIO.add_event_detect(22, GPIO.BOTH)
        GPIO.add_event_detect(23, GPIO.BOTH)
        # Iterators and thresholds for averaging, interpolation, etc
        self.mean_length = 18750 # Average for trip_ floats over last 5 minutes (300s / 16ms)
        # trip_wh, trip_ah, trip_soc, trip_range based on cumulative integrals instead
        self.exceptions = 0
        self.iter = 0
        self.iter_threshold = 3 # Must be odd number for accurate/low-resource Simpsons integration
        self.iter_sql = 0
        self.iter_sql_threshold = 20 # ~3 hz
        self.iter_bmsmsg_threshold = 11
        self.iter_bmsmsg = 0
        self.iter_attribute_slicer = 0
        self.iter_attribute_slicer_threshold = self.mean_length + 500 # 500 = 8 seconds; re-slice for new means each 8 sec.
        self.iter_interp_threshold = 3750 # Equivalent time vs. mean_length
        self.trip_selector = 1
        self.displayinvert_bool = False
        #self.trip_selected = True
        self.gui_dict = {}
        # Set up the GUI
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowFlags(QtCore.Qt.WindowFlags(QtCore.Qt.FramelessWindowHint))
        self.statusBar().setVisible(False)
        #if platsys() == 'Linux':
        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.BlankCursor))
        #QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
        # Initialize stored profile/assist states;
        # First Setup SQL and populate lifestats, send optional controls to ASI
        self.sql_conn = sqlite3.connect(os.path.abspath(os.path.dirname(__file__)) + '/' + 'ampy.db')
        self.sql = self.sql_conn.cursor()
        self.SQL_init() # Updates ID's to latest in table, creates tables if not exists.
        # Init optional controls;
        try:
            if self.profile == -11:
                self.ui.ProfileRb1.setChecked(True)
            elif self.profile == -12:
                self.ui.ProfileRb2.setChecked(True)
            elif self.profile == -13:
                self.ui.ProfileRb3.setChecked(True)
            self.ui.AssistSlider.setValue(int(abs(self.assist_level)-1))
            #self.ui.AssistSliderLabel.setText(str('Assist: ', int(abs(self.assist_level)-1)))
            self.ui.AssistSliderLabel.setText('Assist: ' + str(abs(self.assist_level)))
            self.bacqueue.put([self.profile]) # Assist emitted later
            self.bacqueue.put([self.assist_level])
            # todo: add below to sql init table. Or, setup SQL setup to write 0 = False val = true value, then check all
            self.signalThrottleBypassAssist(self.opt_throttleAssistBool)
            #self.signalBatta(True, self.opt_battaValue)
            #self.signalFlux(True, self.opt_fluxValue)
        except Exception as e:
            print('init: ', e)
        # Connect buttons
        self.ui.OptionsBtn.clicked.connect(self.popupOptions)
        #self.ui.BMSButton.clicked.connect(self.popupBms) # Moved to bmsBasicUpdate to prevent error on early access
        self.ui.BatterySOCReset.clicked.connect(self.socreset)
        self.ui.Reverse.toggled.connect(lambda: self.signalReverse(self.ui.Reverse.isChecked()))
        #self.ui.Reverse.setStyleSheet()
        #try:
        #    self.ui.PID_Kp_Slider.valueChanged.connect(lambda: self.pid_tuner_update(self.ui.PID_Kp_Slider.value(),
        #                                                                            self.ui.PID_Ki_Slider.value(),
        #                                                                            self.ui.PID_Kd_Slider.value()))
        #    self.ui.PID_Ki_Slider.valueChanged.connect(lambda: self.pid_tuner_update(self.ui.PID_Kp_Slider.value(),
        #                                                                            self.ui.PID_Ki_Slider.value(),
        #                                                                            self.ui.PID_Kd_Slider.value()))
        #    self.ui.PID_Kd_Slider.valueChanged.connect(lambda: self.pid_tuner_update(self.ui.PID_Kp_Slider.value(),
        #                                                                            self.ui.PID_Ki_Slider.value(),
        #                                                                            self.ui.PID_Kd_Slider.value()))
        #except AttributeError:
        #    pass
        self.ui.LockButton.clicked.connect(lambda: self.signalAntitheft(True))
        #self.ui.RangeBtn.toggled.connect(lambda: self.trip_range_enable(
        #    self.ui.RangeBtn.isChecked(), self.ui.RangeSlider.value()))
        #self.ui.RangeSlider.valueChanged.connect(lambda: self.trip_range_enable(
        #    self.trip_range_enabled, self.ui.RangeSlider.value()))
        self.ui.AssistSlider.valueChanged.connect(self.signalAssistLevel)
        self.ui.AssistSlider.setMaximum(9)
        # self.ui.AssistSlider.setTickInterval(1)
        # self.ui.AssistSlider.setTickPosition(QtWidgets.QSlider.TicksBothSides)
        self.ui.ProfileRb1.toggled.connect(lambda: self.signalProfile(self.ui.ProfileRb1.isChecked(), -11))
        ################# Convert profile1 to integers? floop = 0? Faultreset = 10, assist = 11?
        self.ui.ProfileRb2.toggled.connect(lambda: self.signalProfile(self.ui.ProfileRb2.isChecked(), -12))
        self.ui.ProfileRb3.toggled.connect(lambda: self.signalProfile(self.ui.ProfileRb3.isChecked(), -13))
        self.ui.Trip_Selector_1.toggled.connect(lambda: self.tripselect(self.ui.Trip_Selector_1.isChecked(), 1))
        self.ui.Trip_Selector_2.toggled.connect(lambda: self.tripselect(self.ui.Trip_Selector_2.isChecked(), 2))
        self.ui.Trip_Selector_3.toggled.connect(lambda: self.tripselect(self.ui.Trip_Selector_3.isChecked(), 3))
        # self.ui.Trip_Selector_Debug.toggled.connect(lambda: self.debug(self.ui.Trip_Selector_Debug.isChecked(), 'debug'))
        # Define display widgets
        # self.ui.TripDistance.setText('\xB0'+'C') # DegreeC. 2-byte unicode + escape\ to get special char.
        self.ui.CheckEngineButton.clicked.connect(self.popupFault)
        self.ui.CheckEngineButton.hide()
        #self.ui.SpeedGauge_Static.set_MaxValue(80)
        #self.ui.SpeedGauge_Static.set_scala_main_count(8)
        #self.ui.SpeedGauge_Static.set_gauge_color_inner_radius_factor(980)
        #self.ui.SpeedGauge_Static.set_enable_Needle_Polygon(False)
        #self.ui.SpeedGauge_Static.set_enable_CenterPoint(False)
        #self.ui.SpeedGauge_Static.set_enable_value_text(False)
        #self.ui.SpeedGauge_Static.set_total_scale_angle_size(240)
        #self.ui.SpeedGauge_Static.set_start_scale_angle(150)
        #self.ui.SpeedGauge_Static.initial_scale_fontsize = 30
        #self.ui.SpeedGauge_Static.text_radius_factor = 0.75
        #self.ui.SpeedGauge.set_enable_fine_scaled_marker(False)
        #self.ui.SpeedGauge.set_enable_big_scaled_grid(False)
        if self.units:
            self.ui.SpeedGaugeLabelUnits.setText('kmh')
        else:
            self.ui.SpeedGaugeLabelUnits.setText('mph')
        # Speed gauge styling (analog-style arc, 240 degrees starting at 150).
        self.ui.SpeedGauge.set_enable_value_text(False)
        self.ui.SpeedGauge.set_gauge_color_inner_radius_factor(950)
        self.ui.SpeedGauge.set_scale_polygon_colors([[0.00, QtCore.Qt.red], [0.25, QtCore.Qt.yellow], [1,
                                                                                                       QtCore.Qt.green]])
        self.ui.SpeedGauge.set_enable_filled_Polygon(True)
        #self.ui.SpeedGauge.enable
        self.ui.SpeedGauge.set_enable_barGraph(False)
        #self.ui.SpeedGauge.set_enable_ScaleText(False)
        self.ui.SpeedGauge.set_MaxValue(80)
        self.ui.SpeedGauge.set_total_scale_angle_size(240)
        self.ui.SpeedGauge.set_start_scale_angle(150)
        self.ui.SpeedGauge.set_scala_main_count(8)
        self.ui.SpeedGauge.initial_scale_fontsize = 30
        # Power gauge styling (bar-graph arc, same geometry as the speed gauge).
        self.ui.PowerGauge.set_scale_polygon_colors([[0.00, QtCore.Qt.red], [0.15, QtCore.Qt.yellow], [1,
                                                                                                       QtCore.Qt.green]])
        self.ui.PowerGauge.set_enable_value_text(False)
        self.ui.PowerGauge.set_gauge_color_inner_radius_factor(950)
        self.ui.PowerGauge.set_enable_filled_Polygon(False)
        self.ui.PowerGauge.set_enable_barGraph(True)
        self.ui.PowerGauge.set_MaxValue(24)
        self.ui.PowerGauge.set_total_scale_angle_size(240)
        self.ui.PowerGauge.set_start_scale_angle(150)
        self.ui.PowerGauge.set_scala_main_count(8)
        self.ui.PowerGauge.scala_subdiv_count = 3
        self.ui.PowerGauge.initial_scale_fontsize = 30
        # todo: check which one of these is adjusting stretch properly
        #for i in range(6):
        #    self.ui.TripBoxGrid.setColumnMinimumWidth(i, 200)
        self.ui.Trip_1_1.sizePolicy().setHorizontalStretch(1)
        self.ui.Trip_1_2.sizePolicy().setHorizontalStretch(1)
        self.ui.Trip_1_3.sizePolicy().setHorizontalStretch(1)
        self.ui.Trip_1_1_prefix.sizePolicy().setHorizontalStretch(1)
        self.ui.Trip_1_2_prefix.sizePolicy().setHorizontalStretch(1)
        self.ui.Trip_1_3_prefix.sizePolicy().setHorizontalStretch(1)
        # Update floop with SQL-initiated tripstat lists for first run.
        try:
            self.floop = {'Faults': [], 'Powerboard_Temperature': 0, 'Vehicle_Speed': self.list_speed[-1:],
                          'Motor_Temperature': self.list_motor_temp[-1:], 'Motor_Current': self.list_motor_amps[-1],
                          'Motor_RPM': self.list_motor_rpm[-1], 'Percent_Of_Rated_RPM': 0.0,
                          'Battery_Voltage': self.list_batt_volts[-1], 'Battery_Current': self.list_batt_amps[-1]}
        except (IndexError, ValueError):
            # Fresh database: no prior samples, fall back to zeroed telemetry.
            self.floop = {'Faults': [], 'Powerboard_Temperature': 0, 'Vehicle_Speed': 0, 'Motor_Temperature': 0,
                          'Motor_Current': 0, 'Motor_RPM': 0, 'Percent_Of_Rated_RPM': 0,
                          'Battery_Voltage': 0, 'Battery_Current': 0}
        # Run
        self.time1 = self.ms()
        self.time2 = self.ms()
        # self.timeinterval = 0.016 #
        self.show()
    @QtCore.pyqtSlot(object)
    #### Fast Loop (FLOOP) Processing ####
    def floopReceive(self, message): # You can update the UI from here.
        """Hot-path slot for each fast-loop telemetry message from the BAC worker.

        Parses the raw register block, appends it to the trip-history lists,
        then runs the heavier work (integration, GUI refresh, SQL commit, BMS
        bookkeeping) only when the corresponding iterator crosses its threshold,
        keeping the per-message cost low.
        """
        self.gettime() # Calculate msg interval, increment iterators
        if self.speedparse:
            self.floop = BAC.floop_parse(message)
        else:
            self.floop = BAC.parse(message, 'Faults')
        # attributes for this class from BAC.BACModbus, e.g. the scales/keynames, and bring bitflags function into
        # this class also. TWO THIRDS of recieve_floop time is spent on this one line!!!
        self.floopToLists()
        self.SQL_tripstat_upload()
        if self.opt_tripRangeValue:
            self.tripRangeLimiter()
        if self.iter_attribute_slicer >= self.iter_attribute_slicer_threshold: # Every 6 minutes, cut lists to last 5 minutes.
            self.floopSlicer()
            self.iter_attribute_slicer = 0
        if self.iter >= self.iter_threshold: # Ideally an odd number to pass even number of *intervals* to Simpsons quadratic integrator
            if self.first_floop: # Needed so socreset(), SQL has data for first init
                # Also will compensate for any self-discharge, charge since last start.
                #todo: find a way around unitasker bool in such a frequently used loop
                self.first_floop = False
                self.floopProcess() # of last -self.iter in lists from floop_to_list()
                self.socreset()
                # self.SQL_lifestat_upload() # todo: update fxn for new table, if not bmsinitted, upload...
            else:
                self.floopProcess()
            if self.setup['gpioprofile']: # If gpioprofiles in setup.csv, set profile with SPTT switch
                self.checkGPIO()
            self.guiPrepare()
            self.guiUpdate()
            self.iter = 0
        if self.iter_sql >= self.iter_sql_threshold: # 3hz
            self.sql_conn.commit() # Previously in sql_tripstat_upload but moved here for massive speedup
            self.iter_sql = 0
        if self.iter_bmsmsg >= self.iter_bmsmsg_threshold: #0.5hz
            self.SQL_update_setup()
            self.SQL_lifestat_upload_bms()
            self.iter_bmsmsg = 0
##################
# Message indices:
# [0] = 258 = Faults
# [1] = 259 = Powerboard_Temperature
# [2] = 260 = Vehicle_Speed
# [3] = 261 = Motor_Temperature
# [4] = 262 = Motor_Current
# [5] = 263 = Motor_RPM
# [6] = 264 = Percent_Of_Rated_RPM
# [7] = 265 = Battery_Voltage
# [8] = 266 = Battery_Current
def floopToLists(self): # save each floop to instance attribute lists for trip stats
if self.units:
self.list_speed.append(self.floop['Vehicle_Speed'])
else:
self.list_speed.append(self.floop['Vehicle_Speed'] * 0.621371192) # 0.621371192 is Km -> Mph conversion
self.list_motor_temp.append(self.floop['Motor_Temperature'])
self.list_motor_amps.append(self.floop['Motor_Current'])
self.list_batt_volts.append(self.floop['Battery_Voltage'])
self.list_batt_amps.append(self.floop['Battery_Current'])
self.list_motor_rpm.append(self.floop['Motor_RPM'])
def floopSlicer(self): # Occassionally trim lists (averages only need last x minutes)
self.list_speed = self.list_speed[-self.mean_length:]
self.list_motor_temp = self.list_motor_temp[-self.mean_length:]
self.list_motor_amps = self.list_motor_amps[-self.mean_length:]
self.list_batt_volts = self.list_batt_volts[-self.mean_length:]
self.list_batt_amps = self.list_batt_amps[-self.mean_length:]
self.list_motor_rpm = self.list_motor_rpm[-self.mean_length:]
self.list_whmi = self.list_whmi[-self.iter_interp_threshold:] # From integral; self.mean_length/self.iter threshold = 986.842
self.list_floop_interval = self.list_floop_interval[-self.mean_length:]
def floopProcess(self):
x_interval = array([sum(([(self.list_floop_interval[-self.iter:])[:i] for i in range(1, self.iter + 1, 1)])
[i]) for i in range(self.iter)]) # calc cumulative time from list of intervals
try:
y_revsec = array(
[(self.list_motor_rpm[-self.iter:])[i] / 60 for i in range(self.iter)]) # revolutions per second to match x
except IndexError:
y_revsec = array([0 for i in range(len(x_interval))])
# Integrate distance fromm speed and increment distance counter
revolutions = simps(y_revsec, x=x_interval, even='avg')
if isnan(revolutions):
distance = 0
else:
distance = (revolutions * self.wheelcircum) / (1609344) ## miles
self.flt_dist += distance
array_volts, array_amps = array(self.list_batt_volts[-self.iter:]), array(self.list_batt_amps[-self.iter:])
y_power = [prod(array_volts[i] * array_amps[i]) for i in range(self.iter)]
y_current = array(self.list_batt_amps[-self.iter:])
# Integrate watt-seconds from speed and increment watt-hour counter
wattsec = simps(y_power, x=x_interval, even='avg')
if wattsec >= 0:
self.flt_wh += wattsec / 3600 # /(60x60) = Watt-hour
elif wattsec < 0:
self.flt_wh += wattsec / 3600
self.flt_whregen += abs(wattsec)
# Integrate amp-seconds from speed and increment amp-hour counter
ampsec = simps(y_current, x=x_interval, even='avg')
if ampsec >= 0:
self.flt_ah += ampsec / 3600
elif ampsec < 0:
self.flt_ah += ampsec / 3600
self.flt_ahregen += abs(wattsec)
self.flt_soc = ((self.battah - self.flt_ah) / self.battah) * 100 # Percent SOC from Ah (charge)
self.list_whmi.append(self.divzero(self.flt_wh, self.flt_dist))
self.flt_whmi_avg = mean(self.list_whmi[-self.iter_interp_threshold:]) # 18750 / 19 self.iter =
self.flt_whmi_inst = mean(self.list_whmi[-3:])
self.flt_range = self.divzero((self.get_battwh()), self.flt_whmi_inst) # Wh for range to account for eff.
self.flt_batt_volts = mean(self.list_batt_volts)
self.flt_batt_volts_max = max(self.list_batt_volts)
self.flt_batt_volts_min = min(self.list_batt_volts)
self.flt_batt_volts_drop = self.flt_batt_volts_min - self.flt_batt_volts_max
self.flt_batt_amps_max = max(self.list_batt_amps)
self.flt_motor_amps = mean(self.list_motor_amps[-self.mean_length:])
self.flt_motor_temp_max = max(self.list_motor_temp)
    def guiPrepare(self): # Prepare gui elements to avoid EOL errors during gui update
        """Format every display value into self.gui_dict for guiUpdate.

        Keeping string formatting separate from the Qt setText/setValue calls
        means a formatting error cannot leave widgets half-updated. The nine
        Trip_x_y slots are filled per the active page (self.trip_selector 1-3).
        """
        self.gui_dict['Time'] = time.strftime('%I:%M:%S', time.localtime())
        self.gui_dict['MotorTemperatureLabel'] = '{:.0f}'.format(self.floop['Motor_Temperature']) + '\xB0' + 'C' # 'T<sub>M</sub>:' +
        self.gui_dict['MotorTemperatureBar'] = int(self.floop['Motor_Temperature'])
        self.gui_dict['BatteryVoltageLabel'] = '{:.1f}'.format(self.floop['Battery_Voltage']) + '<sub>V</sub>'
        self.gui_dict['BatteryVoltageDropLabel'] = '{:.1f}'.format(self.flt_batt_volts_drop)
        self.gui_dict['BatteryVoltageBar'] = int(self.floop['Battery_Voltage'])
        self.gui_dict['BatterySOCLabel'] = 'SOC: ' + '{:.1f}'.format(self.flt_soc)
        self.gui_dict['BatterySOCBar'] = int(self.flt_soc)
        self.gui_dict['SpeedGaugeLabel'] = '{:.0f}'.format(self.floop['Vehicle_Speed'])
        self.gui_dict['PowerGaugeLabel'] = '{:.2f}'.format((self.floop['Battery_Current'] *
                                                            self.floop['Battery_Voltage']) / 1000)
        self.gui_dict['SpeedGauge'] = self.floop['Vehicle_Speed']
        self.gui_dict['PowerGauge'] = self.floop['Battery_Current'] * self.floop['Battery_Voltage']
        if self.units:
            self.gui_dict['WhmiLabel'] = '{:.1f}'.format(self.flt_whmi_inst) + '<sub>Wh/km</sub>'
        else:
            self.gui_dict['WhmiLabel'] = '{:.1f}'.format(self.flt_whmi_inst) + '<sub>Wh/mi</sub>'
        if self.trip_selector == 1: # populate for first schema:
            self.gui_dict['Trip_1_1'] = '{:.2f}'.format(self.flt_wh)
            self.gui_dict['Trip_1_2'] = '{:.2f}'.format(self.flt_whmi_avg)
            self.gui_dict['Trip_1_3'] = '{:.1f}'.format(self.flt_ah)
            self.gui_dict['Trip_2_1'] = '{:.0f}'.format(self.get_battwh())
            #self.gui_dict['Trip_2_2'] = '{:.1f}'.format(self.flt_whmi_inst)
            # NOTE(review): this slot is labelled 'Range:' in guiUpdate but
            # computes Wh/mi divided by pack Wh (the inverse of a range) --
            # confirm the intended formula (battwh / whmi?).
            self.gui_dict['Trip_2_2'] = '{:.1f}'.format(self.flt_whmi_avg / self.get_battwh())
            self.gui_dict['Trip_2_3'] = '{:.1f}'.format(self.battah - self.flt_ah)
            self.gui_dict['Trip_3_1'] = '{:.0f}'.format(self.flt_whregen)
            self.gui_dict['Trip_3_2'] = '{:.0f}'.format(self.flt_dist)
            self.gui_dict['Trip_3_3'] = '{:.1f}'.format(self.flt_ahregen)
        if self.trip_selector == 2:
            # Get indexes where speed > 0
            # assumes iter_attribute_slicer never exceeds len(list_speed) -- TODO confirm
            moving_indexes = [i for i in range(self.iter_attribute_slicer) if self.list_speed[i] > 0]
            moving_speed_list = [self.list_speed[i] for i in moving_indexes]
            # Fill dict
            self.gui_dict['Trip_1_1'] = '{:.1f}'.format(self.flt_whmi_avg / self.get_battwh())
            self.gui_dict['Trip_1_2'] = self.strfdelta(datetime.timedelta(seconds = (self.time2 - self.start_time)), '{hours}:{minutes}')
            self.gui_dict['Trip_1_3'] = '{:.0f}'.format(max(self.list_batt_amps))
            self.gui_dict['Trip_2_1'] = '{:.1f}'.format(self.flt_whmi_inst / self.get_battwh())
            # Get indexes where speed > 0, then sum flooptime for those indexes, convert to timedelta, then format
            self.gui_dict['Trip_2_3'] = '{:.1f}'.format(self.flt_batt_volts_min)
            self.gui_dict['Trip_3_1'] = '{:.0f}'.format(self.flt_motor_temp_max) # Intensive if long
            if len(moving_indexes) > 0:
                self.gui_dict['Trip_2_2'] = self.strfdelta(datetime.timedelta(seconds = sum([self.list_floop_interval[i]
                                                            for i in moving_indexes])), '{hours}:{minutes}')
                self.gui_dict['Trip_3_2'] = '{:.0f}'.format(mean(moving_speed_list))
                self.gui_dict['Trip_3_3'] = '{:.0f}'.format(max(moving_speed_list))
            else:
                # Nothing moving yet this trip: zero out the movement stats.
                self.gui_dict['Trip_2_2'], self.gui_dict['Trip_3_2'], self.gui_dict['Trip_3_3'] = str(0), str(0), str(0)
        if self.trip_selector == 3:
            # Setup gui_dict
            self.gui_dict['Trip_1_1'] = '{:.0f}'.format(self.processEmitter.basicMsg[1]['ntc0'])
            self.gui_dict['Trip_1_2'] = '{:.0f}'.format(self.processEmitter.basicMsg[1]['ntc1'])
            self.gui_dict['Trip_1_3'] = '{:.0f}'.format(self.flt_bmsmaxtemp)
            self.gui_dict['Trip_2_1'] = '{:.0f}'.format(self.processEmitter.basicMsg[1]['ntc2'])
            self.gui_dict['Trip_2_2'] = '{:.0f}'.format(self.processEmitter.basicMsg[1]['ntc3'])
            self.gui_dict['Trip_2_3'] = '{:.2f}'.format(self.processEmitter.basicMsg[1]['pack_ma'] / 1000)
            self.gui_dict['Trip_3_1'] = '{:.3f}'.format(self.flt_bmscellvrng)
            self.gui_dict['Trip_3_2'] = '{:.2f}'.format(self.flt_bmscellvmean)
            self.gui_dict['Trip_3_3'] = '{:.2f}'.format(self.flt_bmscellvmin)
    def guiUpdate(self): # Means are parsed within this fxn to update GUI
        """Push the values prepared in self.gui_dict into the Qt widgets.

        Also rewrites the nine Trip_x_y prefix labels to match the currently
        selected trip-stats page (self.trip_selector 1-3).
        """
        self.ui.Time.setText(self.gui_dict['Time'])
        # Show the check-engine button only while faults are present.
        if len(self.floop['Faults']) > 0:
            self.ui.CheckEngineButton.show()
        else:
            self.ui.CheckEngineButton.hide()
        if self.trip_selector == 1: # Update unit labels for changed trip display.
            self.ui.Trip_1_1_prefix.setText('Wh<sub>use</sub>:')
            if self.units:
                self.ui.Trip_1_2_prefix.setText('Wh/km<sub>Trip</sub>:')
            else:
                self.ui.Trip_1_2_prefix.setText('Wh/mi<sub>Trip</sub>:')
            self.ui.Trip_1_3_prefix.setText('Ah<sub>use</sub>:')
            self.ui.Trip_2_1_prefix.setText('Wh<sub>rem</sub>:')
            self.ui.Trip_2_2_prefix.setText('Range:')
            self.ui.Trip_2_3_prefix.setText('Ah<sub>rem</sub>:')
            self.ui.Trip_3_1_prefix.setText('Wh<sub>reg</sub>:')
            if self.units:
                self.ui.Trip_3_2_prefix.setText('Km:')
            else:
                self.ui.Trip_3_2_prefix.setText('Miles:')
            self.ui.Trip_3_3_prefix.setText('Ah<sub>reg</sub>:')
        elif self.trip_selector == 2:
            self.ui.Trip_1_1_prefix.setText('Rng<sub>avg</sub>:')
            self.ui.Trip_1_2_prefix.setText('T<sub>trip</sub>:')
            self.ui.Trip_1_3_prefix.setText('A<sub>max</sub>:')
            self.ui.Trip_2_1_prefix.setText('Rng<sub>inst</sub>:')
            self.ui.Trip_2_2_prefix.setText('T<sub>mov</sub>:')
            self.ui.Trip_2_3_prefix.setText('V<sub>min</sub>:')
            self.ui.Trip_3_1_prefix.setText('T<sub>max</sub>:')
            #self.ui.Trip_3_2_prefix.setText('Miles: ')
            if self.units:
                self.ui.Trip_3_2_prefix.setText('Kmh<sub>mov</sub>:')
            else:
                self.ui.Trip_3_2_prefix.setText('Mph<sub>mov</sub>:')
            if self.units:
                self.ui.Trip_3_3_prefix.setText('Kmh<sub>max</sub>:')
            else:
                self.ui.Trip_3_3_prefix.setText('Mph<sub>max</sub>:')
        elif self.trip_selector == 3:
            self.ui.Trip_1_1_prefix.setText('T1<sub>Batt</sub>:')
            self.ui.Trip_1_2_prefix.setText('T2<sub>Batt</sub>:')
            self.ui.Trip_1_3_prefix.setText('T<sub>BMax</sub>:') # Add self.flt_battmaxtemp
            self.ui.Trip_2_1_prefix.setText('T3<sub>Batt</sub>:')
            self.ui.Trip_2_2_prefix.setText('T4<sub>Batt</sub>:')
            self.ui.Trip_2_3_prefix.setText('A<sub>acc</sub>:')
            self.ui.Trip_3_1_prefix.setText('CV<sub>rng</sub>:')
            self.ui.Trip_3_2_prefix.setText('CV<sub>avg</sub>:')
            self.ui.Trip_3_3_prefix.setText('CV<sub>min</sub>:')
        self.ui.WhmiBar.setValue(int(self.flt_whmi_inst)) # Breaking dict rules but-- performance trumps them.
        self.ui.WhmiLabel.setText(self.gui_dict['WhmiLabel'])
        self.ui.MotorTemperatureLabel.setText(self.gui_dict['MotorTemperatureLabel'])
        self.ui.MotorTemperatureBar.setValue(self.gui_dict['MotorTemperatureBar'])
        self.ui.BatteryVoltageLabel.setText(self.gui_dict['BatteryVoltageLabel'])
        self.ui.BatteryVoltageBar.setValue(self.gui_dict['BatteryVoltageBar'])
        self.ui.BatteryVoltageDropLabel.setText(self.gui_dict['BatteryVoltageDropLabel']) # Label written as formatted str.
        self.ui.BatterySOCLabel.setText(self.gui_dict['BatterySOCLabel'])
        self.ui.BatterySOCBar.setValue(self.gui_dict['BatterySOCBar'])
        self.ui.SpeedGauge.update_value(self.gui_dict['SpeedGauge'])
        self.ui.SpeedGaugeLabel.setText(self.gui_dict['SpeedGaugeLabel'])
        self.ui.PowerGauge.update_value(self.gui_dict['PowerGauge'])
        self.ui.PowerGaugeLabel.setText(self.gui_dict['PowerGaugeLabel'])
        self.ui.Trip_1_1.setText(self.gui_dict['Trip_1_1'])
        self.ui.Trip_1_2.setText(self.gui_dict['Trip_1_2'])
        self.ui.Trip_1_3.setText(self.gui_dict['Trip_1_3'])
        self.ui.Trip_2_1.setText(self.gui_dict['Trip_2_1'])
        self.ui.Trip_2_2.setText(self.gui_dict['Trip_2_2'])
        self.ui.Trip_2_3.setText(self.gui_dict['Trip_2_3'])
        self.ui.Trip_3_1.setText(self.gui_dict['Trip_3_1'])
        self.ui.Trip_3_2.setText(self.gui_dict['Trip_3_2'])
        self.ui.Trip_3_3.setText(self.gui_dict['Trip_3_3'])
def checkGPIO(self):
#Profile Signaller.
# 22/23 for 3p switch. if A = 1, if not A and not B = 2, if B = 3
if GPIO.event_detected(22) or GPIO.event_detected(23):
pinA = GPIO.input(22)
pinB = GPIO.input(23)
if pinA:
self.signalProfile(True, -11)
if not pinA and not pinB:
self.signalProfile(True, -12)
if pinB:
self.signalProfile(True, -13)
#### Main Display Command Functions and BAC Signals ####
def tripRangeEnable(self, bool, range):
# todo: check that slider dynamically updates self.flt_range_limit
if bool:
self.opt_tripRangeValue = bool
self.flt_range_limit = range
self.pid.auto_mode = True
self.opt_tripRangeValue = True
elif not bool:
self.bacqueue.put([-15]) # Code to reset range power limiter
self.pid_auto_mode = False
self.opt_tripRangeValue = False
# Add var so GUI knows active profile amps. --> self.profile -11 = 1, -12 = 2...
def tripRangeLimiter(self):
# Check which profile is active. Wh =/= Ah but they are proportional, and no ASI pwr limit exists.
if self.profile == -11:
indice = ()
#Return 1st, 2nd index in list of Setup profile tuples for 'Battery Current Limit'
for i, tup in enumerate(self.setup['profile1']):
print('i:', i, tup)
for ii, string in enumerate(tup):
print('ii:', ii, string)
try:
if 'Battery_Current_Limit' in string:
indice = (i, ii+1)
except Exception as e:
pass
max_amps = self.setup['profile1'][indice[0]][indice[1]]
elif self.profile == -12:
indice = 0
for i, tup in enumerate(self.setup['profile2']):
print('i:', i, tup)
for ii, string in enumerate(tup):
print('ii:', ii, string)
try:
if 'Battery_Current_Limit' in string:
indice = (i, ii+1)
except Exception as e:
pass
max_amps = self.setup['profile2'][indice[0]][indice[1]]
elif self.profile == -13:
indice = 0
for i, tup in enumerate(self.setup['profile3']):
print('i:', i, tup)
for ii, string in enumerate(tup):
print('ii:', ii, string)
try:
if 'Battery_Current_Limit' in string:
indice = (i, ii+1)
except Exception as e:
pass
max_amps = self.setup['profile3'][indice[0]][indice[1]]
range_div = ((self.get_battwh()) / (self.flt_whmi_inst)) / self.flt_range_limit
# Instantaneous range / range limit
# Setpoint is 1, :. range / range limit = 1 is target.
limit = self.pid.__call__(range_div, self.list_floop_interval[-1:])
self.bacqueue.emit([int(limit * max_amps)])
def tripReset(self, bool):
if bool: # Reset all variables of floop_to_list, and flt.
self.flt_batt_volts, self.flt_batt_volts_max, self.flt_batt_volts_min, self.flt_batt_amps_max, \
self.flt_motor_temp_max, self.flt_batt_amps_max, self.flt_batt_volts_drop, self.flt_motor_amps, \
self.flt_soc, self.flt_range, self.flt_range_limit, self.flt_whmi_avg, self.flt_whmi_inst, self.flt_dist, \
self.flt_wh, self.flt_ah, self.flt_whregen, self.flt_ahregen, self.flt_bmscellvrng, self.flt_bmscellvmean, \
self.flt_bmsmaxtemp, self.flt_bmscellvmin, self.flt_bmsah, self.flt_bmswh, self.flt_bmsahregen, self.flt_bmswhregen = \
float(0), float(0), float(0), float(0), float(0), float(0), float(0), float(0), float(0), float(0), \
float(0), float(0), float(0), float(0), float(0), float(0), float(0), float(0), float(0), float(0), \
float(0), float(0), float(0), float(0), float(0), float(0),
#iterators:
self.exceptions, self.iter, self.iter_sql, self.iter_bmsmsg, self.iter_attribute_slicer = 0, 0, 0, 0, 0
#clear lists:
self.list_batt_amps, self.list_batt_volts, self.list_motor_amps, self.list_motor_temp, self.list_speed, \
self.list_motor_rpm, self.list_floop_interval, self.list_whmi, \
self.list_bms_interval, self.list_bms_amps, self.list_bms_volts = \
[], [], [], [], [], [], [], [], [], [], []
#reset trip timer:
self.start_time = self.ms()
self.first_floop = True
#QtCore.QTimer.singleShot(1000, lambda: self.socreset())
def tripPidUpdateTune(self, kp, ki, kd):
self.pid_kp = kp / 200 # /200 to convert QSlider int to float coefficient
self.pid_ki = ki / 200
self.pid_kd = kd / 200
print('PID tunings: ', self.pid_kp, self.pid_ki, self.pid_kd)
self.pid.tunings = (kp, ki, kd)
self.optpopupwindow.ui.PID_Kp_Label.setText('{:.2f}'.format(self.pid_kp))
self.optpopupwindow.ui.PID_Ki_Label.setText('{:.2f}'.format(self.pid_ki))
self.optpopupwindow.ui.PID_Kd_Label.setText('{:.2f}'.format(self.pid_kd))
def signalFaultReset(self):
self.bacqueue.put([-14]) # clear BAC faults
self.bmsqueue.put(2) # clear BMS faults
def signalAssistLevel(self):
self.assist_level = -(self.ui.AssistSlider.value()+1)
self.ui.AssistSliderLabel.setText('Assist: ' + str(self.ui.AssistSlider.value()))
self.bacqueue.put([self.assist_level]) # Positive integers in worker reserved for trip limiter %'s
self.SQL_update_setup()
def signalProfile(self, button_bool, command):
if button_bool == True:
self.bacqueue.put([command]) # command is integer (-11 = profile1, -12 = profile2...)
self.profile = command
if command == -11 and not self.ui.ProfileRb1.isChecked():
self.ui.ProfileRb1.setChecked(True)
elif command == -12 and not self.ui.ProfileRb2.isChecked():
self.ui.ProfileRb2.setChecked(True)
elif command == -13 and not self.ui.ProfileRb3.isChecked():
self.ui.ProfileRb2.setChecked(True)
self.SQL_update_setup()
def signalReverse(self, bool):
if bool:
self.ui.Reverse.setText('R')
self.bacqueue.put([-18])
if not bool:
self.ui.Reverse.setText('D')
self.bacqueue.put([-19])
def signalAntitheft(self, bool):
if bool:
# emit signal to enable antitheft
self.popupAntitheft()
print('Antitheft signal true, enabling antitheft...')
self.bacqueue.put([-17])
if not bool:
# Emit signal here to disable antitheft
# Close popup within number_pad.py
print('Antitheft signal false, disabling antitheft...')
self.bacqueue.put([-16])
def signalTripRangeLimiter(self, bool, value):
if bool:
self.flt_range_limit = value
elif not bool:
self.flt_range_limit = 0
def signalThrottleBypassAssist(self, bool):
if bool:
self.bacqueue.put([-20])
self.opt_throttleAssistBool = True
self.SQL_update_setup()
if not bool:
self.bacqueue.put([-21])
self.opt_throttleAssistBool = False
self.SQL_update_setup()
def signalBatta(self, bool, value): # Bool = btn, value = slider
if bool:
self.bacqueue.put([-30, value])
self.opt_battaValue = value
self.optpopupwindow.ui.BattPowerLabel.setText('BattAmp:' + '{:.0f}'.format(value) + '%')
if not bool or self.opt_battaValue == 0:
self.opt_battaValue = 0
self.optpopupwindow.ui.BattPowerBtn.setChecked(False)
self.optpopupwindow.ui.BattPowerLabel.setText('BattAmp: 0%')
def signalMota(self, bool, value): # Bool = btn, value = slider
if bool:
print('mota:', value)
self.bacqueue.put([-34, value])
self.opt_motaValue = value
self.optpopupwindow.ui.MotPowerLabel.setText('MotAmp:' + '{:.0f}'.format(value)+ '<sub>A</sub>')
if not bool or self.opt_motaValue == 0:
self.opt_battaValue = 0
self.optpopupwindow.ui.MotPowerBtn.setChecked(False)
self.optpopupwindow.ui.MotPowerLabel.setText('MotAmp: 0<sub>A</sub>')
def signalFlux(self, bool, value):
val = value/10 #500 int -> 50.0%
if bool:
self.bacqueue.put([-31, val])
self.opt_fluxValue = val #
self.optpopupwindow.ui.FluxLabel.setText('Flux: ' + '{:.1f}'.format(val) + '%')
if not bool or self.opt_fluxValue == 0: # and to both disable signals when slider to 0, and detect 0 for sql setup
self.bacqueue.put([-31, 0])
self.opt_fluxValue = 0
self.optpopupwindow.ui.FluxBtn.setChecked(False)
self.optpopupwindow.ui.FluxLabel.setText('Flux: 0')
def signalBMSMsgBAC(self, soc, temp): #-32 bacqueue
self.bacqueue.put([-32, soc, temp])
def signalDiagnosticPoller(self, bool):
if bool:
self.bacqueue.put([-29])
else:
self.bacqueue.put([0])
    def diagnosticsReceive(self, msg):
        """Populate the diagnostics pane of the options popup.

        msg is a decoded BAC diagnostics frame: voltage readings (floats),
        flag lists (joined for display) and the sensorless-state string.
        """
        self.optpopupwindow.ui.DiagThrottleV.setText('{:.4f}'.format(msg['Throttle_Voltage']))
        # NOTE(review): the BMS voltage field also displays 'Throttle_Voltage';
        # this looks like a copy-paste slip -- confirm the intended msg key.
        self.optpopupwindow.ui.DiagBMSV.setText('{:.4f}'.format(msg['Throttle_Voltage']))
        self.optpopupwindow.ui.DiagBrake1V.setText('{:.4f}'.format(msg['Brake_1_Voltage']))
        self.optpopupwindow.ui.DiagBrake2V.setText('{:.4f}'.format(msg['Brake_2_Voltage']))
        # Flag lists become comma-separated display strings.
        self.optpopupwindow.ui.DiagEbikeFlags.setText(', '.join(msg['EbikeFlags']))
        self.optpopupwindow.ui.DiagDigitalInputs.setText(', '.join(msg['DigitalInputs']))
        self.optpopupwindow.ui.DiagWarnings.setText(', '.join(msg['Warnings']))
        self.optpopupwindow.ui.DiagSensorless.setText(msg['SensorlessState'])
def signalHackBACAccessCode(self, bool):
if bool:
self.bacqueue.put([-33])
def receiveHackBACAccessCode(self, msg):
level = msg[0]
val = msg[1]
if level == 1:
self.optpopupwindow.ui.HackAccessLabel_code1.setText(('1: ' + str(val)))
with open((os.path.abspath((os.path.dirname(__file__)))) + '/access_codes.csv', mode='w') as file:
writer = csv.writer(file, delimiter = ',')
writer.writerow(['Level 1 Access Code: ' + str(val)])
file.close()
if level == 2:
self.optpopupwindow.ui.HackAccessLabel_code1.setText('2: ' + str(val))
with open((os.path.abspath((os.path.dirname(__file__)))) + '/access_codes.csv', mode='w') as file:
writer = csv.writer(file, delimiter = ',')
writer.writerow(['Level 2 Access Code: ' + str(val)])
file.close()
if level == 3:
self.optpopupwindow.ui.HackAccessLabel_code1.setText('3: ' + str(val))
with open((os.path.abspath((os.path.dirname(__file__)))) + '/access_codes.csv', mode='w') as file:
writer = csv.writer(file, delimiter = ',')
writer.writerow(['Level 3 Access Code: ' + str(val)])
file.close()
#self.optpopupwindow.ui.HackAccessLabel.setText('Level:', level, '#:', val)
#### Subwindow Calls ####
def popupFault(self): # Check Controller indicator.
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Fault Detected")
msg.setText('Faults detected: ' + str(self.floop['Faults']).replace('[', '').replace(']', ''))
#todo: make a custom window instead of MessageBox. Separate BMS/BAC fault clearing.
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setStandardButtons(QtWidgets.QMessageBox.Reset | QtWidgets.QMessageBox.Ignore)
msg.buttonClicked.connect(self.signalFaultReset)
msg.exec_()
    def popupAntitheft(self):
        """Show the fullscreen PIN pad used to arm/disarm antitheft.

        The pad validates against setup['pin'] and calls signalAntitheft
        with the result.
        """
        self.pinpopup = numberPopup(self.ui, self.setup['pin'], self.signalAntitheft)
        # self.pinpopup.setParent(self.ui.centralwidget)
        self.pinpopup.setStyleSheet('QPushButton {border-style: inset;border-color: dark grey;'
                'border-width: 3px;border-radius:10px;font: 40pt "Luxi Mono";font-weight: bold;padding: 0px 0px 0px 0px;} '
                'QPushButton::pressed{border-style: outset;}'
                'QLineEdit{font: 40pt "Luxi Mono";font-weight: bold;}')
        #self.pinpopup.move(self.ui.centralwidget.rect().center() + QtCore.QPoint(self.pinpopup.width()/5, 37))
        # For some reason 'numberPopup' doesn't center like other custom .ui widgets. Below fix only valid for 800x480
        self.pinpopup.move(QtCore.QPoint(0, 0))
        self.pinpopup.showMaximized()
        self.pinpopup.show()
    def popupOptions(self): # Independent widget
        """Create the options dialog and wire its controls to the signal_* handlers.

        A fresh optionsDialog is built on every call; slider/button state is
        re-read from the widgets inside the connected lambdas at event time.
        """
        # todo: initialize states of btns. if self.optpopupwindow: self.___set(self.___)
        # Check if 'closing' window just hides it and if so, instead of re-intializing and re-populating just unhide
        self.optpopupwindow = optionsDialog(self.displayinvert_bool)
        self.optpopupwindow.displayinvertmsg.connect(self.displayinverter)
        self.optpopupwindow.displaybacklightcmd.connect(self.displaybacklight)
        self.optpopupwindow.ui.ThrottleBypassBtn.toggled.connect(lambda: self.signalThrottleBypassAssist(
            self.optpopupwindow.ui.ThrottleBypassBtn.isChecked()))
        self.optpopupwindow.ui.FluxSlider.valueChanged.connect(lambda: self.signalFlux(
            self.optpopupwindow.ui.FluxBtn.isChecked(), self.optpopupwindow.ui.FluxSlider.value()))
        self.optpopupwindow.ui.BattPowerSlider.valueChanged.connect(lambda: self.signalBatta(
            self.optpopupwindow.ui.BattPowerBtn.isChecked(), self.optpopupwindow.ui.BattPowerSlider.value()))
        self.optpopupwindow.ui.MotPowerSlider.valueChanged.connect(lambda: self.signalMota(
            self.optpopupwindow.ui.MotPowerBtn.isChecked(), self.optpopupwindow.ui.MotPowerSlider.value()))
        self.optpopupwindow.ui.TripReset.clicked.connect(lambda: self.tripReset(True))
        self.optpopupwindow.ui.DiagnosticsUpdateBtn.toggled.connect(lambda:
            self.signalDiagnosticPoller(self.optpopupwindow.ui.DiagnosticsUpdateBtn.isChecked()))
        self.optpopupwindow.ui.HackAccessBtn.toggled.connect(lambda:
            self.signalHackBACAccessCode(self.optpopupwindow.ui.HackAccessBtn.isChecked()))
        #self.optpopupwindow.showMaximized()
        self.optpopupwindow.show()
    def popupBms(self): # Inherited widget
        """Open the BMS dialog, seed it with the cached EEPROM message, and
        connect its update signals and buttons."""
        #self.bmspopup.bmspoll.connect(BMSSerialThread.bmspoller)
        #self.bmspopup.bmscut.connect(window.bmscutoff)
        #self.bmspopupwindow.bmscut.connect(self.bmspopEepromWrite)
        #self.bmsqueue.put(1)
        self.bmspopupwindow = bmsDialog(self.battseries)
        self.bmsbasiccmd.connect(self.bmspopupwindow.bmsBasicUpdate)
        self.bmseepromcmd.connect(self.bmspopupwindow.bmsEepromUpdate)
        # Seed immediately from the last-known EEPROM contents.
        self.bmspopupwindow.bmsEepromUpdate(self.processEmitter.eepromMsg)
        # Connect Btns
        self.bmspopupwindow.ui.SaveEepromBtn.clicked.connect(self.bmspopEepromWrite)
        self.bmspopupwindow.ui.ConfigBtn.clicked.connect(self.popupBmsCfg)
        self.bmspopupwindow.show()
    def popupBmsCfg(self):
        """Open the advanced BMS configuration dialog pre-filled from the
        cached EEPROM message."""
        self.bmscfgpopupwindow = bmscfgDialog()
        self.bmscfgpopupwindow.ui.ExitBtn.clicked.connect(lambda: self.bmscfgpopupwindow.hide())
        self.bmscfgpopupwindow.bmscfgGuiUpdate(self.processEmitter.eepromMsg)
        # Read refreshes from the BMS (queue cmd 1); Write sends the edited map.
        self.bmscfgpopupwindow.ui.ReadEepromBtn.clicked.connect(lambda: self.bmsqueue.put(1))
        self.bmscfgpopupwindow.ui.WriteEepromBtn.clicked.connect(self.bmscfgpopEepromWrite)
        self.bmscfgpopupwindow.show()
#### BMS FUNCTIONS #####
# todo: add BMS EEPROM SQL backups
def bmsCall(self):
# 0 = Poll Basic Info, 1 = Read EEPROM, 2 = Write EEPROM
#print('bmsCall: ', self.bmscmd)
if self.bmscmd == 0:
#print('called: ', self.bmscmd)
self.bmsqueue.put(0)
elif self.bmscmd == 1:
self.bmsqueue.put(1)
self.bmscmd = 0
elif self.bmscmd == 2:
#msg = (2, self.bmsemitter.eepromMsg[0]) # Now using len to detect write, 2 = clear faults.
self.bmsqueue.put(2)
self.bmscmd = 0
elif self.bmscmd == 10:
self.bmsqueue.put(0)
self.bmsqueue.put(1)
self.bmscmd = 0
    @QtCore.pyqtSlot()
    def bmsGetEeprom(self):
        """Qt slot: queue a one-shot EEPROM read from the BMS."""
        self.bmscmd = 1
        self.bmsCall()
def bmsGuiUpdate(self):
# Get CellV's to find min/max for Range/Diff labels
keys = ['cell0_mv', 'cell1_mv', 'cell2_mv', 'cell3_mv', 'cell4_mv', 'cell5_mv', 'cell6_mv', 'cell7_mv',
'cell8_mv', 'cell9_mv', 'cell10_mv', 'cell11_mv', 'cell12_mv', 'cell13_mv', 'cell14_mv', 'cell15_mv',
'cell16_mv', 'cell17_mv', 'cell18_mv', 'cell19_mv', 'cell20_mv', 'cell21_mv', 'cell22_mv',
'cell23_mv', 'cell24_mv']
cellv = []
for i in range(self.battseries):
cellv.append(self.processEmitter.basicMsg[0][keys[i]] / 1000) # mv -> V
cellvmin = min(cellv)
cellvmax = max(cellv)
self.bmspopupwindow.ui.VRangeLabel.setText('{:.2f}'.format(cellvmax) + '~' + '{:.2f}'.format(cellvmin)
+ '<sub>V</sub>')
self.bmspopupwindow.ui.VDiffLabel.setText('{:.3f}'.format(cellvmin - cellvmax))
# Temp, Current, Power
self.bmspopupwindow.ui.CurrentLabel.setText('{:.2f}'.format(self.processEmitter.basicMsg[1]['pack_ma'] / 1000) + '<sub>A</sub>')
self.bmspopupwindow.ui.BattPowerLabel.setText('{:.1f}'.format(((self.processEmitter.basicMsg[1]['pack_ma'] / 1000) *
(self.processEmitter.basicMsg[1]['pack_mv'] / 1000))) + '<sub>W</sub>')
self.bmspopupwindow.ui.T1Bar.setValue(self.processEmitter.basicMsg[1]['ntc0'])
self.bmspopupwindow.ui.T2Bar.setValue(self.processEmitter.basicMsg[1]['ntc1'])
self.bmspopupwindow.ui.T3Bar.setValue(self.processEmitter.basicMsg[1]['ntc2'])
self.bmspopupwindow.ui.T4Bar.setValue(self.processEmitter.basicMsg[1]['ntc3'])
# Voltage Bars & Balance Labels # Interleaved to support <24s configurations), cheaper to `try` here
try:
self.bmspopupwindow.ui.C1Bar.setValue(self.processEmitter.basicMsg[0]['cell0_mv'])
self.bmspopupwindow.ui.C1Balance.setChecked(self.processEmitter.basicMsg[1]['bal0'])
self.bmspopupwindow.ui.C2Bar.setValue(self.processEmitter.basicMsg[0]['cell1_mv'])
self.bmspopupwindow.ui.C2Balance.setChecked(self.processEmitter.basicMsg[1]['bal1'])
self.bmspopupwindow.ui.C3Bar.setValue(self.processEmitter.basicMsg[0]['cell2_mv'])
self.bmspopupwindow.ui.C3Balance.setChecked(self.processEmitter.basicMsg[1]['bal2'])
self.bmspopupwindow.ui.C4Bar.setValue(self.processEmitter.basicMsg[0]['cell3_mv'])
self.bmspopupwindow.ui.C4Balance.setChecked(self.processEmitter.basicMsg[1]['bal3'])
self.bmspopupwindow.ui.C5Bar.setValue(self.processEmitter.basicMsg[0]['cell4_mv'])
self.bmspopupwindow.ui.C5Balance.setChecked(self.processEmitter.basicMsg[1]['bal4'])
self.bmspopupwindow.ui.C6Bar.setValue(self.processEmitter.basicMsg[0]['cell5_mv'])
self.bmspopupwindow.ui.C6Balance.setChecked(self.processEmitter.basicMsg[1]['bal5'])
self.bmspopupwindow.ui.C7Bar.setValue(self.processEmitter.basicMsg[0]['cell6_mv'])
self.bmspopupwindow.ui.C7Balance.setChecked(self.processEmitter.basicMsg[1]['bal6'])
self.bmspopupwindow.ui.C8Bar.setValue(self.processEmitter.basicMsg[0]['cell7_mv'])
self.bmspopupwindow.ui.C8Balance.setChecked(self.processEmitter.basicMsg[1]['bal7'])
self.bmspopupwindow.ui.C9Bar.setValue(self.processEmitter.basicMsg[0]['cell8_mv'])
self.bmspopupwindow.ui.C9Balance.setChecked(self.processEmitter.basicMsg[1]['bal8'])
self.bmspopupwindow.ui.C10Bar.setValue(self.processEmitter.basicMsg[0]['cell9_mv'])
self.bmspopupwindow.ui.C10Balance.setChecked(self.processEmitter.basicMsg[1]['bal9'])
self.bmspopupwindow.ui.C11Bar.setValue(self.processEmitter.basicMsg[0]['cell10_mv'])
self.bmspopupwindow.ui.C11Balance.setChecked(self.processEmitter.basicMsg[1]['bal10'])
self.bmspopupwindow.ui.C12Bar.setValue(self.processEmitter.basicMsg[0]['cell11_mv'])
self.bmspopupwindow.ui.C12Balance.setChecked(self.processEmitter.basicMsg[1]['bal11'])
self.bmspopupwindow.ui.C13Bar.setValue(self.processEmitter.basicMsg[0]['cell12_mv'])
self.bmspopupwindow.ui.C13Balance.setChecked(self.processEmitter.basicMsg[1]['bal12'])
self.bmspopupwindow.ui.C14Bar.setValue(self.processEmitter.basicMsg[0]['cell13_mv'])
self.bmspopupwindow.ui.C14Balance.setChecked(self.processEmitter.basicMsg[1]['bal13'])
self.bmspopupwindow.ui.C15Bar.setValue(self.processEmitter.basicMsg[0]['cell14_mv'])
self.bmspopupwindow.ui.C15Balance.setChecked(self.processEmitter.basicMsg[1]['bal14'])
self.bmspopupwindow.ui.C16Bar.setValue(self.processEmitter.basicMsg[0]['cell15_mv'])
self.bmspopupwindow.ui.C16Balance.setChecked(self.processEmitter.basicMsg[1]['bal15'])
self.bmspopupwindow.ui.C17Bar.setValue(self.processEmitter.basicMsg[0]['cell16_mv'])
self.bmspopupwindow.ui.C17Balance.setChecked(self.processEmitter.basicMsg[1]['bal16'])
self.bmspopupwindow.ui.C18Bar.setValue(self.processEmitter.basicMsg[0]['cell17_mv'])
self.bmspopupwindow.ui.C18Balance.setChecked(self.processEmitter.basicMsg[1]['bal17'])
self.bmspopupwindow.ui.C19Bar.setValue(self.processEmitter.basicMsg[0]['cell18_mv'])
self.bmspopupwindow.ui.C19Balance.setChecked(self.processEmitter.basicMsg[1]['bal18'])
self.bmspopupwindow.ui.C20Bar.setValue(self.processEmitter.basicMsg[0]['cell19_mv'])
self.bmspopupwindow.ui.C20Balance.setChecked(self.processEmitter.basicMsg[1]['bal19'])
self.bmspopupwindow.ui.C21Bar.setValue(self.processEmitter.basicMsg[0]['cell20_mv'])
self.bmspopupwindow.ui.C21Balance.setChecked(self.processEmitter.basicMsg[1]['bal20'])
self.bmspopupwindow.ui.C22Bar.setValue(self.processEmitter.basicMsg[0]['cell21_mv'])
self.bmspopupwindow.ui.C22Balance.setChecked(self.processEmitter.basicMsg[1]['bal21'])
self.bmspopupwindow.ui.C23Bar.setValue(self.processEmitter.basicMsg[0]['cell22_mv'])
self.bmspopupwindow.ui.C23Balance.setChecked(self.processEmitter.basicMsg[1]['bal22'])
self.bmspopupwindow.ui.C24Bar.setValue(self.processEmitter.basicMsg[0]['cell23_mv'])
self.bmspopupwindow.ui.C24Balance.setChecked(self.processEmitter.basicMsg[1]['bal23'])
except AttributeError:
pass # Ignore missing UI elements.
    @QtCore.pyqtSlot()
    def bmspopEepromWrite(self):
        """Qt slot: fold the BMS popup's slider values into the cached EEPROM
        message and queue it for writing by the BMS worker."""
        # Get bmspop eeprom values, update eeprom, send all to bmsProc
        msg = self.processEmitter.eepromMsg
        msg[0]['bal_start'] = self.bmspopupwindow.ui.BalanceLevelSlider.value()
        msg[0]['covp'] = self.bmspopupwindow.ui.ChargeLevelSlider.value()
        # NOTE(review): bmscfgpopEepromWrite uses key 'covp_rel'; confirm
        # whether 'covp_release' here targets the same EEPROM field.
        msg[0]['covp_release'] = self.bmspopupwindow.ui.ChargeLevelSlider.value() + 50 # +0.05V default release
        self.processEmitter.eepromMsg = msg
        print('bmspopEepromWrite: ', self.processEmitter.eepromMsg, '\n', msg)
        self.bmsqueue.put(msg)
        #self.bmsWriteEeprom()
def bmscfgpopEepromWrite(self):
msg = self.processEmitter.eepromMsg
msg[0]['switch'] = self.bmscfgpopupwindow.ui.SwitchBtn.isChecked()
msg[0]['scrl'] = self.bmscfgpopupwindow.ui.SCReleaseBtn.isChecked()
msg[0]['balance_en'] = self.bmscfgpopupwindow.ui.BalanceEnableBtn.isChecked()
msg[0]['chg_balance_en'] = self.bmscfgpopupwindow.ui.ChargeBalanceBtn.isChecked()
msg[0]['led_en'] = self.bmscfgpopupwindow.ui.LEDEnableBtn.isChecked()
msg[0]['led_num'] = self.bmscfgpopupwindow.ui.LEDNumberBtn.isChecked()
msg[0]['ntc1'] = self.bmscfgpopupwindow.ui.NTC1Btn.isChecked()
msg[0]['ntc2'] = self.bmscfgpopupwindow.ui.NTC2Btn.isChecked()
msg[0]['ntc3'] = self.bmscfgpopupwindow.ui.NTC3Btn.isChecked()
msg[0]['ntc4'] = self.bmscfgpopupwindow.ui.NTC4Btn.isChecked()
msg[0]['ntc5'] = self.bmscfgpopupwindow.ui.NTC5Btn.isChecked()
msg[0]['ntc6'] = self.bmscfgpopupwindow.ui.NTC6Btn.isChecked()
msg[0]['ntc7'] = self.bmscfgpopupwindow.ui.NTC7Btn.isChecked()
msg[0]['ntc8'] = self.bmscfgpopupwindow.ui.NTC8Btn.isChecked()
# Balance and Misc Configuration
msg[0]['bal_start'] = self.bmscfgpopupwindow.ui.BalanceStartSpin.value() * 1000
msg[0]['bal_window'] = self.bmscfgpopupwindow.ui.BalanceWindowSpin.value()
msg[0]['shunt_res'] = self.bmscfgpopupwindow.ui.ShuntSpin.value()
msg[0]['cycle_cnt'] = self.bmscfgpopupwindow.ui.CycleCountSpin.value()
msg[0]['design_cap'] = self.bmscfgpopupwindow.ui.DesignCapSpin.value() * 1000
msg[0]['cycle_cap'] = self.bmscfgpopupwindow.ui.CycleCapSpin.value() * 1000
msg[0]['cap_100'] = self.bmscfgpopupwindow.ui.SOC100Spin.value()
msg[0]['cap_80'] = self.bmscfgpopupwindow.ui.SOC80Spin.value()
msg[0]['cap_60'] = self.bmscfgpopupwindow.ui.SOC60Spin.value()
msg[0]['cap_40'] = self.bmscfgpopupwindow.ui.SOC40Spin.value()
msg[0]['cap_20'] = self.bmscfgpopupwindow.ui.SOC20Spin.value()
msg[0]['cap_0'] = self.bmscfgpopupwindow.ui.SOC0Spin.value()
msg[0]['dsg_rate'] = self.bmscfgpopupwindow.ui.SelfDschgSpin.value()
msg[0]['fet_ctrl'] = self.bmscfgpopupwindow.ui.FETControlSpin.value()
msg[0]['led_timer'] = self.bmscfgpopupwindow.ui.LEDTimerSpin.value()
msg[0]['cell_cnt'] = self.bmscfgpopupwindow.ui.CellCntSpin.value()
# Protection Configuration
msg[0]['covp'] = self.bmscfgpopupwindow.ui.COVPSpin.value() * 1000
msg[0]['cuvp'] = self.bmscfgpopupwindow.ui.CUVPSpin.value() * 1000
msg[0]['povp'] = self.bmscfgpopupwindow.ui.POVPSpin.value() * 1000
msg[0]['puvp'] = self.bmscfgpopupwindow.ui.PUVPSpin.value() * 1000
msg[0]['chgot'] = self.bmscfgpopupwindow.ui.CHGOTSpin.value()
msg[0]['chgut'] = self.bmscfgpopupwindow.ui.CHGUTSpin.value()
msg[0]['dsgot'] = self.bmscfgpopupwindow.ui.DSGOTSpin.value()
msg[0]['dsgut'] = self.bmscfgpopupwindow.ui.DSGUTSpin.value()
msg[0]['chgoc'] = self.bmscfgpopupwindow.ui.CHGOCSpin.value() * 1000
msg[0]['dsgoc'] = self.bmscfgpopupwindow.ui.DSCHOCSpin.value() * 1000
msg[0]['covp_rel'] = self.bmscfgpopupwindow.ui.COVPReleaseSpin.value() * 1000
msg[0]['cuvp_rel'] = self.bmscfgpopupwindow.ui.CUVPReleaseSpin.value() * 1000
msg[0]['povp_rel'] = self.bmscfgpopupwindow.ui.POVPReleaseSpin.value() * 1000
msg[0]['puvp_rel'] = self.bmscfgpopupwindow.ui.PUVPReleaseSpin.value() * 1000
msg[0]['chgot_rel'] = self.bmscfgpopupwindow.ui.CHGOTReleaseSpin.value()
msg[0]['chgut_rel'] = self.bmscfgpopupwindow.ui.CHGUTReleaseSpin.value()
msg[0]['dsgot_rel'] = self.bmscfgpopupwindow.ui.DSGOTReleaseSpin.value()
msg[0]['dsgut_rel'] = self.bmscfgpopupwindow.ui.DSGUTReleaseSpin.value()
msg[0]['chgoc_rel'] = self.bmscfgpopupwindow.ui.CHGOCReleaseSpin.value()
msg[0]['dsgoc_rel'] = self.bmscfgpopupwindow.ui.DSCHOCReleaseSpin.value()
msg[0]['covp_delay'] = self.bmscfgpopupwindow.ui.COVPDelaySpin.value()
msg[0]['cuvp_delay'] = self.bmscfgpopupwindow.ui.CUVPDelaySpin.value()
msg[0]['povp_delay'] = self.bmscfgpopupwindow.ui.POVPDelaySpin.value()
msg[0]['puvp_delay'] = self.bmscfgpopupwindow.ui.PUVPDelaySpin.value()
msg[0]['chgot_delay'] = self.bmscfgpopupwindow.ui.CHGOTDelaySpin.value()
msg[0]['chgut_delay'] = self.bmscfgpopupwindow.ui.CHGUTDelaySpin.value()
msg[0]['dsgot_delay'] = self.bmscfgpopupwindow.ui.DSGOTDelaySpin.value()
msg[0]['dsgut_delay'] = self.bmscfgpopupwindow.ui.DSGUTDelaySpin.value()
msg[0]['chgoc_delay'] = self.bmscfgpopupwindow.ui.CHGOCDelaySpin.value()
msg[0]['dsgoc_delay'] = self.bmscfgpopupwindow.ui.DSCHOCDelaySpin.value()
# Finally, send updated eeprom to bms.
self.bmsqueue.put(msg)
def bmsProcessBasic(self):
    """Integrate the buffered BMS samples into Ah/Wh (dis)charge counters.

    Runs once every `iter_bmsmsg_threshold` BMS basic messages.  Integrates
    pack current (and current*voltage) over the sampled time axis with
    Simpson's rule, then folds the results into the trip/BMS coulomb and
    energy counters.  A positive integrated current is treated as
    charging/regen and triggers a lifestat cycle row.
    """
    # Cumulative time axis for the integration window: running sum of the
    # per-sample intervals.  Single O(n) pass; the original rebuilt and
    # summed every prefix, which was O(n^2) in the threshold size.
    intervals = self.list_bms_interval[-self.iter_bmsmsg_threshold:]
    elapsed = 0.0
    cumulative = []
    for dt in intervals:
        elapsed += dt
        cumulative.append(elapsed)
    x_interval = array(cumulative)
    ampsec = simps(array(self.list_bms_amps[-self.iter_bmsmsg_threshold:]), x=x_interval, even='avg')
    power = array(self.list_bms_amps[-self.iter_bmsmsg_threshold:]) * \
        array(self.list_bms_volts[-self.iter_bmsmsg_threshold:])
    wattsec = simps(power, x=x_interval, even='avg')
    #print('bms: ', ampsec, wattsec, '\n', ampsec/3600, wattsec/3600, '\n', power)
    #todo: Wh used/rem counter is still reversed during charging.
    #      bmshah/bmswh/__regen vars used for nothing.
    # FIXED: TripReset doesn't reset Time in #2 parameter display.
    # FIXED: Or CVmin
    # FIXED: Options Pane BatAmp/MotAmp labels don't update on slider toggle.
    # FIXED: When button pressed, BattAmp slider updates MotorAmp label!
    # FIXED: MotAmp still doesn't with button.
    #        Update options pane with values from profile or with special controller cmd.
    #        Range label doesn't update.
    # FIXED: Flux label always updates to Flux: 0
    # FIXED: Check PrintScrn for slider fault error
    if ampsec <= 0:
        # Discharging: integrated amp-seconds are <= 0, so subtracting
        # them increases the Ah-used counters.
        self.flt_ah -= ampsec / 3600
        self.flt_bmsah -= ampsec / 3600
        self.chargestate = False
    elif ampsec > 0:
        # Charging/regen: same accumulation (decreases Ah used), plus the
        # dedicated regen counter.
        self.flt_ah -= ampsec / 3600
        self.flt_bmsah -= ampsec / 3600
        self.flt_bmsahregen += abs(ampsec / 3600)
        # Set chargestarted to detect end of charge, and create new row in SQL lifestats to mark cycle.
        self.chargestate = True
        self.SQL_lifestat_upload_bms()
    if wattsec <= 0:
        self.flt_wh -= wattsec / 3600
        self.flt_bmswh -= wattsec / 3600
    elif wattsec > 0:
        self.flt_wh -= wattsec / 3600
        self.flt_bmswh -= wattsec / 3600
        self.flt_bmswhregen += abs(wattsec / 3600)
    if not self.chargestate:  # todo verify correct boolean
        self.SQL_lifestat_upload_bms()
@QtCore.pyqtSlot()
def bmsReceiveBasic(self):
    """Handle one BMS 'basic' status message from the subprocess emitter.

    Buffers interval/current/voltage samples for coulomb counting, tracks
    cell-voltage min/max/mean and NTC max temperature, forwards the raw
    message to the BMS popup when visible, kicks off integration every
    `iter_bmsmsg_threshold` messages, and surfaces any active protection
    flag as a fault string.
    """
    self.iter_bmsmsg += 1
    # Store data for coulomb counting
    self.list_bms_interval.append(self.processEmitter.basicMsg[3])
    self.list_bms_amps.append(self.processEmitter.basicMsg[1]['pack_ma'] / 1000)
    self.list_bms_volts.append(self.processEmitter.basicMsg[1]['pack_mv'] / 1000)
    # Process cellV's, if new low minimum, store
    keys = self.processEmitter.basicMsg[0].keys()
    cellv = []
    for i in keys:
        cellv.append(self.processEmitter.basicMsg[0][i] / 1000)  # mV -> V
    cellvmin = min(cellv)
    cellvmax = max(cellv)
    self.flt_bmscellvrng = (cellvmax - cellvmin)
    self.flt_bmscellvmean = mean(cellv)
    if cellvmin < self.flt_bmscellvmin:
        self.flt_bmscellvmin = cellvmin
    if self.flt_bmscellvmin == 0:  # 0 means "uninitialized": seed with the first reading
        self.flt_bmscellvmin = cellvmin
    # Process NTC temp, if new max, store
    self.bmstemps = [self.processEmitter.basicMsg[1]['ntc0'], self.processEmitter.basicMsg[1]['ntc1'],
                     self.processEmitter.basicMsg[1]['ntc2'], self.processEmitter.basicMsg[1]['ntc3']]
    try:
        if max(self.bmstemps) > self.flt_bmsmaxtemp:
            self.flt_bmsmaxtemp = max(self.bmstemps)
    except TypeError:
        pass  # presumably a sensor can report None before first conversion -- confirm
    # Update Main BatteryTemperatureBar
    # NOTE(review): max() here is unguarded; if bmstemps can hold None (as
    # the try/except above implies) this line would raise -- confirm.
    maxtemp = int(max(self.bmstemps))
    self.ui.BatteryTemperatureBar.setValue(maxtemp)
    # Process pack_ma to detect charging, accessory current drain.
    # todo: detect here whether pack_ma is negative, use to open bmspop, set charge bool and store SOC,
    # then when not negative, use QTimer.singleShot(5000?) to store %SOC charged after ~stable voltage.
    # Additionally interpolate
    try:
        if self.bmspopupwindow.isVisible():
            self.bmsbasiccmd.emit(self.processEmitter.basicMsg)
    except AttributeError:
        pass  # popup window not created yet
    # 11 ~= 2 seconds
    if self.iter_bmsmsg >= self.iter_bmsmsg_threshold:
        self.bmsProcessBasic()
        mincellsoc = int(BAC.socmapper(cellvmin))
        self.signalBMSMsgBAC(mincellsoc, maxtemp)
        #self.bacqueue.put([-32, int(self.flt_soc), maxtemp])
        self.iter_bmsmsg = 0
    # Protection flags: only the first active flag per message is reported.
    if self.processEmitter.basicMsg[1]['covp_err']:
        self.bmsExceptionReceive('BMS: Cell Overvoltage Protection:' + str(self.processEmitter.basicMsg[1]['covp_err']))
    elif self.processEmitter.basicMsg[1]['cuvp_err']:
        self.bmsExceptionReceive('BMS: Cell Undervoltage Protection:' + str(self.processEmitter.basicMsg[1]['cuvp_err']))
    elif self.processEmitter.basicMsg[1]['povp_err']:
        self.bmsExceptionReceive('BMS: Pack Overvoltage Protection:' + str(self.processEmitter.basicMsg[1]['povp_err']))
    elif self.processEmitter.basicMsg[1]['puvp_err']:
        self.bmsExceptionReceive('BMS: Pack Undervoltage Protection:' + str(self.processEmitter.basicMsg[1]['puvp_err']))
    elif self.processEmitter.basicMsg[1]['chgot_err']:
        self.bmsExceptionReceive('BMS: Charge Overtemperature Protection:' + str(self.processEmitter.basicMsg[1]['chgot_err']))
    elif self.processEmitter.basicMsg[1]['chgut_err']:
        self.bmsExceptionReceive('BMS: Charge Undertemperature Protection:' + str(self.processEmitter.basicMsg[1]['chgut_err']))
    elif self.processEmitter.basicMsg[1]['dsgot_err']:
        self.bmsExceptionReceive('BMS: Discharge Overtemperature Protection:' + str(self.processEmitter.basicMsg[1]['dsgot_err']))
    elif self.processEmitter.basicMsg[1]['dsgut_err']:
        self.bmsExceptionReceive('BMS: Discharge Undertemperature Protection:' + str(self.processEmitter.basicMsg[1]['dsgut_err']))
    elif self.processEmitter.basicMsg[1]['chgoc_err']:
        self.bmsExceptionReceive('BMS: Charge Overcurrent Protection:' + str(self.processEmitter.basicMsg[1]['chgoc_err']))
    elif self.processEmitter.basicMsg[1]['dsgoc_err']:
        # Fixed: was called with two arguments (',' instead of '+'), which
        # raised TypeError whenever the dsgoc flag was actually set.
        self.bmsExceptionReceive('BMS: Discharge Overcurrent Protection:' + str(self.processEmitter.basicMsg[1]['dsgoc_err']))
    elif self.processEmitter.basicMsg[1]['sc_err']:
        self.bmsExceptionReceive('BMS: High SC Protection:' + str(self.processEmitter.basicMsg[1]['sc_err']))
    elif self.processEmitter.basicMsg[1]['afe_err']:
        self.bmsExceptionReceive('BMS: AFE Protection:' + str(self.processEmitter.basicMsg[1]['afe_err']))
    elif self.processEmitter.basicMsg[1]['software_err']:
        self.bmsExceptionReceive('BMS: Software Error!' + str(self.processEmitter.basicMsg[1]['software_err']))
@QtCore.pyqtSlot()
def bmsReceiveEeprom(self):
    """Forward a received BMS EEPROM message to the popup windows.

    Either popup may not have been created yet, so AttributeError is
    expected and merely logged.  Once the first EEPROM message arrives,
    the BMS button is wired up (initialization complete).
    """
    print('window.receive_eeprom_msg: ', self.processEmitter.eepromMsg)
    try:
        self.bmspopupwindow.bmsEepromUpdate(self.processEmitter.eepromMsg)
    except AttributeError as e:
        print(e)  # popup not yet instantiated; removed redundant 'pass'
    try:
        #self.bmscfgeepromcmd.emit(self.bmsemitter.eepromMsg)
        self.bmscfgpopupwindow.bmscfgGuiUpdate(self.processEmitter.eepromMsg)
    except AttributeError as e:
        print(e)
    if self.bmseeprom_initter:  # Now that EEPROM/Basic are read, allow BMS window popup.
        print('MainWindow has received BMS intialization data from subprocess.')
        self.ui.BMSButton.clicked.connect(self.popupBms)
        self.bmseeprom_initter = False
@QtCore.pyqtSlot(str)
def bmsExceptionReceive(self, val):
    """Record a BMS fault string in the fault loop dict.

    Appends to the existing fault list (capped at 20 entries to prevent
    overrun from repeating errors); when the list is empty or full, the
    entry is (re)initialized as a fresh one-element list.
    """
    isfaulted = len(self.floop['Faults'])
    if isfaulted > 0 and isfaulted < 20:  # To prevent overrun with repeat errors.
        self.floop['Faults'].append(val)
    else:
        # Fixed: previously assigned the bare string, so the next fault's
        # append() hit a str (no .append) and len() counted characters.
        self.floop['Faults'] = [val]
@QtCore.pyqtSlot(int)
def displaybacklight(self, val):
    # Set the display backlight brightness: 'val' is the PWM duty cycle.
    self.pwm.ChangeDutyCycle(val)
    # NOTE(review): duplicate call below looks like accidental copy-paste,
    # but could be a deliberate double-write workaround -- confirm before removing.
    self.pwm.ChangeDutyCycle(val)
@QtCore.pyqtSlot(int)
def displayinverter(self, bool):
    """Toggle between dark and light display themes.

    :param bool: truthy selects the dark (inverted) theme.
        (Parameter name shadows the builtin but is kept for slot
        signature compatibility.)
    """
    # Dark Theme. Apply shiteload of stylesheets:
    self.displayinvert_bool = bool
    if self.displayinvert_bool:
        self.ui.centralwidget.setStyleSheet("QWidget#centralwidget{background: solid black}")
        self.ui.SpeedGauge.set_NeedleColor(255, 255, 255, 255)
        self.ui.SpeedGauge.set_ScaleValueColor(255, 255, 255, 255)
        self.ui.SpeedGauge.set_DisplayValueColor(255, 255, 255, 255)
        self.ui.SpeedGauge.black = QtGui.QColor(255, 255, 255, 255)
        self.ui.PowerGauge.set_NeedleColor(255, 255, 255, 255)
        self.ui.PowerGauge.set_ScaleValueColor(255, 255, 255, 255)
        self.ui.PowerGauge.set_DisplayValueColor(255, 255, 255, 255)
        self.ui.PowerGauge.black = QtGui.QColor(255, 255, 255, 255)
        self.ui.SpeedGaugeLabel.setStyleSheet("QLabel{font: 70pt \"Luxi Mono\"; font-weight: bold; color: white}")
        self.ui.SpeedGaugeLabelUnits.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\"; font-weight: bold; color: white}")
        self.ui.PowerGaugeLabel.setStyleSheet("QLabel{font: 48pt \"Luxi Mono\"; font-weight: bold; color: white}")
        self.ui.PowerGaugeLabelUnits.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\"; font-weight: bold; color: white}")
        self.ui.TripBox.setStyleSheet("QGroupBox{background: solid black; border: 5px solid gray;\n"
                                      "    border-radius: 10px; margin-top: 50px;}\n"
                                      "QGroupBox::title {subcontrol-origin: margin; subcontrol-position: top left; left: 25px;\n"
                                      "    padding: -25 0px 0 0px;}"
                                      "QLabel{font: 18pt \"Luxi Mono\"; font-weight: bold; color: white}\n"
                                      "QCheckBox::indicator {width: 60px; height: 60px;}"
                                      "QPushButton{background: black; font: 48pt \"Luxi Mono\"; font-weight: bold; color: white;\n"
                                      "border-style: inset; border-color: light grey; border-width: 4px; border-radius 20px;}\n"
                                      "QPushButton::pressed{border-style: outset}")
        self.ui.BatteryVoltageBar.setStyleSheet("QProgressBar::chunk {background-color: black;}\n"
                                                "QProgressBar {border-style: solid; border-color: gray; background-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.BatteryVoltageLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold;\n"
                                                  "color: white}")
        self.ui.BatteryVoltageDropLabel.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold;\n"
                                                      "color: white}")
        self.ui.BatteryVoltageLine.setStyleSheet("QObject{color:white}")
        self.ui.BatterySOCBar.setStyleSheet("QProgressBar::chunk {background-color: white;}\n"
                                            "QProgressBar {border-style: solid; border-color: gray; background-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.BatterySOCLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold;\n"
                                              "color: white}")
        self.ui.MotorTemperatureLine.setStyleSheet("QObject{color:white}")
        self.ui.MotorTemperatureLine_2.setStyleSheet("QObject{color:white}")
        self.ui.MotorTemperatureBar.setStyleSheet("QProgressBar::chunk {margin-top: 3px; margin-bottom: 3px; background-color: white;}\n"
                                                  "QProgressBar {border-style: solid; border-color: black; background-color: rgba(0,0,0,0); border-width: 3px; border-radius: 6px}")
        self.ui.BatteryTemperatureBar.setStyleSheet(" QProgressBar::chunk {\n"
                                                    "    background-color: rgba(0,0,0,150);}\n"
                                                    "QProgressBar {\n"
                                                    "    background-color: rgba(0,0,0,0);\n"
                                                    "    border-style: solid;\n"
                                                    "    border-color: gray;\n"
                                                    "    border-width: 3px;\n"
                                                    "border-radius: 6px")
        self.ui.MotorTemperatureLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold;\n"
                                                    "color: white}")
        self.ui.WhmiBar.setStyleSheet("QProgressBar::chunk {background-color: white;}\n"
                                      "QProgressBar {border-style: solid; border-color: gray; background-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.WhmiLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold; color: white}")
        self.ui.Time.setStyleSheet("QLabel{font: 36pt \"Luxi Mono\";font-weight: bold; color: white}")
        self.ui.AssistSliderLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold; color: white}")
        self.ui.AssistSlider.setStyleSheet("QSlider {border-style: none; border-color: gray; border-width: 4px;\n"
                                           "border-radius: 18px; height: 80px}\n"
                                           "QSlider::handle:horizontal{background-color: white; border: 5px solid; border-radius: 12px;\n"
                                           "width: 30px; margin: 0px 0px;}\n"
                                           "QSlider::groove:horizontal{border: 4px solid gray; border-radius: 18px; height: 28px}")
        self.ui.Profile1Label.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold; color: white}")
        self.ui.Profile2Label.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold; color: white}")
        self.ui.Profile3Label.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold; color: white}")
        self.ui.ProfileRb1.setStyleSheet("QPushButton{border: none; background: transparent;}")
        self.ui.ProfileRb2.setStyleSheet("QPushButton{border: none; background: transparent;}")
        self.ui.ProfileRb3.setStyleSheet("QPushButton{border: none; background: transparent;}")
    else:  # Light Theme
        self.ui.centralwidget.setStyleSheet("QWidget#centralwidget{background: solid white; }")
        self.ui.SpeedGaugeLabel.setStyleSheet("QLabel{font: 70pt \"Luxi Mono\"; font-weight: bold; color: black}")
        self.ui.SpeedGaugeLabelUnits.setStyleSheet(
            "QLabel{font: 16pt \"Luxi Mono\"; font-weight: bold; color: black}")
        self.ui.PowerGaugeLabel.setStyleSheet("QLabel{font: 48pt \"Luxi Mono\"; font-weight: bold; color: black}")
        self.ui.PowerGaugeLabelUnits.setStyleSheet(
            "QLabel{font: 16pt \"Luxi Mono\"; font-weight: bold; color: black}")
        self.ui.SpeedGauge.set_NeedleColor(50, 50, 50, 255)
        self.ui.SpeedGauge.set_ScaleValueColor(50, 50, 50, 255)
        self.ui.SpeedGauge.set_DisplayValueColor(50, 50, 50, 255)
        self.ui.SpeedGauge.black = QtGui.QColor(0, 0, 0, 255)
        self.ui.PowerGauge.set_NeedleColor(50, 50, 50, 255)
        self.ui.PowerGauge.set_ScaleValueColor(50, 50, 50, 255)
        self.ui.PowerGauge.set_DisplayValueColor(50, 50, 50, 255)
        # Fixed: was SpeedGauge.black a second time, so PowerGauge.black
        # never reverted from the dark theme (see dark branch for the
        # intended parallel structure).
        self.ui.PowerGauge.black = QtGui.QColor(0, 0, 0, 255)
        self.ui.TripBox.setStyleSheet("QGroupBox{background: solid white; border: 5px solid black;\n"
                                      "    border-radius: 10px; margin-top: 50px;}\n"
                                      "QGroupBox::title{subcontrol-origin: margin; subcontrol-position: top left; left: 25px;\n"
                                      "    padding: -25 0px 0 0px;}"
                                      "QLabel{font: 18pt \"Luxi Mono\"; font-weight: bold; color: black}\n"
                                      "QCheckBox::indicator {width: 60px; height: 60px;}"
                                      "QPushButton{background: transparent; font: 48pt \"Luxi Mono\"; font-weight: bold; color: black;\n"
                                      "border-style: inset; border-color: dark grey; border-width: 4px; border-radius 20px;}\n"
                                      "QPushButton::pressed{border-style: outset}")
        self.ui.BatteryVoltageBar.setStyleSheet("QProgressBar::chunk {background-color: black;}\n"
                                                "QProgressBar {border-style: solid; border-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.BatteryVoltageLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold;\n"
                                                  "color: black}")
        self.ui.BatteryVoltageDropLabel.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold;\n"
                                                      "color: black}")
        self.ui.BatteryVoltageLine.setStyleSheet("QObject{color:black}")
        self.ui.BatterySOCBar.setStyleSheet("QProgressBar::chunk {background-color: black;}\n"
                                            "QProgressBar {border-style: solid; border-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.BatterySOCLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold;\n"
                                              "color: black}")
        self.ui.MotorTemperatureLine.setStyleSheet("QObject{color:black}")
        self.ui.MotorTemperatureLine_2.setStyleSheet("QObject{color:black}")
        self.ui.MotorTemperatureBar.setStyleSheet("QProgressBar::chunk {background-color: black; margin-top: 3px; margin-bottom: 3px;}\n"
                                                  "QProgressBar {border-style: solid; border-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.BatteryTemperatureBar.setStyleSheet(" QProgressBar::chunk {\n"
                                                    "    background-color: rgba(0,0,0,150);}\n"
                                                    "QProgressBar {\n"
                                                    "    background-color: rgba(0,0,0,0);\n"
                                                    "    border-style: solid;\n"
                                                    "    border-color: white;\n"
                                                    "    border-width: 3px;\n"
                                                    "border-radius: 6px")
        self.ui.MotorTemperatureLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold;\n"
                                                    "color: black}")
        self.ui.MotorTemperatureLine.setStyleSheet("QObject{color:black}")
        self.ui.WhmiBar.setStyleSheet("QProgressBar::chunk {background-color: black;}\n"
                                      "QProgressBar {border-style: solid; border-color: gray; border-width: 3px; border-radius: 6px}")
        self.ui.WhmiLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold; color: black}")
        self.ui.Time.setStyleSheet("QLabel{font: 36pt \"Luxi Mono\";font-weight: bold; color: black}")
        self.ui.AssistSliderLabel.setStyleSheet("QLabel{font: 25pt \"Luxi Mono\";font-weight: bold; color: black}")
        self.ui.AssistSlider.setStyleSheet("QSlider {border-style: none; border-color: gray; border-width: 4px;\n"
                                           "border-radius: 18px; height: 80px}\n"
                                           "QSlider::handle:horizontal {background-color: black; border: 5px solid; border-radius: 12px;\n"
                                           "width: 30px; margin: 0px 0px;}\n"
                                           "QSlider::groove:horizontal {border: 4px solid gray; border-radius: 18px; height: 28px}")
        self.ui.Profile1Label.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold; color: black}")
        self.ui.Profile2Label.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold; color: black}")
        self.ui.Profile3Label.setStyleSheet("QLabel{font: 16pt \"Luxi Mono\";font-weight: bold; color: black}")
        self.ui.ProfileRb1.setStyleSheet("QPushButton{border: none; background: transparent;}")
        self.ui.ProfileRb2.setStyleSheet("QPushButton{border: none; background: transparent;}")
        self.ui.ProfileRb3.setStyleSheet("QPushButton{border: none; background: transparent;}")
def tripselect(self, button_bool, command):
    """Slot for the trip-selector buttons.

    :param button_bool: checked state of the toggled button
    :param command: trip identifier stored when the button is checked
    """
    print('Trip Selector ' + str(command) + ' is: ' + str(button_bool))
    if button_bool:  # only a newly-checked button changes the selection
        self.trip_selector = command
#### SQL LOGGING FUNCTIONS ####
def SQL_init(self):
    """Create the SQLite tables on first run and restore persisted state.

    Restores lifetime totals from 'lifestat', the single-row 'setup'
    record of trip counters/options, and the buffered 'tripstat'
    telemetry lists into their in-memory mirrors.
    """
    # Ensure tables exist, then update lifeID
    self.sql.execute('CREATE TABLE IF NOT EXISTS lifestat (id integer PRIMARY KEY, '
                     'datetime string, ah_used float, ah_charged float, ahregen float, wh float, whregen float, '
                     'bmsah float, bmsahregen float, bmswh float, bmswhregen float, dist float, cycle int)')  # Cycle int is bool for preserving columns.
    self.sql.execute('CREATE TABLE IF NOT EXISTS tripstat (id integer PRIMARY KEY, '
                     'batt_amps float, batt_volts float, motor_amps float, motor_temp float, '
                     'speed float, motor_rpm float, floop_interval float)')
    self.sql.execute('CREATE TABLE IF NOT EXISTS bmsstat (id integer PRIMARY KEY, '
                     'bms_interval float, bms_amps float, bms_volts float)')
    # Not used, could compute again from tripstat
    # 'interp_interval float, whmi float)')
    self.sql.execute('CREATE TABLE IF NOT EXISTS setup (id integer PRIMARY KEY, '  # Identifier/key
                     'profile integer, assist integer, range_enabled integer, '  # Display/control parameters
                     'ah float, ahregen float, wh float, whregen float, bmsah float, bmsahregen float, '
                     'bmswh float, bmswhregen float, dist float, iter integer, chargestate integer, '
                     'triprange integer, throttleassist integer, batta integer, flux integer)')  # Trip counters
    lfs = []
    # Highest row id (running lifestat key) plus lifetime totals.
    self.sql.execute('select max(id), total(ah_used), total(ah_charged), total(ahregen), total(wh), '
                     'total(whregen), total(dist) from lifestat')
    for i in self.sql:
        lfs.append(i)
    if lfs[0][0] == None:
        lfs[0] = 0  # If new table, else;
    else:
        self.lifestat_iter_ID, self.lifestat_ah_used, self.lifestat_ah_charged, self.lifestat_ahregen, \
        self.lifestat_wh, self.lifestat_whregen, self.lifestat_dist = \
        lfs[0][0], lfs[0][1], lfs[0][2], lfs[0][3], lfs[0][4], lfs[0][5], lfs[0][6]
    stp = []
    self.sql.execute('SELECT * FROM setup')  # Replace into ID = 0 on update
    for i in self.sql:
        stp.append(i)
    if len(stp) > 0:
        # Unpack the single setup row (columns 1..18) into display/trip state.
        # NOTE(review): opt_tripRangeValue appears twice (columns 3 and 15),
        # mirroring the write order in SQL_update_setup -- confirm intended.
        self.profile, self.assist_level, self.opt_tripRangeValue, self.flt_ah, self.flt_ahregen, \
        self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen, self.flt_bmswh, self.flt_bmswhregen, \
        self.flt_dist, self.iter_attribute_slicer, self.chargestate, self.opt_tripRangeValue, self.opt_throttleAssistBool,\
        self.opt_battaValue, self.opt_fluxValue = \
        stp[0][1], stp[0][2], stp[0][3], stp[0][4], stp[0][5], stp[0][6], stp[0][7], stp[0][8], stp[0][9], \
        stp[0][10], stp[0][11], stp[0][12], stp[0][13], stp[0][14], stp[0][15], stp[0][16], stp[0][17], stp[0][18]
    # todo: add bms list stats to new table with this format. Update if-not-exists SQL inits above.
    # Rehydrate the in-memory telemetry lists from the tripstat table.
    self.sql.execute('select * from tripstat')
    for x in self.sql.fetchall():
        self.list_batt_amps.append(x[1])
        self.list_batt_volts.append(x[2])
        self.list_motor_amps.append(x[3])
        self.list_motor_temp.append(x[4])
        self.list_speed.append(x[5])
        self.list_motor_rpm.append(x[6])
        self.list_floop_interval.append(x[7])
    # Get max ID from tripstats: using iter_attribute_slicer
    # self.sql.execute('select max(id) from tripstat')  # Just use self.iter_attribute_slicer instead?
    # ID = self.sql.fetchone()[0]
    # if ID == None:
    #     self.iter_sql_tripID = 0
    # else:
    #     self.iter_sql_tripID = ID
    # self.iter_sql_tripID = [i[0] for i in self.sql][0]
def SQL_update_setup(self):
    """Persist the current display/trip state into the single-row
    'setup' table (always row id 0, via REPLACE)."""
    state = (0, self.profile, self.assist_level, self.opt_tripRangeValue,
             self.flt_ah, self.flt_ahregen, self.flt_wh, self.flt_whregen,
             self.flt_bmsah, self.flt_bmsahregen, self.flt_bmswh,
             self.flt_bmswhregen, self.flt_dist, self.iter_attribute_slicer,
             self.chargestate, self.opt_tripRangeValue,
             self.opt_throttleAssistBool, self.opt_battaValue,
             self.opt_fluxValue)
    self.sql.execute('replace into setup values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                     state)
def SQL_tripstat_upload(self):
    """Store one fast-loop telemetry sample, keyed by the slicer counter.

    Committed every iter_threshold (integration) interval in receive_floop.
    """
    row = (self.iter_attribute_slicer,
           self.floop['Battery_Current'],
           self.floop['Battery_Voltage'],
           self.floop['Motor_Current'],
           self.floop['Motor_Temperature'],
           self.floop['Vehicle_Speed'],
           self.floop['Motor_RPM'],
           self.list_floop_interval[-1])  # most recent loop interval
    self.sql.execute('replace into tripstat values (?,?,?,?,?,?,?,?)', row)
def SQL_bmsstat_upload(self):
    """Store the most recent BMS sample into the 'bmsstat' table.

    Fixes two defects: the original wrote into 'tripstat' (which has 8
    columns, so a 4-value REPLACE would fail) instead of 'bmsstat', and
    bound one-element list slices ([-1:]) rather than scalar values,
    which sqlite3 cannot bind.
    """
    payload = (self.iter_bmsmsg, self.list_bms_interval[-1],
               self.list_bms_amps[-1], self.list_bms_volts[-1])
    self.sql.execute('replace into bmsstat values (?,?,?,?)', payload)
def SQL_lifestat_upload(self):
    """Update or extend the lifetime-statistics table.

    Compares the current Ah counter with the most recent row to decide
    whether a charge happened (new finalized row), a discharge followed a
    finalized row (new open row), or the open row should just be updated.
    """
    # On SOC reset, take Ah and compare to last row to determine if you have charged or discharged.
    # If you have charged, create new row with cycle bool = True to ensure it is preserved and sortable.
    # If you have discharged, and last row was not charged (cycle = True) then it is replaced with updated values.
    # self.sql.execute('SELECT datetime FROM lifestat ORDER BY id DESC LIMIT 1')
    try:  # in case table is new/empty:
        #last_time = self.sql.fetchone()[0]
        current_datetime = datetime.datetime.strftime(datetime.datetime.now(), '%D, %I:%M:%S')
        #dif_time = datetime.datetime.strptime(last_time, '%m/%d/%y, %I:%M:%S') - datetime.datetime.now()
        #delta_time = datetime.timedelta(minutes=5)  # Minimum time between updating lifestat database.
        self.sql.execute('SELECT * FROM lifestat ORDER BY id DESC LIMIT 1')
        lastrow = self.sql.fetchall()[0]
        # NOTE(review): lastrow[3] is the 'ah_charged' column of the
        # lifestat schema; comparing flt_ah (Ah used) against it looks
        # suspicious -- confirm whether lastrow[2] (ah_used) was intended.
        dif_ah = self.flt_ah - lastrow[3]
        if dif_ah < -0.01:  # If difference between current Ah_used and last is negative (+ noise margin)
            # you have just charged. A new row is created, with total Ah charged as negative float.
            # cycle bool = True, to ensure this row is not replaced in discharged elif condition below.
            self.lifestat_iter_ID += 1
            payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, dif_ah, self.flt_ahregen,
                       self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                       self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, True)
            self.sql.execute('insert into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
        elif dif_ah >= -0.01 and lastrow[7] == True:  # If difference is positive, you have discharged.
            # NOTE(review): lastrow[7] is 'bmsah' in the lifestat schema,
            # while the 'cycle' flag is column 12 (as SQL_lifestat_upload_bms
            # reads it) -- this looks like an off-by-index; confirm.
            # If last row was finalized with charge data, create new row.
            self.lifestat_iter_ID += 1
            payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, 0, self.flt_ahregen,
                       self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                       self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, False)
            self.sql.execute('insert into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
        elif dif_ah >= -0.01:
            # If last row was not finalized, update it instead:
            payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, 0, self.flt_ahregen,
                       self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                       self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, False)
            self.sql.execute('replace into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
    except (TypeError, IndexError):  # In case of new/empty table, initialize:
        print('SQL Lifestats empty. Initializing...')
        current_datetime = datetime.datetime.strftime(datetime.datetime.now(), '%D, %I:%M:%S')
        payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, 0, self.flt_ahregen,
                   self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                   self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, False)
        self.sql.execute('insert into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
def SQL_lifestat_upload_bms(self):
    """BMS-driven lifetime-statistics update.

    Uses the last row's 'cycle' flag (column 12) together with the live
    `chargestate` to decide between updating the open row and inserting
    a new one when a charge starts.
    """
    try:
        current_datetime = datetime.datetime.strftime(datetime.datetime.now(), '%D, %I:%M:%S')
        self.sql.execute('SELECT * FROM lifestat ORDER BY id DESC LIMIT 1')
        lastrow = self.sql.fetchall()[0]
        charging = lastrow[12]  # 'cycle' column: last row recorded while charging
        # NOTE(review): lastrow[3] is 'ah_charged' -- see the matching note
        # in SQL_lifestat_upload; confirm the intended column.
        dif_ah = self.flt_ah - lastrow[3]
        if not charging and not self.chargestate:  # if not/weren't charging, update only
            payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, dif_ah, self.flt_ahregen,
                       self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                       self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, False)
            self.sql.execute('replace into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
        elif not charging and self.chargestate:  # if now/weren't charging, iterate and update
            self.lifestat_iter_ID += 1
            payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, 0, self.flt_ahregen,
                       self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                       self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, True)
            self.sql.execute('insert into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
        elif charging and self.chargestate:  # if now/were charging, update only
            payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, 0, self.flt_ahregen,
                       self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                       self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, True)
            self.sql.execute('replace into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
        # NOTE(review): the (charging and not self.chargestate) case --
        # charge just ended -- writes nothing; confirm this is intentional.
    except(TypeError, IndexError):  # In case of new/empty table, initialize:
        print('SQL Lifestats TypeError or IndexError. Is this your first run? Initializing database...')
        current_datetime = datetime.datetime.strftime(datetime.datetime.now(), '%D, %I:%M:%S')
        payload = (self.lifestat_iter_ID, current_datetime, self.flt_ah, 0, self.flt_ahregen,
                   self.flt_wh, self.flt_whregen, self.flt_bmsah, self.flt_bmsahregen,
                   self.flt_bmswh, self.flt_bmswhregen, self.flt_dist, False)
        self.sql.execute('insert into lifestat values (?,?,?,?,?,?,?,?,?,?,?,?,?)', payload)
#### HELPER FUNCTIONS ####
def socreset(self):
    """Re-seed the Ah-used counter from recently averaged pack voltage.

    Averages up to the last 938 voltage samples, maps the result through
    BAC.socmapper to a %SOC estimate, and converts that to Ah used.
    """
    # Clamp the averaging window to the samples actually collected
    # (938 appears to be the sample-buffer cap -- TODO confirm).
    val = min(self.iter, 938)
    # Pack voltage / 21 -> per-cell voltage; presumably 21 is the series
    # cell count -- confirm against vehicle setup.
    self.flt_ah = self.battah * (
            1 - (0.01 * BAC.socmapper(mean(self.list_batt_volts[-val:]) / 21)))  # battah * SOC-used coefficient
def ms(self):  # helper: current wall-clock time in SECONDS with nanosecond resolution (name is historical)
    return time.time_ns() / 1000000000  # time_ns / 1e9 -> seconds, preserving sub-millisecond precision
def gettime(self):  # Advance loop counters and record the elapsed fast-loop interval
    self.iter_attribute_slicer += 1
    self.iter += 1
    self.iter_sql += 1
    #self.iter_bmsmsg += 1
    self.time2 = self.ms()
    # Interval (seconds) since the previous call; consumed by the integrators.
    self.list_floop_interval.append(self.time2 - self.time1)
    #print('gettime:', self.time2 - self.time1)
    self.time1 = self.ms()
    # self.lastfloop = self.floop  # Deprecated
def divzero(self, n, d):
    """Safe division: n / d, or 0 when the denominator is zero/falsy."""
    if d:
        return n / d
    return 0
def get_battwh(self):  # For non Li-NMC or typical lithium, derive curve experimentally.
    # Estimate remaining pack Wh from the Ah-used counter via the
    # interpolated cell discharge curves in BAC, scaled by series*parallel.
    # Many cell experiments are listed on https://lygte-info.dk/,
    # and can be digitzed with https://automeris.io/WebPlotDigitizer/
    if self.flt_ah > 0:
        return BAC.whmap.interp1d(BAC.wh_a2v_map.interp1d(self.flt_ah / self.battparallel))*self.battseries*self.battparallel
    elif self.flt_ah == 0:
        # Fresh pack: full-charge cell voltage of 4.2 V.
        return BAC.whmap.interp1d(4.2)*self.battseries*self.battparallel
    # NOTE(review): negative flt_ah falls through and returns None -- confirm
    # callers tolerate that.
def strfdelta(self, tdelta, fmt):
    """Render a timedelta through a format template.

    `fmt` may reference {days}, {hours}, {minutes} and {seconds}.
    """
    hours, remainder = divmod(tdelta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    fields = {"days": tdelta.days, "hours": hours,
              "minutes": minutes, "seconds": seconds}
    return fmt.format(**fields)
if __name__ == '__main__':
    # Entry point: read setup.csv, build the controller/BMS subprocesses and
    # their Qt signal emitters, wire them to the display window, and start Qt.
    # Logging for debugging Modbus
    #logger = modbus_tk.utils.create_logger("console", level=logging.DEBUG)
    #logging.basicConfig(level='INFO')
    """# Cmdline **kwargs for key vehicle stats # Final release; my defaults --> required= True
    parser = argparse.ArgumentParser(description='Vehicle Settings')
    parser.add_argument('-battseries', '-bs', action='store', required=True, type=int, dest='bs',
                        help='Number of series battery groups')
    parser.add_argument('-battparallel', '-bp', action='store', required=True, type=int, dest='bp',
                        help='Number of parallel battery cells per group')
    parser.add_argument('-battah', '-bah', '-ba', action='store', required=True, type=int, dest='ba',
                        help='Total amp-hours of battery')
    parser.add_argument('-wheelcircumference', '-wheel', '-whl', '-wheelcircum', action='store', default=1927.225,
                        type=float, dest='whl', required=True,
                        help='Circumference of wheel in mm to convert revolutions to speed, distance, range, etc')
    parser.add_argument('-lockpin', '-lp', '-pin', '-lock', action='store', default=0000,
                        type=int, dest='lockpin', help='PIN code to unlock antitheft.')
    parser.add_argument('-speedparse', '-spd', '-sp', action='store_false', default=True, dest='sp',
                        help='Reduce CPU time considerably by assuming v6.++ parameter addresses in fastloop.')
    parser.add_argument('-controlport', '-cpt', '-bacport', action='store', dest='bacport', required=True,
                        type=str, help='Serial port for controller, e.g. /dev/ttyUSB0, /dev/TTYAMA0, COM4, etc')
    parser.add_argument('-bmsport', '-bpt', action='store', dest='bmsport', type=str,
                        help='Serial port for BMS, e.g. /dev/ttyUSB0, /dev/TTYAMA0, COM4, etc')
    args = parser.parse_args()"""
    #BAC = BACModbus.BACModbus(args.bacport)
    # print('args inside of main:', args.bs, args.bp, args.ba, args.whl, args.sp)
    # Vehicle configuration comes from setup.csv next to this script.
    setup = read_setup(os.path.abspath((os.path.dirname(__file__))) + '/setup.csv')  # setup.csv dict
    BAC = BACModbus.BACModbus(setup['cpt'])
    app = QtWidgets.QApplication([])
    # Communication lines:
    window_bms_pipe, bms_process_pipe = Pipe()
    window_bac_pipe, bac_process_pipe = Pipe()
    bmsqueue = Queue()
    bacqueue = Queue()
    #bacThread = BACSerialThread(setup)
    # Emitters bridge the subprocess pipes into Qt signals on the GUI thread.
    BMSEmitter = BMSProcessEmitter(window_bms_pipe)
    BACEmitter = BACProcessEmitter(window_bac_pipe)
    bacProc = BACSerialProcess(setup, bac_process_pipe, bacqueue, BAC)
    bmsProc = BMSSerialProcess(setup['bpt'], bms_process_pipe, bmsqueue)
    #window = AmpyDisplay(setup['battery'][0], setup['battery'][1], setup['battery'][2], setup['wheel'], True, setup['pin'], bmsqueue, processManager)
    #window = AmpyDisplay(args.bs, args.bp, args.ba, args.whl, args.sp, args.lockpin, queue, bmsThread)
    window = AmpyDisplay(setup, bacqueue, bmsqueue, BMSEmitter)
    # todo: setup cfg to enable GPIO e.g. -makerplaneGPIO
    #       Setup cfg for changing units from mph/kph
    #bacThread.bac_msg.connect(window.floopReceive)
    #bacThread.hack_msg.connect(window.receiveHackBACAccessCode)
    # Controller (BAC) messages into the display.
    BACEmitter.bac_msg.connect(window.floopReceive)
    BACEmitter.diag_msg.connect(window.diagnosticsReceive)
    BACEmitter.hack_msg.connect(window.receiveHackBACAccessCode)
    # todo: save received access codes to file
    #       replace this signal regular bac_msg = -33
    #bmsProc.bms_basic_msg.connect(window.receive_bms_basic)
    #bmsProc.bms_eeprom_msg.connect(window.receive_bms_eeprom)
    #bmsProc.bms_exception.connect(window.receive_bms_exception)
    # BMS messages into the display.
    BMSEmitter.bms_exception.connect(window.bmsExceptionReceive)
    BMSEmitter.bms_eeprom_msg.connect(window.bmsReceiveEeprom)
    BMSEmitter.bms_basic_msg.connect(window.bmsReceiveBasic)
    #window.workmsg.connect(processManager.workercommandsetter)
    #window.powercmd.connect(processManager.powercommandsetter)
    #window.fluxcmd.connect(processManager.fluxcommandsetter)
    #window.bmsmsg_bac.connect(processManager.bmsupdatesetter)
    #window.hackaccesscmd.connect(processManager.hackaccesscommandsetter)
    #bacThread.start()
    BMSEmitter.start()
    BACEmitter.start()
    bmsProc.start()
    bacProc.start()
    #bmsProc.join()
    exit(app.exec_())
| cwkowalski/ASI_AmpyDisplay | main.py | main.py | py | 123,655 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "PyQt5.... |
32244067237 | # Python 3.6
"""
Peak handle functions.
Maintainer: Shpakov Konstantin
Link: https://github.com/shpakovkv/SignalProcess
"""
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as pyplot
import os
import sys
import numpy
import bisect
import argparse
import numpy as np
import scipy.integrate as integrate
# import SignalProcess as sp
import arg_parser
import arg_checker
import file_handler
import plotter
from multiprocessing import Pool
from multiplier_and_delay import multiplier_and_delay
from data_types import SinglePeak
pos_polarity_labels = {'pos', 'positive', '+'}
neg_polarity_labels = {'neg', 'negative', '-'}
NOISEATTENUATION = 0.75
SAVETODIR = 'Peaks'
SINGLEPLOTDIR = 'SinglePlot'
MULTIPLOTDIR = 'MultiPlot'
PEAKDATADIR = 'PeakData'
DEBUG = False
PEAKFINDERDEBUG = False
def get_parser():
    """Returns final CL args parser.

    The parser is assembled from the reusable argument-group parsers
    defined in the arg_parser module and supports reading arguments
    from a file via the '@file_with_options' syntax
    (see fromfile_prefix_chars).

    :return: argparse.parser
    """
    p_use = ('python %(prog)s [options]\n'
             ' python %(prog)s @file_with_options')
    # description and epilog are intentionally empty for now
    p_desc = ('')
    p_ep = ('')
    parser = argparse.ArgumentParser(
        parents=[arg_parser.get_input_files_args_parser(),
                 arg_parser.get_mult_del_args_parser(),
                 arg_parser.get_plot_args_parser(),
                 arg_parser.get_peak_args_parser(),
                 arg_parser.get_output_args_parser(),
                 arg_parser.get_utility_args_parser()],
        prog='PeakProcess.py',
        description=p_desc, epilog=p_ep, usage=p_use,
        fromfile_prefix_chars='@',
        formatter_class=argparse.RawTextHelpFormatter
    )
    return parser
def find_nearest_idx(sorted_arr, value, side='auto'):
    """Return the index of the element of *sorted_arr* closest to *value*.

    :param sorted_arr: sorted array/list of ints or floats
    :param value: the int/float number to which the
                  closest element is searched
    :param side: 'left': search among values that are lower than value
                 'right': search among values that are greater than value
                 'auto': handle all values (default)
    :type sorted_arr: array-like
    :type value: int, float
    :type side: str ('left', 'right', 'auto')

    :return: index of the nearest element, or None when the value lies
             outside the array on the excluded side
    :rtype: int

    .. note:: with side='auto' and two equally distant neighbours
              the index of the smaller one is returned.
    """
    pos = bisect.bisect_left(sorted_arr, value)

    # value is at or below the first element
    if pos == 0:
        return pos if side in ('auto', 'right') else None
    # value is above the last element
    if pos == len(sorted_arr):
        return pos if side in ('auto', 'left') else None

    if side == 'auto':
        # pick the closer neighbour; ties go to the smaller one
        gap_up = sorted_arr[pos] - value
        gap_down = value - sorted_arr[pos - 1]
        return pos if gap_up < gap_down else pos - 1
    return pos if side == 'right' else pos - 1
def level_excess(x, y, level, start=0, step=1,
window=0, is_positive=True):
"""Checks if 'Y' values excess 'level' value
for 'X' in range from X(start) to X(start) + window
OR for x in range from X(start) - window to X(start)
:param x: array with X data
:param y: array with Y data
:param level: level value
:param start: start index of data
:param step: step with which elements of the array are traversed
step > 0: checks elements to the RIGHT from start idx
step < 0: checks elements to the LEFT from start idx
:param window: check window width
:param is_positive: the direction of the check
True: checks whether the 'Y' value rises above 'level'
False: checks whether the 'Y' value
comes down below 'level'
:type x: array-like
:type y: array-like
:type level: float, int ''same as param y''
:type start: int
:type step: int
:type window: float, int ''same as param x''
:type is_positive: bool
:return: True and an index of first Y element that are
bigger/lower 'level' OR returns False and
an index of the last checked element
:rtype: tuple ''(bool, int)''
"""
idx = start # zero-based index
if window == 0: # window default value
window = x[-1] - x[start]
while ((idx >= 0) and (idx < len(y)) and
(abs(x[idx] - x[start]) <= window)):
if not is_positive and (y[idx] < level):
# downward
return True, idx
elif is_positive and (y[idx] > level):
# upward
return True, idx
idx += step
return False, idx
def is_pos(polarity):
    """Tells whether the polarity flag denotes positive polarity.

    :param polarity: word denoting polarity
    :type polarity: str

    :return: True if the polarity is positive, otherwise False
    :rtype: bool
    :raises ValueError: if the word is not a known polarity label
    """
    # module-level label sets; 'global' declarations are not needed
    # for read-only access
    label = polarity.lower()
    if label in pos_polarity_labels:
        return True
    if label in neg_polarity_labels:
        return False
    raise ValueError("Wrong polarity value ({})".format(polarity))
def is_neg(polarity):
    """Tells whether the polarity flag denotes negative polarity.

    :param polarity: word denoting polarity
    :type polarity: str

    :return: True if the polarity is negative, otherwise False
    :rtype: bool
    """
    positive = is_pos(polarity)
    return not positive
def check_polarity(curve, time_bounds=(None, None)):
    """Checks whether the curve is mostly positive or negative
    on a certain interval.

    :param curve: curve data
    :param time_bounds: the left and the right boundaries of
                        the specified interval; they are used to slice
                        the data arrays, so they are point indexes
    :type curve: SignalProcess.SingleCurve
    :type time_bounds: tuple, list ''(float, float)''

    :return: the word denoting polarity ('positive' or 'negative')
    :rtype: str
    """
    # default bounds: the whole curve
    if time_bounds[0] is None:
        time_bounds = (0, time_bounds[1])
    if time_bounds[1] is None:
        time_bounds = (time_bounds[0], curve.points)
    # the sign of the integral over the interval defines the polarity
    integr = integrate.trapz(curve.val[time_bounds[0]:time_bounds[1]],
                             curve.time[time_bounds[0]:time_bounds[1]])
    # print("Voltage_INTEGRAL = {}".format(integr))
    if integr >= 0:
        return 'positive'
    return 'negative'
def find_curve_front(curve,
                     level=-0.2,
                     polarity='auto',
                     save_plot=False,
                     plot_name="voltage_front.png"):
    """Find time point (x) of voltage curve edge at specific level
    Default: Negative polarity, -0.2 MV level

    :param curve: curve data
    :param level: amplitude value to find
    :param polarity: the polarity of the curve
    :param save_plot: bool flag
    :param plot_name: plot full file name to save as

    :type curve: SingleCurve
    :type level: float
    :type polarity: str '+'/'pos'/'-'/'neg'/'auto'
    :type save_plot: bool
    :type plot_name: str

    :return: (time, amplitude) or (None, None)
    :rtype: tuple(float, float)
    """
    if polarity=='auto':
        polarity = check_polarity(curve)
        # make the sign of the level match the auto-detected polarity
        if is_pos(polarity):
            level = abs(level)
        else:
            level = -abs(level)

    front_checked, idx = level_excess(curve.time, curve.val, level,
                                      is_positive=is_pos(polarity))
    if front_checked:
        if save_plot:
            # draw the curve and mark the found front point
            pyplot.close('all')
            pyplot.plot(curve.time, curve.val, '-b')
            pyplot.plot([curve.time[idx]], [curve.val[idx]], '*r')
            # pyplot.show()
            folder = os.path.dirname(plot_name)
            if folder != "" and not os.path.isdir(folder):
                os.makedirs(folder)
            pyplot.savefig(plot_name)
            pyplot.close('all')
        return curve.time[idx], curve.val[idx]
    return None, None
def peak_finder(x, y, level, diff_time, time_bounds=(None, None),
                tnoise=None, is_negative=True, graph=False,
                noise_attenuation=0.5):
    """Finds peaks on the curve (x, y).
    Searchs for negative peaks by default.

    :param x: array of time values
    :param y: array of amplitude values
    :param level: peak threshold (all amplitude values
                  below this level will be ignored)
    :param diff_time: the minimum difference between two neighboring peaks.
                      If the next peak is at the front (fall or rise)
                      of the previous peak, and the "distance" from
                      its maximum to that front (at the same level) is less
                      than the diff_time, this second peak will be ignored.
    :param time_bounds: tuple with the left and the right search boundaries
    :param tnoise: maximum half-period of noise fluctuation.
    :param is_negative: specify False for positive curve.
                        for negative curve (by default) the 'y' array will be
                        inverted before process and inverted again at the end.
    :param graph: specify True to display a graph with found peaks
    :param noise_attenuation: Attenuation of the second half-wave
                              with a polarity reversal (noise). If too many
                              noise maxima are defined as real peaks,
                              reduce this value.

    :type x: numpy.ndarray
    :type y: numpy.ndarray
    :type level: float, int ''same as values of param y''
    :type diff_time: float, int ''same as values of param x''
    :type time_bounds: tuple, list ''(float, float)''
    :type tnoise: float, int ''same as values of param x''
    :type is_negative: bool
    :type graph: bool
    :type noise_attenuation: float

    :return: (peaks_list, log) - the list of peaks (SinglePeak instances)
             and the process log
    :rtype: (list, str)
    """
    # print("============ peak_finder ================")
    # print("level : {}\ndiff_time : {}\ntime_bounds : {}\ntnoise : {}\n"
    #       "is_negative : {}\ngraph : {}\nnoise_attenuation : {}\n"
    #       "start_idx : {}\nstop_idx : {}"
    #       "".format(level, diff_time, time_bounds, tnoise,
    #                 is_negative, graph, noise_attenuation,
    #                 start_idx, stop_idx))
    # print("-------------------")

    # Checkout the inputs
    peak_log = ""
    assert level != 0, 'Invalid level value!'

    # internally the search is always for POSITIVE peaks:
    # a negative curve is inverted here and inverted back at the end
    if is_negative:
        y = -y
        level = -level

    if not tnoise:
        # default noise half-period: 4 sampling steps
        tnoise = (x[1] - x[0]) * 4
        peak_log += 'Set "tnoise" to default 4 stops = ' + str(tnoise) + "\n"

    assert len(time_bounds) == 2, ("time_bounds has incorrect number of "
                                   "values. 2 expected, " +
                                   str(len(time_bounds)) + " given.")
    assert len(x) == len(y), ("The length of X ({}) is not equal to the "
                              "length of Y ({}).".format(len(x), len(y)))
    if time_bounds[0] is None:
        time_bounds = (x[0], time_bounds[1])
    if time_bounds[1] is None:
        time_bounds = (time_bounds[0], x[-1])
    start_idx = find_nearest_idx(x, time_bounds[0], side='right')
    stop_idx = find_nearest_idx(x, time_bounds[1], side='left')
    peak_list = []
    if start_idx is None or stop_idx is None:
        # the interval is [start_idx, stop_idx)
        # start_idx is included; stop_idx is excluded
        peak_log += "Time bounds is out of range.\n"
        return peak_list, peak_log

    # sampling step; recovered from the search interval when the
    # first two points share the same X value
    time_delta = 0.0
    if x[0] != x[1]:
        time_delta = x[1] - x[0]
    else:
        time_part = x[stop_idx] - x[start_idx]
        time_delta = time_part / (stop_idx - start_idx - 1)

    diff_idx = int(diff_time / time_delta)
    if PEAKFINDERDEBUG:
        print("Diff_time = {}, Diff_idx = {}".format(diff_time, diff_idx))

    i = start_idx
    while i < stop_idx:
        if y[i] > level:
            max_y = y[i]  # local max (may be real peak or not)
            max_idx = i

            # search for a bigger local max within the diff_time from
            # the found one
            # y[i] == max_y condition is needed
            # for flat-top peaks (wider than diff_time)
            # NOTE(review): 'i <= stop_idx' can reach i == len(y) when
            # stop_idx == len(y) (time_bounds[1] beyond x[-1]) -- looks
            # like a possible IndexError; confirm.
            while (i <= stop_idx and
                   (x[i] - x[max_idx] <= diff_time or
                    y[i] == max_y)):
                if y[i] > max_y:
                    max_y = y[i]
                    max_idx = i
                i += 1
            if PEAKFINDERDEBUG:
                print("local_max = [{:.3f}, {:.3f}] i={}"
                      "".format(x[max_idx], max_y, max_idx))

            # search for a bigger value within the diff_time
            # to the left from the found local maximum
            # if found: this is a little signal fluctuation on the fall edge
            # (not a real peak)
            [is_noise, _] = level_excess(x, y, max_y, start=max_idx,
                                         step=-1, window=diff_time,
                                         is_positive=True)
            if PEAKFINDERDEBUG and is_noise:
                print('Left Excess at x({:.2f}, {:.2f}) '
                      '== Not a peak at fall edge!'.format(x[i], y[i]))

            # search for a polarity reversal within tnose from this local max
            # if found: this is a noise (not a real peak)
            if not is_noise:
                # search to the right from the local max
                [is_noise, j] = level_excess(x, y,
                                             -max_y * noise_attenuation,
                                             start=max_idx, step=1,
                                             window=tnoise,
                                             is_positive=False)
                if PEAKFINDERDEBUG and is_noise:
                    print('Noise to the right x({:.2f}, {:.2f})'
                          ''.format(x[j], y[j]))
                else:
                    # NOTE(review): this 'else' is bound to the debug
                    # print above, so when PEAKFINDERDEBUG is False the
                    # left-side search always runs and OVERWRITES the
                    # right-side is_noise result -- confirm this is
                    # intended.
                    # search to the left from the local max
                    [is_noise, j] = level_excess(x, y,
                                                 -max_y * noise_attenuation,
                                                 start=max_idx, step=-1,
                                                 window=tnoise,
                                                 is_positive=False)
                    if PEAKFINDERDEBUG and is_noise:
                        print('Noise to the left x({:.2f}, {:.2f})'
                              ''.format(x[j], y[j]))

            if not is_noise:
                # all checks passed, the local max is the real peak
                peak_list.append(SinglePeak(x[max_idx], max_y, max_idx))
                continue
        i += 1

    peak_log += 'Number of peaks: ' + str(len(peak_list)) + "\n"

    # LOCAL INTEGRAL CHECK
    # needed for error probability estimation
    di = int(diff_time * 2 // time_delta)  # diff window in index units
    if di > 3:
        for idx in range(len(peak_list)):
            pk = peak_list[idx]
            # square = pk.val * time_delta * di
            square = pk.val * di
            intgr_l = 0
            intgr_r = 0
            peak_log += ("Peak[{:3d}] = [{:7.2f}, {:4.1f}] "
                         "Square factor [".format(idx, pk.time, pk.val))
            if pk.idx - di >= 0:
                intgr_l = integrate.trapz(y[pk.idx-di : pk.idx+1])
                peak_list[idx].sqr_l = intgr_l / square
                peak_log += "{:.3f}".format(intgr_l / square)
            peak_log += " | "
            if pk.idx + di < len(y):  # stop_idx
                intgr_r = integrate.trapz(y[pk.idx: pk.idx + di + 1])
                peak_list[idx].sqr_r = intgr_r / square
                peak_log += "{:.3f}".format(intgr_r / square)
            peak_log += "]"
            peak_log += " ({:.3f})".format((intgr_r + intgr_l) / square)
            peak_log += "\n"
        if peak_list:
            peak_log += "\n"
    # integr_l, integr_r: The closer the value to unity,
    # the greater the probability that the peak is imaginary (erroneous)

    # invert the data (and the found peaks) back for a negative curve
    if is_negative:
        y = -y
        level = -level
        for i in range(len(peak_list)):
            peak_list[i].invert()
    if graph:
        # plotting curve
        pyplot.plot(x[start_idx:stop_idx], y[start_idx:stop_idx], '-',
                    color='#8888bb')
        pyplot.xlim(time_bounds)
        # plotting level line
        pyplot.plot([x[0], x[len(x) - 1]], [level, level], ':',
                    color='#80ff80')
        # marking overall peaks
        peaks_x = [p.time for p in peak_list]
        peaks_y = [p.val for p in peak_list]
        pyplot.scatter(peaks_x, peaks_y, s=50, edgecolors='#ff7f0e',
                       facecolors='none', linewidths=2)
        pyplot.scatter(peaks_x, peaks_y, s=80, edgecolors='#dd3328',
                       facecolors='none', linewidths=2)
        pyplot.show()
    return peak_list, peak_log
def group_peaks(data, window):
    """Groups the peaks from different curves.
    Each group corresponds to one single event (for example:
    one act of X-Ray emission, registered by several detectors).

    :param data: three-dimensional array containing data
                 on all the peaks of all curves
                 The array structure:
                 data[curve_idx][peak_idx] == SinglePeak instance
                 If a curve with curve_idx index has no peaks
                 the data[curve_idx] contains an empty list.
    :param window: peaks coincide when their X values are within
                   +/-window interval from average X (time) position
                   of peak (event). "Average" because X (time) value
                   of a peak (event) may differ from curve to curve.

    :return: peak_data - the three-dimensional array containing data
             on all the peaks (grouped by time) of all curves
             The array structure:
             peak_data[curve_idx][group_idx] == SinglePeak instance if
             this curve has a peak related to this event (group), else None
    """

    def insert_group(peak, peak_data, groups_time,
                     num_peak_in_gr, wf, gr):
        """Inserts new group of peaks to the peak_data array
        at a specific index.

        :param peak: new peak to add
        :param peak_data: the 3-dimensional array with peaks data
        :param groups_time: the list with groups average time
        :param num_peak_in_gr: the list contains
                               the number of peaks in each group
        :param wf: waveform (curve) index
        :param gr: new group index (insert on this index)
        :return: None
        """
        groups_time.insert(gr, peak.time)
        num_peak_in_gr.insert(gr, 1)
        for curve_i in range(len(peak_data)):
            if curve_i == wf:
                peak_data[curve_i].insert(gr, SinglePeak(*peak.data_full))
            else:
                peak_data[curve_i].insert(gr, None)

    def add_pk_to_gr(peak, peak_data, groups_time,
                     num_peak_in_gr, wf, gr):
        """Adds new peak (from another curve) to existing group.
        It is assumed that the group contains None value
        on the place of this peak.

        :param peak: new peak to add
        :param peak_data: the 3-dimensional array with peaks data
        :param groups_time: the list with groups average time
        :param num_peak_in_gr: the list contains
                               the number of peaks in each group
        :param wf: waveform (curve) index
        :param gr: group index to add the peak to
        :return: None
        """
        # the group time is the running average of its peaks' times
        groups_time[gr] = ((groups_time[gr] * num_peak_in_gr[gr] +
                            peak.time) /
                           (num_peak_in_gr[gr] + 1))
        num_peak_in_gr[gr] += 1
        peak_data[wf][gr] = SinglePeak(*peak.data_full)

    # wf == waveform == curve
    # skip first curves if they have no peaks;
    # NOTE: the bounds check must come FIRST (the original short-circuit
    # order 'not data[start_wf] and start_wf < len(data)' raised
    # IndexError when no curve had any peaks)
    start_wf = 0
    while start_wf < len(data) and not data[start_wf]:
        start_wf += 1
    if start_wf == len(data):
        # no peaks at all: one empty group list per curve
        # (covers the old special case of a single empty curve too)
        return [[] for _ in data]

    # 1D array with average time value of peak group
    groups_time = [peak.time for peak in data[start_wf]]
    # 1D array with numbers of peaks in each group
    num_peak_in_gr = [1] * len(groups_time)
    dt = abs(window)
    curves_count = len(data)

    # the 3-dimensional array will contain data
    # on all the peaks (grouped by time)
    peak_data = [[]]
    for peak in data[start_wf]:
        peak_data[0].append(SinglePeak(*peak.data_full))
    for curve_idx in range(0, start_wf):
        peak_data.insert(0, [None] * len(groups_time))
    for curve_idx in range(start_wf + 1, curves_count):
        peak_data.append([None] * len(groups_time))

    if curves_count <= 1:
        return peak_data

    '''---------- making groups of peaks ------------------------------
    two peaks make group when they are close enough
    ('X' of a peak is within +/- dt interval from 'X' of the group)
    with adding new peak to a group,
    the 'X' parameter of the group changes to (X1 + X2 + ... + Xn)/n
    where n - number of peaks in group
    '''
    for wf in range(start_wf + 1, curves_count):
        '''
        wf == waveform index = curve index
        gr == group index (zero-based index of current group)
        pk == peak index (zero-based index of current peak
              in the peak list of current waveform)
        '''
        gr = 0
        pk = 0
        while data[wf] is not None and pk < len(data[wf]):
            '''ADD PEAK TO GROUP
            when curve[i]'s peak[j] is in
            +/-dt interval from peaks of group[gr]
            '''
            if gr < len(groups_time) \
                    and abs(groups_time[gr] - data[wf][pk].time) <= dt:
                if (len(data[wf]) > pk + 1 and
                        (abs(groups_time[gr] - data[wf][pk].time) >
                         abs(groups_time[gr] - data[wf][pk + 1].time))):
                    # next peak of data[wf] matches better
                    # insert new group for current data[wf]'s peak
                    insert_group(data[wf][pk], peak_data, groups_time,
                                 num_peak_in_gr, wf, gr)
                    pk += 1
                elif (len(groups_time) > gr + 1 and
                      (abs(groups_time[gr] - data[wf][pk].time) >
                       abs(groups_time[gr + 1] - data[wf][pk].time))):
                    # current peak matches next group better
                    pass
                else:
                    add_pk_to_gr(data[wf][pk], peak_data, groups_time,
                                 num_peak_in_gr, wf, gr)
                    pk += 1
                if gr == len(groups_time) - 1 and pk < len(data[wf]):
                    # Last peak_data column was filled but there are
                    # more peaks in the data[wf], so adds new group
                    gr += 1

            elif (gr < len(groups_time) and
                  data[wf][pk].time < groups_time[gr] - dt):
                '''INSERT NEW GROUP
                when X-position of current peak of curve[wf] is
                to the left of current group by more than dt
                '''
                insert_group(data[wf][pk], peak_data, groups_time,
                             num_peak_in_gr, wf, gr)
                pk += 1

            elif gr >= len(groups_time) - 1:
                '''APPEND NEW GROUP
                when X-position of current peak of curve[wf] is to the right
                of the last group
                '''
                insert_group(data[wf][pk], peak_data, groups_time,
                             num_peak_in_gr, wf, len(groups_time))
                pk += 1
                gr += 1

            if gr < len(groups_time) - 1:
                gr += 1

    return peak_data
def get_peaks(data, args, verbose):
    """Searches for peaks using parameters from args namespace.

    :param data: SignalsData instance
    :param args: argparse.namespace with arguments
    :param verbose: shows more info during the process

    :return: three-dimensional array containing data
             on all the peaks of curves with index in args.curves list
             The array structure:
             data[curve_idx][peak_idx] == SinglePeak instance
             For the curves not in the args.curves list:
             data[curve_idx] == None
    """
    # None placeholders for the curves that are not processed
    unsorted_peaks = [None] * data.count
    for idx in args.curves:
        if verbose:
            print("Curve #" + str(idx))
        # negative level means the peaks are negative
        new_peaks, peak_log = peak_finder(
            data.time(idx), data.value(idx),
            level=args.level, diff_time=args.pk_diff,
            time_bounds=args.t_bounds, tnoise=args.t_noise,
            is_negative=args.level < 0,
            noise_attenuation=args.noise_att,
            graph=False
        )
        unsorted_peaks[idx] = new_peaks
        if verbose:
            print(peak_log)
    return unsorted_peaks
def check_curves_list(curves, signals_data):
    """Checks that all indexes of the curves to process are in range.

    :param curves: the list of indexes of curves to find peaks for
    :param signals_data: SignalsData instance

    :return: None
    :raises ValueError: if any curve index is out of range
    """
    for curve_idx in curves:
        # raise explicitly instead of using 'assert':
        # assertions are stripped when python runs with -O
        if curve_idx >= signals_data.count:
            raise ValueError("The curve index {} is out of range. The total number "
                             "of curves: {}.".format(curve_idx, signals_data.count))
def global_check(options):
    """Input options global check.
    Returns changed options with converted values.

    :param options: namespace with options
    :return: the same namespace with checked/converted values
    """
    # file import args check
    options = arg_checker.file_arg_check(options)

    # partial import args check
    options = arg_checker.check_partial_args(options)

    # plot args check
    options = arg_checker.plot_arg_check(options)

    # curve labels check
    arg_checker.label_check(options.labels)

    # # data manipulation args check
    # options = arg_checker.data_corr_arg_check(options)
    #
    # # save data args check
    # options = arg_checker.save_arg_check(options)
    #
    # # convert_only arg check
    # options = arg_checker.convert_only_arg_check(options)

    # peak search args check
    options = arg_checker.peak_param_check(options)

    # number of threads check
    options = arg_checker.check_utility_args(options)

    return options
def get_pk_filename(data_files, save_to, shot_name):
    """Compiles the full path prefix for the files with peaks data.

    :param data_files: the list of files with signals data
    :param save_to: the folder to save peaks data to
    :param shot_name: the name of current shot

    :return: full path + prefix for file name with peak data
    """
    base_dir = os.path.dirname(data_files[0])
    return os.path.join(base_dir, save_to, PEAKDATADIR, shot_name)
def get_peak_files(pk_filename):
    """Returns the list of the peak data files.
    If peak files are not found or the folder containing
    peak data is not found, returns [].

    :param pk_filename: full path + prefix of file names with peak data
    :return: list of full paths
    """
    folder = os.path.dirname(pk_filename)
    prefix = os.path.basename(pk_filename)
    if not os.path.isdir(folder):
        return []
    # keep only the .csv files whose name starts with the shot prefix
    all_csv = file_handler.get_file_list_by_ext(folder, '.csv', sort=True)
    return [name for name in all_csv
            if os.path.basename(name).startswith(prefix)]
def read_single_peak(filename):
    """Reads one file containing the data of the peaks.

    :param filename: file with peak (one group of peaks) data
    :return: grouped peaks data with one peak (group)
             peaks[curve_idx][0] == SinglePeak instance if
             this curve has a peak related to this event (group),
             else peaks[curve_idx][0] == None.
    """
    table = numpy.genfromtxt(filename, delimiter=',')
    if table.ndim == 1:
        # single-curve file: turn the 1D record into a one-column table
        table = np.expand_dims(table, axis=1)
    peaks = []
    # rows are peak fields, columns are curves
    for col in range(table.shape[1]):
        peak = SinglePeak(time=table[1, col], value=table[2, col],
                          sqr_l=table[3, col], sqr_r=table[4, col])
        # zero time AND zero amplitude is the "no peak" marker
        if peak.time == 0 and peak.val == 0:
            peaks.append([None])
        else:
            peaks.append([peak])
    return peaks
def read_peaks(file_list):
    """Reads all the files containing the data of the peaks.

    :param file_list: list of files with peak (one group of peaks) data
    :return: grouped peaks data
             peaks[curve_idx][group_idx] == SinglePeak instance if
             this curve has a peak related to this event (group),
             else peaks[curve_idx][group_idx] == None.
    """
    if not file_list:
        return None
    # the first file defines the number of curves
    groups = read_single_peak(file_list[0])
    curves_count = len(groups)
    # every following file appends one group (event) per curve
    for peak_file in file_list[1:]:
        new_group = read_single_peak(peak_file)
        for wf in range(curves_count):  # waveform (curve) index
            groups[wf].append(new_group[wf][0])
    return groups
def renumber_peak_files(file_list, start=1):
    """Checks the file numbering, if the numbering is not continuous
    or does not start from the specified value,
    then renames the files and changes the file_list in place.

    :param file_list: the list of files names
    :param start: the numbering must begin with this value
    :return: None
    """
    n1, n2 = file_handler.numbering_parser(file_list)
    digits = n2 - n1
    short_names = [os.path.basename(name) for name in file_list]
    file_nums = [int(name[n1: n2]) for name in short_names]
    folder = os.path.dirname(file_list[0])  # 'folder' avoids shadowing builtin 'dir'
    name_format = '{prefix}{num:0' + str(digits) + 'd}{postfix}'
    for i, current_num in enumerate(file_nums):
        expected = i + start
        if current_num == expected:
            continue
        new_short = name_format.format(prefix=short_names[i][0: n1],
                                       num=expected,
                                       postfix=short_names[i][n2:])
        new_name = os.path.join(folder, new_short)
        os.rename(file_list[i], new_name)
        file_list[i] = new_name
def do_job(args, shot_idx):
    """Process one shot according to the input arguments:
      - applies multiplier, delay
      - finds peaks
      - groups peaks from different curves by time
      - saves peaks and peak plots
      - re-read peak files after peak plot closed
        (user may delete false-positive peak files
        while peak plot window is not closed)
      - plots and saves user specified plots and multiplots

    :param args: namespace with all input args
    :param shot_idx: the zero-based number of shot to process

    :type args: argparse.Namespace
    :type shot_idx: int

    :return: None
    :raises IndexError: if shot_idx is out of range
    """
    number_of_shots = len(args.gr_files)
    # valid zero-based indexes are [0, number_of_shots - 1];
    # the old check used '> number_of_shots', which let the value
    # 'number_of_shots' itself slip through and fail later on indexing
    if shot_idx < 0 or shot_idx >= number_of_shots:
        raise IndexError("Error! The shot_index ({}) is out of range ({} shots given)."
                         "".format(shot_idx, number_of_shots))
    file_list = args.gr_files[shot_idx]
    verbose = not args.silent
    shot_name = file_handler.get_shot_number_str(file_list[0], args.num_mask,
                                                args.ext_list)

    # get SignalsData
    data = file_handler.read_signals(file_list,
                                     start=args.partial[0],
                                     step=args.partial[1],
                                     points=args.partial[2],
                                     labels=args.labels,
                                     units=args.units,
                                     time_unit=args.time_unit)
    if verbose:
        print("The number of curves = {}".format(data.count))

    # checks the number of columns with data,
    # and the number of multipliers, delays, labels
    args.multiplier = arg_checker.check_multiplier(args.multiplier,
                                                   count=data.count)
    args.delay = arg_checker.check_delay(args.delay,
                                         count=data.count)
    arg_checker.check_coeffs_number(data.count, ["label", "unit"],
                                    args.labels, args.units)

    # multiplier and delay
    data = multiplier_and_delay(data,
                                args.multiplier,
                                args.delay)

    # find peaks (only when a peak threshold level was given)
    peaks_data = None
    if args.level:
        if verbose:
            print('LEVEL = {}'.format(args.level))
        check_curves_list(args.curves, data)
        if verbose:
            print("Searching for peaks...")
        unsorted_peaks = get_peaks(data, args, verbose)

        # step 7 - group peaks [and plot all curves with peaks]
        peaks_data = group_peaks(unsorted_peaks, args.gr_width)

        # step 8 - save peaks data
        if verbose:
            print("Saving peak data...")
        # full path without peak number and extension:
        pk_filename = get_pk_filename(file_list,
                                      args.save_to,
                                      shot_name)
        file_handler.save_peaks_csv(pk_filename, peaks_data, args.labels)

        # step 9 - save multicurve plot
        multiplot_name = pk_filename + ".plot.png"
        if verbose:
            print("Saving all peaks as " + multiplot_name)
        fig = plotter.plot_multiplot(data, peaks_data, args.curves,
                                     xlim=args.t_bounds, hide=args.peak_hide)
        pyplot.savefig(multiplot_name, dpi=300)
        if args.peak_hide:
            pyplot.close(fig)
        else:
            pyplot.show()

    # re-read peak files (the user may have deleted false-positive
    # peak files while the plot window was open)
    if args.read:
        if verbose:
            print("Reading peak data...")
        pk_filename = get_pk_filename(file_list,
                                      args.save_to,
                                      shot_name)
        peak_files = get_peak_files(pk_filename)
        peaks_data = read_peaks(peak_files)
        renumber_peak_files(peak_files)

    # plot preview and save
    if args.plot:
        plotter.do_plots(data, args, shot_name,
                         peaks=peaks_data, verbose=verbose,
                         hide=args.p_hide)

    # plot and save multi-plots
    if args.multiplot:
        plotter.do_multiplots(data, args, shot_name,
                              peaks=peaks_data, verbose=verbose,
                              hide=args.mp_hide)
def main():
    """Parses CL arguments, checks them and processes all the shots
    (in parallel via a process pool)."""
    parser = get_parser()

    # # for debugging
    # file_name = '/home/shpakovkv/Projects/PythonSignalProcess/untracked/args/peak_20150515N99.arg'
    # with open(file_name) as fid:
    #     file_lines = [line.strip() for line in fid.readlines()]
    # args = parser.parse_args(file_lines)

    args = parser.parse_args()
    verbose = not args.silent

    # try:
    args = global_check(args)

    '''
    num_mask (tuple) - contains the first and last index
    of the substring of the filename.
    That substring contains the shot number.
    The last idx is excluded: [first, last).
    Read numbering_parser docstring for more info.
    '''
    num_mask = file_handler.numbering_parser([files[0] for
                                              files in args.gr_files])
    # store the mask in the namespace so that worker processes get it
    args_dict = vars(args)
    args_dict["num_mask"] = num_mask

    if args.hide_all:
        # by default backend == Qt5Agg
        # savefig() time for Qt5Agg == 0.926 s
        #                for Agg == 0.561 s
        # for single curve with 10000 points and one peak
        # run on Intel Core i5-4460 (average for 100 runs)
        # measured by cProfile
        matplotlib.use("Agg")

    # MAIN LOOP
    import time
    start_time = time.time()
    if (args.level or
            args.read):
        # for shot_idx in range(len(args.gr_files)):
        #     do_job(args, shot_idx)
        with Pool(args.threads) as p:
            p.starmap(do_job, [(args, shot_idx) for shot_idx in range(len(args.gr_files))])
    stop_time = time.time()
    # arg_checker.print_duplicates(args.gr_files)
    print()
    print("--------- Finished ---------")
    spent = stop_time - start_time
    units = "seconds"
    if spent > 3600:
        spent /= 3600
        units = "hours"
    elif spent > 60:
        spent /= 60
        units = "minutes"
    print("--- Time spent: {:.2f} {units} for {n} shots ---".format(spent, units=units, n=len(args.gr_files)))
# Script entry point.
if __name__ == '__main__':
    main()
# TODO: cl description
# TODO: test refactored PeakProcess
# TODO: refactor verbose mode
| shpakovkv/SignalProcess | scripts/PeakProcess.py | PeakProcess.py | py | 37,640 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "arg_parser.get_input_files_args_parser",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "arg_parser.get_mult_del_args_parser",
"line_number": 59,
"usage_type": "ca... |
1538871246 | import logging
import numpy as np
from src.Utils.Types.VolType import VolType
__author__ = 'frank.ma'
logger = logging.getLogger(__name__)
class SABRModel(object):
def __init__(self, t: float, alpha: float, beta: float, nu: float, rho: float):
self.t = t
self.alpha = alpha
self.beta = beta
self.nu = nu
self.rho = rho
self.abs_tol = 1e-12
def sim_fwd_den(self, forward: float, rel_bounds: tuple = (0.01, 20.0), n_bins: int = 500, n_steps: int = 100,
n_scenarios: int = 10 ** 6):
taus = np.linspace(self.t, 0.0, num=n_steps)
bins = np.linspace(rel_bounds[0] * forward, rel_bounds[1] * forward, num=n_bins)
# 1st, simulate forwards
forwards = np.full(n_scenarios, forward)
sigmas = np.full(n_scenarios, self.alpha)
mean = [0.0, 0.0]
correlation = [[1.0, self.rho], [self.rho, 1.0]]
for idx, tau in enumerate(taus[1:]):
dt = taus[idx] - tau
sqrt_dt = np.sqrt(dt)
rands = np.random.multivariate_normal(mean, correlation, size=n_scenarios)
forwards += sigmas * (forwards ** self.beta) * rands[:, 0] * sqrt_dt
# use lognormal transform to avoid negative volatility
sigmas *= np.exp(-0.5 * (self.nu ** 2) * dt + self.nu * rands[:, 1] * sqrt_dt)
# 2nd, analyse the density
freq, bins = np.histogram(forwards, bins=bins, normed=True)
bins_mid = 0.5 * (bins[:-1] + bins[1:])
return freq, bins_mid
def _calc_z(self, forward, strike):
return self.nu / self.alpha * np.log(forward / strike) * ((forward * strike) ** ((1.0 - self.beta) / 2.0))
def _calc_z_norm(self, forward, strike):
return self.nu / self.alpha * (forward - strike)
def _calc_x(self, z):
return np.log((np.sqrt(1.0 - 2.0 * self.rho * z + z ** 2) + z - self.rho) / (1.0 - self.rho))
def calc_vol(self, forward: float, strike: float, vol_type: VolType) -> float:
raise NotImplementedError('unexpected call of abstract method')
def calc_vol_vec(self, forward: float or np.array, strikes: np.array or float, vol_type: VolType) -> np.array:
raise NotImplementedError('unexpected call of abstract method')
def calc_fwd_den(self, forward: float, rel_bounds: tuple = (0.01, 20.0), n_bins: int = 500):
raise NotImplementedError('unexpected call of abstract method')
@staticmethod
def solve_alpha(forward: float, vol_atm: float, t: float, beta: float, nu: float, rho: float) -> float:
raise NotImplementedError('unexpected call of abstract method')
| frankma/Finance | src/SABRModel/SABRModel.py | SABRModel.py | py | 2,640 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.full",
"line... |
3356348223 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy.orm import sessionmaker
from scrapy.exceptions import DropItem
from minimalist_scrapy.models import (
Quote, Author, Tag, db_connect, create_table
)
import logging
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class MinimalistScrapyPipeline:
    """Default no-op pipeline stage: passes every item through unchanged."""

    def process_item(self, item, spider):
        """Return the item untouched (placeholder hook)."""
        return item
class DuplicatesPipeLine:
    """Drops quote items whose text is already stored in the database."""

    def __init__(self):
        """
        Initialize database connection,
        create tables if they do not exist yet,
        and build the session factory.
        """
        engine = db_connect()
        create_table(engine)
        self.session = sessionmaker(bind=engine)
        logging.info(
            "****DuplicatesPipeLine: database connection established****"
        )

    def process_item(self, item, spider):
        """
        Drop the item when a quote with the same text already exists,
        otherwise pass it on to the next pipeline stage.

        :raises DropItem: when the quote text is already in the database
        """
        session = self.session()
        try:
            quote_exists = session.query(
                Quote
            ).filter_by(text=item.get('text')).first()
        finally:
            # always release the session: the original version leaked it
            # on both the duplicate path (close after raise was
            # unreachable) and the pass-through path (close after return)
            session.close()
        if quote_exists is not None:
            # (fixed a stray 'f' that leaked into the original message)
            raise DropItem(f"Duplicate item found: {item.get('text')}")
        return item
class SaveQuotesPipeline(object):
    """Persist scraped quotes (with author and tags) to the database."""

    def __init__(self):
        """
        Initialize the database connection and session factory,
        creating the tables on first run.
        """
        engine = db_connect()
        create_table(engine)
        self.session = sessionmaker(bind=engine)
        logging.info("****SaveQuotePipeline: database connected")

    def process_item(self, item, spider):
        """
        Save the quote to the database, reusing existing Author and Tag
        rows when present.  Called for every item pipeline component.
        """
        session = self.session()
        quote = Quote()
        author = Author()
        author.name = item.get('name')
        author.birthday = item.get('birthday')
        author.born_location = item.get('born_location')
        author.bio = item.get('bio')
        quote.text = item.get('text')
        # Reuse the stored author when one with the same name exists.
        author_exists = session.query(
            Author
        ).filter_by(name=author.name).first()
        if author_exists is not None:
            quote.author = author_exists
        else:
            quote.author = author
        if 'tags' in item:
            for tag_name in item.get('tags', []):
                # Reuse stored tags; otherwise create a fresh one.
                tag = Tag(name=tag_name)
                tag_exists = session.query(
                    Tag
                ).filter_by(name=tag_name).first()
                if tag_exists is not None:
                    tag = tag_exists
                quote.tags.append(tag)
        try:
            session.add(quote)
            session.commit()
        except Exception:
            # Roll back the failed transaction before re-raising
            # (was a bare ``except:``, which also trapped SystemExit).
            session.rollback()
            raise
        finally:
            session.close()
        return item
| pace-noge/minimalist-scrapy | minimalist_scrapy/pipelines.py | pipelines.py | py | 2,995 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "minimalist_scrapy.models.db_connect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "minimalist_scrapy.models.create_table",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 30,
"usage_type": ... |
9625126741 | import uuid
import json
import logging
import sqlite3
import threading
import time
from datetime import datetime
from .const import (LIST_TYPE_CHANNEL_BRAND,
LIST_MODEL_DEVICE_BRAND, NAME_TYPE_CHANNEL, TYPE_DEVICE)
from .config import DATABASE
LOGGER = logging.getLogger("Database")
DB_VERSION = 0x0001
class DbInterface(object):
"""docstring fs DbInterface."""
def __init__(self, app=None):
    """Open the SQLite database, build the shared cursor/lock and cache
    the security rules.

    ``app`` is currently unused.  ``check_same_thread=False`` allows the
    connection to be shared across threads; the ``threading.Lock`` below
    serialises all cursor access instead.
    """
    self._db = sqlite3.connect(DATABASE, check_same_thread=False)
    self._cursor = self._db.cursor()
    self._lock = threading.Lock()
    self.clear_notifi()
    # Cache the singleton security rules by their ``type`` code:
    # 1=AlarmOn, 2=AlarmOff, 3=AtHome, 4=SOS (see update_rule_alarm).
    self._alarm_on = self.get_rule_alarm(1)
    self._alarm_off = self.get_rule_alarm(2)
    self._athome = self.get_rule_alarm(3)
    self._sos = self.get_rule_alarm(4)
    # Door-reminder rule cache and live door-sensor state (see
    # data_init / check_door_open).
    self._door_reminder = {}
    self._door_sensor = {}
    self.data_init()
def __enter__(self):
    # Support ``with DbInterface() as db:`` usage.
    return self
def __exit__(self, exc_type, exc_value, traceback):
    # Commit and close the connection when the ``with`` block exits.
    self.close()
def close(self):
    '''Commit pending changes, then close cursor and connection.
    Always remember to close properly for changes to be saved.'''
    if self._db:
        self._db.commit()
        self._cursor.close()
        self._db.close()
def execute(self, *args, **kwargs):
    """Thread-safe wrapper around ``cursor.execute``.

    Returns the cursor on success, or ``None`` when the statement fails
    (the error is logged, not propagated -- callers must handle None).
    """
    try:
        self._lock_acquire()
        return self._cursor.execute(*args, **kwargs)
    except Exception as e:
        # Log through the module logger instead of a bare print() so
        # failures show up with the rest of the "Database" log output.
        LOGGER.error('Execute error: %s', e)
    finally:
        self._lock_release()
def executemany(self, *args, **kwargs):
    """Thread-safe wrapper around ``cursor.executemany``.

    Returns the cursor on success, or ``None`` on failure (error is
    logged, not propagated).
    """
    try:
        self._lock_acquire()
        return self._cursor.executemany(*args, **kwargs)
    except Exception as e:
        # Logger instead of print(), consistent with the module LOGGER.
        LOGGER.error('Executemany error: %s', e)
    finally:
        self._lock_release()
def _lock_acquire(self):
    """Block until the shared DB lock is held (serialises cursor use)."""
    LOGGER.debug('Acquire Lock on device %s', self)
    r = self._lock.acquire(True)
    if not r:
        # Logger instead of print(), consistent with the module LOGGER.
        LOGGER.error('Failed to acquire Lock on device %s', self)
def _lock_release(self):
    """Release the shared DB lock if it is actually held."""
    LOGGER.debug('Release Lock on device %s', self)
    if not self._lock.locked():
        # Logger instead of print(), consistent with the module LOGGER.
        LOGGER.error('Device Lock not locked for device %s !', self)
    else:
        self._lock.release()
def _fetchall(self, table):
    '''Return every row of *table* as a list of tuples (None on error).

    NOTE(review): the table name is interpolated via str.format -- safe
    only for trusted, internal table names.
    '''
    try:
        return self.execute("SELECT * FROM {};".format(table)).fetchall()
    except Exception as e:
        LOGGER.error('Fetchall error %s', e)
def _fetchall_col(self, table, column=None):
    '''Return the given *column* value(s) of every row of *table*
    (None on error).  *column* may be a comma-separated list.'''
    try:
        return self.execute("SELECT {} FROM {};".format(column, table)).fetchall()
    except Exception as e:
        LOGGER.error('Fetchall col %s', e)
def _fetchone(self, table, column=None, value=None):
    '''Return the first row of *table* where *column* equals *value*,
    or None when nothing matches or the query fails.

    NOTE(review): values are interpolated with str.format, not bound
    parameters -- only safe for trusted values.
    '''
    try:
        return self.execute("SELECT * FROM {} WHERE {}='{}';".format(table, column, value)).fetchone()
    except Exception as e:
        LOGGER.error('Fetchone error %s', e)
def _fetch_by_col(self, table, column=None, value=None):
    '''Return every row of *table* where *column* equals *value*
    (None on error).'''
    try:
        return self.execute("SELECT * FROM {} WHERE {}='{}';".format(table, column, value)).fetchall()
    except Exception as e:
        LOGGER.error('Fetchone col error %s', e)
def _fetch_multi(self, table, column1, column2, condi1, condi2):
    '''Rows of *table* where column1='condi1' AND column2=condi2.

    NOTE(review): the second condition is interpolated unquoted, so it
    only matches numeric values -- confirm against call sites.
    '''
    try:
        return self.execute("SELECT * FROM {} WHERE {}='{}' and {}={};".format(table, column1, condi1, column2, condi2)).fetchall()
    except Exception as e:
        LOGGER.error('Fetch multi col %s', e)
def _add_new(self, table, column, value, save=True):
    '''INSERT OR IGNORE *value* into *table* and return the new rowid.

    table  -- table name
    column -- comma-separated column list
    value  -- tuple of values; its repr() forms the VALUES clause
    save   -- commit immediately when True

    NOTE(review): relying on the tuple repr() is fragile for values
    containing quotes; bound parameters would be safer.
    '''
    query = "INSERT OR IGNORE INTO {} ({}) VALUES {};".format(
        table, column, value)
    try:
        LOGGER.debug('ADD NEW DATA: %s', query)
        ex = self.execute(query)
        if save and ex:
            self._db.commit()
        # When execute() failed, ex is None and the AttributeError below
        # is swallowed by the except clause (method returns None).
        return ex.lastrowid
    except Exception as e:
        LOGGER.error('Insert error: %s', e)
def _update_one(self, table, column, value, id):
    '''Set a single column of one row.

    table  -- table name
    column -- column to update
    value  -- new value
    id     -- primary-key id of the row

    Always returns None; errors are logged, not raised.'''
    try:
        query = "UPDATE {} SET {}='{}' WHERE id='{}';".format(
            table, column, value, id)
        self.execute(query)
        self._db.commit()
    except Exception as e:
        LOGGER.error('UPDATE one error: %s', e)
def _update_all(self, table, columns, value, id):
    '''Overwrite several columns of one row.

    table   -- table name
    columns -- list of column names
    value   -- list of new values, parallel to ``columns``
    id      -- primary-key id of the row
    '''
    try:
        # The original body referenced an undefined name ``values``
        # (the parameter is ``value``), raising NameError on every call.
        query = "UPDATE %s SET " % table
        for col, val in zip(columns, value):
            query += ("%s='%s', " % (col, val))
        query = query[:-2] + (" WHERE id = %s" % (id))
        self.execute(query)
        self._db.commit()
    except Exception as e:
        LOGGER.error('UPDATE all error: %s', e)
def _update_one_col(self, table, column, value, col_condition, value_condition):
    '''Set *column* to *value* for every row where *col_condition*
    equals *value_condition*.

    table           -- table name
    column          -- column to update
    value           -- new value
    col_condition   -- column used in the WHERE clause
    value_condition -- value the WHERE column must match
    '''
    try:
        query = "UPDATE {} SET {}='{}' WHERE {}='{}';".format(
            table, column, value, col_condition, value_condition)
        if self.execute(query):
            self._db.commit()
    except Exception as e:
        LOGGER.error('UPDATE one col error: %s', e)
def _get_id(self, table, value):
    # First column of the row whose ``id`` equals *value*.
    # NOTE(review): raises TypeError when no row matches (fetch is None).
    return self._fetchone(table, 'id', value)[0]
def _remove(self, table, column, value):
    '''DELETE rows of *table* where *column* equals *value*.
    Returns True on success, False on error.'''
    try:
        query = "DELETE FROM {} WHERE {}='{}';".format(
            table, column, value)
        if self.execute(query):
            self._db.commit()
            return True
    except Exception as e:
        LOGGER.error("REMOVE row in table error : %s", e)
        return False
def _remove_muti(self, table, column1, column2, value1, value2):
    '''DELETE rows matching two column conditions (AND).
    Returns True on success, False on error.'''
    try:
        query = "DELETE FROM {} WHERE {}='{}' AND {}='{}';".format(
            table, column1, value1, column2, value2)
        if self.execute(query):
            self._db.commit()
            return True
    except Exception as e:
        LOGGER.error("REMOVE muti row in table error : %s", e)
        return False
def _to_dict(self, value):
    """Deserialize a JSON column value; empty/NULL maps to None."""
    return json.loads(value) if value else None
def data_init(self):
    # Cache the DoorReminder rule (type code 5) for check_door_open().
    id = self._fetchone("rules","type",5)[0]
    self._door_reminder = self.get_rule(id=id)
##### ADD_NEW DEVICE #######
def _save_device(self, device):
    """Insert (or refresh) a device and its channels from a zigate
    discovery payload and return the stored device+channel dict.

    Fixes two broken log calls: one used '%' with no format argument,
    the other passed ``exc_info=True`` where a '%s' argument was due.
    """
    try:
        # id[0] -> new device row id, id[1] -> new channel row id.
        id = [str(uuid.uuid4()), str(uuid.uuid4())]
        addr = device['addr']
        detail = device['info']
        ieee = detail.get("ieee", ' ')
        device_id = self._fetchone("devices", "ieee", ieee)
        channel_id = self._fetchone("channels", "ieee", ieee)
        if device_id and channel_id:
            # Device and channels already stored: nothing to insert.
            LOGGER.debug('Dumplicate devices no need to do')
            return self.get_device_channel(id=device_id[0])
        elif device_id and channel_id is None:
            # Device row exists but channels are missing: re-create them.
            LOGGER.debug('Update channel info :%s', device_id)
            self._add_new_channels(
                device_id[0], id[1], ieee, device.get('endpoints', []))
            self._db.commit()
            return self.get_device_channel(id=device_id[0])
        else:
            LOGGER.debug('Create new device')
            self._add_new(
                "devices",
                "id, ieee, addr, discovery, generictype, ids, bit_field, "
                "descriptor_capability, lqi, mac_capability, "
                "manufacturer_code, power_type, server_mask, "
                "rejoin_status, created, updated ,last_seen",
                (id[0], ieee, addr,
                 device.get("discovery", "no"),
                 device.get("generictype", "no"),
                 detail.get("id", 0), detail.get("bit_field", 0),
                 detail.get("descriptor_capability", 0),
                 detail.get("lqi", 0),
                 detail.get("mac_capability", 0),
                 detail.get("manufacturer_code", 0),
                 detail.get("power_type", 0),
                 detail.get("server_mask", 0),
                 int(detail.get("rejoin_status", 0)),
                 int(time.time()), int(time.time()), int(time.time())))
            self._add_new_channels(id[0], id[1], ieee, device['endpoints'])
            self._db.commit()
            return self.get_device_channel(id=id[0])
    except Exception:
        LOGGER.debug('Add new device error', exc_info=True)
def _add_new_channels(self, device_id, channel_id, ieee, enpoints):
    """Store all endpoints/clusters/attributes of a device, deriving the
    device/channel type from the Basic cluster model string (0/5) and
    wiring IAS Zone channels (cluster 1280) into the security rules.
    """
    config = ""
    zone_id = self.generate_zone_id()
    for endpoint in enpoints:
        self._add_new("channels", "id, ieee, endpoint_id, type, config, profile_id, device_type, in_clusters, out_clusters,zone_id,zone_status, created, updated, favorite,notification,device_id", (channel_id, ieee,
                      endpoint['endpoint'], 0, config, endpoint['profile'], endpoint['device'], json.dumps(endpoint['in_clusters']), json.dumps(endpoint['out_clusters']), zone_id, 1, int(time.time()), int(time.time()), 0, 0, device_id))
        for cluster in endpoint['in_clusters']:
            self._add_new("clusters", "ieee, endpoint_id, cluster",
                          (ieee, endpoint['endpoint'], cluster))
        list_status = {}
        type_channel = None
        for cluster in list(endpoint['clusters']):
            for attribute in cluster['attributes']:
                # Basic cluster (0), attribute 5 = model identifier.
                if int(cluster['cluster']) == 0 and int(attribute['attribute']) == 5:
                    model = attribute.get('value', None)
                    if model:
                        self.set_device_type(model, device_id)
                        type_channel = self.set_type_channels(model, channel_id)
                        self._db.commit()
                        self._add_new("attributes", "ieee,endpoint_id,cluster,attribute,expire,data,name,type,value", (ieee, endpoint['endpoint'], cluster['cluster'], attribute['attribute'],
                                      attribute.get('expire', 0), attribute.get('data', None), attribute.get('name', None), attribute.get('type', None), attribute.get('value', None)))
                    else:
                        # No model string: drop the half-known device.
                        self.remove_device(device_id)
                        break
                # IAS ZONE
                elif int(cluster['cluster']) == 1280 and attribute.get('name', None) == "zone_status":
                    value = attribute.get('value', None)
                    alarm_status = {"alarm1": int(value['alarm1']), "alarm2": int(value['alarm2']), "tamper": int(value['tamper']), "low_battery": int(value['low_battery']), "supervision": int(value['supervision']),
                                    "restore": int(value['restore']), "trouble": int(value['trouble']), "ac_fault": int(value['ac_fault']), "test_mode": int(value['test_mode']),
                                    "battery_defect": int(value['battery_defect']), "armed": int(value['armed']), "disarmed": int(value['disarmed']), "athome": int(value['athome'])}
                    self._add_new("attributes", "ieee, endpoint_id, cluster, attribute, zone_status,name, type", (
                        ieee, endpoint['endpoint'], cluster['cluster'], attribute['attribute'], json.dumps(alarm_status), attribute['name'], attribute['type']))
                    self.set_status_channel(alarm_status, channel_id, type_channel)
                    self.add_device_to_rule_secure(channel_id, type_channel, ieee)
                else:
                    # 1026 = temperature, 1029 = humidity, 6 = on/off.
                    if int(cluster['cluster']) == 1026:
                        list_status["temperature"] = attribute.get(
                            'value', None)
                    elif int(cluster['cluster']) == 1029:
                        list_status["humidity"] = attribute.get(
                            'value', None)
                    elif int(cluster['cluster']) == 6:
                        list_status["onoff"] = attribute.get('value', None)
                    else:
                        pass
                    # OTHER DEVICE
                    self._add_new("attributes", "ieee,endpoint_id,cluster,attribute,expire,data,name,type,value", (ieee, endpoint['endpoint'], cluster['cluster'], attribute['attribute'],
                                  attribute.get('expire', 0), attribute.get('data', None), attribute.get('name', None), attribute.get('type', None), attribute.get('value', None)))
        if not list_status:
            pass
        else:
            self.set_status_channel(list_status, channel_id, type_channel)
##### LOAD DEVICE ##########
def _load_device(self):
    """Rebuild the zigate device payloads (device info + endpoints +
    clusters + attributes) from the DB, e.g. for restoring state.

    NOTE(review): the numeric column indexes below assume the concrete
    "devices"/"channels"/"attributes" schemas -- verify against the
    table definitions.
    """
    try:
        devices = []
        for ieee in self._fetchall("devices"):
            device = {}
            device["addr"] = ieee[2]
            device["discovery"] = ieee[4]
            device["generictype"] = ieee[11]
            device["info"] = {"addr": ieee[2], "id": ieee[12], "bit_field": ieee[13], "descriptor_capability": ieee[14],
                              "ieee": ieee[3], "last_seen": ieee[24], "lqi": ieee[15], "mac_capability": ieee[16], "manufacturer_code": ieee[17],
                              "power_type": ieee[18], "server_mask": ieee[20], "rejoin_status": ieee[21]}
            enpoints = []
            for endt in self._fetch_by_col("channels", 'ieee', ieee[3]):
                enpoint = {}
                enpoint["device"] = endt[8]
                enpoint["endpoint"] = endt[3]
                enpoint["in_clusters"] = json.loads(endt[9])
                enpoint["out_clusters"] = json.loads(endt[10])
                enpoint["profile"] = endt[7]
                clusters = []
                for clu in json.loads(endt[9]):
                    cluster = {}
                    cluster["cluster"] = clu
                    attributes = []
                    for cl in self._fetch_multi("attributes", "ieee", "cluster", ieee[3], clu):
                        attribute = {}
                        attribute["attribute"] = cl[3]
                        attribute["expire"] = cl[4]
                        attribute["name"] = cl[7]
                        attribute["type"] = cl[8]
                        # IAS zone_status rows store JSON in col 5.
                        if cl[7] == "zone_status":
                            res = json.loads(cl[5])
                            attribute["value"] = res
                            attribute["data"] = res
                        else:
                            attribute["data"] = cl[6]
                            attribute["value"] = cl[9]
                        attributes.append(attribute)
                    cluster["attributes"] = attributes
                    clusters.append(cluster)
                enpoint["clusters"] = clusters
                enpoints.append(enpoint)
            device["endpoints"] = enpoints
            devices.append(device)
        return devices
    except Exception as e:
        LOGGER.debug('Load device to zigate error:',exc_info=True)
###### HOMEGATE ##########
def get_homegate_info(self):
    """Return the singleton homegate row as a dict (None when empty)."""
    self._db.row_factory = lambda c, r: dict(
        [(column[0], r[idx]) for idx, column in enumerate(c.description)])
    query = "SELECT id,name,model,serial, ip_local, ip_public, zig_version, sw_version, config, updated, last_update FROM homegate"
    # The original indexed the cursor returned by execute(); the row has
    # to be fetched first.
    hg = self.execute(query).fetchone()
    if hg is None:
        return None
    data = {"id": hg[0], "name": hg[1], "model": hg[2], "serial_number": hg[3], "ip_local": hg[4], "ip_public": hg[5],
            "zig_version": hg[6], "sw_version": hg[7], "config": self._to_dict(hg[8]), "updated": hg[9], "last_update": hg[10]}
    return data
def set_homegate_entity(self):
    # Despite the name, this *reads* the (single) homegate row.
    return self._fetchall("homegate")[0]
def get_homegate_info_all(self):
    """Return every column of the singleton homegate row as a dict.

    NOTE(review): the column indexes assume the homegate schema used by
    add_homegate_info; raises IndexError when the table is empty.
    """
    try:
        hg = self._fetchall("homegate")[0]
        data = {"id": hg[0], "site": hg[1], "name": hg[2], "token": hg[3], "wan_mac": hg[4], "wwan_mac": hg[5], "ip_local": hg[6], "ip_public": hg[7], "model": hg[8], "serial": hg[9],
                "zig_version": hg[12], "sw_version": hg[13], "hw_version": hg[14], "state": hg[10], "config": self._to_dict(hg[11]), "created": hg[15], "updated": hg[16], "last_update": hg[17], "last_seen": hg[18]}
        return data
    except Exception as e:
        print(e)
def add_homegate_info(self, data):
    """Replace the singleton homegate row with *data*'s fields.

    Returns True on success, the exception object on failure
    (legacy contract kept for existing callers).
    """
    try:
        # sqlite3's execute() runs a single statement, and the original
        # combined an invalid "DELETE * from" with the INSERT (and used
        # 17 placeholders for 16 values). Run two valid statements.
        self.execute("DELETE FROM homegate;")
        query = """INSERT OR IGNORE INTO homegate(id,name,site,wan_mac,wwan_mac,ip_local,
        ip_public,model,serial,state,config,zig_version,hw_version,sw_version,
        updated,last_seen) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
        """
        self.execute(query, (data.id, data.name, data.site, data.wan_mac,
                             data.wwan_mac, data.ip_local, data.ip_public,
                             data.model, data.serial, data.state,
                             json.dumps(data.config), data.zig_version,
                             data.hw_version, data.sw_version,
                             int(time.time()), int(time.time())))
        self._db.commit()
        return True
    except Exception as e:
        LOGGER.error('Add homegate info error: %s', e)
        return e
def update_homegate_info(self, colum, value, id):
    """Set one homegate column; True on success, the exception on error."""
    try:
        query = "UPDATE homegate SET {}='{}' WHERE id='{}'".format(
            colum, value, id)
        self.execute(query)
        self._db.commit()
        return True
    except Exception as e:
        # The original had an unreachable print() after the return.
        LOGGER.error('Update homegate info error: %s', e)
        return e
def update_total_homegate_db(self):
    """Assemble a full snapshot of the homegate state (devices, rules,
    rooms, cameras) for a bulk sync."""
    hg = self.get_homegate_info_all()
    snapshot = {}
    snapshot['id'] = hg['id']
    snapshot['devices'] = self.get_device_channel(all=True)
    snapshot['rules'] = self.get_rule(all=True)
    snapshot['homegate'] = hg
    snapshot['rooms'] = self.get_room(all=True)
    snapshot['camera'] = self.get_camera(all=True)
    snapshot['groups'] = None
    return snapshot
###### USER ##########
def get_user(self, id):
    # Full user row for ``id`` (None when absent).
    return self._fetchone("users", "id", id)
def udpate_user(self, data, id):
    # Not implemented yet.  NOTE(review): the "udpate" typo is kept in
    # the name for caller compatibility.
    pass
def add_channel_to_user(self, user_id, channel_id):
    # Grant *user_id* access to *channel_id*; returns the new rowid.
    return self._add_new("user_access", "user_id,channel_id,created", (user_id, channel_id, int(time.time())))
def add_user(self, user_id, name, permission_type, access_token):
    """Insert a new active user unless one with *user_id* already
    exists; returns the existing row or the new rowid."""
    existing = self._fetchone("users", "id", user_id)
    if existing:
        return existing
    return self._add_new(
        "users",
        "id,name,permission_type,status,access_token,created,last_seen",
        (user_id, name, permission_type, 1, access_token,
         int(time.time()), int(time.time())))
def remove_user(self, user_id):
    # Delete the user row; True on success, False on error.
    return self._remove("users", "id", user_id)
def remove_user_accsess(self, channel_id):
    # Not implemented yet ("accsess" typo kept for compatibility).
    pass
def check_user_access_channle(self, user_id, channel_id):
    # Rows linking the user to the channel (empty list = no access).
    # NOTE(review): "channle" typo kept for caller compatibility.
    return self._fetch_multi("user_access", "user_id", "channel_id", user_id, channel_id)
def get_all_user(self):
    # Every user row.
    return self._fetchall("users")
def get_all_user_token(self):
    # (id, access_token) pairs for every user.
    return self._fetchall_col("users", "id,access_token")
def get_all_user_id(self):
    """Return the first id row of the users table.

    NOTE(review): this method was defined twice; the earlier definition
    (returning all id rows) was shadowed by this one, whose behavior is
    kept.  The trailing ``[0]`` returns only the first result row --
    presumably a bug upstream, verify against callers.
    """
    return self.execute("SELECT id from users;").fetchall()[0]
###### ROOM ##########
def get_default_room(self):
    # id of the room named "Mặc định" ("Default" in Vietnamese).
    return self._fetchone("rooms", "name", "Mặc định")[0]
def get_room(self, all=False, id=False):
    """Fetch rooms.

    all -- return every room as a list of dicts.
    id  -- return the single room with this id (None when missing).
    """
    def _row_to_dict(r):
        # Columns: id, name, icon, channels(JSON), floor_id, created, updated
        return {"id": r[0], "name": r[1], "icon": r[2],
                "channels": self._to_dict(r[3]), "floor_id": str(r[4]),
                "created": r[5], "updated": r[6]}

    if all:
        return [_row_to_dict(r) for r in self._fetchall("rooms")]
    if id:
        # The original iterated over the single row tuple (treating each
        # column as a row) and passed the literal list [3] to _to_dict;
        # both raised.  Use the fetched row directly.
        row = self._fetchone("rooms", "id", id)
        return _row_to_dict(row) if row else None
def add_room(self, data):
    """Insert a new room and return its rowid (None on failure)."""
    # The original called the undefined name ``jsont`` -> NameError.
    return self._add_new(
        "rooms",
        "id,name,channels,icon,floor_id,created,updated",
        (str(uuid.uuid4()), data['name'], json.dumps(data['channels']),
         data['icon'], data['floor_id'], int(time.time()), int(time.time())))
def update_room(self, data):
    """Update a room row; returns the refreshed room dict, or False on error."""
    try:
        # The original used the undefined name ``jsont`` and supplied
        # five arguments for six format placeholders (the ``updated``
        # timestamp was missing).
        query = """update rooms set name='{}',channels='{}',icon='{}',
        floor_id='{}',updated='{}' where id='{}';""".format(
            data['name'], json.dumps(data['channels']), data['icon'],
            data['floor_id'], int(time.time()), data['id'])
        self.execute(query)
        self._db.commit()
        return self.get_room(id=data['id'])
    except Exception as e:
        LOGGER.error("Update room error : ", exc_info=True)
        return False
def remove_room(self, id):
    """Delete the room with ``id``; False when it does not exist."""
    # The original returned None on the success path; propagate
    # _remove's boolean result for a consistent answer.
    if self._fetchone("rooms", "id", id):
        return self._remove("rooms", "id", id)
    return False
###### RULE ##########
def check_rule_timer(self):
    """Scan timer conditions and collect the actions of rules due now.

    NOTE(review): this method looks broken as written -- see the inline
    notes; confirm the intent before relying on it.
    """
    list_actions = []
    for t in self.execute("select id,timer from conditions;").fetchall():
        # NOTE(review): ``t`` is an (id, timer) row tuple, so json.loads(t)
        # raises TypeError; presumably json.loads(t[1]) was intended.
        condi = json.loads(t)
        print(condi)
        # NOTE(review): ``currentDay`` / ``currentTime`` are not defined
        # in this scope (compare_date_time builds its own local copies).
        if currentDay in condi['repeat']:
            # NOTE(review): a value cannot equal 0 AND 1; ``or`` was
            # probably intended (0 = moment, 1 = period).
            if condi['type']== 0 and condi['type']== 1: # 0 is moment , 1 is period
                if condi['value']['start_time'] == currentTime:
                    rule = self.execute("select id,stauts,type from rules where id='{}';".format(condi['id'])).fetchone()
                    print("Rule",rule)
                    action = self.execute("select * from actions where id='{}'".format(condi['id'])).fetchone()
                    print("Action rule",action)
                    if action:
                        # NOTE(review): list_actions is a list; these
                        # item assignments raise TypeError.
                        list_actions['channels'] = json.loads(action[2])
                        list_actions['notification'] = action[3]
                        action_channels = self._fetch_by_col("action_channels", "id",condi['id'])
                        if action_channels:
                            for c in action_channels:
                                list_actions['channels'] = {"id":c[1],"ieee":c[3],"type":c[4],"status":c[5]}
    return list_actions
def compare_date_time(self, type, repeat, start_time, end_time):
    """Return True when 'now' satisfies the timer condition, else a
    falsy value (False or None).

    type       -- truthy: exact-moment match against start_time (HH:MM);
                  falsy: check that now lies inside [start_time, end_time].
    repeat     -- iterable of weekday codes (Monday=2 ... Sunday=8,
                  i.e. datetime.weekday() + 2) or None.
    start_time, end_time -- "HH:MM" strings.
    """
    currentTime = datetime.now().strftime("%H:%M")
    currentDay = datetime.today().weekday() + 2
    # Check ``repeat`` for None *before* the membership test; the
    # original order raised TypeError whenever repeat was None.
    if repeat is not None and currentDay in repeat:
        if type:
            return currentTime == start_time
        else:
            now = datetime.now()
            st_time = datetime.strptime(start_time, '%H:%M')
            st_time = now.replace(hour=st_time.hour, minute=st_time.minute)
            e_time = datetime.strptime(end_time, '%H:%M')
            e_time = now.replace(hour=e_time.hour, minute=e_time.minute)
            return st_time <= now <= e_time
def check_door_open(self):
    """Fire a door-left-open reminder for any tracked door sensor that
    has been open longer than the threshold (subject to the reminder
    rule's optional timer window)."""
    if self._door_reminder:
        for d in self._door_reminder['conditions']['alarm_mode']:
            print("check door timer",d)
            if d['channel_id'] in self._door_sensor:
                door = self._door_sensor[d['channel_id']]
                # Seconds since the sensor last changed state.
                check_time = int(time.time())-int(door['updated']) # cout time , 180 is 180 second
                # NOTE(review): the comment says 180s but the code
                # compares against 160 -- confirm which is intended.
                if check_time > 160:
                    timer = self._door_reminder['conditions']['timer']
                    print(timer)
                    if timer is not None:
                        if self.compare_date_time(1,timer['repeat'],timer['value']['start_time'],timer['value']['end_time']):
                            return self.notifi_rule_door_reminder(door['id'],door['name'],d['channel_id'])
                        else:
                            break
                    else:
                        return self.notifi_rule_door_reminder(door['id'],door['name'],d['channel_id'])
def notifi_rule_door_reminder(self,rule_id,rule_name,channel):
    """Create and return a door-reminder notification (codes 4/5) for
    the door sensor tracked under *channel*."""
    room_name = self._fetchone("rooms", "id",self._door_sensor[channel]['room_id'])
    notifi = self.add_notifi("", rule_id,4,5,rule_name,self._door_sensor[channel], room_name[1])
    return notifi
def add_device_to_rule_secure(self, channel_id, type, ieee):
    """Wire a newly added channel into the built-in security rules
    (AlarmOn / AtHome / SOS) according to its channel type code.

    type 8/9/10/13/25 -> sensor added to alarm conditions (type 9 is
    bypassed when at home, zone_status 0); 15 -> remote control bound
    to all three rules; 21 -> siren added to the rules' actions.
    """
    zone_status = 1
    # Check type channel
    if type == 8 or type == 9 or type == 10 or type == 13 or type == 25:
        ''' Add device channel normal eg: motion,smoke,door sensor to alarm mode
        "alarm_mode":[{
        "channel_id":"{string}",
        "status":{}, // don't use
        "ieee":{string}, // ieee device
        "zone_status":{integer}
        }]
        '''
        if type == 9:
            zone_status = 0
        self._add_new("condition_alarm_mode", "id,channel_id,ieee,zone_status",
                      (self._alarm_on[0], channel_id, ieee, zone_status))
        self._add_new("condition_alarm_mode", "id,channel_id,ieee,zone_status",
                      (self._athome[0], channel_id, ieee, zone_status))
    elif type == 15:
        ''' Add remote control to alarm mode
        "access_control":{
        "virtual":{integer},
        "bind_channel_ids":[{"channel_id":"{string}","channel_type":{integer}}]
        }
        '''
        self._add_new("conditions_bind_channel", "id,channel_id,channel_type",
                      (self._alarm_on[0], channel_id, type))
        self._add_new("conditions_bind_channel", "id,channel_id,channel_type",
                      (self._athome[0], channel_id, type))
        self._add_new("conditions_bind_channel",
                      "id,channel_id,channel_type", (self._sos[0], channel_id, type))
    elif type == 21:
        '''' Add siren to Alarm mode
        "channels":[ { "channel_id":"{string}",
        "channel_icon":"{string}",
        "channel_type":{integer},
        "channel_status":{ "type":"{string}","value":"{string}" }
        }]
        '''
        # Default siren behaviour: volume level 1 for 180 seconds.
        siren = [{"type": "volume", "value": 1},
                 {"type": "duration", "value": 180}]
        self._add_new("action_channels", "id,channel_id,channel_ieee,channel_type,channel_status",
                      (self._alarm_on[0], channel_id, ieee, type, json.dumps(siren)))
        self._add_new("action_channels", "id,channel_id,channel_ieee,channel_type,channel_status",
                      (self._athome[0], channel_id, ieee, type, json.dumps(siren)))
        self._add_new("action_channels", "id,channel_id,channel_ieee,channel_type,channel_status",
                      (self._sos[0], channel_id, ieee, type, json.dumps(siren)))
    else:
        pass
def get_rule(self, all=False, id=False):
    """Load rules together with their conditions and actions.

    all -- return every rule as a list of dicts.
    id  -- return the single rule with this id.

    NOTE(review): ``list`` shadows the builtin throughout (kept as-is).
    """
    try:
        list_rules = []
        if all:
            for r in self._fetchall("rules"):
                list = {"id": r[0], "name": r[1], "status": r[2], "created": r[3], "updated": r[4],
                        "user_id": r[5], "homegate_id": r[6], "type": r[7], "favorite": bool(r[8])}
                conditions = {}
                for c in self._fetch_by_col("conditions", "id", r[0]):
                    alarm_mode = []
                    for a in self._fetch_by_col("condition_alarm_mode", "id", r[0]):
                        if a:
                            alarm_mode.append({"channel_id": a[1], "ieee": a[2], "zone_status": a[3]})
                    access_control = json.loads(c[3])
                    for b in self._fetch_by_col("conditions_bind_channel", "id", r[0]):
                        if access_control['bind_channel_ids'] is None:
                            access_control['bind_channel_ids'] = [{"channel_id": b[1], "channel_ieee": b[2], "channel_type": b[3], "channel_status": b[4]}]
                        else:
                            access_control['bind_channel_ids'].append({"channel_id": b[1], "channel_ieee": b[2], "channel_type": b[3], "channel_status": b[4]})
                    list["conditions"] = {"alarm_mode": alarm_mode, "auto_mode": self._to_dict(c[1]), "timer": self._to_dict(c[2]), "access_control": access_control}
                for a in self._fetch_by_col("actions", "id", r[0]):
                    action_channels = []
                    for ac in self._fetch_by_col("action_channels", "id", r[0]):
                        if ac:
                            action_channels.append({"channel_id": ac[1], "channel_ieee": ac[3], "channel_icon": ac[2], "channel_type": ac[4], "channel_status": json.loads(ac[5])})
                    list["actions"] = {"delay": a[1], "channels": action_channels, "rules": self._to_dict(a[2]), "activate_notification": a[3]}
                list_rules.append(list)
            return list_rules
        if id:
            for r in self.execute("select * from rules where id='{}';".format(id)).fetchall():
                list = {"id": r[0], "name": r[1], "status": r[2], "created": r[3], "updated": r[4],
                        "user_id": r[5], "homegate_id": r[6], "type": r[7], "favorite": bool(r[8])}
                conditions = {}
                for c in self._fetch_by_col("conditions", "id", r[0]):
                    alarm_mode = []
                    for a in self._fetch_by_col("condition_alarm_mode", "id", r[0]):
                        if a:
                            alarm_mode.append({"channel_id": a[1], "ieee": a[2], "zone_status": a[3]})
                    access_control = json.loads(c[3])
                    for b in self._fetch_by_col("conditions_bind_channel", "id", r[0]):
                        if access_control['bind_channel_ids'] is None:
                            access_control['bind_channel_ids'] = [{"channel_id": b[1], "channel_ieee": b[2], "channel_type": b[3], "channel_status": b[4]}]
                        else:
                            access_control['bind_channel_ids'].append({"channel_id": b[1], "channel_ieee": b[2], "channel_type": b[3], "channel_status": b[4]})
                    list["conditions"] = {"alarm_mode": alarm_mode, "auto_mode": self._to_dict(c[1]), "timer": self._to_dict(c[2]), "access_control": access_control}
                for a in self._fetch_by_col("actions", "id", r[0]):
                    action_channels = []
                    for ac in self._fetch_by_col("action_channels", "id", r[0]):
                        if ac:
                            action_channels.append({"channel_id": ac[1], "channel_ieee": ac[3], "channel_icon": ac[2], "channel_type": ac[4], "channel_status": json.loads(ac[5])})
                    list["actions"] = {"delay": a[1], "channels": action_channels, "rules": self._to_dict(a[2]), "activate_notification": a[3]}
                list_rules.append(list)
            return list_rules[0]
    except Exception as e:
        LOGGER.debug('Get rule Error : %s',e)
def get_rule_secure(self):
    """Load the security rules (type codes 1..5) with their conditions
    and actions, in the same dict shape as get_rule()."""
    list_rules = []
    query = "SELECT * from rules where type >0 and type <6;"
    rules = self.execute(query).fetchall()
    for r in rules:
        # NOTE(review): ``list`` shadows the builtin (kept as-is).
        list = {"id": r[0], "name": r[1], "status": r[2], "created": r[3], "updated": r[4],
                "user_id": r[5], "homegate_id": r[6], "type": r[7], "favorite": bool(r[8])}
        conditions = {}
        for c in self._fetch_by_col("conditions", "id", r[0]):
            alarm_mode = []
            for a in self._fetch_by_col("condition_alarm_mode", "id", r[0]):
                if a:
                    alarm_mode.append(
                        {"channel_id": a[1], "ieee": a[2], "zone_status": a[3]})
            access_control = json.loads(c[3])
            for b in self._fetch_by_col("conditions_bind_channel", "id", r[0]):
                if access_control['bind_channel_ids'] is None:
                    access_control['bind_channel_ids'] = [
                        {"channel_id": b[1], "channel_ieee": b[2], "channel_type": b[3], "channel_status": b[4]}]
                else:
                    access_control['bind_channel_ids'].append(
                        {"channel_id": b[1], "channel_ieee": b[2], "channel_type": b[3], "channel_status": b[4]})
            list["conditions"] = {"alarm_mode": alarm_mode, "auto_mode": self._to_dict(c[1]), "timer": self._to_dict(c[2]), "access_control": access_control}
        for a in self._fetch_by_col("actions", "id", r[0]):
            action_channels = []
            for ac in self._fetch_by_col("action_channels", "id", r[0]):
                if ac:
                    action_channels.append(
                        {"channel_id": ac[1], "channel_ieee": ac[3], "channel_icon": ac[2], "channel_type": ac[4], "channel_status": json.loads(ac[5])})
            list["actions"] = {"delay": a[1], "channels": action_channels, "rules": self._to_dict(a[2]), "activate_notification": a[3]}
        list_rules.append(list)
    return list_rules
def remove_rule(self, id):
    # Delete a rule together with its conditions and actions.
    self._remove("conditions", "id", id)
    self._remove("actions", "id", id)
    self._remove("rules", "id", id)
def remove_channel_in_rule(self, channel_id):
    # Detach a channel from every rule's conditions and actions.
    self._remove("conditions_bind_channel", "channel_id", channel_id)
    self._remove("condition_alarm_mode", "channel_id", channel_id)
    self._remove("action_channels", "channel_id", channel_id)
def update_rule_status(self, status, id):
    """Set a rule's status and return ``{"id", "status"}``.

    ``_update_one`` always returns None, so the original ``if`` branch
    could never be taken and this method unconditionally returned None.
    """
    self._update_one("rules", "status", status, id)
    return {"id": id, "status": status}
def update_rule_secure(self, status, channel_id):
    """Collect at-home-bypassed channels for the rule tied to
    *channel_id*.

    NOTE(review): several defects to confirm -- see inline notes.
    """
    channel = self._fetchone("channels", "id", channel_id)
    type_rules = channel[4]
    id_alarm = self._fetchone("rules", "type", type_rules)[0]
    condi_alarm = self._fetchone("conditions", "id", id_alarm)
    list_channel = []
    if condi_alarm[1]:
        for c in json.loads(condi_alarm[1]):
            if c['zone_status'] == 0:
                list_channel.append(
                    {"channel_id": c['channel_id'], "zone_status": c['zone_status']})
    if list_channel:
        # NOTE(review): ``id`` here is the builtin function, not a local
        # value, and _update_one_col takes five arguments (four given),
        # so this call raises TypeError.  The branch logic also looks
        # inverted (updates only when bypassed channels exist).
        self._update_one_col("rules", "status", status, id)
        return None
    else:
        return list_channel
def update_rule_alarm(self, type, status):
    ''' Update rule alarm mode
    AlarmOn get list channel have zone_status in alarm_mode entity of condition table
    type: 1 :AlarmOn , 2: AlarmOff , 3: athome , 4 sos , 5 DoorReminder
    '''
    if type == 2 and status == 1:
        # AlarmOff: disable AlarmOn/AtHome, enable AlarmOff.
        query1 = "UPDATE rules SET status=0 WHERE type=1 OR type=3;"
        self.execute(query1)
        query3 = "UPDATE rules SET status=1 WHERE type=2;"
        self.execute(query3)
        self._db.commit()
        return self._alarm_off
    elif type == 1 and status == 1:
        # AlarmOn: disable AlarmOff/AtHome, enable AlarmOn.
        query1 = "UPDATE rules SET status=0 WHERE type=2 OR type=3;"
        self.execute(query1)
        query2 = "UPDATE rules SET status=1 WHERE type=1;"
        self.execute(query2)
        self._db.commit()
        return self.change_zone_status(type)
    elif type == 3 and status == 1:
        # AtHome: disable AlarmOn/AlarmOff, enable AtHome.
        query1 = "UPDATE rules SET status=0 WHERE type=1 OR type=2;"
        self.execute(query1)
        query2 = "UPDATE rules SET status=1 WHERE type=3;"
        self.execute(query2)
        self._db.commit()
        return self.change_zone_status(type)
    elif type == 4 and status == 1:
        # SOS: return the action channels of the SOS rule.
        id_sos = self.execute(
            "SELECT id FROM rules WHERE type='{}';".format(4)).fetchone()[0]
        # NOTE(review): other code queries a table named "actions";
        # "action" here looks like a typo -- verify the schema.
        action_alarm = self.execute(
            "SELECT channels FROM action WHERE id='{}';".format(id_sos)).fetchone()[0]
        list_channel = []
        if action_alarm:
            for c in json.loads(action_alarm):
                list_channel.append(
                    {"ieee": c['ieee'], "status": c['channel_status'], "type": c['channel_type']})
        return list_channel
    else:
        pass
def change_zone_status(self, type):
    ''' Change zone status in alarm mode

    type 1 (AlarmOn) forces every at-home-bypassed sensor
    (zone_status 0) back to armed (1); otherwise statuses are
    returned unchanged.
    '''
    # NOTE(review): the row_factory assignment affects cursors created
    # afterwards; self._cursor predates it -- confirm the fetched rows
    # really are dicts (the c['...'] indexing below relies on it).
    self._db.row_factory = lambda c, r: dict(
        [(column[0], r[idx]) for idx, column in enumerate(c.description)])
    condi_alarm = self.execute(
        "SELECT ieee,zone_status FROM condition_alarm_mode WHERE id='{}';".format(self._athome[0])).fetchall()
    list_channel = []
    if condi_alarm:
        for c in condi_alarm:
            if type == 1 and c['zone_status'] == 0:
                list_channel.append({"ieee": c['ieee'], "zone_status": 1})
            else:
                list_channel.append(
                    {"ieee": c['ieee'], "zone_status": c['zone_status']})
    if type == 1:
        return {"id": self._alarm_on[0], "channels": list_channel}
    else:
        return {"id": self._athome[0], "channels": list_channel}
def get_rule_alarm_status(self):
status = self.execute("SELECT status from rules where type<3;").fetchall()
if status[0][0] == 1 or status[2][0] == 1:
return 1
else:
return 0
def get_rule_alarm(self, type):
return self.execute("SELECT id,type,status FROM rules WHERE type='{}';".format(type)).fetchone()
###### DEVICE ##########
def get_device(self, all=None, id=None):
self._db.row_factory = lambda c, r: dict(
[(column[0], r[idx]) for idx, column in enumerate(c.description)])
try:
if all is not None:
query_all = """SELECT id, ieee, addr, type, model, manufacturer, serial_number, sw_version, hw_version,
lqi,low_battery, created, updated from devices;"""
return self.execute(query_all)
elif id is not None:
query_id = """SELECT id, ieee, addr, type, model, manufacturer, serial_number, sw_version, hw_version, zone_id, zone_status,
lqi,low_battery, created, updated from devices where id=? ;"""
return self.execute(query_all, id)
else:
return "No param selected"
except Exception as e:
LOGGER.error(" Get Device error :", exc_info=True)
def get_device_channel(self, all=False, id=False):
try:
devices = []
if all:
query_all = """SELECT id, ieee, addr, type, model, manufacturer, serial_number, sw_version, hw_version,
lqi,low_battery, created, updated , name from devices;"""
for d in self.execute(query_all).fetchall():
device = {"id": d[0], "ieee": d[1], "addr": d[2], "type": d[3], "model": d[4], "manufacturer": d[5], "serial_number": d[6], "sw_version": d[7], "hw_version": d[8],
"signal": round(100 * int(d[9]) / 255), "low_battery": d[10], "created": d[11], "updated": d[12], "name": d[13]}
channels = []
query_channel = "SELECT id, name, endpoint_id, type, status, config ,zone_id, zone_status, created, updated, favorite,notification,room_id, device_id from channels where device_id='{}';".format(
d[0])
for c in self.execute(query_channel).fetchall():
channel = {"id": c[0], "name": c[1], "endpoint": c[2], "type": c[3], "status": json.loads(c[4]), "config": c[5], "zone_id": c[6], "zone_status": c[7],
"created": c[8], "updated": c[9], "favorite": bool(c[10]), "notification": c[11], "room_id": c[12], "device_id": c[13]}
channels.append(channel)
device['channels'] = channels
devices.append(device)
return devices
if id:
query_device = """SELECT id, ieee, addr, type, model, manufacturer, serial_number, sw_version, hw_version,
lqi, low_battery, created, updated, name from devices where id='{}';""".format(id)
d = self.execute(query_device).fetchone()
device = {"id": d[0], "ieee": d[1], "addr": d[2], "type": d[3], "model": d[4], "manufacturer": d[5], "serial_number": d[6], "sw_version": d[7],
"hw_version": d[8], "signal": round(100 * int(d[9]) / 255), "low_battery": d[10], "created": d[11], "updated": d[12], "name": d[13]}
channels = []
query_channel = "SELECT id, name, endpoint_id, type, status, config, zone_id, zone_status, created, updated, favorite,notification,room_id, device_id from channels where device_id='{}';".format(
id)
for c in self.execute(query_channel).fetchall():
channel = {"id": c[0], "name": c[1], "endpoint": c[2], "type": c[3], "status": json.loads(c[4]), "config": c[5], "zone_id": c[6], "zone_status": c[7],
"created": c[8], "updated": c[9], "favorite": bool(c[10]), "notification": c[11], "room_id": c[12], "device_id": c[13]}
channels.append(channel)
device['channels'] = channels
return device
except Exception as e:
LOGGER.error("Get all device error :",exc_info=True)
return None
def update_device(self, name, value, id):
pass
device = self._update_one("devices", name, value, id)
if device:
return True
else:
return device
    def set_device_type(self, model, device_id):
        '''Fill in name/type/model/vendor metadata for a freshly joined device.

        ``model`` is the hardware model string used to look up the channel
        type (LIST_TYPE_CHANNEL_BRAND) and display name (NAME_TYPE_CHANNEL);
        unknown models raise KeyError, which is logged.
        NOTE(review): unlike the other UPDATE helpers this does not call
        self._db.commit() — confirm whether execute() auto-commits.
        '''
        try:
            name = LIST_TYPE_CHANNEL_BRAND[model]
            query_update_device = "UPDATE devices SET name=?, type=?, model=?, manufacturer=?, sw_version=?, hw_version=?,serial_number=? WHERE id=?;"
            self.execute(query_update_device, (
                NAME_TYPE_CHANNEL[name], 1, LIST_MODEL_DEVICE_BRAND[model], "DICOM", "1.0", "1.0", "", device_id))
        except Exception as e:
            LOGGER.error("Set device type error : ",exc_info=True)
def generate_zone_id(self):
query = "SELECT zone_id from channels;"
list_zone_id = self.execute(query).fetchall()
i = 0
if list_zone_id:
for z in list_zone_id:
i += 1
if i != z[0]:
return i
break
elif i == int(max(list_zone_id)[0]):
return i+1
break
else:
return 1
def remove_device(self, id):
device = self._fetchone("devices", "id", id)
if device:
channel = self._fetchone("channels", "device_id", id)
if device and channel:
self._remove("attributes", "ieee", device[3])
self._remove("clusters", "ieee", device[3])
self._remove("group_members", "channel_id", channel[0])
self._remove("user_access", "channel_id", channel[0])
self._remove("channels", "device_id", id)
self._remove("devices", "id", id)
return device[3]
else:
return False
    def remove_channel(self, id):
        '''Delete a channel's dependents and, when it was the device's last
        channel, the channel and device rows themselves.

        Returns the channel endpoint value (channel[2]) when the device was
        removed too, True otherwise, and False for an unknown channel id.
        NOTE(review): when other channels remain the channel row itself is
        never deleted (only its dependents are) — confirm intended.
        NOTE(review): tuple indexes assume the channels column order
        (2/3 used for endpoint+ieee, 18 = device_id) — confirm vs schema.
        '''
        channel = self._fetchone("channels", "id", id)
        if channel:
            self._remove_muti("attributes", "ieee",
                              "endpoint_id", channel[2], channel[3])
            self._remove_muti("clusters", "ieee",
                              "endpoint_id", channel[2], channel[3])
            self._remove("group_members", "channel_id", channel[0])
            self._remove("user_access", "channel_id", channel[0])
            self.remove_channel_in_rule(channel[0])
            # Count siblings: if this is the device's only channel, remove
            # the device row as well.
            number_enpoint = self.execute(
                "select count(id) from channels where device_id='{}';".format(channel[18])).fetchone()
            if number_enpoint[0] == 1:
                self._remove("channels", "id", channel[0])
                self._remove("devices", "id", channel[18])
                return channel[2]
            else:
                return True
        else:
            return False
####### CHANNEL ##########
def get_channel(self, all=False, id=False):
self._db.row_factory = lambda c, r: dict([(column[0], r[idx]) for idx, column in enumerate(c.description)])
try:
if all:
query_all = """SELECT id, name, enpoint_id, type, status,config ,zone_id,zone_status, created, updated, favorite, device_id from channels;"""
return self.execute(query_all)
if id:
query_all = """SELECT id, name, enpoint_id, type, status , config, zone_id, zone_status created, updated, favorite, device_id from channels where id=?;"""
return self.execute(query_all, id)
except Exception as e:
LOGGER.error("Get channel error : %s", e)
def get_channel_by_ieee(self, ieee, endpoint_id):
return self.execute("SELECT id,type,status,name,notification,room_id,zone_status FROM channels where ieee='{}' and endpoint_id='{}';".format(ieee, endpoint_id)).fetchone()
def update_channel_mqtt(self, channel_id, status):
try:
query_update_channel = "UPDATE channels SET status='{}',updated='{}' WHERE id='{}';".format(
json.dumps(status_old), timer, channel_id)
self.execute(query_update_channel)
self._db.commit()
except Exception as e:
LOGGER.error("UPDATE channel mqtt % ",e)
return False
def update_channel_info(self,channel_id,data):
channel = self._fetchone("channels", "id", channel_id)
if channel:
try:
self.execute("""update channels set name='{}',status='{}',zone_status='{}',favorite='{}',notification='{}',
room_id='{}' where id='{}';""".format(data['name'],json.dumps(data['status']),data['zone_status'],int(data['favorite']),
data['notification'],data['room_id'],channel_id))
self._db.commit()
return
except Exception as e:
LOGGER.error("UPDATE channel info : %s", e)
return False
def update_channel_alarm(self, channel, status):
''' Update channel
Check type channel and notification
Check Enviroment sensor type 28 : combine temperature and humidity to status
'''
rule_state = self.get_rule_alarm_status()
try:
data = {}
channel_status = self.generate_channel_value(channel[1], status)
timer = int(time.time())
query_update_channel = "UPDATE channels SET status='{}',updated='{}' WHERE id='{}';".format(json.dumps(channel_status), timer, channel[0])
self.execute(query_update_channel)
self._db.commit()
data["notifi"] = False
if rule_state == 1 and channel[6] == 1:
room_name = self._fetchone("rooms", "id", channel[5])
# self,user_id,id,type_noti,type,name,status,room_name
notifi = self.add_notifi("", channel[0], 1, channel[1], channel[3], channel_status, room_name[1])
data["notifi"] = notifi
LOGGER.debug("Notifi rule alarm")
elif channel[4] == 1:
room_name = self._fetchone("rooms", "id", channel[5])
# self,user_id,id,type_noti,type,name,status,room_name
notifi = self.add_notifi("", channel[0], 0, channel[1], channel[3], channel_status, room_name[1])
data["notifi"] = notifi
else:
pass
data["channel"] = {"id": channel[0],'status': channel_status, 'updated': timer}
#door_reminder add sensor
if channel[1] == 8:
if channel_status[0]['value'] == 1:
self._door_sensor[str(channel[0])]={'id':channel[0],'name':channel[3],'status': channel_status[0]['value'], 'updated': timer,'room_id':channel[5]}
else:
del self._door_sensor[str(channel[0])]
print("Door sensor ",self._door_sensor)
return data
except Exception as e:
LOGGER.error("Update channel alarm status: %s", e)
    def update_channel_normal(self, ieee, endpoint_id, status):
        '''Persist a non-alarm status report for the channel(s) at
        (ieee, endpoint_id) and return the updated payload.

        Type 28 (environment sensor) merges the new temperature/humidity
        reading into the previously stored pair so one reading never wipes
        out the other.  Returns {"id", "status", "updated"} or None on
        error (logged).
        NOTE(review): the return sits inside the for loop, so only the
        first matching channel row is ever updated — confirm intended.
        '''
        try:
            channel = self._fetch_multi(
                "channels", "ieee", "endpoint_id", ieee, endpoint_id)
            for c in channel:
                channel_status = self.generate_channel_value(c[4], status)
                status_old = json.loads(c[5])
                if c[4] == 28:
                    # Seed defaults on the first-ever report, otherwise
                    # patch only the reading that changed.
                    if not status_old:
                        status_old = [{"type": "temperature", "value": int(status.get('temperature', 25))}, {"type": "humidity", "value": int(status.get('humidity', 50))}]
                    elif status.get('name', None) == 'temperature':
                        status_old[0]['value'] = int(status.get('value', 25))
                    else:
                        status_old[1]['value'] = int(status.get('value', 25))
                    channel_status = status_old
                timer = int(time.time())
                query_update_channel = "UPDATE channels SET status='{}',updated='{}' WHERE id='{}';".format(
                    json.dumps(channel_status), timer, c[0])
                self.execute(query_update_channel)
                self._db.commit()
                return {"id": c[0], 'status': channel_status, 'updated': timer}
        except Exception as e:
            LOGGER.error("Update channel error: %s", e)
def generate_channel_value(self, channel_type, status):
list_status = []
if channel_type == 21:
list_status.append({"type": "volume", "value": "1"})
list_status.append({"type": "duration", "value": 180})
list_status.append({"type": "tamper", "value": int(status.get('tamper', 0))})
elif channel_type == 13 or channel_type == 0:
list_status.append(
{"type": "onoff", "value": int(status.get('alarm1', 0))})
elif channel_type == 8:
list_status.append(
{"type": "closeopen", "value": int(status.get('alarm1', 0))})
elif channel_type == 9:
list_status.append(
{"type": "present", "value": int(status.get('alarm1', 0))})
elif channel_type == 10:
list_status.append(
{"type": "smoke", "value": int(status.get('alarm1', 0))})
elif channel_type == 15:
list_status.append(
{"type": "sos", "value": int(status.get('alarm1', 0))})
list_status.append(
{"type": "athome", "value": int(status.get('athome', 0))})
list_status.append(
{"type": "armed", "value": int(status.get('armed', 0))})
list_status.append(
{"type": "disarmed", "value": int(status.get('disarmed', 0))})
elif channel_type == 28:
if status.get('temperature', True) and status.get('humidity', True):
if status.get('name', None) == 'temperature':
list_status.append(
{"type": "temperature", "value": int(status.get('value', 25))})
if status.get('name', None) == 'humidity':
list_status.append(
{"type": "humidity", "value": int(status.get('value', 50))})
else:
list_status.append(
{"type": "temperature", "value": status.get('temperature', 25)})
list_status.append(
{"type": "humidity", "value": status.get('humidity', 50)})
elif channel_type == 25:
list_status.append(
{"type": "present", "value": int(status.get('alarm1', 1))})
list_status.append(
{"type": "tamper", "value": int(status.get('tamper', 0))})
else:
pass
return list_status
def set_status_channel(self, status, channel_id, type):
'''
Table type status value
21: Indoor Siren
"0x<volume/duration>"
"volume": 1-4
"duration" : seconds
siren = {"0x210":"Volume is medium and duration alarm 60s"}
8: Door Sensor
13: Waterleak
25: Pir pet
9 : Pir sensor
28: Enviroment Sensor -> Temperature andf Humidity
10: Smoke sensor
15: Alarm Remote control
16: S0S button
'''
notifi = 0
if type == 10:
notifi = 1
try:
list_status = self.generate_channel_value(type, status)
query_update_channel = "UPDATE channels SET status='{}',notification='{}' WHERE id='{}';".format(
json.dumps(list_status), notifi, channel_id)
self.execute(query_update_channel)
self._db.commit()
except Exception as e:
LOGGER.error("UPDATE status channel error: %s",e)
def set_type_channels(self, model, channel_id):
""""
Set model , type devices
"""
try:
room_id = self.get_default_room()
query_update_channel = "UPDATE channels SET name=?,type=?,room_id=? WHERE id=?;"
name = LIST_TYPE_CHANNEL_BRAND[model]
self.execute(query_update_channel,
(NAME_TYPE_CHANNEL[name], LIST_TYPE_CHANNEL_BRAND[model], room_id, channel_id))
return LIST_TYPE_CHANNEL_BRAND[model]
except Exception as e:
LOGGER.error("UPDATE status channel error: %s",e)
###### GROUPS ##########
def get_group(self, all=None, id=None):
try:
if all is not None:
groups = {}
for group in self._fetchall("groups"):
groups["id"] = group[0]
groups["group_idx"] = group[1]
groups["name"] = group[2]
group["type"] = group[3]
group_member = []
for member in self._fetchone("group_members", 'group_id', group[0]):
channels = {}
channels["channel_id"] = member[1]
channels["status"] = member[2]
group_member.append(channels)
groups["group_members"] = group_member
return groups
elif id is not None:
groups = {}
for group in self._fetchone("groups", 'id', id):
groups["id"] = group[0]
groups["group_idx"] = group[1]
groups["name"] = group[2]
group["type"] = group[3]
group_member = []
for member in self._fetch_by_col("group_members", 'group_id', group[0]):
channels = {}
channels["channel_id"] = member[1]
channels["status"] = member[2]
group_member.append(channels)
groups["group_members"] = group_member
return groups
else:
return "No select param"
except Exception as e:
LOGGER.debug('Error get group %s', e)
    def update_group(self, data):
        '''Not implemented yet: placeholder for updating a group row.'''
        pass
    def group_member_removed(self, group, ep):
        '''Delete the membership row for endpoint ``ep`` in ``group``.

        ``ep.unique_id`` is unpacked into the last two placeholders —
        presumably (addr, endpoint_id); confirm against the endpoint class.
        '''
        q = """DELETE FROM group_members WHERE group_id=?AND addr=?AND endpoint_id=?"""
        self.execute(q, (group.group_id, *ep.unique_id))
        self._db.commit()
####### NOTIFICATION ##########
def get_all_notifi(self):
list = []
for n in self._fetchall("notification"):
list.append({"id": n[0], "user_id": n[1], "type": n[2],
"title": n[3], "body": n[4], "created": n[5]})
return list
def delete_notifi(self, id):
self._remove("notification", id)
def clear_notifi(self):
query = "DELETE from notification where id not in ( SELECT id FROM notification ORDER BY created DESC LIMIT 200);"
self.execute(query)
    def add_notifi(self, user_id, id, type_noti, type, name, status, room_name):
        '''Create and store a notification row; return it as a dict.

        type_noti values, inferred from the branches below (the original
        docstring listed "Type 5" twice — confirm the intended table):
          0: channel notification     1: alarm notification
          2: rule notification        3: reserved (empty body)
          4: rule notification carrying the triggering channel
        Returns None when the insert fails.
        '''
        data = {}
        if type_noti == 0 or type_noti == 1:
            data = {"channel_id": id, "type": type,
                    "status": status, "room_name": room_name}
        elif type_noti == 2:
            data = {"rule_id": id, "type": type,
                    "status": status, "room_name": room_name}
        elif type_noti == 3:
            pass
        elif type_noti == 4:
            data = {"rule_id": id, "type": type,"channel":status,"room_name": room_name}
        else:
            pass
        # ``id`` is rebound here: from the channel/rule id argument to the
        # freshly generated notification uuid used below.
        id = str(uuid.uuid4())
        timer = int(time.time())
        noti_id = self._add_new("notification", "id,user_id,type,title,body,created", (
            id, user_id, type_noti, name, json.dumps(data), timer))
        if noti_id:
            noti = {"id": id, "user_id": user_id, "type": type_noti,
                    "title": name, "body": data, "created": timer}
            return noti
def add_door_bell_noti(self):
d = str(uuid.uuid4())
timer = int(time.time())
noti_id = self._add_new("notification", "id,user_id,type,title,body,created", (id, " ",4,"Chuông cửa", "Chuông cửa đang gọi", timer))
if noti_id:
noti = {"id": id, "user_id": "", "type": 4,"title": "Chuông cửa", "body": "Chuông cửa đang gọi", "created": timer}
return noti
##### Camera #####
def get_camera(self, id=False, all=False):
if id:
c = self._fetchone("cameras", "id", id)
if c:
return {"id": c[0], "name": c[1], "roomId": c[2], "cameraIp": json.load(c[3]), "cameraInfo": json.load(c[4]), "streamUri": json.load(c[5]),"snapshotUri": json.load(c[6]), "created": c[7], "updated": c[8]}
if all:
camera = self._fetchall("cameras")
if camera:
data = []
for c in camera:
data.append({"id": c[0], "name": c[1], "roomId": c[2], "cameraIp": json.load(c[3]), "cameraInfo": json.load(c[4]), "streamUri": json.load(c[5]),"snapshotUri": json.load(c[6]), "created": c[7], "updated": c[8]})
return data
def update_camera(self, id, data):
try:
query = """UPDATE cameras SET name='{}',roomId='{}',cameraIp='{}',cameraInfo='{}',
streamUri='{}',snapshotUri='{}',update='{}' WHERE id='{}';""".format(name, room_id, camera_ip, camera_info, camera_uri)
self.execute(query)
self._db.commit()
return self.get_camera(id=data['id'])
except Exception as e:
LOGGER.debug('UPDATE camera error : %s',e)
return False
def remove_camera(self, id):
if self._fetchone("cameras", "id", id)[0]:
self._remove("cameras","id",id)
else:
return False
# if __name__ == '__main__':
# d = DbInterface()
# # print(json.dumps(d._fetchone("rules", "id",'5a2d8b58-5bfb-4998-b6da-8ff4ecf0cebe')))
# print(d.get_rule(id='5a2d8b58-5bfb-4998-b6da-8ff4ecf0cebe'))
# print(d.remove_channel("0d3fe41c-229f-4797-8bcf-54f657c7af34"))
# # d.remove_device("3edae810-5b68-421a-8ef3-ff69d80926e0")
# print(d._init_homegate("e731f132-b313-420e-b6c2-2257854f5149","CPIQGFvD13bS]ur2dGmT@5AI)","dicomiot","Dhome","DHG-A1","23:24:234","25:24:234","DH-A1-A05B2000011","1.0","1.2"))
| minhtan58/HomeGate | dbsync.py | dbsync.py | py | 64,592 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "config.DATABASE",
"line_number": 20,
"usage_type": "argument"
},
{
"api_name": "threading.Lock"... |
26104190475 | # coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.image import Image # noqa: E501
from swagger_server.models.image_id_body import ImageIdBody # noqa: E501
from swagger_server.test import BaseTestCase
class TestImageController(BaseTestCase):
    """ImageController integration test stubs"""
    # NOTE: swagger-codegen generated stubs — each test replays its route
    # against the test client and only asserts HTTP 200 plus a readable
    # body; extend with real fixtures/assertions as needed.
    def test_delete_image(self):
        """Test case for delete_image
        deletes image with specific id
        """
        response = self.client.open(
            '/MATEUSZTEPLICKI/foto_portfolio_project/1.1.0/image/{id}'.format(id=56),
            method='DELETE')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_get_image(self):
        """Test case for get_image
        returns specific image
        """
        response = self.client.open(
            '/MATEUSZTEPLICKI/foto_portfolio_project/1.1.0/image/{id}'.format(id=56),
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_get_image_array(self):
        """Test case for get_image_array
        get array of images
        """
        response = self.client.open(
            '/MATEUSZTEPLICKI/foto_portfolio_project/1.1.0/image',
            method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_patch_image(self):
        """Test case for patch_image
        modify metadata of image (alt and title)
        """
        # Empty model instance: only exercises (de)serialization of the route.
        body = ImageIdBody()
        response = self.client.open(
            '/MATEUSZTEPLICKI/foto_portfolio_project/1.1.0/image/{id}'.format(id=56),
            method='PATCH',
            data=json.dumps(body),
            content_type='application/json')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
    def test_post_image(self):
        """Test case for post_image
        uploads an image
        """
        data = dict(file='file_example',
                    alt='alt_example',
                    title='title_example')
        response = self.client.open(
            '/MATEUSZTEPLICKI/foto_portfolio_project/1.1.0/image',
            method='POST',
            data=data,
            content_type='multipart/form-data')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    import unittest
    unittest.main()
| JakubKuderski/Programowanie_Zespolowe | server/swagger_server/test/test_image_controller.py | test_image_controller.py | py | 2,607 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "swagger_server.test.BaseTestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "swagger_server.models.image_id_body.ImageIdBody",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.json.dumps",
"line_number": 58,
"usage_type": "cal... |
71807437474 | import argparse
import datetime
import json
import sys
import time
import colorama
import requests
session = requests.Session()
def get_changes(auth_creds, query):
    """Query the Gerrit REST API and return the decoded change list.

    Gerrit prefixes JSON responses with a magic ")]}'" guard line, so the
    first five characters are stripped before parsing.
    """
    auth = requests.auth.HTTPDigestAuth(*auth_creds)
    response = session.get(
        'https://review.openstack.org/a/changes/',
        params='q=%s&'
               'pp=0&'
               'o=DETAILED_ACCOUNTS&'
               'o=DETAILED_LABELS&'
               'n=22' % query,
        auth=auth,
        timeout=30)
    response.raise_for_status()
    body = ''.join(chunk for chunk in response.iter_content(1024, decode_unicode=True))
    return json.loads(body[5:])
def green_line(line):
    """Color *line* green."""
    return "".join((colorama.Fore.GREEN, line, colorama.Fore.RESET))
def yellow_line(line):
    """Color *line* yellow."""
    return "".join((colorama.Fore.YELLOW, line, colorama.Fore.RESET))
def red_line(line):
    """Color *line* red."""
    return "".join((colorama.Fore.RED, line, colorama.Fore.RESET))
def cyan_line(line):
    """Color *line* cyan."""
    return "".join((colorama.Fore.CYAN, line, colorama.Fore.RESET))
def red_background_line(line):
    """Render *line* bright on a red background (used for errors)."""
    return "".join((colorama.Back.RED, colorama.Style.BRIGHT, line,
                    colorama.Style.RESET_ALL, colorama.Back.RESET))
def dim_line(line):
    """Render *line* dimmed."""
    return "".join((colorama.Style.DIM, line, colorama.Style.RESET_ALL))
def _reset_terminal():
    # ANSI: ESC[2J clears the screen, ESC[H homes the cursor; written to
    # stderr so piped stdout stays clean.
    sys.stderr.write("\x1b[2J\x1b[H")
def error(msg):
    """Clear the screen and print *msg* as a highlighted error line."""
    _reset_terminal()
    print(red_background_line(msg))
def format_time(secs):
    """Render a non-negative duration in seconds as a compact string
    (s, m, h+m, or d+h)."""
    if secs < 60:
        return "%is" % secs
    if secs < 3600:
        return "%im" % (secs // 60)
    if secs < 3600 * 24:
        return "%ih%im" % (secs // 3600, (secs % 3600) // 60)
    return "%id%ih" % (secs // (3600 * 24), (secs % (3600 * 24)) // 3600)
def vote_to_colored_char(vote):
    """Render a review vote as one colored character:
    green magnitude for positive, red magnitude for negative, '_' for 0."""
    if vote > 0:
        return green_line(str(vote))
    if vote < 0:
        return red_line(str(abs(vote)))
    return '_'
def build_change_line(change):
    """Format one Gerrit change dict as a single colored dashboard row:
    number, M/R/V/W flags, subject and age since last update."""
    # Code-Review: pick the vote with the largest magnitude; negatives win
    # ties (a -2 blocks regardless of +2s).
    review_votes = [vote.get('value', 0) for vote in change['labels'].get(
        'Code-Review', {}).get('all', [])]
    if review_votes:
        if abs(min(review_votes)) >= abs(max(review_votes)):
            review_vote = min(review_votes)
        else:
            review_vote = max(review_votes)
    else:
        review_vote = 0
    review_vote = vote_to_colored_char(review_vote)
    # Verified: only the CI account ("zuul") vote counts.
    verified_votes = change['labels'].get('Verified', {}).get('all', [])
    jenkins = list(filter(lambda vote: vote.get('username') == 'zuul',
                          verified_votes))
    if jenkins:
        jenkins_vote = jenkins[0].get('value', 0)
    else:
        jenkins_vote = 0
    jenkins_vote = vote_to_colored_char(jenkins_vote)
    workflow_vote = max([0] + [vote.get('value', 0) for vote in change['labels'].get(
        'Workflow', {}).get('all', [])])
    workflow_vote = vote_to_colored_char(workflow_vote)
    # Age since last update; the timestamp's sub-microsecond tail (last
    # three digits) is cut so strptime's %f accepts it, then converted to
    # a Unix timestamp by subtracting the epoch.
    updated_ago = (time.time() -
                   (datetime.datetime.strptime(
                       change['updated'][0:-3],
                       "%Y-%m-%d %H:%M:%S.%f") - datetime.datetime(1970, 1, 1)).total_seconds())
    updated_ago = format_time(updated_ago)
    # 'mergeable' may be absent; treat missing as mergeable.
    mergeable = '_' if change.get('mergeable', True) else red_line('M')
    number = str(change['_number'])
    if change['status'] == 'MERGED':
        subject = green_line(change['subject'])
        number = green_line(number)
    elif change['status'] == 'ABANDONED':
        subject = dim_line(change['subject'])
    else:
        subject = change['subject']
    line = ''.join([number, ' ', mergeable, review_vote, jenkins_vote,
                    workflow_vote, ' ', subject, ' - ', updated_ago,
                    ' ago'])
    return line
def do_dashboard(auth_creds, query):
    """Fetch the query results and redraw the whole dashboard screen once.

    Fetch failures are shown via error() without clearing the loop in
    main(), so transient network problems self-heal on the next refresh.
    """
    try:
        changes = get_changes(auth_creds, query)
    except Exception as e:
        error('Failed to get changes from Gerrit: %s' % e)
        return
    _reset_terminal()
    print('Salmon review dashboard - %s' % time.asctime())
    print('id MRVW subject - updated at')
    for change in changes:
        print(build_change_line(change))
def parse_args():
    """Build and evaluate the command-line interface."""
    parser = argparse.ArgumentParser(
        description="Show the result of the result of a Gerrit query.")
    parser.add_argument('-u', '--user', help='Gerrit username')
    parser.add_argument('-P', '--passwd', help='Gerrit password')
    parser.add_argument('-r', '--refresh', help='Refresh in seconds',
                        default=0, type=int)
    parser.add_argument('-q', '--query', help='The Gerrit query to show')
    return parser.parse_args()
def main():
    """Entry point: render the dashboard once, or loop every --refresh s.

    Ctrl-C exits the refresh loop cleanly.
    """
    opts = parse_args()
    auth_creds = (opts.user, opts.passwd)
    while True:
        try:
            do_dashboard(auth_creds, opts.query)
            if not opts.refresh:
                break
            time.sleep(opts.refresh)
        except KeyboardInterrupt:
            break
if __name__ == '__main__':
main()
| gibizer/gerrit-review-dashboard | dashboard.py | dashboard.py | py | 5,084 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.auth.HTTPDigestAuth",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.auth",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "json... |
32807094996 | import torch
from YOLOX.yolox.data.data_augment import preproc
from YOLOX.yolox.data.datasets import COCO_CLASSES
from YOLOX.yolox.exp.build import get_exp_by_name
from YOLOX.yolox.utils import postprocess
from utils.visualize import vis
class Detector():
    """Thin wrapper around a YOLOX model: loads the experiment and weights
    once, then runs single-image detection via detect()."""
    def __init__(self, model='yolox-m', ckpt='自己训练的yolox检测模型.pth'):
        super(Detector, self).__init__()
        self.device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
        # NOTE(review): this unconditionally overrides the CUDA device
        # chosen above, forcing CPU inference — confirm intentional.
        self.device = torch.device('cpu')
        self.exp = get_exp_by_name(model)
        # Input size the preprocessor resizes to (from the experiment).
        self.test_size = self.exp.test_size
        self.model = self.exp.get_model()
        self.model.to(self.device)
        self.model.eval()
        # checkpoint = torch.load(ckpt, map_location="cpu")
        checkpoint = torch.load(ckpt)
        self.model.load_state_dict(checkpoint["model"])
    def detect(self, raw_img, visual=True, conf=0.5):
        """Run detection on one image (numpy array).

        Returns an info dict with boxes (xyxy, rescaled to the original
        image via ``ratio``), scores, class_ids and box_nums — all
        None/0 when nothing is detected.  With ``visual`` True an
        annotated copy is added under 'visual'.
        """
        info = {}
        img, ratio = preproc(raw_img, self.test_size)
        info['raw_img'] = raw_img
        info['img'] = img
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.to(self.device)
        with torch.no_grad():
            outputs = self.model(img)
            outputs = postprocess(
                outputs, self.exp.num_classes, self.exp.test_conf, self.exp.nmsthre)
            # After this branch ``outputs`` is either an ndarray of
            # detections or still the [None] list from postprocess; the
            # check below relies on ndarray rows never being None.
            if outputs[0] != None:
                outputs = outputs[0].cpu().numpy()
            else:
                pass
        if outputs[0] is None:
            info['boxes'], info['scores'], info['class_ids'],info['box_nums']=None,None,None,0
        else:
            info['boxes'] = outputs[:, 0:4]/ratio
            info['scores'] = outputs[:, 4] * outputs[:, 5]
            info['class_ids'] = outputs[:, 6]
            info['box_nums'] = outputs.shape[0]
        if visual:
            info['visual'] = vis(info['raw_img'], info['boxes'], info['scores'], info['class_ids'], conf, COCO_CLASSES)
        return info
{
"api_name": "torch.cuda.is_available",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.device",
... |
18991897731 | from genericpath import exists
import aiohttp
from aiohttp import web
from aiohttp.client_exceptions import ClientConnectionError
import asyncio
import pprint
import traceback
import time
async def reverse_proxprox_websocket(ws_proxying, ws_client, connection_id):
    """Forward every frame from ws_client to ws_proxying wrapped in the
    proxy's multiplexing envelope.

    Each frame gets a header of a 1-char type flag ("1" text, "0" binary)
    plus the connection id, then is packed via msg_pack("ws", ...) and
    sent as bytes.  Any other frame type raises ValueError.
    """
    from proxy import msg_pack
    #print("PROXPROX")
    async for msg in ws_client:
        #print('>>> msg-proxprox: %s',pprint.pformat(msg))
        mt = msg.type
        md = msg.data
        if mt == aiohttp.WSMsgType.TEXT:
            mt = "1"
        elif mt == aiohttp.WSMsgType.BINARY:
            mt = "0"
        else:
            raise ValueError('unexpected message type: %s',pprint.pformat(msg))
        header = mt + connection_id
        msg_wrapped = msg_pack("ws", header, md)
        await ws_proxying.send_bytes(msg_wrapped)
"""
Reverse proxy code from:
https://github.com/oetiker/aio-reverse-proxy/blob/master/paraview-proxy.py'
(Copyright (c) 2018 Tobias Oetiker, MIT License)
"""
async def reverse_proxy_websocket(req, client, update_server, port, tail):
    """Upgrade the incoming request to a websocket and pipe frames both
    ways between the caller and the backend at update_server:port/tail
    until either side finishes."""
    ws_server = web.WebSocketResponse()
    await ws_server.prepare(req)
    #logger.info('##### WS_SERVER %s' % pprint.pformat(ws_server))
    async with client.ws_connect(
        "{}:{}/{}".format(update_server, port, tail),
    ) as ws_client:
        #logger.info('##### WS_CLIENT %s' % pprint.pformat(ws_client))
        async def ws_forward(ws_from,ws_to):
            # One-way copy, preserving text/binary framing.
            async for msg in ws_from:
                #logger.info('>>> msg: %s',pprint.pformat(msg))
                mt = msg.type
                md = msg.data
                if mt == aiohttp.WSMsgType.TEXT:
                    await ws_to.send_str(md)
                elif mt == aiohttp.WSMsgType.BINARY:
                    await ws_to.send_bytes(md)
                else:
                    raise ValueError('unexpected message type: %s',pprint.pformat(msg))
        # keep forwarding websocket data in both directions
        await asyncio.wait([ws_forward(ws_server,ws_client),ws_forward(ws_client,ws_server)],return_when=asyncio.FIRST_COMPLETED)
async def reverse_proxy_http(reqdata, client, rest_server, port, tail, instance=None):
    """Replay a buffered HTTP request against rest_server:port/tail and
    wrap the backend answer in an aiohttp Response.

    Redirect Locations are rewritten to stay under /instance/<name>...;
    when an instance is given, its last_request_time is refreshed (before
    and after reading the body) so idle-instance accounting stays fresh.
    """
    reqH = reqdata["headers"]
    async with client.request(
        reqdata["method"],"{}:{}/{}".format(rest_server, port, tail),
        params=reqdata["query"],
        headers = reqH,
        allow_redirects=False,
        data = reqdata["data"]
    ) as res:
        headers = res.headers.copy()
        # content-length is dropped so aiohttp recomputes it for the
        # re-wrapped body.
        del headers['content-length']
        if "location" in headers:
            instance_name = reqdata["instance"]
            headers["location"] = "/instance/{}{}".format(
                instance_name,
                headers["location"]
            )
        if instance is not None:
            instance.last_request_time = time.time()
        body = await res.read()
        if instance is not None:
            instance.last_request_time = time.time()
        return web.Response(
            headers = headers,
            status = res.status,
            body = body
        )
async def reverse_proxy(req, rest_server, update_server, instances):
    """Main dispatcher: route /instance/<instance>/<tail> to its backend,
    launching the instance on demand.

    Unknown instances are rebuilt from their persisted graph via
    launch_instance (a module-level hook set by the importing module).
    While a launch is incomplete, GETs receive a self-refreshing
    "Loading..." page and other methods poll up to 30 s.  Websocket
    upgrades go through reverse_proxy_websocket, everything else through
    reverse_proxy_http, with up to 5 connection retries 3 s apart.
    """
    reqH = req.headers.copy()
    instance = req.match_info.get('instance')
    try:
        instance = int(instance)
    except ValueError:
        pass
    tail = req.match_info.get('tail')
    if instance not in instances:
        # Cold start: fetch the stored graph and launch the service.
        graph = get_graph(instance)
        if graph is None:
            return web.Response(status=404,text="Unknown instance")
        graph, service = graph
        try:
            launch_instance(
                service,
                instance=instance,
                existing_graph=graph
            )
            assert instance in instances
        except Exception:
            exc = traceback.format_exc()
            return web.Response(
                status=500,
                text=exc
            )
    inst = instances[instance]
    if not inst.complete:
        if inst.error:
            return web.Response(
                status=500,
                text="***Launch error***\n\n" + inst.error_message
            )
        else:
            if req.method == 'GET':
                # Browsers get an auto-refreshing placeholder page.
                return web.Response(
                    status=202,
                    text="""
                    <head>
                    <meta http-equiv="refresh" content="3">
                    </head>
                    <body>
                    Loading...
                    </body>
                    """,
                    content_type='text/html'
                )
            else:
                # Non-GET requests can't be asked to refresh: poll instead.
                for retries in range(30):
                    await asyncio.sleep(1)
                    inst = instances.get(instance)
                    if inst is None:
                        return web.Response(500)
                    if inst.complete:
                        break
    update_port = inst.update_port
    rest_port = inst.rest_port
    for retries in range(5):
        try:
            async with aiohttp.ClientSession(cookies=req.cookies) as client:
                if reqH.get('connection','').lower() == 'upgrade' \
                and reqH.get('upgrade', '').lower() == 'websocket' \
                and req.method == 'GET':
                    await reverse_proxy_websocket(req, client, update_server, update_port, tail)
                    return
                else:
                    inst.last_request_time = time.time()
                    # Buffer the request so it can be replayed on retry.
                    reqdata = {
                        "method": req.method,
                        "headers": req.headers.copy(),
                        "query": req.query,
                        "instance" : req.match_info.get('instance'),
                        "data": await req.read()
                    }
                    inst.last_request_time = time.time()
                    return await reverse_proxy_http(
                        reqdata, client, rest_server,
                        rest_port, tail,
                        inst
                    )
        except ClientConnectionError:
            await asyncio.sleep(3)
from icicle import get_graph
launch_instance = None # to be set by importing module
| sjdv1982/cloudless | reverse_proxy.py | reverse_proxy.py | py | 6,042 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "aiohttp.WSMsgType",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "aiohttp.WSMsgType",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pprint.pformat",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "proxy.ms... |
31502391023 | #!/usr/bin/python3
from helpers import session
from helpers import cookies
from helpers import form
import json
import cgi
import os
import datetime
print("Content-Type: text/html")
def simple_message(message):
    """Emit a minimal HTML response with *message*, a redirect back to
    the profile page, and terminate the CGI script immediately."""
    # The leading empty line closes the CGI header section before the body.
    lines = [
        "",
        message,
        "<br>",
        'Redirecting you back in 5 seconds...<meta http-equiv="refresh" content="5; url=./profile.py">',
    ]
    for line in lines:
        print(line)
    exit()
# Gate: every action below requires an authenticated session.
if not session.is_signed_in():
    simple_message("You need to be signed in to use this feature!")
# All known homework assignments, keyed by homework id.
existing_homeworks= json.loads(open("homeworks.json").read())
homework_id = form.form("homework_id")
if homework_id not in existing_homeworks:
    simple_message("That homework does not exist!")
# Per-user submission records: submissions[user_id][homework_id] -> metadata.
homework_submissions = json.loads(open("submissions.json").read())
# Resubmission: remove the previously uploaded file before replacing it.
if homework_id in homework_submissions[session.get_user_id()]:
    os.remove("submissions/" + homework_submissions[session.get_user_id()][homework_id]["filename"])
# Random suffix avoids filename collisions for repeated uploads.
new_filename = homework_id + "-" + session.get_user_id() + "-" + cookies.random_str(6) + ".txt"
comments = form.form("comments")
# Record (or overwrite) this user's submission metadata for the homework.
homework_submissions[session.get_user_id()][homework_id] = {
    "filename": new_filename,
    "time": datetime.datetime.now().strftime("%Y,%m,%d,%H,%M,%S"),
    "comments": comments
}
# NOTE(review): .file.read() returns bytes, so str(file_data) stores the
# "b'...'" repr rather than the decoded contents -- confirm intended.
file_data = form.data['homework_file'].file.read()
open("submissions.json", "w").write(json.dumps(homework_submissions))
open("submissions/" + new_filename, "w+").write(str(file_data))
simple_message("Homework sucessfully uploaded!")
| abir-taheer/silver-potato | process_submission.py | process_submission.py | py | 1,439 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "helpers.session.is_signed_in",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "helpers.session",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "helpers.form.... |
13883453328 | #This script lists all *.txt files in the folder that you choose (assuming that they are the Slim output files).
#The script creates 2 output files- one for counts and one for frequency.
#How to run:
#python get_sfs_from_full_output_general_reps.py -input_folder /path/to/intput/folder -output_folder /path/to/output -output_prefix /name/of/output/file -mutn_types m1,m2,m3
import sys
import argparse
import os
#parsing user given constants
# Command-line interface: input/output folders, output prefix, and which
# SLiM mutation types to include in the SFS.
parser = argparse.ArgumentParser(description='Information about number of sliding windows and step size')
#parser.add_argument('-regionLen', dest = 'regionLen', action='store', nargs = 1, type = int, help = 'length in bp of region simulated')#Length of coding region simulated
parser.add_argument('-input_folder', dest = 'input_folder', action='store', nargs = 1, type = str, help = 'full path to folder with .ms files')
parser.add_argument('-output_folder', dest = 'output_folder', action='store', nargs = 1, type = str, help = 'full path to folder where you want to write the output')
parser.add_argument('-output_prefix', dest = 'output_prefix', action='store', nargs = 1, type = str, help = 'full path to output file')
# NOTE(review): with nargs=1 a supplied value arrives as a one-element list,
# but the default "m5" is a plain string, so args.mutn_types[0] would yield
# "m" when the flag is omitted -- confirm the flag is always passed.
parser.add_argument('-mutn_types', dest = 'mutn_types', action='store', nargs = 1, default="m5", type = str, help = 'list of mutation types separated by only a comma')
#read input parameters
args = parser.parse_args()
#chr_len = args.regionLen[0]
in_folder = args.input_folder[0]
out_folder = args.output_folder[0]
prefix = args.output_prefix[0]
mutn_types = args.mutn_types[0]
print (out_folder)
# Sample size (number of individuals); allele counts in (0, num_indv) are
# considered truly segregating.
num_indv = 100
def get_sfs_count(l_af):
    """Tally allele counts into a site-frequency-spectrum dict.

    Parameters
    ----------
    l_af : list of str
        One allele count per mutation (as strings, straight from the
        SLiM output).

    Returns
    -------
    dict
        Mapping of allele count (str) -> number of mutations with that
        count.

    Side effect: prints the number of truly segregating sites, i.e.
    counts strictly between 0 and the module-level sample size
    ``num_indv``.
    """
    d_sfs = {}
    s_seg = 0  # total number of truly segregating sites
    for x in l_af:
        # dict.get replaces the original bare ``except:`` increment,
        # which could also have swallowed unrelated errors.
        d_sfs[x] = d_sfs.get(x, 0) + 1
        if 0 < int(x) < int(num_indv):
            s_seg += 1
    print("total number of mutations of type selected:" + str(s_seg))
    return d_sfs
def get_sfs_freq(d_sfs_count):
    """Convert an SFS of absolute counts into relative frequencies.

    Each value is divided by the sum of all counts; an empty input
    yields an empty dict.
    """
    total = float(sum(int(c) for c in d_sfs_count.values()))
    return {klass: float(count) / total
            for klass, count in d_sfs_count.items()}
def get_af(f_txt, mutn_types):
    """Extract allele counts (9th field) for the requested mutation types.

    Iterates over the lines of an open SLiM output file, skipping
    comment/header lines, and keeps the last field of every 9-field
    mutation row whose type matches ``mutn_types``.

    Note: the type test is substring membership on the comma-joined
    string (e.g. "m1" matches within "m1,m2"), mirroring how the
    mutation-type argument is supplied.
    """
    allele_counts = []
    for raw in f_txt:
        record = raw.strip('\n')
        # Skip comments, the "Mutations" header, and non-mutation lines.
        if "#" in record or "Mutations" in record or "m" not in record:
            continue
        fields = record.split()
        if len(fields) == 9 and fields[2] in mutn_types:
            allele_counts.append(fields[8])
    return allele_counts
#Open output file for counts
# Header row: "filename" followed by one column per allele count 1..num_indv-1.
result_count = open(out_folder + "/" + prefix + "_" + str(num_indv) + "_" + mutn_types + "_count.sfs", 'w+')
i = 1
result_count.write("filename")
while i < int(num_indv):
    result_count.write('\t' + str(i))
    i = i + 1
result_count.write('\n')
#Open output file for frequency
# Same header layout as the counts file.
result_freq = open(out_folder + "/" + prefix + "_" + str(num_indv) + "_" + mutn_types + "_freq.sfs", 'w+')
i = 1
result_freq.write("filename")
while i < int(num_indv):
    result_freq.write('\t' + str(i))
    i = i + 1
result_freq.write('\n')
#Make a list of all .txt files:
# NOTE(review): shells out to `ls`; glob.glob would avoid the temp .list
# file and quoting issues -- kept as-is to preserve behavior.
os.system("ls " + in_folder + "/*.txt > " + out_folder + "/" + prefix + ".list")
f_list = open(out_folder + "/" + prefix + ".list", 'r')
# One output row per replicate file: the SFS in counts and in frequencies.
for Aline in f_list:
    Aline1 = Aline.strip('\n')
    f_name = Aline1.split("/").pop()
    print ("Reading file:" + Aline1)
    f_txt = open(in_folder + "/" + f_name, 'r')
    l_AF = get_af(f_txt, mutn_types)
    d_SFS_count = get_sfs_count(l_AF)
    d_SFS_freq = get_sfs_freq(d_SFS_count)
    f_txt.close()
    #Write the full result in counts:
    result_count.write(f_name)#write the d0_0 class
    i = 1
    while (i < int(num_indv)):
        result_count.write('\t' + str(d_SFS_count.get(str(i), 0)))
        i = i + 1
    result_count.write('\n')
    #Write the full result in freq:
    result_freq.write(f_name)#write the d0_0 class
    i = 1
    while (i < int(num_indv)):
        result_freq.write('\t' + str(d_SFS_freq.get(str(i), 0)))
        i = i + 1
    result_freq.write('\n')
f_list.close()
print ("done")
| paruljohri/Perspective_Statistical_Inference | CalculateStatisticsTestSet/get_sfs_from_full_output_general_reps.py | get_sfs_from_full_output_general_reps.py | py | 4,176 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 80,
"usage_type": "call"
}
] |
14374176573 | import os
from flask import Flask, render_template, request
from plot import plot_graph
from download import options, download, set_download_config
from shell import reboot_stable_diffusion, set_shell_config
import json
# Flask application instance (module-level so the route decorators below
# can register against it).
app = Flask(__name__)
# Location of the JSON config file, relative to the working directory.
CONFIG_PATH = '../../config/config.json'
# Default bind address; may be overridden by the "server" config section.
HOST = {'host': 'localhost', 'port': 7778}
def read_config():
    """Load config.json (if present) and apply its sections.

    Applies the "download" and "stable_diffusion" sections via the helper
    modules and replaces the module-level HOST dict with the "server"
    section. A missing config file leaves every default untouched.
    """
    global HOST
    if not os.path.exists(CONFIG_PATH):
        return
    with open(CONFIG_PATH, 'r') as fh:
        config = json.load(fh)
    set_download_config(app, config['download'])
    set_shell_config(config['stable_diffusion'])
    HOST = config['server']
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/plot')
def plot():
    """Regenerate the usage plot and render it.

    Query string: ``max_time`` (default 1). Query values arrive as
    strings; both the string and the int default survive the int()
    conversion below.
    """
    time = request.args.get('max_time', default=1)
    plot_graph(int(time))
    # plot_graph writes the image to static/plot.png; pass that path on.
    plot_file = os.path.join('static', 'plot.png')
    return render_template('plot.html', plot=plot_file)
@app.route('/download')
def download_page():
    """Serve the download page."""
    return render_template('download.html')
@app.route('/action/options')
def action_options():
    """Return the available download options (delegates to download.options)."""
    return options()
@app.route('/action/download/<key>', methods=['POST'])
def action_download(key):
    """Start the download identified by ``key`` (delegates to download.download)."""
    return download(key)
@app.route('/action/restart')
def action_restart():
    """Restart the stable-diffusion process; report 'OK' on success, 'NO' otherwise."""
    return 'OK' if reboot_stable_diffusion() else 'NO'
if __name__ == '__main__':
    # Apply config (download/shell settings and bind address) before serving.
    read_config()
    host = HOST['host']
    port = int(HOST['port'])
    app.run(host=host, port=port, debug=False)
| chun92/my_home_server_manager | src/web/app.py | app.py | py | 1,480 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number":... |
31899827759 | # -*- coding: utf-8 -*-
"""
@File : T3.py
@Author : wenhao
@Time : 2023/4/9 10:27
@LC :
"""
import bisect
from typing import List
from collections import Counter
from bisect import bisect_left
# 最大化最小值 == 二分答案
# 二分 mx
# 尽量多的选下标对 使得选出来的对数 >= p
# 如果下标不影响答案 那么可以排序
# 贪心 如果前两个数可以选 那么必选
#
class Solution:
    def minimizeMax(self, nums: List[int], p: int) -> int:
        """Return the minimum possible value of the maximum difference
        over ``p`` pairs of indices, each index used at most once
        (LeetCode 2616).

        "Minimize the maximum" suggests binary searching on the answer.
        For a candidate bound ``mx`` we greedily pair adjacent values in
        the sorted array -- pairing two adjacent eligible elements is
        never worse -- and check whether at least ``p`` pairs fit.
        """
        nums.sort()

        def check(mx: int) -> bool:
            # Greedy scan: pair nums[i] with nums[i + 1] whenever the
            # gap fits under mx, otherwise advance one position.
            cnt = i = 0
            while i < len(nums) - 1:
                if nums[i + 1] - nums[i] <= mx:
                    cnt += 1
                    i += 2
                else:
                    i += 1
            return cnt >= p

        # Closed-interval binary search over the answer range
        # [0, max(nums) - min(nums)].
        l, r = 0, nums[-1] - nums[0]
        while l <= r:
            m = l + (r - l) // 2
            if check(m):
                r = m - 1
            else:
                l = m + 1
        return l
| callmewenhao/leetcode | contests/weekly-contest-340/T3.py | T3.py | py | 1,601 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
}
] |
23468598421 | # MS MARCO Document: Script for plotting leaderboard over time scatter plots
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # headless backend: render straight to file

import pandas as pd

# One row per leaderboard submission; 'date' column parsed to datetime.
df = pd.read_csv('../leaderboard/leaderboard.csv', parse_dates=['date'])
# Plot all the runs
ax = df.plot(x='date',y='MRR@100 (Eval)',marker='o',linestyle='none',label='Submission')
# Overlay all SOTA runs, in red.
# SOTA rows are flagged with a trophy in an unnamed CSV column.
sota = df[df['Unnamed: 2'] == '🏆']
sota.plot(ax=ax, x='date',y='MRR@100 (Eval)',marker='o',color = 'red',linestyle='none',label='SOTA')
# Guide to formatting date ticks
# https://matplotlib.org/3.1.1/gallery/text_labels_and_annotations/date.html
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.set_xlim([datetime.date(2020, 8, 1), datetime.date(2021, 3, 1)])
plt.title('MS MARCO Document Leaderboard')
plt.xlabel('Date')
plt.ylabel('MRR@100')
plt.savefig('leaderboard.pdf', bbox_inches='tight', format='pdf')
| Whem2020/MSMARCO-Document-Ranking-Archive | analysis/plot_leaderboard_over_time.py | plot_leaderboard_over_time.py | py | 947 | python | en | code | null | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.switch_backend",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matp... |
36412878597 | import cv2
import os
import glob
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument("--video_dir", type=str,
                    help="Dataset directory", default='/home/park/0808_capture/video/trade_tower/')
parser.add_argument("--video_result_dir", type=str,
                    help="Test result save directory", default='/home/park/0808_capture/video/trade_tower/results/')

args = parser.parse_args()

if __name__ == '__main__':
    # Collect every .mp4 under the input directory and save one JPEG
    # every 50 frames per video into the result directory.
    video_list = os.path.join(args.video_dir, '*.mp4')
    video_list = glob.glob(video_list)
    os.makedirs(args.video_result_dir, exist_ok=True)

    for video_idx, video_file in enumerate(video_list):
        video_idx += 1  # 1-based index used in the output file names

        if os.path.isfile(video_file):
            cap = cv2.VideoCapture(video_file)
        else:
            # BUG FIX: the original ``raise('...')`` raised a plain string,
            # which is itself a TypeError under Python 3.
            raise FileNotFoundError('cannot find file : {0}'.format(video_file))

        # Camera FPS is queried but then hard-coded to 30 (behavior kept).
        fps = cap.get(cv2.CAP_PROP_FPS)
        fps = 30

        # Frame dimensions (informational only).
        frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_size = (frameWidth, frameHeight)
        print('frame_size={0}'.format(frame_size))

        frame_idx = 0
        while True:
            print(frame_idx)
            retval, frame = cap.read()
            frame_idx += 1
            if not retval:
                break
            # Persist every 50th frame as <result_dir>_<video>_<frame>.jpg.
            if frame_idx % 50 == 0:
                cv2.imwrite(args.video_result_dir + '_' + str(video_idx) + '_' + str(frame_idx) + '.jpg', frame)

        if cap.isOpened():
            cap.release()
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"lin... |
26807469463 | #!/usr/bin/env python3
import requests
import socket
from utils import logger
moviePage_url = 'https://trailers.apple.com/'
movieSearch_url = 'https://trailers.apple.com/trailers/home/scripts/quickfind.php'
log = logger.get_log(__name__)
class Apple():
    """Trailer provider backed by Apple's trailers.apple.com search API.

    Looks a movie up by title and release year, then returns direct links
    to trailer clips whose vertical resolution lies within
    ``[min_resolution, max_resolution]``.
    """

    def __init__(self, min_resolution, max_resolution):
        # Resolutions may arrive as strings; normalise to int once here.
        self.min_resolution = int(min_resolution)
        self.max_resolution = int(max_resolution)

    def _getMoviePage(self, title, year):
        """Return the page.json data dict for the movie, or False.

        (Renamed from the original ``_getMoivePage`` typo; internal only.)
        """
        movies = self._getJson(movieSearch_url, params={'q': title})
        if not movies:
            return False

        # NOTE: a response without an 'error' key is treated as an error.
        if movies.get('error', True) == True:
            log.debug('Apple could not find the movie "{}" url: {}'.format(title, movies['url']))
            return False

        if not 'results' in movies or len(movies.get('results')) < 1:
            log.debug('Apple returned no results for "{}" url: {}'.format(title, movies['url']))
            return False

        # Find the matching movie: exact (case-insensitive) title and a
        # release date containing the requested year.
        location = None
        for movie in movies.get('results'):
            if title.lower() == movie.get('title', '').lower() and str(year) in movie.get('releasedate', ''):
                location = movie.get('location', None)
                break

        # Bail out if no result matched.
        if not location:
            return False

        # Build the page.json URL for the movie and fetch its data.
        url = requests.compat.urljoin(moviePage_url, location + '/data/page.json')
        log.debug('Getting movie data from url: {}'.format(url))
        movieData = self._getJson(url)
        if not movieData:
            return False

        return movieData

    def _getJson(self, url, params=None):
        """GET ``url`` and return the parsed JSON dict, or None on failure.

        The final request URL is injected into the result under 'url' so
        callers can log it.
        """
        try:
            with requests.get(url, params=params, timeout=5) as r:
                r.raise_for_status()
                result = r.json()
                result['url'] = r.url
                return result
        except ValueError:
            # r.json() failed to decode the response body.
            log.debug('Failed to parse data returned from Apple. url: {} response:{}'.format(r.url, r.text))
            return None
        except requests.exceptions.Timeout:
            log.warning('Timed out while connecting to {}'.format(url))
            return None
        except requests.exceptions.ConnectionError as e:
            log.warning('Failed to connect to {} Error: {}'.format(url, e))
            return None
        except requests.exceptions.HTTPError as e:
            log.warning('Apple search failed for {} Error: {}'.format(url, e))
            return None
        except requests.exceptions.RequestException as e:
            log.warning('Unknown error: {}'.format(e))
            return None

    def getLinks(self, title, year):
        """Return a list of {'url', 'height', 'source'} dicts for every
        trailer clip within the configured resolution bounds."""
        links = []

        movieData = self._getMoviePage(title, year)
        # Empty list when no movie page was found.
        if not movieData:
            return links

        # Collect all trailer links.
        for clip in movieData['clips']:
            if 'trailer' in clip['title'].lower():
                for item in clip['versions']['enus']['sizes']:
                    height = int(clip['versions']['enus']['sizes'][item]['height'])
                    # Primary source height comes from keys like 'hd720';
                    # anything unparsable is assumed SD (480).
                    try:
                        stdSize = int(item.replace('hd', ''))
                    except ValueError:
                        stdSize = 480
                    # Alternate source height is inferred from its URL.
                    if '720p' in clip['versions']['enus']['sizes'][item]['srcAlt']:
                        altSize = 720
                    elif '1080p' in clip['versions']['enus']['sizes'][item]['srcAlt']:
                        altSize = 1080
                    else:
                        altSize = 480

                    # Filter based on the reported clip height.
                    if height >= self.min_resolution and height <= self.max_resolution:
                        links.append({
                            'url': clip['versions']['enus']['sizes'][item]['src'],
                            'height': stdSize,
                            'source': 'apple'
                        })
                        links.append({
                            'url': clip['versions']['enus']['sizes'][item]['srcAlt'],
                            'height': altSize,
                            'source': 'apple'
                        })
        return links
| jsaddiction/TrailerTech | providers/apple.py | apple.py | py | 4,381 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "utils.logger.get_log",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.logger",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "requests.compat.urljoin",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "requests.comp... |
73948967712 | """Import the compiled Python for .Net module"""
import clr
import sys
print()
# Show which pythonnet (clr) version is in use.
print ('clr version = {}'.format(str(clr.__version__)))
"""Import the Keysight automated test app remote library DLL"""
# Windows-only: the remote toolkit DLL ships with the Keysight install.
sys.path.append(r'C:\ProgramData\Keysight\DigitalTestApps\Remote Toolkit\Version 6.3\Tools')
clr.AddReference("Keysight.DigitalTestApps.Framework.Remote")
import Keysight.DigitalTestApps.Framework.Remote as KtRemote
"""Connect to the automated test application running on the scope
This will wait for the application to be fully launched and ready
before proceeding"""
# 127.0.0.1 assumes this script runs on the scope itself.
scopeIpAddress = "127.0.0.1"
remoteObj = KtRemote.RemoteAteUtilities.GetRemoteAte(scopeIpAddress)
remoteApp = KtRemote.IRemoteAte(remoteObj)
print (remoteApp.ApplicationName)
{
"api_name": "clr.__version__",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "clr.AddReference",
... |
17519684943 | from collections import defaultdict
import pandas as pd
import datetime
from lode.utilities.util import parse_date
from itertools import izip
def create_date_limited_sql(master_table, dates=None,
                            begin_date=None, end_date=None,
                            date_col="trading_date", range_break="Year"):
    """Build per-shard SQL queries for the requested dates or date range.

    Tables are sharded by year (memory considerations), so one request
    may expand into several queries. Call with either ``dates`` (a single
    date or an iterable of dates) or with both ``begin_date`` and
    ``end_date``.

    Parameters
    ----------
    master_table: root table name to query (e.g. ``nodal_prices``)
    dates: a date string/object, or an iterable of them
    begin_date: first date in a range of dates to return
    end_date: last date in a range of dates to return
    date_col: column containing the trading dates
    range_break: "Year" (default) or "Month"; "Month" trades more queries
        for lower peak memory usage

    Returns
    -------
    list of str
        One SQL query per table shard touched.

    Raises
    ------
    ValueError
        If ``range_break`` is not an implemented break size.
    """
    # (Removed the original unused ``all_queries = []`` and the
    # unreachable trailing ``return all_queries``.)
    # If passing a selection of date objects
    if dates:
        if hasattr(dates, '__iter__'):
            return list(singular_sql_dates(master_table, dates, date_col))
        else:
            dt = parse_date(dates)
            return ["""SELECT * FROM %s_%s WHERE %s='%s'""" % (
                master_table, dt.year, date_col, dt.strftime('%d-%m-%Y'))]
    # Work with Date Ranges
    else:
        if range_break == "Year":
            return list(yearly_sql_dates(begin_date, end_date, master_table,
                                         date_col))
        elif range_break == "Month":
            return list(monthly_sql_dates(begin_date, end_date, master_table,
                                          date_col))
        else:
            # Cleaned up: the original message embedded a newline and a
            # long run of indentation whitespace.
            raise ValueError("Range Breaks for %s have not been "
                             "implemented, try 'Year'" % range_break)
def singular_sql_dates(master_table, dates, date_col):
    """Yield one SELECT ... IN (...) query per calendar year covered.

    Because tables are sharded per year (e.g. nodal_prices_2012), the
    requested dates are grouped by year and a single query is produced
    for each shard. Dates may be strings or datetime objects, in any
    order; parsing is handled here.

    Parameters
    ----------
    master_table: root table name, e.g. ``nodal_prices``
    dates: iterable of date strings/objects
    date_col: the column name for the dates in the SQL database

    Yields
    ------
    str
        One SQL query per year, selecting all of that year's dates.
    """
    grouped = defaultdict(list)
    for parsed in (parse_date(d) for d in dates):
        grouped[parsed.year].append(parsed)

    template = """ SELECT * FROM %s_%s WHERE %s in %s"""
    for year, year_dates in grouped.items():
        in_clause = "('%s')" % join_date_strings(year_dates)
        yield template % (master_table, year, date_col, in_clause)
def yearly_sql_dates(begin_date, end_date, mtable, date_col,
                     df="%d-%m-%Y"):
    """Yield one SELECT ... BETWEEN query per calendar year in the range.

    Tables are sharded per year, so a range such as 20/12/2014 to
    20/02/2015 requires querying both the 2014 and the 2015 shard; full
    intermediate years get a whole-year query each. Implemented as a
    generator to keep memory usage low on RAM-limited machines.

    Parameters
    -----------
    begin_date: String or datetime object of the first date to consider
    end_date: String or datetime object of the last date to consider
    mtable: What table to query (master)
    date_col: The column containing the trading dates
    df: The date format to use if needed

    Yields
    --------
    str: one BETWEEN query per year shard
    """
    # Parse the dates as they're probably strings
    begin_date = parse_date(begin_date)
    end_date = parse_date(end_date)

    # Year-boundary templates.
    # BUG FIX: the original dec31 template was "31-01-%s" (31 January
    # under the %d-%m-%Y format), which silently dropped February through
    # December of every non-final year in the range.
    jan1, dec31 = "01-01-%s", "31-12-%s"
    query_string = """SELECT * FROM %s_%s WHERE %s BETWEEN '%s' AND '%s'"""

    if begin_date.year == end_date.year:
        fd, ld = begin_date.strftime(df), end_date.strftime(df)
        yield query_string % (mtable, begin_date.year, date_col, fd, ld)
    else:
        # First partial year: begin_date through 31 December.
        fd, ld = begin_date.strftime(df), dec31 % begin_date.year
        yield query_string % (mtable, begin_date.year, date_col, fd, ld)

        # Full intermediate years, if any (e.g. all of 2014 between
        # 12/12/2013 and 02/04/2015).
        years = range(begin_date.year + 1, end_date.year)
        if len(years) > 0:
            for year in years:
                fd, ld = jan1 % year, dec31 % year
                yield query_string % (mtable, year, date_col, fd, ld)

        # Final partial year: 1 January through end_date.
        if end_date.year != begin_date.year:
            fd, ld = jan1 % end_date.year, end_date.strftime(df)
            yield query_string % (mtable, end_date.year, date_col, fd, ld)
def monthly_sql_dates(begin_date, end_date, mtable, date_col, df='%d-%m-%Y'):
    """Yield one SELECT ... BETWEEN query per month in the range.

    A finer-grained alternative to :func:`yearly_sql_dates`, used when
    holding a whole year of data in memory is too expensive; the
    generator keeps peak RAM usage low as garbage collection runs
    between queries.

    Parameters
    -----------
    begin_date: The first date as either a datetime object or string
    end_date: The last date as either a datetime object or string
    mtable: The master table to query from
    date_col: What column contains date information
    df: What date format to use

    Yields
    --------
    str: one BETWEEN query per month (queries target the year shard of
    the month's start date)
    """
    query_string = """SELECT * FROM %s_%s WHERE %s BETWEEN '%s' AND '%s'"""
    # Parse the dates as they're probably strings
    begin_date = parse_date(begin_date)
    end_date = parse_date(end_date)

    # Month-end dates covered by the range (freq="M" yields month ends).
    month_range = list(pd.date_range(begin_date, end_date, freq="M"))
    # Day after each month end == first day of the following month.
    month_range_p1 = [x + datetime.timedelta(days=1) for x in month_range]

    # BUG FIX: guard against an empty month_range (a range inside a
    # single month that does not touch a month end) before peeking at
    # month_range[-1], which raised IndexError in the original.
    if month_range and month_range[-1] == end_date:
        end_dates = month_range
    else:
        end_dates = month_range + [end_date]
    begin_dates = [begin_date] + month_range_p1

    # Builtin zip replaces itertools.izip (Python-2 only); it truncates
    # to the shorter sequence exactly as izip did.
    for s, e in zip(begin_dates, end_dates):
        beg, end = s.strftime(df), e.strftime(df)
        yield query_string % (mtable, s.year, date_col, beg, end)
def join_date_strings(dates, separator="','", df="%d-%m-%Y"):
    """Format each date with ``df`` and join the results with ``separator``.

    The default separator ("','") is chosen so the result drops straight
    into a quoted SQL IN (...) list.
    """
    formatted = (d.strftime(df) for d in dates)
    return separator.join(formatted)
# SQL constraint fragments appended to a base query.
# NOTE(review): values are interpolated directly into SQL; callers must
# only pass trusted values (no escaping/parameterization is done here).


def _is_multi(values):
    """True when ``values`` should be treated as a collection of values.

    Strings are explicitly scalars: under Python 3 ``str`` gained
    ``__iter__``, so the original bare ``hasattr`` check wrongly routed
    single string values into the IN (...) branch.
    """
    return hasattr(values, '__iter__') and not isinstance(values, str)


def _quoted_list(values):
    """Render values as a quoted SQL tuple literal: ('a','b','c')."""
    return "('%s')" % "','".join(values)


def add_equality_constraint(column, values):
    """AND column = value, or AND column IN (...) for a collection."""
    if not _is_multi(values):
        return add_single_selection_constraint(column, values)
    else:
        return add_multiple_selection_constraint(column, values)


def add_exclusion_constraint(column, values):
    """AND column != value, or AND column NOT IN (...) for a collection."""
    if not _is_multi(values):
        return add_single_exclusion_constraint(column, values)
    else:
        return add_multiple_exclusion_constraint(column, values)


def add_minimum_constraint(column, value):
    """AND column >= value."""
    return """ AND %s >= '%s'""" % (column, value)


def add_maximum_constraint(column, value):
    """AND column <= value."""
    return """ AND %s <= '%s'""" % (column, value)


def add_range_constraint(column, begin, end):
    """AND column BETWEEN begin AND end (inclusive)."""
    return """ AND %s BETWEEN '%s' AND '%s'""" % (column, begin, end)


def add_single_selection_constraint(column, value):
    """AND column = value (single scalar)."""
    return """ AND %s='%s'""" % (column, value)


def add_multiple_selection_constraint(column, values):
    """AND column IN ('v1','v2',...)."""
    return """ AND %s IN %s""" % (column, _quoted_list(values))


def add_single_exclusion_constraint(column, value):
    """AND column != value (single scalar)."""
    return """ AND %s!='%s'""" % (column, value)


def add_multiple_exclusion_constraint(column, values):
    """AND column NOT IN ('v1','v2',...)."""
    return """ AND %s NOT IN %s""" % (column, _quoted_list(values))
if __name__ == '__main__':
pass
| NigelCleland/lode | lode/database/query_builders.py | query_builders.py | py | 10,563 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "lode.utilities.util.parse_date",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "lode.utilities.util.parse_date",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 93,
"usage_type": "call"
},
{
... |
5103258436 | from datetime import date
# Current year, used to derive each person's age from their birth year.
atual = date.today().year
totalmenor = totalmaior = 0
# Ask for seven birth years and classify each person against age 21.
for ano in range(1, 8):
    pessoa = int(input('Em que ano a {}ª nasceu? '.format(ano)))
    idade = atual - pessoa
    print('Essa pessoa tem {} anos.'.format(idade))
    if idade < 21:
        totalmenor += 1
    else:
        totalmaior += 1
print('O total de maiores foi de: {}'.format(totalmaior))
print('O total de menores foi de: {}'.format(totalmenor))
{
"api_name": "datetime.date.today",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 2,
"usage_type": "name"
}
] |
12622362396 | import face_recognition
from PIL import Image, ImageDraw
# This is an example of running face recognition on a single image
# and drawing a box around each person that was identified.
# Load a sample picture and learn how to recognize it.
# Build one reference encoding per labelled expression image.
neutral_image = face_recognition.load_image_file("neutral.jpg")
neutral_face_encoding = face_recognition.face_encodings(neutral_image)[0]

sad_image = face_recognition.load_image_file("sad.jpg")
sad_face_encoding = face_recognition.face_encodings(sad_image)[0]

fear_image = face_recognition.load_image_file("fear.jpg")
fear_face_encoding = face_recognition.face_encodings(fear_image)[0]

disgust_image = face_recognition.load_image_file("disgust.jpg")
disgust_face_encoding = face_recognition.face_encodings(disgust_image)[0]

happy_image = face_recognition.load_image_file("happy.jpg")
happy_face_encoding = face_recognition.face_encodings(happy_image)[0]

angry_image = face_recognition.load_image_file("angry.jpg")
angry_face_encoding = face_recognition.face_encodings(angry_image)[0]

surprize_image = face_recognition.load_image_file("surprize.jpg")
surprize_face_encoding = face_recognition.face_encodings(surprize_image)[0]

# Known encodings and their labels.
# BUG FIX: the label list must be index-aligned with the encoding list.
# The original listed the names in a different order (happy/surprize in
# positions 2-3), so matched faces were reported with the wrong label.
known_face_encodings = [
    neutral_face_encoding,
    sad_face_encoding,
    fear_face_encoding,
    disgust_face_encoding,
    happy_face_encoding,
    angry_face_encoding,
    surprize_face_encoding
]
known_face_names = [
    "neutral",
    "sad",
    "fear",
    "disgust",
    "happy",
    "angry",
    "surprize"
]

# Load an image with an unknown face, then locate and encode every face.
unknown_image = face_recognition.load_image_file("exp.jpg")
face_locations = face_recognition.face_locations(unknown_image)
face_encodings = face_recognition.face_encodings(unknown_image, face_locations)

# Convert to a PIL image so we can draw on it with Pillow
# (see http://pillow.readthedocs.io/).
pil_image = Image.fromarray(unknown_image)
draw = ImageDraw.Draw(pil_image)

# Loop through each face found in the unknown image.
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
    # See if the face is a match for the known face(s).
    matches = face_recognition.compare_faces(known_face_encodings, face_encoding)

    name = "Unknown"
    # If a match was found in known_face_encodings, just use the first one.
    if True in matches:
        first_match_index = matches.index(True)
        name = known_face_names[first_match_index]

    # Box around the face plus a filled label strip beneath it.
    draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10; switch to
    # textbbox when upgrading Pillow.
    text_width, text_height = draw.textsize(name)
    draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
    draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))

# Remove the drawing library from memory as per the Pillow docs.
del draw

# Display the resulting image.
pil_image.show()

# You can also save a copy of the new image to disk by uncommenting this line
# pil_image.save("image_with_boxes.jpg")
| VishalPatnaik/Facial-Emotion-Detection | expressions.py | expressions.py | py | 3,370 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "face_recognition.load_image_file",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "face_recognition.face_encodings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "face_recognition.load_image_file",
"line_number": 12,
"usage_type": "call"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.