id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12854015 | <filename>python/191122.py
from os import getcwd
def rdfile():
    """Read PM2.5 readings from pm25.txt and print summary statistics.

    Lines that cannot be parsed as a float are skipped.  Prints the
    working directory, then the max, min, mean, and the number of
    readings greater than 70.
    """
    data = []
    # Show where this script is being executed from.
    print(getcwd())
    with open("pm25.txt", 'r') as fd:
        for line in fd:
            try:
                data.append(float(line.strip()))
            except ValueError:
                # Skip header/blank/malformed lines instead of crashing.
                pass
    if not data:
        # Avoid max()/min() on an empty sequence and division by zero.
        print("No valid data found in pm25.txt")
        return
    print('Max =', max(data))
    print('Min =', min(data))
    print('Avg =', (sum(data) / len(data)))
    data_bigger_than_70 = sum(1 for value in data if value > 70)
    print('The amount of data which is bigger than 70 :', data_bigger_than_70)
def main():
    """Entry point: read the PM2.5 data file and print its statistics."""
    rdfile()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8128744 | <gh_stars>1-10
from django.urls import path
from .views import (
LoginAPIView, RegistrationAPIView, UserRetrieveUpdateAPIView,
TwitterAuthAPIView, GoogleAuthAPIView, FacebookAuthAPIView,
AccountActivateAPIView,
PasswordResetRequestAPIView, PasswordResetAPIView
)
# URL namespace used when reversing these routes
# (e.g. reverse("authentication:login")).
app_name = "authentication"

# Account-management and authentication endpoints.
urlpatterns = [
    # Retrieve or update the current user's record.
    path('user', UserRetrieveUpdateAPIView.as_view(),
         name='retrieve-update'),
    # Register a new user.
    path('users', RegistrationAPIView.as_view(),
         name='register'),
    # Credential-based login.
    path('users/login', LoginAPIView.as_view(),
         name='login'),
    # Social-auth logins.
    path('users/login/twitter', TwitterAuthAPIView.as_view(),
         name='twitter-auth'),
    path('users/login/google', GoogleAuthAPIView.as_view(),
         name='google-auth'),
    path('users/login/facebook', FacebookAuthAPIView.as_view(),
         name='facebook-auth'),
    # Account activation via a token carried in the URL.
    path('users/<token>', AccountActivateAPIView.as_view(),
         name='activate'),
    # Password-reset flow: request a reset, then reset with the token.
    path(
        'user/request-password-reset', PasswordResetRequestAPIView.as_view(),
        name='request-password-reset'
    ),
    path(
        'user/reset-password/<token>', PasswordResetAPIView.as_view(),
        name='reset-password'
    ),
]
| StarcoderdataPython |
# NOTE(review): this script is Python 2 (execfile, tuple-unpacking lambda,
# and it relies on map() returning a list).
execfile("core.py")
from algorithms.ucb.ucb2 import *
import random

random.seed(1)

# One clearly-best Bernoulli arm (0.9) among four poor ones; shuffle so the
# best arm's index is not fixed.
means = [0.1, 0.1, 0.1, 0.1, 0.9]
n_arms = len(means)
random.shuffle(means)
arms = map(lambda (mu): BernoulliArm(mu), means)
print("Best arm is " + str(ind_max(means)))

# Sweep the UCB2 alpha parameter; write one TSV of results per alpha.
for alpha in [0.1, 0.3, 0.5, 0.7, 0.9]:
    algo = UCB2(alpha, [], [])
    algo.initialize(n_arms)
    results = test_algorithm(algo, arms, 5000, 250)
    f = open("algorithms/ucb/ucb2_results_%s.tsv" % alpha, "w")
    # Each row: one value per result column, then the alpha that produced it.
    for i in range(len(results[0])):
        f.write("\t".join([str(results[j][i]) for j in range(len(results))]))
        f.write("\t%s\n" % alpha)
    f.close()
| StarcoderdataPython |
4934077 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""OpenCL ML network tests."""
import numpy as np
import pytest
from tvm import testing
from tvm import relay
import tvm
from test_clml.infrastructure import skip_runtime_test, build_and_run
from test_clml.infrastructure import Device
def _build_and_run_network(mod, params, inputs, data, device, atol, rtol):
    """Build and run the module twice: with CLML offload enabled, then without.

    Returns a two-element list [clml_output, plain_opencl_output].
    """
    # NOTE(review): inputs, atol and rtol are accepted but currently unused.
    return [
        build_and_run(
            mod,
            data,
            1,
            params,
            device,
            enable_clml=use_clml,
        )[0]
        for use_clml in (True, False)
    ]
def _get_keras_model(keras_model, inputs_dict, data):
    """Convert Keras graph to relay."""
    inputs = {}
    for name, (shape, _) in inputs_dict.items():
        # NOTE(review): always keyed on input_names[0], so with multiple
        # inputs each iteration overwrites the same entry — confirm intended.
        inputs[keras_model.input_names[0]] = shape

    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model

    def get_bottom_top_model(model, layer_name):
        # Rebuild the model from its input up to (and including) layer_name.
        layer = model.get_layer(layer_name)
        bottom_input = model.layers[0].input
        bottom_output = bottom_input
        for layer in model.layers:
            bottom_output = layer(bottom_output)
            if layer.name == layer_name:
                break
        bottom_model = Model(bottom_input, bottom_output)
        return bottom_model

    keras_model = get_bottom_top_model(keras_model, "predictions")
    # Keras predicts on NHWC input; stored data is NCHW, hence the transpose.
    ref_output = keras_model.predict(data["input_1"].transpose(0, 2, 3, 1))
    mod, params = relay.frontend.from_keras(keras_model, inputs, layout="NCHW")
    return mod, params, ref_output
def test_mobilenet():
    """Compare CLML-offloaded vs. plain OpenCL execution of MobileNet."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()
    dtype = "float16"  # NOTE(review): unused; inputs below are float32.

    def get_model():
        # Build MobileNet with pre-trained weights loaded from disk and
        # generate deterministic random input data for it.
        from tensorflow.keras.applications import MobileNet

        mobilenet = MobileNet(
            include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
        )
        mobilenet.load_weights("mobilenet_1_0_224_tf.h5")
        inputs = {mobilenet.input_names[0]: ((1, 3, 224, 224), "float32")}
        data = {}
        np.random.seed(0)
        for name, (shape, dtype) in inputs.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -1, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        mod, params, ref_outputs = _get_keras_model(mobilenet, inputs, data)
        return mod, params, inputs, data, ref_outputs

    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # test
    print("OpenCL:", outputs[0][0].asnumpy().shape)
    print("CLML:", outputs[1][0].asnumpy().shape)
    # Compare only the ranking of the top classes, not raw activations.
    opencl_sort = np.argsort(outputs[1][0].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0][0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
    # Disabled stricter comparisons kept for reference:
    """
    tvm.testing.assert_allclose(
        ref_outputs, outputs[1][0].asnumpy(), rtol=1e-5, atol=1e-5)
    print("OpenCL to Keras looks good")
    tvm.testing.assert_allclose(
        outputs[0][0].asnumpy(), outputs[1][0].asnumpy(), rtol=1e-5, atol=1e-5)
    print("OpenCL to CLML looks good")
    exit(0)
    tvm.testing.assert_allclose(
        ref_outputs.transpose(0, 3, 1, 2), outputs[1][0].asnumpy(), rtol=1e-5, atol=1e-5)
    print("OpenCL to Keras looks good")
    tvm.testing.assert_allclose(
        outputs[0][0].asnumpy(), outputs[1][0].asnumpy(), rtol=1e-5, atol=1e-5)
    print("OpenCL to CLML looks good")
    """


if __name__ == "__main__":
    test_mobilenet()
| StarcoderdataPython |
6478633 | #!/usr/bin/env python
__author__ = '<NAME>'
import os
from collections import OrderedDict
from scipy import stats
from RouToolPa.Collections.General import TwoLvlDict
def read_data(filename):
    """Load coverage values for the two marker genes from a tab file.

    Returns an OrderedDict mapping gene id -> coverage (float), restricted
    to the genes YCL066W and YCR097W_with_introns.
    """
    genes_of_interest = ("YCL066W", "YCR097W_with_introns")
    coverage = OrderedDict()
    with open(filename, "r") as in_fd:
        for raw_line in in_fd:
            fields = raw_line.strip().split("\t")
            if fields[0] in genes_of_interest:
                coverage[fields[0]] = float(fields[1])
    return coverage
def calculate_chi_squared(data_dict, proportion_list):
    """Chi-squared test of the two genes' coverage against a given ratio.

    NOTE(review): Python 2 (print statements below).
    """
    exp_value_list = [data_dict["YCL066W"], data_dict["YCR097W_with_introns"]]
    total = sum(exp_value_list)
    theor_value_list = []
    # Distribute the observed total according to the expected proportions.
    for proportion in proportion_list:
        theor_value_list.append(total * float(proportion) / sum(proportion_list))
    print exp_value_list
    print theor_value_list
    return stats.chisquare(exp_value_list, theor_value_list)
def get_results(samples_list, data_type):
    """Collect coverage values and chi-squared stats for every sample.

    data_type selects the coverage file flavour (e.g. "mean" or "median").
    NOTE(review): Python 2 (print statements below).
    """
    results = TwoLvlDict()
    for sample in samples_list:
        results[sample] = OrderedDict()
        filename = "%s/all_reads/%s_all_%s_coverage.tab" % (sample, sample, data_type)
        data = read_data(filename)
        if not data:
            # No marker genes found for this sample; skip it.
            print sample
            continue
        print sample
        for gene in data:
            results[sample][gene] = data[gene]
        # Test the observed coverage against 1:2, 2:1 and 1:1 ratios.
        for proportions, name in zip([[1, 2], [2, 1], [1, 1]], ["1:2", "2:1", "1:1"]):
            chi_results = calculate_chi_squared(data, proportions)
            print name
            results[sample][name + " Chi"] = chi_results[0]
            results[sample][name + " p-value"] = chi_results[1]
            print chi_results
    return results
# Collect sample directories (names starting with "N") from the working
# directory, then write mean and median coverage statistics tables.
work_dir = "/media/mahajrod/d9e6e5ee-1bf7-4dba-934e-3f898d9611c8/Data/LAN2xx/mating_type_detection"
os.chdir(work_dir)

temp = os.listdir("./")
samples_list = []
for filename in temp:
    if filename[0] == "N":
        samples_list.append(filename)

mean_results = get_results(samples_list, "mean")
median_results = get_results(samples_list, "median")
mean_results.write("all_mean_results.tab")
median_results.write("all_median_results.tab")
1834289 | <reponame>willwhitney/exploration-reimplementation
import time
import os
import math
import pickle
import queue
from typing import Any
import numpy as np
import matplotlib.pyplot as plt
import jax
from jax import numpy as jnp, random
from flax import nn, struct
from dm_control import suite
import replay_buffer
import q_learning
import utils
from environments.observation_domains import DOMAINS
from environments import jax_specs
from policies.pytorch_sac.video import VideoRecorder
# Optimistic upper bound on novelty Q-values: targets are clipped to R_MAX
# and unexplored (state, action) pairs are valued toward it.
R_MAX = 100


@struct.dataclass
class ExplorationState():
    """The pure-JAX components that can be jitted/vmapped.
    """
    novq_state: q_learning.QLearnerState         # online novelty-Q state
    target_novq_state: q_learning.QLearnerState  # target network for TD targets
    density_state: Any          # visit-count / density model state
    temperature: float          # Boltzmann temperature for action selection
    update_temperature: float   # temperature used when forming update targets
    prior_count: float          # pseudo-count weighting the optimistic prior
@struct.dataclass
class AgentState():
    """A container for the entire state; not jittable.
    """
    exploration_state: ExplorationState
    policy_state: Any = struct.field(pytree_node=False)  # task policy (opaque)
    replay: Any = struct.field(pytree_node=False)        # replay buffer
    j_action_spec: Any               # JAX-converted action spec
    n_candidates: int                # candidate actions sampled when acting
    n_update_candidates: int         # candidate next-actions per Q update
    n_updates_per_step: int          # novelty-Q updates per environment step
    update_target_every: int         # updates between target-network syncs
    warmup_steps: int                # random-action steps before learning
    optimistic_actions: bool         # use optimistic values to pick actions
    uniform_update_candidates: bool  # sample update candidates uniformly
    batch_size: int
    steps_since_tupdate: int = 0     # counter toward the next target sync
@jax.jit
def _novelty_given_counts(counts):
ones = jnp.ones(jnp.array(counts).shape)
rewards = (counts + 1e-8) ** (-0.5)
options = jnp.stack([ones, rewards], axis=1)
# Clip rewards to be at most 1 (when count is 0)
return jnp.min(options, axis=1)
# @jax.jit
@jax.profiler.trace_function
def compute_novelty_reward(exploration_state, states, actions):
    """Returns a novelty reward in [0, 1] for each (s, a) pair."""
    # Visit counts come from the density model (module bound in __main__).
    counts = density.get_count_batch(
        exploration_state.density_state, states, actions)
    return _novelty_given_counts(counts)
@jax.jit
def _targets_given_values(next_values, next_values_target,
                          novelty_reward, temp, discount):
    """Soft double-DQN TD targets, clipped to the feasible range."""
    # double DQN rule:
    # - select next action according to current Q
    # - evaluate it according to target Q
    # Uses an expectation under a Boltzmann distribution over candidate
    # values instead of a hard argmax.
    next_value_probs = nn.softmax(next_values / temp, axis=1)
    next_value_elements = (next_value_probs * next_values_target)
    expected_next_values = next_value_elements.sum(axis=1)
    expected_next_values = expected_next_values.reshape(novelty_reward.shape)

    # compute targets and update
    q_targets = novelty_reward + discount * expected_next_values
    # clip targets to be within the feasible set
    return jnp.minimum(q_targets, R_MAX)
# @jax.partial(jax.jit, static_argnums=(3, 4))
@jax.profiler.trace_function
def train_step_candidates(exploration_state: ExplorationState,
                          transitions,
                          candidate_next_actions,
                          use_target_network,
                          use_optimistic_updates):
    """The jittable component of the exploration Q function training step."""
    states, actions, next_states, rewards = transitions
    discount = exploration_state.novq_state.discount
    temp = exploration_state.update_temperature

    # if use_target_network:
    #     target_q_state = exploration_state.target_novq_state
    # else:
    #     target_q_state = exploration_state.novq_state
    # NOTE(review): use_target_network is currently ignored (see the
    # commented block above); the target network is always used below.

    with jax.profiler.TraceContext("compute next value"):
        if use_optimistic_updates:
            # Value next-action candidates with the count-based optimism.
            next_values = predict_optimistic_values_batch(
                exploration_state.novq_state,
                exploration_state.density_state,
                exploration_state.prior_count,
                next_states, candidate_next_actions)
            next_values_target = predict_optimistic_values_batch(
                exploration_state.target_novq_state,
                exploration_state.density_state,
                exploration_state.prior_count,
                next_states, candidate_next_actions)
        else:
            next_values = q_learning.predict_action_values_batch(
                exploration_state.novq_state,
                next_states,
                candidate_next_actions)
            next_values_target = q_learning.predict_action_values_batch(
                exploration_state.target_novq_state,
                next_states,
                candidate_next_actions)

    with jax.profiler.TraceContext("compute targets"):
        novelty_reward = compute_novelty_reward(
            exploration_state, states, actions).reshape(rewards.shape)
        q_targets = _targets_given_values(next_values, next_values_target,
                                          novelty_reward, temp, discount)

    # import ipdb; ipdb.set_trace()
    with jax.profiler.TraceContext("Q update"):
        novq_state, losses = q_functions.train_step(
            exploration_state.novq_state,
            states, actions, q_targets)
    return exploration_state.replace(novq_state=novq_state), losses
@jax.profiler.trace_function
def train_step(agent_state: AgentState, transitions, rng):
    """A full (optimistic) training step for the exploration Q function."""
    states, actions, next_states, rewards = transitions

    # candidate actions should be (bsize x n_update_candidates x *action_shape)
    with jax.profiler.TraceContext("get n_update_candidates"):
        n_update_candidates = int(agent_state.n_update_candidates)
    with jax.profiler.TraceContext("get candidates"):
        if agent_state.uniform_update_candidates:
            candidate_next_actions = utils.sample_uniform_actions_batch(
                agent_state.j_action_spec, rng,
                states.shape[0], n_update_candidates)
        else:
            # Let the task policy propose candidate next-actions.
            policy_state, candidate_next_actions = policy.action_fn(
                agent_state.policy_state, next_states,
                n_update_candidates, True)
            agent_state = agent_state.replace(policy_state=policy_state)

    with jax.profiler.TraceContext("train_step_candidates"):
        # NOTE: the ablation flags of train_step_candidates are pre-bound via
        # jax.partial in __main__, so only three arguments are passed here.
        exploration_state, losses = train_step_candidates(
            agent_state.exploration_state,
            transitions,
            candidate_next_actions)
        agent_state = agent_state.replace(exploration_state=exploration_state)
    return agent_state, losses
@jax.profiler.trace_function
def update_target_q(agent_state: AgentState):
    """Sync the target novelty-Q network to the online one and reset the counter."""
    current_novq = agent_state.exploration_state.novq_state
    synced_exploration = agent_state.exploration_state.replace(
        target_novq_state=current_novq)
    return agent_state.replace(exploration_state=synced_exploration,
                               steps_since_tupdate=0)
@jax.profiler.trace_function
def uniform_update(agent_state: AgentState, rng):
    """Run n_updates_per_step novelty-Q updates on uniformly sampled batches."""
    n_updates = agent_state.n_updates_per_step
    batch_size = agent_state.batch_size
    rngs = random.split(rng, n_updates)
    for step_rng in rngs:
        transitions = agent_state.replay.sample(batch_size)
        agent_state, losses = train_step(agent_state, transitions, step_rng)

    # One call here corresponds to one environment step; sync the target
    # network every update_target_every such steps.
    agent_state = agent_state.replace(
        steps_since_tupdate=agent_state.steps_since_tupdate + 1)
    if agent_state.steps_since_tupdate >= agent_state.update_target_every:
        agent_state = update_target_q(agent_state)
    return agent_state
@jax.profiler.trace_function
def update_exploration(agent_state, rng, transition_id):
    """Update the density model and (after warmup) the novelty Q function."""
    s, a, sp, r = agent_state.replay.get_transitions(transition_id)

    # update density on new observations
    with jax.profiler.TraceContext("update density"):
        exploration_state = agent_state.exploration_state
        density_state = density.update_batch(exploration_state.density_state,
                                             np.expand_dims(s, axis=0),
                                             np.expand_dims(a, axis=0))
        exploration_state = exploration_state.replace(
            density_state=density_state)
        agent_state = agent_state.replace(exploration_state=exploration_state)

    if len(agent_state.replay) > agent_state.warmup_steps:
        # update exploration Q to consistency with new density
        rng, novq_rng = random.split(rng)
        with jax.profiler.TraceContext("uniform update"):
            agent_state = uniform_update(agent_state, novq_rng)
    return agent_state
@jax.profiler.trace_function
@jax.jit
def compute_weight(prior_count, count):
root_real_count = count ** 0.5
root_total_count = (count + prior_count) ** 0.5
return root_real_count / root_total_count
compute_weight_batch = jax.vmap(compute_weight, in_axes=(None, 0))
@jax.profiler.trace_function
def predict_optimistic_value(novq_state, density_state, prior_count,
                             state, action):
    """Optimistic value of a single (state, action) pair."""
    # Add a batch dimension and defer to the batched implementation.
    expanded_state = np.expand_dims(state, axis=0)
    expanded_action = np.expand_dims(action, axis=0)
    return predict_optimistic_value_batch(novq_state, density_state,
                                          prior_count,
                                          expanded_state, expanded_action)
@jax.profiler.trace_function
def predict_optimistic_value_batch(novq_state, density_state, prior_count,
                                   states, actions):
    """Optimistic values for a batch of (state, action) pairs."""
    counts = density.get_count_batch(density_state, states, actions)
    return optimistic_value_batch_given_count(novq_state, prior_count,
                                              states, actions, counts)
@jax.profiler.trace_function
@jax.jit
def optimistic_value_batch_given_count(novq_state, prior_count,
                                      states, actions, counts):
    """Blend predicted Q values toward R_MAX according to visit counts."""
    predicted_values = q_learning.predict_value(novq_state, states, actions)
    predicted_values = predicted_values.reshape((-1,))
    # Low-count pairs lean toward R_MAX; high-count pairs trust the Q net.
    weights = compute_weight_batch(prior_count, counts)
    optimistic_values = weights * predicted_values + (1 - weights) * R_MAX
    return optimistic_values
@jax.profiler.trace_function
def predict_optimistic_values(novq_state, density_state, prior_count,
                              state, actions):
    """Optimistic values of many candidate actions at a single state."""
    # Tile the single state so each candidate action pairs with a copy of it.
    tiled_states = np.expand_dims(state, axis=0).repeat(len(actions), axis=0)
    return predict_optimistic_value_batch(novq_state, density_state,
                                          prior_count,
                                          tiled_states, actions)
@jax.profiler.trace_function
def predict_optimistic_values_batch(novq_state, density_state, prior_count,
                                    states, actions_per_state):
    """Optimistic values for several candidate actions at each state."""
    # actions_per_state is len(states) x n_actions_per_state x action_dim
    # idea is to flatten actions to a single batch dim and repeat each state
    bsize = len(states)
    asize = actions_per_state.shape[1]
    action_shape = actions_per_state.shape[2:]
    flat_actions = actions_per_state.reshape((bsize * asize, *action_shape))
    repeated_states = states.repeat(asize, axis=0)
    values = predict_optimistic_value_batch(novq_state, density_state,
                                            prior_count,
                                            repeated_states, flat_actions)
    # now reshape the values to match the shape of actions_per_state
    return values.reshape((bsize, asize))
# @jax.jit
@jax.profiler.trace_function
def select_candidate_optimistic(exploration_state, rng,
                                state, candidate_actions):
    """Boltzmann-sample one candidate action according to its optimistic value."""
    optimistic_values = predict_optimistic_values(
        exploration_state.novq_state,
        exploration_state.density_state,
        exploration_state.prior_count,
        state, candidate_actions).reshape(-1)

    return q_learning.sample_boltzmann(
        rng, optimistic_values, candidate_actions,
        exploration_state.temperature)
@jax.profiler.trace_function
def sample_exploration_action(agent_state: AgentState, rng, s, train=True):
    """Sample an action: the task policy proposes, the novelty Q selects."""
    # during test, take only one action sample from the task policy
    # -> will follow the task policy
    n = agent_state.n_candidates if train else 1

    with jax.profiler.TraceContext("sample candidate actions"):
        s_batch = jnp.expand_dims(s, axis=0)
        policy_state, candidate_actions = policy.action_fn(
            agent_state.policy_state, s_batch, n, train)
        # policy.action_fn deals with batches and we only have one element
        candidate_actions = candidate_actions[0]
        agent_state = agent_state.replace(policy_state=policy_state)

    with jax.profiler.TraceContext("select from candidates"):
        if agent_state.optimistic_actions:
            a, h = select_candidate_optimistic(agent_state.exploration_state,
                                               rng, s, candidate_actions)
        else:
            a, _, h = q_learning.sample_action_boltzmann(
                agent_state.exploration_state.novq_state, rng,
                s, candidate_actions,
                agent_state.exploration_state.temperature)
    # Log the entropy of the selection distribution.
    flag = 'train' if train else 'test'
    logger.update(f'{flag}/explore_entropy', h)
    return agent_state, a
def update_agent(agent_state: AgentState, rng, transition):
    """Append a transition to replay, then update density and novelty Q."""
    # add transition to replay
    transition_id = agent_state.replay.append(*transition)

    # update the exploration policy and density with the observed transition
    agent_state = update_exploration(agent_state, rng, transition_id)
    return agent_state
def run_episode(agent_state: AgentState, rng, env,
                train=True, max_steps=None, video_recorder=None):
    """Roll out one episode.

    Returns (agent_state, env, task score, accumulated novelty reward).
    """
    timestep = env.reset()
    score, novelty_score = 0, 0
    i = 0
    while not timestep.last():
        if video_recorder is not None:
            video_recorder.record(env)
        rng, action_rng = random.split(rng)
        s = utils.flatten_observation(timestep.observation)

        replay = agent_state.replay
        warmup_steps = agent_state.warmup_steps
        # put some random steps in the replay buffer
        if len(replay) < warmup_steps:
            action_spec = jax_specs.convert_dm_spec(env.action_spec())
            a = utils.sample_uniform_actions(action_spec, action_rng, 1)[0]
            # Log NaN stats so the log columns stay aligned during warmup.
            flag = 'train' if train else 'test'
            logger.update(f'{flag}/policy_entropy', np.nan)
            logger.update(f'{flag}/explore_entropy', np.nan)
            logger.update(f'{flag}/alpha', np.nan)
        else:
            agent_state, a = sample_exploration_action(
                agent_state, action_rng, s, train)
        timestep = env.step(a)
        sp = utils.flatten_observation(timestep.observation)
        r = timestep.reward

        novelty_reward = compute_novelty_reward(agent_state.exploration_state,
                                                jnp.expand_dims(s, axis=0),
                                                jnp.expand_dims(a, axis=0))
        score += r
        novelty_score += float(novelty_reward)

        if train:
            transition = (s, a, sp, r)
            rng, update_rng = random.split(rng)
            agent_state = update_agent(agent_state, update_rng, transition)
        i += 1
        if max_steps is not None and i >= max_steps:
            break
    return agent_state, env, score, novelty_score
# ------------- Visualizations ---------------------------------
@jax.profiler.trace_function
def display_state(agent_state: AgentState, ospec, aspec,
                  max_steps=100, bins=20,
                  rendering='local', savedir=None, episode=None):
    """Render diagnostic heatmaps (counts, values, rewards, trajectory).

    Dumps the raw maps to savedir/data/<episode>.pkl and the combined
    figure to savedir/vis/<episode>.png.
    """
    exploration_state = agent_state.exploration_state
    policy_state = agent_state.policy_state

    # Pick which observation elements to visualize for known domains.
    if 'object_pos' in ospec:
        vis_elem = {'object_pos'}
    elif 'orientations' in ospec and 'height' in ospec:
        vis_elem = {'height', 'orientations'}
    else:
        vis_elem = None
    render_function = jax.partial(utils.render_function, vis_elem=vis_elem)

    # min_count_map = dmcontrol_gridworld.render_function(
    #     jax.partial(density.get_count_batch, exploration_state.density_state),
    #     env, reduction=jnp.min)
    count_map = render_function(
        jax.partial(density.get_count_batch, exploration_state.density_state),
        agent_state.replay,
        ospec, aspec, bins=bins)
    novq_map = render_function(
        jax.partial(q_learning.predict_value, exploration_state.novq_state),
        agent_state.replay,
        ospec, aspec, bins=bins)
    optimistic_novq_map = render_function(
        jax.partial(predict_optimistic_value_batch,
                    exploration_state.novq_state,
                    exploration_state.density_state,
                    exploration_state.prior_count),
        agent_state.replay,
        ospec, aspec, bins=bins)
    novelty_reward_map = render_function(
        jax.partial(compute_novelty_reward, exploration_state),
        agent_state.replay,
        ospec, aspec, bins=bins)
    traj_map = replay_buffer.render_trajectory(
        agent_state.replay, max_steps, ospec, bins=bins, vis_elem=vis_elem)

    subfigs = [
        # (min_count_map, "Visit count (min)"),
        (count_map, "Visit count (max)"),
        (novq_map, "Novelty value (max)"),
        (optimistic_novq_map, "Optimistic novelty value (max)"),
        (novelty_reward_map, "Novelty reward (max)"),
        (traj_map, "Last trajectory"),
    ]

    # Add a task-value map when the policy exposes a usable Q function.
    q_policies = ['policies.deep_q_policy', 'policies.tabular_q_policy']
    if policy.__name__ in q_policies:
        taskq_map = render_function(
            jax.partial(q_learning.predict_value, policy_state.q_state),
            agent_state.replay,
            ospec, aspec, bins=bins)
        subfigs.append((taskq_map, "Task value (max)"))
    elif policy.__name__ == 'policies.sac_policy':
        import torch

        def get_task_value(s, a):
            # Query the SAC critic's first Q head on the concatenated input.
            s = torch.FloatTensor(np.array(s)).to(policy_state.device)
            a = torch.FloatTensor(np.array(a)).to(policy_state.device)
            with torch.no_grad():
                v = policy_state.critic.Q1(torch.cat([s, a], dim=-1))
            return v.cpu().detach().numpy()
        taskq_map = render_function(
            get_task_value,
            agent_state.replay,
            ospec, aspec, bins=bins)
        subfigs.append((taskq_map, "Task value (max)"))

    # dump the raw data for later rendering
    raw_path = f"{savedir}/data/{episode}.pkl"
    os.makedirs(os.path.dirname(raw_path), exist_ok=True)
    with open(raw_path, 'wb') as f:
        pickle.dump(subfigs, f, protocol=4)

    fig, axs = plt.subplots(1, len(subfigs))
    for ax, subfig in zip(axs, subfigs):
        render, title = subfig
        img = ax.imshow(render)
        fig.colorbar(img, ax=ax)
        ax.set_title(title)
    fig.set_size_inches(4 * len(subfigs), 3)
    fig_path = f"{savedir}/vis/{episode}.png"
    utils.display_figure(fig, rendering, savepath=fig_path)
# -------------------------------------------------------------------
def main(args):
    """Top-level training loop: set up env/agent, run episodes, evaluate."""
    rng = random.PRNGKey(args.seed)
    if args.env == 'gridworld':
        from environments import dmcontrol_gridworld
        env = dmcontrol_gridworld.GridWorld(args.env_size, args.max_steps)
        observation_spec = env.observation_spec()
    else:
        env = suite.load(args.env, args.task)
        observation_spec = DOMAINS[args.env][args.task]

    action_spec = env.action_spec()
    j_action_spec = jax_specs.convert_dm_spec(action_spec)
    j_observation_spec = jax_specs.convert_dm_spec(observation_spec)
    state_shape = utils.flatten_spec_shape(j_observation_spec)
    action_shape = action_spec.shape

    # drawing only one candidate action sample from the policy
    # will result in following the policy directly
    n_candidates = 64 if args.use_exploration else 1

    novq_state = q_functions.init_fn(args.seed,
                                     observation_spec,
                                     action_spec,
                                     discount=args.novelty_discount,
                                     max_value=R_MAX)
    density_state = density.new(observation_spec, action_spec,
                                state_bins=args.n_state_bins,
                                action_bins=args.n_action_bins,
                                state_scale=args.density_state_scale,
                                action_scale=args.density_action_scale,
                                max_obs=args.density_max_obs,
                                tolerance=args.density_tolerance,
                                reweight_dropped=args.density_reweight_dropped,
                                conserve_weight=args.density_conserve_weight)
    replay = replay_buffer.Replay(state_shape, action_shape)
    policy_state = policy.init_fn(observation_spec, action_spec, args.seed,
                                  lr=args.policy_lr,
                                  update_rule=args.policy_update,
                                  temp=args.policy_temperature,
                                  test_temp=args.policy_test_temperature)

    # Target network starts as a copy of the online novelty Q.
    exploration_state = ExplorationState(
        novq_state=novq_state,
        target_novq_state=novq_state,
        density_state=density_state,
        temperature=args.temperature,
        update_temperature=args.update_temperature,
        prior_count=args.prior_count)
    agent_state = AgentState(exploration_state=exploration_state,
                             policy_state=policy_state,
                             replay=replay,
                             j_action_spec=j_action_spec,
                             n_candidates=n_candidates,
                             n_update_candidates=args.n_update_candidates,
                             n_updates_per_step=args.n_updates_per_step,
                             update_target_every=args.update_target_every,
                             warmup_steps=args.warmup_steps,
                             optimistic_actions=args.optimistic_actions,
                             uniform_update_candidates=args.uniform_update_candidates,
                             batch_size=args.batch_size)

    current_time = np.nan
    for episode in range(1, args.max_episodes + 1):
        last_time = current_time
        current_time = time.time()
        logger.update('train/elapsed', current_time - last_time)

        # run an episode
        rng, episode_rng = random.split(rng)
        video_recorder = VideoRecorder(args.save_dir, fps=args.max_steps/10)
        video_recorder.init(enabled=(episode % args.video_every == 0))
        agent_state, env, score, novelty_score = run_episode(
            agent_state, episode_rng, env,
            train=True, max_steps=args.max_steps,
            video_recorder=video_recorder)
        video_recorder.save(f'train_{episode}.mp4')
        logger.update('train/episode', episode)
        logger.update('train/score', score)
        logger.update('train/novelty_score', novelty_score)

        # update the task policy
        # TODO: pull this loop inside the policy.update_fn
        policy_state = agent_state.policy_state
        for _ in range(int(args.max_steps * args.policy_updates_per_step)):
            transitions = agent_state.replay.sample(1024)
            transitions = tuple((jnp.array(el) for el in transitions))
            with jax.profiler.TraceContext("policy update"):
                policy_state = policy.update_fn(policy_state, transitions)
        agent_state = agent_state.replace(policy_state=policy_state)

        # output / visualize
        if episode % args.eval_every == 0:
            # Evaluation rollout: no learning, policy followed directly.
            rng, episode_rng = random.split(rng)
            video_recorder = VideoRecorder(args.save_dir, fps=args.max_steps/10)
            video_recorder.init(enabled=(episode % args.video_every == 0))
            _, _, test_score, test_novelty_score = run_episode(
                agent_state, episode_rng, env,
                train=False, max_steps=args.max_steps,
                video_recorder=video_recorder)
            video_recorder.save(f'test_{episode}.mp4')
            logger.update('test/episode', episode)
            logger.update('test/score', test_score)
            logger.update('test/novelty_score', test_novelty_score)

        density_state = agent_state.exploration_state.density_state
        if hasattr(density_state, "total"):
            logger.update('train/density_size', density_state.total)
        logger.write_all()

        if args.vis != 'none':
            # savepath = f"{args.save_dir}/{episode}"
            display_state(agent_state, observation_spec, action_spec,
                          max_steps=args.max_steps, bins=args.n_state_bins,
                          rendering=args.vis, savedir=args.save_dir,
                          episode=episode)

        if episode % args.save_replay_every == 0:
            replay_path = f"{args.save_dir}/replay.pkl"
            replay_buffer.save(agent_state.replay, replay_path)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()

    # basic environment settings
    parser.add_argument('--name', default='default')
    parser.add_argument('--env', default='gridworld')
    parser.add_argument('--task', default='default')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--env_size', type=int, default=20)
    parser.add_argument('--max_steps', type=int, default=1000)
    parser.add_argument('--max_episodes', type=int, default=1000)

    # visualization and logging
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--vis', default='disk')
    parser.add_argument('--eval_every', type=int, default=10)
    parser.add_argument('--video_every', type=int, default=10)
    parser.add_argument('--save_replay_every', type=int, default=10000000)
    parser.add_argument('--warmup_steps', type=int, default=128)

    # policy settings
    parser.add_argument('--policy', type=str, default='deep_q')
    parser.add_argument('--policy_update', type=str, default='ddqn')
    parser.add_argument('--policy_lr', type=float, default=1e-3)
    parser.add_argument('--policy_temperature', type=float, default=3e-1)
    parser.add_argument('--policy_test_temperature', type=float, default=1e-1)
    parser.add_argument('--policy_updates_per_step', type=float, default=1)

    # count settings
    parser.add_argument('--density', type=str, default='tabular')
    parser.add_argument('--density_state_scale', type=float, default=1.)
    parser.add_argument('--density_action_scale', type=float, default=1.)
    parser.add_argument('--density_max_obs', type=float, default=1e5)
    parser.add_argument('--density_tolerance', type=float, default=0.95)
    parser.add_argument('--density_reweight_dropped', action='store_true')
    parser.add_argument('--density_conserve_weight', action='store_true')
    parser.add_argument('--prior_count', type=float, default=1e-3)

    # novelty q learning
    parser.add_argument('--novelty_q_function', type=str, default='deep')
    parser.add_argument('--novelty_discount', type=float, default=0.99)
    parser.add_argument('--temperature', type=float, default=1e-1)
    parser.add_argument('--update_temperature', type=float, default=None)
    parser.add_argument('--n_update_candidates', type=int, default=64)
    parser.add_argument('--n_updates_per_step', type=int, default=10)
    parser.add_argument('--update_target_every', type=int, default=10)
    parser.add_argument('--uniform_update_candidates', action='store_true')
    parser.add_argument('--batch_size', type=int, default=128)

    # tabular settings (also for vis)
    parser.add_argument('--n_state_bins', type=int, default=20)
    parser.add_argument('--n_action_bins', type=int, default=2)

    # ablations
    parser.add_argument('--no_optimistic_updates', dest='optimistic_updates',
                        action='store_false', default=True)
    parser.add_argument('--no_optimistic_actions', dest='optimistic_actions',
                        action='store_false', default=True)
    parser.add_argument('--target_network', action='store_true', default=True)
    parser.add_argument('--no_target_network', dest='target_network',
                        action='store_false')
    parser.add_argument('--no_exploration', dest='use_exploration',
                        action='store_false', default=True)
    # NOTE(review): add_subparsers() is called but no subparsers are added;
    # this looks like dead code — confirm it can be removed.
    parser.add_subparsers()
    args = parser.parse_args()
    print(args)

    if args.update_temperature is None:
        print("Using --temperature as --update_temperature.")
        args.update_temperature = args.temperature

    args.save_dir = f"results/exploration/{args.name}"
    os.makedirs(args.save_dir, exist_ok=True)

    import experiment_logging
    experiment_logging.setup_default_logger(args.save_dir)
    from experiment_logging import default_logger as logger

    import json
    with open(args.save_dir + '/args.json', 'w') as argfile:
        json.dump(args.__dict__, argfile, indent=4)

    # Select the novelty Q-function implementation.
    if args.novelty_q_function == 'deep':
        import deep_q_functions as q_functions
    elif args.novelty_q_function == 'sigmoid':
        import sigmoid_q_functions as q_functions
    elif args.novelty_q_function == 'tabular':
        import tabular_q_functions as q_functions
    else:
        raise Exception("Argument --novelty_q_function was invalid.")

    # Select the task policy implementation.
    if args.policy == 'deep_q':
        import policies.deep_q_policy as policy
    elif args.policy == 'sac':
        import policies.sac_policy as policy
    elif args.policy == 'uniform':
        import policies.uniform_policy as policy
    elif args.policy == 'tabular':
        import policies.tabular_q_policy as policy
    else:
        raise Exception("Argument --policy was invalid.")

    # Select the density / pseudo-count model implementation.
    if args.density == 'tabular':
        import densities.tabular_density as density
    elif args.density == 'kernel':
        import densities.kernel_density as density
    elif args.density == 'kernel_count':
        import densities.kernel_count as density
    elif args.density == 'faiss_kernel_count':
        from densities import faiss_kernel_count as density
    elif args.density == 'keops_kernel_count':
        from densities import keops_kernel_count as density
    elif args.density == 'dummy':
        import densities.dummy_density as density
    else:
        raise Exception("Argument --density was invalid.")

    print(f"CUDA_VISIBLE_DEVICES={os.environ['CUDA_VISIBLE_DEVICES']}")

    # Pre-bind the ablation flags so train_step can call with 3 arguments.
    train_step_candidates = jax.partial(train_step_candidates,
                                        use_target_network=args.target_network,
                                        use_optimistic_updates=args.optimistic_updates)
    # train_step_candidates = jax.jit(train_step_candidates)

    jit = not args.debug
    if jit:
        main(args)
    else:
        with jax.disable_jit():
            main(args)
| StarcoderdataPython |
9735128 | <filename>apiserver/report/__init__.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# datetime:2020/10/23 11:54
# software: PyCharm
# project: webapi
from apiserver.report.handler.error_log_handler import ErrorLogHandler
from apiserver.report.handler.heartbeat_handler import HeartBeatHandler
from apiserver.report.handler.narmal_vul_handler import NormalVulnHandler
from apiserver.report.handler.saas_method_pool_handler import SaasMethodPoolHandler
from apiserver.report.handler.sca_handler import (ScaHandler, ScaBulkHandler)
from apiserver.report.handler.api_route_handler import ApiRouteHandler
from apiserver.report.handler.hardencode_vul_handler import HardEncodeVulHandler
if __name__ == '__main__':
    # Smoke-test entry point: instantiate each report handler once so that any
    # import/registration side effects run when this module is executed directly.
    ErrorLogHandler()
    HeartBeatHandler()
    ScaHandler()
    NormalVulnHandler()
    SaasMethodPoolHandler()
    ApiRouteHandler()
    HardEncodeVulHandler()
    ScaBulkHandler()
| StarcoderdataPython |
3426907 | # -*- coding: utf-8 -*-
import unittest
from iktomi.utils import (
quoteattr, quoteattrs, quote_js, weakproxy,
cached_property, cached_class_property,
)
class Tests(unittest.TestCase):
    """Unit tests for the small helpers exported by ``iktomi.utils``."""

    def test_quoteattr(self):
        """quoteattr() wraps a value in double quotes for use as an XML attribute."""
        for src, dst in [('', '""'),
                         ('abc', '"abc"'),
                         ('"', '"""')]:
            self.assertEqual(quoteattr(src), dst)

    def test_quoteattrs(self):
        """quoteattrs() renders a dict as space-separated key="value" pairs."""
        self.assertEqual(quoteattrs({}), '')
        # Dict iteration order is not part of the contract, so both orderings
        # of the rendered pairs are accepted.
        self.assertIn(quoteattrs({'a': u'abc', 'b': u'bcd'}), [
            u'a="abc" b="bcd"',
            u'b="bcd" a="abc"',
        ])

    def test_quote_js(self):
        """quote_js() must remove/escape characters unsafe in a JS string literal."""
        # Any idea for better test? json.loads() can't be used since the result
        # doesn't conform(?) JSON spec while being correct JavaScript string.
        # eval() seems OK except for "\r" removal.
        bad_chars = '\n\r\'"<>&'
        quoted = quote_js(u'\\\n\r\'"<>&')
        for char in bad_chars:
            self.assertNotIn(char, quoted)
        # Same check for a byte/native string input.
        quoted = quote_js('\\\n\r\'"<>&')
        for char in bad_chars:
            self.assertNotIn(char, quoted)

    def test_weakproxy(self):
        """weakproxy() falls back to the object itself when it can't be weakly referenced."""
        # Immutable objects that can't be weakly referenced
        o = object()
        self.assertIs(weakproxy(o), o)
        # The rest objects
        class C(object):
            pass
        o = C()
        p = weakproxy(o)
        o.a = object()
        self.assertIs(p.a, o.a)
        del o
        # Once the referent is collected the proxy must raise, not serve stale data.
        with self.assertRaises(ReferenceError):
            p.a

    def test_cached_property(self):
        """cached_property computes once per instance and supports del/set."""
        class C(object):
            def __init__(self):
                self.c = 0  # counts how many times the getter ran
            @cached_property
            def p(self):
                self.c += 1
                return 'a'
        # Access via the class returns the descriptor itself, not a value.
        self.assertIsInstance(C.p, cached_property)
        obj = C()
        self.assertEqual(obj.p, 'a')
        self.assertEqual(obj.c, 1)
        self.assertEqual(obj.p, 'a')
        self.assertEqual(obj.c, 1)
        # Deleting the cached value forces recomputation on the next access.
        del obj.p
        self.assertEqual(obj.p, 'a')
        self.assertEqual(obj.c, 2)
        obj = C()
        # Explicit assignment bypasses the getter entirely.
        obj.p = 'b'
        self.assertEqual(obj.p, 'b')
        self.assertEqual(obj.c, 0)
        del obj.p
        self.assertEqual(obj.p, 'a')
        self.assertEqual(obj.c, 1)

    def test_cached_class_property(self):
        """cached_class_property computes once per class, shared by instances."""
        def create_C():
            # A fresh class per scenario so cache state doesn't leak between them.
            class C(object):
                c = 0
                @cached_class_property
                def p(cls):
                    cls.c += 1
                    return 'a'
            return C
        C = create_C()
        obj = C()
        self.assertEqual(obj.p, 'a')
        self.assertEqual(C.c, 1)
        self.assertEqual(obj.p, 'a')
        self.assertEqual(C.c, 1)
        self.assertEqual(C.p, 'a')
        self.assertEqual(C.c, 1)
        # Same invariants hold when the first access happens via the class.
        C = create_C()
        obj = C()
        self.assertEqual(C.p, 'a')
        self.assertEqual(C.c, 1)
        self.assertEqual(obj.p, 'a')
        self.assertEqual(C.c, 1)

    # def test_cached_property_attribute_error(self):
    #     class C(object):
    #         @cached_property
    #         def p(self):
    #             return self.c
    #
    #     c = C()
    #     self.assertRaises(Exception, hasattr, c, 'p')
| StarcoderdataPython |
5077674 | # Functions are used to create small reusable parts of code.
# They are defined using the def keyword
# This is the function head
def fun():
    """Print the greeting "Hello" to standard output."""
    # The indented suite below is the function body; it runs on every call.
    greeting = "Hello"
    print(greeting)
# Functions are called with parentheses. Calling a functions means that it is executed.
# You can think of it as jumping to the beginning of the function and then jumping back
fun()  # prints "Hello"
print("World")
| StarcoderdataPython |
9655303 | from setuptools import setup
# Packaging metadata for the single-module 'blundercheck' distribution.
setup(name="blundercheck",
      author='<NAME>',
      author_email="<EMAIL>",
      version="0.0.1",
      license="None",
      keywords="chess",
      url='http://www.github.com/dsjoerg/blundercheck/',
      # Ships blundercheck.py as a top-level module (no package directory).
      py_modules=["blundercheck"],
      description="Scores chess games",
      long_description='''
''',
      classifiers = [
          "Programming Language :: Python",
          "Development Status :: 2 - Pre-Alpha",
      ])
| StarcoderdataPython |
3373820 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 14:37:04 2019
@author: B.Mika-Gospodorz
Input files: stdin with bam file with multi-mapped reads, reference_host_names.txt and reference_pathogen_names.txt files that contain references extracted with extract_reference_names_from_fasta_files.sh
Output: txt file with cross-mapped reads
Description: Used to identify and extract reads that mapped onto both host and pathogen genomes (cross-mapped reads). The script is executed by remove_crossmapped_reads_BAM.sh and remove_crossmapped_read_paires_BAM.sh scripts.
"""
import sys
import argparse
import pysam
# Classify which organism a read's reference sequence belongs to.
def check_reference_organisms(read_reference_name, host_reference_names, pathogen_reference_names):
    """Return 'host' or 'pathogen' depending on which reference list contains
    read_reference_name; print a warning and return '' if it is in neither."""
    if read_reference_name in host_reference_names:
        return 'host'
    if read_reference_name in pathogen_reference_names:
        return 'pathogen'
    print('There is no ' + read_reference_name + ' in the reference name set')
    return ''
# Record a (read, reference-organism) observation in the accumulator dict.
def add_read(multimapped_reads, read_name, reference_name):
    """Append reference_name to the read's list of organisms, keeping each
    organism at most once (in first-seen order)."""
    organisms = multimapped_reads.setdefault(read_name, [])
    if reference_name not in organisms:
        organisms.append(reference_name)
# Write out the reads that mapped to more than one organism.
def find_and_save_cross_mapped_reads(multimapped_reads_with_reference, output_file_name):
    """Save the name of every read associated with >1 reference organism
    (i.e. cross-mapped between host and pathogen), one name per line."""
    with open(output_file_name, 'w') as out:
        for read_name, reference_names in multimapped_reads_with_reference.items():
            if len(reference_names) > 1:
                out.write(str(read_name) + '\n')
# Drive the whole pipeline: scan a BAM stream and persist cross-mapped reads.
def read_reads_from_samfile(sam_file_name, host_reference_names, pathogen_reference_names, output_file_name):
    """Iterate every alignment in the BAM input, classify its reference as
    host or pathogen, and save reads seen against both organisms."""
    alignments = pysam.AlignmentFile(sam_file_name, "rb")
    reads_to_organisms = dict()
    for alignment in alignments:
        organism = check_reference_organisms(
            alignment.reference_name, host_reference_names, pathogen_reference_names)
        add_read(reads_to_organisms, alignment.query_name, organism)
    find_and_save_cross_mapped_reads(reads_to_organisms, output_file_name)
# Command-line interface. The BAM stream itself is read from stdin (see the
# read_reads_from_samfile call at the bottom).
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", default="cross_mapped_reads.txt",metavar='output_file_name', help="output file name")
parser.add_argument("-h_ref", "--host_reference_names", metavar='<host_reference_name>', help="Path to reference_host_names.txt file")
parser.add_argument("-p_ref", "--pathogen_reference_names", metavar='<pathogen_reference_name>', help="Path to reference_pathogen_names.txt file")
args = parser.parse_args()
# create list of host and pathogen chromosome/plasmid names (one name per line)
host_reference_names = [line.rstrip() for line in open(args.host_reference_names)]
pathogen_reference_names = [line.rstrip() for line in open(args.pathogen_reference_names)]
# identify and extract cross-mapped reads
read_reads_from_samfile(sys.stdin, host_reference_names, pathogen_reference_names, args.output)
| StarcoderdataPython |
276268 | <gh_stars>0
#!/usr/bin/env python3
# imports go here
#
# Free Coding session for 2015-03-02
# Written by <NAME>
#
class SimpleClass:
    """Tiny demo class: two methods that each return a fixed string."""

    def main(self):
        """Return the string 'HI'."""
        return "HI"

    def main2(self):
        """Return the string 'HELLO AGAIN'."""
        return "HELLO AGAIN"
if __name__ == '__main__':
    # Demonstration of attribute reflection with getattr/setattr.
    sc = SimpleClass()
    assert(sc.main.__name__ == 'main')
    fn = getattr(sc, 'main')
    assert(fn() == 'HI')
    # The default (sc.main2) is ignored because 'main' exists on the instance.
    fn = getattr(sc, 'main', sc.main2)
    assert(fn() == 'HI')
    # 'main3' does not exist yet, so the default is returned instead.
    fn = getattr(sc, 'main3', sc.main2)
    assert(fn() == 'HELLO AGAIN')
    # Attach the bound method under the new attribute name 'main3'.
    setattr(sc, 'main3', fn)
    fn = getattr(sc, 'main3', sc.main2)
    assert(fn() == 'HELLO AGAIN')
    assert(sc.main3() == 'HELLO AGAIN')
| StarcoderdataPython |
9600040 | print("Hello World!")
Name = "SaMeeM"
# Printing Name to the screen
print(Name)
age = 22
# Printing Age to screen
print(age)
print("My Name is " + Name + " and I am " + str(age) + " years old.")
random_number = 45.6
result = random_number / 9
float_division_result = random_number // 9
print(result)
print(float_division_result)
random_string = "Hello"
print(random_string * 2)
| StarcoderdataPython |
5059684 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-locals
import json
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from django.db.transaction import atomic
from django.db.utils import IntegrityError
from cm.errors import AdcmEx
from cm.adcm_config import save_file_type
from cm.models import (
Bundle,
Cluster,
Prototype,
ClusterObject,
ServiceComponent,
Host,
HostProvider,
GroupConfig,
ObjectConfig,
ConfigLog,
PrototypeConfig,
HostComponent,
)
def deserializer_datetime_fields(obj, fields=None):
    """Convert the named ISO-format string fields of *obj* to datetime in place.

    :param obj: Object in dictionary format (may be None; then nothing happens)
    :type obj: dict
    :param fields: List of field names holding ISO 8601 strings
    :type fields: list
    """
    if obj is None or fields is None:
        return
    for field in fields:
        obj[field] = datetime.fromisoformat(obj[field])
def get_prototype(**kwargs):
    """Fetch a Prototype by its bundle hash plus arbitrary lookup filters.

    :param kwargs: must include 'bundle_hash'; the remaining keys (e.g. type,
        name, parent) are forwarded as Prototype lookup filters
    :return: Prototype object
    :rtype: models.Prototype
    """
    bundle = Bundle.objects.get(hash=kwargs.pop('bundle_hash'))
    return Prototype.objects.get(bundle=bundle, **kwargs)
def create_config(config):
    """
    Creating current ConfigLog, previous ConfigLog and ObjectConfig objects
    :param config: ConfigLog object in dictionary format (or None)
    :type config: dict
    :return: ObjectConfig object, or None when config is None
    :rtype: models.ObjectConfig
    """
    if config is not None:
        current_config = config['current']
        deserializer_datetime_fields(current_config, ['date'])
        previous_config = config['previous']
        deserializer_datetime_fields(previous_config, ['date'])
        # Placeholder ids (0) are used until the ConfigLog rows exist, because
        # the ObjectConfig row must be created first to serve as their obj_ref.
        conf = ObjectConfig.objects.create(current=0, previous=0)
        current = ConfigLog.objects.create(obj_ref=conf, **current_config)
        current_id = current.id
        if previous_config is not None:
            previous = ConfigLog.objects.create(obj_ref=conf, **previous_config)
            previous_id = previous.id
        else:
            # 0 marks "no previous config" by convention.
            previous_id = 0
        conf.current = current_id
        conf.previous = previous_id
        conf.save()
        return conf
    else:
        return None
def create_group(group, ex_hosts_list, obj):
    """
    Creating GroupConfig object
    :param group: GroupConfig object in dictionary format
    :type group: dict
    :param ex_hosts_list: Map of ex_host_ids and new hosts
    :type ex_hosts_list: dict
    :param obj: newly created object (cluster/service/component/provider)
        that the group config is attached to
    :return: pair of (exported object id, created GroupConfig)
    :rtype: tuple
    """
    model_name = group.pop('model_name')
    ex_object_id = group.pop('object_id')
    # The dumped content-type value is discarded; it is re-resolved below from
    # model_name so it matches this installation's ContentType table.
    group.pop('object_type')
    config = create_config(group.pop('config'))
    hosts = []
    for host in group.pop('hosts'):
        # Translate exported host ids into the newly created Host objects.
        hosts.append(ex_hosts_list[host])
    gc = GroupConfig.objects.create(
        object_id=obj.id,
        config=config,
        object_type=ContentType.objects.get(model=model_name),
        **group
    )
    gc.hosts.set(hosts)
    return ex_object_id, gc
def create_file_from_config(obj, config):
    """
    Restore 'file'-type config values to disk for the given object.
    :param obj: ADCM object whose prototype defines the config schema
    :param config: config dict in the dump format (or None; then no-op)
    :type config: dict
    """
    if config is not None:
        conf = config["current"]["config"]
        proto = obj.prototype
        # Only config entries declared as type 'file' in the prototype are written.
        for pconf in PrototypeConfig.objects.filter(prototype=proto, type='file'):
            if pconf.subname and conf[pconf.name].get(pconf.subname):
                save_file_type(obj, pconf.name, pconf.subname, conf[pconf.name][pconf.subname])
            elif conf.get(pconf.name):
                save_file_type(obj, pconf.name, '', conf[pconf.name])
def create_cluster(cluster):
    """
    Creating Cluster object
    :param cluster: Cluster object in dictionary format
    :type cluster: dict
    :return: pair of (exported cluster id, created Cluster)
    :rtype: tuple
    """
    try:
        # Cluster names must be unique; refuse to load over an existing one.
        Cluster.objects.get(name=cluster['name'])
        raise AdcmEx('CLUSTER_CONFLICT', 'Cluster with the same name already exist')
    except Cluster.DoesNotExist:
        prototype = get_prototype(bundle_hash=cluster.pop('bundle_hash'), type='cluster')
        ex_id = cluster.pop('id')
        config = cluster.pop('config')
        cluster = Cluster.objects.create(
            prototype=prototype, config=create_config(config), **cluster
        )
        # 'file'-type config values are materialized on disk after creation.
        create_file_from_config(cluster, config)
        return ex_id, cluster
def create_provider(provider):
    """
    Creating HostProvider object
    :param provider: HostProvider object in dictionary format
    :type provider: dict
    :return: pair of (exported provider id, created or reused HostProvider)
    :rtype: tuple
    """
    bundle_hash = provider.pop('bundle_hash')
    ex_id = provider.pop('id')
    try:
        same_name_provider = HostProvider.objects.get(name=provider['name'])
        # An existing provider with the same name is reused, but only when it
        # originates from the same bundle as the dumped one.
        if same_name_provider.prototype.bundle.hash != bundle_hash:
            raise IntegrityError('Name of provider already in use in another bundle')
        create_file_from_config(same_name_provider, provider['config'])
        return ex_id, same_name_provider
    except HostProvider.DoesNotExist:
        prototype = get_prototype(bundle_hash=bundle_hash, type='provider')
        config = provider.pop('config')
        provider = HostProvider.objects.create(
            prototype=prototype, config=create_config(config), **provider
        )
        create_file_from_config(provider, config)
        return ex_id, provider
def create_host(host, cluster):
    """
    Creating Host object
    :param host: Host object in dictionary format
    :type host: dict
    :param cluster: Cluster object
    :type cluster: models.Cluster
    :return: pair of (exported host id, created Host)
    :rtype: tuple
    """
    # The exported provider pk is dropped; the provider is re-resolved by name.
    host.pop('provider')
    provider = HostProvider.objects.get(name=host.pop('provider__name'))
    try:
        Host.objects.get(fqdn=host['fqdn'])
        # Explicitly undo the objects created so far for this import.
        # NOTE(review): load() runs under @atomic, so the raise below would roll
        # these back anyway — confirm whether the deletes are still needed.
        provider.delete()
        cluster.delete()
        raise AdcmEx('HOST_CONFLICT', 'Host fqdn already in use')
    except Host.DoesNotExist:
        prototype = get_prototype(bundle_hash=host.pop('bundle_hash'), type='host')
        ex_id = host.pop('id')
        config = host.pop('config')
        new_host = Host.objects.create(
            prototype=prototype,
            provider=provider,
            config=create_config(config),
            cluster=cluster,
            **host,
        )
        create_file_from_config(new_host, config)
        return ex_id, new_host
def create_service(service, cluster):
    """
    Creating Service object
    :param service: ClusterObject object in dictionary format
    :type service: dict
    :param cluster: Cluster object
    :type cluster: models.Cluster
    :return: pair of (exported service id, created ClusterObject)
    :rtype: tuple
    """
    # A service prototype is identified by its bundle hash plus its name.
    prototype = get_prototype(
        bundle_hash=service.pop('bundle_hash'), type='service', name=service.pop('prototype__name')
    )
    ex_id = service.pop('id')
    config = service.pop('config')
    service = ClusterObject.objects.create(
        prototype=prototype, cluster=cluster, config=create_config(config), **service
    )
    create_file_from_config(service, config)
    return ex_id, service
def create_component(component, cluster, service):
    """
    Creating Component object
    :param component: ServiceComponent object in dictionary format
    :type component: dict
    :param cluster: Cluster object
    :type cluster: models.Cluster
    :param service: Service object
    :type service: models.ClusterObject
    :return: pair of (exported component id, created ServiceComponent)
    :rtype: tuple
    """
    # A component prototype is additionally scoped by its parent service prototype.
    prototype = get_prototype(
        bundle_hash=component.pop('bundle_hash'),
        type='component',
        name=component.pop('prototype__name'),
        parent=service.prototype,
    )
    ex_id = component.pop('id')
    config = component.pop('config')
    component = ServiceComponent.objects.create(
        prototype=prototype,
        cluster=cluster,
        service=service,
        config=create_config(config),
        **component
    )
    create_file_from_config(component, config)
    return ex_id, component
def create_host_component(host_component, cluster, host, service, component):
    """
    Creating HostComponent object
    :param host_component: HostComponent object in dictionary format
    :type host_component: dict
    :param cluster: Cluster object
    :type cluster: models.Cluster
    :param host: Host object
    :type host: models.Host
    :param service: Service object
    :type service: models.ClusterObject
    :param component: Component object
    :type component: models.ServiceComponent
    :return: HostComponent object
    :rtype: models.HostComponent
    """
    # The exported cluster pk is dropped; the freshly created cluster is used.
    host_component.pop('cluster')
    host_component = HostComponent.objects.create(
        cluster=cluster, host=host, service=service, component=component, **host_component
    )
    return host_component
def check(data):
    """
    Checking cluster load
    :param data: Data from file
    :type data: dict
    :raises AdcmEx: when the ADCM version differs from the dump's, or when a
        bundle referenced by the dump is not uploaded on this installation
    """
    # Dumps are only loadable into the exact ADCM version that produced them.
    if settings.ADCM_VERSION != data['ADCM_VERSION']:
        raise AdcmEx(
            'DUMP_LOAD_ADCM_VERSION_ERROR',
            msg=(
                f'ADCM versions do not match, dump version: {data["ADCM_VERSION"]},'
                f' load version: {settings.ADCM_VERSION}'
            ),
        )
    # Every bundle referenced by the dump must already be present (by hash).
    for bundle_hash, bundle in data['bundles'].items():
        try:
            Bundle.objects.get(hash=bundle_hash)
        except Bundle.DoesNotExist as err:
            raise AdcmEx(
                'DUMP_LOAD_BUNDLE_ERROR',
                msg=f'Bundle "{bundle["name"]} {bundle["version"]}" not found',
            ) from err
@atomic
def load(file_path):
    """
    Loading and creating objects from JSON file
    :param file_path: Path to JSON file
    :type file_path: str
    """
    try:
        with open(file_path, 'r', encoding='utf_8') as f:
            data = json.load(f)
    except FileNotFoundError as err:
        raise AdcmEx('DUMP_LOAD_CLUSTER_ERROR', msg='Loaded file not found') from err
    check(data)
    _, cluster = create_cluster(data['cluster'])
    # Maps of exported ids -> newly created objects, used to rewire relations
    # (host components, config groups) to the new primary keys.
    ex_provider_ids = {}
    for provider_data in data['providers']:
        ex_provider_id, provider = create_provider(provider_data)
        ex_provider_ids[ex_provider_id] = provider
    ex_host_ids = {}
    for host_data in data['hosts']:
        ex_host_id, host = create_host(host_data, cluster)
        ex_host_ids[ex_host_id] = host
    ex_service_ids = {}
    for service_data in data['services']:
        ex_service_id, service = create_service(service_data, cluster)
        ex_service_ids[ex_service_id] = service
    ex_component_ids = {}
    for component_data in data['components']:
        ex_component_id, component = create_component(
            component_data, cluster, ex_service_ids[component_data.pop('service')]
        )
        ex_component_ids[ex_component_id] = component
    for host_component_data in data['host_components']:
        create_host_component(
            host_component_data,
            cluster,
            ex_host_ids[host_component_data.pop('host')],
            ex_service_ids[host_component_data.pop('service')],
            ex_component_ids[host_component_data.pop('component')],
        )
    for group_data in data['groups']:
        # NOTE(review): if model_name matches none of the cases below, 'obj'
        # silently keeps its value from the previous iteration (or is unbound
        # on the first one, raising NameError) — confirm dump format guarantees.
        if group_data['model_name'] == 'cluster':
            obj = cluster
        elif group_data['model_name'] == 'clusterobject':
            obj = ex_service_ids[group_data['object_id']]
        elif group_data['model_name'] == 'servicecomponent':
            obj = ex_component_ids[group_data['object_id']]
        elif group_data['model_name'] == 'hostprovider':
            obj = ex_provider_ids[group_data['object_id']]
        create_group(group_data, ex_host_ids, obj)
class Command(BaseCommand):
    """
    Command for load cluster object from JSON format file
    Example:
        manage.py loadcluster cluster.json
    """
    help = 'Load cluster object from JSON format'

    def add_arguments(self, parser):
        """Parsing command line arguments"""
        # Optional positional argument: path to the dump file.
        parser.add_argument('file_path', nargs='?')

    def handle(self, *args, **options):
        """Handler method"""
        file_path = options.get('file_path')
        load(file_path)
load(file_path)
| StarcoderdataPython |
5061695 | <reponame>AnnaKPolyakova/bbbs<gh_stars>1-10
from rest_framework.views import exception_handler
def custom_exception_handler(exc, context):
    """Dispatch DRF-handled exceptions to per-type formatters, falling back to
    the generic formatter for any other exception DRF produced a response for."""
    response = exception_handler(exc, context)
    if response is None:
        # DRF produced no response (non-API exception): let it propagate.
        return None
    per_type = {
        "ValidationError": _handle_validation_error,
        "Http404": _handle_not_found_error,
    }
    formatter = per_type.get(exc.__class__.__name__, _handle_drf_general_error)
    return formatter(exc, context, response)
def _handle_validation_error(exc, context, response):
    """Generic error for 'ValidationError' exception."""
    # Preserve the original per-field errors under 'details'.
    details = response.data
    response.data = {
        "error": "ValidationError",
        "message": "Отправленные данные не прошли проверку",
        "details": details,
    }
    return response
def _handle_not_found_error(exc, context, response):
    """Generic error for 'get_object_or_404()' function."""
    payload = {
        "error": "NotFound",
        "message": "Запрошенный объект не найден",
    }
    response.data = payload
    return response
def _handle_drf_general_error(exc, context, response):
    """Generic handler for DRF exceptions. Expects 'detail' in response."""
    detail = response.data.get("detail")
    response.data = {
        "error": type(exc).__name__,
        "message": detail,
    }
    return response
| StarcoderdataPython |
95365 | #!/usr/bin/env python
import sys
import imp
import re
def main(argv):
    """Verify that the python modules required for the given cloud provider are
    installed at or above their minimum versions; exit 1 on any failure.

    argv[1] selects the cloud provider; 'aws' additionally requires boto.
    """
    cloud_provider = sys.argv[1]
    reqd_module_names_and_versions = {}
    reqd_module_names_and_versions['requests'] = '2.2.1'
    reqd_module_names_and_versions['json'] = '2.0.9'
    reqd_module_names_and_versions['docopt'] = '0.6.2'
    if cloud_provider.lower() == 'aws':
        reqd_module_names_and_versions['boto'] = '2.38.0'
    #elif cloud_provider.lower() == 'gcp':
    #    reqd_module_names_and_versions['libcloud'] = '0.20.0'
    ret_val = 0
    for module_name in reqd_module_names_and_versions:
        try:
            __import__(module_name)
            installed_version = str(__import__(module_name).__version__)
            # NOTE(review): comparing versions by stripping dots is not a valid
            # ordering for all version strings (e.g. '2.2.10' -> 2210 compares
            # greater than '2.3.0' -> 230) — confirm before relying on it.
            installed_version_formatted = int(re.sub(
                "\.", '', str(__import__(module_name).__version__)))
            reqd_version = int(re.sub(
                "\.", '', reqd_module_names_and_versions[module_name]))
            if installed_version_formatted < reqd_version:
                print "ERROR: Module " + module_name + " is not of high enough version. You need: v" + reqd_module_names_and_versions[module_name] + ", you have: " + installed_version
                ret_val = 1
        except ImportError:
            print "ERROR: Could not import required python module '" + module_name + "'. Please install it with pip."
            ret_val = 1
    sys.exit(ret_val)

if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
4883191 | from ionotomo import *
from ionotomo.utils.gaussian_process import *
from rathings.phase_unwrap import *
import pylab as plt
import numpy as np
import logging as log
import os
import h5py
import sys
import astropy.time as at
import astropy.coordinates as ac
import astropy.units as au
if sys.hexversion >= 0x3000000:
    # Python 3: byte strings (e.g. h5py cell values) must be decoded to str.
    def str_(s):
        return str(s,'utf-8')
else:
    # Python 2: plain str() is sufficient.
    def str_(s):
        return str(s)

# Conversion constant in rad*Hz per TECU; not referenced elsewhere in this
# module's visible code (its magnitude appears in a comment below).
tec_conversion = -8.4480e9  # rad Hz/tecu
def prepare_phase(phase, axis=0, center=True):
    """Unwrap a phase array and optionally subtract its global mean.

    phase : array
        phase values to be unwrapped
    axis : int
        axis to unwrap along (default 0)
    center : bool
        whether to subtract the mean of the unwrapped result (default True)
    """
    unwrapped = phase_unwrapp1d(phase, axis=axis)
    if center:
        unwrapped -= np.mean(unwrapped)
    return unwrapped
def opt_kernel(times, phase, K, sigma_y=0, n_random_start=0):
    """Bayes Optimization of kernel wrt hyper params
    times : array
        array of times in seconds most likely
    phase : array
        1D array of phases already prepared.
    K : NDKernel
        the kernel for the level 2 optimization.
    sigma_y : float or array
        if float then measurement uncertainty for all phase.
        if array then measurement uncertainty for each phase array element
    n_random_start : int
        number of random initializations to use in optimization (default 0)
    """
    # Guard against optimizing over a huge Gram matrix (N^2 would exceed 1e6 entries).
    assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
    # GP inputs must be 2D: one column feature (time).
    X = times.reshape((-1,1))
    y = phase
    # Level-2 (marginal likelihood) optimization; hyperparams are stored on K.
    K.hyperparams = level2_solve(X,y,sigma_y,K,n_random_start=n_random_start)
    return K
def multi_opt_kernel(times, phase, K, sigma_y=0, n_random_start=0):
    """Bayes Optimization of kernel wrt hyper params over multiple directions
    times : array (num_times,)
        time array
    phase : array (num_times, num_directions)
        phases in several directions
    K : NDKernel
        the kernel for the level 2 optimization.
    sigma_y : float or array
        if float then measurement uncertainty for all phase.
        if array then measurement uncertainty for each phase array element
    n_random_start : int
        number of random initializations to use in optimization (default 0)
    """
    # Guard against optimizing over a huge Gram matrix.
    assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
    num_directions = phase.shape[1]
    # The same time axis is shared by every direction (one dataset per direction).
    X = [times.reshape((-1, 1))] * num_directions
    y = phase
    # Bug fix: n_random_start was previously hard-coded to 10 here, silently
    # ignoring the caller-supplied parameter (whose documented default is 0).
    K.hyperparams = level2_multidataset_solve(
        X, y, [sigma_y] * num_directions, K, n_random_start=n_random_start
    )
    return K
def plot_prediction(times_predict, times, phase, K, sigma_y = 0,phase_true=None,figname=None,ant_label=None,patch_name=None):
    """Level1 predictive and plot
    times_predict : array
        the times to predict at
    times : array
        times for training set
    phase : array
        phase for training set
    K : NDKernel
        optimized kernel
    sigma_y : float or array
        if float then measurement uncertainty for all phase.
        if array then measurement uncertainty for each phase array element
    phase_true : array (optional)
        if given then the phases for `times_predict`
    figname : str (optional)
        if given, save the figure to this path instead of showing it
    ant_label : str (optional)
        if given plots the label
    patch_name : str (optional)
        NOTE(review): accepted but never used in this function.
    """
    assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
    X = times.reshape((-1,1))
    #smooth: first predict back at the training inputs themselves
    Xstar = X
    y = phase
    ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
    plt.plot(X[:,0],y,c='teal',label='data')
    plt.plot(Xstar[:,0],ystar,c='red',ls='--')
    # Dashed lines: +/- one posterior standard deviation over the training span.
    plt.plot(Xstar[:,0],ystar+np.sqrt(np.diag(cov)),c='green',ls='--')
    plt.plot(Xstar[:,0],ystar-np.sqrt(np.diag(cov)),c='blue',ls='--')
    if ant_label is not None:
        plt.title(ant_label)
    plt.xlabel('time (s)')
    plt.ylabel('phase (rad)')
    #y_true = prepare_phase(phase_true)
    # Now predict at the requested times (solid lines).
    Xstar = times_predict.reshape((-1,1))
    ystar, cov, lml = level1_solve(X,y,sigma_y,Xstar,K)
    std = np.sqrt(np.diag(cov))
    plt.plot(Xstar[:,0],ystar,c='red',ls='-',label='pred')
    plt.plot(Xstar[:,0],ystar+std,c='green',ls='-',label=r'$+\sigma$')
    plt.plot(Xstar[:,0],ystar-std,c='blue',ls='-',label=r'$-\sigma$')
    if phase_true is not None:
        y_true = phase_true
        plt.plot(Xstar[:,0],y_true,c='orange',label="true")
    plt.legend(frameon=False)
    plt.tight_layout()
    if figname is not None:
        plt.savefig(figname)
    else:
        plt.show()
    plt.close()
def plot_bayes_smoothed(times, data, smoothed, std, figname,ant_label,patch_name,type):
    """Plot the smoothed
    times : array
        times for training set
    data : array
        tec_dd for training set
    smoothed : array
        smoothed version of tec
    std : array
        per-sample standard deviation of the smoothed estimate
    figname : str
        figure name to save to (saved as png)
    ant_label : str
        antenna label
    patch_name : str
        patch name
    type : str
        'tec' or 'cs'; selects the y-axis label.
        NOTE(review): parameter shadows the builtin ``type`` inside this function.
    """
    plt.plot(times,data,c='orange',label='data')
    plt.plot(times,smoothed,c='red',ls='--',label='mean')
    plt.plot(times,smoothed + std,c='green',ls='--',label=r'$+\sigma$')
    plt.plot(times,smoothed - std,c='blue',ls='--',label=r'$-\sigma$')
    plt.title("{} | {}".format(ant_label,patch_name))
    plt.xlabel('time (s)')
    if type == 'tec':
        plt.ylabel('TEC (TEC units)')
    if type == 'cs':
        plt.ylabel('Scalar Phase (radians)')
    plt.legend(frameon=False)
    plt.tight_layout()
    plt.savefig(figname,format='png')
    plt.close()
    #plt.show()
def smooth_data(times, phase, K, sigma_y=0):
    """Posterior mean and standard deviation of `phase` at the training times.

    times : array
        times for training set
    phase : array
        phase for training set
    K : NDKernel
        optimized kernel
    sigma_y : float or array
        if float then measurement uncertainty for all phase.
        if array then measurement uncertainty for each phase array element
    """
    assert len(times) < np.sqrt(1e6), "Don't want to do too many ops"
    X = times.reshape((-1, 1))
    # Predicting back at the training inputs themselves yields the smoothed signal.
    ystar, cov, lml = level1_solve(X, phase, sigma_y, X, K)
    return ystar, np.sqrt(np.diag(cov))
def smooth_dd_tec(dd_file, output_folder):
    """Use optimal bayesian filtering to smooth direction-dependent solutions.

    dd_file : str
        the hdf5 file containing direction dependent solutions
    output_folder : str
        folder (relative to cwd) receiving the log, diagnostics pngs and the
        '*_bayes_smoothed.hdf5' copy with smoothed TEC/scalarphase written back
    """
    output_folder = os.path.join(os.getcwd(), output_folder)
    diagnostic_folder = os.path.join(output_folder, 'diagnostics')
    # Improvement: replaces a bare `try: makedirs / except: pass`.
    os.makedirs(diagnostic_folder, exist_ok=True)
    log.basicConfig(filename=os.path.join(output_folder, "log"),
                    format='%(asctime)s %(levelname)s:%(message)s', level=log.DEBUG)
    f_dd = h5py.File(dd_file, "r", libver="earliest")
    antenna_labels = []
    antenna_positions = []
    for row in f_dd['/sol000/antenna']:
        antenna_labels.append(str_(row[0]))
        antenna_positions.append(row[1])
    antenna_labels = np.array(antenna_labels)
    antenna_positions = np.array(antenna_positions)
    Na = len(antenna_labels)
    # Stored times are in MJD seconds; convert to GPS seconds for the GP inputs.
    times = at.Time(f_dd['/sol000/tec000/time'][...] / 86400., format='mjd', scale='tai')
    times = times.gps
    Nt = len(times)
    patch_names = []
    directions = []
    for row in f_dd['/sol000/source']:
        patch_names.append(str_(row[0]).replace('[', '').replace(']', ''))
        directions.append(row[1])
    patch_names = np.array(patch_names).flatten()
    directions = np.array(directions)
    directions = ac.SkyCoord(directions[:, 0] * au.rad, directions[:, 1] * au.rad, frame='icrs')
    Nd = len(patch_names)
    # Datasets are stored (time, antenna, direction); permute to
    # (antenna, time, direction) -> ijk for per-antenna processing.
    tec_dd = np.einsum("jik->ijk", f_dd['/sol000/tec000/val'][:, :, :, 0])
    scalarphase_dd = np.einsum("jik->ijk", f_dd['/sol000/scalarphase000/val'][:, :, :, 0])
    coherence_time = 200.  # seconds
    dt = times[1] - times[0]
    # Optimization window: four coherence times, stepped by half a window below.
    num_opt = int(coherence_time / dt * 4)
    sigma_y = 0  # 0.14/8.4480e9*120e6 -> 0.14 rad in TEC at 120MHz for approximation
    # Kernel: white noise + product of a short and a long squared-exponential.
    K1 = Diagonal(1, sigma=sigma_y / 2.)
    K1.set_hyperparams_bounds([1e-5, 0.2], name='sigma')
    K2 = SquaredExponential(1, l=20)
    K2.set_hyperparams_bounds([dt * 2, 70], name='l')
    K2.set_hyperparams_bounds([1e-5, 1], name='sigma')
    K3 = SquaredExponential(1, l=220)
    K3.set_hyperparams_bounds([70, 300], name='l')
    K3.set_hyperparams_bounds([1e-5, 1], name='sigma')
    K = K2 * K3 + K1
    tec_dd_smoothed = np.zeros([Na, Nt, Nd], dtype=float)
    tec_dd_std = np.zeros([Na, Nt, Nd], dtype=float)
    sc_dd_smoothed = np.zeros([Na, Nt, Nd], dtype=float)
    sc_dd_std = np.zeros([Na, Nt, Nd], dtype=float)
    for i in range(Na):
        for k in range(Nd):
            log.info("Working on {} | {}".format(antenna_labels[i], patch_names[k]))
            # Overlapping windows (50% overlap); results are averaged by `count`.
            slices = range(0, Nt, num_opt >> 1)
            count = np.zeros(Nt)
            tec_m = np.mean(tec_dd[i, :, k])
            scalarphase_dd[i, :, k] = phase_unwrapp1d(scalarphase_dd[i, :, k])
            for s in slices:
                start = s
                stop = min(Nt, start + num_opt)
                X = times[start:stop]
                count[start:stop] += 1
                # TEC is mean-centered per (antenna, direction) before fitting.
                y = tec_dd[i, start:stop, k] - tec_m
                K = opt_kernel(X, y, K, sigma_y=sigma_y, n_random_start=1)
                log.info(K)
                ystar, std = smooth_data(X, y, K, sigma_y=0)
                ystar += tec_m
                tec_dd_smoothed[i, start:stop, k] += ystar
                tec_dd_std[i, start:stop, k] += std ** 2
                y = scalarphase_dd[i, start:stop, k]
                K = opt_kernel(X, y, K, sigma_y=sigma_y, n_random_start=1)
                log.info(K)
                ystar, std = smooth_data(X, y, K, sigma_y=0)
                sc_dd_smoothed[i, start:stop, k] += ystar
                # Bug fix: accumulate variances (std**2) like the TEC branch;
                # previously raw std values were summed and then sqrt'ed, which
                # mis-combined the uncertainty of overlapping windows.
                sc_dd_std[i, start:stop, k] += std ** 2
            tec_dd_smoothed[i, :, k] /= count
            tec_dd_std[i, :, k] /= count
            sc_dd_smoothed[i, :, k] /= count
            sc_dd_std[i, :, k] /= count
            np.sqrt(tec_dd_std[i, :, k], out=tec_dd_std[i, :, k])
            np.sqrt(sc_dd_std[i, :, k], out=sc_dd_std[i, :, k])
            figname = os.path.join(diagnostic_folder, "tec_bayes_smoothed_{}_{}.png".format(antenna_labels[i], patch_names[k]))
            plot_bayes_smoothed(times, tec_dd[i, :, k], tec_dd_smoothed[i, :, k], tec_dd_std[i, :, k],
                                figname, antenna_labels[i], patch_names[k], type='tec')
            figname = os.path.join(diagnostic_folder, "scalarphase_bayes_smoothed_{}_{}.png".format(antenna_labels[i], patch_names[k]))
            plot_bayes_smoothed(times, scalarphase_dd[i, :, k], sc_dd_smoothed[i, :, k], sc_dd_std[i, :, k],
                                figname, antenna_labels[i], patch_names[k], type='cs')
    f_dd.close()
    out_file = os.path.join(output_folder, dd_file.split('/')[-1].replace('.hdf5', '_bayes_smoothed.hdf5'))
    # NOTE(review): shell copy breaks on paths containing spaces — confirm inputs.
    os.system("cp {} {}".format(dd_file, out_file))
    # Bug fix: the copy must be opened read-write ("r+"); mode "r" made the
    # dataset assignments below fail on a read-only file.
    f_dd = h5py.File(out_file, "r+", libver="earliest")
    # Bug fix: undo the read-time axis permutation (jik->ijk) before writing
    # back, so the datasets keep their stored (time, antenna, direction) layout.
    f_dd['/sol000/tec000/val'][:, :, :, 0] = np.einsum("ijk->jik", tec_dd_smoothed)
    f_dd['/sol000/scalarphase000/val'][:, :, :, 0] = np.einsum("ijk->jik", sc_dd_smoothed)
    f_dd.close()
if __name__=='__main__':
    # Example driver: smooth the bundled direction-dependent solution file.
    dd_file = "../../data/NsolutionsDDE_2.5Jy_tecandphasePF_correctedlosoto.hdf5"
    smooth_dd_tec(dd_file,'output_bayes_smoothing')
| StarcoderdataPython |
3502464 | <gh_stars>1-10
# Generated by Django 2.1.1 on 2018-11-04 09:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.1.1, 2018-11-04).

    Alters ``platform_models.id`` to a 10-character CharField primary key.
    """
    dependencies = [
        ('main', '0004_auto_20181104_0923'),
    ]
    operations = [
        migrations.AlterField(
            model_name='platform_models',
            name='id',
            field=models.CharField(max_length=10, primary_key=True, serialize=False),
        ),
    ]
| StarcoderdataPython |
279717 | <reponame>yaroslavNikolaev/A.R.M.O.R.
# Public API of this package: only the ``kubernetes`` submodule is exported by
# ``from <package> import *``.
__all__ = ['kubernetes']
| StarcoderdataPython |
3246612 | <reponame>RevansChen/online-judge
# Python - 2.7.6
# Codewars-style kata tests for ``find_slope`` (defined elsewhere); ``test`` is
# the harness object injected by the platform.  Each input is [x1, y1, x2, y2];
# the slope is returned as a string, 'undefined' for vertical lines (x1 == x2).
test.assert_equals(find_slope([19, 3, 20, 3]), '0')
test.assert_equals(find_slope([-7, 2, -7, 4]), 'undefined')
test.assert_equals(find_slope([10, 50, 30, 150]), '5')
test.assert_equals(find_slope([10, 20, 20, 80]), '6')
test.assert_equals(find_slope([-10, 6, -10, 3]), 'undefined')
4957802 | <reponame>Abhizx/snake-charming<filename>tests/test_factory.py
from flaskr import create_app
import json
def test_config():
    """App factory honours the TESTING flag: off by default, on when passed."""
    assert not create_app().testing
    assert create_app({'TESTING': True}).testing
def test_view_single_get(client):
    """A first GET on /view registers the username and reports one view.

    ``client`` is a pytest fixture — presumably defined in conftest.py.
    """
    response = client.get('/view?username=humble')
    assert json.loads(response.data) == { 'count': 1, 'views': [{ 'id': 1, 'username': 'humble' }] }
def test_view_multiple_gets(client):
    """Two GETs with different usernames accumulate views with sequential ids."""
    response_humble = client.get('/view?username=humble')
    response_bundle = client.get('/view?username=bundle')
    assert json.loads(response_humble.data) == { 'count': 1, 'views': [{ 'id': 1, 'username': 'humble' }] }
    assert json.loads(response_bundle.data) == { 'count': 2, 'views': [{ 'id': 1, 'username': 'humble' }, { 'id': 2, 'username': 'bundle' }] }
3577291 | <reponame>hououin/pdm<gh_stars>0
import numpy as np
import cv2
import czifile
import pickle
import matplotlib.pyplot as plt
import scipy.misc
import math
import random
# Z spacing between image layers; multiplied by the per-layer ``depth`` counter
# when landmarks are emitted in getVectorOfLandmakrs().
DDEPTH = 1
# Number of evenly spaced landmark points sampled from each layer's contour.
NUM_LANDMARKS = 50
def cropImage(im_cell):
    """Crop a 500x500 window centred on the marked cell in *im_cell*.

    "Cell" pixels are those whose channels satisfy the chained comparison the
    original per-pixel scan used: channel0 != channel1 AND channel1 != channel2
    (grey pixels, where all channels are equal, never match).  The centre is
    the truncated mean of the matching pixel coordinates.

    Args:
        im_cell: HxWx3 image array (presumably BGR from cv2.imread — confirm).
    Returns:
        A copied 500x500x3 window centred on the detected cell.
    Raises:
        ZeroDivisionError: if no pixel matches (same as the original loop).
    """
    # Vectorized replacement for the original O(H*W) Python pixel loop.
    mask = (im_cell[:, :, 0] != im_cell[:, :, 1]) & (im_cell[:, :, 1] != im_cell[:, :, 2])
    rows, cols = np.nonzero(mask)
    # int() truncation matches the original int(sum / count).
    average_i = int(rows.sum() / len(rows))
    average_j = int(cols.sum() / len(cols))
    new_i_z = average_i - 250
    new_j_z = average_j - 250
    # .copy() so later in-place edits (findEdge) do not touch the source image.
    return im_cell[new_i_z:new_i_z + 500, new_j_z:new_j_z + 500].copy()
def findEdge(crop_img):
    """Binarize the cropped cell image in place and return its Canny edges.

    Pixel classification (identical to the original 500x500 scan):
      * black (0,0,0) if any pair of channels differs by less than ``sigma``
        AND the third channel is <= 200, OR if all three channels are equal;
      * red (0,0,255) otherwise.
    The whole image is processed; the original hard-coded a 500x500 range,
    which is the exact shape cropImage() produces.

    Args:
        crop_img: 500x500x3 uint8 image from cropImage(); modified in place.
    Returns:
        Single-channel edge map from cv2.Canny(thresholds 100, 200).
    """
    sigma = 25
    # Signed working copy so channel differences do not wrap around at 255.
    ch = crop_img.astype(np.int16)
    d01 = np.abs(ch[:, :, 0] - ch[:, :, 1])
    d12 = np.abs(ch[:, :, 1] - ch[:, :, 2])
    d02 = np.abs(ch[:, :, 0] - ch[:, :, 2])
    near_grey = (d01 < sigma) | (d12 < sigma) | (d02 < sigma)
    all_equal = (ch[:, :, 0] == ch[:, :, 1]) & (ch[:, :, 1] == ch[:, :, 2])
    # Both original branches painted black, so the union reproduces the if/elif.
    black = (near_grey & (ch[:, :, 2] <= 200)) | all_equal
    crop_img[black] = (0, 0, 0)
    crop_img[~black] = (0, 0, 255)
    return cv2.Canny(crop_img, 100, 200)
#
# def distanceBetweenPoints(a,b):
# return math.sqrt(math.pow(b[0]-a[0]) + math.pow(b[1]-a[0]))
# Mutable module state read by clockwiseangle_and_distance(): ``origin`` is the
# pivot point (assigned in __main__ to the first edge pixel of each layer) and
# ``refvec`` the reference direction for angle zero.
origin = []
refvec = [0,1]
def clockwiseangle_and_distance(point):
    """Sort key for ordering contour points clockwise around ``origin``.

    Returns (angle in [0, 2*pi) measured clockwise from ``refvec``, distance);
    a point coinciding with the origin yields (-pi, 0) so it sorts first.
    Reads the module-level ``origin`` and ``refvec``.
    """
    dx = point[0] - origin[0]
    dy = point[1] - origin[1]
    dist = math.hypot(dx, dy)
    if dist == 0:
        # Degenerate case: no direction from the origin to this point.
        return -math.pi, 0
    # Unit direction vector towards the point.
    ux, uy = dx / dist, dy / dist
    dot = ux * refvec[0] + uy * refvec[1]      # x1*x2 + y1*y2
    cross = refvec[1] * ux - refvec[0] * uy    # x1*y2 - y1*x2
    angle = math.atan2(cross, dot)
    # Map negative (counter-clockwise) angles into [0, 2*pi).
    if angle < 0:
        angle += 2 * math.pi
    # Angle is the primary criterion; ties break on distance from the origin.
    return angle, dist
def getVectorOfEdge(celica_edge):
    """Return [row, col] coordinates of all edge pixels (value 255).

    np.argwhere yields indices in row-major order, matching the original
    nested i/j scan; .tolist() converts them to plain [i, j] lists of ints.
    The original hard-coded a 500x500 range (the Canny output size here);
    this version handles any shape.  The unused ``counter`` and ``landmarks``
    buffer from the original were dropped.

    Args:
        celica_edge: 2-D edge map (output of cv2.Canny).
    Returns:
        list of [row, col] pairs, possibly empty.
    """
    return np.argwhere(celica_edge == 255).tolist()
def getVectorOfLandmakrs(sorted_vec, num_landmarks, depth):
    """Sample ``num_landmarks`` evenly spaced points from a sorted contour.

    Appends [i, j, depth*DDEPTH] triples into the module-global
    ``vec_landmarks`` (created in __main__).  ``layer_landmarks`` collects the
    2-D points for the commented-out debug display.
    NOTE(review): if len(sorted_vec) < num_landmarks the step ``d`` is 0 and
    range() raises ValueError — confirm inputs are always long enough.
    """
    layer_landmarks = []
    n = len(sorted_vec)
    d = int(n/num_landmarks)
    counter = 0
    for i in range(0,n,d):
        # Stop once exactly num_landmarks points have been taken.
        if counter == num_landmarks:
            break
        else:
            layer_landmarks.append([sorted_vec[i][0],sorted_vec[i][1]])
            vec_landmarks.append([sorted_vec[i][0],sorted_vec[i][1], depth*DDEPTH])
            counter += 1
    # showLandmark(layer_landmarks)
def showLandmark(x):
    """Debug helper: display the given [row, col] points on a black canvas."""
    canvas = np.zeros((500, 500))
    for point in x:
        canvas[int(point[0])][int(point[1])] = 255
    cv2.imshow("landmarks", canvas)
    # Block until a key is pressed so the window stays visible.
    cv2.waitKey(0)
if __name__ == "__main__":
depth = 1
vec_landmarks = []
for i in range(30,45):
im_cell = cv2.imread(f"data/p16-1/p16-{i}.jpg")
# print(im_cell)
# cv2.imshow(f'p06-{i}.jpg',im_cell)
crop_img = cropImage(im_cell)
celica_edge = findEdge(crop_img)
vec_edge = getVectorOfEdge(celica_edge)
origin = vec_edge[0]
#print(origin)
sorted_vec_edge = sorted(vec_edge, key=clockwiseangle_and_distance)
getVectorOfLandmakrs(sorted_vec_edge, NUM_LANDMARKS, depth)
depth += 1
print(len(vec_landmarks))
with open("data/landmarks/x5.pickle","wb+") as f:
pickle.dump(vec_landmarks, f)
cv2.destroyAllWindows()
print("Process finished")
| StarcoderdataPython |
5113102 | <reponame>OSavchik/python_training
import pymysql.connections
from model.group import Group
from model.contact import Contact
class DbFixture:
    """Direct-database fixture for the addressbook application tests.

    Wraps a single autocommitting pymysql connection.  All queries that take
    an external value now use parameterized SQL (%s placeholders) instead of
    f-string interpolation, closing a SQL-injection hole.
    """

    def __init__(self, host, name, user, password):
        # Connection parameters are kept on the instance for debugging.
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit=True: every statement is committed immediately.
        self.connection = pymysql.Connect(host=host, database=name, user=user,
                                          password=password, autocommit=True)

    def get_group_list(self):
        """Return every row of ``group_list`` as a list of Group objects."""
        groups = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for row in cursor:
                (group_id, name, header, footer) = row
                groups.append(Group(id=str(group_id), name=name, header=header, footer=footer))
        finally:
            cursor.close()
        return groups

    def get_contact_list(self):
        """Return all non-deleted addressbook rows as Contact objects."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute(
                "select id, firstname, middlename, lastname, nickname, address,"
                " email, email2, email3, home, mobile, work, phone2"
                " from addressbook where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (contact_id, first_name, middle_name, last_name, nick_name, address,
                 email, email2, email3, home, mobile, work, phone2) = row
                contacts.append(Contact(id=str(contact_id), first_name=first_name,
                                        middle_name=middle_name, last_name=last_name,
                                        nick_name=nick_name, address_name=address,
                                        email=email, email2=email2, email3=email3,
                                        home_phone=home, mobil_phone=mobile,
                                        work_phone=work, secondary_phone=phone2))
        finally:
            cursor.close()
        return contacts

    def get_max_contact_index_db(self):
        """Return a 1-tuple holding the maximum contact id ((None,) if empty)."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("select MAX(a.id) from addressbook a")
            contact_id = cursor.fetchone()
        finally:
            cursor.close()
        return contact_id

    def get_contact_by_index_db(self, index):
        """Return (id, firstname, middlename, lastname, nickname) for the
        contact with the given id that belongs to at least one group."""
        cursor = self.connection.cursor()
        try:
            # Parameterized (was an f-string) to avoid SQL injection.
            cursor.execute(
                "select addressbook.id, addressbook.firstname, addressbook.middlename,"
                " addressbook.lastname, addressbook.nickname from addressbook"
                " join address_in_groups on addressbook.id = address_in_groups.id"
                " and addressbook.id = %s", (index,))
            find_contact_by_index = cursor.fetchone()
        finally:
            cursor.close()
        return find_contact_by_index

    def search_not_empty_group(self):
        """Return a 1-tuple with the name of some group that has a non-empty name."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_name from group_list where group_name is not NULL and group_name <> '' ")
            group_with_name = cursor.fetchone()
        finally:
            cursor.close()
        return group_with_name

    def get_first_contact_with_group(self):
        """Return all (contact id, group id, group name) rows for non-deleted
        contacts linked to a named group."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(
                "select addressbook.id, address_in_groups.group_id, group_list.group_name from addressbook"
                " join address_in_groups on addressbook.id = address_in_groups.id"
                " and addressbook.deprecated='0000-00-00 00:00:00'"
                " join group_list on address_in_groups.group_id = group_list.group_id"
                " and group_list.group_name <> '' ")
            find_contact_with_group = cursor.fetchall()
        finally:
            cursor.close()
        return find_contact_with_group

    def delete_link_group_with_contact(self, index):
        """Delete every contact/group link for the given contact id.

        NOTE(review): fetchone() after a DELETE always yields None; the return
        value is kept only for interface compatibility with existing callers.
        """
        cursor = self.connection.cursor()
        try:
            cursor.execute("delete from address_in_groups where address_in_groups.id = %s", (index,))
            find_link_group_with_contact = cursor.fetchone()
        finally:
            cursor.close()
        return find_link_group_with_contact

    def find_link_group_with_contact(self, index):
        """Return a 1-tuple (id,) if the contact is linked to any group, else None."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id from address_in_groups where address_in_groups.id = %s", (index,))
            find_link_group_with_contact = cursor.fetchone()
        finally:
            cursor.close()
        return find_link_group_with_contact

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
| StarcoderdataPython |
9764616 | from PyQt4.QtCore import QMetaObject, QRect, Qt
from PyQt4.QtGui import QApplication, QHBoxLayout, QMainWindow, QMenuBar, \
QStatusBar, QVBoxLayout, QWidget, QIcon
from filetree import EzSphinxTreeView
from splitter import EzSphinxSplitter
from restedit import EzSphinxRestEdit
from util import EasyConfigParser
from warnreport import EzSphinxWarnReportView
from web import EzSphinxWebView
import os
import sys
class EzSphinxMenuBar(QMenuBar):
    """Main menu bar of the EzSphinx window (currently empty of actions)."""
    def __init__(self, parent):
        QMenuBar.__init__(self, parent)
        # TBD: manage the size
        # self.setGeometry(QRect(0, 0, 929, 22))
        self.setObjectName("menubar")
        # Use the platform's native menu bar where available (e.g. macOS).
        self.setNativeMenuBar(True)
        # file_menu = self.addMenu(self.tr("&File"));
class EzSphinxStatusBar(QStatusBar):
    """Status bar of the main window; only sets its Qt object name."""
    def __init__(self, parent):
        QStatusBar.__init__(self, parent)
        self.setObjectName("statusbar")
class EzSphinxWindow(QMainWindow):
"""Main application window"""
def __init__(self):
QMainWindow.__init__(self)
self.setObjectName('EzSphinx')
self.setWindowTitle('EzSphinx')
# TODO: Use setuptools resources here
pngpath = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]),
'images', 'ezsphinx.png')
self._icon = QIcon(pngpath)
self.setWindowIcon(self._icon)
QApplication.instance().setWindowIcon(self._icon)
QMetaObject.connectSlotsByName(self)
self.config = EasyConfigParser()
self._setup_ui()
# load last user settings
self._load_preferences()
def save_presentation(self, config):
self._save_preferences(config)
def select_warning(self, line):
self.widgets['restedit'].select_line(line)
self.widgets['restedit'].set_focus()
def render(self, html=''):
self.widgets['webview'].refresh(html) # a document would be better
self.widgets['warnreport'].refresh()
self.widgets['restedit'].refresh()
def update(self, what):
for w in what:
if w in self.widgets:
self.widgets[w].update_text()
else:
print "Invalid widget: %s" % w
#-------------------------------------------------------------------------
# Private implementation
#-------------------------------------------------------------------------
def _setup_ui(self):
self.widgets = {}
mainwidget = QWidget(self)
mainwidget.setMouseTracking(True)
mainwidget.setObjectName("mainwidget")
flayout = QHBoxLayout(mainwidget)
flayout.setObjectName("flayout")
fsplitter = EzSphinxSplitter(mainwidget, self, 'fsplitter',
Qt.Horizontal)
ftwidget = QWidget(fsplitter)
ftwidget.setObjectName("ftwidget")
ftlayout = QVBoxLayout(ftwidget)
ftlayout.setObjectName("ftlayout")
ftlayout.setContentsMargins(0,4,1,0)
filetree = EzSphinxTreeView(ftwidget)
ftlayout.addWidget(filetree)
vlayout = QVBoxLayout(fsplitter)
vlayout.setObjectName("vlayout")
vlayout.setContentsMargins(4,0,4,0)
vsplitter = EzSphinxSplitter(fsplitter, self, 'vsplitter',
Qt.Vertical)
editwidget = QWidget(vsplitter)
editwidget.setObjectName("editwidget")
elayout = QVBoxLayout(editwidget)
elayout.setObjectName("elayout")
elayout.setContentsMargins(1,4,1,0)
hsplitter = EzSphinxSplitter(editwidget, self, 'hsplitter',
Qt.Horizontal)
elayout.addWidget(hsplitter)
textwidget = QWidget(hsplitter)
textwidget.setObjectName("textwidget")
textlayout = QHBoxLayout(textwidget)
textlayout.setObjectName("textlayout")
textlayout.setContentsMargins(0,0,2,0)
restedit = EzSphinxRestEdit(textwidget, self)
textlayout.addWidget(restedit)
webwidget = QWidget(hsplitter)
webwidget.setObjectName("webwidget")
weblayout = QHBoxLayout(webwidget)
weblayout.setObjectName("weblayout")
weblayout.setContentsMargins(1,0,2,2)
webview = EzSphinxWebView(webwidget, self)
weblayout.addWidget(webview)
tablewidget = QWidget(vsplitter)
tablewidget.setObjectName("tablewidget")
tablelayout = QHBoxLayout(tablewidget)
tablelayout.setObjectName("tablelayout")
tablelayout.setContentsMargins(1,0,2,0)
vlayout.addWidget(vsplitter)
flayout.addWidget(fsplitter)
warnreport = EzSphinxWarnReportView(self)
tablelayout.addWidget(warnreport)
self.setCentralWidget(mainwidget)
self.setMenuBar(EzSphinxMenuBar(self))
self.setStatusBar(EzSphinxStatusBar(self))
self._add_widgets((hsplitter, vsplitter, fsplitter))
self._add_widgets((filetree, restedit, webview, warnreport))
def _add_widgets(self, widgets):
if not isinstance(widgets, tuple) and not isinstance(widgets, list):
widgets = (widgets,)
for widget in widgets:
name = str(widget.objectName().toUtf8())
self.widgets[name] = widget
def _load_preferences(self):
"""reload previously saved UI configuration from a file"""
if not self.config.read(os.path.expanduser('~/.ezsphinxrc')):
return
self.resize(int(self.config.get('window', 'width', '750')),
int(self.config.get('window', 'height', '650')))
config = {}
for section in self.config.sections():
for k, v in self.config.items(section):
config.setdefault(section, []).append((k.lower(), v))
for widget in self.widgets.values():
if hasattr(widget, 'load_presentation'):
widget.load_presentation(config)
def _save_preferences(self, config={}):
"""save current UI configuration into a configuration file"""
size = self.size()
config['window'] = [('width', size.width()), ('height', size.height())]
for section in config:
for key, value in config[section]:
self.config.set(section, key, value)
with open(os.path.expanduser('~/.ezsphinxrc'), 'w') as out_:
self.config.write(out_)
| StarcoderdataPython |
11206007 | <reponame>vivek28111992/AlgoDaily<gh_stars>0
"""
This is a classic and very common interview problem. Given an array of integers, return the indices of the two numbers in it that add up to a specific goal number.
So let's say our goal number was 10. Our numbers to sum to it would be 3 and 7, and their indices 1 and 3 respectively.
let arr = [1, 3, 6, 7, 9];
let goal = 10;
twoSum(arr, goal);
// [1, 3]
You may assume that each input would have exactly one solution. Additionally, you may not use the same element twice towards the sum. This means if given [1, 3] and a goal of 2, you cannot use 1 twice and return [0, 0].
Here's the function signature to fill in:
function twoSum(arr, goal) {
return arr;
}
"""
def twoSum(arr, goal):
    """Return indices [i, j] (i < j) of two distinct elements summing to *goal*.

    Bug fix: the original two-pass lookup could return the same index twice
    (e.g. twoSum([1, 3], 2) -> [0, 0]) and missed pairs of equal values
    (twoSum([6, 6], 12) -> [0, 0] instead of [0, 1]).  The one-pass scan below
    only matches against *earlier* elements, so an element is never paired
    with itself.

    Returns [] when no valid pair exists.
    """
    seen = {}  # value -> index of its first occurrence so far
    for i, value in enumerate(arr):
        complement = goal - value
        if complement in seen:
            # seen[complement] < i, so the two indices are always distinct.
            return [seen[complement], i]
        seen[value] = i
    return []
if __name__ == '__main__':
    # Quick manual check: 3 + 9 = 12 at indices 0 and 4.
    arr = [3, 2, 4, 1, 9]
    goal = 12
    print(twoSum(arr, goal))
| StarcoderdataPython |
9730151 | <filename>app/main/views.py
from flask import g, jsonify, Markup, render_template, redirect, url_for, current_app, abort, flash, request, \
make_response
from flask_login import login_required, current_user
from datetime import datetime
from mongoengine.queryset.visitor import Q
from . import main
from .forms import *
from ..models import *
from ..analyse import analyser
from ..analyse import fork_comparer
from ..decorators import admin_required, permission_required
from ..auth.views import get_user_repo_list, get_upperstream_repo
# ------------------------------------------------------------------
# Following are all the function wrap the database operation.
def db_find_project(project_name):
    """Return the Project document with the given name, or None if absent."""
    return Project.objects(project_name=project_name).first()
def db_delete_project(project_name):
    """Delete the project and all of its fork / changed-file documents."""
    Project.objects(project_name=project_name).delete()
    ProjectFork.objects(project_name=project_name).delete()
    ChangedFile.objects(project_name=project_name).delete()
def db_followed_project(project_name):
    """Add the project to the current user's followed list (idempotent) and
    stamp the follow time."""
    if project_name not in current_user.followed_projects:
        User.objects(username=current_user.username).update_one(push__followed_projects=project_name)
    # Update project followed time
    tmp_dict = current_user.followed_projects_time
    tmp_dict[project_name] = datetime.utcnow()
    User.objects(username=current_user.username).update_one(set__followed_projects_time=tmp_dict)
def db_unfollowed_project(project_name):
    """Remove the project from the current user's followed list and drop its
    follow timestamp."""
    User.objects(username=current_user.username).update_one(
        pull__followed_projects=project_name)
    tmp_dict = current_user.followed_projects_time
    if project_name in tmp_dict:
        tmp_dict.pop(project_name)
    User.objects(username=current_user.username).update_one(set__followed_projects_time=tmp_dict)
# ------------------------------------------------------------------
@main.route('/', methods=['GET', 'POST'])
def start():
    """Root URL: send authenticated users to the dashboard, others to welcome."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    return redirect(url_for('main.welcome'))
@main.route('/welcome', methods=['GET', 'POST'])
def welcome():
    """Static landing page for anonymous visitors."""
    return render_template('welcome.html')
@main.route('/INTRUDE-welcome', methods=['GET', 'POST'])
def INTRUDE_welcome():
    """Static landing page for the INTRUDE duplicate-PR study."""
    return render_template('INTRUDE-welcome.html')
@main.route('/INTRUDE-survey', methods=['GET', 'POST'])
def INTRUDE_survey():
    """Append a survey response (passed as query parameters) to the response
    log and re-render the survey page.

    NOTE(review): the output path is hard-coded — confirm it exists on the
    host.  The ``datetime.datetime.now()`` form relies on ``datetime`` being
    the module (rebound by the star import from ..models) — verify.
    """
    repo = request.args.get('repo')
    pr1 = request.args.get('pr1')
    pr2 = request.args.get('pr2')
    response = request.args.get('response')
    # 'with' guarantees the handle is closed even if the write fails
    # (the old open/close pair leaked the handle on error).
    with open("/DATA/luyao/dupPR/surveyResponse.txt", "a+") as f:
        f.write("%s:%s,%s,%s,%s\n" % (str(datetime.datetime.now()), repo, pr1, pr2, response))
    return render_template('INTRUDE-survey.html', repo_url=repo, pr1_id=pr1, pr2_id=pr2, PRcomment_response=response)
@main.route('/INTRUDE-Thankyou', methods=['GET', 'POST'])
def INTRUDE_extraQuestion():
    """Record the extra survey answers (POSTed form fields) and thank the user.

    Raises a 400 via Flask if any required form field is missing (dict access).
    """
    reason_useful = request.form['reason_useful']
    how_to_improve = request.form['how_to_improve']
    repo = request.form['repo']
    pr1 = request.form['pr1']
    pr2 = request.form['pr2']
    # 'with' guarantees the handle is closed even if the write fails.
    with open("/DATA/luyao/dupPR/surveyResponse_extra.txt", "a+") as f:
        f.write("%s:%s,%s,%s,%s,%s\n" % (str(datetime.datetime.now()), repo, pr1, pr2, reason_useful, how_to_improve))
    return render_template('INTRUDE-Thankyou.html')
@main.route('/INTRUDE-subscribe', methods=['GET', 'POST'])
def INTRUDE_subscribe():
    """Append a (login, email) subscription to the account list and confirm."""
    text = request.form.get('loginID')
    email = request.form.get('email')
    # 'with' guarantees the handle is closed even if the write fails.
    with open("/DATA/luyao/dupPR/subscribe_account_list.txt", "a+") as f:
        f.write("%s:%s,%s\n" % (str(datetime.datetime.now()), text, email))
    return render_template('INTRUDE-subscribe.html')
@main.route('/compare_forks', methods=['GET', 'POST'])
def compare_forks():
    """Compare two forks by their common changed files and key words.

    POST: redirect to this view with both fork names as query parameters.
    GET with ?fork1=..&fork2=..: look up both forks and render the comparison;
    a flash warning is shown for each fork that cannot be found.
    """
    form = CompareForkForm()
    if form.validate_on_submit():
        return redirect(url_for('main.compare_forks', form=form, fork1=form.fork1.data, fork2=form.fork2.data))
    _fork1_name = request.args.get("fork1")
    _fork2_name = request.args.get("fork2")
    if _fork1_name and _fork2_name:
        _fork1 = ProjectFork.objects(fork_name=_fork1_name).first()
        _fork2 = ProjectFork.objects(fork_name=_fork2_name).first()
        if _fork1 and _fork2:
            _common_files = fork_comparer.compare_on_files(_fork1, _fork2)
            _common_words = fork_comparer.compare_on_key_words(_fork1, _fork2)
            return render_template('compare_forks.html', form=form, common_files=_common_files,
                                   common_words=_common_words)
        else:
            # Bug fix: report the names actually looked up — the form fields
            # are empty on this GET request, so form.fork1.data was None.
            if _fork1 is None:
                flash('(%s) is not found!' % _fork1_name, 'warning')
            if _fork2 is None:
                flash('(%s) is not found!' % _fork2_name, 'warning')
            # Bug fix: the endpoint is 'compare_forks'; redirecting to the
            # non-existent 'main.compare_fork' raised a BuildError.
            return redirect(url_for('main.compare_forks'))
    return render_template('compare_forks.html', form=form)
@main.route('/load_from_github', methods=['GET', 'POST'])
@login_required
@permission_required(Permission.ADD)
def load_from_github():
    """Let the user pick owned GitHub repos to follow / load into INFOX.

    Builds a FlaskForm subclass dynamically, one BooleanField per owned repo
    (pre-checked if already followed).  Requires a prior sync; redirects to
    /sync otherwise.
    """
    if current_user.owned_repo_sync_time is not None:
        _ownered_project = list(current_user.owned_repo.items())
    else:
        # No cached repo list yet: fetch it from GitHub first.
        return redirect(url_for('main.sync'))
    # Dynamic form: field id = repo key, label = display name.
    class ProjectSelection(FlaskForm):
        pass
    for project in _ownered_project:
        setattr(ProjectSelection, project[0],
                BooleanField(project[1], default=project[0] in current_user.followed_projects))
    setattr(ProjectSelection, 'load_button', SubmitField('Follow'))
    setattr(ProjectSelection, 'sync_button', SubmitField('Refresh List'))
    form = ProjectSelection()
    if form.load_button.data:
        at_least_one_load = False
        add_list = []
        for field in form:
            if field.type == "BooleanField" and field.data:
                at_least_one_load = True
                _project_name = field.id
                # Only queue repos not yet in INFOX for crawling; follow all.
                if not db_find_project(_project_name):
                    add_list.append(_project_name)
                db_followed_project(_project_name)
        analyser.add_repos(current_user.username, add_list)
        if at_least_one_load:
            flash(
                'All the selected repos start loading into INFOX. We will send you emails to update status. Please wait.',
                'info')
        return redirect(url_for('main.index'))
    elif form.sync_button.data:
        return redirect(url_for('main.sync'))
    return render_template('load_from_github.html', form=form)
@main.route('/sync', methods=['GET', 'POST'])
@login_required
def sync():
    """Refresh the cached list of the user's GitHub repos (plus upstreams).

    Stores (key, display-name) pairs on the User document; '.' in repo names
    is escaped because MongoDB forbids dots in map keys.
    """
    _ownered_project = []
    _tmp_project_list = get_user_repo_list(current_user.username)
    if _tmp_project_list:
        for project in _tmp_project_list:
            _ownered_project.append((project, project))
            # Add upperstream_repo
            upperstream_repo = get_upperstream_repo(project)
            if upperstream_repo is not None:
                _ownered_project.append((upperstream_repo, upperstream_repo + "(Upperstream of %s)" % project))
    User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())
    # mongoDB don't support key value contains '.'
    for i in range(len(_ownered_project)):
        _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])
    User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))
    flash('Refresh your own GitHub repositories list successfully!', 'success')
    return redirect(url_for('main.load_from_github'))
@main.route('/guide', methods=['GET', 'POST'])
@login_required
def guide():
    """Getting-started page, shown when the user follows no projects yet."""
    return render_template('guide.html')
@main.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Paginated dashboard of the user's followed projects, with name search.

    A search submit redirects back here with ?search=<keyword>; an empty
    result set for a search flashes a warning, while an empty followed list
    redirects to the guide page.
    """
    # TODO:implement smart search.
    _search_form = SearchProjectForm()
    if _search_form.validate_on_submit():
        # NOTE(review): debug print of the search term — remove for production?
        print(_search_form.project_name.data)
        return redirect(url_for('main.index', search=_search_form.project_name.data))
    _keyword_search = request.args.get('search')
    if _keyword_search:
        project_list = Project.objects(
            Q(project_name__in=current_user.followed_projects) & Q(project_name__contains=_keyword_search))
        if len(project_list) == 0:
            flash(Markup(
                'Sorry, we don\'t find (%s) in your followed repositories. Try <a href="/find_repos" class="alert-link">Search on GitHub</a>.' % _keyword_search),
                'warning')
            return redirect(url_for('main.index'))
    else:
        project_list = Project.objects(project_name__in=current_user.followed_projects)
        if len(project_list) == 0:
            return redirect(url_for('main.guide'))
    page = request.args.get('page', 1, type=int)  # default is 1st page
    pagination = project_list.paginate(page=page, per_page=current_app.config['SHOW_NUMBER_FOR_PAGE'])
    projects = pagination.items
    return render_template('index.html', projects=projects, pagination=pagination, time_now=datetime.utcnow(),
                           form=_search_form)
@main.route('/project/<path:project_name>', methods=['GET', 'POST'])
def project_overview(project_name):
    """Overview page of one project: its forks with changes, plus the
    current user's per-fork tags.

    Args:
        project_name: project identifier from the URL path; 404 if unknown.
    """
    if not db_find_project(project_name):
        abort(404)
    _project = Project.objects(project_name=project_name).first()
    # Only forks that actually changed something are shown.
    _forks = ProjectFork.objects(project_name=project_name, file_list__ne=[], total_changed_line_number__ne=0)
    # TODO _all_tags could be opted by AJAX
    _all_tags = {}
    if current_user.is_authenticated:
        _project_tags = ForkTag.objects(project_name=project_name, username=current_user.username)
        for tag in _project_tags:
            _all_tags[tag.fork_full_name] = tag.tags
    if current_user.is_authenticated:
        # NOTE(review): page-view logging via print — consider proper logging.
        print('View: ', current_user.username, project_name)
    return render_template('project_overview.html', project=_project, forks=_forks, all_tags=_all_tags)
@main.route('/followed_project/<path:project_name>', methods=['GET', 'POST'])
@login_required
@permission_required(Permission.FOLLOW)
def followed_project(project_name):
    """Follow an existing project and return to the discovery page."""
    db_followed_project(project_name)
    flash(Markup(
        'Followed Project %s successfully! Please click <a href="/project/%s" class="alert-link">here</a> to view.' % (
            project_name, project_name)), 'success')
    return redirect(url_for('main.find_repos'))
@main.route('/unfollowed_project/<path:project_name>', methods=['GET', 'POST'])
@login_required
@permission_required(Permission.FOLLOW)
def unfollowed_project(project_name):
    """Unfollow a project and return to the dashboard."""
    db_unfollowed_project(project_name)
    return redirect(url_for('main.index'))
@main.route('/find_repos', methods=['GET', 'POST'])
@login_required
@permission_required(Permission.ADD)
def find_repos():
    """Discover repos: add a repo by name, or browse the unfollowed ones.

    On submit: follow the repo if already in INFOX, otherwise verify it on
    GitHub and queue it for crawling; flashes the outcome either way.  The
    page body lists projects the user does not follow, sorted by fork count.
    """
    form = AddProjectForm()
    if form.validate_on_submit():
        _input = form.project_name.data
        if db_find_project(_input) is not None:
            db_followed_project(_input)
            flash(Markup(
                'The repo (%s) is already in INFOX. Followed successfully! Please click <a href="/project/%s" class="alert-link">here</a> to view.' % (
                    _input, _input)), 'success')
        else:
            # Not yet in INFOX: check it exists on GitHub before queueing.
            if analyser.check_repo(_input, current_user.github_access_token) is not None:
                analyser.add_repos(current_user.username, [_input])
                db_followed_project(_input)
                flash(
                    'The repo (%s) starts loading into INFOX. We will send you an email when it is finished. Please wait.' % _input,
                    'info')
            else:
                flash('Not found!', 'danger')
    if current_user.is_authenticated:
        project_list = Project.objects(
            project_name__nin=current_user.followed_projects)
    else:
        project_list = Project.objects
    page = request.args.get('page', 1, type=int)  # default is 1st page
    pagination = project_list.order_by('-fork_number').paginate(page=page,
                                                                per_page=current_app.config['SHOW_NUMBER_FOR_PAGE'])
    projects = pagination.items
    return render_template('find_repos.html', form=form, projects=projects, pagination=pagination,
                           time_now=datetime.utcnow())
@main.route('/about', methods=['GET', 'POST'])
def about():
    """About page with a feedback form.

    Bug fix: the route previously accepted only GET, so the POSTed feedback
    form could never reach validate_on_submit(); POST is now allowed.
    """
    form = FeedbackForm()
    if form.validate_on_submit():
        flash('Feedback received successfully!', 'success')
        # NOTE(review): feedback is only printed to stdout — persist it?
        print(form.feedback.data)
        return redirect(url_for('main.about'))
    return render_template('about.html', form=form)
# ---------------- Following is all admin required. ----------------
@main.route('/admin_manage')
@login_required
@admin_required
def admin_manage():
    """Admin console: list every project and every user."""
    _projects = Project.objects()
    _users = User.objects()
    return render_template('admin_manage.html', projects=_projects, users=_users, time_now=datetime.utcnow())
@main.route('/project_refresh/<path:project_name>', methods=['GET', 'POST'])
@login_required
@admin_required
def project_refresh(project_name):
    """Admin: re-crawl one specific project (404 if it is unknown)."""
    if not db_find_project(project_name):
        abort(404)
    analyser.add_repos(current_user.username, [project_name])
    return redirect(url_for('main.admin_manage'))
@main.route('/user_refresh', methods=['GET', 'POST'])
@login_required
@admin_required
def user_refresh():
    """Admin: clear every user's crawling flag and pending repo queue."""
    User.objects().update(is_crawling=0)
    User.objects().update(repo_waiting_list=[])
    flash('Refresh all users successfully!', 'success')
    return redirect(url_for('main.admin_manage'))
@main.route('/repo_refresh', methods=['GET', 'POST'])
@login_required
@admin_required
def project_refresh_all():
    """Admin: queue every known project for re-crawling."""
    project_list = Project.objects()
    analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])
    flash('Refresh all successfully!', 'success')
    return redirect(url_for('main.admin_manage'))
@main.route('/repo_refresh_for_unfinished', methods=['GET', 'POST'])
@login_required
@admin_required
def repo_refresh_for_unfinished():
    """Admin: queue for re-crawling every project whose analysis is not at 100%."""
    crawl_list = [repo.project_name
                  for repo in Project.objects()
                  if repo.analyser_progress != "100%"]
    analyser.add_repos(current_user.username, crawl_list)
    flash('Refresh for unfinished successfully!', 'success')
    return redirect(url_for('main.admin_manage'))
@main.route('/delete_project/<path:project_name>', methods=['GET', 'POST'])
@login_required
@admin_required
def delete_project(project_name):
    """Admin: delete a project and all of its associated documents."""
    db_delete_project(project_name)
    flash('The repo (%s) is deleted!' % project_name, 'success')
    return redirect(url_for('main.admin_manage'))
@main.route('/delete_user/<username>')
@login_required
@admin_required
def delete_user(username):
    """Admin: delete the user account with the given username."""
    User.objects(username=username).delete()
    flash('User (%s) is deleted!' % username, 'success')
    return redirect(url_for('main.admin_manage'))
@main.route('/_fork_edit_tag', methods=['GET', 'POST'])
@login_required
@permission_required(Permission.ADD)
def _fork_edit_tag():
    """AJAX endpoint: add/delete/clear a user's tags on a fork.

    Query params: full_name (fork), tag, oper in {add, delete, clear}.
    Creates the per-user ForkTag document lazily; returns the updated tag
    list joined by ",\\n" as JSON.  Returns None (Flask error) for an
    unknown fork — presumably acceptable for internal AJAX use; confirm.
    """
    _full_name = request.args.get('full_name')
    _tag = request.args.get('tag')
    _oper = request.args.get('oper')
    # print(current_user.username, _full_name, _tag, _oper)
    _user_fork_tag = ForkTag.objects(fork_full_name=_full_name, username=current_user.username).first()
    if _user_fork_tag is None:
        _fork = ProjectFork.objects(full_name=_full_name).first()
        if _fork is None:
            return None
        # First tag operation for this user/fork: create the document.
        ForkTag(fork_full_name=_full_name, project_name=_fork.project_name, username=current_user.username).save()
        _user_fork_tag = ForkTag.objects(fork_full_name=_full_name, username=current_user.username).first()
    if _oper == 'delete':
        if _tag:
            ForkTag.objects(fork_full_name=_full_name, username=current_user.username).update_one(pull__tags=_tag)
    elif _oper == 'add':
        if _tag and (_tag not in _user_fork_tag.tags):
            ForkTag.objects(fork_full_name=_full_name, username=current_user.username).update_one(push__tags=_tag)
    elif _oper == 'clear':
        ForkTag.objects(fork_full_name=_full_name, username=current_user.username).update_one(set__tags=[])
    # Re-read to return the post-update state.
    upd_tags = ForkTag.objects(fork_full_name=_full_name, username=current_user.username).first()
    return jsonify(",\n".join(upd_tags.tags))
@main.route('/_get_similar_fork', methods=['GET', 'POST'])
def _get_similar_fork():
    """AJAX endpoint: forks of the same project most similar to the given one.

    Returns JSON {result: ...} from fork_comparer.get_similar_fork, or None
    (Flask error response) when the fork is missing or unspecified.
    """
    _full_name = request.args.get('full_name')
    if _full_name is not None:
        _fork = ProjectFork.objects(full_name=_full_name).first()
        if _fork is None:
            return None
        _fork_list = ProjectFork.objects(project_name=_fork.project_name)
        _result = fork_comparer.get_similar_fork(_fork_list, _fork)
        return jsonify(result=_result)
    else:
        return None
@main.route('/_get_predict_tag', methods=['GET', 'POST'])
def _get_predict_tag():
    """AJAX endpoint: predict up to 5 tags for a fork from its commit texts.

    Scores each candidate keyword as 3 * occurrences in commit titles plus
    occurrences in descriptions, then returns the positive-scoring keywords
    sorted descending, truncated to five, as JSON {result: [(tag, score)]}.
    """
    _full_name = request.args.get('full_name')
    _tag_list = ["merge", "update", "fix", "add", "branch", "pull", "request", "version", "readme", "master", "change",
                 "delete", "release", "remote", "track", "test", "remove", "patch", "configuration", "upstream",
                 "support", "missing", "move", "conflict", "config"]
    _tag_value = dict([(x, 0.0) for x in _tag_list])
    if _full_name is None:
        return None
    _fork = ProjectFork.objects(full_name=_full_name).first()
    if _fork is None:
        return None
    for commit in _fork.commit_list:
        # Title matches are weighted 3x over description matches.
        for tag in _tag_list:
            _tag_value[tag] += commit["title"].lower().count(tag) * 3 + commit["description"].lower().count(tag)
    _sorted_tag = [(x, y) for x, y in sorted(_tag_value.items(), key=lambda x: x[1], reverse=True)]
    _sorted_tag = [(x, y) for x, y in filter(lambda x: x[1] > 0, _sorted_tag)]
    return jsonify(result=_sorted_tag[:5])
@main.route('/_get_fork_commit_list', methods=['GET', 'POST'])
def _get_fork_commit_list():
    """AJAX endpoint: the stored commit list of a fork as JSON (None if absent)."""
    _full_name = request.args.get('full_name')
    if _full_name:
        _fork = ProjectFork.objects(full_name=_full_name).first()
        if _fork:
            return jsonify(_fork.commit_list)
    return None
@main.route('/_get_fork_changed_file_list', methods=['GET', 'POST'])
def _get_fork_changed_file_list():
    """AJAX endpoint: JSON list of {link, title} entries for a fork's changed files."""
    _full_name = request.args.get('full_name')
    if _full_name:
        _fork = ProjectFork.objects(full_name=_full_name).first()
        if _fork:
            # TODO(use fullname)
            # If use fork.file_list, the dir is not full.
            _changed_files = ChangedFile.objects(fork_name=_fork.fork_name)
            return jsonify([{'link': changed.diff_link, 'title': changed.file_name}
                            for changed in _changed_files])
    return None
@main.route('/_get_fork_tag', methods=['GET', 'POST'])
@login_required
def _get_fork_tag():
    """Return tag states for a fork as seen by the current user.

    The four default tags always appear first with their on/off status;
    any extra user-defined tags follow, always switched on.
    """
    full_name = request.args.get('full_name')
    if not full_name:
        return None
    fork_tag = ForkTag.objects(fork_full_name=full_name, username=current_user.username).first()
    user_tags = [] if fork_tag is None else fork_tag.tags
    default_tags = ['Configuration', 'New Feature', 'Bug fix', 'Refactoring']
    result = [{'name': tag, 'status': tag in user_tags} for tag in default_tags]
    result.extend({'name': tag, 'status': True}
                  for tag in user_tags if tag not in default_tags)
    return jsonify(result)
@main.route('/graph/<category>/<path:project_name>', methods=['GET', 'POST'])
def graph(category, project_name):
    # Static shell page; the pie-chart data itself is fetched by the client
    # via the /_get_pie_graph_data endpoint below.
    return render_template('graph.html', category=category, project_name=project_name)
@main.route('/_get_pie_graph_data', methods=['GET', 'POST'])
def _get_pie_graph_data():
    # Bucket a project's active forks into histogram bins for the pie chart.
    # `category` selects the metric (commit count, changed LOC, changed files);
    # each value in graph_classify is the list of inclusive upper bounds.
    category = request.args.get('category')
    project_name = request.args.get('project_name')
    graph_classify = {
        'commit': [0, 1, 5, 9, 99],
        'LOC': [0, 9, 99, 999, 9999],
        'file': [0, 1, 3, 9, 99, 999],
    }
    if category not in graph_classify:
        return None
    _fork_list = ProjectFork.objects(project_name=project_name, total_changed_line_number__ne=0)
    bound = graph_classify[category]
    num = len(bound)
    # tot[i] counts forks in bin i; the extra slot is the open-ended "N+" bin.
    tot = [0 for i in range(num + 1)]
    for fork in _fork_list:
        if fork.total_changed_line_number is None:
            continue
        if category == 'commit':
            t = fork.total_commit_number
            # Workaround: some forks have changed lines but a recorded commit
            # count of 0; force them into the top "99+" bin.
            if (fork.total_changed_line_number > 0) and (t == 0):  # commit Bug
                t = 250
        elif category == 'LOC':
            t = fork.total_changed_line_number
        elif category == 'file':
            t = fork.total_changed_file_number
        for i in range(num + 1):
            if i == num:
                tot[i] += 1
            elif t <= bound[i]:
                tot[i] += 1
                break
    # Render human-readable bin labels: "0", "a~b" (or just "b" when the
    # range collapses to one value) and "N+" for the last, open-ended bin.
    result = []
    for i in range(num + 1):
        if i == 0:
            result.append({'type': '0', 'total': tot[i]})
        elif i == num:
            result.append({'type': str(bound[i - 1] + 1) + '+', 'total': tot[i]})
        else:
            result.append(
                {'type': str(bound[i]) if bound[i - 1] + 1 == bound[i] else str(bound[i - 1] + 1) + '~' + str(bound[i]),
                 'total': tot[i]})
    return jsonify(result)
@main.route('/repo_list', methods=['GET', 'POST'])
def repo_list():
    """Render the repository overview table for fully analysed projects."""
    projects = Project.objects(analyser_progress="100%").order_by('-fork_number')
    rows = []
    for project in projects:
        forks = ProjectFork.objects(project_name=project.project_name, file_list__ne=[],
                                    total_changed_line_number__ne=0)
        rows.append([project.project_name, project.fork_number,
                     project.activate_fork_number, forks.count()])
    return render_template('repo_list.html', result=rows)
@main.route('/privacy_policy', methods=['GET', 'POST'])
def privacy_policy():
    # Static page; no data access.
    return render_template('privacy_policy.html')
@main.route('/_search_log', methods=['GET', 'POST'])
def _search_log():
    # Best-effort audit trail of search requests; anonymous visitors are
    # deliberately not recorded.
    # NOTE(review): uses print() rather than the logging module — presumably
    # intentional for this deployment; confirm before changing.
    if current_user.is_authenticated:
        print('Search: ', current_user.username, request.args.get('repo'), request.args.get('col'),
              request.args.get('input'))
    return jsonify(None)
"""
# ---------------------------- use for test ------------------------
@main.route('/admin_email_update')
@login_required
@admin_required
def admin_email_update():
_users = User.objects()
for user in _users:
db_update_email(user.username)
return redirect(url_for('main.admin_manage'))
@main.route('/test', methods=['GET', 'POST'])
def test():
from ..analyse.util import word_extractor
fork_list = ProjectFork.objects()
s = ""
for fork in fork_list:
for commit in fork.commit_list:
s+=commit["title"] + "\n"
s+=commit["description"] + "\n"
return jsonify(word_extractor.get_top_words_from_text(s, 50))
@main.route('/test_send_email', methods=['GET', 'POST'])
def test_send_email():
email_sender = EmailSender('<NAME>', '<EMAIL>', 'Repo Status Update', 'email.html')
email_sender.repo_finish('test_repo')
return 'Finish Send!'
"""
| StarcoderdataPython |
3257816 | <reponame>yifan-you-37/rl_swiss
import joblib
import numpy as np
from numpy.random import choice, randint
from rlkit.data_management.env_replay_buffer import get_dim as gym_get_dim
from rlkit.data_management.simple_replay_buffer import SimpleReplayBuffer
from rlkit.envs.maze_envs.trivial_grid import TrivialGrid
from rlkit.envs.maze_envs.pogrid import PartiallyObservedGrid
from rlkit.envs.maze_envs.mem_map_grid import MemoryGrid
class RandomGridPolicy():
    """Random 4-way grid policy that repeats each action for a random
    number of consecutive steps (1..max_num_consecutive) before redrawing."""

    def __init__(self, max_num_consecutive):
        self.max_num_consecutive = max_num_consecutive
        # Draw order (action first, then repeat budget) is preserved so that
        # behaviour under a seeded RNG is unchanged.
        self.cur_act = randint(4)
        self.num_left = randint(max_num_consecutive) + 1

    def get_action(self, obs, *args):
        """Return the current action, redrawing it once its budget runs out."""
        if self.num_left == 0:
            self.num_left = 1 + randint(self.max_num_consecutive)
            self.cur_act = randint(4)
        self.num_left -= 1
        return self.cur_act

    def reset(self):
        # Deliberately stateless across episodes.
        pass
class ListPolicy():
    """Deterministic policy that cycles through a fixed action sequence."""

    def __init__(self, act_list):
        self.act_list = act_list
        self.ptr = 0

    def get_action(self, obs, *args):
        """Return the next action in the list, wrapping around at the end."""
        action = self.act_list[self.ptr]
        self.ptr += 1
        if self.ptr == len(self.act_list):
            self.ptr = 0
        return action

    def reset(self):
        """Restart the cycle from the first action."""
        self.ptr = 0
def generate_transitions(policy, env, num_timesteps_total, max_steps_per_episode, save_path):
    # Roll out `policy` in `env` for exactly num_timesteps_total steps,
    # resetting the environment every max_steps_per_episode steps, store the
    # transitions in a SimpleReplayBuffer, and dump the raw buffer arrays to
    # `save_path` with joblib.
    buff = SimpleReplayBuffer(
        num_timesteps_total, env.observation_space.shape,
        gym_get_dim(env.action_space), discrete_action_dim=True
    )
    cur_total = 0
    steps_left_in_episode = 0
    while cur_total != num_timesteps_total:
        if steps_left_in_episode == 0:
            # Episode boundary: fixed-length episodes regardless of `done`.
            steps_left_in_episode = max_steps_per_episode
            obs = env.reset()
        act = policy.get_action(obs)
        next_obs, rew, done, _ = env.step(act)
        buff.add_sample(obs, act, rew, done, next_obs)
        obs = next_obs
        cur_total += 1
        steps_left_in_episode -= 1
    save_dict = dict(
        observations=buff._observations,
        actions=buff._actions,
        rewards=buff._rewards,
        terminals=buff._terminals,
        next_observations=buff._next_obs,
    )
    joblib.dump(save_dict, save_path)
    # debug: dump the first 1000 observations as PNGs for visual inspection.
    # NOTE(review): scipy.misc.imsave was removed in modern SciPy releases,
    # and the hard-coded 'junk_vis/tiny' directory must already exist —
    # this debug block will fail outside the original author's setup.
    from scipy.misc import imsave
    actions = buff._actions
    observations = buff._observations
    for i in range(1000):
        a = actions[i]
        obs = observations[i]
        print(a)
        imsave('junk_vis/tiny/mem_grid_{}.png'.format(i), np.transpose(obs, (1,2,0)))
    # for i in range(90, 110):
    #     a = actions[i]
    #     obs = observations[i]
    #     print(a)
    #     imsave('junk_vis/maze_{}.png'.format(i), np.transpose(obs, (1,2,0)))
    # for i in range(70, 90):
    #     a = actions[i]
    #     obs = observations[i]
    #     print(a)
    #     imsave('junk_vis/maze_{}.png'.format(i), np.transpose(obs, (1,2,0)))
    # for i in range(110, 130):
    #     a = actions[i]
    #     obs = observations[i]
    #     print(a)
    #     imsave('junk_vis/maze_{}.png'.format(i), np.transpose(obs, (1,2,0)))
if __name__ == '__main__':
    # Entry point: collect 50k random-policy transitions of 8-step episodes
    # from a partially observed grid maze.  The commented-out configurations
    # below are alternative environments used in earlier experiments.
    # env_specs = {
    #     'flat_repr': False,
    #     'one_hot_repr': False,
    #     'maze_h': 4,
    #     'maze_w': 4,
    #     'scale': 1,
    # }
    # env = TrivialGrid(env_specs)
    # policy = RandomGridPolicy(1)
    # env_specs = {
    #     'flat_repr': False,
    #     'one_hot_repr': False,
    #     'maze_h': 9,
    #     'maze_w': 9,
    #     'obs_h': 5,
    #     'obs_w': 5,
    #     'scale': 4,
    #     'num_objs': 10
    # }
    # act_list = [1, 0, 3, 2]
    # env = MemoryGrid(env_specs)
    # policy = ListPolicy(act_list)
    env_specs = {
        'flat_repr': False,
        'one_hot_repr': False,
        'maze_h': 9,
        'maze_w': 9,
        'obs_h': 5,
        'obs_w': 5,
        'scale': 4,
        'num_objs': 10
    }
    act_list = [1, 0, 3, 2]
    env = PartiallyObservedGrid(env_specs)
    policy = RandomGridPolicy(1)
    # NOTE(review): the output path is machine-specific (cluster mount).
    generate_transitions(policy, env, 50000, 8, '/ais/gobi6/kamyar/oorl_rlkit/maze_trans_data/pogrid_len_8_scale_4')
    # Observation sizes: 32, 128, 512 for 3x3; 32, 512, 2048 for 5x5.
5075898 | <gh_stars>0
from main import no_space
def test_no_space(benchmark):
    # pytest-benchmark fixture: each call both times no_space and checks the
    # result against the expected space-stripped string.
    # NOTE(review): pytest-benchmark normally allows only one benchmark()
    # invocation per test — confirm these five calls actually run.
    assert benchmark(no_space, '8 j 8   mBliB8g  imjB8B8  jl  B') == '8j8mBliB8gimjB8B8jlB'
    assert benchmark(no_space, '8 8 Bi fk8h B 8 BB8B B B  B888 c hl8 BhB fd') == '88Bifk8hB8BB8BBBB888chl8BhBfd'
    assert benchmark(no_space, '8aaaaa dddd r     ') == '8aaaaaddddr'
    assert benchmark(no_space, 'jfBm  gk lf8hg  88lbe8 ') == 'jfBmgklf8hg88lbe8'
    assert benchmark(no_space, '8j aam') == '8jaam'
| StarcoderdataPython |
3374006 | #!/usr/bin/env python
"""
<Program>
test_download.py
<Author>
<NAME>.
<Started>
March 26, 2012.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'download.py'.
NOTE: Make sure test_download.py is ran in 'tuf/tests/' directory.
Otherwise, module that launches simple server would not be found.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
import logging
import os
import random
import subprocess
import time
import unittest
import tuf
import tuf.download as download
import tuf.log
import tuf.unittest_toolbox as unittest_toolbox
import tuf.exceptions
import securesystemslib
import six
logger = logging.getLogger('tuf.test_download')
class TestDownload(unittest_toolbox.Modified_TestCase):
  """Exercise tuf.download's safe/unsafe download paths against a local
  HTTP(S) server spawned as a subprocess."""

  def setUp(self):
    """
    Create a temporary file and launch a simple server in the
    current working directory.
    """
    unittest_toolbox.Modified_TestCase.setUp(self)

    # Making a temporary file.
    current_dir = os.getcwd()
    target_filepath = self.make_temp_data_file(directory=current_dir)
    self.target_fileobj = open(target_filepath, 'r')
    self.target_data = self.target_fileobj.read()
    self.target_data_length = len(self.target_data)

    # Launch a SimpleHTTPServer (serves files in the current dir).
    # A random high port keeps parallel test runs from colliding.
    self.PORT = random.randint(30000, 45000)
    command = ['python', 'simple_server.py', str(self.PORT)]
    self.server_proc = subprocess.Popen(command, stderr=subprocess.PIPE)
    logger.info('\n\tServer process started.')
    logger.info('\tServer process id: '+str(self.server_proc.pid))
    logger.info('\tServing on port: '+str(self.PORT))
    junk, rel_target_filepath = os.path.split(target_filepath)
    self.url = 'http://localhost:'+str(self.PORT)+'/'+rel_target_filepath

    # NOTE: Following error is raised if delay is not applied:
    #    <urlopen error [Errno 111] Connection refused>
    time.sleep(1)

    # Computing hash of target file data.
    m = hashlib.md5()
    m.update(self.target_data.encode('utf-8'))
    digest = m.hexdigest()
    self.target_hash = {'md5':digest}


  # Stop server process and perform clean up.
  def tearDown(self):
    unittest_toolbox.Modified_TestCase.tearDown(self)
    if self.server_proc.returncode is None:
      logger.info('\tServer process '+str(self.server_proc.pid)+' terminated.')
      self.server_proc.kill()
    self.target_fileobj.close()


  # Test: Normal case.
  def test_download_url_to_tempfileobj(self):

    download_file = download.safe_download

    temp_fileobj = download_file(self.url, self.target_data_length)
    self.assertEqual(self.target_data, temp_fileobj.read().decode('utf-8'))
    self.assertEqual(self.target_data_length, len(temp_fileobj.read()))
    temp_fileobj.close_temp_file()


  # Test: Incorrect lengths.
  def test_download_url_to_tempfileobj_and_lengths(self):
    # We do *not* catch 'securesystemslib.exceptions.DownloadLengthMismatchError' in the following two
    # calls because the file at 'self.url' contains enough bytes to satisfy the
    # smaller number of required bytes requested. safe_download() and
    # unsafe_download() will only log a warning when the the server-reported
    # length of the file does not match the required_length.  'updater.py'
    # *does* verify the hashes of downloaded content.
    download.safe_download(self.url, self.target_data_length - 4)
    download.unsafe_download(self.url, self.target_data_length - 4)

    # We catch 'tuf.exceptions.SlowRetrievalError' for both safe_download() and
    # unsafe_download() because they will not download more bytes than
    # requested and the connection eventually hits a slow retrieval error when
    # the server can't satisfy the request (in this case, a length greater
    # than the size of the target file).
    self.assertRaises(tuf.exceptions.SlowRetrievalError, download.safe_download,
                      self.url, self.target_data_length + 1)
    self.assertRaises(tuf.exceptions.SlowRetrievalError, download.unsafe_download,
                      self.url, self.target_data_length + 1)


  def test_download_url_to_tempfileobj_and_performance(self):

    """
    # Measuring performance of 'auto_flush = False' vs. 'auto_flush = True'
    # in download._download_file() during write. No change was observed.
    star_cpu = time.clock()
    star_real = time.time()

    temp_fileobj = download_file(self.url,
                                 self.target_data_length)

    end_cpu = time.clock()
    end_real = time.time()

    self.assertEqual(self.target_data, temp_fileobj.read())
    self.assertEqual(self.target_data_length, len(temp_fileobj.read()))
    temp_fileobj.close_temp_file()

    print "Performance cpu time: "+str(end_cpu - star_cpu)
    print "Performance real time: "+str(end_real - star_real)

    # TODO: [Not urgent] Show the difference by setting write(auto_flush=False)
    """


  # Test: Incorrect/Unreachable URLs.
  def test_download_url_to_tempfileobj_and_urls(self):

    download_file = download.safe_download
    unsafe_download_file = download.unsafe_download

    self.assertRaises(securesystemslib.exceptions.FormatError,
                      download_file, None, self.target_data_length)

    self.assertRaises(securesystemslib.exceptions.FormatError,
                      download_file,
                      self.random_string(), self.target_data_length)

    self.assertRaises(six.moves.urllib.error.HTTPError,
                      download_file,
                      'http://localhost:' + str(self.PORT) + '/' + self.random_string(),
                      self.target_data_length)

    self.assertRaises(six.moves.urllib.error.URLError,
                      download_file,
                      'http://localhost:' + str(self.PORT+1) + '/' + self.random_string(),
                      self.target_data_length)

    # Specify an unsupported URI scheme.
    url_with_unsupported_uri = self.url.replace('http', 'file')
    self.assertRaises(securesystemslib.exceptions.FormatError, download_file, url_with_unsupported_uri,
                      self.target_data_length)
    self.assertRaises(securesystemslib.exceptions.FormatError, unsafe_download_file,
                      url_with_unsupported_uri, self.target_data_length)


  def test__get_opener(self):
    # Test normal case.
    # A simple https server should be used to test the rest of the optional
    # ssl-related functions of 'tuf.download.py'.
    fake_cacert = self.make_temp_data_file()
    with open(fake_cacert, 'wt') as file_object:
      file_object.write('fake cacert')

    tuf.settings.ssl_certificates = fake_cacert
    tuf.download._get_opener('https')
    tuf.settings.ssl_certificates = None


  def test_https_connection(self):
    # Make a temporary file to be served to the client.
    current_directory = os.getcwd()
    target_filepath = self.make_temp_data_file(directory=current_directory)
    target_data = None
    target_data_length = 0
    with open(target_filepath, 'r') as target_file_object:
      target_data = target_file_object.read()
      target_data_length = len(target_data)

    # Launch an https server (serves files in the current dir).
    port = random.randint(30000, 45000)
    command = ['python', 'simple_https_server.py', str(port)]
    https_server_process = subprocess.Popen(command, stderr=subprocess.PIPE)

    # NOTE: Following error is raised if delay is not applied:
    #    <urlopen error [Errno 111] Connection refused>
    time.sleep(1)

    junk, relative_target_filepath = os.path.split(target_filepath)
    https_url = 'https://localhost:' + str(port) + '/' + relative_target_filepath

    # Download the target file using an https connection.
    tuf.settings.ssl_certificates = 'ssl_cert.crt'
    message = 'Downloading target file from https server: ' + https_url
    logger.info(message)
    try:
      download.safe_download(https_url, target_data_length)
      download.unsafe_download(https_url, target_data_length)

    finally:
      # NOTE(review): the bare `https_server_process` expression below is a
      # no-op, and the kill() targets self.server_proc rather than the https
      # server started above — looks like a cleanup bug; confirm upstream.
      https_server_process
      if https_server_process.returncode is None:
        message = \
          'Server process ' + str(https_server_process.pid) + ' terminated.'
        logger.info(message)
        self.server_proc.kill()


  def test__get_content_length(self):
    # An object without a get_content_length-compatible interface yields None.
    content_length = \
      tuf.download._get_content_length({'bad_connection_object': 8})
    self.assertEqual(content_length, None)
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
  unittest.main()
| StarcoderdataPython |
6691841 | # -*- coding: utf-8 -*-
"""Top-level package for Azure IoT Edge Dev Tool."""
# Package metadata consumed by setup tooling.
__author__ = 'Microsoft Corporation'
__email__ = '<EMAIL>'
__version__ = '2.1.2'
# NOTE(review): named "AIkey" — presumably an Application Insights
# telemetry instrumentation key; confirm before rotating/removing.
__AIkey__ = '95b20d64-f54f-4de3-8ad5-165a75a6c6fe'
9641027 | <filename>replay_parser.py
import os
import binascii
import zlib
import struct
import datetime
import functools
import argparse
import json
# Milliseconds of game time represented by one replay frame.
FRAME_TO_MILLIS = 42
def is_zlib_compressed(data):
    """Heuristically detect a zlib stream from its two-byte header.

    0x78 is the common CMF byte (deflate, 32 KiB window); the second byte
    must be one of the standard FLG values for the four compression levels.
    """
    flg = data[1] & 0xFF
    return data[0] == 0x78 and flg in (0x9c, 0x01, 0x5e, 0xda)
def read_int(*, buffer, start_idx):
    """Read a little-endian unsigned integer from up to four bytes of *buffer*."""
    window = buffer[start_idx:start_idx + 4]
    return int.from_bytes(window, byteorder='little')
def check_replay_version(content):
    """Raise unless *content* carries the modern (1.21+) replay magic.

    Replays of version 1.21 and later have the ASCII marker 'seRS' at
    bytes 12-15.
    """
    magic = content[12:12 + 4]
    if magic.decode() != 'seRS':
        raise Exception("Replay File is of unsupported version")
def parse_replay_header(content):
    """Decode the fixed-layout replay header into a metadata dict.

    Returns a dict with keys: ``time_seconds``, ``time_formatted``,
    ``start_time``, ``start_time_ts``, ``map_name`` and ``player_info``.
    The header block starts at byte 32 and may be zlib-compressed.
    """
    headers = {}
    # The first 16 bytes identify the replay format (validated by
    # check_replay_version); bytes 28-31 hold the header length, which is
    # currently unused and therefore not decoded here.
    if is_zlib_compressed(content[32:]):
        # 633 bytes is the compressed size of a 1.21 header block.
        decompressed = zlib.decompress(content[32:32 + 633])
    else:
        decompressed = content[32:]
    # Game duration: frame counter at offset 1; one frame lasts
    # FRAME_TO_MILLIS milliseconds (previously a bare literal 42).
    frame_count = read_int(buffer=decompressed, start_idx=1)
    secs = frame_count * FRAME_TO_MILLIS / 1000
    headers['time_seconds'] = secs
    duration = datetime.timedelta(seconds=secs)
    # Drop the fractional-second part; second precision is enough here.
    duration = str(duration).split('.')[0]
    headers['time_formatted'] = duration
    # Unix timestamp of the match start at offset 8 (rendered in local time).
    start_time = read_int(buffer=decompressed, start_idx=8)
    dt = datetime.datetime.fromtimestamp(start_time)
    headers['start_time'] = str(dt)
    headers['start_time_ts'] = start_time
    # Map name: a NUL-padded field of up to 26 bytes at offset 97.
    map_name = decompressed[97: 97 + 26].strip().strip(b'\x00').decode()
    headers['map_name'] = map_name
    # Player slots: 432 bytes at offset 161 (12 slots of 36 bytes each).
    headers['player_info'] = get_player_data(decompressed[161: 161 + 432])
    return headers
def get_player_data(player_buffer):
    """Decode the 12 fixed-size player slot records (8 players + 4 observers).

    Each slot is a fixed-size record (len(player_buffer) // 12 bytes, i.e.
    36 for the standard 432-byte buffer); the name field is 25 NUL-padded
    bytes at offset 11.

    Bug fix: empty slots previously kept the raw NUL-padded ``bytes`` as
    ``player_name`` (the decode was skipped when all name bytes were zero),
    leaking bytes objects to downstream consumers.  Names are now always
    decoded, so empty slots yield ``''``.
    """
    SLOTS_COUNT = 12  # 8 players + 4 observers
    chunk_size = len(player_buffer) // SLOTS_COUNT
    player_data = []
    for i in range(SLOTS_COUNT):
        chunk = player_buffer[i * chunk_size: (i + 1) * chunk_size]
        player = {
            'slot_id': int.from_bytes(chunk[0:2], byteorder='little'),
            'player_id': chunk[4],
            'player_type': chunk[8],
            'player_race': chunk[9],
            'player_team': chunk[10],
            # Strip the NUL padding; decode() is strict, matching the
            # previous behaviour for non-empty names.
            'player_name': chunk[11: 11 + 25].decode().strip('\x00'),
        }
        player_data.append(player)
    return player_data
def parse(fname):
    """Parse one .rep file; returns {} for unsupported (pre-1.21) replays."""
    with open(fname, 'rb') as replay_file:
        content = replay_file.read()
    try:
        check_replay_version(content)
    except Exception:
        print(f"Warning: Replay file {fname} is of an older version, skipping...")
        return {}
    return parse_replay_header(content)
def batch_parse(replay_root):
    # Walk `replay_root`, parse every .rep file, and build a report:
    # one line per replay (when `print_all`) and one aggregated line per
    # directory ("series") with total duration and the union of player names
    # (when `batch`).
    # NOTE(review): `batch` and `print_all` are module-level globals assigned
    # in the __main__ block below — this function fails with NameError if
    # called before they are set.
    output = []
    for path, _, files in os.walk(replay_root):
        if files:
            series_length = 0
            players_series = set()
            for rep in files:
                if not rep.endswith('.rep'):
                    continue
                fname = os.path.join(path, rep)
                parsed = parse(fname)
                if not parsed:
                    # Parsing failed for any reason, e.g., replay was not 1.21
                    continue
                players = set([ x['player_name'] for x in parsed['player_info'] ])
                if batch:
                    series_length += parsed['time_seconds']
                    players_series = players_series.union(players)
                if print_all:
                    output.append(f'{fname}: {parsed["time_formatted"]} {players}')
            if series_length and batch:
                # Normalise the accumulated seconds into H:MM:SS (no millis).
                series_duration = datetime.timedelta(seconds=series_length)
                series_duration = str(series_duration).split('.')[0]
                output.append(f'{path}: {series_duration} {players_series}')
    return '\n'.join(output)
if __name__ == '__main__':
    # CLI entry point: arguments may come from the command line and/or an
    # optional config.json whose non-empty keys override the defaults.
    parser = argparse.ArgumentParser(description="SC:R replay statistics")
    parser.add_argument('--rep_root', default='.')
    parser.add_argument('--batch', default=True)
    parser.add_argument('--print_all', default=True)
    args = parser.parse_args()
    if os.path.exists(os.path.join('.', 'config.json')):
        with open('config.json') as f:
            config = json.load(f)
        # Re-feed config entries through argparse as "--key value" pairs.
        config = [['--' + k, v] for k, v in config.items() if v and k in args]
        config = functools.reduce(lambda x, y: x + y, config)
        args = parser.parse_args(config, args)
    else:
        print("Config file not found, accepting arguments from cmd line")
    # NOTE(review): `global` at module level is a no-op — the assignments
    # below already create module globals consumed by batch_parse().
    global replay_root, batch, print_all
    replay_root = args.rep_root
    batch = args.batch
    print_all = args.print_all
    output = batch_parse(replay_root)
    print(output)
| StarcoderdataPython |
197589 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-27 17:01
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (1.11.27) — creates the bounce/complaint
    # tracking tables.  Schema changes belong in a new migration, not here.

    dependencies = [
        ('util', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='ComplaintBounceMeta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('timestamp', models.DateTimeField()),
                ('headers', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('feedback_type', models.CharField(blank=True, max_length=50, null=True)),
                ('sub_type', models.CharField(blank=True, max_length=50, null=True)),
                ('destination', django.contrib.postgres.fields.ArrayField(
                    base_field=models.EmailField(max_length=254),
                    blank=True,
                    default=list,
                    null=True,
                    size=None
                )),
                ('bounced_email', models.ForeignKey(
                    on_delete=django.db.models.deletion.PROTECT,
                    to='util.BouncedEmail'
                )),
            ],
        ),
        migrations.CreateModel(
            name='PermanentBounceMeta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('timestamp', models.DateTimeField()),
                ('sub_type', models.CharField(
                    choices=[
                        ('General', 'General'),
                        ('Suppressed', 'Suppressed'),
                        ('Undetermined', 'Undetermined')
                    ], max_length=20)),
                ('headers', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('reason', models.TextField(blank=True, null=True)),
                ('destination', django.contrib.postgres.fields.ArrayField(
                    base_field=models.EmailField(max_length=254),
                    blank=True,
                    default=list,
                    null=True,
                    size=None
                )),
                ('bounced_email', models.ForeignKey(
                    on_delete=django.db.models.deletion.PROTECT,
                    to='util.BouncedEmail'
                )),
            ],
        ),
        migrations.CreateModel(
            name='TransientBounceEmail',
            fields=[
                ('id', models.AutoField(
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    verbose_name='ID'
                )),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('email', models.EmailField(db_index=True, max_length=254)),
                ('timestamp', models.DateTimeField(db_index=True)),
                ('headers', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
            ],
        ),
    ]
| StarcoderdataPython |
5136347 | @login_required
def profile(request, id):
    # Render the directory profile page for one entity, together with its
    # e-mail addresses and the full entity/location lists used to populate
    # the department / reports-to / location selectors.
    # NOTE(review): `id` shadows the builtin but is kept to match the URLconf.
    entity = directory.models.Entity.objects.get(pk = id)
    emails = directory.models.EntityEmail.objects.filter(entity__exact =
        id).all()
    all_entities = directory.models.Entity.objects.all()
    all_locations = directory.models.Location.objects.all()
    return HttpResponse(get_template(u'profile.html').render(Context(
        {
            u'entity': entity,
            u'emails': emails,
            u'departments': all_entities,
            u'reports_to_candidates': all_entities,
            u'locations': all_locations,
        })))
| StarcoderdataPython |
4849692 | <gh_stars>1-10
"""[summary]
Returns:
[type]: [description]
"""
import os
import pickle
import random
from math import log
from collections import defaultdict
from src.countminsketch import CountMinSketch
from src.feature_generator import FeatureGenerator
from src.utils import add_lap
from src.sketch_heap import SketchHeap
class Federation:
"""[summary]
"""
def __init__(self, queries=None, docs=None):
"""[summary]
Args:
queries ([type], optional): [description]. Defaults to None.
docs ([type], optional): [description]. Defaults to None.
"""
self.queries = queries
self.docs = docs
if docs:
self.doc_num = len(self.docs)
else:
self.doc_num = 0
if queries:
self.query_num = len(self.queries)
else:
self.query_num = 0
self.sketches = []
self.all_sketches = None
self.invert_table = None
if self.docs:
self.dl = [[len(each[0]), len(each[1])] for each in self.docs]
else:
self.dl = [[0, 0]]
if self.docs:
len0 = 0
len1 = 1
for each in self.docs:
len0 += len(each[0])
len1 += len(each[1])
self.doc_avg_len = [len0 / self.doc_num, len1 / self.doc_num]
else:
self.doc_avg_len = [0, 0]
# caution!! len[0] is title len[1] is body
self.extended_cases = dict()
def save_fed(self, dst_path, fed_name):
"""[summary]
Args:
dst_path ([type]): [description]
fed_name ([type]): [description]
"""
dir_name = os.path.join(dst_path, fed_name)
print(dir_name)
if os.path.exists(dir_name):
os.remove(dir_name)
with open(dir_name, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load_fed(src_path, fed_name):
"""[summary]
Args:
src_path ([type]): [description]
fed_name ([type]): [description]
Returns:
[type]: [description]
"""
print(os.path.join(src_path, fed_name))
f = open(os.path.join(src_path, fed_name), 'rb')
a = pickle.load(f)
f.close()
return a
    def build_sketch(self, d=10, m=120):
        """Build Count-Min sketches over every document.

        Args:
            d: sketch depth (number of hash rows).
            m: sketch width (columns per row); the body invert table gets
                1.7x the width since body vocabularies are larger.

        Populates self.sketches ([body, title] per document),
        self.all_sketches (corpus-wide) and self.invert_table (document
        frequencies, for IDF).
        NOTE(review): sketches are ordered [body, title] while self.dl is
        [title, body] — downstream code mixing the two (get_most_rel)
        should be double-checked.
        """
        self.d = d
        self.m = m
        all_body_sketch = CountMinSketch(d=d, m=m)
        all_title_sketch = CountMinSketch(d=d, m=m)
        self.invert_table = [
            CountMinSketch(
                d=d,
                m=int(
                    1.7 * m)),
            CountMinSketch(
                d=d,
                m=m)]
        cnt = 0
        term_inc = dict()
        # print(self.doc_num, len(self.docs))
        for doc in self.docs:
            # Progress indicator, once per 100 documents.
            if cnt % 100 == 0:
                print(cnt)
            cnt += 1
            body_sketch = CountMinSketch(d=d, m=m)
            title_sketch = CountMinSketch(d=d, m=m)
            # title, body, a = doc
            title, body, _ = doc
            for term in body:
                body_sketch.add(term)
                all_body_sketch.add(term)
            for term in title:
                title_sketch.add(term)
                all_title_sketch.add(term)
            self.sketches.append([body_sketch, title_sketch])
            # build invert table for idf — document frequency, hence the
            # deduplication via set() below.
            body = set(body)
            title = set(title)
            for each in body:
                if term_inc.get(each, 0) == 0:
                    term_inc[each] = 0
                term_inc[each] += 1
                # Sanity check: a document frequency can never exceed the
                # number of documents seen so far.
                if term_inc[each] > cnt:
                    print(term_inc[each], cnt)
                # self.invert_table[0].add(each)
            for each in title:
                self.invert_table[1].add(each)
        self.all_sketches = [all_body_sketch, all_title_sketch]
        for each in term_inc:
            self.invert_table[0].add(each, value=term_inc[each])
        # if self.invert_table[0].query(each) > term_inc[each]:
        #     print(self.invert_table[0].query(each), term_inc[each], self.doc_num)
    def build_sketch_heap(self, a=1, d=50, w=200, k=150):
        """Build the per-document sketch-heap index over document bodies.

        Args:
            a: SketchHeap `a` parameter (passed through as-is).
            d: sketch depth.
            w: sketch width.
            k: heap capacity (top-k entries kept).

        Only document bodies are indexed; titles are ignored here.
        """
        self.sketch_heap = SketchHeap(a, k, d, w)
        for i in range(len(self.docs)):
            # title, body, score = self.docs[i]
            _, body, _ = self.docs[i]
            for word in body:
                # print(word)
                self.sketch_heap.push_in_dict(word)
            # Flush the accumulated counts of document i into the heap,
            # then reset the scratch dict for the next document.
            self.sketch_heap.build_sketch_heap(i)
            self.sketch_heap.clear_dict()
            if i % 100 == 0:
                print(i)
    def build_invert_table(self):
        """Build only the IDF structures (invert table + corpus sketches).

        Lighter-weight variant of build_sketch(): per-document sketches are
        skipped.  NOTE(review): sketch dimensions are hard-coded here
        (d=10, m=60), unlike build_sketch()'s defaults (d=10, m=120) —
        confirm this asymmetry is intentional.
        """
        # build invert table for idf
        d = 10
        m = 60
        self.d = d
        self.m = m
        all_body_sketch = CountMinSketch(d=d, m=m)
        all_title_sketch = CountMinSketch(d=d, m=m)
        self.invert_table = [
            CountMinSketch(
                d=d,
                m=int(
                    1.7 * m)),
            CountMinSketch(
                d=d,
                m=m)]
        cnt = 0
        term_inc = dict()
        # print(self.doc_num, len(self.docs))
        for doc in self.docs:
            if cnt % 100 == 0:
                print(cnt)
            cnt += 1
            # title, body, a = doc
            title, body, _ = doc
            for term in body:
                all_body_sketch.add(term)
            for term in title:
                all_title_sketch.add(term)
            # build invert table for idf — document frequency, so terms are
            # deduplicated per document first.
            body = set(body)
            title = set(title)
            for each in body:
                if term_inc.get(each, 0) == 0:
                    term_inc[each] = 0
                term_inc[each] += 1
            for each in title:
                self.invert_table[1].add(each)
        self.all_sketches = [all_body_sketch, all_title_sketch]
        for each in term_inc:
            self.invert_table[0].add(each, value=term_inc[each])
def get_hashed_query(self, d=10, m=120):
"""[summary]
Args:
d (int, optional): [description]. Defaults to 10.
m (int, optional): [description]. Defaults to 120.
Returns:
[type]: [description]
"""
hashed_queries = []
# hasher = CountMinSketch(d=self.d, m=self.m)
hasher = CountMinSketch(d=d, m=m)
for each in self.queries:
query_hashed = []
for term in each:
query_hashed.append(hasher.hash2(term))
hashed_queries.append(query_hashed)
return hashed_queries
def get_hashed_queries_sh(self):
"""[summary]
Returns:
[type]: [description]
"""
return [[self.sketch_heap.hash(term) for term in each]
for each in self.queries]
    def extend_cases(self, q_id, cases):
        """Record extended result cases for one query.

        Args:
            q_id: query identifier, used as the dictionary key.
            cases: the cases to store; replaces any previous entry for q_id.
        """
        self.extended_cases[q_id] = cases
def build_cnt_dics(self):
"""[summary]
"""
dics = []
all_body_dic = dict()
all_title_dic = dict()
invert_body_dic = dict()
invert_title_dic = dict()
for one_doc in self.docs:
# title, body, a = one_doc
title, body, _ = one_doc
body_dic = dict()
for term in body:
if body_dic.get(term, 0) == 0:
body_dic[term] = 0
if invert_body_dic.get(term, 0) == 0:
invert_body_dic[term] = 0
invert_body_dic[term] += 1
body_dic[term] += 1
if all_body_dic.get(term, 0) == 0:
all_body_dic[term] = 0
all_body_dic[term] += 1
title_dic = dict()
for term in title:
if title_dic.get(term, 0) == 0:
title_dic[term] = 0
if invert_title_dic.get(term, 0) == 0:
invert_title_dic[term] = 0
invert_title_dic[term] += 1
title_dic[term] += 1
if all_title_dic.get(term, 0) == 0:
all_title_dic[term] = 0
all_title_dic[term] += 1
dics.append([body_dic, title_dic])
self.dics = dics
self.all_dics = [all_body_dic, all_title_dic]
self.invert_dic = [invert_body_dic, invert_title_dic]
    def get_most_rel(self, query_hashed, eps=-1, min_median='min',
                     k1=1.2, b=0.75, lamb=0.1, mu=1000, delta=0.7, with_sketch=True):
        """Rank documents for a hashed query and return feature vectors.

        Args:
            query_hashed: list of pre-hashed query terms.
            eps: Laplace-noise budget for differential privacy; any value
                <= -0.1 disables noising.
            min_median: 'min' for classic CM-sketch min estimates, anything
                else for median estimates.
            k1, b: BM25 parameters.
            lamb: Jelinek-Mercer smoothing weight.
            mu: Dirichlet smoothing prior.
            delta: absolute-discounting parameter.
            with_sketch: use sketches (True) or the exact dicts built by
                build_cnt_dics() (False).

        Returns:
            For each of the (up to 100) top-BM25 candidates, a feature vector
            [dl(2), tf(2), idf(2), tf-idf(2), bm25(2), JM(2), Dirichlet(2),
            AbsDiscount(2)].

        NOTE(review): self.sketches/self.dics are ordered [body, title] while
        self.dl/self.doc_avg_len are [title, body]; the index pairing below
        mixes the two — confirm whether this is intentional upstream.
        """
        # cal idf: average document frequency of the query terms per field.
        include = [0, 0]
        for term_hashed in query_hashed:
            if with_sketch:
                if min_median == 'min':
                    include[0] += self.invert_table[0].query_hash(term_hashed)
                    include[1] += self.invert_table[1].query_hash(term_hashed)
                else:
                    include[0] += self.invert_table[0].query_hash_median(
                        term_hashed)
                    include[1] += self.invert_table[1].query_hash_median(
                        term_hashed)
            else:
                include[0] += self.invert_dic[0].get(term_hashed, 0)
                include[1] += self.invert_dic[1].get(term_hashed, 0)
            if eps > -0.1:
                include[0] = add_lap(include[0], eps)
                include[1] = add_lap(include[1], eps)
        if bool(query_hashed):
            include = [include[0] / len(query_hashed), include[1] / len(query_hashed)]
        # print(self.doc_num, include[0], include[1])
        # BM25-style IDF with the usual +0.5 smoothing, clamped at zero.
        idf = [log((max(0, self.doc_num - include[0]) + 0.5) / (include[0] + 0.5), 2),
               log((max(0, self.doc_num - include[1]) + 0.5) / (include[1] + 0.5), 2)]
        # cal tf, tf_idf and bm25s for every document.
        tfs = []
        tf_idfs = []
        bm25s = []
        for d_id in range(self.doc_num):
            tf = [0, 0]
            # Third element carries the document id through the later sort.
            bm25 = [0, 0, d_id]
            for term_hashed in query_hashed:
                if with_sketch:
                    if min_median == 'min':
                        local_a = self.sketches[d_id][0].query_hash(
                            term_hashed)
                        local_b = self.sketches[d_id][1].query_hash(
                            term_hashed)
                    else:
                        local_a = self.sketches[d_id][0].query_hash_median(
                            term_hashed)
                        local_b = self.sketches[d_id][1].query_hash_median(
                            term_hashed)
                else:
                    local_a = self.dics[d_id][0].get(term_hashed, 0)
                    local_b = self.dics[d_id][1].get(term_hashed, 0)
                # BM25 length normalisation terms per field.
                local_c = k1 * \
                    (1 - b + b * self.dl[d_id][0] / self.doc_avg_len[0])
                local_d = k1 * \
                    (1 - b + b * self.dl[d_id][1] / self.doc_avg_len[1])
                if eps > -0.1:
                    local_a = add_lap(local_a, eps)
                    local_b = add_lap(local_b, eps)
                local_a /= self.dl[d_id][0]
                local_b /= self.dl[d_id][1]
                tf[0] += local_a
                tf[1] += local_b
                bm25[0] += idf[0] * local_a * (k1 + 1) / (local_a + local_c)
                bm25[1] += idf[1] * local_b * (k1 + 1) / (local_b + local_d)
            tfs.append(tf)
            tf_idfs.append([tfs[d_id][0] * idf[0], tfs[d_id][1] * idf[1]])
            bm25s.append(bm25)
        # get most relevant based on bm25: lexicographic sort on the score
        # pair, then keep the top-100 document ids.
        bm25s.sort(reverse=True)
        cands = [bm25s[x][2] for x in range(min(100, len(bm25s)))]
        features = []
        for k in range(len(cands)):
            d_id = cands[k]
            # Language-model scores per field: Jelinek-Mercer, Dirichlet and
            # absolute discounting (negative log-likelihoods).
            jm = [0, 0]
            dirs = [0, 0]
            ab = [0, 0]
            for term_hashed in query_hashed:
                for i in range(2):
                    if with_sketch:
                        if min_median == 'min':
                            local_a = self.sketches[d_id][i].query_hash(
                                term_hashed)
                            local_b = self.all_sketches[i].query_hash(
                                term_hashed)
                        else:
                            local_a = self.sketches[d_id][i].query_hash_median(
                                term_hashed)
                            local_b = self.all_sketches[i].query_hash_median(
                                term_hashed)
                    else:
                        local_a = self.dics[d_id][i].get(term_hashed, 0)
                        local_b = self.all_dics[i].get(term_hashed, 0)
                    if eps > -0.1:
                        local_a = add_lap(local_a, eps)
                        local_b = add_lap(local_b, eps)
                    local_a /= self.dl[d_id][i]
                    local_b /= (self.doc_avg_len[i] * self.doc_num)
                    denom = (1 - lamb) * local_a + lamb * local_b
                    if denom > 0:
                        jm[i] -= log(denom)
                    denom = (local_a * self.dl[d_id][i] +
                             mu * local_b) / (self.dl[d_id][i] + mu)
                    if denom > 0:
                        dirs[i] -= log(denom)
                    if local_b > 0:
                        ab[i] -= log(max(local_a *
                                         self.dl[d_id][i] -
                                         delta, 0) /
                                     self.dl[d_id][i] +
                                     delta *
                                     0.8 *
                                     local_b)
            # exit(0)
            vector = self.dl[d_id] + tfs[d_id] + idf + \
                tf_idfs[d_id] + bm25s[k][:2] + jm + dirs + ab
            features.append(vector)
        return features
def get_most_rel_sh(self, query, idf_dict, avg_idf,
k1=1.2, b=0.75, lamb=0.1, mu=1000, delta=0.7):
"""[summary]
Args:
query ([type]): [description]
idf_dict ([type]): [description]
avg_idf ([type]): [description]
k1 (float, optional): [description]. Defaults to 1.2.
b (float, optional): [description]. Defaults to 0.75.
lamb (float, optional): [description]. Defaults to 0.1.
mu (int, optional): [description]. Defaults to 1000.
delta (float, optional): [description]. Defaults to 0.7.
Returns:
[type]: [description]
"""
# cal idf
idf = sum(
[idf_dict[word] if word in idf_dict else avg_idf for word in query]) / len(query)
# cal tf, tf_idf and bm25s
# top three words of the query according to idf
idf_word = [(idf_dict[term] if term in idf_dict else avg_idf, term)
for term in query]
idf_word.sort(reverse=True)
cand_terms = [idf_word[i][1] for i in range(min(len(query), 3))]
doc_cnts = defaultdict(lambda: 0)
for cand_term in cand_terms:
cnt_terms = self.sketch_heap.query(cand_term)
for cnt_term in cnt_terms:
doc_cnts[cnt_term[1]] += cnt_term[0]
avg_cnt_docs = [(doc_cnts[key] / len(cand_terms), key)
for key in doc_cnts]
avg_cnt_docs.sort(reverse=True)
cand_docs = [avg_cnt_docs[i][1]
for i in range(min(len(avg_cnt_docs), 150))]
tfs = {cand_docs[i]: avg_cnt_docs[i][0] / self.dl[cand_docs[i]][1] for i in range(len(cand_docs))}
features = []
for cand_doc in cand_docs:
tf = tfs[cand_doc]
tf_idf = tf * idf
local_c = k1 * \
(1 - b + b * self.dl[cand_doc][0] / self.doc_avg_len[0])
bm25 = tf_idf * (k1 + 1) / (tf + local_c)
jm = 0
dirs = 0
ab = 0
for cand_term in cand_terms:
try:
local_b = self.all_dics[0][cand_term]
except IndexError:
local_b = 0
local_b /= (self.doc_avg_len[0] * self.doc_num)
denom = (1 - lamb) * tf + lamb * local_b
if denom > 0:
jm -= log(denom)
denom = (tf * self.dl[cand_doc][0] + mu *
local_b) / (self.dl[cand_doc][0] + mu)
if denom > 0:
dirs -= log(denom)
if local_b > 0:
ab -= log(max(tf *
self.dl[cand_doc][0] -
delta, 0) /
self.dl[cand_doc][0] +
delta *
0.8 *
local_b)
# exit(0)
features.append([self.dl[cand_doc][0], tf, idf, tf_idf, bm25, jm, dirs, ab])
return features
def gen(self, dst_path):
"""[summary]
Args:
dst_path ([type]): [description]
"""
print("gen feature...")
local_docs = []
query_docs = dict()
query_docs_non_rel = dict()
for i in range(self.doc_num):
doc = self.docs[i]
if len(doc[-1]) > 2:
continue
q_id, score = doc[-1]
doc = doc[:-1]
if q_id != -1:
local_docs.append(doc)
if score > 0:
if not query_docs.get(q_id, None):
query_docs[q_id] = set()
query_docs[q_id].add((i, score))
elif score == 0:
if not query_docs_non_rel.get(q_id, None):
query_docs_non_rel[q_id] = set()
query_docs_non_rel[q_id].add(i)
fg = FeatureGenerator(local_docs, self.queries)
buffer_strs = []
for i in range(self.query_num):
if not query_docs.get(i, None):
continue
for d, score in query_docs[i]:
# print("query_docs:", self.query_docs[i], "len", self.docs[d])
line = str(score) + " qid:" + str(i) + ' ' + \
fg.gen_feature(i, d) + ' # rel\n'
buffer_strs.append(line)
for d in query_docs_non_rel[i]:
line = "0 qid:" + str(i) + ' ' + \
fg.gen_feature(i, d) + ' # non\n'
buffer_strs.append(line)
if self.extended_cases.get(i, None):
for vector_score in self.extended_cases[i]:
vector = vector_score[:-1]
score = vector_score[-1]
line = str(score) + " qid:" + str(i) + ' '
for k in range(len(vector)):
line += (str(k) + ':' + str(vector[k]) + ' ')
line += " # ext\n"
buffer_strs.append(line)
if len(buffer_strs) > 100:
print('push to file')
f = open(dst_path, 'a')
for each in buffer_strs:
f.write(each)
f.close()
buffer_strs.clear()
if bool(buffer_strs):
print('push to file')
f = open(dst_path, 'a')
for each in buffer_strs:
f.write(each)
f.close()
buffer_strs.clear()
def contribute(self, number=1000):
"""[summary]
Args:
number (int, optional): [description]. Defaults to 1000.
Returns:
[type]: [description]
"""
random.seed(0)
indices = random.sample([i for i in range(len(self.docs))], number)
sub_docs = []
sub_sketches = []
print(indices)
for i in indices:
sub_docs.append(self.docs[i])
sub_sketches.append(self.sketches[i])
return sub_docs, sub_sketches
| StarcoderdataPython |
3578281 | <filename>pydocx/openxml/drawing/blip.py
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.models import XmlModel, XmlAttribute
class Blip(XmlModel):
    """Declarative model for the OOXML drawing ``blip`` element.

    Maps the ``embed`` and ``link`` XML attributes (picture relationship
    ids) to Python attributes.
    """
    XML_TAG = 'blip'
    embedded_picture_id = XmlAttribute(name='embed')  # id of embedded picture data
    linked_picture_id = XmlAttribute(name='link')  # id of externally linked picture
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
from korean_lunar_calendar import __version__
# Directory containing this setup.py.
here = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='korean_lunar_calendar',
    version=__version__,
    description='Korean Lunar Calendar',
    long_description=long_description,
    # Ship all packages except test packages.
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    url='https://github.com/usingsky/korean_lunar_calendar_py',
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    keywords=['calendar', 'korean', 'lunar'],
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ]
)
| StarcoderdataPython |
from flask import Flask, request, make_response, Response
import os
import logging
import json
from slackclient import SlackClient
from botflow.engine_slack import SlackSocketEngine
from examples.math_controller import MathController
# Bot token is read from the environment (may be None if unset).
SLACK_BOT_TOKEN = os.environ.get('SLACK_BOT_TOKEN')
# Engine routing Slack events to the math controller.
slack_bot = SlackSocketEngine(MathController(), SLACK_BOT_TOKEN)
app = Flask(__name__)
@app.route("/", methods=["POST"])
def message_actions():
# registration
try:
req = request.json
print(req)
if req is not None and 'challenge' in req:
return make_response(request.json['challenge'], 200)
if request.form is not None and 'payload' in request.form:
# handle action
slack_bot.process_message(json.loads(request.form['payload']))
elif 'event' in req:
# standard message
slack_bot.process_message(req)
else:
raise Exception('Unknown message %s' % request)
# if 'actions' in req:
# # req["actions"][0]["value"]
# # req["channel"]["id"]
# pass
# elif 'event' in req:
# # req['event']['channel']
# # req['event']['text']
# pass
return make_response("", 200)
except Exception as e:
logging.exception(e)
return make_response(str(e), 500)
@app.route('/', methods=['GET'])
def home():
    """Simple health-check endpoint confirming the server is up."""
    body = 'It works!'
    return Response(body)
# Send a Slack message on load. This needs to be _before_ the Flask server is started
# A Dictionary of message attachment options
# Interactive-message attachment: two buttons routed back via callback_id.
attachments_json = [
    {
        "fallback": "Upgrade your Slack client to use messages like these.",
        "color": "#3AA3E3",
        "attachment_type": "default",
        "callback_id": "menu_options_2319",
        "actions": [
            {
                "name": "bev_list",
                "text": "One",
                "value": "One",
                "type": "button"
            },
            {
                "name": "bev_list",
                "text": "Two",
                "value": "Two",
                "type": "button"
            }
        ]
    }
]
# Send a message with the above attachment, asking the user if they want coffee
# slack_client.api_call(
#     "chat.postMessage",
#     channel="#onixbot-test",
#     text="Would you like some coffee? :coffee:",
#     attachments=attachments_json
# )
if __name__ == "__main__":
app.run() | StarcoderdataPython |
import numpy as np
class Base_Automaton(object):
    """Linear learning automaton over ``count`` cells.

    Maintains a probability vector over the cells; ``reward`` concentrates
    mass on a cell, ``punish`` redistributes its mass to the others, and
    ``cool`` shrinks both learning rates.
    """

    def __init__(self, count, reward=0.10, punish=0.001):
        self.count = count
        self.__reward = reward
        self.__punish = punish
        # Start from the uniform distribution.
        self.cells = np.ones(self.count) / self.count

    def cell(self):
        """Sample a cell index according to the current probabilities."""
        return np.random.choice(self.count, p=self.cells)

    def cool(self, cooling_factor):
        """Scale both learning rates by ``cooling_factor``."""
        self.__reward *= cooling_factor
        self.__punish *= cooling_factor

    def reward(self, cell):
        """Shift probability mass toward ``cell`` (linear reward scheme)."""
        boosted = self.cells[cell] + self.__reward * (1 - self.cells[cell])
        self.cells = self.cells * (1 - self.__reward)
        self.cells[cell] = boosted
        # Renormalise so the vector still sums to exactly 1.
        self.cells[0] = 1 - sum(self.cells[1:])

    def punish(self, cell):
        """Shift probability mass away from ``cell`` (linear penalty scheme)."""
        decayed = self.cells * (1 - self.__punish)
        self.cells = decayed + self.__punish / (self.count - 1)
        self.cells[cell] = decayed[cell]
        # Renormalise so the vector still sums to exactly 1.
        self.cells[0] = 1 - sum(self.cells[1:])

    def summarise(self):
        """Print the probabilities scaled so a uniform cell shows as 100."""
        print(np.array2string(self.cells * 100 * self.count, precision=2, suppress_small=True, max_line_width=180), end=' ')
| StarcoderdataPython |
4983444 | <reponame>nizaevka/mlshell
"""Configuration example.
Create pipeline (sgd) and optimize hp_grid:
* target transformer on/off.
* polynomial degree 1/2.
"""
import lightgbm
import mlshell
import pycnfg
import sklearn
# Target transformer candidate (toggled on/off via 'estimate__transformer').
target_transformer = sklearn.preprocessing.PowerTransformer(
    method='yeo-johnson', standardize=True, copy=True)
# Optimization hp ranges.
hp_grid = {
    # 'process_parallel__pipeline_numeric__transform_normal__skip': [False],
    # 'process_parallel__pipeline_numeric__scale_column_wise__quantile_range': [(1, 99)],
    'process_parallel__pipeline_numeric__add_polynomial__degree': [1, 2],
    'estimate__transformer': [None, target_transformer],
    # sgd
    # 'estimate__regressor__alpha': np.logspace(-2, -1, 10),
    # 'estimate__regressor__l1_ratio': np.linspace(0.1, 1, 10),
}
CNFG = {
    # Path section - specify project directory.
    'path': {
        'default': {
            'priority': 1,
            'init': pycnfg.find_path,
            'producer': pycnfg.Producer,
        }
    },
    # Logger section - create logger.
    'logger': {
        'default': {
            'priority': 2,
            'init': 'default',
            'producer': mlshell.LoggerProducer,
            'steps': [
                ('make',),
            ],
        }
    },
    # Pipeline section - specify pipelines creation/loading.
    'pipeline': {
        'sgd': {
            'priority': 3,
            'init': mlshell.Pipeline,
            'producer': mlshell.PipelineProducer,
            'steps': [
                ('make', {
                    'estimator_type': 'regressor',
                    'estimator': sklearn.linear_model.SGDRegressor(
                        penalty='elasticnet', l1_ratio=1, shuffle=False,
                        max_iter=1000, alpha=0.02, random_state=42),
                }),
            ],
        },
        # NOTE: 'lgbm' is defined but not referenced by the workflow below.
        'lgbm': {
            'priority': 3,
            'init': mlshell.Pipeline,
            'producer': mlshell.PipelineProducer,
            'steps': [
                ('make', {
                    'estimator_type': 'regressor',
                    'estimator': lightgbm.LGBMRegressor(
                        num_leaves=2, min_data_in_leaf=60,
                        n_estimators=200, max_depth=-1, random_state=42),
                }),
            ],
        }
    },
    # Metric section - specify metric creation/loading.
    'metric': {
        'r2': {
            'priority': 4,
            'init': mlshell.Metric,
            'producer': mlshell.MetricProducer,
            'steps': [
                ('make', {
                    'score_func': sklearn.metrics.r2_score,
                    'greater_is_better': True,
                }),
            ],
        },
        'mse': {
            'priority': 4,
            'init': mlshell.Metric,
            'producer': mlshell.MetricProducer,
            'steps': [
                ('make', {
                    'score_func': sklearn.metrics.mean_squared_error,
                    'greater_is_better': False,
                    'squared': False
                }),
            ],
        },
    },
    # Dataset section - specify dataset loading/preprocessing/splitting.
    'dataset': {
        'train': {
            'priority': 5,
            'init': mlshell.Dataset,
            'producer': mlshell.DatasetProducer,
            'steps': [
                ('load', {'filepath': './data/train.csv'}),
                ('info',),
                ('preprocess', {'targets_names': ['wage'],
                                'categor_names': ['union', 'goodhlth',
                                                  'black', 'female',
                                                  'married', 'service']}),
                ('split', {'train_size': 0.75, 'shuffle': False, }),
            ],
        },
        'test': {
            'priority': 5,
            'init': mlshell.Dataset,
            'producer': mlshell.DatasetProducer,
            'steps': [
                ('load', {'filepath': 'data/test.csv'}),
                ('info',),
                ('preprocess', {'categor_names': ['union', 'goodhlth',
                                                  'black', 'female',
                                                  'married', 'service'],
                                'targets_names': ['wage']}),
            ],
        },
    },
    # Workflow section - fit/predict pipelines on datasets, optimize/validate
    # metrics.
    'workflow': {
        'conf': {
            'priority': 6,
            'init': {},
            'producer': mlshell.Workflow,
            'steps': [
                # Train 'sgd' pipeline on 'train' subset of 'train' dataset
                # with zero position hp from 'hp_grid'.
                ('fit', {
                    'pipeline_id': 'pipeline__sgd',
                    'dataset_id': 'dataset__train',
                    'subset_id': 'train',
                    'hp': hp_grid,
                }),
                # Validate 'sgd' pipeline on 'train' and 'test' subsets of
                # 'train' dataset with 'r2' scorer (after fit).
                ('validate', {
                    'pipeline_id': 'pipeline__sgd',
                    'dataset_id': 'dataset__train',
                    'subset_id': ['train', 'test'],
                    'metric_id': ['metric__r2', 'metric__mse'],
                }),
                # Optimize 'sgd' pipeline on 'train' subset of 'train' dataset
                # on hp combinations from 'hp_grid'. Score and refit on 'r2'
                # scorer.
                ('optimize', {
                    'pipeline_id': 'pipeline__sgd',
                    'dataset_id': 'dataset__train',
                    'subset_id': 'train',
                    'metric_id': ['metric__r2', 'metric__mse'],
                    'hp_grid': hp_grid,
                    'gs_params': {
                        'n_iter': None,
                        'n_jobs': 1,
                        'refit': 'metric__r2',
                        'cv': sklearn.model_selection.KFold(n_splits=3,
                                                            shuffle=True,
                                                            random_state=42),
                        'verbose': 1,
                        'pre_dispatch': 'n_jobs',
                        'return_train_score': True,
                    },
                }),
                # Validate 'sgd' pipeline on 'train' and 'test' subsets of
                # 'train' dataset with 'r2' scorer (after optimization).
                ('validate', {
                    'pipeline_id': 'pipeline__sgd',
                    'dataset_id': 'dataset__train',
                    'subset_id': ['train', 'test'],
                    'metric_id': ['metric__r2', 'metric__mse'],
                }),
                # Predict with 'sgd' pipeline on whole 'test' dataset.
                ('predict', {
                    'pipeline_id': 'pipeline__sgd',
                    'dataset_id': 'dataset__test',
                    'subset_id': '',
                }),
                # Dump 'sgd' pipeline on disk.
                ('dump', {'pipeline_id': 'pipeline__sgd',
                          'dirpath': None}),
            ],
        },
    },
}
if __name__ == '__main__':
    # Execute the configuration; dcnfg={} suppresses the default config.
    objects = pycnfg.run(CNFG, dcnfg={})
| StarcoderdataPython |
1671939 | """Test the respond handler."""
import pytest
from cactusbot.api import CactusAPI
from cactusbot.handlers import ResponseHandler
from cactusbot.packets import Packet, MessagePacket
response_handler = ResponseHandler()
@pytest.mark.asyncio
async def test_user_update():
    """on_username_update should store the new username on the handler."""
    packet = Packet(username="TestUser")
    await response_handler.on_username_update(packet)
    assert response_handler.username == "TestUser"
@pytest.mark.asyncio
async def test_on_message():
    """on_message should return StopIteration for an unhandled command."""
    packet = MessagePacket("!testing", user="TestUser")
    result = await response_handler.on_message(packet)
    assert result == StopIteration
| StarcoderdataPython |
8098956 | <reponame>htlcnn/ironpython-stubs
class dotDeformingData_t(object):
    """Auto-generated IronPython stub for the ``dotDeformingData_t`` type.

    Attribute values are placeholders (``None``); the stub exists only for
    IDE completion, not for runtime use.
    """
    Angle=None
    Angle2=None
    Cambering=None
    Shortening=None
| StarcoderdataPython |
import hashlib
import json
import os.path
import platform
import shlex
import shutil
import stat
import subprocess
import sys
import urllib.request
from dataclasses import dataclass
from logging import getLogger
from typing import Dict, List, Optional, Set, Tuple
from urllib.request import urlopen
import filelock
from filelock import BaseFileLock, FileLock
from pipkin.adapters import Adapter
from pipkin.common import UserError
from pipkin.proxy import start_proxy
from pipkin.util import (
get_base_executable,
get_user_cache_dir,
get_venv_executable,
get_venv_site_packages_path,
parse_meta_dir_name,
)
logger = getLogger(__name__)
INITIAL_VENV_DISTS = ["pip", "setuptools", "pkg_resources", "wheel"]
INITIAL_VENV_FILES = ["easy_install.py"]
META_ENCODING = "utf-8"
@dataclass(frozen=True)
class DistInfo:
    """Immutable description of one installed distribution."""
    key: str  # lookup key (same value as project_name here)
    project_name: str  # distribution name parsed from the .dist-info dir
    version: str  # version string parsed from the .dist-info dir
    location: str  # directory on the target containing the dist
class Session:
"""
Allows performing several commands in row without releasing the venv.
"""
    def __init__(self, adapter: Adapter, tty: bool = True):
        # adapter: device adapter used for all target file operations.
        # tty: whether pip may use colored/terminal output.
        self._adapter = adapter
        # Lock and path of the lazily created local working venv
        # (see _ensure_venv / _prepare_venv).
        self._venv_lock: Optional[BaseFileLock] = None
        self._venv_dir: Optional[str] = None
        self._quiet = False
        self._tty = tty
    def install(
        self,
        specs: Optional[List[str]] = None,
        requirement_files: Optional[List[str]] = None,
        constraint_files: Optional[List[str]] = None,
        pre: bool = False,
        no_deps: bool = False,
        no_mp_org: bool = False,
        index_url: Optional[str] = None,
        extra_index_urls: Optional[List[str]] = None,
        no_index: bool = False,
        find_links: Optional[str] = None,
        target: Optional[str] = None,
        user: bool = False,
        upgrade: bool = False,
        upgrade_strategy: str = "only-if-needed",
        force_reinstall: bool = False,
        compile: Optional[bool] = None,
        mpy_cross: Optional[str] = None,
        **_,
    ):
        """Install distributions onto the target.

        Runs pip against the local working venv (populated with dummy dists
        mirroring the target), diffs the venv state before/after, and then
        uploads new/changed dists to the target, optionally compiling .py
        files with mpy-cross. Parameters mirror `pip install` options;
        `target`/`user` select the device directory to install into.
        """
        logger.debug("Starting install")
        if compile is None and mpy_cross:
            # An explicit mpy-cross executable implies compilation.
            compile = True
        args = ["install", "--no-compile", "--use-pep517"]
        if upgrade:
            args.append("--upgrade")
        if upgrade_strategy:
            args += ["--upgrade-strategy", upgrade_strategy]
        if force_reinstall:
            args.append("--force-reinstall")
        args += self._format_selection_args(
            specs=specs,
            requirement_files=requirement_files,
            constraint_files=constraint_files,
            pre=pre,
            no_deps=no_deps,
        )
        self._populate_venv()
        state_before = self._get_venv_state()
        self._invoke_pip_with_index_args(
            args,
            no_mp_org=no_mp_org,
            index_url=index_url,
            extra_index_urls=extra_index_urls or [],
            no_index=no_index,
            find_links=find_links,
        )
        state_after = self._get_venv_state()
        # pip install should never remove dists from the venv.
        removed_meta_dirs = {name for name in state_before if name not in state_after}
        assert not removed_meta_dirs
        new_meta_dirs = {name for name in state_after if name not in state_before}
        changed_meta_dirs = {
            name
            for name in state_after
            if name in state_before and state_after[name] != state_before[name]
        }
        if new_meta_dirs or changed_meta_dirs:
            self._report_progress("Starting to apply changes to the target.")
        if target:
            effective_target = target
        elif user:
            effective_target = self._adapter.get_user_packages_path()
        else:
            effective_target = self._adapter.get_default_target()
        for meta_dir in changed_meta_dirs:
            self._report_progress(f"Removing old version of {parse_meta_dir_name(meta_dir)[0]}")
            # if target is specified by --target or --user, then don't touch anything
            # besides corresponding directory, regardless of the sys.path and possible hiding
            dist_name, _version = parse_meta_dir_name(meta_dir)
            if target:
                # pip doesn't remove old dist with --target unless --upgrade is given
                if upgrade:
                    self._adapter.remove_dist(dist_name=dist_name, target=target)
            elif user:
                self._adapter.remove_dist(
                    dist_name=dist_name, target=self._adapter.get_user_packages_path()
                )
            else:
                # remove the all installations of this dist, which would hide the new installation
                self._adapter.remove_dist(
                    dist_name=dist_name, target=effective_target, above_target=True
                )
        for meta_dir in new_meta_dirs | changed_meta_dirs:
            self._upload_dist_by_meta_dir(meta_dir, effective_target, compile, mpy_cross)
        if new_meta_dirs or changed_meta_dirs:
            self._report_progress("All changes applied.")
    def uninstall(
        self,
        packages: Optional[List[str]] = None,
        requirement_files: Optional[List[str]] = None,
        yes: bool = False,
        **_,
    ):
        """Uninstall distributions from the target.

        Runs `pip uninstall` in the mirrored venv, diffs the venv state and
        removes the corresponding dists from the target. Unless `yes` is
        given, asks for interactive confirmation first.
        """
        args = ["uninstall", "--yes"]
        for rf in requirement_files or []:
            args += ["-r", rf]
        for package in packages or []:
            args.append(package)
        self._populate_venv()
        state_before = self._get_venv_state()
        self._invoke_pip(args)
        state_after = self._get_venv_state()
        removed_meta_dirs = {name for name in state_before if name not in state_after}
        if removed_meta_dirs:
            # NB! If you want to move confirmation back to pip process, then test the process
            # in Windows via Thonny
            if not yes:
                names = [parse_meta_dir_name(d)[0] for d in removed_meta_dirs]
                if input(f"Proceed removing {', '.join(names)} (Y/n) at target? ").lower() == "n":
                    return
            self._report_progress("Starting to apply changes to the target.")
        for meta_dir_name in removed_meta_dirs:
            self._report_progress(f"Removing {parse_meta_dir_name(meta_dir_name)[0]}")
            dist_name, _version = parse_meta_dir_name(meta_dir_name)
            self._adapter.remove_dist(dist_name)
        if removed_meta_dirs:
            self._report_progress("All changes applied.")
def list(
self,
outdated: bool = False,
uptodate: bool = False,
not_required: bool = False,
pre: bool = False,
paths: Optional[List[str]] = None,
user: bool = False,
format: str = "columns",
no_mp_org: Optional[bool] = False,
index_url: Optional[str] = None,
extra_index_urls: Optional[List[str]] = None,
no_index: bool = False,
find_links: Optional[str] = None,
excludes: Optional[List[str]] = None,
**_,
):
args = ["list"]
if outdated:
args.append("--outdated")
if uptodate:
args.append("--uptodate")
if not_required:
args.append("--not-required")
if pre:
args.append("--pre")
if format:
args += ["--format", format]
args += self._format_exclusion_args(excludes)
self._populate_venv(paths=paths, user=user)
self._invoke_pip_with_index_args(
args,
no_mp_org=no_mp_org,
index_url=index_url,
extra_index_urls=extra_index_urls,
no_index=no_index,
find_links=find_links,
)
def basic_list(self) -> Set[DistInfo]:
"""
Allows listing without requiring the venv.
"""
dists_by_name = self._adapter.list_dists()
result = set()
for name in dists_by_name:
meta_dir_name, location = dists_by_name[name]
name, version = parse_meta_dir_name(meta_dir_name)
result.add(DistInfo(key=name, project_name=name, version=version, location=location))
return result
def show(self, packages: List[str], **_):
self._populate_venv()
self._invoke_pip(["show"] + packages)
def freeze(
self,
paths: Optional[List[str]] = None,
user: bool = False,
excludes: Optional[List[str]] = None,
**_,
):
args = ["freeze"]
args += self._format_exclusion_args(excludes)
self._populate_venv(paths=paths, user=user)
self._invoke_pip(args)
def check(self, **_):
self._populate_venv()
self._invoke_pip(["check"])
def download(
self,
specs: Optional[List[str]] = None,
requirement_files: Optional[List[str]] = None,
constraint_files: Optional[List[str]] = None,
pre: bool = False,
no_deps: bool = False,
no_mp_org: bool = False,
index_url: Optional[str] = None,
extra_index_urls: Optional[List[str]] = None,
no_index: bool = False,
find_links: Optional[str] = None,
dest: Optional[str] = None,
**_,
):
args = ["download"]
if dest:
args += ["--dest", dest]
args += self._format_selection_args(
specs=specs,
requirement_files=requirement_files,
constraint_files=constraint_files,
pre=pre,
no_deps=no_deps,
)
self._populate_venv()
self._invoke_pip_with_index_args(
args,
no_mp_org=no_mp_org,
index_url=index_url,
extra_index_urls=extra_index_urls,
no_index=no_index,
find_links=find_links,
)
def wheel(
self,
specs: Optional[List[str]] = None,
requirement_files: Optional[List[str]] = None,
constraint_files: Optional[List[str]] = None,
pre: bool = False,
no_deps: bool = False,
no_mp_org: bool = False,
index_url: Optional[str] = None,
extra_index_urls: Optional[List[str]] = None,
no_index: bool = False,
find_links: Optional[str] = None,
wheel_dir: Optional[str] = None,
**_,
):
args = ["wheel"]
if wheel_dir:
args += ["--wheel-dir", wheel_dir]
args += self._format_selection_args(
specs=specs,
requirement_files=requirement_files,
constraint_files=constraint_files,
pre=pre,
no_deps=no_deps,
)
self._populate_venv()
self._invoke_pip_with_index_args(
args,
no_mp_org=no_mp_org,
index_url=index_url,
extra_index_urls=extra_index_urls,
no_index=no_index,
find_links=find_links,
)
def cache(self, cache_command: str, **_) -> None:
if cache_command == "purge":
if os.path.exists(self._get_pipkin_cache_dir()):
shutil.rmtree(self._get_pipkin_cache_dir())
elif cache_command == "dir":
print(self._get_pipkin_cache_dir())
else:
self._invoke_pip(["cache", cache_command])
def close(self) -> None:
if self._venv_lock is not None:
# self._clear_venv()
self._venv_lock.release()
def _format_exclusion_args(self, excludes: Optional[List[str]]) -> List[str]:
args = []
for exclude in (excludes or []) + ["pip", "pkg_resources", "setuptools", "wheel"]:
args += ["--exclude", exclude]
return args
def _format_selection_args(
self,
specs: Optional[List[str]],
requirement_files: Optional[List[str]],
constraint_files: Optional[List[str]],
pre: bool,
no_deps: bool,
):
args = []
for path in requirement_files or []:
args += ["-r", path]
for path in constraint_files or []:
args += ["-c", path]
if no_deps:
args.append("--no-deps")
if pre:
args.append("--pre")
args += specs or []
return args
    def _upload_dist_by_meta_dir(
        self, meta_dir_name: str, target: str, compile: bool, mpy_cross: Optional[str]
    ) -> None:
        """Copy one freshly installed dist from the venv to the target.

        Walks the dist's RECORD, uploads each listed file under `target`
        (compiling .py files to .mpy when `compile` is set) and finally
        writes a hash-less RECORD on the target.
        """
        self._report_progress(f"Copying {parse_meta_dir_name(meta_dir_name)[0]}", end="")
        rel_record_path = os.path.join(meta_dir_name, "RECORD")
        record_path = os.path.join(self._get_venv_site_packages_path(), rel_record_path)
        assert os.path.exists(record_path)
        target_record_lines = []
        with open(record_path, encoding=META_ENCODING) as fp:
            record_lines = fp.read().splitlines()
        for line in record_lines:
            rel_path = line.split(",")[0]
            # don't consider files installed to e.g. bin-directory
            if rel_path.startswith(".."):
                continue
            # don't consider absolute paths
            if os.path.isabs(rel_path):
                logger.warning("Skipping absolute path %s", rel_path)
                continue
            # only consider METADATA from meta dir
            if rel_path.startswith(meta_dir_name) and os.path.basename(rel_path) != "METADATA":
                continue
            full_path = os.path.normpath(
                os.path.join(self._get_venv_site_packages_path(), rel_path)
            )
            full_device_path = self._adapter.join_path(target, self._adapter.normpath(rel_path))
            if full_path.endswith(".py") and compile:
                self._compile_with_mpy_cross(
                    full_path, self._get_compiled_path(full_path), mpy_cross
                )
                # forget about the .py file
                full_path = self._get_compiled_path(full_path)
                full_device_path = self._get_compiled_path(full_device_path)
                rel_path = self._get_compiled_path(rel_path)
            with open(full_path, "rb") as source_fp:
                content = source_fp.read()
            if rel_path.startswith(meta_dir_name) and os.path.basename(rel_path) == "METADATA":
                content = self._trim_metadata(content)
            self._adapter.write_file(full_device_path, content)
            # One progress dot per uploaded file.
            self._report_progress(".", end="")
            target_record_lines.append(self._adapter.normpath(rel_path) + ",,")
        # add RECORD (without hashes)
        target_record_lines.append(self._adapter.normpath(rel_record_path) + ",,")
        full_device_record_path = self._adapter.join_path(
            target, self._adapter.normpath(rel_record_path)
        )
        self._adapter.write_file(
            full_device_record_path, "\n".join(target_record_lines).encode(META_ENCODING)
        )
        # add linebreak for the report
        self._report_progress("")
    def _trim_metadata(self, content: bytes) -> bytes:
        """Placeholder for stripping bulky fields from METADATA before upload.

        Currently a no-op; returns the content unchanged.
        """
        # TODO:
        return content
def _get_compiled_path(self, source_path: str) -> str:
assert source_path.endswith(".py"), f"Source path: {source_path}"
return source_path[: -len(".py")] + ".mpy"
def _ensure_venv(self) -> None:
if self._venv_lock is not None:
return
self._venv_lock, self._venv_dir = self._prepare_venv()
    def _prepare_venv(self) -> Tuple[BaseFileLock, str]:
        """Create (if needed) and exclusively lock the local working venv.

        Returns the acquired lock and the venv directory. Raises UserError
        when another pipkin instance already holds the lock.
        """
        path = self._compute_venv_path()
        if not os.path.exists(path):
            self._report_progress("Preparing working environment ...")
            logger.info("Start preparing working environment at %s ...", path)
            subprocess.check_call(
                [
                    sys.executable,
                    "-I",
                    "-m",
                    "venv",
                    path,
                ],
                stdin=subprocess.DEVNULL,
            )
            logger.info("Done creating venv")
            assert os.path.exists(path)
            # Pin the bootstrap tooling so the venv's behavior is reproducible.
            subprocess.check_call(
                [
                    get_venv_executable(path),
                    "-I",
                    "-m",
                    "pip",
                    "--disable-pip-version-check",
                    "install",
                    "--no-warn-script-location",
                    "--upgrade",
                    "pip==22.0.*",
                    "setuptools==60.9.*",
                    "wheel==0.37.*",
                ],
                stdin=subprocess.DEVNULL,
            )
            logger.info("Done preparing working environment.")
        else:
            logger.debug("Using existing working environment at %s", path)
        lock = FileLock(os.path.join(path, "pipkin.lock"))
        try:
            # Short timeout: fail fast if another pipkin instance holds it.
            lock.acquire(timeout=0.05)
        except filelock.Timeout:
            raise UserError(
                "Could not get exclusive access to the working environment. "
                "Is there another pipkin instance running?"
            )
        logger.debug("Received lock on the working environment")
        return lock, path
def _get_venv_site_packages_path(self) -> str:
return get_venv_site_packages_path(self._venv_dir)
def _clear_venv(self) -> None:
sp_path = self._get_venv_site_packages_path()
logger.debug("Clearing %s", sp_path)
for name in os.listdir(sp_path):
full_path = os.path.join(sp_path, name)
if self._is_initial_venv_item(name):
continue
elif os.path.isfile(full_path):
os.remove(full_path)
else:
assert os.path.isdir(full_path)
shutil.rmtree(full_path)
def _populate_venv(self, paths: Optional[List[str]] = None, user: bool = False) -> None:
"""paths and user should be used only with list and freeze commands"""
logger.debug("Start populating venv")
self._ensure_venv()
# TODO: try to re-use the state from the previous command executed in the same session.
assert not (paths and user)
if user:
effective_paths = [self._adapter.get_user_packages_path()]
else:
effective_paths = paths
self._clear_venv()
dist_infos = self._adapter.list_dists(effective_paths)
for name in dist_infos:
meta_dir_name, original_path = dist_infos[name]
self._prepare_dummy_dist(meta_dir_name, original_path)
logger.debug("Done populating venv")
def _prepare_dummy_dist(self, meta_dir_name: str, original_path: str) -> None:
sp_path = self._get_venv_site_packages_path()
meta_path = os.path.join(sp_path, meta_dir_name)
os.mkdir(meta_path, 0o755)
for name in ["METADATA"]:
content = self._read_dist_meta_file(meta_dir_name, name, original_path)
with open(os.path.join(meta_path, name), "bw") as meta_fp:
meta_fp.write(content)
# INSTALLER is mandatory according to https://www.python.org/dev/peps/pep-0376/
with open(os.path.join(meta_path, "INSTALLER"), "w", encoding="utf-8") as installer_fp:
installer_fp.write("pip\n")
# create dummy RECORD
with open(os.path.join(meta_path, "RECORD"), "w", encoding=META_ENCODING) as record_fp:
for name in ["METADATA", "INSTALLER", "RECORD"]:
record_fp.write(f"{meta_dir_name}/{name},,\n")
def _read_dist_meta_file(
self, meta_dir_name: str, file_name: str, original_container_path: str
) -> bytes:
# TODO: add cache
path = self._adapter.join_path(original_container_path, meta_dir_name, file_name)
return self._adapter.read_file(path)
def _compute_venv_path(self) -> str:
try:
# try to share the pip-execution-venv among all pipkin-running-venvs created from
# same base executable
exe = get_base_executable()
except Exception:
exe = sys.executable
venv_name = hashlib.md5(str((exe, sys.version_info[0:2])).encode("utf-8")).hexdigest()
return os.path.join(self._get_workspaces_dir(), venv_name)
    def _get_workspaces_dir(self) -> str:
        """Directory under the pipkin cache that holds the working venvs."""
        return os.path.join(self._get_pipkin_cache_dir(), "workspaces")
def _get_pipkin_cache_dir(self) -> str:
result = os.path.join(get_user_cache_dir(), "pipkin")
if sys.platform == "win32":
# Windows doesn't have separate user cache dir
result = os.path.join(result, "cache")
return result
def _is_initial_venv_item(self, name: str) -> bool:
return (
name in INITIAL_VENV_FILES
or name in INITIAL_VENV_DISTS
or name.endswith(".dist-info")
and name.split("-")[0] in INITIAL_VENV_DISTS
)
    def _get_venv_state(self, root: Optional[str] = None) -> Dict[str, float]:
        """Returns mapping from meta_dir names to modification timestamps of METADATA files"""
        if root is None:
            root = self._get_venv_site_packages_path()
        result = {}
        for item_name in os.listdir(root):
            # Ignore the venv's own bootstrap items (pip, setuptools, ...).
            if self._is_initial_venv_item(item_name):
                continue
            if item_name.endswith(".dist-info"):
                metadata_full_path = os.path.join(root, item_name, "METADATA")
                assert os.path.exists(metadata_full_path)
                result[item_name] = os.stat(metadata_full_path).st_mtime
        return result
    def _invoke_pip_with_index_args(
        self,
        pip_args: List[str],
        no_mp_org: bool,
        index_url: str,
        extra_index_urls: List[str],
        no_index: bool,
        find_links: Optional[str],
    ):
        """Run pip, routing index traffic through a local proxy.

        With `no_index` the given find_links location is used directly;
        otherwise a local proxy for the configured indexes is started for
        the duration of the pip call and always shut down afterwards.
        """
        if no_index:
            assert find_links
            self._invoke_pip(pip_args + ["--no-index", "--find-links", find_links])
        else:
            proxy = start_proxy(no_mp_org, index_url, extra_index_urls)
            logger.info("Using PipkinProxy at %s", proxy.get_index_url())
            index_args = ["--index-url", proxy.get_index_url()]
            if find_links:
                index_args += ["--find-links", find_links]
            try:
                self._invoke_pip(pip_args + index_args)
            finally:
                # Stop the proxy even when pip fails.
                proxy.shutdown()
def _invoke_pip(self, args: List[str]) -> None:
pip_cmd = [get_venv_executable(self._venv_dir), "-I", "-m", "pip"]
if not self._tty:
pip_cmd += ["--no-color"]
pip_cmd += [
"--disable-pip-version-check",
"--trusted-host",
"127.0.0.1",
] + args
logger.debug("Calling pip: %s", " ".join(shlex.quote(arg) for arg in pip_cmd))
env = {key: os.environ[key] for key in os.environ if not key.startswith("PIP_")}
env["PIP_CACHE_DIR"] = self._get_pipkin_cache_dir()
subprocess.check_call(pip_cmd, env=env, stdin=subprocess.DEVNULL)
def _compile_with_mpy_cross(
self, source_path: str, target_path: str, mpy_cross_path: Optional[str]
) -> None:
if mpy_cross_path is None:
mpy_cross_path = self._ensure_mpy_cross()
# user-provided executable is assumed to have been validated with proper error messages in main()
assert os.path.exists
assert os.access(mpy_cross_path, os.X_OK)
args = (
[mpy_cross_path] + self._adapter.get_mpy_cross_args() + ["-o", target_path, source_path]
)
subprocess.check_call(args)
def _ensure_mpy_cross(self) -> str:
impl_name, ver_prefix = self._adapter.get_implementation_name_and_version_prefix()
path = self._get_mpy_cross_path(impl_name, ver_prefix)
if not os.path.exists(path):
self._download_mpy_cross(impl_name, ver_prefix, path)
return path
    def _download_mpy_cross(
        self, implementation_name: str, version_prefix: str, target_path: str
    ) -> None:
        """Download a matching mpy-cross executable to *target_path*.

        Raises UserError when no binary is published for the requested
        implementation/version or for the current OS/architecture.
        """
        os.makedirs(os.path.dirname(target_path), exist_ok=True)
        # The metadata file maps version prefixes to per-platform download URLs.
        meta_url = f"https://raw.githubusercontent.com/aivarannamaa/pipkin/master/data/{implementation_name}-mpy-cross.json"
        with urlopen(url=meta_url) as fp:
            meta = json.load(fp)
        if version_prefix not in meta:
            raise UserError(f"Can't find mpy-cross for {implementation_name} {version_prefix}")
        version_data = meta[version_prefix]
        # Translate sys.platform to the OS name used in the metadata keys.
        if sys.platform == "win32":
            os_marker = "windows"
        elif sys.platform == "darwin":
            os_marker = "macos"
        elif sys.platform == "linux":
            os_marker = "linux"
        else:
            raise AssertionError(f"Unexpected sys.platform {sys.platform}")
        # Keys look like "<os>-<machine>", e.g. "linux-x86_64".
        full_marker = f"{os_marker}-{platform.machine()}"
        if full_marker not in version_data:
            raise UserError(
                f"Can't find {full_marker} mpy-cross for {implementation_name} {version_prefix}"
            )
        download_url = version_data[full_marker]
        urllib.request.urlretrieve(download_url, target_path)
        # Mark the downloaded binary as executable for the owner.
        os.chmod(target_path, os.stat(target_path).st_mode | stat.S_IEXEC)
def _get_mpy_cross_path(self, implementation_name: str, version_prefix: str) -> str:
basename = f"mpy-cross_{implementation_name}_{version_prefix}"
if sys.platform == "win32":
basename += ".exe"
return os.path.join(self._get_pipkin_cache_dir(), "mpy-cross", basename)
def _report_progress(self, msg: str, end="\n") -> None:
if not self._quiet:
print(msg, end=end)
sys.stdout.flush()
| StarcoderdataPython |
4824486 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Algorithm used for regression tests purposes
### </summary>
### <meta name="tag" content="regression test" />
class RegressionAlgorithm(QCAlgorithm):
    '''Regression-test algorithm: flips small positions in a fixed basket at most once per minute.'''
    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        self.SetStartDate(2013,10,7) #Set Start Date
        self.SetEndDate(2013,10,11) #Set End Date
        self.SetCash(10000000) #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        # One symbol per data resolution to exercise all feed paths.
        self.AddEquity("SPY", Resolution.Tick)
        self.AddEquity("BAC", Resolution.Minute)
        self.AddEquity("AIG", Resolution.Hour)
        self.AddEquity("IBM", Resolution.Daily)
        # Throttle state: last trade time and minimum interval between trades.
        self.__lastTradeTicks = self.StartDate
        self.__lastTradeTradeBars = self.__lastTradeTicks
        self.__tradeEvery = timedelta(minutes=1)
    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
        # Trade at most once per __tradeEvery interval.
        if self.Time - self.__lastTradeTradeBars < self.__tradeEvery:
            return
        self.__lastTradeTradeBars = self.Time
        for kvp in data.Bars:
            period = kvp.Value.Period.total_seconds()
            # NOTE(review): this branch is a no-op (`pass`) and the roundTime
            # result is discarded -- looks like it was meant to `continue`
            # when the bar is not aligned to its period; confirm intent.
            if self.roundTime(self.Time, period) != self.Time:
                pass
            symbol = kvp.Key
            holdings = self.Portfolio[symbol]
            # Flip-flop: open a small position if flat, otherwise liquidate.
            if not holdings.Invested:
                self.MarketOrder(symbol, 10)
            else:
                self.MarketOrder(symbol, -holdings.Quantity)
    def roundTime(self, dt=None, roundTo=60):
        """Round a datetime object to any time laps in seconds
        dt : datetime object, default now.
        roundTo : Closest number of seconds to round to, default 1 minute.
        """
        if dt is None : dt = datetime.now()
        # Seconds elapsed since midnight (dt.min is the epoch-of-type).
        seconds = (dt - dt.min).seconds
        # // is a floor division, not a comment on following line:
        rounding = (seconds+roundTo/2) // roundTo * roundTo
        return dt + timedelta(0,rounding-seconds,-dt.microsecond)
| StarcoderdataPython |
3355058 | import src.htc_calculator
App = FreeCAD
import ObjectsFem
from femmesh.gmshtools import GmshTools
def test_mesh_creation():
    """Mesh a FreeCAD box with Gmsh at several characteristic lengths.

    For each length, a fresh FEM mesh object is created, Gmsh input files are
    written under /tmp and Gmsh is run; any Gmsh error text is printed.
    """
    # more sophisticated example which changes the mesh size
    doc = App.newDocument("MeshTest")
    box_obj = doc.addObject("Part::Box", "Box")
    doc.recompute()
    max_mesh_sizes = [0.5, 1, 2, 3, 5, 10]
    # BUG FIX: the loop variable was named `len`, shadowing the builtin.
    for mesh_size in max_mesh_sizes:
        quantity_len = "{}".format(mesh_size)
        print("\n\n Start length = {}".format(quantity_len))
        femmesh_obj = ObjectsFem.makeMeshGmsh(doc, box_obj.Name + "_Mesh")
        femmesh_obj.Part = box_obj
        femmesh_obj.CharacteristicLengthMax = "{}".format(quantity_len)
        femmesh_obj.CharacteristicLengthMin = "{}".format(quantity_len)
        doc.recompute()
        gm = GmshTools(femmesh_obj)
        gm.update_mesh_data()
        # set the tmp file path to some user path including the length
        gm.get_tmp_file_paths("/tmp/fcgm_" + str(mesh_size), True)
        gm.get_gmsh_command()
        gm.write_gmsh_input_files()
        error = gm.run_gmsh_with_geo()
        print(error)
        gm.read_and_set_new_mesh()
        doc.recompute()
        print("Done length = {}".format(quantity_len))
    print('done')
# Run the mesh test directly as a script (requires a FreeCAD environment).
if __name__ == '__main__':
    test_mesh_creation()
| StarcoderdataPython |
1620049 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edb import errors
from edb.common import markup
from edb.testbase import lang as tb
from edb import edgeql
from edb.edgeql import qltypes
from edb.schema import delta as s_delta
from edb.schema import ddl as s_ddl
from edb.schema import links as s_links
from edb.schema import objtypes as s_objtypes
from edb.tools import test
class TestSchema(tb.BaseSchemaLoadTest):
def test_schema_inherited_01(self):
"""
type UniqueName {
property name -> str {
constraint exclusive
}
};
type UniqueName_2 extending UniqueName {
inherited property name -> str {
constraint exclusive
}
};
"""
@tb.must_fail(errors.SchemaDefinitionError,
"'name'.*must be declared using the `inherited` keyword",
position=214)
def test_schema_inherited_02(self):
"""
type UniqueName {
property name -> str {
constraint exclusive
}
};
type UniqueName_2 extending UniqueName {
property name -> str {
constraint exclusive
}
};
"""
@tb.must_fail(errors.SchemaDefinitionError,
"'name'.*cannot be declared `inherited`",
position=47)
def test_schema_inherited_03(self):
"""
type UniqueName {
inherited property name -> str
};
"""
@tb.must_fail(errors.InvalidLinkTargetError,
'invalid link target, expected object type, got ScalarType',
position=55)
def test_schema_bad_link_01(self):
"""
type Object {
link foo -> str
};
"""
@tb.must_fail(errors.InvalidLinkTargetError,
'invalid link target, expected object type, got ScalarType',
position=55)
def test_schema_bad_link_02(self):
"""
type Object {
link foo := 1 + 1
};
"""
@tb.must_fail(errors.SchemaDefinitionError,
'link or property name length exceeds the maximum.*',
position=43)
def test_schema_bad_link_03(self):
"""
type Object {
link f123456789_123456789_123456789_123456789_123456789\
_123456789_123456789_123456789 -> Object
};
"""
@tb.must_fail(errors.InvalidPropertyTargetError,
"invalid property type: expected a scalar type, "
"or a scalar collection, got 'test::Object'",
position=59)
def test_schema_bad_prop_01(self):
"""
type Object {
property foo -> Object
};
"""
@tb.must_fail(errors.InvalidPropertyTargetError,
"invalid property type: expected a scalar type, "
"or a scalar collection, got 'test::Object'",
position=59)
def test_schema_bad_prop_02(self):
"""
type Object {
property foo := (SELECT Object)
};
"""
@tb.must_fail(errors.SchemaDefinitionError,
'link or property name length exceeds the maximum.*',
position=43)
def test_schema_bad_prop_03(self):
"""
type Object {
property f123456789_123456789_123456789_123456789_123456789\
_123456789_123456789_123456789 -> str
};
"""
@tb.must_fail(errors.InvalidReferenceError,
"type 'int' does not exist",
position=59,
hint='did you mean one of these: int16, int32, int64?')
def test_schema_bad_type_01(self):
"""
type Object {
property foo -> int
};
"""
def test_schema_computable_cardinality_inference_01(self):
schema = self.load_schema("""
type Object {
property foo -> str;
property bar -> str;
property foo_plus_bar := __source__.foo ++ __source__.bar;
};
""")
obj = schema.get('test::Object')
self.assertEqual(
obj.getptr(schema, 'foo_plus_bar').get_cardinality(schema),
qltypes.Cardinality.ONE)
def test_schema_computable_cardinality_inference_02(self):
schema = self.load_schema("""
type Object {
multi property foo -> str;
property bar -> str;
property foo_plus_bar := __source__.foo ++ __source__.bar;
};
""")
obj = schema.get('test::Object')
self.assertEqual(
obj.getptr(schema, 'foo_plus_bar').get_cardinality(schema),
qltypes.Cardinality.MANY)
def test_schema_refs_01(self):
schema = self.load_schema("""
type Object1;
type Object2 {
link foo -> Object1
};
type Object3 extending Object1;
type Object4 extending Object1;
type Object5 {
link bar -> Object2
};
type Object6 extending Object4;
""")
Obj1 = schema.get('test::Object1')
Obj2 = schema.get('test::Object2')
Obj3 = schema.get('test::Object3')
Obj4 = schema.get('test::Object4')
Obj5 = schema.get('test::Object5')
Obj6 = schema.get('test::Object6')
obj1_id = Obj1.getptr(schema, 'id')
obj1_type = Obj1.getptr(schema, '__type__')
obj1_type_source = obj1_type.getptr(schema, 'source')
obj2_type = Obj2.getptr(schema, '__type__')
foo = Obj2.getptr(schema, 'foo')
foo_target = foo.getptr(schema, 'target')
bar = Obj5.getptr(schema, 'bar')
self.assertEqual(
schema.get_referrers(Obj1),
frozenset({
foo, # Object 1 is a Object2.foo target
foo_target, # and also a target of its @target property
Obj3, # It is also in Object3's bases and ancestors
Obj4, # Likewise for Object4
Obj6, # Object6 through its ancestors
obj1_id, # Inherited id property
obj1_type, # Inherited __type__ link
obj1_type_source, # and its @source property
})
)
self.assertEqual(
schema.get_referrers(Obj1, scls_type=s_objtypes.ObjectType),
{
Obj3, # It is also in Object3's bases and ancestors
Obj4, # Likewise for Object4
Obj6, # Object6 through its ancestors
}
)
self.assertEqual(
schema.get_referrers(Obj2, scls_type=s_links.Link),
{
foo, # Obj2 is foo's source
bar, # Obj2 is bar's target
obj2_type, # Iherited Obj2.__type__ link
}
)
self.assertEqual(
schema.get_referrers(Obj2, scls_type=s_links.Link,
field_name='target'),
{
bar, # Obj2 is bar's target
}
)
schema = self.run_ddl(schema, '''
ALTER TYPE test::Object4 DROP EXTENDING test::Object1;
''')
self.assertEqual(
schema.get_referrers(Obj1),
frozenset({
foo, # Object 1 is a Object2.foo target
foo_target, # and also a target of its @target property
Obj3, # It is also in Object3's bases and ancestors
obj1_id, # Inherited id property
obj1_type, # Inherited __type__ link
obj1_type_source, # and its @source property
})
)
schema = self.run_ddl(schema, '''
ALTER TYPE test::Object3 DROP EXTENDING test::Object1;
''')
self.assertEqual(
schema.get_referrers(Obj1),
frozenset({
foo, # Object 1 is a Object2.foo target
foo_target, # and also a target of its @target property
obj1_id, # Inherited id property
obj1_type, # Inherited __type__ link
obj1_type_source, # and its @source property
})
)
schema = self.run_ddl(schema, '''
CREATE FUNCTION
test::my_contains(arr: array<anytype>, val: anytype) -> bool {
FROM edgeql $$
SELECT contains(arr, val);
$$;
};
CREATE ABSTRACT CONSTRAINT
test::my_one_of(one_of: array<anytype>) {
SET expr := (
WITH foo := test::Object1
SELECT test::my_contains(one_of, __subject__)
);
};
CREATE SCALAR TYPE test::my_scalar_t extending str {
CREATE CONSTRAINT test::my_one_of(['foo', 'bar']);
};
''')
my_scalar_t = schema.get('test::my_scalar_t')
abstr_constr = schema.get('test::my_one_of')
constr = my_scalar_t.get_constraints(schema).objects(schema)[0]
my_contains = schema.get_functions('test::my_contains')[0]
self.assertEqual(
schema.get_referrers(my_contains),
frozenset({
constr,
})
)
self.assertEqual(
schema.get_referrers(Obj1),
frozenset({
foo, # Object 1 is a Object2.foo target
foo_target, # and also a target of its @target property
abstr_constr, # abstract constraint my_one_of
constr, # concrete constraint in my_scalar_t
obj1_id, # Inherited id property
obj1_type, # Inherited __type__ link
obj1_type_source, # and its @source property
})
)
def test_schema_refs_02(self):
schema = self.load_schema("""
type Object1 {
property num -> int64;
};
type Object2 {
required property num -> int64 {
default := (
SELECT Object1.num + 1
ORDER BY Object1.num DESC
LIMIT 1
)
}
};
""")
Obj1 = schema.get('test::Object1')
obj1_num = Obj1.getptr(schema, 'num')
Obj2 = schema.get('test::Object2')
obj2_num = Obj2.getptr(schema, 'num')
self.assertEqual(
schema.get_referrers(obj1_num),
frozenset({
Obj1,
obj2_num,
})
)
def test_schema_refs_03(self):
schema = self.load_schema("""
type Object1 {
property num -> int64;
};
type Object2 {
required property num -> int64 {
default := (
SELECT Object1.num LIMIT 1
)
}
};
""")
Obj1 = schema.get('test::Object1')
obj1_num = Obj1.getptr(schema, 'num')
Obj2 = schema.get('test::Object2')
obj2_num = Obj2.getptr(schema, 'num')
self.assertEqual(
schema.get_referrers(obj1_num),
frozenset({
Obj1,
obj2_num,
})
)
def test_schema_annotation_inheritance(self):
schema = self.load_schema("""
abstract annotation noninh;
abstract inheritable annotation inh;
type Object1 {
annotation noninh := 'bar';
annotation inh := 'inherit me';
};
type Object2 extending Object1;
""")
Object1 = schema.get('test::Object1')
Object2 = schema.get('test::Object2')
self.assertEqual(Object1.get_annotation(schema, 'test::noninh'), 'bar')
# Attributes are non-inheritable by default
self.assertIsNone(Object2.get_annotation(schema, 'test::noninh'))
self.assertEqual(
Object1.get_annotation(schema, 'test::inh'), 'inherit me')
self.assertEqual(
Object2.get_annotation(schema, 'test::inh'), 'inherit me')
def test_schema_object_verbosename(self):
schema = self.load_schema("""
abstract inheritable annotation attr;
abstract link lnk_1;
abstract property prop_1;
type Object1 {
annotation attr := 'inherit me';
property foo -> std::str {
annotation attr := 'propprop';
constraint max_len_value(10)
}
link bar -> Object {
constraint exclusive;
annotation attr := 'bbb';
property bar_prop -> std::str {
annotation attr := 'aaa';
constraint max_len_value(10);
}
}
};
""")
schema = self.run_ddl(schema, '''
CREATE FUNCTION test::foo (a: int64) -> int64
FROM EdgeQL $$ SELECT a; $$;
''')
self.assertEqual(
schema.get('test::attr').get_verbosename(schema),
"abstract annotation 'test::attr'",
)
self.assertEqual(
schema.get('test::lnk_1').get_verbosename(schema),
"abstract link 'test::lnk_1'",
)
self.assertEqual(
schema.get('test::prop_1').get_verbosename(schema),
"abstract property 'test::prop_1'",
)
self.assertEqual(
schema.get('std::max_len_value').get_verbosename(schema),
"abstract constraint 'std::max_len_value'",
)
fn = list(schema.get_functions('std::json_typeof'))[0]
self.assertEqual(
fn.get_verbosename(schema),
'function std::json_typeof(json: std::json)',
)
fn_param = fn.get_params(schema).get_by_name(schema, 'json')
self.assertEqual(
fn_param.get_verbosename(schema, with_parent=True),
"parameter 'json' of function std::json_typeof(json: std::json)",
)
op = list(schema.get_operators('std::AND'))[0]
self.assertEqual(
op.get_verbosename(schema),
'operator "std::bool AND std::bool"',
)
obj = schema.get('test::Object1')
self.assertEqual(
obj.get_verbosename(schema),
"object type 'test::Object1'",
)
self.assertEqual(
obj.get_annotations(schema).get(
schema, 'test::attr').get_verbosename(
schema, with_parent=True),
"annotation 'test::attr' of object type 'test::Object1'",
)
foo_prop = obj.get_pointers(schema).get(schema, 'foo')
self.assertEqual(
foo_prop.get_verbosename(schema, with_parent=True),
"property 'foo' of object type 'test::Object1'",
)
self.assertEqual(
foo_prop.get_annotations(schema).get(
schema, 'test::attr').get_verbosename(
schema, with_parent=True),
"annotation 'test::attr' of property 'foo' of "
"object type 'test::Object1'",
)
self.assertEqual(
next(iter(foo_prop.get_constraints(
schema).objects(schema))).get_verbosename(
schema, with_parent=True),
"constraint 'std::max_len_value' of property 'foo' of "
"object type 'test::Object1'",
)
bar_link = obj.get_pointers(schema).get(schema, 'bar')
self.assertEqual(
bar_link.get_verbosename(schema, with_parent=True),
"link 'bar' of object type 'test::Object1'",
)
bar_link_prop = bar_link.get_pointers(schema).get(schema, 'bar_prop')
self.assertEqual(
bar_link_prop.get_annotations(schema).get(
schema, 'test::attr').get_verbosename(
schema, with_parent=True),
"annotation 'test::attr' of property 'bar_prop' of "
"link 'bar' of object type 'test::Object1'",
)
self.assertEqual(
next(iter(bar_link_prop.get_constraints(
schema).objects(schema))).get_verbosename(
schema, with_parent=True),
"constraint 'std::max_len_value' of property 'bar_prop' of "
"link 'bar' of object type 'test::Object1'",
)
class TestGetMigration(tb.BaseSchemaLoadTest):
"""Test migration deparse consistency.
This tests that schemas produced by `COMMIT MIGRATION foo` and
by deparsed DDL via `GET MIGRATION foo` are identical.
"""
    @classmethod
    def setUpClass(cls):
        """Load shared fixtures once: the std schema and a 'default' module."""
        super().setUpClass()
        # The standard schema is needed to compile migrations against
        # built-in types; loading it is expensive, so do it once per class.
        cls.std_schema = tb._load_std_schema()
        cls.schema = cls.run_ddl(cls.schema, 'CREATE MODULE default;')
    def _assert_migration_consistency(self, schema_text):
        """Assert that COMMIT MIGRATION and replayed GET MIGRATION DDL agree.

        The migration to *schema_text* is compiled and applied directly
        (baseline schema); the DDL text deparsed from the same migration is
        then executed, and the two resulting schemas must not differ.
        """
        migration_text = f'''
            CREATE MIGRATION m TO {{
                {schema_text}
            }};
        '''
        migration_ql = edgeql.parse_block(migration_text)
        migration_cmd = s_ddl.cmd_from_ddl(
            migration_ql[0],
            schema=self.schema,
            modaliases={
                None: 'default'
            },
        )
        migration_cmd = s_ddl.compile_migration(
            migration_cmd,
            self.std_schema,
            self.schema,
        )
        context = s_delta.CommandContext()
        schema, migration = migration_cmd.apply(self.schema, context)
        # Apply the canonical command tree to obtain the baseline schema.
        ddl_plan = s_delta.DeltaRoot(canonical=True)
        ddl_plan.update(migration.get_commands(schema))
        baseline_schema, _ = ddl_plan.apply(schema, context)
        # Now replay the DDL text deparsed from the migration.
        ddl_text = s_ddl.ddl_text_from_delta(schema, migration)
        try:
            test_schema = self.run_ddl(schema, ddl_text)
        except errors.EdgeDBError as e:
            self.fail(markup.dumps(e))
        # Any remaining delta between the two schemas is a consistency bug.
        diff = s_ddl.delta_schemas(baseline_schema, test_schema)
        if list(diff.get_subcommands()):
            self.fail(
                f'unexpected difference in schema produced by\n'
                f'COMMIT MIGRATION and DDL obtained from GET MIGRATION:\n'
                f'{markup.dumps(diff)}\n'
                f'DDL text was:\n{ddl_text}'
            )
    def _assert_migration_equivalence(self, migrations):
        """Assert step-by-step and single-step migrations yield equal schemas.

        *migrations* is a list of schema texts; the last element is the final
        state, reached both directly and via the intermediate states.
        """
        # Compare 2 schemas obtained by multiple-step migration to a
        # single-step migration.
        # Validate that the final schema state has consistent migration.
        self._assert_migration_consistency(migrations[-1])
        # Jump to final schema state in a single migration.
        single_migration = self.load_schema(migrations[-1])
        # Evolve a schema in a series of migrations.
        multi_migration = self.load_schema(migrations[0])
        for i, state in enumerate(migrations[1:]):
            multi_migration = self.run_ddl(multi_migration, f'''
                CREATE MIGRATION m{i} TO {{
                    {state}
                }};
                COMMIT MIGRATION m{i};
            ''', 'test')
        # The two paths must produce identical 'test' modules.
        diff = s_ddl.delta_modules(single_migration, multi_migration, ['test'])
        if list(diff.get_subcommands()):
            self.fail(
                f'unexpected difference in schema produced by\n'
                f'alternative migration paths:\n'
                f'{markup.dumps(diff)}\n'
            )
def test_get_migration_01(self):
schema = '''
abstract inheritable annotation my_anno;
abstract type Named {
property name -> str {
annotation title := 'Name';
delegated constraint exclusive {
annotation title := 'uniquely named';
}
}
}
type User extending Named {
required multi link friends -> User {
annotation my_anno := 'foo';
}
};
abstract link special;
abstract property annotated_name {
annotation title := 'Name';
}
type SpecialUser extending User {
inherited property name extending annotated_name -> str;
inherited link friends extending special -> SpecialUser;
};
'''
self._assert_migration_consistency(schema)
def test_get_migration_02(self):
schema = '''
abstract type Named {
property name -> str {
delegated constraint exclusive;
}
}
abstract type User extending Named {
inherited required property name -> str {
delegated constraint exclusive;
}
};
type SpecialUser extending User;
'''
self._assert_migration_consistency(schema)
def test_get_migration_03(self):
schema = '''
abstract type Named {
property name -> str {
delegated constraint exclusive;
}
}
type Ingredient extending Named {
property vegetarian -> bool {
default := false;
}
}
scalar type unit extending enum<'ml', 'g', 'oz'>;
type Recipe extending Named {
multi link ingredients -> Ingredient {
property quantity -> decimal {
annotation title := 'ingredient quantity';
};
property unit -> unit;
}
}
view VegRecipes := (
SELECT Recipe
FILTER all(.ingredients.vegetarian)
);
function get_ingredients(
recipe: Recipe
) -> tuple<name: str, quantity: decimal> {
from edgeql $$
SELECT (
name := recipe.ingredients.name,
quantity := recipe.ingredients.quantity,
);
$$
}
'''
self._assert_migration_consistency(schema)
def test_migrations_equivalence_01(self):
self._assert_migration_equivalence([r"""
type Base;
""", r"""
type Base {
property name -> str;
}
""", r"""
type Base {
property name -> str;
}
type Derived extending Base {
inherited required property name -> str;
}
"""])
def test_migrations_equivalence_02(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
type Derived extending Base {
inherited required property foo -> str;
}
""", r"""
type Base {
# rename 'foo'
property foo2 -> str;
}
type Derived extending Base {
inherited required property foo2 -> str;
}
"""])
@test.xfail('''
edb.errors.SchemaError: cannot drop inherited property 'foo'
of object type 'test::Derived'
DETAILS: property 'foo' of object type 'test::Derived' is
inherited from:
- object type 'test::Base'
''')
def test_migrations_equivalence_03(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
type Derived extending Base {
inherited required property foo -> str;
}
""", r"""
type Base;
# drop 'foo'
type Derived extending Base {
# completely different property
property foo2 -> str;
}
"""])
@test.xfail('''
edb.errors.SchemaError: cannot drop inherited property 'foo'
of object type 'test::Further'
DETAILS: property 'foo' of object type 'test::Further' is
inherited from:
- object type 'test::Derived'
''')
def test_migrations_equivalence_04(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
type Derived extending Base;
type Further extending Derived {
inherited required property foo -> str;
}
""", r"""
type Base;
# drop 'foo'
type Derived extending Base;
type Further extending Derived {
# completely different property
required property foo2 -> str;
};
"""])
@test.xfail('''
edb.errors.SchemaError: cannot drop property 'foo' of object
type 'test::Base' because other objects in the schema depend
on it
DETAILS: property 'foo' of object type 'test::Derived' depends on foo
''')
def test_migrations_equivalence_05(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
type Derived extending Base {
inherited required property foo -> str;
}
""", r"""
type Base;
# drop foo
type Derived extending Base {
# completely different property, but with the same old
# name 'foo'
property foo -> str;
}
"""])
def test_migrations_equivalence_06(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
type Derived extending Base {
inherited required property foo -> str;
}
""", r"""
type Base {
# change property type
property foo -> int64;
}
type Derived extending Base {
inherited required property foo -> int64;
}
"""])
def test_migrations_equivalence_07(self):
self._assert_migration_equivalence([r"""
type Child;
type Base {
link bar -> Child;
}
""", r"""
type Child;
type Base {
required link bar -> Child {
# add a constraint
constraint exclusive;
}
}
"""])
@test.xfail('''
Fails in _assert_migration_consistency for the final state:
edb.errors.InvalidReferenceError: object type or view 'max'
does not exist
''')
def test_migrations_equivalence_08(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
""", r"""
type Base {
required property foo -> str {
# add a constraint
constraint max_len_value(10);
}
}
"""])
@test.xfail('''
Fails in _assert_migration_consistency for the final state:
edb.errors.InvalidReferenceError: object type or view 'max'
does not exist
''')
def test_migrations_equivalence_09(self):
self._assert_migration_equivalence([r"""
scalar type constraint_length extending str {
constraint max_len_value(10);
}
""", r"""
scalar type constraint_length extending str {
constraint max_len_value(10);
# add a constraint
constraint min_len_value(5);
}
"""])
def test_migrations_equivalence_10(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
""", r"""
type Child;
type Base {
# change property to link with same name
link foo -> Child;
}
"""])
def test_migrations_equivalence_11(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
""", r"""
type Child;
type Base {
# change property to link with same name
link foo -> Child {
# add a constraint
constraint exclusive;
}
}
"""])
def test_migrations_equivalence_12(self):
self._assert_migration_equivalence([r"""
type Child;
type Base {
property foo -> str {
constraint exclusive;
}
link bar -> Child {
constraint exclusive;
}
}
""", r"""
type Child;
type Base {
# drop constraints
property foo -> str;
link bar -> Child;
}
"""])
@test.xfail('''
edb.errors.SchemaError: cannot drop link 'bar' of object type
'test::Base' because other objects in the schema depend on
it
DETAILS: link 'bar' of object type 'test::Derived' depends on bar
''')
def test_migrations_equivalence_13(self):
self._assert_migration_equivalence([r"""
type Child;
type Base {
link bar -> Child;
}
type Derived extending Base {
inherited required link bar -> Child;
}
""", r"""
type Child;
type Base;
# drop 'bar'
type Derived extending Base {
# completely different link
link bar -> Child;
}
"""])
def test_migrations_equivalence_14(self):
self._assert_migration_equivalence([r"""
type Base;
type Derived extending Base {
property foo -> str;
}
""", r"""
type Base {
# move the property earlier in the inheritance
property foo -> str;
}
type Derived extending Base {
inherited required property foo -> str;
}
"""])
def test_migrations_equivalence_15(self):
self._assert_migration_equivalence([r"""
type Child;
type Base;
type Derived extending Base {
link bar -> Child;
}
""", r"""
type Child;
type Base {
# move the link earlier in the inheritance
link bar -> Child;
}
type Derived extending Base;
"""])
@test.xfail('''
Fails in _assert_migration_consistency for the final state
''')
def test_migrations_equivalence_16(self):
self._assert_migration_equivalence([r"""
type Child;
type Base;
type Derived extending Base {
link bar -> Child;
}
""", r"""
type Child;
type Base {
# move the link earlier in the inheritance
link bar -> Child;
}
type Derived extending Base;
""", r"""
type Child;
type Base {
link bar -> Child;
}
type Derived extending Base {
# also make the link 'required'
inherited required link bar -> Child;
}
"""])
@test.xfail('''
Fails in python:
AttributeError: 'NoneType' object has no attribute 'text'
''')
def test_migrations_equivalence_17(self):
self._assert_migration_equivalence([r"""
type Base {
property name := 'computable'
}
""", r"""
type Base {
# change a property from a computable to regular
property name -> str
}
"""])
@test.xfail('''
Fails in python:
AttributeError: 'NoneType' object has no attribute 'text'
''')
def test_migrations_equivalence_18(self):
self._assert_migration_equivalence([r"""
type Base {
property name := 'something'
}
""", r"""
type Base {
# change a property from a computable to regular with a default
property name -> str {
default := 'something'
}
}
"""])
def test_migrations_equivalence_19(self):
self._assert_migration_equivalence([r"""
type Base {
property name -> str
}
""", r"""
type Base {
# change a regular property to a computable
property name := 'computable'
}
"""])
@test.xfail('''
Fails in python:
AttributeError: 'NoneType' object has no attribute 'text'
''')
def test_migrations_equivalence_20(self):
self._assert_migration_equivalence([r"""
type Base {
property name -> str {
default := 'something'
}
}
""", r"""
type Base {
# change a regular property to a computable
property name := 'something'
}
"""])
def test_migrations_equivalence_21(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
""", r"""
type Base {
property foo -> str;
# add a property
property bar -> int64;
}
""", r"""
type Base {
# make the old property into a computable
property foo := <str>__source__.bar;
property bar -> int64;
}
"""])
def test_migrations_equivalence_22(self):
self._assert_migration_equivalence([r"""
type Base {
property foo -> str;
}
""", r"""
# rename the type, although this test doesn't ensure that
# renaming actually took place
type NewBase {
property foo -> str;
}
""", r"""
type NewBase {
property foo -> str;
# add a property
property bar -> int64;
}
""", r"""
type NewBase {
# drop 'foo'
property bar -> int64;
}
# add a view to emulate the original
view Base := (
SELECT NewBase {
foo := <str>.bar
}
);
"""])
@test.xfail('''
Fails in _assert_migration_consistency for the final state:
edb.errors.InvalidReferenceError: schema item 'std::Base'
does not exist
''')
def test_migrations_equivalence_23(self):
self._assert_migration_equivalence([r"""
type Child {
property foo -> str;
}
type Base {
link bar -> Child;
}
view View01 := (
SELECT Base {
child_foo := .bar.foo
}
);
""", r"""
type Child {
property foo -> str;
}
# exchange a type for a view
view Base := (
SELECT Child {
# bar is the same as the root object
bar := Child
}
);
view View01 := (
# now this view refers to another view
SELECT Base {
child_foo := .bar.foo
}
);
"""])
def test_migrations_equivalence_24(self):
self._assert_migration_equivalence([r"""
type Child;
type Base {
link bar -> Child;
}
""", r"""
type Child;
type Base {
# increase link cardinality
multi link bar -> Child;
}
"""])
def test_migrations_equivalence_25(self):
self._assert_migration_equivalence([r"""
type Child;
type Base {
multi link bar -> Child;
}
""", r"""
type Child;
type Base {
# reduce link cardinality
link bar -> Child;
}
""", r"""
type Child;
type Base {
link bar -> Child {
# further restrict the link
constraint exclusive
}
}
"""])
def test_migrations_equivalence_26(self):
self._assert_migration_equivalence([r"""
type Child;
type Parent {
link bar -> Child;
}
""", r"""
type Child;
type Parent {
link bar -> Child;
}
# derive a type
type DerivedParent extending Parent;
""", r"""
type Child;
type DerivedChild extending Child;
type Parent {
link bar -> Child;
}
# derive a type with a more restrictive link
type DerivedParent extending Parent {
inherited link bar -> DerivedChild;
}
"""])
    def test_migrations_equivalence_27(self):
        # Migration: stop extending the abstract base while keeping the
        # property locally, then rename that property on one type only.
        self._assert_migration_equivalence([r"""
            abstract type Named {
                property name -> str;
            }
            type Foo extending Named;
            type Bar extending Named;
        """, r"""
            abstract type Named {
                property name -> str;
            }
            # the types stop extending named, but retain the property
            # 'name'
            type Foo {
                property name -> str;
            };
            type Bar {
                property name -> str;
            };
        """, r"""
            abstract type Named {
                property name -> str;
            }
            type Foo {
                property name -> str;
            };
            type Bar {
                # rename 'name' to 'title'
                property title -> str;
            };
        """])
    @test.xfail('''
        edb.errors.SchemaError: cannot drop inherited link '__type__'
        of object type 'test::Child'
        DETAILS: link '__type__' of object type 'test::Child' is
        inherited from:
        - object type 'std::Object'
    ''')
    def test_migrations_equivalence_28(self):
        # Migration: drop the entire schema content.
        self._assert_migration_equivalence([r"""
            type Child {
                property foo -> str;
            }
        """, r"""
            # drop everything
        """])
    @test.xfail('''
        edb.errors.SchemaError: cannot drop inherited link '__type__'
        of object type 'test::Base'
        DETAILS: link '__type__' of object type 'test::Base' is
        inherited from:
        - object type 'test::Child'
    ''')
    def test_migrations_equivalence_29(self):
        # Migration: drop a schema that contains a view over a type.
        self._assert_migration_equivalence([r"""
            type Child {
                property foo -> str;
            }
            view Base := (
                SELECT Child {
                    bar := .foo
                }
            );
        """, r"""
            # drop everything
        """])
| StarcoderdataPython |
5050807 | <filename>QFTSampler/transformers/Affine.py<gh_stars>1-10
import numpy as np
from .BaseTransformer import BaseTransformer
from .Standardizer import Standardization
from .Momentum import Momentum
class AffineX(BaseTransformer):
    """Affine map from a single discretized input to a normalized
    2**M-dimensional complex amplitude vector.

    Parameters w and b hold the real and imaginary halves side by side
    (length 2 * 2**M); b starts as the |0> amplitude vector.
    """

    def __init__(self, N, M):
        # N: input resolution in bits; M: output register size in qubits.
        self.N = N
        self.M = M
        width = 2 ** M * 2  # real half followed by imaginary half
        self.w = np.zeros([width], dtype=np.float64)
        self.b = np.zeros([width])
        self.b[0] = 1.

    def phi(self, *arg, **argv):
        """Return one L2-normalized complex amplitude vector per sample
        in arg[0] (integer samples in [0, 2**N))."""
        # Renormalize the bias in place on every call, as before.
        self.b /= np.sum(np.square(self.b)) ** 0.5
        half = 2 ** self.M
        amplitudes = []
        for sample in arg[0]:
            # Rescale the sample from [0, 2**N) into [-1, 1).
            scaled = (sample / 2 ** self.N) * 2. - 1.
            linear = self.w * scaled + self.b
            vec = linear[:half] + linear[half:] * 1j
            vec /= np.sum(np.square(np.abs(vec))) ** 0.5
            amplitudes.append(vec)
        return np.array(amplitudes)

    def update(self, grad_phi, *arg, lr=1):
        """Averaged gradient step on w and b from the complex gradient
        w.r.t. phi (shape: samples x 2**M)."""
        batch = len(grad_phi)
        # Stack real and imaginary gradients: (samples, 2 * 2**M).
        grad_ri = np.concatenate([grad_phi.real, grad_phi.imag], axis=1)
        scaled = (arg[0] / 2 ** self.N) * 2. - 1.
        self.w -= lr * np.sum(scaled.reshape(-1, 1) * grad_ri, axis=0) / batch
        self.b -= lr * np.sum(grad_ri, axis=0) / batch
class Affine(BaseTransformer):
    """Multi-input affine transformer producing standardized complex
    amplitude vectors; the scaled input batch is cached for update()."""

    def __init__(self, N, M, in_dim=1):
        self.N = N
        self.M = M
        width = 2 ** self.M * 2
        self.w = np.zeros([in_dim, width], dtype=np.float64)
        self.b = np.zeros([width])
        self.b[0] = 1.
        self.stan = Standardization()
        self.x = None  # last scaled input batch, consumed by update()

    def phi(self, *arg, **argv):
        """Map the per-dimension sample arrays in *arg to standardized
        complex amplitudes of shape (batch, 2**M)."""
        half = 2 ** self.M
        inputs = np.array(arg).T  # (batch, in_dim)
        inputs = (inputs / 2 ** self.N) * 2. - 1.  # rescale to [-1, 1)
        self.x = inputs
        linear = inputs @ self.w + self.b
        out = linear[:, :half] + linear[:, half:] * 1j
        return self.stan(out)

    def clear(self, ):
        """Reset the standardizer's accumulated state."""
        self.stan.clear()

    def update(self, grad_phi, *arg, lr=1):
        """Backprop through the standardizer and apply one gradient step
        using the inputs cached by the preceding phi() call."""
        grad_c = self.stan.backward(grad_phi)
        grad = np.concatenate([grad_c.real, grad_c.imag], axis=1)
        cached = self.x
        # NOTE: gradients are intentionally NOT averaged over the batch
        # (the /len(grad_phi) division is commented out in the original).
        self.w -= lr * (cached.T @ grad)
        self.b -= lr * np.sum(grad, axis=0)
class AffineLinearBasis (BaseTransformer):
    """Affine transformer over a linear feature basis of the scaled input.
    Each sample yields a 2**M-dimensional complex amplitude vector
    b + features(x) @ w, passed through a Standardization layer; w and b
    are trained through Momentum-wrapped gradients.  With save_params=True
    a parameter snapshot is stored every self.I update() calls.
    """
    def __init__(self, N, M, save_params=False):
        # N: input resolution in bits (inputs lie in [0, 2**N)).
        # M: output register size; amplitude vectors have 2**M entries.
        in_dim = 1
        self.N = N
        self.M = M
        # Small positive random init; the commented-out alternative was a
        # zero init of the same shape.
        self.w = 0.01+0.01*np.random.rand(in_dim, 2**self.M*2).astype(np.float64)#np.zeros([in_dim, 2**self.M*2 ],dtype=np.float64)
        self.b = np.zeros([2**self.M*2])
        self.b[0] = 1.
        self.stan = Standardization()
        self.x = None
        # Momentum semantics are defined by the project's Momentum class.
        self.momentum_w = Momentum( self.w )
        self.momentum_b = Momentum( self.b )
        if save_params:
            # Snapshot buffers; 'i' counts update() calls and a snapshot
            # is taken every 'I' calls (see update()).
            self.w_list = []
            self.b_list = []
            self.i = 0
            self.I = 100
    def get_x(me,x):
        # Feature map: rescale x from [0, 1) to [-1, 1).
        # NOTE(review): receiver is named 'me' instead of 'self'.
        x1 = 2.*x -1.
        return np.concatenate( [x1] ,axis=1 )
    def phi(self, *arg, **argv):
        # Return standardized complex amplitudes for the samples in
        # arg[0]; requires argv['sample_num'] == batch size.
        x = arg[0].reshape(-1,1)/2**self.N #(samples,in_dim)
        x = self.get_x(x)
        tmp = self.b
        # Broadcast the (complex) bias to one row per sample.
        phi = np.array( [tmp[:2**self.M] + tmp[2**self.M:] * 1j] * argv['sample_num'] )
        #phi (samples,dim)
        xw = x@self.w # (samples,in_dim)@(in_dim,2*dim)->(samples,2*dim)
        wphi = xw[:,:2**self.M] + xw[:,2**self.M:] * 1j #(samples,dim)
        #self.cphi = phi.copy()
        #self.wphi = wphi.copy()
        #self.x = x.copy()
        #self.ww = self.w.copy()
        phi = phi + wphi
        phi = self.stan(phi)
        return phi
    def update(self, grad_phi,*arg, lr = 1): # grad_phi = samplex(2**M) matrix
        # Momentum-smoothed, batch-averaged gradient step on w and b.
        sample_num = len(arg[0])
        x = arg[0].reshape(-1,1)/2**self.N #(samples,in_dim=1)
        x = self.get_x(x)
        grad_phi = self.stan.backward(grad_phi)
        grad = np.concatenate([grad_phi.real,grad_phi.imag],axis=1) #(samples,2*dim)
        self.dw = x.T@grad/sample_num # (samples,in_dim).T @ (samples,2*dim) -> (in_dim,2*dim)
        self.db = np.average(grad, axis=0) #(2*dim)
        self.w -= lr* ( self.momentum_w(self.dw) )
        self.b -= lr* ( self.momentum_b(self.db) )
        # Snapshot parameters periodically when save_params was enabled.
        if hasattr(self, 'i'):
            if ( self.i % self.I == 0 ):
                self.w_list.append( self.w.copy() )
                self.b_list.append( self.b.copy() )
            self.i += 1
    def clear(self, ):
        # Reset the standardizer's accumulated state.
        self.stan.clear()
class AffineNonLinearBasis (BaseTransformer):
    """Affine transformer over a non-linear feature basis
    (x, x**2, x**3, sqrt(|x|)) of the scaled input.
    Identical structure to AffineLinearBasis except for in_dim=4 and the
    richer get_x() feature map: each sample yields a standardized
    2**M-dimensional complex amplitude vector b + features(x) @ w, with
    w and b trained through Momentum-wrapped gradients.  With
    save_params=True a parameter snapshot is stored every self.I calls.
    """
    def __init__(self, N, M, save_params=False):
        # N: input resolution in bits; M: output register size.
        in_dim = 4
        self.N = N
        self.M = M
        # Small positive random init; the commented-out alternative was a
        # zero init of the same shape.
        self.w = 0.01+0.01*np.random.rand(in_dim, 2**self.M*2).astype(np.float64)#np.zeros([in_dim, 2**self.M*2 ],dtype=np.float64)
        self.b = np.zeros([2**self.M*2])
        self.b[0] = 1.
        self.stan = Standardization()
        self.x = None
        # Momentum semantics are defined by the project's Momentum class.
        self.momentum_w = Momentum( self.w )
        self.momentum_b = Momentum( self.b )
        if save_params:
            # Snapshot buffers; 'i' counts update() calls and a snapshot
            # is taken every 'I' calls (see update()).
            self.w_list = []
            self.b_list = []
            self.i = 0
            self.I = 100
    def get_x(me,x):
        # Non-linear feature map of the rescaled input x1 in [-1, 1).
        # NOTE(review): receiver is named 'me' instead of 'self'.
        x1 = 2.*x -1.
        x2 = np.square(x1)
        x3 = x1 ** 3
        x4 = np.sqrt(np.abs(x1))
        return np.concatenate( [x1,x2,x3,x4] ,axis=1 )
    def phi(self, *arg, **argv):
        # Return standardized complex amplitudes for the samples in
        # arg[0]; requires argv['sample_num'] == batch size.
        x = arg[0].reshape(-1,1)/2**self.N #(samples,in_dim)
        x = self.get_x(x)
        tmp = self.b
        # Broadcast the (complex) bias to one row per sample.
        phi = np.array( [tmp[:2**self.M] + tmp[2**self.M:] * 1j] * argv['sample_num'] )
        #phi (samples,dim)
        xw = x@self.w # (samples,in_dim)@(in_dim,2*dim)->(samples,2*dim)
        wphi = xw[:,:2**self.M] + xw[:,2**self.M:] * 1j #(samples,dim)
        #self.cphi = phi.copy()
        #self.wphi = wphi.copy()
        #self.x = x.copy()
        #self.ww = self.w.copy()
        phi = phi + wphi
        phi = self.stan(phi)
        return phi
    def update(self, grad_phi,*arg, lr = 1): # grad_phi = samplex(2**M) matrix
        # Momentum-smoothed, batch-averaged gradient step on w and b.
        sample_num = len(arg[0])
        x = arg[0].reshape(-1,1)/2**self.N #(samples,in_dim=1)
        x = self.get_x(x)
        grad_phi = self.stan.backward(grad_phi)
        grad = np.concatenate([grad_phi.real,grad_phi.imag],axis=1) #(samples,2*dim)
        self.dw = x.T@grad/sample_num # (samples,in_dim).T @ (samples,2*dim) -> (in_dim,2*dim)
        self.db = np.average(grad, axis=0) #(2*dim)
        self.w -= lr* ( self.momentum_w(self.dw) )
        self.b -= lr* ( self.momentum_b(self.db) )
        # Snapshot parameters periodically when save_params was enabled.
        if hasattr(self, 'i'):
            if ( self.i % self.I == 0 ):
                self.w_list.append( self.w.copy() )
                self.b_list.append( self.b.copy() )
            self.i += 1
    def clear(self, ):
        # Reset the standardizer's accumulated state.
        self.stan.clear()
| StarcoderdataPython |
6640500 | '''
--- Day 13: Shuttle Search ---
Your ferry can make it safely to a nearby port, but it won't get much further. When you call to book another ship, you discover that no ships embark from that port to your vacation island. You'll need to get from the port to the nearest airport.
Fortunately, a shuttle bus service is available to bring you from the sea port to the airport! Each bus has an ID number that also indicates how often the bus leaves for the airport.
Bus schedules are defined based on a timestamp that measures the number of minutes since some fixed reference point in the past. At timestamp 0, every bus simultaneously departed from the sea port. After that, each bus travels to the airport, then various other locations, and finally returns to the sea port to repeat its journey forever.
The time this loop takes a particular bus is also its ID number: the bus with ID 5 departs from the sea port at timestamps 0, 5, 10, 15, and so on. The bus with ID 11 departs at 0, 11, 22, 33, and so on. If you are there when the bus departs, you can ride that bus to the airport!
Your notes (your puzzle input) consist of two lines. The first line is your estimate of the earliest timestamp you could depart on a bus. The second line lists the bus IDs that are in service according to the shuttle company; entries that show x must be out of service, so you decide to ignore them.
To save time once you arrive, your goal is to figure out the earliest bus you can take to the airport. (There will be exactly one such bus.)
For example, suppose you have the following notes:
939
7,13,x,x,59,x,31,19
Here, the earliest timestamp you could depart is 939, and the bus IDs in service are 7, 13, 59, 31, and 19. Near timestamp 939, these bus IDs depart at the times marked D:
time bus 7 bus 13 bus 59 bus 31 bus 19
929 . . . . .
930 . . . D .
931 D . . . D
932 . . . . .
933 . . . . .
934 . . . . .
935 . . . . .
936 . D . . .
937 . . . . .
938 D . . . .
939 . . . . .
940 . . . . .
941 . . . . .
942 . . . . .
943 . . . . .
944 . . D . .
945 D . . . .
946 . . . . .
947 . . . . .
948 . . . . .
949 . D . . .
The earliest bus you could take is bus ID 59. It doesn't depart until timestamp 944, so you would need to wait 944 - 939 = 5 minutes before it departs. Multiplying the bus ID by the number of minutes you'd need to wait gives 295.
What is the ID of the earliest bus you can take to the airport multiplied by the number of minutes you'll need to wait for that bus?
--- Part Two ---
The shuttle company is running a contest: one gold coin for anyone that can find the earliest timestamp such that the first bus ID departs at that time and each subsequent listed bus ID departs at that subsequent minute. (The first line in your input is no longer relevant.)
For example, suppose you have the same list of bus IDs as above:
7,13,x,x,59,x,31,19
An x in the schedule means there are no constraints on what bus IDs must depart at that time.
This means you are looking for the earliest timestamp (called t) such that:
Bus ID 7 departs at timestamp t.
Bus ID 13 departs one minute after timestamp t.
There are no requirements or restrictions on departures at two or three minutes after timestamp t.
Bus ID 59 departs four minutes after timestamp t.
There are no requirements or restrictions on departures at five minutes after timestamp t.
Bus ID 31 departs six minutes after timestamp t.
Bus ID 19 departs seven minutes after timestamp t.
The only bus departures that matter are the listed bus IDs at their specific offsets from t. Those bus IDs can depart at other times, and other bus IDs can depart at those times. For example, in the list above, because bus ID 19 must depart seven minutes after the timestamp at which bus ID 7 departs, bus ID 7 will always also be departing with bus ID 19 at seven minutes after timestamp t.
In this example, the earliest timestamp at which this occurs is 1068781:
time bus 7 bus 13 bus 59 bus 31 bus 19
1068773 . . . . .
1068774 D . . . .
1068775 . . . . .
1068776 . . . . .
1068777 . . . . .
1068778 . . . . .
1068779 . . . . .
1068780 . . . . .
1068781 D . . . .
1068782 . D . . .
1068783 . . . . .
1068784 . . . . .
1068785 . . D . .
1068786 . . . . .
1068787 . . . D .
1068788 D . . . D
1068789 . . . . .
1068790 . . . . .
1068791 . . . . .
1068792 . . . . .
1068793 . . . . .
1068794 . . . . .
1068795 D D . . .
1068796 . . . . .
1068797 . . . . .
In the above example, bus ID 7 departs at timestamp 1068788 (seven minutes after t). This is fine; the only requirement on that minute is that bus ID 19 departs then, and it does.
Here are some other examples:
The earliest timestamp that matches the list 17,x,13,19 is 3417.
67,7,59,61 first occurs at timestamp 754018.
67,x,7,59,61 first occurs at timestamp 779210.
67,7,x,59,61 first occurs at timestamp 1261476.
1789,37,47,1889 first occurs at timestamp 1202161486.
However, with so many bus IDs in your list, surely the actual earliest timestamp will be larger than 100000000000000!
What is the earliest timestamp such that all of the listed bus IDs depart at offsets matching their positions in the list?
'''
def parse(filename):
    """Parse the part-1 notes file.

    Line 1 holds the earliest possible departure timestamp; line 2 holds
    the comma-separated bus IDs ('x' entries are out of service and are
    skipped).  Returns (timestamp, [bus_id, ...]).

    Fix: the original re-parsed *every* line as a bus list, so the
    timestamp line was momentarily parsed as buses too — it only worked
    because the second line overwrote the first result.
    """
    with open(filename) as f:
        curr_ts = int(f.readline().strip())
        buses = [int(tok) for tok in f.readline().strip().split(',')
                 if tok != 'x']
    return curr_ts, buses
def puzzle1(filename):
    """Part 1: find the first bus departing at or after our arrival.

    Prints and returns bus_id * wait_minutes.

    Fixes two defects in the original:
    * the wait was computed as `bus - curr_ts % bus`, which yields `bus`
      instead of 0 when curr_ts is an exact multiple of the bus ID;
    * min_time was seeded with the first bus *ID* rather than its wait
      time, which could crown the wrong bus when every wait exceeded
      buses[0].
    """
    curr_ts, buses = parse(filename)
    # (-curr_ts) % bus == minutes until the next departure (0 if on time).
    soonest_bus = min(buses, key=lambda bus: (-curr_ts) % bus)
    min_time = (-curr_ts) % soonest_bus
    print(f'Soonest bus #{soonest_bus} will be here in {min_time}')
    print(f'Answer = {soonest_bus * min_time}')
    return soonest_bus * min_time
# puzzle1('../data/day13.txt')
# -----------------------------------------------------
from functools import reduce
def chinese_remainder(n, a):
    """Solve x ≡ a[i] (mod n[i]) for pairwise-coprime moduli n via the
    Chinese Remainder Theorem; returns the unique solution mod prod(n)."""
    prod = reduce(lambda acc, m: acc * m, n)
    total = 0
    for modulus, residue in zip(n, a):
        partial = prod // modulus
        total += residue * mul_inv(partial, modulus) * partial
    return total % prod
def mul_inv(a, b):
    """Return the modular multiplicative inverse of a modulo b.

    Iterative extended Euclidean algorithm; assumes gcd(a, b) == 1.
    """
    if b == 1:
        return 1
    modulus = b
    x_prev, x_curr = 0, 1
    while a > 1:
        quotient, remainder = divmod(a, b)
        a, b = b, remainder
        x_prev, x_curr = x_curr - quotient * x_prev, x_prev
    return x_curr + modulus if x_curr < 0 else x_curr
def parse2(filename):
    """Return the comma-separated tokens of the file's second line (bus
    IDs and 'x' placeholders), or None if the file has fewer than two
    lines."""
    with open(filename) as f:
        for lineno, line in enumerate(f, start=1):
            if lineno == 2:
                return line.strip().split(',')
def puzzle2(filename):
    """Part 2: earliest timestamp t such that each listed bus departs at
    t + its offset in the list.

    Each bus at list offset k imposes t ≡ -k (mod bus_id); the combined
    system is solved with the Chinese Remainder Theorem.  Prints and
    returns the answer (improvement: the original only printed it).
    """
    buses = parse2(filename)
    divisors = []
    remainders = []
    for offset, bus in enumerate(buses):
        if bus != 'x':
            divisor = int(bus)
            divisors.append(divisor)
            # t ≡ -offset (mod divisor), normalized into [0, divisor).
            remainders.append((divisor - offset) % divisor)
    answer = chinese_remainder(divisors, remainders)
    print(answer)
    return answer
# Script entry point: solve part 2 against the puzzle input file.
puzzle2('../data/day13.txt')
import ctypes
import faulthandler
# Install fault handlers so the interpreter dumps a Python traceback on a
# hard crash (SIGSEGV etc.) instead of dying silently.
faulthandler.enable()
# Get memory address 0, your kernel shouldn't allow this:
# NOTE(review): this dereferences NULL on purpose and is expected to
# segfault, demonstrating faulthandler's crash report.
ctypes.string_at(0)
| StarcoderdataPython |
11337373 | from cassandra.cluster import Cluster
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
from colorclass import Color, Windows
from terminaltables import SingleTable
import random
def sigmoid(x):
    """Logistic sigmoid activation, mapping any real x into (0, 1)."""
    denom = 1.0 + np.exp(-x)
    return 1.0 / denom
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its OUTPUT value.

    NeuralNetwork applies activation_prime to already-activated values
    a[l], and tanh_derivative follows the same convention (1 - x**2 for
    x = tanh(z)).  The original sigmoid(x) * (1.0 - sigmoid(x)) treated x
    as the pre-activation, so during backpropagation it double-applied
    the sigmoid.  For x = sigmoid(z), d sigmoid/dz = x * (1 - x).
    """
    return x * (1.0 - x)
def tanh(x):
    """Hyperbolic tangent activation (thin delegate to NumPy)."""
    return np.tanh(x)
def tanh_derivative(x):
    """Derivative of tanh in terms of its output: for x = tanh(z),
    d tanh/dz = 1 - x**2."""
    return 1.0 - x * x
class NeuralNetwork:
    """Fully-connected feed-forward network trained with on-line
    (single random sample per iteration) backpropagation.

    `layers` lists the layer sizes, e.g. [21, 15, 15, 15, 1]; weights are
    initialized uniformly in [-1, 1) with an extra bias unit folded into
    every layer except the output.

    NOTE(review): fit() reads and appends to the module-level globals
    `values`, `Z`, `sol` and `values_predicted` to record train/test
    error curves — this class is not usable standalone without them.
    """
    def __init__(self, layers, activation='tanh'):
        # Any activation other than 'sigmoid'/'tanh' leaves the
        # activation attributes unset; fit()/predict() would then raise
        # AttributeError.
        if activation == 'sigmoid':
            self.activation = sigmoid
            self.activation_prime = sigmoid_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_prime = tanh_derivative
        # Init weights
        self.weights = []
        self.deltas = []
        # Set random values
        for i in range(1, len(layers) - 1):
            r = 2 * np.random.random((layers[i-1] + 1, layers[i] + 1)) -1
            self.weights.append(r)
        # Output layer weights (no bias column on the output side).
        r = 2 * np.random.random((layers[i] + 1, layers[i+1])) - 1
        self.weights.append(r)
    def fit(self, X, y, learning_rate=0.2, epochs=100000):
        """Train on (X, y); every epochs/200 iterations append the
        train/test error rates to the globals values/values_predicted."""
        origin = X
        # Prepend a bias column of ones to the inputs.
        ones = np.atleast_2d(np.ones(X.shape[0]))
        X = np.concatenate((ones.T, X), axis=1)
        for k in range(epochs):
            # Pick one random training sample per iteration.
            i = np.random.randint(X.shape[0])
            a = [X[i]]
            # Forward pass: record every layer's activation.
            for l in range(len(self.weights)):
                dot_value = np.dot(a[l], self.weights[l])
                activation = self.activation(dot_value)
                a.append(activation)
            error = y[i] - a[-1]
            # activation_prime is applied to the *activated* values a[l].
            deltas = [error * self.activation_prime(a[-1])]
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_prime(a[l]))
            # NOTE(review): the stored list is the same object reversed on
            # the next line; this history also grows without bound.
            self.deltas.append(deltas)
            deltas.reverse()
            # Backpropagation
            # (loop variable 'i' is reused here, shadowing the sample index)
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)
            if k % 10000 == 0: print('epochs:', k)
            # Generate the data point for the training-error chart.
            if k % (epochs / 200) == 0:
                global values
                total_errors = 0
                total_items = len(origin)
                for i, item in enumerate(origin):
                    coef = self.predict(item)
                    if coef[0] < 0 and y[i] == 1 or coef[0] > 0 and y[i] == -1:
                        total_errors += 1
                pct_errors = round((total_errors * 100) / total_items, 2)
                values.append(pct_errors)
                # Prediction error on the held-out set (globals Z / sol).
                global Z
                global sol
                global values_predicted
                total_errors = 0
                total_items = len(Z)
                for i, item in enumerate(Z):
                    coef = self.predict(item)
                    if coef[0] < 0 and sol[i] == 1 or coef[0] > 0 and sol[i] == -1:
                        total_errors += 1
                pct_errors = round((total_errors * 100) / total_items, 2)
                values_predicted.append(pct_errors)
    def predict(self, x):
        """Forward pass on one (un-biased) sample; returns the output
        layer activation."""
        # NOTE(review): 'ones' is computed but never used — the bias unit
        # is actually prepended via np.ones(1) on the next line.
        ones = np.atleast_2d(np.ones(x.shape[0]))
        a = np.concatenate((np.ones(1).T, np.array(x)), axis=0)
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
    def print_weights(self):
        """Print every connection weight matrix (header string is
        Catalan: 'List of connection weights')."""
        print("Llistat de pesos de connexions")
        for i in range(len(self.weights)):
            print(self.weights[i])
    def get_deltas(self):
        """Return the accumulated per-iteration delta history from fit()."""
        return self.deltas
# Variables
Windows.enable(auto_colors=True, reset_atexit=True) # Does nothing if not on Windows
rounds = ["1R", "2R", "3R", "R16", "QF", "SF", "F"]
categories = ["250", "500", "1000", "grandslam"]
surfaces = ["H", "C", "G", "I"]
intervals = {"<1.50": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"1.50-1.99": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"2.00-2.49": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"2.50-2.99": {'units': 0.0, 'stake': 0, 'yield': 0.0},
">2.99": {'units': 0.0, 'stake': 0, 'yield': 0.0}}
value_intervals = {"<1.01": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"1.01-1.50": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"1.51-2.00": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"2.01-2.50": {'units': 0.0, 'stake': 0, 'yield': 0.0},
"2.51-3.00": {'units': 0.0, 'stake': 0, 'yield': 0.0},
">3.00": {'units': 0.0, 'stake': 0, 'yield': 0.0}}
games_train = []
games = []
winners_train = []
games_predict = []
predict = []
winners_sim = []
dates_train = []
dates_predict = []
tournaments_train = []
tournaments_predict = []
games_names_train = []
games_names_predict = []
values = []
values_predicted = []
num_epochs = 50000000
nn = NeuralNetwork([21, 15, 15, 15, 1], activation = 'tanh')
# Open connection
cluster = Cluster(["127.0.0.1"])
session = cluster.connect("beast")
# Get games from DB
'''
Torneig - Ronda - Local - Edat - Rank - Race - RankMax - H2H - %Any - %AnySup - %SupCar - 3Mesos - PtsDef - Odd
'''
query = "SELECT COUNT(*) as num_games FROM game_train"
row = session.execute(query)
num_games = row[0].num_games
num_games_train = round(num_games * 0.8, 0)
index = 0
season = 2014
while season < 2016:
for week in range(1, 45):
query = "SELECT game_season, game_tournament, game_week, game_round, game_surface, game_category, game_sets, game_points, game_date, game_rank1, game_rank2, game_race1, game_race2, game_rankmax1, game_rankmax2, game_age1, game_age2, game_h2h1, game_h2h2, game_h2h_year1, game_h2h_year2, game_h2h_surface1, game_h2h_surface2, game_h2h_surface_year1, game_h2h_surface_year2, game_surface1, game_surface2, game_surface_year1, game_surface_year2, game_hand1, game_hand2, game_home1, game_home2, game_3months1, game_3months2, game_10streak1, game_10streak2, game_gp1m1, game_gp1m2, game_gp3m1, game_gp3m2, game_gp6m1, game_gp6m2, game_pts_def1, game_pts_def2, game_player1, game_player2, game_winner, game_odd1, game_odd2 FROM game_train WHERE game_season = " + str(season) + " AND game_week = " + str(week) + " ORDER BY game_id ASC"
games_db = session.execute(query)
for game_db in games_db:
game = []
game.append(round((rounds.index(game_db.game_round) + 1) / 7, 2))
game.append(round((surfaces.index(game_db.game_surface) + 1) / 4, 2))
game.append(round((categories.index(game_db.game_category) + 1) / 4, 2))
if game_db.game_rank1 == 0 or game_db.game_rank2 == 0:
game.append(0)
else:
if game_db.game_rank1 < game_db.game_rank2:
game.append(-1)
else:
game.append(1)
if game_db.game_race1 == 0 or game_db.game_race2 == 0:
game.append(0)
else:
if game_db.game_race1 < game_db.game_race2:
game.append(-1)
else:
game.append(1)
if game_db.game_rankmax1 == 0 or game_db.game_rankmax2 == 0 or game_db.game_rankmax1 == game_db.game_rankmax2:
game.append(0)
else:
if game_db.game_rankmax1 < game_db.game_rankmax2:
game.append(-1)
else:
game.append(1)
if game_db.game_h2h1 == -1 or game_db.game_h2h2 == -1:
game.append(0)
else:
if game_db.game_h2h1 > game_db.game_h2h2:
game.append(-1)
else:
game.append(1)
if game_db.game_h2h_year1 == -1 or game_db.game_h2h_year2 == -1:
game.append(0)
else:
if game_db.game_h2h_year1 > game_db.game_h2h_year2:
game.append(-1)
else:
game.append(1)
if game_db.game_h2h_surface1 == -1 or game_db.game_h2h_surface2 == -1:
game.append(0)
else:
if game_db.game_h2h_surface1 > game_db.game_h2h_surface2:
game.append(-1)
else:
game.append(1)
if game_db.game_surface1 == -1 or game_db.game_surface2 == -1 or game_db.game_surface1 == game_db.game_surface2:
game.append(0)
else:
if game_db.game_surface1 > game_db.game_surface2:
game.append(-1)
else:
game.append(1)
if game_db.game_surface_year1 == -1 or game_db.game_surface_year2 == -1 or game_db.game_surface_year1 == game_db.game_surface_year2:
game.append(0)
else:
if game_db.game_surface_year1 > game_db.game_surface_year2:
game.append(-1)
else:
game.append(1)
if game_db.game_home1 == game_db.game_home2:
game.append(0)
else:
if game_db.game_home1:
game.append(-1)
else:
game.append(1)
evol_3months1 = round((game_db.game_3months1 - game_db.game_rank1) * 100 / max(game_db.game_3months1, game_db.game_rank1), 0)
evol_3months2 = round((game_db.game_3months2 - game_db.game_rank2) * 100 / max(game_db.game_3months2, game_db.game_rank2), 0)
if evol_3months1 == evol_3months2:
game.append(0)
else:
if evol_3months1 > evol_3months2:
game.append(-1)
else:
game.append(1)
if game_db.game_10streak1 == game_db.game_10streak2:
game.append(0)
else:
if game_db.game_10streak1 > game_db.game_10streak2:
game.append(-1)
else:
game.append(1)
if game_db.game_gp1m1 == game_db.game_gp1m2:
game.append(0)
else:
if game_db.game_gp1m1 > game_db.game_gp1m2:
game.append(-1)
else:
game.append(1)
if game_db.game_gp3m1 == game_db.game_gp3m2:
game.append(0)
else:
if game_db.game_gp3m1 > game_db.game_gp3m2:
game.append(-1)
else:
game.append(1)
if game_db.game_gp6m1 == game_db.game_gp6m2:
game.append(0)
else:
if game_db.game_gp6m1 > game_db.game_gp6m2:
game.append(-1)
else:
game.append(1)
if game_db.game_pts_def1 == game_db.game_pts_def2:
game.append(0)
else:
if game_db.game_pts_def1 > game_db.game_pts_def2:
game.append(-1)
else:
game.append(1)
game.append(round(game_db.game_points / 2000, 4))
game.append(round(game_db.game_odd1 / 50, 4))
game.append(round(game_db.game_odd2 / 50, 4))
query_player1 = "SELECT player_name FROM player_by_atpid WHERE player_atpwt_id = '" + game_db.game_player1 + "'"
player1 = session.execute(query_player1)
player1_name = player1[0].player_name
query_player2 = "SELECT player_name FROM player_by_atpid WHERE player_atpwt_id = '" + game_db.game_player2 + "'"
player2 = session.execute(query_player2)
player2_name = player2[0].player_name
# Turn players randomly
turn = random.randint(0, 1)
if turn == 1:
game[3] *= -1
game[4] *= -1
game[5] *= -1
game[6] *= -1
game[7] *= -1
game[8] *= -1
game[9] *= -1
game[10] *= -1
game[11] *= -1
game[12] *= -1
game[13] *= -1
game[14] *= -1
game[15] *= -1
game[16] *= -1
game[17] *= -1
odd1 = game[19]
game[19] = game[20]
game[20] = odd1
aux_name = player1_name
player1_name = player2_name
player2_name = aux_name
winner = 1
else:
winner = -1
if index < num_games_train:
full_game = game.copy()
games.append(full_game)
#del game[0]
games_train.append(game)
games_names_train.append(player1_name + " - " + player2_name)
dates_train.append(str(game_db.game_date))
tournaments_train.append(game_db.game_tournament.capitalize())
winners_train.append([winner])
else:
full_game = game.copy()
predict.append(full_game)
#del game[0]
games_predict.append(game)
games_names_predict.append(player1_name + " - " + player2_name)
dates_predict.append(str(game_db.game_date))
tournaments_predict.append(game_db.game_tournament.capitalize())
winners_sim.append([winner])
index += 1
season += 1
np.set_printoptions(suppress=True)
X = np.array(games_train)
y = np.array(winners_train)
Z = np.array(games_predict)
sol = np.array(winners_sim)
# Previous prediction
index = 0
for e in X:
print("X:", games_names_train[index], "Sol:", y[index], "Network:", nn.predict(e))
index += 1
nn.fit(X, y, learning_rate = 0.01, epochs = num_epochs)
# Prediction after training
index = 0
train_table = []
train_table.append([Color('{autoyellow}Game{/autoyellow}'),
Color('{autoyellow}Date{/autoyellow}'),
Color('{autoyellow}Tournament{/autoyellow}'),
Color('{autoyellow}Round{/autoyellow}'),
Color('{autoyellow}Odd 1{/autoyellow}'),
Color('{autoyellow}Odd 2{/autoyellow}'),
Color('{autoyellow}Pick{/autoyellow}'),
Color('{autoyellow}Prob{/autoyellow}'),
Color('{autoyellow}Value{/autoyellow}'),
Color('{autoyellow}Res{/autoyellow}'),
Color('{autoyellow}Uts{/autoyellow}'),
Color('{autoyellow}Coef{/autoyellow}')])
units = 0.0
stake = 0
hits = 0
different_probs = []
for e in X:
stake += 1
row = []
row.append(games_names_train[index])
players = games_names_train[index].split(" - ")
row.append(dates_train[index][:10])
row.append(tournaments_train[index])
row.append(rounds[int(round(games[index][0] * 6, 0))])
odd1 = round(games[index][19] * 50, 2)
odd2 = round(games[index][20] * 50, 2)
row.append(odd1)
row.append(odd2)
coef = nn.predict(e)
if round(coef[0], 2) not in different_probs:
different_probs.append(round(coef[0], 2))
prob2 = round((coef[0] + 1) * 100 / 2, 2)
prob1 = round(100 - prob2, 2)
inv_odd1 = 1 / odd1
inv_odd2 = 1 / odd2
inv_tot = inv_odd1 + inv_odd2
prob_bookmark1 = round(inv_odd1 * (100 - (inv_tot * 100 - 100)), 2)
prob_bookmark2 = round(inv_odd2 * (100 - (inv_tot * 100 - 100)), 2)
new_odd1 = round(100 / prob_bookmark1, 2)
new_odd2 = round(100 / prob_bookmark2, 2)
value1 = round(odd1 * prob1 / 100, 2)
value2 = round(odd2 * prob2 / 100, 2)
if value1 >= value2:
units_pick = odd1 - 1
row.append(players[0])
row.append(str(prob1) + "%")
row.append(str(value1))
if y[index] == -1:
result = "W"
else:
result = "L"
else:
units_pick = odd2 - 1
row.append(players[1])
row.append(str(prob2) + "%")
row.append(str(value2))
if y[index] == -1:
result = "L"
else:
result = "W"
if result == "W":
hits += 1
row.append(Color('{autogreen}W{/autogreen}'))
row.append("+" + str(round(units_pick, 2)))
units += round(units_pick, 2)
# Odds intervals
if units_pick < 1.50:
intervals['<1.50']['units'] += round(units_pick, 2)
intervals['<1.50']['stake'] += 1
intervals['<1.50']['yield'] = round(intervals['<1.50']['units'] * 100 / intervals['<1.50']['stake'], 2)
elif units_pick < 2.00:
intervals['1.50-1.99']['units'] += round(units_pick, 2)
intervals['1.50-1.99']['stake'] += 1
intervals['1.50-1.99']['yield'] = round(intervals['1.50-1.99']['units'] * 100 / intervals['1.50-1.99']['stake'], 2)
elif units_pick < 2.49:
intervals['2.00-2.49']['units'] += round(units_pick, 2)
intervals['2.00-2.49']['stake'] += 1
intervals['2.00-2.49']['yield'] = round(intervals['2.00-2.49']['units'] * 100 / intervals['2.00-2.49']['stake'], 2)
elif units_pick < 2.99:
intervals['2.50-2.99']['units'] += round(units_pick, 2)
intervals['2.50-2.99']['stake'] += 1
intervals['2.50-2.99']['yield'] = round(intervals['2.50-2.99']['units'] * 100 / intervals['2.50-2.99']['stake'], 2)
else:
intervals['>2.99']['units'] += round(units_pick, 2)
intervals['>2.99']['stake'] += 1
intervals['>2.99']['yield'] = round(intervals['>2.99']['units'] * 100 / intervals['>2.99']['stake'], 2)
# Value intervals
if float(row[8]) < 1.01:
value_intervals['<1.01']['units'] += round(units_pick, 2)
value_intervals['<1.01']['stake'] += 1
value_intervals['<1.01']['yield'] = round(value_intervals['<1.01']['units'] * 100 / value_intervals['<1.01']['stake'], 2)
elif float(row[8]) < 1.51:
value_intervals['1.01-1.50']['units'] += round(units_pick, 2)
value_intervals['1.01-1.50']['stake'] += 1
value_intervals['1.01-1.50']['yield'] = round(value_intervals['1.01-1.50']['units'] * 100 / value_intervals['1.01-1.50']['stake'], 2)
elif float(row[8]) < 2.01:
value_intervals['1.51-2.00']['units'] += round(units_pick, 2)
value_intervals['1.51-2.00']['stake'] += 1
value_intervals['1.51-2.00']['yield'] = round(value_intervals['1.51-2.00']['units'] * 100 / value_intervals['1.51-2.00']['stake'], 2)
elif float(row[8]) < 2.51:
value_intervals['2.01-2.50']['units'] += round(units_pick, 2)
value_intervals['2.01-2.50']['stake'] += 1
value_intervals['2.01-2.50']['yield'] = round(value_intervals['2.01-2.50']['units'] * 100 / value_intervals['2.01-2.50']['stake'], 2)
elif float(row[8]) < 3.01:
value_intervals['2.51-3.00']['units'] += round(units_pick, 2)
value_intervals['2.51-3.00']['stake'] += 1
value_intervals['2.51-3.00']['yield'] = round(value_intervals['2.51-3.00']['units'] * 100 / value_intervals['2.51-3.00']['stake'], 2)
else:
value_intervals['>3.00']['units'] += round(units_pick, 2)
value_intervals['>3.00']['stake'] += 1
value_intervals['>3.00']['yield'] = round(value_intervals['>3.00']['units'] * 100 / value_intervals['>3.00']['stake'], 2)
else:
row.append(Color('{autored}L{/autored}'))
row.append(-1)
units -= 1
# Odds intervals
if units_pick < 1.50:
intervals['<1.50']['units'] -= 1
intervals['<1.50']['stake'] += 1
intervals['<1.50']['yield'] = round(intervals['<1.50']['units'] * 100 / intervals['<1.50']['stake'], 2)
elif units_pick < 2.00:
intervals['1.50-1.99']['units'] -= 1
intervals['1.50-1.99']['stake'] += 1
intervals['1.50-1.99']['yield'] = round(intervals['1.50-1.99']['units'] * 100 / intervals['1.50-1.99']['stake'], 2)
elif units_pick < 2.49:
intervals['2.00-2.49']['units'] -= 1
intervals['2.00-2.49']['stake'] += 1
intervals['2.00-2.49']['yield'] = round(intervals['2.00-2.49']['units'] * 100 / intervals['2.00-2.49']['stake'], 2)
elif units_pick < 2.99:
intervals['2.50-2.99']['units'] -= 1
intervals['2.50-2.99']['stake'] += 1
intervals['2.50-2.99']['yield'] = round(intervals['2.50-2.99']['units'] * 100 / intervals['2.50-2.99']['stake'], 2)
else:
intervals['>2.99']['units'] -= 1
intervals['>2.99']['stake'] += 1
intervals['>2.99']['yield'] = round(intervals['>2.99']['units'] * 100 / intervals['>2.99']['stake'], 2)
# Value intervals
if float(row[8]) < 1.01:
value_intervals['<1.01']['units'] -= 1
value_intervals['<1.01']['stake'] += 1
value_intervals['<1.01']['yield'] = round(value_intervals['<1.01']['units'] * 100 / value_intervals['<1.01']['stake'], 2)
elif float(row[8]) < 1.51:
value_intervals['1.01-1.50']['units'] -= 1
value_intervals['1.01-1.50']['stake'] += 1
value_intervals['1.01-1.50']['yield'] = round(value_intervals['1.01-1.50']['units'] * 100 / value_intervals['1.01-1.50']['stake'], 2)
elif float(row[8]) < 2.01:
value_intervals['1.51-2.00']['units'] -= 1
value_intervals['1.51-2.00']['stake'] += 1
value_intervals['1.51-2.00']['yield'] = round(value_intervals['1.51-2.00']['units'] * 100 / value_intervals['1.51-2.00']['stake'], 2)
elif float(row[8]) < 2.51:
value_intervals['2.01-2.50']['units'] -= 1
value_intervals['2.01-2.50']['stake'] += 1
value_intervals['2.01-2.50']['yield'] = round(value_intervals['2.01-2.50']['units'] * 100 / value_intervals['2.01-2.50']['stake'], 2)
elif float(row[8]) < 3.01:
value_intervals['2.51-3.00']['units'] -= 1
value_intervals['2.51-3.00']['stake'] += 1
value_intervals['2.51-3.00']['yield'] = round(value_intervals['2.51-3.00']['units'] * 100 / value_intervals['2.51-3.00']['stake'], 2)
else:
value_intervals['>3.00']['units'] -= 1
value_intervals['>3.00']['stake'] += 1
value_intervals['>3.00']['yield'] = round(value_intervals['>3.00']['units'] * 100 / value_intervals['>3.00']['stake'], 2)
row.append(coef[0])
train_table.append(row)
index += 1
table_instance = SingleTable(train_table, Color('{autocyan} Prediction with training games {/autocyan}'))
table_instance.inner_heading_row_border = False
table_instance.inner_row_border = True
table_instance.justify_columns = {0: 'left', 1: 'center', 2: 'center', 3: 'center', 4: 'center', 5: 'center', 6: 'left', 7: 'center', 8: 'center', 9: 'center', 10: 'center', 11: 'center'}
print("\n" + table_instance.table)
print("\n" + Color('{autogreen}Units: {/autogreen}') + str(round(units, 2)))
print(Color('{autogreen}Hits: {/autogreen}') + str(hits))
print(Color('{autogreen}Stake: {/autogreen}') + str(stake))
print(Color('{autogreen}Yield: {/autogreen}') + str(round(units * 100 / stake, 2)) + "%")
print(Color('{autogreen}Different probabilities: {/autogreen}') + str(len(different_probs)))
print(Color('{autogreen}Value intervals: {/autogreen}'))
pprint(value_intervals)
print(Color('{autogreen}Odd intervals: {/autogreen}'))
pprint(intervals)
# ---- Prediction with new games -------------------------------------------
# Same report as for the training games above: for every upcoming game the
# network output is turned into win probabilities, the side with the higher
# "value" (odd * prob / 100) is picked, and picks passing the value/odds
# filter are settled and aggregated per odds bucket, per value bucket and
# per (odds, value) bucket pair.  The copy-pasted bucket-update cascades of
# the original have been collapsed into the helpers below (same arithmetic,
# same keys, same results).

def _settle_bet(bucket, delta):
    # Fold one settled bet (profit `delta`, stake 1) into an aggregation
    # bucket and refresh its yield (units per 100 staked).
    bucket['units'] += delta
    bucket['stake'] += 1
    bucket['yield'] = round(bucket['units'] * 100 / bucket['stake'], 2)

def _odd_bucket(units_pick):
    # Bucket label for the picked odd, keyed on its profit (odd - 1).
    if units_pick < 0.50:
        return '<1.50'
    if units_pick < 1.00:
        return '1.50-1.99'
    if units_pick < 1.50:
        return '2.00-2.49'
    if units_pick < 2.00:
        return '2.50-2.99'
    return '>2.99'

def _value_bucket(value):
    # Bucket label for the pick's value (odd * prob / 100).
    if value < 1.01:
        return '<1.01'
    if value < 1.51:
        return '1.01-1.50'
    if value < 2.01:
        return '1.51-2.00'
    if value < 2.51:
        return '2.01-2.50'
    if value < 3.01:
        return '2.51-3.00'
    return '>3.00'

_ODD_BUCKETS = ('<1.50', '1.50-1.99', '2.00-2.49', '2.50-2.99', '>2.99')
_VALUE_BUCKETS = ('<1.01', '1.01-1.50', '1.51-2.00', '2.01-2.50', '2.51-3.00', '>3.00')

def _fresh_buckets(labels):
    # One independent {units, stake, yield} accumulator per label.
    return {label: {'units': 0.0, 'stake': 0, 'yield': 0.0} for label in labels}

intervals = _fresh_buckets(_ODD_BUCKETS)
value_intervals = _fresh_buckets(_VALUE_BUCKETS)
odd_value_intervals = _fresh_buckets(
    'Odd %s / Value %s' % (o, v) for o in _ODD_BUCKETS for v in _VALUE_BUCKETS)
prediction_table = [[Color('{autoyellow}Game{/autoyellow}'),
                     Color('{autoyellow}Date{/autoyellow}'),
                     Color('{autoyellow}Tournament{/autoyellow}'),
                     Color('{autoyellow}Round{/autoyellow}'),
                     Color('{autoyellow}Odd 1{/autoyellow}'),
                     Color('{autoyellow}Odd 2{/autoyellow}'),
                     Color('{autoyellow}Pick{/autoyellow}'),
                     Color('{autoyellow}Prob{/autoyellow}'),
                     Color('{autoyellow}Value{/autoyellow}'),
                     Color('{autoyellow}Res{/autoyellow}'),
                     Color('{autoyellow}Uts{/autoyellow}'),
                     Color('{autoyellow}Coef{/autoyellow}')]]
units = 0.0
stake = 0
hits = 0
different_probs = []
index = 0
for e in Z:
    players = games_names_predict[index].split(" - ")
    # Features 19/20 hold the two odds scaled down by 50 -- TODO confirm
    # against the feature-building code.
    odd1 = round(predict[index][19] * 50, 2)
    odd2 = round(predict[index][20] * 50, 2)
    row = [games_names_predict[index],
           dates_predict[index][:10],
           tournaments_predict[index],
           # Feature 0 holds the round index scaled down by 6.
           rounds[int(round(predict[index][0] * 6, 0))],
           odd1,
           odd2]
    coef = nn.predict(e)
    if round(coef[0], 2) not in different_probs:
        different_probs.append(round(coef[0], 2))
    # Network output lives in [-1, 1]; map it to win probabilities
    # (player 2's probability grows with the output, player 1 is the
    # complement).
    prob2 = round((coef[0] + 1) * 100 / 2, 2)
    prob1 = round(100 - prob2, 2)
    # NOTE(review): the original also derived margin-free bookmaker odds
    # (new_odd1/new_odd2) here but never used them; that dead code (which
    # could divide by zero on a zero odd) has been dropped.
    value1 = round(odd1 * prob1 / 100, 2)
    value2 = round(odd2 * prob2 / 100, 2)
    # Pick the side with the higher value; sol[index] == -1 means player 1 won.
    if value1 >= value2:
        units_pick = odd1 - 1
        row += [players[0], str(prob1) + "%", str(value1)]
        result = "W" if sol[index] == -1 else "L"
    else:
        units_pick = odd2 - 1
        row += [players[1], str(prob2) + "%", str(value2)]
        result = "L" if sol[index] == -1 else "W"
    # Betting filter: value in (1.50, 2.00] and profit (odd - 1) in
    # [0.50, 2.00).  Everything else is listed but not settled.
    if 1.50 < float(row[8]) <= 2.00 and 0.50 <= float(units_pick) < 2.00:
        stake += 1
        if result == "W":
            hits += 1
            delta = round(units_pick, 2)
            row.append(Color('{autogreen}W{/autogreen}'))
            row.append("+" + str(delta))
        else:
            delta = -1
            row.append(Color('{autored}L{/autored}'))
            row.append(-1)
        units += delta
        okey = _odd_bucket(float(units_pick))
        vkey = _value_bucket(float(row[8]))
        _settle_bet(intervals[okey], delta)
        _settle_bet(odd_value_intervals['Odd %s / Value %s' % (okey, vkey)], delta)
        _settle_bet(value_intervals[vkey], delta)
    row.append(coef[0])
    prediction_table.append(row)
    index += 1
# ---- Report: betting performance on the new games -------------------------
table_instance = SingleTable(prediction_table, Color('{autocyan} Prediction with new games {/autocyan}'))
table_instance.inner_heading_row_border = False
table_instance.inner_row_border = True
# Game (0) and Pick (6) columns left-aligned, everything else centered.
table_instance.justify_columns = {0: 'left', 1: 'center', 2: 'center', 3: 'center', 4: 'center', 5: 'center', 6: 'left', 7: 'center', 8: 'center', 9: 'center', 10: 'center', 11: 'center'}
print("\n" + table_instance.table)
print("\n" + Color('{autogreen}Units: {/autogreen}') + str(round(units, 2)))
print(Color('{autogreen}Hits: {/autogreen}') + str(hits))
print(Color('{autogreen}Stake: {/autogreen}') + str(stake))
# NOTE(review): divides by `stake` -- assumes at least one settled bet,
# exactly as the original did.
print(Color('{autogreen}Yield: {/autogreen}') + str(round(units * 100 / stake, 2)) + "%")
print(Color('{autogreen}Different probabilities: {/autogreen}') + str(len(different_probs)))
print(Color('{autogreen}Value intervals: {/autogreen}'))
pprint(value_intervals)
print(Color('{autogreen}Odd intervals: {/autogreen}'))
pprint(intervals)
print(Color('{autogreen}Odd/Value intervals: {/autogreen}'))
pprint(odd_value_intervals)
# Chart: training error (default color) and prediction error (red) over time.
plt.figure("The Beast Training")
plt.title("Test 22/04/2019")
# 200 x positions built by the same floating-point accumulation as the
# error samples, so both series keep matching lengths.
index = 0
axes = []
while index < num_epochs:
    axes.append(index)
    index += (num_epochs / 200)
plt.plot(axes, values)
plt.plot(axes, values_predicted, color='r')
plt.ylim([0, 100])
plt.xlim([0, num_epochs])
plt.ylabel('Error')
plt.xlabel('Training Time')
plt.tight_layout()
plt.show()
# Show weights (debug toggle)
#nn.print_weights()
| StarcoderdataPython |
11374582 | <reponame>sbkirby/imagehub-librarian
#!/usr/bin/env python3
"""
mqtt_client.py - detect objects in images and preform ALPR on car images via Plate Recognizer account at
https://platerecognizer.com/
Edit JSON config.json file in work_dir defined below
Date: October 1, 2020
By: <NAME>
"""
import sys
import os
import time
import paho.mqtt.client as mqtt
import json
import requests
import tools.detect_objects_cv2 as detect_objects
# MQTT client identifier announced to the broker
client_name = 'ObjDector'
# master switch: when False, publish_message() silently drops outgoing messages
transmit = True
# master switch: when False, on_message() ignores incoming work requests
current_status = True
# set DEBUG=True to print debug statements in Docker log file of openalpr container
DEBUG = True
# module-level MQTT client object; created and connected in main()
client = None
# define Working Directory (the directory containing this script; config.json lives here)
work_dir = os.path.dirname( __file__ )
def load_config():
    """Read and parse the JSON configuration file stored next to this script."""
    config_path = os.path.join(work_dir, 'config.json')
    with open(config_path) as config_file:
        return json.load(config_file)
# MQTT connect callback (CONNACK received from the broker).
def on_connect(client, userdata, rc, *extra_params):
    """Log the connection result code and subscribe to every image topic."""
    if DEBUG:
        print('Connected with result code ' + str(rc))
    # Wildcard subscription: all messages published under image/
    client.subscribe('image/#')
# PUBLISH data to the broker (QoS 1) when transmission is enabled.
def publish_message(topic, objs):
    """Publish *objs* on *topic* if it is one of the two result topics."""
    if not transmit:
        return
    if topic in ('image/id_objects/count', 'image/alpr/results'):
        client.publish(topic, objs, 1)
    # brief pause so back-to-back publishes don't flood the broker
    time.sleep(0.01)
# Decode a UTF-8 JSON payload carried by an MQTT message.
def decode_msg(msg):
    """Return the MQTT message payload parsed as a Python object."""
    payload_text = msg.payload.decode("utf-8", "ignore")
    return json.loads(payload_text)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Dispatch incoming MQTT messages by topic.

    Topics handled:
      image/id_objects/get_objects -- payload is an image path (bytes); run
          object detection and publish the result on image/id_objects/count.
      image/alpr/get_license -- payload is JSON with a 'filename' list of image
          paths; POST each to Plate Recognizer, publish the first non-empty
          result (or the second attempt) on image/alpr/results.
      image/information -- informational JSON payload, printed when DEBUG.
    """
    global current_status, DEBUG
    if DEBUG:
        print("on_message payload: ", msg.payload)
    if msg.topic == 'image/id_objects/get_objects':
        if current_status:
            # payload is a raw bytes filesystem path
            head, filename = os.path.split(msg.payload) # separate filename from path
            if os.path.isfile(msg.payload):
                tmp = detect_objects.detectObject(msg.payload)
                tmp["filename"] = filename.decode('utf-8') # filename key field
                tmp["isfile"] = True # file exists
                objs = json.dumps(tmp, separators=(',', ':'))
                if DEBUG:
                    print('get_objects objs: ', objs)
                publish_message('image/id_objects/count', objs)
            else:
                # if file doesn't exist store result in image database
                tmp = {"filename": filename.decode('utf-8'), "isfile": False}
                objs = json.dumps(tmp, separators=(',', ':'))
                publish_message('image/id_objects/count', objs)
    if msg.topic == 'image/alpr/get_license':
        if current_status:
            # loop thru list of two images to read for license
            cnt = 0
            payload = json.loads(msg.payload)
            for img in payload['filename']:
                cnt += 1 # counter for test - publish & quit after second image
                # check to see file exist
                if DEBUG:
                    print('get_license cnt: ', cnt)
                if os.path.isfile(img):
                    # open file
                    with open(img, 'rb') as fp:
                        try:
                            # POST the image to Plate Recognizer website - 2500/month
                            response = requests.post(
                                'https://api.platerecognizer.com/v1/plate-reader/',
                                data=dict(regions=regions), # Optional
                                files=dict(upload=fp),
                                headers={'Authorization': 'Token ' + API_TOKEN})
                        # BUG FIX: the original caught NewConnectionError, a name never
                        # imported in this module, so any failure raised NameError.
                        # requests wraps urllib3's NewConnectionError in ConnectionError.
                        except requests.exceptions.ConnectionError as err:
                            print('get_license Error: Failed to establish a new connection')
                            time.sleep(1)
                            break
                    if DEBUG:
                        print('get_license response.json: ', response.json())
                    results_str = json.dumps(response.json(), separators=(',', ':'))
                    # if results aren't empty publish data or second image
                    if (response.json()["results"] != []) or (cnt == 2):
                        publish_message('image/alpr/results', results_str)
                        break
                time.sleep(1)
    if msg.topic == 'image/information':
        msg_data = decode_msg(msg)
        if DEBUG:
            print('image/information: ', msg_data)
# Logging callback wired to the paho client.
def on_log(client, userdata, level, buf):
    """Echo the MQTT client's internal log buffer when debugging is on."""
    if not DEBUG:
        return
    print("on_log: ", buf)
# give pending work a moment to settle, then terminate the interpreter
def clean_and_exit():
    """Pause briefly, then exit the process (equivalent to sys.exit())."""
    time.sleep(0.1)
    raise SystemExit  # exit python to system
def main():
    """Load configuration, connect to the MQTT broker, and serve forever.

    Populates the module globals used by the callbacks:
    client (paho MQTT client), API_TOKEN and regions (Plate Recognizer).
    Blocks in loop_forever(); on any unhandled exception it disconnects
    and exits via clean_and_exit().
    """
    global client, API_TOKEN, regions
    # wait 10 seconds for network to start (e.g. when launched at container boot)
    if DEBUG:
        print("[INFO] Standby 10 seconds for things to start.")
    time.sleep(10)
    data = load_config()
    # ALPR regions for Plate Recognizer
    regions = data['ALPR_regions']
    API_TOKEN = data['ALPR_API_TOKEN']
    # mqtt connect: register callbacks before connecting
    client = mqtt.Client(client_name)
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    # NOTE(review): '<PASSWORD>' looks like an anonymized config key -- confirm
    # the real key name used in config.json
    client.username_pw_set(data['MQTT_USER'], password=data['<PASSWORD>'])
    client.connect(data['OIP_HOST'], data['MQTT_PORT'], 60)
    time.sleep(0.5)
    try:
        client.loop_forever()
    except Exception as ex:
        if DEBUG:
            print('main(): Unanticipated error with no Exception handler.', ex)
        client.disconnect()
    clean_and_exit()
# Script entry point: start the MQTT object-detection client.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
from speech_style import *
from kb import *
from task3 import get_restaurants, rank_restaurants
def modify_options(dialogs, kb, accept_prob=0.25, save='all'):
    """Rewrite restaurant dialogs into an accept/reject option-browsing format.

    For every input dialog two variants are built (a 'veg' and a 'non-veg'
    preference seed), the restaurant KB facts are appended, and the bot then
    proposes ranked restaurants one by one; each proposal is accepted with
    probability *accept_prob* (the last one is always accepted).

    dialogs     -- list of dialogs; each dialog is a list of turns (1- or 2-element lists)
    kb          -- restaurant knowledge base: kb[restaurant][attribute] -> value
    accept_prob -- probability of accepting a non-final proposed option
    save        -- 'random' keeps one random variant per dialog, anything else keeps all
    Returns the list of rewritten dialogs.
    """
    new_dialogs = []
    for dialog in dialogs:
        restaurants = get_restaurants(dialog)
        specialities = set([kb[restaurant]['R_speciality'] for restaurant in restaurants])
        # two dialog variants, each seeded with a "<veg|non-veg> <speciality>" profile turn
        temp_dialog_set = [[['veg ' + np.random.choice(list(specialities))]],
                           [['non-veg ' + np.random.choice(list(specialities))]]]
        # copy the original opening turns up to the first restaurant mention
        for turn in dialog:
            if 'resto_' in turn[0] : break
            for temp_dialog in temp_dialog_set:
                temp_dialog.append(turn)
        # append every KB fact for every candidate restaurant to both variants
        for restaurant in restaurants:
            attrib_list = ['R_phone', 'R_cuisine', 'R_address', 'R_location', 'R_number', 'R_price', 'R_rating', 'R_type', 'R_speciality', 'R_social_media', 'R_parking', 'R_public_transport']
            for temp_dialog in temp_dialog_set:
                for attrib in attrib_list:
                    temp_dialog.append([restaurant + ' ' + attrib + ' ' + kb[restaurant][attrib]])
        # for turn in dialog:
        #     if len(turn) == 2:
        #         for temp_dialog in temp_dialog_set:
        #             temp_dialog.append(turn)
        #         if turn[1] == 'ok let me look into some options for you' : break
        # simulate the option-browsing exchange for each variant
        for temp_dialog in temp_dialog_set:
            utterences = {
                'reject' : ["do you have something else",
                            "no i don't like that",
                            "no this does not work for me"],
                'accept' : ["that looks great",
                            "i love that",
                            "let's do it",
                            "it's perfect"]
            }
            # rank by the variant's seed preference: "<veg|non-veg> <speciality>"
            ranked_restaurants = rank_restaurants(restaurants, temp_dialog[0][0].split(' ')[0], temp_dialog[0][0].split(' ')[1], kb)
            for restaurant in ranked_restaurants:
                temp_dialog.append(['<SILENCE>', 'what do you think of this option: ' + restaurant])
                # the last ranked option is always accepted so every dialog terminates
                if restaurant == ranked_restaurants[-1] : choice = 'accept'
                else : choice = np.random.choice(['accept', 'reject'], p=[accept_prob, 1-accept_prob])
                if choice == 'accept':
                    temp_dialog.append([np.random.choice(utterences['accept']), 'great let me do the reservation'])
                    break
                else :
                    temp_dialog.append([np.random.choice(utterences['reject']), 'sure let me find an other option for you'])
        # carry over the closing turns (address/phone/goodbye) from the source dialog
        for turn in dialog:
            temp_list = ['here it is', 'is there anything i can help you with', "you're welcome"]
            if len(turn) == 2:
                for item in temp_list:
                    if item in turn[1]:
                        for temp_dialog in temp_dialog_set:
                            temp_dialog.append(turn)
        if save=='random':
            new_dialogs.append(temp_dialog_set[np.random.choice(len(temp_dialog_set))])
        else:
            for temp_dialog in temp_dialog_set:
                new_dialogs.append(temp_dialog)
    return new_dialogs
def modify_info(dialogs, kb, utterences):
    """Personalize the post-reservation info turns of each dialog.

    The user profile index (gender x age, 6 combinations) is read from the
    first two tokens of the dialog's seed turn.  Phone-number requests become
    contact requests (young users get social media instead of a phone number);
    address requests become direction requests (cheap restaurants get public
    transport info, others get parking info).

    dialogs    -- dialogs produced by modify_options/modify_speech_style
    kb         -- restaurant knowledge base: kb[restaurant][attribute] -> value
    utterences -- per-profile phrasing table: utterences[phrase][profile_index]
    Returns the list of rewritten dialogs.
    """
    new_dialogs = []
    for dialog in dialogs:
        profile = ['male young', 'female young', 'male middle-aged', 'female middle-aged', 'male elderly', 'female elderly'].index(' '.join(dialog[0][0].split(' ')[:2]))
        restaurant = ''
        # find the accepted restaurant and the index i where the reservation is made
        for i, turn in enumerate(dialog):
            if len(turn) == 2:
                if utterences['what do you think of this option:'][profile] in turn[1] : restaurant = turn[1].split(': ')[1]
                elif turn[1] == utterences['great let me do the reservation'][profile] : break
        # keep everything before the reservation unchanged; rewrite the rest
        temp_dialog = dialog[:i]
        for turn in dialog[i:]:
            queries = {
                'contact' : ['do you have its contact details',
                             'may i have the contact details of the restaurant',
                             'what are the contact details of the restaurant'],
                'directions' : ['do you have direction information',
                                'may i have the direction information to the restaurant',
                                'can you provide direction to the restaurant']
            }
            if 'phone number' in turn[0]:
                turn[0] = np.random.choice(queries['contact'])
                # young users are answered with social media, others with a phone number
                if temp_dialog[0][0].split(' ')[1] == 'young':
                    turn[1] = utterences['here it is'][profile] + ' ' + kb[restaurant]['R_social_media']
                else:
                    turn[1] = utterences['here it is'][profile] + ' ' + kb[restaurant]['R_phone']
            if 'address' in turn[0]:
                turn[0] = np.random.choice(queries['directions'])
                # cheap restaurants: public transport info; otherwise parking info
                if kb[restaurant]['R_price'] == 'cheap':
                    turn[1] = utterences['here it is'][profile] + ' ' + kb[restaurant]['R_address'] + ' ' + kb[restaurant]['R_public_transport']
                else:
                    turn[1] = utterences['here it is'][profile] + ' ' + kb[restaurant]['R_address'] + ' ' + kb[restaurant]['R_parking']
            temp_dialog.append(turn)
        new_dialogs.append(temp_dialog)
    return new_dialogs
if __name__ == '__main__':
    """Build the personalized-dialog datasets (full, small, and per-profile)."""
    utterences = load_utterences()
    kb = read_kb('../data/personalized-dialog-dataset/personalized-dialog-kb-all.txt')

    # The same four bAbI task-5 splits are processed for every dataset variant.
    splits = ['dev', 'trn', 'tst', 'tst-OOV']
    src_tmpl = '../data/dialog-bAbI-tasks/dialog-babi-task5-full-dialogs-%s.txt'
    dst_tmpl = '../data/personalized-dialog-dataset/%spersonalized-dialog-task5-full-dialogs-%s.txt'

    def build_variant(out_subdir, options_save, speech_save=None):
        """Run the full personalization pipeline for every split.

        out_subdir   -- sub-directory under the output root ('' for the root
                        itself, always with a trailing '/')
        options_save -- 'save' mode forwarded to modify_options
                        ('all' keeps both generated variants per dialog)
        speech_save  -- 'save' mode forwarded to modify_speech_style; None keeps
                        that function's own default (matches the original
                        positional call used for the full dataset)
        """
        speech_kwargs = {} if speech_save is None else {'save': speech_save}
        for split in splits:
            dialogs = read_babi(src_tmpl % split)
            new_dialogs = modify_options(dialogs, kb, save=options_save)
            new_dialogs = modify_speech_style(new_dialogs, utterences,
                                              setting='modified', **speech_kwargs)
            new_dialogs = modify_info(new_dialogs, kb, utterences)
            save_babi(new_dialogs, dst_tmpl % (out_subdir, split))

    # Full dataset: keep every generated dialog variant.
    build_variant('', 'all')
    # Small dataset: keep one randomly chosen variant per dialog.
    build_variant('small/', 'random', 'random')
    # Per-profile datasets: keep the variant for one fixed user profile.
    # Profile index order matches the list used in modify_info.
    profiles = ['male_young', 'female_young', 'male_middle-aged',
                'female_middle-aged', 'male_elderly', 'female_elderly']
    for idx, profile in enumerate(profiles):
        build_variant('split-by-profile/%s/' % profile, 'random', str(idx))
| StarcoderdataPython |
8136655 | <reponame>komiya-atsushi/node-marisa-trie
# GYP build file: compiles the bundled MARISA trie C++ sources into a
# static library ("libmarisa") for the node addon to link against.
{
    'targets': [
        {
            'target_name': 'libmarisa',
            'product_prefix': 'lib',
            'type': 'static_library',
            # upstream marisa-trie translation units
            'sources': [
                'libmarisa/marisa/agent.cc',
                'libmarisa/marisa/grimoire/vector/bit-vector.cc',
                'libmarisa/marisa/keyset.cc',
                'libmarisa/marisa/grimoire/trie/louds-trie.cc',
                'libmarisa/marisa/grimoire/io/mapper.cc',
                'libmarisa/marisa/grimoire/io/reader.cc',
                'libmarisa/marisa/grimoire/trie/tail.cc',
                'libmarisa/marisa/trie.cc',
                'libmarisa/marisa/grimoire/io/writer.cc',
            ],
            'include_dirs': [
                'libmarisa',
            ],
            'conditions': [
                # node-gyp disables C++ exceptions by default; strip that flag
                # on Linux because the marisa sources use exceptions
                ['OS == "linux"', {
                    'cflags!': [ '-fno-exceptions' ],
                    'cflags_cc!': [ '-fno-exceptions' ],
                }],
            ],
        },
    ],
}
| StarcoderdataPython |
5054838 | <filename>scripts/Augmentpp.py
import pandas as pd
import requests as rq
import os
from time import sleep
import math
# Google Street View Static API request fragments; the final URL is assembled as
# baseURL + size + location + "<lat>,<lon>" + <heading fragment> + keyfrag + <api key>
baseURL = "https://maps.googleapis.com/maps/api/streetview?"
size = "size=640x480&"
location = "location="
keyfrag = "&key="
# output root: one sub-directory per crawled ID
imgDir = "/datasets/sagarj/streetView/Translational_city_test/"
# failed downloads are appended here as "failed,<ID>,<status_code>"
logFile = "dlLog.log"
# CSV accumulating (key, disp, path) rows for every downloaded image
saveFile = "../streetview/TranslateTest.csv"
# headingPitchCandidates = ["&fov=90&heading=0&pitch=-20&","&fov=90&heading=0&pitch=20&",
# "&fov=90&heading=-30&pitch=0&","&fov=90&heading=-30&pitch=-20&","&fov=90&heading=-30&pitch=20&",
# "&fov=90&heading=-60&pitch=0&","&fov=90&heading=-60&pitch=-20&","&fov=90&heading=-60&pitch=20&",
# "&fov=90&heading=30&pitch=0&","&fov=90&heading=30&pitch=-20&","&fov=90&heading=30&pitch=20&",
# "&fov=90&heading=60&pitch=0&","&fov=90&heading=60&pitch=-20&","&fov=90&heading=60&pitch=20&",]
# headingCandidates = ["&fov=90&heading=15&pitch=0&" , "&fov=90&heading=-15&pitch=0&"
# "&fov=90&heading=-30&pitch=0&", "&fov=90&heading=30&pitch=0&",
# "&fov=90&heading=-60&pitch=0&", "&fov=90&heading=60&pitch=0&",
# "&fov=90&heading=90&pitch=0&", "&fov=90&heading=-90&pitch=0&",
# "&fov=90&heading=120&pitch=0&", "&fov=90&heading=-120&pitch=0&",]
#headingCandidates = ["&fov=90&heading=0&pitch=0&" , "&fov=90&heading=-15&pitch=0&", "&fov=90&heading=15&pitch=0&" ]
# camera view fragments requested per location (currently a single forward view)
headingCandidates = ["&fov=90&heading=0&pitch=0&"]
# positional offsets in meters applied around each seed coordinate
offsetMeters = [0 , 20 , 40 , 60 ]
#offsetMeters = [0]
def getOffsetLatLong(lat, lon, meters):
    """Return two (lat, lon) candidates offset diagonally from the input point.

    The same distance *meters* is applied to both the north and the east axis
    simultaneously, so the two candidates lie roughly NE and SW of (lat, lon).
    Uses the small-offset spherical-earth approximation.
    NOTE(review): the diagonal pairing (lat and lon shifted together) looks
    intentional for this crawler, but confirm it is the desired geometry.
    """
    # offsets in meters (applied to both axes)
    dn = meters
    de = meters
    # radius of earth in meters (WGS-84 equatorial radius)
    R = 6378137.0
    # Coordinate offsets in radians
    dLat = dn / R
    dLon = de / (R * math.cos(math.pi * lat / 180))
    # Offset positions, decimal degrees
    latO1 = lat + dLat * (180.0 / math.pi)
    latO2 = lat - dLat * (180.0 / math.pi)
    lonO1 = lon + dLon * (180.0 / math.pi)
    lonO2 = lon - dLon * (180.0 / math.pi)
    candidates = [(latO1, lonO1), (latO2, lonO2)]
    # debug output; print() single-argument form is valid in both Python 2 and 3
    # (the original Python-2-only print statements broke Python 3 parsing)
    print(str(lat) + str(lon))
    print(candidates)
    return candidates
def getDownloadedList():
    """Return the IDs already crawled (the sub-directory names under imgDir)."""
    return [name for name in os.listdir(imgDir)
            if os.path.isdir(os.path.join(imgDir, name))]
def getKey(path):
    """Return the first line of the key file at *path* as raw bytes."""
    with open(path, 'rb') as key_file:
        return key_file.readline()
if __name__ == "__main__":
df = pd.read_csv("../streetview/easternCities.csv")
#df = dfO[10:20]
if os.path.exists(saveFile):
saveDf = pd.read_csv(saveFile)
else:
saveDf = pd.DataFrame({'key':0 , 'disp':0 , 'path':[""]})
history_files = getDownloadedList()
keys = ['api.key' , 'api2.key', 'api3.key']
key = getKey(keys[1])
print "using key " + key
crawled = 0
sl = 1
idCrawled = 0
for index , row in df.iterrows():
ID = row['left_id']
if crawled > 24500:
print "Nearing rate limit, change key"
saveDf.to_csv(saveFile)
exit(0)
if ID not in history_files:
lat = row['left_lat']
lon = row['left_long']
augmentDir = imgDir + "/" + ID
if not os.path.exists(augmentDir):
os.makedirs(augmentDir)
offsetCandidates = []
for d in offsetMeters:
offsetCandidates = offsetCandidates + getOffsetLatLong(lat,lon,d)
candidates = headingCandidates
for j in range(len(offsetCandidates)):
displacement = j/2
imagePaths = []
for i in range(len(candidates)):
#displacement = i/2
imgName = augmentDir + "/" + ID + "_" + str(j) + "_" + str(i) + '_' + str(displacement) + ".jpg"
imagePaths.append(imgName)
#imgLoc = str(lat) + ',' + str(lon)
imgLoc = str(offsetCandidates[j][0]) + ',' + str(offsetCandidates[j][1])
url = baseURL + size + location + imgLoc + candidates[i] + keyfrag + key
print url
r = rq.get(url)
if r.status_code == 200:
with open(imgName, 'wb') as f:
f.write(r.content)
print " Downloaded," + ID
history_files.append(ID)
sleep(sl)
crawled+=1
else:
with open(logFile, 'a') as f:
line = "failed," + ID + ',' + str(r.status_code) + "\n"
f.write(line)
print "Failed ," + ID
d = {'key':ID , 'disp':displacement , 'path':imagePaths}
df = pd.DataFrame(data=d)
saveDf = saveDf.append(df)
idCrawled+=1
if idCrawled >= 3000:
print "Done crawling test set"
saveDf.to_csv(saveFile)
exit(0)
else:
print "ID already cralwed!! "
print "Done crawling IDs"
| StarcoderdataPython |
1816923 | labels_cityscapes = [
# name id trainId category catId hasInstances ignoreInEval color
( 'unlabeled' , 0 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
( 'ego vehicle' , 1 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
( 'rectification border' , 2 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
( 'out of roi' , 3 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
( 'static' , 4 , 255 , 'void' , 0 , False , True , ( 0, 0, 0) ),
( 'dynamic' , 5 , 255 , 'void' , 0 , False , True , (111, 74, 0) ),
( 'ground' , 6 , 255 , 'void' , 0 , False , True , ( 81, 0, 81) ),
( 'road' , 7 , 0 , 'flat' , 1 , False , False , (128, 64,128) ),
( 'sidewalk' , 8 , 1 , 'flat' , 1 , False , False , (244, 35,232) ),
( 'parking' , 9 , 255 , 'flat' , 1 , False , True , (250,170,160) ),
( 'rail track' , 10 , 255 , 'flat' , 1 , False , True , (230,150,140) ),
( 'building' , 11 , 2 , 'construction' , 2 , False , False , ( 70, 70, 70) ),
( 'wall' , 12 , 3 , 'construction' , 2 , False , False , (102,102,156) ),
( 'fence' , 13 , 4 , 'construction' , 2 , False , False , (190,153,153) ),
( 'guard rail' , 14 , 255 , 'construction' , 2 , False , True , (180,165,180) ),
( 'bridge' , 15 , 255 , 'construction' , 2 , False , True , (150,100,100) ),
( 'tunnel' , 16 , 255 , 'construction' , 2 , False , True , (150,120, 90) ),
( 'pole' , 17 , 5 , 'object' , 3 , False , False , (153,153,153) ),
( 'polegroup' , 18 , 255 , 'object' , 3 , False , True , (153,153,153) ),
( 'traffic light' , 19 , 6 , 'object' , 3 , False , False , (250,170, 30) ),
( 'traffic sign' , 20 , 7 , 'object' , 3 , False , False , (220,220, 0) ),
( 'vegetation' , 21 , 8 , 'nature' , 4 , False , False , (107,142, 35) ),
( 'terrain' , 22 , 9 , 'nature' , 4 , False , False , (152,251,152) ),
( 'sky' , 23 , 10 , 'sky' , 5 , False , False , ( 70,130,180) ),
( 'person' , 24 , 11 , 'human' , 6 , True , False , (220, 20, 60) ),
( 'rider' , 25 , 12 , 'human' , 6 , True , False , (255, 0, 0) ),
( 'car' , 26 , 13 , 'vehicle' , 7 , True , False , ( 0, 0,142) ),
( 'truck' , 27 , 14 , 'vehicle' , 7 , True , False , ( 0, 0, 70) ),
( 'bus' , 28 , 15 , 'vehicle' , 7 , True , False , ( 0, 60,100) ),
( 'caravan' , 29 , 255 , 'vehicle' , 7 , True , True , ( 0, 0, 90) ),
( 'trailer' , 30 , 255 , 'vehicle' , 7 , True , True , ( 0, 0,110) ),
( 'train' , 31 , 16 , 'vehicle' , 7 , True , False , ( 0, 80,100) ),
( 'motorcycle' , 32 , 17 , 'vehicle' , 7 , True , False , ( 0, 0,230) ),
( 'bicycle' , 33 , 18 , 'vehicle' , 7 , True , False , (119, 11, 32) ),
( 'license plate' , -1 , -1 , 'vehicle' , 7 , False , True , ( 0, 0,142) ),
]
labels_carla = [
('unlabeled', 0, 255, (0, 0, 0)),
('Building', 1, 0, (70, 70, 70)),
('Fence', 2, 1, (100, 40, 40)),
('Pedestrian', 4, 2, (220, 20, 60)),
('Pole', 5, 3, (153, 153, 153)),
('RoadLine', 6, 4, (157, 234, 50)),
('Road', 7, 5, (128, 64, 128)),
('Sidewalk', 8, 6, (244, 35, 232)),
('Vegetation', 9, 7, (107, 142, 35)),
('Vehicles', 10, 8, (0, 0, 142)),
('Wall', 11, 9, (102, 102, 156)),
('TrafficSign', 12, 10, (220, 220, 0)),
('Sky', 13, 11, (7, 130, 180)),
('Ground', 14, 12, (81, 0, 81)),
('Bridge', 15, 13, (150, 100, 100)),
('RailTrack', 16, 14, (230, 150, 140)),
('GuardRail', 17, 15, (180, 165, 180)),
('TrafficLight', 18, 16, (250, 170, 30)),
('Static', 19, 254, (110, 190, 160)),
('Dynamic', 20, 254, (110, 190, 160)),
('Water', 21, 17, (45, 60, 150)),
('Terrain', 22, 18, (145, 170, 100)),
]
labels_idd = [
# name csId csTrainId level4id level3Id category level2Id level1Id hasInstances ignoreInEval color
( 'road' , 7 , 0 , 0 , 0 , 'drivable' , 0 , 0 , False , False , (128, 64,128) ),
( 'parking' , 9 , 255 , 1 , 1 , 'drivable' , 1 , 0 , False , False , (250,170,160) ),
( 'drivable fallback' , 255 , 255 , 2 , 1 , 'drivable' , 1 , 0 , False , False , ( 81, 0, 81) ),
( 'sidewalk' , 8 , 1 , 3 , 2 , 'non-drivable' , 2 , 1 , False , False , (244, 35,232) ),
( 'rail track' , 10 , 255 , 3 , 3 , 'non-drivable' , 3 , 1 , False , False , (230,150,140) ),
( 'non-drivable fallback', 255 , 9 , 4 , 3 , 'non-drivable' , 3 , 1 , False , False , (152,251,152) ),
( 'person' , 24 , 11 , 5 , 4 , 'living-thing' , 4 , 2 , True , False , (220, 20, 60) ),
( 'animal' , 255 , 19 , 6 , 4 , 'living-thing' , 4 , 2 , True , True , (246, 198, 145)),
( 'rider' , 25 , 12 , 7 , 5 , 'living-thing' , 5 , 2 , True , False , (255, 0, 0) ),
( 'motorcycle' , 32 , 17 , 8 , 6 , '2-wheeler' , 6 , 3 , True , False , ( 0, 0,230) ),
( 'bicycle' , 33 , 18 , 9 , 7 , '2-wheeler' , 6 , 3 , True , False , (119, 11, 32) ),
( 'autorickshaw' , 255 , 20 , 10 , 8 , 'autorickshaw' , 7 , 3 , True , False , (255, 204, 54) ),
( 'car' , 26 , 13 , 11 , 9 , 'car' , 7 , 3 , True , False , ( 0, 0,142) ),
( 'truck' , 27 , 14 , 12 , 10 , 'large-vehicle' , 8 , 3 , True , False , ( 0, 0, 70) ),
( 'bus' , 28 , 15 , 13 , 11 , 'large-vehicle' , 8 , 3 , True , False , ( 0, 60,100) ),
( 'caravan' , 29 , 255 , 14 , 12 , 'large-vehicle' , 8 , 3 , True , True , ( 0, 0, 90) ),
( 'trailer' , 30 , 255 , 15 , 12 , 'large-vehicle' , 8 , 3 , True , True , ( 0, 0,110) ),
( 'train' , 31 , 16 , 15 , 12 , 'large-vehicle' , 8 , 3 , True , True , ( 0, 80,100) ),
( 'vehicle fallback' , 355 , 255 , 15 , 12 , 'large-vehicle' , 8 , 3 , True , False , (136, 143, 153)),
( 'curb' , 255 , 255 , 16 , 13 , 'barrier' , 9 , 4 , False , False , (220, 190, 40)),
( 'wall' , 12 , 3 , 17 , 14 , 'barrier' , 9 , 4 , False , False , (102,102,156) ),
( 'fence' , 13 , 4 , 18 , 15 , 'barrier' , 10 , 4 , False , False , (190,153,153) ),
( 'guard rail' , 14 , 255 , 19 , 16 , 'barrier' , 10 , 4 , False , False , (180,165,180) ),
( 'billboard' , 255 , 21 , 20 , 17 , 'structures' , 11 , 4 , False , False , (174, 64, 67) ),
( 'traffic sign' , 20 , 7 , 21 , 18 , 'structures' , 11 , 4 , False , False , (220,220, 0) ),
( 'traffic light' , 19 , 6 , 22 , 19 , 'structures' , 11 , 4 , False , False , (250,170, 30) ),
( 'pole' , 17 , 5 , 23 , 20 , 'structures' , 12 , 4 , False , False , (153,153,153) ),
( 'polegroup' , 18 , 5 , 23 , 20 , 'structures' , 12 , 4 , False , False , (153,153,153) ),
( 'obs-str-bar-fallback' , 255 , 255 , 24 , 21 , 'structures' , 12 , 4 , False , False , (169, 187, 214) ),
( 'building' , 11 , 2 , 25 , 22 , 'construction' , 13 , 5 , False , False , ( 70, 70, 70) ),
( 'bridge' , 15 , 22 , 26 , 23 , 'construction' , 13 , 5 , False , False , (150,100,100) ),
( 'tunnel' , 6 , 22 , 26 , 23 , 'construction' , 13 , 5 , False , False , (150,120, 90) ),
( 'vegetation' , 21 , 8 , 27 , 24 , 'vegetation' , 14 , 5 , False , False , (107,142, 35) ),
( 'sky' , 23 , 10 , 28 , 25 , 'sky' , 15 , 6 , False , False , ( 70,130,180) ),
( 'fallback background' , 255 , 255 , 29 , 25 , 'object fallback' , 15 , 6 , False , False , (169, 187, 214)),
( 'unlabeled' , 0 , 255 , 255 , 255 , 'void' , 255 , 255 , False , True , ( 0, 0, 0) ),
( 'ego vehicle' , 1 , 255 , 255 , 255 , 'void' , 255 , 255 , False , True , ( 0, 0, 0) ),
( 'rectification border' , 2 , 255 , 255 , 255 , 'void' , 255 , 255 , False , True , ( 0, 0, 0) ),
( 'out of roi' , 3 , 255 , 255 , 255 , 'void' , 255 , 255 , False , True , ( 0, 0, 0) ),
( 'license plate' , 255 , 255 , 255 , 255 , 'vehicle' , 255 , 255 , False , True , ( 0, 0,142) ),
]
def get_color_map(palette='cityscapes'):
    """Build a mapping from train id to RGB color for the requested palette.

    Entries whose train id is 255 or -1 (ignore labels) are skipped.
    Raises a plain ``Exception`` for an unrecognised palette name.
    """
    if palette == 'cityscapes':
        label_table = labels_cityscapes
    elif palette == 'carla':
        label_table = labels_carla
    elif palette == 'idd':
        label_table = labels_idd
    else:
        raise Exception('Unknown Colormap')
    # label[2] is the train id, label[-1] the (r, g, b) tuple.
    return {row[2]: row[-1] for row in label_table if row[2] not in (255, -1)}
| StarcoderdataPython |
8045591 | from ..conversion_context import *
from torch2trt.module_test import add_module_test
# ASSUME EXPLICIT BATCH MODE to make things easier for now
def insert_dim(ctx, trt_tensor, new_dims: list):
    """Insert size-1 axes into ``trt_tensor`` at the positions in ``new_dims``.

    Implemented as a single TensorRT shuffle layer: first a reshape appends
    len(new_dims) trailing 1-sized axes (reshape dim 0 means "copy the
    existing extent"), then a transpose moves each appended axis to its
    requested position.  Assumes explicit-batch mode (see module comment).
    """
    ndims = len(trt_tensor.shape)
    layer = ctx.network.add_shuffle(trt_tensor)
    # [0]*ndims keeps every existing dim; [1]*k appends the new unit axes.
    layer.reshape_dims = [0] * ndims + [1] * len(new_dims)
    perm2 = list(range(ndims))
    # go from back to front to preserve index
    # NOTE(review): new_dims are paired with the appended axes in reverse
    # order; exact for the single-dim case used by unsqueeze -- confirm the
    # intended pairing before calling with multiple target dims.
    for newdim, idx2 in zip(reversed(new_dims),
                            range(ndims, ndims + len(new_dims))):
        perm2.insert(newdim, idx2)
    layer.second_transpose = perm2
    return layer.get_output(0)
def remove_dim(ctx, trt_tensor, old_dims: list):
    """Drop the axes listed in ``old_dims`` from ``trt_tensor``.

    A TensorRT shuffle layer first transposes the doomed axes to the end,
    then reshapes to the surviving rank (reshape dim 0 = keep extent).
    """
    rank = len(trt_tensor.shape)
    axis_order = list(range(rank))
    for axis in old_dims:
        axis_order.remove(axis)
    # Surviving axes keep their relative order; removed axes go last.
    axis_order.extend(old_dims)
    shuffle = ctx.network.add_shuffle(trt_tensor)
    shuffle.first_transpose = axis_order
    shuffle.reshape_dims = [0] * (rank - len(old_dims))
    return shuffle.get_output(0)
@tensorrt_converter('torch.Tensor.reshape')
@tensorrt_converter('torch.Tensor.view')
def convert_view(ctx: ConversionContext):
    """Map ``Tensor.view`` / ``Tensor.reshape`` onto ``ctx.reshape_to``."""
    source = ctx.get_arg("self", pos=0, to_trt=True)
    shape_args = list(ctx.method_args[1:])
    # view(...) accepts either separate ints or a single tuple/list argument.
    if shape_args and isinstance(shape_args[0], (tuple, list)):
        assert len(shape_args) == 1
        shape_args = shape_args[0]
    # Plain ints pass through; dynamic entries are resolved to TRT scalars.
    resolved = []
    for dim in shape_args:
        if isinstance(dim, int):
            resolved.append(dim)
        else:
            resolved.append(ctx.get_trt_one(dim, return_int=True))
    ctx.method_return._trt = ctx.reshape_to(source, tuple(resolved))
# Only accepts one dim
@tensorrt_converter('torch.Tensor.unsqueeze')
def convert_unsqueeze(ctx: ConversionContext):
    """Converter for ``Tensor.unsqueeze``: insert one size-1 axis.

    Renamed from ``convert_squeeze``: the original name collided with the
    squeeze converter defined directly below, which shadowed this function
    at module level (registration still worked only because the decorator
    captures the function object at definition time).
    """
    input_trt = ctx.get_arg("input", 0, to_trt=True)
    ndims = len(input_trt.shape)
    new_dim = ctx.get_trt_dim(pos=1, ndims=ndims)
    output = ctx.method_return
    output._trt = insert_dim(ctx, input_trt, [new_dim])
@tensorrt_converter('torch.Tensor.squeeze')
def convert_squeeze(ctx: ConversionContext):
    """Converter for ``Tensor.squeeze``: drop the size-1 axis at the given dim."""
    source = ctx.get_arg("input", 0, to_trt=True)
    rank = len(source.shape)
    squeeze_axis = ctx.get_trt_dim(pos=1, ndims=rank)
    ctx.method_return._trt = remove_dim(ctx, source, [squeeze_axis])
# @tensorrt_converter('torch.flatten')
class View(torch.nn.Module):
    """nn.Module that reshapes its input to a fixed shape via ``Tensor.view``."""

    def __init__(self, *dims):
        super().__init__()
        # Target shape, stored exactly as passed (may contain -1).
        self.dims = dims

    def forward(self, x):
        return x.view(*self.dims)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_view_1d():
    # Module-test factory: flatten everything after the leading axis.
    return View(1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_view_2d():
    # Module-test factory: reshape to (1, 1, N), flattening the remainder.
    return View(1, 1, -1)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 3, 3)])
def test_view_3d():
    # Module-test factory: reshape to (1, 1, 1, N), flattening the remainder.
    return View(1, 1, 1, -1)
| StarcoderdataPython |
# Minimal user-data shell script baked into the EC2 instance defined below.
user_data_script = '''#!/bin/bash
echo "You can put your userdata script right here!"
'''
# Template skeleton; the EC2 resource is attached at the bottom.
cft = CloudFormationTemplate(description="A slightly more useful template.")
# Raw EC2 instance properties; UserData must be base64-encoded.
properties = {
    'ImageId': 'ami-c30360aa',
    'InstanceType': 'm1.small',
    'UserData': base64(user_data_script),
}
# Resource attributes: cfn-init bootstrap metadata plus update / deletion
# behaviour and an explicit dependency on the database resource.
attributes = [
    Metadata(
        {
            "AWS::CloudFormation::Init": {
                "config": {
                    # Packages from three installers: rpm URL, yum, rubygems.
                    "packages": {
                        "rpm": {
                            "epel": "http://download.fedoraproject.org/pub/epel/5/i386/epel-release-5-4.noarch.rpm"
                        },
                        "yum": {
                            "httpd": [],
                            "php": [],
                            "wordpress": []
                        },
                        "rubygems": {
                            "chef": ["0.10.2"]
                        }
                    },
                    # Tarball unpacked into /etc/puppet at boot.
                    "sources": {
                        "/etc/puppet": "https://github.com/user1/cfn-demo/tarball/master"
                    },
                    # One demo command, guarded so it only runs once.
                    "commands": {
                        "test": {
                            "command": "echo \"$CFNTEST\" > test.txt",
                            "env": {"CFNTEST": "I come from config1."},
                            "cwd": "~",
                            "test": "test ! -e ~/test.txt",
                            "ignoreErrors": "false"
                        }
                    },
                    # MySQL bootstrap script assembled from template refs.
                    "files": {
                        "/tmp/setup.mysql": {
                            "content":
                                join('',
                                     "CREATE DATABASE ", ref("DBName"), ";\n",
                                     "CREATE USER '", ref("DBUsername"), "'@'localhost' IDENTIFIED BY '",
                                     ref("DBPassword"),
                                     "';\n",
                                     "GRANT ALL ON ", ref("DBName"), ".* TO '", ref("DBUsername"), "'@'localhost';\n",
                                     "FLUSH PRIVILEGES;\n"
                                     ),
                            "mode": "000644",
                            "owner": "root",
                            "group": "root"
                        }
                    },
                    # Service states managed by sysvinit, with restart triggers.
                    "services": {
                        "sysvinit": {
                            "nginx": {
                                "enabled": "true",
                                "ensureRunning": "true",
                                "files": ["/etc/nginx/nginx.conf"],
                                "sources": ["/var/www/html"]
                            },
                            "php-fastcgi": {
                                "enabled": "true",
                                "ensureRunning": "true",
                                "packages": {
                                    "yum": ["php", "spawn-fcgi"]
                                }
                            },
                            "sendmail": {
                                "enabled": "false",
                                "ensureRunning": "false"
                            }
                        }
                    },
                    "users": {
                        "myUser": {
                            "groups": ["groupOne", "groupTwo"],
                            "uid": "50",
                            "homeDir": "/tmp"
                        }
                    },
                    "groups": {
                        "groupOne": {
                        },
                        "groupTwo": {
                            "gid": "45"
                        }
                    }
                }
            }
        }
    ),
    # Rolling-update behaviour for auto-scaling replacements.
    UpdatePolicy(
        {
            "AutoScalingRollingUpdate": {
                "MinInstancesInService": "1",
                "MaxBatchSize": "1",
                "PauseTime": "PT12M5S"
            }
        }
    ),
    # Keep the instance when the stack is deleted.
    DeletionPolicy("Retain"),
    DependsOn(ref("myDB"))
]
# Attach the instance, with the properties and attributes built above.
cft.resources.add(
    Resource('MyInstance', 'AWS::EC2::Instance', properties, attributes)
)
| StarcoderdataPython |
209835 | from flask import current_app, _app_ctx_stack
import flask_login
from flaskloginintegration import _user_loader, User
from views import login_views
class ZKPP(object):
    """Flask extension wiring flask-login plus the ZKPP login blueprint.

    Follows the standard Flask extension pattern: construct with or without
    an app, and call ``init_app`` later for the app-factory style.
    """

    def __init__(self, app=None, login_manager=None):
        # Fix: the original signature used ``login_manager=flask_login.LoginManager()``
        # as the default, evaluated once at class-definition time and therefore
        # shared by every ZKPP instance.  A fresh manager is now created per
        # instance unless the caller supplies one.
        self.app = app
        if login_manager is None:
            login_manager = flask_login.LoginManager()
        self.login_manager = login_manager
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Attach the login manager, default config, teardown hook and
        login blueprint to *app*."""
        self.login_manager.init_app(app)
        self.init_login(self.login_manager)
        app.config.setdefault('my_greeting', self.greet())
        app.teardown_appcontext(self.teardown)
        print('initializing application')
        print('root path: ' + login_views.root_path)
        app.register_blueprint(login_views)  # set login views

    def init_login(self, login_manager):
        """Register the user loader callback on *login_manager*."""
        login_manager.user_loader(_user_loader)
        # login_manager.request_loader(_request_loader)

    def greet(self):
        """Return the fixed greeting string stored in app config."""
        return 'hello my friend why so serious?'

    def teardown(self, exception):
        """App-context teardown hook; currently only logs the call."""
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'my_greeting'):
            pass
            # ctx.sqlite3_db.close()
        print('teardown called')

    @property
    def greeting(self):
        """Lazily cache and return the greeting on the current app context."""
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'my_greeting'):
                ctx.my_greeting = self.greet()
            return ctx.my_greeting
| StarcoderdataPython |
9654737 | <gh_stars>10-100
import asyncio
import discord
from models import DB
class PetRescueConfig:
    """Per-channel pet-rescue settings backed by the PetRescueConfig table."""

    # Settings used for channels that have no stored row.
    DEFAULT_CONFIG = {
        'mention': '@everyone',
        'delete_mention': True,
        'delete_message': True,
        'delete_pet': True,
    }

    def __init__(self):
        self.__data = {}

    async def load(self):
        """Populate the in-memory cache from the database table."""
        lock = asyncio.Lock()
        async with lock:
            db = DB()
            query = 'SELECT * FROM PetRescueConfig;'
            db.cursor.execute(query)
            self.__data = {
                entry['channel_id']: {
                    'mention': entry['mention'],
                    'delete_mention': bool(entry['delete_mention']),
                    'delete_message': bool(entry['delete_message']),
                    'delete_pet': bool(entry['delete_pet']),
                }
                for entry in db.cursor.fetchall()
            }

    def get(self, channel):
        """Return the config dict for *channel*, or a fresh copy of the defaults."""
        return self.__data.get(channel.id, self.DEFAULT_CONFIG.copy())

    @staticmethod
    def atobool(input_value, translated_trues):
        """Interpret a user-supplied string as a boolean.

        Fix: the original tested ``item in input_value.lower()`` (substring),
        so inputs such as "none" or "wrong" matched 'on' and parsed as True.
        Tokens are now compared exactly, case-insensitively, after stripping
        surrounding whitespace.
        """
        true_values = ['on', '1', 'true', 'yes']
        true_values.extend(translated_trues)
        value = input_value.strip().lower()
        return any(value == item.lower() for item in true_values)

    async def update(self, guild, channel, key, value, translated_trues):
        """Set one config *key* for *channel*, persist it and return the config."""
        # Boolean-valued keys are parsed with atobool; everything else
        # (e.g. 'mention') is stored verbatim.
        translations = {
            'delete_message': self.atobool,
            'delete_mention': self.atobool,
            'delete_pet': self.atobool,
        }

        def noop(x, _):
            return x

        config = self.get(channel)
        config[key] = translations.get(key, noop)(value, translated_trues)
        await self.set(guild, channel, config)
        return config

    async def set(self, guild, channel, config):
        """Write *config* for *channel* to cache and database (upsert)."""
        lock = asyncio.Lock()
        async with lock:
            self.__data[channel.id] = config
            db = DB()
            # Plain string: the placeholders are DB-API '?' parameters
            # (the original carried a superfluous f-prefix).
            query = """
                INSERT INTO PetRescueConfig (guild_name, guild_id, channel_name, channel_id, mention, delete_mention,
                delete_message, delete_pet)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT (guild_id, channel_id)
                DO UPDATE SET guild_name=?, guild_id=?, channel_name=?, channel_id=?, mention=?, delete_mention=?,
                delete_message=?, delete_pet=?;"""
            channel_type = channel.type
            if channel_type == discord.ChannelType.private:
                # DMs have no guild; store a synthetic one.
                guild_name = 'Private Message'
                guild_id = 0
                channel_name = channel.recipient.name
            else:
                guild_id = guild.id
                guild_name = guild.name
                channel_name = channel.name
            params = [
                guild_name,
                guild_id,
                channel_name,
                channel.id,
                config['mention'],
                config['delete_mention'],
                config['delete_message'],
                config['delete_pet'],
            ]
            # Parameters appear twice: once for INSERT, once for DO UPDATE.
            db.cursor.execute(query, (*params, *params))
            db.commit()
            db.close()
| StarcoderdataPython |
12807200 | <gh_stars>0
import cPickle
import numpy as np
import Image
def _read_split(split):
    """Return (image_paths, labels) for one data split.

    Reads ``../data/smile_detection/<split>/image_names`` and labels each
    entry from its name: a path containing 'b' is treated as a smile
    (label 1), anything else as non-smile (label 0) -- the same rule the
    original three copy-pasted per-split loops applied.
    """
    base = '../data/smile_detection/%s/' % split
    paths = []
    labels = []
    # with-statement replaces the original unclosed file() handles.
    with open(base + 'image_names', 'r') as listing:
        for raw in listing:
            path = base + raw.strip()
            paths.append(path)
            labels.append(1 if 'b' in path else 0)
    return paths, labels


def _load_split(split):
    """Load one split as [images, labels]; pixel values scaled to [0, 1]."""
    paths, labels = _read_split(split)
    images = [np.array(Image.open(fname)) / 255.0 for fname in paths]
    return [images, labels]


# Assemble the three splits (same read order as the original script) and
# pickle them as a single (train, validate, test) tuple.
trainFinal = _load_split('train')
testFinal = _load_split('test')
validateFinal = _load_split('validate')
with open('../data/smile_detection/smile.pkl', 'wb') as out:
    cPickle.dump((trainFinal, validateFinal, testFinal), out,
                 protocol=cPickle.HIGHEST_PROTOCOL)
print("done")
3351814 | <gh_stars>0
from operator import attrgetter
from django.test.testcases import TestCase
from .models import Address, Contact, Customer
class TestLookupQuery(TestCase):
    """Lookup tests traversing relations in mixed FK directions."""

    @classmethod
    def setUpTestData(cls):
        # One linked Address/Customer/Contact triple for company 1 / customer 20.
        cls.address = Address.objects.create(company=1, customer_id=20)
        cls.customer1 = Customer.objects.create(company=1, customer_id=20)
        cls.contact1 = Contact.objects.create(company_code=1, customer_code=20)

    def test_deep_mixed_forward(self):
        """Address -> customer -> contacts traversal finds the address."""
        queryset = Address.objects.filter(customer__contacts=self.contact1)
        self.assertQuerysetEqual(queryset, [self.address.id], attrgetter('id'))

    def test_deep_mixed_backward(self):
        """Contact -> customer -> address traversal finds the contact."""
        queryset = Contact.objects.filter(customer__address=self.address)
        self.assertQuerysetEqual(queryset, [self.contact1.id], attrgetter('id'))
| StarcoderdataPython |
1751559 | """
Class of logistic regression model
"""
from Logistic_Regression.cost_function import *
from Logistic_Regression.hypothesis_function import *
from Logistic_Regression.gradient_descent import *
class Logistic_regression():
    """Simple logistic regression trained by batch gradient descent."""

    def __init__(self, x, y, iteration=100, learning_rate=0.01):
        """Store the data set and initialise one zero weight per feature.

        x: feature matrix; y: targets; iteration: training steps;
        learning_rate: gradient-descent step size.
        """
        self.x = x
        self.y = y
        self.iter = iteration
        self.learning_rate = learning_rate
        # Column vector of zeros, one row per feature of the first sample.
        self.weight = [[0] for _ in range(len(x[0]))]

    def training(self):
        """Run ``self.iter`` gradient-descent steps; return the cost history."""
        costs = []
        for _ in range(self.iter):
            predictions = hypotheis(self.x, self.weight)
            costs.append(cost(self.y, predictions))
            self.weight = gradient_descent(self.y, predictions, self.x,
                                           self.weight, self.learning_rate)
        print(self.weight)
        return costs

    def forward_propagation(self, x):
        """Evaluate the trained model on feature matrix *x*."""
        return hypotheis(x, self.weight)
| StarcoderdataPython |
355034 | #!/usr/bin/env python3
def sum_of_multiples(limit=1000):
    """Sum of the natural numbers below *limit* divisible by 3 or 5
    (Project Euler problem 1, generalised over the upper bound)."""
    return sum(i for i in range(1, limit) if i % 3 == 0 or i % 5 == 0)


print(sum_of_multiples())
| StarcoderdataPython |
def getUserInput():
    """Prompt for a file path plus search/replacement names, then run replace()."""
    path = input("Please enter Filepath: ")
    needle = input("Please enter objects name to replace: ")
    substitute = input("Please enter objects name to replace with: ")
    replace(path, needle, substitute)
def replace(filename, objectname, replacename):
    """Rewrite *filename* in place, substituting every occurrence of
    *objectname* with *replacename*."""
    with open(filename, 'r') as source:
        contents = source.read()
    updated = contents.replace(objectname, replacename)
    with open(filename, 'w') as target:
        target.write(updated)
def main():
    # Entry point: run the interactive prompt once.
    getUserInput()
if __name__ == "__main__": main()
| StarcoderdataPython |
3596437 | #lims
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_physiology_data_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_physiology_data_query(sbaas_template_query):
    """CRUD and lookup helpers for the data_stage01_physiology_data table."""

    def initialize_supportedTables(self):
        '''Set the supported tables dict for this query class.
        '''
        tables_supported = {'data_stage01_physiology_data':data_stage01_physiology_data,
                        };
        self.set_supportedTables(tables_supported);
    def reset_dataStage01_physiology_data(self,experiment_id_I = None):
        # Delete rows for one experiment, or clear the whole table when no
        # experiment id is given.
        try:
            if experiment_id_I:
                reset = self.session.query(data_stage01_physiology_data).filter(data_stage01_physiology_data.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
            else:
                reset = self.session.query(data_stage01_physiology_data).delete(synchronize_session=False);
            self.session.commit();
        except SQLAlchemyError as e:
            print(e);
    def add_dataStage01PhysiologyData(self, data_I):
        '''add rows of data_stage01_physiology_data'''
        if data_I:
            for d in data_I:
                try:
                    # The mapped class accepts the whole row dict; the
                    # commented-out keys document the expected fields.
                    data_add = data_stage01_physiology_data(d
                        #d['experiment_id'],
                        #d['sample_id'],
                        ##d['sample_name_short'],
                        ##d['time_point'],
                        ##d['sample_date'],
                        #d['met_id'],
                        #d['data_raw'],
                        #d['data_corrected'],
                        #d['data_units'],
                        #d['data_reference'],
                        #d['used_'],
                        #d['notes']
                        );
                        #d['comment_']
                    self.session.add(data_add);
                except SQLAlchemyError as e:
                    print(e);
            self.session.commit();
    def update_dataStage01PhysiologyData(self,data_I):
        '''update rows of data_stage01_physiology_data'''
        if data_I:
            for d in data_I:
                try:
                    # Match the logical row key, then overwrite its payload.
                    data_update = self.session.query(data_stage01_physiology_data).filter(
                        #data_stage01_physiology_data.id == d['id'],
                        data_stage01_physiology_data.experiment_id.like(d['experiment_id']),
                        data_stage01_physiology_data.sample_id.like(d['sample_id']),
                        data_stage01_physiology_data.met_id.like(d['met_id']),
                        data_stage01_physiology_data.data_units.like(d['data_units']),
                        data_stage01_physiology_data.data_reference.like(d['data_reference'])).update(
                        {
                            'experiment_id':d['experiment_id'],
                            'sample_id':d['sample_id'],
                            #'sample_name_short':d['sample_name_short'],
                            #'time_point':d['time_point'],
                            #'sample_date':d['sample_date'],
                            'met_id':d['met_id'],
                            'data_raw':d['data_raw'],
                            'data_corrected':d['data_corrected'],
                            'data_units':d['data_units'],
                            'data_reference':d['data_reference'],
                            'used_':d['used_'],
                            'comment_':d['comment_']},
                            synchronize_session=False);
                    if data_update == 0:
                        print('row not found.')
                        print(d);
                except SQLAlchemyError as e:
                    print(e);
            self.session.commit();
    # query sample names from data_stage01_physiology_data
    def get_sampleNameShort_experimentID(self,experiment_id_I,exp_type_I):
        '''Query sample name short (i.e. unknowns) that are used from
        the experiment'''
        try:
            sample_names = self.session.query(sample_description.sample_name_short).filter(
                data_stage01_physiology_data.experiment_id.like(experiment_id_I),
                data_stage01_physiology_data.used_.is_(True),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.sample_name.like(data_stage01_physiology_data.sample_id),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id)).group_by(
                sample_description.sample_name_short).order_by(
                sample_description.sample_name_short.asc()).all();
            sample_names_O = [];
            for sn in sample_names: sample_names_O.append(sn.sample_name_short);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    # query sample IDs from data_stage01_physiology_data
    def get_sampleIDs_experimentID(self,experiment_id_I,exp_type_I):
        '''Query sample ids (i.e. unknowns) that are used from
        the experiment'''
        try:
            sample_names = self.session.query(data_stage01_physiology_data.sample_id).filter(
                data_stage01_physiology_data.experiment_id.like(experiment_id_I),
                data_stage01_physiology_data.used_.is_(True),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.sample_name.like(data_stage01_physiology_data.sample_id)).group_by(
                data_stage01_physiology_data.sample_id).order_by(
                data_stage01_physiology_data.sample_id.asc()).all();
            sample_names_O = [];
            # Fix: rows selected above expose .sample_id, not .sample_name_short
            # (the original appended sn.sample_name_short, raising AttributeError).
            for sn in sample_names: sample_names_O.append(sn.sample_id);
            return sample_names_O;
        except SQLAlchemyError as e:
            print(e);
    # query met_ids from data_stage01_physiology_data
    def get_metIDs_experimentIDAndSampleNameShort(self,experiment_id_I,exp_type_I,sample_name_short_I):
        '''Query met_ids by sample name short that are used from
        the experiment'''
        try:
            met_ids = self.session.query(data_stage01_physiology_data.met_id).filter(
                data_stage01_physiology_data.experiment_id.like(experiment_id_I),
                data_stage01_physiology_data.used_.is_(True),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                experiment.sample_name.like(data_stage01_physiology_data.sample_id),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                sample_description.sample_name_short.like(sample_name_short_I)).group_by(
                data_stage01_physiology_data.met_id).order_by(
                data_stage01_physiology_data.met_id.asc()).all();
            met_ids_O = [];
            for met in met_ids: met_ids_O.append(met.met_id);
            return met_ids_O;
        except SQLAlchemyError as e:
            print(e);
    # query sample_date and data_corrected from data_stage01_physiology_data
    def get_sampleDateAndDataCorrected_experimentIDAndSampleNameShortAndMetIDAndDataUnits(self,experiment_id_I,exp_type_I,sample_name_short_I,met_id_I,data_units_I):
        '''Query time and data_corrected by sample name short that are used from
        the experiment sorted by time'''
        try:
            data = self.session.query(sample_description.sample_date,
                    data_stage01_physiology_data.data_corrected).filter(
                    data_stage01_physiology_data.experiment_id.like(experiment_id_I),
                    data_stage01_physiology_data.met_id.like(met_id_I),
                    data_stage01_physiology_data.data_units.like(data_units_I),
                    data_stage01_physiology_data.used_.is_(True),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    experiment.sample_name.like(data_stage01_physiology_data.sample_id),
                    experiment.sample_name.like(sample.sample_name),
                    sample.sample_id.like(sample_description.sample_id),
                    sample_description.sample_name_short.like(sample_name_short_I)).order_by(
                    sample_description.sample_date.asc()).all();
            sample_date_O = [];
            data_corrected_O = [];
            for d in data:
                sample_date_O.append(d.sample_date);
                data_corrected_O.append(d.data_corrected);
            return sample_date_O,data_corrected_O;
        except SQLAlchemyError as e:
            print(e)
    def get_sampleDateAndDataCorrectedAndSampleIDs_experimentIDAndSampleNameShortAndMetIDAndDataUnits(self,experiment_id_I,exp_type_I,sample_name_short_I,met_id_I,data_units_I):
        '''Query time, data_corrected and sample ids by sample name short that
        are used from the experiment, sorted by time'''
        try:
            data = self.session.query(sample_description.sample_date,
                    data_stage01_physiology_data.data_corrected,
                    data_stage01_physiology_data.sample_id).filter(
                    data_stage01_physiology_data.experiment_id.like(experiment_id_I),
                    data_stage01_physiology_data.met_id.like(met_id_I),
                    data_stage01_physiology_data.data_units.like(data_units_I),
                    data_stage01_physiology_data.used_.is_(True),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    experiment.sample_name.like(data_stage01_physiology_data.sample_id),
                    experiment.sample_name.like(sample.sample_name),
                    sample.sample_id.like(sample_description.sample_id),
                    sample_description.sample_name_short.like(sample_name_short_I)).order_by(
                    sample_description.sample_date.asc()).all();
            sample_date_O = [];
            data_corrected_O = [];
            sample_id_O = [];
            for d in data:
                sample_date_O.append(d.sample_date);
                data_corrected_O.append(d.data_corrected);
                sample_id_O.append(d.sample_id);
            return sample_date_O,data_corrected_O,sample_id_O;
        except SQLAlchemyError as e:
            print(e);
    def get_sampleDateAndDataCorrected_experimentIDAndSampleNameShort(self,experiment_id_I,sample_name_short_I,data_units_I=['mM','OD600']):
        '''Query time and data_corrected by sample name short that are used from
        the experiment sorted by time'''
        # NOTE: the mutable default is never mutated in this method, so it is
        # safe; kept for signature compatibility.
        try:
            data = self.session.query(sample_description.sample_date,
                    sample_description.sample_name_short,
                    sample_description.sample_name_abbreviation,
                    data_stage01_physiology_data.id,
                    data_stage01_physiology_data.data_corrected,
                    data_stage01_physiology_data.experiment_id,
                    data_stage01_physiology_data.sample_id,
                    data_stage01_physiology_data.met_id,
                    data_stage01_physiology_data.data_units,
                    data_stage01_physiology_data.data_reference).filter(
                    data_stage01_physiology_data.experiment_id.like(experiment_id_I),
                    data_stage01_physiology_data.data_units.in_(data_units_I),
                    data_stage01_physiology_data.used_.is_(True),
                    experiment.id.like(experiment_id_I),
                    experiment.sample_name.like(data_stage01_physiology_data.sample_id),
                    experiment.sample_name.like(sample.sample_name),
                    sample.sample_id.like(sample_description.sample_id),
                    sample_description.sample_name_short.like(sample_name_short_I)).order_by(
                    data_stage01_physiology_data.experiment_id.asc(),
                    data_stage01_physiology_data.data_units.asc(),
                    data_stage01_physiology_data.met_id.asc(),
                    sample_description.sample_date.asc()).all();
            rows_O = [d._asdict() for d in data];
            return rows_O;
        except SQLAlchemyError as e:
            print(e);
| StarcoderdataPython |
11390482 | <filename>backend/schemas/warn.py
def Warn(warn_id, warns):
    """Build the serialisable warn document for a single user."""
    document = {"id": warn_id, "warns": warns}
    return document
6543081 | <gh_stars>10-100
from autopalette import af
# Print the greeting rendered through autopalette's af() wrapper, using its
# h1 attribute (presumably heading-style formatting -- see autopalette docs).
print(af("Hello again!").h1)
8024351 | # Copyright 2019 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from typing import TYPE_CHECKING
import os
from dragonchain.webserver import helpers
from dragonchain.webserver.routes import api_keys
from dragonchain.webserver.routes import blocks
from dragonchain.webserver.routes import misc
from dragonchain.webserver.routes import dragonnet
LEVEL = os.environ["LEVEL"]
if LEVEL == "1":
from dragonchain.webserver.routes import verifications
from dragonchain.webserver.routes import smart_contracts
from dragonchain.webserver.routes import transaction_types
from dragonchain.webserver.routes import transactions
if LEVEL == "1" or LEVEL == "5":
from dragonchain.webserver.routes import interchain
if TYPE_CHECKING:
import flask
def route(app: "flask.Flask"):
    """Register every route module applicable to this chain level on *app*."""
    # Route modules shared by all levels.
    for module in (api_keys, blocks, misc, dragonnet):
        module.apply_routes(app)
    # Level-1 chains additionally expose the full transaction API.
    if LEVEL == "1":
        for module in (verifications, smart_contracts, transaction_types, transactions):
            module.apply_routes(app)
    # Interchain routes exist on level 1 and level 5 chains only.
    if LEVEL in ("1", "5"):
        interchain.apply_routes(app)
    # Catch-all handler for anything the routes raise.
    app.register_error_handler(Exception, helpers.webserver_error_handler)
| StarcoderdataPython |
8074778 | import pyaudio
chunk = 1024                      # frames per buffer read/write
FORMAT = pyaudio.paInt16          # 16-bit signed samples
CHANNELS = 1                      # mono
RATE = 41000                      # sample rate in Hz (NOTE(review): 44100 is the usual rate -- confirm 41000 is intended)
RECORD_SECONDS = 5
p = pyaudio.PyAudio()
# One full-duplex stream is used for both capture and playback.
stream = p.open(format = FORMAT,
                channels = CHANNELS,
                rate = RATE,
                input = True,
                output = True,
                frames_per_buffer = chunk)
print ("***Recording***")
a = []
# Capture RECORD_SECONDS worth of audio, one chunk at a time.
for i in range(0, int(RATE / chunk * RECORD_SECONDS)):
    data = stream.read(chunk)
    a.append(data)
print("***Stop recording***")
print ("***START PLAY***")
# Replay the captured bytes through the same stream.
# NOTE(review): slices below are `chunk` *bytes*, i.e. half a chunk of
# 16-bit frames per write -- confirm that is intended.
data = b''.join(a)
for i in range(0, len(data), chunk):
    stream.write(data[i:i+chunk])
1951429 | <gh_stars>10-100
from aorist import aorist, TrainFasttextModel
import json
programs = {}
@aorist(
    programs,
    TrainFasttextModel,
    entrypoint="training_fasttext_model",
    args={
        "tmp_dir": lambda fasttext_embedding: fasttext_embedding.setup.local_storage_setup.tmp_dir,
        "dim": lambda fasttext_embedding_schema: str(fasttext_embedding_schema.dim),
    },
)
def recipe(
    tmp_dir, dim,
):
    """Aorist recipe: train an unsupervised fastText model on
    ``<tmp_dir>data.txt`` and dump one JSON record per vocabulary word
    ({id, word, embedding}) to ``<tmp_dir>words.txt``."""
    import fasttext

    def training_fasttext_model(tmp_dir, dim):
        # Fixes vs. the original:
        #   * 'tmp_dir' was a string literal, so training always looked for a
        #     file literally named "tmp_dirdata.txt".
        #   * list has no .enumerate() method; use the builtin enumerate().
        model = fasttext.train_unsupervised(tmp_dir + 'data.txt', dim=int(dim))
        words = model.get_words()
        with open(tmp_dir + 'words.txt', 'w') as f:
            for i, word in enumerate(words):
                f.write(json.dumps(
                    {
                        "id": i,
                        "word": word,
                        # get_word_vector returns a numpy array, which
                        # json.dumps cannot serialise directly.
                        "embedding": model.get_word_vector(word).tolist(),
                    }
                ))
| StarcoderdataPython |
5164477 | import os
from konduit import *
from konduit.client import Client
from konduit.server import Server
from konduit.utils import default_python_path
from utils import to_base_64
# Set the working directory to this folder and register
# the "detect_image_str.py" script as code to be executed by konduit.
work_dir = os.path.abspath(".")
python_config = PythonConfig(
    python_path=default_python_path(work_dir),
    python_code_path=os.path.join(work_dir, "detect_image_str.py"),
    python_inputs={"image": "STR"},
    python_outputs={"num_boxes": "STR"},
)
# Configure a Python pipeline step for your Python code. Internally, konduit
# will take strings as input and output for this example.
python_pipeline_step = PythonStep().step(python_config)
serving_config = ServingConfig(
    http_port=1337, output_data_format="JSON"
)
# Start a konduit server and wait for it to start
server = Server(serving_config=serving_config, steps=[python_pipeline_step])
server.start()
# Initialize a konduit client that takes in and outputs JSON
client = Client(
    input_data_format="JSON",
    prediction_type="RAW",
    output_data_format="JSON",
    host="http://localhost",
    port=1337,
)
# encode the image from a file to base64 and get back a prediction from the konduit server
encoded_image = to_base_64(
    os.path.abspath("./Ultra-Light-Fast-Generic-Face-Detector-1MB/imgs/1.jpg")
)
predicted = client.predict({"image": encoded_image})
# the actual output can be found under "num_boxes"
print(predicted)
# NOTE(review): the hard-coded box count ties this demo to one sample image
# and model version; `assert` is also stripped under python -O.
assert predicted["num_boxes"] == "51"
server.stop()
| StarcoderdataPython |
141337 | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
# Beam-bending analysis: Young's modulus of an aluminium bar from deflection
# measurements, fitted against the analytic deflection shapes.
g = ufloat(9.811899, 0.000041)  # local gravity with uncertainty
x, d0, d = np.genfromtxt("Messdaten/c.txt", unpack=True)
D = d - d0  # deflection = loaded reading minus zero reading
# Split the measurement points into the two halves of the beam.
x1 = x[0:26]
x2 = x[28:52]
D1 = D[0:26]
D2 = D[28:52]
ascii.write([x, d0, d, D], 'Messdaten/beidseitig.tex', format="latex",
            names=["messpunkt x ", "D0", "Dlast", "D"])
m_alu, d_alu, dx_alu = np.genfromtxt("Messdaten/a.txt", unpack=True)
# Bar geometry / masses (units as recorded in the lab notes).
L_stab_alueingespannt = 0.56
mlast_alu = 4720.1
L_stab_alu = 60
d_stab_alu = 0.01
v_stab_alu = d_stab_alu**2 * L_stab_alu  # square cross-section volume
m_stab_alu = 167.1
pdichte_alu = m_stab_alu / v_stab_alu
print("Dichte Stab rechteckig", pdichte_alu)
dichte_lit_alu = 2.712  # in g/m^3
print("Dichte Alu Literatur", dichte_lit_alu)
# Convert positions (presumably cm -> m; confirm against the raw data file).
x = x / 100
x1 = x1 / 100
x2 = x2 / 100
# Analytic deflection shape for the first half of the beam.
x_alu_fit = ((3 * L_stab_alueingespannt**2) * x1 - 4 * x1**3)
#x1_ls = np.linspace(3, 60, 50) / 100
#x_alu_fit_ls = ((3 * L_stab_alueingespannt**2) * x1_ls - 4 * x1_ls**3)
def Y1(x, a):
    # Linear model through the origin for the curve fit.
    return a * x
params, covariance = curve_fit(Y1, x_alu_fit, D1)
errors = np.sqrt(np.diag(covariance))
print("params", *params, "und +/-", errors[0])
plt.plot(x_alu_fit, D1, 'rx', label="Messwerte")
plt.plot(x_alu_fit, Y1(x_alu_fit, *params),
         'b-', label="Regressionsgrade")
plt.xlabel(r"$3L^2 x - 4x^3$/$10^{-3}\,\si{\cubic\meter}$")
plt.ylabel(r"$D(x)$/$\si{\milli\meter}$")
plt.legend(loc='best')
plt.tight_layout()
#plt.ylim(0, 3.5)
#plt.xlim(0, 0.19)
plt.savefig('Bilder/c.pdf')
# Young's modulus from the fitted slope: E = F / (48 a I).
a_alu = ufloat(params[0], errors[0])
F_alu = mlast_alu * g
I_alu = d_stab_alu**4 / 12  # second moment of area of a square section
E_alu = F_alu / (48 * a_alu * I_alu)
print("E alu=", E_alu)
##########################################################################
# Second half of the beam: different analytic deflection shape, same fit.
def Y2(x, A):
    # Linear model through the origin for the curve fit.
    return A * x
x_alufit = 4 * x2**3 - 12 * L_stab_alueingespannt * x2**2 + \
    9 * L_stab_alueingespannt**2 * x2 - L_stab_alueingespannt**3
#x2_ls = np.linspace(3, 70, 50) / 100
# x_alufit_ls = 4 * x2_ls**3 - 12 * L_stab_alueingespannt * x2_ls**2 + \
#9 * L_stab_alueingespannt**2 * x2_ls - L_stab_alueingespannt**3
plt.clf()
params, covariance = curve_fit(Y2, x_alufit, D2)
errors = np.sqrt(np.diag(covariance))
print("params", *params, "fehler", *errors)
plt.plot(x_alufit, D2, 'rx', label="Messwerte")
plt.plot(x_alufit, Y2(x_alufit, *params), 'b-', label="Regressionsgrade")
plt.xlabel(
    r"$4x^3 -12Lx^2 + 9L^2x - L^3$/$10^{-3}\,\si{\cubic\meter}$")
plt.ylabel(r"$D(x)$/$\si{\milli\meter}$")
plt.ylim(0.5, 3.0)
plt.xlim(0.03, 0.18)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('Bilder/c2.pdf')
a_alu = ufloat(params[0], errors[0])
F_alu = mlast_alu * g
I_alu = d_stab_alu**4 / 12
E_alu = F_alu / (48 * a_alu * I_alu)
print("E alu=", E_alu)
| StarcoderdataPython |
9705009 | <gh_stars>0
import numpy as np
from layers.base import Layer
class Output(Layer):
    """Terminal network layer: forwards activations unchanged and seeds the
    backward pass from the configured loss function."""

    def __init__(self, input_layers, output_shape, loss_function=None, learning_rate=0.1):
        super().__init__(input_layers, output_shape)
        self.loss_function = loss_function
        self.cur_y_true = None
        self.learning_rate = learning_rate
        # Empty (0, output_shape) delta until the first backward pass.
        self.delta = np.zeros([0, self.output_shape])
        self.cur_loss = None
        self.cur_accuracy = None

    def get_cur_outputs(self):
        return self.cur_outputs

    def forward(self):
        # The output layer has exactly one upstream input; pass it through.
        self.cur_outputs = self.cur_inputs[0]
        self.clear_cur_inputs_flags()

    def backward(self):
        loss, delta, accuracy = self.loss_function(
            self.cur_y_true, self.cur_outputs, self.learning_rate)
        self.cur_loss = loss
        self.delta = delta
        self.cur_accuracy = accuracy
        # Hand each upstream layer its own copy of the error signal.
        for upstream in self.input_layers:
            upstream.append_cur_delta(self, np.array(self.delta))
        self.clear_cur_deltas_flags()

    def set_cur_y_true(self, y_values):
        self.cur_y_true = y_values
| StarcoderdataPython |
1917452 | '''
main.py
Created by <NAME> on 2020
Copyright © 2020 <NAME>. All rights reserved.
'''
import sys
# Read three space-separated integers A, B, C from one stdin line and
# demonstrate the modular-arithmetic identities
# (A+B) mod C == ((A mod C) + (B mod C)) mod C and the product analogue.
a, b, c = map(int, sys.stdin.readline().rstrip().split(' '))
print((a + b) % c)
print(((a % c) + (b % c)) % c)
print((a * b) % c)
print(((a % c) * (b % c)) % c)
1872341 | from sane_doc_reports import utils
from sane_doc_reports.domain.CellObject import CellObject
from sane_doc_reports.domain.Element import Element
from sane_doc_reports.conf import DEBUG, PYDOCX_FONT_SIZE, \
DEFAULT_TABLE_FONT_SIZE, DEFAULT_TABLE_STYLE, PYDOCX_FONT_NAME, \
PYDOCX_FONT_COLOR, DEFAULT_FONT_COLOR, DEFAULT_TITLE_FONT_SIZE, \
PYDOCX_FONT_BOLD, DEFAULT_TITLE_COLOR, MAX_MS_TABLE_COLS_LIMIT
from sane_doc_reports.domain.Section import Section
from sane_doc_reports.elements import image
from sane_doc_reports.populate.utils import insert_text
from sane_doc_reports.utils import get_chart_font
def fix_order(ordered, readable_headers) -> list:
    """Return the readable header names, ordered as in *ordered*.

    *ordered* may contain plain strings (old JSON format) or dicts carrying a
    'key' and an optional 'hidden' flag (new format); *readable_headers* maps
    raw column keys to their display names.
    """
    readable_headers_values = readable_headers.values()
    # Lookup table tolerant of casing: both camelCase and all-lowercase
    # variants of every display name map back to the display name itself.
    temp_readable = {
        **{i[0].lower() + i[1:]: i for i in readable_headers_values},
        **{i.lower(): i for i in readable_headers_values}}
    # ... and tolerant of embedded spaces in the keys as well.
    temp_readable = {k.replace(" ", ""): v for k, v in temp_readable.items()}
    # Old json format table columns are not lowercase
    inv_fix = {i: i for i in readable_headers_values}
    temp_readable = {**temp_readable, **inv_fix}
    # New format fix
    if any([isinstance(i, dict) for i in ordered]):
        ret = []
        for k in ordered:
            if isinstance(k, dict):
                key = k.get('key')
                key = readable_headers.get(key, key)
                # De-duplicate and drop hidden columns (dict entries only).
                if key not in ret and not k.get('hidden', False):
                    ret.append(key)
            else:
                # NOTE(review): raises KeyError when a string entry is not in
                # the lookup table — presumably callers guarantee membership.
                ret.append(temp_readable[k])
        return ret
    ret = []
    for ordered_key in ordered:
        if isinstance(ordered_key, str):
            ret.append(temp_readable[ordered_key])
    return ret
def insert_table_image(item, item_key, insertion_cell):
    """Render an image-typed table cell via the image element."""
    cell_data = item[item_key]
    section = Section(cell_data['type'], cell_data['data'], {}, {})
    cell_obj = CellObject(insertion_cell, add_run=False)
    image.invoke(cell_obj, section)
class TableElement(Element):
    """Renders a 'table' section into a docx cell, with optional merged
    title row, readable/ordered headers and image-valued cells."""

    # Shared font styles for body text and the (optional) title row.
    style = {
        'text': {
            PYDOCX_FONT_SIZE: DEFAULT_TABLE_FONT_SIZE,
            PYDOCX_FONT_NAME: get_chart_font(),
            PYDOCX_FONT_COLOR: DEFAULT_FONT_COLOR,
            PYDOCX_FONT_BOLD: False,
        },
        'title': {
            PYDOCX_FONT_NAME: get_chart_font(),
            PYDOCX_FONT_COLOR: DEFAULT_TITLE_COLOR,
            PYDOCX_FONT_SIZE: DEFAULT_TITLE_FONT_SIZE,
            PYDOCX_FONT_BOLD: False,
        },
    }

    def insert(self):
        """Build the docx table for this section inside self.cell_object."""
        if DEBUG:
            print("Adding table...")
        table_data = self.section.contents
        # New format wraps the rows in {'data': [...]}; unwrap if so.
        if isinstance(table_data, dict):
            table_data = table_data.get('data', table_data)
        # If table columns isn't present, use the dict values of the table data
        # as table columns (kind of like list).
        if 'tableColumns' not in self.section.layout:
            self.section.layout['tableColumns'] = list(table_data[0].keys())
        # Use and order according to readableHeaders if present.
        if 'readableHeaders' in self.section.layout:
            ordered = self.section.layout['tableColumns']
            readable_headers = self.section.layout['readableHeaders']
            table_columns = fix_order(ordered, readable_headers)
        else:
            table_columns = self.section.layout['tableColumns']
        # Quick fix, word crashes on more than MAX_MS_TABLE_COLS_LIMIT
        # (64 right now) columns.
        # See: https://stackoverflow.com/questions/36921010/docx-does-not-support-more-than-63-columns-in-a-table
        table_columns = table_columns[0:MAX_MS_TABLE_COLS_LIMIT]
        # NOTE(review): removing from table_columns while enumerate()-ing it
        # can skip the element after each removal — confirm non-string
        # entries never appear consecutively, or filter into a new list.
        for i, row_title in enumerate(table_columns):
            if not isinstance(row_title, str):
                table_columns.remove(row_title)
        if 'title' in self.section.extra:
            # Title variant: extra first row merged across all columns.
            table = self.cell_object.cell.add_table(rows=2,
                                                    cols=len(table_columns))
            title = table.cell(0, 0)
            title.merge(table.cell(0, len(table_columns) - 1))
            insert_text(title, self.section.extra['title'], self.style['title'])
            hdr_cells = table.rows[1].cells
        else:
            table = self.cell_object.cell.add_table(rows=1,
                                                    cols=len(table_columns))
            hdr_cells = table.rows[0].cells
        table.style = DEFAULT_TABLE_STYLE
        # 'list_style' renders the table without any borders/style at all.
        if 'list_style' in self.section.extra and self.section.extra[
            'list_style']:
            table.style = None
        for i, row_title in enumerate(table_columns):
            insert_text(hdr_cells[i], row_title, self.style['text'])
        for row_item in table_data:
            row_cells = table.add_row().cells
            for i, row_title in enumerate(table_columns):
                if row_title not in row_item:
                    continue
                # Old json format can have 'Avatars', which are images
                if isinstance(row_item[row_title], dict) and \
                        row_item[row_title]['type'] == 'image':
                    insert_table_image(row_item, row_title, row_cells[i])
                else:
                    insert_text(row_cells[i], str(row_item[row_title]),
                                self.style['text'])
def invoke(cell_object, section):
    """Entry point: render *section* into *cell_object* as a table.

    Sections of any other type produce an inline error element instead.
    """
    if section.type == 'table':
        TableElement(cell_object, section).insert()
        return
    err_msg = f'Called table but not table - [{section}]'
    return utils.insert_error(cell_object, err_msg)
| StarcoderdataPython |
1874228 | <gh_stars>1-10
class Cookie:
    """One cached entry: a value stamped with its creation time and TTL."""

    def __init__(self, time, value, ttl):
        # A ttl of 0 is treated as "never expires" by the consumer.
        self.value = value
        self.start = time
        self.ttl = ttl
class Memcache:
    """Toy memcached-like key/value store with per-entry expiry.

    All operations take an explicit current time, so expiry is purely
    arithmetic (no clocks). 2147483647 (INT32_MAX) is the sentinel
    returned for missing or expired keys.
    """

    def __init__(self):
        self.graph = dict()   # key -> Cookie
        self.na = 2147483647  # "not available" sentinel

    def _live_cookie(self, curtTime, key):
        """Return the Cookie for *key* if present and unexpired, else None.

        A ttl of 0 means the entry never expires. Expired entries are not
        evicted here (matching the original behaviour).
        """
        if key not in self.graph:
            return None
        cookie = self.graph[key]
        if cookie.ttl == 0 or curtTime - cookie.start < cookie.ttl:
            return cookie
        return None

    """
    @param: curtTime: An integer
    @param: key: An integer
    @return: An integer
    """
    def get(self, curtTime, key):
        cookie = self._live_cookie(curtTime, key)
        return cookie.value if cookie is not None else self.na

    """
    @param: curtTime: An integer
    @param: key: An integer
    @param: value: An integer
    @param: ttl: An integer
    @return: nothing
    """
    def set(self, curtTime, key, value, ttl):
        # Overwrites any existing entry, restarting its TTL window.
        self.graph[key] = Cookie(curtTime, value, ttl)
        return

    """
    @param: curtTime: An integer
    @param: key: An integer
    @return: nothing
    """
    def delete(self, curtTime, key):
        if key in self.graph:
            del self.graph[key]
        return

    """
    @param: curtTime: An integer
    @param: key: An integer
    @param: delta: An integer
    @return: An integer
    """
    def incr(self, curtTime, key, delta):
        # Increment in place and return the new value; sentinel if the key
        # is missing or expired.
        cookie = self._live_cookie(curtTime, key)
        if cookie is None:
            return self.na
        cookie.value += delta
        return cookie.value

    """
    @param: curtTime: An integer
    @param: key: An integer
    @param: delta: An integer
    @return: An integer
    """
    def decr(self, curtTime, key, delta):
        # Decrement is increment by the negated delta (dedupes the logic
        # that was duplicated between incr and decr).
        return self.incr(curtTime, key, -delta)
345105 | # -*- coding: utf-8 -*-
"""
tfrecord torch dataset实现
"""
from . import dataset
from .dataset import TFRecordDataset
from .dataset import MultiTFRecordDataset
| StarcoderdataPython |
111605 | from datetime import date
# Ask seven people for their birth year and count how many are over /
# under 18 relative to the current year.
m =0
me =0
for c in range(1,8):
    i = int(input('que ano a {}ª pessoa nasceu ? >>>'.format(c)))
    ano = int(date.today().year)
    idade = ano - i
    if idade > 18:
        m += 1
    else:
        me += 1
print('{} pessoas são maiores de idade'.format(m))
print('{} pessoas são menores de idade'.format(me))
# NOTE(review): this prints only the LAST person's age (loop leftover) —
# confirm it is intentional and not leftover debugging output.
print(idade)
5067885 | """Utils module of kytos/pathfinder Kytos Network Application."""
# pylint: disable=unused-argument
def lazy_filter(filter_type, filter_func):
    """
    Lazy typed filter on top of the built-in function.

    Returns a callable (value, items) that type-checks *value* against
    *filter_type* at call time, then filters *items* with
    filter_func(value). Useful when the filtered-for values are only
    known dynamically at runtime.
    """
    def apply_filter(value, items):
        if isinstance(value, filter_type):
            return filter(filter_func(value), items)
        raise TypeError(f"Expected type: {filter_type}")
    return apply_filter
def nx_edge_data_weight(edge_u, edge_v, edge_data):
    """Return custom edge data value to be used as a callback by nx."""
    hop = edge_data.get("hop")
    # Falsy values (missing, None, 0) fall back to a unit weight.
    return hop if hop else 1
def nx_edge_data_delay(edge_u, edge_v, edge_data):
    """Return custom edge data value to be used as a callback by nx."""
    delay = edge_data.get("delay")
    # Falsy values (missing, None, 0) fall back to a unit delay.
    return delay if delay else 1
def nx_edge_data_priority(edge_u, edge_v, edge_data):
    """Return custom edge data value to be used as a callback by nx."""
    priority = edge_data.get("priority")
    # Falsy values (missing, None, 0) fall back to a unit priority.
    return priority if priority else 1
def filter_le(metric):
    """Lazy filter_le: keep nx edge tuples whose *metric* is <= the bound.

    Edges missing the metric pass (the bound is used as the default).
    """
    def bind(x):
        def check(nx_edge_tup):
            return nx_edge_tup[2].get(metric, x) <= x
        return check
    return bind
def filter_ge(metric):
    """Lazy filter_ge: keep nx edge tuples whose *metric* is >= the bound.

    Edges missing the metric pass (the bound is used as the default).
    """
    def bind(x):
        def check(nx_edge_tup):
            return nx_edge_tup[2].get(metric, x) >= x
        return check
    return bind
def filter_in(metric):
    """Lazy filter_in: keep nx edge tuples whose *metric* contains the value.

    Edges missing the metric pass (a singleton {value} is the default).
    """
    def bind(x):
        def check(nx_edge_tup):
            return x in nx_edge_tup[2].get(metric, {x})
        return check
    return bind
| StarcoderdataPython |
8096493 | import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
def test_cartpole():
    """Train PPO2 on CartPole-v0 and assert the mean episode reward over
    ten rendered evaluation episodes reaches 200 (the env's max)."""
    env = gym.make('CartPole-v0')
    # PPO2 expects a vectorized env; wrap the single env.
    env = DummyVecEnv([lambda: env])
    model = PPO2(MlpPolicy, env)
    model.learn(total_timesteps=100000)
    rewards = []
    for i in range(10):
        done = False
        cum_rewards = 0
        obs = env.reset()
        while not done:
            action, _states = model.predict(obs)
            obs, reward, done, info = env.step(action)
            cum_rewards += reward
            # NOTE(review): rendering every step slows the test and needs a
            # display — consider removing for CI.
            env.render()
        rewards.append(cum_rewards)
        print(cum_rewards)
    avg_rewards = sum(rewards) / len(rewards)
    print('average', avg_rewards)
    assert avg_rewards >= 200
    env.close()
| StarcoderdataPython |
8199999 | <reponame>briancline/softlayer-python
"""Get details for a hardware device."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer import utils
import click
@click.command()
@click.argument('identifier')
@click.option('--passwords',
              is_flag=True,
              help='Show passwords (check over your shoulder!)')
@click.option('--price',
              is_flag=True,
              help='Show associated prices')
@environment.pass_env
def cli(env, identifier, passwords, price):
    """Get details for a hardware device."""
    # NOTE: the docstring above is click's help text — keep it short.
    hardware = SoftLayer.HardwareManager(env.client)
    table = formatting.KeyValueTable(['Name', 'Value'])
    table.align['Name'] = 'r'
    table.align['Value'] = 'l'

    # Resolve the user-supplied identifier (id, hostname, ...) to a
    # numeric hardware id, then fetch the full record.
    hardware_id = helpers.resolve_id(hardware.resolve_ids,
                                     identifier,
                                     'hardware')
    result = hardware.get_hardware(hardware_id)
    # NestedDict makes missing nested keys come back empty instead of raising.
    result = utils.NestedDict(result)

    table.add_row(['id', result['id']])
    table.add_row(['guid', result['globalIdentifier'] or formatting.blank()])
    table.add_row(['hostname', result['hostname']])
    table.add_row(['domain', result['domain']])
    table.add_row(['fqdn', result['fullyQualifiedDomainName']])
    table.add_row(['status', result['hardwareStatus']['status']])
    table.add_row(['datacenter',
                   result['datacenter']['name'] or formatting.blank()])
    table.add_row(['cores', result['processorPhysicalCoreAmount']])
    table.add_row(['memory', formatting.gb(result['memoryCapacity'])])
    table.add_row(['public_ip',
                   result['primaryIpAddress'] or formatting.blank()])
    table.add_row(['private_ip',
                   result['primaryBackendIpAddress'] or formatting.blank()])
    table.add_row(['ipmi_ip',
                   result['networkManagementIpAddress'] or formatting.blank()])
    table.add_row([
        'os',
        formatting.FormattedItem(
            result['operatingSystem']['softwareLicense']
            ['softwareDescription']['referenceCode'] or formatting.blank(),
            result['operatingSystem']['softwareLicense']
            ['softwareDescription']['name'] or formatting.blank()
        )])
    table.add_row(
        ['created', result['provisionDate'] or formatting.blank()])

    # Owner comes from the original order's user record, when billing exists.
    if utils.lookup(result, 'billingItem') != []:
        table.add_row(['owner', formatting.FormattedItem(
            utils.lookup(result, 'billingItem', 'orderItem',
                         'order', 'userRecord',
                         'username') or formatting.blank(),
        )])
    else:
        table.add_row(['owner', formatting.blank()])

    # Nested sub-table listing all attached VLANs.
    vlan_table = formatting.Table(['type', 'number', 'id'])
    for vlan in result['networkVlans']:
        vlan_table.add_row([
            vlan['networkSpace'], vlan['vlanNumber'], vlan['id']])
    table.add_row(['vlans', vlan_table])

    if result.get('notes'):
        table.add_row(['notes', result['notes']])

    if price:
        table.add_row(['price rate',
                       utils.lookup(result,
                                    'billingItem',
                                    'nextInvoiceTotalRecurringAmount')])

    if passwords:
        # OS users and remote-management (IPMI) credentials, only on request.
        pass_table = formatting.Table(['username', 'password'])
        for item in result['operatingSystem']['passwords']:
            pass_table.add_row([item['username'], item['password']])
        table.add_row(['users', pass_table])

        pass_table = formatting.Table(['ipmi_username', 'password'])
        for item in result['remoteManagementAccounts']:
            pass_table.add_row([item['username'], item['password']])
        table.add_row(['remote users', pass_table])

    tag_row = []
    for tag_detail in result['tagReferences']:
        tag = utils.lookup(tag_detail, 'tag', 'name')
        if tag is not None:
            tag_row.append(tag)

    if tag_row:
        table.add_row(['tags', formatting.listing(tag_row, separator=',')])

    # Test to see if this actually has a primary (public) ip address
    try:
        if not result['privateNetworkOnlyFlag']:
            ptr_domains = (env.client['Hardware_Server']
                           .getReverseDomainRecords(id=hardware_id))

            for ptr_domain in ptr_domains:
                for ptr in ptr_domain['resourceRecords']:
                    table.add_row(['ptr', ptr['data']])
    except SoftLayer.SoftLayerAPIError:
        # Reverse-DNS lookup is best-effort; ignore API failures here.
        pass

    env.fout(table)
| StarcoderdataPython |
5195803 | <filename>contigtax/shred_fasta.py
#!/usr/bin/env python
import random
from Bio import SeqIO
from argparse import ArgumentParser
import sys
def read_seqs(f):
    """Parse a FASTA file into a dict mapping record id -> Biopython SeqRecord."""
    return SeqIO.to_dict(SeqIO.parse(f, "fasta"))
def shred(d, prefix=None, existing=False, contigs=10000, minsize=500,
          maxsize=10000):
    """
    Generate random shreds of input fasta file
    :param d: Dictionary of sequences
    :param prefix: Prefix string to append to random contigs
    :param existing: Use existing prefix string ('|' splits prefix)
    :param contigs: Number of contigs to generate
    :param minsize: Minimum size of contigs
    :param maxsize: Maximum size of contigs
    :return: Dictionary of randomly shredded contigs
    """
    random.seed(42)  # fixed seed: shredding is reproducible across runs
    shreds = {}
    keys = list(d.keys())
    for i in range(0, contigs):
        # pick a random contig
        key = random.choice(keys)
        if existing:
            prefix = key.split("|")[0]
        if prefix is not None:
            contig_id = ">{}|contig{}".format(prefix, i)
        else:
            contig_id = ">contig{}".format(i)
        keylen = len(d[key])-1
        # pick a random length
        rand_len = random.randrange(minsize, maxsize)
        # if random length is bigger than contig, choose entire contig
        if rand_len >= keylen:
            # NOTE(review): this branch stores a str while the slice branch
            # below stores a Bio Seq object; write_shreds() str()s both, so
            # downstream output is consistent — confirm nothing else relies
            # on the value type.
            shreds[contig_id] = str(d[key].seq)
            continue
        # choose whether to start from beginning or end
        if random.choice(["start", "end"]) == "start":
            # if choosing from beginning, pick a random position between
            # the first nucleotide and contig_length - rand_length
            rand_start = random.randrange(0, keylen-rand_len)
            rand_end = rand_start+rand_len
        else:
            rand_end = random.randrange(rand_len, keylen)
            rand_start = rand_end-rand_len
        rand_seq = d[key][rand_start:rand_end]
        shreds[contig_id] = rand_seq.seq
    return shreds
def write_shreds(shreds):
    """Emit shredded contigs as FASTA on stdout and length stats on stderr.

    Args:
        shreds: dict mapping a '>'-prefixed contig id to its sequence
            (anything whose str() is the sequence and that supports len()).
    """
    # Hoisted to the top of the function (the original imported numpy
    # mid-body, after the loop); kept local so module import stays cheap.
    import numpy as np
    lengths = []
    for contig_id in sorted(shreds.keys()):
        seq = shreds[contig_id]
        lengths.append(len(seq))
        sys.stdout.write("{}\n{}\n".format(contig_id, str(seq)))
    # NOTE(review): np.min/np.max raise on an empty dict, as the original did.
    sys.stderr.write(
        """
    min: {min}
    max: {max}
    median: {median}
    mean: {mean}
    """.format(min=np.min(lengths), max=np.max(lengths),
               median=np.median(lengths), mean=np.mean(lengths)))
def main(args):
    """Wire the pipeline: read the FASTA, shred it per CLI options, print."""
    seqs = read_seqs(args.infile)
    shreds = shred(seqs, args.prefix, args.use_prefix, args.contigs,
                   args.minsize, args.maxsize)
    write_shreds(shreds)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", type=str, help="Input fasta file")
parser.add_argument("--prefix", type=str, help="prefix to add to ids")
parser.add_argument("--use-prefix", action="store_true",
help="Use already existing prefix for sequences")
parser.add_argument("--minsize", type=int, default=500,
help="Minimum contig size")
parser.add_argument("--maxsize", type=int, default=10000,
help="Maximum contig size")
parser.add_argument("--contigs", type=int, default=10000,
help="Contigs to generate")
args = parser.parse_args()
main(args) | StarcoderdataPython |
1706759 | <reponame>zavanton123/coderators<gh_stars>0
# Generated by Django 3.1.3 on 2021-01-23 13:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the snippet app: creates the
    # Category, Tag and Snippet models; Snippet links to the user model,
    # one optional Category and many Tags. Edit with care — Django tracks
    # this file's state in django_migrations.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
                'ordering': ['-created_at'],
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('slug', models.SlugField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Tag',
                'verbose_name_plural': 'Tags',
                'ordering': ['-created_at'],
            },
        ),
        migrations.CreateModel(
            name='Snippet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=1000)),
                ('content', models.TextField()),
                ('published_at', models.DateTimeField(auto_now_add=True, verbose_name='Published')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated')),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='snippets', to='snippet.category')),
                ('tags', models.ManyToManyField(blank=True, null=True, related_name='snippets', to='snippet.Tag')),
            ],
        ),
    ]
| StarcoderdataPython |
1735343 | <filename>module/shaders/src/compile_shaders.py
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2015 <NAME>, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function

import os
import subprocess
import sys

# Usage: compile_shaders.py [path-to-oslc]
# Walks the current directory tree, compiles every *.osl shader with oslc
# into a mirrored tree one level up, and stops on the first failure.
if len(sys.argv) != 2:
    print("Usage: {0} [path-to-oslc]".format(sys.argv[0]))
    sys.exit(0)

oslc_cmd = sys.argv[1]

include_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "include")

for dirpath, dirnames, filenames in os.walk("."):
    for filename in filenames:
        if filename.endswith(".osl"):
            src_filepath = os.path.join(dirpath, filename)
            dest_dir = os.path.join("..", dirpath)
            dst_filename = filename.replace(".osl", ".oso")
            dst_filepath = os.path.join(dest_dir, dst_filename)
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            # Use an argument list instead of a shell string (os.system) so
            # paths containing spaces or shell metacharacters are passed
            # through safely; subprocess.call also returns the real exit code.
            retcode = subprocess.call(
                [oslc_cmd, "-v", "-I" + include_dir, "-o", dst_filepath, src_filepath])
            if retcode != 0:
                print("Compilation of {0} failed with error code {1}. Stopping.".format(src_filepath, retcode))
                sys.exit(retcode)
3252598 | <filename>d3d/vis/pcl.py
import numpy as np
from d3d.abstraction import ObjectTarget3DArray
_pcl_available = False
try:
import pcl
import pcl.visualization as pv
_pcl_available = True
except:
pass
def visualize_detections(visualizer: pcl.Visualizer, visualizer_frame: str, targets: ObjectTarget3DArray, calib,
                         text_scale=0.8, box_color=(1, 1, 1), text_color=(1, 0.8, 1), id_prefix="", position_offset=None):
    '''
    Draw each 3D detection into the PCL visualizer as a semi-transparent box
    with a text tag (id/tag name plus optional score and variances) and a
    heading arrow.

    Note: To use this visualizer, targets should be in the same frame as the
    visualizer frame (lidar frame); otherwise they are transformed via calib.
    '''
    if not _pcl_available:
        raise RuntimeError("pcl is not available, please check the installation of package pcl.py")

    # Normalize the prefix so generated shape ids read "prefix/targetN".
    if id_prefix != "" and not id_prefix.endswith("/"):
        id_prefix = id_prefix + "/"

    # change frame to the same
    if targets.frame != visualizer_frame:
        targets = calib.transform_objects(targets, frame_to=visualizer_frame)

    for i, target in enumerate(targets):
        # convert coordinate
        orientation = target.orientation.as_quat()
        orientation = [orientation[3]] + orientation[:3].tolist() # To PCL quaternion (w, x, y, z)
        lx, ly, lz = target.dimension

        cube_id = (id_prefix + "target%d") % i
        visualizer.addCube(target.position, orientation, lx, ly, lz, id=cube_id)
        visualizer.setShapeRenderingProperties(pv.RenderingProperties.Opacity, 0.8, id=cube_id)
        visualizer.setShapeRenderingProperties(pv.RenderingProperties.Color, box_color, id=cube_id)

        # draw tag
        text_id = (id_prefix + "target%d/tag") % i
        if target.id:
            disp_text = "%s: %s" % (str(target.id)[:6], target.tag_name)
        else:
            disp_text = "#%d: %s" % (i, target.tag_name)

        # Auxiliary numbers in parentheses: score and cube-root determinants
        # of the covariance matrices (only when they carry information).
        aux_text = []
        if target.tag_score < 1:
            aux_text.append("%.2f" % target.tag_score)
        position_var = np.power(np.linalg.det(target.position_var), 1/3)
        if position_var > 0:
            aux_text.append("%.2f" % position_var)
        dimension_var = np.power(np.linalg.det(target.dimension_var), 1/3)
        if dimension_var > 0:
            aux_text.append("%.2f" % dimension_var)
        if target.orientation_var > 0:
            aux_text.append("%.2f" % target.orientation_var)
        if len(aux_text) > 0:
            disp_text += " (" + ", ".join(aux_text) + ")"

        # NOTE(review): disp_pos aliases target.position — the in-place
        # "+= lz / 2" below mutates the target if position is a mutable
        # array; confirm a copy is not required here.
        disp_pos = target.position
        disp_pos[2] += lz / 2 # lift the text out of box
        if position_offset is not None: # apply offset
            disp_pos += position_offset
        visualizer.addText3D(disp_text, list(disp_pos),
            text_scale=text_scale, color=text_color, id=text_id)

        # draw orientation
        arrow_id = (id_prefix + "target%d/direction") % i
        direction = target.orientation.as_matrix().dot([1,0,0])
        pstart = target.position
        pend = target.position + direction * lx
        visualizer.addLine(pstart, pend, id=arrow_id)
| StarcoderdataPython |
3522591 | from conans.client.generators.cmake import DepsCppCmake
from conans.model import Generator
class CMakePathsGenerator(Generator):
    """Conan generator emitting conan_paths.cmake, which prepends the
    dependency packages' build paths to CMAKE_MODULE_PATH and
    CMAKE_PREFIX_PATH so CMake finds package-provided FindXXX.cmake files
    first."""

    @property
    def filename(self):
        # Name of the file Conan writes this generator's content into.
        return "conan_paths.cmake"

    @property
    def content(self):
        deps = DepsCppCmake(self.deps_build_info)
        # We want to prioritize the FindXXX.cmake files:
        # 1. First the files found in the packages
        # 2. The previously set (by default CMAKE_MODULE_PATH is empty)
        # 3. The "install_folder" ones, in case there is no FindXXX.cmake, try with the install dir
        #    if the user used the "cmake_find_package" will find the auto-generated
        # 4. The CMake installation dir/Modules ones.
        return """set(CMAKE_MODULE_PATH {deps.build_paths} ${{CMAKE_MODULE_PATH}} ${{CMAKE_CURRENT_LIST_DIR}})
set(CMAKE_PREFIX_PATH {deps.build_paths} ${{CMAKE_PREFIX_PATH}} ${{CMAKE_CURRENT_LIST_DIR}})
""".format(deps=deps)
| StarcoderdataPython |
4956996 | <filename>RFEM/Imperfections/imperfectionCase.py
from RFEM.initModel import Model, clearAtributes
class ImperfectionCase():
    """Creates an imperfection case in the active RFEM client model."""

    def __init__(self,
                 no: int = 1,
                 assigned_to_load_cases: str = '1',
                 comment: str = '',
                 params: dict = None):
        '''
        Args:
            no (int): Imperfection Case Tag
            assigned_to_load_cases (str): Load case number(s) the
                imperfection case is assigned to (e.g. '1' or '1 2')
            comment (str, optional): Comments
            params (dict, optional): Extra attributes applied verbatim to
                the client object
        '''
        # Avoid the original mutable default argument (params={}): use a
        # None sentinel instead; behaviour is unchanged for all callers.
        if params is None:
            params = {}

        # Client model | Imperfection Case
        clientObject = Model.clientModel.factory.create('ns0:imperfection_case')

        # Clears object atributes | Sets all atributes to None
        clearAtributes(clientObject)

        # Imperfection Case No.
        clientObject.no = no

        # Assign to Load Cases
        clientObject.assigned_to_load_cases = assigned_to_load_cases

        # Comment
        clientObject.comment = comment

        # Adding optional parameters via dictionary
        for key in params:
            clientObject[key] = params[key]

        # Add Imperfection Case to client model
        Model.clientModel.service.set_imperfection_case(clientObject)
| StarcoderdataPython |
69112 | <filename>Boid.py
from Quadtree import *
class Boid:
    """A single boid implementing Reynolds-style flocking (separation,
    alignment, cohesion) in Processing's Python mode."""

    def __init__(self, x, y):
        self.pos = PVector(x, y)
        # Random initial heading: speed in [1, 2), small random acceleration.
        self.vel = PVector.random2D().mult(random(1, 2))
        self.acc = PVector.random2D().mult(random(0.1, 0.3))
        self.r = 10           # drawing radius / half-size of the triangle
        self.max_speed = 4    # cap on velocity magnitude
        self.max_force = .2   # cap on steering force magnitude

    def show(self):
        """Draw the boid as a triangle pointing along its velocity."""
        pushMatrix()
        translate(self.pos.x, self.pos.y)
        rotate(self.vel.heading())
        fill(0, 0, 100)
        triangle(self.r, 0, -self.r/2, -self.r/2, -self.r/2, self.r/2)
        popMatrix()

    def update(self):
        """Integrate acceleration into velocity and velocity into position."""
        self.vel.add(self.acc)
        # we don't want our velocity to go over max speed
        self.vel.limit(self.max_speed)
        self.pos.add(self.vel)
        self.acc = PVector(0, 0)  # forces are re-accumulated every frame

    # A good way for getting to a target from far away quickly.
    # This algorithm is based on Creg Renold's paper.
    # steering = desired - current, kind of like error correction
    # What happens if the target is too close? The boid will overshoot,
    # and it will continue going on forever, like a simple harmonic oscillator.
    def seek(self, target):
        """Return a steering force towards a target position."""
        distance = PVector.sub(target, self.pos)
        # our desired velocity is that set to our max speed
        desired = distance.setMag(self.max_speed)
        # steering = desired - current
        steering_force = PVector.sub(desired, self.vel)
        # we need to make sure we don't apply too much force
        steering_force.limit(self.max_force)
        return steering_force

    # A good way to approach a velocity.
    # This algorithm is based on Creg Renold's paper.
    # steering force = desired - current
    def seek_velocity(self, desired_velocity):
        """Return a steering force towards a desired velocity."""
        desired_velocity.setMag(self.max_speed)
        # desired_velocity is our desired velocity!
        steering_force = PVector.sub(desired_velocity, self.vel)
        # we need to make sure we don't apply too much force
        steering_force.limit(self.max_force)
        return steering_force

    # Steering to avoid crowding other boids
    def separation(self, boids):
        """Steer away from boids inside the perception radius."""
        perception_radius = 30
        total = 0
        average = PVector(0, 0)  # this is our desired velocity
        # find the average of the positions of all the boids
        for boid in boids:
            distance = PVector.dist(self.pos, boid.pos)
            # only calculate within a desired perception radius
            if boid != self and distance < perception_radius:
                difference = PVector.sub(self.pos, boid.pos)
                # we want this difference to be inversely proportional to the
                # distance between self and other; the further away it is,
                # the lower the magnitude we want
                # (typo fixed from "midigation_division_error")
                mitigation_division_error = 0.0001
                if distance != 0:
                    difference.div(distance)
                else:
                    # coincident boids: divide by a tiny epsilon instead of 0
                    difference.div(mitigation_division_error)
                total += 1  # count how many are within our radius for the average
                # in self.alignment we added velocities; here we add positions!
                average.add(difference)
        steering_force = average
        if total > 0:
            steering_force.div(total)  # this is our desired velocity!
            return self.seek_velocity(steering_force)
        else:
            return average

    # Steer in the average direction of nearby boids
    def alignment(self, boids):
        """Steer towards the average heading of nearby boids."""
        # a Boid can't see everything!
        perception_radius = 40
        # we should find the average direction to find the steering force
        average = PVector(0, 0)
        # total is the number of boids we find
        total = 0
        for boid in boids:
            # Only consider other boids inside our perception circle.
            d = dist(self.pos.x, self.pos.y, boid.pos.x, boid.pos.y)
            if boid != self and d < perception_radius:
                total += 1
                average.add(boid.vel)  # we want to find the average direction
        # the last step of taking the average is dividing by the number of
        # elements, but if total = 0 that raises ZeroDivisionError
        try:
            average.div(total)
            stroke(210, 90, 100)
            noFill()
            circle(self.pos.x, self.pos.y, perception_radius*2)
            steering_force = average
            steering_force.setMag(self.max_speed)
            return self.seek_velocity(steering_force)
        except ZeroDivisionError:
            # No neighbours found. Narrowed from a bare `except:` (which
            # hid unrelated errors) to match cohesion()'s handling.
            return average

    # Steer towards nearby boids
    def cohesion(self, boids):
        """Steer towards the average position of nearby boids."""
        # a Boid can't see everything!
        perception_radius = 40
        # we should find the average position to find the steering force
        # NOTE(review): this is the only 3D PVector in the class — confirm
        # the extra z component is intentional.
        average = PVector(0, 0, 0)
        # total is the number of boids we find
        total = 0
        for boid in boids:
            # Only consider other boids inside our perception circle.
            d = dist(self.pos.x, self.pos.y, boid.pos.x, boid.pos.y)
            if boid != self and d < perception_radius:
                total += 1
                average.add(boid.pos)  # we want to find the average position
        # dividing by total raises ZeroDivisionError when no boid was found
        try:
            average.div(total)
            stroke(210, 90, 100)
            noFill()
            circle(self.pos.x, self.pos.y, perception_radius*2)
            return self.seek(average)
        except ZeroDivisionError:
            return average

    # what if the boids go off the screen? We'll lose a pack though. The pack
    # will continue to go on though.
    def edges(self):
        """Wrap the boid around the screen edges (toroidal world)."""
        if self.pos.x + self.r > width:  # right edge
            self.pos.x = self.r
        if self.pos.x - self.r < 0:  # left edge
            self.pos.x = width - self.r
        if self.pos.y - self.r < 0:  # top edge
            self.pos.y = height - self.r
        if self.pos.y + self.r > height:  # bottom edge
            self.pos.y = self.r

    def flock(self, boids):
        """Accumulate the three weighted flocking forces into acceleration."""
        # alignment
        alignment = self.alignment(boids).mult(1)
        self.acc.add(alignment)
        # cohesion
        cohesion = self.cohesion(boids).mult(1)
        self.acc.add(cohesion)
        # separation (weighted strongest)
        separation = self.separation(boids).mult(3)
        self.acc.add(separation)
| StarcoderdataPython |
6636978 | # This is a sample settings file
| StarcoderdataPython |
3535770 | <reponame>zaanposni/umfrageBot
import os
import json
from datetime import datetime, timedelta
import discord
from discord.utils import get
from bt_utils.console import Console
from bt_utils.config import cfg
from bt_utils.embed_templates import InfoEmbed
SHL = Console("ActiveUserAssigment")
content_dir = "content" if os.path.isdir("content") else "content-default"
class UserStat:
    """Per-member message counter; starts at 1 for the first message seen."""

    def __init__(self, user_obj: discord.Member):
        self.user_obj = user_obj
        self.count = 1  # the member's first message creates the entry

    def __str__(self):
        return f"{self.user_obj.display_name}: {self.count}"
async def assign_active_member(*args):
    """Reassign the "active member" roles: strip them from everyone, count
    each member's messages over the last 31 days (skipping excluded
    channels and opted-out users), then grant the roles to members with at
    least cfg 'needed_messages' messages and post a summary embed.

    args[0] is expected to be the discord Client instance.
    """
    SHL.info("Fetching last messages.")
    client = args[0]
    guild = await client.fetch_guild(cfg.get("guild_id"))
    SHL.debug(f"Guild: {guild}")
    # Remove all "active" members
    SHL.info("Remove active role from all users.")
    for role in cfg.get("apply_roles"):
        role = guild.get_role(role)
        async for member in guild.fetch_members():
            if role not in member.roles:
                continue
            SHL.debug(f"Remove {role} from {member}")
            try:
                await member.remove_roles(role)
            # NOTE(review): bare except silences all errors, not just
            # permission failures — consider narrowing to discord errors.
            except:
                SHL.debug(f"Failed for {member}")
    # Find new active members
    channels = await guild.fetch_channels()
    announcement_channel = await client.fetch_channel(cfg.get("announce_channel"))
    log_channel = await client.fetch_channel(cfg.get("log_channel"))
    users = {}
    # 31-day window ending now.
    before = datetime.now()
    after = datetime.now() - timedelta(days=31)
    # Users who opted out of the activity ranking.
    with open(os.path.join(content_dir, "unsubs.json"), "r") as fh:
        unsubs = json.load(fh)["unsub_ids"]
    SHL.debug(f"{len(unsubs)} users unsubbed.")
    for channel in channels:
        if not isinstance(channel, discord.TextChannel):
            continue
        if channel.id in cfg.get("exclude_channels"):
            continue
        SHL.debug(f"Fetching {channel.name}")
        async for message in channel.history(limit=None, before=before, after=after):
            uid = message.author.id
            if uid in unsubs:  # filter opt-out user
                continue
            if uid in users:
                users[uid].count += 1
            else:
                users[uid] = UserStat(message.author)
    # Most active first, threshold applied.
    sorted_list = sorted([x for x in users.values() if x.count >= cfg.get("needed_messages")],
                         key=lambda item: item.count, reverse=True)
    SHL.debug(f"{len(sorted_list)} users sent enough messages.")
    log_embed = InfoEmbed(title="Aktivste User", description="Für die Auswahl der Stammmitglieder.\n"
                                                             "Anzahl an Nachrichten in den letzten 31 Tagen.\n")
    for stat in sorted_list:  # active user
        try:
            member = await guild.fetch_member(stat.user_obj.id)
        except:  # if user left or got banned
            continue
        SHL.debug(f"Apply roles for {member}")
        log_embed.description += f"{member.mention} {stat.count} Nachrichten.\n"
        for role in cfg.get("apply_roles"):
            assign_role = get(guild.roles, id=role)
            try:
                await member.add_roles(assign_role)
            except:
                SHL.debug(f"Failed for {stat.user_obj}")
                break
    await log_channel.send(embed=log_embed)
    await announcement_channel.send(embed=log_embed)
    SHL.info("Done.")
| StarcoderdataPython |
6647550 | # Generated by Django 2.2.6 on 2020-02-13 10:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter ``Category.colour`` to allow NULL/blank values while keeping
    PROTECT delete semantics on the referenced ``colours.ColourPair``."""

    dependencies = [
        ('comments', '0016_british_spelling'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='colour',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='colours.ColourPair'),
        ),
    ]
| StarcoderdataPython |
4932214 | <gh_stars>0
#coding=utf-8
import pyaudio
import wave
import time
from pygame import mixer # Load the required library
class player():
    """Minimal audio player built on pygame's mixer."""

    def __init__(self):
        pass

    def play(self, path='ans.mp3', duration=10):
        """Play the audio file at *path*, then stop playback.

        Args:
            path: audio file to load (any format pygame's mixer supports).
            duration: seconds to let playback run before stopping;
                defaults to 10 for backward compatibility.
        """
        mixer.init()
        mixer.music.load(path)
        mixer.music.play()
        # block for the requested duration, then stop the stream
        time.sleep(duration)
        mixer.music.stop()
if __name__ == '__main__':
    # Manual smoke test: play the default answer file once.
    demo = player()
    demo.play()
8043273 | <filename>partners/migrations/0004_auto_20210122_1315.py
# Generated by Django 3.1.5 on 2021-01-22 13:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Partner.proposal`` to be unique (max length 50)."""

    dependencies = [
        ('partners', '0003_auto_20210122_1315'),
    ]
    operations = [
        migrations.AlterField(
            model_name='partner',
            name='proposal',
            field=models.CharField(max_length=50, unique=True),
        ),
    ]
| StarcoderdataPython |
9779263 | <reponame>dapu/femagtools
# -*- coding: utf-8 -*-
"""
femagtools.plot
~~~~~~~~~~~~~~~
Creating plots
"""
import numpy as np
import scipy.interpolate as ip
import logging
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
matplotlibversion = matplotlib.__version__
except ImportError: # ModuleNotFoundError:
matplotlibversion = 0
logger = logging.getLogger("femagtools.plot")
def _create_3d_axis():
    """Create a subplot with 3d projection unless the current axis
    already is one."""
    from matplotlib.projections import get_projection_class
    from matplotlib import _pylab_helpers
    active = _pylab_helpers.Gcf.get_active()
    if active is not None and isinstance(plt.gca(),
                                         get_projection_class('3d')):
        return
    plt.figure()
    plt.subplot(111, projection='3d')
def _plot_surface(ax, x, y, z, labels, azim=None):
    """helper function for surface plots

    Args:
        ax: matplotlib 3d axis
        x, y: 1d sample vectors spanning the grid
        z: 2d value grid; NaN entries are masked out of the surface
        labels: (xlabel, ylabel, title) strings
        azim: optional azimuth view angle / degrees
    """
    # ax.tick_params(axis='both', which='major', pad=-3)
    assert np.size(x) > 1 and np.size(y) > 1 and np.size(z) > 1
    if azim is not None:
        ax.azim = azim
    X, Y = np.meshgrid(x, y)
    Z = np.ma.masked_invalid(z)
    ax.plot_surface(X, Y, Z,
                    rstride=1, cstride=1,
                    cmap=cm.viridis, alpha=0.85,
                    vmin=np.nanmin(z), vmax=np.nanmax(z),
                    linewidth=0, antialiased=True)
    # edgecolor=(0, 0, 0, 0))
    # ax.set_xticks(xticks)
    # ax.set_yticks(yticks)
    # ax.set_zticks(zticks)
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    ax.set_title(labels[2])
    # plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
def __phasor_plot(ax, up, idq, uxdq):
    """Draw a normalized d/q phasor diagram of voltages and current.

    Args:
        ax: matplotlib axis; 0 uses plt.gca()
        up: internal voltage magnitude / V
        idq: (id, iq) current components / A
        uxdq: (uxd, uxq) stator voltage drop components / V
    """
    # normalize all voltage phasors to the larger of up and uxd
    uref = max(up, uxdq[0])
    uxd = uxdq[0]/uref
    uxq = uxdq[1]/uref
    u1d, u1q = (uxd, 1+uxq)
    u1 = np.sqrt(u1d**2 + u1q**2)*uref
    i1 = np.linalg.norm(idq)
    i1d, i1q = (idq[0]/i1, idq[1]/i1)
    qhw = 6   # width arrow head
    qhl = 15  # length arrow head
    qlw = 2   # line width
    qts = 10  # textsize
    # Length of the Current adjust to Ud: Initally 0.9, Maier(Oswald) = 0.5
    curfac = max(0.9, 1.5*i1q/up)

    def label_line(ax, X, Y, U, V, label, color='k', size=8):
        """Add a label to a line, at the proper angle.
        Arguments
        ---------
        line : matplotlib.lines.Line2D object,
        label : str
        x : float
        x-position to place center of text (in data coordinated
        y : float
        y-position to place center of text (in data coordinates)
        color : str
        size : float
        """
        x1, x2 = X, X + U
        y1, y2 = Y, Y + V
        if y2 == 0:
            y2 = y1
        if x2 == 0:
            x2 = x1
        x = (x1 + x2) / 2
        y = (y1 + y2) / 2
        slope_degrees = np.rad2deg(np.angle(U + V * 1j))
        if slope_degrees < 0:
            slope_degrees += 180
        if 90 < slope_degrees <= 270:
            slope_degrees += 180
        x_offset = np.sin(np.deg2rad(slope_degrees))
        y_offset = np.cos(np.deg2rad(slope_degrees))
        bbox_props = dict(boxstyle="Round4, pad=0.1", fc="white", lw=0)
        text = ax.annotate(label, xy=(x, y), xytext=(x_offset * 10, y_offset * 8),
                           textcoords='offset points',
                           size=size, color=color,
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontfamily='monospace', fontweight='bold', bbox=bbox_props)
        text.set_rotation(slope_degrees)
        return text

    if ax == 0:
        ax = plt.gca()
    ax.axes.xaxis.set_ticklabels([])
    ax.axes.yaxis.set_ticklabels([])
    # ax.set_aspect('equal')
    ax.set_title(
        r'$U_1$={0} V, $I_1$={1} A, $U_p$={2} V'.format(
            round(u1, 1), round(i1, 1), round(up, 1)), fontsize=14)
    up /= uref
    # draw the individual phasors (Up, U1, Ud, Uq, I1) as labelled arrows
    ax.quiver(0, 0, 0, up, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw/2, headlength=qhl/2, headaxislength=qhl/2, width=qlw*2, color='k')
    label_line(ax, 0, 0, 0, up, '$U_p$', 'k', qts)
    ax.quiver(0, 0, u1d, u1q, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='r')
    label_line(ax, 0, 0, u1d, u1q, '$U_1$', 'r', qts)
    ax.quiver(0, 1, uxd, 0, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='g')
    label_line(ax, 0, 1, uxd, 0, '$U_d$', 'g', qts)
    ax.quiver(uxd, 1, 0, uxq, angles='xy', scale_units='xy', scale=1, units='dots',
              headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='g')
    label_line(ax, uxd, 1, 0, uxq, '$U_q$', 'g', qts)
    ax.quiver(0, 0, curfac*i1d, curfac*i1q, angles='xy', scale_units='xy', scale=1,
              units='dots', headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='b')
    label_line(ax, 0, 0, curfac*i1d, curfac*i1q, '$I_1$', 'b', qts)
    # fit the view around all phasors
    xmin, xmax = (min(0, uxd, i1d), max(0, i1d, uxd))
    ymin, ymax = (min(0, i1q, 1-uxq), max(1, i1q, 1+uxq))
    ax.set_xlim([xmin-0.1, xmax+0.1])
    ax.set_ylim([ymin-0.1, ymax+0.1])
    ax.grid(True)
def i1beta_phasor(up, i1, beta, r1, xd, xq, ax=0):
    """creates a phasor plot
    up: internal voltage
    i1: current
    beta: angle i1 vs up [deg]
    r1: resistance
    xd: reactance in direct axis
    xq: reactance in quadrature axis"""
    beta_rad = beta/180*np.pi
    i1d = i1*np.sin(beta_rad)
    i1q = i1*np.cos(beta_rad)
    # resistive and reactive voltage drops per axis
    uxdq = (r1*i1d - xq*i1q, r1*i1q + xd*i1d)
    __phasor_plot(ax, up, (i1d, i1q), uxdq)
def iqd_phasor(up, iqd, uqd, ax=0):
    """creates a phasor plot
    up: internal voltage
    iqd: current
    uqd: terminal voltage"""
    sq2 = np.sqrt(2)
    # convert peak dq values to rms and reference the drop to up
    uxdq = (uqd[1]/sq2, uqd[0]/sq2 - up)
    __phasor_plot(ax, up, (iqd[1]/sq2, iqd[0]/sq2), uxdq)
def phasor(bch, ax=0):
    """create phasor plot from bch"""
    freq = bch.machine['p']*bch.dqPar['speed']
    omega = 2*np.pi*freq
    # reactances from the dq inductances at the last operating point
    xd = omega*bch.dqPar['ld'][-1]
    xq = omega*bch.dqPar['lq'][-1]
    i1beta_phasor(bch.dqPar['up'][-1],
                  bch.dqPar['i1'][-1], bch.dqPar['beta'][-1],
                  bch.machine['r1'], xd, xq, ax)
def airgap(airgap, ax=0):
    """creates plot of flux density in airgap"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Airgap Flux Density [T]')
    ax.grid(True)
    bmax = max(airgap['B'])
    ax.plot(airgap['pos'], airgap['B'],
            label='Max {:4.2f} T'.format(bmax))
    ax.plot(airgap['pos'], airgap['B_fft'],
            label='Base Ampl {:4.2f} T'.format(airgap['Bamp']))
    ax.set_xlabel('Position/°')
    ax.legend()
def airgap_fft(airgap, bmin=1e-2, ax=0):
    """plot airgap harmonics

    Args:
        airgap: dict with 'nue' (harmonic orders) and 'B_nue' (amplitudes / T)
        bmin: smallest amplitude to include in the plot / T
        ax: matplotlib axis; 0 uses plt.gca()
    """
    unit = 'T'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Airgap Flux Density Harmonics / {}'.format(unit))
    ax.grid(True)
    try:
        # the unpacking also raises ValueError when no harmonic exceeds bmin,
        # so it must sit inside the try block
        order, fluxdens = np.array([(n, b) for n, b in zip(airgap['nue'],
                                                           airgap['B_nue']) if b > bmin]).T
        # use_line_collection was deprecated and removed in matplotlib 3.8;
        # line-collection rendering is the default behaviour anyway
        ax.stem(order, fluxdens, '-.', basefmt=" ")
        ax.set_xticks(order)
    except ValueError:  # empty sequence
        pass
def torque(pos, torque, ax=0):
    """creates plot from torque vs position"""
    oversampling = 20
    pos_fine = np.linspace(pos[0], pos[-1], oversampling*len(torque))
    tq_interp = ip.interp1d(pos, torque, kind='quadratic')
    scale, unit = 1, 'Nm'
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Torque / {}'.format(unit))
    ax.grid(True)
    # samples as markers, smoothed curve on top
    ax.plot(pos, [scale*t for t in torque], 'go')
    ax.plot(pos_fine, scale*tq_interp(pos_fine))
    if np.min(torque) > 0 and np.max(torque) > 0:
        ax.set_ylim(bottom=0)
    elif np.min(torque) < 0 and np.max(torque) < 0:
        ax.set_ylim(top=0)
def torque_fft(order, torque, ax=0):
    """plot torque harmonics"""
    scale, unit = 1, 'Nm'
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Torque Harmonics / {}'.format(unit))
    ax.grid(True)
    try:
        bw = 2.5E-2*max(order)  # bar width relative to highest order
        ax.bar(order, [scale*t for t in torque], width=bw, align='center')
        ax.set_xlim(left=-bw/2)
    except ValueError:  # empty sequence
        pass
def force(title, pos, force, xlabel='', ax=0):
    """plot force vs position"""
    scale, unit = 1, 'N'
    if min(force) < -9.9e3 or max(force) > 9.9e3:
        scale, unit = 1e-3, 'kN'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('{} / {}'.format(title, unit))
    ax.grid(True)
    ax.plot(pos, [scale*f for f in force])
    if xlabel:
        ax.set_xlabel(xlabel)
    if min(force) > 0:
        ax.set_ylim(bottom=0)
def force_fft(order, force, ax=0):
    """plot force harmonics"""
    scale, unit = 1, 'N'
    if min(force) < -9.9e3 or max(force) > 9.9e3:
        scale, unit = 1e-3, 'kN'
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Force Harmonics / {}'.format(unit))
    ax.grid(True)
    try:
        bw = 2.5E-2*max(order)  # bar width relative to highest order
        ax.bar(order, [scale*t for t in force], width=bw, align='center')
        ax.set_xlim(left=-bw/2)
    except ValueError:  # empty sequence
        pass
def forcedens(title, pos, fdens, ax=0):
    """plot force densities"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title(title)
    ax.grid(True)
    ftang, fnorm = fdens[0], fdens[1]
    # convert N/m² to kN/m²
    ax.plot(pos, [1e-3*ft for ft in ftang], label='F tang')
    ax.plot(pos, [1e-3*fn for fn in fnorm], label='F norm')
    ax.legend()
    ax.set_xlabel('Pos / deg')
    ax.set_ylabel('Force Density / kN/m²')
def forcedens_surface(fdens, ax=0):
    """Surface plot of the normal force density over rotor position."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    xpos = list(fdens.positions[0]['X'])
    ypos = [p['position'] for p in fdens.positions]
    fn_grid = 1e-3*np.array([p['FN'] for p in fdens.positions])
    _plot_surface(ax, xpos, ypos, fn_grid,
                  (u'Rotor pos/°', u'Pos/°', u'F N / kN/m²'))
def forcedens_fft(title, fdens, ax=0):
    """plot force densities FFT
    Args:
        title: plot title
        fdens: force density object
        ax: matplotlib 3d axis; 0 creates a new one
    """
    if ax == 0:
        ax = plt.axes(projection="3d")
    F = 1e-3*fdens.fft()
    fmin = 0.2  # only amplitudes above this (kN/m²) are shown
    num_bars = F.shape[0] + 1
    _xx, _yy = np.meshgrid(np.arange(1, num_bars),
                           np.arange(1, num_bars))
    z_size = F[F > fmin]
    x_pos, y_pos = _xx[F > fmin], _yy[F > fmin]
    z_pos = np.zeros_like(z_size)
    x_size = 2
    y_size = 2
    ax.bar3d(x_pos, y_pos, z_pos, x_size, y_size, z_size)
    ax.view_init(azim=120)
    ax.set_xlim(0, num_bars+1)
    ax.set_ylim(0, num_bars+1)
    ax.set_title(title)
    ax.set_xlabel('M')
    ax.set_ylabel('N')
    ax.set_zlabel('kN/m²')
def winding_flux(pos, flux, ax=0):
    """plot flux vs position (one curve per winding)"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Winding Flux / Vs')
    ax.grid(True)
    for xdata, ydata in zip(pos, flux):
        ax.plot(xdata, ydata)
def winding_current(pos, current, ax=0):
    """plot winding currents (one curve per winding)"""
    if ax == 0:
        ax = plt.gca()
    ax.set_title('Winding Currents / A')
    ax.grid(True)
    for xdata, ydata in zip(pos, current):
        ax.plot(xdata, ydata)
def voltage(title, pos, voltage, ax=0):
    """plot voltage vs. position

    Args:
        title: plot title (unit V is appended)
        pos: position samples
        voltage: voltage samples / V
        ax: matplotlib axis; 0 uses plt.gca()
    """
    if ax == 0:
        ax = plt.gca()
    ax.set_title('{} / V'.format(title))
    ax.grid(True)
    ax.plot(pos, voltage)
def voltage_fft(title, order, voltage, ax=0):
    """plot FFT harmonics of voltage

    Args:
        title: plot title (unit V is appended)
        order: harmonic orders
        voltage: harmonic amplitudes / V
        ax: matplotlib axis; 0 uses plt.gca()
    """
    if ax == 0:
        ax = plt.gca()
    ax.set_title('{} / V'.format(title))
    ax.grid(True)
    # work on copies: the original version extended the caller's lists in place
    order = list(order)
    voltage = list(voltage)
    if order and max(order) < 5:
        # pad very short spectra so the x axis keeps a sensible range
        order += [5]
        voltage += [0]
    try:
        bw = 2.5E-2*max(order)
        ax.bar(order, voltage, width=bw, align='center')
    except ValueError:  # empty sequence
        pass
def mcv_hbj(mcv, log=True, ax=0):
    """plot H, B, J of mcv dict

    Args:
        mcv: magnetization-curve dict with 'name', 'ctype' and 'curve'
            (list of dicts holding 'bi' and 'hi' samples)
        log: use a logarithmic H axis if True
        ax: matplotlib axis; 0 uses plt.gca()
    """
    import femagtools.mcv
    MUE0 = 4e-7*np.pi
    ji = []
    csiz = len(mcv['curve'])
    if ax == 0:
        ax = plt.gca()
    ax.set_title(mcv['name'])
    for k, c in enumerate(mcv['curve']):
        # H is converted from A/m to kA/m
        bh = [(bi, hi*1e-3)
              for bi, hi in zip(c['bi'],
                                c['hi'])]
        try:
            # polarisation J = B - mue0*H, only for single plain curves
            if csiz == 1 and mcv['ctype'] in (femagtools.mcv.MAGCRV,
                                              femagtools.mcv.ORIENT_CRV):
                ji = [b-MUE0*h*1e3 for b, h in bh]
        except Exception:
            pass
        bi, hi = zip(*bh)
        label = 'Flux Density'
        if csiz > 1:
            label = 'Flux Density ({0}°)'.format(mcv.mc1_angle[k])
        if log:
            ax.semilogx(hi, bi, label=label)
            if ji:
                ax.semilogx(hi, ji, label='Polarisation')
        else:
            ax.plot(hi, bi, label=label)
            if ji:
                ax.plot(hi, ji, label='Polarisation')
    ax.set_xlabel('H / kA/m')
    ax.set_ylabel('T')
    if ji or csiz > 1:
        ax.legend(loc='lower right')
    ax.grid()
def mcv_muer(mcv, ax=0):
    """plot rel. permeability vs. B of mcv dict"""
    MUE0 = 4e-7*np.pi
    curve = mcv['curve'][0]
    # mue_r = B / (mue0 * H); skip samples with H == 0
    pairs = [(bx, bx/hx/MUE0)
             for bx, hx in zip(curve['bi'], curve['hi'])
             if not hx == 0]
    bi, ur = zip(*pairs)
    if ax == 0:
        ax = plt.gca()
    ax.plot(bi, ur)
    ax.set_xlabel('B / T')
    ax.set_title('rel. Permeability')
    ax.grid()
def mtpa(pmrel, i1max, title='', projection='', ax=0):
    """create a line or surface plot with torque and mtpa curve

    Args:
        pmrel: machine model providing mtpa, torque_iqd and iqdmax/iqdmin
        i1max: maximum phase current / A
        title: optional axis title
        projection: '3d' for a surface plot, otherwise a 2d contour plot
        ax: matplotlib axis; 0 uses plt.gca()
    """
    nsamples = 10
    i1 = np.linspace(0, i1max, nsamples)
    iopt = np.array([pmrel.mtpa(x) for x in i1]).T
    iqmax, idmax = pmrel.iqdmax(i1max)
    iqmin, idmin = pmrel.iqdmin(i1max)
    if projection == '3d':
        nsamples = 50
    else:
        if iqmin == 0:
            iqmin = 0.1*iqmax
    id = np.linspace(idmin, idmax, nsamples)
    iq = np.linspace(iqmin, iqmax, nsamples)
    torque_iqd = np.array(
        [[pmrel.torque_iqd(x, y)
          for y in id] for x in iq])
    if projection == '3d':
        ax = idq_torque(id, iq, torque_iqd, ax)
        ax.plot(iopt[1], iopt[0], iopt[2],
                color='red', linewidth=2, label='MTPA: {0:5.0f} Nm'.format(
                    np.max(iopt[2][-1])))
    else:
        if ax == 0:
            ax = plt.gca()
        ax.set_aspect('equal')
        x, y = np.meshgrid(id, iq)
        CS = ax.contour(x, y, torque_iqd, 6, colors='k')
        ax.clabel(CS, fmt='%d', inline=1)
        ax.set_xlabel('Id/A')
        ax.set_ylabel('Iq/A')
        ax.plot(iopt[1], iopt[0],
                color='red', linewidth=2, label='MTPA: {0:5.0f} Nm'.format(
                    np.max(iopt[2][-1])))
        ax.grid()
    if title:
        ax.set_title(title)
    ax.legend()
def mtpv(pmrel, u1max, i1max, title='', projection='', ax=0):
    """create a line or surface plot with voltage and mtpv curve

    Args:
        pmrel: machine model providing w2_imax_umax, uqd, mtpv, torque_iqd
        u1max: maximum phase voltage / V
        i1max: maximum phase current / A
        title: optional axis title
        projection: '3d' for a surface plot, otherwise a 2d contour plot
        ax: matplotlib axis; 0 uses plt.gca()
    """
    w1 = pmrel.w2_imax_umax(i1max, u1max)
    nsamples = 20
    if projection == '3d':
        nsamples = 50
    iqmax, idmax = pmrel.iqdmax(i1max)
    iqmin, idmin = pmrel.iqdmin(i1max)
    id = np.linspace(idmin, idmax, nsamples)
    iq = np.linspace(iqmin, iqmax, nsamples)
    # rms stator voltage over the (id, iq) grid
    u1_iqd = np.array(
        [[np.linalg.norm(pmrel.uqd(w1, iqx, idx))/np.sqrt(2)
          for idx in id] for iqx in iq])
    u1 = np.mean(u1_iqd)
    imtpv = np.array([pmrel.mtpv(wx, u1, i1max)
                      for wx in np.linspace(w1, 20*w1, nsamples)]).T
    if projection == '3d':
        torque_iqd = np.array(
            [[pmrel.torque_iqd(x, y)
              for y in id] for x in iq])
        ax = idq_torque(id, iq, torque_iqd, ax)
        ax.plot(imtpv[1], imtpv[0], imtpv[2],
                color='red', linewidth=2)
    else:
        if ax == 0:
            ax = plt.gca()
        ax.set_aspect('equal')
        x, y = np.meshgrid(id, iq)
        CS = ax.contour(x, y, u1_iqd, 4, colors='b')  # linestyles='dashed')
        ax.clabel(CS, fmt='%d', inline=1)
        ax.plot(imtpv[1], imtpv[0],
                color='red', linewidth=2,
                label='MTPV: {0:5.0f} Nm'.format(np.max(imtpv[2])))
        # beta = np.arctan2(imtpv[1][0], imtpv[0][0])
        # b = np.linspace(beta, 0)
        # ax.plot(np.sqrt(2)*i1max*np.sin(b), np.sqrt(2)*i1max*np.cos(b), 'r-')
        ax.grid()
        ax.legend()
    ax.set_xlabel('Id/A')
    ax.set_ylabel('Iq/A')
    if title:
        ax.set_title(title)
def __get_linearForce_title_keys(lf):
    """Return (titles, dict keys) for the two force components in *lf*,
    depending on whether it uses r/z or x/y coordinates."""
    has_rz = 'force_r' in lf
    if has_rz:
        return ['Force r', 'Force z'], ['force_r', 'force_z']
    return ['Force x', 'Force y'], ['force_x', 'force_y']
def pmrelsim(bch, title=''):
    """creates a plot of a PM/Rel motor simulation

    Args:
        bch: BCH reader object with torque/linearForce, flux and fft results
        title: optional figure title
    """
    cols = 2
    rows = 4
    if len(bch.flux['1']) > 1:
        # extra row for the no-load voltage subplots
        rows += 1
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)
    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
        plt.subplot(rows, cols, row+1)
        tq = list(bch.torque_fft[-1]['torque'])
        order = list(bch.torque_fft[-1]['order'])
        if order and max(order) < 5:
            # pad very short spectra for a sensible x range
            order += [15]
            tq += [0]
        torque_fft(order, tq)
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
        row += 3
    elif bch.linearForce:
        # NOTE(review): rebinds the *title* parameter below
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3
    plt.subplot(rows, cols, row+1)
    flux = [bch.flux[k][-1] for k in bch.flux]
    pos = [f['displ'] for f in flux]
    winding_flux(pos,
                 [f['flux_k'] for f in flux])
    plt.subplot(rows, cols, row+2)
    winding_current(pos,
                    [f['current_k'] for f in flux])
    plt.subplot(rows, cols, row+3)
    voltage('Internal Voltage',
            bch.flux['1'][-1]['displ'],
            bch.flux['1'][-1]['voltage_dpsi'])
    plt.subplot(rows, cols, row+4)
    try:
        voltage_fft('Internal Voltage Harmonics',
                    bch.flux_fft['1'][-1]['order'],
                    bch.flux_fft['1'][-1]['voltage'])
    except:
        # NOTE(review): bare except silently skips missing fft data
        pass
    if len(bch.flux['1']) > 1:
        plt.subplot(rows, cols, row+5)
        voltage('No Load Voltage',
                bch.flux['1'][0]['displ'],
                bch.flux['1'][0]['voltage_dpsi'])
        plt.subplot(rows, cols, row+6)
        try:
            voltage_fft('No Load Voltage Harmonics',
                        bch.flux_fft['1'][0]['order'],
                        bch.flux_fft['1'][0]['voltage'])
        except:
            pass
    fig.tight_layout(h_pad=3.5)
    if title:
        fig.subplots_adjust(top=0.92)
def multcal(bch, title=''):
    """creates a plot of a MULT CAL simulation

    Args:
        bch: BCH reader object with torque/linearForce, flux and fft results
        title: optional figure title
    """
    cols = 2
    rows = 4
    if len(bch.flux['1']) > 1:
        # reserve a row for the no-load voltage subplots (as in pmrelsim);
        # without it the subplots at row+5/row+6 exceed the grid
        rows += 1
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)
    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
        plt.subplot(rows, cols, row+1)
        tq = list(bch.torque_fft[-1]['torque'])
        order = list(bch.torque_fft[-1]['order'])
        if order and max(order) < 5:
            order += [15]
            tq += [0]
        torque_fft(order, tq)
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
        row += 3
    elif bch.linearForce:
        # NOTE: rebinds the *title* parameter (kept for compatibility)
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3
    plt.subplot(rows, cols, row+1)
    flux = [bch.flux[k][-1] for k in bch.flux]
    pos = [f['displ'] for f in flux]
    winding_flux(pos,
                 [f['flux_k'] for f in flux])
    plt.subplot(rows, cols, row+2)
    winding_current(pos,
                    [f['current_k'] for f in flux])
    plt.subplot(rows, cols, row+3)
    voltage('Internal Voltage',
            bch.flux['1'][-1]['displ'],
            bch.flux['1'][-1]['voltage_dpsi'])
    plt.subplot(rows, cols, row+4)
    try:
        voltage_fft('Internal Voltage Harmonics',
                    bch.flux_fft['1'][-1]['order'],
                    bch.flux_fft['1'][-1]['voltage'])
    except Exception:  # fft data may be missing; keep best-effort behaviour
        pass
    if len(bch.flux['1']) > 1:
        plt.subplot(rows, cols, row+5)
        voltage('No Load Voltage',
                bch.flux['1'][0]['displ'],
                bch.flux['1'][0]['voltage_dpsi'])
        plt.subplot(rows, cols, row+6)
        try:
            voltage_fft('No Load Voltage Harmonics',
                        bch.flux_fft['1'][0]['order'],
                        bch.flux_fft['1'][0]['voltage'])
        except Exception:
            pass
    fig.tight_layout(h_pad=3.5)
    if title:
        fig.subplots_adjust(top=0.92)
def fasttorque(bch, title=''):
    """creates a plot of a Fast Torque simulation

    Args:
        bch: BCH reader object with torque/linearForce, flux and fft results
        title: optional figure title
    """
    cols = 2
    rows = 4
    if len(bch.flux['1']) > 1:
        # extra row for the no-load voltage subplots
        rows += 1
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)
    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
        plt.subplot(rows, cols, row+1)
        torque_fft(bch.torque_fft[-1]['order'], bch.torque_fft[-1]['torque'])
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
        row += 3
    elif bch.linearForce:
        # NOTE(review): rebinds the *title* parameter below
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3
    plt.subplot(rows, cols, row+1)
    flux = [bch.flux[k][-1] for k in bch.flux]
    pos = [f['displ'] for f in flux]
    winding_flux(pos, [f['flux_k'] for f in flux])
    plt.subplot(rows, cols, row+2)
    winding_current(pos, [f['current_k'] for f in flux])
    plt.subplot(rows, cols, row+3)
    voltage('Internal Voltage',
            bch.flux['1'][-1]['displ'],
            bch.flux['1'][-1]['voltage_dpsi'])
    plt.subplot(rows, cols, row+4)
    try:
        voltage_fft('Internal Voltage Harmonics',
                    bch.flux_fft['1'][-1]['order'],
                    bch.flux_fft['1'][-1]['voltage'])
    except:
        # NOTE(review): bare except silently skips missing fft data
        pass
    if len(bch.flux['1']) > 1:
        plt.subplot(rows, cols, row+5)
        voltage('No Load Voltage',
                bch.flux['1'][0]['displ'],
                bch.flux['1'][0]['voltage_dpsi'])
        plt.subplot(rows, cols, row+6)
        try:
            voltage_fft('No Load Voltage Harmonics',
                        bch.flux_fft['1'][0]['order'],
                        bch.flux_fft['1'][0]['voltage'])
        except:
            pass
    fig.tight_layout(h_pad=3.5)
    if title:
        fig.subplots_adjust(top=0.92)
def cogging(bch, title=''):
    """creates a cogging plot

    Args:
        bch: BCH reader object with torque/linearForce, flux and fft results
        title: optional figure title
    """
    cols = 2
    rows = 3
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)
    row = 1
    plt.subplot(rows, cols, row)
    if bch.torque:
        torque(bch.torque[0]['angle'], bch.torque[0]['torque'])
        plt.subplot(rows, cols, row+1)
        if bch.torque_fft:
            torque_fft(bch.torque_fft[0]['order'], bch.torque_fft[0]['torque'])
        plt.subplot(rows, cols, row+2)
        force('Force Fx',
              bch.torque[0]['angle'], bch.torque[0]['force_x'])
        plt.subplot(rows, cols, row+3)
        force('Force Fy',
              bch.torque[0]['angle'], bch.torque[0]['force_y'])
        row += 3
    elif bch.linearForce:
        # NOTE(review): rebinds the *title* parameter below
        title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
        force(title[0], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[0]], 'Displt. / mm')
        plt.subplot(rows, cols, row+1)
        force_fft(bch.linearForce_fft[-2]['order'],
                  bch.linearForce_fft[-2]['force'])
        plt.subplot(rows, cols, row+2)
        force(title[1], bch.linearForce[-1]['displ'],
              bch.linearForce[-1][keys[1]], 'Displt. / mm')
        plt.subplot(rows, cols, row+3)
        force_fft(bch.linearForce_fft[-1]['order'],
                  bch.linearForce_fft[-1]['force'])
        row += 3
    plt.subplot(rows, cols, row+1)
    voltage('Voltage',
            bch.flux['1'][0]['displ'],
            bch.flux['1'][0]['voltage_dpsi'])
    plt.subplot(rows, cols, row+2)
    voltage_fft('Voltage Harmonics',
                bch.flux_fft['1'][0]['order'],
                bch.flux_fft['1'][0]['voltage'])
    fig.tight_layout(h_pad=2)
    if title:
        fig.subplots_adjust(top=0.92)
def transientsc(bch, title=''):
    """creates a transient short circuit plot

    Args:
        bch: BCH reader object with scData (time, ia/ib/ic, torque)
        title: optional figure title
    """
    cols = 1
    rows = 2
    htitle = 1.5 if title else 0
    fig, ax = plt.subplots(nrows=rows, ncols=cols,
                           figsize=(10, 3*rows + htitle))
    if title:
        fig.suptitle(title, fontsize=16)
    row = 1
    plt.subplot(rows, cols, row)
    ax = plt.gca()
    ax.set_title('Currents / A')
    ax.grid(True)
    # one curve per phase
    for i in ('ia', 'ib', 'ic'):
        ax.plot(bch.scData['time'], bch.scData[i], label=i)
    ax.set_xlabel('Time / s')
    ax.legend()
    row = 2
    plt.subplot(rows, cols, row)
    ax = plt.gca()
    ax.set_title('Torque / Nm')
    ax.grid(True)
    ax.plot(bch.scData['time'], bch.scData['torque'])
    ax.set_xlabel('Time / s')
    fig.tight_layout(h_pad=2)
    if title:
        fig.subplots_adjust(top=0.92)
def i1beta_torque(i1, beta, torque, title='', ax=0):
    """Surface plot of torque over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    mean_beta = np.mean(beta)
    azim = -60 if (mean_beta > 0 or mean_beta < -90) else 210
    scale, unit = 1, 'Nm'
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    ztitle = title if title else u'Torque/{}'.format(unit)
    _plot_surface(ax, i1, beta, scale*np.asarray(torque),
                  (u'I1/A', u'Beta/°', ztitle), azim=azim)
def i1beta_ld(i1, beta, ld, ax=0):
    """Surface plot of d-axis inductance Ld (mH) over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    ld_mh = np.asarray(ld)*1e3
    _plot_surface(ax, i1, beta, ld_mh,
                  (u'I1/A', u'Beta/°', u'Ld/mH'), azim=60)
def i1beta_lq(i1, beta, lq, ax=0):
    """Surface plot of q-axis inductance Lq (mH) over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    mean_beta = np.mean(beta)
    azim = -120 if (mean_beta > 0 or mean_beta < -90) else 60
    _plot_surface(ax, i1, beta, np.asarray(lq)*1e3,
                  (u'I1/A', u'Beta/°', u'Lq/mH'), azim=azim)
def i1beta_psim(i1, beta, psim, ax=0):
    """Surface plot of magnet flux linkage psim over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    _plot_surface(ax, i1, beta, psim,
                  (u'I1/A', u'Beta/°', u'Psi m/Vs'), azim=60)
def i1beta_up(i1, beta, up, ax=0):
    """Surface plot of internal voltage Up over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    _plot_surface(ax, i1, beta, up,
                  (u'I1/A', u'Beta/°', u'Up/V'), azim=60)
def i1beta_psid(i1, beta, psid, ax=0):
    """Surface plot of d-axis flux linkage psid over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    mean_beta = np.mean(beta)
    azim = 60 if (mean_beta > 0 or mean_beta < -90) else -60
    _plot_surface(ax, i1, beta, psid,
                  (u'I1/A', u'Beta/°', u'Psi d/Vs'), azim=azim)
def i1beta_psiq(i1, beta, psiq, ax=0):
    """Surface plot of q-axis flux linkage psiq over (i1, beta)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    mean_beta = np.mean(beta)
    azim = -60 if (mean_beta > 0 or mean_beta < -90) else 210
    _plot_surface(ax, i1, beta, psiq,
                  (u'I1/A', u'Beta/°', u'Psi q/Vs'), azim=azim)
def idq_torque(id, iq, torque, ax=0):
    """Surface plot of torque over (id, iq); returns the axis used."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    scale, unit = 1, 'Nm'
    if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
        scale, unit = 1e-3, 'kNm'
    _plot_surface(ax, id, iq, scale*np.asarray(torque),
                  (u'Id/A', u'Iq/A', u'Torque/{}'.format(unit)),
                  azim=-60)
    return ax
def idq_psid(id, iq, psid, ax=0):
    """Surface plot of d-axis flux linkage psid over (id, iq)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    _plot_surface(ax, id, iq, psid,
                  (u'Id/A', u'Iq/A', u'Psi d/Vs'), azim=210)
def idq_psiq(id, iq, psiq, ax=0):
    """Surface plot of q-axis flux linkage psiq over (id, iq)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    _plot_surface(ax, id, iq, psiq,
                  (u'Id/A', u'Iq/A', u'Psi q/Vs'), azim=210)
def idq_psim(id, iq, psim, ax=0):
    """Surface plot of magnet flux linkage psim over (id, iq)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    _plot_surface(ax, id, iq, psim,
                  (u'Id/A', u'Iq/A', u'Psi m [Vs]'), azim=120)
def idq_ld(id, iq, ld, ax=0):
    """Surface plot of d-axis inductance Ld (mH) over (id, iq)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    ld_mh = np.asarray(ld)*1e3
    _plot_surface(ax, id, iq, ld_mh,
                  (u'Id/A', u'Iq/A', u'L d/mH'), azim=120)
def idq_lq(id, iq, lq, ax=0):
    """Surface plot of q-axis inductance Lq (mH) over (id, iq)."""
    if ax == 0:
        _create_3d_axis()
        ax = plt.gca()
    lq_mh = np.asarray(lq)*1e3
    _plot_surface(ax, id, iq, lq_mh,
                  (u'Id/A', u'Iq/A', u'L q/mH'), azim=120)
def ldlq(bch):
    """creates the surface plots of a BCH reader object
    with a ld-lq identification

    Args:
        bch: BCH reader object whose ldq dict holds beta, i1, torque,
            ld, lq, psid, psiq and either psim or up
    """
    beta = bch.ldq['beta']
    i1 = bch.ldq['i1']
    torque = bch.ldq['torque']
    ld = np.array(bch.ldq['ld'])
    lq = np.array(bch.ldq['lq'])
    psid = bch.ldq['psid']
    psiq = bch.ldq['psiq']
    rows = 3
    fig = plt.figure(figsize=(10, 4*rows))
    fig.suptitle('Ld-Lq Identification {}'.format(bch.filename), fontsize=16)
    fig.add_subplot(rows, 2, 1, projection='3d')
    i1beta_torque(i1, beta, torque)
    fig.add_subplot(rows, 2, 2, projection='3d')
    i1beta_psid(i1, beta, psid)
    fig.add_subplot(rows, 2, 3, projection='3d')
    i1beta_psiq(i1, beta, psiq)
    fig.add_subplot(rows, 2, 4, projection='3d')
    try:
        i1beta_psim(i1, beta, bch.ldq['psim'])
    except KeyError:
        # no magnet flux in the results: fall back to internal voltage
        # (was a bare except which would also mask plotting errors)
        i1beta_up(i1, beta, bch.ldq['up'])
    fig.add_subplot(rows, 2, 5, projection='3d')
    i1beta_ld(i1, beta, ld)
    fig.add_subplot(rows, 2, 6, projection='3d')
    i1beta_lq(i1, beta, lq)
def psidq(bch):
    """creates the surface plots of a BCH reader object
    with a psid-psiq identification

    Args:
        bch: BCH reader object whose psidq/psidq_ldq dicts hold
            id, iq, torque, psid, psiq, ld, lq and psim
    """
    id = bch.psidq['id']
    iq = bch.psidq['iq']
    torque = bch.psidq['torque']
    ld = np.array(bch.psidq_ldq['ld'])
    lq = np.array(bch.psidq_ldq['lq'])
    psim = bch.psidq_ldq['psim']
    psid = bch.psidq['psid']
    psiq = bch.psidq['psiq']
    rows = 3
    fig = plt.figure(figsize=(10, 4*rows))
    fig.suptitle('Psid-Psiq Identification {}'.format(
        bch.filename), fontsize=16)
    fig.add_subplot(rows, 2, 1, projection='3d')
    idq_torque(id, iq, torque)
    fig.add_subplot(rows, 2, 2, projection='3d')
    idq_psid(id, iq, psid)
    fig.add_subplot(rows, 2, 3, projection='3d')
    idq_psiq(id, iq, psiq)
    fig.add_subplot(rows, 2, 4, projection='3d')
    idq_psim(id, iq, psim)
    fig.add_subplot(rows, 2, 5, projection='3d')
    idq_ld(id, iq, ld)
    fig.add_subplot(rows, 2, 6, projection='3d')
    idq_lq(id, iq, lq)
def felosses(losses, coeffs, title='', log=True, ax=0):
    """plot iron losses with steinmetz or jordan approximation

    Args:
        losses: dict with f, B, pfe values and fo, Bo reference points
        coeffs: list with steinmetz (cw, alpha, beta) or
            jordan (cw, alpha, ch, beta, gamma) coeffs
        title: title string
        log: log scale for x and y axes if True
        ax: matplotlib axis; 0 uses plt.gca()
    """
    import femagtools.losscoeffs as lc
    if ax == 0:
        ax = plt.gca()
    fo = losses['fo']
    Bo = losses['Bo']
    # evaluate the fitted model on a slightly widened B range
    # (plt.np was a deprecated matplotlib alias for numpy, removed in 3.x)
    B = np.linspace(0.9*np.min(losses['B']),
                    1.1*0.9*np.max(losses['B']))
    for i, f in enumerate(losses['f']):
        pfe = [p for p in np.array(losses['pfe'])[i] if p]
        if f > 0:
            if len(coeffs) == 5:
                ax.plot(B, lc.pfe_jordan(f, B, *coeffs, fo=fo, Bo=Bo))
            elif len(coeffs) == 3:
                ax.plot(B, lc.pfe_steinmetz(f, B, *coeffs, fo=fo, Bo=Bo))
            # plot on *ax* (not plt) so an explicitly passed axis is honored
            ax.plot(losses['B'][:len(pfe)], pfe,
                    marker='o', label="{} Hz".format(f))
    ax.set_title("Fe Losses/(W/kg) " + title)
    if log:
        ax.set_yscale('log')
        ax.set_xscale('log')
    ax.set_xlabel("Flux Density [T]")
    # plt.ylabel("Pfe [W/kg]")
    ax.legend()
    ax.grid(True)
def spel(isa, with_axis=False, ax=0):
    """plot super elements of I7/ISA7 model
    Args:
        isa: Isa7 object
    """
    from matplotlib.patches import Polygon
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    for se in isa.superelements:
        # collect the outline of all node chains of this superelement
        outline = [n.xy for nc in se.nodechains for n in nc.nodes]
        ax.add_patch(Polygon(outline, color=isa.color[se.color], lw=0))
    ax.autoscale(enable=True)
    if not with_axis:
        ax.axis('off')
def mesh(isa, with_axis=False, ax=0):
    """plot mesh of I7/ISA7 model

    Args:
        isa: Isa7 object
        with_axis: keep the coordinate axes visible if True
        ax: matplotlib axis; 0 uses plt.gca()
    """
    from matplotlib.lines import Line2D
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    # draw the outline of every mesh element
    for el in isa.elements:
        pts = [list(i) for i in zip(*[v.xy for v in el.vertices])]
        ax.add_line(Line2D(pts[0], pts[1], color='b', ls='-', lw=0.25))
    # for nc in isa.nodechains:
    #     pts = [list(i) for i in zip(*[(n.x, n.y) for n in nc.nodes])]
    #     ax.add_line(Line2D(pts[0], pts[1], color="b", ls="-", lw=0.25,
    #                        marker=".", ms="2", mec="None"))
    # for nc in isa.nodechains:
    #     if nc.nodemid is not None:
    #         plt.plot(*nc.nodemid.xy, "rx")
    ax.autoscale(enable=True)
    if not with_axis:
        ax.axis('off')
def _contour(ax, title, elements, values, label='', isa=None):
    """Draw a filled per-element contour plot.

    Args:
        ax: matplotlib axis (current axis if 0)
        title: plot title
        elements: sequence of mesh elements providing ``.vertices``
        values: one value per element; NaN entries are masked white
        label: colorbar label (omitted if empty)
        isa: optional Isa7/NC model drawn as light-gray background
    """
    from matplotlib.patches import Polygon
    from matplotlib.collections import PatchCollection
    if ax == 0:
        ax = plt.gca()
    ax.set_aspect('equal')
    ax.set_title(title, fontsize=18)
    if isa:
        # background: all superelements in light gray
        for se in isa.superelements:
            ax.add_patch(Polygon([n.xy
                                  for nc in se.nodechains
                                  for n in nc.nodes],
                                 color='gray', alpha=0.1, lw=0))
    # only elements with a finite value get a colored patch
    valid_values = np.logical_not(np.isnan(values))
    patches = np.array([Polygon([v.xy for v in e.vertices])
                        for e in elements])[valid_values]
    # , cmap=matplotlib.cm.jet, alpha=0.4)
    p = PatchCollection(patches, alpha=1.0, match_original=False)
    p.set_array(np.asarray(values)[valid_values])
    ax.add_collection(p)
    cb = plt.colorbar(p)
    # NaN-valued elements are covered with white patches
    for patch in np.array([Polygon([v.xy for v in e.vertices],
                                   fc='white', alpha=1.0)
                           for e in elements])[np.isnan(values)]:
        ax.add_patch(patch)
    if label:
        cb.set_label(label=label, fontsize=18)
    ax.autoscale(enable=True)
    ax.axis('off')
def demag(isa, ax=0):
    """plot demag of NC/I7/ISA7 model

    Args:
        isa: Isa7/NC object
        ax: matplotlib axis (current axis if 0)
    """
    # collect the magnet elements and their demagnetization at the
    # model's magnet temperature
    emag = [e for e in isa.elements if e.is_magnet()]
    demag = np.array([e.demagnetization(isa.MAGN_TEMPERATURE) for e in emag])
    _contour(ax, f'Demagnetization at {isa.MAGN_TEMPERATURE} °C',
             emag, demag, '-H / kA/m', isa)
    logger.info("Max demagnetization %f", np.max(demag))
def demag_pos(isa, pos, icur=-1, ibeta=-1, ax=0):
    """plot demag of NC/I7/ISA7 model at rotor position

    Args:
        isa: Isa7/NC object
        pos: rotor position in degree
        icur: cur amplitude index or last index if -1
        ibeta: beta angle index or last index if -1
        ax: matplotlib axis (current axis if 0)
    """
    emag = [e for e in isa.elements if e.is_magnet()]
    demag = np.array([isa.demagnetization(e, icur, ibeta)[1]
                      for e in emag])
    # pick the first stored rotor position >= requested pos (stored
    # positions are in radians, ``pos`` is in degrees); if none matches,
    # the loop falls through with the last index
    for i, x in enumerate(isa.pos_el_fe_induction):
        if x >= pos/180*np.pi:
            break

    hpol = demag[:, i]
    # zero means "no demagnetization data" - mask those elements
    hpol[hpol == 0] = np.nan
    _contour(ax, f'Demagnetization at Pos. {round(x/np.pi*180)}° ({isa.MAGN_TEMPERATURE} °C)',
             emag, hpol, '-H / kA/m', isa)
    logger.info("Max demagnetization %f kA/m", np.nanmax(hpol))
def flux_density(isa, subreg=[], ax=0):
    """plot flux density of NC/I7/ISA7 model

    Args:
        isa: Isa7/NC object
        subreg: subregion name or list of names (all elements if empty)
        ax: matplotlib axis (current axis if 0)
    """
    if subreg:
        # accept a single name as well as a list of names
        names = subreg if isinstance(subreg, list) else [subreg]
        elements = [e
                    for name in names
                    for se in isa.get_subregion(name).elements()
                    for e in se]
    else:
        elements = list(isa.elements)

    fluxd = np.array([np.linalg.norm(e.flux_density()) for e in elements])
    _contour(ax, 'Flux Density T', elements, fluxd)
    logger.info("Max flux dens %f", np.max(fluxd))
def loss_density(isa, subreg=[], ax=0):
    """plot loss density of NC/I7/ISA7 model

    Args:
        isa: Isa7/NC object
        subreg: subregion name or list of names (all elements if empty)
        ax: matplotlib axis (current axis if 0)
    """
    if subreg:
        # accept a single name as well as a list of names
        names = subreg if isinstance(subreg, list) else [subreg]
        elements = [e
                    for name in names
                    for sre in isa.get_subregion(name).elements()
                    for e in sre]
    else:
        elements = list(isa.elements)

    # convert W/m³ to kW/m³
    lossd = np.array([e.loss_density*1e-3 for e in elements])
    _contour(ax, 'Loss Density kW/m³', elements, lossd)
def mmf(f, title='', ax=0):
    """plot magnetomotive force (mmf) of winding

    Args:
        f: dict with pos, mmf, pos_fft, mmf_fft and alfa0 values
           (positions in radians)
        title: optional plot title
        ax: matplotlib axis (current axis if 0)
    """
    if ax == 0:
        ax = plt.gca()
    if title:
        ax.set_title(title)
    ax.plot(np.array(f['pos'])/np.pi*180, f['mmf'])
    ax.plot(np.array(f['pos_fft'])/np.pi*180, f['mmf_fft'])
    ax.set_xlabel('Position / Deg')
    # mark the phase angle alfa0 with a dashed vertical line and an arrow
    phi = [f['alfa0']/np.pi*180, f['alfa0']/np.pi*180]
    y = [min(f['mmf_fft']), 1.1*max(f['mmf_fft'])]
    ax.plot(phi, y, '--')
    alfa0 = round(f['alfa0']/np.pi*180, 3)
    ax.text(phi[0]/2, y[0]+0.05, f"{alfa0}°",
            ha="center", va="bottom")
    ax.annotate(f"", xy=(phi[0], y[0]),
                xytext=(0, y[0]), arrowprops=dict(arrowstyle="->"))
    ax.grid()
def mmf_fft(f, title='', mmfmin=1e-2, ax=0):
    """plot winding mmf harmonics

    Args:
        f: dict with nue (harmonic order) and mmf_nue (amplitude) values
        title: optional plot title ('MMF Harmonics' if empty)
        mmfmin: amplitudes at or below this threshold are not shown
        ax: matplotlib axis (current axis if 0)
    """
    if ax == 0:
        ax = plt.gca()
    if title:
        ax.set_title(title)
    else:
        ax.set_title('MMF Harmonics')
    ax.grid(True)
    order, mmf = np.array([(n, m) for n, m in zip(f['nue'],
                                                  f['mmf_nue']) if m > mmfmin]).T
    try:
        # NOTE(review): use_line_collection was deprecated and later
        # removed in newer matplotlib releases - confirm supported version
        markerline1, stemlines1, _ = ax.stem(order, mmf, '-.', basefmt=" ",
                                             use_line_collection=True)
        ax.set_xticks(order)
    except ValueError:  # empty sequence
        pass
def zoneplan(wdg, ax=0):
    """plot zone plan of winding wdg

    Draws one colored rectangle per coil side, labelled with the phase
    number and current direction sign; an upper and (for two-layer
    windings) a lower row.

    Args:
        wdg: Windings object
        ax: matplotlib axis (current axis if 0)
    """
    from matplotlib.patches import Rectangle
    upper, lower = wdg.zoneplan()
    # Qb: number of slots in the base winding (count of upper coil sides)
    Qb = len([n for l in upper for n in l])
    from femagtools.windings import coil_color

    rh = 0.5  # rectangle height
    if lower:
        yl = rh
        ymax = 2*rh + 0.2
    else:
        yl = 0
        ymax = rh + 0.2
    if ax == 0:
        ax = plt.gca()
    ax.axis('off')
    ax.set_xlim([-0.5, Qb-0.5])
    ax.set_ylim([0, ymax])
    ax.set_aspect(Qb/6+0.3)

    # upper layer: the sign of each slot number encodes current direction
    for i, p in enumerate(upper):
        for x in p:
            ax.add_patch(Rectangle((abs(x)-1.5, yl), 1, rh,
                                   facecolor=coil_color[i],
                                   edgecolor='white', fill=True))
            s = f'+{i+1}' if x > 0 else f'-{i+1}'
            ax.text(abs(x)-1, yl+rh/2, s, color='black',
                    ha="center", va="center")
    # lower layer (only present for two-layer windings)
    for i, p in enumerate(lower):
        for x in p:
            ax.add_patch(Rectangle((abs(x)-1.5, yl-rh), 1, rh,
                                   facecolor=coil_color[i],
                                   edgecolor='white', fill=True))
            s = f'+{i+1}' if x > 0 else f'-{i+1}'
            ax.text(abs(x)-1, yl-rh/2, s, color='black',
                    ha="center", va="center")

    yu = yl+rh
    # label every slot, or every second one for large slot counts
    step = 1 if Qb < 25 else 2
    if lower:
        yl -= rh
    margin = 0.05
    ax.text(-0.5, yu+margin, f'Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}',
            ha='left', va='bottom', size=15)
    for i in range(0, Qb, step):
        ax.text(i, yl-margin, f'{i+1}', ha="center", va="top")
def winding_factors(wdg, n=8, ax=0):
    """Plot the first *n* winding factors (pitch, distribution, total).

    Args:
        wdg: Windings object
        n: number of harmonics to plot
        ax: matplotlib axis (current axis if 0)
    """
    # fix: the axis parameter was ignored (``ax = plt.gca()`` ran
    # unconditionally, clobbering a caller-supplied axis)
    if ax == 0:
        ax = plt.gca()
    ax.set_title(f'Winding factors Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}')
    ax.grid(True)
    # collect (order, pitch, distribution, total) tuples; the loop
    # variable is named ``nue`` to avoid shadowing the parameter ``n``
    order, kwp, kwd, kw = np.array([(nue, k1, k2, k3)
                                    for nue, k1, k2, k3 in zip(wdg.kw_order(n),
                                                               wdg.kwp(n),
                                                               wdg.kwd(n),
                                                               wdg.kw(n))]).T
    try:
        # offset the pitch/distribution stems by +-1 so the three
        # series remain visually distinguishable
        markerline1, stemlines1, _ = ax.stem(order-1, kwp, 'C1:', basefmt=" ",
                                             markerfmt='C1.',
                                             use_line_collection=True, label='Pitch')
        markerline2, stemlines2, _ = ax.stem(order+1, kwd, 'C2:', basefmt=" ",
                                             markerfmt='C2.',
                                             use_line_collection=True, label='Distribution')
        markerline3, stemlines3, _ = ax.stem(order, kw, 'C0-', basefmt=" ",
                                             markerfmt='C0o',
                                             use_line_collection=True, label='Total')
        ax.set_xticks(order)
        ax.legend()
    except ValueError:  # empty sequence
        pass
def winding(wdg, ax=0):
    """plot coils of windings wdg

    Draws the slots as rectangles, one colored line per coil (phase
    colors from femagtools.windings.coil_color) and an arrow per slot
    indicating the current direction.

    Args:
        wdg: Windings object
        ax: matplotlib axis (current axis if 0)
    """
    from matplotlib.patches import Rectangle
    from matplotlib.lines import Line2D
    from femagtools.windings import coil_color

    # drawing geometry constants (plot units)
    coil_len = 25
    coil_height = 4
    dslot = 8
    arrow_head_length = 2
    arrow_head_width = 2
    if ax == 0:
        ax = plt.gca()
    z = wdg.zoneplan()
    # shift the two layers horizontally if a second layer exists
    xoff = 0
    if z[-1]:
        xoff = 0.75
    yd = dslot*wdg.yd  # coil span in plot units
    mh = 2*coil_height/yd
    slots = sorted([abs(n) for m in z[0] for n in m])
    smax = slots[-1]*dslot
    for n in slots:
        # slot rectangle with circled slot number
        x = n*dslot
        ax.add_patch(Rectangle((x + dslot/4, 1), dslot /
                               2, coil_len - 2, fc="lightblue"))
        ax.text(x, coil_len / 2,
                str(n),
                horizontalalignment="center",
                verticalalignment="center",
                backgroundcolor="white",
                bbox=dict(boxstyle='circle,pad=0', fc="white", lw=0))

    # layer 0 thin, layer 1 thick
    line_thickness = [0.6, 1.2]
    for i, layer in enumerate(z):
        b = -xoff if i else xoff
        lw = line_thickness[i]
        for m, mslots in enumerate(layer):
            for k in mslots:
                x = abs(k) * dslot + b
                xpoints = []
                ypoints = []
                if (i == 0 and (k > 0 or (k < 0 and wdg.l > 1))):
                    # first layer, positive dir or neg. dir and 2-layers:
                    # from right bottom
                    if x + yd > smax+b:
                        # coil end would leave the drawing: bend it back
                        dx = dslot if yd > dslot else yd/4
                        xpoints = [x + yd//2 + dx - xoff]
                        ypoints = [-coil_height + mh*dx]
                    xpoints += [x + yd//2 - xoff, x, x, x + yd//2-xoff]
                    ypoints += [-coil_height, 0, coil_len,
                                coil_len+coil_height]
                    if x + yd > smax+b:
                        xpoints += [x + yd//2 + dx - xoff]
                        ypoints += [coil_len+coil_height - mh*dx]
                else:
                    # from left bottom
                    if x - yd < 0:  # and x - yd/2 > -3*dslot:
                        dx = dslot if yd > dslot else yd/4
                        xpoints = [x - yd//2 - dx + xoff]
                        ypoints = [- coil_height + mh*dx]
                    xpoints += [x - yd//2+xoff, x, x, x - yd/2+xoff]
                    ypoints += [-coil_height, 0, coil_len,
                                coil_len+coil_height]
                    if x - yd < 0:  # and x - yd > -3*dslot:
                        xpoints += [x - yd//2 - dx + xoff]
                        ypoints += [coil_len + coil_height - mh*dx]
                ax.add_line(Line2D(xpoints, ypoints,
                                   color=coil_color[m], lw=lw))

                # current direction arrow: up for positive slot numbers,
                # down for negative ones
                if k > 0:
                    h = arrow_head_length
                    y = coil_len * 0.8
                else:
                    h = -arrow_head_length
                    y = coil_len * 0.2
                ax.arrow(x, y, 0, h,
                         length_includes_head=True,
                         head_starts_at_zero=False,
                         head_length=arrow_head_length,
                         head_width=arrow_head_width,
                         fc=coil_color[m], lw=0)
    if False:  # TODO show winding connections
        m = 0
        for k in [n*wdg.Q/wdg.p/wdg.m + 1 for n in range(wdg.m)]:
            if k < len(slots):
                x = k * dslot + b + yd/2 - xoff
                ax.add_line(Line2D([x, x],
                                   [-2*coil_height, -coil_height],
                                   color=coil_color[m], lw=lw))
                ax.text(x, -2*coil_height+0.5, str(m+1), color=coil_color[m])
                m += 1
    ax.autoscale(enable=True)
    ax.set_axis_off()
def main():
    """Command line entry point: read a BCH/BATCH, PLT or MC file and
    create the matching plot.

    Raises:
        ValueError: if the BCH result type is not supported.
    """
    import io
    import sys
    import argparse
    from .__init__ import __version__
    from femagtools.bch import Reader

    argparser = argparse.ArgumentParser(
        description='Read BCH/BATCH/PLT file and create a plot')
    argparser.add_argument('filename',
                           help='name of BCH/BATCH/PLT file')
    argparser.add_argument(
        "--version",
        "-v",
        action="version",
        version="%(prog)s {}, Python {}".format(__version__, sys.version),
        help="display version information",
    )
    args = argparser.parse_args()
    if not matplotlibversion:
        sys.exit(0)
    if not args.filename:
        sys.exit(0)

    # dispatch on the file extension
    ext = args.filename.split('.')[-1].upper()
    if ext.startswith('MC'):
        import femagtools.mcv
        # fix: read the parsed filename argument (was sys.argv[1], which
        # breaks whenever the filename is not the first CLI token)
        mcv = femagtools.mcv.read(args.filename)

        if mcv['mc1_type'] in (femagtools.mcv.MAGCRV, femagtools.mcv.ORIENT_CRV):
            ncols = 2
        else:  # Permanent Magnet
            ncols = 1

        fig, ax = plt.subplots(nrows=1, ncols=ncols, figsize=(10, 6))
        if ncols > 1:
            plt.subplot(1, 2, 1)
            mcv_hbj(mcv)
            plt.subplot(1, 2, 2)
            mcv_muer(mcv)
        else:
            mcv_hbj(mcv, log=False)

        fig.tight_layout()
        fig.subplots_adjust(top=0.94)
        plt.show()
        return

    if ext.startswith('PLT'):
        import femagtools.forcedens
        fdens = femagtools.forcedens.read(args.filename)
        cols = 1
        rows = 2
        fig, ax = plt.subplots(nrows=rows, ncols=cols,
                               figsize=(10, 10*rows))
        title = '{}, Rotor position {}'.format(
            fdens.title, fdens.positions[0]['position'])
        pos = fdens.positions[0]['X']
        FT_FN = (fdens.positions[0]['FT'],
                 fdens.positions[0]['FN'])
        plt.subplot(rows, cols, 1)
        forcedens(title, pos, FT_FN)

        title = 'Force Density Harmonics'
        plt.subplot(rows, cols, 2)
        forcedens_fft(title, fdens)

        # fig.tight_layout(h_pad=3.5)
        # if title:
        #     fig.subplots_adjust(top=0.92)
        plt.show()
        return

    # otherwise: BCH/BATCH results, dispatch on the simulation type
    bchresults = Reader()
    with io.open(args.filename, encoding='latin1', errors='ignore') as f:
        bchresults.read(f.readlines())

    if (bchresults.type.lower().find(
            'pm-synchronous-motor simulation') >= 0 or
            bchresults.type.lower().find(
                'permanet-magnet-synchronous-motor') >= 0 or
            bchresults.type.lower().find(
                'simulation pm/universal-motor') >= 0):
        pmrelsim(bchresults, bchresults.filename)
    elif bchresults.type.lower().find(
            'multiple calculation of forces and flux') >= 0:
        multcal(bchresults, bchresults.filename)
    elif bchresults.type.lower().find('cogging calculation') >= 0:
        cogging(bchresults, bchresults.filename)
    elif bchresults.type.lower().find('ld-lq-identification') >= 0:
        ldlq(bchresults)
    elif bchresults.type.lower().find('psid-psiq-identification') >= 0:
        psidq(bchresults)
    elif bchresults.type.lower().find('fast_torque calculation') >= 0:
        fasttorque(bchresults)
    elif bchresults.type.lower().find('transient sc') >= 0:
        transientsc(bchresults, bchresults.filename)
    else:
        raise ValueError("BCH type {} not yet supported".format(
            bchresults.type))
    plt.show()
def characteristics(char, title=''):
    """Plot speed characteristics in a 2x2 grid: torque/power,
    voltage/cos phi, currents (and beta) and losses/efficiency.

    Args:
        char: dict with n, T, pmech, u1, cosphi, i1, eta and
            plfe/plfe1, plcu/plcu1 lists; optionally id, iq, beta
        title: optional figure title
    """
    fig, axs = plt.subplots(2, 2, figsize=(10, 8), sharex=True)
    if title:
        fig.suptitle(title)

    n = np.array(char['n'])*60            # speed in rpm
    pmech = np.array(char['pmech'])*1e-3  # mechanical power in kW

    # torque with mechanical power on a twin axis
    axs[0, 0].plot(n, np.array(char['T']), 'C0-', label='Torque')
    axs[0, 0].set_ylabel("Torque / Nm")
    axs[0, 0].grid()
    axs[0, 0].legend(loc='center left')
    ax1 = axs[0, 0].twinx()
    ax1.plot(n, pmech, 'C1-', label='P mech')
    ax1.set_ylabel("Power / kW")
    ax1.legend(loc='lower center')

    # voltage and power factor (skip the first sample, typically n=0)
    axs[0, 1].plot(n[1:], np.array(char['u1'][1:]), 'C0-', label='Voltage')
    axs[0, 1].set_ylabel("Voltage / V",)
    axs[0, 1].grid()
    axs[0, 1].legend(loc='center left')
    ax2 = axs[0, 1].twinx()
    ax2.plot(n[1:], char['cosphi'][1:], 'C1-', label='Cos Phi')
    ax2.set_ylabel("Cos Phi")
    ax2.legend(loc='lower right')

    # currents, plus the beta angle when available
    if 'id' in char:
        axs[1, 0].plot(n, np.array(char['id']), label='Id')
    if 'iq' in char:
        axs[1, 0].plot(n, np.array(char['iq']), label='Iq')
    axs[1, 0].plot(n, np.array(char['i1']), label='I1')
    axs[1, 0].set_xlabel("Speed / rpm")
    axs[1, 0].set_ylabel("Current / A")
    axs[1, 0].legend(loc='center left')
    if 'beta' in char:
        ax3 = axs[1, 0].twinx()
        ax3.plot(n, char['beta'], 'C3-', label='Beta')
        ax3.set_ylabel("Beta / °")
        ax3.legend(loc='center right')
    axs[1, 0].grid()

    # losses: newer result dicts use plfe/plcu, older ones plfe1/plcu1.
    # (removed the unused local ``pl`` previously built from
    # char['losses'] - it was never plotted)
    try:
        plfe = np.array(char['plfe'])*1e-3
    except KeyError:
        plfe = np.array(char['plfe1'])*1e-3
    try:
        plcu = np.array(char['plcu'])*1e-3
    except KeyError:
        plcu = np.array(char['plcu1'])*1e-3
    axs[1, 1].plot(n, plcu, 'C0-', label='Cu Losses')
    axs[1, 1].plot(n, plfe, 'C1-', label='Fe Losses')
    axs[1, 1].set_ylabel("Losses / kW")
    axs[1, 1].legend(loc='center left')
    axs[1, 1].grid()
    axs[1, 1].set_xlabel("Speed / rpm")
    ax4 = axs[1, 1].twinx()
    # efficiency on a twin axis; first/last samples are often singular
    ax4.plot(n[1:-1], char['eta'][1:-1], 'C3-', label="Eta")
    ax4.legend(loc='upper center')
    ax4.set_ylabel("Efficiency")

    fig.tight_layout()
if __name__ == "__main__":
    # configure simple console logging and run the command line tool
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(message)s')
    main()
| StarcoderdataPython |
6591186 | <gh_stars>0
import boto3, json, os, re
# Required environment configuration: the target SQS queue name and the
# S3 bucket holding sites-list.json. A missing variable raises KeyError
# at import time, failing the Lambda fast.
QUEUE_NAME = os.environ["QUEUE_NAME"]
BUCKET_NAME = os.environ["BUCKET_NAME"]
def lambda_handler(event, context):
    """Read the sites list from S3 and enqueue one SQS message per site.

    Sites are sent in batches of at most 10 entries, the SQS
    ``send_messages`` batch limit.

    Args:
        event: Lambda event payload (unused).
        context: Lambda context object (unused).
    """
    # Load Sites List from S3 Bucket
    s3 = boto3.client('s3')
    data = s3.get_object(Bucket=BUCKET_NAME, Key="sites-list.json")
    sites_list = json.loads(data['Body'].read())

    # Process Sites List into Array (replaces the manual append loop;
    # also removed the unused ``num_chunks`` counter)
    all_sites_names = list(sites_list['sites'])

    # Get Queue Information
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)

    # Define Batch Jobs for Queue (SQS allows at most 10 per batch)
    maxBatchSize = 10
    chunks = [all_sites_names[x:x+maxBatchSize]
              for x in range(0, len(all_sites_names), maxBatchSize)]

    # Batch Jobs for Queue and Send
    for chunk in chunks:
        entries = []
        for site_name in chunk:
            variables = {'site_name': site_name, 'attempt_num': 2}
            # batch entry ids must be alphanumeric: strip other chars
            entry = {'Id': re.sub(r'\W+', '', site_name),
                     'MessageBody': json.dumps(variables)}
            entries.append(entry)

        # Send Batch to Queue
        response = queue.send_messages(Entries=entries)
        print(response)
| StarcoderdataPython |
4950060 | <reponame>baumartig/paperboy
from settings_handler import settings
import os
import smtplib
import mimetypes
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEAudio import MIMEAudio
from email.MIMEImage import MIMEImage
from email.Encoders import encode_base64
def sendMail(subject, text, *attachmentFilePaths):
    """Send a mail (Python 2) with the given subject, body text and
    optional file attachments to the recipient configured in settings.

    Uses the configured SMTP server (optionally with STARTTLS login)
    when settings.useSmtp() is true, otherwise a local SMTP server.
    """
    recipient = settings.mailTo
    sender = settings.mailFrom

    msg = MIMEMultipart('mixed')
    msg['From'] = sender
    msg['To'] = recipient
    msg['Subject'] = subject
    msg.attach(MIMEText(text))

    print "Start sending mail:"
    print "Recipient: %s" % recipient
    print "Sender: %s" % sender
    print "Subject: %s" % subject
    # NOTE(review): recipient is printed twice - probably a leftover
    print "Recipient: %s" % recipient

    for attachmentFilePath in attachmentFilePaths:
        msg.attach(getAttachment(attachmentFilePath))

    if settings.useSmtp():
        if "port" in settings.smtpServer:
            mailServer = smtplib.SMTP( settings.smtpServer["address"],
                                      settings.smtpServer["port"])
        else:
            mailServer = smtplib.SMTP(settings.smtpServer["address"])

        if "security" in settings.smtpServer:
            if settings.smtpServer["security"] == "starttls":
                # handle starttls
                gmailUser = settings.smtpServer["login"]
                gmailPassword = settings.smtpServer["password"]
                mailServer.ehlo()
                mailServer.starttls()
                mailServer.ehlo()
                mailServer.login(gmailUser, gmailPassword)
    else:
        print "Using sendmail"
        mailServer = smtplib.SMTP('localhost')

    print "Sending mail"
    mailServer.set_debuglevel(1)
    mailServer.sendmail(sender, recipient, msg.as_string())
    mailServer.close()
def getAttachment(attachmentFilePath):
    """Build a MIME attachment part for the given file path.

    The main/sub MIME type is guessed from the file name; unknown or
    encoded files fall back to application/octet-stream (base64).
    """
    contentType, encoding = mimetypes.guess_type(attachmentFilePath)

    if contentType is None or encoding is not None:
        contentType = 'application/octet-stream'

    mainType, subType = contentType.split('/', 1)

    file = open(attachmentFilePath, 'rb')
    try:
        if mainType == 'text':
            attachment = MIMEText(file.read())
        elif mainType == 'message':
            # fix: ``email`` was referenced but never imported
            import email
            attachment = email.message_from_file(file)
        elif mainType == 'image':
            # fix: the keyword is ``_subtype`` - ``_subType`` raised
            # TypeError for every image attachment
            attachment = MIMEImage(file.read(), _subtype=subType)
        elif mainType == 'audio':
            attachment = MIMEAudio(file.read(), _subtype=subType)
        else:
            attachment = MIMEBase(mainType, subType)
            attachment.set_payload(file.read())
            encode_base64(attachment)
    finally:
        # fix: close the file even if building the MIME part fails
        file.close()

    attachment.add_header('Content-Disposition', 'attachment',
                          filename=os.path.basename(attachmentFilePath))
    return attachment
| StarcoderdataPython |
6551687 | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attentions specific to Transformer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from texar import context
# pylint: disable=too-many-arguments, invalid-name, no-member
__all__ = [
'attention_bias_lower_triangle',
'attention_bias_ignore_padding',
'attention_bias_local',
'multihead_attention',
]
def attention_bias_lower_triangle(length):
    """Create a bias tensor to be added to attention logits.

    Allows a query to attend to all positions up to and including its
    own (a causal mask).

    Args:
        length: a scalar.

    Returns:
        a `Tensor` with shape [1, 1, length, length].
    """
    # unlimited backward attention, no forward attention
    return attention_bias_local(length, max_backward=-1, max_forward=0)
def attention_bias_local(length, max_backward, max_forward):
    """Create a bias tensor restricting attention to a local window.

    A position may attend at most ``max_backward`` positions back and
    ``max_forward`` positions ahead; negative values mean unlimited.
    This does not actually save any computation.

    Args:
        length: int
        max_backward: int, maximum distance backward to attend. Negative
            values indicate unlimited.
        max_forward: int, maximum distance forward to attend. Negative
            values indicate unlimited.

    Returns:
        a `Tensor` with shape [1, 1, length, length]
        ([batch_size, num_heads, query_len, query_len]).
    """
    band = _ones_matrix_band_part(length,
                                  length,
                                  max_backward,
                                  max_forward,
                                  out_shape=[1, 1, length, length])
    # positions outside the band receive a large negative bias so that
    # softmax assigns them (numerically) zero weight
    return (band - 1.0) * 1e18
def attention_bias_ignore_padding(memory_padding):
    """Create a bias tensor that masks out padded memory positions.

    Args:
        memory_padding: a float `Tensor` with shape
            [batch, memory_length], 1.0 at padded positions.

    Returns:
        a `Tensor` with shape [batch, 1, 1, memory_length]; the axes
        correspond to batch_size, num_heads, queries_len, memory_length.
    """
    bias = memory_padding * -1e18
    # insert singleton num_heads and query-length axes for broadcasting
    return bias[:, tf.newaxis, tf.newaxis, :]
def multihead_attention(queries,
                        memory_attention_bias=None,
                        memory=None,
                        num_heads=8,
                        num_units=None,
                        dropout_rate=0,
                        cache=None,
                        scope='multihead_attention'):
    """Applies multihead attention.

    Args:
        queries: A 3d tensor with shape of [batch, length_query,
            depth_query].
        memory_attention_bias: optional bias tensor added to the
            attention logits (e.g. a padding or causal mask).
        memory: A 3d tensor with shape of [batch, length_key, depth_key];
            if None, self-attention over `queries` is computed.
        num_heads: An int. Number of heads with calculating attention.
        num_units: A scalar indicating the attention size,
            equals to depth_query if not given. Must be divisible by
            num_heads.
        dropout_rate: A floating point number applied to the attention
            weights (active only in train mode).
        cache: optional dict of keys/values reused during incremental
            decoding; updated in place for self-attention.
        scope: Optional scope for `variable_scope`.

    Returns:
        A 3d tensor with shape of (batch, length_query, num_units)
    """
    #pylint: disable=too-many-locals
    with tf.variable_scope(scope):
        if num_units is None:
            num_units = queries.get_shape().as_list()[-1]
        if num_units % num_heads != 0:
            raise ValueError("Value depth (%d) must be divisible by the"
                             "number of attention heads (%d)." % (\
                num_units, num_heads))
        if memory is None:
            #'self attention': queries project to Q, K and V
            Q = tf.layers.dense(queries, num_units, use_bias=False,
                                name='q')
            K = tf.layers.dense(queries, num_units, use_bias=False,
                                name='k')
            V = tf.layers.dense(queries, num_units, use_bias=False,
                                name='v')
            if cache is not None:
                # 'decoder self attention when dynamic decoding':
                # append this step's keys/values to the cached ones
                K = tf.concat([cache['self_keys'], K], axis=1)
                V = tf.concat([cache['self_values'], V], axis=1)
                cache['self_keys'] = K
                cache['self_values'] = V
        else:
            # 'encoder decoder attention': K/V come from memory; the
            # cond computes them only once (when the cache is empty)
            Q = tf.layers.dense(queries, num_units, use_bias=False,
                                name='q')
            if cache is not None:
                K, V = tf.cond(
                    tf.equal(tf.shape(cache["memory_keys"])[1], 0),
                    true_fn=lambda: \
                        [tf.layers.dense(memory, num_units, \
                        use_bias=False, name='k'), \
                        tf.layers.dense(memory, num_units, \
                        use_bias=False, name='v')],
                    false_fn=lambda: \
                        [cache["memory_keys"], cache["memory_values"]])
            else:
                K, V = [tf.layers.dense(memory, num_units, \
                        use_bias=False, name='k'),
                        tf.layers.dense(memory, num_units, \
                        use_bias=False, name='v')]

        Q_ = _split_heads(Q, num_heads)
        K_ = _split_heads(K, num_heads)
        V_ = _split_heads(V, num_heads)
        #[batch_size, num_heads, seq_length, memory_depth]
        key_depth_per_head = num_units // num_heads
        # scaled dot-product attention
        Q_ *= key_depth_per_head**-0.5

        logits = tf.matmul(Q_, K_, transpose_b=True)
        if memory_attention_bias is not None:
            logits += memory_attention_bias
        weights = tf.nn.softmax(logits, name="attention_weights")
        weights = tf.layers.dropout(weights, \
            rate=dropout_rate, training=context.global_mode_train())
        outputs = tf.matmul(weights, V_)

        outputs = _combine_heads(outputs)
        outputs = tf.layers.dense(outputs, num_units,\
            use_bias=False, name='output_transform')
        #(batch_size, length_query, attention_depth)
    return outputs
def _split_heads(x, num_heads):
    """Split the last dimension into heads and move them to axis 1.

    `x.shape[-1]` must be divisible by num_heads; the result has shape
    [batch, num_heads, seq_len, depth // num_heads].
    """
    last_dim = x.get_shape()[-1]
    batch = tf.shape(x)[0]
    length = tf.shape(x)[1]
    reshaped = tf.reshape(
        x, [batch, length, num_heads, last_dim // num_heads])
    return tf.transpose(reshaped, perm=[0, 2, 1, 3])
def _combine_heads(x):
    """Merge the heads dimension back into the feature dimension.

    Args:
        x: A Tensor of shape `[batch, num_heads, seq_len, dim]`

    Returns:
        A Tensor of shape `[batch, seq_len, num_heads * dim]`
    """
    transposed = tf.transpose(x, perm=[0, 2, 1, 3])  # [batch, seq_len, heads, dim]
    heads, depth = transposed.get_shape()[-2:]
    batch = tf.shape(transposed)[0]
    length = tf.shape(transposed)[1]
    return tf.reshape(transposed, [batch, length, heads * depth])
def _ones_matrix_band_part(rows, cols, num_lower, num_upper,
                           out_shape=None):
    """Matrix band part of ones.

    Returns a [rows, cols] matrix (optionally reshaped to ``out_shape``)
    that is 1 within ``num_lower`` sub-diagonals and ``num_upper``
    super-diagonals of the main diagonal and 0 elsewhere. Negative
    limits mean unlimited in that direction.
    """
    if all([isinstance(el, int) for el in [rows, cols, num_lower,
                                           num_upper]]):
        # Needed info is constant, so we construct in numpy
        if num_lower < 0:
            num_lower = rows - 1
        if num_upper < 0:
            num_upper = cols - 1
        # lower_mask keeps entries at most num_lower below the diagonal
        # (note the transpose), upper_mask at most num_upper above it
        lower_mask = np.tri(cols, rows, num_lower).T
        upper_mask = np.tri(rows, cols, num_upper)
        band = np.ones((rows, cols)) * lower_mask * upper_mask
        if out_shape:
            band = band.reshape(out_shape)
        band = tf.constant(band, tf.float32)
    else:
        # dynamic shapes: build the band inside the TF graph
        band = tf.matrix_band_part(tf.ones([rows, cols]),
                                   tf.cast(num_lower, tf.int64),
                                   tf.cast(num_upper, tf.int64))
        if out_shape:
            band = tf.reshape(band, out_shape)

    return band
| StarcoderdataPython |
25115 | from django.contrib import admin
from .models import Comment
# Register your models here.
class CommentsAdmin(admin.ModelAdmin):
    """Admin configuration for Comment: list id, author, content and time."""
    # columns shown in the admin changelist
    list_display = ['id', "user", "content", "timestamp"]
    # removed the previous inner ``class Meta``: ModelAdmin does not read
    # an inner Meta class (that is a ModelForm/Model concept), so it was
    # dead code.


admin.site.register(Comment, CommentsAdmin)
| StarcoderdataPython |
6619112 | <gh_stars>0
from django.contrib.admin.apps import AdminConfig
class AdminConfig2fa(AdminConfig):
    """AppConfig variant that installs the 2FA-enabled admin site."""
    # not the default app config for this package
    default = False
    # dotted path to the AdminSite subclass used as the default admin site
    default_site = 'modal_2fa.admin.AdminSite2FA'
| StarcoderdataPython |
6543713 | import pytest
from controlled_vocabulary.utils import search_term_or_none
from radical_translations.core.documents import ResourceDocument
from radical_translations.core.models import (
Classification,
Contribution,
Resource,
ResourceLanguage,
)
from radical_translations.utils.models import Date
pytestmark = pytest.mark.django_db
@pytest.fixture
@pytest.mark.usefixtures("entry_search")
def resource_for_search(entry_search):
    """Yield a Resource built from the ``entry_search`` fixture; delete
    it and its related resources after the test (teardown keeps the
    search index clean for subsequent tests)."""
    # setup
    resource = Resource.from_gsx_entry(entry_search)
    yield resource
    # teardown
    if resource.id:
        for rr in resource.related_to.all():
            rr.resource.delete()
        resource.delete()
class TestResourceDocument:
def test_get_queryset(self):
qs = ResourceDocument().get_queryset()
assert qs.model == Resource
    @pytest.mark.usefixtures("resource_for_search")
    def test_get_instances_from_related(self, resource_for_search):
        """Saving a related object (date, subject, title, classification,
        agent, language, place) must re-index the affected resources."""
        resource = resource_for_search

        # date: a new display date becomes searchable after save
        date_display = "2025"
        search = ResourceDocument.search().query("match", date_display=date_display)
        assert len(search.execute()) == 0

        resource.date.date_display = date_display
        resource.date.save()
        search = ResourceDocument.search().query("match", date_display=date_display)
        assert len(search.execute()) == 1

        # subject label
        label = "pytest"
        search = ResourceDocument.search().query("term", subjects__label=label)
        assert len(search.execute()) == 0

        subject = resource.subjects.first()
        subject.label = label
        subject.save()
        search = ResourceDocument.search().query("term", subjects__label=label)
        assert len(search.execute()) == subject.resources.count()

        # title: renaming re-indexes every resource sharing the title
        search = ResourceDocument.search().query(
            "match_phrase", title=resource.title.main_title
        )
        assert len(search.execute()) == resource.title.resources.count()

        title = resource.title
        title.main_title = "radical translations"
        title.save()
        search = ResourceDocument.search().query(
            "match_phrase", title=resource.title.main_title
        )
        assert len(search.execute()) == title.resources.count()

        # classification edition term
        label = "pytest"
        search = ResourceDocument.search().query(
            "term", classifications__edition__label=label
        )
        assert len(search.execute()) == 0

        edition = resource.classifications.first().edition
        edition.label = label
        edition.save()
        search = ResourceDocument.search().query(
            "term", classifications__edition__label=label
        )
        assert len(search.execute()) == edition.resources.count()

        # contributor agent name (>= because other fixtures may match too)
        contribution = resource.contributions.first()
        agent = contribution.agent
        search = ResourceDocument.search().query(
            "match", contributions__agent__name=agent.name
        )
        assert len(search.execute()) >= agent.contributed_to.count()

        agent.name = "change agent display name"
        agent.save()
        contribution.save()
        search = ResourceDocument.search().query(
            "match", contributions__agent__name=agent.name
        )
        assert len(search.execute()) >= agent.contributed_to.count()

        # language label
        label = "flemish"
        search = ResourceDocument.search().query(
            "term", languages_language__label=label
        )
        assert len(search.execute()) == 0

        language = resource.languages.first().language
        language.label = label
        language.save()
        search = ResourceDocument.search().query(
            "term", languages_language__label=label
        )
        assert len(search.execute()) == language.resources.count()

        # fictional place name
        label = "nowhere"
        search = ResourceDocument.search().query("match", places__fictional_place=label)
        assert len(search.execute()) == 0

        rp = resource.places.first()
        rp.fictional_place = label
        rp.save()
        search = ResourceDocument.search().query("match", places__fictional_place=label)
        assert len(search.execute()) == 1
    @pytest.mark.usefixtures("entry_original")
    def test_prepare_title(self, entry_original):
        # the indexed title field contains the resource title and, once
        # it differs, the paratext title as well
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        paratext = Resource.paratext_from_gsx_entry(entry_original, resource)
        assert len(doc.prepare_title(resource)) == 1

        paratext.title.main_title = "a different title"
        paratext.title.save()
        paratext.save()
        assert len(doc.prepare_title(resource)) == 2
    @pytest.mark.usefixtures("entry_original")
    def test_prepare_form_genre(self, entry_original):
        # each fast-forms subject contributes two entries (resource and
        # paratext subjects are both indexed)
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        paratext = Resource.paratext_from_gsx_entry(entry_original, resource)
        assert len(doc.prepare_form_genre(resource)) == 0

        resource.subjects.add(search_term_or_none("fast-forms", "History"))
        assert len(doc.prepare_form_genre(resource)) == 2

        paratext.subjects.add(search_term_or_none("fast-forms", "Periodicals"))
        assert len(doc.prepare_form_genre(resource)) == 4
    @pytest.mark.usefixtures("entry_original")
    def test__get_subjects(self, entry_original):
        # _get_subjects filters by vocabulary prefix
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        assert len(doc._get_subjects(resource, ["fast-forms"])) == 0
        assert len(doc._get_subjects(resource, ["fast-topic"])) == 2
    @pytest.mark.usefixtures("entry_original")
    def test_prepare_subjects(self, entry_original):
        # paratext subjects are included in the resource's indexed subjects
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        paratext = Resource.paratext_from_gsx_entry(entry_original, resource)
        assert len(doc.prepare_subjects(resource)) == 2

        paratext.subjects.add(search_term_or_none("fast-topic", "Operas"))
        assert len(doc.prepare_subjects(resource)) == 4
    @pytest.mark.usefixtures("resource")
    def test_prepare_date_display(self, resource):
        # no date -> None; a saved date yields a display value
        doc = ResourceDocument()
        assert doc.prepare_date_display(resource) is None

        resource.date = Date(date_display="1971")
        resource.date.save()
        assert doc.prepare_date_display(resource) is not None
    @pytest.mark.usefixtures("entry_original")
    def test__get_resource(self, entry_original):
        # a paratext resolves to its parent resource
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        assert doc._get_resource(resource) == resource

        paratext = Resource.paratext_from_gsx_entry(entry_original, resource)
        assert doc._get_resource(paratext) == resource
    @pytest.mark.usefixtures("resource")
    def test_prepare_year(self, resource):
        # no date -> None; a single year -> [year]; a range -> two years
        doc = ResourceDocument()

        prepared_data = doc.prepare_year(resource)
        assert prepared_data is None

        resource.date = Date(date_display="1971")
        resource.date.save()
        prepared_data = doc.prepare_year(resource)
        assert prepared_data is not None
        assert prepared_data == [1971]

        resource.date = Date(date_display="1971/1972")
        resource.date.save()
        prepared_data = doc.prepare_year(resource)
        assert prepared_data is not None
        assert len(prepared_data) == 2
    @pytest.mark.usefixtures("entry_original")
    def test_prepare_summary(self, entry_original):
        # the resource's own summary is indexed alongside the paratext's
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        assert len(doc.prepare_summary(resource)) == 1

        resource.summary = "resource summary"
        assert len(doc.prepare_summary(resource)) == 2
    @pytest.mark.usefixtures("entry_original")
    def test_prepare_classifications_printing_publishing(self, entry_original):
        # each rt-ppt classification (on resource or paratext) adds two
        # indexed entries
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        paratext = Resource.paratext_from_gsx_entry(entry_original, resource)
        assert len(doc.prepare_classifications_printing_publishing(resource)) == 0

        resource.classifications.add(
            Classification(edition=search_term_or_none("rt-ppt", "Forgeries")),
            bulk=False,
        )
        assert len(doc.prepare_classifications_printing_publishing(resource)) == 2

        paratext.classifications.add(
            Classification(edition=search_term_or_none("rt-ppt", "Piracies")),
            bulk=False,
        )
        assert len(doc.prepare_classifications_printing_publishing(resource)) == 4
    @pytest.mark.usefixtures("entry_original")
    def test__get_classifications(self, entry_original):
        # a freshly imported resource has no classifications of any kind
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        assert len(doc._get_classifications(resource, "rt-ppt")) == 0
        assert len(doc._get_classifications(resource, "rt-tt")) == 0
        assert len(doc._get_classifications(resource, "rt-pt")) == 0
    @pytest.mark.usefixtures("entry_original")
    def test_prepare_classifications_translation(self, entry_original):
        # each rt-tt classification (on resource or paratext) adds two
        # indexed entries
        doc = ResourceDocument()
        resource = Resource.from_gsx_entry(entry_original)
        paratext = Resource.paratext_from_gsx_entry(entry_original, resource)
        assert len(doc.prepare_classifications_translation(resource)) == 0

        resource.classifications.add(
            Classification(edition=search_term_or_none("rt-tt", "Integral")),
            bulk=False,
        )
        assert len(doc.prepare_classifications_translation(resource)) == 2

        paratext.classifications.add(
            Classification(edition=search_term_or_none("rt-tt", "Partial")),
            bulk=False,
        )
        assert len(doc.prepare_classifications_translation(resource)) == 4
@pytest.mark.usefixtures("entry_original")
def test_prepare_classifications_paratext(self, entry_original):
    """Adding one paratext classification yields three prepared items."""
    document = ResourceDocument()
    res = Resource.from_gsx_entry(entry_original)
    para = Resource.paratext_from_gsx_entry(entry_original, res)

    assert len(document.prepare_classifications_paratext(res)) == 0

    para.classifications.add(
        Classification(edition=search_term_or_none("rt-pt", "Preface")),
        bulk=False,
    )
    assert len(document.prepare_classifications_paratext(res)) == 3
@pytest.mark.usefixtures("entry_original", "person")
def test_prepare_contributions(self, entry_original, person):
    """Paratext contributions are included; the third prepared agent is named "Anonymous"."""
    document = ResourceDocument()
    res = Resource.from_gsx_entry(entry_original)
    para = Resource.paratext_from_gsx_entry(entry_original, res)

    # The imported entry already provides three contributions.
    assert len(document.prepare_contributions(res)) == 3

    person.name = "<NAME>"
    person.save()
    extra = Contribution(resource=res, agent=person)
    extra.save()
    extra.roles.add(search_term_or_none("wikidata", "bookseller"))
    para.contributions.add(extra, bulk=False)

    assert len(document.prepare_contributions(res)) == 4
    prepared = document.prepare_contributions(res)[2]
    assert prepared["agent"]["name"] == "Anonymous"
@pytest.mark.usefixtures("entry_original")
def test_prepare_languages(self, entry_original):
    """Languages attached to the paratext are prepared alongside the resource's own."""
    document = ResourceDocument()
    res = Resource.from_gsx_entry(entry_original)
    para = Resource.paratext_from_gsx_entry(entry_original, res)

    # Two languages come from the imported entry itself.
    assert len(document.prepare_languages(res)) == 2

    para.languages.add(
        ResourceLanguage(language=search_term_or_none("iso639-2", "english")),
        bulk=False,
    )
    assert len(document.prepare_languages(res)) == 4
@pytest.mark.usefixtures("entry_original")
def test_prepare_places(self, entry_original):
    """A fictional place appears in the prepared output and annotates the address."""
    document = ResourceDocument()
    res = Resource.from_gsx_entry(entry_original)

    # Without a fictional place the key is absent entirely.
    assert "fictional_place" not in document.prepare_places(res)[0]

    place = res.places.first()
    place.fictional_place = "mordor"
    place.save()

    first_prepared = document.prepare_places(res)[0]
    # The address gains a parenthesised annotation once fictional_place is set.
    assert "(" in first_prepared["place"]["address"]
    assert first_prepared["fictional_place"] is not None
| StarcoderdataPython |
9724427 | from __future__ import unicode_literals
from django_shares.constants import Status
from django_shares.models import Share
from django_testing.testcases.users import SingleUserTestCase
from django_testing.user_utils import create_user
from test_models.models import TestSharedObjectModel
from test_models.models import TestSharedObjectModel2
class ShareTests(SingleUserTestCase):
    """Exercise the ``Share`` model, its manager helpers and state transitions."""

    def setUp(self):
        """Run once per test: create an extra user to act as the shared object."""
        super(ShareTests, self).setUp()
        self.shared_user = create_user()

    def tearDown(self):
        super(ShareTests, self).tearDown()
        self.shared_user.delete()

    def test_add_for_user(self):
        """Share a user object with a another user."""
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=self.shared_user)
        self.assertEqual(share.shared_object, self.shared_user)

    def test_create_for_non_user(self):
        """Test for creating an object share with with an unknown user."""
        details = {
            'first_name': 'Jimmy',
            'last_name': 'Buffet',
            'email': '<EMAIL>',
            'message': 'Share with me.',
            'status': Status.PENDING,
        }
        share = Share.objects.create_for_non_user(created_user=self.user,
                                                  shared_object=self.shared_user,
                                                  **details)
        # Every supplied detail should be persisted on the share.
        for attr, expected in details.items():
            self.assertEqual(getattr(share, attr), expected)

    def test_get_for_user(self):
        """Get shares for user."""
        recipient = create_user()
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=recipient,
                                              shared_object=self.shared_user)
        shares = Share.objects.get_for_user(user=recipient)
        self.assertEqual(len(shares), 1)
        self.assertEqual(shares[0], share)

    def test_get_for_user_id(self):
        """Get shares for a user id."""
        recipient = create_user()
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=recipient,
                                              shared_object=self.shared_user)
        shares = Share.objects.get_for_user_id(user_id=recipient.id)
        self.assertEqual(len(shares), 1)
        self.assertEqual(shares[0], share)

    def test_get_email(self):
        """Get shares by email."""
        recipient = create_user()
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=recipient,
                                              shared_object=self.shared_user)
        shares = Share.objects.get_by_email(email=recipient.email)
        self.assertEqual(len(shares), 1)
        self.assertEqual(shares[0], share)

    def test_get_by_token(self):
        """Get a share by token."""
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=self.shared_user)
        fetched = Share.objects.get_by_token(token=share.token)
        self.assertEqual(share, fetched)

    def test_get_by_shared_object(self):
        """Get shares for a shared object."""
        target = create_user()
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=target)
        shares = Share.objects.get_by_shared_object(obj=target)
        self.assertEqual(len(shares), 1)
        self.assertEqual(shares[0], share)

    def test_get_by_shared_objects(self):
        """Get shares for a shared objects."""
        obj_1 = TestSharedObjectModel.objects.create()
        obj_2 = TestSharedObjectModel2.objects.create()
        user_2 = create_user()

        # Two users each share both objects -> four shares total.
        share_u1_o1 = Share.objects.create_for_user(created_user=self.user,
                                                    for_user=self.user,
                                                    shared_object=obj_1)
        share_u1_o2 = Share.objects.create_for_user(created_user=self.user,
                                                    for_user=self.user,
                                                    shared_object=obj_2)
        share_u2_o1 = Share.objects.create_for_user(created_user=user_2,
                                                    for_user=user_2,
                                                    shared_object=obj_1)
        share_u2_o2 = Share.objects.create_for_user(created_user=user_2,
                                                    for_user=user_2,
                                                    shared_object=obj_2)

        shares = list(Share.objects.get_by_shared_objects(objs=[obj_1, obj_2]))
        self.assertEqual(len(shares), 4)
        for expected in (share_u1_o1, share_u1_o2, share_u2_o1, share_u2_o2):
            self.assertIn(expected, shares)

        # Filtering by user narrows the result to that user's shares.
        shares = Share.objects.get_by_shared_objects(objs=[obj_1, obj_2],
                                                     for_user=user_2)
        self.assertEqual(len(shares), 2)
        self.assertIn(share_u2_o1, shares)
        self.assertIn(share_u2_o2, shares)

    def test_accept_share(self):
        """Test for accepting share."""
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=self.shared_user)
        self.assertEqual(share.status, Status.PENDING)

        first_name = '<NAME>'
        share.accept(first_name=first_name)
        self.assertEqual(share.status, Status.ACCEPTED)
        self.assertEqual(share.first_name, first_name)

    def test_decline_share(self):
        """Test for accepting share."""
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=self.shared_user)
        share.decline()
        self.assertEqual(share.status, Status.DECLINED)

    def test_inactivate(self):
        """Test for inactivating a share."""
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=self.shared_user)
        share.inactivate()
        self.assertEqual(share.status, Status.INACTIVE)

    def test_is_accepted(self):
        """Test the is_accepted method."""
        self.assertTrue(Share(status=Status.ACCEPTED).is_accepted())

    def test_is_pending(self):
        """Test the is_pending method."""
        self.assertTrue(Share(status=Status.PENDING).is_pending())

    def test_is_declined(self):
        """Test the is_declined method."""
        self.assertTrue(Share(status=Status.DECLINED).is_declined())

    def test_copy(self):
        """Test for inactivating a share."""
        share = Share.objects.create_for_user(created_user=self.user,
                                              for_user=self.user,
                                              shared_object=self.shared_user)
        duplicate = share.copy()
        # The copy must receive its own token.
        self.assertNotEqual(share.token, duplicate.token)

    def test_get_full_name_for_user(self):
        """Test get full name for a share for existing user."""
        first_name = 'John'
        last_name = 'Doe'
        user_2 = create_user(first_name=first_name, last_name=last_name)
        share = Share.objects.create_for_user(created_user=user_2,
                                              for_user=user_2,
                                              shared_object=self.shared_user)
        self.assertEqual(share.get_full_name(), '{0} {1}'.format(first_name,
                                                                 last_name))

    def test_get_full_name_for_non_user(self):
        """Test get full name for a share for non user."""
        first_name = 'John'
        last_name = 'Doe'
        share = Share.objects.create_for_non_user(created_user=self.user,
                                                  email='<EMAIL>',
                                                  first_name=first_name,
                                                  last_name=last_name,
                                                  shared_object=self.shared_user)
        self.assertEqual(share.get_full_name(), '{0} {1}'.format(first_name,
                                                                 last_name))

    def test_get_first_name(self):
        """Test get first name for a share."""
        first_name = 'John'
        self.assertEqual(Share(first_name=first_name).get_first_name(),
                         first_name)

    def test_get_last_name(self):
        """Test get last name for a share."""
        last_name = 'Doe'
        self.assertEqual(Share(last_name=last_name).get_last_name(), last_name)

    def test_create_many(self):
        """Test for creating many objects at once. This is different from
        bulk_create. See ``create_many`` doc.
        """
        user = create_user()
        objs = [TestSharedObjectModel.objects.create() for _ in range(3)]

        # There shouldn't be any shares here.
        for obj in objs:
            self.assertEqual(obj.shares.count(), 0)

        ShareClass = TestSharedObjectModel.get_share_class()
        ShareClass.objects.create_many(objs=objs,
                                       for_user=user,
                                       created_user=user,
                                       status=Status.ACCEPTED)

        # Exactly one share per object afterwards.
        for obj in objs:
            self.assertEqual(obj.shares.count(), 1)

    def test_create_many_prevent_duplicate_share(self):
        """Test the ``create_many`` method that ensure no duplicate shares are
        created for a single user.
        """
        user = create_user()
        obj_1 = TestSharedObjectModel.objects.create()
        obj_1.shares.create_for_user(for_user=user,
                                     created_user=user,
                                     status=Status.ACCEPTED)
        self.assertEqual(obj_1.shares.count(), 1)

        obj_2 = TestSharedObjectModel.objects.create()
        obj_3 = TestSharedObjectModel.objects.create()
        objs = [obj_1, obj_2, obj_3]

        ShareClass = TestSharedObjectModel.get_share_class()
        ShareClass.objects.create_many(objs=objs,
                                       for_user=user,
                                       created_user=user,
                                       status=Status.ACCEPTED)

        # obj_1's pre-existing share is not duplicated.
        for obj in objs:
            self.assertEqual(obj.shares.count(), 1)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.