blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
334249cec979a5fe4f70253adb78273457ef3d2b | 8dbde2f77c1c213a3477d21979d1cbec9aba8df7 | /baselines/her_pddl/ddpg.py | 4a33ccf8a99aaf748725c80e65bd41828b240775 | [
"MIT"
] | permissive | fabawi/goal_conditioned_RL_baselines | 04694e216d5024093c7135e68459338bc9ed6926 | 915fc875fd8cc75accd0804d99373916756f726e | refs/heads/master | 2023-01-08T12:11:57.130496 | 2020-08-14T13:28:52 | 2020-08-14T13:28:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,984 | py | from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from baselines import logger
from baselines.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, prob_dist2discrete)
from baselines.her_pddl.normalizer import Normalizer
from baselines.her_pddl.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
from baselines.template.policy import Policy
from baselines.her_pddl.obs2preds import Obs2PredsModel, Obs2PredsBuffer, Obs2PredsAttnModel
def dims_to_shapes(input_dims):
    """Map each named dimension to a tensor shape: d -> (d,) for d > 0, else the scalar shape ()."""
    shapes = {}
    for name, dim in input_dims.items():
        shapes[name] = (dim,) if dim > 0 else ()
    return shapes
class DDPG_PDDL(Policy):
@store_args
def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
             Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
             rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
             sample_transitions, gamma, n_preds, reuse=False, **kwargs):
    """Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).

    Args:
        input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
            actions (u)
        buffer_size (int): number of transitions that are stored in the replay buffer
        hidden (int): number of units in the hidden layers
        layers (int): number of hidden layers
        network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')
        polyak (float): coefficient for Polyak-averaging of the target network
        batch_size (int): batch size for training
        Q_lr (float): learning rate for the Q (critic) network
        pi_lr (float): learning rate for the pi (actor) network
        norm_eps (float): a small value used in the normalizer to avoid numerical instabilities
        norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]
        max_u (float): maximum action magnitude, i.e. actions are in [-max_u, max_u]
        action_l2 (float): coefficient for L2 penalty on the actions
        clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]
        scope (str): the scope used for the TensorFlow graph
        T (int): the time horizon for rollouts
        rollout_batch_size (int): number of parallel rollouts per DDPG agent
        subtract_goals (function): function that subtracts goals from each other
        relative_goals (boolean): whether or not relative goals should be fed into the network
        clip_pos_returns (boolean): whether or not positive returns should be clipped
        clip_return (float): clip returns to be in [-clip_return, clip_return]
        sample_transitions (function) function that samples from the replay buffer
        gamma (float): gamma used for Q learning updates
        n_preds (int): number of predicates predicted by the obs2preds representation model
        reuse (boolean): whether or not the networks should be reused
    """
    Policy.__init__(self, input_dims, T, rollout_batch_size, **kwargs)
    self.hidden = hidden
    self.layers = layers
    self.max_u = max_u
    self.network_class = network_class
    self.sample_transitions = sample_transitions
    self.scope = scope
    self.subtract_goals = subtract_goals
    self.relative_goals = relative_goals
    self.clip_obs = clip_obs
    self.Q_lr = Q_lr
    self.pi_lr = pi_lr
    self.batch_size = batch_size
    self.buffer_size = buffer_size
    self.clip_pos_returns = clip_pos_returns
    self.gamma = gamma
    self.polyak = polyak
    self.clip_return = clip_return
    self.norm_eps = norm_eps
    self.norm_clip = norm_clip
    self.action_l2 = action_l2
    self.n_preds = n_preds
    # The representation model reuses the critic learning rate.
    self.rep_lr = Q_lr
    if self.clip_return is None:
        self.clip_return = np.inf
    self.create_actor_critic = import_function(self.network_class)
    self.rep_network = import_function(kwargs['rep_network_class'])
    # Create network: staging area + placeholders feed mini-batches to the graph
    # without per-step feed_dicts (see _grads()).
    with tf.variable_scope(self.scope):
        self.staging_tf = StagingArea(
            dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
            shapes=list(self.stage_shapes.values()))
        self.buffer_ph_tf = [
            tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
        self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
        self._create_network(reuse=reuse)
    # Configure the replay buffer ('o' stores T+1 steps per episode, other keys T steps).
    buffer_shapes = {key: (self.T if key != 'o' else self.T+1, *self.input_shapes[key])
                     for key, val in self.input_shapes.items()}
    buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
    buffer_shapes['ag'] = (self.T+1, self.dimg)
    # Round the buffer size down to a whole number of rollout batches.
    buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
    self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
    # Create the observation->predicates representation network and its replay buffer.
    with tf.variable_scope(self.scope):
        self._create_rep_network(reuse=reuse)
    self.obs2preds_buffer = Obs2PredsBuffer(buffer_len=2000)
def _random_action(self, n):
    """Sample n actions uniformly from the valid action box [-max_u, max_u]^dimu."""
    return np.random.uniform(-self.max_u, self.max_u, (n, self.dimu))
def _preprocess_og(self, o, ag, g):
    """Clip observations and goals; if relative_goals is set, goals are first
    re-expressed relative to the achieved goals via subtract_goals."""
    if self.relative_goals:
        original_shape = g.shape
        flat_g = g.reshape(-1, self.dimg)
        flat_ag = ag.reshape(-1, self.dimg)
        g = self.subtract_goals(flat_g, flat_ag).reshape(*original_shape)
    clipped_o = np.clip(o, -self.clip_obs, self.clip_obs)
    clipped_g = np.clip(g, -self.clip_obs, self.clip_obs)
    return clipped_o, clipped_g
def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,
                compute_Q=False, exploit=True):
    """Compute actions for a batch of observations/goals.

    When exploit is True both exploration terms are forced to zero. Otherwise
    Gaussian noise (noise_eps * max_u) is added to the policy output, and with
    probability random_eps each action is replaced by a uniformly random one.
    Returns the action array, or [actions, Q-values] when compute_Q is True.
    """
    noise_eps = noise_eps if not exploit else 0.
    random_eps = random_eps if not exploit else 0.
    o, g = self._preprocess_og(o, ag, g)
    policy = self.target if use_target_net else self.main
    # values to compute
    vals = [policy.pi_tf]
    if compute_Q:
        vals += [policy.Q_pi_tf]
    # feed (u_tf is unused by pi_tf/Q_pi_tf but must be fed; zeros suffice)
    feed = {
        policy.o_tf: o.reshape(-1, self.dimo),
        policy.g_tf: g.reshape(-1, self.dimg),
        policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
    }
    ret = self.sess.run(vals, feed_dict=feed)
    # action postprocessing
    u = ret[0]
    noise = noise_eps * self.max_u * np.random.randn(*u.shape)  # gaussian noise
    u += noise
    u = np.clip(u, -self.max_u, self.max_u)
    u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u)  # eps-greedy
    if u.shape[0] == 1:
        # Unbatch single-rollout results for convenience.
        u = u[0]
    u = u.copy()
    ret[0] = u
    if len(ret) == 1:
        return ret[0]
    else:
        return ret
def store_episode(self, episode_batch, update_stats=True):
    """Store a batch of episodes in the replay buffer and optionally update
    the observation/goal normalizers with HER-resampled transitions.

    episode_batch: array of batch_size x (T or T+1) x dim_key
                   'o' is of size T+1, others are of size T
    """
    self.buffer.store_episode(episode_batch)
    if update_stats:
        # add transitions to normalizer
        episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
        episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
        num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
        transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
        o, o_2, g, ag = transitions['o'], transitions['o_2'], transitions['g'], transitions['ag']
        transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
        # No need to preprocess the o_2 and g_2 since this is only used for stats
        self.o_stats.update(transitions['o'])
        self.g_stats.update(transitions['g'])
        self.o_stats.recompute_stats()
        self.g_stats.recompute_stats()
def get_current_buffer_size(self):
    """Number of transitions currently stored in the replay buffer."""
    return self.buffer.get_current_size()
def _sync_optimizers(self):
    """Synchronize critic and actor MPI-Adam optimizer state across workers."""
    self.Q_adam.sync()
    self.pi_adam.sync()

def _sync_rep_optimizers(self):
    """Synchronize the representation model's MPI-Adam state across workers."""
    self.rep_adam.sync()
def _grads(self):
    """Evaluate losses and flattened gradients from the currently staged batch.

    Returns (critic_loss, actor_loss, Q_grad, pi_grad).
    NOTE(review): the second fetch is self.main.Q_pi_tf (per-sample Q of the
    actor's action), not self.pi_loss_tf — this mirrors upstream baselines,
    where the value reported as 'actor loss' is actually Q_pi; confirm before
    relying on it as a true loss.
    """
    # Avoid feed_dict here for performance! (Inputs come from the staging area.)
    critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([
        self.Q_loss_tf,
        self.main.Q_pi_tf,
        self.Q_grad_tf,
        self.pi_grad_tf
    ])
    return critic_loss, actor_loss, Q_grad, pi_grad
def _update(self, Q_grad, pi_grad):
    """Apply one MPI-Adam step to the critic and actor using their flattened gradients."""
    # Removed an unused function-local `import os` that only served a
    # commented-out debug print.
    self.Q_adam.update(Q_grad, self.Q_lr)
    self.pi_adam.update(pi_grad, self.pi_lr)
def sample_batch(self):
    """Sample a training batch from the replay buffer, preprocess observations
    and goals, and return the values ordered to match self.stage_shapes."""
    transitions = self.buffer.sample(self.batch_size)
    o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
    ag, ag_2 = transitions['ag'], transitions['ag_2']
    transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
    transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
    # Order must match the staging area's dtypes/shapes (see __init__).
    transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
    return transitions_batch
def stage_batch(self, batch=None):
    """Push a batch into the TF staging area (sampling one from the replay
    buffer when none is given)."""
    if batch is None:
        batch = self.sample_batch()
    assert len(self.buffer_ph_tf) == len(batch)
    self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))
def train(self, stage=True):
    """Run one DDPG optimization step; returns (critic_loss, actor_loss)."""
    if stage:
        self.stage_batch()
    q_loss, pi_loss, q_grad, pi_grad = self._grads()
    self._update(q_grad, pi_grad)
    return q_loss, pi_loss
def train_representation(self):
    """Run one gradient step of the observation->predicates model on a batch
    sampled from the obs2preds buffer.

    Returns (mean CE loss after the step, per-sample losses, buffer indexes).
    """
    rep_batch_size = 64
    batch = self.obs2preds_buffer.sample_batch(rep_batch_size)
    indexes = batch['indexes']
    feed_dict = {self.obs2preds_model.inputs_o: batch['obs'],
                 self.obs2preds_model.inputs_g: batch['goals'],
                 self.obs2preds_model.preds: batch['preds']}
    rep_grad = self.sess.run([self.rep_grad_tf], feed_dict=feed_dict)[0]
    self.rep_adam.update(rep_grad, self.rep_lr)
    # Losses are re-evaluated after the update, so the returned values reflect
    # the post-step model (not the loss the gradient was computed from).
    _, celosses_after = self.predict_representation(batch)
    celoss = np.mean(celosses_after)
    return celoss, celosses_after, indexes
def predict_representation(self, batch):
    """Predict predicate values for a batch of observations/goals.

    batch: dict with 'obs' and 'goals'; when 'preds' (ground truth) is present,
    per-sample cross-entropy losses are also computed.
    Returns (discretized predictions, per-sample losses or None).
    """
    feed_dict = {self.obs2preds_model.inputs_o: batch['obs'],
                 self.obs2preds_model.inputs_g: batch['goals']}
    pred_dist = self.sess.run([self.obs2preds_model.prob_out],
                              feed_dict=feed_dict)
    losses = None
    if 'preds' in batch:
        preds = batch['preds']
        if len(preds.shape) != 3:
            # Convert binary predicate labels to one-hot distributions over {0, 1}.
            preds_probdist = np.zeros(shape=[preds.shape[0], preds.shape[1], 2])
            for j, p in enumerate(preds):
                for i, v in enumerate(p):
                    preds_probdist[j][i][int(v)] = 1
            preds = preds_probdist
        feed_dict.update({self.obs2preds_model.preds: preds})
        pred_dist, loss = self.sess.run([self.obs2preds_model.prob_out, self.obs2preds_model.celosses],
                                        feed_dict=feed_dict)
        loss = np.mean(loss, axis=-1)
        losses = np.reshape(loss, newshape=(preds.shape[0]))
    preds = prob_dist2discrete(pred_dist)
    return preds, losses
def _init_target_net(self):
    """Copy the main network weights into the target network."""
    self.sess.run(self.init_target_net_op)

def update_target_net(self):
    """Polyak-average the main network weights into the target network."""
    self.sess.run(self.update_target_net_op)

def clear_buffer(self):
    """Drop all transitions from the replay buffer."""
    self.buffer.clear_buffer()
def _vars(self, scope):
    """Trainable variables under self.scope/scope; asserts the set is non-empty."""
    res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
    assert len(res) > 0
    return res

def _global_vars(self, scope):
    """All global variables under self.scope/scope (may be empty)."""
    res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
    return res
def _create_rep_network(self, reuse=False):
    """Build the observation->predicates model plus its mean CE loss, flattened
    gradient, and MPI-Adam optimizer (variables live under scope 'obs2preds')."""
    self.obs2preds_model = self.rep_network(self.n_preds, self.dimo, self.dimg)
    self.rep_loss_tf = tf.reduce_mean(self.obs2preds_model.celoss)
    rep_grads_tf = tf.gradients(self.rep_loss_tf, self._vars('obs2preds'))
    self.rep_grad_tf = flatten_grads(grads=rep_grads_tf, var_list=self._vars('obs2preds'))
    self.rep_adam = MpiAdam(self._vars('obs2preds'), scale_grad_by_procs=False)
    self._sync_rep_optimizers()
def _create_network(self, reuse=False):
    """Build the full DDPG graph: normalizers, main/target actor-critic
    networks, losses, flattened gradients, MPI-Adam optimizers and the
    target-network init/Polyak-update ops; then initialize all variables."""
    logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
    self.sess = tf.get_default_session()
    if self.sess is None:
        self.sess = tf.InteractiveSession()
    # running averages
    with tf.variable_scope('o_stats') as vs:
        if reuse:
            vs.reuse_variables()
        self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
    with tf.variable_scope('g_stats') as vs:
        if reuse:
            vs.reuse_variables()
        self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
    # mini-batch sampling: unpack the staged tensors in stage_shapes order.
    batch = self.staging_tf.get()
    batch_tf = OrderedDict([(key, batch[i])
                            for i, key in enumerate(self.stage_shapes.keys())])
    batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
    # networks
    with tf.variable_scope('main') as vs:
        if reuse:
            vs.reuse_variables()
        self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
        vs.reuse_variables()
    with tf.variable_scope('target') as vs:
        if reuse:
            vs.reuse_variables()
        # Target network scores next-step observations/goals.
        target_batch_tf = batch_tf.copy()
        target_batch_tf['o'] = batch_tf['o_2']
        target_batch_tf['g'] = batch_tf['g_2']
        self.target = self.create_actor_critic(
            target_batch_tf, net_type='target', **self.__dict__)
        vs.reuse_variables()
    assert len(self._vars("main")) == len(self._vars("target"))
    # loss functions: clipped TD target for the critic, -Q (+ action L2) for the actor.
    target_Q_pi_tf = self.target.Q_pi_tf
    clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
    target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
    self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
    self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
    self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
    Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
    pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
    assert len(self._vars('main/Q')) == len(Q_grads_tf)
    assert len(self._vars('main/pi')) == len(pi_grads_tf)
    self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
    self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
    self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
    self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
    # optimizers
    self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
    self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
    # polyak averaging
    self.main_vars = self._vars('main/Q') + self._vars('main/pi')
    self.target_vars = self._vars('target/Q') + self._vars('target/pi')
    self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
    self.init_target_net_op = list(
        map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
    self.update_target_net_op = list(
        map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
    # initialize all variables
    tf.variables_initializer(self._global_vars('')).run()
    self._sync_optimizers()
    self._init_target_net()
def logs(self, prefix=''):
    """Return normalizer statistics as (key, value) pairs, prefixed with
    `prefix + '/'` when a non-empty prefix without a trailing '/' is given."""
    logs = []
    logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
    logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
    logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
    logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
    # Bug fix: `prefix is not ''` compared object identity against a string
    # literal (a SyntaxWarning on modern CPython and unreliable in general);
    # string content must be compared with !=.
    if prefix != '' and not prefix.endswith('/'):
        return [(prefix + '/' + key, val) for key, val in logs]
    else:
        return logs
def __getstate__(self):
    """Our policies can be loaded from pkl, but after unpickling you cannot continue training.
    """
    excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',
                         'main', 'target', 'lock', 'env', 'sample_transitions',
                         'stage_shapes', 'create_actor_critic',
                         'obs2preds_buffer', 'obs2preds_model']
    # Drop every attribute whose name contains one of the excluded substrings
    # (TF tensors/ops, sessions, replay buffers, ... are not picklable).
    state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}
    state['buffer_size'] = self.buffer_size
    # Snapshot all TF variable values, except any buffer-related variables.
    state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name and 'obs2preds_buffer' not in x.name])
    return state
def __setstate__(self, state):
    """Rebuild the policy from a pickled state dict (see __getstate__):
    re-run __init__, restore normalizer stats, then load TF variable values."""
    if 'sample_transitions' not in state:
        # We don't need this for playing the policy.
        state['sample_transitions'] = None
    self.__init__(**state)
    # set up stats (they are overwritten in __init__)
    for k, v in state.items():
        if k[-6:] == '_stats':
            self.__dict__[k] = v
    # load TF variables (same filtering/order as the snapshot in __getstate__)
    vars = [x for x in self._global_vars('') if 'buffer' not in x.name and 'obs2preds_buffer' not in x.name]
    assert(len(vars) == len(state["tf"]))
    node = [tf.assign(var, val) for var, val in zip(vars, state["tf"])]
    self.sess.run(node)
| [
"eppe@informatik.uni-hamburg.de"
] | eppe@informatik.uni-hamburg.de |
c735627231131ebf3d41f8b0d0b2c4e1b2f91659 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/kubevirt_vm.py | ddcab20e9863af4105ed6489bcd7c4e7021b0e5a | [
"GPL-3.0-only",
"MIT",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 16,645 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_vm
short_description: Manage KubeVirt virtual machine
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machines.
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
- "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
- "I(absent) - Remove a virtual machine."
- "I(running) - Create or update a virtual machine and run it."
- "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
default: "present"
choices:
- present
- absent
- running
- stopped
type: str
name:
description:
- Name of the virtual machine.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine exists.
required: true
type: str
ephemeral:
description:
- If (true) ephemeral virtual machine will be created. When destroyed it won't be accessible again.
- Works only with C(state) I(present) and I(absent).
type: bool
default: false
datavolumes:
description:
- "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
type: list
template:
description:
- "Name of Template to be used in creation of a virtual machine."
type: str
template_parameters:
description:
- "New values of parameters from Template."
type: dict
extends_documentation_fragment:
- community.kubernetes.k8s_auth_options
- community.general.kubevirt_vm_options
- community.general.kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Start virtual machine 'myvm'
kubevirt_vm:
state: running
name: myvm
namespace: vms
- name: Create virtual machine 'myvm' and start it
kubevirt_vm:
state: running
name: myvm
namespace: vms
memory: 64Mi
cpu_cores: 1
bootloader: efi
smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
cpu_model: Conroe
headless: true
hugepage_size: 2Mi
tablets:
- bus: virtio
name: tablet1
cpu_limit: 3
cpu_shares: 2
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Create virtual machine 'myvm' with multus network interface
kubevirt_vm:
name: myvm
namespace: vms
memory: 512M
interfaces:
- name: default
bridge: {}
network:
pod: {}
- name: mynet
bridge: {}
network:
multus:
networkName: mynetconf
- name: Combine inline definition with Ansible parameters
kubevirt_vm:
# Kubernetes specification:
definition:
metadata:
labels:
app: galaxy
service: web
origin: vmware
# Ansible parameters:
state: running
name: myvm
namespace: vms
memory: 64M
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start ephemeral virtual machine 'myvm' and wait to be running
kubevirt_vm:
ephemeral: true
state: running
wait: true
wait_timeout: 180
name: myvm
namespace: vms
memory: 64M
labels:
kubevirt.io/vm: myvm
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Start fedora vm with cloud init
kubevirt_vm:
state: running
wait: true
name: myvm
namespace: vms
memory: 1024M
cloud_init_nocloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
path: /disk/fedora.qcow2
disk:
bus: virtio
node_affinity:
soft:
- weight: 1
term:
match_expressions:
- key: security
operator: In
values:
- S2
- name: Create virtual machine with datavolume and specify node affinity
kubevirt_vm:
name: myvm
namespace: default
memory: 1024Mi
datavolumes:
- name: mydv
source:
http:
url: https://url/disk.qcow2
pvc:
accessModes:
- ReadWriteOnce
storage: 5Gi
node_affinity:
hard:
- term:
match_expressions:
- key: security
operator: In
values:
- S1
- name: Remove virtual machine 'myvm'
kubevirt_vm:
state: absent
name: myvm
namespace: vms
'''
RETURN = '''
kubevirt_vm:
description:
- The virtual machine dictionary specification returned by the API.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible_collections.community.kubernetes.plugins.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible_collections.community.general.plugins.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
VM_SPEC_DEF_ARG_SPEC
)
VM_ARG_SPEC = {
'ephemeral': {'type': 'bool', 'default': False},
'state': {
'type': 'str',
'choices': [
'present', 'absent', 'running', 'stopped'
],
'default': 'present'
},
'datavolumes': {'type': 'list'},
'template': {'type': 'str'},
'template_parameters': {'type': 'dict'},
}
# Which params (can) modify 'spec:' contents of a VM:
VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
class KubeVirtVM(KubeVirtRawModule):
@property
def argspec(self):
    """Build the module argument spec: k8s auth options plus the common and
    VM-specific KubeVirt options (VM_COMMON_ARG_SPEC, VM_ARG_SPEC)."""
    argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
    argument_spec.update(VM_COMMON_ARG_SPEC)
    argument_spec.update(VM_ARG_SPEC)
    return argument_spec
@staticmethod
def fix_serialization(obj):
    """Convert an SDK object to a plain dict when it supports to_dict();
    pass anything else through unchanged."""
    return obj.to_dict() if (obj and hasattr(obj, 'to_dict')) else obj
def _wait_for_vmi_running(self):
    """Watch VirtualMachineInstance events until this VMI reports phase
    'Running'; fail the module if wait_timeout elapses first."""
    for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
        entity = event['object']
        if entity.metadata.name != self.name:
            # Event for a different VMI in the same namespace.
            continue
        status = entity.get('status', {})
        phase = status.get('phase', None)
        if phase == 'Running':
            return entity
    self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
def _wait_for_vm_state(self, new_state):
    """Watch VirtualMachine events until status.created/status.ready both match
    the desired state ('running' -> True/True, otherwise False/False); fail the
    module if wait_timeout elapses first."""
    if new_state == 'running':
        want_created = want_ready = True
    else:
        want_created = want_ready = False
    for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
        entity = event['object']
        if entity.metadata.name != self.name:
            continue
        status = entity.get('status', {})
        created = status.get('created', False)
        ready = status.get('ready', False)
        if (created, ready) == (want_created, want_ready):
            return entity
    self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
              "Maybe try a higher wait_timeout value?".format(new_state))
def manage_vm_state(self, new_state, already_changed):
    """Patch spec.running so the cluster starts/stops the VM to match new_state.

    already_changed: True when a CRUD operation in this run already wrote the
    desired spec, in which case the patch is applied unconditionally instead of
    re-reading and comparing the live object.
    Returns (changed, k8s_object).
    """
    new_running = True if new_state == 'running' else False
    changed = False
    k8s_obj = {}
    if not already_changed:
        k8s_obj = self.get_resource(self._kind_resource)
        if not k8s_obj:
            self.fail("VirtualMachine object disappeared during module operation, aborting.")
        if k8s_obj.spec.get('running', False) == new_running:
            # Already in the desired state; nothing to do.
            return False, k8s_obj
        newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
        k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
                                           self.name, self.namespace, merge_type='merge')
        if err:
            self.fail_json(**err)
        else:
            changed = True
    if self.params.get('wait'):
        k8s_obj = self._wait_for_vm_state(new_state)
    return changed, k8s_obj
def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
    """Pull the template's default disk/volume/nic/network (named in the
    template's 'defaults.template.cnv.io/*' annotations) out of the processed
    template spec and record them in the `defaults` dict."""
    def set_template_default(default_name, default_name_index, definition_spec):
        # Annotation value names which entry of the list is the default.
        default_value = proccess_template['metadata']['annotations'][default_name]
        if default_value:
            values = definition_spec[default_name_index]
            default_values = [d for d in values if d.get('name') == default_value]
            defaults[default_name_index] = default_values
            if definition_spec[default_name_index] is None:
                definition_spec[default_name_index] = []
            definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
    devices = processedtemplate['spec']['template']['spec']['domain']['devices']
    spec = processedtemplate['spec']['template']['spec']
    set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
    set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
    set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
    set_template_default('defaults.template.cnv.io/network', 'networks', spec)
def construct_definition(self, kind, our_state, ephemeral):
    """Build the full API object definition for the VM/VMI, optionally based on
    an OpenShift Template, and merge it with the processed template objects."""
    definition = virtdict()
    processedtemplate = {}
    # Construct the API object definition:
    defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
    vm_template = self.params.get('template')
    if vm_template:
        # Find the template the VM should be created from:
        template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
        proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
        # Set proper template values taken from module option 'template_parameters':
        for k, v in self.params.get('template_parameters', {}).items():
            for parameter in proccess_template.parameters:
                if parameter.name == k:
                    parameter.value = v
        # Proccess the template:
        processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
        processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
        # Process defaults of the template:
        self._process_template_defaults(proccess_template, processedtemplate, defaults)
    if not ephemeral:
        # VirtualMachines carry a desired running flag; ephemeral VMIs do not.
        definition['spec']['running'] = our_state == 'running'
    template = definition if ephemeral else definition['spec']['template']
    template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
    dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
    return self.merge_dicts(definition, processedtemplate)
def execute_module(self):
    """Main entry point: reconcile the VM/VMI with the requested state and exit
    the module with changed/result/method."""
    # Parse parameters specific to this module:
    ephemeral = self.params.get('ephemeral')
    k8s_state = our_state = self.params.get('state')
    kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
    _used_params = [name for name in self.params if self.params[name] is not None]
    # Is 'spec:' getting changed?
    vm_spec_change = True if set(VM_SPEC_PARAMS).intersection(_used_params) else False
    changed = False
    crud_executed = False
    method = ''
    # Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
    if ephemeral:
        # Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
        if our_state == 'running':
            self.params['state'] = k8s_state = 'present'
        elif our_state == 'stopped':
            self.params['state'] = k8s_state = 'absent'
    else:
        if our_state != 'absent':
            self.params['state'] = k8s_state = 'present'
    # Start with fetching the current object to make sure it exists
    # If it does, but we end up not performing any operations on it, at least we'll be able to return
    # its current contents as part of the final json
    self.client = self.get_api_client()
    self._kind_resource = self.find_supported_resource(kind)
    k8s_obj = self.get_resource(self._kind_resource)
    if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
        self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
    # If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
    # Also check_mode always warrants a CRUD, as that'll produce a sane result
    if vm_spec_change or k8s_state == 'absent' or self.check_mode:
        definition = self.construct_definition(kind, our_state, ephemeral)
        result = self.execute_crud(kind, definition)
        changed = result['changed']
        k8s_obj = result['result']
        method = result['method']
        crud_executed = True
    if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
        # Waiting for k8s_state==absent is handled inside execute_crud()
        k8s_obj = self._wait_for_vmi_running()
    if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
        # State==present/absent doesn't involve any additional VMI state management and is fully
        # handled inside execute_crud() (including wait logic)
        patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
        changed = changed or patched
        if changed:
            method = method or 'patch'
    # Return from the module:
    self.exit_json(**{
        'changed': changed,
        'kubevirt_vm': self.fix_serialization(k8s_obj),
        'method': method
    })
def main():
    """Instantiate the module and run it, converting any uncaught exception
    into a module failure with a traceback."""
    app = KubeVirtVM()
    try:
        app.execute_module()
    except Exception as e:
        app.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| [
"joshuamadison+gh@gmail.com"
] | joshuamadison+gh@gmail.com |
216c67e07b22577d4087564e68fc2b07c9350297 | 02464d5cef58178de8fe68b32b109ac8a06a6d69 | /02_generate.py | b9eb61dc4eb714f48dc2b09ab5a0c8f72c2fe090 | [] | no_license | koharukoharu/AI_carcof | 27fa0325d140a4c29433c9b6438d9a20e759505d | d8d37ee1c1395ebf8c9c4503a4b749df4e6752c4 | refs/heads/master | 2020-04-10T20:23:11.349292 | 2019-01-11T09:13:29 | 2019-01-11T09:13:29 | 161,265,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # -*- coding: utf-8 -*-
from GenerateText import GenerateText
def generate_tweet():
    """Build a text generator and print a single generated sample."""
    print(GenerateText().generate())
if __name__ == '__main__':
generate_tweet()
| [
"masayoshi_sakino@waku-2.com"
] | masayoshi_sakino@waku-2.com |
557e30a0058eb90a9047425be82e7edde954fedc | 4309faae9913ca6e159468b7385201ed15a54e64 | /Travelling Salesman Problem.py | 3ce0d3fa8044716dc4084e8ee0e5a3b10c7c4a6d | [
"MIT"
] | permissive | anshuljain21120/Genetic-Algorithms | 69f27988d8edd466101d5fde1b3d53dc002613a0 | fe53ea259929af739f46881f7b394472d811d7bf | refs/heads/master | 2020-09-12T02:51:12.559464 | 2019-12-12T05:25:49 | 2019-12-12T05:25:49 | 222,276,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,795 | py | import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def fitness(chrm, distances):
    """Length of the closed tour visiting the cities in `chrm` order.

    Sums each consecutive leg plus the closing edge from the last city back
    to the first, looked up in the `distances` matrix.
    """
    total = distances[chrm[0]][chrm[len(chrm) - 1]]  # closing edge back to the start
    for here, there in zip(chrm, chrm[1:]):
        total += distances[here][there]
    return total
def randinit(n,lenchrm,dis):
    """Create the initial population: `n` distinct random tours of `lenchrm` cities.

    Returns a DataFrame with object-dtype 'Chromosome' (a city ordering as a
    list) and int64 'Fitness' (tour length under the `dis` matrix).
    """
    df = pd.DataFrame(np.zeros((n,2)),columns=["Chromosome","Fitness"], dtype=object)
    i = 0
    arr = list(range(lenchrm))
    # Keep shuffling until n unique permutations have been collected.
    # NOTE(review): if n exceeds the number of possible permutations this loop
    # never terminates — confirm callers always keep pop_size well below lenchrm!.
    while i < n:
        random.shuffle(arr)
        if arr not in list(df["Chromosome"]):
            df.at[i,"Chromosome"] = arr.copy()  # copy: `arr` keeps being shuffled in place
            df.at[i,"Fitness"] = fitness(arr,dis)
            i = i+1
    df["Fitness"] = df["Fitness"].astype('int64')
    return df
def distances(No_Of_Cities):
    """Build a symmetric random distance matrix with a zero diagonal.

    Each unordered pair (a, b) shares a single random distance drawn from
    [1000, 100000]; distance[a][a] stays 0.
    """
    distance = np.zeros((No_Of_Cities, No_Of_Cities))
    for a in range(No_Of_Cities):
        for b in range(No_Of_Cities):
            if b == a:
                continue
            # Only fill a pair the first time either orientation is visited.
            if distance[a][b] == 0 and distance[b][a] == 0:
                distance[a][b] = distance[b][a] = random.randint(1000, 100000)
    return distance
def crossover(chrm1, chrm2):
    """Order-style crossover producing two children.

    Each child keeps the first half of one parent, then fills the remaining
    slots with the other parent's genes (starting from the cut point,
    wrapping around) in order, skipping genes already present.
    """
    def _breed(head_parent, tail_parent):
        size = len(head_parent)
        cut = size // 2
        child = head_parent[:cut]
        for pos in range(cut, cut + size):
            gene = tail_parent[pos % size]
            if gene not in child:
                child.append(gene)
        return child

    return _breed(chrm1, chrm2), _breed(chrm2, chrm1)
def mutation(chrm):
    """With probability 1/len(chrm), swap two distinct positions of `chrm` in place.

    Always returns the same list object that was passed in.
    """
    size = len(chrm)
    if random.random() < 1 / size:
        first = random.randint(0, size - 1)
        second = random.randint(0, size - 1)
        while second == first:  # re-draw until the two positions differ
            second = random.randint(0, size - 1)
        chrm[first], chrm[second] = chrm[second], chrm[first]
    return chrm
def giveParent(pop):
    """Tournament selection: sample unique random chromosomes, return the two fittest.

    NOTE(review): the frame is pre-sized to 5 rows and the loop condition is
    `len(parents) <= 5`, so it only exits after a 6th row is appended via
    `.at` enlargement — six unique candidates are actually sampled.  Confirm
    whether a 5-way tournament was intended.
    """
    n = pop.shape[0]
    parents = pd.DataFrame(np.zeros((5,2)),columns=['Parents',"Fitness"], dtype=object)
    i = 0
    while len(parents) <= 5:
        r = random.randint(0, n-1)
        parent = pop.iloc[r, 0]
        fitness = pop.iloc[r,1]  # local name shadows the module-level fitness() function
        if parent not in list(parents["Parents"]):
            parents.at[i,"Parents"] = parent
            parents.at[i,"Fitness"] = fitness
            i = i+1
    # Lowest tour length first; hand back the two best candidates.
    parents = parents.sort_values(by=['Fitness'])
    return parents.iloc[0, 0], parents.iloc[1, 0]
def replaceWorst(p, dis, v=False):
    """One generation step: breed two children and overwrite the worst rows.

    Selects parents via giveParent, applies crossover and mutation, then
    replaces the two highest-fitness (worst) rows of a sorted copy of `p`
    with any child tour not already in the population.  `v` enables a debug
    print of the children.  Returns the new population; `p` is not modified.
    """
    pop = p.copy()
    p1, p2 = giveParent(pop)
    c1, c2 = crossover(p1, p2)
    c1 = mutation(c1)
    c2 = mutation(c2)
    if v:
        print('Child 1:{0}\tFitness: {1}\nChild 2:{2}\tFitness: {3}'.format(c1, fitness(c1,dis), c2, fitness(c2,dis)))
    # Ascending sort puts the worst chromosomes in the bottom rows; reset the
    # index so positional labels n-2 and n-1 address those rows.
    pop = pop.sort_values(by=['Fitness'])
    pop = pop.reset_index(drop=True)
    # Only insert a child if the identical tour is not already present.
    if c1 not in list(pop["Chromosome"]):
        pop.loc[p.shape[0]-2] = [c1, fitness(c1,dis)]
    if c2 not in list(pop["Chromosome"]):
        pop.loc[p.shape[0]-1] = [c2, fitness(c2,dis)]
    return pop
def bestFitness(pop):
    """Smallest tour length present in the population frame."""
    return min(pop['Fitness'])
def bestChromosome(pop):
    """Chromosome (first column) of the lowest-fitness row."""
    ranked = pop.sort_values(by=['Fitness'])
    return ranked.iat[0, 0]
# --- Interactive driver: evolve a tour for N random cities over 10000 generations ---
N = int(input('Value of N: '))
pop_size = int(input('Population Size: '))
show_progress = False
NUMBER_OF_CITIES = N;
dis = distances(N)
# Per-generation log of the best chromosome and its fitness.
generationInfo = pd.DataFrame(np.zeros((10000, 2)), columns=['Chromosome', 'Fitness'], dtype=object)
for generation in range(10000):
    if generation == 0:
        pop = randinit(pop_size, N, dis)
    generationInfo.loc[generation] = [bestChromosome(pop), bestFitness(pop)]
    if show_progress:
        print("\nGeneration {}".format(generation))
    # Steady-state GA: each generation replaces only the worst individuals.
    pop = replaceWorst(pop,dis,show_progress)
Optimal_Solution = [bestChromosome(pop), bestFitness(pop)]
print(Optimal_Solution)
import matplotlib.pyplot as plt
plt.plot(range(generationInfo.shape[0]), list(generationInfo['Fitness'])) | [
"noreply@github.com"
] | noreply@github.com |
580d51ce82ab0853343de7bbef2392334147e0e8 | 1f5a437354efeaffc4d5e04db9596d51398e63db | /cmake-build-debug/catkin_generated/pkg.develspace.context.pc.py | c795e5db2188c0fe3b8abcde3aaefad3d73382e3 | [] | no_license | fbjelonic/xbox_controller | 1bd9caadb5438ebf14993fa35e5a987be285507e | 9576f6afd8f515c1088420f46e060f9065a845f9 | refs/heads/master | 2021-04-24T00:07:25.985052 | 2021-01-24T19:35:32 | 2021-01-24T19:35:32 | 250,039,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/filip/banana_ws/src/xbox_controller/cmake-build-debug/devel/include;/home/filip/banana_ws/src/xbox_controller/include".split(';') if "/home/filip/banana_ws/src/xbox_controller/cmake-build-debug/devel/include;/home/filip/banana_ws/src/xbox_controller/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "xbox_controller"
PROJECT_SPACE_DIR = "/home/filip/banana_ws/src/xbox_controller/cmake-build-debug/devel"
PROJECT_VERSION = "0.0.0"
| [
"filip.bjelonic@gmail.com"
] | filip.bjelonic@gmail.com |
156d6f7fc512c8f3ba50b7135ffd548e1d30f08e | 8e75843fc2b27d50e1f8a95f0367a3a96a3dae30 | /Code/python_quote.py | a1cb9f69bbba8935805a704b36ca94ea7291b786 | [] | no_license | franklin-phan/CS-2-Tweet-Generator | 5f122e2aab7a6ee749feb888d094c8057671a7ee | fedb9ba46be3f31a1586f8d64986ec92c58296b6 | refs/heads/master | 2021-07-14T14:37:13.404088 | 2020-03-06T07:08:03 | 2020-03-06T07:08:03 | 236,772,553 | 0 | 0 | null | 2021-03-20T02:58:02 | 2020-01-28T15:47:39 | Python | UTF-8 | Python | false | false | 332 | py | import random
quotes = ("It's just a flesh wound.",
"He's not the Messiah. He's a very naughty boy!",
"THIS IS AN EX-PARROT!!")
def random_python_quote():
    """Pick one of the canned Monty Python quotes uniformly at random."""
    index = random.randint(0, len(quotes) - 1)
    return quotes[index]
if __name__ == '__main__':
    quote = random_python_quote()
    # FIX: the original ended with a bare `print` expression, which evaluates the
    # built-in without calling it, so the chosen quote was never displayed.
    print(quote)
"franklin.phan123@gmail.com"
] | franklin.phan123@gmail.com |
a650fcc83f32dd0898f953ec683b1b54eb77b733 | 233f97c6f360d478bf975016dd9e9c2be4a64adb | /guvi3.py | 6dd143242eb16cf5b6ec3091f1ddba172fd1f82f | [] | no_license | unknownboyy/GUVI | 3dbd1bb2bc6b3db52f5f79491accd6c56a2dec45 | d757dd473c4f5eef526a516cf64a1757eb235869 | refs/heads/master | 2020-03-27T00:07:12.449280 | 2019-03-19T12:57:03 | 2019-03-19T12:57:03 | 145,595,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | def check(n):
count = 0
for i in str(n):
count+=int(i)
if str(count)[0]=='1':
return count
else:
return False
# Reads an upper bound n, then walks the sequence 800, 802, 806, 812, ...
# (the step grows by 2 each iteration).  For every term, check() returns the
# digit sum when its decimal form starts with '1' (otherwise False); those
# digit sums are collected and printed.
n = int(input())
l = [8]  # NOTE(review): seeded with a magic 8 — source of this value unclear
c = 0
diff = 2
curr = 800
while curr+diff<=n:
    curr+=diff
    w = check(curr)
    if w!=False:
        l.append(w)
    diff+=2
    c+=1
print(*l)
print(c) | [
"ankitagrawal11b@gmail.com"
] | ankitagrawal11b@gmail.com |
c6b5a89e10f3042dbf3e41d7c89e392ca76b813f | a5668837dbc639e87460c3c725539e97a2698667 | /chapter-4/7_build_order.py | 1929fc9bc52c99a26ee48f889ac4ff1d1ea6e693 | [] | no_license | shanminlin/Cracking_the_Coding_Interview | 6ea2a4a103fcd5ebcac956f715d15b7593587a69 | 133165d879a76f86ba0fa3fea723203118e9ef11 | refs/heads/master | 2022-09-12T21:28:08.926935 | 2020-06-03T07:49:49 | 2020-06-03T07:49:49 | 195,264,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
Chapter 4 - Problem 4.7 - Build Order
Problem:
You are given a list of projects and a list of dependencies (which is a list of pairs of projects,
where the second project is dependent on the first project). All of a project's dependencies must be
built before the project is. Find a build order that will allow the projects to be built. If there
is no valid build order, return an error.
"""
| [
"shanminlin@gmail.com"
] | shanminlin@gmail.com |
35ffd32429dda0c8f6f7fc732cecded99c89a129 | 3e395616b524f7c416d3eb5bedcb385efc9476b9 | /Python - University of Michigan/02 Python Data Structures - University of Michigan/170813_coursera_pds.py | 7fa10f5b001a560c1e1b7dd8dd1946a9e5cdd417 | [] | no_license | rohannanaware/Python | 614f925c25a49852f3fe875367d61c089d3980f4 | d1f49571aac0f3b13c450311f420a26f2f70b910 | refs/heads/master | 2021-09-12T08:46:35.391638 | 2018-04-15T16:54:00 | 2018-04-15T16:54:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,055 | py | #Author : Rohan M. Nanaware
#Date C.: 13th Aug 2017
#Date M.: 13th Aug 2017
#Purpose: Python data structures code documentation
#6.1 Strings
fruit = 'banana'
print(fruit[0])
print(len(fruit))
for i in range(len(fruit)):
print(i, fruit[i])
for letter in fruit:
print(letter)
#Slicing strings - Upto but not including
string = 'Python is great!'
print(string[1:4])
print(string[7:])
#6.2 Manipulating strings
fruit = 'banana'
if 'nana' in fruit:
print('Present')
if 'Rohan' < 'rohan':
print('Capital first')
greet = 'Hello World!'
greet.lower()
print('Hello World!'.lower())
string = 'Hello World'
type(string)
dir(string)#Methods applicable on string
string.replace('Hello','Hi')
string = ' Yahallo! '
string.lstrip()
string.rstrip()
string.strip()
line = 'The quick brown fox!'
line.startswith('T')
line.startswith('t')
#String slicing
string = 'Koreva teme#$! Makhinayo '
start = string.find('!')+2
end = string.find(' ',start)
print(string[start:end])
#7.1 Files
# Use words.txt as the file name
fname = input("Enter file name: ")
fh = open(fname)
for line in fh:
line_strip = line.rstrip().upper()
#line_strip_upper = line_strip.upper()
print(line_strip)
# Use the file name mbox-short.txt as the file name
#sample data - "X-DSPAM-Confidence: 0.8475"
fname = input("Enter file name: ")
fh = open(fname)
confidence = 0
count = 0
for line in fh:
if not line.startswith("X-DSPAM-Confidence:") : continue
#print(line)
confidence = float(line[line.find(":")+1:len(line)]) + confidence
count = count + 1
avg_confidence = confidence/count
print("Average spam confidence:",avg_confidence)
#8.4
fname = input("Enter file name: ")
fh = open(fname)
lst = list()
#for line in fh:
# for word in line.splitsplit():
# if word in lst:
# print(word)
# continue
# lst.append(word)
#print(lst.sort())
for line in fh:
line.rstrip()
print(line.rstrip())
#9.1 Dictionaries
purse = dict()
purse['money'] = 10
purse['wallet'] = 1
print(purse['wallet'])
for w in purse:
print(w)
print(purse[w])
purse[1] = 15
dict_ = dict()
names = ['A','B','C','D','E','F','G','A','B','C']
for name in names:
if name in dict_:
dict_[name] += 1
else:
dict_[name] = 1
print(dict_)
dict_ = dict()
names = ['A','B','C','D','E','F','G','A','B','C']
for name in names:
dict_[name] = dict_.get(name, 0) + 1
# Find the most frequent 'From:' sender in an mbox-format mail file.
name = input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
word_dict = dict()   # sender address -> number of messages
word_list = list()   # one sender address per 'From:' header line
for line in handle:
    if 'From:' not in line:
        continue
    else:
        words = line.split()
        word_list.append(words[1])  # second token on the header line is the address
for word in word_list:
    word_dict[word] = word_dict.get(word, 0) + 1
max_count = max(word_dict.values())
#print(max_count)
# Print every sender tied for the highest message count.
for sender, count_ in word_dict.items():
    #print(sender)
    if count_ == max_count:
        print(sender, count_)
| [
"noreply@github.com"
] | noreply@github.com |
b39e0b8dd9b5b7433907c2c42d410fd4a1992a54 | 97cd75e0a505e3a499f292a671d1bbf678e59ad3 | /extras/[Day03.x]CrossedWires-Rendering.py | d8ada5180189cb430960eeb42512bcba7bffb294 | [] | no_license | cgdilley/AdventOfCode2019 | 53768e96eb629d8b9a8a27bd2bdbcb0bf7280f61 | 6e5816ea7e9e7c9921a139a328234d79c04e2f50 | refs/heads/master | 2020-09-22T12:55:16.869258 | 2019-12-13T10:10:11 | 2019-12-13T10:10:11 | 225,205,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,345 | py | import pygame
from shared.day3 import load_wires, Wire
import time
import sys
from typing import Tuple
Coord = Tuple[int, int]
def main():
    """Load the two puzzle wires and render them progressively."""
    first, second = load_wires()
    render_wires(first, second, progressive=True, speed=30)
#
#
def render_wires(*wires: Wire, progressive: bool, speed: int):
    """Render every wire path in a pygame window scaled to fit 1920x1080.

    Points visited by more than one wire (true crossings) are marked with a
    larger red square; a wire re-tracing its own path is drawn red as well.
    When `progressive` is True the display is flipped every `speed` steps so
    the wires appear to grow.  Blocks in an event loop until the window is
    closed.
    """
    # Overall bounding box ((min_x, min_y), (max_x, max_y)) across all wires.
    bounds = ((
        min([c[0] for w in wires for c in w.coords]),
        min([c[1] for w in wires for c in w.coords])
    ),
        (
            max([c[0] for w in wires for c in w.coords]),
            max([c[1] for w in wires for c in w.coords])
        ))
    # Calculate window size based on bounds, and determine scale factor
    pygame.init()
    max_width, max_height = 1920, 1080
    width, height = bounds[1][0] - bounds[0][0], bounds[1][1] - bounds[0][1]
    scale_x = 1 if width < max_width else max_width / width
    scale_y = 1 if height < max_height else max_height / height
    # Use the smaller scale so both dimensions fit on screen.
    if scale_x < scale_y:
        scale = scale_x
    else:
        scale = scale_y
    width = int(width * scale)
    height = int(height * scale)
    screen = pygame.display.set_mode((width, height), pygame.RESIZABLE)
    # Keep track of coords traversed
    seen = set()  # wire coordinates drawn so far, by any wire
    seen_by_index = {i: set() for i in range(len(wires))}  # per-wire history
    red_render_coords = set()  # screen pixels that must stay red once marked
    max_steps = max([w.total_length for w in wires])
    for step in range(max_steps):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        # Render each wire
        for i, w in enumerate(wires):
            if step >= len(w.raw):
                continue
            c = w.raw[step]
            render_c = _offset(c, bounds=bounds, scale=scale)
            if c in seen and c not in seen_by_index[i]:
                # Seen by some other wire: a true crossing, draw it big and red.
                color = (255, 0, 0)
                size = 5
                for x in range(render_c[0] - int(size/2), render_c[0] + int(size/2)):
                    for y in range(render_c[1] - int(size/2), render_c[1] + int(size/2)):
                        red_render_coords.add((x, y))
            elif c in seen:
                # This wire is re-tracing its own path.
                color = (255, 0, 0)
                size = 1
                red_render_coords.add(render_c)
            elif render_c in red_render_coords:
                # Pixel already marked red by an earlier crossing; keep it red.
                color = (255, 0, 0)
                size = 1
            else:
                color = _wire_color(i)
                size = 1
            pygame.draw.rect(screen,
                             color,
                             pygame.Rect(render_c[0] - int(size/2), render_c[1] - int(size/2), size, size))
            seen.add(c)
            seen_by_index[i].add(c)
        if progressive and step % speed == 0:
            pygame.display.flip()
            # time.sleep(0.0001)
    pygame.display.flip()
    # Keep the final image on screen until the user closes the window.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
#
def _offset(coord: Coord, bounds: Tuple[Coord, Coord], scale: float) -> Coord:
    """Translate `coord` so the bounds' minimum corner becomes the origin, then scale."""
    (min_x, min_y), _ = bounds
    return int((coord[0] - min_x) * scale), int((coord[1] - min_y) * scale)
def _wire_color(index: int) -> Tuple[int, int, int]:
if index == 0:
return 150, 150, 230
elif index == 1:
return 150, 230, 150
else:
return 255, 255, 255
#
if __name__ == "__main__":
main()
| [
"christopher@cabuu.de"
] | christopher@cabuu.de |
112b70a8cb49822a6647aa03586fb30a695eb9e9 | 6684fa1ad429185b3008058b763f2a3874a75e5e | /python_spj.py | c5b467cdc3336105478663ea4d3995b13c2134f6 | [] | no_license | DeepsnowTech/QubitOnlineJudgeBackend | b43c84c1e75e824b47ffb1196b41ce07a605fd9e | 434f87e568687dc1c58fc6715c2896d61a130446 | refs/heads/main | 2023-02-13T11:19:53.995413 | 2021-01-14T14:32:18 | 2021-01-14T14:32:18 | 328,137,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | import os
import subprocess
SPJ_WA = 1
SPJ_AC = 0
SPJ_ERROR = -1
PY_SPJ_HOME = "/code/ProblemCenter/PythonSPJ/"
def py_spj_run(spj_src_path, test_in_file_path, user_out_file_path):
    """Resolve the problem id from the SPJ source file, then run that judge."""
    problem = get_problem_id(spj_src_path)
    return _py_spj_run(problem, test_in_file_path, user_out_file_path)
def _py_spj_run(problem_id, test_in_file_path, user_out_file_path):
    """Execute the special judge for `problem_id` against one submission.

    Runs ``PY_SPJ_HOME/<problem_id>.py <test_in> <user_out>`` as a child
    process and maps its exit status onto the SPJ_* verdict constants.

    Returns:
        SPJ_AC / SPJ_WA when the judge exits with a recognised verdict,
        otherwise SPJ_ERROR (including any failure to launch the process).
    """
    # FIX: the original built a `cmd_list` and then ignored it, duplicating
    # the command construction inline in Popen; build the command exactly once.
    spj_program_path = PY_SPJ_HOME + str(problem_id) + ".py"
    cmd = ["python3", spj_program_path, test_in_file_path, user_out_file_path]
    try:
        p_handler = subprocess.Popen(cmd)
        p_handler.wait()
        if p_handler.returncode in (SPJ_WA, SPJ_AC, SPJ_ERROR):
            return p_handler.returncode
        return SPJ_ERROR
    except Exception as e:
        # Best-effort: any launch/wait failure is reported as a judge error.
        print(e)
        return SPJ_ERROR
def get_problem_id(spj_src_path):
    """Return the problem id encoded on the first line of an SPJ source file.

    The first line looks like ``//<id>``; every '/' character and newline is
    removed and whatever remains is returned as a string.
    """
    with open(spj_src_path, "r") as src:
        first_line = src.readline()
    return first_line.replace("/", "").replace("\n", "")
return line
if __name__ == "__main__":
_py_spj_run(1001, "/code/ProblemCenter/PythonSPJ/test.in", "/code/ProblemCenter/PythonSPJ/cmd.txt")
| [
"doomspec@outlook.com"
] | doomspec@outlook.com |
dd6599933934b2322beb606cd62f466192ff2185 | f450ab7329a2e5182d4b8e798a0192580949c737 | /src_detection/HOG_SVM/pedestran_detect_me_without_train_16.py | c793711b38c46b6eebc05a090b2996bf24165e62 | [] | no_license | xuelanglv/KCF_IOU_Tracker | 96e0f03a7ad24d30fa0430c2466a515119532611 | bccaa0c145b6f58dec75d5843de4995c5aeba136 | refs/heads/master | 2020-05-25T19:36:57.125575 | 2019-04-27T03:09:01 | 2019-04-27T03:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | # *_*coding:utf-8 *_*
# 参考资料:https://blog.csdn.net/qq_33662995/article/details/79356939
#
# author: 许鸿斌
import os
import sys
import cv2
import time
import logging
import numpy as np
import pandas as pd
def logger_init():
    """Create and return the process-wide console logger.

    Returns:
        logging.Logger: logger named "PedestranDetect" that writes INFO+
        records to stdout using a timestamped format.
    """
    # getLogger returns the same Logger instance for the same name each call.
    logger = logging.getLogger("PedestranDetect")
    # Output format for every record.
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')
    # FIX: the original unconditionally added a new StreamHandler per call,
    # so calling logger_init() twice duplicated every log line.  Only attach
    # a handler the first time.
    if not logger.handlers:
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.formatter = formatter  # same as setFormatter(formatter)
        logger.addHandler(console_handler)
    # Minimum level emitted; default for a fresh logger would be WARNING.
    logger.setLevel(logging.INFO)
    return logger
def test_hog_detect(logger):
    '''
    Run OpenCV's HOG pedestrian detector over the MOT16 training sequences
    and write the detections out in detection-results text format.

    :param logger: logger used to report per-image progress
    :return: None (one txt file of detections is written per frame)
    '''
    hog = cv2.HOGDescriptor()
    # Use OpenCV's bundled, pre-trained people-detection SVM.
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    pwd = os.getcwd()
    DATA_PATH = 'F:/mot/obj_det/mAP_16/input/detection-results/'
    brenchmark = ['MOT16-02', 'MOT16-04', 'MOT16-05', 'MOT16-09', 'MOT16-10', 'MOT16-11', 'MOT16-13']
    # cv2.namedWindow('Detect')
    for seq_name in brenchmark:
        test_dir = 'F:/MOT16/train/%s/img1/' % seq_name
        save_path = DATA_PATH + '%s-%06d.txt'
        idx = 1
        test = os.listdir(test_dir)
        for f in test:
            file_path = os.path.join(test_dir, f)
            logger.info('Processing {}'.format(file_path))
            img = cv2.imread(file_path)
            s_time = time.time()
            rects, scores = hog.detectMultiScale(img, winStride=(8,8), padding=(8,8), scale=1.2)
            print("run time = %f"%(time.time() - s_time))
            dets = []
            for (x,y,w,h), s in zip(rects, scores):
                # NOTE(review): the raw score is inverted as 100 - s, presumably to
                # turn it into a "bigger is better" confidence — confirm the scale.
                s = 100 - float(s[0])
                # if s > 1:
                #     s = 2 - s
                # if s < 0.6:
                #     continue
                dets.append(['Person', s, x, y, x+w, y+h])
            dets = pd.DataFrame(dets)
            # One whitespace-separated file per frame: label score x1 y1 x2 y2.
            dets.to_csv(save_path % (seq_name, idx), sep=' ', index=False, header=False)
            # for (x,y,w,h), (s) in zip(rects, scores):
            #     cv2.putText(img, '#%.03f' % s, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
            #     cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 3)
            # cv2.putText(img, '#%03d' % idx, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 3)
            # cv2.imshow('Detect', cv2.resize(img, (int(img.shape[:2][1]*0.5), int(img.shape[:2][0]*0.5))))
            # c = cv2.waitKey(1) & 0xff
            # if c == 27:
            #     break
            idx = idx + 1
            # if True:
            #     cv2.imwrite('imgs/TUD-Stadtmitte/'+ f, img);
    # cv2.destroyAllWindows()
if __name__ == '__main__':
logger = logger_init()
test_hog_detect(logger)
| [
"15822775539@163.com"
] | 15822775539@163.com |
a98677c79904384ea4f9182f45560317822060b0 | 1eb50735e3861cde4bca8f4feab5afc730003078 | /future/flags_threadpool.py | 68c2812cf5337155961672ac7f2d7ec0945eca02 | [] | no_license | chinaylssly/fluent-python | 442e6458215e3c5a74c4d34d020b714da108f81d | 126c1d3e7853628c4a2c0e6ff475362b7d7fe33a | refs/heads/master | 2020-04-17T13:58:03.534184 | 2019-02-01T14:40:42 | 2019-02-01T14:40:42 | 166,637,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # -*- coding: utf-8 -*-
import random,sys
from time import sleep,time
from concurrent import futures
MAX_WORKERS= 20
tl=[i*0.01 for i in range(20)]
def do_one(t=0.2):
    """Block for `t` seconds, then return `t` unchanged."""
    sleep(t)
    return t
def do_many(tl=tl):
    """Map do_one over every delay in `tl` with a thread pool; return the result count.

    Note: the default argument binds the module-level `tl` list at definition time.
    """
    workers=min(len(tl),MAX_WORKERS)
    with futures.ThreadPoolExecutor(workers) as executor:
        '''
        executor.__exit__() invokes executor.shutdown(wait=True), which blocks
        until every worker thread has finished executing.
        '''
        res=executor.map(do_one,tl)
    return len(list(res))
    ## Returns how many results were fetched.  If a worker raised, the exception
    ## propagates here, exactly as if next() were called implicitly on the iterator.
def main(do_many=do_many):
    """Time one run of `do_many` and report the task count and elapsed seconds."""
    started = time()
    count = do_many()
    elapsed = time() - started
    msg = 'execute {:2d} task cost {:.2f} s'
    print(msg.format(count, elapsed))
if __name__ =='__main__':
main()
| [
"chinaylssly@qq.com"
] | chinaylssly@qq.com |
819929e661e932eb05fa4daed21aad26f4396234 | 92a08e6bca398d9a59f18aabb67090869a2ec963 | /bagua/torch_api/data_parallel/functional.py | c12a379f2083826838ecb87c830b4d4751871a8f | [
"MIT"
] | permissive | JCGit2018/bagua | 74e77e3b4062075824a407e8f739f1dfa8b1bd7c | 689bb20623bb72a929758b54c67689e1639f2d12 | refs/heads/master | 2023-09-02T17:32:00.068437 | 2021-11-16T09:07:07 | 2021-11-16T09:07:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | import enum
import torch
from torch.autograd import Function
import torch.distributed as dist
# must be consistent with Aluminum ReductionOperator: https://github.com/BaguaSys/Aluminum/blob/master/include/aluminum/base.hpp
class ReduceOp(enum.IntEnum):
    """An enum-like class for available reduction operations: ``SUM``, ``PRODUCT``, ``MIN``, ``MAX``, ``BAND``,
    ``BOR``, ``BXOR`` and ``AVG``."""
    # Numeric values mirror Aluminum's ReductionOperator (see the module-level
    # comment); values 4-6 are skipped here, presumably reserved on the
    # Aluminum side — keep this enum in lock-step with the linked header.
    SUM = 0
    PRODUCT = 1
    MIN = 2
    MAX = 3
    BOR = 7
    BAND = 8
    BXOR = 9
    AVG = 10
def torch_reduce_op_to_bagua(op):
    """Translate a ``torch.distributed.ReduceOp`` into the Bagua :class:`ReduceOp`.

    Only ``SUM`` and ``MAX`` are supported by this mapping.

    Raises:
        ValueError: if ``op`` is any other reduction operator.
    """
    if op is torch.distributed.ReduceOp.SUM:
        return ReduceOp.SUM
    elif op is torch.distributed.ReduceOp.MAX:
        return ReduceOp.MAX
    else:
        # FIX: raise ValueError (a subclass of Exception, so existing handlers
        # still match) with a corrected message instead of a bare Exception
        # reading "Unexpect input".
        raise ValueError("Unexpected input={}".format(op))
def all_reduce(tensor, op=dist.ReduceOp.SUM, group=dist.group.WORLD):
    """
    Reduces the tensor data across all machines in such a way that all get
    the final result.
    After the call the returned tensor is going to be bitwise
    identical in all processes.  This entry point is autograd-aware: the
    reduction is performed through _AllReduce, whose backward all-reduces the
    incoming gradients with the same operator.
    Arguments:
        tensor (Tensor): Input of the collective.
        op (optional): One of the values from
            ``torch.distributed.ReduceOp``
            enum. Specifies an operation used for element-wise reductions.
        group (ProcessGroup, optional): The process group to work on.
    Returns:
        Tensor: Output of the collective
    """
    # Callers may pass group=None explicitly; treat that as the world group.
    if group is None:
        group = dist.group.WORLD
    return _AllReduce.apply(op, group, tensor)
class _AllReduce(Function):
    # Autograd-aware allreduce: backward applies the same reduction to the
    # incoming gradients (see backward below).
    @staticmethod
    def forward(ctx, op, group, tensor):
        # Stash op/group so backward can run the matching collective.
        ctx.group = group
        ctx.op = op
        # Clone so the caller's tensor is not mutated by the in-place reduce.
        tensor = tensor.clone()
        comm = group.bagua_patch().bagua_get_global_communicator()
        # Make the communicator's CUDA stream wait for all work queued on the
        # current stream, then run the reduction on the communicator's stream
        # and block until it has finished.
        event = torch.cuda.current_stream().record_event()
        comm.cuda_stream.wait_event(event)
        with torch.cuda.stream(comm.cuda_stream):
            comm.allreduce_inplace(
                tensor.to_bagua_tensor().bagua_backend_tensor(),
                int(torch_reduce_op_to_bagua(op)),
            )
            comm.cuda_stream.synchronize()
        return tensor
    @staticmethod
    def backward(ctx, grad_output):
        # No gradients for op/group; all-reduce the gradient with the same op.
        # NOTE(review): this is exact for SUM — confirm the intended gradient
        # semantics when op is MAX.
        return (None, None) + (_AllReduce.apply(ctx.op, ctx.group, grad_output),)
| [
"noreply@github.com"
] | noreply@github.com |
b831e9344373a01d47963d9f219b70d3348677fd | 472c1437ac75d3cfa0e4a262c43d30f2d8e5c240 | /ISM4402_workbookA1.py | cde017455dde1401fc094fafdeff4d2e00666437 | [] | no_license | pwanner/ISM-4402 | 871153a785caf4e101d33ae0b3769897d438d27d | 1d23d01000845bdd2429af46bace8097ccfcee00 | refs/heads/master | 2020-07-23T04:32:09.227897 | 2019-12-02T16:09:21 | 2019-12-02T16:09:21 | 207,446,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
path_to_zip_file = "datasets.zip"
directory_to_extract_to = ""
import zipfile
zip_ref = zipfile.ZipFile(path_to_zip_file, 'r')
zip_ref.extractall(directory_to_extract_to)
zip_ref.close()
# In[2]:
import pandas as pd
Location = "datasets/smallgradesh.csv"
df = pd.read_csv(Location, header=None)
# In[3]:
df.head()
# In[4]:
import pandas as pd
Location = "datasets/gradedata.csv"
df = pd.read_csv(Location)
# In[5]:
df.head()
# In[6]:
import pandas as pd
Location = "datasets/smallgrades.csv"
# To add headers as we load the data...
df = pd.read_csv(Location, names=['Names','Grades'])
# To add headers to a dataframe
df.columns = ['Names','Grades']
# In[7]:
df.head()
# In[8]:
import pandas as pd
Location = "all_040_in_08.P1.csv"
censusdf = pd.read_csv(Location)
# In[9]:
censusdf.head()
# In[10]:
import pandas as pd
Location = "all_040_in_08.P1.csv"
censusdf = pd.read_csv(Location)
# In[11]:
censusdf.head()
# In[14]:
import pandas as pd
names = ['Bob','Jessica','Mary','John','Mel']
grades = [76,95,77,78,99]
GradeList = zip(names,grades)
df = pd.DataFrame(data = GradeList, columns=['Names','Grades'])
df.to_csv('studentgrades.csv',index=False,header=False)
# In[15]:
df.head()
# In[20]:
import pandas as pd
# Build a small student table: name, grade, and degree counts per level.
names = ['Bob','Jessica','Mary','John','Mel']
grades = [76,95,77,78,99]
bsdegrees = [1,1,0,0,1]
msdegrees = [2,1,0,0,0]
phddegrees = [0,1,0,0,0]
Degrees = zip(names,grades,bsdegrees,msdegrees,phddegrees)
columns = ['Names','Grades','BS','MS','PhD']
# BUG FIX: the original passed `columns=column` (an undefined name), which
# raised NameError; the list defined above is named `columns`.
df = pd.DataFrame(data = Degrees, columns=columns)
df
# In[21]:
import pandas as pd
Location = "datasets/gradedata.xlsx"
df = pd.read_excel(Location)
# In[22]:
df.head()
# In[23]:
df.columns = ['first','last','sex','age','exer','hrs','grd','addr']
df.head()
# In[24]:
path_to_zip_file = "EDU.zip"
directory_to_extract_to = ""
import zipfile
zip_ref = zipfile.ZipFile(path_to_zip_file, 'r')
zip_ref.extractall(directory_to_extract_to)
zip_ref.close()
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
25a966ceab5d2deb560acac18c7d2c9729e93236 | be999cad30c28d0370a57b73057cb734fdffbf23 | /workshop_corona19/corona19_07_여행력.py | 899ed6dd8d5552a0e7aa1dc68988569ffc65f5fa | [] | no_license | choicoding1026/ML | 341181d5b1088f48fa0336a6db773ed7cfbecc21 | 69db5fcd559a7a41ce9fb0ece543d3cf9b44d5bb | refs/heads/master | 2023-01-01T07:35:09.655664 | 2020-10-15T07:41:50 | 2020-10-15T07:41:50 | 303,907,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,833 | py | '''
서울시 코로나19 데이터 수집 및 분석
26. 여행력
'''
import pandas as pd
import numpy as np
file_name = "seoul_corona_10_11_.csv"
df = pd.read_csv(file_name, encoding="utf-8") # 한글처리
# 1. '연번' 기준으로 오름차순 정렬
df = df.sort_values(by="연번", ascending=False)
print("1. '연번' 기준으로 오름차순 정렬:\n", df.head())
# 2. 확진일의 빈도수 ==> 어느 날짜에 가장 많이 확진이 되었는지 확인 가능
# value_counts() 자동으로 내림차순 정렬해서 반환
print("2. 확진일의 빈도수: \n", df["확진일"].value_counts())
# 3. '확진일자' 컬럼 추가 => 2020_10_11 날짜형식
# 기존의 '확진일' 컬럼값은 문자이기 때문에 날짜로 변경해야 된다.
'''
1) 10.11 --> 10-11 변경
2) 10-11 --> 2020-10-11 로 변경
3) 2020-10-11 문자열 ---- > 2020-10-11 날짜로 변경 (pd.to_datetime 함수 )
4) df["확진일자"] = 날짜
'''
df["확진일자"] = pd.to_datetime("2020-"+df["확진일"].str.replace(".", "-"))
print("3. '확진일자' 컬럼 추가: \n", df.head())
# 4. '확진일자' 날짜 데이터 컬럼 이용하여 '월' 컬럼 추가
df["월"] = df["확진일자"].dt.month
print("4. '월' 컬럼 추가: \n", df.head())
# 5. '확진일자' 날짜 데이터 컬럼 이용하여 '주(week)' 컬럼 추가
# 해당년도의 몇번째 주(week)인지 반환
df["주"] = df["확진일자"].dt.isocalendar().week
print("5. '주' 컬럼 추가: \n", df.head())
# 6. '확진일자' 날짜 데이터 컬럼 이용하여 '월-일' 컬럼 추가
# m = df["확진일자"].dt.month
# d = df["확진일자"].dt.day
# df["월-일"] = m.astype(str) + "-" + d.astype(str)
df["월-일"] = df["확진일자"].astype(str).map(lambda x:x[-5:]) # map함수는 데이터가공시 사용
print("6. '월-일' 컬럼 추가: \n", df.head())
print("6. '월-일' 컬럼 추가: \n", df.tail())
########################################################################
# 26. 여행력
print(df["여행력"])
print(df["여행력"].unique())
print(df["여행력"].value_counts())
'''
1. '-' ==> NaN 처리
==> "-"을 np.nan 으로 변경 처리
2. 공통명으로 변경
'아랍에미리트', 'UAE' ===> 아랍에미리트
'중국 청도','우한교민','우한 교민', '중국 우한시', '중국' ==> 중국
'프랑스, 스페인','스페인, 프랑스' ==> 프랑스, 스페인
체코,헝가리,오스트리아,이탈리아,프랑스,모로코,독일,스페인,영국,폴란드,터키,아일랜드 ==>유럽
브라질,아르헨티아,칠레,볼리비아, 멕시코, 페루 => 남미
'''
## 공통명으로 변경하고 시각화
df["해외"]=df["여행력"]
print(df["해외"].str.contains('아랍에미리트|UAE'))
df.loc[df["해외"].str.contains('아랍에미리트|UAE'), "해외"] = "아랍에미리트"
df.loc[df["해외"].str.contains('우한|중국'), "해외"] = "중국"
df.loc[df["해외"].
str.contains('체코|헝가리|오스트리아|이탈리아|프랑스|모로코|독일,스페인|영국\폴란드|터키|아일랜드'),
"해외"] = "유럽"
df.loc[df["해외"].str.contains('브라질|아르헨티아|칠레|볼리비아|멕시코|페루'), "해외"] = "남미"
## "-"을 np.nan 으로 변경 처리
df["해외"]=df["해외"].replace("-", np.nan)
print(df["해외"].unique())
print(df["해외"].value_counts())
# 상위 15개만 시각화
import matplotlib.pyplot as plt
plt.rc("font", family="Malgun Gothic") # 한글 처리
# plt.rc("figure", titlesize=4) # title 크기
plt.rc("ytick", labelsize=8) # y축 라벨 크기
plt.rc("xtick", labelsize=8) # x축 라벨 크기
plt.style.use("fivethirtyeight")
g = df["해외"].value_counts().head(15).sort_values().plot.barh(title="xxxx", figsize=(16,4))
plt.show()
| [
"justin6130@gmail.com"
] | justin6130@gmail.com |
7d3286a307ecea8c8c07ea2d6f62606007ac26d0 | 1dd57fce35b0dee6d12a0a16473c1d7ff5df408d | /Assignment.8.5.py | c96d8ececa8738de6fcc0c797e365bab51a08b16 | [] | no_license | JohnyWilson/ChuckPython | 4b2770796a0466366d4c0b2414d6f2ca419bcfb7 | 6e4d2b446ce7f9305af202005aa2b1ece16dd44d | refs/heads/master | 2021-01-10T16:36:47.459263 | 2015-12-15T05:21:21 | 2015-12-15T05:21:21 | 47,785,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # 8.5 Open the file mbox-short.txt and read it line by line. When you find a line that starts with 'From ' like the following line:
##### From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008
# You will parse the From line using split() and print out the second word in the line (i.e. the entire address of the person who sent the message).
# Then print out a count at the end.
# Hint: make sure not to include the lines that start with 'From:'.
# You can download the sample data at http://www.pythonlearn.com/code/mbox-short.txt
fname = "Data/mbox-short.txt"
if len(fname) < 1: fname = "Data/mbox-short.txt" # raw_input("Enter file name: ")
fh = open(fname)
count = 0
for line in fh:
if line.startswith("From:"): continue
if line.startswith("From"):
print line.rstrip().split()[1]
count+=1
print "There were", count, "lines in the file with From as the first word" | [
"imemyself9163@gmail.com"
] | imemyself9163@gmail.com |
218da19b57c5712555289e34068f1467c2a0dd69 | 16047f965a69893a8cd2c8d18fbd7b9c86a07eb3 | /src/networkx/algorithms/tree/mst.py | b7b8c1d9326cb3ad5fb75cc1770917b253928b1e | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | guctum/aws-kube-codesuite | 9ce2cc02fe5fa15c2e175fb697138014fb162f1e | 5d62beaadc13bec745ac7d2fc18f07805e91cef3 | refs/heads/master | 2021-05-24T10:08:00.651840 | 2020-04-23T20:21:46 | 2020-04-23T20:21:46 | 253,511,083 | 0 | 0 | Apache-2.0 | 2020-04-06T13:48:14 | 2020-04-06T13:48:13 | null | UTF-8 | Python | false | false | 21,167 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2017 NetworkX Developers
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# Loïc Séguin-C. <loicseguin@gmail.com>
# All rights reserved.
# BSD license.
"""
Algorithms for calculating min/max spanning trees/forests.
"""
from heapq import heappop, heappush
from operator import itemgetter
from itertools import count
from math import isnan
import networkx as nx
from networkx.utils import UnionFind, not_implemented_for
__all__ = [
'minimum_spanning_edges', 'maximum_spanning_edges',
'minimum_spanning_tree', 'maximum_spanning_tree',
]
@not_implemented_for('multigraph')
def boruvka_mst_edges(G, minimum=True, weight='weight',
keys=False, data=True, ignore_nan=False):
"""Iterate over edges of a Borůvka's algorithm min/max spanning tree.
Parameters
----------
G : NetworkX Graph
The edges of `G` must have distinct weights,
otherwise the edges may not form a tree.
minimum : bool (default: True)
Find the minimum (True) or maximum (False) spanning tree.
weight : string (default: 'weight')
The name of the edge attribute holding the edge weights.
keys : bool (default: True)
This argument is ignored since this function is not
implemented for multigraphs; it exists only for consistency
with the other minimum spanning tree functions.
data : bool (default: True)
Flag for whether to yield edge attribute dicts.
If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
If False, yield edges `(u, v)`.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
"""
# Initialize a forest, assuming initially that it is the discrete
# partition of the nodes of the graph.
forest = UnionFind(G)
def best_edge(component):
"""Returns the optimum (minimum or maximum) edge on the edge
boundary of the given set of nodes.
A return value of ``None`` indicates an empty boundary.
"""
sign = 1 if minimum else -1
minwt = float('inf')
boundary = None
for e in nx.edge_boundary(G, component, data=True):
wt = e[-1].get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % (e,))
if wt < minwt:
minwt = wt
boundary = e
return boundary
# Determine the optimum edge in the edge boundary of each component
# in the forest.
best_edges = (best_edge(component) for component in forest.to_sets())
best_edges = [edge for edge in best_edges if edge is not None]
# If each entry was ``None``, that means the graph was disconnected,
# so we are done generating the forest.
while best_edges:
# Determine the optimum edge in the edge boundary of each
# component in the forest.
#
# This must be a sequence, not an iterator. In this list, the
# same edge may appear twice, in different orientations (but
# that's okay, since a union operation will be called on the
# endpoints the first time it is seen, but not the second time).
#
# Any ``None`` indicates that the edge boundary for that
# component was empty, so that part of the forest has been
# completed.
#
# TODO This can be parallelized, both in the outer loop over
# each component in the forest and in the computation of the
# minimum. (Same goes for the identical lines outside the loop.)
best_edges = (best_edge(component) for component in forest.to_sets())
best_edges = [edge for edge in best_edges if edge is not None]
# Join trees in the forest using the best edges, and yield that
# edge, since it is part of the spanning tree.
#
# TODO This loop can be parallelized, to an extent (the union
# operation must be atomic).
for u, v, d in best_edges:
if forest[u] != forest[v]:
if data:
yield u, v, d
else:
yield u, v
forest.union(u, v)
def kruskal_mst_edges(G, minimum, weight='weight',
keys=True, data=True, ignore_nan=False):
"""Iterate over edges of a Kruskal's algorithm min/max spanning tree.
Parameters
----------
G : NetworkX Graph
The graph holding the tree of interest.
minimum : bool (default: True)
Find the minimum (True) or maximum (False) spanning tree.
weight : string (default: 'weight')
The name of the edge attribute holding the edge weights.
keys : bool (default: True)
If `G` is a multigraph, `keys` controls whether edge keys ar yielded.
Otherwise `keys` is ignored.
data : bool (default: True)
Flag for whether to yield edge attribute dicts.
If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
If False, yield edges `(u, v)`.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
"""
subtrees = UnionFind()
if G.is_multigraph():
edges = G.edges(keys=True, data=True)
def filter_nan_edges(edges=edges, weight=weight):
sign = 1 if minimum else -1
for u, v, k, d in edges:
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, f, k, d),))
yield wt, u, v, k, d
else:
edges = G.edges(data=True)
def filter_nan_edges(edges=edges, weight=weight):
sign = 1 if minimum else -1
for u, v, d in edges:
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, d),))
yield wt, u, v, d
edges = sorted(filter_nan_edges(), key=itemgetter(0))
# Multigraphs need to handle edge keys in addition to edge data.
if G.is_multigraph():
for wt, u, v, k, d in edges:
if subtrees[u] != subtrees[v]:
if keys:
if data:
yield u, v, k, d
else:
yield u, v, k
else:
if data:
yield u, v, d
else:
yield u, v
subtrees.union(u, v)
else:
for wt, u, v, d in edges:
if subtrees[u] != subtrees[v]:
if data:
yield (u, v, d)
else:
yield (u, v)
subtrees.union(u, v)
def prim_mst_edges(G, minimum, weight='weight',
keys=True, data=True, ignore_nan=False):
"""Iterate over edges of Prim's algorithm min/max spanning tree.
Parameters
----------
G : NetworkX Graph
The graph holding the tree of interest.
minimum : bool (default: True)
Find the minimum (True) or maximum (False) spanning tree.
weight : string (default: 'weight')
The name of the edge attribute holding the edge weights.
keys : bool (default: True)
If `G` is a multigraph, `keys` controls whether edge keys ar yielded.
Otherwise `keys` is ignored.
data : bool (default: True)
Flag for whether to yield edge attribute dicts.
If True, yield edges `(u, v, d)`, where `d` is the attribute dict.
If False, yield edges `(u, v)`.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
"""
is_multigraph = G.is_multigraph()
push = heappush
pop = heappop
nodes = list(G)
c = count()
sign = 1 if minimum else -1
while nodes:
u = nodes.pop(0)
frontier = []
visited = [u]
if is_multigraph:
for v, keydict in G.adj[u].items():
for k, d in keydict.items():
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, k, d),))
push(frontier, (wt, next(c), u, v, k, d))
else:
for v, d in G.adj[u].items():
wt = d.get(weight, 1) * sign
if isnan(wt):
if ignore_nan:
continue
msg = "NaN found as an edge weight. Edge %s"
raise ValueError(msg % ((u, v, d),))
push(frontier, (wt, next(c), u, v, d))
while frontier:
if is_multigraph:
W, _, u, v, k, d = pop(frontier)
else:
W, _, u, v, d = pop(frontier)
if v in visited:
continue
# Multigraphs need to handle edge keys in addition to edge data.
if is_multigraph and keys:
if data:
yield u, v, k, d
else:
yield u, v, k
else:
if data:
yield u, v, d
else:
yield u, v
# update frontier
visited.append(v)
nodes.remove(v)
if is_multigraph:
for w, keydict in G.adj[v].items():
if w in visited:
continue
for k2, d2 in keydict.items():
new_weight = d2.get(weight, 1) * sign
push(frontier, (new_weight, next(c), v, w, k2, d2))
else:
for w, d2 in G.adj[v].items():
if w in visited:
continue
new_weight = d2.get(weight, 1) * sign
push(frontier, (new_weight, next(c), v, w, d2))
ALGORITHMS = {
'boruvka': boruvka_mst_edges,
u'borůvka': boruvka_mst_edges,
'kruskal': kruskal_mst_edges,
'prim': prim_mst_edges
}
@not_implemented_for('directed')
def minimum_spanning_edges(G, algorithm='kruskal', weight='weight',
keys=True, data=True, ignore_nan=False):
"""Generate edges in a minimum spanning forest of an undirected
weighted graph.
A minimum spanning tree is a subgraph of the graph (a tree)
with the minimum sum of edge weights. A spanning forest is a
union of the spanning trees for each connected component of the graph.
Parameters
----------
G : undirected Graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
algorithm : string
The algorithm to use when finding a minimum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.
weight : string
Edge data key to use for weight (default 'weight').
keys : bool
Whether to yield edge key in multigraphs in addition to the edge.
If `G` is not a multigraph, this is ignored.
data : bool, optional
If True yield the edge data along with the edge.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
edges : iterator
An iterator over edges in a maximum spanning tree of `G`.
Edges connecting nodes `u` and `v` are represented as tuples:
`(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`
If `G` is a multigraph, `keys` indicates whether the edge key `k` will
be reported in the third position in the edge tuple. `data` indicates
whether the edge datadict `d` will appear at the end of the edge tuple.
If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
or `(u, v)` if `data` is False.
Examples
--------
>>> from networkx.algorithms import tree
Find minimum spanning edges by Kruskal's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> mst = tree.minimum_spanning_edges(G, algorithm='kruskal', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (1, 2), (2, 3)]
Find minimum spanning edges by Prim's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> mst = tree.minimum_spanning_edges(G, algorithm='prim', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (1, 2), (2, 3)]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
Modified code from David Eppstein, April 2006
http://www.ics.uci.edu/~eppstein/PADS/
"""
try:
algo = ALGORITHMS[algorithm]
except KeyError:
msg = '{} is not a valid choice for an algorithm.'.format(algorithm)
raise ValueError(msg)
return algo(G, minimum=True, weight=weight, keys=keys, data=data,
ignore_nan=ignore_nan)
@not_implemented_for('directed')
def maximum_spanning_edges(G, algorithm='kruskal', weight='weight',
keys=True, data=True, ignore_nan=False):
"""Generate edges in a maximum spanning forest of an undirected
weighted graph.
A maximum spanning tree is a subgraph of the graph (a tree)
with the maximum possible sum of edge weights. A spanning forest is a
union of the spanning trees for each connected component of the graph.
Parameters
----------
G : undirected Graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
algorithm : string
The algorithm to use when finding a maximum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'.
weight : string
Edge data key to use for weight (default 'weight').
keys : bool
Whether to yield edge key in multigraphs in addition to the edge.
If `G` is not a multigraph, this is ignored.
data : bool, optional
If True yield the edge data along with the edge.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
edges : iterator
An iterator over edges in a maximum spanning tree of `G`.
Edges connecting nodes `u` and `v` are represented as tuples:
`(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)`
If `G` is a multigraph, `keys` indicates whether the edge key `k` will
be reported in the third position in the edge tuple. `data` indicates
whether the edge datadict `d` will appear at the end of the edge tuple.
If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True
or `(u, v)` if `data` is False.
Examples
--------
>>> from networkx.algorithms import tree
Find maximum spanning edges by Kruskal's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> mst = tree.maximum_spanning_edges(G, algorithm='kruskal', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (0, 3), (1, 2)]
Find maximum spanning edges by Prim's algorithm
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2) # assign weight 2 to edge 0-3
>>> mst = tree.maximum_spanning_edges(G, algorithm='prim', data=False)
>>> edgelist = list(mst)
>>> sorted(edgelist)
[(0, 1), (0, 3), (3, 2)]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
Modified code from David Eppstein, April 2006
http://www.ics.uci.edu/~eppstein/PADS/
"""
try:
algo = ALGORITHMS[algorithm]
except KeyError:
msg = '{} is not a valid choice for an algorithm.'.format(algorithm)
raise ValueError(msg)
return algo(G, minimum=False, weight=weight, keys=keys, data=data,
ignore_nan=ignore_nan)
def minimum_spanning_tree(G, weight='weight', algorithm='kruskal',
ignore_nan=False):
"""Returns a minimum spanning tree or forest on an undirected graph `G`.
Parameters
----------
G : undirected graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
weight : str
Data key to use for edge weights.
algorithm : string
The algorithm to use when finding a minimum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is
'kruskal'.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
G : NetworkX Graph
A minimum spanning tree or forest.
Examples
--------
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> T = nx.minimum_spanning_tree(G)
>>> sorted(T.edges(data=True))
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
There may be more than one tree with the same minimum or maximum weight.
See :mod:`networkx.tree.recognition` for more detailed definitions.
Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
"""
edges = minimum_spanning_edges(G, algorithm, weight, keys=True,
data=True, ignore_nan=ignore_nan)
T = G.fresh_copy() # Same graph class as G
T.graph.update(G.graph)
T.add_nodes_from(G.nodes.items())
T.add_edges_from(edges)
return T
def maximum_spanning_tree(G, weight='weight', algorithm='kruskal',
ignore_nan=False):
"""Returns a maximum spanning tree or forest on an undirected graph `G`.
Parameters
----------
G : undirected graph
An undirected graph. If `G` is connected, then the algorithm finds a
spanning tree. Otherwise, a spanning forest is found.
weight : str
Data key to use for edge weights.
algorithm : string
The algorithm to use when finding a minimum spanning tree. Valid
choices are 'kruskal', 'prim', or 'boruvka'. The default is
'kruskal'.
ignore_nan : bool (default: False)
If a NaN is found as an edge weight normally an exception is raised.
If `ignore_nan is True` then that edge is ignored instead.
Returns
-------
G : NetworkX Graph
A minimum spanning tree or forest.
Examples
--------
>>> G = nx.cycle_graph(4)
>>> G.add_edge(0, 3, weight=2)
>>> T = nx.maximum_spanning_tree(G)
>>> sorted(T.edges(data=True))
[(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})]
Notes
-----
For Borůvka's algorithm, each edge must have a weight attribute, and
each edge weight must be distinct.
For the other algorithms, if the graph edges do not have a weight
attribute a default weight of 1 will be used.
There may be more than one tree with the same minimum or maximum weight.
See :mod:`networkx.tree.recognition` for more detailed definitions.
Isolated nodes with self-loops are in the tree as edgeless isolated nodes.
"""
edges = maximum_spanning_edges(G, algorithm, weight, keys=True,
data=True, ignore_nan=ignore_nan)
edges = list(edges)
T = G.fresh_copy() # Same graph class as G
T.graph.update(G.graph)
T.add_nodes_from(G.nodes.items())
T.add_edges_from(edges)
return T
| [
"olari@784f435df7a4.ant.amazon.com"
] | olari@784f435df7a4.ant.amazon.com |
989528ac7820dca22e21aec571ce43ed89e4c1a0 | e3e3071e5f01f75ba3716ac229abef484e8c051a | /mnist.py | f9639c963cb10a4bdcfc9a82659ccfe73a01289c | [] | no_license | schmit/dictlearning | 9efc9e15e73a99f840db71d81925dbe7c0bd22d0 | 14c37631aa4d330d58fc174b2294866e2484d5d0 | refs/heads/master | 2021-01-10T04:06:33.899917 | 2013-03-15T18:40:33 | 2013-03-15T18:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | import numpy as np
import scipy.io as sio
import dictionary
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from multiOGD import *
from kernels import *
import sys
import argparse
import utility
sys.stdout = utility.Logger()
print 'Starting run of MNIST.py'
parser = argparse.ArgumentParser(description=\
'MNIST: Encode sparse dictionary and fit model')
parser.add_argument('dict_fit',\
help="model for fitting dictionary (linreg, lasso, lars)")
parser.add_argument('dict_init',\
help='initialization of dictionary')
parser.add_argument('dict_atoms',\
help='nr of atoms in dictionary')
parser.add_argument('dict_reg',\
help='regularization in sparse encoding')
parser.add_argument('mod_reg', \
help='regularization svm fit')
params = parser.parse_args(sys.argv[1:])
DICT_FIT = params.dict_fit
DICT_INIT = params.dict_init
DICT_ATOMS = int(params.dict_atoms)
DICT_REG = float(params.dict_reg)
MOD_REG = float(params.mod_reg)
print params
def showimage(x):
img = np.reshape(x, (28, 28), order = 'F')
imgplot = plt.imshow(img)
plt.show()
mnist_train = sio.loadmat('./data/mnist/MNIST_train.mat')
mnist_test = sio.loadmat('./data/mnist/MNIST_test.mat')
X_train = mnist_train['X'][0][0][2].transpose()
y_train = mnist_train['y']
X_test = mnist_test['Xtest'].transpose()
y_test = mnist_test['ytest']
dim = X_train.shape[1]
## Dictionary
lasso_d = dictionary.Dictionary(dim, DICT_ATOMS, DICT_FIT, DICT_REG, \
DICT_INIT)
lasso_d.batchtrain(X_train
# Save dictionary atoms as images
#lasso_d.dimagesave((28, 28), 'mnist')
# Find reconstructions
alphas_train = lasso_d.batchreconstruction(X_train, \
'mnist_train_s')
alphas_test = lasso_d.batchreconstruction(X_test, \
'mnist_test_s')
## Classification
ogd_m = multiOGD(10, DICT_ATOMS, MOD_REG)
ogd_m.train(alphas_train, y_train)
ogd_m.predict(alphas_test, y_test)
print 'Run of MNIST.py is complete!'
'''
Atoms: 200
Reg: 0.05 too much
'''
| [
"schmit@stanford.edu"
] | schmit@stanford.edu |
c271649567b1d4fcc6318325d96d7492f682fd23 | 0837592e2900db9ec8cf7fdb6fcce3053af7aeae | /New Main.py | 85b701d2fb9156512fd98900c6da509d5fde2472 | [] | no_license | AbdullahAjiPratama/GoldPrice-Prediction | 5e3ce396aa82e0c223e11b6ae551c69831b72ed6 | 46807984b540104645202402458ebbe98d21bdd0 | refs/heads/master | 2020-08-10T17:30:38.877698 | 2017-02-22T08:06:18 | 2017-02-22T08:06:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | #Import semua prosedur dari file Datahandler.py
import Datahandler as dh
import ann2 as ann
import numpy as np
#BAGIAN INI DIKHUSUSKAN UNTUK TRAINING DATA
#CHANGEABLE VARIABLE : File DATA TRAINING
fileTrain = 'DataTrainSMA.xlsx'
data, target = dh.generateToSeries(fileTrain, 3)
#CHANGEABLE VARIABLE : HIDDEN NODE
num_hidden = 5
# print data[1]
#MERUBAH DATA MENJADI MATRIX
data = np.array(data)
target = np.array(target)
#Modul TRAINING
#CHANGEABLE VARIABLE : EPOCH dan LEARNING RATE
mape, model = ann.train(data, num_hidden, target, epoch=1000, lr=0.1)
y = ann.test(data,model)
print y
mape = np.mean(np.abs(target-y))
print mape, (1-mape)*100
print 'test'
#BAGIAN INI DIKHUSUSKAN UNTUK TESTING DATA
#CHANGEABLE VARIABLE : File DATA TESTING
fileTest = 'DataTestSMA.xlsx'
data, target = dh.generateToSeries(fileTest, 3)
data = np.array(data)
target = np.array(target)
y = ann.test(data,model)
mape = np.mean(np.abs(target-y))
print mape, (1-mape)*100 | [
"noreply@github.com"
] | noreply@github.com |
24ebdd333e00edb3f74ccd4677e9ab43d5c096e3 | c03d7a4e03c581d4be98b6363003cddb9c213ec0 | /pets/migrations/0007_auto_20180910_0016.py | 6228879999e3df790cc687d09ad854b059402325 | [] | no_license | hernandavidc/plataforma | b333e4f06290713072d8dc609c27d4ce8af1d9df | 4316e2a59db76e74f1e6106958631ad4a7a653c7 | refs/heads/master | 2020-04-06T17:08:21.019355 | 2019-04-09T04:41:00 | 2019-04-09T04:41:00 | 157,648,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | # Generated by Django 2.1 on 2018-09-10 05:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pets', '0006_auto_20180910_0011'),
]
operations = [
migrations.RemoveField(
model_name='mascota',
name='dueno',
),
migrations.AddField(
model_name='mascota',
name='dueno',
field=models.ForeignKey(default=3, on_delete=django.db.models.deletion.PROTECT, related_name='get_pets', to=settings.AUTH_USER_MODEL, verbose_name='Dueños'),
preserve_default=False,
),
]
| [
"hernandavidc@hotmail.com"
] | hernandavidc@hotmail.com |
a3eefa3f23a8dfe00c158170d73f421c29d1e373 | c79737296bdf4b3a969ab5ceb69198daf66def0e | /python/solutii/bogdan_iacoboae/caesar/caesar.py | 315bde89ddbea8afd9d78e0152861ba4b9c51fa0 | [
"MIT"
] | permissive | ilieandrei98/labs | 96c749072b6455b34dc5f0bd3bb20f7a0e95b706 | cda09cbf5352e88909f51546c2eb360e1ff2bec1 | refs/heads/master | 2020-04-26T03:23:48.220151 | 2019-03-01T08:56:43 | 2019-03-01T08:56:43 | 173,265,757 | 0 | 0 | MIT | 2019-03-01T08:37:14 | 2019-03-01T08:37:14 | null | UTF-8 | Python | false | false | 1,939 | py | # coding=utf-8
# from __future__ import print_function
"""Împăratul a primit serie de mesaje importante pe care este
important să le descifreze cât mai repede.
Din păcate mesagerul nu a apucat să îi spună împăratul care au fost
cheile alese pentru fiecare mesaj și tu ai fost ales să descifrezi
misterul.
Informații:
În criptografie, cifrul lui Caesar este o metodă simplă de a cripta
un mesaj prin înlocuirea fiecărei litere cu litera de pe poziția aflată
la un n pași de ea în alfabet (unde este n este un număr întreg cunoscut
"""
def afla_pasul(mesaj):
""" Afla pasul encodarii """
first_letter = 'a'
my_letter = mesaj[0]
return ord(my_letter) - ord(first_letter)
def real_letter(character, key):
""" Afla caracterul """
if character.isalpha():
character = ord(character)-key
if character < ord('a'):
character = ord('z') - abs(ord('a') - character) + 1
return chr(character)
else:
return character
def decripteaza_mesajul(mesaj, fisier):
""" Decriptarea mesajului """
key = afla_pasul(mesaj)
puncte = 0.
for index in mesaj:
if index == ".":
if puncte == 1:
print ".\n"
fisier.write("\n")
else:
puncte = puncte + 1
print ".",
fisier.write(".")
else:
print real_letter(index, key),
fisier.write(real_letter(index, key))
def main():
""" Main function docstring """
try:
fisier = open("../../../date_intrare/mesaje.secret", "r")
towrite = open("../../../date_iesire/mesaje.decodat", "w")
mesaje = fisier.read()
fisier.close()
except IOError:
print "Nu am putut obtine mesajele."
return
for mesaj in mesaje.splitlines():
decripteaza_mesajul(mesaj, towrite)
if __name__ == "__main__":
main()
| [
"mmicu@cloudbasesolutions.com"
] | mmicu@cloudbasesolutions.com |
5f3bf0a00a97af0ee8dafb8ced4351ea6a96ee71 | 6373ae8a308d5d8a7a100a36933b2e7de22638cf | /ldpc_jossy/py/ldpc.py | 7841745ac8da20912be0cd1097fb01eb0a6de236 | [] | no_license | appleginny/Test_LDPC | 5c0b79eeb0b61180cc0a7d833f5c1b95529fb46c | a39a3f92dc836eaff6cc29459e753e1c23ae52e1 | refs/heads/main | 2023-05-08T03:23:30.124992 | 2021-05-26T08:46:05 | 2021-05-26T08:46:05 | 370,964,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,147 | py | from types import DynamicClassAttribute
import numpy as nphttps://github.com/appleginny/Test_LDPC/blob/main/ldpc_jossy/py/ldpc.py
import ctypes as ct
class code:
def __init__(self, standard = '802.11n', rate = '1/2', z=27, ptype='A'):
self.standard = standard
self.rate = rate
self.z = z
self.ptype = ptype
self.proto = self.assign_proto()
vdeg, cdeg, intrlv = self.prepare_decoder()
self.vdeg = vdeg
self.cdeg = cdeg
self.intrlv = intrlv
self.Nv = len(vdeg)
self.Nc = len(cdeg)
self.Nmsg = len(intrlv)
self.N = self.Nv
self.K = self.Nv - self.Nc
return
def assign_proto(self):
""" Generates arrays to enable the construction of
IEEE standard-compliant LDPC codes
Parameters
----------
standard: string
Specifies the IEEE standard used, 802.11n or 802.16
rate: string
Specifies the code rate, 1/2, 2/3, 3/4 or 5/6
z: int
Optional parameter (not needed for for 802.16, required for 802.11n)
Specifies the protograph expansion factor, freely chooseable >= 3 for
IEEE 802.16, restricted to (27, 54, 81) for IEEE 802.11n
ptype: character
Optional parameter.
Either A or B for 802.16 rates 2/3 and 3/4 where two options are
specified in the standard. Parameter unused for all other codes.
Returns
-------
np.ndarray
Protograph for an LDPC parity-check matrix
"""
standard = self.standard
rate = self.rate
z = self.z
ptype = self.ptype
if standard == "802.16":
# N = z*24
if rate == '1/2':
proto = np.array([
[-1, 94, 73, -1, -1, -1, -1, -1, 55, 83, -1, -1, 7, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, 27, -1, -1, -1, 22, 79, 9, -1, -1, -1, 12, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, 24, 22, 81, -1, 33, -1, -1, -1, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1],
[61, -1, 47, -1, -1, -1, -1, -1, 65, 25, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, 39, -1, -1, -1, 84, -1, -1, 41, 72, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, 46, 40, -1, 82, -1, -1, -1, 79, 0, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[-1, -1, 95, 53, -1, -1, -1, -1, -1, 14, 18, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[-1, 11, 73, -1, -1, -1, 2, -1, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1],
[12, -1, -1, -1, 83, 24, -1, 43, -1, -1, -1, 51, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1],
[-1, -1, -1, -1, -1, 94, -1, 59, -1, -1, 70, 72, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[-1, -1, 7, 65, -1, -1, -1, -1, 39, 49, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0],
[43, -1, -1, -1, -1, 66, -1, 41, -1, -1, -1, 26, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '2/3':
if ptype == 'A':
proto = np.array([
[3, 0, -1, -1, 2, 0, -1, 3, 7, -1, 1, 1, -1, -1, -1, -1, 1, 0, -1, -1, -1, -1, -1, -1],
[-1, -1, 1, -1, 36, -1, -1, 34, 10, -1, -1, 18, 2, -1, 3, 0, -1, 0, 0, -1, -1, -1, -1, -1],
[-1, -1, 12, 2, -1, 15, -1, 40, -1, 3, -1, 15, -1, 2, 13, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[-1, -1, 19, 24, -1, 3, 0, -1, 6, -1, 17, -1, -1, -1, 8, 39, -1, -1, -1, 0, 0, -1, -1, -1],
[20, -1, 6, -1, -1, 10, 29, -1, -1, 28, -1, 14, -1, 38, -1, -1, 0, -1, -1, -1, 0, 0, -1, -1],
[-1, -1, 10, -1, 28, 20, -1, -1, 8, -1, 36, -1, 9, -1, 21, 45, -1, -1, -1, -1, -1, 0, 0, -1],
[35, 25, -1, 37, -1, 21, -1, -1, 5, -1, -1, 0, -1, 4, 20, -1, -1, -1, -1, -1, -1, -1, 0, 0],
[-1, 6, 6, -1, -1, -1, 4, -1, 14, 30, -1, 3, 36, -1, 14, -1, 1, -1, -1, -1, -1, -1, -1, 0]
])
elif ptype == 'B':
proto = np.array([
[2, -1, 19, -1, 47, -1, 48, -1, 36, -1, 82, -1, 47, -1, 15, -1, 95, 0, -1, -1, -1, -1, -1, -1],
[-1, 69, -1, 88, -1, 33, -1, 3, -1, 16, -1, 37, -1, 40, -1, 48, -1, 0, 0, -1, -1, -1, -1, -1],
[10, -1, 86, -1, 62, -1, 28, -1, 85, -1, 16, -1, 34, -1, 73, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[-1, 28, -1, 32, -1, 81, -1, 27, -1, 88, -1, 5, -1, 56, -1, 37, -1, -1, -1, 0, 0, -1, -1, -1],
[23, -1, 29, -1, 15, -1, 30, -1, 66, -1, 24, -1, 50, -1, 62, -1, -1, -1, -1, -1, 0, 0, -1, -1],
[-1, 30, -1, 65, -1, 54, -1, 14, -1, 0, -1, 30, -1, 74, -1, 0, -1, -1, -1, -1, -1, 0, 0, -1],
[32, -1, 0, -1, 15, -1, 56, -1, 85, -1, 5, -1, 6, -1, 52, -1, 0, -1, -1, -1, -1, -1, 0, 0],
[-1, 0, -1, 47, -1, 13, -1, 61, -1, 84, -1, 55, -1, 78, -1, 41, 95, -1, -1, -1, -1, -1, -1, 0]
])
else:
raise NameError('802.16 type must be either A or B')
elif rate == '3/4':
if ptype == 'A':
proto = np.array([
[6, 38, 3, 93, -1, -1, -1, 30, 70, -1, 86, -1, 37, 38, 4, 11, -1, 46, 48, 0, -1, -1, -1, -1],
[62, 94, 19, 84, -1, 92, 78, -1, 15, -1, -1, 92, -1, 45, 24, 32, 30, -1, -1, 0, 0, -1, -1, -1],
[71, -1, 55, -1, 12, 66, 45, 79, -1, 78, -1, -1, 10, -1, 22, 55, 70, 82, -1, -1, 0, 0, -1, -1],
[38, 61, -1, 66, 9, 73, 47, 64, -1, 39, 61, 43, -1, -1, -1, -1, 95, 32, 0, -1, -1, 0, 0, -1],
[-1, -1, -1, -1, 32, 52, 55, 80, 95, 22, 6, 51, 24, 90, 44, 20, -1, -1, -1, -1, -1, -1, 0, 0],
[-1, 63, 31, 88, 20, -1, -1, -1, 6, 40, 56, 16, 71, 53, -1, -1, 27, 26, 48, -1, -1, -1, -1, 0]
])
elif ptype == 'B':
proto = np.array([
[-1, 81, -1, 28, -1, -1, 14, 25, 17, -1, -1, 85, 29, 52, 78, 95, 22, 92, 0, 0, -1, -1, -1, -1],
[42, -1, 14, 68, 32, -1, -1, -1, -1, 70, 43, 11, 36, 40, 33, 57, 38, 24, -1, 0, 0, -1, -1, -1],
[-1, -1, 20, -1, -1, 63, 39, -1, 70, 67, -1, 38, 4, 72, 47, 29, 60, 5, 80, -1, 0, 0, -1, -1],
[64, 2, -1, -1, 63, -1, -1, 3, 51, -1, 81, 15, 94, 9, 85, 36, 14, 19, -1, -1, -1, 0, 0, -1],
[-1, 53, 60, 80, -1, 26, 75, -1, -1, -1, -1, 86, 77, 1, 3, 72, 60, 25, -1, -1, -1, -1, 0, 0],
[77, -1, -1, -1, 15, 28, -1, 35, -1, 72, 30, 68, 85, 84, 26, 64, 11, 89, 0, -1, -1, -1, -1, 0]
])
else:
raise NameError('802.16 type must be either A or B')
elif rate == '5/6':
proto = np.array([
[1, 25, 55, -1, 47, 4, -1, 91, 84, 8, 86, 52, 82, 33, 5, 0, 36, 20, 4, 77, 80, 0, -1, -1],
[-1, 6, -1, 36, 40, 47, 12, 79, 47, -1, 41, 21, 12, 71, 14, 72, 0, 44, 49, 0, 0, 0, 0, -1],
[51, 81, 83, 4, 67, -1, 21, -1, 31, 24, 91, 61, 81, 9, 86, 78, 60, 88, 67, 15, -1, -1, 0, 0],
[50, -1, 50, 15, -1, 36, 13, 10, 11, 20, 53, 90, 29, 92, 57, 30, 84, 92, 11, 66, 80, -1, -1, 0]
])
else:
raise NameError('802.16 invalid rate')
elif standard == "802.11n":
if z == 27:
# N = 648
if rate == '1/2':
proto = np.array([
[0, -1, -1, -1, 0, 0, -1, -1, 0, -1, -1, 0, 1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[22, 0, -1, -1, 17, -1, 0, 0, 12, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[6, -1, 0, -1, 10, -1, -1, -1, 24, -1, 0, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1],
[2, -1, -1, 0, 20, -1, -1, -1, 25, 0, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1],
[23, -1, -1, -1, 3, -1, -1, -1, 0, -1, 9, 11, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1],
[24, -1, 23, 1, 17, -1, 3, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[25, -1, -1, -1, 8, -1, -1, -1, 7, 18, -1, -1, 0, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[13, 24, -1, -1, 0, -1, 8, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1],
[7, 20, -1, 16, 22, 10, -1, -1, 23, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1],
[11, -1, -1, -1, 19, -1, -1, -1, 13, -1, 3, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[25, -1, 8, -1, 23, 18, -1, 14, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0],
[3, -1, -1, -1, 16, -1, -1, 2, 25, 5, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '2/3':
proto = np.array([
[25, 26, 14, -1, 20, -1, 2, -1, 4, -1, -1, 8, -1, 16, -1, 18, 1, 0, -1, -1, -1, -1, -1, -1],
[10, 9, 15, 11, -1, 0, -1, 1, -1, -1, 18, -1, 8, -1, 10, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[16, 2, 20, 26, 21, -1, 6, -1, 1, 26, -1, 7, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[10, 13, 5, 0, -1, 3, -1, 7, -1, -1, 26, -1, -1, 13, -1, 16, -1, -1, -1, 0, 0, -1, -1, -1],
[23, 14, 24, -1, 12, -1, 19, -1, 17, -1, -1, -1, 20, -1, 21, -1, 0, -1, -1, -1, 0, 0, -1, -1],
[6, 22, 9, 20, -1, 25, -1, 17, -1, 8, -1, 14, -1, 18, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[14, 23, 21, 11, 20, -1, 24, -1, 18, -1, 19, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, 0, 0],
[17, 11, 11, 20, -1, 21, -1, 26, -1, 3, -1, -1, 18, -1, 26, -1, 1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '3/4':
proto = np.array([
[16, 17, 22, 24, 9, 3, 14, -1, 4, 2, 7, -1, 26, -1, 2, -1, 21, -1, 1, 0, -1, -1, -1, -1],
[25, 12, 12, 3, 3, 26, 6, 21, -1, 15, 22, -1, 15, -1, 4, -1, -1, 16, -1, 0, 0, -1, -1, -1],
[25, 18, 26, 16, 22, 23, 9, -1, 0, -1, 4, -1, 4, -1, 8, 23, 11, -1, -1, -1, 0, 0, -1, -1],
[9, 7, 0, 1, 17, -1, -1, 7, 3, -1, 3, 23, -1, 16, -1, -1, 21, -1, 0, -1, -1, 0, 0, -1],
[24, 5, 26, 7, 1, -1, -1, 15, 24, 15, -1, 8, -1, 13, -1, 13, -1, 11, -1, -1, -1, -1, 0, 0],
[2, 2, 19, 14, 24, 1, 15, 19, -1, 21, -1, 2, -1, 24, -1, 3, -1, 2, 1, -1, -1, -1, -1, 0]
])
elif rate == '5/6':
proto = np.array([
[17, 13, 8, 21, 9, 3, 18, 12, 10, 0, 4, 15, 19, 2, 5, 10, 26, 19, 13, 13, 1, 0, -1, -1],
[3, 12, 11, 14, 11, 25, 5, 18, 0, 9, 2, 26, 26, 10, 24, 7, 14, 20, 4, 2, -1, 0, 0, -1],
[22, 16, 4, 3, 10, 21, 12, 5, 21, 14, 19, 5, -1, 8, 5, 18, 11, 5, 5, 15, 0, -1, 0, 0],
[7, 7, 14, 14, 4, 16, 16, 24, 24, 10, 1, 7, 15, 6, 10, 26, 8, 18, 21, 14, 1, -1, -1, 0]
])
else:
raise NameError('802.11n invalid rate')
elif z == 54:
# N = 1296
if rate == '1/2':
proto = np.array([
[40, -1, -1, -1, 22, -1, 49, 23, 43, -1, -1, -1, 1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[50, 1, -1, -1, 48, 35, -1, -1, 13, -1, 30, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[39, 50, -1, -1, 4, -1, 2, -1, -1, -1, -1, 49, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1],
[33, -1, -1, 38, 37, -1, -1, 4, 1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1],
[45, -1, -1, -1, 0, 22, -1, -1, 20, 42, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1],
[51, -1, -1, 48, 35, -1, -1, -1, 44, -1, 18, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[47, 11, -1, -1, -1, 17, -1, -1, 51, -1, -1, -1, 0, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[5, -1, 25, -1, 6, -1, 45, -1, 13, 40, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1],
[33, -1, -1, 34, 24, -1, -1, -1, 23, -1, -1, 46, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1],
[1, -1, 27, -1, 1, -1, -1, -1, 38, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[-1, 18, -1, -1, 23, -1, -1, 8, 0, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0],
[49, -1, 17, -1, 30, -1, -1, -1, 34, -1, -1, 19, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '2/3':
proto = np.array([
[39, 31, 22, 43, -1, 40, 4, -1, 11, -1, -1, 50, -1, -1, -1, 6, 1, 0, -1, -1, -1, -1, -1, -1],
[25, 52, 41, 2, 6, -1, 14, -1, 34, -1, -1, -1, 24, -1, 37, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[43, 31, 29, 0, 21, -1, 28, -1, -1, 2, -1, -1, 7, -1, 17, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[20, 33, 48, -1, 4, 13, -1, 26, -1, -1, 22, -1, -1, 46, 42, -1, -1, -1, -1, 0, 0, -1, -1, -1],
[45, 7, 18, 51, 12, 25, -1, -1, -1, 50, -1, -1, 5, -1, -1, -1, 0, -1, -1, -1, 0, 0, -1, -1],
[35, 40, 32, 16, 5, -1, -1, 18, -1, -1, 43, 51, -1, 32, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[9, 24, 13, 22, 28, -1, -1, 37, -1, -1, 25, -1, -1, 52, -1, 13, -1, -1, -1, -1, -1, -1, 0, 0],
[32, 22, 4, 21, 16, -1, -1, -1, 27, 28, -1, 38, -1, -1, -1, 8, 1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '3/4':
proto = np.array([
[39, 40, 51, 41, 3, 29, 8, 36, -1, 14, -1, 6, -1, 33, -1, 11, -1, 4, 1, 0, -1, -1, -1, -1],
[48, 21, 47, 9, 48, 35, 51, -1, 38, -1, 28, -1, 34, -1, 50, -1, 50, -1, -1, 0, 0, -1, -1, -1],
[30, 39, 28, 42, 50, 39, 5, 17, -1, 6, -1, 18, -1, 20, -1, 15, -1, 40, -1, -1, 0, 0, -1, -1],
[29, 0, 1, 43, 36, 30, 47, -1, 49, -1, 47, -1, 3, -1, 35, -1, 34, -1, 0, -1, -1, 0, 0, -1],
[1, 32, 11, 23, 10, 44, 12, 7, -1, 48, -1, 4, -1, 9, -1, 17, -1, 16, -1, -1, -1, -1, 0, 0],
[13, 7, 15, 47, 23, 16, 47, -1, 43, -1, 29, -1, 52, -1, 2, -1, 53, -1, 1, -1, -1, -1, -1, 0]
])
elif rate == '5/6':
proto = np.array([
[48, 29, 37, 52, 2, 16, 6, 14, 53, 31, 34, 5, 18, 42, 53, 31, 45, -1, 46, 52, 1, 0, -1, -1],
[17, 4, 30, 7, 43, 11, 24, 6, 14, 21, 6, 39, 17, 40, 47, 7, 15, 41, 19, -1, -1, 0, 0, -1],
[7, 2, 51, 31, 46, 23, 16, 11, 53, 40, 10, 7, 46, 53, 33, 35, -1, 25, 35, 38, 0, -1, 0, 0],
[19, 48, 41, 1, 10, 7, 36, 47, 5, 29, 52, 52, 31, 10, 26, 6, 3, 2, -1, 51, 1, -1, -1, 0]
])
else:
raise NameError('802.11n invalid rate')
elif z == 81:
# N = 1944
if rate == '1/2':
proto = np.array([
[57, -1, -1, -1, 50, -1, 11, -1, 50, -1, 79, -1, 1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, -1, 28, -1, 0, -1, -1, -1, 55, 7, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[30, -1, -1, -1, 24, 37, -1, -1, 56, 14, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1],
[62, 53, -1, -1, 53, -1, -1, 3, 35, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1],
[40, -1, -1, 20, 66, -1, -1, 22, 28, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1],
[0, -1, -1, -1, 8, -1, 42, -1, 50, -1, -1, 8, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[69, 79, 79, -1, -1, -1, 56, -1, 52, -1, -1, -1, 0, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[65, -1, -1, -1, 38, 57, -1, -1, 72, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1],
[64, -1, -1, -1, 14, 52, -1, -1, 30, -1, -1, 32, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1, -1],
[-1, 45, -1, 70, 0, -1, -1, -1, 77, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[2, 56, -1, 57, 35, -1, -1, -1, -1, -1, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0],
[24, -1, 61, -1, 60, -1, -1, 27, 51, -1, -1, 16, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '2/3':
proto = np.array([
[61, 75, 4, 63, 56, -1, -1, -1, -1, -1, -1, 8, -1, 2, 17, 25, 1, 0, -1, -1, -1, -1, -1, -1],
[56, 74, 77, 20, -1, -1, -1, 64, 24, 4, 67, -1, 7, -1, -1, -1, -1, 0, 0, -1, -1, -1, -1, -1],
[28, 21, 68, 10, 7, 14, 65, -1, -1, -1, 23, -1, -1, -1, 75, -1, -1, -1, 0, 0, -1, -1, -1, -1],
[48, 38, 43, 78, 76, -1, -1, -1, -1, 5, 36, -1, 15, 72, -1, -1, -1, -1, -1, 0, 0, -1, -1, -1],
[40, 2, 53, 25, -1, 52, 62, -1, 20, -1, -1, 44, -1, -1, -1, -1, 0, -1, -1, -1, 0, 0, -1, -1],
[69, 23, 64, 10, 22, -1, 21, -1, -1, -1, -1, -1, 68, 23, 29, -1, -1, -1, -1, -1, -1, 0, 0, -1],
[12, 0, 68, 20, 55, 61, -1, 40, -1, -1, -1, 52, -1, -1, -1, 44, -1, -1, -1, -1, -1, -1, 0, 0],
[58, 8, 34, 64, 78, -1, -1, 11, 78, 24, -1, -1, -1, -1, -1, 58, 1, -1, -1, -1, -1, -1, -1, 0]
])
elif rate == '3/4':
proto = np.array([
[48, 29, 28, 39, 9, 61, -1, -1, -1, 63, 45, 80, -1, -1, -1, 37, 32, 22, 1, 0, -1, -1, -1, -1],
[4, 49, 42, 48, 11, 30, -1, -1, -1, 49, 17, 41, 37, 15, -1, 54, -1, -1, -1, 0, 0, -1, -1, -1],
[35, 76, 78, 51, 37, 35, 21, -1, 17, 64, -1, -1, -1, 59, 7, -1, -1, 32, -1, -1, 0, 0, -1, -1],
[9, 65, 44, 9, 54, 56, 73, 34, 42, -1, -1, -1, 35, -1, -1, -1, 46, 39, 0, -1, -1, 0, 0, -1],
[3, 62, 7, 80, 68, 26, -1, 80, 55, -1, 36, -1, 26, -1, 9, -1, 72, -1, -1, -1, -1, -1, 0, 0],
[26, 75, 33, 21, 69, 59, 3, 38, -1, -1, -1, 35, -1, 62, 36, 26, -1, -1, 1, -1, -1, -1, -1, 0]
])
elif rate == '5/6':
proto = np.array([
[13, 48, 80, 66, 4, 74, 7, 30, 76, 52, 37, 60, -1, 49, 73, 31, 74, 73, 23, -1, 1, 0, -1, -1],
[69, 63, 74, 56, 64, 77, 57, 65, 6, 16, 51, -1, 64, -1, 68, 9, 48, 62, 54, 27, -1, 0, 0, -1],
[51, 15, 0, 80, 24, 25, 42, 54, 44, 71, 71, 9, 67, 35, -1, 58, -1, 29, -1, 53, 0, -1, 0, 0],
[16, 29, 36, 41, 44, 56, 59, 37, 50, 24, -1, 65, 4, 65, 52, -1, 4, -1, 73, 52, 1, -1, -1, 0]
])
else:
raise NameError('802.11n invalid rate')
else:
raise NameError('802.11n invalid z (must be 27,54 or 81)')
else:
raise NameError('IEEE standard unknown')
return proto
def pcmat(self):
""" Converts from a protograph to an LDPC parity-check matrix.
This function is not used in the live system but is made available
e.g. if one wants to visualise the actual parity-check matrix.
Returns
-------
np.ndarray
Parity-check matrix for the LDPC code
"""
# traverses protograph row/column-wise and assigns all-zero submatrices
# where the protograph entry is -1, or suitably cyclic-shifted zxz identity
# matrices where the entry is not -1. Note that use of "np.roll" which
# operates a cyclic shift of the columns by proto[row,col]%z, and note
# that the mod z at the end is merely cosmetic since np.roll will
# natively cyclic shift modulo z if asked to roll a matrix by a shift
# that exceeds the matrix dimensions.
proto = self.proto
z = self.z
pcmat = np.zeros((z*len(proto),z*len(proto[0])),dtype=int)
(row,col) = np.nonzero(proto != -1)
for j in range(len(row)):
pcmat[row[j]*z:row[j]*z+z,col[j]*z:col[j]*z+z] = np.roll(np.eye(z),proto[row[j],col[j]]%z,1)
return pcmat
def prepare_decoder(self):
""" Generates the elements required for the LDPC decoder from the
protograph.
Parameters
----------
proto: array
Specifies the protograph for the code.
z: int
Specifies the expansion factor for the protograph
Returns
-------
np.array
vdeg vector of variable node degrees
np.array
cdeg vector of constraint node degrees
np.array
intrlv vector specifies the interleaver between variable node messages
and constraint node messages (from a variable node perspective)
The messages are assumed ordered as required for constraint node processing
(which is the "harder" processing) and must be addressed through this
interleaver when processing variable nodes (which is the "easier" processing)
"""
proto = self.proto
z = self.z
# This method operates by assigning interleaver entries and "flagging" them
# as it traverses the parity-check matrix, so that later visits to the same
# variable (or constraint) node know to move on to the next available message
# connection ("port") in the node.
# Variable node degrees and constraint node degrees are expanded from the
# "degrees" in the protograph by a factor of z. Note that each column in
# the protograph results in z columns of the same degree in the actual code,
# and the same for rows.
cdeg = np.repeat(np.sum(proto != -1, 1), z)
vdeg = np.repeat(np.sum(proto != -1, 0), z)
# Cumulative degrees with a 0 inserted at the start because we need the
# cumulation "up to and NOT including" the degree of the current node,
# whereas numpy's cumsum gives us the degree "up to and including"
# Note that cumvdeg and cumcdeg will be one element too long than
# we need (we will never use the last entry)
cumcdeg = np.insert(np.cumsum(cdeg),0,0)
cumvdeg = np.insert(np.cumsum(vdeg),0,0)
# Initialise the interleaver and a vector of flags telling us which "ports"
# have been used for the constraint nodes, i.e., which messages in each
# constraint nodes have already been assigned. We also need such flags for
# the variable node side, but the interleaver doubles up as a flag since
# we initialised it as -1s, we know that any message that still has a -1
# is an unuseed port for a variable node.
intrlv = -np.ones(np.sum(cdeg),dtype=int)
vflag = np.zeros(np.sum(cdeg),dtype=bool)
# We will traverse the protograph stopping at each sub-graph that doesn't have
# a -1 in the protograph (the -1s in the protograph correspond to an all-zero
# submatrix in the parity-check matrix.
(xp,yp) = np.nonzero(proto != -1)
for j in range(xp.size):
# offset specifies the exponent of the permutation matrix that is inserted
# at this position in the protograph. An offset of 0 means that the matrix
# is an identity matrix, whereas an offset of +1 means that the matrix is a
# "shift one to the right" permutation matrix, etc. Offsets larger than z
# result in shifts modulo z.
offset = proto[xp[j],yp[j]]
for k in range(z):
# Determine the variable node and constraint node index from the index
# of the protograph position and the index k of the row/column within
# the zxz submatrix at this position in the protograph
cind = xp[j]*z+k
vind = yp[j]*z+(k+offset)%z
# Find an unused "port" for the message in the constraint node
for xi in range(cumcdeg[cind],cumcdeg[cind+1]):
if intrlv[xi] == -1:
break
# Error handling if no unused port found, should never occur
if intrlv[xi] != -1:
raise NameError('No unused port found in constraint node')
# Find an unused "port" for the message in the variable node
for yi in range(cumvdeg[vind],cumvdeg[vind+1]):
if vflag[yi] == 0:
break
# Error handling if no unused port found, should never occur
if vflag[yi] != 0:
raise NameError('No unused port found in variable node')
# now assign the interleaver entry and flag the constraint node "port"
vflag[yi] = 1
intrlv[xi] = yi
intrlv = np.argsort(intrlv)
return vdeg, cdeg, intrlv
def encode(self, info):
z = self.z
proto = self.proto
# check dimensions before starting
Np = len(proto[0])
N = Np*z # length of codeword
Mp = len(proto)
Kp = Np - Mp
K = Kp*z # length of information
if len(info) != K:
raise NameError('information word length not compatible with proto and z')
# x is the codeword, composed of K bits information and N-K bits parity
x = np.zeros(N, dtype=int)
x[0:K] = info # pre-fill the first K bits with the information
# for the encoding, we will address x z bits at a time, so we reshape it to
# be Np x z and the rows of x are our new "super-symbols"
x = np.reshape(x,(Np,z))
# the following p will contain sum_k x_k H_jk for each row of the prototype parity
# check matrix, where the sum is only over the systematic (information) part
p = np.zeros((Mp,z), dtype=int)
for j in range(Mp):
ind = np.nonzero(proto[j,0:Kp] != -1)[0]
for k in ind.tolist():
p[j] = np.add(p[j],np.roll(x[k],-proto[j,k]))
p = np.mod(p,2)
tp = np.mod(np.sum(p,0),2) # tp is the sum of the p's
# The sum of all the super parity-check (vector) equations gives an equation that
# has only information symbols and the first parity symbol. Most protographs were
# designed so that the coefficient of the parity symbol in that equation is the
# identity matrix, but there are a few exceptions where the coefficient is not
# an identity. The following few lines compute that coefficient and compute its
# inverse.
toff = np.zeros(z, dtype = int)
ind = np.nonzero(proto[:,Kp] != -1)[0]
for j in ind.tolist():
toff[proto[j,Kp]%z] += 1
toff = np.mod(toff, 2)
tnz = np.nonzero(toff)[0]
# the coefficients in proto in column Kp come in pairs except one coefficient,
# resulting in a single coefficient for the first parity symbol. If this is
# not the case, call an error.
if len(tnz) != 1:
raise NameError('The offsets in colum Kp+1 of proto do not add to a single offset')
toff = tnz[0]
# now compute the first parity symbol as tp times the inverse coefficient
# (which will be an offset by 0 in most cases, when the resulting coefficient is
# an identity matrix)
x[Kp] = np.roll(tp, toff)
# the remaining parity symbols are computed using one parity equation at a time
for j in range(Mp-1):
myk = Kp+j+1 # parity symbol to be computed
x[myk] = p[j] # initialise with value of acumulated systematic part
ind = np.nonzero(proto[j,Kp:myk]!=-1)[0] # search for remaining coefficients
for k in ind.tolist():
x[myk] = np.add(x[myk], np.roll(x[Kp+k], -proto[j,Kp+k]))
x = np.mod(x,2)
return(np.reshape(x,-1))
def decode(self, ch, dectype='sumprod2', corr_factor=0.7):
vdeg = self.vdeg
cdeg = self.cdeg
intrlv = self.intrlv
c_ldpc = ct.CDLL('./bin/c_ldpc.so')
# preliminary consistency checks
if len(ch) != len(vdeg):
raise NameError('Channel inputs not consistent with variable degrees')
# prepare arguments and outputs
Nv = self.Nv
Nc = self.Nc
Nmsg = self.Nmsg
app = np.zeros(Nv, dtype=np.double)
app_p = app.ctypes.data_as(ct.POINTER(ct.c_double))
ch_p = ch.ctypes.data_as(ct.POINTER(ct.c_double))
vdeg_p = self.vdeg.ctypes.data_as(ct.POINTER(ct.c_long))
cdeg_p = self.cdeg.ctypes.data_as(ct.POINTER(ct.c_long))
intrlv_p = self.intrlv.ctypes.data_as(ct.POINTER(ct.c_long))
# call C function for the sum product algorithm
if dectype == 'sumprod':
it = c_ldpc.sumprod(ch_p, vdeg_p, cdeg_p, intrlv_p, Nv, Nc, Nmsg, app_p)
elif dectype == 'sumprod2':
it = c_ldpc.sumprod2(ch_p, vdeg_p, cdeg_p, intrlv_p, Nv, Nc, Nmsg, app_p)
elif dectype == 'minsum':
it = c_ldpc.minsum(ch_p, vdeg_p, cdeg_p, intrlv_p, Nv, Nc, Nmsg, app_p, ct.c_double(corr_factor))
else:
raise NameError('Decoder type unknonwn')
return app, it
def Lxor(self, L1, L2, corrflag=1):
c_ldpc = ct.CDLL('./bin/c_ldpc.so')
c_ldpc.Lxor.restype = ct.c_double
return c_ldpc.Lxor(ct.c_double(L1), ct.c_double(L2), corrflag)
# min rule, first multiply the signs, both -1 or both 1 senarios give 1
# if L1 * L2 >0:
# L = 1
# else:
# L = -1
# L *= min(abs(L1), abs(L2))
# if corrflag:
# L += np.log(1 + np.exp(-abs(L1 + L2)))
# L -= np.log(1 + np.exp(-abs(L1 - L2)))
# return L
def Lxfb(self, L, corrflag=1):
c_ldpc = ct.CDLL('./bin/c_ldpc.so')
dc = len(L)
L = np.array(L, dtype=float)
L_p = L.ctypes.data_as(ct.POINTER(ct.c_double))
c_ldpc.Lxfb.restype = ct.c_double
return c_ldpc.Lxfb(L_p, dc, corrflag), L
# dc = len(L)
# f = []
# b = []
# f[0] = L[0]
# b[dc-1] = L[dc-1]
# for k in range(dc):
# f[k] = self.Lxor(f[k-1], L[k], corrflag)
# b[dc-k-1] = self.Lxor(b[dc-k-1], L[dc-k-1], corrflag)
# L[0] = b[1]
# L[dc-1] = f[dc-2]
# for k_ in range(dc-1):
# L[k] = self.Lxor(f[k-1], b[k+1], corrflag)
# return b[0]
# def sumprod(self, ch, vdeg, cdeg, intrlv, Nv, Nc, Nmsg, app):
# msg = np.zeros(Nmsg)
# # main loop, will iterate until stopping criterion is fulfilled
# for itcount in range(self.MAX_ITCOUNT):
# # variable node rule ('sum')
# imsg = 0
# for j in range(Nv):
# aggr = ch[j]
# for k in range(vdeg[j]):#
# aggr += msg[intrlv[imsg]]
# imsg += 1
# imsg -= vdeg[j]
# for k in range(vdeg[j]):
# msg[intrlv[imsg]] = aggr - msg[intrlv[imsg]]
# imsg += 1
# app[j] = aggr
# stopflag = 0
# # constraint node rule ('product')
# imsg = 0
# for j in range(Nc):
# aggr = 1
# for k in range(cdeg[j]):
# msg[imsg] = np.tanh(msg[imsg]/2)
# aggr *= msg[imsg]
# imsg += 1
# if (stopflag == 0 and 2*np.arctanh(aggr) <= 0):
# stopflag = 1
# imsg -= cdeg[j]
# for k in range(cdeg[j]):
# msg[imsg] = 2 *np.arctanh(aggr/msg[imsg])
# imsg += 1
# if stopflag == False:
# break
# return itcount
# def sumprod2(self, ch, vdeg, cdeg, intrlv, Nv, Nc, Nmsg, app):
# msg = np.zeros(Nmsg)
# for itcount in range(self.MAX_ITCOUNT):
# # Vairable node
# imsg = 0
# for j in range(Nv):
# aggr = ch[j]
# for k in range(vdeg[j]):
# aggr += msg[intrlv[imsg]]
# imsg += 1
# imsg -= vdeg[j]
# for k in range(vdeg[j]):
# msg[intrlv[imsg]] = aggr - msg[intrlv[imsg]]
# imsg += 1
# app[j] = aggr
# stopflag = 0
# # Constraint mode
# imsg = 0
# for j in range(Nc):
# aggr = self.Lxfb(np.asarray[msg[imsg]], cdeg[j], 1)
# if (stopflag == 0 and aggr <=0):
# stopflag = 1
# imsg += cdeg[j]
# if stopflag == False:
# break
# return itcount
# def minsum(self, ch, vdeg, cdeg, intrlv, Nv, Nc, Nmsg, app, corr_factor):
# msg = np.zeros(Nmsg)
# for itcount in range(self.MAX_ITCOUNT):
# # var node
# imsg = 0
# for j in range(Nv):
# aggr = ch[j]
# for k in range(vdeg[j]):
# msg[intrlv[imsg]] = aggr - msg[intrlv[imsg]]
# imsg += 1
# app[j] = aggr
# stopflag = 0
# # constraint node
# imsg = 0
# for j in range(Nc):
# imsg += cdeg[j]
# aggr = self.Lxfb(msg[imsg], cdeg[j], 0)
# if (stopflag == 0 and aggr <=0):
# stopflag = 1
# for k in range(cdeg[j]):
# msg[imsg + k] *= corr_factor
# if stopflag == False:
# break
# return itcount
| [
"noreply@github.com"
] | noreply@github.com |
9cc6631841e0d557fe5f7c550d607b43dd261df1 | 859fcc80ae3fcb062afba484344db4d061fd2bb0 | /baseline/similarity.py | 251517e2387474af47a1a8649da75220f4d3e563 | [] | no_license | theblackcat102/who-wants-to-join | ee0cf4e5f1aa979edfc09651497d042d27b0e9b9 | 3f2e40454423fc96e1d93b54fb46e4ac083fb210 | refs/heads/master | 2022-08-27T05:28:35.137639 | 2020-05-03T06:58:24 | 2020-05-03T06:58:24 | 227,043,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,578 | py | from dataset.aminer import Aminer
from dataset.meetup import Meetup,locations_id
import numpy as np
import pickle
from sklearn.metrics import f1_score
from src.utils import dict2table, np_confusion, str2bool
def similarity_evaluation(dataset, embeddings, user_node_id, top_k=20, user_size = 399212):
name2id = embeddings['name2id']
embedding = embeddings['embedding']
precisions, recalls = [], []
max_node_idx= 0
for data in dataset:
# extract user node with known before
known_user_node_id = (data.x[:, 2] == user_node_id) & (data.x[:, 1] == 1)
known_nodes = [ '{}_{}'.format(int(node[-1]), int(node[0])) for node in data.x[known_user_node_id, :] ]
# known_paper_node_id = (data.x[:, 2] == 1)
# known_nodes += [ '{}_{}'.format(int(node[-1]), int(node[0])) for node in data.x[known_user_node_id, :] ]
# print(known_nodes)
user_embeddings = np.array([ embedding[name2id[nameid]] for nameid in known_nodes if nameid in name2id ])
candidate_user_node_id = (data.x[:, 2] == user_node_id) & (data.x[:, 1] != 1)
target_node_id = [ int(node[0]) for node in data.x[data.y == 1, :]]
max_node_idx = max([ max_node_idx ] + target_node_id)
candidate_node_id = [ int(node[0]) for node in data.x[candidate_user_node_id, :]]
max_node_idx = max([ max_node_idx ] + candidate_node_id)
candidate_nodes = [ '{}_{}'.format(int(node[-1]), int(node[0])) for node in data.x[candidate_user_node_id, :] ]
candidate_nodes = [ '{}_{}'.format(int(node[-1]), int(node[0])) for node in data.x[candidate_user_node_id, :] ]
candidate_user_embeddings = np.array([ embedding[name2id[nameid]] for nameid in candidate_nodes if nameid in name2id ])
if len(user_embeddings) > 0 and len(candidate_user_embeddings) > 0:
norm_embeddings = user_embeddings.sum(0)/len(user_embeddings)
norm_embeddings = np.linalg.norm(norm_embeddings)
candidate_user_embeddings = np.linalg.norm(candidate_user_embeddings, axis=1)
dot_prod = candidate_user_embeddings.dot(norm_embeddings)
rank = [ (candidate_node_id[idx], weight) for idx, weight in enumerate(dot_prod) ]
rank.sort(key=lambda x: x[1])
pred_nodes = [ ]
if len(rank) < top_k:
pred_nodes = [ pair[0] for pair in rank ]
else:
pred_nodes = [ rank[i][0] for i in range(top_k)]
# print(pred_nodes, target_node_id)
y_pred, y_target = np.zeros(user_size), np.zeros(user_size)
y_pred[pred_nodes] = 1.0
y_target[target_node_id] = 1.0
TP, FP, TN, FN = np_confusion(y_pred, y_target)
recall = 0 if (TP+FN) < 1e-5 else TP/(TP+FN)
precision = 0 if (TP+FP) < 1e-5 else TP/(TP+FP)
precisions.append(precision)
recalls.append(recall)
avg_recalls = np.mean(recalls)
avg_precisions = np.mean(precisions)
f1 = 2*(avg_recalls*avg_precisions)/(avg_recalls+avg_precisions)
print(max_node_idx)
print(f1, avg_recalls, avg_precisions)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='CF rank method for group expansion')
parser.add_argument('--top-k', type=int, default=5)
parser.add_argument('--city', type=str, default='SF',
choices=['NY', 'SF'])
parser.add_argument('--dataset', type=str, default='aminer',
choices=['meetup', 'aminer'])
parser.add_argument('--user-node', type=int, default=0,
help='integer which user node id is represented in')
parser.add_argument('--user-size', type=int, default=399212,
help='maximum user node id')
parser.add_argument('--embeddings', type=str,
help='graphvite embedding pickle')
args = parser.parse_args()
if args.dataset == 'aminer':
dataset = Aminer()
else:
dataset = Meetup(city_id=locations_id[args.city])
data_size = len(dataset)
train_split, val, test = int(data_size*0.7), int(data_size*0.1), int(data_size*0.2)
indexes = np.array(list(range(data_size)), dtype=np.long)[train_split+val:]
print(indexes[:10])
val_dataset = dataset[list(indexes)]
with open(args.embeddings, 'rb') as f:
embeddings = pickle.load(f)
similarity_evaluation(val_dataset, embeddings,
user_node_id=args.user_node, user_size=args.user_size,
top_k=args.top_k)
| [
"zhirui09400@gmail.com"
] | zhirui09400@gmail.com |
6ffd1941c7d2a0e2f96614bc4e481f9522b2e3f1 | 53e7a7eaa898ed070e1bcf8ecee6cfe37454ceb4 | /network/tf_net.py | 9d0a7338fe4d3472c847d6af718e36b0a981e93a | [] | no_license | joshualley/VoiceScope | dcf0026068ac5f981ed70f69280f117c124dbee2 | cf6d09aef473bdffdb4e77e53ce2cc25e81b91f2 | refs/heads/master | 2020-03-23T23:30:47.770874 | 2018-07-25T02:55:19 | 2018-07-25T02:55:19 | 142,238,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,393 | py | import time
import numpy as np
import tensorflow as tf
import os
from configuration.config import PATH
from utils.utils import read_fontnames, load_words
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def read_from_npy(batch_size, num=1):
path = PATH.DATASET_DIR+'train_one.npy'
features = np.load(path).astype(np.float32)
labels = np.array(list(range(3557))*num)
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.shuffle(1000).repeat().batch(batch_size)
iterator = dataset.make_one_shot_iterator()
return iterator
def read_record(path, epochs, batch_size):
def paser(record):
features = tf.parse_single_example(
record,
features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string),
}
)
label = tf.cast(features['label'], tf.float32)
image = tf.decode_raw(features['img_raw'], tf.float32)
return image, label
dataset = tf.data.TFRecordDataset(path)
dataset = dataset.map(paser)
dataset = dataset.shuffle(1000).repeat(epochs).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
return iterator
def W(shape):
init = tf.truncated_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(init)
def B(shape):
init = tf.constant(0.1)
return tf.Variable(init)
def fc_layer(x, ws, bs):
w = W(ws)
b = B(bs)
net = tf.matmul(x, w) + b
return tf.nn.relu(net)
def conv2d_layer(x, ws, bs):
w = W(ws)
b = B(bs)
net = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b
net = tf.nn.relu(net)
net = tf.nn.max_pool(net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
return net
def batch_norm_layer(x):
fc_mean, fc_var = tf.nn.moments(x, axes=[0])
scale = tf.Variable(tf.ones(x.shape[-1:]))
shift = tf.Variable(tf.zeros(x.shape[-1:]))
x = tf.nn.batch_normalization(x, fc_mean, fc_var, shift, scale, variance_epsilon=0.001)
return x
def built_net(x):
f_size = 32
k_size = 5
x = tf.reshape(x, (-1,48,48,1))
net = conv2d_layer(x, ws=[k_size,k_size,1,f_size], bs=[f_size])
net = tf.nn.dropout(net, 0.2)
net = conv2d_layer(net, ws=[k_size,k_size,f_size,f_size], bs=[f_size])
net = tf.nn.dropout(net, 0.6)
net = tf.reshape(net,shape=[-1, 12*12*f_size])
net = fc_layer(net, ws=[12*12*f_size, 1024], bs=[1024])
net = batch_norm_layer(net)
net = tf.nn.dropout(net, 0.6)
out = tf.nn.softmax(fc_layer(net, ws=[1024, 3557], bs=[3557]))
return out
def optimizer(y_in, y_out):
y_out = tf.reshape(y_out, (-1, 3557))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_in, logits=y_out))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y_in, 1), tf.argmax(y_out, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return train_op, loss, acc
import cv2
import matplotlib.pyplot as plt
def main():
words = load_words()
sess = tf.InteractiveSession()
path = PATH.DATASET_DIR+'test7x1x2.record'
#iterator = read_record(path, epochs=10, batch_size=1024)
iterator = read_from_npy(batch_size=1024)
x, y = iterator.get_next()
y = tf.cast(y, tf.int64)
x = tf.reshape(x, (-1, 48,48,1))
y_in = tf.one_hot(y, 3557, dtype=tf.int64)
y_out = built_net(x)
train_op, loss, acc = optimizer(y_in, y_out)
tf.global_variables_initializer().run()
t0 = time.time()
for epoch in range(1, 100):
for i in range(235):
sess.run(train_op)
if i%10 == 0:
x_,y_ = sess.run([y,y_out])
print('in: %s\nout: %s' %(x_[1:10], np.argmax(y_, 1)[1:10]))
losses, accurary = sess.run([loss, acc])
print('[==>] Epoch: %d \tStep: %d \tLoss: %s \tAcc: %s \tTime: %ss'
%(epoch, i, losses, accurary, round(time.time()-t0, 2)))
t0 = time.time()
"""
for k in range(len(x_)):
cv2.imshow('i', np.asarray(x_[k]))
cv2.waitKey(0)
print('key:',words[y_[k]])
sums = []
for m in range(48):
s = 0
for n in range(48):
s += x_[k][m,n]
sums.append(s)
plt.plot(sums)
plt.show()
"""
if __name__ == "__main__":
main()
| [
"ly.silent.valley@gmail.com"
] | ly.silent.valley@gmail.com |
af0460b6768e958b118d4f766a5792cc8bd5d712 | 08a77e9d1b67362430c404d044d1cff831a07dfa | /2021/day-09/solution1.py | 87ef0bce3fce0ad478cecdaf8dcf9bed61b938dd | [
"Unlicense"
] | permissive | TeddyDD/advent-of-code | 439974f31e30fc7ed541f56b2adf8bc365b91c86 | 6d3ba76e7312033a1255225f17a9ef580e066496 | refs/heads/master | 2022-12-11T10:39:40.339521 | 2022-12-06T07:55:53 | 2022-12-06T07:55:53 | 225,712,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | import sys
map = [y for y in sys.stdin]
| [
"vision360.daniel@gmail.com"
] | vision360.daniel@gmail.com |
cea5223ecb0de449aa5326b907fde8616c1df06b | 5d0ec686240860bc3f0c611fee8fe0188536b15c | /plot_utils.py | 4a3819a588036390264f528a21278b825e387f18 | [] | no_license | kareemelbadry/projection_effects | 87212d7212c5b83f446f6be005ffc77e8d7991e1 | 3c8a9f6f1f852dcc4150ff379b3fd60cc40ffe9f | refs/heads/master | 2020-04-03T21:45:39.294743 | 2018-10-31T16:31:42 | 2018-10-31T16:31:42 | 155,580,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | '''
very simple shortcuts for making plots
'''
import matplotlib.pyplot as plt
def new_column(nrow, xlim):
'''
a single column with multiple rows
'''
f, ax = plt.subplots(nrow, 1, figsize = (6, 1+3*nrow))
plt.subplots_adjust(hspace = 0)
for i in range(nrow):
ax[i].set_xlim(xlim)
ax[i].tick_params(labelsize = 18)
if i != nrow - 1:
ax[i].set_xticklabels([])
return f, ax | [
"kareem@Kareems-MacBook-Pro.local"
] | kareem@Kareems-MacBook-Pro.local |
cd010d7bf747349b82d9fd068f3b210bc383404a | d00890e73e5c16b4bab8528fc061d7c54ceeceac | /myproject/__init__.py | 000d0202daa9e5f3cc314bf9414333bebc2f5e0c | [] | no_license | satishkr39/PuppyAdoptionSite | 43d0b23e0b052de69833a3e5f129fac1e1246261 | a07ec09e64cc8f23701a3f7542989f7be050f3da | refs/heads/master | 2023-06-10T19:28:28.154570 | 2021-07-05T10:15:20 | 2021-07-05T10:15:20 | 383,097,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | # __init__.py it is used to set up db and our application
from flask import Flask, render_template, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import os
# setting our flask application
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
Migrate(app, db)
# register bluprint is done once after db is being found by our application.
from myproject.puppies.views import puppies_blueprint
from myproject.owners.views import owners_blueprints
app.register_blueprint(owners_blueprints, url_prefix='/owners') # url_prefix is used in URL bar of browser.
app.register_blueprint(puppies_blueprint, url_prefix='/puppies')
| [
"satishkr639@gmail.com"
] | satishkr639@gmail.com |
7e6aaa5e69e03122dd3e0dec7476a9bc38d155c2 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Difference/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Difference_MovingMedian_7__20.py | 3857b72f05dfa8e9071a105b318bb037455121e2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 268 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
54031cba37a673bde336fdea678bd62f203009b0 | ac15c013c340e96240a866fb9f0cdec953227e9d | /Module 1/Unit_1-2/1/2.py | bfa9a5cd5e9cf2f1d1c88a36253516968465f2b3 | [] | no_license | NeroSH/python-beginner | 3c47cd11a4784bf891f40693b4fe21eaf55e6ed6 | 71f9dd350e53f8897e1f8a00228df0604c1968b6 | refs/heads/master | 2022-12-04T19:05:16.509630 | 2020-08-25T10:28:17 | 2020-08-25T10:28:17 | 286,216,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | # Исправь программу
# Сделай так, чтобы программа напечатала "How do you do?"
print(How do you do?) | [
"mustafo.xon@gmail.com"
] | mustafo.xon@gmail.com |
e39687a83d7901840b63d3e947089e5c408f944d | 11137bde91389c04a95df6f6fdaf64f7f49f5f80 | /secondTest/introduction_MIT2/5_1.py | 47272dfbccb9fef4086d3fd1feaa61aff6aa3068 | [] | no_license | starschen/learning | cf3c5a76c867567bce73e9cacb2cf0979ba053d9 | 34decb8f9990117a5f40b8db6dba076a7f115671 | refs/heads/master | 2020-04-06T07:02:56.444233 | 2016-08-24T08:11:49 | 2016-08-24T08:11:49 | 39,417,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #encoding:utf8
def findDivisors(n1,n2):
divisors=()
for i in range(1,min(n1,n2)+1):
if n1%i==0 and n2%i==0:
divisors=divisors+(i,)
return divisors
divisors=findDivisors(20,200)
# print divisors
total=0
for d in divisors:
total+=d
# print total
def findExtremeDivisors(n1, n2):
    """Return (smallest, largest) common divisor of n1 and n2 greater than 1.

    Returns (None, None) when the two numbers share no divisor larger
    than 1 (i.e. they are coprime).
    """
    minVal, maxVal = None, None
    for i in range(2, min(n1, n2) + 1):
        if n1 % i == 0 and n2 % i == 0:
            # Candidates are visited in increasing order, so the first
            # common divisor found is the smallest...
            if minVal is None:
                minVal = i
            # ...and the most recently found one is the largest so far.
            maxVal = i
    return (minVal, maxVal)
# NOTE(review): Python 2 print statement -- this file targets Python 2.
print findExtremeDivisors(100,200)
| [
"stars_chenjiao@163.com"
] | stars_chenjiao@163.com |
c3a82f8ae4512e4c66cb1f0c074facd96d2a4bf3 | e89a3a203bd4b433c0f1acc05b2e3c89a7020a9f | /src/robots/descriptions/cheetah_simu/cheetah_sim/ros_package/cheetah_core/src/leg_control/__init__.py | 88699440d88fb79604aa080b53b86fcbe3df9d3b | [] | no_license | JJHbrams/QuadrupedMotionPlanning | 03e5dfdd8410710a26553441aa557e9585b5f10f | cf83de83776b8215b2e94dbc2afa5f59039e6d4d | refs/heads/master | 2023-01-12T11:30:38.202023 | 2020-11-11T10:52:20 | 2020-11-11T10:52:20 | 311,865,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | __all__=["legController"]
| [
"dpswpfrhdid@naver.com"
] | dpswpfrhdid@naver.com |
2f56ea2d6cedf1b7304b6de0a323b6d4b0d96f6b | 382d0c69a9c4f7da95ef057e9babd657fd7963e0 | /dog_classes.py | 1f0f6f9d66b533e64c03373235a1cfc723c456aa | [] | no_license | mewen38/mikes_repo | 9503523481218486f52365f05d7aa9fb3d0263fe | 64e086317ed6663bf470708f3cee9d03777dbb7e | refs/heads/master | 2022-12-04T18:51:46.215587 | 2020-07-29T04:49:04 | 2020-07-29T05:04:05 | 202,954,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | # I remember feeling very uncomfortable with classes, so I'm forcing myself to mess around after reading an article
class animal:
    """Base class for a named, colored creature that can speak and dance."""

    def __init__(self, name, color, type):
        # Record the identifying attributes under the shared animal_* names
        # so every subclass exposes the same fields.
        self.animal_name, self.animal_color, self.animal_type = name, color, type

    def speak(self):
        # Generic animal voice; subclasses override this with their own line.
        print('happy thoughts, happy thoughts, boy im getting mighty sick of this')

    def dance(self):
        # Generic wiggle, personalised with the animal's name.
        print('{} does an animal class wiggle'.format(self.animal_name))
class dog(animal):
    """Animal subclass that barks and knows one signature dance move."""

    def __init__(self, name, color, type, special_dance):
        # Let the base class record the common attributes first, then
        # remember this dog's personal dance.
        super().__init__(name, color, type)
        self.dog_special_dance = special_dance

    def speak(self):
        print('woof woof, bow-wow')

    def dance(self):
        # Override the generic wiggle with the dog's own special dance.
        template = '{}, as a dog, knows a special overriding animal subclass (dog) {} wiggle'
        print(template.format(self.animal_name, self.dog_special_dance))
class cat(animal):
    """Animal subclass whose only specialisation is what it says."""

    def speak(self):
        line = "the toppings contain potassium benzoate .................. that's bad"
        print(line)
class princess(animal):
    """Animal subclass with an extra area-of-expertise attribute."""

    def __init__(self, name, color, type, area_of_expertise):
        # Delegate the shared attributes to the base class, then store
        # the princess-specific field.
        super().__init__(name, color, type)
        self.princess_area_of_expertise = area_of_expertise

    def speak(self):
        print("i wrote this program, im a princess, and i love kibble")
# Demo: instantiate each class and exercise speak()/dance() to show
# attribute access, inheritance, and method overriding in action.
bailey = animal('bailey','red', 'red mage dog')
weezer = dog('weezer','grey', 'wizard dog', 'spicy bachata')
zelda = princess('zelda', 'red', 'princess', 'java, python, react, go, npm, pyenv, aws, airflow, sql, redshift, s3, s3 glacier, spark, kibble')
pepper = cat('pepper', 'black', 'cat')
# Base class: default speak() and dance().
print(bailey,bailey.animal_name, bailey.animal_type)
bailey.speak()
bailey.dance()
# dog overrides both speak() and dance().
print(weezer, weezer.animal_name, weezer.animal_type)
weezer.speak()
weezer.dance()
# princess overrides speak() only.
print(zelda, zelda.animal_name, zelda.animal_color, zelda.animal_type, zelda.princess_area_of_expertise)
zelda.speak()
# cat overrides speak() only.
print(pepper, pepper.animal_name, pepper.animal_color, pepper.animal_type)
pepper.speak() | [
"michael.ewen@compass.com"
] | michael.ewen@compass.com |
49ba3fc51d417b778a7e6f918187d160af935600 | a737f2f6b731afa8c041e8f06322730069193132 | /start_game.py | 692513f10d6eff56ce7e09c65eda2be91eb78e75 | [] | no_license | qinjiang03/PokerAgent | 514c01566d4dfaa1667e3a0f097ed3b9502c8428 | 1d7d50362393d4fd29972a2357fc056fb97524e8 | refs/heads/master | 2020-04-29T02:50:33.850260 | 2019-04-08T11:09:04 | 2019-04-08T11:09:04 | 175,785,512 | 0 | 0 | null | 2019-04-08T08:07:09 | 2019-03-15T09:05:53 | Python | UTF-8 | Python | false | false | 1,611 | py | from pypokerengine.api.game import setup_config, start_poker
from randomplayer import RandomPlayer
from raise_player import RaisedPlayer
from custom.minimax_player import MiniMaxPlayer
from custom.honest_player import HonestPlayer
from custom.honest_player2 import HonestPlayer2
from custom.call_player import CallPlayer
import pprint
from custom.logging_functions import startLogging, stopLogging
import os, datetime, logging
import numpy as np
import itertools
#TODO:config the config as our wish
# NOTE(review): the commented-out code below is a previous experiment that
# swept weight combinations (w1, w2, w3) for HonestPlayer2 and logged the
# resulting stacks; it is kept here for reference.
# logFile = os.path.join('log', "log_{}.log".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
# logger = startLogging(logFile)
# w1s = np.arange(0.3,0.5,0.05)
# w2s = np.arange(0.7,0.85,0.05)
# w3s = np.arange(0.2,0.5,0.05)
# # w2s = [0.75]
# # w3s = [0.3]
# w_list = list(itertools.product(w1s, w2s, w3s))
# results = []
# for w in w_list:
# Match setup: 100 rounds, 10000 starting stack, small blind of 10.
config = setup_config(max_round=100, initial_stack=10000, small_blind_amount=10)
# Register the two agents that compete in this match.
config.register_player(name="RaisePlayer", algorithm=RaisedPlayer())
# config.register_player(name="CallPlayer", algorithm=CallPlayer())
# config.register_player(name="HonestPlayer1", algorithm=HonestPlayer())
config.register_player(name="HonestPlayer2", algorithm=HonestPlayer())
# config.register_player(name="MiniMaxPlayer", algorithm=MiniMaxPlayer())
# config.register_player(name="HonestPlayer2", algorithm=HonestPlayer2(w[0], w[1], w[2]))
# Run the match silently (verbose=0) and print the final result
# (final player stacks etc.).
game_result = start_poker(config, verbose=0)
print(game_result)
# result = list(w) + [player["stack"] for player in game_result["players"]]
# logging.info(result)
# results.append(result)
# stopLogging(logger)
| [
"qinjiang03@gmail.com"
] | qinjiang03@gmail.com |
a1b7503c84df5db765cd1c0cf077084232aec8f4 | 88386ed170ab6d496b87ae3e8501130b92b82cf4 | /gui-framework/app.py | 681e45e0ed55088ee3d88f1942ab3b811ce99ac6 | [] | no_license | LiuYuancheng/tls_attack | f6186dc74704928ee883f1c1a9142d42954dc7ae | 3abe611d23a87aad9c35ad253e15bc06ff4bea35 | refs/heads/master | 2022-02-22T10:42:41.478189 | 2019-09-26T08:42:25 | 2019-09-26T08:42:25 | 221,579,890 | 2 | 0 | null | 2019-11-14T00:48:49 | 2019-11-14T00:48:48 | null | UTF-8 | Python | false | false | 1,369 | py | import os
import sys
import argparse
from template import Ui_MainWindow
from PyQt5 import QtCore, QtGui, QtWidgets
# Parse the three required directory arguments for the GUI.
parser = argparse.ArgumentParser()
# parser.add_argument('-r', '--rootdir', help='Input the root directory path containing the data in json format for all trained model. Typically, foo/bar/rnn-model/trained-rnn/', required=True)
parser.add_argument('-p', '--pcapdir', help='Input all directories to where pcap files are located', required=True)
parser.add_argument('-m', '--modeldir', help='Input the root directory of trained rnn models. Typically, foo/bar/rnn-model/trained-rnn', required=True)
parser.add_argument('-f', '--featuredir', help='Input the root directory of the feature cvs files with other supporting files. Typically, foo/bar/feature-extraction/extracted-features', required=True)
args = parser.parse_args()

# Search iteratively for all data.json files in the root directory
# (NOTE(review): superseded by passing directories explicitly; kept for reference)
# json_dirs = []
# for root, dirs, files in os.walk(args.rootdir):
#     for f in files:
#         if f == "data.json":
#             json_dirs.append(os.path.join(root, f))
pcap_dirs = args.pcapdir
model_dirs = args.modeldir
feature_dirs = args.featuredir

# Build the Qt application and hand the three directories to the main window.
app = QtWidgets.QApplication(sys.argv)
# app.resize(1838, 963)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow(pcap_dirs, model_dirs, feature_dirs)
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) | [
"yilong_tan@mymail.sutd.edu.sg"
] | yilong_tan@mymail.sutd.edu.sg |
6c42121e14c982c244c5e02c8719f1cf0456c50b | 00829e1ff78f73dab073a201d68139960c1d1922 | /tools/toolset/tool/rigging/beam/core/maths/color.py | dc40bd6d1cd95c9443bd68690d5e0cbba7ef7e09 | [] | no_license | liangyongg/Beam_Tools | a021ceb4187107508536c46726da5b9629ffd1cf | 21b5d06e660f058434e589ae4f672f96296b7540 | refs/heads/master | 2018-11-04T04:43:02.523654 | 2018-08-26T12:33:09 | 2018-08-26T12:33:09 | 115,005,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,953 | py | """Kraken - maths.color module.
Classes:
Color -- Color object.
"""
import random
import math
from rigging.beam.core.beam_system import bs
from math_object import MathObject
class Color(MathObject):
    """RGBA color object backed by a KL/RTVal 'Color' value.

    NOTE(review): the constructor body below is entirely commented out, so
    self._rtval is never created; every method that touches self._rtval
    will fail at runtime until that code is restored.  Left as-is pending
    confirmation of the intended backend wiring.
    """

    def __init__(self, r=0.0, g=0.0, b=0.0, a=1.0):
        """Initializes r, g b and a values for Color object."""

        super(Color, self).__init__()

        #if bs.getRTValTypeName(r) == 'Color':
        #    self._rtval = r
        #else:
        #    self._rtval = bs.rtVal('Color')
        #    if isinstance(r, Color):
        #        self.set(r=r.r, g=r.g, b=r.b, a=r.b)
        #    else:
        #        self.set(r=r, g=g, b=b, a=a)

    def __str__(self):
        """String representation of the Color object.

        Returns:
            str: String representation of the Color object."""

        stringRep = "Color("
        stringRep += str(self.r) + ","
        stringRep += str(self.g) + ","
        stringRep += str(self.b) + ","
        stringRep += str(self.a) + ")"

        return stringRep

    @property
    def r(self):
        """Gets red channel of this color.

        Returns:
            float: red channel of this color.

        """

        return self._rtval.r.getSimpleType()

    @r.setter
    def r(self, value):
        """Sets red channel from the input channel.

        Args:
            value (float): Value to set the red channel to.

        Returns:
            bool: True if successful.

        """

        self._rtval.r = bs.rtVal('Scalar', value)

        return True

    @property
    def g(self):
        """Gets green channel of this color.

        Returns:
            float: green channel of this color.

        """

        return self._rtval.g.getSimpleType()

    @g.setter
    def g(self, value):
        """Sets green channel from the input channel.

        Args:
            value (float): Value to set the green property as.

        Returns:
            bool: True if successful.

        """

        self._rtval.g = bs.rtVal('Scalar', value)

        return True

    @property
    def b(self):
        """Gets blue channel of this color.

        Returns:
            float: blue channel of this color.

        """

        return self._rtval.b.getSimpleType()

    @b.setter
    def b(self, value):
        """Sets blue channel from the input channel.

        Args:
            value (float): Value to set the blue property as.

        Returns:
            bool: True if successful.

        """

        self._rtval.b = bs.rtVal('Scalar', value)

        return True

    @property
    def a(self):
        """Gets alpha channel of this color.

        Returns:
            float: alpha channel of this color.

        """

        return self._rtval.a.getSimpleType()

    @a.setter
    def a(self, value):
        """Sets a channel from the input channel.

        Args:
            value (float): Value to set the a property as.

        Returns:
            bool: True if successful.

        """

        self._rtval.a = bs.rtVal('Scalar', value)

        # Fix: return True for consistency with the r/g/b setters.
        return True

    def __eq__(self, other):
        return self.equal(other)

    def __ne__(self, other):
        return not self.equal(other)

    def __add__(self, other):
        return self.add(other)

    def __sub__(self, other):
        return self.subtract(other)

    def __mul__(self, other):
        return self.multiply(other)

    def __div__(self, other):
        return self.divide(other)

    def clone(self):
        """Returns a clone of the Color.

        Returns:
            Color: The cloned Color

        """

        color = Color()
        color.r = self.r
        color.g = self.g
        color.b = self.b
        # Fix: the alpha channel was previously dropped when cloning.
        color.a = self.a

        return color

    def set(self, r, g, b, a):
        """Sets the r, g, b, and a value from the input values.

        Args:
            r (float): Value to set the r channel to.
            g (float): Value to set the g channel to.
            b (float): Value to set the b channel to.
            a (float): Value to set the a channel to.

        Returns:
            bool: True if successful.

        """

        self._rtval.set('', bs.rtVal('Scalar', r), bs.rtVal('Scalar', g),
                        bs.rtVal('Scalar', b), bs.rtVal('Scalar', a))

        return True

    def equal(self, other):
        """Checks equality of this color with another.

        Args:
            other (Color): other color to check equality with.

        Returns:
            bool: True if equal.

        """

        return self._rtval.equal('Boolean', other._rtval).getSimpleType()

    def almostEqual(self, other, precision):
        """Checks almost equality of this Color with another.

        Args:
            other (Color): other value to check equality with.
            precision (float): Precision value.

        Returns:
            bool: True if almost equal.

        """

        return self._rtval.almostEqual('Boolean', other._rtval,
                                       bs.rtVal('Scalar', precision)).getSimpleType()

    def component(self, i):
        """Gets the component of this Color by index.

        Args:
            i (int): index of the component to return.

        Returns:
            float: component of this Color.

        """

        return self._rtval.component('Scalar', bs.rtVal('Size', i)).getSimpleType()

    def setComponent(self, i, v):
        """Sets the component of this Color by index.

        Args:
            i (int): index of the component to set.
            v (float): Value to set component as.

        Returns:
            bool: True if successful.

        """

        return self._rtval.setComponent('', bs.rtVal('Size', i),
                                        bs.rtVal('Scalar', v))

    def add(self, other):
        """Overload method for the add operator.

        Args:
            other (Color): other color to add to this one.

        Returns:
            Color: New Color of the sum of the two Color's.

        """

        return Color(self._rtval.add('Color', other._rtval))

    def subtract(self, other):
        """Overload method for the subtract operator.

        Args:
            other (Color): other color to subtract from this one.

        Returns:
            Color: New Color of the difference of the two Color's.

        """

        return Color(self._rtval.subtract('Color', other._rtval))

    def multiply(self, other):
        """Overload method for the multiply operator.

        Args:
            other (Color): other color to multiply from this one.

        Returns:
            Color: New Color of the product of the two Color's.

        """

        return Color(self._rtval.multiply('Color', other._rtval))

    def divide(self, other):
        """Divides this color and an other.

        Args:
            other (Color): other color to divide by.

        Returns:
            Color: Quotient of the division of this color by the other.

        """

        return Color(self._rtval.divide('Color', other._rtval))

    def multiplyScalar(self, other):
        """Product of this color and a scalar.

        Args:
            other (float): Scalar value to multiply this color by.

        Returns:
            Color: Product of the multiplication of the scalar and this color.

        """

        return Color(self._rtval.multiplyScalar('Color', bs.rtVal('Scalar', other)))

    def divideScalar(self, other):
        """Divides this color and a scalar.

        Args:
            other (float): Value to divide this color by.

        Returns:
            Color: Quotient of the division of the color by the scalar.

        """

        return Color(self._rtval.divideScalar('Color', bs.rtVal('Scalar', other)))

    def linearInterpolate(self, other, t):
        """Linearly interpolates this color with another one based on a scalar
        blend value (0.0 to 1.0).

        Args:
            other (Color): color to blend to.
            t (float): Blend value.

        Returns:
            Color: New color blended between this and the input color.

        """

        # NOTE(review): every other binary method passes other._rtval, but
        # here the Color wrapper itself is passed to bs.rtVal -- confirm
        # whether bs.rtVal accepts the wrapper or this should be other._rtval.
        return Color(self._rtval.linearInterpolate('Color', bs.rtVal('Color', other), bs.rtVal('Scalar', t)))

    @classmethod
    def randomColor(cls, gammaAdjustment):
        """ Generates a random color based on a seed and offset with gamma adjustment.

        NOTE(review): gammaAdjustment is required here even though the
        docstring examples imply it is optional; the seed/offset mentioned
        below no longer exist as parameters -- confirm intended signature.

        Example:

            # Generate a regular random color
            color = randomColor(seed)

            # Generate a light random color
            color = randomColor(seed, 0.5)

            # Generate a dark random color
            color = randomColor(seed, -0.5)

        Args:
            gammaAdjustment (float): A gamma adjustment to offset the range of the generated color.

        Returns:
            Color: New random color.

        """

        def lerp(val1, val2, t):
            return val1 + ((val2 - val1) * t)

        if(gammaAdjustment > 0.0001):
            # Generate a light color with values between gammaAdjustment and 1.0
            return Color(
                lerp(gammaAdjustment, 1.0, random.random()),
                lerp(gammaAdjustment, 1.0, random.random()),
                lerp(gammaAdjustment, 1.0, random.random())
            )
        elif(gammaAdjustment < -0.0001):
            # Generate a dark color with values between 0.0 and 1.0-gammaAdjustment
            return Color(
                lerp(0.0, 1.0+gammaAdjustment, random.random()),
                lerp(0.0, 1.0+gammaAdjustment, random.random()),
                lerp(0.0, 1.0+gammaAdjustment, random.random())
            )
        else:
            # We add an arbitrary offset to the provided offset so that each color
            # generated based on the seed and offset is unique.
            return Color(
                random.random(),
                random.random(),
                random.random()
            )
| [
"hhhh"
] | hhhh |
9a987096b65f6e71be52e794d735cf712074ae9b | 28938a381a60249062d18685c36ddaa5bd4069e2 | /Fibanocci.py | 264239845db697eb77bf0f5181b20e56fb8e4b66 | [] | no_license | AmitH2000/Mycap | ab108024da938c4616ce271e3ec2ae2b0db7b528 | 881e8603474be3552464ec9f6d5a362b0fa7e73a | refs/heads/master | 2021-05-26T03:51:24.033224 | 2020-04-12T10:57:34 | 2020-04-12T10:57:34 | 254,040,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | num=int(input("Enter the limit :"))
def fibonacci_upto(limit):
    """Yield the Fibonacci numbers that are strictly less than limit."""
    a, b = 0, 1
    while a < limit:
        yield a
        a, b = b, a + b

# Print the sequence comma-separated (trailing comma preserved from the
# original output format), followed by a newline.  `num` is the user input
# read on the preceding line.
for value in fibonacci_upto(num):
    print(value, end=",")
print()
| [
"noreply@github.com"
] | noreply@github.com |
a090dee8f11f5d2e00a19ab4bc1ac21126f49a4a | 1ea0e2b4f064ba0de45a73c527ee89a36771e8fc | /tests/sentry/api/endpoints/test_project_create_sample.py | 89581e77fde5be59d789372f824b173ee52fe4f8 | [
"BSD-2-Clause"
] | permissive | atlassian/sentry | 6775e59c317f20f96982e91c2b3c88c02ecbb56b | b937615079d7b24dc225a83b99b1b65da932fc66 | refs/heads/master | 2023-08-27T15:45:47.699173 | 2017-09-18T22:14:55 | 2017-09-18T22:14:55 | 103,999,066 | 1 | 5 | BSD-3-Clause | 2023-04-01T07:49:37 | 2017-09-18T22:38:18 | Python | UTF-8 | Python | false | false | 4,940 | py | from __future__ import absolute_import
from django.core.urlresolvers import reverse
import json
from sentry.testutils import APITestCase
class ProjectCreateSampleTest(APITestCase):
    """Verify that POSTing to the project-create-sample endpoint creates a
    sample event (group) for a variety of project platforms.

    The nine test methods previously duplicated the same create/POST/assert
    sequence; they now share a single private helper so the platform under
    test is the only varying input.
    """

    def setUp(self):
        self.login_as(user=self.user)
        self.team = self.create_team()

    def _assert_sample_created(self, platform=None):
        """Create a project (optionally with `platform`), POST to the
        create-sample endpoint, and assert a sample group was returned."""
        if platform is None:
            project = self.create_project(team=self.team, name='foo')
        else:
            project = self.create_project(team=self.team, name='foo', platform=platform)

        url = reverse(
            'sentry-api-0-project-create-sample',
            kwargs={
                'organization_slug': project.organization.slug,
                'project_slug': project.slug,
            }
        )
        response = self.client.post(url, format='json')

        assert response.status_code == 200, response.content
        assert 'groupID' in json.loads(response.content)

    def test_simple(self):
        self._assert_sample_created()

    def test_project_platform(self):
        self._assert_sample_created(platform='javascript-react')

    def test_cocoa(self):
        self._assert_sample_created(platform='cocoa')

    def test_java(self):
        self._assert_sample_created(platform='java')

    def test_javascript(self):
        self._assert_sample_created(platform='javascript')

    def test_php(self):
        self._assert_sample_created(platform='php')

    def test_python(self):
        self._assert_sample_created(platform='python')

    def test_reactnative(self):
        self._assert_sample_created(platform='react-native')

    def test_ruby(self):
        self._assert_sample_created(platform='ruby')
| [
"noreply@github.com"
] | noreply@github.com |
f72ea5adb6bb93fb22ed43dc90bdc32c3d350e5e | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_pkg_firewall_policy6.py | 4e8e6249ef88271ad42c8c22653a3b534363cbf7 | [] | no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80,550 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_pkg_firewall_policy6
short_description: Configure IPv6 policies.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
pkg:
description: the parameter (pkg) in requested url
type: str
required: true
pkg_firewall_policy6:
description: the top level parameters set
required: false
type: dict
suboptions:
action:
type: str
description: 'Policy action (allow/deny/ipsec).'
choices:
- 'deny'
- 'accept'
- 'ipsec'
- 'ssl-vpn'
app-category:
type: str
description: 'Application category ID list.'
application:
description: no description
type: int
application-list:
type: str
description: 'Name of an existing Application list.'
auto-asic-offload:
type: str
description: 'Enable/disable policy traffic ASIC offloading.'
choices:
- 'disable'
- 'enable'
av-profile:
type: str
description: 'Name of an existing Antivirus profile.'
comments:
type: str
description: 'Comment.'
custom-log-fields:
type: str
description: 'Log field index numbers to append custom log fields to log messages for this policy.'
devices:
type: str
description: 'Names of devices or device groups that can be matched by the policy.'
diffserv-forward:
type: str
description: 'Enable to change packets DiffServ values to the specified diffservcode-forward value.'
choices:
- 'disable'
- 'enable'
diffserv-reverse:
type: str
description: 'Enable to change packets reverse (reply) DiffServ values to the specified diffservcode-rev value.'
choices:
- 'disable'
- 'enable'
diffservcode-forward:
type: str
description: 'Change packets DiffServ to this value.'
diffservcode-rev:
type: str
description: 'Change packets reverse (reply) DiffServ to this value.'
dlp-sensor:
type: str
description: 'Name of an existing DLP sensor.'
dscp-match:
type: str
description: 'Enable DSCP check.'
choices:
- 'disable'
- 'enable'
dscp-negate:
type: str
description: 'Enable negated DSCP match.'
choices:
- 'disable'
- 'enable'
dscp-value:
type: str
description: 'DSCP value.'
dsri:
type: str
description: 'Enable DSRI to ignore HTTP server responses.'
choices:
- 'disable'
- 'enable'
dstaddr:
type: str
description: 'Destination address and address group names.'
dstaddr-negate:
type: str
description: 'When enabled dstaddr specifies what the destination address must NOT be.'
choices:
- 'disable'
- 'enable'
dstintf:
type: str
description: 'Outgoing (egress) interface.'
firewall-session-dirty:
type: str
description: 'How to handle sessions if the configuration of this firewall policy changes.'
choices:
- 'check-all'
- 'check-new'
fixedport:
type: str
description: 'Enable to prevent source NAT from changing a sessions source port.'
choices:
- 'disable'
- 'enable'
global-label:
type: str
description: 'Label for the policy that appears when the GUI is in Global View mode.'
groups:
type: str
description: 'Names of user groups that can authenticate with this policy.'
icap-profile:
type: str
description: 'Name of an existing ICAP profile.'
inbound:
type: str
description: 'Policy-based IPsec VPN: only traffic from the remote network can initiate a VPN.'
choices:
- 'disable'
- 'enable'
ippool:
type: str
description: 'Enable to use IP Pools for source NAT.'
choices:
- 'disable'
- 'enable'
ips-sensor:
type: str
description: 'Name of an existing IPS sensor.'
label:
type: str
description: 'Label for the policy that appears when the GUI is in Section View mode.'
logtraffic:
type: str
description: 'Enable or disable logging. Log all sessions or security profile sessions.'
choices:
- 'disable'
- 'enable'
- 'all'
- 'utm'
logtraffic-start:
type: str
description: 'Record logs when a session starts and ends.'
choices:
- 'disable'
- 'enable'
mms-profile:
type: str
description: 'Name of an existing MMS profile.'
name:
type: str
description: 'Policy name.'
nat:
type: str
description: 'Enable/disable source NAT.'
choices:
- 'disable'
- 'enable'
natinbound:
type: str
description: 'Policy-based IPsec VPN: apply destination NAT to inbound traffic.'
choices:
- 'disable'
- 'enable'
natoutbound:
type: str
description: 'Policy-based IPsec VPN: apply source NAT to outbound traffic.'
choices:
- 'disable'
- 'enable'
np-accelation:
type: str
description: 'Enable/disable UTM Network Processor acceleration.'
choices:
- 'disable'
- 'enable'
outbound:
type: str
description: 'Policy-based IPsec VPN: only traffic from the internal network can initiate a VPN.'
choices:
- 'disable'
- 'enable'
per-ip-shaper:
type: str
description: 'Per-IP traffic shaper.'
policyid:
type: int
description: 'Policy ID.'
poolname:
type: str
description: 'IP Pool names.'
profile-group:
type: str
description: 'Name of profile group.'
profile-protocol-options:
type: str
description: 'Name of an existing Protocol options profile.'
profile-type:
type: str
description: 'Determine whether the firewall policy allows security profile groups or single profiles only.'
choices:
- 'single'
- 'group'
replacemsg-override-group:
type: str
description: 'Override the default replacement message group for this policy.'
rsso:
type: str
description: 'Enable/disable RADIUS single sign-on (RSSO).'
choices:
- 'disable'
- 'enable'
schedule:
type: str
description: 'Schedule name.'
send-deny-packet:
type: str
description: 'Enable/disable return of deny-packet.'
choices:
- 'disable'
- 'enable'
service:
type: str
description: 'Service and service group names.'
service-negate:
type: str
description: 'When enabled service specifies what the service must NOT be.'
choices:
- 'disable'
- 'enable'
session-ttl:
type: int
description: 'Session TTL in seconds for sessions accepted by this policy. 0 means use the system default session TTL.'
spamfilter-profile:
type: str
description: 'Name of an existing Spam filter profile.'
srcaddr:
type: str
description: 'Source address and address group names.'
srcaddr-negate:
type: str
description: 'When enabled srcaddr specifies what the source address must NOT be.'
choices:
- 'disable'
- 'enable'
srcintf:
type: str
description: 'Incoming (ingress) interface.'
ssl-mirror:
type: str
description: 'Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).'
choices:
- 'disable'
- 'enable'
ssl-mirror-intf:
type: str
description: 'SSL mirror interface name.'
ssl-ssh-profile:
type: str
description: 'Name of an existing SSL SSH profile.'
status:
type: str
description: 'Enable or disable this policy.'
choices:
- 'disable'
- 'enable'
tags:
type: str
description: 'Names of object-tags applied to this policy.'
tcp-mss-receiver:
type: int
description: 'Receiver TCP maximum segment size (MSS).'
tcp-mss-sender:
type: int
description: 'Sender TCP maximum segment size (MSS).'
tcp-session-without-syn:
type: str
description: 'Enable/disable creation of TCP session without SYN flag.'
choices:
- 'all'
- 'data-only'
- 'disable'
timeout-send-rst:
type: str
description: 'Enable/disable sending RST packets when TCP sessions expire.'
choices:
- 'disable'
- 'enable'
traffic-shaper:
type: str
description: 'Reverse traffic shaper.'
traffic-shaper-reverse:
type: str
description: 'Reverse traffic shaper.'
url-category:
type: str
description: 'URL category ID list.'
users:
type: str
description: 'Names of individual users that can authenticate with this policy.'
utm-status:
type: str
description: 'Enable AV/web/ips protection profile.'
choices:
- 'disable'
- 'enable'
uuid:
type: str
description: 'Universally Unique Identifier (UUID; automatically assigned but can be manually reset).'
vlan-cos-fwd:
type: int
description: 'VLAN forward direction user priority: 255 passthrough, 0 lowest, 7 highest'
vlan-cos-rev:
type: int
description: 'VLAN reverse direction user priority: 255 passthrough, 0 lowest, 7 highest'
voip-profile:
type: str
description: 'Name of an existing VoIP profile.'
vpntunnel:
type: str
description: 'Policy-based IPsec VPN: name of the IPsec VPN Phase 1.'
webfilter-profile:
type: str
description: 'Name of an existing Web filter profile.'
anti-replay:
type: str
description: 'Enable/disable anti-replay check.'
choices:
- 'disable'
- 'enable'
app-group:
type: str
description: 'Application group names.'
cifs-profile:
type: str
description: 'Name of an existing CIFS profile.'
dnsfilter-profile:
type: str
description: 'Name of an existing DNS filter profile.'
emailfilter-profile:
type: str
description: 'Name of an existing email filter profile.'
http-policy-redirect:
type: str
description: 'Redirect HTTP(S) traffic to matching transparent web proxy policy.'
choices:
- 'disable'
- 'enable'
inspection-mode:
type: str
description: 'Policy inspection mode (Flow/proxy). Default is Flow mode.'
choices:
- 'proxy'
- 'flow'
np-acceleration:
type: str
description: 'Enable/disable UTM Network Processor acceleration.'
choices:
- 'disable'
- 'enable'
ssh-filter-profile:
type: str
description: 'Name of an existing SSH filter profile.'
ssh-policy-redirect:
type: str
description: 'Redirect SSH traffic to matching transparent proxy policy.'
choices:
- 'disable'
- 'enable'
tos:
type: str
description: 'ToS (Type of Service) value used for comparison.'
tos-mask:
type: str
description: 'Non-zero bit positions are used for comparison while zero bit positions are ignored.'
tos-negate:
type: str
description: 'Enable negated TOS match.'
choices:
- 'disable'
- 'enable'
vlan-filter:
type: str
description: 'Set VLAN filters.'
waf-profile:
type: str
description: 'Name of an existing Web application firewall profile.'
webcache:
type: str
description: 'Enable/disable web cache.'
choices:
- 'disable'
- 'enable'
webcache-https:
type: str
description: 'Enable/disable web cache for HTTPS.'
choices:
- 'disable'
- 'enable'
webproxy-forward-server:
type: str
description: 'Web proxy forward server name.'
webproxy-profile:
type: str
description: 'Webproxy profile name.'
fsso-groups:
type: str
description: 'Names of FSSO groups.'
decrypted-traffic-mirror:
type: str
description: 'Decrypted traffic mirror.'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure IPv6 policies.
fmgr_pkg_firewall_policy6:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
pkg: <your own value>
state: <value in [present, absent]>
pkg_firewall_policy6:
action: <value in [deny, accept, ipsec, ...]>
app-category: <value of string>
application: <value of integer>
application-list: <value of string>
auto-asic-offload: <value in [disable, enable]>
av-profile: <value of string>
comments: <value of string>
custom-log-fields: <value of string>
devices: <value of string>
diffserv-forward: <value in [disable, enable]>
diffserv-reverse: <value in [disable, enable]>
diffservcode-forward: <value of string>
diffservcode-rev: <value of string>
dlp-sensor: <value of string>
dscp-match: <value in [disable, enable]>
dscp-negate: <value in [disable, enable]>
dscp-value: <value of string>
dsri: <value in [disable, enable]>
dstaddr: <value of string>
dstaddr-negate: <value in [disable, enable]>
dstintf: <value of string>
firewall-session-dirty: <value in [check-all, check-new]>
fixedport: <value in [disable, enable]>
global-label: <value of string>
groups: <value of string>
icap-profile: <value of string>
inbound: <value in [disable, enable]>
ippool: <value in [disable, enable]>
ips-sensor: <value of string>
label: <value of string>
logtraffic: <value in [disable, enable, all, ...]>
logtraffic-start: <value in [disable, enable]>
mms-profile: <value of string>
name: <value of string>
nat: <value in [disable, enable]>
natinbound: <value in [disable, enable]>
natoutbound: <value in [disable, enable]>
np-accelation: <value in [disable, enable]>
outbound: <value in [disable, enable]>
per-ip-shaper: <value of string>
policyid: <value of integer>
poolname: <value of string>
profile-group: <value of string>
profile-protocol-options: <value of string>
profile-type: <value in [single, group]>
replacemsg-override-group: <value of string>
rsso: <value in [disable, enable]>
schedule: <value of string>
send-deny-packet: <value in [disable, enable]>
service: <value of string>
service-negate: <value in [disable, enable]>
session-ttl: <value of integer>
spamfilter-profile: <value of string>
srcaddr: <value of string>
srcaddr-negate: <value in [disable, enable]>
srcintf: <value of string>
ssl-mirror: <value in [disable, enable]>
ssl-mirror-intf: <value of string>
ssl-ssh-profile: <value of string>
status: <value in [disable, enable]>
tags: <value of string>
tcp-mss-receiver: <value of integer>
tcp-mss-sender: <value of integer>
tcp-session-without-syn: <value in [all, data-only, disable]>
timeout-send-rst: <value in [disable, enable]>
traffic-shaper: <value of string>
traffic-shaper-reverse: <value of string>
url-category: <value of string>
users: <value of string>
utm-status: <value in [disable, enable]>
uuid: <value of string>
vlan-cos-fwd: <value of integer>
vlan-cos-rev: <value of integer>
voip-profile: <value of string>
vpntunnel: <value of string>
webfilter-profile: <value of string>
anti-replay: <value in [disable, enable]>
app-group: <value of string>
cifs-profile: <value of string>
dnsfilter-profile: <value of string>
emailfilter-profile: <value of string>
http-policy-redirect: <value in [disable, enable]>
inspection-mode: <value in [proxy, flow]>
np-acceleration: <value in [disable, enable]>
ssh-filter-profile: <value of string>
ssh-policy-redirect: <value in [disable, enable]>
tos: <value of string>
tos-mask: <value of string>
tos-negate: <value in [disable, enable]>
vlan-filter: <value of string>
waf-profile: <value of string>
webcache: <value in [disable, enable]>
webcache-https: <value in [disable, enable]>
webproxy-forward-server: <value of string>
webproxy-profile: <value of string>
fsso-groups: <value of string>
decrypted-traffic-mirror: <value of string>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy6'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy6/{policy6}'
]
url_params = ['adom', 'pkg']
module_primary_key = 'policyid'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'pkg': {
'required': True,
'type': 'str'
},
'pkg_firewall_policy6': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True
},
'options': {
'action': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'deny',
'accept',
'ipsec',
'ssl-vpn'
],
'type': 'str'
},
'app-category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'application': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'application-list': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'auto-asic-offload': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'av-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'comments': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'custom-log-fields': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'devices': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'diffserv-forward': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'diffserv-reverse': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'diffservcode-forward': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'diffservcode-rev': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dlp-sensor': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dscp-match': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dscp-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dscp-value': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dsri': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dstaddr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dstaddr-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'dstintf': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'firewall-session-dirty': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'check-all',
'check-new'
],
'type': 'str'
},
'fixedport': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'global-label': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'groups': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'icap-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'inbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ippool': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ips-sensor': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'label': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'logtraffic': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable',
'all',
'utm'
],
'type': 'str'
},
'logtraffic-start': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'mms-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'name': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'nat': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'natinbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'natoutbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'np-accelation': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'outbound': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'per-ip-shaper': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'policyid': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'poolname': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'profile-group': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'profile-protocol-options': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'profile-type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'single',
'group'
],
'type': 'str'
},
'replacemsg-override-group': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'rsso': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'schedule': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'send-deny-packet': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'service': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'service-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'session-ttl': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'spamfilter-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'srcaddr': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'srcaddr-negate': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'srcintf': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'ssl-mirror': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-mirror-intf': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'ssl-ssh-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tags': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': False,
'6.2.3': False,
'6.2.5': False,
'6.4.0': False,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'tcp-mss-receiver': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'tcp-mss-sender': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'tcp-session-without-syn': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'all',
'data-only',
'disable'
],
'type': 'str'
},
'timeout-send-rst': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'traffic-shaper': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'traffic-shaper-reverse': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'url-category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'users': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'utm-status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'uuid': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'vlan-cos-fwd': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'vlan-cos-rev': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'int'
},
'voip-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'vpntunnel': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'webfilter-profile': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'anti-replay': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'app-group': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'cifs-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'dnsfilter-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'emailfilter-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'http-policy-redirect': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'inspection-mode': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'proxy',
'flow'
],
'type': 'str'
},
'np-acceleration': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssh-filter-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'ssh-policy-redirect': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'tos': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'tos-mask': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'tos-negate': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'vlan-filter': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'waf-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'webcache': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'webcache-https': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'webproxy-forward-server': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'webproxy-profile': {
'required': False,
'revision': {
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'fsso-groups': {
'required': False,
'revision': {
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
},
'decrypted-traffic-mirror': {
'required': False,
'revision': {
'6.4.0': True,
'6.4.2': False,
'6.4.5': False,
'7.0.0': False
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'pkg_firewall_policy6'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"aaronchristopher.dalmacio@gmail.com"
] | aaronchristopher.dalmacio@gmail.com |
fec7c69adfe85662ef8afe4fad53eb0ed76d2067 | e35cd19277b6440371bb5dd1b92c0abfd90c1e49 | /get_background.py | 17cd858a39421c138234ef7006b073fd1618aeab | [] | no_license | sbrems/background_sources | eb1272f37b0e3b5bf7134ee69f3caa5a46bbc7f0 | 127ec0452afd01eda93dd980e6db5f592940f4f0 | refs/heads/master | 2021-01-25T05:02:12.436972 | 2017-06-14T11:46:03 | 2017-06-14T11:46:03 | 93,503,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,468 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from astropy.coordinates import Angle,SkyCoord
import astropy.units as u
from astropy.table import Table
from starclass import Star
from parameters import *
import pickle
import os
import ipdb
star = Star(srcname)
star.coordinates = 'auto'
def do():
'''Does everything except downloading the data. Use down_catalogues for that.'''
if not os.path.exists(outdir):
os.makedirs(outdir)
twomasscat = read_cat(vvv=False)
references = []
results = []#in order catalogue,coords,raudius,wavelength,[histvalues,],[histbins,]
if grid == 'auto':
print('Auto-making the grid')
cencoords = make_comp_grid()
elif grid == 'manual':
print('Using manually center points')
cencoords = load_comp_grid()
else:
raise ValueError('Unknown keyword {} for grid. Select auto or manual'.format(grid))
print('Processing the 2Mass referencepoint')
for radius in comp_radii:
for wave in twomassbands:
references.append(['2mass',star.coordinates,radius,wave] +
[x[0] for x in list(make_cumhists(twomasscat,star.coordinates,
radius,[wave,]))])
print('Processing the 2Masssurvey')
for ii,center in enumerate(cencoords.icrs):
print('Processing center {} ({} of {})'.format(center,ii,len(cencoords)-1))
for radius in comp_radii:
for wave in twomassbands:
results.append(['2mass',center,radius,wave] +
[x[0] for x in list(make_cumhists(twomasscat,SkyCoord([center,]),
radius,[wave,]))])
pn_results = os.path.join(outdir,'results.py')
pn_references = os.path.join(outdir,'references.py')
pickle.dump(results,open(pn_results,'wb'))
pickle.dump(references,open(pn_references,'wb'))
print('Done with cumhists. Saved them to {} , {}'.format(pn_results,pn_references))
if len(cencoords) > 100:
plot = False
else:
plot=True
t_maxdiff = plot_bins(results,references,cencoords,plot=plot)
#find the best result in a min of max diff sense
t_diff_grouped = t_maxdiff.group_by(['cen_lon','cen_lat','radius'])
t_diff_max = t_diff_grouped.groups.aggregate(np.max)
t_diff_max.sort(['radius','maxdiff'])
#mindiffglob = np.min(t_diff_max['maxdiff'])
#t_bestcens = t_diff_max.sort([np.where(t_diff_max['maxdiff'] == mindiffglob)]
#save the results
pn_tgrouped = os.path.join(outdir,'centerprops_grouped.csv')
pn_tall = os.path.join(outdir,'centerprops_all.csv')
#pn_tbest= os.path.join('centerprops_best.csv')
i_largest_radius = len(cencoords)*(len(comp_radii)-1)
print('The 10 best centers seem {}.\n Saved them to {}'.format(\
t_diff_max[i_largest_radius:i_largest_radius+10],pn_tgrouped))
t_diff_grouped.write(pn_tall,format='ascii.csv',overwrite=True)
t_diff_max.write(pn_tgrouped,format='ascii.csv',overwrite=True)
#t_bestcens.write(pn_tbest,format='ascii.csv')
print('DONE with get_background.do')
def plot_bins(results,references,cencoords,plot=True):
    '''make a grid plot in one file. Returns a table with simple statistics.'''
    # NOTE(review): `sn` is imported but never referenced below -- presumably
    # kept for seaborn's global plot styling side effect; confirm.
    import seaborn as sn
    # Colour per 2MASS waveband for both the curves and the residual panels.
    wb2col = {'Jmag':'blue',
              'Hmag':'green',
              'Kmag':'red'}
    #store stuff in a table. Fill and delete dummy values for right column properties
    t_maxdiff = Table([[np.nan],[np.nan],[np.nan,],['Dummy_waveband',],[-999]],
                      names=['cen_lon','cen_lat','radius','waveband','maxdiff'])
    # Drop the dummy row again; only the column names/dtypes were needed.
    t_maxdiff = t_maxdiff[:0]
    # Bin edges (without the leading edge) from the last reference entry.
    bins = references[-1][-1][1:]
    # Global y-limit: largest cumulative count over all science results.
    cummax = np.max([x[-2] for x in results])
    if plot:
        fig = plt.figure(figsize=(5*len(comp_radii), 5*len(cencoords)))
        outergrid = gridspec.GridSpec(len(cencoords), len(comp_radii), wspace=0.12,hspace=0.12)
    for icen,center in enumerate(cencoords.icrs):
        if plot:
            print('Plotting and getting statistics for Position {} of {}'.format(icen,
                                                                                 len(cencoords.icrs)-1))
        else:
            print('Not plotting. Getting statistics for Position {} of {}'.format(icen,
                                                                                  len(cencoords.icrs)-1))
        for irad,radius in enumerate(comp_radii):
            if plot:
                # One outer grid cell per (center, radius); split into a main
                # panel (cumulative curves) and a residual panel below it.
                innergrid = gridspec.GridSpecFromSubplotSpec(3, 1,
                            subplot_spec=outergrid[icen,irad], wspace=0.1, hspace=0.1)
                ax1 = plt.Subplot(fig,innergrid[:-1,0])
                ax2 = plt.Subplot(fig,innergrid[-1,0])
            for wave in twomassbands:
                # Reference curve for this radius/waveband, and the science
                # curve for the matching radius/center/waveband.
                refvals = [x[-2] for x in references if ((x[2] == radius) and
                                                         (x[3] == wave))][0]
                scivals = [x[-2] for x in results if ((x[2] == radius) and
                                                      (x[1].separation(center) < 0.01*u.arcsec) and
                                                      (x[3] == wave))][0]
                if plot:
                    ax1.plot(bins,refvals,ls='--',c=wb2col[wave],label=wave)
                    ax1.plot(bins,scivals,ls='-' ,c=wb2col[wave])
                    ax1.set_ylim(0,cummax)
                    ax2.set_ylim(-cummax/8,cummax/8)
                    ax2.plot(bins,refvals-scivals,c=wb2col[wave])
                # Statistics are collected regardless of plotting.
                t_maxdiff.add_row([center.galactic.l,center.galactic.b,
                                   radius,wave,int(np.max(np.abs(refvals-scivals)))])
            if plot:
                ax1.legend(loc='upper left')
                #ax1.annotate('icen,irad: {},{}'.format(icen,irad),xy=(10,20))
                #ax1.set_title('2MASS, r={}, cen={}'.format(radius,center.galactic))
                #make the labels where needed
                if irad == 0:
                    ax1.set_ylabel('lon={0:.4f}, lat={1:4f} \n\nCumulative nr of sources'.format(center.galactic.l.value,center.galactic.b.value))
                    ax2.set_ylabel('Residuals')
                    #plt.subplot(outergrid[icen,irad]).set_ylabel('lon={10.6f}, lat={8.6f}\n\n\n'.format(center.galactic.l.value,center.galactic.b.value))
                    #plt.subplot(outergrid[icen,irad]).set_yticks([])
                    #plt.subplot(outergrid[icen,irad]).set_xticks([])
                else:
                    ax1.set_yticklabels([])
                    ax1.set_ylabel('')
                    ax2.set_ylabel('')
                    ax2.set_yticklabels([])
                if icen == len(cencoords)-1:
                    ax2.set_xlabel('mag')
                    ax1.set_xticklabels([])
                elif icen == 0:
                    ax1.set_title('search radius = {}'.format(radius))
                    ax1.set_xticklabels([])
                    #plt.subplot(outergrid[icen,irad]).set_ylabel('search radius = {}'.format(radius))
                    #plt.subplot(outergrid[icen,irad]).set_yticks([])
                    #plt.subplot(outergrid[icen,irad]).set_xticks([])
                else:
                    ax2.set_xlabel('')
                    ax1.set_xticklabels([])
                    ax2.set_xticklabels([])
            if plot:
                fig.add_subplot(ax1)
                # NOTE(review): sharex=ax2 shares the axis with *itself* --
                # presumably sharex=ax1 was intended; confirm before changing.
                fig.add_subplot(ax2,sharex=ax2)
    if plot:
        fp_plot = os.path.join(outdir,'sourcedensity_overview.pdf')
        try:
            fig.savefig(fp_plot)
        except:
            print('Somehow could not save the figure.')
        print('Saved plot to {}'.format(fp_plot))
        plt.close('all')
    return t_maxdiff
def make_cumhists(cat, center, radius, wavebands, magrange=(5, 14.5)):
    '''Make a cumulative magnitude histogram around *center* for each waveband.

    Selects the sources of *cat* within *radius* of *center* (via the table's
    "skycoord" column) and returns ``(hists, [bins])`` where *hists* holds one
    cumulative-count array per entry of *wavebands* and *bins* are the common
    histogram bin edges (``None`` if *wavebands* is empty).
    '''
    nbins = 200
    # search_around_sky returns matched index pairs; idz2 indexes into `cat`.
    idz1, idz2, sep2d, sep3d = cat['skycoord'].search_around_sky(center, radius)
    small_cat = cat[idz2]
    hists = []
    # Initialised so an empty `wavebands` no longer raises NameError below.
    bins = None
    for wave in wavebands:
        values, bins = np.histogram(small_cat[wave], bins=nbins, range=magrange)
        hists.append(np.cumsum(values))
    return hists, [bins, ]
def read_cat(vvv=True, twomass=True):
    '''Read the catalogues, converting the RA/Dec columns into SkyCoord
    objects stored in a "skycoord" column.

    Returns ``(vvvcat, twomasscat)`` when both flags are set, otherwise just
    the single requested table.  Note: calling with both flags False raises
    UnboundLocalError (no catalogue was read).
    '''
    if vvv:
        print('Reading vvvcat')
        vvvcat = Table.read(os.path.join(maindir, 'vvv_catalogue.csv'), format='ascii.csv')
        vvvcat['skycoord'] = SkyCoord(vvvcat['RAJ2000'], vvvcat['DEJ2000'], frame='icrs', unit=u.deg)
    if twomass:
        print('Reading 2masscat')
        twomasscat = Table.read(os.path.join(maindir, '2mass_catalogue.csv'), format='ascii.csv')
        twomasscat['skycoord'] = SkyCoord(twomasscat['RAJ2000'], twomasscat['DEJ2000'],
                                          frame='icrs', unit=u.deg)
    if vvv and twomass:
        return vvvcat, twomasscat
    elif vvv:
        return vvvcat
    else:
        # Fixed typo: previously "towmasscat", a NameError whenever vvv=False.
        return twomasscat
def make_comp_grid():
    '''Makes the center points of the comparison grid.

    Builds an nlon x nlat regular grid in galactic coordinates, inset from
    the VVV footprint corners by the largest comparison radius so that every
    search circle stays inside the survey area.  Returns a SkyCoord with the
    flattened grid of central points.
    '''
    max_rad = np.max(comp_radii)
    # Longitude/latitude vectors in galactic coords, shrunk by max_rad.
    veclon = np.linspace(vvv_topright.l + max_rad, vvv_leftbot.l - max_rad, nlon)
    veclat = np.linspace(vvv_leftbot.b + max_rad, vvv_topright.b - max_rad, nlat)
    # Compute the meshgrid once (it was previously computed twice, discarding
    # half of each result).
    longrid, latgrid = np.meshgrid(veclon, veclat)
    cencoords = SkyCoord(longrid.flatten(), latgrid.flatten(),
                         unit=u.deg, frame='galactic')
    return cencoords
def load_comp_grid():
    '''Return the manually entered grid of centre points.

    Counterpart to make_comp_grid: instead of computing a regular grid this
    simply hands back the module-level manual_center_points.
    '''
    return manual_center_points
def down_catalogues():
    '''Download each Vizier catalogue listed in query_cats and save it as CSV.

    Queries the region around the module-level query_center within
    query_radius and writes one "<name>_catalogue.csv" per catalogue into
    maindir.
    '''
    from astroquery.vizier import Vizier
    # Disable the default row cap so the full catalogue is returned.
    Vizier.ROW_LIMIT = -1
    for cat in query_cats.keys():
        res = Vizier.query_region(query_center,catalog=query_cats[cat],
                                  radius=query_radius)
        cat_pname = os.path.join(maindir,cat+'_catalogue.csv')
        # Only the first table of the Vizier result is saved.
        res[0].write(cat_pname,
                     format='ascii.csv',
                     delimiter=',',overwrite=True)
        print('Saved cat {} ({} rows) to {}'.format(cat,len(res[0]),cat_pname))
        # Free the (potentially large) result before the next query.
        del res
# NOTE(review): leftover interactive debugging breakpoint -- every execution
# of this module drops into the ipdb debugger here; presumably should be
# removed before non-interactive use.
import ipdb;ipdb.set_trace()
| [
"sbrems@lsw.uni-heidelberg.de"
] | sbrems@lsw.uni-heidelberg.de |
5596e27e04441cf697f3ff37533f45e9882c9af2 | 856ada709d85f7c2c385277bbfdf07fabb24b762 | /mysite/cocktails/views.py | e67443d0717c2a5edd839bf45b05f8e1c6921ffc | [] | no_license | laurence-liu/DearLiquor.Django | 91ca130ac088ef2815355d88e940d5fb69da5350 | 395f9edda908ffedef7b9e41bc67e6cca6d5e71c | refs/heads/master | 2020-04-28T20:18:15.994826 | 2019-03-29T08:30:11 | 2019-03-29T08:30:11 | 175,540,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from django.shortcuts import render
from .models import Cocktail
# Create your views here.
def cocktail(request):
    """Render the cocktail listing page with every Cocktail in the database."""
    context = {'cocktails': Cocktail.objects.all()}
    return render(request, 'cocktail.html', context)
| [
"laurence.liuuu@gmail.com"
] | laurence.liuuu@gmail.com |
fc68572641d5a74b599133888502a7f9c82b6d3d | cd050f1f91d516101f3a8c05f36e9ac506560b5d | /crypto_api.py | 7999f838d75d9f35c0658b52b5621d99f065bd43 | [] | no_license | jacinthd/progyny-assessment | 684dbe2b9062e106340549c0ad69cd8b8df1c967 | a2a4ca5a2de09fe2a13ffc18d8e79a6a6829f4a0 | refs/heads/main | 2023-05-14T11:56:05.373384 | 2021-06-09T17:23:24 | 2021-06-09T17:23:24 | 375,439,966 | 0 | 1 | null | 2021-06-11T02:38:08 | 2021-06-09T17:36:53 | Python | UTF-8 | Python | false | false | 1,149 | py | """Crypto API."""
from typing import Dict, List
import requests
# API Documentation - https://www.coingecko.com/en/api#explore-api
def get_coins() -> List[Dict]:
    """Fetch the current top 10 coins, ordered by market cap (descending), in USD.

    Each returned entry contains (among others) the keys:
    ``id``, ``symbol``, ``name`` and ``current_price``.
    """
    url = ('https://api.coingecko.com/api/v3/coins/markets'
           '?vs_currency=usd&order=market_cap_desc&per_page=10&page=1&sparkline=false')
    return requests.get(url).json()
def get_coin_price_history(coin_id: str) -> List[Dict]:
    """Fetch nine days of daily USD prices for *coin_id* from CoinGecko.

    Each entry of the returned list is a pair ``[unix_timestamp, price]``.
    """
    response = requests.get(
        f"https://api.coingecko.com/api/v3/coins/{coin_id}/market_chart"
        "?vs_currency=usd&days=9&interval=daily"
    )
    return response.json()['prices']
# Utilize this function when submitting an order.
def submit_order(coin_id: str, quantity: int, bid: float):
    """
    Mock function to submit an order to an exchange.
    Pretends the order filled immediately and returns the fill price,
    which is simply the submitted bid.
    """
    return bid
"jacinthdavid@gmail.com"
] | jacinthdavid@gmail.com |
b24a4d5318b0f26c286c6480a6924b655e13f8e5 | a2fc37a497c73629c88fededb4c3c925ad2d75c6 | /proyectoWeb/servicios/urls.py | cf4fafb9a238a2f02e0959892a9f3e5b697263ec | [] | no_license | atziripe/GestionPedidos | 4b212796f5bbc704c6dbf42a4fcea18e1eb4435a | 7522ce7ed8407c6e70938f7cd24a0ae92ea5fbfc | refs/heads/main | 2023-06-17T13:31:25.227177 | 2021-07-09T00:53:33 | 2021-07-09T00:53:33 | 348,610,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from django.urls import path
from . import views
# Routes for this app: the app root ('') is served by views.servicios and can
# be reversed by the URL name "Servicios".
urlpatterns = [
    path('', views.servicios, name="Servicios"),
]
| [
"atziripg.99@gmail.com"
] | atziripg.99@gmail.com |
5bdd6cdb881c2c468c9c6d67ac53d6fa8cb63ced | 7531ccd6d19ada54a11c69b721a352500a4220f0 | /_commands/_CreateDirectory.py | 50fb1206e5e72edaf082dae022c8819f45b56fd6 | [] | no_license | pallavigusain92/FTP | 27d234e5d799c02ef67bdb1189b3a199e8bf1c8e | 68483b49cb8d70d1a7797438164c7cd89d6b4829 | refs/heads/master | 2020-04-26T00:08:03.095976 | 2019-02-28T19:15:33 | 2019-02-28T19:15:33 | 173,167,722 | 0 | 0 | null | 2019-02-28T18:55:33 | 2019-02-28T18:39:29 | null | UTF-8 | Python | false | false | 314 | py | class Command:
def do_createdirectory(self, input_directory):
"""
Command to create directory with name of new directory
Args:
input_directory (str): Name of directory you want to create
"""
print(input_directory)
response = self._perform_ftp_command('listdir')
print(response)
| [
"noreply@github.com"
] | noreply@github.com |
86f7be2c3bdb7768e0fad6cef2776da4e48735ab | 4639dec8b435074b62c4dbc8ca05d6885cb51d6e | /실습과제2/In[22].py | 8dd6ba012e1b300afb658b5e4eb513abcc605dac | [] | no_license | kimhyeyun/2020-DataMining | 10e2bbde8a8f5575f659e52e4bb17b35a2566b65 | a0c7c7d70158a828072169e13f6d1662aa2e0324 | refs/heads/master | 2021-04-07T13:24:35.317906 | 2020-06-25T01:07:23 | 2020-06-25T01:07:23 | 248,679,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from pandas import Series
s3 = Series([1.2,2.5,-2.2,3.1,-0.8,-3.2],
index = ['Jan 1','Jan 2','Jan 3','Jan 4','Jan 5','Jan 6',])
print(s3 + 4) #applying scalar operation on a numeric Series
print(s3 / 4)
| [
"cqqwer@gmail.com"
] | cqqwer@gmail.com |
1093f8dcf00af1185853fa3a81e1e25a43a616dc | 4842612668bd2378dbc2b38f922616688892ace0 | /Django/env/FTM/views.py | 7793b68bd4379d46ebe531941083e100cdbef630 | [] | no_license | CoupDgrace/FamilyProjectTracker | 1b04d697dac19dff8c3ba13e07cd409c9457c6e3 | 1a3621fe459c9c93ebfcba57eaafb08d29435a87 | refs/heads/main | 2023-07-13T02:12:49.369827 | 2021-08-30T04:34:28 | 2021-08-30T04:34:28 | 360,789,760 | 0 | 0 | null | 2021-08-30T04:34:29 | 2021-04-23T06:47:50 | TSQL | UTF-8 | Python | false | false | 1,275 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import *
from django.db.models import Q
# Create your views here.
# Home Page
def home(request):
    """Render the family task board: tasks grouped per member and per status."""
    # One queryset per family member (assignedMember ids 1..6).
    nTicks, kTicks, gTicks, eTicks, tTicks, mTicks = (
        Tasks.objects.filter(assignedMember=member) for member in range(1, 7))
    # Status buckets; the Q unions tolerate both capitalisations used in the data.
    backTicks = Tasks.objects.filter(Q(taskStatus='backlog') | Q(taskStatus='Backlog'))
    doneTicks = Tasks.objects.filter(Q(taskStatus='Complete') | Q(taskStatus='complete'))
    waitTicks = Tasks.objects.filter(taskStatus='waiting')
    context = {
        'nTicks': nTicks, 'backTicks': backTicks, 'kTicks': kTicks,
        'gTicks': gTicks, 'eTicks': eTicks, 'tTicks': tTicks,
        'mTicks': mTicks, 'doneTicks': doneTicks, 'waitTicks': waitTicks,
    }
    return render(request, 'FTM/HTML/BanksBoardIndex.html', context)
'''def admin(request):
return render (
request,
'admin/',
)'''
# # # A few forms # # #
# Members Form
'''
def Members(request):
memForm = MembersForm()
return render(request,'FTM/HTML/membersForm.html',{'form':form})
''' | [
"nathan.banksd@gmail.com"
] | nathan.banksd@gmail.com |
634e1c3d02aa18881bd9b4af1f650f111c07e4c7 | 9db4cf293323d83c02aa3846e172242ce3ded550 | /qa/pull-tester/rpc-tests.py | ec86971a09eedebd51c8dc16e6d568722ede2eb9 | [
"MIT"
] | permissive | mtx-coin/Matrix-Blockchain | bd0495038146cd639db655787edfa5f406806ef9 | cb8c2c4c7ec5ee911c4f43a718785241cd4ceafe | refs/heads/master | 2020-06-02T15:19:01.450287 | 2019-06-10T17:13:12 | 2019-06-10T17:13:12 | 191,206,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,801 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("WARNING: \"import zmq\" failed. Setting ENABLE_ZMQ=0. " \
"To run zmq tests, see dependency info in /qa/README.md.")
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "MTXD" not in os.environ:
os.environ["MTXD"] = BUILDDIR + '/src/matrixd' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py', # NOTE: needs matrix_hash to pass
'fundrawtransaction.py',
'fundrawtransaction-hd.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py', # NOTE: needs matrix_hash to pass
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'keypool-hd.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs matrix_hash to pass
'invalidtxrequest.py', # NOTE: needs matrix_hash to pass
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'p2p-compactblocks.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
# 'pruning.py', # Prune mode is incompatible with -txindex.
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs matrix_hash to pass
'bipdersig-p2p.py', # NOTE: needs matrix_hash to pass
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs matrix_hash to pass
# 'replace-by-fee.py', # RBF is disabled in MATRIX Core
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
Trigger the testscrips passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| [
"noreply@github.com"
] | noreply@github.com |
ad62f9feb1c07f0e0d3a9a0db8defb334439b636 | a2fdcd5252741bdd3ad96f20944c07d80bd57dc8 | /class_sample.py | ca23e1669eeab4e7a15a44c5a304dc1c92735155 | [] | no_license | chaossky/Python2019 | 7cd11aab7cecf23acb42b7635f8bfb506555c856 | fd63563f6a175a6aef1f3248aefb3f754f6658e1 | refs/heads/master | 2021-07-31T09:15:14.430835 | 2019-08-16T12:13:45 | 2019-08-16T12:13:45 | 200,347,544 | 0 | 0 | null | 2019-08-05T21:54:10 | 2019-08-03T07:43:34 | Python | UTF-8 | Python | false | false | 365 | py | class Ball:
color=""
speed=0
def setSpeed(self,value):
self.speed=value
# Demo: create two balls, give each a colour, and set their speeds through
# the setSpeed() method.
ball01=Ball()
ball01.color="Red"
ball01.setSpeed(10)
ball02=Ball()
ball02.color="Blue"
ball02.setSpeed(20)
# Print both balls' attributes ("%s" also formats the integer speeds).
print("Ball01 color:%s" %ball01.color)
print("Ball01 speed:%s" %ball01.speed)
print("Ball02 color:%s" %ball02.color)
print("Ball02 speed:%s" %ball02.speed)
| [
"user@email.mail"
] | user@email.mail |
1d3c6228044c40a6619b0f83a3379c3346d6e98c | 4e265daafbfd97c84029dff9f15f52962c26b1fa | /Week_01/week_01.py | 84481dd7bbb74d55ca307ca3cc080f036b16cc60 | [] | no_license | AI-Candy-Yang/algorithm017 | a536e3afb51de2df54795ab3468b5e140ba67f92 | 855eef0aa006c14ade8413cf03615ff99f229923 | refs/heads/master | 2023-01-07T00:05:09.054946 | 2020-10-28T04:13:37 | 2020-10-28T04:13:37 | 296,322,117 | 0 | 0 | null | 2020-09-17T12:38:52 | 2020-09-17T12:38:51 | null | UTF-8 | Python | false | false | 12,155 | py | #-*- encoding:utf-8 -*-
#作业
#1.删除排序数组中的重复项,返回去重后的数组长度
def removeDuplicates(nums):
    """Remove duplicates from the sorted list *nums* in place.

    Two pointers: `fast` scans the list, `slow` marks the last unique value
    written.  Returns k, the count of unique values; nums[:k] then holds them
    in order.  (The previous version returned 1 for an empty list.)
    """
    if not nums:
        return 0
    slow = 0
    for fast in range(1, len(nums)):
        if nums[fast] != nums[slow]:
            slow += 1
            nums[slow] = nums[fast]
    return slow + 1
#2.旋转数组
def rotate(nums, k):
    """Rotate *nums* to the right by *k* positions, in place.

    Uses an auxiliary array: element i of the original lands at index
    (i + k) % n.  Returns None; the rotation is written back into *nums*.
    (An alternative O(1)-space approach is the triple-reversal trick.)
    """
    n = len(nums)
    rotated = [0] * n
    for i in range(n):
        rotated[(i + k) % n] = nums[i]
    for i in range(n):
        nums[i] = rotated[i]
#3.合并两个有序链表
class ListNode:
    """Singly linked list node holding a value and a pointer to the next node."""

    def __init__(self, x):
        self.val = x
        self.next = None


def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
    """Merge two sorted linked lists into one sorted list, iteratively.

    Written in LeetCode method style, so the first argument (*self*) is
    unused; pass None when calling it as a plain function.  Whichever list
    is exhausted last is spliced onto the tail unchanged.
    """
    sentinel = ListNode(-1)
    tail = sentinel
    while l1 and l2:
        if l1.val < l2.val:
            tail.next, l1 = l1, l1.next
        else:
            tail.next, l2 = l2, l2.next
        tail = tail.next
    # Attach whichever list still has nodes left.
    tail.next = l1 if l1 is not None else l2
    return sentinel.next
#4.合并两个有序列表
def merge(nums1, m, nums2, n):
    """Merge sorted nums2 (n values) into sorted nums1 (m values), in place.

    nums1 has length >= m + n; its trailing slots are scratch space.  Filling
    from the back (largest value first) needs no extra memory: O(m + n) time,
    O(1) space.  Returns None; nums1 is modified in place.

    Fixed in this revision: the previous code ran *two* merge strategies
    sequentially (the first consumed nums2 by popping, so the second indexed
    an emptied list -> IndexError), and the back-to-front loop used `> 0`
    bounds, skipping index 0 of both arrays.
    """
    p1, p2 = m - 1, n - 1          # last real elements of each array
    write = m + n - 1              # next slot to fill, from the back
    while p1 >= 0 and p2 >= 0:
        if nums1[p1] > nums2[p2]:
            nums1[write] = nums1[p1]
            p1 -= 1
        else:
            nums1[write] = nums2[p2]
            p2 -= 1
        write -= 1
    # Any leftovers from nums2 belong at the very front; leftovers from
    # nums1 are already in position.
    nums1[:p2 + 1] = nums2[:p2 + 1]
#5.两数之和
def twoSum(nums, target):
    """Return indices [i, j] (i < j) with nums[i] + nums[j] == target.

    One-pass hash map: O(n) time, O(n) space.  Returns [] when no pair sums
    to target.  The previous version sorted *nums* and returned positions in
    the sorted order, which is wrong for callers expecting indices into the
    original list (e.g. twoSum([3, 2, 4], 6) returned [0, 2] instead of
    [1, 2]).
    """
    seen = {}  # value -> first index where it appeared
    for j, value in enumerate(nums):
        i = seen.get(target - value)
        if i is not None:
            return [i, j]
        seen[value] = j
    return []
#6.移动零
def moveZeroes(nums):
    """Move every zero in *nums* to the end, in place, preserving the
    relative order of the non-zero elements.  Returns None.

    Two pointers: `fast` scans for non-zero values, `slow` marks where the
    next non-zero belongs; swapping them drifts the zeros rightwards in one
    pass.  A stray debug ``print(nums)`` was removed from the end.
    """
    slow = 0
    for fast in range(len(nums)):
        if nums[fast] != 0:
            nums[slow], nums[fast] = nums[fast], nums[slow]
            slow += 1
#7.加一
def plusOne(digits):
    """Add one to the number represented by the digit list, in place.

    Scans from the least significant digit; a digit that does not roll over
    to 0 absorbs the carry and we are done.  If every digit was a 9, a
    leading 1 is prepended.  Returns the (possibly lengthened) list.
    """
    for i in reversed(range(len(digits))):
        digits[i] = (digits[i] + 1) % 10
        if digits[i]:
            # No carry left to propagate.
            return digits
    # Every digit rolled over from 9 to 0: prepend the final carry.
    digits.insert(0, 1)
    return digits
#8.设计循环双端队列
#9.接雨水
def trap(height):
    """Compute how much rain water the elevation map *height* can hold.

    Monotonically decreasing stack of bar indices: whenever a taller bar
    arrives, each shorter bar popped off the stack is the floor of a water
    pocket bounded by the new bar (right wall) and the new stack top (left
    wall).  O(n) time, O(n) space.

    Alternative approaches: per-position max-to-left/max-to-right scans
    (either brute force or with two precomputed prefix arrays).
    """
    if len(height) < 3:
        return 0
    total = 0
    stack = []
    for right, bar in enumerate(height):
        while stack and height[stack[-1]] < bar:
            bottom = stack.pop()
            if not stack:
                # No left wall remains for this bottom; nothing is trapped.
                break
            left = stack[-1]
            depth = min(height[left], bar) - height[bottom]
            total += (right - left - 1) * depth
        stack.append(right)
    return total
#练习
#2.盛水的最大容器面积
#双指针左右移动找出最大的面积,最大面积纸盒左右两个柱子的高度有关
def maxArea(heights):
    """Return the largest water area a pair of lines from *heights* can hold.

    Two pointers from both ends; the area is limited by the shorter line, so
    that side is the one to advance.  O(n) time, O(1) space.

    Fixed in this revision: the left branch used ``heights(l)`` -- calling
    the list as a function -- which raised TypeError whenever the left line
    was the shorter one.
    """
    l, r = 0, len(heights) - 1
    area = 0
    while l < r:
        if heights[l] < heights[r]:
            area = max(area, (r - l) * heights[l])
            l += 1
        else:
            area = max(area, (r - l) * heights[r])
            r -= 1
    return area
#3.爬楼梯
def climbStairs(n):
    """Number of distinct ways to climb *n* steps taking 1 or 2 at a time.

    Fibonacci-style rolling pair: ways(n) = ways(n-1) + ways(n-2), with the
    small cases (n <= 3) returned directly.
    """
    if n <= 3:
        return n
    prev, cur = 2, 3
    for _ in range(4, n + 1):
        prev, cur = cur, prev + cur
    return cur
#4.三数之和
#从头到尾遍历表示其中一个数
def threeSum(nums):
    """Return all unique triplets [a, b, c] from *nums* with a + b + c == 0.

    Sort + two pointers: O(n^2) time.  Note that *nums* is sorted in place.

    Fixed in this revision: the list is now actually sorted first (the
    two-pointer scan and the duplicate-skipping both require it, and the
    original comment promised it), and the "sum too small" branch moves the
    left pointer *right* (it previously did ``l -= 1``, which re-visited
    positions and could loop without terminating usefully).
    """
    if len(nums) < 3:
        return []
    nums.sort()
    res = []
    for i in range(len(nums) - 2):
        # Skip duplicate anchor values to avoid repeated triplets.
        if i > 0 and nums[i] == nums[i - 1]:
            continue
        l, r = i + 1, len(nums) - 1
        while l < r:
            total = nums[i] + nums[l] + nums[r]
            if total == 0:
                res.append([nums[i], nums[l], nums[r]])
                # Step both pointers past any duplicates of the found pair.
                while l < r and nums[l] == nums[l + 1]:
                    l += 1
                while l < r and nums[r] == nums[r - 1]:
                    r -= 1
                l += 1
                r -= 1
            elif total > 0:
                r -= 1
            else:
                l += 1
    return res
#5.反转链表
def reverseList(self, head: ListNode) -> ListNode:
    """Reverse the singly linked list starting at *head*; return the new head.

    Iterative relinking: each node is pushed onto the front of the already
    reversed prefix.  (LeetCode method style: *self* is unused.)
    """
    reversed_head = None
    node = head
    while node:
        # Tuple assignment: the right side is evaluated first, then the
        # targets are bound left to right -- so `node.next` gets the old
        # prefix, `reversed_head` becomes this node, and `node` advances.
        node.next, reversed_head, node = reversed_head, node, node.next
    return reversed_head
#6.两两交换链表中的节点
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes of the list; return the new head.

        Recursive: swap the first pair, then delegate the remainder of the
        list.  A list with fewer than two nodes is returned unchanged.
        """
        if head is None or head.next is None:
            return head
        second = head.next
        # The first node now follows the already-swapped rest of the list.
        head.next = self.swapPairs(second.next)
        second.next = head
        return second
#7.环形链表
def hasCycle(self, head: ListNode) -> bool:
    """Return True iff the linked list starting at *head* contains a cycle.

    Floyd's tortoise-and-hare: the fast pointer advances two nodes per
    iteration, the slow pointer one; they can only meet if a cycle exists.
    (LeetCode method style: *self* is unused.)

    Fixed in this revision: the original had ``else: return False`` *inside*
    the loop body, so the function always returned after a single step and
    missed every cycle longer than one hop; the False return now happens
    only once the fast pointer runs off the end of the list.
    """
    if not head or not head.next:
        return False
    low = fast = head
    while fast and fast.next:
        low = low.next
        fast = fast.next.next
        if low is fast:
            return True
    return False
#8.K个一组翻转链表
#9.有效的括号
def isValid(s):
    """Return True iff every bracket in *s* is closed in the correct order.

    Stack of pending openers: a closing bracket must pair with the most
    recent unmatched opener.  An odd-length string can never balance, so it
    is rejected up front.
    """
    if len(s) % 2:
        return False
    match = {']': '[', ')': '(', '}': '{'}
    pending = []
    for ch in s:
        if ch in match:
            # Closing bracket: the top of the stack must be its opener.
            if not pending or pending.pop() != match[ch]:
                return False
        else:
            pending.append(ch)
    return not pending
#10.柱状图中最大的矩形面积
def largestRectangleArea(heights):
    """Largest rectangle area in the histogram *heights*.

    Monotonically increasing stack of bar indices with zero sentinels at
    both ends (so every bar is eventually popped).  When a lower bar
    arrives, each popped bar determines a rectangle whose height is that
    bar's and whose width spans between the new stack top (exclusive) and
    the current index (exclusive).  O(n) time.

    Fixed in this revision: the area was computed as width * ``tmp`` -- the
    popped *index* -- instead of width * ``heights[tmp]``, the bar height.
    """
    # Rebinds the local only; the caller's list is not mutated.
    heights = [0] + heights + [0]
    res = 0
    stack = []
    for i in range(len(heights)):
        while stack and heights[stack[-1]] > heights[i]:
            tmp = stack.pop()
            res = max(res, (i - stack[-1] - 1) * heights[tmp])
        stack.append(i)
    return res
import collections
#11.滑动窗口最大值
def maxSlidingWindow(nums, k):
    """Return the maximum of every length-*k* sliding window over *nums*.

    A deque of indices kept in decreasing value order: the front is always
    the current window's maximum, indices smaller than the incoming value
    are discarded from the back, and the front is dropped once it falls out
    of the window.  O(n) time.
    """
    if len(nums) < 2:
        return nums
    window = collections.deque()
    result = []
    for i, value in enumerate(nums):
        # Evict everything smaller than the new value; it can never win.
        while window and nums[window[-1]] < value:
            window.pop()
        window.append(i)
        # Drop the front index once it slides out of the window.
        if window[0] <= i - k:
            window.popleft()
        # Record the window maximum once the first full window is formed.
        if i + 1 >= k:
            result.append(nums[window[0]])
    return result
# Ad-hoc smoke test when the module is run directly.
if __name__ =='__main__':
    # moveZeroes([0,1,0,3,12])
    print(twoSum([2, 7, 11, 15],9))
"yangshuang@sinandata.com"
] | yangshuang@sinandata.com |
fce40fcdd08dbb6ed7e3d1ea9d199b10684218c0 | c36bbbafedbf84ff39111b501a0f555e15ad7246 | /20200509python/20200321python/welcome.py | abdcbbe6c53a77ec1676b46488f7a65f23c6cb3d | [] | no_license | JoltDJ/mysite-3-14-20 | 996e815b6e6702b2a43aacb974139daa4ddec137 | 93c78371ff6a62a5134bf6a21689edec8209b408 | refs/heads/master | 2021-03-20T08:35:44.864754 | 2020-05-16T01:45:41 | 2020-05-16T01:45:41 | 247,193,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from jinja2 import Template
def welcomeHTML():
with open('welcome.html') as f:
s = f.read()
return s
def main():
user1 = { 'name': 'Alice', 'likes': 123}
user2 = { 'name': 'Jimmy', 'likes': 1234}
tmpl = Template(welcomeHTML())
print(tmpl.render({ 'user': user1}))
print(tmpl.render({ 'user': user2}))
main() | [
"438043@ibsh.tw"
] | 438043@ibsh.tw |
b3659978c254246c6d5f9ff0bb961a8029d82c3e | 30e2a85fc560165a16813b0486a862317c7a486a | /tensorflow/test/misc/graph.py | f141c134d6cf1435e3b25c0f9515954553e7ee26 | [] | no_license | muryliang/python_prac | 2f65b6fdb86c3b3a44f0c6452a154cd497eb2d01 | 0301e8f523a2e31e417fd99a968ad8414e9a1e08 | refs/heads/master | 2021-01-21T11:03:48.397178 | 2017-09-18T04:13:27 | 2017-09-18T04:13:27 | 68,801,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import tensorflow as tf
g1 = tf.Graph()
with g1.as_default():
v = tf.get_variable(
"v", initializer=tf.zeros_initializer(shape=[1]))
g2 = tf.Graph()
with g2.as_default():
v= tf.get_variable(
"v", initializer=tf.ones_initializer(shape=[1]))
with tf.Session(graph=g1) as sess:
tf.initialize_all_variables().run()
with tf.variable_scope("", reuse=True):
print sess.run(tf.get_variable("v")
| [
"muryliang@gmail.com"
] | muryliang@gmail.com |
166d918b5aab10d1f189e7aef448ea5254188780 | 82859a8effe9420e53dee38f0c69a31cdae4355e | /prologlib/ui.py | 0be5cc77ef533c15f7682f656c2bece153dbb73f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | gpiancastelli/prologlib | 1b0bf32383624e88d33ac5b48973231b95c0b267 | a9911ffb4e3ea2b0f958bd7a8b158b9870bd2975 | refs/heads/master | 2021-04-13T14:09:15.099263 | 2010-05-14T15:52:28 | 2010-05-14T15:52:28 | 249,167,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,630 | py | import errno
import sys
from .core import Engine, PrologError
from .parser import PrologParser, InvalidTermException
__version__ = 'UNRELEASED'
__date__ = '2010-05-14'
header = """
prologlib {} ({})
Type 'help.' for help, 'halt.' to exit.
""".lstrip().format(__version__, __date__)
HELP_TEXT = """
Welcome to prologlib, an ISO Prolog processor written in Python 3.
Type a goal at the '?-' prompt, press Return to issue it to the Prolog
processor.
If the goal fails, you will get 'no.' as an answer.
If the goal succeeds, you will get a valid substitution in resposnse (or a
blank line if the substitution is empty), followed by a '?' prompt that you can
use to ask for further solutions to the goal. Type ';' (a semicolon) followed
by Return if you want another solution, just press Return if you are satisfied
with what you already got. A solution is followed by 'yes.' as an answer.
Ask help for any ISO builtin predicate by using help/1, e.g. help(findall/3).
Please note that if the predicate functor is also an operator, you have to
enclose it in parentheses, e.g. help((is)/2).
To exit the interactive toplevel, type 'halt.' and press Return.
""".strip()
class Console:
def __init__(self, stdin=sys.stdin, stdout=sys.stdout):
self.stdin = stdin
self.stdout = stdout
self.engine = Engine()
def solveloop(self):
self.write(header)
stop = None
while not stop:
self.stdout.write('?- ')
self.stdout.flush()
line = self.stdin.readline().strip()
if not line:
continue
try:
goal = self.read_term(line)
if hasattr(self, 'do_' + goal.name):
cmd = getattr(self, 'do_' + goal.name)
stop = cmd(goal)
if not stop:
self.write('yes.')
else:
# TODO Resolve the line/goal ambiguity
self.solve(line) # yeah, should be goal
except InvalidTermException as ite:
self.write('SyntaxError: {}'.format(ite))
except IOError as ioe:
if ioe.errno == errno.ENOENT:
message = str(ioe)
name = message[message.rfind(':')+2:]
self.write('Error: the file {} cannot be found'.format(name))
else:
raise
def read_term(self, line):
parser = PrologParser(line)
return parser.read_term()
def solve(self, goal):
try:
result = self.engine.solve(goal)
if result:
self.write('')
subst = self.engine.currsubst()
for variable in sorted(subst):
if not variable.startswith('_'):
self.write(variable + ' = ' + subst[variable]._touiform())
#if self.engine.haschoicepoint():
if len(self.engine._s) > 1:
self.solvenextloop()
else:
self.write('yes.')
else:
self.write('no.')
except PrologError as e:
self.write('Error: {0}'.format(e.error_term()))
def solvenextloop(self):
#while self.engine.haschoicepoint()
while len(self.engine._s) > 1:
self.stdout.write(' ? ')
self.stdout.flush()
line = self.stdin.readline().strip()
if not line:
self.write('yes.')
break
elif line == ';':
result = self.engine.solve_next()
if result:
self.write('')
subst = self.engine.currsubst()
for variable in sorted(subst):
if not variable.startswith('_'):
self.write(variable + ' = ' + subst[variable]._touiform())
else:
self.write('no.')
else:
self.write('Type ; and press Return to ask for another solution,')
self.write('or just press Return to accept the current solution.')
def write(self, message):
message = '{0}\n'.format(message)
self.stdout.write(message)
def do_clear(self, goal):
self.engine._clear()
def do_consult(self, goal):
f = goal.value[1].value
with open(f) as theory:
self.engine._consult(theory)
def do_listing(self, goal):
for procedure in self.engine._kb:
self.write(str(procedure))
def do_help(self, goal):
if goal.arity == 0:
self.write(HELP_TEXT)
else:
self.help(goal.value[1])
def help(self, indicator):
from .builtin import search_builtin
term = mock_term(*indicator.value[1:])
procedure = search_builtin(term)
if procedure:
self.write(procedure.__doc__)
else:
pi = '{}/{}'.format(*indicator.value[1:])
self.write('No built-in predicate known with indicator: ' + pi)
def mock_term(name, arity):
'''Create a fake term to use as a key for searching in the set of
available builtin predicates, so as to retrieve the documentation'''
from .parser import Compound, Variable
t = tuple(Variable('_') for i in range(arity.value))
term = Compound(name.name, *t)
return term
| [
"giulio.piancastelli@gmail.com"
] | giulio.piancastelli@gmail.com |
3df9670854119be5855f85168905dd7833a5b034 | e0532dba57ff8455b7a95fb7b64e80a9bfa249a9 | /SplitGuys/wsgi.py | 6c755fbbd79ebbbb9823d6293c2d7a06c9dead50 | [] | no_license | Rozhiin/SplitGuys | a98455a96f6d938d82cfc4a2bb3d4ff031caa632 | f333d8b14a59c41c0706779e27bb16a9f883d57b | refs/heads/master | 2020-05-29T21:10:29.192878 | 2019-07-13T19:17:24 | 2019-07-13T19:17:24 | 189,373,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for SplitGuys project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SplitGuys.settings')
application = get_wsgi_application()
| [
"salarkiama@gmail.com"
] | salarkiama@gmail.com |
bd95c1d7fb41edc644b0ff48474088e1cc624552 | ee622ac8bb8cb695056e53f6bec2a2a7ef001de2 | /detection/configs/anti-uav/faster_rcnn_r50_fpn_2x_anti-uav-full.py | 734e5885ef64d3081104e939af2dfcc527f4029c | [] | no_license | maloo135/UAVDetectionTrackingBenchmark | faee6f0f0e628f921e40d4f2040e2093ae2006f0 | 9ad8bdeaedc5a8c2cc7633bf481c98c903bfacf1 | refs/heads/main | 2023-07-08T10:16:38.806317 | 2021-08-10T11:14:27 | 2021-08-10T11:14:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py',
'../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(
num_classes=1,)))
checkpoint_config = dict(interval=3)
classes = ('drone',)
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(
img_prefix='data/anti-uav/images/',
classes=classes,
ann_file='data/anti-uav/train-full.json'),
val=dict(
img_prefix='data/anti-uav/images/',
classes=classes,
ann_file='data/anti-uav/val-full.json'),
test=dict(
img_prefix='data/anti-uav/images/',
classes=classes,
ann_file='data/anti-uav/val-full.json'))
load_from = 'checkpoints/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth'
optimizer = dict(lr=0.001) | [
"brian.k.isaac-medina@durham.ac.uk"
] | brian.k.isaac-medina@durham.ac.uk |
2d28abd02b655286a0b2762b8b7f33ce1e3ce5c8 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/u19.py | 44f8d26c2e4a9b7cdc3d42f29e4fe1307c540b0c | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'u19':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
445c2230f975dd0e1e6f4f7c980b766500609f3a | 6c37d1d2437a08e43b13d621d4a8da4da7135b3a | /yt_dlp/extractor/mirrativ.py | 0a8ee0c3a52eeff28f2d9e679e0ae5913bc34970 | [
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] | permissive | yt-dlp/yt-dlp | be040bde10cc40258c879c75ab30215686352824 | d3d81cc98f554d0adb87d24bfd6fabaaa803944d | refs/heads/master | 2023-09-05T21:15:21.050538 | 2023-09-05T20:35:23 | 2023-09-05T20:35:23 | 307,260,205 | 52,742 | 5,376 | Unlicense | 2023-09-14T05:22:08 | 2020-10-26T04:22:55 | Python | UTF-8 | Python | false | false | 4,880 | py | from .common import InfoExtractor
from ..utils import (
ExtractorError,
dict_get,
traverse_obj,
try_get,
)
class MirrativBaseIE(InfoExtractor):
def assert_error(self, response):
error_message = traverse_obj(response, ('status', 'error'))
if error_message:
raise ExtractorError('Mirrativ says: %s' % error_message, expected=True)
class MirrativIE(MirrativBaseIE):
IE_NAME = 'mirrativ'
_VALID_URL = r'https?://(?:www\.)?mirrativ\.com/live/(?P<id>[^/?#&]+)'
TESTS = [{
'url': 'https://mirrativ.com/live/UQomuS7EMgHoxRHjEhNiHw',
'info_dict': {
'id': 'UQomuS7EMgHoxRHjEhNiHw',
'title': 'ねむいぃ、。『参加型』🔰jcが初めてやるCOD✨初見さん大歓迎💗',
'is_live': True,
'description': 'md5:bfcd8f77f2fab24c3c672e5620f3f16e',
'thumbnail': r're:https?://.+',
'uploader': '# あ ち ゅ 。💡',
'uploader_id': '118572165',
'duration': None,
'view_count': 1241,
'release_timestamp': 1646229192,
'timestamp': 1646229167,
'was_live': False,
},
'skip': 'livestream',
}, {
'url': 'https://mirrativ.com/live/POxyuG1KmW2982lqlDTuPw',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('https://www.mirrativ.com/live/%s' % video_id, video_id)
live_response = self._download_json(f'https://www.mirrativ.com/api/live/live?live_id={video_id}', video_id)
self.assert_error(live_response)
hls_url = dict_get(live_response, ('archive_url_hls', 'streaming_url_hls'))
is_live = bool(live_response.get('is_live'))
if not hls_url:
raise ExtractorError('Neither archive nor live is available.', expected=True)
formats = self._extract_m3u8_formats(
hls_url, video_id,
ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls', live=is_live)
return {
'id': video_id,
'title': self._og_search_title(webpage, default=None) or self._search_regex(
r'<title>\s*(.+?) - Mirrativ\s*</title>', webpage) or live_response.get('title'),
'is_live': is_live,
'description': live_response.get('description'),
'formats': formats,
'thumbnail': live_response.get('image_url'),
'uploader': traverse_obj(live_response, ('owner', 'name')),
'uploader_id': traverse_obj(live_response, ('owner', 'user_id')),
'duration': try_get(live_response, lambda x: x['ended_at'] - x['started_at']) if not is_live else None,
'view_count': live_response.get('total_viewer_num'),
'release_timestamp': live_response.get('started_at'),
'timestamp': live_response.get('created_at'),
'was_live': bool(live_response.get('is_archive')),
}
class MirrativUserIE(MirrativBaseIE):
IE_NAME = 'mirrativ:user'
_VALID_URL = r'https?://(?:www\.)?mirrativ\.com/user/(?P<id>\d+)'
_TESTS = [{
# Live archive is available up to 3 days
# see: https://helpfeel.com/mirrativ/%E9%8C%B2%E7%94%BB-5e26d3ad7b59ef0017fb49ac (Japanese)
'url': 'https://www.mirrativ.com/user/110943130',
'note': 'multiple archives available',
'only_matching': True,
}]
def _entries(self, user_id):
page = 1
while page is not None:
api_response = self._download_json(
f'https://www.mirrativ.com/api/live/live_history?user_id={user_id}&page={page}', user_id,
note=f'Downloading page {page}')
self.assert_error(api_response)
lives = api_response.get('lives')
if not lives:
break
for live in lives:
if not live.get('is_archive') and not live.get('is_live'):
# neither archive nor live is available, so skip it
# or the service will ban your IP address for a while
continue
live_id = live.get('live_id')
url = 'https://www.mirrativ.com/live/%s' % live_id
yield self.url_result(url, video_id=live_id, video_title=live.get('title'))
page = api_response.get('next_page')
def _real_extract(self, url):
user_id = self._match_id(url)
user_info = self._download_json(
f'https://www.mirrativ.com/api/user/profile?user_id={user_id}', user_id,
note='Downloading user info', fatal=False)
self.assert_error(user_info)
return self.playlist_result(
self._entries(user_id), user_id,
user_info.get('name'), user_info.get('description'))
| [
"noreply@github.com"
] | noreply@github.com |
fb12706820982b795c59f849b48717b13ef46c7d | 7f2d9dd3a65cd0f040b21ce40894eb2edfb40f11 | /train_hr.py | f5c9b27dddf46580e37113891917ae0cb6922dab | [
"MIT"
] | permissive | jinxu06/pixel-cnn | 3546e5ea071ac764bcc8129fbf68bba296b57364 | 9cad98f3f801bd772815dbb403fb6649ff704dfa | refs/heads/master | 2021-09-06T13:50:08.190072 | 2018-02-07T05:38:26 | 2018-02-07T05:38:26 | 109,583,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,118 | py | """
Trains a Pixel-CNN++ generative model on CIFAR-10 or Tiny ImageNet data.
Uses multiple GPUs, indicated by the flag --nr-gpu
Example usage:
CUDA_VISIBLE_DEVICES=0,1,2,3 python train_double_cnn.py --nr_gpu 4
"""
import os
import sys
import time
import json
import argparse
import numpy as np
import tensorflow as tf
import pixel_cnn_pp.nn as nn
import pixel_cnn_pp.mask as mk
import pixel_cnn_pp.plotting as plotting
from pixel_cnn_pp.model_hr import model_spec
import data.cifar10_data as cifar10_data
import data.imagenet_data as imagenet_data
import data.celeba_data_hr as celeba_data #!!!!!!
import data.svhn_data as svhn_data
from utils import parse_args
from configs import configs
# -----------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# data I/O
parser.add_argument('-i', '--data_dir', type=str,
default='/tmp/pxpp/data', help='Location for the dataset')
parser.add_argument('-o', '--save_dir', type=str, default='/tmp/pxpp/save',
help='Location for parameter checkpoints and samples')
parser.add_argument('-d', '--data_set', type=str,
default='cifar', help='Can be either cifar|imagenet')
parser.add_argument('-t', '--save_interval', type=int, default=20,
help='Every how many epochs to write checkpoint/samples?')
parser.add_argument('-r', '--load_params', dest='load_params', action='store_true',
help='Restore training from previous model checkpoint?')
# model
parser.add_argument('-q', '--nr_resnet', type=int, default=5,
help='Number of residual blocks per stage of the model')
parser.add_argument('-n', '--nr_filters', type=int, default=160,
help='Number of filters to use across the model. Higher = larger model.')
parser.add_argument('-m', '--nr_logistic_mix', type=int, default=10,
help='Number of logistic components in the mixture. Higher = more flexible model')
parser.add_argument('-z', '--resnet_nonlinearity', type=str, default='concat_elu',
help='Which nonlinearity to use in the ResNet layers. One of "concat_elu", "elu", "relu" ')
parser.add_argument('-c', '--class_conditional', dest='class_conditional',
action='store_true', help='Condition generative model on labels?')
# optimization
parser.add_argument('-l', '--learning_rate', type=float,
default=0.001, help='Base learning rate')
parser.add_argument('-e', '--lr_decay', type=float, default=0.999995,
help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-b', '--batch_size', type=int, default=12,
help='Batch size during training per GPU')
parser.add_argument('-a', '--init_batch_size', type=int, default=100,
help='How much data to use for data-dependent initialization.')
parser.add_argument('-p', '--dropout_p', type=float, default=0.5,
help='Dropout strength (i.e. 1 - keep_prob). 0 = No dropout, higher = more dropout.')
parser.add_argument('-x', '--max_epochs', type=int,
default=5000, help='How many epochs to run in total?')
parser.add_argument('-g', '--nr_gpu', type=int, default=8,
help='How many GPUs to distribute the training across?')
# evaluation
parser.add_argument('--polyak_decay', type=float, default=0.9995,
help='Exponential decay rate of the sum of previous model iterates during Polyak averaging')
# reproducibility
parser.add_argument('-s', '--seed', type=int, default=1,
help='Random seed to use')
parser.add_argument('-k', '--masked', dest='masked',
action='store_true', help='Randomly mask input images?')
parser.add_argument('-j', '--rot180', dest='rot180',
action='store_true', help='Rot180 the images?')
args = parser.parse_args()
parse_args(args, **configs['celeba-hr-forward'])
args.save_dir = "/data/ziz/jxu/save64-backward-new-20-missing"
args.nr_logistic_mix = 20
args.learning_rate = 0.0005
args.masked = True
args.load_params = True
print('input args:\n', json.dumps(vars(args), indent=4,
separators=(',', ':'))) # pretty print args
# -----------------------------------------------------------------------------
# fix random seed for reproducibility
rng = np.random.RandomState(args.seed)
tf.set_random_seed(args.seed)
# initialize data loaders for train/test splits
if args.data_set == 'imagenet' and args.class_conditional:
raise("We currently don't have labels for the small imagenet data set")
DataLoader = {'cifar': cifar10_data.DataLoader,
'imagenet': imagenet_data.DataLoader,
'celeba': celeba_data.DataLoader,
'svhn': svhn_data.DataLoader}[args.data_set]
train_data = DataLoader(args.data_dir, 'train', args.batch_size * args.nr_gpu,
rng=rng, shuffle=True, return_labels=args.class_conditional)
test_data = DataLoader(args.data_dir, 'valid', args.batch_size *
args.nr_gpu, shuffle=False, return_labels=args.class_conditional)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
assert len(obs_shape) == 3, 'assumed right now'
# data place holders
x_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
xs = [tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape)
for i in range(args.nr_gpu)]
# if the model is class-conditional we'll set up label placeholders +
# one-hot encodings 'h' to condition on
if args.class_conditional:
num_labels = train_data.get_num_labels()
y_init = tf.placeholder(tf.int32, shape=(args.init_batch_size,))
h_init = tf.one_hot(y_init, num_labels)
y_sample = np.split(
np.mod(np.arange(args.batch_size * args.nr_gpu), num_labels), args.nr_gpu)
h_sample = [tf.one_hot(tf.Variable(
y_sample[i], trainable=False), num_labels) for i in range(args.nr_gpu)]
ys = [tf.placeholder(tf.int32, shape=(args.batch_size,))
for i in range(args.nr_gpu)]
hs = [tf.one_hot(ys[i], num_labels) for i in range(args.nr_gpu)]
else:
h_init = None
h_sample = [None] * args.nr_gpu
hs = h_sample
if args.masked:
masks = tf.placeholder(tf.float32, shape=(args.batch_size,) + obs_shape[:-1])
else:
masks = None
# create the model
model_opt = {'nr_resnet': args.nr_resnet, 'nr_filters': args.nr_filters,
'nr_logistic_mix': args.nr_logistic_mix, 'resnet_nonlinearity': args.resnet_nonlinearity}
model = tf.make_template('model', model_spec)
# run once for data dependent initialization of parameters
gen_par = model(x_init, None, h_init, init=True,
dropout_p=args.dropout_p, **model_opt)
# keep track of moving average
all_params = tf.trainable_variables()
ema = tf.train.ExponentialMovingAverage(decay=args.polyak_decay)
maintain_averages_op = tf.group(ema.apply(all_params))
# get loss gradients over multiple GPUs
grads = []
loss_gen = []
loss_gen_test = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
# train
gen_par = model(xs[i], masks, hs[i], ema=None,
dropout_p=args.dropout_p, **model_opt)
loss_gen.append(nn.discretized_mix_logistic_loss(xs[i], gen_par, masks=masks))
# gradients
grads.append(tf.gradients(loss_gen[i], all_params))
# test
gen_par = model(xs[i], masks, hs[i], ema=ema, dropout_p=0., **model_opt)
loss_gen_test.append(nn.discretized_mix_logistic_loss(xs[i], gen_par, masks=masks))
# add losses and gradients together and get training updates
tf_lr = tf.placeholder(tf.float32, shape=[])
with tf.device('/gpu:0'):
for i in range(1, args.nr_gpu):
loss_gen[0] += loss_gen[i]
loss_gen_test[0] += loss_gen_test[i]
for j in range(len(grads[0])):
grads[0][j] += grads[i][j]
# training op
optimizer = tf.group(nn.adam_updates(
all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)
# convert loss to bits/dim
bits_per_dim = loss_gen[
0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) * args.batch_size)
bits_per_dim_test = loss_gen_test[
0] / (args.nr_gpu * np.log(2.) * np.prod(obs_shape) * args.batch_size)
# sample from the model
new_x_gen = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
gen_par = model(xs[i], None, h_sample[i], ema=ema, dropout_p=0, **model_opt)
new_x_gen.append(nn.sample_from_discretized_mix_logistic(
gen_par, args.nr_logistic_mix))
def sample_from_model(sess):
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32)
for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
new_x_gen_np = sess.run(
new_x_gen, {xs[i]: x_gen[i] for i in range(args.nr_gpu)})
for i in range(args.nr_gpu):
x_gen[i][:, yi, xi, :] = new_x_gen_np[i][:, yi, xi, :]
return np.concatenate(x_gen, axis=0)
# init & save
initializer = tf.global_variables_initializer()
saver = tf.train.Saver()
# turn numpy inputs into feed_dict for use with tensorflow
#mgen = mk.RecMaskGenerator(obs_shape[0], obs_shape[1])
mgen = mk.RectangleInProgressMaskGenerator(obs_shape[0], obs_shape[1])
agen = mk.AllOnesMaskGenerator(obs_shape[0], obs_shape[1])
def make_feed_dict(data, init=False, masks=None, is_test=False):
if type(data) is tuple:
x, y = data
else:
x = data
y = None
if args.rot180:
x = np.rot90(x, 2, (1,2)) #### ROT
# input to pixelCNN is scaled from uint8 [0,255] to float in range [-1,1]
x = np.cast[np.float32]((x - 127.5) / 127.5)
if init:
feed_dict = {x_init: x}
if y is not None:
feed_dict.update({y_init: y})
else:
x = np.split(x, args.nr_gpu)
feed_dict = {xs[i]: x[i] for i in range(args.nr_gpu)}
if masks is not None:
if is_test:
feed_dict[masks] = agen.gen(args.batch_size)
else:
feed_dict[masks] = mgen.gen(args.batch_size)
if y is not None:
y = np.split(y, args.nr_gpu)
feed_dict.update({ys[i]: y[i] for i in range(args.nr_gpu)})
return feed_dict
# //////////// perform training //////////////
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
print('starting training')
test_bpd = []
lr = args.learning_rate
with tf.Session() as sess:
for epoch in range(args.max_epochs):
begin = time.time()
# init
if epoch == 0:
# manually retrieve exactly init_batch_size examples
feed_dict = make_feed_dict(
train_data.next(args.init_batch_size), init=True)
train_data.reset() # rewind the iterator back to 0 to do one full epoch
sess.run(initializer, feed_dict)
print('initializing the model...')
if args.load_params:
ckpt_file = args.save_dir + '/params_' + args.data_set + '.ckpt'
print('restoring parameters from', ckpt_file)
saver.restore(sess, ckpt_file)
# train for one epoch
train_losses = []
for d in train_data:
feed_dict = make_feed_dict(d, masks=masks)
# forward/backward/update model on each gpu
lr *= args.lr_decay
feed_dict.update({tf_lr: lr})
l, _ = sess.run([bits_per_dim, optimizer], feed_dict)
train_losses.append(l)
train_loss_gen = np.mean(train_losses)
# compute likelihood over test data
test_losses = []
for d in test_data:
feed_dict = make_feed_dict(d, masks=masks, is_test=True)
l = sess.run(bits_per_dim_test, feed_dict)
test_losses.append(l)
test_loss_gen = np.mean(test_losses)
test_bpd.append(test_loss_gen)
# log progress to console
print("Iteration %d, time = %ds, train bits_per_dim = %.4f, test bits_per_dim = %.4f" % (
epoch, time.time() - begin, train_loss_gen, test_loss_gen))
sys.stdout.flush()
if epoch % args.save_interval == 0:
# generate samples from the model
sample_x = sample_from_model(sess)
img_tile = plotting.img_tile(sample_x[:int(np.floor(np.sqrt(
args.batch_size * args.nr_gpu))**2)], aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title=args.data_set + ' samples')
plotting.plt.savefig(os.path.join(
args.save_dir, '%s_sample%d.png' % (args.data_set, epoch)))
plotting.plt.close('all')
# save params
saver.save(sess, args.save_dir + '/params_' +
args.data_set + '.ckpt')
np.savez(args.save_dir + '/test_bpd_' + args.data_set +
'.npz', test_bpd=np.array(test_bpd))
| [
"s1673820@sms.ed.ac.uk"
] | s1673820@sms.ed.ac.uk |
edb183db7cca17314943fd54d12e17ee078172da | 708ed0381947de546923c98acbc8dd41bf506c7b | /constant.py | 2de6ba3c213c973ec5a15525c17c18932d117895 | [] | no_license | fazli1702/tic-tac-toe | 7072ce973641785f54b88dbd77574cedfa42e004 | c9f7e065df3b69682b0f37f501e40a33fdf544d1 | refs/heads/master | 2023-02-10T15:09:08.970809 | 2020-12-25T04:59:34 | 2020-12-25T04:59:34 | 322,166,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import pygame
WIDTH, HEIGHT = 600, 600
SQUARE_SIZE = WIDTH // 3 or HEIGHT // 3
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
def get_row_col(x, y):
row = y // SQUARE_SIZE
col = x // SQUARE_SIZE
return row, col | [
"fazliyazid1702@gmail.com"
] | fazliyazid1702@gmail.com |
041c50eb2bba907e2b7b482cee6cae683559bf89 | 50382ea6c3fc3fe0a26a10496c52b248c22b2505 | /notes/Items = Judge Amandeep.py | cad89c22cb4caae67383ccdf12ff53663966e7e4 | [] | no_license | Judge61/CSE | 9288f21a3d6847ab414c0f3c92e225661f118f36 | b70a9cf94e8223609b96c2860fa6ea079404b02a | refs/heads/master | 2020-04-02T06:57:39.156633 | 2019-05-23T16:45:12 | 2019-05-23T16:45:12 | 154,175,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | class Item(object):
def __init__(self, name):
self.name = name
class Weapon(Item):
def __init__(self, name, damage):
super(Weapon, self).__init__(name)
self.damage = damage
print("Nice attack")
class Armor(Item):
def __init__(self, name, armor_amt):
super(Armor, self). __init__(name)
self.armor_amt = armor_amt
class Character(object):
def __init__(self, name, health: int, weapon, armor):
self.name = name
self.health = health
self.weapon = weapon
self.armor = armor
def take_damage(self, damage: int):
if self.armor.armor_amt > damage:
print("No damage is done because of some AMAZING armor!")
else:
self.health -= damage - self.armor.armor_amt
self.health -= damage - self.armor.armor_amt
print("%s has %d health left" % (self.name, self.health))
def attack(self, target):
print("%s attacks %s for %d damage" % (self.name, target.name, self.weapon.damage))
target.take_damage(self.weapon.damage)
sword = Weapon("Sword", 10)
canoe = Weapon("Canoe", 42)
troll_armor = Armor("Armor of the gods", 10000000)
lich = Character("Lich", 100, sword, Armor("Generic Armor", 2))
troll = Character("Troll", 10000, canoe, troll_armor)
lich.attack(troll)
troll.attack(lich)
| [
"42385606+Judge61@users.noreply.github.com"
] | 42385606+Judge61@users.noreply.github.com |
7d81bfd1aecb256087020de1e52e5f0ca83e6295 | d52af838e79dbc0a8524ea14d3b84b7eada45df8 | /repcal/__init__.py | c5fa765ce2e8cfa2ce5c2f13928dba92223f3483 | [
"MIT"
] | permissive | dekadans/repcal | 989544f368e043d99287a1a03fea226dbcb886ff | a9022943fc43190f5260f5e09c78cbc4001edc6e | refs/heads/main | 2023-02-03T19:09:40.721567 | 2020-12-23T12:25:43 | 2020-12-23T12:25:43 | 305,468,317 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from .RepublicanDate import RepublicanDate
from .DecimalTime import DecimalTime
from .RepublicanFormatter import RepublicanFormatter
| [
"tomas.thelander0@gmail.com"
] | tomas.thelander0@gmail.com |
c9ba661b2eaf706150acc8d70fa5c78430f74e48 | 622a73eb5e7e3f465b3f0d7774df5e742b1532ce | /intermediate tasks/task3.1/myModels.py | dc9c50a215f88d6dd060c69b8fcb96f2e0f39832 | [] | no_license | nirajmahajan/Diabetic-Retinopathy-Image-Generation | 6a88ae8b170598353b383d9b4ad787481b8d2cb6 | 511bca72217180ada58bfa5fb7ffe8fb17e1a108 | refs/heads/main | 2023-08-28T03:23:04.427883 | 2021-10-28T09:26:32 | 2021-10-28T09:26:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,552 | py | import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
from torchvision import transforms, models
from torchvision.models import vgg16, vgg16_bn, alexnet
from torch import nn, optim
from torch.nn import functional as F
import pickle
import argparse
import sys
import os
import PIL
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class VAE(nn.Module):
def __init__(self, n_channels = 1, latent_space = 64):
super(VAE, self).__init__()
self.enc = Encoder(n_channels, latent_space)
self.dec = Decoder(n_channels, latent_space)
self.latent_space = latent_space
def forward(self, x):
mean,log_var = self.enc(x)
reconstruction = self.dec(self.reparameterize(mean,log_var))
return mean, log_var, reconstruction
def reparameterize(self, mean, log_var):
std = torch.exp(0.5*log_var)
eps = torch.randn_like(std)
return mean + eps * log_var
def generate_new(self, n):
data = torch.randn(n,self.latent_space).to(device)
return self.dec(data).detach().cpu().numpy().reshape(-1,256,256)
class Encoder(nn.Module):
def __init__(self, n_channels, latent_space):
super(Encoder, self).__init__()
self.model = nn.Sequential(
nn.Conv2d(n_channels,64,3,stride = 2,padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2),
nn.Conv2d(64,64,3,padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2),
nn.Conv2d(64,128,3,stride = 2,padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128,128,3,padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128,256,3,stride = 2,padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256,256,3,padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256,512,3,stride = 2,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,stride=2,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,stride=2,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,stride=2,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,3,padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.1),
nn.Conv2d(512,512,2),
)
self.mean_layer = nn.Sequential(
nn.Linear(512, latent_space),
nn.LeakyReLU(),
nn.Dropout(0.2)
)
self.log_var_layer = nn.Sequential(
nn.Linear(512, latent_space),
nn.LeakyReLU(),
nn.Dropout(0.2)
)
def forward(self, x):
a = self.model(x).reshape(-1,512)
return self.mean_layer(a), self.log_var_layer(a)
class Decoder(nn.Module):
    """Convolutional decoder mirroring the Encoder.

    A linear layer expands the latent vector to 512 channels at 1x1; eight
    2x upsampling stages then grow it to full resolution, and a final 3x3
    conv + Sigmoid maps to ``n_channels`` output planes in [0, 1].
    """

    def __init__(self, n_channels, latent_space):
        super(Decoder, self).__init__()
        layers = []
        # First four stages: upsample + single 512-channel refinement conv.
        for _ in range(4):
            layers += [
                nn.Upsample(scale_factor=2),
                nn.Conv2d(512, 512, 3, padding=1),
                nn.BatchNorm2d(512),
                nn.LeakyReLU(0.1),
            ]
        # Remaining stages: upsample, reduce channels, then refine.
        for c_in, c_out in [(512, 256), (256, 128), (128, 64), (64, 64)]:
            layers += [
                nn.Upsample(scale_factor=2),
                nn.Conv2d(c_in, c_out, 3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(0.1),
                nn.Conv2d(c_out, c_out, 3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(0.1),
            ]
        layers += [
            nn.Conv2d(64, n_channels, 3, padding=1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)
        self.decoder_linear = nn.Sequential(
            nn.Linear(latent_space, 512),
            nn.LeakyReLU(),
            nn.Dropout(0.2),
        )
def forward(self, x):
return self.model(self.decoder_linear(x).view(-1,512,1,1)) | [
"nirajmahajan007@gmail.com"
] | nirajmahajan007@gmail.com |
462fdb171366888fdd4b6940b64acb815518deda | d4d421ef14e5155a9c8252fb28c432d736d62f74 | /comment_spider/comment_spider/spiders/commentspider.py | bd87f5509af3e7661af55f82cad58599c3b1d1d7 | [
"Apache-2.0"
] | permissive | moxi255/jd-spider | e63a6a37030443ee79f87cb610a3c27f5865e788 | 8f92f7e7a9a27c2f83f6432f17e1f0bb920b4213 | refs/heads/master | 2020-05-23T20:58:22.578965 | 2019-05-16T03:35:20 | 2019-05-16T03:35:20 | 186,941,334 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,524 | py | # -*- coding: utf-8 -*-
import scrapy
import json, re
from tools.sql_tools import *
from tools.jieba_content import get_jieba_comment
from ..items import CommentSpiderItem
from ..settings import EXISTS_CONMENTS
class CommentspiderSpider(scrapy.Spider):
    # Scrapes JD.com product-comment JSON pages: stores the hot-comment tag
    # statistics once per product, saves each comment (segmented with jieba),
    # and follows pagination up to page 100.
    name = 'commentspider'
    allowed_domains = ['jd.com']
    start_urls = get_start_urls()

    def parse(self, response):
        if response.text:
            json_obj = json.loads(response.text)
            if json_obj:
                tag_data = json_obj['hotCommentTagStatistics']
                tags = '|'.join([tag['name'] for tag in tag_data])
                count = '|'.join([str(tag['count']) for tag in tag_data])
                # NOTE(review): `response._url` is a private attribute;
                # scrapy exposes the same value as `response.url`.
                url = response._url
                # The URL's 5th query param is the page number, the 2nd the product id.
                page_num = int(url.split('&')[4].split('=')[1])
                computer_id = int(url.split('&')[1].split('=')[1])
                comments = json_obj['comments']
                # Save the tag data (only once, on the first page).
                if page_num == 1:
                    save_tags(tags, count, computer_id)
                if 0 < len(comments) < 10:
                    # A short page: this is the last page for the product.
                    for comment in comments:
                        comment_id = str(computer_id) + str(comment['id'])
                        content = re.sub(r"…|\.| |~|'", '', comment['content'])
                        print(content)
                        jieba_content = get_jieba_comment(content)
                        print(jieba_content)
                        create_time = comment['creationTime']
                        score = comment['score']
                        print(comment_id, content, jieba_content, score, create_time, computer_id)
                        if comment_id in EXISTS_CONMENTS:
                            # Comment already stored; skip it.
                            print(f'{comment_id} 评论已存在')
                        else:
                            save_comment(comment_id, content, jieba_content, score, create_time, computer_id)
                    # All comments for this product collected: mark if_spider done.
                    update_if_spider(computer_id)
                elif len(comments) == 10:
                    # A full page: save it and (maybe) continue to the next page.
                    for comment in comments:
                        comment_id = str(computer_id) + str(comment['id'])
                        content = comment['content'].replace(' ', '')
                        jieba_content = get_jieba_comment(content)
                        create_time = comment['creationTime']
                        score = comment['score']
                        print(comment_id, content, jieba_content, score, create_time, computer_id)
                        if comment_id in EXISTS_CONMENTS:
                            # Comment already stored; skip it.
                            print(f'{comment_id} 评论已存在')
                        else:
                            save_comment(comment_id, content, jieba_content, score, create_time, computer_id)
                    page_num += 1
                    if page_num == 101:
                        # Page cap reached: mark this product's crawl finished.
                        update_if_spider(computer_id)
                    # Follow the next page.
                    if page_num < 101:
                        next_url = f'https://club.jd.com/comment/skuProductPageComments.action?&productId={computer_id}&score=0&sortType=5&page={page_num}&pageSize=10&isShadowSku=0&rid=0&fold=1%27'
                        yield scrapy.Request(url=next_url, callback=self.parse)
            else:
                # NOTE(review): `computer_id` is only assigned inside the branch
                # above, so this raises NameError when json_obj is falsy; and
                # yielding a Spider instance is not valid in scrapy (parse may
                # only yield Requests/items) — confirm the intended behaviour.
                update_if_spider(computer_id)
                # Move on to collecting the next product's comments.
                yield CommentspiderSpider()
| [
"614303219@qq.com"
] | 614303219@qq.com |
951eff5775832bdb7d6d2882d73b8b743dbe75e8 | 60f93042cd806b56362232105dad1a5ba8e10d51 | /energy_budget/dependencies/useful_functions.py | a2357ccf5d32d2752b3b38e08eca0312ce22ec18 | [] | no_license | robertladwig/LakeGeneva | 264d5dac287891ccc1f264056e6b5ff4b1b34f14 | 25370cf570ad8d112ea6a7f3990eeb5fbd4e2e1a | refs/heads/main | 2023-07-13T03:45:56.772603 | 2021-08-28T19:46:26 | 2021-08-28T19:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,973 | py | ## USEFUL FUNCTIONS
#B. Fernandez Castro 2020.03.03
import numpy as np
import sys
import matplotlib.pyplot as plt
def moving_average(x, n, window="flat"):
    """Smooth *x* with a centered window of odd width *n*.

    Even *n* is bumped to the next odd value. Supported windows: "flat"
    (boxcar), "gauss" (Gaussian with scale n/4) and "hanning". A point is
    left NaN when fewer than 25% of its window samples are finite.
    """
    if n % 2 == 0:
        n += 1
    size = x.size
    smoothed = np.full(x.size, np.nan)
    half = n // 2
    for center in range(size):
        idx = np.arange(center - half, center + half + 1, 1)
        if window == "flat":
            weights = np.ones(idx.size)
        elif window == "gauss":
            offsets = idx - center
            weights = np.exp(-offsets ** 2 / (float(n) / 4) ** 2)
        elif window == "hanning":
            weights = np.hanning(idx.size)
        # Clip the window to the valid index range.
        inside = (idx >= 0) & (idx < size)
        weights = weights[inside]
        idx = idx[inside]
        finite = np.isfinite(x[idx])
        if np.sum(finite) < 0.25 * idx.size:
            continue
        smoothed[center] = np.sum(x[idx[finite]] * weights[finite]) / np.sum(weights[finite])
    return smoothed
def linfit_modelII(X, Y):
    """Model-II ("major axis") least-squares fit of the line y = m*x + b.

    The line minimizes the NORMAL deviates; all points get equal weight, so
    X and Y must share units and range. Equations follow York (1966) Canad.
    J. Phys. 44: 1079-1086, after Kermack & Haldane (1950) and Pearson
    (1901). The fitted line passes through the centroid (x-mean, y-mean).
    Non-finite pairs are discarded before fitting.

    Returns:
        (np.array([m, b]), np.array([sm, sb]), r) where m/b are slope and
        intercept, sm/sb their standard deviations, and r the correlation
        coefficient.

    Originally lsqfitma.m by Edward T. Peltzer, MBARI (revised 2016-03-17).
    """
    valid = np.isfinite(X + Y)
    X = X[valid]
    Y = Y[valid]
    n = float(np.sum(valid))
    xbar = np.sum(X) / n
    ybar = np.sum(Y) / n
    U = X - xbar
    V = Y - ybar
    Suv = np.sum(U * V)
    Suu = np.sum(U * U)
    Svv = np.sum(V * V)
    sigx = np.sqrt(Suu / (n - 1))
    sigy = np.sqrt(Svv / (n - 1))
    m = (Svv - Suu + np.sqrt(((Svv - Suu) ** 2) + (4 * Suv ** 2))) / (2 * Suv)
    b = ybar - m * xbar
    r = Suv / np.sqrt(Suu * Svv)
    sm = (m / r) * np.sqrt((1 - r ** 2) / n)
    sb1 = (sigy - sigx * m) ** 2
    sb2 = (2 * sigx * sigy) + ((xbar ** 2 * m * (1 + r)) / r ** 2)
    sb = np.sqrt((sb1 + ((1 - r) * m * sb2)) / n)
    return np.array([m, b]), np.array([sm, sb]), r
def FCD_2d(x, y, axis=0):
    """First centered differences dy/dx applied along *axis* of 2-D arrays.

    axis=0 differentiates each column; axis=1 each row. Invalid input
    terminates the process via sys.exit (preserving the original behaviour).
    """
    if x.ndim != 2 or y.ndim != 2:
        sys.exit("Invalid dimensions")
    if axis not in (0, 1):
        sys.exit("Invalid axis")
    transposed = axis == 1
    if transposed:
        x, y = x.T, y.T
    out = np.full(y.shape, np.nan)
    for col in range(x.shape[1]):
        out[:, col] = first_centered_differences(x[:, col], y[:, col])
    return out.T if transposed else out
def first_centered_differences(x, y, fill=False):
    """Return dy/dx on the finite samples of (x, y).

    Interior points use centered differences; the two ends use one-sided
    differences. Positions where x or y is non-finite stay NaN. When
    ``fill`` is True, leading/trailing gaps are padded with the nearest
    computed value.

    Fixes:
    - the size-mismatch warning used a Python 2 ``print`` statement, which is
      a SyntaxError under Python 3; it is now a function call.
    - a single finite sample previously raised IndexError; it now simply
      returns all-NaN (no derivative is defined from one point).
    """
    if x.size != y.size:
        print("first-centered differences: vectors do not have the same size")
    dy = np.full(x.size, np.nan)
    iif = np.where((np.isfinite(x)) & (np.isfinite(y)))[0]
    if iif.size == 0:
        return dy
    x0 = x[iif]
    y0 = y[iif]
    if iif.size == 1:
        # One finite sample: no derivative can be formed.
        return dy
    dy0 = np.full(x0.size, np.nan)
    # calculates differences (one-sided at the ends, centered inside)
    dy0[0] = (y0[1] - y0[0]) / (x0[1] - x0[0])
    dy0[-1] = (y0[-1] - y0[-2]) / (x0[-1] - x0[-2])
    dy0[1:-1] = (y0[2:] - y0[0:-2]) / (x0[2:] - x0[0:-2])
    dy[iif] = dy0
    if fill:
        dy[0:iif[0]] = dy[iif[0]]
        dy[iif[-1] + 1:] = dy[iif[-1]]
    return dy
def centered_differences(x, y, fill=False):
    """Return dy/dx using strictly centered differences on finite samples.

    Unlike :func:`first_centered_differences`, the first and last finite
    points have no centered neighbour pair and therefore stay NaN. When
    ``fill`` is True, leading/trailing non-finite gaps are padded with the
    nearest computed value.

    Fix: the size-mismatch warning used a Python 2 ``print`` statement
    (SyntaxError under Python 3); it is now a function call. The message
    text is kept unchanged, although it still says "first-centered".
    """
    if x.size != y.size:
        print("first-centered differences: vectors do not have the same size")
    dy = np.full(x.size, np.nan)
    iif = np.where((np.isfinite(x)) & (np.isfinite(y)))[0]
    if iif.size == 0:
        return dy
    x0 = x[iif]
    y0 = y[iif]
    dy0 = np.full(x0.size, np.nan)
    # calculates differences (interior points only; endpoints stay NaN)
    dy0[1:-1] = (y0[2:] - y0[0:-2]) / (x0[2:] - x0[0:-2])
    dy[iif] = dy0
    if fill:
        dy[0:iif[0]] = dy[iif[0]]
        dy[iif[-1] + 1:] = dy[iif[-1]]
    return dy
def mixed_layer_depth(z0, den0, Dd = 0.05, crit = "diff"):
    """Mixed layer depth (MLD) for each profile (column) of *den0*.

    z0   : 1-D depth vector shared by all profiles, or a 2-D array of
           per-profile depths.
    den0 : (levels, profiles) density array. Each column is passed through
           np.sort, which presumes density increases with depth.
    Dd   : threshold — a density offset for "diff", a gradient magnitude
           for "grad".
    crit : "diff" (offset from the shallowest density) or "grad"
           (|d(den)/dz|); anything else falls back to "diff".

    The MLD is linearly interpolated between the two levels bracketing the
    threshold; when the threshold is never reached it is set to max(z).
    Profiles with <= 1 finite sample are left NaN.

    Fix: the invalid-criterion warning used a Python 2 ``print`` statement
    (SyntaxError under Python 3); it is now a function call.
    """
    # Mixed layer calculation
    if crit != "diff" and crit != "grad":
        crit = "diff"
        print("Incorrect criterion, set to diff")
    c,f = den0.shape
    MLD = np.full(f, np.nan)
    for i in range(f):
        if z0.ndim ==1:
            z = np.copy(z0)
        else:
            z = z0[:,i]
        den = np.sort(den0[:,i])
        iif = np.isfinite(den+z)
        if np.sum(iif)<=1:
            continue
        den = den[iif]
        z = z[iif]
        if crit == "diff":
            sden = den[0]
            denp = den-sden
            imld = np.where( denp>=Dd )[0]
            if imld.size == 0:
                MLD[i] = np.max(z)
            elif imld[0]>0:
                imld = imld[0]
                z2 = z[imld]
                z1 = z[imld-1]
                denp2 = denp[imld]
                denp1 = denp[imld-1]
                # Linear interpolation to the depth where denp == Dd.
                MLD[i] = (z2-z1)/(denp2-denp1)*(Dd - denp1) + z1
            else:
                # Threshold already exceeded at the very first level.
                MLD[i] = np.max(z)
                #MLD[i] = z0[0,i]
        elif crit == "grad":
            grden = np.abs(first_centered_differences(z, den))
            imld = np.where(grden>=Dd)[0]
            if imld.size == 0:
                MLD[i] = np.max(z)
            elif imld[0]>0:
                imld = imld[0]
                z2 = z[imld]
                z1 = z[imld-1]
                grd2 = grden[imld]
                grd1 = grden[imld-1]
                # Linear interpolation to the depth where the gradient == Dd.
                MLD[i] = (z2-z1)/(grd2-grd1)*(Dd - grd1) + z1
            else:
                MLD[i] = z[0]
    return MLD
| [
"noreply@github.com"
] | noreply@github.com |
af93385675fcf4822c571cccec3632e16d5f819f | af275f01dfdf5636e61ecb8977b207109031f79a | /deneme/models.py | 0dd77a30e2db97a36f48461963433abb8b7a0164 | [] | no_license | malialtinel/Newspaper | 871e0990bc4d34b3998587d5f2c1fb4c4b9990e5 | b594c1a9afc145e1535f60e76357f059b7d9a4a2 | refs/heads/master | 2020-03-22T14:11:50.539698 | 2018-07-08T11:24:40 | 2018-07-08T11:24:40 | 140,160,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | from django.db import models
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
class Halk_User(models.Model):
    # One-to-one profile wrapper around Django's built-in auth User.
    # The linked User is the primary key, and deleting the User cascades
    # to this row.
    vatandas=models.OneToOneField(
        User,
        on_delete=models.CASCADE,
        primary_key=True,
    )
    def __str__(self):
        # "Last, First" of the linked auth User.
        return self.vatandas.last_name + ", " + self.vatandas.first_name
class Deneme(models.Model):
    # Single free-text field; "Deneme" is Turkish for "test".
    # NOTE(review): presumably a scratch model — confirm it is still needed.
    f = models.TextField()
class Postumuz(models.Model):
    """A post written by an auth User within a Kategori."""
    # Fix: both ForeignKeys lacked `on_delete`. Django < 2.0 silently
    # defaulted deletions to CASCADE, while Django >= 2.0 makes the argument
    # mandatory — spelling out CASCADE keeps the historical behaviour and
    # stays importable on modern Django versions.
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE,
                             verbose_name='Yazar', related_name='posts')
    kat = models.ForeignKey('Kategori', on_delete=models.CASCADE,
                            verbose_name='Kategori', related_name='posts')
    baslik = models.CharField(max_length=120, verbose_name="Başlık")
    content = RichTextField(verbose_name="İçerik")
    tarihi = models.DateTimeField(verbose_name="Yayımlanma Tarihi", auto_now_add=True)
    image = models.FileField(null=True, blank=True)

    def __str__(self):
        return self.baslik
# Create your models here.
class Comment(models.Model):
    # A reader comment attached to a Postumuz post; deleting the post
    # removes its comments (CASCADE).
    post=models.ForeignKey('Postumuz',on_delete=models.CASCADE,related_name='comments')
    name=models.CharField(max_length=400,verbose_name='isim')
    content=models.TextField(verbose_name='Yorum')
    created_date=models.DateTimeField(auto_now_add=True)
class Kategori(models.Model):
    # Post category; referenced by Postumuz.kat.
    katadi=models.CharField(max_length=30,verbose_name='kategoriadi')
    def __str__(self):
        return self.katadi
| [
"malialtinel@gmail.com"
] | malialtinel@gmail.com |
deeae75fb55700a9bf1ce2d3c2f15007fbc511d0 | c9af78f03563391def0e1cb808afda4d88a21066 | /ex7.py | 8ae5bbca0fd2aeffb24f76cb66d2f833a6d6269d | [] | no_license | Psyconne/python-examples | b873b30db237524d8bff9123e334db096d0239c5 | 35a4bb668c49aa51d9645d68a7bdf39daa0b58a4 | refs/heads/master | 2021-01-02T09:14:23.549963 | 2015-07-20T15:00:39 | 2015-07-20T15:00:39 | 39,284,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | print "I had a little lamb"
# Fix: Python 2 `print` statements are a SyntaxError under Python 3.
# Converted to print() calls; the emitted output is byte-for-byte identical
# (a Python 2 trailing comma becomes end=' ').
print("its fleece was white as %s." % 'snow')
print("." * 10)  # printing it 10 times
l1 = 'M'
l2 = 'a'
l3 = 'r'
l4 = 'o'
l5 = 'u'
l6 = 'a'
l7 = 'n'
end = 'E'
print(l1 + l2 + l3 + l4 + l5 + l6 + l7 + end)
print(l1 + l2 + l3 + l4, end=' ')  # a space
print(l5 + l6 + l7, end=' ')
print(end)
| [
"iimen.elidrissi@gmail.com"
] | iimen.elidrissi@gmail.com |
38910cfa0d829421a6d14748e1081970a606abe0 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/dossier/upgrades/20170307184059_reindex_searchable_text_for_dossier_templates/upgrade.py | 436c02f98340c2800590927a5f6bf366f0ad4ab2 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 365 | py | from ftw.upgrade import UpgradeStep
class ReindexSearchableTextForDossierTemplates(UpgradeStep):
    """Reindex SearchableText for dossier templates.
    """
    def __call__(self):
        # Apply this upgrade step's generic-setup profile, then rebuild the
        # SearchableText index for every dossier-template object.
        self.install_upgrade_profile()
        self.catalog_reindex_objects(
            {'portal_type': 'opengever.dossier.dossiertemplate'},
            idxs=['SearchableText'])
| [
"david.erni@4teamwork.ch"
] | david.erni@4teamwork.ch |
0c9d8876bb93f2c786e18e37dc1213e7ef6c6c2d | 8e8260d109d6b3680e3ce966e9baaa540393db8b | /xadmintest/settings.py | ff250650fd023fd3bc87381d949c7e0e1b6833b0 | [] | no_license | loveguan/xadmintest | 4472c13bd68f4b4ae47479449f4319e6f50df4fc | f20093afe25216154861fd8f6f061bcfee7269f2 | refs/heads/master | 2020-09-04T23:24:06.094513 | 2019-11-06T05:57:04 | 2019-11-06T05:57:04 | 219,922,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,286 | py | """
Django settings for xadmintest project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'goero%l-vd)wg%)1*5rt29kv8#=qo40=94_vvp(!+o(g#^^n%c'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'Xadmin.apps.XadminConfig',
    'app01.apps.App01Config',
    'app02.apps.App02Config',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'xadmintest.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'xadmintest.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[os.path.join(BASE_DIR,"static")] | [
"zhouguanjie@qq.com"
] | zhouguanjie@qq.com |
6fbbd263a969c5cf0e893a592532066c44019962 | 9d425b64bd8d66993fc2253613095d82ff667a6f | /day_7/parse_input.py | 44682ccb370fad08ced644dbbfc4c61dd285d510 | [] | no_license | evanptang/advent-of-code-2020 | bcf4766ff2c9bd7a9d09ed4b4ad1faf47c4920ff | e4a6871e8495142e7e4644d06e133fa421eb440c | refs/heads/master | 2023-02-02T09:50:10.432697 | 2020-12-13T20:04:16 | 2020-12-13T20:04:16 | 319,874,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | import json
file_name = 'sample.txt'
# file_name = 'input.txt'

# Parse Advent-of-Code day 7 bag rules ("X bags contain N Y bag, ...") into
# {outer_bag: {inner_bag: count}} and write it next to the input as JSON.
final_object = dict()
with open(f'parsed_{file_name}', 'w+') as f:
    with open(file_name) as g:
        for line in g:
            # Normalize: strip the newline, singularize "bags", drop periods.
            line = line.replace('\n', '')
            line = line.replace('bags', 'bag')
            line = line.replace('.', '')
            line = line.split('contain')
            line[0] = line[0][:-1]  # drop the trailing space after the outer bag
            line[1] = line[1].split(',')
            temp = dict()
            try:
                for item in line[1]:
                    item = item[1:]  # drop the leading space
                    # NOTE(review): item[0] assumes single-digit counts —
                    # confirm no rule contains 10+ of a bag.
                    temp[item[2:]] = int(item[0])
            except (ValueError, IndexError):
                # "no other bag" contents fail the int() parse -> empty map.
                # Fix: narrowed from a bare `except:` so unrelated errors
                # (KeyboardInterrupt, real bugs) are no longer swallowed.
                temp = {}
            line[1] = temp
            final_object[line[0]] = line[1]
json.dump(final_object, f) | [
"evantang2019@u.northwestern.edu"
] | evantang2019@u.northwestern.edu |
49f3e7d823cd9ee17a9c42cca9f6a42c42a6c33e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ENJTPoWCyEGgnXYjM_18.py | 35efe9177850a3a6a90e03c5f007d2d7ec0fb8cc | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py |
def percent_filled(box):
    """Percentage of the interior of *box* filled with 'o', as e.g. "50%".

    *box* is a list of equal-length strings; the interior excludes the
    one-character border on every side. Note that 'o' characters are counted
    anywhere in the box, including the border — matching the original code.
    """
    water = ''.join(box).count('o')
    interior = (len(box[0]) - 2) * (len(box) - 2)
    return str(round(water / interior * 100)) + '%'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
d25e02789b4302272e941ce1371e76e9d26a1faf | 898fc24e399b203bf8072cec03b9bfd22d3160e6 | /Neural Networks/Assignment1/venv/Scripts/pip-script.py | d2c4c69759d833d8db79036eb12596186b31282b | [] | no_license | ismael-martinez/AQM | 94505fe8acf44e459b731e6563e304789ec9db47 | a9c08ca6f9be2b07e7892773c0bad165f695899a | refs/heads/master | 2022-12-23T12:19:40.591906 | 2018-02-02T08:08:04 | 2018-02-02T08:08:04 | 119,900,486 | 0 | 1 | null | 2022-12-09T23:40:38 | 2018-02-01T22:18:18 | Python | UTF-8 | Python | false | false | 436 | py | #!"C:\Users\Ismael Martinez\Desktop\AQM\Neural Networks\Assignment1\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
# NOTE: auto-generated setuptools console-script wrapper for the `pip`
# entry point; not meant to be edited by hand.
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" so argv[0] reads as plain "pip".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
    )
| [
"ismael.martinez@hotmail.ca"
] | ismael.martinez@hotmail.ca |
19cf498c8d5f4bfad7938928cf5d71d9ab7e19e4 | d8c589e279b59a0ad6ac79ef42e073e0e86340e5 | /caluma/core/tests/test_pagination.py | 82a9cfc701678272b3d073c0e262679a53303ee8 | [
"AGPL-3.0-only",
"MIT"
] | permissive | Yelinz/caluma | e9d8b03a91f4923da99b4d820e66349a3de946e2 | b5d4f1ed3fa3e144d8516e7823dd57c3b68617be | refs/heads/master | 2023-08-16T14:00:11.207559 | 2019-10-18T12:40:03 | 2019-10-18T12:40:03 | 181,823,650 | 0 | 0 | MIT | 2019-04-17T05:34:28 | 2019-04-17T05:34:27 | null | UTF-8 | Python | false | false | 1,309 | py | import pytest
# Each case pins the Relay-style pagination flags for a connection of five
# documents. The `before`/`after` values are opaque cursors; this base64
# string decodes to "arrayconnection:2", i.e. the third edge.
@pytest.mark.parametrize(
    "first,last,before,after,has_next,has_previous",
    [
        (1, None, None, None, True, False),
        (None, 1, None, None, False, True),
        (None, None, None, None, False, False),
        (None, None, None, "YXJyYXljb25uZWN0aW9uOjI=", False, True),
        (None, None, "YXJyYXljb25uZWN0aW9uOjI=", None, True, False),
    ],
)
def test_has_next_previous(
    db,
    first,
    last,
    before,
    after,
    has_next,
    has_previous,
    schema_executor,
    document_factory,
):
    # Five documents in total; the cursors above point into this batch.
    document_factory.create_batch(5)

    query = """
        query AllDocumentsQuery ($first: Int, $last: Int, $before: String, $after: String) {
          allDocuments(first: $first, last: $last, before: $before, after: $after) {
            pageInfo {
              hasNextPage
              hasPreviousPage
            }
            edges {
              node {
                id
              }
            }
          }
        }
    """

    inp = {"first": first, "last": last, "before": before, "after": after}
    result = schema_executor(query, variables=inp)
    assert not result.errors
    assert result.data["allDocuments"]["pageInfo"]["hasNextPage"] == has_next
    assert result.data["allDocuments"]["pageInfo"]["hasPreviousPage"] == has_previous
| [
"fabio.raemi@adfinis-sygroup.ch"
] | fabio.raemi@adfinis-sygroup.ch |
7d50401e7f1cf6286d23132c3ea577467c6a556e | 84caee3f7b0e9811f91da65f59c93b08f76453f3 | /later/task.py | a201c2d1487a33d00572d896156b06435581c5c9 | [
"Apache-2.0"
] | permissive | thatch/later | 541492ca2ebd77d4b5859c00ff46247847a1d1a5 | 29c614c8a14eb290555bd1708fafea6542365e60 | refs/heads/master | 2021-01-09T16:10:39.247170 | 2020-02-19T01:33:45 | 2020-02-19T01:36:01 | 242,367,315 | 0 | 0 | null | 2020-02-22T15:46:40 | 2020-02-22T15:46:39 | null | UTF-8 | Python | false | false | 11,199 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import contextvars
import logging
from contextlib import suppress
from functools import partial, wraps
from inspect import isawaitable
from types import TracebackType
from typing import (
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from unittest.mock import Mock
from .event import BiDirectionalEvent
# A "fixer" receives a dead task and returns (or awaits to) a replacement task.
FixerType = Callable[[asyncio.Task], Union[asyncio.Task, Awaitable[asyncio.Task]]]

logger = logging.getLogger(__name__)
# Type variable for decorators that preserve an async callable's signature.
F = TypeVar("F", bound=Callable[..., Awaitable[Any]])

__all__: Sequence[str] = ["Watcher", "START_TASK", "TaskSentinel", "cancel", "as_task"]
class TaskSentinel(asyncio.Task):
    """ When you need a done task for typing """

    def __init__(self):
        # Deliberately bypass asyncio.Task.__init__ (which would require a
        # real coroutine and a running event loop): initialize the Future
        # base with a Mock loop and immediately mark the "task" as done.
        fake = Mock()
        asyncio.Future.__init__(self, loop=fake)  # typing: ignore, don't create a loop
        asyncio.Future.set_result(self, None)
async def cancel(fut: asyncio.Future) -> None:
    """Cancel *fut* and wait for the cancellation to complete.

    The resulting CancelledError is swallowed, so awaiting this helper never
    raises because of the cancellation itself; any *other* exception raised
    by the future still propagates.
    """
    fut.cancel()
    # Yield once so the event loop can deliver the cancellation.
    await asyncio.sleep(0)
    try:
        await fut
    except asyncio.CancelledError:
        pass
def as_task(func: F) -> F:
    """
    Decorate an async callable so each call is scheduled as a task on the
    currently running event loop instead of returning a bare coroutine.
    """

    @wraps(func)
    def _spawn(*args, **kws):
        return asyncio.get_running_loop().create_task(func(*args, **kws))

    return cast(F, _spawn)
# Sentinel Task: a singleton "already done" task used as a default argument
# and placeholder throughout this module.
START_TASK: asyncio.Task = TaskSentinel()

# ContextVar for finding an existing Task Watcher in the current context.
WATCHER_CONTEXT: contextvars.ContextVar[Watcher] = contextvars.ContextVar(
    "WATCHER_CONTEXT"
)
class WatcherError(RuntimeError):
    """Raised when watched tasks fail to cancel cleanly (see Watcher._handle_cancel)."""
    pass
class Watcher:
    """Async context manager that supervises a group of asyncio tasks.

    Tasks are registered with :meth:`watch` (optionally with a "fixer" that
    replaces a task when it finishes); ``__aexit__`` runs the watch loop
    until :meth:`cancel` is called, then cancels and awaits everything still
    in its care.
    """

    # watched future -> its fixer (None means "no fixer registered")
    _tasks: Dict[asyncio.Future, Optional[FixerType]]
    # fixers registered before the loop started; each creates its task lazily
    _scheduled: List[FixerType]
    _tasks_changed: BiDirectionalEvent
    _cancelled: asyncio.Event
    _cancel_timeout: float
    _preexit_callbacks: List[Callable[[], None]]
    # original task -> the asyncio.shield() wrapper we actually watch
    _shielded_tasks: Dict[asyncio.Task, asyncio.Future]
    loop: asyncio.AbstractEventLoop
    running: bool

    @staticmethod
    def get() -> Watcher:
        # Return the Watcher exposed for the current context
        # (set in __init__(context=True) or in __aenter__).
        return WATCHER_CONTEXT.get()

    def __init__(self, *, cancel_timeout: float = 300, context: bool = False) -> None:
        """
        cancel_timeout is the time in seconds we will wait after cancelling all
        the tasks watched by this watcher.

        context is whether to expose this Watcher via contextvars now or at __aenter__
        """
        if context:
            WATCHER_CONTEXT.set(self)
        self._cancel_timeout = cancel_timeout
        self._tasks = {}
        self._scheduled = []
        self._tasks_changed = BiDirectionalEvent()
        self._cancelled = asyncio.Event()
        self._preexit_callbacks = []
        self._shielded_tasks = {}
        self.running = False

    async def _run_scheduled(self) -> None:
        # Turn each scheduled fixer into a real task by invoking it with the
        # START_TASK sentinel (awaiting it first if it returned an awaitable).
        scheduled = self._scheduled
        while scheduled:
            fixer = scheduled.pop()
            task = fixer(START_TASK)
            if not isinstance(task, asyncio.Task) and isawaitable(task):
                task = await task
            if isinstance(task, asyncio.Task):
                self._tasks[task] = fixer
            else:
                raise TypeError(f"{fixer}(START_TASK) failed to return a task.")

    async def unwatch(
        self,
        task: asyncio.Task = START_TASK,
        fixer: Optional[FixerType] = None,
        *,
        shield: bool = False,
    ) -> bool:
        """
        The ability to unwatch a task, by task or fixer
        This is a coroutine to ensure the watcher has re-watched the tasks list

        If the task was shielded then you need to specify here so we can find
        the shield and remove it from the watch list.

        When unwatching a fixer, if the returned task is not the same
        as the one passed in we will cancel it, and await it.
        """

        async def tasks_changed():
            # Only wake the watch loop when it is actually running.
            if self.running:
                await self._tasks_changed.set()

        if shield:
            # Remove the shield wrapper we watch in place of the real task.
            if task in self._shielded_tasks:
                del self._tasks[self._shielded_tasks[task]]
                del self._shielded_tasks[task]
                await tasks_changed()
                return True
        elif fixer is not None:
            for t, fix in tuple(self._tasks.items()):
                if fix is fixer:
                    del self._tasks[t]
                    await tasks_changed()
                    if t is not task:
                        await cancel(t)
                    return True
        elif task is not START_TASK:
            if task in self._tasks:
                del self._tasks[task]
                await tasks_changed()
                return True
        return False

    def watch(
        self,
        task: asyncio.Task = START_TASK,
        fixer: Optional[FixerType] = None,
        *,
        shield: bool = False,
    ) -> None:
        """
        Add a task to be watched by the watcher
        You can also attach a fixer co-routine or function to be used to fix a
        task that has died.

        The fixer will be passed the failed task, and is expected to return a working
        task, or raise if that is impossible.

        You can also just pass in the fixer and we will use it to create the task
        to be watched. The fixer will be passed a dummy task singleton:
        `later.task.START_TASK`

        shield argument lets you watch a task, but not cancel it in this watcher.
        Useful for triggering on task failures, but not managing said task.
        """
        # Watching a coro, leads to a confusing error deep in watcher
        # so use runtime checks not just static types.
        if not isinstance(task, asyncio.Task):
            raise TypeError("only asyncio.Task objects can be watched.")
        if task is START_TASK:
            if not fixer:
                raise ValueError("fixer must be specified when using START_TASK.")
            self._scheduled.append(fixer)
        elif shield:
            if fixer:
                raise ValueError("`fixer` can not be used with shield=True")
            # Watch a shield so our cancellation never reaches the real task.
            self._shielded_tasks[task] = asyncio.shield(task)
            self._tasks[self._shielded_tasks[task]] = None
        else:
            self._tasks[task] = fixer
        self._tasks_changed.set_nowait()

    def cancel(self) -> None:
        """
        Stop the watcher and cause it to cancel all the tasks in its care.
        """
        self._cancelled.set()

    def add_preexit_callback(self, callback: Callable[..., None], *args, **kws) -> None:
        # Register a sync callback to run (with the given args) right before
        # the watcher exits and cancels its tasks.
        self._preexit_callbacks.append(partial(callback, *args, **kws))

    def _run_preexit_callbacks(self) -> None:
        for callback in self._preexit_callbacks:
            try:
                callback()
            except Exception as e:
                # Pre-exit callbacks must never prevent shutdown; log and move on.
                logger.exception(
                    f"ignoring exception from pre-exit callback {callback}: {e}"
                )

    async def __aenter__(self) -> "Watcher":
        WATCHER_CONTEXT.set(self)
        self.loop = asyncio.get_running_loop()
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> bool:
        # Main watch loop: wait on every watched task plus two control tasks
        # ("cancel requested" and "watch list changed") and react to whichever
        # completes first.
        cancel_task: asyncio.Task = self.loop.create_task(self._cancelled.wait())
        changed_task: asyncio.Task = START_TASK
        try:
            self.running = True
            while not self._cancelled.is_set():
                if self._scheduled:
                    await self._run_scheduled()
                if changed_task is START_TASK or changed_task.done():
                    changed_task = self.loop.create_task(self._tasks_changed.wait())
                try:
                    if not self._tasks:
                        return False  # There are no tasks just exit.
                    done, pending = await asyncio.wait(
                        [cancel_task, changed_task, *self._tasks.keys()],
                        return_when=asyncio.FIRST_COMPLETED,
                    )
                    if cancel_task in done:
                        break  # Don't bother doing fixes just break out
                    for task in done:
                        task = cast(asyncio.Task, task)
                        if task is changed_task:
                            continue
                        else:
                            await self._fix_task(task)
                except asyncio.CancelledError:
                    # Being cancelled from outside is treated like .cancel().
                    self.cancel()
        finally:
            self.running = False
            self._run_preexit_callbacks()
            await self._event_task_cleanup(cancel_task, changed_task)
            await self._handle_cancel()
            self._tasks.clear()
            self._shielded_tasks.clear()
        return False

    async def _event_task_cleanup(self, *tasks):
        # Cancel the internal control tasks (skipping the START_TASK sentinel).
        for task in tasks:
            if task is not START_TASK:
                await cancel(task)

    async def _fix_task(self, task: asyncio.Task) -> None:
        # Ensure we "retrieve" the result of failed tasks
        exc = task.exception()
        if exc is None:
            task.result()
        fixer = self._tasks[task]
        if fixer is None:
            raise RuntimeError(f"{task} finished and there is no fixer!") from exc
        new_task = fixer(task)
        if not isinstance(new_task, asyncio.Task) and isawaitable(new_task):
            new_task = await new_task
        if isinstance(new_task, asyncio.Task):
            # Swap the dead task for its replacement under the same fixer.
            del self._tasks[task]
            self._tasks[new_task] = fixer
        else:
            raise TypeError(
                f"{fixer}(task) failed to return a task, returned:" f"{new_task}!"
            ) from exc

    async def _handle_cancel(self):
        # Cancel every still-running watched task and give them
        # _cancel_timeout seconds to finish; anything that survives or dies
        # with a non-cancellation exception is reported via WatcherError.
        tasks = [task for task in self._tasks if not task.done()]
        if not tasks:
            return
        for task in tasks:
            task.cancel()
        done, pending = await asyncio.wait(tasks, timeout=self._cancel_timeout)
        bad_tasks: List[asyncio.Task] = []
        for task in done:
            if task.cancelled():
                continue
            if task.exception() is not None:
                bad_tasks.append(task)
        bad_tasks.extend(pending)
        if bad_tasks:
            raise WatcherError(
                "The following tasks didn't cancel cleanly or at all!", bad_tasks
            )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
9e44aadf38af013a19b2183bcb83b8f98eec8804 | 83d7c9edec86a024165889a725ce21486c7af707 | /pichr/fixtures/parse_pichr_json.py | f53a730d9eda7b51ce9dc70de06d150d5b6fe23a | [] | no_license | emilymye/pichr | 3a5cb3a2c8f54bf07c832c45fe4225882dac87b5 | 7441b54a2450358fb3b6a85f4be67e2bf695e9b8 | refs/heads/master | 2021-01-13T01:54:12.737860 | 2015-03-12T22:18:46 | 2015-03-12T22:18:46 | 31,866,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | import json
import csv
# Convert a CSV of pitcher injury data into Django fixture JSON
# (pichr.Player and pichr.Recovery instances).
#
# Fixes: Python 2 `print` statements (SyntaxError under Python 3) converted
# to print() calls; the unused `enc = json.JSONEncoder()` local removed.
if __name__ == "__main__":
    with open("data.csv") as data_f:
        reader = csv.DictReader(data_f)
        print(reader.fieldnames)
        players = {}
        recoveries = []
        for row in reader:
            player = {
                "name": row['Name'].strip(),
                "pid": int(row['PlayerId']),
                "team": row['Team'].strip()
            }
            players[player["pid"]] = player
            # Zero-pad M/D/YYYY parts so the ISO date below is well-formed.
            date = row["On Date"].split('/')
            if len(date[0]) == 1:
                date[0] = "0%s" % date[0]
            if len(date[1]) == 1:
                date[1] = "0%s" % date[1]
            print(date)
            recovery = {
                "date": "%s-%s-%s" % (date[2], date[0], date[1]),
                "duration" : row['Days'],
                "sct_injury": row['InjurySCTID'],
                "player": player["pid"],
                "preERA": float(row['preERA']),
                "postERA": float(row['postERA']),
                "preFastball": float(row['preFB']),
                "postFastball": float(row['postFB'])
            }
            if "yes" in row['reinjury'].lower():
                recovery['reinjury'] = True
            if "yes" in row["offseason"].lower():
                recovery['offseason'] = True
            # NOTE(review): checks for a lowercase "procedure" key but reads
            # "ProcedureSCTID" — confirm against the CSV header.
            if "procedure" in row:
                recovery["ProcedureSCTID"] = row["ProcedureSCTID"]
            recoveries.append(recovery)
    players = [{ "model": "pichr.Player", "fields": players[pid] } for pid in players]
    recoveries = [{ "model": "pichr.Recovery", "pk": i, "fields": rec } for i, rec in enumerate(recoveries)]
    with open("instances.json", "w+") as outF:
        outF.write(json.dumps(players + recoveries, sort_keys=False,
            indent=4, separators=(',', ': ')))
| [
"emily3ye@gmail.com"
] | emily3ye@gmail.com |
a08c27bb4b36f95989b207b71198e8f3254b8485 | 827cc3633a99f90eed504da343ecc86684556c24 | /24.py | fc9b8c38d637f320fcafc3b4a42faa0f723416f0 | [] | no_license | fs-source/fs-source | 7f6e5c0504decc8111c50d0e3bba2e48e0f27658 | 5a39697e5b7fe1e895e92f0fedab4889dc8812ad | refs/heads/main | 2023-07-14T23:13:54.307676 | 2021-08-26T20:37:22 | 2021-08-26T20:37:22 | 328,153,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | print(292 + 774)
| [
"tlxt5650508@163.com"
] | tlxt5650508@163.com |
218c12ac6119307e120b6b12b5bfe4a790c1aa59 | 33fab1fae9b4a099e705dcc10443b7721da8f430 | /1.py | 4963f2a6e3bdf4eb5409bc2735c4474c71f9afa8 | [] | no_license | cm12348/tryGithub | 4d797e40ae28b87a20aaad543997e2531d279ba5 | 5300bdd4a008d597595aea08a831361f85fc95e9 | refs/heads/master | 2020-04-16T06:24:27.109767 | 2019-01-12T04:36:38 | 2019-01-12T04:36:38 | 165,345,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | """
I did some changes.
"""
print("Helloworld")
| [
"noreply@github.com"
] | noreply@github.com |
c8bca5286d0a9ad049f59155f5a9114d8f06dd8c | b92eee41d665314bc42043d1ff46c608af5ffdfd | /sesion_3/prog.4.py | eda17bf266e753571861d3d45fc42db362032da6 | [] | no_license | badillosoft/python-economics | 40efe8326558a8fb93f84fdbd2137428844ee5f3 | 82af43c7a47297ce186dc0e23e30620d46e6693a | refs/heads/master | 2021-01-11T18:55:15.762752 | 2017-05-09T01:15:59 | 2017-05-09T01:15:59 | 79,656,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from openpyxl import load_workbook
from geg import *
wb = load_workbook("puntos.xlsx")
ws = wb.active
puntos = automatic_load_data(ws, "A2")
def f(x, y):
return x**2 + y**2
for p in puntos:
x = p["X"]
y = p["Y"]
z = f(x, y)
print "%f, %f, %f" %(x, y, z) | [
"kmmx@hsoft.local"
] | kmmx@hsoft.local |
533e43f778d33b82ee7fcc0f71873a2f54f2a562 | 1abb00f7a02635918eba197cb31a16f9cff8550f | /Zadanie5.py | 7e8db8044cd0327daedda99eae58f101384011f7 | [] | no_license | Exoticisignis/Sem5SL | 5535513408905778d9cfb3920c44663cd0d60fb8 | a823f1956c07aeceede75571fb83bc5738796461 | refs/heads/master | 2023-03-28T12:17:32.243911 | 2021-04-05T18:20:41 | 2021-04-05T18:20:41 | 354,929,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | list = [7, 'x', 'y', 6, "uaua", 9, 10, 99]
def process(list):
new_list = [i for i in list if isinstance(i, int)]
summ = sum(new_list)
length = len(new_list)
average = float(summ / length)
minn = min(new_list)
maxx = max(new_list)
variance = calc_variance(new_list, length, average)
result = ("Długość: "+str(length),"Średnia wartość: "+str(average),"Wariancja: "+str(variance),"Min: "+str(minn),"Max: "+ str(maxx))
return result
def calc_variance(calc_list, length, ave):
var = 0
for i in calc_list:
var += float((i - ave) ** 2)
return var / length
if __name__ == '__main__':
print(process(list))
| [
"debysh99@gmail.com"
] | debysh99@gmail.com |
4403759cc3a6535b10eb3e09928d293cb9555aad | bb151500b0fc5bb9ef1b1a9e5bba98e485b4b34d | /problemSet/591C_Median_Smoothing.py | 9436f6108c5e3ab88ea40e68a7cd92378f7749a0 | [] | no_license | yamaton/codeforces | 47b98b23da0a3a8237d9021b0122eaa498d98628 | e0675fd010df852c94eadffdf8b801eeea7ad81b | refs/heads/master | 2021-01-10T01:22:02.338425 | 2018-11-28T02:45:04 | 2018-11-28T03:21:45 | 45,873,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | """
Codeforces Round #327 (Div. 2)
Problem 591 C. Median Smoothing
@author yamaton
@date 2015-11-06
"""
def reduce_consec(iterable):
"""
[1, 2, 3, 6, 7, 9, 10, 11, 12, 13, 20]
--> [(1, 3), (6, 2), (9, 5), (20, 1)]
Detect consecutive part and (starting_value, length) pair
:param xs: List of int
:return: List of pair of int
"""
stack = []
for x in iterable:
if stack:
# check if consective
if stack[-1] + 1 == x:
stack.append(x)
# if not consecutive, flush stack and start with new element
else:
yield (stack[0], len(stack))
stack = [x]
else:
# starting element
stack.append(x)
if stack:
yield (stack[0], len(stack))
def alternating_indices(xs):
for i, x in enumerate(xs):
if i == 0 or i == len(xs) - 1:
continue
if xs[i-1] != x and xs[i+1] != x:
yield i
def alternating_position_and_length(xs):
for x in xs:
pass
def solve(xs, n):
# zigzag = [] # alternating part
# for i, x in enumerate(xs):
# if i == 0 or i == n - 1:
# continue
# if xs[i-1] != x and xs[i+1] != x:
# zigzag.append(i)
zigzag = alternating_indices(xs)
zigzag_start_length_pairs = reduce_consec(zigzag)
count = 0
result = xs[:]
for (i, n) in zigzag_start_length_pairs:
n_half = n // 2
count = max(count, (n + 1) // 2)
if n % 2 == 0:
for j in range(i, i + n_half):
result[j] = xs[i-1]
for j in range(i + n_half, i + n):
result[j] = 1 - xs[i-1]
else:
for j in range(i, i + n):
result[j] = xs[i-1]
return count, result
def solve_bruteforce(xs, n):
def transform(ps):
result = []
for i in range(n):
if i == 0 or i == n-1:
result.append(ps[i])
else:
median = int(sum(ps[i-1:i+2]) >= 2)
result.append(median)
return tuple(result)
xs = tuple(xs)
seen = set()
seen.add(xs)
ys = transform(xs)
count = 0
while ys != xs:
# Actually, this system always ends up to a fixed point. No cycle exists.
if ys in seen:
return -1, xs
xs = ys
seen.add(xs)
count += 1
ys = transform(xs)
return count, xs
def main():
n = int(input())
xs = [int(i) for i in input().strip().split()]
count, seq = solve(xs, n)
print(count)
print(' '.join(str(n) for n in seq))
if __name__ == '__main__':
main()
| [
"yamaton@gmail.com"
] | yamaton@gmail.com |
cba35a947a9ee82ae669d9f1be77bc9ee3889334 | 3d5a99baa64e8c1bb0946f2b3e2ea29bf1d12b98 | /ch02/r28.py | 5a8a9bf99f9bb9cb1f5f7a49e0f0da4b65a56418 | [] | no_license | lakchaud89/python_data_structures | a9809d3a6b7d1db48d24e53ed376224673565ce2 | 97198952d4d275775da21224a3754b7a6b37fc03 | refs/heads/master | 2021-01-21T06:48:58.710228 | 2017-09-08T21:33:54 | 2017-09-08T21:33:54 | 101,947,272 | 0 | 0 | null | 2017-09-02T03:19:33 | 2017-08-31T01:53:16 | null | UTF-8 | Python | false | false | 2,262 | py | # Modify the declaration of the first for loop in the CreditCard tests, from Code Fragment 2.3,
# so that it will eventually cause exactly one of the three credit cards to go over its credit limit.
# Which credit card is it?
class CreditCard:
def __init__(self,customer,bank,account,limit,balance = None):
self._customer = customer
self._bank = bank
self._account = account
self._limit = limit
if balance == None:
self._balance = 0
else:
self._balance = balance
def get_customer(self):
return self._customer
def get_bank(self):
return self._bank
def get_account(self):
return self._account
def get_limit(self):
return self._limit
def get_balance(self):
return self._balance
def charge(self,price):
if not isinstance(price, (int, float)):
raise TypeError( "price must be numeric" )
elif price < 0:
raise ValueError( "price cannot be negative" )
if (price + self._balance > self._limit):
return False
else:
self._balance +=price
return True
def make_payment(self,amount):
if not isinstance(amount, (int, float)):
raise TypeError("amount must be numeric" )
elif amount < 0:
raise ValueError( "amount cannot be negative" )
self._balance-=amount
#if name == __main__ :
wallet = [ ]
wallet.append(CreditCard( 'John Bowman' , 'California Savings' ,'5391 0375 9387 5309' , 2500) )
wallet.append(CreditCard( 'John Bowman', 'California Federal' ,'3485 0399 3395 1954' , 3500) )
wallet.append(CreditCard( 'John Bowman' , 'California Finance' ,'5391 0375 9387 5309' , 5000) )
for val in range(1, 17):
wallet[0].charge(val*500)
wallet[1].charge(2*val)
wallet[2].charge(3*val)
for c in range(3):
print( 'Customer = ', wallet[c].get_customer())
print( 'Bank = ', wallet[c].get_bank())
print( 'Account = ', wallet[c].get_account())
print( 'Limit = ', wallet[c].get_limit())
print( 'Balance = ', wallet[c].get_balance())
while wallet[c].get_balance() > 100:
wallet[c].make_payment(100)
print( "New balance = ", wallet[c].get_balance())
print( )
| [
"lakshmichaudhari@lakshmis-mbp.gateway.pace.com"
] | lakshmichaudhari@lakshmis-mbp.gateway.pace.com |
a5255900b8026034fbd41ea3c3b5ab5f8ea68614 | 1c61d9e996f24e87ae686c2b6a2f01ef052425d1 | /accountapp/views.py | b383b2612cc5d41cb64536203155784c87823fbb | [] | no_license | joft-ware/django_web | ba3be679299fdacba7fc1c49cb5317209e895a59 | 06f750fd4a2bdced2b0d67a198ebafd5d3407637 | refs/heads/master | 2023-02-26T19:52:49.247705 | 2021-02-02T14:12:53 | 2021-02-02T14:12:53 | 325,967,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
# Create your views here.
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, UpdateView, DeleteView
from django.views.generic.list import MultipleObjectMixin
from accountapp.decorators import account_ownership_required
from accountapp.forms import AccountUpdateForm
from accountapp.models import hw
from articleapp.models import Article
has_ownership = [account_ownership_required, login_required]
class AccountCreateView(CreateView):
model = User
form_class = UserCreationForm
success_url = reverse_lazy('accountapp:login')
template_name = 'accountapp/create.html'
class AccountDetailView(DetailView, MultipleObjectMixin):
model = User
context_object_name='target_user'
template_name = 'accountapp/detail.html'
paginate_by = 25
def get_context_data(self, **kwargs):
object_list=Article.objects.filter(writer=self.get_object())
return super(AccountDetailView, self).get_context_data(object_list=object_list, **kwargs)
@method_decorator(has_ownership, 'get')
@method_decorator(has_ownership, 'post')
class AccountUpdateView(UpdateView):
model = User
form_class = AccountUpdateForm
context_object_name='target_user'
success_url = reverse_lazy('home')
template_name = 'accountapp/update.html'
@method_decorator(has_ownership, 'get')
@method_decorator(has_ownership, 'post')
class AccountDeleteView(DeleteView):
model = User
context_object_name='target_user'
success_url = reverse_lazy('accountapp:login')
template_name = 'accountapp/delete.html' | [
"34998542+sky033116@users.noreply.github.com"
] | 34998542+sky033116@users.noreply.github.com |
c122747c667ba2b5cd4b66de7acfc231a1576ac7 | 9634a7dd26605fbf6260f2338bfb1f061111c8e6 | /wishes/urls.py | d4ac2c932e9d1ccfb6786700bf76f6175716a944 | [] | no_license | pkc035/21-1st-MealKatMarket-backend | 985b18e28e55758e08455c597fcfc48822c92c65 | 1d43eea1cf8f30bcd893fb74b30e2645dd0f5561 | refs/heads/main | 2023-06-03T06:49:52.052308 | 2021-06-20T11:22:01 | 2021-06-20T11:22:01 | 378,775,535 | 1 | 0 | null | 2021-06-21T01:35:25 | 2021-06-21T01:35:25 | null | UTF-8 | Python | false | false | 155 | py | from django.urls import path
from .views import WishView
urlpatterns = [
path('',WishView.as_view()),
path('/<int:food_id>', WishView.as_view())
]
| [
"pkc0305@gmail.com"
] | pkc0305@gmail.com |
d40f02e6db1890c62e20ceca6eb08cad3e7ac968 | 674a882b1f958766e3ac2db0fc445a7a7fd7ee16 | /weather/views.py | 324005a5103ce55e011138cfe0ddb753ad0b2766 | [] | no_license | harinder8407/weather-app | 2463bd9155e887d51bca83223de5828a570ab694 | e30adedfb351d5e2da5d1be42686323293d86ae9 | refs/heads/master | 2023-03-09T00:25:10.219507 | 2021-03-01T09:14:33 | 2021-03-01T09:14:33 | 343,339,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py |
import requests
from django.shortcuts import render, redirect
from .models import City
from .forms import CityForm
def index(request):
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=14c01bd544e820b6681e78683c1103a7'
err_msg = ''
message = ''
message_class = ''
if request.method == 'POST':
form = CityForm(request.POST)
if form.is_valid():
new_city = form.cleaned_data['name']
existing_city_count = City.objects.filter(name=new_city).count()
if existing_city_count == 0:
r = requests.get(url.format(new_city)).json()
if r['cod'] == 200:
form.save()
else:
err_msg = 'City does not exist in the world!'
else:
err_msg = 'City already exists in the database!'
if err_msg:
message = err_msg
message_class = 'is-danger'
else:
message = 'City added successfully!'
message_class = 'is-success'
form = CityForm()
cities = City.objects.all()
weather_data = []
for city in cities:
r = requests.get(url.format(city)).json()
city_weather = {
'city' : city.name,
'temperature' : r['main']['temp'],
'description' : r['weather'][0]['description'],
'icon' : r['weather'][0]['icon'],
}
weather_data.append(city_weather)
context = {
'weather_data' : weather_data,
'form' : form,
'message' : message,
'message_class' : message_class
}
return render(request, 'weather/weather.html', context)
def delete_city(request, city_name):
City.objects.get(name=city_name).delete()
return redirect('index')
| [
"harinderss2899@gmail.com"
] | harinderss2899@gmail.com |
a5961849eacd9651fe5f3146a8524c53eda4f663 | 4db107059e4f77740bea34705fc459bb6a2fe981 | /MicroAOD/python/flashggTriLeptons_cfi.py | 2a7348b193e44455a9fd1bb1c4dd5f03b8c01bfe | [] | no_license | GiuliaNegro/dafne | 6e4188e233113387b67096bf5b0eed362771f1e3 | 628076faec651c6ebaedd068b1b1626c8b3d9a1d | refs/heads/master | 2020-09-26T23:59:11.712519 | 2019-01-31T19:50:45 | 2019-01-31T19:50:45 | 67,401,535 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | import FWCore.ParameterSet.Config as cms
flashggTriLeptons = cms.EDProducer('FlashggTriLeptonsProducer',
ElectronTag=cms.InputTag('flashggElectrons'), #'slimmedElectrons'),
MuonTag=cms.InputTag('flashggMuons'), #'slimmedMuons'),
VertexTag=cms.InputTag('offlineSlimmedPrimaryVertices'),
##Parameters
minElectronPt=cms.double(5.),
maxElectronEta=cms.double(2.4),
minMuonPt=cms.double(5.),
maxMuonEta=cms.double(2.4)
) | [
"giulia.negro@cern.ch"
] | giulia.negro@cern.ch |
2a5cef818c311103bcb5f7606a771da35e81fd54 | 2c9750d1f89b0c3d2936f826e7af750d850e9cce | /RHMDashboard/wsgi.py | 1ba08fb3f4211fbb2e844033d12d8210abd9cf63 | [] | no_license | AGhosh-SPC/RegulatorHealthMonitor | 738a97ed9f243489e833bc3390c792ff4137b86f | fa3404126112d9b013a1bba3bd54132bee8d188b | refs/heads/main | 2023-04-06T23:29:53.735996 | 2021-04-14T20:07:34 | 2021-04-14T20:07:34 | 358,028,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | """
WSGI config for RHMDashboard project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
For more information, visit
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'RHMDashboard.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
| [
"Ghosh.Aindrila@spartancontrols.com"
] | Ghosh.Aindrila@spartancontrols.com |
2ac3a4e371b349d7e8e7c8a07168edbe9ea3a41a | 04d3f37b043364cd0233348863036436dfabedd6 | /manage.py | 7a4745372d94347843ee1bf9eb1a5ae4839100bb | [] | no_license | anonim0zero/NASA-challenges-covid-19 | b281d0f8faad453bd5c1ddd81510f94b8acc6625 | 1eed3aa9b8ae871539605084a22da3c037ea8843 | refs/heads/main | 2023-08-16T20:46:44.454581 | 2021-10-03T16:50:45 | 2021-10-03T16:50:45 | 413,138,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'NASA.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"igorteplov2001@gmail.com"
] | igorteplov2001@gmail.com |
53c51e60a3111190c14795ad4a983b2bcccfe5af | b397e24cdb458fa7cb9ba1838e8236b425eed66e | /Locators.py | 9c4a3dba58c4c8b2e1710c1ad47a318c638d4b96 | [] | no_license | Mrkabu/PageObjectPatern | 29b177a2598f85c87aa9947cdce61883857b043b | 7e4a208adecd36cbb449c68f1e3c7baf6b34e9c9 | refs/heads/master | 2020-03-16T03:58:19.924212 | 2018-05-17T20:33:47 | 2018-05-17T20:33:47 | 132,499,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | class Locators(object):
logo = '//*[@id="top"]/div/div/div/div/nav/ul/li[2]/a'
terazwtv = '//*[@id="top"]/div/div/div/div/div/nav/a[1]'
login = '//*[@id="Email"]'
password = '//*[@id="Password"]'
buttonzaloguj = '//*[@id="login-form"]/fieldset/div/div[1]/div[3]/button'
logowyszukiwarka = '//*[@id="top"]/div/div/div/div/nav/ul/li[1]/a/img'
wyszukiwarka = '//*[@id="search"]'
rozumiem = '//*[@id="Account_Login"]/div[2]/div/div/div/a[2]'
kruk = '//*[@id="Search_Search"]/main/div/div[2]/ul/li[1]/div/div[2]/h2/a'
#programtv = (By.XPATH, '/html/body/header/div/div/div/div/div/nav/a[2]')
#zaloguj = (By.XPATH, '/html/body/header/div/div/div/div/nav/ul/li[3]/a')
| [
"sagan07@gmail.com"
] | sagan07@gmail.com |
0a2a4d1068f3c40fd8c94b02d7e99a8d7d07b26f | 47718952e24df62386dc5e858a889ba37323c6c0 | /anal-lexico.py | ea41df822875166bb2859730fe681f8daf898fb7 | [] | no_license | rv0lt/PDL | 4ae5f9acdf5b30885c81670d9ac8fef043c26b37 | 4a016bc8becbb8d742dd7b9ce56418e449c46b5d | refs/heads/master | 2022-04-28T01:16:12.451534 | 2020-03-18T17:02:34 | 2020-03-18T17:02:34 | 211,706,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,778 | py | import ply.lex as lex
from ply.lex import TOKEN
import sys
import re
from TablaSimbolos import TablaSimbolos
from TablaSimbolos import Fun
from gramatica import parser
import gramatica
lexema=""
tipo=None
comentario=False
tabla_simbolos = TablaSimbolos()
estoy_en_fun=False #sirve para indicar si voy a leer una funcion
flagFor = False
contador=0
contReturn=0
flagIf = False
metaFlagIf = False
flagExpresion=False
anotherFlag = False
opEsp=False
listaExpresion=[]
listaTokens=[] #Para pasarle al sintactico
funcion = Fun()
funcionAux = Fun()
tipos=(
#palabras clave que hacen referencia a los tipos de datos
'int',
'boolean',
'string'
)
palabras_clave = tipos +(
#Definir las palabras clave
'var',
'for',
'print',
'return',
'function',
'if',
'input'
)
tokens = palabras_clave + (
'id',
'cte_entera',
'asignacion', # =
'coma', # ,
'cadena', #"hola"
'opArt', #+
'opRel', # >
'opLog', # !
'opEsp', # b1|=b2 -> b1 = b1 || b2
'puntoComa', # ;
'parAb', # (
'parCerr', # )
'corchAb', # {
'corchCerr' # }
)
def evaluar_expresion():
check=False
tipoRetorno=None
simboloMayorQue = False
while len(listaExpresion)>0:
elem= listaExpresion.pop()
if not check:
if elem == ">" or elem == "!" or elem == "+":
return "error"
else:
if tipoRetorno is not None:
if tipoRetorno == "+" and elem != "int":
return "error"
elif tipoRetorno == ">" and elem != "int":
return "error"
tipoRetorno=elem
check=True
else:
if elem == "!" and tipoRetorno != "boolean":
return "error"
elif elem == ">":
if tipoRetorno != "int" or simboloMayorQue:
return "error"
simboloMayorQue=True
elif elem == "+" and tipoRetorno != "int":
return "error"
check=False
tipoRetorno = elem
if tipoRetorno == "!" or simboloMayorQue: tipoRetorno = "boolean"
return tipoRetorno
# Reglas de expresiones regulares para los tokens
def t_opEsp(t):
r'\|='
global lexema
global flagExpresion
global opEsp
if(not comentario):
listaTokens.append(t.value)
flagExpresion=True
opEsp=True
t.value=" "
return t
def t_opRel(t):
r'>'
if(not comentario):
listaTokens.append(t.value)
if flagExpresion:
listaExpresion.append(t.value)
t.value=" "
return t
def t_opLog(t):
r'!'
global flagExpresion
if(not comentario):
listaTokens.append(t.value)
if flagExpresion:
listaExpresion.append(t.value)
t.value=" "
return t
def t_asignacion(t):
r'='
global flagExpresion
if(not comentario):
flagExpresion=True
listaTokens.append(t.value)
t.value=" "
return t
def t_coma(t):
r','
if(not comentario):
listaTokens.append(t.value)
t.value=" "
return t
def t_puntoComa(t):
r';'
global listaExpresion
global flagExpresion
global lexema
global opEsp
global flagFor,contador
global metaFlagIf
if(not comentario):
listaTokens.append(t.value)
if metaFlagIf: metaFlagIf=False
if flagFor:
contador -=1
if contador == 0:
#flagExpresion=False
exp=evaluar_expresion()
if exp!='boolean':
raise Exception ("Error en la condicion del for")
if flagExpresion:
flagExpresion = False
exp = evaluar_expresion()
if exp == "error":
raise Exception ("La expresion escrita no tiene los tipos correctos")
elif lexema == "return":
if exp!=funcionAux.retorno:
raise Exception("Tipo no valido para el return")
elif lexema=="print":
if exp == 'boolean':
raise Exception ("Tipo logico en print o input")
elif opEsp:
opEsp=False
if ( (tabla_simbolos.buscarTipo(lexema) != 'boolean') or (exp!='boolean') ):
raise Exception ("Tipo no logico en asignacion con or logico")
elif tabla_simbolos.buscarTipo(lexema) != exp:
if flagFor and exp!=None:
raise Exception("Asignacion de tipos distintos en la iniciacion del for")
elif not flagFor: raise Exception("Tipo incorrecto en asignacion")
if contador ==1 and flagFor:
flagExpresion=True
t.value=" "
return t
def t_parAb(t):
r'\('
if(not comentario):
listaTokens.append(t.value)
t.value=" "
return t
def t_parCerr(t):
r'\)'
global anotherFlag,flagFor, opEsp
if(not comentario):
listaTokens.append(t.value)
if flagFor:
flagFor=False
exp=evaluar_expresion()
if exp == "error" or tabla_simbolos.buscarTipo(lexema) !=exp:
if exp !=None:
raise Exception("Asignacion de tipos distintos en la actualizacion del for")
elif opEsp:
opEsp=False
if ( (tabla_simbolos.buscarTipo(lexema) != 'boolean') or (exp!='boolean') ):
raise Exception ("Tipo incorrecto en actualizacion del for")
if anotherFlag and flagExpresion:
anotherFlag=False
if funcionAux.nParam > 1:
if funcionAux.nParam-1 != tabla_simbolos.buscarnParamFuncion(funcion.nombre):
raise Exception ("Numero de parametros equivocado")
funcionAux.tipoParam.reverse()
funcionAux.tipoParam.pop()
funcionAux.tipoParam.reverse()
if funcionAux.tipoParam != tabla_simbolos.buscarTipoParamFuncion(funcionAux.nombre):
raise Exception ("Tipos de los atributos equivocado")
else:
if tabla_simbolos.buscarnParamFuncion(funcionAux.nombre) > 0:
raise Exception ("Numero de argumentos erroneo")
funcionAux.reinicio
t.value=" "
return t
def t_corchAb(t):
r'\{'
global estoy_en_fun
if(not comentario):
if estoy_en_fun:
#si estoy leyendo una funcion y veo las llaves abiertas signigica que ya he dejado de declararla
estoy_en_fun=False
tabla_simbolos.crearFuncion(funcion)
listaTokens.append(t.value)
t.value=" "
return t
def t_corchCerr(t):
r'\}'
global flagFor,contReturn
if(not comentario):
listaTokens.append(t.value)
t.value=" "
if not flagFor:
if contReturn <=0 and funcionAux.retorno is not None:
raise Exception("Error en el cuerpo de la funcion")
tabla_simbolos.destuirTSL()
funcion.reinicio()
funcionAux.reinicio()
else:
flagFor=False
return t
def t_opArt(t):
r'\+'
global flagExpresion
if(not comentario):
if flagExpresion:
listaExpresion.append(t.value)
listaTokens.append(t.value)
t.value = " "
return t
def t_id(t):
r'[a-zA-z_]\w*'
if(not comentario):
global tipo
global estoy_en_fun
global flagFor
global flagIf,metaFlagIf
global flagExpresion
global anotherFlag
global lexema
global contador, contReturn
if t.value in palabras_clave:
listaTokens.append(t.value)
t.type = t.value
t.value = " "
if t.type == 'var': #cada vez que leo var entro en zona de declaraciom
tabla_simbolos.declaracion=True
elif t.type in tipos and (tabla_simbolos.declaracion or estoy_en_fun):
#si estoy en zona de declaracion o acabo de leer un function me quiero guardar el tipo
tipo= t.type
elif t.type == 'function': #voy a leer una funcion
estoy_en_fun=True
contReturn=0
elif t.type == 'for':
flagFor=True
contador=2
elif t.type == 'if':
flagIf=True
metaFlagIf=True
elif t.type == 'print' or t.type == 'input':
flagExpresion=True
lexema="print"
elif t.type == 'return':
flagExpresion=True
lexema="return"
contReturn+=1
if metaFlagIf:
contReturn-=1
else:
t.type = "id"
if not flagExpresion: lexema=t.value
if flagExpresion and not anotherFlag:
if tabla_simbolos.buscarTipo(t.value) == "funcion":
listaExpresion.append(tabla_simbolos.buscarTipoRetorno(t.value))
funcionAux.nombre=t.value
anotherFlag = True
else:
listaExpresion.append(tabla_simbolos.buscarTipo(t.value))
listaTokens.append(t.type)
if flagIf:
flagIf=False
if tabla_simbolos.buscarTipo(t.value) != "boolean":
raise Exception("error en el if")
if anotherFlag:
if flagExpresion:
funcionAux.nParam+=1
funcionAux.tipoParam.append(tabla_simbolos.buscarTipo(t.value))
if estoy_en_fun:
if not funcion.flag:
funcion.retorno=tipo
funcion.nombre=t.value
funcionAux.retorno=tipo
funcion.flag=True
q=tabla_simbolos.insertarTS(t.value, "funcion")
else:
funcion.nParam+=1
funcion.tipoParam.append(tipo)
funcion.nombreParam.append(t.value)
q = tabla_simbolos.insertarTSL()
elif tabla_simbolos.declaracion: #zona declaracion
q=tabla_simbolos.buscarTS(t.value)
if q is not None:
raise Exception("id ya declarada")
else:
q= tabla_simbolos.insertarTS(t.value,tipo)
#una vez que he leido el ID paso a zona de uso
tabla_simbolos.declaracion=False
else: #zona de uso
q= tabla_simbolos.buscarTS(t.value)
if q is None:
raise Exception("id no declarado")
t.value=q
tipo=None
return t
def t_cte_entera(t):
r'\d+\.?(\d+)?'
global flagExpresion
global anotherFlag
if(not comentario):
if eval(t.value) > 32767 or '.' in t.value:
raise Exception ("Lexical: illegal character '%s' in line '%d' position" % (t.value, t.lineno))
t.lexer.skip(1)
else:
t.value = eval(t.value)
if flagExpresion:
if anotherFlag:
funcionAux.nParam+=1
funcionAux.tipoParam.append("int")
else:
listaExpresion.append("int")
listaTokens.append(t.type)
return t
def t_cadena(t) :
r'"([^"\\]|(\\.))*"'
global flagExpresion
if(not comentario):
if len(t.value)-2>64 :
raise Exception("Cadena demasiado larga")
if flagExpresion:
if anotherFlag:
funcionAux.nParam+=1
funcionAux.tipoParam.append("string")
else:
listaExpresion.append("string")
listaTokens.append(t.type)
return t
def t_newline(t):
r'\n'
t.lexer.lineno+=1
#t_ignore_COMMENT = r'/\*(.|\n)*?\*/'
#@TOKEN(regex)
def t_commentab(t):
r'/\*'
global comentario
comentario = True
def t_commentcer(t):
r'\*/'
global comentario
comentario = False
def t_error(t):
if(not comentario):
print("")
t.lexer.skip(1)
t_ignore = ' \t' #Contiene espacios y tabuladores
if __name__ == '__main__':
if len(sys.argv) != 2:
print ("ERROR parametros incorrectos")
sys.exit(1)
lexer=lex.lex(reflags=re.DOTALL)
tabla_simbolos =TablaSimbolos()
data = open(sys.argv[1], 'r')
linea = data.readline()
output = open("tokens.txt", 'w')
lex.lex(reflags=re.DOTALL)
while linea != "":
lexer.input(linea)
linea = data.readline()
while True:
tok = lexer.token()
if not tok: break
tokens = ("<" + tok.type + "," + str(tok.value) +">" )
output.write(tokens+"\n")
tokens+= " token number "+ str(tok.lexpos +1) + " in line " + str(tok.lineno) +"\n"
tabla_simbolos.volcar()
#Ahora he generado un fichero con los tokens y he creado la tabla de Simbolos
#El siguiente paso es pasar la lista de Tokens identificados al parser
#print(listaTokens)
if parser.match(listaTokens):
print("ACEPT")
else:
print("REJEC")
parser.verbose_match(listaTokens, False)
#gramatica.res
# print(gramatica.res)
data.close()
output.close()
#lex.input("a+b")
#for tok in iter(lex.token, None):
# print repr(tok.type), repr(tok.value)
#lex.runmain(lexer)
| [
"arevuelta@Rv0lt"
] | arevuelta@Rv0lt |
13c9307b9b5ccdc95f35925e7cd437499b488c9b | 5cd67aa3b3c43937e623bd7f4deede198956061d | /migrations/versions/f1318346a431_.py | a723d1b76520dd99b9160eaeb5d082d57fec78aa | [] | no_license | GDG-SSU/gdg-homepage-project | 596d6379c8f93d2bbdc70c67703c80a42b07ec16 | 883bfdb9fa0e6f07d84a3b671daefbd8c69647c5 | refs/heads/backup | 2023-01-10T07:36:41.575334 | 2016-03-17T04:33:04 | 2016-03-17T04:33:04 | 49,634,424 | 6 | 2 | null | 2022-12-26T20:00:24 | 2016-01-14T08:45:02 | JavaScript | UTF-8 | Python | false | false | 954 | py | """empty message
Revision ID: f1318346a431
Revises: None
Create Date: 2016-01-30 16:42:51.170407
"""
# revision identifiers, used by Alembic.
revision = 'f1318346a431'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('t_gdg_help_desk',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('help_title', sa.String(length=200), nullable=True),
sa.Column('help_content', sa.Text(), nullable=True),
sa.Column('author_address', sa.String(length=15), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('t_gdg_help_desk')
### end Alembic commands ###
| [
"Genus@Genusui-MacBook-Pro.local"
] | Genus@Genusui-MacBook-Pro.local |
ee3e2068130cbb427286d11a91b9b4c4a9af71c5 | db6c003ab9d407979386fa8fd48eb88b791ace7a | /aws_scripts/zero_all_asgs.py | 4f9224b49fe2e6e98d512679ea18ba17a46b6a52 | [] | no_license | aria-jpl/ops_scripts2 | 319e3450358748531dce5d2e3e8fbe0ac8ef0ade | 332b1ecd2de3978724800136162aead998391ee6 | refs/heads/master | 2021-08-08T08:13:43.492801 | 2021-07-14T18:24:00 | 2021-07-14T18:24:00 | 188,817,794 | 0 | 0 | null | 2021-07-14T18:24:00 | 2019-05-27T09:56:04 | Python | UTF-8 | Python | false | false | 6,178 | py | '''
This script will set the max size, min size, and desired capacity values for all
auto-scaling groups to zero then wait until all instances have been terminated
after which these initial values will be restored. A list of asg's with active
instances is printed to the console until no active instances are detected.
There is also an option to read in an asg config file that will assign the
max, min, and desired capacity values. The format for this file is:
Flags:
-f: input asg config file
-p <int>: specifies interval over which to print ASG's with active instances.
If not specified, default is '2'.
Example usage: python zero_all_asgs.py -p 3
'''
import boto3
import json
import time
import argparse
# AWS ASG parameters
MAX_SIZE = "MaxSize"
MIN_SIZE = "MinSize"
DESIRED_CAPACITY = "DesiredCapacity"
client = boto3.client('autoscaling')
'''
Stores maxsize, minsize, and desired capacity values to a config
file then sets these ASG parameters to zero.
'''
def set_asgs_to_zero():
f = open("asg_config.json", "w")
data = {}
response = client.describe_auto_scaling_groups()
asgs = response['AutoScalingGroups']
for asg in asgs:
# Get asg name
asg_name = asg['AutoScalingGroupName']
print("\n" + asg_name)
data[asg_name] = []
# Collect initial asg values then, if desired capacity, max, or min are non-zero, set to zero
if (asg['MinSize'] is not 0):
data[asg_name].append({'MinSize':asg['MinSize']})
client.update_auto_scaling_group(AutoScalingGroupName=asg_name, MinSize=0)
print("...setting MinSize to 0")
if (asg['MaxSize'] is not 0):
data[asg_name].append({'MaxSize':asg['MaxSize']})
client.update_auto_scaling_group(AutoScalingGroupName=asg_name, MaxSize=0)
print("...setting MaxSize to 0")
if (asg['DesiredCapacity'] is not 0):
#data[asg_name].append({'DesiredCapacity':asg['DesiredCapacity']})
client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=0)
print("...setting DesiredCapacity to 0")
# Write initial asg values to config file
json.dump(data, f)
f.close()
'''
Print list of ASG's with active instances until all instances have been terminated.
'''
def wait_for_asgs_to_zero(period):
while(True):
response = client.describe_auto_scaling_instances()
asg_set = set()
if (len(response['AutoScalingInstances']) <= 0):
print("\n***********************************************")
print("\nAll asg's zero'd. No active instances detected.")
print("\n***********************************************")
break;
print("\nWaiting for active instances from the following ASG's to shutdown:")
for i in response['AutoScalingInstances']:
asg_set.add(i['AutoScalingGroupName'])
print('\n'.join(asg_set))
print("Total number of active instances: " + str(len(response['AutoScalingInstances'])))
time.sleep(int(period))
'''
Sets maxsize, minsize, and desired capacity back to initial values once all instances
have been terminated.
'''
def set_asgs_to_defaults():
    # Restore each ASG's MinSize/MaxSize from the values recorded in
    # asg_config.json by set_asgs_to_zero().  DesiredCapacity is not
    # restored (its restore block is commented out below).
    # Open file containing default asg values
    f = open("asg_config.json", "r")
    data = json.load(f)
    for asg in data.items():
        # Each item is a (name, [ {param: value}, ... ]) pair.
        param = tuple(asg)[1]
        asg_name = tuple(asg)[0]
        print("\n" + asg_name)
        # Set asg default values
        if (len(param) > 0):
            for obj in param:
                # NOTE(review): mixes the string literal "MaxSize" with
                # the MIN_SIZE / DESIRED_CAPACITY constants (defined
                # above this excerpt) — confirm the constants match the
                # keys written by set_asgs_to_zero().
                if ("MaxSize" in obj):
                    maxSize = obj["MaxSize"]
                    client.update_auto_scaling_group(AutoScalingGroupName=asg_name, MaxSize=maxSize)
                    print("Set MaxSize to", maxSize)
                '''
                if (DESIRED_CAPACITY in obj):
                    desiredCapacity = obj[DESIRED_CAPACITY]
                    client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desiredCapacity)
                    print("Set DesiredCapacity to", desiredCapacity)
                '''
                if (MIN_SIZE in obj):
                    minSize = obj[MIN_SIZE]
                    client.update_auto_scaling_group(AutoScalingGroupName=asg_name, MinSize=minSize)
                    print("Set MinSize to", minSize)
'''
Sets asg min, max, and/or desired capacity values from config
'''
def set_asg_from_config(config):
    """Apply the MinSize/MaxSize/DesiredCapacity values listed in the
    JSON file *config* (same shape as asg_config.json: a mapping of ASG
    name to a list of single-key {param: value} dicts).
    """
    print("Setting config values...")
    # `with` fixes the original's leaked file handle.
    with open(config, "r") as f:
        data = json.load(f)
    for asg_name, params in data.items():
        print("\n" + asg_name)
        # Iterating an empty list is a no-op, so no explicit len() guard
        # is needed.
        for obj in params:
            # NOTE(review): "MaxSize" is a literal while the other two
            # use constants defined above this excerpt — confirm they
            # agree with the config keys.
            if "MaxSize" in obj:
                maxSize = obj["MaxSize"]
                client.update_auto_scaling_group(AutoScalingGroupName=asg_name, MaxSize=maxSize)
                print("Set MaxSize to", maxSize)
            if DESIRED_CAPACITY in obj:
                desiredCapacity = obj[DESIRED_CAPACITY]
                client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=desiredCapacity)
                print("Set DesiredCapacity to", desiredCapacity)
            if MIN_SIZE in obj:
                minSize = obj[MIN_SIZE]
                client.update_auto_scaling_group(AutoScalingGroupName=asg_name, MinSize=minSize)
                print("Set MinSize to", minSize)
if __name__ == "__main__":
    # CLI: with -f, apply the given config file; otherwise zero every
    # ASG, wait for all instances to terminate, then restore defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--period", default=2, help="Interval in seconds to print list of ASG's with active instances.")
    parser.add_argument("-f", "--f", help="Input asg config file containing desired values.")
    args = parser.parse_args()
    if (args.f is not None):
        # Set asg values to those specified in config
        set_asg_from_config(args.f)
    else:
        set_asgs_to_zero()
        wait_for_asgs_to_zero(args.period)
        set_asgs_to_defaults()
| [
"marjorie.j.lucas@jpl.nasa.gov"
] | marjorie.j.lucas@jpl.nasa.gov |
d3d86e14f08837fc614c41663d93f3d0699382aa | 79c89b881f488310bff3354dc71171371cf33987 | /fast_text_embeddings.py | 46cd6246a771888cc32d0927a9941020a944634e | [] | no_license | sreeja-g/NER-code-mix | 4f32aa3de1cb2aff4ce98b97557fe9974357e3e1 | f84d1b43c1ffdd01b6db61dd379d670f396eb06c | refs/heads/master | 2022-11-29T07:16:41.941878 | 2020-07-30T22:58:38 | 2020-07-30T22:58:38 | 281,765,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | import pandas as pd
import csv
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from gensim.models import FastText
# Token table: one row per token with sentence id, language tag and NER
# tag; tab-separated and unquoted, hence QUOTE_NONE.
data = pd.read_csv("processed_data/annotatedVec.tsv",sep='\t', quoting=csv.QUOTE_NONE, header=None)
data.columns=['Sent', 'words', 'lang', 'ner-tag']
data = data.fillna(method="ffill")
# Drop every row whose token is an English stop word.
for index, each in data.iterrows():
    if each['words'] in stop_words:
        data.drop(data[data['words'] == each['words']].index, inplace = True)
# Build the FastText training corpus: one "sentence" per token cell.
corpus=[]
for i in data['words'].values:
    corpus.append(str(i).split(" "))
# NOTE(review): bare expression — a no-op outside a notebook.
corpus[:1]
model = FastText(corpus, size=100, workers=4,window=5)
model.save('saved_models/fasttext.model') | [
"37956427+sreeja-g@users.noreply.github.com"
] | 37956427+sreeja-g@users.noreply.github.com |
930560ddc5302324fc791a268bd09fb10d2e8b48 | 42a91364a9d25096f7b23791f666f25e02520ddf | /FETCH_CODE/quilt_flatten/distance.py | 47ef94afaf28004972fe594793db81d8bec1ec9e | [] | no_license | Jie-Tree/FETCH_CODE | 0d9d2ab04a085beefa3baf6cf15c2c671aa65944 | 37b0286843619fd231ff7b8277e6a0a8dff40ac7 | refs/heads/master | 2020-04-11T13:10:07.739588 | 2018-12-14T16:12:41 | 2018-12-14T16:12:41 | 161,806,079 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,933 | py | from fetch_core import robot_interface, camera
import time
import cv2
import numpy as np
import requests
import rospy
import socket
from moveit_python import MoveGroupInterface, PlanningSceneInterface
import os
from matplotlib import pyplot as plt
from cal_position import CalPosition
from manage_fetch_robot import FetchPose, GripperClient
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from moveit_msgs.msg import MoveItErrorCodes
class Detect:
    """Captures frames from the Fetch head camera and moves the gripper
    to the 3D point corresponding to a clicked image pixel."""

    def __init__(self):
        # Address/port of the external detection service (not used in
        # this excerpt — presumably consumed elsewhere; confirm).
        self.detect_ip = '172.31.76.30'
        self.detect_port = 7777
        self.cam = camera.RGBD()
        self.cal_position = CalPosition()
        self.fetch_pose = FetchPose()

    def get_detect_info(self):
        """Grab one RGB frame and save it to image/fetch.png, retrying
        until a non-empty file has been written (the camera can return
        empty frames right after startup)."""
        print("get_detect_info")
        file_size = 0
        while file_size == 0:
            time.sleep(0.05)
            img = self.cam.read_color_data()
            cv2.imwrite("image/fetch.png", img)
            path = "image/fetch.png"
            # path = "../fetch_core/image/26.png"
            file_size = os.stat(path).st_size
        time.sleep(0.2)
        print('get img')

    def move_corner(self, x, y):
        """Move the gripper toward the base-frame point for pixel
        (x, y), then tuck the arm back to a fixed joint configuration."""
        position = self.cal_position.get_base_position_from_pix(x, y)
        position[0] = position[0] - 0.20  # stop 20 cm short of the target

        move_group = MoveGroupInterface("arm_with_torso", "base_link")

        # Keep-out cubes around the base so MoveIt plans above the floor.
        planning_scene = PlanningSceneInterface("base_link")
        planning_scene.removeCollisionObject("my_front_ground")
        planning_scene.removeCollisionObject("my_back_ground")
        planning_scene.removeCollisionObject("my_right_ground")
        planning_scene.removeCollisionObject("my_left_ground")
        planning_scene.addCube("my_front_ground", 2, 1.1, 0.0, -1.0)
        planning_scene.addCube("my_back_ground", 2, -1.2, 0.0, -1.0)
        planning_scene.addCube("my_left_ground", 2, 0.0, 1.2, -1.0)
        planning_scene.addCube("my_right_ground", 2, 0.0, -1.2, -1.0)

        # This is the wrist link not the gripper itself
        gripper_frame = 'wrist_roll_link'
        pose = Pose(Point(position[0], position[1], position[2]),
                    Quaternion(0, 0, 0, 1))
        # Construct a "pose_stamped" message as required by moveToPose
        gripper_pose_stamped = PoseStamped()
        gripper_pose_stamped.header.frame_id = 'base_link'
        # If the message stamp is not current it could be ignored
        gripper_pose_stamped.header.stamp = rospy.Time.now()
        gripper_pose_stamped.pose = pose

        # Move gripper frame to the pose specified
        move_group.moveToPose(gripper_pose_stamped, gripper_frame)
        result = move_group.get_move_action().get_result()

        if result:
            # Checking the MoveItErrorCode
            if result.error_code.val == MoveItErrorCodes.SUCCESS:
                rospy.loginfo("Hello there!")
            else:
                # If you get to this point please search for:
                # moveit_msgs/MoveItErrorCodes.msg
                rospy.logerr("Arm goal in state: %s",
                             move_group.get_move_action().get_state())
        else:
            rospy.logerr("MoveIt! failure no result returned.")

        time.sleep(1)

        # Tuck the arm back to a safe joint configuration.
        joint_names = ["torso_lift_joint", "shoulder_pan_joint",
                       "shoulder_lift_joint", "upperarm_roll_joint",
                       "elbow_flex_joint", "forearm_roll_joint",
                       "wrist_flex_joint", "wrist_roll_joint"]
        joints_value = [0.3, 1.32, 0.7, 0.0, -2.0, 0.0, -0.57, 0.0]
        move_group.moveToJointPosition(joint_names, joints_value, wait=False)
        # Since we passed in wait=False above we need to wait here
        move_group.get_move_action().wait_for_result()
        result = move_group.get_move_action().get_result()

        if result:
            # Checking the MoveItErrorCode
            if result.error_code.val == MoveItErrorCodes.SUCCESS:
                rospy.loginfo("pose Success!")
            else:
                # BUGFIX: was `self.move_group` — no such attribute
                # exists; the MoveGroupInterface is the local variable.
                rospy.logerr("Arm goal in state: %s",
                             move_group.get_move_action().get_state())
        else:
            rospy.logerr("MoveIt! failure no result returned.")
def on_press(event):
    # Matplotlib mouse-click callback: ignore clicks outside the axes,
    # otherwise send the clicked pixel (row, col) to the module-level
    # Detect instance as a motion target.
    if event.inaxes is None:
        print 'None'
        return
    print event.xdata, event.ydata
    detect.move_corner(int(event.ydata), int(event.xdata))
if __name__ == '__main__':
    rospy.init_node("test_connect")
    detect = Detect()
    # Repeatedly capture a frame and let the user click a target pixel;
    # on_press drives the arm toward the clicked point.
    while True:
        detect.get_detect_info()
        plt.imshow(plt.imread('image/fetch.png'))
        plt.connect("button_press_event", on_press)
        plt.show()
| [
"904615562@qq.com"
] | 904615562@qq.com |
85d502c61f2fa92729891532280aef50f4e2f0f3 | 4c5c0a3883a9416568930f7e434e1b4cfb333831 | /form_test.py | 2e436dd367465b7cf1987029b3698ce8036b0b2a | [] | no_license | SugimuraMichael/pfscm_change_tracker | 8bc9dec4ad180da55f0f765babd431152cbcd9e5 | 467d3500c7e16ea5d29160c2f843fc54e8cbf146 | refs/heads/master | 2021-01-01T16:32:22.036952 | 2017-07-20T15:57:00 | 2017-07-20T15:57:00 | 97,853,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,404 | py | '''
current version of this code takes a google sheet, copy of original which is static... next thing would be to
implement a copy in google drive... I can go do that now...
total time is --- 149.125 seconds ---
'''
import gspread
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
import time
from spreadsheets.url_keeper import url_form_test
start_timez = time.time()
import json
from pprint import pprint
#with open('C:/Users/585000/Desktop/Python Projects/PPM USAID/spreadsheets/client_secret_2.json') as data_file:
# data = json.load(data_file)
#pprint(data)
# Authorize against the Google Sheets API with a service-account key,
# then read the change-request workbook: sheet 0 = change requests,
# sheet 1 = internal tab, sheet 2 = external (Global Fund) tab.
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('C:/Users/585000/Desktop/Python Projects/PPM USAID/spreadsheets/client_secret_2.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open_by_url(url_form_test()).get_worksheet(0)
internal_wks = gc.open_by_url(url_form_test()).get_worksheet(1)
external_wks = gc.open_by_url(url_form_test()).get_worksheet(2)
change_sheet = pd.DataFrame()
internal_sheet = pd.DataFrame()
external_sheet = pd.DataFrame()
# Fetch the first 22 columns of each sheet, one API call per column;
# row 0 of each column becomes the column header.
for i in range(22):
    n = i+1
    print n
    column = pd.DataFrame([sub for sub in wks.col_values(n)])
    column.columns = column.iloc[0]
    column = column.ix[1:]
    change_sheet = pd.concat([change_sheet, column], axis=1)
    del column
    column = pd.DataFrame([sub for sub in internal_wks.col_values(n)])
    column.columns = column.iloc[0]
    column = column.ix[1:]
    internal_sheet = pd.concat([internal_sheet, column], axis=1)
    del column
    column = pd.DataFrame([sub for sub in external_wks.col_values(n)])
    column.columns = column.iloc[0]
    column = column.ix[1:]
    external_sheet = pd.concat([external_sheet, column], axis=1)
    del column
def checkEqual1(iterator):
    """Return True when every element equals the first one; an empty
    iterable is vacuously True."""
    items = iter(iterator)
    for first in items:
        # Compare everything that remains against the first element.
        return all(first == item for item in items)
    return True
# need to deal with duplicate coluimns Orion Field Name
# Canonical column names for the change-request sheet; the raw sheet
# repeats "Orion field name", so the duplicate is suffixed "_2".
col_list_change_sheet = ['Timestamp',
'Email Address',
"Requester's Name",
'Reason for change',
'Program',
'Do you need bulk changes?',
'Please upload file with bulk changes',
'PE No',
'PQ No',
'Order No',
'Shipment No',
'What type of change is needed?',
'Orion field name',
'Current date',
'Requested new date',
'Orion field name_2',
'Current data',
'Requested new data',
'Correction ID',
'Date approved by PMU',
'Global Fund or Internal tab?',
'Status of correction by ORION']
change_sheet.columns = col_list_change_sheet
# Drop rows whose cells are all identical (artifacts of the column-wise
# read).  Positions are shifted by one because the header row was
# stripped after the download.
drop_list = []
for indexz, row in change_sheet.iterrows():
    if checkEqual1(row.tolist()) == True:
        drop_list.append(indexz-1)
change_sheet=change_sheet.drop(change_sheet.index[drop_list])
drop_list = []
for indexz, row in internal_sheet.iterrows():
    if checkEqual1(row.tolist()) == True:
        drop_list.append(indexz-1)
internal_sheet = internal_sheet.drop(internal_sheet.index[drop_list])
drop_list = []
for indexz, row in external_sheet.iterrows():
    if checkEqual1(row.tolist()) == True:
        drop_list.append(indexz-1)
# NOTE(review): this renames *row label* 15 of change_sheet, not a
# column, and sits between the external_sheet dedup steps — looks
# misplaced; confirm intent.
change_sheet = change_sheet.rename(index={15:'Orion field name_2'})
external_sheet = external_sheet.drop(external_sheet.index[drop_list])
external_sheet = external_sheet.drop('',axis=1)
change_sheet_c = change_sheet.copy()
# Columns expected by the output tabs but absent from the request form;
# added empty so the later concat aligns.
additional_blank_cols = ['AD/UD Code',
'Number of days (+/-)',
'AD/UD Comments',
'Current AD',
'Current UD',
'Actual Delivery Date',
'COTD impact % (+/-)','Date change approved by TGF?',
'Shared with Client?'
]
for col in additional_blank_cols:
    change_sheet_c[col]= ''
# Keep only requests ORION marked "done", then split them into
# internal-only edits and everything else (external / Global Fund).
done_sheet = change_sheet_c[change_sheet_c['Status of correction by ORION'].str.lower()=='done']
internal_edits =done_sheet[done_sheet['Global Fund or Internal tab?'].str.lower().str.contains('internal only')==True]
external_edits =done_sheet[done_sheet['Global Fund or Internal tab?'].str.lower().str.contains('internal only')!=True]
# Map the form's column names onto the external tab's schema, then
# select/order the columns the external tab expects and append the new
# rows to the existing external sheet.
external_edits = external_edits.rename(index = str,
                      columns={'Timestamp':'Date of request',
                               'Date approved by PMU':'Date Done', #seems not like a 1:1 ratio
                               'Reason for change':'Reason for change', #same but for good measure
                               'Program':'Program', #''
                               'PE No':'PE No',
                               'PQ No':'PQ No',
                               'Order No':'Order No',
                               'Shipment No':'Shipment No',
                               'Orion field name':'Field Name associated with correction',
                               'Current date':'Existing Data (Orion)',
                               'Requested new date':'New Data (Revised)',
                               'Status of correction by ORION':'Done'
                               }
                      )
external_edits = external_edits[['Date of request',
'Date Done',
'Reason for change',
'Program',
'PE No',
'PQ No',
'Order No',
'Shipment No',
'Field Name associated with correction',
'Existing Data (Orion)',
'New Data (Revised)',
'AD/UD Code',
'Number of days (+/-)',
'AD/UD Comments',
'Current AD',
'Current UD',
'Actual Delivery Date',
'COTD impact % (+/-)',
'Done',
'Date change approved by TGF?',
'Shared with Client?']]
external_result = pd.concat([external_sheet,external_edits])
# Same mapping for the internal tab; note the slightly different target
# names ('Reason for Correction', 'Status') versus the external tab.
internal_edits = internal_edits.rename(index = str,
                      columns={'Timestamp':'Date of request',
                               'Date approved by PMU':'Date Done', #seems not like a 1:1 ratio
                               'Reason for change':'Reason for Correction', #same but for good measure
                               'Program':'Program', #''
                               'PE No':'PE No',
                               'PQ No':'PQ No',
                               'Order No':'Order No',
                               'Shipment No':'Shipment No',
                               'Orion field name':'Field Name associated with correction',
                               'Current date':'Existing Data (Orion)',
                               'Requested new date':'New Data (Revised)',
                               'Status of correction by ORION':'Status'
                               }
                      )
# The internal tab also carries an (empty) 'Reason for change' column.
internal_edits['Reason for change'] = ''
internal_col_list =['Date of request',
'Date Done',
'Reason for Correction',
'Program',
'PE No',
'PQ No',
'Order No',
'Shipment No',
'Field Name associated with correction',
'Existing Data (Orion)',
'New Data (Revised)',
'AD/UD Code',
'Number of days (+/-)',
'AD/UD Comments',
'Current AD',
'Current UD',
'Actual Delivery Date',
'COTD impact % (+/-)',
'Reason for change',
'Status',
'Date change approved by TGF?',
'Shared with Client?']
#df a dataframe and ws a google api worksheet object
def to_googlesheet(df,ws):
    # Write DataFrame `df` into gspread worksheet `ws`: header row first,
    # then the body, each as a single batched cell update (two API calls
    # total).  Python 2 code: str values are decoded to unicode and
    # `long` is a builtin here.
    def numberToLetters(q):
        # 1-based column number -> spreadsheet column letters (1 -> 'A',
        # 27 -> 'AA', ...).
        q = q - 1
        result = ''
        while q >= 0:
            remain = q % 26
            result = chr(remain+65) + result;
            q = q//26 - 1
        return result
    # columns names
    columns = df.columns.values.tolist()
    # selection of the range that will be updated
    cell_list = ws.range('A1:'+numberToLetters(len(columns))+'1')
    # modifying the values in the range
    for cell in cell_list:
        val = columns[cell.col-1]
        if type(val) is str:
            val = val.decode('utf-8')
        cell.value = val
    # update in batch
    ws.update_cells(cell_list)
    # number of lines and columns
    num_lines, num_columns = df.shape
    # selection of the range that will be updated
    cell_list = ws.range('A2:' + numberToLetters(num_columns) + str(num_lines + 1))
    # modifying the values in the range
    for cell in cell_list:
        # cell.row/col are 1-based and row 1 is the header, hence -2/-1.
        val = df.iloc[cell.row - 2, cell.col - 1]
        if type(val) is str:
            val = val.decode('utf-8')
        elif isinstance(val, (int, long, float, complex)):
            # note that we round all numbers
            val = int(round(val))
        cell.value = val
    # update in batch
    ws.update_cells(cell_list)
# Reorder the internal edits, append them to the existing internal
# sheet, then overwrite worksheets 3 (internal) and 4 (external) with
# the merged results.
internal_edits = internal_edits[internal_col_list]
internal_result = pd.concat([internal_sheet,internal_edits])
internal_save_loc = gc.open_by_url(url_form_test()).get_worksheet(3)
external_save_loc = gc.open_by_url(url_form_test()).get_worksheet(4)
internal_save_loc.clear()
external_save_loc.clear()
to_googlesheet(internal_result,internal_save_loc)
to_googlesheet(external_result,external_save_loc)
#external_result.to_csv('C:/Users/585000/Desktop/PCFSM/2017 KPIs/external_change_test_v1.csv',index = False)
#internal_result.to_csv('C:/Users/585000/Desktop/PCFSM/2017 KPIs/internal_change_test_v1.csv',index = False)
print("total time --- %s seconds ---" % (time.time() - start_timez))
| [
"sugimura_michael@bah.com"
] | sugimura_michael@bah.com |
713e56b0dfc1b28ab55d67e75f8720cff692e593 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=49/params.py | fcbfbdfe45d4c06dbfe8c250d00b2d4aa9ae3364 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | {'cpus': 4,
 # NOTE(review): auto-generated experiment parameters; field semantics
 # are defined by the experiment driver that reads this file.
 'duration': 30,
 'final_util': '3.557024',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'RUN',
 'trial': 49,
 'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
2c15394b0845b5b8e849bf6d0b52a048d9f8be8d | 016cab497a506bb50ac5b89e3e4622c244fe1339 | /src/TrainModel/study1_rawdata_plot.py | ac605a5599dd121b62cc445cc9f248d7e0aa7597 | [] | no_license | SYilei/LimiTouch | 917595cb87cf012542c3da3375342fa492d5a892 | f757a7838572617239a4039a0daa31a826238830 | refs/heads/master | 2023-07-20T22:57:59.315684 | 2019-09-19T22:35:29 | 2019-09-19T22:35:29 | 195,982,686 | 0 | 0 | null | 2023-07-06T21:41:32 | 2019-07-09T10:00:32 | Python | UTF-8 | Python | false | false | 1,916 | py | import torch
import numpy as np
import os
import sys
import pandas as pd
import random
from study1_modelclass import LeNet
import matplotlib.pyplot as plt
import plotly.graph_objects as go
# Raw IMU recordings: 3-axis accelerometer + 3-axis gyroscope columns.
touch_data = pd.read_csv('../../data/Data_Study1/chamod_touch_free_circle.csv')[['ax','ay','az','gx','gy','gz']].values
nontouch_data = pd.read_csv('../../data/Data_Study1/chamod_nontouch_free_circle.csv')[['ax','ay','az','gx','gy','gz']].values

n = 2000  # number of samples to plot
random_x = [i for i in range(n)]

# Plot the six sensor channels of the non-touch recording as line
# traces.  (Swap nontouch_data for touch_data to inspect the touch
# recording.)  The six copy-pasted add_trace calls are folded into one
# loop over (column index, trace name).
fig = go.Figure()
for channel, trace_name in enumerate(('acc_x', 'acc_y', 'acc_z',
                                      'gyro_x', 'gyro_y', 'gyro_z')):
    fig.add_trace(go.Scatter(x=random_x, y=nontouch_data[:n, channel],
                             mode='lines', name=trace_name))
fig.show()
| [
"1000839@wifi-staff-172-24-20-243.net.auckland.ac.nz"
] | 1000839@wifi-staff-172-24-20-243.net.auckland.ac.nz |
c28398d854bb76f93aa2b6c24c128a40872bddbc | cc2a0a075a2f069e5b65bb749c7af0ac488ce1af | /manage.py | 0e98d8243e2c62f9a996d0df5086d14f2899816c | [] | no_license | PaoAguilar/sistema-de-compras | ff88a39beceb42088a1b7b4662b2e2eff819c369 | ae66deb9ca92fe7c168901ac526cb18b34ad2b8c | refs/heads/main | 2023-02-10T07:56:34.657052 | 2020-11-29T23:48:48 | 2020-11-29T23:48:48 | 316,593,342 | 0 | 1 | null | 2020-11-30T06:06:30 | 2020-11-27T20:24:07 | JavaScript | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main() -> None:
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sistemaCompras.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"pao.aquev29@gmail.com"
] | pao.aquev29@gmail.com |
8b37a3439da396e141ef81dfd7a5c6ecb6a958c9 | 2763387c80e808e63bd9aa190e64a2582454f4f9 | /aoc18/day05/day05.py | 655f65e24f0534d2b87cf80ec34035f34501882c | [
"MIT"
] | permissive | dds/advent18 | ca68bb26a3d8d890eeb0cdc49b15f6de7e6a7d7f | 51c6f32cd90f50657d33527e1c9b3a0768f28538 | refs/heads/master | 2020-04-09T06:19:04.378438 | 2019-03-22T15:17:50 | 2019-03-22T15:17:50 | 160,107,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | import util
from string import lowercase
test_input = """
aA
abBA
aBAB
aabAAB
dabAcCaCBAcCcaDA
"""
def reaction(left, right):
    """Return True when *left* and *right* are the same letter in
    opposite case (e.g. 'a' and 'A'), i.e. the pair annihilates."""
    # ord(c) % 32 maps 'a'..'z' and 'A'..'Z' onto the same 1..26 codes.
    return left != right and ord(left) % 32 == ord(right) % 32
def reactPolymer(polymer):
    """Fully react *polymer*: adjacent units of the same type but
    opposite polarity (e.g. 'a'/'A') annihilate, cascading until stable.

    Rewritten from repeated string slicing (O(n^2)) to a single-pass
    stack (O(n)); the reduction is confluent, so the result is the same.
    """
    stack = []
    for unit in polymer:
        # Top of stack reacts with the incoming unit iff they are the
        # same letter in opposite case (ord % 32 folds case for ASCII).
        if stack and unit != stack[-1] and ord(unit) % 32 == ord(stack[-1]) % 32:
            stack.pop()
        else:
            stack.append(unit)
    return ''.join(stack)
return polymer
# Read one polymer string per non-empty input line (util.get_data
# fetches the puzzle input; uncomment the test_input line to use the
# sample data instead).
polymers = []
for line in util.get_data(5).read().split('\n'):
# for line in test_input.split('\n'):
    polymer = line
    if not polymer:
        continue
    polymers.append(polymer)
def removeUnit(polymer, unit):
    """Return *polymer* with every occurrence of *unit*, in either case,
    removed.

    Rewritten from repeated string slicing (O(n^2)) to a single-pass
    join (O(n)); ord % 32 folds case for ASCII letters, matching the
    original comparison.
    """
    code = ord(unit) % 32
    return ''.join(c for c in polymer if ord(c) % 32 != code)
# Python 2 script body.  Part A: length of the fully reacted polymer.
# Part B: try removing each unit type a-z, react, and take the best
# (shortest) result via util.best.
for polymer in polymers:
    reactedPolymer = reactPolymer(polymer)
    print 'A: %d' % len(reactedPolymer)
    reducedPolymers = [removeUnit(polymer, c) for c in lowercase]
    print 'got reducedPolymers'
    reactedReducedPolymers = []
    for i, p in enumerate(reducedPolymers):
        reactedReducedPolymers.append(reactPolymer(p))
        print 'got %s reduced polymers' % chr(ord('a') + i)
    index, bestReducedPolymer = util.best([len(i) for i in reactedReducedPolymers], True)
    print 'B: %d (worst unit: %s)' % (bestReducedPolymer, chr(ord('a') + index))
| [
"davidsmith@acm.org"
] | davidsmith@acm.org |
541a41450f5c3cc864969e5d8d374347713e4ae8 | 92150eee1ef8c331b3a417cc9d5917cbf8274d37 | /src/01_06_image_kernel.py | 05ebe6a4136adf939742b67abc7f61baa872972c | [] | no_license | deepdeepdot/nano-deep-learning | 87606acd7a0a9c0b1d4108f7dd5749e106eed652 | 20da799fac155e7e59e4fa56d3e949a81b895a89 | refs/heads/master | 2020-04-26T06:36:44.944351 | 2019-04-21T18:32:13 | 2019-04-21T18:32:13 | 173,370,424 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | import matplotlib.pyplot as plt
import numpy as np
img = plt.imread("panda-corner.jpg")
nrows, ncols = img.shape[0], img.shape[1]
nchannels = img.shape[2]

# 3x3 emboss convolution kernel.
emboss = [
    [-2, -1, 0],
    [-1, 1, 1],
    [0, 1, 2]
]

# Convolve each channel independently; the 1-pixel border is left zero.
buffer = np.zeros((nrows, ncols, 3))
for i in range(1, nrows-1):
    for j in range(1, ncols-1):
        for c in range(nchannels):
            source = img[i-1:i+2, j-1:j+2, c]
            buffer[i][j][c] = np.sum(np.multiply(source, emboss))

# BUGFIX: cast to uint8, not int8 — after clipping to [0, 255], values
# 128-255 would wrap negative in a signed byte and corrupt the output.
buffer = np.clip(buffer, 0, 255).astype(np.uint8)
plt.imsave(f"out/panda-emboss.png", buffer)
print("Done!") | [
"ph@educreational.com"
] | ph@educreational.com |
9a4567c7387b96986d9b3a7c804bb75fdfa5cae6 | e29d2cd7dd16f961a964dbf90a7b3e011ecf7c4c | /LambdaAWSFunction/message.py | bcc9a6323e1350a0f2352d92bd579524d14d686c | [] | no_license | shrirangbagdi/LogParser | 8c0af19a7f01ec2682eed0818bbb3eb7201ddf7f | a5a2dabbfab828e103f5062bb65d2988a72323e4 | refs/heads/master | 2022-12-01T06:28:05.091601 | 2020-08-16T22:17:05 | 2020-08-16T22:17:05 | 274,524,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,754 | py | import boto3
from PatternFour import PatternFour
from PatternOne import PatternOne
from PatternThree import PatternThree
class message:
    """Extracts WARN/ERROR records from a log file stored in S3.

    The file is named by an S3 put-event; each line is matched against
    the known log-line patterns (PatternOne/Three/Four).  The original
    triplicated branch-per-pattern code is folded into helpers.
    """

    def __init__(self, caseID, event):
        # caseID: identifier attached to every extracted record.
        # event: Lambda S3 event naming the bucket/key to parse.
        self.caseID = caseID
        self.event = event

    def _match_pattern(self, line):
        """Return the first pattern object that recognizes *line*, or None.

        Patterns are tried in the original order: One, Three, Four.
        """
        for pattern, check in ((PatternOne(line), 'IsPatternOne'),
                               (PatternThree(line), 'IsPatternThree'),
                               (PatternFour(line), 'IsPatternFour')):
            if getattr(pattern, check)():
                return pattern
        return None

    def _build_record(self, pattern, file_name, current_type):
        """Build one WARN/ERROR record dict from a matched pattern."""
        return {'File Name': file_name, 'Case ID': self.caseID,
                'Timestamp': pattern.ConvertTimestamp(pattern.GetTimeStamp()),
                'Type': current_type,
                'Message': pattern.GetMessage().strip()}

    def generate_messages(self):
        """Download the log named by the event and return the list of
        WARN/ERROR records.  Lines matching no pattern are appended to
        the message of the record in progress (multi-line entries)."""
        s3 = boto3.client('s3')
        list_of_messages = []
        warning = {}
        event = self.event
        file_name = str(event['Records'][0]['s3']['object']['key'])
        bucket = str(event['Records'][0]['s3']['bucket']['name'])
        obj = s3.get_object(Bucket=bucket, Key=file_name)
        previous_type = ""
        first_iteration = True
        for line in obj["Body"].read().decode(encoding="utf-8").splitlines():
            pattern = self._match_pattern(line)
            if pattern is not None:
                # A new log entry starts: flush any record in progress.
                if (not first_iteration) and previous_type in ("WARN", "ERROR"):
                    list_of_messages.append(warning)
                else:
                    first_iteration = False
                warning = {}
                current_type = pattern.GetCurrentType()
                if current_type in ("WARN", "ERROR"):
                    warning = self._build_record(pattern, file_name, current_type)
                previous_type = current_type
            elif previous_type in ("WARN", "ERROR"):
                # Continuation line of the current WARN/ERROR message.
                warning["Message"] += line
        # Flush the final record, if any.
        if previous_type in ("ERROR", "WARN"):
            list_of_messages.append(warning)
        return list_of_messages
"sbagdi2@illinois.edu"
] | sbagdi2@illinois.edu |
9ac194b9a29467ac4a1e58124ee6a21762943045 | e2b0f6994076bf5183108bc8ee016ea09648cc5e | /apps/organization/migrations/0003_auto_20171119_2204.py | e78d20a4db95a404a9b0a0fcf71932ddb795d212 | [] | no_license | tongguoweizpp/MxOnline | aaa8bf7e12be133e94cd6a10f76bdfab049c023b | 8ad57dd0eba5c297de8da09d3908cd238d0c23b7 | refs/heads/master | 2021-09-04T09:09:34.663455 | 2018-01-17T15:13:49 | 2018-01-17T15:13:49 | 117,853,001 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-11-19 22:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: adds the denormalized
    # course_nums / students counters to the CourseOrg model.
    dependencies = [
        ('organization', '0002_auto_20171119_1113'),
    ]
    operations = [
        migrations.AddField(
            model_name='courseorg',
            name='course_nums',
            field=models.IntegerField(default=0, verbose_name='课程数'),
        ),
        migrations.AddField(
            model_name='courseorg',
            name='students',
            field=models.IntegerField(default=0, verbose_name='学习人数'),
        ),
    ]
| [
"tongguoweizpp@gmail.com"
] | tongguoweizpp@gmail.com |
d0b17bbfb828844b9aade0e41ba7dacd041135e1 | b83bebc43ba07d299c5e8a3d954f710024d27bcc | /owners_api/serializers.py | 3ed8fe27b47c19714055045d0ff8595764dc12a8 | [] | no_license | Faiza1987/Salon-API | eaf601b44abed4fcd2501f1076aa4f043e14caa7 | 8bfab422accb2a5cae253a08f9cdab05a7469e6d | refs/heads/master | 2022-11-27T14:29:56.986608 | 2019-07-22T20:49:14 | 2019-07-22T20:49:14 | 196,279,240 | 0 | 0 | null | 2022-11-22T04:07:37 | 2019-07-10T21:48:18 | Python | UTF-8 | Python | false | false | 2,117 | py | from rest_framework import serializers
from owners_api.models import User, UserProfile
class UserProfileSerializer(serializers.ModelSerializer):
    """Nested serializer exposing the salon-related UserProfile fields."""
    class Meta:
        model = UserProfile
        fields = ('salon_name', 'salon_address', 'salon_city',
                  'salon_state', 'salon_zip', 'salon_phone_number',
                  'salon_description')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for User with a nested salon profile and linked jobs."""

    profile = UserProfileSerializer(required=True)
    jobs = serializers.HyperlinkedRelatedField(
        many=True,
        read_only=True,
        view_name='job-detail'
    )

    class Meta:
        model = User
        fields = ('url', 'email', 'first_name', 'last_name',
                  'password', 'profile', 'jobs')
        extra_kwargs = {'password': {'write_only': True}}

    # Profile fields copied field-by-field in update(); replaces seven
    # copy-pasted `profile_data.get(...)` assignments.
    _PROFILE_FIELDS = ('salon_name', 'salon_address', 'salon_city',
                       'salon_state', 'salon_zip', 'salon_phone_number',
                       'salon_description')

    def create(self, validated_data):
        """Create a User (with hashed password) and its UserProfile."""
        profile_data = validated_data.pop('profile')
        password = validated_data.pop('password')
        user = User(**validated_data)
        user.set_password(password)  # hash, never store the raw password
        user.save()
        UserProfile.objects.create(user=user, **profile_data)
        return user

    def update(self, instance, validated_data):
        """Update the user's email and any supplied profile fields.

        NOTE(review): first_name/last_name/password are not updated here,
        mirroring the original behavior — confirm that is intended.
        """
        profile_data = validated_data.pop('profile')
        profile = instance.profile

        instance.email = validated_data.get('email', instance.email)
        instance.save()

        for field in self._PROFILE_FIELDS:
            setattr(profile, field,
                    profile_data.get(field, getattr(profile, field)))
        profile.save()
        return instance
"faiza.ahsan1222@gmail.com"
] | faiza.ahsan1222@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.