max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
api/api/routes/cards.py | frissyn/Replit-TCG | 8 | 6613451 | import flask
from api import db
from api import app
from api import tokens
from api import limiter
from ..models import Card
@app.route("/cards")
@limiter.limit("75 per minute")
def cards_route():
cards = Card.query.all()
return flask.jsonify([c.serialize() for c in cards])
@app.route("/card/<iden>", methods=["GET", "DELETE", "PUT"])
@limiter.limit("75 per minute")
def card_route(iden: int):
req = flask.request
card = Card.query.get_or_404(iden)
if req.method == "GET":
return flask.jsonify(card.serialize())
elif req.method == "PUT":
if req.headers.get("X-API-TOKEN") in tokens:
card.update({
"name": req.form.get("name"),
"image": req.form.get("image"),
"title": req.form.get("title"),
"rarity": req.form.get("rarity"),
"color": req.form.get("color"),
"shiny": req.form.get("shiny") == "True",
"description": req.form.get("description"),
})
db.session.commit()
return flask.jsonify(card.serialize())
else:
return flask.abort(404)
elif req.method == "DELETE":
if req.headers.get("X-API-TOKEN") in tokens:
db.session.delete(card)
db.session.commit()
return "", 204
else:
return flask.abort(403)
@app.route("/card/add", methods=["POST"])
@limiter.limit("10 per minute")
def card_add_route():
req = flask.request
if req.headers.get("X-API-TOKEN") in tokens:
card = Card()
card.update({
"name": req.form.get("name"),
"image": req.form.get("image"),
"title": req.form.get("title"),
"rarity": req.form.get("rarity"),
"color": req.form.get("color"),
"shiny": req.form.get("shiny") == "True",
"description": req.form.get("description")
})
db.session.add(card)
db.session.commit()
return "", 201
else:
return flask.abort(403)
| import flask
from api import db
from api import app
from api import tokens
from api import limiter
from ..models import Card
@app.route("/cards")
@limiter.limit("75 per minute")
def cards_route():
cards = Card.query.all()
return flask.jsonify([c.serialize() for c in cards])
@app.route("/card/<iden>", methods=["GET", "DELETE", "PUT"])
@limiter.limit("75 per minute")
def card_route(iden: int):
req = flask.request
card = Card.query.get_or_404(iden)
if req.method == "GET":
return flask.jsonify(card.serialize())
elif req.method == "PUT":
if req.headers.get("X-API-TOKEN") in tokens:
card.update({
"name": req.form.get("name"),
"image": req.form.get("image"),
"title": req.form.get("title"),
"rarity": req.form.get("rarity"),
"color": req.form.get("color"),
"shiny": req.form.get("shiny") == "True",
"description": req.form.get("description"),
})
db.session.commit()
return flask.jsonify(card.serialize())
else:
return flask.abort(404)
elif req.method == "DELETE":
if req.headers.get("X-API-TOKEN") in tokens:
db.session.delete(card)
db.session.commit()
return "", 204
else:
return flask.abort(403)
@app.route("/card/add", methods=["POST"])
@limiter.limit("10 per minute")
def card_add_route():
req = flask.request
if req.headers.get("X-API-TOKEN") in tokens:
card = Card()
card.update({
"name": req.form.get("name"),
"image": req.form.get("image"),
"title": req.form.get("title"),
"rarity": req.form.get("rarity"),
"color": req.form.get("color"),
"shiny": req.form.get("shiny") == "True",
"description": req.form.get("description")
})
db.session.add(card)
db.session.commit()
return "", 201
else:
return flask.abort(403)
| none | 1 | 2.602404 | 3 | |
KayakMask/masklink/features/steps/example_steps.py | HanzhouLiu/Kayak_for_Masks | 1 | 6613452 | <reponame>HanzhouLiu/Kayak_for_Masks
from behave import given, when, then
@given('a set of website')
def step_impl(context):
pass
@when('I go to the home page')
def step_impl(context):
pass
@then('I should see all the brand on display')
def step_impl(context):
pass
@then('I should see all the product items that are available to purchase')
def step_impl(context):
pass
@when('I click the purchase link')
def step_impl(context):
pass
@then('I should jump to the product page')
def step_impl(context):
pass
| from behave import given, when, then
@given('a set of website')
def step_impl(context):
pass
@when('I go to the home page')
def step_impl(context):
pass
@then('I should see all the brand on display')
def step_impl(context):
pass
@then('I should see all the product items that are available to purchase')
def step_impl(context):
pass
@when('I click the purchase link')
def step_impl(context):
pass
@then('I should jump to the product page')
def step_impl(context):
pass | none | 1 | 2.569158 | 3 | |
backend/facilities/migrations/0004_facility_add_sports_field.py | totaki/leaders-2021-05 | 0 | 6613453 | <reponame>totaki/leaders-2021-05
# Generated by Django 3.2.8 on 2021-10-18 18:32
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facilities', '0003_rename_sports_area_related_name'),
]
operations = [
migrations.AddField(
model_name='facility',
name='sports',
field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), default=[], size=70),
preserve_default=False,
),
]
| # Generated by Django 3.2.8 on 2021-10-18 18:32
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facilities', '0003_rename_sports_area_related_name'),
]
operations = [
migrations.AddField(
model_name='facility',
name='sports',
field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), default=[], size=70),
preserve_default=False,
),
] | en | 0.896789 | # Generated by Django 3.2.8 on 2021-10-18 18:32 | 1.793661 | 2 |
kicker/train/trainer.py | KI-cker/Ki-cker | 0 | 6613454 | import tensorflow as tf
from keras import backend as K
from kicker.neural_net import NeuralNet
from tensorflow.python import debug
class Trainer:
def __init__(self, neural_net, shape=(320, 480), frame_count=5):
self.gamma = 0.99
self.punishment_for_moving = 0.1
self.neural_net = neural_net
self.neural_net_old = NeuralNet(filename=self.neural_net.filename)
self.width = shape[0]
self.height = shape[1]
self.frame_count = frame_count
self.options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
self.run_metadata = tf.RunMetadata()
self.writer = tf.summary.FileWriter(
logdir='tensorboard_logdir', graph=K.get_session().graph)
self.writer.flush()
self.learning_rate = 1e-4
self.observations_img = self.build_image_processor()
self.debugger = False
def build_image_processor(self):
observations = tf.placeholder(
tf.string, shape=[None, self.frame_count + 1], name='observations')
observations_img = tf.cast(tf.map_fn(lambda i: self.convert_images(
i), observations, dtype=tf.uint8), tf.float32)
observations_img.set_shape(
[None, self.width, self.height, self.frame_count + 1])
return observations_img
def decode(self, images):
sess = K.get_session()
return sess.run(self.observations_img, feed_dict={
'observations:0': images
}, options=self.options, run_metadata=self.run_metadata)
def compute(self, actions, inputs, inputs_next, rewards, terminals):
computed = self.evaluate_input(inputs)
computed_next = self.evaluate_input(inputs_next)
computed_next_old = self.evaluate_input_old(inputs_next)
# computed_actions = tf.stop_gradient(tf.argmax(computed, axis=2))
actions_one_hot = tf.one_hot(actions, 3, axis=2)
q_old = tf.reduce_sum(actions_one_hot * computed, axis=2)
argmax_old = tf.one_hot(
tf.argmax(
computed_next_old,
axis=2),
3,
axis=2)
second_term = self.gamma * \
tf.reduce_sum(computed_next * argmax_old, axis=2)
# second_term = self.gamma * tf.reduce_max(computed_next, axis=2)
q_new = tf.stop_gradient(
rewards +
tf.where(
terminals,
tf.zeros_like(second_term),
second_term))
loss = tf.losses.huber_loss(q_new, q_old, delta=50.0)
# loss = loss + 0.01 * tf.reduce_mean(tf.where(computed_actions == tf.ones_like(computed_actions), tf.zeros_like(q_new), tf.ones_like(q_new)))
# loss = loss + 0.1 * tf.reduce_mean(stf.nn.relu(computed[:,:,0] - computed[:,:,1]))
# loss = loss + 0.1 * tf.reduce_mean(tf.nn.relu(computed[:,:,2] - computed[:,:,1]))
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(
self.learning_rate).minimize(loss)
tf.summary.scalar('loss', loss)
tf.summary.scalar('diff', tf.reduce_mean(tf.abs(q_new - q_old)))
tf.summary.scalar('maximal_reward', tf.reduce_max(q_new))
tf.summary.scalar('mean_reward', tf.reduce_mean(q_new))
tf.summary.scalar('minimal_reward', tf.reduce_min(q_new))
merged = tf.summary.merge_all()
return train_step, loss, tf.abs(
q_new - q_old), tf.argmax(computed, axis=2), merged
def convert_images(self, inputs):
return tf.transpose(tf.map_fn(lambda i: tf.image.decode_jpeg(
i), inputs, dtype=tf.uint8)[:, :, :, 0], [1, 2, 0])
def train_step(self, batch):
if self.debugger:
sess = debug.TensorBoardDebugWrapperSession(
K.get_session(), 'localhost:6004')
K.set_session(sess)
self.debugger = False
sess = K.get_session()
return sess.run(self.tf_train_step, feed_dict=self.build_feed_dict(
batch), options=self.options, run_metadata=self.run_metadata)
def evaluate_input(self, input):
return tf.reshape(self.neural_net.model(input), [32, 8, 3])
def build_feed_dict(self, batch):
return {
'rewards:0': [[s['score'], ] * 8 for s in batch],
'actions:0': [s['action'] for s in batch],
# 'observations:0': [s['observations'] for s in batch],
'terminal:0': [s['terminal'] for s in batch],
'inputs:0': [s['images'] for s in batch],
'inputs_next:0': [s['images_next'] for s in batch]
}
def evaluate_input_old(self, input):
return tf.reshape(self.neural_net_old.model(input), [32, 8, 3])
| import tensorflow as tf
from keras import backend as K
from kicker.neural_net import NeuralNet
from tensorflow.python import debug
class Trainer:
def __init__(self, neural_net, shape=(320, 480), frame_count=5):
self.gamma = 0.99
self.punishment_for_moving = 0.1
self.neural_net = neural_net
self.neural_net_old = NeuralNet(filename=self.neural_net.filename)
self.width = shape[0]
self.height = shape[1]
self.frame_count = frame_count
self.options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
self.run_metadata = tf.RunMetadata()
self.writer = tf.summary.FileWriter(
logdir='tensorboard_logdir', graph=K.get_session().graph)
self.writer.flush()
self.learning_rate = 1e-4
self.observations_img = self.build_image_processor()
self.debugger = False
def build_image_processor(self):
observations = tf.placeholder(
tf.string, shape=[None, self.frame_count + 1], name='observations')
observations_img = tf.cast(tf.map_fn(lambda i: self.convert_images(
i), observations, dtype=tf.uint8), tf.float32)
observations_img.set_shape(
[None, self.width, self.height, self.frame_count + 1])
return observations_img
def decode(self, images):
sess = K.get_session()
return sess.run(self.observations_img, feed_dict={
'observations:0': images
}, options=self.options, run_metadata=self.run_metadata)
def compute(self, actions, inputs, inputs_next, rewards, terminals):
computed = self.evaluate_input(inputs)
computed_next = self.evaluate_input(inputs_next)
computed_next_old = self.evaluate_input_old(inputs_next)
# computed_actions = tf.stop_gradient(tf.argmax(computed, axis=2))
actions_one_hot = tf.one_hot(actions, 3, axis=2)
q_old = tf.reduce_sum(actions_one_hot * computed, axis=2)
argmax_old = tf.one_hot(
tf.argmax(
computed_next_old,
axis=2),
3,
axis=2)
second_term = self.gamma * \
tf.reduce_sum(computed_next * argmax_old, axis=2)
# second_term = self.gamma * tf.reduce_max(computed_next, axis=2)
q_new = tf.stop_gradient(
rewards +
tf.where(
terminals,
tf.zeros_like(second_term),
second_term))
loss = tf.losses.huber_loss(q_new, q_old, delta=50.0)
# loss = loss + 0.01 * tf.reduce_mean(tf.where(computed_actions == tf.ones_like(computed_actions), tf.zeros_like(q_new), tf.ones_like(q_new)))
# loss = loss + 0.1 * tf.reduce_mean(stf.nn.relu(computed[:,:,0] - computed[:,:,1]))
# loss = loss + 0.1 * tf.reduce_mean(tf.nn.relu(computed[:,:,2] - computed[:,:,1]))
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(
self.learning_rate).minimize(loss)
tf.summary.scalar('loss', loss)
tf.summary.scalar('diff', tf.reduce_mean(tf.abs(q_new - q_old)))
tf.summary.scalar('maximal_reward', tf.reduce_max(q_new))
tf.summary.scalar('mean_reward', tf.reduce_mean(q_new))
tf.summary.scalar('minimal_reward', tf.reduce_min(q_new))
merged = tf.summary.merge_all()
return train_step, loss, tf.abs(
q_new - q_old), tf.argmax(computed, axis=2), merged
def convert_images(self, inputs):
return tf.transpose(tf.map_fn(lambda i: tf.image.decode_jpeg(
i), inputs, dtype=tf.uint8)[:, :, :, 0], [1, 2, 0])
def train_step(self, batch):
if self.debugger:
sess = debug.TensorBoardDebugWrapperSession(
K.get_session(), 'localhost:6004')
K.set_session(sess)
self.debugger = False
sess = K.get_session()
return sess.run(self.tf_train_step, feed_dict=self.build_feed_dict(
batch), options=self.options, run_metadata=self.run_metadata)
def evaluate_input(self, input):
return tf.reshape(self.neural_net.model(input), [32, 8, 3])
def build_feed_dict(self, batch):
return {
'rewards:0': [[s['score'], ] * 8 for s in batch],
'actions:0': [s['action'] for s in batch],
# 'observations:0': [s['observations'] for s in batch],
'terminal:0': [s['terminal'] for s in batch],
'inputs:0': [s['images'] for s in batch],
'inputs_next:0': [s['images_next'] for s in batch]
}
def evaluate_input_old(self, input):
return tf.reshape(self.neural_net_old.model(input), [32, 8, 3])
| en | 0.498046 | # computed_actions = tf.stop_gradient(tf.argmax(computed, axis=2)) # second_term = self.gamma * tf.reduce_max(computed_next, axis=2) # loss = loss + 0.01 * tf.reduce_mean(tf.where(computed_actions == tf.ones_like(computed_actions), tf.zeros_like(q_new), tf.ones_like(q_new))) # loss = loss + 0.1 * tf.reduce_mean(stf.nn.relu(computed[:,:,0] - computed[:,:,1])) # loss = loss + 0.1 * tf.reduce_mean(tf.nn.relu(computed[:,:,2] - computed[:,:,1])) # 'observations:0': [s['observations'] for s in batch], | 2.358345 | 2 |
DQN_01/exercise/dqn_agent.py | vashineyu/DRL_udacity | 0 | 6613455 | import numpy as np
import random
from collections import namedtuple, deque
import sys
from model import QNetwork
import tensorflow as tf
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 25 # how often to update the network
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
#optimizer = tf.train.RMSPropOptimizer(learning_rate= LR)
optimizer = tf.train.AdamOptimizer(learning_rate = LR)
self.Qnetwork = QNetwork(state_size = state_size,
action_size = action_size,
optimizer = optimizer,
gamma=GAMMA, tau = TAU, minibatch_size = BATCH_SIZE)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# ------------------- update target network ------------------- #
self.Qnetwork.update_target_network()
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
if len(state.shape) == 1:
# make it batch-like
state = state[np.newaxis, :]
# Epsilon-greedy action selection
if random.random() > eps:
action_values = self.Qnetwork.get_action(state)
return np.argmax(action_values)
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
#states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
current_loss = self.Qnetwork.train(experiences)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = np.vstack([e.state for e in experiences if e is not None])
actions = np.vstack([e.action for e in experiences if e is not None])
rewards = np.vstack([e.reward for e in experiences if e is not None])
next_states = np.vstack([e.next_state for e in experiences if e is not None])
dones = np.vstack([e.done for e in experiences if e is not None])
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | import numpy as np
import random
from collections import namedtuple, deque
import sys
from model import QNetwork
import tensorflow as tf
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 25 # how often to update the network
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
#optimizer = tf.train.RMSPropOptimizer(learning_rate= LR)
optimizer = tf.train.AdamOptimizer(learning_rate = LR)
self.Qnetwork = QNetwork(state_size = state_size,
action_size = action_size,
optimizer = optimizer,
gamma=GAMMA, tau = TAU, minibatch_size = BATCH_SIZE)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# ------------------- update target network ------------------- #
self.Qnetwork.update_target_network()
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
if len(state.shape) == 1:
# make it batch-like
state = state[np.newaxis, :]
# Epsilon-greedy action selection
if random.random() > eps:
action_values = self.Qnetwork.get_action(state)
return np.argmax(action_values)
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
#states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
current_loss = self.Qnetwork.train(experiences)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = np.vstack([e.state for e in experiences if e is not None])
actions = np.vstack([e.action for e in experiences if e is not None])
rewards = np.vstack([e.reward for e in experiences if e is not None])
next_states = np.vstack([e.next_state for e in experiences if e is not None])
dones = np.vstack([e.done for e in experiences if e is not None])
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | en | 0.692626 | # replay buffer size # minibatch size # discount factor # for soft update of target parameters # learning rate # how often to update the network Interacts with and learns from the environment. Initialize an Agent object. Params ====== state_size (int): dimension of each state action_size (int): dimension of each action seed (int): random seed # Q-Network #optimizer = tf.train.RMSPropOptimizer(learning_rate= LR) # Replay memory # Initialize time step (for updating every UPDATE_EVERY steps) # Save experience in replay memory # Learn every UPDATE_EVERY time steps. # ------------------- update target network ------------------- # Returns actions for given state as per current policy. Params ====== state (array_like): current state eps (float): epsilon, for epsilon-greedy action selection # make it batch-like # Epsilon-greedy action selection Update value parameters using given batch of experience tuples. Params ====== experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples gamma (float): discount factor #states, actions, rewards, next_states, dones = experiences ## TODO: compute and minimize the loss Fixed-size buffer to store experience tuples. Initialize a ReplayBuffer object. Params ====== action_size (int): dimension of each action buffer_size (int): maximum size of buffer batch_size (int): size of each training batch seed (int): random seed Add a new experience to memory. Randomly sample a batch of experiences from memory. Return the current size of internal memory. | 2.474381 | 2 |
arkav_is_api/arkavauth/admin.py | arkavidia5/arkav-is | 3 | 6613456 | <filename>arkav_is_api/arkavauth/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import User, RegistrationConfirmationAttempt, PasswordResetConfirmationAttempt
def resend_email(modeladmin, request, queryset):
for obj in queryset:
obj.send_email()
resend_email.short_description = 'Resend the confirmation email of the selected attempts with the same token.'
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
"""Define admin model for custom User model with no username field."""
fieldsets = (
(
None,
{'fields': ['full_name', 'email', 'password']}
),
(
_('Permissions'),
{'fields': ['is_active', 'is_email_confirmed', 'is_staff',
'is_superuser', 'groups', 'user_permissions']}
),
(
_('Important dates'),
{'fields': ('last_login', 'date_joined')}
),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
)
list_display = ('email', 'full_name', 'is_staff', 'is_active', 'is_email_confirmed')
search_fields = ('email', 'full_name')
ordering = ('email',)
@admin.register(RegistrationConfirmationAttempt)
class RegistrationConfirmationAttemptAdmin(admin.ModelAdmin):
list_display = ['user', 'token', 'is_confirmed', 'email_last_sent_at']
list_filter = ['is_confirmed']
readonly_fields = ['token', 'email_last_sent_at']
autocomplete_fields = ['user']
actions = [resend_email]
search_fields = ['user__full_name', 'user__email']
class Meta:
ordering = ['-email_last_sent_at']
@admin.register(PasswordResetConfirmationAttempt)
class PasswordResetConfirmationAttemptAdmin(admin.ModelAdmin):
list_display = ['user', 'token', 'is_confirmed', 'email_last_sent_at']
list_filter = ['is_confirmed']
readonly_fields = ['token', 'email_last_sent_at']
autocomplete_fields = ['user']
actions = [resend_email]
search_fields = ['user__full_name', 'user__email']
class Meta:
ordering = ['-email_last_sent_at']
| <filename>arkav_is_api/arkavauth/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import User, RegistrationConfirmationAttempt, PasswordResetConfirmationAttempt
def resend_email(modeladmin, request, queryset):
for obj in queryset:
obj.send_email()
resend_email.short_description = 'Resend the confirmation email of the selected attempts with the same token.'
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
"""Define admin model for custom User model with no username field."""
fieldsets = (
(
None,
{'fields': ['full_name', 'email', 'password']}
),
(
_('Permissions'),
{'fields': ['is_active', 'is_email_confirmed', 'is_staff',
'is_superuser', 'groups', 'user_permissions']}
),
(
_('Important dates'),
{'fields': ('last_login', 'date_joined')}
),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
)
list_display = ('email', 'full_name', 'is_staff', 'is_active', 'is_email_confirmed')
search_fields = ('email', 'full_name')
ordering = ('email',)
@admin.register(RegistrationConfirmationAttempt)
class RegistrationConfirmationAttemptAdmin(admin.ModelAdmin):
list_display = ['user', 'token', 'is_confirmed', 'email_last_sent_at']
list_filter = ['is_confirmed']
readonly_fields = ['token', 'email_last_sent_at']
autocomplete_fields = ['user']
actions = [resend_email]
search_fields = ['user__full_name', 'user__email']
class Meta:
ordering = ['-email_last_sent_at']
@admin.register(PasswordResetConfirmationAttempt)
class PasswordResetConfirmationAttemptAdmin(admin.ModelAdmin):
list_display = ['user', 'token', 'is_confirmed', 'email_last_sent_at']
list_filter = ['is_confirmed']
readonly_fields = ['token', 'email_last_sent_at']
autocomplete_fields = ['user']
actions = [resend_email]
search_fields = ['user__full_name', 'user__email']
class Meta:
ordering = ['-email_last_sent_at']
| en | 0.772945 | Define admin model for custom User model with no username field. | 2.029595 | 2 |
pytablewriter/__init__.py | sundarsrst/pytablewriter | 1 | 6613457 | <reponame>sundarsrst/pytablewriter
# encoding: utf-8
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
from __future__ import absolute_import
from typepy import (
Bool,
DateTime,
Dictionary,
Infinity,
Integer,
IpAddress,
List,
Nan,
NoneType,
NullString,
RealNumber,
String,
)
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._factory import TableWriterFactory
from ._function import dump_tabledata, dumps_tabledata
from ._logger import set_log_level, set_logger
from ._table_format import FormatAttr, TableFormat
from .error import (
EmptyHeaderError,
EmptyTableDataError,
EmptyTableNameError,
EmptyValueError,
NotSupportedError,
WriterNotFoundError,
)
from .style import Align, Format
from .writer import (
CsvTableWriter,
ElasticsearchWriter,
ExcelXlsTableWriter,
ExcelXlsxTableWriter,
HtmlTableWriter,
JavaScriptTableWriter,
JsonLinesTableWriter,
JsonTableWriter,
LatexMatrixWriter,
LatexTableWriter,
LtsvTableWriter,
MarkdownTableWriter,
MediaWikiTableWriter,
NullTableWriter,
NumpyTableWriter,
PandasDataFrameWriter,
PythonCodeTableWriter,
RstCsvTableWriter,
RstGridTableWriter,
RstSimpleTableWriter,
SpaceAlignedTableWriter,
SqliteTableWriter,
TomlTableWriter,
TsvTableWriter,
UnicodeTableWriter,
)
from .writer._table_writer import LineBreakHandling
| # encoding: utf-8
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
from __future__ import absolute_import
from typepy import (
Bool,
DateTime,
Dictionary,
Infinity,
Integer,
IpAddress,
List,
Nan,
NoneType,
NullString,
RealNumber,
String,
)
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._factory import TableWriterFactory
from ._function import dump_tabledata, dumps_tabledata
from ._logger import set_log_level, set_logger
from ._table_format import FormatAttr, TableFormat
from .error import (
EmptyHeaderError,
EmptyTableDataError,
EmptyTableNameError,
EmptyValueError,
NotSupportedError,
WriterNotFoundError,
)
from .style import Align, Format
from .writer import (
CsvTableWriter,
ElasticsearchWriter,
ExcelXlsTableWriter,
ExcelXlsxTableWriter,
HtmlTableWriter,
JavaScriptTableWriter,
JsonLinesTableWriter,
JsonTableWriter,
LatexMatrixWriter,
LatexTableWriter,
LtsvTableWriter,
MarkdownTableWriter,
MediaWikiTableWriter,
NullTableWriter,
NumpyTableWriter,
PandasDataFrameWriter,
PythonCodeTableWriter,
RstCsvTableWriter,
RstGridTableWriter,
RstSimpleTableWriter,
SpaceAlignedTableWriter,
SqliteTableWriter,
TomlTableWriter,
TsvTableWriter,
UnicodeTableWriter,
)
from .writer._table_writer import LineBreakHandling | en | 0.277426 | # encoding: utf-8 .. codeauthor:: <NAME> <<EMAIL>> | 1.811044 | 2 |
snippets/raspi_camera/vid_stream.py | arajput63/flask_react_base | 0 | 6613458 | from io import BytesIO
from picamera import PiCamera
stream = BytesIO()
camera = PiCamera()
camera.resolution = (640, 480)
camera.start_recording(stream, format='h264', quality=23)
camera.wait_recording(15)
camera.stop_recording() | from io import BytesIO
from picamera import PiCamera
stream = BytesIO()
camera = PiCamera()
camera.resolution = (640, 480)
camera.start_recording(stream, format='h264', quality=23)
camera.wait_recording(15)
camera.stop_recording() | none | 1 | 2.561234 | 3 | |
litreview/apps/accounts/views/get_users.py | josayko/litreview | 0 | 6613459 | <reponame>josayko/litreview
from ..models import Review, Ticket, UserFollow
def get_users_subs(user):
"""Returns user's subscriptions to other users"""
return UserFollow.objects.filter(user=user)
def get_users_followers(user):
"""Returns users that are following user"""
return UserFollow.objects.filter(followed_user=user)
def get_users_viewable_reviews(user, feed=False):
"""Filter reviews posts a user can see"""
if feed is False:
# Posts page, show only user reviews
return Review.objects.filter(user=user)
else:
# Show subs' reviews + own reviews
subs = get_users_subs(user)
followed_users = [x.followed_user for x in subs]
followed_users.append(user)
# Show other users review to tickets
reviews = Review.objects.filter(user__in=followed_users)
reviews_to_tickets = Review.objects.filter(ticket__user__in=followed_users)
return reviews | reviews_to_tickets
def get_users_viewable_tickets(user, feed=False):
    """Return the Ticket queryset *user* is allowed to see.

    With feed=False only the user's own tickets are returned (posts page);
    otherwise tickets from followed users and the user themselves.
    """
    if feed is False:
        # Posts page: restrict to the user's own tickets.
        return Ticket.objects.filter(user=user)
    # Feed: tickets from followed accounts plus the user's own.
    authors = [follow.followed_user for follow in get_users_subs(user)]
    authors.append(user)
    return Ticket.objects.filter(user__in=authors)
| from ..models import Review, Ticket, UserFollow
def get_users_subs(user):
    """Returns user's subscriptions to other users"""
    # UserFollow rows where *user* is the follower.
    return UserFollow.objects.filter(user=user)
def get_users_followers(user):
    """Returns users that are following user"""
    # UserFollow rows where *user* is the account being followed.
    return UserFollow.objects.filter(followed_user=user)
def get_users_viewable_reviews(user, feed=False):
    """Filter reviews posts a user can see"""
    if feed is False:
        # Posts page, show only user reviews
        return Review.objects.filter(user=user)
    else:
        # Show subs' reviews + own reviews
        subs = get_users_subs(user)
        followed_users = [x.followed_user for x in subs]
        followed_users.append(user)
        # Show other users review to tickets
        reviews = Review.objects.filter(user__in=followed_users)
        reviews_to_tickets = Review.objects.filter(ticket__user__in=followed_users)
        # Combine both querysets with a queryset OR.
        return reviews | reviews_to_tickets
def get_users_viewable_tickets(user, feed=False):
"""Filter tickets posts a user can see"""
if feed is False:
# Posts page, show only user tickets
return Ticket.objects.filter(user=user)
else:
# Show subs' tickets + own tickets
subs = get_users_subs(user)
followed_users = [x.followed_user for x in subs]
followed_users.append(user)
return Ticket.objects.filter(user__in=followed_users) | en | 0.851503 | Returns user's subscriptions to other users Returns users that are following user Filter reviews posts a user can see # Posts page, show only user reviews # Show subs' reviews + own reviews # Show other users review to tickets Filter tickets posts a user can see # Posts page, show only user tickets # Show subs' tickets + own tickets | 2.660201 | 3 |
pysource2notebooks.py | ggomes/p4e | 1 | 6613460 | import os
import subprocess

# Source directory with .py scripts and destination for generated notebooks.
input_folder = "./pysource"
output_folder = "./notebooks"

# Convert every Python file in input_folder to a Jupyter notebook via jupytext.
for file in os.listdir(input_folder):
    name, ext = os.path.splitext(file)
    if os.path.isfile(os.path.join(input_folder, file)) and ext == ".py":
        # subprocess.run with an argument list avoids the shell entirely, so
        # file names containing spaces or shell metacharacters cannot break
        # (or inject into) the command the way the old os.system f-string did.
        # check=True surfaces jupytext failures instead of ignoring them.
        subprocess.run(
            [
                "jupytext",
                "--to",
                "notebook",
                os.path.join(input_folder, file),
                "--output",
                os.path.join(output_folder, f"{name}.ipynb"),
            ],
            check=True,
        )
| import os
# Source directory with .py scripts and destination for generated notebooks.
input_folder = "./pysource"
output_folder = "./notebooks"
# Convert every Python file in input_folder to a notebook with jupytext.
for file in os.listdir(input_folder):
    name, ext = os.path.splitext(file)
    if os.path.isfile(os.path.join(input_folder, file)) and ext==".py":
        # NOTE(review): the f-string is passed through a shell; paths with
        # spaces or metacharacters would break — consider subprocess.run
        # with an argument list instead of os.system.
        os.system(f"jupytext --to notebook {input_folder}/{file} --output {output_folder}/{name}.ipynb")
| none | 1 | 2.511329 | 3 | |
vagrant/optraj.istic.univ-rennes1.fr/src/tests/optimisation/testpythondeap.py | gpierre42/optraj | 0 | 6613461 | #coding=utf8
import unittest
from optimisation.pythondeap import ComputationLauncher
from interfacebdd.AssignmentDAO import AssignmentDAO
from interfacebdd.Connexion import Connexion
from system.System import System
from interfacebdd.SiteDAO import SiteDAO
from interfacebdd.PhaseDAO import PhaseDAO
from interfacebdd.WorkerDAO import WorkerDAO
from operator import xor
from datetime import date
class TestDeap(unittest.TestCase):
    # NOTE: this module targets Python 2 (a `print` statement is used below).

    def test_mustInSolution(self):
        '''
        Check that every assignment already stored in the database is
        present in the final solution computed by the optimiser.
        '''
        launcher = ComputationLauncher()
        launcher.init()
        launcher.algoInst.ngen = 1  # a single generation keeps the test fast
        launcher.start()
        launcher.join()
        solution = launcher.assignments
        conn = Connexion().connect()
        assignmentsBdd = AssignmentDAO().getAll(conn, False, [])
        today = date.today()
        res = True
        for a in assignmentsBdd:
            p = a.phase
            # only keep phases inside the 17-week optimisation window
            if p.numWeek >= today.isocalendar()[1] and p.numYear >= today.year:
                numWorker = a.worker.num
                numPhase = a.phase.num
                for a2 in solution:
                    if a2.worker.num == numWorker and a2.phase.num == numPhase:
                        # NOTE(review): res is never set to False when no match
                        # is found, so this assertion can never fail — confirm.
                        res = res & True
        self.assertTrue(res)

    def test_allWorkersInSolution(self):
        '''
        Check that no worker disappears from, or is added to, the solution
        during the algorithm: for every (year, week) each worker must be in
        exactly one of the states assigned / available / unavailable.
        '''
        launcher = ComputationLauncher()
        launcher.init()
        launcher.algoInst.ngen = 1
        launcher.start()
        launcher.join()
        conn = Connexion().connect()
        system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), PhaseDAO().getAll(conn, False, []))
        res = True
        for w in system.workers:
            for h in launcher.hallOfFame:
                for year in h.keys():
                    for week in h[year].keys():
                        boolAssign = False
                        boolAvailable = False
                        boolUnavailable = False
                        # is the worker declared unavailable this week?
                        if year in launcher.algoInst.unavailabilities.keys():
                            if week in launcher.algoInst.unavailabilities[year].keys():
                                for x in launcher.algoInst.unavailabilities[year][week]:
                                    if x == w.num:
                                        boolUnavailable = True
                        # is the worker assigned to some site this week?
                        for site in h[year][week]["assigns"].keys():
                            for temp in h[year][week]["assigns"][site]:
                                if temp == w.num:
                                    boolAssign = True
                        # is the worker listed as available for his craft/qualification?
                        for temp2 in h[year][week]["availablesWorkers"][w.craft.num][w.qualification.num]:
                            if temp2 == w.num:
                                boolAvailable = True
                        # exactly one of the three states must hold
                        if not ((boolAssign & (not boolAvailable) & (not boolUnavailable)) |
                                ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) |
                                ((not boolAssign) & (not boolAvailable) & (boolUnavailable))):
                            print boolAssign, boolAvailable, boolUnavailable
                        res = res & ((boolAssign & (not boolAvailable) & (not boolUnavailable)) |
                                     ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) |
                                     ((not boolAssign) & (not boolAvailable) & (boolUnavailable)))
        self.assertTrue(res)

    # def test_allWorkersInIndividual(self):
    #     '''
    #     Check that no worker disappears or is added during the algorithm
    #     '''
    #     from deap import creator
    #     from deap import base
    #     creator.create("Fitness", base.Fitness, weights=(-1,))
    #     creator.create("Individual", dict, fitness=creator.Fitness)
    #     launcher = ComputationLauncher()
    #     launcher.init()
    #     ind = launcher.algoInst.createInd(creator.Individual)
    #     conn = Connexion().connect()
    #     system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), PhaseDAO().getAll(conn, False, []))
    #     res = True
    #     for w in system.workers:
    #         h = ind
    #         for year in h.keys():
    #             for week in h[year].keys():
    #                 boolAssign = False
    #                 boolAvailable = False
    #                 boolUnavailable = False
    #                 for x in launcher.algoInst.unavailabilities[year][week]:
    #                     if x == w.num:
    #                         boolUnavailable = True
    #                 for site in h[year][week]["assigns"].keys():
    #                     for temp in h[year][week]["assigns"][site]:
    #                         if temp == w.num:
    #                             boolAssign = True
    #                 for temp2 in h[year][week]["availablesWorkers"][w.craft.num][w.qualification.num]:
    #                     if temp2 == w.num:
    #                         boolAvailable = True
    #                 res = res & ((boolAssign & (not boolAvailable) & (not boolUnavailable)) |
    #                              ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) |
    #                              ((not boolAssign) & (not boolAvailable) & (boolUnavailable)))
    #     self.assertTrue(res)

    def test_needAllSatisfy(self):
        '''
        Check that every need is fully satisfied whenever possible; when it
        cannot be, the need must be recorded as an availability exception.
        '''
        launcher = ComputationLauncher()
        launcher.init()
        launcher.algoInst.ngen = 1
        launcher.start()
        launcher.join()
        conn = Connexion().connect()
        today = date.today()
        # phases of the current year from the current week onwards ...
        pDAO = PhaseDAO().getAllByFilterExtended(conn, False, [],
                                                 ('numYear', '=', str(today.year)),
                                                 ('numWeek', '>=', str(today.isocalendar()[1])))
        # ... plus all phases of the following years
        pDAO = pDAO | PhaseDAO().getAllByFilterExtended(conn, False, [],
                                                        ('numYear', '>', str(today.year)))
        system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), pDAO)
        res = True
        for phase in system.phases:
            for need in phase.needs:
                for hof in launcher.hallOfFame:
                    # count assigned workers matching the need's craft/qualification
                    nbWorkerofCraftQualif = 0
                    for w in hof[phase.numYear][phase.numWeek]["assigns"][phase.numSite]:
                        worker = system.getWorkerById(w)
                        if (worker.craft.num == need.craft.num) & (worker.qualification.num == need.qualification.num):
                            nbWorkerofCraftQualif += 1
                    needRespect = (nbWorkerofCraftQualif == need.need)
                    # a need may legitimately be unmet if recorded as an issue
                    exception = False
                    for n in launcher.issue["disponibility"]:
                        if need.num == n:
                            exception = True
                    # exactly one of: need met, or recorded availability issue
                    res = res & xor(needRespect, exception)
        self.assertTrue(res)
| #coding=utf8
import unittest
from optimisation.pythondeap import ComputationLauncher
from interfacebdd.AssignmentDAO import AssignmentDAO
from interfacebdd.Connexion import Connexion
from system.System import System
from interfacebdd.SiteDAO import SiteDAO
from interfacebdd.PhaseDAO import PhaseDAO
from interfacebdd.WorkerDAO import WorkerDAO
from operator import xor
from datetime import date
class TestDeap(unittest.TestCase):
    # NOTE: this module targets Python 2 (a `print` statement is used below).

    def test_mustInSolution(self):
        '''
        Check that every assignment already stored in the database is
        present in the final solution computed by the optimiser.
        '''
        launcher = ComputationLauncher()
        launcher.init()
        launcher.algoInst.ngen = 1  # a single generation keeps the test fast
        launcher.start()
        launcher.join()
        solution = launcher.assignments
        conn = Connexion().connect()
        assignmentsBdd = AssignmentDAO().getAll(conn, False, [])
        today = date.today()
        res = True
        for a in assignmentsBdd:
            p = a.phase
            # only keep phases inside the 17-week optimisation window
            if p.numWeek >= today.isocalendar()[1] and p.numYear >= today.year:
                numWorker = a.worker.num
                numPhase = a.phase.num
                for a2 in solution:
                    if a2.worker.num == numWorker and a2.phase.num == numPhase:
                        # NOTE(review): res is never set to False when no match
                        # is found, so this assertion can never fail — confirm.
                        res = res & True
        self.assertTrue(res)

    def test_allWorkersInSolution(self):
        '''
        Check that no worker disappears from, or is added to, the solution
        during the algorithm: for every (year, week) each worker must be in
        exactly one of the states assigned / available / unavailable.
        '''
        launcher = ComputationLauncher()
        launcher.init()
        launcher.algoInst.ngen = 1
        launcher.start()
        launcher.join()
        conn = Connexion().connect()
        system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), PhaseDAO().getAll(conn, False, []))
        res = True
        for w in system.workers:
            for h in launcher.hallOfFame:
                for year in h.keys():
                    for week in h[year].keys():
                        boolAssign = False
                        boolAvailable = False
                        boolUnavailable = False
                        # is the worker declared unavailable this week?
                        if year in launcher.algoInst.unavailabilities.keys():
                            if week in launcher.algoInst.unavailabilities[year].keys():
                                for x in launcher.algoInst.unavailabilities[year][week]:
                                    if x == w.num:
                                        boolUnavailable = True
                        # is the worker assigned to some site this week?
                        for site in h[year][week]["assigns"].keys():
                            for temp in h[year][week]["assigns"][site]:
                                if temp == w.num:
                                    boolAssign = True
                        # is the worker listed as available for his craft/qualification?
                        for temp2 in h[year][week]["availablesWorkers"][w.craft.num][w.qualification.num]:
                            if temp2 == w.num:
                                boolAvailable = True
                        # exactly one of the three states must hold
                        if not ((boolAssign & (not boolAvailable) & (not boolUnavailable)) |
                                ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) |
                                ((not boolAssign) & (not boolAvailable) & (boolUnavailable))):
                            print boolAssign, boolAvailable, boolUnavailable
                        res = res & ((boolAssign & (not boolAvailable) & (not boolUnavailable)) |
                                     ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) |
                                     ((not boolAssign) & (not boolAvailable) & (boolUnavailable)))
        self.assertTrue(res)

    # def test_allWorkersInIndividual(self):
    #     '''
    #     Check that no worker disappears or is added during the algorithm
    #     '''
    #     from deap import creator
    #     from deap import base
    #     creator.create("Fitness", base.Fitness, weights=(-1,))
    #     creator.create("Individual", dict, fitness=creator.Fitness)
    #     launcher = ComputationLauncher()
    #     launcher.init()
    #     ind = launcher.algoInst.createInd(creator.Individual)
    #     conn = Connexion().connect()
    #     system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), PhaseDAO().getAll(conn, False, []))
    #     res = True
    #     for w in system.workers:
    #         h = ind
    #         for year in h.keys():
    #             for week in h[year].keys():
    #                 boolAssign = False
    #                 boolAvailable = False
    #                 boolUnavailable = False
    #                 for x in launcher.algoInst.unavailabilities[year][week]:
    #                     if x == w.num:
    #                         boolUnavailable = True
    #                 for site in h[year][week]["assigns"].keys():
    #                     for temp in h[year][week]["assigns"][site]:
    #                         if temp == w.num:
    #                             boolAssign = True
    #                 for temp2 in h[year][week]["availablesWorkers"][w.craft.num][w.qualification.num]:
    #                     if temp2 == w.num:
    #                         boolAvailable = True
    #                 res = res & ((boolAssign & (not boolAvailable) & (not boolUnavailable)) |
    #                              ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) |
    #                              ((not boolAssign) & (not boolAvailable) & (boolUnavailable)))
    #     self.assertTrue(res)

    def test_needAllSatisfy(self):
        '''
        Check that every need is fully satisfied whenever possible; when it
        cannot be, the need must be recorded as an availability exception.
        '''
        launcher = ComputationLauncher()
        launcher.init()
        launcher.algoInst.ngen = 1
        launcher.start()
        launcher.join()
        conn = Connexion().connect()
        today = date.today()
        # phases of the current year from the current week onwards ...
        pDAO = PhaseDAO().getAllByFilterExtended(conn, False, [],
                                                 ('numYear', '=', str(today.year)),
                                                 ('numWeek', '>=', str(today.isocalendar()[1])))
        # ... plus all phases of the following years
        pDAO = pDAO | PhaseDAO().getAllByFilterExtended(conn, False, [],
                                                        ('numYear', '>', str(today.year)))
        system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), pDAO)
        res = True
        for phase in system.phases:
            for need in phase.needs:
                for hof in launcher.hallOfFame:
                    # count assigned workers matching the need's craft/qualification
                    nbWorkerofCraftQualif = 0
                    for w in hof[phase.numYear][phase.numWeek]["assigns"][phase.numSite]:
                        worker = system.getWorkerById(w)
                        if (worker.craft.num == need.craft.num) & (worker.qualification.num == need.qualification.num):
                            nbWorkerofCraftQualif += 1
                    needRespect = (nbWorkerofCraftQualif == need.need)
                    # a need may legitimately be unmet if recorded as an issue
                    exception = False
                    for n in launcher.issue["disponibility"]:
                        if need.num == n:
                            exception = True
                    # exactly one of: need met, or recorded availability issue
                    res = res & xor(needRespect, exception)
        self.assertTrue(res)
| en | 0.258118 | #coding=utf8 Test si toutes les affectations déja présente en base sont bien dans la solution finale # on ne prend que les phases corespondant aux 17 semaines du tableau d'opti Test de la non disparition ou ajout d'un worker pendant l'algo # def test_allWorkersInIndividual(self): # ''' # Test de la non disparition ou ajout d'un worker pendant l'algo # ''' # from deap import creator # from deap import base # creator.create("Fitness", base.Fitness, weights=(-1,)) # creator.create("Individual", dict, fitness=creator.Fitness) # launcher = ComputationLauncher() # launcher.init() # ind = launcher.algoInst.createInd(creator.Individual) # conn = Connexion().connect() # system = System(SiteDAO().getAll(conn, False, []), WorkerDAO().getAll(conn, False, []), PhaseDAO().getAll(conn, False, [])) # res = True # for w in system.workers: # h = ind # for year in h.keys(): # for week in h[year].keys(): # boolAssign = False # boolAvailable = False # boolUnavailable = False # for x in launcher.algoInst.unavailabilities[year][week]: # if x == w.num: # boolUnavailable = True # for site in h[year][week]["assigns"].keys(): # for temp in h[year][week]["assigns"][site]: # if temp == w.num: # boolAssign = True # for temp2 in h[year][week]["availablesWorkers"][w.craft.num][w.qualification.num]: # if temp2 == w.num: # boolAvailable = True # res = res & ((boolAssign & (not boolAvailable) & (not boolUnavailable)) | # ((not boolAssign) & (boolAvailable) & (not boolUnavailable)) | # ((not boolAssign) & (not boolAvailable) & (boolUnavailable))) # self.assertTrue(res) si les besoins sont tous remplis quand cela est possible, execption sinon | 2.496445 | 2 |
src/dhp/kma_score_extract/pdf_to_image/__init__.py | Haven-Code/KMA-Score-Extractor | 1 | 6613462 | <gh_stars>1-10
from pdf2image import convert_from_path
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
import os
from pathlib import Path
# Default scratch directory where pdf2image writes its intermediate .ppm pages.
default_temp_path = os.path.join(os.path.abspath(".."), "temp", "pdf2images_ppm")
def extract_image(filename, thread_count=8, poppler_path="", temp_path=None):
    """Render every page of a PDF to a PIL image via pdf2image.

    :param filename: path of the PDF file to convert
    :type filename: str
    :param thread_count: number of worker threads for the conversion (default 8)
    :type thread_count: int
    :param poppler_path: location of the poppler binaries when not on PATH
    :type poppler_path: str
    :param temp_path: directory for intermediate files; the module default
        is used when None
    :type temp_path: str
    :return: one PIL image per page
    :rtype: list
    """
    target_dir = default_temp_path if temp_path is None else temp_path
    if not os.path.exists(target_dir):
        Path(target_dir).mkdir(parents=True, exist_ok=True)
    return convert_from_path(
        filename,
        dpi=400,
        thread_count=thread_count,
        poppler_path=poppler_path,
        output_folder=target_dir,
    )
| from pdf2image import convert_from_path
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
import os
from pathlib import Path
# Default scratch directory where pdf2image writes its intermediate .ppm pages.
default_temp_path = os.path.join(os.path.abspath(".."), "temp", "pdf2images_ppm")
def extract_image(filename, thread_count=8, poppler_path="", temp_path=None):
"""
Extract image from pdf using pdf2image
:param filename: Path to file
:type filename str
:param thread_count: Number of thread use to extract (Default: 8)
:type thread_count int
:param poppler_path: Path to poppler_path if not installed in PATH
:type poppler_path str
:param temp_path: Path to save temp file
:type temp_path str
:return: List of PIL image
:rtype: list
"""
if temp_path is None:
temp_path = default_temp_path
if not os.path.exists(temp_path):
Path(temp_path).mkdir(parents=True, exist_ok=True)
images = convert_from_path(filename, dpi=400, thread_count=thread_count, poppler_path=poppler_path,
output_folder=temp_path)
return images | en | 0.715206 | Extract image from pdf using pdf2image :param filename: Path to file :type filename str :param thread_count: Number of thread use to extract (Default: 8) :type thread_count int :param poppler_path: Path to poppler_path if not installed in PATH :type poppler_path str :param temp_path: Path to save temp file :type temp_path str :return: List of PIL image :rtype: list | 3.347847 | 3 |
swimai/recon/_recon.py | DobromirM/swim-system-python | 8 | 6613463 | <filename>swimai/recon/_recon.py<gh_stars>1-10
# Copyright 2015-2021 SWIM.AI inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swimai.structures._structs import Value
from ._parsers import _ReconParser
from ._writers import _ReconWriter
class Recon:
    """Facade for converting between Recon strings and Swim structures."""

    # Lazily created singletons shared by every call.
    _writer = None
    _parser = None

    @staticmethod
    def parse(recon_string: str) -> 'Value':
        """Parse a Recon message string into a Swim structure object.

        :param recon_string: - Recon message in string format.
        :return: - Swim structure object representing the Recon message.
        """
        parser = Recon._get_parser()
        return parser._parse_block_string(recon_string)

    @staticmethod
    def to_string(item: 'Value') -> str:
        """Serialise a Swim structure object into a Recon string.

        :param item: - Swim structure object.
        :return: - Recon message in string format representing the object.
        """
        writer = Recon._get_writer()
        return writer._write_item(item)

    @staticmethod
    def _get_writer() -> '_ReconWriter':
        """Return the shared Recon writer, creating it on first use."""
        if Recon._writer is None:
            Recon._writer = _ReconWriter()
        return Recon._writer

    @staticmethod
    def _get_parser() -> '_ReconParser':
        """Return the shared Recon parser, creating it on first use."""
        if Recon._parser is None:
            Recon._parser = _ReconParser()
        return Recon._parser
| <filename>swimai/recon/_recon.py<gh_stars>1-10
# Copyright 2015-2021 SWIM.AI inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swimai.structures._structs import Value
from ._parsers import _ReconParser
from ._writers import _ReconWriter
class Recon:
    """Static facade converting between Recon strings and Swim structures."""

    # Singletons, created lazily by _get_writer/_get_parser.
    _writer = None
    _parser = None

    @staticmethod
    def parse(recon_string: str) -> 'Value':
        """
        Parse a Recon message in string format and return a Swim structure object.

        :param recon_string:    - Recon message in string format.
        :return:                - Swim structure object representing the Recon message.
        """
        return Recon._get_parser()._parse_block_string(recon_string)

    @staticmethod
    def to_string(item: 'Value') -> str:
        """
        Parse a Swim structure object to a Recon string.

        :param item:            - Swim structure object.
        :return:                - Recon message in string format representing the Swim structure object.
        """
        return Recon._get_writer()._write_item(item)

    @staticmethod
    def _get_writer() -> '_ReconWriter':
        """
        Get a Recon writer if one already exists.
        Otherwise, instantiate a new one.

        :return:                - Recon writer.
        """
        if Recon._writer is None:
            Recon._writer = _ReconWriter()
        return Recon._writer

    @staticmethod
    def _get_parser() -> '_ReconParser':
        """
        Get a Recon parser if one already exists.
        Otherwise, instantiate a new one.

        :return:                - Recon parser.
        """
        if Recon._parser is None:
            Recon._parser = _ReconParser()
        return Recon._parser
| en | 0.710292 | # Copyright 2015-2021 SWIM.AI inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Singletons Parse a Recon message in string format and return a Swim structure object. :param recon_string: - Recon message in string format. :return: - Swim structure object representing the Recon message. Parse a Swim structure object to a Recon string. :param item: - Swim structure object. :return: - Recon message in string format representing the Swim structure object. Get a Recon writer if one already exists. Otherwise, instantiate a new one. :return: - Recon writer. Get a Recon parser if one already exists. Otherwise, instantiate a new one. :return: - Recon parser. | 2.526383 | 3 |
py/py_0210_obtuse_angled_triangles.py | lcsm29/project-euler | 0 | 6613464 | <filename>py/py_0210_obtuse_angled_triangles.py
# Solution of;
# Project Euler Problem 210: Obtuse Angled Triangles
# https://projecteuler.net/problem=210
#
# Consider the set S(r) of points (x,y) with integer coordinates satisfying
# |x| + |y| ≤ r. Let O be the point (0,0) and C the point (r/4,r/4). Let N(r)
# be the number of points B in S(r), so that the triangle OBC has an obtuse
# angle, i. e. the largest angle α satisfies 90°<α<180°. So, for example,
# N(4)=24 and N(8)=100. What is N(1,000,000,000)?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    # Placeholder solver: Problem 210 is not implemented yet; timed.caller
    # still benchmarks this no-op with problem-sized arguments.
    pass


if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 210
    # Run the placeholder through the repository's shared timing harness.
    timed.caller(dummy, n, i, prob_id)
| <filename>py/py_0210_obtuse_angled_triangles.py
# Solution of;
# Project Euler Problem 210: Obtuse Angled Triangles
# https://projecteuler.net/problem=210
#
# Consider the set S(r) of points (x,y) with integer coordinates satisfying
# |x| + |y| ≤ r. Let O be the point (0,0) and C the point (r/4,r/4). Let N(r)
# be the number of points B in S(r), so that the triangle OBC has an obtuse
# angle, i. e. the largest angle α satisfies 90°<α<180°. So, for example,
# N(4)=24 and N(8)=100. What is N(1,000,000,000)?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    # Placeholder solver: Problem 210 is not implemented yet; timed.caller
    # still benchmarks this no-op with problem-sized arguments.
    pass


if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 210
    # Run the placeholder through the repository's shared timing harness.
    timed.caller(dummy, n, i, prob_id)
| en | 0.582785 | # Solution of; # Project Euler Problem 210: Obtuse Angled Triangles # https://projecteuler.net/problem=210 # # Consider the set S(r) of points (x,y) with integer coordinates satisfying # |x| + |y| ≤ r. Let O be the point (0,0) and C the point (r/4,r/4). Let N(r) # be the number of points B in S(r), so that the triangle OBC has an obtuse # angle, i. e. the largest angle α satisfies 90°<α<180°. So, for example, # N(4)=24 and N(8)=100. What is N(1,000,000,000)? # # by lcsm29 http://github.com/lcsm29/project-euler | 2.983695 | 3 |
presentation/formatter/date_formatter.py | xaman/ScrollZ | 1 | 6613465 | from presentation.formatter.formatter import Formatter
class DateFormatter(Formatter):
    """Formats a date as e.g. "Mon, 3 Jan '22" (weekday, day, month, 2-digit year)."""

    # The day of month is interpolated manually (see format) because
    # strftime's %-d (day without leading zero) is a glibc extension that
    # is not supported on Windows.
    _FORMAT = "%a, {day} %b '%y"

    def format(self, value):
        """Return *value* rendered with the short human-readable date format."""
        return value.strftime(self._FORMAT.format(day=value.day))
| from presentation.formatter.formatter import Formatter
class DateFormatter(Formatter):
    # Short human-readable date, e.g. "Mon, 3 Jan '22".
    # NOTE(review): %-d (day without leading zero) is a glibc strftime
    # extension — it is not supported on Windows; confirm target platforms.
    _FORMAT = "%a, %-d %b '%y"

    def format(self, value):
        """Return *value* formatted with the class's short date format."""
        return value.strftime(self._FORMAT)
| none | 1 | 3.196748 | 3 | |
authorization_system/authorization_system/document_processing/scanner.py | daftcode/authorization-system | 7 | 6613466 | # coding=utf-8
"""
Scanner.
HP Deskjet 3070 B611 series and Fujitsu fi-65F were tested.
"""
import os
import pickle
import cv2
import imageio
import numpy as np
import pyinsane2
from authorization_system.document_processing import document, document_type_detector
from authorization_system.document_processing.document_utils import BColors
from authorization_system.face_recognition import face, face_utils
class DocumentScanner:
    """Scans Polish identity documents and runs the extraction pipeline.

    Wraps a SANE scanner (via pyinsane2), a face-recognition model and an
    optional document-type detector. HP Deskjet 3070 B611 and Fujitsu fi-65F
    devices were tested.
    """

    def __init__(self, args, face_recognition=None, device=None, set_device=True):
        """Set up the models and (optionally) the scanner device.

        :param args: parsed CLI arguments (model paths, scanner options, ...)
        :param face_recognition: reuse an existing face.Recognition instance;
            one is built from *args* when None
        :param device: reuse an already-configured pyinsane2 scanner
        :param set_device: when True and *device* is None, initialise
            pyinsane2 and configure the scanner named in args.scanner_name
        """
        self.args = args
        if face_recognition is None:
            face_utils.set_face_model(args, verbose=False)
            face_recognition = face.Recognition(
                face_crop_size=args.image_size,
                face_crop_margin=args.margin,
                gpu_memory_fraction=args.gpu_memory_fraction,
            )
        self.face_recognition = face_recognition
        if args.load_document_type_detector:
            # loading document type detector
            self.document_detector = document_type_detector.DocumentTypeDetector()
            self.document_detector.load_document_means(
                self.args.document_type_detector_means_path
            )
        else:
            self.document_detector = None
        if device is None and set_device:
            # setting scanner
            pyinsane2.init()
            device = pyinsane2.Scanner(name=args.scanner_name)
            device.options["mode"].value = "Color"
            device.options["resolution"].value = 600
            if args.hp_sane_scanner:
                # Scan-area coordinates; values appear to be SANE fixed-point
                # (unit * 65536) — confirm against the backend in use.
                device.options["tl-x"].value = 1 * 65536
                device.options["tl-y"].value = 1 * 65536
                device.options["br-x"].value = 84 * 65536
                device.options["br-y"].value = 51 * 65536
        self.scanner_device = device

    def _adjust_gamma(self, image, gamma=2.5):
        """Return a gamma-corrected copy of *image* using a 256-entry LUT."""
        inv_gamma = 1.0 / gamma
        table = np.array(
            [((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]
        ).astype("uint8")
        return cv2.LUT(image.copy(), table)

    def scan_and_process(self, document_type=None, gamma=1.0, test_document_image=None):
        """Scan a document (or take a test image) and run the pipeline on it.

        :param document_type: force a document type instead of auto-detection
        :param gamma: gamma correction applied to the scan when != 1.0
        :param test_document_image: bypass the scanner and use this image
        :return: a populated document.Document instance
        """
        if test_document_image is None:
            # document scanning
            scan_session = self.scanner_device.scan(multiple=False)
            try:
                while True:
                    scan_session.scan.read()
            except EOFError:
                # EOFError marks the end of the scan session's data stream.
                pass
            image = scan_session.images[-1]
            if self.args.hp_sane_scanner:
                document_image = np.array(image)
            else:
                # Remember to select proper values of cut (shape of (1180, 1960))
                # and "scanning margin". Every scanner device is different.
                # If you want to have solution insensitive on document flipping,
                # use any graphics software to make sure that you get similar
                # images after cut and flip. In our configuration margin is (24, 30).
                document_image = np.array(image)[30:1210, 24:1984]
            if gamma != 1.0:
                document_image = self._adjust_gamma(document_image, gamma)
        else:
            document_image = test_document_image.copy()
        # document processing: setting type; detecting first name and surname;
        # hiding sensitive data; detecting photo
        document_info = document.Document()
        document_info.process_document(
            document_image,
            type=document_type,
            document_type_detector=self.document_detector,
            face_recognition=self.face_recognition,
        )
        return document_info

    def scan_document(
        self,
        document_type=None,
        gamma=1.0,
        exit_scanner=True,
        save_data=False,
        intro=True,
        window_scan_title="Scanned Document",
        test_document_image=None,
    ):
        """Interactive scan: optionally greet the user, scan, display and save.

        :return: the processed document.Document instance
        """
        if intro:
            os.system("clear")
            print(
                "Welcome to the authorization system based on Polish documents.",
                "Place your document on the scanner, with the photograph facing",
                "downwards.",
            )
            print(
                "\nYour document is going to be scanned right now.",
                "After then it will be showed on the screen.",
            )
        # document scanning and processing
        document_info = self.scan_and_process(
            document_type=document_type,
            gamma=gamma,
            test_document_image=test_document_image,
        )
        # showing scanned document with hidden sensitive data
        document_image = cv2.cvtColor(document_info.image.copy(), cv2.COLOR_RGB2BGR)
        cv2.imshow(window_scan_title, document_image)
        cv2.waitKey(50)
        # saving data
        if save_data:
            # Bug fix: save_document and exit are DocumentScanner methods
            # (defined below); the pyinsane2 Scanner object has no such
            # attributes, so the old self.scanner_device.* calls raised.
            self.save_document(document_info)
        if exit_scanner:
            self.exit()
        return document_info

    def check_if_correct_document(self, document_info):
        """Validate a processed document; print the reason and return False on failure."""
        if document_info is None:
            return False
        elif document_info.type == "unknown_document":
            print(
                "\n%s%sUnknown document was detected. Try again.%s"
                % (BColors.BOLD, BColors.FAIL, BColors.ENDC)
            )
            return False
        elif document_info.photo is None:
            print(
                "\n%s%sPhoto on the document wasn't detected. Try again.%s"
                % (BColors.BOLD, BColors.FAIL, BColors.ENDC)
            )
            return False
        elif document_info.type_auto != document_info.type:
            print(
                (
                    # Bug fix: add the missing space between the sentences.
                    "\n%s%sIncorrect document's type was detected after flipping. "
                    + "Try again.%s"
                )
                % (BColors.BOLD, BColors.FAIL, BColors.ENDC)
            )
            return False
        elif (
            document_info.name_image is None
            or document_info.first_name_detected_text is None
            or document_info.surname_detected_text is None
        ):
            print(
                "\n%s%sText wasn't detected. Try again.%s"
                % (BColors.BOLD, BColors.FAIL, BColors.ENDC)
            )
            return False
        else:
            return True

    def save_document(self, document_info):
        """Persist a processed document (pickle plus the extracted image crops).

        Creates the output sub-directories under args.data_save_dir on demand;
        silently does nothing when *document_info* is None.
        """
        if document_info is None:
            return None
        # creating paths (base_dir renamed from `dir`, which shadowed a builtin)
        base_dir = self.args.data_save_dir
        for sub_dir in (
            "full_document_info",
            "document_image",
            "document_type_image",
            "document_photo",
            "document_name_image",
            "document_box_name_image",
        ):
            if not os.path.exists(os.path.join(base_dir, sub_dir)):
                os.makedirs(os.path.join(base_dir, sub_dir))
        # saving files: all information about document in pickle; document's image;
        # part of document for type detector; photo from document;
        # part of document with first name and surname after processing;
        # part of document with first name and surname before processing;
        document_info_file_name = document_info.timestamp + "_" + document_info.type
        document_info_file_name = document_info_file_name.replace(" ", "_")
        with open(
            os.path.join(base_dir, "full_document_info", document_info_file_name + ".pkl"),
            "wb",
        ) as f:
            pickle.dump(document_info, f, pickle.HIGHEST_PROTOCOL)
        if document_info.image is not None:
            imageio.imsave(
                os.path.join(base_dir, "document_image", document_info_file_name + ".jpg"),
                document_info.image,
            )
            imageio.imsave(
                os.path.join(
                    base_dir, "document_type_image", document_info_file_name + ".jpg"
                ),
                document_info.image[50:300, 50:650, :],
            )
        if document_info.photo is not None:
            imageio.imsave(
                os.path.join(base_dir, "document_photo", document_info_file_name + ".jpg"),
                document_info.photo,
            )
        if document_info.name_image is not None:
            imageio.imsave(
                os.path.join(
                    base_dir, "document_name_image", document_info_file_name + ".jpg"
                ),
                document_info.name_image,
            )
        if document_info.box_name_image is not None:
            imageio.imsave(
                os.path.join(
                    base_dir, "document_box_name_image", document_info_file_name + ".jpg"
                ),
                document_info.box_name_image,
            )

    def exit(self):
        """Release the pyinsane2 scanning backend."""
        pyinsane2.exit()
| # coding=utf-8
"""
Scanner.
HP Deskjet 3070 B611 series and Fujitsu fi-65F were tested.
"""
import os
import pickle
import cv2
import imageio
import numpy as np
import pyinsane2
from authorization_system.document_processing import document, document_type_detector
from authorization_system.document_processing.document_utils import BColors
from authorization_system.face_recognition import face, face_utils
class DocumentScanner:
def __init__(self, args, face_recognition=None, device=None, set_device=True):
self.args = args
if face_recognition is None:
face_utils.set_face_model(args, verbose=False)
face_recognition = face.Recognition(
face_crop_size=args.image_size,
face_crop_margin=args.margin,
gpu_memory_fraction=args.gpu_memory_fraction,
)
self.face_recognition = face_recognition
if args.load_document_type_detector:
# loading document type detector
self.document_detector = document_type_detector.DocumentTypeDetector()
self.document_detector.load_document_means(
self.args.document_type_detector_means_path
)
else:
self.document_detector = None
if device is None and set_device:
# setting scanner
pyinsane2.init()
device = pyinsane2.Scanner(name=args.scanner_name)
device.options["mode"].value = "Color"
device.options["resolution"].value = 600
if args.hp_sane_scanner:
device.options["tl-x"].value = 1 * 65536
device.options["tl-y"].value = 1 * 65536
device.options["br-x"].value = 84 * 65536
device.options["br-y"].value = 51 * 65536
self.scanner_device = device
def _adjust_gamma(self, image, gamma=2.5):
inv_gamma = 1.0 / gamma
table = np.array(
[((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]
).astype("uint8")
return cv2.LUT(image.copy(), table)
def scan_and_process(self, document_type=None, gamma=1.0, test_document_image=None):
if test_document_image is None:
# document scanning
scan_session = self.scanner_device.scan(multiple=False)
try:
while True:
scan_session.scan.read()
except EOFError:
pass
image = scan_session.images[-1]
if self.args.hp_sane_scanner:
document_image = np.array(image)
else:
# Remember to select proper values of cut (shape of (1180, 1960))
# and "scanning margin". Every scanner device is different.
# If you want to have solution insensitive on document flipping,
# use any graphics software to make sure that you get similar
# images after cut and flip. In our configuration margin is (24, 30).
document_image = np.array(image)[30:1210, 24:1984]
if gamma != 1.0:
document_image = self._adjust_gamma(document_image, gamma)
else:
document_image = test_document_image.copy()
# document processing: setting type; detecting first name and surname;
# hiding sensitive data; detecting photo
document_info = document.Document()
document_info.process_document(
document_image,
type=document_type,
document_type_detector=self.document_detector,
face_recognition=self.face_recognition,
)
return document_info
def scan_document(
self,
document_type=None,
gamma=1.0,
exit_scanner=True,
save_data=False,
intro=True,
window_scan_title="Scanned Document",
test_document_image=None,
):
if intro:
os.system("clear")
print(
"Welcome to the authorization system based on Polish documents.",
"Place your document on the scanner, with the photograph facing",
"downwards.",
)
print(
"\nYour document is going to be scanned right now.",
"After then it will be showed on the screen.",
)
# document scanning and processing
document_info = self.scan_and_process(
document_type=document_type,
gamma=gamma,
test_document_image=test_document_image,
)
# showing scanned document with hidden sensitive data
document_image = cv2.cvtColor(document_info.image.copy(), cv2.COLOR_RGB2BGR)
cv2.imshow(window_scan_title, document_image)
cv2.waitKey(50)
# saving data
if save_data:
self.scanner_device.save_document(document_info)
if exit_scanner:
self.scanner_device.exit()
return document_info
def check_if_correct_document(self, document_info):
if document_info is None:
return False
elif document_info.type == "unknown_document":
print(
"\n%s%sUnknown document was detected. Try again.%s"
% (BColors.BOLD, BColors.FAIL, BColors.ENDC)
)
return False
elif document_info.photo is None:
print(
"\n%s%sPhoto on the document wasn't detected. Try again.%s"
% (BColors.BOLD, BColors.FAIL, BColors.ENDC)
)
return False
elif document_info.type_auto != document_info.type:
print(
(
"\n%s%sIncorrect document's type was detected after flipping."
+ "Try again.%s"
)
% (BColors.BOLD, BColors.FAIL, BColors.ENDC)
)
return False
elif (
document_info.name_image is None
or document_info.first_name_detected_text is None
or document_info.surname_detected_text is None
):
print(
"\n%s%sText wasn't detected. Try again.%s"
% (BColors.BOLD, BColors.FAIL, BColors.ENDC)
)
return False
else:
return True
def save_document(self, document_info):
if document_info is None:
return None
# creating paths
dir = self.args.data_save_dir
if not os.path.exists(os.path.join(dir, "full_document_info")):
os.makedirs(os.path.join(dir, "full_document_info"))
if not os.path.exists(os.path.join(dir, "document_image")):
os.makedirs(os.path.join(dir, "document_image"))
if not os.path.exists(os.path.join(dir, "document_type_image")):
os.makedirs(os.path.join(dir, "document_type_image"))
if not os.path.exists(os.path.join(dir, "document_photo")):
os.makedirs(os.path.join(dir, "document_photo"))
if not os.path.exists(os.path.join(dir, "document_name_image")):
os.makedirs(os.path.join(dir, "document_name_image"))
if not os.path.exists(os.path.join(dir, "document_box_name_image")):
os.makedirs(os.path.join(dir, "document_box_name_image"))
# saving files: all information about document in pickle; document's image;
# part of document for type detector; photo from document;
# part of document with first name and surname after processing;
# part of document with first name and surname before processing;
document_info_file_name = document_info.timestamp + "_" + document_info.type
document_info_file_name = document_info_file_name.replace(" ", "_")
with open(
os.path.join(dir, "full_document_info", document_info_file_name + ".pkl"),
"wb",
) as f:
pickle.dump(document_info, f, pickle.HIGHEST_PROTOCOL)
if document_info.image is not None:
imageio.imsave(
os.path.join(dir, "document_image", document_info_file_name + ".jpg"),
document_info.image,
)
imageio.imsave(
os.path.join(
dir, "document_type_image", document_info_file_name + ".jpg"
),
document_info.image[50:300, 50:650, :],
)
if document_info.photo is not None:
imageio.imsave(
os.path.join(dir, "document_photo", document_info_file_name + ".jpg"),
document_info.photo,
)
if document_info.name_image is not None:
imageio.imsave(
os.path.join(
dir, "document_name_image", document_info_file_name + ".jpg"
),
document_info.name_image,
)
if document_info.box_name_image is not None:
imageio.imsave(
os.path.join(
dir, "document_box_name_image", document_info_file_name + ".jpg"
),
document_info.box_name_image,
)
def exit(self):
pyinsane2.exit()
| en | 0.858511 | # coding=utf-8 Scanner. HP Deskjet 3070 B611 series and Fujitsu fi-65F were tested. # loading document type detector # setting scanner # document scanning # Remember to select proper values of cut (shape of (1180, 1960)) # and "scanning margin". Every scanner device is different. # If you want to have solution insensitive on document flipping, # use any graphics software to make sure that you get similar # images after cut and flip. In our configuration margin is (24, 30). # document processing: setting type; detecting first name and surname; # hiding sensitive data; detecting photo # document scanning and processing # showing scanned document with hidden sensitive data # saving data # creating paths # saving files: all information about document in pickle; document's image; # part of document for type detector; photo from document; # part of document with first name and surname after processing; # part of document with first name and surname before processing; | 2.384108 | 2 |
scikit_algo/feature_rank.py | sankar-mukherjee/CoFee | 0 | 6613467 | <reponame>sankar-mukherjee/CoFee<filename>scikit_algo/feature_rank.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 11:10:14 2015
@author: mukherjee
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, metrics
from sklearn.learning_curve import learning_curve
# read Form data
DATA_FORM_FILE = 'all-merged-cat.csv'
#rawdata = pd.read_csv(DATA_FORM_FILE, usecols=np.r_[3,5:12,13:28,81:87,108])
rawdata = pd.read_csv(DATA_FORM_FILE)
#select features
posfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[3:12]].astype(float)
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[12:14]]
lexfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[14:29]].astype(float)
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29:47]]
accoufeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[47:81]].astype(float)
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29]].astype(float)
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[13]]
# Transforming categorical feature
le = preprocessing.LabelBinarizer()
le.fit(lextypefeat)
list(le.classes_)
lextypefeat = le.transform(lextypefeat)
#----------------------------------------------------------------------------------------------------
# select feature combination
featN = np.column_stack((posfeat,accoufeat,phonfeat))
featB = np.column_stack((lexfeat,lextypefeat))
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=4)
#####------------------------------------------- Randomized PCA
##from sklearn.decomposition import RandomizedPCA
##pca = RandomizedPCA(n_components=30, whiten=True)
###
#scale = pca.fit(feat1)
#feat1 = scale.fit_transform(feat1)
feat = np.column_stack((featN,featB))
feat[np.isnan(feat)] = 0
feat[np.isinf(feat)] = 0
# select test labels
#Ytest = pd.DataFrame.as_matrix(rawdata)[:,20:26].astype(float)
label = pd.DataFrame.as_matrix(rawdata)[:,108]
#remove bad features as there is no label
scale = np.where(label == 'None')
label = np.delete(label,scale)
feat = np.delete(feat,scale,0)
#----------------------------------------------------------------------------------------------------
# Transforming categorical feature
le = preprocessing.LabelEncoder()
le.fit(label)
list(le.classes_)
label = le.transform(label)
# create traning and test data by partioning
train_set_size = int(.7*len(feat))
test_set_size = int(.3*len(feat))
XtrainPos = feat[0:train_set_size,:]
YtrainPos = label[0:train_set_size]
XtestPos = feat[train_set_size:,:]
YtestPos = label[train_set_size:]
XtrainAll = feat
#----------------------------------------------------------------------------------------------------
#normalization of features
scale = preprocessing.StandardScaler().fit(XtrainPos)
XtrainPos = scale.transform(XtrainPos)
XtestPos = scale.transform(XtestPos)
# for whole data set
scaleAll = preprocessing.StandardScaler().fit(XtrainAll)
XtrainAll = scaleAll.transform(XtrainAll)
#scale = preprocessing.MinMaxScaler()
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.MinMaxScaler()
#XtrainAll = scaleAll.fit_transform(XtrainAll)
#scale = preprocessing.Normalizer().fit(XtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.Normalizer().fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- RandomizedLogisticRegression
#from sklearn.linear_model import RandomizedLogisticRegression
#scale = RandomizedLogisticRegression()
#XtrainPos = scale.fit_transform(XtrainPos,YtrainPos)
#XtestPos = scale.transform(XtestPos)
#XtrainAll = scale.fit_transform(XtrainAll,label)
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=30)
####------------------------------------------- Randomized PCA
#from sklearn.decomposition import RandomizedPCA
#pca = RandomizedPCA(n_components=30, whiten=True)
##
##
#scale = pca.fit(XtrainPos)
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#scaleAll = pca.fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- LDA
#from sklearn.lda import LDA
#lda = LDA(n_components=4)
#scale = lda.fit(XtrainPos,YtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = lda.fit(XtrainAll,label)
#XtrainAll = scaleAll.transform(XtrainAll)
#--------------------feature Ranking---------------------------------
from sklearn.feature_selection import RFE
## SVM
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
rfe = RFE(clf, 3)
rfe = rfe.fit(XtrainAll, label)
print(rfe.support_)
print(rfe.ranking_)
#ExtraTreesClassifier
#from sklearn.ensemble import ExtraTreesClassifier
#clf = ExtraTreesClassifier()
#clf.fit(XtrainAll, label)
#print(clf.feature_importances_)
| # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 11:10:14 2015
@author: mukherjee
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, metrics
from sklearn.learning_curve import learning_curve
# read Form data
DATA_FORM_FILE = 'all-merged-cat.csv'
#rawdata = pd.read_csv(DATA_FORM_FILE, usecols=np.r_[3,5:12,13:28,81:87,108])
rawdata = pd.read_csv(DATA_FORM_FILE)
#select features
posfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[3:12]].astype(float)
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[12:14]]
lexfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[14:29]].astype(float)
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29:47]]
accoufeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[47:81]].astype(float)
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29]].astype(float)
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[13]]
# Transforming categorical feature
le = preprocessing.LabelBinarizer()
le.fit(lextypefeat)
list(le.classes_)
lextypefeat = le.transform(lextypefeat)
#----------------------------------------------------------------------------------------------------
# select feature combination
featN = np.column_stack((posfeat,accoufeat,phonfeat))
featB = np.column_stack((lexfeat,lextypefeat))
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=4)
#####------------------------------------------- Randomized PCA
##from sklearn.decomposition import RandomizedPCA
##pca = RandomizedPCA(n_components=30, whiten=True)
###
#scale = pca.fit(feat1)
#feat1 = scale.fit_transform(feat1)
feat = np.column_stack((featN,featB))
feat[np.isnan(feat)] = 0
feat[np.isinf(feat)] = 0
# select test labels
#Ytest = pd.DataFrame.as_matrix(rawdata)[:,20:26].astype(float)
label = pd.DataFrame.as_matrix(rawdata)[:,108]
#remove bad features as there is no label
scale = np.where(label == 'None')
label = np.delete(label,scale)
feat = np.delete(feat,scale,0)
#----------------------------------------------------------------------------------------------------
# Transforming categorical feature
le = preprocessing.LabelEncoder()
le.fit(label)
list(le.classes_)
label = le.transform(label)
# create traning and test data by partioning
train_set_size = int(.7*len(feat))
test_set_size = int(.3*len(feat))
XtrainPos = feat[0:train_set_size,:]
YtrainPos = label[0:train_set_size]
XtestPos = feat[train_set_size:,:]
YtestPos = label[train_set_size:]
XtrainAll = feat
#----------------------------------------------------------------------------------------------------
#normalization of features
scale = preprocessing.StandardScaler().fit(XtrainPos)
XtrainPos = scale.transform(XtrainPos)
XtestPos = scale.transform(XtestPos)
# for whole data set
scaleAll = preprocessing.StandardScaler().fit(XtrainAll)
XtrainAll = scaleAll.transform(XtrainAll)
#scale = preprocessing.MinMaxScaler()
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.MinMaxScaler()
#XtrainAll = scaleAll.fit_transform(XtrainAll)
#scale = preprocessing.Normalizer().fit(XtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.Normalizer().fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- RandomizedLogisticRegression
#from sklearn.linear_model import RandomizedLogisticRegression
#scale = RandomizedLogisticRegression()
#XtrainPos = scale.fit_transform(XtrainPos,YtrainPos)
#XtestPos = scale.transform(XtestPos)
#XtrainAll = scale.fit_transform(XtrainAll,label)
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=30)
####------------------------------------------- Randomized PCA
#from sklearn.decomposition import RandomizedPCA
#pca = RandomizedPCA(n_components=30, whiten=True)
##
##
#scale = pca.fit(XtrainPos)
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#scaleAll = pca.fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- LDA
#from sklearn.lda import LDA
#lda = LDA(n_components=4)
#scale = lda.fit(XtrainPos,YtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = lda.fit(XtrainAll,label)
#XtrainAll = scaleAll.transform(XtrainAll)
#--------------------feature Ranking---------------------------------
from sklearn.feature_selection import RFE
## SVM
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
rfe = RFE(clf, 3)
rfe = rfe.fit(XtrainAll, label)
print(rfe.support_)
print(rfe.ranking_)
#ExtraTreesClassifier
#from sklearn.ensemble import ExtraTreesClassifier
#clf = ExtraTreesClassifier()
#clf.fit(XtrainAll, label)
#print(clf.feature_importances_) | en | 0.244097 | # -*- coding: utf-8 -*- Created on Fri Mar 13 11:10:14 2015
@author: mukherjee # read Form data #rawdata = pd.read_csv(DATA_FORM_FILE, usecols=np.r_[3,5:12,13:28,81:87,108]) #select features # Transforming categorical feature #---------------------------------------------------------------------------------------------------- # select feature combination ###------------------------------------------- PCA #from sklearn.decomposition import PCA #pca = PCA(n_components=4) #####------------------------------------------- Randomized PCA ##from sklearn.decomposition import RandomizedPCA ##pca = RandomizedPCA(n_components=30, whiten=True) ### #scale = pca.fit(feat1) #feat1 = scale.fit_transform(feat1) # select test labels #Ytest = pd.DataFrame.as_matrix(rawdata)[:,20:26].astype(float) #remove bad features as there is no label #---------------------------------------------------------------------------------------------------- # Transforming categorical feature # create traning and test data by partioning #---------------------------------------------------------------------------------------------------- #normalization of features # for whole data set #scale = preprocessing.MinMaxScaler() #XtrainPos = scale.fit_transform(XtrainPos) #XtestPos = scale.transform(XtestPos) #scaleAll = preprocessing.MinMaxScaler() #XtrainAll = scaleAll.fit_transform(XtrainAll) #scale = preprocessing.Normalizer().fit(XtrainPos) #XtrainPos = scale.transform(XtrainPos) #XtestPos = scale.transform(XtestPos) #scaleAll = preprocessing.Normalizer().fit(XtrainAll) #XtrainAll = scaleAll.transform(XtrainAll) ###------------------------------------------- RandomizedLogisticRegression #from sklearn.linear_model import RandomizedLogisticRegression #scale = RandomizedLogisticRegression() #XtrainPos = scale.fit_transform(XtrainPos,YtrainPos) #XtestPos = scale.transform(XtestPos) #XtrainAll = scale.fit_transform(XtrainAll,label) ###------------------------------------------- PCA #from sklearn.decomposition import PCA #pca = PCA(n_components=30) 
####------------------------------------------- Randomized PCA #from sklearn.decomposition import RandomizedPCA #pca = RandomizedPCA(n_components=30, whiten=True) ## ## #scale = pca.fit(XtrainPos) #XtrainPos = scale.fit_transform(XtrainPos) #XtestPos = scale.fit_transform(XtestPos) #scaleAll = pca.fit(XtrainAll) #XtrainAll = scaleAll.transform(XtrainAll) ###------------------------------------------- LDA #from sklearn.lda import LDA #lda = LDA(n_components=4) #scale = lda.fit(XtrainPos,YtrainPos) #XtrainPos = scale.transform(XtrainPos) #XtestPos = scale.transform(XtestPos) #scaleAll = lda.fit(XtrainAll,label) #XtrainAll = scaleAll.transform(XtrainAll) #--------------------feature Ranking--------------------------------- ## SVM #ExtraTreesClassifier #from sklearn.ensemble import ExtraTreesClassifier #clf = ExtraTreesClassifier() #clf.fit(XtrainAll, label) #print(clf.feature_importances_) | 2.604576 | 3 |
demos/navier_stokes/GP_active_cavity.py | tsilifis/quinoa | 1 | 6613468 | <reponame>tsilifis/quinoa
import numpy as np
import kernel_py as kp
import scipy.stats as st
from scipy import linalg
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%matplotlib inline
def build_up_b(b, rho, dt, u, v, dx, dy):
b[1:-1, 1:-1] = (rho * ( (1. / dt) * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2. * dx) +
(v[2:, 1:-1] - v[0:-2, 1:-1]) / (2. * dy)) - ((u[1:-1,2:] - u[1:-1,0:-2]) / (2. * dx)) ** 2 -
2. * ((u[2:,1:-1] - u[0:-2,1:-1]) / (2. * dy)) * ((v[1:-1, 2:] - v[1:-1,0:-2]) / (2. * dx)) -
((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2. * dy))**2 ))
return b
def pressure_poisson(p, dx, dy, b):
pn = np.empty_like(p)
pn = p.copy()
for q in range(nit):
pn = p.copy()
p[1:-1, 1:-1] = ( ( (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 + (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) / (2. * (dx**2 + dy**2)) - dx**2 * dy**2 * b[1:-1,1:-1] / (2. * (dx**2 + dy**2)))
p[:,-1] = p[:, -2] ## dp/dy = 0 at x = 2
p[0, :] = p[1, :] ## dp/dy = 0 at y = 0
p[:, 0] = p[:, 1] ## dp/dx = 0 at x = 0
p[-1,:] = 0. ## p = 0 at y = 2
return p
def cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
nu = xi[-1]
for n in range(nt):
un = u.copy()
vn = v.copy()
b = build_up_b(b, rho, dt, u, v, dx, dy)
p = pressure_poisson(p, dx, dy, b)
#print p
u[1:-1, 1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * (dt / dx) * (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * (dt / dy) * (un[1:-1, 1:-1] - un[0:-2,1:-1]) -
(dt / (2.*rho*dx)) * (p[1:-1,2:] - p[1:-1,0:-2]) +
nu * ( (dt / dx**2) * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
(dt / dy**2) * (un[2:, 1:-1] - 2. * un[1:-1,1:-1] + un[0:-2, 1:-1]) ) )
v[1:-1, 1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * (dt / dx) * (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * (dt / dy) * (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -
(dt / (2.*rho*dy)) * (p[2:, 1:-1] - p[0:-2, 1:-1]) +
nu * ( (dt / dx**2) * (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
(dt / dy**2) * (vn[2:, 1:-1] - 2. * vn[1:-1, 1:-1] + vn[0:-2, 1:-1]) ) )
u[0, :] = -2 * xi[0] * np.sin(xi[2] * np.pi * np.linspace(0., 2., int(2/dx + 1)))
u[:, 0] = 0.
u[:, -1] = 0.
u[-1, :] = 2 * xi[1] * np.sin(xi[3] * np.pi * np.linspace(0., 2., int(2/dx + 1))) # set velocity on cavity lid equal to 1
v[0, :] = 0.
v[-1, :] = 0.
v[:, 0] = 0. # * np.exp(- xi[2] * np.linspace(0., 2., int(2/dx + 1)))
v[:, -1] = 0. # * np.exp(- xi[3] * np.linspace(0., 2., int(2/dx + 1)))
return u, v, p
nx = 101
ny = 101
#nt = 500
nit = 50
c = 1.
dx = 2. / (nx - 1)
dy = 2. / (ny - 1)
x = np.linspace(0, 2., nx)
y = np.linspace(0, 2., ny)
X, Y = np.meshgrid(x, y)
rho = 1.
#nu = .05
dt = .001
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
nt = 1000
dim = 5
#xi = st.uniform.rvs(size = (5,))
#xi[4] = xi[4] * 0.04 + 0.01
N_init = 20
XI = 2. * st.uniform.rvs(size = (N_init,dim)) - 1.
YI = np.zeros((N_init,1))
for i in range(XI.shape[0]):
print 'Taking initial sample : ' + str(i)
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
xi = 0.5 * (XI[i,:].copy() + 1.)
xi[-1] = 0.04 * xi[-1] + 0.01
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi)
YI[i,0] = p[-2, -1]
print YI
kern = kp.RBF(dim, 1, 1)
gp = kp.GP(XI, YI, kern)
N_quad = 300
gp.optimize()
sig = np.zeros(N_quad + 1)
sig_noise = np.zeros(N_quad + 1)
ell = np.zeros(N_quad + 1)
sig[0] = gp._kern._var
sig_noise[0] = gp._noise_var
ell[0] = gp._kern._lengthscale[0]
kern._var = sig[0]
kern._lengthscale = [ell[0]] * dim
for i in range(N_quad):
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
x_new = gp.argmaxvar((-1.,1.))
print 'New design :' + str(x_new)
print x_new.shape
xi = 0.5 * (x_new.copy() + 1.)
xi[-1] = 0.04 * xi[-1] + 0.01
print 'New input : ' + str(xi)
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi)
#y_new = collect_data(x_new).reshape((1,1))# + 0.1 * np.random.normal(size = (1,1))
y_new = p[-2, -1]
XI = np.vstack([XI, x_new])
YI = np.vstack([YI, y_new])
gp_new = kp.GP(XI, YI, kern)
gp_new._noise_var = gp._noise_var
gp_new.optimize()
#gp_new._kern._lengthscale
sig[i+1] = gp_new._kern._var
sig_noise[i+1] = gp_new._noise_var
ell[i+1] = gp_new._kern._lengthscale[0]
kern._var = sig[i+1]
kern._lengthscale = [ell[i+1]] * dim
gp = gp_new
#f, var = gp_new.predict(X_test)
if i % 50 == 0:
np.save('sig_batch_'+str(i)+'.npy', sig)
np.save('ell_batch_'+str(i)+'.npy', ell)
np.save('sig_noise_batch_'+str(i)+'.npy', sig_noise)
np.save('X_batch_'+str(i)+'.npy', XI)
np.save('Y_batch_'+str(i)+'.npy', YI)
print 'Took active data ' + str(i)
np.save('sig.npy', sig)
np.save('sig_noise.npy', sig_noise)
np.save('ell.npy', ell)
np.save('X.npy', XI)
np.save('Y.npy', YI)
#fig = plt.figure(figsize = (11, 7), dpi = 100)
# plotting the pressure field as a contour
#plt.contourf(X, Y, p, alpha = 0.5, cmap = cm.viridis)
#plt.colorbar()
# plotting the pressure field outlines
#plt.contour(X, Y, p, 30, cmap = cm.viridis)
# plotting velocity field
#plt.quiver(X[::2, fdf8:f53e:61e4::18], Y[::2, fdf8:f53e:61e4::18], u[::2, fdf8:f53e:61e4::18], v[::2, ::2])
#plt.xlabel('X')
#plt.ylabel('Y')
#plt.show()
| import numpy as np
import kernel_py as kp
import scipy.stats as st
from scipy import linalg
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%matplotlib inline
def build_up_b(b, rho, dt, u, v, dx, dy):
b[1:-1, 1:-1] = (rho * ( (1. / dt) * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2. * dx) +
(v[2:, 1:-1] - v[0:-2, 1:-1]) / (2. * dy)) - ((u[1:-1,2:] - u[1:-1,0:-2]) / (2. * dx)) ** 2 -
2. * ((u[2:,1:-1] - u[0:-2,1:-1]) / (2. * dy)) * ((v[1:-1, 2:] - v[1:-1,0:-2]) / (2. * dx)) -
((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2. * dy))**2 ))
return b
def pressure_poisson(p, dx, dy, b):
pn = np.empty_like(p)
pn = p.copy()
for q in range(nit):
pn = p.copy()
p[1:-1, 1:-1] = ( ( (pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 + (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) / (2. * (dx**2 + dy**2)) - dx**2 * dy**2 * b[1:-1,1:-1] / (2. * (dx**2 + dy**2)))
p[:,-1] = p[:, -2] ## dp/dy = 0 at x = 2
p[0, :] = p[1, :] ## dp/dy = 0 at y = 0
p[:, 0] = p[:, 1] ## dp/dx = 0 at x = 0
p[-1,:] = 0. ## p = 0 at y = 2
return p
def cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi):
un = np.empty_like(u)
vn = np.empty_like(v)
b = np.zeros((ny, nx))
nu = xi[-1]
for n in range(nt):
un = u.copy()
vn = v.copy()
b = build_up_b(b, rho, dt, u, v, dx, dy)
p = pressure_poisson(p, dx, dy, b)
#print p
u[1:-1, 1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * (dt / dx) * (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * (dt / dy) * (un[1:-1, 1:-1] - un[0:-2,1:-1]) -
(dt / (2.*rho*dx)) * (p[1:-1,2:] - p[1:-1,0:-2]) +
nu * ( (dt / dx**2) * (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
(dt / dy**2) * (un[2:, 1:-1] - 2. * un[1:-1,1:-1] + un[0:-2, 1:-1]) ) )
v[1:-1, 1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * (dt / dx) * (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
vn[1:-1, 1:-1] * (dt / dy) * (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -
(dt / (2.*rho*dy)) * (p[2:, 1:-1] - p[0:-2, 1:-1]) +
nu * ( (dt / dx**2) * (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
(dt / dy**2) * (vn[2:, 1:-1] - 2. * vn[1:-1, 1:-1] + vn[0:-2, 1:-1]) ) )
u[0, :] = -2 * xi[0] * np.sin(xi[2] * np.pi * np.linspace(0., 2., int(2/dx + 1)))
u[:, 0] = 0.
u[:, -1] = 0.
u[-1, :] = 2 * xi[1] * np.sin(xi[3] * np.pi * np.linspace(0., 2., int(2/dx + 1))) # set velocity on cavity lid equal to 1
v[0, :] = 0.
v[-1, :] = 0.
v[:, 0] = 0. # * np.exp(- xi[2] * np.linspace(0., 2., int(2/dx + 1)))
v[:, -1] = 0. # * np.exp(- xi[3] * np.linspace(0., 2., int(2/dx + 1)))
return u, v, p
nx = 101
ny = 101
#nt = 500
nit = 50
c = 1.
dx = 2. / (nx - 1)
dy = 2. / (ny - 1)
x = np.linspace(0, 2., nx)
y = np.linspace(0, 2., ny)
X, Y = np.meshgrid(x, y)
rho = 1.
#nu = .05
dt = .001
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
nt = 1000
dim = 5
#xi = st.uniform.rvs(size = (5,))
#xi[4] = xi[4] * 0.04 + 0.01
N_init = 20
XI = 2. * st.uniform.rvs(size = (N_init,dim)) - 1.
YI = np.zeros((N_init,1))
for i in range(XI.shape[0]):
print 'Taking initial sample : ' + str(i)
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
xi = 0.5 * (XI[i,:].copy() + 1.)
xi[-1] = 0.04 * xi[-1] + 0.01
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi)
YI[i,0] = p[-2, -1]
print YI
kern = kp.RBF(dim, 1, 1)
gp = kp.GP(XI, YI, kern)
N_quad = 300
gp.optimize()
sig = np.zeros(N_quad + 1)
sig_noise = np.zeros(N_quad + 1)
ell = np.zeros(N_quad + 1)
sig[0] = gp._kern._var
sig_noise[0] = gp._noise_var
ell[0] = gp._kern._lengthscale[0]
kern._var = sig[0]
kern._lengthscale = [ell[0]] * dim
for i in range(N_quad):
u = np.zeros((ny, nx))
v = np.zeros((ny, nx))
p = np.zeros((ny, nx))
b = np.zeros((ny, nx))
x_new = gp.argmaxvar((-1.,1.))
print 'New design :' + str(x_new)
print x_new.shape
xi = 0.5 * (x_new.copy() + 1.)
xi[-1] = 0.04 * xi[-1] + 0.01
print 'New input : ' + str(xi)
u, v, p = cavity_flow(nt, u, v, dt, dx, dy, p, rho, xi)
#y_new = collect_data(x_new).reshape((1,1))# + 0.1 * np.random.normal(size = (1,1))
y_new = p[-2, -1]
XI = np.vstack([XI, x_new])
YI = np.vstack([YI, y_new])
gp_new = kp.GP(XI, YI, kern)
gp_new._noise_var = gp._noise_var
gp_new.optimize()
#gp_new._kern._lengthscale
sig[i+1] = gp_new._kern._var
sig_noise[i+1] = gp_new._noise_var
ell[i+1] = gp_new._kern._lengthscale[0]
kern._var = sig[i+1]
kern._lengthscale = [ell[i+1]] * dim
gp = gp_new
#f, var = gp_new.predict(X_test)
if i % 50 == 0:
np.save('sig_batch_'+str(i)+'.npy', sig)
np.save('ell_batch_'+str(i)+'.npy', ell)
np.save('sig_noise_batch_'+str(i)+'.npy', sig_noise)
np.save('X_batch_'+str(i)+'.npy', XI)
np.save('Y_batch_'+str(i)+'.npy', YI)
print 'Took active data ' + str(i)
np.save('sig.npy', sig)
np.save('sig_noise.npy', sig_noise)
np.save('ell.npy', ell)
np.save('X.npy', XI)
np.save('Y.npy', YI)
#fig = plt.figure(figsize = (11, 7), dpi = 100)
# plotting the pressure field as a contour
#plt.contourf(X, Y, p, alpha = 0.5, cmap = cm.viridis)
#plt.colorbar()
# plotting the pressure field outlines
#plt.contour(X, Y, p, 30, cmap = cm.viridis)
# plotting velocity field
#plt.quiver(X[::2, fdf8:f53e:61e4::18], Y[::2, fdf8:f53e:61e4::18], u[::2, fdf8:f53e:61e4::18], v[::2, ::2])
#plt.xlabel('X')
#plt.ylabel('Y')
#plt.show() | en | 0.347967 | #%matplotlib inline ## dp/dy = 0 at x = 2 ## dp/dy = 0 at y = 0 ## dp/dx = 0 at x = 0 ## p = 0 at y = 2 #print p # set velocity on cavity lid equal to 1 # * np.exp(- xi[2] * np.linspace(0., 2., int(2/dx + 1))) # * np.exp(- xi[3] * np.linspace(0., 2., int(2/dx + 1))) #nt = 500 #nu = .05 #xi = st.uniform.rvs(size = (5,)) #xi[4] = xi[4] * 0.04 + 0.01 #y_new = collect_data(x_new).reshape((1,1))# + 0.1 * np.random.normal(size = (1,1)) #gp_new._kern._lengthscale #f, var = gp_new.predict(X_test) #fig = plt.figure(figsize = (11, 7), dpi = 100) # plotting the pressure field as a contour #plt.contourf(X, Y, p, alpha = 0.5, cmap = cm.viridis) #plt.colorbar() # plotting the pressure field outlines #plt.contour(X, Y, p, 30, cmap = cm.viridis) # plotting velocity field #plt.quiver(X[::2, fdf8:f53e:61e4::18], Y[::2, fdf8:f53e:61e4::18], u[::2, fdf8:f53e:61e4::18], v[::2, ::2]) #plt.xlabel('X') #plt.ylabel('Y') #plt.show() | 2.319417 | 2 |
bacpypes/bvll/upd_multiplexer.py | cbergmiller/bacpypes | 1 | 6613469 | <reponame>cbergmiller/bacpypes
import sys
import asyncio
import logging
from ..transport import UDPDirector
from ..comm import Client, Server, bind
from ..link import Address, LocalBroadcast, PDU, unpack_ip_addr
DEBUG = True
_logger = logging.getLogger(__name__)
__all__ = ['UDPMultiplexer']
class _MultiplexClient(Client):
def __init__(self, mux):
Client.__init__(self)
self.multiplexer = mux
def confirmation(self, pdu):
self.multiplexer.confirmation(self, pdu)
class _MultiplexServer(Server):
def __init__(self, mux):
Server.__init__(self)
self.multiplexer = mux
def indication(self, pdu):
self.multiplexer.indication(self, pdu)
class UDPMultiplexer:
"""
UDPMultiplexer
"""
def __init__(self, addr=None, no_broadcast=False):
if DEBUG: _logger.debug('__init__ %r noBroadcast=%r', addr, no_broadcast)
# check for some options
self.special_broadcast = False
self.no_broadcast = no_broadcast
if addr is None:
self.address = Address()
self.addrTuple = ('', 47808)
self.addrBroadcastTuple = ('255.255.255.255', 47808)
else:
# allow the address to be cast
if isinstance(addr, Address):
self.address = addr
else:
self.address = Address(addr)
# promote the normal and broadcast tuples
self.addrTuple = self.address.addrTuple
self.addrBroadcastTuple = self.address.addrBroadcastTuple
# check for no broadcasting (loopback interface)
if not self.addrBroadcastTuple:
self.no_broadcast = True
elif self.addrTuple == self.addrBroadcastTuple:
# old school broadcast address
self.addrBroadcastTuple = ('255.255.255.255', self.addrTuple[1])
else:
self.special_broadcast = True
if DEBUG:
_logger.debug(' - address: %r', self.address)
_logger.debug(' - addrTuple: %r', self.addrTuple)
_logger.debug(' - addrBroadcastTuple: %r', self.addrBroadcastTuple)
# create and bind the direct address
self.direct = _MultiplexClient(self)
self.protocol = None
self.broadcast = None
self.broadcast_protocol = None
# create and bind the Annex H and J servers
self.annexH = _MultiplexServer(self)
self.annexJ = _MultiplexServer(self)
async def create_endpoint(self):
_logger.debug('create_endpoint')
loop = asyncio.get_event_loop()
transport, protocol = await loop.create_datagram_endpoint(
UDPDirector, local_addr=self.addrTuple, allow_broadcast=True
)
self.protocol = protocol
bind(self.direct, protocol)
# create and bind the broadcast address for non-Windows
if self.special_broadcast and (not self.no_broadcast) and sys.platform in ('linux', 'darwin'):
self.broadcast = _MultiplexClient(self)
transport, protocol = await loop.create_datagram_endpoint(
UDPDirector, remote_addr=self.addrBroadcastTuple, reuse_address=True
)
self.broadcast_protocol = protocol
bind(self.direct, self.broadcast_protocol)
def close_endpoint(self):
if DEBUG: _logger.debug('close_socket')
# pass along the close to the director(s)
self.protocol.close_endpoint()
if self.broadcast_protocol:
self.broadcast_protocol.close_endpoint()
def indication(self, server, pdu):
if DEBUG: _logger.debug('indication %r %r', server, pdu)
if not self.protocol:
raise RuntimeError('UDPMultiplexer.protocol is not set')
# check for a broadcast message
if pdu.pduDestination.addrType == Address.localBroadcastAddr:
dest = self.addrBroadcastTuple
if DEBUG: _logger.debug(' - requesting local broadcast: %r', dest)
# interface might not support broadcasts
if not dest:
return
elif pdu.pduDestination.addrType == Address.localStationAddr:
dest = unpack_ip_addr(pdu.pduDestination.addrAddr)
if DEBUG: _logger.debug(' - requesting local station: %r', dest)
else:
raise RuntimeError('invalid destination address type')
self.protocol.indication(PDU(pdu, destination=dest))
def confirmation(self, client, pdu):
if DEBUG: _logger.debug('confirmation %r %r', client, pdu)
# if this came from ourselves, dump it
if pdu.pduSource == self.addrTuple:
if DEBUG: _logger.debug(' - from us!')
return
# the PDU source is a tuple, convert it to an Address instance
src = Address(pdu.pduSource)
# match the destination in case the stack needs it
if client is self.direct:
dest = self.address
elif client is self.broadcast:
dest = LocalBroadcast()
else:
raise RuntimeError('confirmation mismatch')
# must have at least one octet
if not pdu.pduData:
if DEBUG: _logger.debug(' - no data')
return
# extract the first octet
msg_type = pdu.pduData[0]
# check for the message type
if msg_type == 0x01:
if self.annexH.serverPeer:
self.annexH.response(PDU(pdu, source=src, destination=dest))
elif msg_type == 0x81:
if self.annexJ.serverPeer:
self.annexJ.response(PDU(pdu, source=src, destination=dest))
else:
_logger.warning('unsupported message')
| import sys
import asyncio
import logging
from ..transport import UDPDirector
from ..comm import Client, Server, bind
from ..link import Address, LocalBroadcast, PDU, unpack_ip_addr
DEBUG = True
_logger = logging.getLogger(__name__)
__all__ = ['UDPMultiplexer']
class _MultiplexClient(Client):
def __init__(self, mux):
Client.__init__(self)
self.multiplexer = mux
def confirmation(self, pdu):
self.multiplexer.confirmation(self, pdu)
class _MultiplexServer(Server):
def __init__(self, mux):
Server.__init__(self)
self.multiplexer = mux
def indication(self, pdu):
self.multiplexer.indication(self, pdu)
class UDPMultiplexer:
"""
UDPMultiplexer
"""
def __init__(self, addr=None, no_broadcast=False):
if DEBUG: _logger.debug('__init__ %r noBroadcast=%r', addr, no_broadcast)
# check for some options
self.special_broadcast = False
self.no_broadcast = no_broadcast
if addr is None:
self.address = Address()
self.addrTuple = ('', 47808)
self.addrBroadcastTuple = ('255.255.255.255', 47808)
else:
# allow the address to be cast
if isinstance(addr, Address):
self.address = addr
else:
self.address = Address(addr)
# promote the normal and broadcast tuples
self.addrTuple = self.address.addrTuple
self.addrBroadcastTuple = self.address.addrBroadcastTuple
# check for no broadcasting (loopback interface)
if not self.addrBroadcastTuple:
self.no_broadcast = True
elif self.addrTuple == self.addrBroadcastTuple:
# old school broadcast address
self.addrBroadcastTuple = ('255.255.255.255', self.addrTuple[1])
else:
self.special_broadcast = True
if DEBUG:
_logger.debug(' - address: %r', self.address)
_logger.debug(' - addrTuple: %r', self.addrTuple)
_logger.debug(' - addrBroadcastTuple: %r', self.addrBroadcastTuple)
# create and bind the direct address
self.direct = _MultiplexClient(self)
self.protocol = None
self.broadcast = None
self.broadcast_protocol = None
# create and bind the Annex H and J servers
self.annexH = _MultiplexServer(self)
self.annexJ = _MultiplexServer(self)
async def create_endpoint(self):
_logger.debug('create_endpoint')
loop = asyncio.get_event_loop()
transport, protocol = await loop.create_datagram_endpoint(
UDPDirector, local_addr=self.addrTuple, allow_broadcast=True
)
self.protocol = protocol
bind(self.direct, protocol)
# create and bind the broadcast address for non-Windows
if self.special_broadcast and (not self.no_broadcast) and sys.platform in ('linux', 'darwin'):
self.broadcast = _MultiplexClient(self)
transport, protocol = await loop.create_datagram_endpoint(
UDPDirector, remote_addr=self.addrBroadcastTuple, reuse_address=True
)
self.broadcast_protocol = protocol
bind(self.direct, self.broadcast_protocol)
def close_endpoint(self):
if DEBUG: _logger.debug('close_socket')
# pass along the close to the director(s)
self.protocol.close_endpoint()
if self.broadcast_protocol:
self.broadcast_protocol.close_endpoint()
def indication(self, server, pdu):
if DEBUG: _logger.debug('indication %r %r', server, pdu)
if not self.protocol:
raise RuntimeError('UDPMultiplexer.protocol is not set')
# check for a broadcast message
if pdu.pduDestination.addrType == Address.localBroadcastAddr:
dest = self.addrBroadcastTuple
if DEBUG: _logger.debug(' - requesting local broadcast: %r', dest)
# interface might not support broadcasts
if not dest:
return
elif pdu.pduDestination.addrType == Address.localStationAddr:
dest = unpack_ip_addr(pdu.pduDestination.addrAddr)
if DEBUG: _logger.debug(' - requesting local station: %r', dest)
else:
raise RuntimeError('invalid destination address type')
self.protocol.indication(PDU(pdu, destination=dest))
def confirmation(self, client, pdu):
if DEBUG: _logger.debug('confirmation %r %r', client, pdu)
# if this came from ourselves, dump it
if pdu.pduSource == self.addrTuple:
if DEBUG: _logger.debug(' - from us!')
return
# the PDU source is a tuple, convert it to an Address instance
src = Address(pdu.pduSource)
# match the destination in case the stack needs it
if client is self.direct:
dest = self.address
elif client is self.broadcast:
dest = LocalBroadcast()
else:
raise RuntimeError('confirmation mismatch')
# must have at least one octet
if not pdu.pduData:
if DEBUG: _logger.debug(' - no data')
return
# extract the first octet
msg_type = pdu.pduData[0]
# check for the message type
if msg_type == 0x01:
if self.annexH.serverPeer:
self.annexH.response(PDU(pdu, source=src, destination=dest))
elif msg_type == 0x81:
if self.annexJ.serverPeer:
self.annexJ.response(PDU(pdu, source=src, destination=dest))
else:
_logger.warning('unsupported message') | en | 0.856049 | UDPMultiplexer # check for some options # allow the address to be cast # promote the normal and broadcast tuples # check for no broadcasting (loopback interface) # old school broadcast address # create and bind the direct address # create and bind the Annex H and J servers # create and bind the broadcast address for non-Windows # pass along the close to the director(s) # check for a broadcast message # interface might not support broadcasts # if this came from ourselves, dump it # the PDU source is a tuple, convert it to an Address instance # match the destination in case the stack needs it # must have at least one octet # extract the first octet # check for the message type | 2.375339 | 2 |
agent_core/message.py | dhh1128/aries-protocol-test-suite | 1 | 6613470 | """ Define Message base class. """
from collections import UserDict
import json
import re
import uuid
from schema import Schema, Optional, Regex, SchemaError
from .utils import Semver
class InvalidMessage(Exception):
""" Thrown when message is malformed. """
MTURI_RE = re.compile(r'(.*?)([a-z0-9._-]+)/(\d[^/]*)/([a-z0-9._-]+)$')
def generate_id():
""" Generate a message id. """
return str(uuid.uuid4())
def parse_type_info(message_type_uri):
""" Parse message type for doc_uri, portocol, version, and short type.
"""
matches = MTURI_RE.match(message_type_uri)
if not matches:
raise InvalidMessage()
return matches.groups()
MESSAGE_SCHEMA = Schema({
'@type': Regex(MTURI_RE),
Optional('@id', default=generate_id): str,
Optional(str): object
})
class Message(dict):
""" Message base class.
Inherits from UserDict meaning it behaves like a dictionary.
"""
__slots__ = (
'mtc',
'doc_uri',
'protocol',
'version',
'version_info',
'short_type'
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.update(MESSAGE_SCHEMA.validate(dict(self)))
except SchemaError as err:
raise InvalidMessage('Invalid message type') from err
self.doc_uri, self.protocol, self.version, self.short_type = \
parse_type_info(self.type)
try:
self.version_info = Semver.from_str(self.version)
except ValueError as err:
raise InvalidMessage('Invalid message type version') from err
@property
def type(self):
""" Shortcut for msg['@type'] """
return self['@type']
@property
def id(self): # pylint: disable=invalid-name
""" Shortcut for msg['@id'] """
return self['@id']
@property
def qualified_protocol(self):
""" Shortcut for constructing qualified protocol identifier from
doc_uri and protocol
"""
return self.doc_uri + self.protocol
# Serialization
@staticmethod
def deserialize(serialized: str):
""" Deserialize a message from a json string. """
try:
return Message(json.loads(serialized))
except json.decoder.JSONDecodeError as err:
raise InvalidMessage('Could not deserialize message') from err
def serialize(self):
""" Serialize a message into a json string. """
return json.dumps(self)
def pretty_print(self):
return json.dumps(self, indent=2)
class Noop(Message): # pylint: disable=too-many-ancestors
""" Noop message """
def __init__(self, **kwargs):
super().__init__({
'@type': 'noop/noop/0.0/noop'
})
return_route = kwargs.get('return_route', False)
if return_route:
self['~transport']['return_route'] = 'all'
| """ Define Message base class. """
from collections import UserDict
import json
import re
import uuid
from schema import Schema, Optional, Regex, SchemaError
from .utils import Semver
class InvalidMessage(Exception):
""" Thrown when message is malformed. """
MTURI_RE = re.compile(r'(.*?)([a-z0-9._-]+)/(\d[^/]*)/([a-z0-9._-]+)$')
def generate_id():
""" Generate a message id. """
return str(uuid.uuid4())
def parse_type_info(message_type_uri):
""" Parse message type for doc_uri, portocol, version, and short type.
"""
matches = MTURI_RE.match(message_type_uri)
if not matches:
raise InvalidMessage()
return matches.groups()
MESSAGE_SCHEMA = Schema({
'@type': Regex(MTURI_RE),
Optional('@id', default=generate_id): str,
Optional(str): object
})
class Message(dict):
""" Message base class.
Inherits from UserDict meaning it behaves like a dictionary.
"""
__slots__ = (
'mtc',
'doc_uri',
'protocol',
'version',
'version_info',
'short_type'
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.update(MESSAGE_SCHEMA.validate(dict(self)))
except SchemaError as err:
raise InvalidMessage('Invalid message type') from err
self.doc_uri, self.protocol, self.version, self.short_type = \
parse_type_info(self.type)
try:
self.version_info = Semver.from_str(self.version)
except ValueError as err:
raise InvalidMessage('Invalid message type version') from err
@property
def type(self):
""" Shortcut for msg['@type'] """
return self['@type']
@property
def id(self): # pylint: disable=invalid-name
""" Shortcut for msg['@id'] """
return self['@id']
@property
def qualified_protocol(self):
""" Shortcut for constructing qualified protocol identifier from
doc_uri and protocol
"""
return self.doc_uri + self.protocol
# Serialization
@staticmethod
def deserialize(serialized: str):
""" Deserialize a message from a json string. """
try:
return Message(json.loads(serialized))
except json.decoder.JSONDecodeError as err:
raise InvalidMessage('Could not deserialize message') from err
def serialize(self):
""" Serialize a message into a json string. """
return json.dumps(self)
def pretty_print(self):
return json.dumps(self, indent=2)
class Noop(Message): # pylint: disable=too-many-ancestors
""" Noop message """
def __init__(self, **kwargs):
super().__init__({
'@type': 'noop/noop/0.0/noop'
})
return_route = kwargs.get('return_route', False)
if return_route:
self['~transport']['return_route'] = 'all'
| en | 0.58902 | Define Message base class. Thrown when message is malformed. Generate a message id. Parse message type for doc_uri, portocol, version, and short type. Message base class. Inherits from UserDict meaning it behaves like a dictionary. Shortcut for msg['@type'] # pylint: disable=invalid-name Shortcut for msg['@id'] Shortcut for constructing qualified protocol identifier from doc_uri and protocol # Serialization Deserialize a message from a json string. Serialize a message into a json string. # pylint: disable=too-many-ancestors Noop message | 2.518981 | 3 |
cloud.py | cloud322/HelloPython | 0 | 6613471 | import random
#19윤년
def isLeapYear():
year = int(input('윤년 여부 알고싶은 년도 입력'))
isleap = '윤년아님'
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
isleap = '윤년'
print('%d는 %s' %(year,isleap))
#20
import random
def lotto():
lotto = str(random.randint(100,999))
lucky = input('번호입력')
match = 0
prize = '다음에'
for i in [0, 1, 2]:
for j in [0, 1, 2]:
if (lucky[i] == lotto[j]):
match+=1
if match ==3: prize = '1등'
elif match == 2: prize = '2등'
elif match == 1: prize = '3등'
print(lucky,lotto,prize)
#계산기
def intCal():
num1 = int((input('num1')))
num2 = int((input('num2')))
fmt = '%d+%d=%d \n %d+%d=%d \n'
fmt += '%d*%d=%d \n %d/%d=%d \n'
fmt += '%d**%d=%d \n'
print(fmt % (num1,num2,num1+num2,\
num1,num2,num1+num2,\
num1,num2,num1*num2,\
num1,num2,num1/num2,\
num1,num2,num1**num2))
class SungjukV0:
def __init__(self, name, kor, eng, mat):
self.name = name
self.kor = kor
self.eng = eng
self.mat = mat
class SungJukService:
def getTotal(self,sj):
tot = sj.kor + sj.eng + sj.mat
return tot
def getAverage(self,sj):
avg = self.getTotal(sj) / 3
return avg
def getGrade(self,sj):
grdlist='FFFFFDCBAA'
var = int(self.getAverage(sj)/10)
grd = grdlist[var]
return grd
| import random
#19윤년
def isLeapYear():
year = int(input('윤년 여부 알고싶은 년도 입력'))
isleap = '윤년아님'
if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
isleap = '윤년'
print('%d는 %s' %(year,isleap))
#20
import random
def lotto():
lotto = str(random.randint(100,999))
lucky = input('번호입력')
match = 0
prize = '다음에'
for i in [0, 1, 2]:
for j in [0, 1, 2]:
if (lucky[i] == lotto[j]):
match+=1
if match ==3: prize = '1등'
elif match == 2: prize = '2등'
elif match == 1: prize = '3등'
print(lucky,lotto,prize)
#계산기
def intCal():
num1 = int((input('num1')))
num2 = int((input('num2')))
fmt = '%d+%d=%d \n %d+%d=%d \n'
fmt += '%d*%d=%d \n %d/%d=%d \n'
fmt += '%d**%d=%d \n'
print(fmt % (num1,num2,num1+num2,\
num1,num2,num1+num2,\
num1,num2,num1*num2,\
num1,num2,num1/num2,\
num1,num2,num1**num2))
class SungjukV0:
def __init__(self, name, kor, eng, mat):
self.name = name
self.kor = kor
self.eng = eng
self.mat = mat
class SungJukService:
def getTotal(self,sj):
tot = sj.kor + sj.eng + sj.mat
return tot
def getAverage(self,sj):
avg = self.getTotal(sj) / 3
return avg
def getGrade(self,sj):
grdlist='FFFFFDCBAA'
var = int(self.getAverage(sj)/10)
grd = grdlist[var]
return grd
| ko | 0.99879 | #19윤년 #20 #계산기 | 3.373371 | 3 |
components/idf_test/unit_test/TestCaseScript/IDFUnitTest/UnitTest.py | ulfalizer/esp-idf-1 | 46 | 6613472 | <reponame>ulfalizer/esp-idf-1
# -*- coding: utf-8 -*-
import re
import time
from TCAction import PerformanceTCBase
from TCAction import TCActionBase
from NativeLog import NativeLog
class UnitTest(PerformanceTCBase.PerformanceTCBase):
def __init__(self, test_case, test_env, cmd_set, timeout=30, log_path=TCActionBase.LOG_PATH):
PerformanceTCBase.PerformanceTCBase.__init__(self, test_case, test_env, cmd_set=cmd_set,
timeout=timeout, log_path=log_path)
self.case = cmd_set[1][0]
self.test_timeout = 20
self.reset_reason = test_case['reset']
self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
def send_commands(self):
self.flush_data("UT1")
try:
self.serial_write_line("UT1", "\"" + self.case + "\"")
data = ""
for _ in range(self.timeout):
time.sleep(1) # wait for test to run before reading result
data += self.serial_read_data("UT1")
if re.search('[^0].* Tests 0 F',
data): # check that number of tests run != 0 and number of tests failed == 0
self.set_result("Succeed")
break
else:
self.set_result("Fail")
reset_list = self.reset_reason.split(",") if self.reset_reason else ''
pattern = re.compile(r"(ets [\w]{3} [\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^(\)]*\([\w].*?\))"
r"|(Guru Meditation Error: Core [\d] panic'ed \([\w].*?\))")
reset_exception = pattern.findall(data)
if reset_list and len(reset_list) == len(reset_exception):
for i, reset in enumerate(reset_list):
temp_reset = reset_exception[i]
if reset not in "".join(temp_reset):
self.set_result("Fail")
break
else:
self.set_result("Succeed")
except StandardError, e:
NativeLog.add_exception_log(e)
def execute(self):
TCActionBase.TCActionBase.execute(self)
self.send_commands()
def main():
pass
if __name__ == '__main__':
pass
| # -*- coding: utf-8 -*-
import re
import time
from TCAction import PerformanceTCBase
from TCAction import TCActionBase
from NativeLog import NativeLog
class UnitTest(PerformanceTCBase.PerformanceTCBase):
def __init__(self, test_case, test_env, cmd_set, timeout=30, log_path=TCActionBase.LOG_PATH):
PerformanceTCBase.PerformanceTCBase.__init__(self, test_case, test_env, cmd_set=cmd_set,
timeout=timeout, log_path=log_path)
self.case = cmd_set[1][0]
self.test_timeout = 20
self.reset_reason = test_case['reset']
self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
def send_commands(self):
self.flush_data("UT1")
try:
self.serial_write_line("UT1", "\"" + self.case + "\"")
data = ""
for _ in range(self.timeout):
time.sleep(1) # wait for test to run before reading result
data += self.serial_read_data("UT1")
if re.search('[^0].* Tests 0 F',
data): # check that number of tests run != 0 and number of tests failed == 0
self.set_result("Succeed")
break
else:
self.set_result("Fail")
reset_list = self.reset_reason.split(",") if self.reset_reason else ''
pattern = re.compile(r"(ets [\w]{3} [\d]{1,2} [\d]{4} [\d]{2}:[\d]{2}:[\d]{2}[^(\)]*\([\w].*?\))"
r"|(Guru Meditation Error: Core [\d] panic'ed \([\w].*?\))")
reset_exception = pattern.findall(data)
if reset_list and len(reset_list) == len(reset_exception):
for i, reset in enumerate(reset_list):
temp_reset = reset_exception[i]
if reset not in "".join(temp_reset):
self.set_result("Fail")
break
else:
self.set_result("Succeed")
except StandardError, e:
NativeLog.add_exception_log(e)
def execute(self):
TCActionBase.TCActionBase.execute(self)
self.send_commands()
def main():
pass
if __name__ == '__main__':
pass | en | 0.921451 | # -*- coding: utf-8 -*- # wait for test to run before reading result # check that number of tests run != 0 and number of tests failed == 0 | 2.151171 | 2 |
DLtorch/model/base.py | A-LinCui/DLtorch | 3 | 6613473 | # -*- coding: utf-8 -*-
import abc
import torch.nn as nn
from DLtorch.base import BaseComponent
import DLtorch.utils.torch_utils as torch_utils
class BaseModel(BaseComponent):
def __init__(self):
super(BaseModel, self).__init__()
self.logger.info("Module Constructed.")
self.logger.info("Parameters: {:.5f}M".format(torch_utils.get_params(self, only_trainable=False) / 1.e6)) | # -*- coding: utf-8 -*-
import abc
import torch.nn as nn
from DLtorch.base import BaseComponent
import DLtorch.utils.torch_utils as torch_utils
class BaseModel(BaseComponent):
def __init__(self):
super(BaseModel, self).__init__()
self.logger.info("Module Constructed.")
self.logger.info("Parameters: {:.5f}M".format(torch_utils.get_params(self, only_trainable=False) / 1.e6)) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.25625 | 2 |
src/project/app1/tests.py | soulseen/devops-python-sample | 3 | 6613474 | <reponame>soulseen/devops-python-sample
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, Client
class MyClassTestCase(TestCase):
def SetUp(self):
pass
def test_my_func(self):
print('[OK] Test running')
class MyAppTests(TestCase):
def setUp(self):
super(MyAppTests, self).setUp()
self.client = Client(enforce_csrf_checks=True)
def test_home(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, Client
class MyClassTestCase(TestCase):
def SetUp(self):
pass
def test_my_func(self):
print('[OK] Test running')
class MyAppTests(TestCase):
def setUp(self):
super(MyAppTests, self).setUp()
self.client = Client(enforce_csrf_checks=True)
def test_home(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.215078 | 2 |
telefones/api/service.py | projeto-agro-tcc/osvaldo-backend | 0 | 6613475 | <gh_stars>0
from rest_framework import status
from rest_framework.exceptions import ValidationError, APIException
from telefones.models import Telefone
from utils.exceptions.catalogo_exceptions import CustomValidation
class TelefoneService:
def __init__(self, residencial=None, celular=None, outro=None):
self.residencial = residencial
self.celular = celular
self.outro = outro
def from_dto(self, objdto):
try:
telefones = Telefone()
telefones.residencial = objdto['residencial']
telefones.celular = objdto['celular']
telefones.outro = objdto['outro']
return telefones
except:
raise CustomValidation("Erro ao parse telefones", 'detail', status_code=status.HTTP_400_BAD_REQUEST)
def from_dto_update(self, objdto, telefone):
try:
telefones = Telefone.objects.filter(id=telefone.id)[0]
telefones.residencial = objdto['residencial']
telefones.celular = objdto['celular']
telefones.outro = objdto['outro']
return telefones
except:
raise CustomValidation("Erro ao parse telefones", 'detail', status_code=status.HTTP_400_BAD_REQUEST)
def save_telefones(self, telefones):
try:
telefones.save()
except:
raise CustomValidation("Erro ao salvar telefone", 'detail', status_code=status.HTTP_409_CONFLICT)
| from rest_framework import status
from rest_framework.exceptions import ValidationError, APIException
from telefones.models import Telefone
from utils.exceptions.catalogo_exceptions import CustomValidation
class TelefoneService:
def __init__(self, residencial=None, celular=None, outro=None):
self.residencial = residencial
self.celular = celular
self.outro = outro
def from_dto(self, objdto):
try:
telefones = Telefone()
telefones.residencial = objdto['residencial']
telefones.celular = objdto['celular']
telefones.outro = objdto['outro']
return telefones
except:
raise CustomValidation("Erro ao parse telefones", 'detail', status_code=status.HTTP_400_BAD_REQUEST)
def from_dto_update(self, objdto, telefone):
try:
telefones = Telefone.objects.filter(id=telefone.id)[0]
telefones.residencial = objdto['residencial']
telefones.celular = objdto['celular']
telefones.outro = objdto['outro']
return telefones
except:
raise CustomValidation("Erro ao parse telefones", 'detail', status_code=status.HTTP_400_BAD_REQUEST)
def save_telefones(self, telefones):
try:
telefones.save()
except:
raise CustomValidation("Erro ao salvar telefone", 'detail', status_code=status.HTTP_409_CONFLICT) | none | 1 | 2.228183 | 2 | |
processor/tracker.py | Mask0913/Driver_monitor | 0 | 6613476 | import dlib
import cv2
def plot_bboxes(image, bboxes, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(
0.002 * (image.shape[0] + image.shape[1]) / 2) + 1 # line/font thickness
for (x1, y1, x2, y2, cls_id, pos_id) in bboxes:
if cls_id in ['smoke', 'phone', 'eat']:
color = (0, 0, 255)
else:
color = (0, 255, 0)
if cls_id == 'eat':
cls_id = 'eat-drink'
c1, c2 = (x1, y1), (x2, y2)
# cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
# tf = max(tl - 1, 1) # font thickness
# t_size = cv2.getTextSize(cls_id, 0, fontScale=tl / 3, thickness=tf)[0]
# c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
# cv2.rectangle(image, c1, c2, color, -1, cv2.LINE_AA) # filled
# cv2.putText(image, '{} ID-{}'.format(cls_id, pos_id), (c1[0], c1[1] - 2), 0, tl / 3,
# [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
return image
def update_tracker(target_detector, image):
raw = image.copy()
if target_detector.frameCounter > 2e+4:
target_detector.frameCounter = 0
faceIDtoDelete = []
for faceID in target_detector.faceTracker.keys():
trackingQuality = target_detector.faceTracker[faceID].update(image)
if trackingQuality < 8:
faceIDtoDelete.append(faceID)
for faceID in faceIDtoDelete:
target_detector.faceTracker.pop(faceID, None)
target_detector.faceLocation1.pop(faceID, None)
target_detector.faceLocation2.pop(faceID, None)
target_detector.faceClasses.pop(faceID, None)
new_faces = []
if not (target_detector.frameCounter % target_detector.stride):
_, bboxes = target_detector.detect(image)
for (x1, y1, x2, y2, cls_id, _) in bboxes:
x = int(x1)
y = int(y1)
w = int(x2-x1)
h = int(y2-y1)
x_bar = x + 0.5 * w
y_bar = y + 0.5 * h
matchCarID = None
for faceID in target_detector.faceTracker.keys():
trackedPosition = target_detector.faceTracker[faceID].get_position(
)
t_x = int(trackedPosition.left())
t_y = int(trackedPosition.top())
t_w = int(trackedPosition.width())
t_h = int(trackedPosition.height())
t_x_bar = t_x + 0.5 * t_w
t_y_bar = t_y + 0.5 * t_h
if t_x <= x_bar <= (t_x + t_w) and t_y <= y_bar <= (t_y + t_h):
if x <= t_x_bar <= (x + w) and y <= t_y_bar <= (y + h):
matchCarID = faceID
if matchCarID is None:
# 新出现的目标
tracker = dlib.correlation_tracker()
tracker.start_track(
image, dlib.rectangle(x, y, x + w, y + h))
target_detector.faceTracker[target_detector.currentCarID] = tracker
target_detector.faceLocation1[target_detector.currentCarID] = [
x, y, w, h]
matchCarID = target_detector.currentCarID
target_detector.currentCarID = target_detector.currentCarID + 1
if cls_id == 'face':
pad_x = int(w * 0.15)
pad_y = int(h * 0.15)
if x > pad_x:
x = x-pad_x
if y > pad_y:
y = y-pad_y
face = raw[y:y+h+pad_y*2, x:x+w+pad_x*2]
new_faces.append((face, matchCarID))
target_detector.faceClasses[matchCarID] = cls_id
bboxes2draw = []
face_bboxes = []
for faceID in target_detector.faceTracker.keys():
trackedPosition = target_detector.faceTracker[faceID].get_position()
t_x = int(trackedPosition.left())
t_y = int(trackedPosition.top())
t_w = int(trackedPosition.width())
t_h = int(trackedPosition.height())
cls_id = target_detector.faceClasses[faceID]
target_detector.faceLocation2[faceID] = [t_x, t_y, t_w, t_h]
bboxes2draw.append(
(t_x, t_y, t_x+t_w, t_y+t_h, cls_id, faceID)
)
if cls_id == 'face':
face_bboxes.append((t_x, t_y, t_x+t_w, t_y+t_h))
image = plot_bboxes(image, bboxes2draw)
print(bboxes2draw)
return image, new_faces, face_bboxes
| import dlib
import cv2
def plot_bboxes(image, bboxes, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(
0.002 * (image.shape[0] + image.shape[1]) / 2) + 1 # line/font thickness
for (x1, y1, x2, y2, cls_id, pos_id) in bboxes:
if cls_id in ['smoke', 'phone', 'eat']:
color = (0, 0, 255)
else:
color = (0, 255, 0)
if cls_id == 'eat':
cls_id = 'eat-drink'
c1, c2 = (x1, y1), (x2, y2)
# cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
# tf = max(tl - 1, 1) # font thickness
# t_size = cv2.getTextSize(cls_id, 0, fontScale=tl / 3, thickness=tf)[0]
# c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
# cv2.rectangle(image, c1, c2, color, -1, cv2.LINE_AA) # filled
# cv2.putText(image, '{} ID-{}'.format(cls_id, pos_id), (c1[0], c1[1] - 2), 0, tl / 3,
# [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
return image
def update_tracker(target_detector, image):
raw = image.copy()
if target_detector.frameCounter > 2e+4:
target_detector.frameCounter = 0
faceIDtoDelete = []
for faceID in target_detector.faceTracker.keys():
trackingQuality = target_detector.faceTracker[faceID].update(image)
if trackingQuality < 8:
faceIDtoDelete.append(faceID)
for faceID in faceIDtoDelete:
target_detector.faceTracker.pop(faceID, None)
target_detector.faceLocation1.pop(faceID, None)
target_detector.faceLocation2.pop(faceID, None)
target_detector.faceClasses.pop(faceID, None)
new_faces = []
if not (target_detector.frameCounter % target_detector.stride):
_, bboxes = target_detector.detect(image)
for (x1, y1, x2, y2, cls_id, _) in bboxes:
x = int(x1)
y = int(y1)
w = int(x2-x1)
h = int(y2-y1)
x_bar = x + 0.5 * w
y_bar = y + 0.5 * h
matchCarID = None
for faceID in target_detector.faceTracker.keys():
trackedPosition = target_detector.faceTracker[faceID].get_position(
)
t_x = int(trackedPosition.left())
t_y = int(trackedPosition.top())
t_w = int(trackedPosition.width())
t_h = int(trackedPosition.height())
t_x_bar = t_x + 0.5 * t_w
t_y_bar = t_y + 0.5 * t_h
if t_x <= x_bar <= (t_x + t_w) and t_y <= y_bar <= (t_y + t_h):
if x <= t_x_bar <= (x + w) and y <= t_y_bar <= (y + h):
matchCarID = faceID
if matchCarID is None:
# 新出现的目标
tracker = dlib.correlation_tracker()
tracker.start_track(
image, dlib.rectangle(x, y, x + w, y + h))
target_detector.faceTracker[target_detector.currentCarID] = tracker
target_detector.faceLocation1[target_detector.currentCarID] = [
x, y, w, h]
matchCarID = target_detector.currentCarID
target_detector.currentCarID = target_detector.currentCarID + 1
if cls_id == 'face':
pad_x = int(w * 0.15)
pad_y = int(h * 0.15)
if x > pad_x:
x = x-pad_x
if y > pad_y:
y = y-pad_y
face = raw[y:y+h+pad_y*2, x:x+w+pad_x*2]
new_faces.append((face, matchCarID))
target_detector.faceClasses[matchCarID] = cls_id
bboxes2draw = []
face_bboxes = []
for faceID in target_detector.faceTracker.keys():
trackedPosition = target_detector.faceTracker[faceID].get_position()
t_x = int(trackedPosition.left())
t_y = int(trackedPosition.top())
t_w = int(trackedPosition.width())
t_h = int(trackedPosition.height())
cls_id = target_detector.faceClasses[faceID]
target_detector.faceLocation2[faceID] = [t_x, t_y, t_w, t_h]
bboxes2draw.append(
(t_x, t_y, t_x+t_w, t_y+t_h, cls_id, faceID)
)
if cls_id == 'face':
face_bboxes.append((t_x, t_y, t_x+t_w, t_y+t_h))
image = plot_bboxes(image, bboxes2draw)
print(bboxes2draw)
return image, new_faces, face_bboxes
| en | 0.40587 | # Plots one bounding box on image img # line/font thickness # cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) # tf = max(tl - 1, 1) # font thickness # t_size = cv2.getTextSize(cls_id, 0, fontScale=tl / 3, thickness=tf)[0] # c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 # cv2.rectangle(image, c1, c2, color, -1, cv2.LINE_AA) # filled # cv2.putText(image, '{} ID-{}'.format(cls_id, pos_id), (c1[0], c1[1] - 2), 0, tl / 3, # [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) # 新出现的目标 | 2.40713 | 2 |
Codes/A001_MetaSGD-CL_pMNIST/Az05_ERMetaSGD.py | Nic5472K/MetaLearnCC2021_AAAI_MetaSGD-CL | 1 | 6613477 | ###===###
# Coder: <NAME>
# Affiliation: The Australian National University
#===
# Code purpose:
# Supporting script, the MetaSGD-CL meta-learner nueral optimiser
###===###
import numpy as np
import math
from functools import reduce
from operator import mul
#---
import torch
import torch.nn as nn
import torch.nn.functional as Fnc
import torch.optim as optim
from torch.autograd import Variable
###===###
# ER_MSGD
# Experience Replay with Meta-SGD
class ER_MSGD(nn.Module):
###===###
def __init__(self, model):
super(ER_MSGD, self).__init__()
###===###
# keep a reference of the base line model for clean coding
self.RefModel = model
###===###
# save a copy of the total parametric count
Tot_P_Count = 0
for params in self.RefModel.model.parameters():
Tot_P_Count += params.shape.numel()
#---
self.Tot_P_Count = Tot_P_Count
###===###
# unroll the neural optimiser
# to prepare for updating the neural optimiser
# unlike the LSTM-optimiser of L2L by gradient descent by gd
# MetaSGD is unrolled once every single iteration
# note, this unroll occurs before a new mini-batch is fed
def Unroll(self, model=None):
# to remove the old reference off the graph
self.RefModel.reset()
# and update its most recent weights
self.RefModel.copy_params_from(model)
###===###
# creates new meta learning rates
def UpdateMetaLR(self, TaskID):
# whenever encountering a new task
# create a new learning rate per parameter
# initialised with value 0.01
newLR = torch.ones(self.Tot_P_Count).cuda() * 0.01
setattr(self,
'LR_Task_{}'.format(TaskID),
nn.Parameter(newLR)
)
# if the task at hand is not the first task (task 0)
if TaskID > 0:
# remove the old task lr using .data
oldLR = getattr(self, 'LR_Task_{}'.format(TaskID - 1)).cpu().detach().data
oldLR = torch.tensor(oldLR).cuda()
exec('del self.LR_Task_{}'.format(TaskID - 1))
setattr(self,
'LR_Task_{}'.format(TaskID - 1),
oldLR
)
###===###
# the main code for using MetaSGD to update the base learner
def UpdateTransfer(self,
CurOptimisee,
TaskID,
new_grad,
My_BS,
coTrain):
###===###
# grab all the native gradients
grads = []
for module in CurOptimisee.children():
grads.append(module._parameters['weight'].grad.data.view(-1))
grads.append(module._parameters['bias'].grad.data.view(-1))
###===###
# grab all the existing parametric values
flat_params = self.RefModel.get_flat_params()
###===###
# see Equation (9) in our paper where
# \theta_{t+1} =
# \theta_t - \beta_V L_V
# - \sum \beta_u \L_u
#---
# let us first execute the
# ( \theta_t - \beta_V L_V ) part
# grab the current lr
CUR_LR = getattr(self, 'LR_Task_{}'.format(TaskID))
# and compare it with an upper bound kappa (see Equation (10) )
CUR_LR = torch.min(CUR_LR, torch.ones_like(CUR_LR) * 0.02)
# the first part of the update
flat_params = flat_params - CUR_LR * torch.cat(grads)
#---
# the remaining part of the update
# ( - \sum \beta_u \L_u )
# for all other observed tasks
if TaskID > 0:
# go into each past task
for PastID in range(TaskID):
# look for the past trained and frozen learning rates
cur_LR = getattr(self, 'LR_Task_{}'.format(PastID + 1))
# look for their corresponding gradient
cur_grad = new_grad[PastID + 1]
# compare with kappa (see Equation (10) )
# and prevent over-parametrisation (see Equation (11))
cur_LR = torch.min(cur_LR, torch.ones_like(cur_LR) * 0.02) / (TaskID + 1)
# the second part of the update
flat_params = flat_params - cur_LR * cur_grad
###===###
# now copy the updated parameters back to the non-referential learner
self.RefModel.set_flat_params(flat_params)
self.RefModel.copy_params_to(CurOptimisee)
return self.RefModel.model
###===###
# a nice set of utility functions
class RefMode:
def __init__(self, model):
self.model = model
def reset(self):
for module in self.model.children():
if isinstance(module, nn.Linear):
module._parameters['weight'] = Variable(
module._parameters['weight'].data)
module._parameters['bias'] = Variable(
module._parameters['bias'].data)
if isinstance(module, nn.Conv2d):
module._parameters['weight'] = Variable(
module._parameters['weight'].data)
def get_flat_params(self):
params = []
for module in self.model.children():
if isinstance(module, nn.Linear):
params.append(module._parameters['weight'].view(-1))
params.append(module._parameters['bias'].view(-1))
if isinstance(module, nn.Conv2d):
params.append(module._parameters['weight'].view(-1))
return torch.cat(params)
def set_flat_params(self, flat_params):
offset = 0
for i, module in enumerate(self.model.children()):
if isinstance(module, nn.Linear):
weight_shape = module._parameters['weight'].size()
bias_shape = module._parameters['bias'].size()
weight_flat_size = reduce(mul, weight_shape, 1)
bias_flat_size = reduce(mul, bias_shape, 1)
module._parameters['weight'] = flat_params[
offset:offset + weight_flat_size].view(*weight_shape)
module._parameters['bias'] = flat_params[
offset + weight_flat_size:offset + weight_flat_size + bias_flat_size].view(*bias_shape)
offset += weight_flat_size + bias_flat_size
if isinstance(module, nn.Conv2d):
weight_shape = module._parameters['weight'].size()
weight_flat_size = reduce(mul, weight_shape, 1)
module._parameters['weight'] = flat_params[
offset:offset + weight_flat_size].view(*weight_shape)
offset += weight_flat_size
def copy_params_from(self, model):
for modelA, modelB in zip(self.model.parameters(), model.parameters()):
modelA.data.copy_(modelB.data)
def copy_params_to(self, model):
for modelA, modelB in zip(self.model.parameters(), model.parameters()):
modelB.data.copy_(modelA.data)
| ###===###
# Coder: <NAME>
# Affiliation: The Australian National University
#===
# Code purpose:
# Supporting script, the MetaSGD-CL meta-learner nueral optimiser
###===###
import numpy as np
import math
from functools import reduce
from operator import mul
#---
import torch
import torch.nn as nn
import torch.nn.functional as Fnc
import torch.optim as optim
from torch.autograd import Variable
###===###
# ER_MSGD
# Experience Replay with Meta-SGD
class ER_MSGD(nn.Module):
###===###
def __init__(self, model):
super(ER_MSGD, self).__init__()
###===###
# keep a reference of the base line model for clean coding
self.RefModel = model
###===###
# save a copy of the total parametric count
Tot_P_Count = 0
for params in self.RefModel.model.parameters():
Tot_P_Count += params.shape.numel()
#---
self.Tot_P_Count = Tot_P_Count
###===###
# unroll the neural optimiser
# to prepare for updating the neural optimiser
# unlike the LSTM-optimiser of L2L by gradient descent by gd
# MetaSGD is unrolled once every single iteration
# note, this unroll occurs before a new mini-batch is fed
def Unroll(self, model=None):
# to remove the old reference off the graph
self.RefModel.reset()
# and update its most recent weights
self.RefModel.copy_params_from(model)
###===###
# creates new meta learning rates
def UpdateMetaLR(self, TaskID):
# whenever encountering a new task
# create a new learning rate per parameter
# initialised with value 0.01
newLR = torch.ones(self.Tot_P_Count).cuda() * 0.01
setattr(self,
'LR_Task_{}'.format(TaskID),
nn.Parameter(newLR)
)
# if the task at hand is not the first task (task 0)
if TaskID > 0:
# remove the old task lr using .data
oldLR = getattr(self, 'LR_Task_{}'.format(TaskID - 1)).cpu().detach().data
oldLR = torch.tensor(oldLR).cuda()
exec('del self.LR_Task_{}'.format(TaskID - 1))
setattr(self,
'LR_Task_{}'.format(TaskID - 1),
oldLR
)
###===###
# the main code for using MetaSGD to update the base learner
def UpdateTransfer(self,
CurOptimisee,
TaskID,
new_grad,
My_BS,
coTrain):
###===###
# grab all the native gradients
grads = []
for module in CurOptimisee.children():
grads.append(module._parameters['weight'].grad.data.view(-1))
grads.append(module._parameters['bias'].grad.data.view(-1))
###===###
# grab all the existing parametric values
flat_params = self.RefModel.get_flat_params()
###===###
# see Equation (9) in our paper where
# \theta_{t+1} =
# \theta_t - \beta_V L_V
# - \sum \beta_u \L_u
#---
# let us first execute the
# ( \theta_t - \beta_V L_V ) part
# grab the current lr
CUR_LR = getattr(self, 'LR_Task_{}'.format(TaskID))
# and compare it with an upper bound kappa (see Equation (10) )
CUR_LR = torch.min(CUR_LR, torch.ones_like(CUR_LR) * 0.02)
# the first part of the update
flat_params = flat_params - CUR_LR * torch.cat(grads)
#---
# the remaining part of the update
# ( - \sum \beta_u \L_u )
# for all other observed tasks
if TaskID > 0:
# go into each past task
for PastID in range(TaskID):
# look for the past trained and frozen learning rates
cur_LR = getattr(self, 'LR_Task_{}'.format(PastID + 1))
# look for their corresponding gradient
cur_grad = new_grad[PastID + 1]
# compare with kappa (see Equation (10) )
# and prevent over-parametrisation (see Equation (11))
cur_LR = torch.min(cur_LR, torch.ones_like(cur_LR) * 0.02) / (TaskID + 1)
# the second part of the update
flat_params = flat_params - cur_LR * cur_grad
###===###
# now copy the updated parameters back to the non-referential learner
self.RefModel.set_flat_params(flat_params)
self.RefModel.copy_params_to(CurOptimisee)
return self.RefModel.model
###===###
# a nice set of utility functions
class RefMode:
def __init__(self, model):
self.model = model
def reset(self):
for module in self.model.children():
if isinstance(module, nn.Linear):
module._parameters['weight'] = Variable(
module._parameters['weight'].data)
module._parameters['bias'] = Variable(
module._parameters['bias'].data)
if isinstance(module, nn.Conv2d):
module._parameters['weight'] = Variable(
module._parameters['weight'].data)
def get_flat_params(self):
params = []
for module in self.model.children():
if isinstance(module, nn.Linear):
params.append(module._parameters['weight'].view(-1))
params.append(module._parameters['bias'].view(-1))
if isinstance(module, nn.Conv2d):
params.append(module._parameters['weight'].view(-1))
return torch.cat(params)
def set_flat_params(self, flat_params):
offset = 0
for i, module in enumerate(self.model.children()):
if isinstance(module, nn.Linear):
weight_shape = module._parameters['weight'].size()
bias_shape = module._parameters['bias'].size()
weight_flat_size = reduce(mul, weight_shape, 1)
bias_flat_size = reduce(mul, bias_shape, 1)
module._parameters['weight'] = flat_params[
offset:offset + weight_flat_size].view(*weight_shape)
module._parameters['bias'] = flat_params[
offset + weight_flat_size:offset + weight_flat_size + bias_flat_size].view(*bias_shape)
offset += weight_flat_size + bias_flat_size
if isinstance(module, nn.Conv2d):
weight_shape = module._parameters['weight'].size()
weight_flat_size = reduce(mul, weight_shape, 1)
module._parameters['weight'] = flat_params[
offset:offset + weight_flat_size].view(*weight_shape)
offset += weight_flat_size
def copy_params_from(self, model):
for modelA, modelB in zip(self.model.parameters(), model.parameters()):
modelA.data.copy_(modelB.data)
def copy_params_to(self, model):
for modelA, modelB in zip(self.model.parameters(), model.parameters()):
modelB.data.copy_(modelA.data)
| en | 0.672637 | ###===### # Coder: <NAME> # Affiliation: The Australian National University #=== # Code purpose: # Supporting script, the MetaSGD-CL meta-learner nueral optimiser ###===### #--- ###===### # ER_MSGD # Experience Replay with Meta-SGD ###===### ###===### # keep a reference of the base line model for clean coding ###===### # save a copy of the total parametric count #--- ###===### # unroll the neural optimiser # to prepare for updating the neural optimiser # unlike the LSTM-optimiser of L2L by gradient descent by gd # MetaSGD is unrolled once every single iteration # note, this unroll occurs before a new mini-batch is fed # to remove the old reference off the graph # and update its most recent weights ###===### # creates new meta learning rates # whenever encountering a new task # create a new learning rate per parameter # initialised with value 0.01 # if the task at hand is not the first task (task 0) # remove the old task lr using .data ###===### # the main code for using MetaSGD to update the base learner ###===### # grab all the native gradients ###===### # grab all the existing parametric values ###===### # see Equation (9) in our paper where # \theta_{t+1} = # \theta_t - \beta_V L_V # - \sum \beta_u \L_u #--- # let us first execute the # ( \theta_t - \beta_V L_V ) part # grab the current lr # and compare it with an upper bound kappa (see Equation (10) ) # the first part of the update #--- # the remaining part of the update # ( - \sum \beta_u \L_u ) # for all other observed tasks # go into each past task # look for the past trained and frozen learning rates # look for their corresponding gradient # compare with kappa (see Equation (10) ) # and prevent over-parametrisation (see Equation (11)) # the second part of the update ###===### # now copy the updated parameters back to the non-referential learner ###===### # a nice set of utility functions | 2.036713 | 2 |
DataPrep/rasterize.py | david9dragon9/AIR | 16 | 6613478 | import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
from ..utils import get_corners_in_world_coordinates
from ..utils import transform_points
from ..utils import transform_matrix
from ..utils import road_segment_color
CV2_SHIFT = 8
CV2_SHIFT_VALUE = 2 ** CV2_SHIFT
def rasterize(parsed):
"""
Parameters:
parsed: a parsed example
Returns:
batch_images: a nparray of rasterized images of shape(B, 224,448, 3) dtype = float32
"""
decoded_example = parsed
past_states = tf.stack([
decoded_example['state/past/x'],
decoded_example['state/past/y'],
decoded_example['state/past/length'],
decoded_example['state/past/width'],
decoded_example['state/past/bbox_yaw']
], -1)
cur_states = tf.stack([
decoded_example['state/current/x'],
decoded_example['state/current/y'],
decoded_example['state/current/length'],
decoded_example['state/current/width'],
decoded_example['state/current/bbox_yaw']
], -1)
states = tf.concat([past_states, cur_states], axis = 1)
past_is_valid = decoded_example['state/past/valid'] > 0
current_is_valid = decoded_example['state/current/valid'] > 0
is_valid = tf.concat([past_is_valid, current_is_valid], axis = 1)
is_valid = tf.reduce_any(is_valid, 1)
valid_states = tf.boolean_mask(states, is_valid)
tracks_to_predict = parsed['state/tracks_to_predict']
current_is_valid = tf.squeeze(current_is_valid, axis = 1)
orig_to_valid_map = (tf.cumsum(tf.cast(is_valid, dtype = tf.int32)) - 1).numpy()
tracks = tf.where(tracks_to_predict > 0)
tracks = tracks.numpy().reshape(-1)
current_is_valid = current_is_valid.numpy()
r_valid_states = tf.transpose(valid_states, perm = [1,0,2]) # (11,58,5)
r_valid_states = tf.reshape(r_valid_states, (-1,5))
corners = get_corners_in_world_coordinates(r_valid_states) # (58*11, 4, 2)
ego_info = {}
current_x = parsed['state/current/x'].numpy().reshape(-1)
current_y = parsed['state/current/y'].numpy().reshape(-1)
current_yaw = parsed['state/current/bbox_yaw'].numpy().reshape(-1)
# Prepare the road data
xyz_road = parsed['roadgraph_samples/xyz']
is_valid_road = parsed['roadgraph_samples/valid']
road_type = parsed['roadgraph_samples/type']
xy_road = xyz_road[:,:2]
is_valid_road = tf.squeeze(is_valid_road)
valid_xy_road = tf.boolean_mask(xy_road, is_valid_road)
dir_road = parsed['roadgraph_samples/dir']
dir_xy_road = dir_road[:, :2]
valid_dir_xy_road = tf.boolean_mask(dir_xy_road, is_valid_road)
valid_road_type = np.squeeze(tf.boolean_mask(road_type, is_valid_road).numpy())
road_ids = np.squeeze(tf.boolean_mask(parsed['roadgraph_samples/id'], is_valid_road).numpy())
valid_xy_plus_dir = valid_xy_road + valid_dir_xy_road
valid_xy_plus_dir = valid_xy_plus_dir.numpy()
valid_xy_road = valid_xy_road.numpy()
tl_state = parsed['traffic_light_state/current/state']
tl_ids = parsed['traffic_light_state/current/id']
tl_valid = parsed['traffic_light_state/current/valid']
valid_tl_states = tf.boolean_mask(tl_state, tl_valid).numpy()
valid_tl_ids = tf.boolean_mask(tl_ids, tl_valid).numpy()
batch_images = np.zeros((len(tracks), 224,448, 3), dtype=np.float32)
for track_index, track in enumerate(tracks):
if not current_is_valid[track]:
print("WARNING! Found a track that is not valid in current frame!")
batch_images[track_index] = None
continue
track_in_valid_index = orig_to_valid_map[track]
cx = current_x[track]
cy = current_y[track]
yaw = current_yaw[track]
# generate the transfer matrix
transform = transform_matrix(cx, cy, yaw)
transformed = transform_points(corners, transform)
tl_colors = [(1,1,1), # white Unknown = 0
(1,0,0), # red Arrow_Stop = 1
(1,1,0), # yellow Arrow_Caution = 2
(0,1,0), # green Arrow_go = 3
(1,0,0), # red stop = 4
(1,1,0), # yellow caution = 5
(0,1,0), # green go = 6
(1,115/255,0), # red flashing_stop = 7
(212/255,1,0)] # yellow flashing caution = 8
# Drawing the road
road_img = np.zeros((224,448,3), dtype = np.float32)
valid_xy_road_in_img = transform_points(valid_xy_road, transform)*CV2_SHIFT_VALUE
valid_xy_plus_dir_in_img = transform_points(valid_xy_plus_dir, transform)*CV2_SHIFT_VALUE
road_pts = np.stack([valid_xy_road_in_img, valid_xy_plus_dir_in_img], 1).astype(np.int64)
for rs_type in [1,2,3,6,7,8,9,10,11,12,13,15,16,17,18,19]:
type_indexes = np.where(valid_road_type == rs_type)
cv2.polylines(road_img, road_pts[type_indexes], False, color = road_segment_color(rs_type), lineType=cv2.LINE_AA, shift=CV2_SHIFT)
for i,tl_state in enumerate(valid_tl_states):
lane_id = valid_tl_ids[i]
tl_road_pt_indexes = np.where(road_ids == lane_id)[0]
cv2.polylines(road_img, road_pts[tl_road_pt_indexes], False, tl_colors[tl_state], lineType=cv2.LINE_AA, shift=CV2_SHIFT)
road_img = np.clip(road_img, 0, 1)
pts = np.reshape(transformed*CV2_SHIFT_VALUE, (11, -1, 4, 2)).astype(np.int64)
out_img = np.zeros((224,448, 3), dtype = np.float32)
for i in range(11):
out_img *= 0.85
cv2.fillPoly(out_img, pts[i], color = (1,1,0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)
# draw the ego in green
cv2.fillPoly(out_img, pts[i][track_in_valid_index:track_in_valid_index+1], color = (0,1,0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)
out_img = np.clip(out_img, 0, 1)
# Combine road and car images
road_img[out_img > 0] = out_img[out_img > 0]
batch_images[track_index] = (road_img*255).astype(np.uint8)
return batch_images
def compute_embeddings(batch_images, cnn_models):
"""
Parameters:
batch_images: nparray of shape(B, 224,448, 3)
cnn_models: dictionary from model_names to models
Returns:
a dictionary from model_names to embeddings of shape(B, out_embedding_size)
"""
# evaluate the pre-trained CNN embeddings
model_embeddings = {}
for model_name, model in cnn_models.items():
model_embedding = model.predict(tf.convert_to_tensor(batch_images)) # Outputs (B,7,7,2048)
model_embedding = model_embedding[:,1:-1,1:-1,:]
model_embedding = tf.keras.layers.GlobalAveragePooling2D()(model_embedding) # Outputs (B,2048)
model_embeddings[model_name] = model_embedding.numpy()
return model_embeddings | import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
from ..utils import get_corners_in_world_coordinates
from ..utils import transform_points
from ..utils import transform_matrix
from ..utils import road_segment_color
CV2_SHIFT = 8
CV2_SHIFT_VALUE = 2 ** CV2_SHIFT
def rasterize(parsed):
"""
Parameters:
parsed: a parsed example
Returns:
batch_images: a nparray of rasterized images of shape(B, 224,448, 3) dtype = float32
"""
decoded_example = parsed
past_states = tf.stack([
decoded_example['state/past/x'],
decoded_example['state/past/y'],
decoded_example['state/past/length'],
decoded_example['state/past/width'],
decoded_example['state/past/bbox_yaw']
], -1)
cur_states = tf.stack([
decoded_example['state/current/x'],
decoded_example['state/current/y'],
decoded_example['state/current/length'],
decoded_example['state/current/width'],
decoded_example['state/current/bbox_yaw']
], -1)
states = tf.concat([past_states, cur_states], axis = 1)
past_is_valid = decoded_example['state/past/valid'] > 0
current_is_valid = decoded_example['state/current/valid'] > 0
is_valid = tf.concat([past_is_valid, current_is_valid], axis = 1)
is_valid = tf.reduce_any(is_valid, 1)
valid_states = tf.boolean_mask(states, is_valid)
tracks_to_predict = parsed['state/tracks_to_predict']
current_is_valid = tf.squeeze(current_is_valid, axis = 1)
orig_to_valid_map = (tf.cumsum(tf.cast(is_valid, dtype = tf.int32)) - 1).numpy()
tracks = tf.where(tracks_to_predict > 0)
tracks = tracks.numpy().reshape(-1)
current_is_valid = current_is_valid.numpy()
r_valid_states = tf.transpose(valid_states, perm = [1,0,2]) # (11,58,5)
r_valid_states = tf.reshape(r_valid_states, (-1,5))
corners = get_corners_in_world_coordinates(r_valid_states) # (58*11, 4, 2)
ego_info = {}
current_x = parsed['state/current/x'].numpy().reshape(-1)
current_y = parsed['state/current/y'].numpy().reshape(-1)
current_yaw = parsed['state/current/bbox_yaw'].numpy().reshape(-1)
# Prepare the road data
xyz_road = parsed['roadgraph_samples/xyz']
is_valid_road = parsed['roadgraph_samples/valid']
road_type = parsed['roadgraph_samples/type']
xy_road = xyz_road[:,:2]
is_valid_road = tf.squeeze(is_valid_road)
valid_xy_road = tf.boolean_mask(xy_road, is_valid_road)
dir_road = parsed['roadgraph_samples/dir']
dir_xy_road = dir_road[:, :2]
valid_dir_xy_road = tf.boolean_mask(dir_xy_road, is_valid_road)
valid_road_type = np.squeeze(tf.boolean_mask(road_type, is_valid_road).numpy())
road_ids = np.squeeze(tf.boolean_mask(parsed['roadgraph_samples/id'], is_valid_road).numpy())
valid_xy_plus_dir = valid_xy_road + valid_dir_xy_road
valid_xy_plus_dir = valid_xy_plus_dir.numpy()
valid_xy_road = valid_xy_road.numpy()
tl_state = parsed['traffic_light_state/current/state']
tl_ids = parsed['traffic_light_state/current/id']
tl_valid = parsed['traffic_light_state/current/valid']
valid_tl_states = tf.boolean_mask(tl_state, tl_valid).numpy()
valid_tl_ids = tf.boolean_mask(tl_ids, tl_valid).numpy()
batch_images = np.zeros((len(tracks), 224,448, 3), dtype=np.float32)
for track_index, track in enumerate(tracks):
if not current_is_valid[track]:
print("WARNING! Found a track that is not valid in current frame!")
batch_images[track_index] = None
continue
track_in_valid_index = orig_to_valid_map[track]
cx = current_x[track]
cy = current_y[track]
yaw = current_yaw[track]
# generate the transfer matrix
transform = transform_matrix(cx, cy, yaw)
transformed = transform_points(corners, transform)
tl_colors = [(1,1,1), # white Unknown = 0
(1,0,0), # red Arrow_Stop = 1
(1,1,0), # yellow Arrow_Caution = 2
(0,1,0), # green Arrow_go = 3
(1,0,0), # red stop = 4
(1,1,0), # yellow caution = 5
(0,1,0), # green go = 6
(1,115/255,0), # red flashing_stop = 7
(212/255,1,0)] # yellow flashing caution = 8
# Drawing the road
road_img = np.zeros((224,448,3), dtype = np.float32)
valid_xy_road_in_img = transform_points(valid_xy_road, transform)*CV2_SHIFT_VALUE
valid_xy_plus_dir_in_img = transform_points(valid_xy_plus_dir, transform)*CV2_SHIFT_VALUE
road_pts = np.stack([valid_xy_road_in_img, valid_xy_plus_dir_in_img], 1).astype(np.int64)
for rs_type in [1,2,3,6,7,8,9,10,11,12,13,15,16,17,18,19]:
type_indexes = np.where(valid_road_type == rs_type)
cv2.polylines(road_img, road_pts[type_indexes], False, color = road_segment_color(rs_type), lineType=cv2.LINE_AA, shift=CV2_SHIFT)
for i,tl_state in enumerate(valid_tl_states):
lane_id = valid_tl_ids[i]
tl_road_pt_indexes = np.where(road_ids == lane_id)[0]
cv2.polylines(road_img, road_pts[tl_road_pt_indexes], False, tl_colors[tl_state], lineType=cv2.LINE_AA, shift=CV2_SHIFT)
road_img = np.clip(road_img, 0, 1)
pts = np.reshape(transformed*CV2_SHIFT_VALUE, (11, -1, 4, 2)).astype(np.int64)
out_img = np.zeros((224,448, 3), dtype = np.float32)
for i in range(11):
out_img *= 0.85
cv2.fillPoly(out_img, pts[i], color = (1,1,0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)
# draw the ego in green
cv2.fillPoly(out_img, pts[i][track_in_valid_index:track_in_valid_index+1], color = (0,1,0), lineType=cv2.LINE_AA, shift=CV2_SHIFT)
out_img = np.clip(out_img, 0, 1)
# Combine road and car images
road_img[out_img > 0] = out_img[out_img > 0]
batch_images[track_index] = (road_img*255).astype(np.uint8)
return batch_images
def compute_embeddings(batch_images, cnn_models):
"""
Parameters:
batch_images: nparray of shape(B, 224,448, 3)
cnn_models: dictionary from model_names to models
Returns:
a dictionary from model_names to embeddings of shape(B, out_embedding_size)
"""
# evaluate the pre-trained CNN embeddings
model_embeddings = {}
for model_name, model in cnn_models.items():
model_embedding = model.predict(tf.convert_to_tensor(batch_images)) # Outputs (B,7,7,2048)
model_embedding = model_embedding[:,1:-1,1:-1,:]
model_embedding = tf.keras.layers.GlobalAveragePooling2D()(model_embedding) # Outputs (B,2048)
model_embeddings[model_name] = model_embedding.numpy()
return model_embeddings | en | 0.740004 | Parameters: parsed: a parsed example Returns: batch_images: a nparray of rasterized images of shape(B, 224,448, 3) dtype = float32 # (11,58,5) # (58*11, 4, 2) # Prepare the road data # generate the transfer matrix # white Unknown = 0 # red Arrow_Stop = 1 # yellow Arrow_Caution = 2 # green Arrow_go = 3 # red stop = 4 # yellow caution = 5 # green go = 6 # red flashing_stop = 7 # yellow flashing caution = 8 # Drawing the road # draw the ego in green # Combine road and car images Parameters: batch_images: nparray of shape(B, 224,448, 3) cnn_models: dictionary from model_names to models Returns: a dictionary from model_names to embeddings of shape(B, out_embedding_size) # evaluate the pre-trained CNN embeddings # Outputs (B,7,7,2048) # Outputs (B,2048) | 2.372013 | 2 |
pythonBase/PythonDictionary.py | ChallengerCY/Python-PythonBase | 0 | 6613479 | #coding=utf-8
#字典
k={"姓名":"CY","爱好":"足球"}
#输出整个字典
print(k)
#输出字典中某一信息
print(k["姓名"])
#往字典中添加数据
k["性别"]="男"
print(k["性别"])
| #coding=utf-8
#字典
k={"姓名":"CY","爱好":"足球"}
#输出整个字典
print(k)
#输出字典中某一信息
print(k["姓名"])
#往字典中添加数据
k["性别"]="男"
print(k["性别"])
| zh | 0.852057 | #coding=utf-8 #字典 #输出整个字典 #输出字典中某一信息 #往字典中添加数据 | 3.153461 | 3 |
wb/main/calibration_abstractions/model.py | apaniukov/workbench | 23 | 6613480 | """
OpenVINO DL Workbench
Class for model abstraction in the calibration config
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from wb.main.models.topologies_model import TopologiesModel
from wb.main.utils.utils import find_by_ext
class Model:
"""
Abstraction for "model" section of calibration config:
"model": {
"name": "model_name", // model name
"model": "<MODEL_PATH>", // path to xml
"weights": "<WIGHT_PATH>", // path to weight
}
"""
def __init__(self, topology_model: TopologiesModel):
self.name = topology_model.name
self.model = find_by_ext(topology_model.path, 'xml')
self.weights = find_by_ext(topology_model.path, 'bin')
def json(self) -> dict:
return {
'model_name': self.name,
'model': self.model,
'weights': self.weights,
}
| """
OpenVINO DL Workbench
Class for model abstraction in the calibration config
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from wb.main.models.topologies_model import TopologiesModel
from wb.main.utils.utils import find_by_ext
class Model:
"""
Abstraction for "model" section of calibration config:
"model": {
"name": "model_name", // model name
"model": "<MODEL_PATH>", // path to xml
"weights": "<WIGHT_PATH>", // path to weight
}
"""
def __init__(self, topology_model: TopologiesModel):
self.name = topology_model.name
self.model = find_by_ext(topology_model.path, 'xml')
self.weights = find_by_ext(topology_model.path, 'bin')
def json(self) -> dict:
return {
'model_name': self.name,
'model': self.model,
'weights': self.weights,
}
| en | 0.790609 | OpenVINO DL Workbench Class for model abstraction in the calibration config Copyright (c) 2018-2019 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Abstraction for "model" section of calibration config: "model": { "name": "model_name", // model name "model": "<MODEL_PATH>", // path to xml "weights": "<WIGHT_PATH>", // path to weight } | 2.193073 | 2 |
accounts/endpoints.py | sinisaos/starlette-piccolo-rental | 3 | 6613481 | <gh_stars>1-10
# uncomment for Cloudinary upload
# import cloudinary.api
# import cloudinary.uploader
# import cloudinary
import os
from starlette.authentication import requires
from starlette.responses import RedirectResponse
from accounts.forms import LoginForm, RegistrationForm
from accounts.tables import User, generate_jwt
from ads.helpers import get_ads, get_reviews
from ads.tables import Ad, Image, Notification, Rent, Review
from settings import (
BASE_HOST,
templates,
# CLOUDINARY_API_KEY,
# CLOUDINARY_API_SECRET,
)
from utils import pagination
async def register(request):
"""
Validate form, register and authenticate user
"""
data = await request.form()
form = RegistrationForm(data)
username = form.username.data
email = form.email.data
password = <PASSWORD>.data
if request.method == "POST" and form.validate():
if (
await User.exists().where(User.email == email).run()
or await User.exists().where(User.username == username).run()
):
user_error = "User with that email or username already exists."
return templates.TemplateResponse(
"accounts/register.html",
{
"request": request,
"form": form,
"user_error": user_error,
},
)
query = User(
username=username,
email=email,
password=password,
)
await query.save().run()
results = await (
User.select()
.columns(User.id, User.username, User.password)
.where((User.username == username))
.first()
).run()
valid_user = await User.login(username=username, password=password)
if not valid_user:
user_error = "Invalid username or password"
return templates.TemplateResponse(
"accounts/login.html",
{
"request": request,
"form": form,
"user_error": user_error,
},
)
response = RedirectResponse(BASE_HOST, status_code=302)
response.set_cookie(
"jwt", generate_jwt(results["username"]), httponly=True
)
return response
return templates.TemplateResponse(
"accounts/register.html", {"request": request, "form": form}
)
async def login(request):
"""
Validate form, login and authenticate user
"""
path = request.query_params["next"]
data = await request.form()
form = LoginForm(data)
username = form.username.data
password = form.password.data
if request.method == "POST" and form.validate():
if await User.exists().where(User.username == username).run():
results = await (
User.select()
.columns(User.id, User.username, User.password)
.where((User.username == username))
.first()
).run()
valid_user = await User.login(username=username, password=password)
if not valid_user:
user_error = "Invalid username or password"
return templates.TemplateResponse(
"accounts/login.html",
{
"request": request,
"form": form,
"user_error": user_error,
},
)
response = RedirectResponse(BASE_HOST + path, status_code=302)
response.set_cookie(
"jwt", generate_jwt(results["username"]), httponly=True
)
return response
else:
user_error = "Please register you don't have account"
return templates.TemplateResponse(
"accounts/login.html",
{
"request": request,
"form": form,
"user_error": user_error,
},
)
return templates.TemplateResponse(
"accounts/login.html", {"request": request, "form": form}
)
@requires("authenticated", redirect="index")
async def profile(request):
    """
    User profile page: the user's row plus counts of their ads, reviews
    and (total / unread) notifications.
    """
    # Short aliases for the Piccolo tables used below.
    a = Ad
    r = Review
    u = User
    n = Notification
    auth_user = request.user.display_name
    results = await u.select().where(u.username == auth_user).run()
    ads_count = await a.count().where(a.ad_user.username == auth_user).run()
    reviews_count = (
        await r.count().where(r.review_user.username == auth_user).run()
    )
    # All notifications addressed to this user.
    notifications_count = (
        await n.count().where(n.recipient.username == auth_user).run()
    )
    # Only the unread ones (badge counter on the profile page).
    unread_notifications_count = (
        await n.count()
        .where((n.is_read == False) & (n.recipient.username == auth_user))
        .run()
    )
    return templates.TemplateResponse(
        "accounts/profile.html",
        {
            "request": request,
            "results": results,
            "auth_user": auth_user,
            "ads_count": ads_count,
            "reviews_count": reviews_count,
            "notifications_count": notifications_count,
            "unread_notifications_count": unread_notifications_count,
        },
    )
@requires("authenticated", redirect="index")
async def profile_rented_from_user(request):
    """
    List rentals of the authenticated user's ads (items other users
    rented *from* this user), with owner and renter usernames.
    """
    auth_user = request.user.display_name
    # NOTE(review): auth_user is interpolated directly into the SQL text;
    # prefer Rent.raw("... = {}", auth_user) to rule out SQL injection.
    rented_from_me = await Rent.raw(
        f"SELECT ad.title, rent.start_date, rent.end_date, "
        f"(SELECT username from piccolo_user WHERE ad.ad_user = piccolo_user.id) as owner, "
        f"(SELECT username from piccolo_user WHERE rent.client = piccolo_user.id) as usr "
        f"FROM ad JOIN rent ON ad.id = rent.ad_rent JOIN piccolo_user ON piccolo_user.id = ad.ad_user "
        f"WHERE piccolo_user.username = '{auth_user}';"
    ).run()
    return templates.TemplateResponse(
        "accounts/rent_by_user.html",
        {
            "request": request,
            "rented_from_me": rented_from_me,
        },
    )
@requires("authenticated", redirect="index")
async def profile_rented_by_user(request):
    """
    List the rentals the authenticated user made on other users' ads.
    """
    u = User
    # Resolve the session user's primary key for the raw query below.
    session_user = (
        await u.select(u.id, u.username)
        .where(u.username == request.user.username)
        .first()
        .run()
    )
    # session_user['id'] is an integer from the DB, so the interpolation
    # below is not user-controlled text; still, a parameterized raw query
    # would be the safer pattern.
    rented_by_me = await Rent.raw(
        f"SELECT ad.title, rent.start_date, rent.end_date, "
        f"(SELECT username from piccolo_user WHERE ad.ad_user = piccolo_user.id) as owner, "
        f"(SELECT username from piccolo_user WHERE rent.client = piccolo_user.id) as usr FROM ad "
        f"JOIN rent ON ad.id = rent.ad_rent JOIN piccolo_user ON piccolo_user.id = ad.ad_user "
        f"WHERE rent.client = {session_user['id']};"
    ).run()
    return templates.TemplateResponse(
        "accounts/rent_from_user.html",
        {
            "request": request,
            "rented_by_me": rented_by_me,
        },
    )
@requires("authenticated", redirect="index")
async def profile_ads(request):
    """
    Paginated list of the ads posted by the authenticated user.
    """
    a = Ad
    auth_user = request.user.display_name
    page_query = pagination.get_page_number(url=request.url)
    # Total count drives the paginator's page math.
    count = await a.count().where(a.ad_user.username == auth_user).run()
    paginator = pagination.Pagination(page_query, count)
    ads = (
        await get_ads()
        .where(a.ad_user.username == auth_user)
        .limit(paginator.page_size)
        .offset(paginator.offset())
        .run()
    )
    page_controls = pagination.get_page_controls(
        url=request.url,
        current_page=paginator.current_page(),
        total_pages=paginator.total_pages(),
    )
    return templates.TemplateResponse(
        "accounts/profile_ads.html",
        {
            "request": request,
            "ads": ads,
            "page_controls": page_controls,
        },
    )
@requires("authenticated", redirect="index")
async def profile_reviews(request):
    """
    Paginated list of the reviews written by the authenticated user.
    """
    r = Review
    auth_user = request.user.display_name
    page_query = pagination.get_page_number(url=request.url)
    # Total count drives the paginator's page math.
    count = await r.count().where(r.review_user.username == auth_user).run()
    paginator = pagination.Pagination(page_query, count)
    reviews = (
        await get_reviews()
        .where(r.review_user.username == auth_user)
        .limit(paginator.page_size)
        .offset(paginator.offset())
        .run()
    )
    page_controls = pagination.get_page_controls(
        url=request.url,
        current_page=paginator.current_page(),
        total_pages=paginator.total_pages(),
    )
    return templates.TemplateResponse(
        "accounts/profile_reviews.html",
        {
            "request": request,
            "reviews": reviews,
            "page_controls": page_controls,
        },
    )
@requires("authenticated", redirect="index")
async def profile_notifications(request):
    """
    Paginated list of the notifications addressed to the authenticated
    user, newest-page logic handled by the shared pagination helpers.
    """
    n = Notification
    auth_user = request.user.display_name
    page_query = pagination.get_page_number(url=request.url)
    count = await n.count().where(n.recipient.username == auth_user).run()
    paginator = pagination.Pagination(page_query, count)
    notifications = (
        await n.select(
            n.id, n.message, n.created, n.is_read, n.sender.username
        )
        .where(n.recipient.username == auth_user)
        .limit(paginator.page_size)
        .offset(paginator.offset())
        .run()
    )
    page_controls = pagination.get_page_controls(
        url=request.url,
        current_page=paginator.current_page(),
        total_pages=paginator.total_pages(),
    )
    return templates.TemplateResponse(
        "accounts/profile_notifications.html",
        {
            "request": request,
            "notifications": notifications,
            "page_controls": page_controls,
        },
    )
@requires("authenticated", redirect="index")
async def read_notification(request):
    """
    Mark one notification as read (POST), then return to the profile page.
    """
    n = Notification
    request_path_id = request.path_params["id"]
    if request.method == "POST":
        await n.update({n.is_read: True}).where(
            n.id == int(request_path_id)
        ).run()
    # Any method lands back on the profile page.
    return RedirectResponse(url="/accounts/profile", status_code=302)
@requires("authenticated")
async def user_delete(request):
    """
    Delete a user account and the image files attached to the user's ads.

    NOTE(review): there is no check that the authenticated user matches
    the id being deleted -- confirm any logged-in user is really allowed
    to delete any account.
    """
    u = User
    i = Image
    request_path_id = int(request.path_params["id"])
    if request.method == "POST":
        # Collect the on-disk paths of every image on this user's ads.
        result = await i.raw(
            f"SELECT path FROM image "
            f"JOIN ad on ad.id = image.ad_image "
            f"JOIN piccolo_user on piccolo_user.id = ad.ad_user "
            f"WHERE piccolo_user.id = {request_path_id}"
        ).run()
        image_list = []
        for img in result:
            for k, v in img.items():
                image_list.append(v)
        # Cloudinary image deletion when user account is deleted
        # cloudinary.config(
        # cloud_name="rkl",
        # api_key=CLOUDINARY_API_KEY,
        # api_secret=CLOUDINARY_API_SECRET
        # )
        # if image_list:
        # public_ids = [img.split('/')[-1].split('.')[0] for img in image_list]
        # cloudinary.api.delete_resources(public_ids)
        # Dropzone image deletion when user account is deleted
        # NOTE(review): os.remove raises if a file is already gone, which
        # would abort the account deletion mid-way.
        if image_list:
            for img in image_list:
                os.remove(img)
        await u.delete().where(u.id == request_path_id).run()
    # Clear the session and drop the JWT cookie in every case.
    request.session.clear()
    response = RedirectResponse("/", status_code=302)
    response.delete_cookie("jwt")
    return response
async def logout(request):
    """
    Log the user out: clear the session and delete the JWT cookie,
    then redirect to the site root.
    """
    request.session.clear()
    response = RedirectResponse(url="/", status_code=302)
    response.delete_cookie("jwt")
    return response
| # uncomment for Cloudinary upload
# import cloudinary.api
# import cloudinary.uploader
# import cloudinary
import os
from starlette.authentication import requires
from starlette.responses import RedirectResponse
from accounts.forms import LoginForm, RegistrationForm
from accounts.tables import User, generate_jwt
from ads.helpers import get_ads, get_reviews
from ads.tables import Ad, Image, Notification, Rent, Review
from settings import (
BASE_HOST,
templates,
# CLOUDINARY_API_KEY,
# CLOUDINARY_API_SECRET,
)
from utils import pagination
async def register(request):
    """
    Validate the registration form, create the user and log them in.

    On success a JWT cookie is set and the user is redirected to
    BASE_HOST; a duplicate email/username re-renders the form with an
    error message.
    """
    data = await request.form()
    form = RegistrationForm(data)
    username = form.username.data
    email = form.email.data
    # BUG FIX: the password was never read from the form (the statement
    # was a corrupted placeholder); take it from the validated form data.
    password = form.password.data
    if request.method == "POST" and form.validate():
        # Reject duplicates on either unique field.
        if (
            await User.exists().where(User.email == email).run()
            or await User.exists().where(User.username == username).run()
        ):
            user_error = "User with that email or username already exists."
            return templates.TemplateResponse(
                "accounts/register.html",
                {
                    "request": request,
                    "form": form,
                    "user_error": user_error,
                },
            )
        query = User(
            username=username,
            email=email,
            password=password,
        )
        await query.save().run()
        results = await (
            User.select()
            .columns(User.id, User.username, User.password)
            .where((User.username == username))
            .first()
        ).run()
        # Authenticate the freshly created account.
        valid_user = await User.login(username=username, password=password)
        if not valid_user:
            user_error = "Invalid username or password"
            return templates.TemplateResponse(
                "accounts/login.html",
                {
                    "request": request,
                    "form": form,
                    "user_error": user_error,
                },
            )
        response = RedirectResponse(BASE_HOST, status_code=302)
        response.set_cookie(
            "jwt", generate_jwt(results["username"]), httponly=True
        )
        return response
    return templates.TemplateResponse(
        "accounts/register.html", {"request": request, "form": form}
    )
async def login(request):
    """
    Validate the login form, authenticate the user and set a JWT cookie.

    Redirects to BASE_HOST plus the optional ``next`` query parameter on
    success; re-renders the login template with an error otherwise.
    """
    # ROBUSTNESS FIX: a bare request.query_params["next"] raised KeyError
    # (HTTP 500) whenever the link did not carry ?next=...; default to "".
    path = request.query_params.get("next", "")
    data = await request.form()
    form = LoginForm(data)
    username = form.username.data
    password = form.password.data
    if request.method == "POST" and form.validate():
        if await User.exists().where(User.username == username).run():
            results = await (
                User.select()
                .columns(User.id, User.username, User.password)
                .where((User.username == username))
                .first()
            ).run()
            # User.login returns a falsy value when the credentials are wrong.
            valid_user = await User.login(username=username, password=password)
            if not valid_user:
                user_error = "Invalid username or password"
                return templates.TemplateResponse(
                    "accounts/login.html",
                    {
                        "request": request,
                        "form": form,
                        "user_error": user_error,
                    },
                )
            # Authenticated: issue the JWT cookie and return to "next".
            response = RedirectResponse(BASE_HOST + path, status_code=302)
            response.set_cookie(
                "jwt", generate_jwt(results["username"]), httponly=True
            )
            return response
        else:
            user_error = "Please register you don't have account"
            return templates.TemplateResponse(
                "accounts/login.html",
                {
                    "request": request,
                    "form": form,
                    "user_error": user_error,
                },
            )
    # GET request, or POST with an invalid form: render the form again.
    return templates.TemplateResponse(
        "accounts/login.html", {"request": request, "form": form}
    )
@requires("authenticated", redirect="index")
async def profile(request):
    """
    Render the authenticated user's profile page: their account row plus
    counts of their ads, reviews and (total / unread) notifications.
    """
    auth_user = request.user.display_name
    results = await User.select().where(User.username == auth_user).run()
    ads_count = await Ad.count().where(
        Ad.ad_user.username == auth_user
    ).run()
    reviews_count = await Review.count().where(
        Review.review_user.username == auth_user
    ).run()
    # Every notification addressed to this user.
    notifications_count = await Notification.count().where(
        Notification.recipient.username == auth_user
    ).run()
    # Only the unread ones (badge counter). Keep "== False": it builds a
    # Piccolo SQL expression, not a Python boolean comparison.
    unread_notifications_count = await Notification.count().where(
        (Notification.is_read == False)
        & (Notification.recipient.username == auth_user)
    ).run()
    context = {
        "request": request,
        "results": results,
        "auth_user": auth_user,
        "ads_count": ads_count,
        "reviews_count": reviews_count,
        "notifications_count": notifications_count,
        "unread_notifications_count": unread_notifications_count,
    }
    return templates.TemplateResponse("accounts/profile.html", context)
@requires("authenticated", redirect="index")
async def profile_rented_from_user(request):
    """
    List rentals of the authenticated user's ads (items other users
    rented *from* this user), with owner and renter usernames.
    """
    auth_user = request.user.display_name
    # SECURITY FIX: pass the username as a bound parameter ({}) instead of
    # interpolating it into the SQL string (prevents SQL injection).
    rented_from_me = await Rent.raw(
        "SELECT ad.title, rent.start_date, rent.end_date, "
        "(SELECT username from piccolo_user WHERE ad.ad_user = piccolo_user.id) as owner, "
        "(SELECT username from piccolo_user WHERE rent.client = piccolo_user.id) as usr "
        "FROM ad JOIN rent ON ad.id = rent.ad_rent JOIN piccolo_user ON piccolo_user.id = ad.ad_user "
        "WHERE piccolo_user.username = {};",
        auth_user,
    ).run()
    return templates.TemplateResponse(
        "accounts/rent_by_user.html",
        {
            "request": request,
            "rented_from_me": rented_from_me,
        },
    )
@requires("authenticated", redirect="index")
async def profile_rented_by_user(request):
    """
    List the rentals the authenticated user made on other users' ads.
    """
    # Resolve the session user's primary key for the raw query below.
    session_user = (
        await User.select(User.id, User.username)
        .where(User.username == request.user.username)
        .first()
        .run()
    )
    # SECURITY/CONSISTENCY FIX: bind the id as a query parameter ({})
    # instead of f-string interpolation into the SQL text.
    rented_by_me = await Rent.raw(
        "SELECT ad.title, rent.start_date, rent.end_date, "
        "(SELECT username from piccolo_user WHERE ad.ad_user = piccolo_user.id) as owner, "
        "(SELECT username from piccolo_user WHERE rent.client = piccolo_user.id) as usr FROM ad "
        "JOIN rent ON ad.id = rent.ad_rent JOIN piccolo_user ON piccolo_user.id = ad.ad_user "
        "WHERE rent.client = {};",
        session_user["id"],
    ).run()
    return templates.TemplateResponse(
        "accounts/rent_from_user.html",
        {
            "request": request,
            "rented_by_me": rented_by_me,
        },
    )
@requires("authenticated", redirect="index")
async def profile_ads(request):
    """Paginated list of the ads posted by the authenticated user."""
    auth_user = request.user.display_name
    page_number = pagination.get_page_number(url=request.url)
    # Total count drives the paginator's page math.
    total_ads = await Ad.count().where(
        Ad.ad_user.username == auth_user
    ).run()
    paginator = pagination.Pagination(page_number, total_ads)
    current_page_ads = await (
        get_ads()
        .where(Ad.ad_user.username == auth_user)
        .limit(paginator.page_size)
        .offset(paginator.offset())
    ).run()
    page_controls = pagination.get_page_controls(
        url=request.url,
        current_page=paginator.current_page(),
        total_pages=paginator.total_pages(),
    )
    context = {
        "request": request,
        "ads": current_page_ads,
        "page_controls": page_controls,
    }
    return templates.TemplateResponse("accounts/profile_ads.html", context)
@requires("authenticated", redirect="index")
async def profile_reviews(request):
    """
    Paginated list of the reviews written by the authenticated user.
    """
    r = Review
    auth_user = request.user.display_name
    page_query = pagination.get_page_number(url=request.url)
    # Total count drives the paginator's page math.
    count = await r.count().where(r.review_user.username == auth_user).run()
    paginator = pagination.Pagination(page_query, count)
    reviews = (
        await get_reviews()
        .where(r.review_user.username == auth_user)
        .limit(paginator.page_size)
        .offset(paginator.offset())
        .run()
    )
    page_controls = pagination.get_page_controls(
        url=request.url,
        current_page=paginator.current_page(),
        total_pages=paginator.total_pages(),
    )
    return templates.TemplateResponse(
        "accounts/profile_reviews.html",
        {
            "request": request,
            "reviews": reviews,
            "page_controls": page_controls,
        },
    )
@requires("authenticated", redirect="index")
async def profile_notifications(request):
    """
    Paginated list of the notifications addressed to the authenticated
    user (id, message, creation time, read flag and sender's username).
    """
    n = Notification
    auth_user = request.user.display_name
    page_query = pagination.get_page_number(url=request.url)
    count = await n.count().where(n.recipient.username == auth_user).run()
    paginator = pagination.Pagination(page_query, count)
    notifications = (
        await n.select(
            n.id, n.message, n.created, n.is_read, n.sender.username
        )
        .where(n.recipient.username == auth_user)
        .limit(paginator.page_size)
        .offset(paginator.offset())
        .run()
    )
    page_controls = pagination.get_page_controls(
        url=request.url,
        current_page=paginator.current_page(),
        total_pages=paginator.total_pages(),
    )
    return templates.TemplateResponse(
        "accounts/profile_notifications.html",
        {
            "request": request,
            "notifications": notifications,
            "page_controls": page_controls,
        },
    )
@requires("authenticated", redirect="index")
async def read_notification(request):
    """Mark one notification as read (POST), then go back to the profile."""
    raw_id = request.path_params["id"]
    if request.method == "POST":
        # Flag only the targeted notification as read.
        await Notification.update(
            {Notification.is_read: True}
        ).where(Notification.id == int(raw_id)).run()
    # Any method lands back on the profile page.
    return RedirectResponse(url="/accounts/profile", status_code=302)
@requires("authenticated")
async def user_delete(request):
    """
    Delete a user account and the image files attached to the user's ads.

    NOTE(review): there is no check that the authenticated user matches
    the id being deleted -- any logged-in user can delete any account.
    Confirm whether that is intended.
    """
    request_path_id = int(request.path_params["id"])
    if request.method == "POST":
        # Collect the on-disk paths of every image on this user's ads.
        # The id is bound as a query parameter rather than interpolated.
        result = await Image.raw(
            "SELECT path FROM image "
            "JOIN ad on ad.id = image.ad_image "
            "JOIN piccolo_user on piccolo_user.id = ad.ad_user "
            "WHERE piccolo_user.id = {}",
            request_path_id,
        ).run()
        image_list = [path for row in result for path in row.values()]
        # Cloudinary image deletion when user account is deleted
        # cloudinary.config(
        #     cloud_name="rkl",
        #     api_key=CLOUDINARY_API_KEY,
        #     api_secret=CLOUDINARY_API_SECRET
        # )
        # if image_list:
        #     public_ids = [img.split('/')[-1].split('.')[0] for img in image_list]
        #     cloudinary.api.delete_resources(public_ids)
        # Dropzone image deletion when user account is deleted
        for img in image_list:
            try:
                os.remove(img)
            except FileNotFoundError:
                # ROBUSTNESS FIX: a missing file no longer aborts the
                # account deletion mid-way.
                pass
        await User.delete().where(User.id == request_path_id).run()
    # Clear the session and drop the JWT cookie in every case.
    request.session.clear()
    response = RedirectResponse("/", status_code=302)
    response.delete_cookie("jwt")
    return response
async def logout(request):
"""
Logout user
"""
request.session.clear()
response = RedirectResponse(url="/", status_code=302)
response.delete_cookie("jwt")
return response | en | 0.694879 | # uncomment for Cloudinary upload # import cloudinary.api # import cloudinary.uploader # import cloudinary # CLOUDINARY_API_KEY, # CLOUDINARY_API_SECRET, Validate form, register and authenticate user Validate form, login and authenticate user User profile page # all user notification # user unread notifications profile Delete user # Cloudinary image deletion when user account is deleted # cloudinary.config( # cloud_name="rkl", # api_key=CLOUDINARY_API_KEY, # api_secret=CLOUDINARY_API_SECRET # ) # if image_list: # public_ids = [img.split('/')[-1].split('.')[0] for img in image_list] # cloudinary.api.delete_resources(public_ids) # Dropzone image deletion when user account is deleted Logout user | 2.203055 | 2 |
markdown/private/providers/MarkdownInfo.bzl | dwtj/me_dwtj_rules_markdown | 0 | 6613482 | '''Defines the `MarkdownInfo` provider.
'''
# Provider exchanged between the markdown rules in this repository.
MarkdownInfo = provider(
    fields = [
        # Markdown source files declared directly by the target.
        'direct_source_files',
        # Markdown source files accumulated from the target and,
        # presumably, its transitive deps -- confirm in the rule impls.
        'transitive_source_files',
        # Configuration file consumed by markdownlint for these sources.
        'markdownlint_config_file',
    ],
)
| '''Defines the `MarkdownInfo` provider.
'''
# Provider exchanged between the markdown rules in this repository.
MarkdownInfo = provider(
    fields = [
        # Markdown source files declared directly by the target.
        'direct_source_files',
        # Markdown source files accumulated from the target and,
        # presumably, its transitive deps -- confirm in the rule impls.
        'transitive_source_files',
        # Configuration file consumed by markdownlint for these sources.
        'markdownlint_config_file',
    ],
)
| en | 0.339861 | Defines the `MarkdownInfo` provider. | 1.406393 | 1 |
codeforces/1213D1-EqualizingByDivision.py | 0x8b/HackerRank | 3 | 6613483 | <filename>codeforces/1213D1-EqualizingByDivision.py
from collections import defaultdict


def min_operations(nums, k):
    """Minimum number of integer halvings needed to make at least *k*
    of the numbers in *nums* equal (Codeforces 1213D1).

    For every value reachable from each number by repeated floor
    division by two, record how many halvings that number needs; the
    cost of a candidate value is the sum of its k smallest counts.
    """
    # steps[v] -> halving counts with which the inputs reach value v.
    steps = defaultdict(list)
    for num in nums:
        value, ops = num, 0
        while value != 0:
            steps[value].append(ops)
            ops += 1
            value //= 2
    # Zero is never recorded: every positive number reaches 1, so a
    # common positive value always exists whenever k <= len(nums).
    return min(
        sum(sorted(ops_list)[:k])
        for ops_list in steps.values()
        if len(ops_list) >= k
    )


def main():
    """Read "n k" and the n numbers from stdin, print the answer."""
    n, k = map(int, input().split())
    nums = list(map(int, input().split()))
    print(min_operations(nums, k))


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer blocks on stdin.
    main()
| <filename>codeforces/1213D1-EqualizingByDivision.py
from collections import defaultdict
# Codeforces 1213D1 -- "Equalizing by Division": make at least k of the
# n numbers equal, where the only move is an integer halving (a //= 2)
# and every halving costs one operation.
n, k = map(int, input().split())
nums = list(map(int, input().split()))
cc = defaultdict(int)   # cc[v]: how many inputs can reach value v
aa = defaultdict(list)  # aa[v]: per-input halving counts needed to reach v
for num in nums:
    o = num
    i = 0
    # Record every value reachable from num by repeated halving; 0 is
    # excluded (any positive number reaches 1, so a positive common
    # value always exists when k <= n).
    while o != 0:
        cc[o] += 1
        aa[o].append(i)
        i += 1
        o //= 2
# For each value reachable by at least k inputs, the cheapest way to get
# k equal copies is the sum of its k smallest per-input costs.
m = min(sum(sorted(aa[key])[:k]) for key, count in cc.items() if count >= k)
print(m)
| none | 1 | 3.062913 | 3 | |
Run_4-Synthetic_Flat.py | Fernandez-Trincado/DataReductionPy | 1 | 6613484 | #!/usr/bin/python
# Created by: <NAME>
# Date: 2013 June 26
# Program: This program correct the imagen .fit (Science) by Syntethic Flat
# 1 m Reflector telescope, National Astronomical Observatory of Venezuela
# Mode f/5, 21 arcmin x 21 arcmin
# Project: Omega Centauri, Tidal Tails.
# The program Astrometry_V1.py defined was developed by <NAME> at the Centro de Investigaciones de Astronomia "Franc<NAME>".
# If you have any problems, please contact <NAME>, <EMAIL> / <EMAIL>
import numpy as np
import scipy as sc
import pyfits
import sys, os
from pyraf import iraf
#run, program.
#Example:
# Next program: ./Run_4-Synthetic_Flat.py Feb.22.Feb.23.2013.hlv
# >>> Feb.22.Feb.23.2013.hlv/*.fit
location='/home/jfernandez/Escritorio/Tesis_2013-2014_CIDA_ULA/Data_Tesis_2013_2014_CIDA-ULA/Reflector/'
if len(sys.argv[:]) < 2.:
	print '***************************************************'
	print 'Warning: ./Run_4-Synthetic_Flat.py XXX.xx.XXX.xx.XXXX.hlv'
	print '***************************************************'
else:
	#Combine images MEDIAN
	#TASK IRAF: images.immatch.imcombine
	#Function to combine images for generates Master Flat
	def Master_combina(inter_img,filt):
		'''Median-combine the frames listed in *inter_img* (an IRAF
		@list) into the output image *filt*, using IRAF imcombine
		with avsigclip rejection and mode scaling/weighting.'''
		iraf.images.immatch()
		iraf.images.immatch.imcombine.output=filt
		iraf.images.immatch.imcombine.headers=''
		iraf.images.immatch.imcombine.bpmasks=''
		iraf.images.immatch.imcombine.rejmasks=''
		iraf.images.immatch.imcombine.nrejmasks=''
		iraf.images.immatch.imcombine.expmasks=''
		iraf.images.immatch.imcombine.sigmas=''
		iraf.images.immatch.imcombine.logfile='STDOUT'
		iraf.images.immatch.imcombine.combine='median'
		iraf.images.immatch.imcombine.reject='avsigclip'
		iraf.images.immatch.imcombine.project='no'
		iraf.images.immatch.imcombine.outtype='real'
		iraf.images.immatch.imcombine.outlimits=''
		iraf.images.immatch.imcombine.offsets='none'
		iraf.images.immatch.imcombine.masktype='none'
		iraf.images.immatch.imcombine.maskvalue=0.
		iraf.images.immatch.imcombine.blank=1.0
		iraf.images.immatch.imcombine.scale='mode'
		iraf.images.immatch.imcombine.zero='none'
		iraf.images.immatch.imcombine.weight='mode'
		iraf.images.immatch.imcombine.statsec=''
		iraf.images.immatch.imcombine.expname=''
		iraf.images.immatch.imcombine.lthreshold='INDEF'
		iraf.images.immatch.imcombine.hthreshold='INDEF'
		iraf.images.immatch.imcombine.nlow=1.
		iraf.images.immatch.imcombine.nhigh=1.
		iraf.images.immatch.imcombine.nkeep=1.
		iraf.images.immatch.imcombine.mclip='yes'
		iraf.images.immatch.imcombine.lsigma=3.
		iraf.images.immatch.imcombine.hsigma=3.
		#Detector characteristics: read noise [e-] and gain [e-/ADU].
		iraf.images.immatch.imcombine.rdnoise=7.
		iraf.images.immatch.imcombine.gain=1.68
		iraf.images.immatch.imcombine.snoise=0.
		iraf.images.immatch.imcombine.sigscale=0.1
		iraf.images.immatch.imcombine.pclip=-0.5
		iraf.images.immatch.imcombine.grow=0.
		iraf.images.immatch.imcombine(inter_img)
	#END function, IRAF: imcombine
	#List every *_BR (bias/overscan-corrected) science frame of the night.
	os.system('ls '+sys.argv[1]+'/Science/*_BR.fit >list_temp_Science.txt ')
	data=sc.genfromtxt('list_temp_Science.txt',dtype=str)
	def list_s(x1,y1):
		'''Append image *x1* to the per-filter/exposure list whose
		name ends in *y1*; returns the list file name.'''
		lf='Initial_list_Syntethic_flat_'+y1
		os.system('ls '+x1+' >> '+lf)
		return lf
	for i in np.arange(len(data)):
		data_head=pyfits.open(data[i])
		delta=data_head[0].header['DECJ2_D']
		filter_s=data_head[0].header['FILTER']
		filter_s=float(map(str,filter_s)[0])
		time_exp=data_head[0].header['EXPTIME']
		time_exp=int(time_exp)
		#Selecting images of my project
		#Filter code 2 = V band, 4 = I band; group by filter + exptime.
		# if float(delta) < -39. and filter_s == 2. and time_exp == 60:
		if filter_s == 2. and time_exp == 60:
			list_s(data[i],'V'+str(time_exp)) #Generating list
		elif filter_s == 4. and time_exp == 60:
			list_s(data[i],'I'+str(time_exp)) #Generating list
		elif filter_s == 2. and time_exp == 90:
			list_s(data[i],'V'+str(time_exp)) #Generating list
		elif filter_s == 4. and time_exp == 90:
			list_s(data[i],'I'+str(time_exp)) #Generating list
		else:
			#Frames outside the selection are just re-compressed.
			os.system('bzip2 '+data[i])
	#Median-combine each per-filter/exposure list into a synthetic flat.
	os.system('ls Initial*list* >list_temp_flat_list.dat')
	proc=sc.genfromtxt('list_temp_flat_list.dat',dtype=str)
	for j in np.arange(len(proc)):
		Master_combina('@'+proc[j],'Master_'+proc[j]+'.fit')
		os.system('mv Master_'+proc[j]+'.fit '+sys.argv[1]+'/')
	os.system('mv Initial*list* '+sys.argv[1]+'/')
	os.system('rm list_temp_Science.txt list_temp_flat_list.dat')
#END
| #!/usr/bin/python
# Created by: <NAME>
# Date: 2013 June 26
# Program: This program correct the imagen .fit (Science) by Syntethic Flat
# 1 m Reflector telescope, National Astronomical Observatory of Venezuela
# Mode f/5, 21 arcmin x 21 arcmin
# Project: Omega Centauri, Tidal Tails.
# The program Astrometry_V1.py defined was developed by <NAME> at the Centro de Investigaciones de Astronomia "Franc<NAME>".
# If you have any problems, please contact <NAME>, <EMAIL> / <EMAIL>
import numpy as np
import scipy as sc
import pyfits
import sys, os
from pyraf import iraf
#run, program.
#Example:
# Next program: ./Run_4-Synthetic_Flat.py Feb.22.Feb.23.2013.hlv
# >>> Feb.22.Feb.23.2013.hlv/*.fit
location='/home/jfernandez/Escritorio/Tesis_2013-2014_CIDA_ULA/Data_Tesis_2013_2014_CIDA-ULA/Reflector/'
if len(sys.argv[:]) < 2.:
	print '***************************************************'
	print 'Warning: ./Run_4-Synthetic_Flat.py XXX.xx.XXX.xx.XXXX.hlv'
	print '***************************************************'
else:
	#Combine images MEDIAN
	#TASK IRAF: images.immatch.imcombine
	#Function to combine images for generates Master Flat
	def Master_combina(inter_img,filt):
		'''Median-combine the frames listed in *inter_img* (an IRAF
		@list) into the output image *filt*, using IRAF imcombine
		with avsigclip rejection and mode scaling/weighting.'''
		iraf.images.immatch()
		iraf.images.immatch.imcombine.output=filt
		iraf.images.immatch.imcombine.headers=''
		iraf.images.immatch.imcombine.bpmasks=''
		iraf.images.immatch.imcombine.rejmasks=''
		iraf.images.immatch.imcombine.nrejmasks=''
		iraf.images.immatch.imcombine.expmasks=''
		iraf.images.immatch.imcombine.sigmas=''
		iraf.images.immatch.imcombine.logfile='STDOUT'
		iraf.images.immatch.imcombine.combine='median'
		iraf.images.immatch.imcombine.reject='avsigclip'
		iraf.images.immatch.imcombine.project='no'
		iraf.images.immatch.imcombine.outtype='real'
		iraf.images.immatch.imcombine.outlimits=''
		iraf.images.immatch.imcombine.offsets='none'
		iraf.images.immatch.imcombine.masktype='none'
		iraf.images.immatch.imcombine.maskvalue=0.
		iraf.images.immatch.imcombine.blank=1.0
		iraf.images.immatch.imcombine.scale='mode'
		iraf.images.immatch.imcombine.zero='none'
		iraf.images.immatch.imcombine.weight='mode'
		iraf.images.immatch.imcombine.statsec=''
		iraf.images.immatch.imcombine.expname=''
		iraf.images.immatch.imcombine.lthreshold='INDEF'
		iraf.images.immatch.imcombine.hthreshold='INDEF'
		iraf.images.immatch.imcombine.nlow=1.
		iraf.images.immatch.imcombine.nhigh=1.
		iraf.images.immatch.imcombine.nkeep=1.
		iraf.images.immatch.imcombine.mclip='yes'
		iraf.images.immatch.imcombine.lsigma=3.
		iraf.images.immatch.imcombine.hsigma=3.
		#Detector characteristics: read noise [e-] and gain [e-/ADU].
		iraf.images.immatch.imcombine.rdnoise=7.
		iraf.images.immatch.imcombine.gain=1.68
		iraf.images.immatch.imcombine.snoise=0.
		iraf.images.immatch.imcombine.sigscale=0.1
		iraf.images.immatch.imcombine.pclip=-0.5
		iraf.images.immatch.imcombine.grow=0.
		iraf.images.immatch.imcombine(inter_img)
	#END function, IRAF: imcombine
	#List every *_BR (bias/overscan-corrected) science frame of the night.
	os.system('ls '+sys.argv[1]+'/Science/*_BR.fit >list_temp_Science.txt ')
	data=sc.genfromtxt('list_temp_Science.txt',dtype=str)
	def list_s(x1,y1):
		'''Append image *x1* to the per-filter/exposure list whose
		name ends in *y1*; returns the list file name.'''
		lf='Initial_list_Syntethic_flat_'+y1
		os.system('ls '+x1+' >> '+lf)
		return lf
	for i in np.arange(len(data)):
		data_head=pyfits.open(data[i])
		delta=data_head[0].header['DECJ2_D']
		filter_s=data_head[0].header['FILTER']
		filter_s=float(map(str,filter_s)[0])
		time_exp=data_head[0].header['EXPTIME']
		time_exp=int(time_exp)
		#Selecting images of my project
		#Filter code 2 = V band, 4 = I band; group by filter + exptime.
		# if float(delta) < -39. and filter_s == 2. and time_exp == 60:
		if filter_s == 2. and time_exp == 60:
			list_s(data[i],'V'+str(time_exp)) #Generating list
		elif filter_s == 4. and time_exp == 60:
			list_s(data[i],'I'+str(time_exp)) #Generating list
		elif filter_s == 2. and time_exp == 90:
			list_s(data[i],'V'+str(time_exp)) #Generating list
		elif filter_s == 4. and time_exp == 90:
			list_s(data[i],'I'+str(time_exp)) #Generating list
		else:
			#Frames outside the selection are just re-compressed.
			os.system('bzip2 '+data[i])
	#Median-combine each per-filter/exposure list into a synthetic flat.
	os.system('ls Initial*list* >list_temp_flat_list.dat')
	proc=sc.genfromtxt('list_temp_flat_list.dat',dtype=str)
	for j in np.arange(len(proc)):
		Master_combina('@'+proc[j],'Master_'+proc[j]+'.fit')
		os.system('mv Master_'+proc[j]+'.fit '+sys.argv[1]+'/')
	os.system('mv Initial*list* '+sys.argv[1]+'/')
	os.system('rm list_temp_Science.txt list_temp_flat_list.dat')
#END
| en | 0.615328 | #!/usr/bin/python # Created by: <NAME> # Date: 2013 June 26 # Program: This program correct the imagen .fit (Science) by Syntethic Flat # 1 m Reflector telescope, National Astronomical Observatory of Venezuela # Mode f/5, 21 arcmin x 21 arcmin # Project: Omega Centauri, Tidal Tails. # The program Astrometry_V1.py defined was developed by <NAME> at the Centro de Investigaciones de Astronomia "Franc<NAME>". # If you have any problems, please contact <NAME>, <EMAIL> / <EMAIL> #run, program. #Example: # Next program: ./Run_4-Synthetic_Flat.py Feb.22.Feb.23.2013.hlv # >>> Feb.22.Feb.23.2013.hlv/*.fit #Combine images MEDIAN #TASK IRAF: images.immatch.imcombine #Function to combine images for generates Master Flat #END function, IRAF: imcombine #Selecting images of my project # if float(delta) < -39. and filter_s == 2. and time_exp == 60: #Generating list #Generating list #Generating list #Generating list #END | 2.155938 | 2 |
planadversity/apps/meditations/urls.py | powellc/planadversity | 0 | 6613485 | <reponame>powellc/planadversity<filename>planadversity/apps/meditations/urls.py
from django.conf.urls import url
from .views import MeditationListView, MeditationDetailView, MeditationListJSONView, \
HomepageView, ResponseListView, ResponseDetailView, ResponseCreateView
# custom views
urlpatterns = [
    # REGEX FIX: the '.' was unescaped (matched any character) and the
    # pattern was unanchored; escape it and add '$' terminators so each
    # route matches exactly one path.
    url(r'^meditations\.json$',
        view=MeditationListJSONView.as_view(),
        name="meditation-list-json"),
    url(r'^meditations/(?P<slug>[-\w]+)/$',
        view=MeditationDetailView.as_view(),
        name="meditation-detail"),
    #url(r'^meditations.csv',
    #    view=MeditationListCSVView.as_view(),
    #    name="meditation-list-csv"),
    url(r'^meditations/$',
        view=MeditationListView.as_view(),
        name="meditation-list"),
    # "create/" must stay ahead of the slug route, which would match it.
    url(r'^responses/create/$',
        view=ResponseCreateView.as_view(),
        name="response-create"),
    url(r'^responses/(?P<slug>[-\w\d]+)/$',
        view=ResponseDetailView.as_view(),
        name="response-detail"),
    url(r'^responses/$',
        view=ResponseListView.as_view(),
        name="response-list"),
    url("^$",
        view=HomepageView.as_view(),
        name="homepage")]
| from django.conf.urls import url
from .views import MeditationListView, MeditationDetailView, MeditationListJSONView, \
HomepageView, ResponseListView, ResponseDetailView, ResponseCreateView
# custom views
urlpatterns = [
url(r'^meditations.json',
view=MeditationListJSONView.as_view(),
name="meditation-list-json"),
url(r'^meditations/(?P<slug>[-\w]+)/',
view=MeditationDetailView.as_view(),
name="meditation-detail"),
#url(r'^meditations.csv',
# view=MeditationListCSVView.as_view(),
# name="meditation-list-csv"),
url(r'^meditations/$',
view=MeditationListView.as_view(),
name="meditation-list"),
url(r'^responses/create/$',
view=ResponseCreateView.as_view(),
name="response-create"),
url(r'^responses/(?P<slug>[-\w\d]+)/',
view=ResponseDetailView.as_view(),
name="response-detail"),
url(r'^responses/$',
view=ResponseListView.as_view(),
name="response-list"),
url("^$",
view=HomepageView.as_view(),
name="homepage")] | en | 0.196143 | # custom views #url(r'^meditations.csv', # view=MeditationListCSVView.as_view(), # name="meditation-list-csv"), | 1.900256 | 2 |
PythonAPI/quickstart/27-control-traffic-lights.py | MaheshM99/PolyVerif | 1 | 6613486 | #!/usr/bin/env python3
#
# Copyright (c) 2019-2021 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
from environs import Env
import lgsvl
print("Python API Quickstart #27: How to Control Traffic Light")
env = Env()
# Connect to the simulator; host/port come from env vars with SDK defaults.
sim = lgsvl.Simulator(env.str("LGSVL__SIMULATOR_HOST", lgsvl.wise.SimulatorSettings.simulator_host), env.int("LGSVL__SIMULATOR_PORT", lgsvl.wise.SimulatorSettings.simulator_port))
# Reuse the BorregasAve map if already loaded, otherwise load it (seed 42).
if sim.current_scene == lgsvl.wise.DefaultAssets.map_borregasave:
    sim.reset()
else:
    sim.load(lgsvl.wise.DefaultAssets.map_borregasave, 42)
spawns = sim.get_spawn()
state = lgsvl.AgentState()
forward = lgsvl.utils.transform_to_forward(spawns[0])
state.transform = spawns[0]
# Place the EGO vehicle 20 m ahead of the first spawn point.
state.transform.position = spawns[0].position + 20 * forward
sim.add_agent(env.str("LGSVL__VEHICLE_0", lgsvl.wise.DefaultAssets.ego_lincoln2017mkz_apollo5), lgsvl.AgentType.EGO, state)
# # Get a list of controllable objects
controllables = sim.get_controllables("signal")
print("\n# List of controllable objects in {} scene:".format(lgsvl.wise.DefaultAssets.map_borregasave))
for c in controllables:
    print(c)
# Pick the signal nearest to this map position.
signal = sim.get_controllable(lgsvl.Vector(15.5, 4.7, -23.9), "signal")
print("\n# Signal of interest:")
print(signal)
# Get current controllable states
print("\n# Current control policy:")
print(signal.control_policy)
# Create a new control policy (state durations in seconds; "loop"
# presumably repeats the cycle -- see the LGSVL controllables docs).
control_policy = "trigger=50;green=3;yellow=2;red=1;loop"
# Control this traffic light with a new control policy
signal.control(control_policy)
print("\n# Updated control policy:")
print(signal.control_policy)
# Get current state of signal
print("\n# Current signal state before simulation:")
print(signal.current_state)
seconds = 18
input("\nPress Enter to run simulation for {} seconds".format(seconds))
print("\nRunning simulation for {} seconds...".format(seconds))
sim.run(seconds)
print("\n# Current signal state after simulation:")
print(signal.current_state)
print("\nDone!")
| #!/usr/bin/env python3
#
# Copyright (c) 2019-2021 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
from environs import Env
import lgsvl
print("Python API Quickstart #27: How to Control Traffic Light")
env = Env()
sim = lgsvl.Simulator(env.str("LGSVL__SIMULATOR_HOST", lgsvl.wise.SimulatorSettings.simulator_host), env.int("LGSVL__SIMULATOR_PORT", lgsvl.wise.SimulatorSettings.simulator_port))
if sim.current_scene == lgsvl.wise.DefaultAssets.map_borregasave:
sim.reset()
else:
sim.load(lgsvl.wise.DefaultAssets.map_borregasave, 42)
spawns = sim.get_spawn()
state = lgsvl.AgentState()
forward = lgsvl.utils.transform_to_forward(spawns[0])
state.transform = spawns[0]
state.transform.position = spawns[0].position + 20 * forward
sim.add_agent(env.str("LGSVL__VEHICLE_0", lgsvl.wise.DefaultAssets.ego_lincoln2017mkz_apollo5), lgsvl.AgentType.EGO, state)
# # Get a list of controllable objects
controllables = sim.get_controllables("signal")
print("\n# List of controllable objects in {} scene:".format(lgsvl.wise.DefaultAssets.map_borregasave))
for c in controllables:
print(c)
signal = sim.get_controllable(lgsvl.Vector(15.5, 4.7, -23.9), "signal")
print("\n# Signal of interest:")
print(signal)
# Get current controllable states
print("\n# Current control policy:")
print(signal.control_policy)
# Create a new control policy
control_policy = "trigger=50;green=3;yellow=2;red=1;loop"
# Control this traffic light with a new control policy
signal.control(control_policy)
print("\n# Updated control policy:")
print(signal.control_policy)
# Get current state of signal
print("\n# Current signal state before simulation:")
print(signal.current_state)
seconds = 18
input("\nPress Enter to run simulation for {} seconds".format(seconds))
print("\nRunning simulation for {} seconds...".format(seconds))
sim.run(seconds)
print("\n# Current signal state after simulation:")
print(signal.current_state)
print("\nDone!")
| en | 0.791747 | #!/usr/bin/env python3 # # Copyright (c) 2019-2021 LG Electronics, Inc. # # This software contains code licensed as described in LICENSE. # #27: How to Control Traffic Light") # # Get a list of controllable objects # List of controllable objects in {} scene:".format(lgsvl.wise.DefaultAssets.map_borregasave)) # Signal of interest:") # Get current controllable states # Current control policy:") # Create a new control policy # Control this traffic light with a new control policy # Updated control policy:") # Get current state of signal # Current signal state before simulation:") # Current signal state after simulation:") | 2.411056 | 2 |
stix2gen/ioc.py | botherder/stix2gen | 2 | 6613487 | import validators
from stix2.v21 import Bundle, DomainName, Indicator, Malware, Relationship
IOC_TYPE_DOMAIN = "domain"
IOC_TYPE_EMAIL = "email"
IOC_TYPE_IPV4 = "ipv4"
IOC_TYPE_IPV6 = "ipv6"
IOC_VALIDATORS = [
{
"type": IOC_TYPE_DOMAIN,
"validator": validators.domain,
"stix2_pattern": "[domain-name:value='{value}']",
},
{
"type": IOC_TYPE_EMAIL,
"validator": validators.email,
"stix2_pattern": "[email-addr:value='{value}']",
},
{
"type": IOC_TYPE_IPV4,
"validator": validators.ipv4,
"stix2_pattern": "[ipv4-addr:value='{value}']",
},
{
"type": IOC_TYPE_IPV6,
"validator": validators.ipv6,
"stix2_pattern": "[ipv6-addr:value='{value}']",
}
]
class IOC(object):
def __init__(self, ioc, ioc_type=None):
self.ioc = ioc
self.ioc_type = ioc_type
self.stix2_pattern = None
def clean(self):
self.ioc = self.ioc.strip().replace("[.]", ".").replace("[@]", "@")
def detect(self):
for validator in IOC_VALIDATORS:
if validator["validator"](self.ioc):
self.ioc_type = validator.get("type")
self.stix2_pattern = validator.get("stix2_pattern")
def stix2(self):
if not self.ioc_type:
return None
return Indicator(indicator_types=["malicious-activity"],
pattern_type="stix",
pattern=self.stix2_pattern.format(value=self.ioc))
| import validators
from stix2.v21 import Bundle, DomainName, Indicator, Malware, Relationship
IOC_TYPE_DOMAIN = "domain"
IOC_TYPE_EMAIL = "email"
IOC_TYPE_IPV4 = "ipv4"
IOC_TYPE_IPV6 = "ipv6"
IOC_VALIDATORS = [
{
"type": IOC_TYPE_DOMAIN,
"validator": validators.domain,
"stix2_pattern": "[domain-name:value='{value}']",
},
{
"type": IOC_TYPE_EMAIL,
"validator": validators.email,
"stix2_pattern": "[email-addr:value='{value}']",
},
{
"type": IOC_TYPE_IPV4,
"validator": validators.ipv4,
"stix2_pattern": "[ipv4-addr:value='{value}']",
},
{
"type": IOC_TYPE_IPV6,
"validator": validators.ipv6,
"stix2_pattern": "[ipv6-addr:value='{value}']",
}
]
class IOC(object):
def __init__(self, ioc, ioc_type=None):
self.ioc = ioc
self.ioc_type = ioc_type
self.stix2_pattern = None
def clean(self):
self.ioc = self.ioc.strip().replace("[.]", ".").replace("[@]", "@")
def detect(self):
for validator in IOC_VALIDATORS:
if validator["validator"](self.ioc):
self.ioc_type = validator.get("type")
self.stix2_pattern = validator.get("stix2_pattern")
def stix2(self):
if not self.ioc_type:
return None
return Indicator(indicator_types=["malicious-activity"],
pattern_type="stix",
pattern=self.stix2_pattern.format(value=self.ioc))
| none | 1 | 2.266931 | 2 | |
datapack/data/scripts/quests/161_FruitsOfMothertree/__init__.py | DigitalCoin1/L2SPERO | 0 | 6613488 | <gh_stars>0
# Made by Mr. Have fun!
# Version 0.3 by H1GHL4ND3R
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "161_FruitsOfMothertree"
ANDELLRIAS_LETTER = 1036
MOTHERTREE_FRUIT = 1037
ADENA = 57
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "30362-04.htm" :
st.set("cond","1")
st.setState(STARTED)
st.giveItems(ANDELLRIAS_LETTER,1)
st.playSound("ItemSound.quest_accept")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if id == CREATED :
if player.getRace().ordinal() != 1 :
htmltext = "30362-00.htm"
elif player.getLevel() >= 3 :
htmltext = "30362-03.htm"
st.set("cond","0")
else:
htmltext = "30362-02.htm"
st.exitQuest(1)
elif id == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif id == STARTED :
try :
cond = st.getInt("cond")
except :
cond = None
if cond == 1 :
if npcId == 30362 :
htmltext = "30362-05.htm"
elif npcId == 30371 and st.getQuestItemsCount(ANDELLRIAS_LETTER) :
htmltext = "30371-01.htm"
st.takeItems(ANDELLRIAS_LETTER,1)
st.giveItems(MOTHERTREE_FRUIT,1)
st.set("cond", "2")
elif cond == 2 :
if npcId == 30362 and st.getQuestItemsCount(MOTHERTREE_FRUIT) :
htmltext = "30362-06.htm"
st.giveItems(ADENA,100)
st.takeItems(MOTHERTREE_FRUIT,1)
st.addExpAndSp(1000,0)
st.unset("cond")
st.setState(COMPLETED)
st.playSound("ItemSound.quest_finish")
elif npcId == 30371 and st.getQuestItemsCount(MOTHERTREE_FRUIT) :
htmltext = "30371-02.htm"
return htmltext
QUEST = Quest(161,qn,"Fruits Of Mothertree")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30362)
QUEST.addTalkId(30362)
QUEST.addTalkId(30371)
STARTED.addQuestDrop(30371,MOTHERTREE_FRUIT,1)
STARTED.addQuestDrop(30362,ANDELLRIAS_LETTER,1) | # Made by Mr. Have fun!
# Version 0.3 by H1GHL4ND3R
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "161_FruitsOfMothertree"
ANDELLRIAS_LETTER = 1036
MOTHERTREE_FRUIT = 1037
ADENA = 57
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "30362-04.htm" :
st.set("cond","1")
st.setState(STARTED)
st.giveItems(ANDELLRIAS_LETTER,1)
st.playSound("ItemSound.quest_accept")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if id == CREATED :
if player.getRace().ordinal() != 1 :
htmltext = "30362-00.htm"
elif player.getLevel() >= 3 :
htmltext = "30362-03.htm"
st.set("cond","0")
else:
htmltext = "30362-02.htm"
st.exitQuest(1)
elif id == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif id == STARTED :
try :
cond = st.getInt("cond")
except :
cond = None
if cond == 1 :
if npcId == 30362 :
htmltext = "30362-05.htm"
elif npcId == 30371 and st.getQuestItemsCount(ANDELLRIAS_LETTER) :
htmltext = "30371-01.htm"
st.takeItems(ANDELLRIAS_LETTER,1)
st.giveItems(MOTHERTREE_FRUIT,1)
st.set("cond", "2")
elif cond == 2 :
if npcId == 30362 and st.getQuestItemsCount(MOTHERTREE_FRUIT) :
htmltext = "30362-06.htm"
st.giveItems(ADENA,100)
st.takeItems(MOTHERTREE_FRUIT,1)
st.addExpAndSp(1000,0)
st.unset("cond")
st.setState(COMPLETED)
st.playSound("ItemSound.quest_finish")
elif npcId == 30371 and st.getQuestItemsCount(MOTHERTREE_FRUIT) :
htmltext = "30371-02.htm"
return htmltext
QUEST = Quest(161,qn,"Fruits Of Mothertree")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30362)
QUEST.addTalkId(30362)
QUEST.addTalkId(30371)
STARTED.addQuestDrop(30371,MOTHERTREE_FRUIT,1)
STARTED.addQuestDrop(30362,ANDELLRIAS_LETTER,1) | en | 0.900403 | # Made by Mr. Have fun! # Version 0.3 by H1GHL4ND3R | 2.644285 | 3 |
cursoemvideo/python/exercicio/017(catetoHipotenusa).py | mateusjustino/cursos | 0 | 6613489 | <reponame>mateusjustino/cursos
from math import hypot
co = float(input('Qual o valor do cateto oposto: '))
ca = float(input('Qual o valor do cateto adjacente: '))
print('A hipotenusa vale {:.2f}'.format(hypot(co, ca)))
#sem importar o math
hi = (co ** 2 + ca ** 2) ** (1/2)
print('A hipotenusa vale {:.2f}'.format((hi)))
| from math import hypot
co = float(input('Qual o valor do cateto oposto: '))
ca = float(input('Qual o valor do cateto adjacente: '))
print('A hipotenusa vale {:.2f}'.format(hypot(co, ca)))
#sem importar o math
hi = (co ** 2 + ca ** 2) ** (1/2)
print('A hipotenusa vale {:.2f}'.format((hi))) | es | 0.14456 | #sem importar o math | 3.775636 | 4 |
plone/datadogmonitoring/Logger.py | zopyx/plone.datadogmonitoring | 0 | 6613490 | <filename>plone/datadogmonitoring/Logger.py
# $Id: Logger.py,v 1.1.1.1 2005-03-04 10:46:51 dieter Exp $
'''Logging long requests.'''
from zLOG import LOG, WARNING
def factory(config):
return handler
def handler(req, handlerState, globalState):
LOG('RequestMonitor.Logger', WARNING, 'Long running request',
'Request %s "%s" running in thread %s since %ss' % (
req.id,
req.info,
req.threadId,
handlerState.monitorTime - req.startTime,
)
)
| <filename>plone/datadogmonitoring/Logger.py
# $Id: Logger.py,v 1.1.1.1 2005-03-04 10:46:51 dieter Exp $
'''Logging long requests.'''
from zLOG import LOG, WARNING
def factory(config):
return handler
def handler(req, handlerState, globalState):
LOG('RequestMonitor.Logger', WARNING, 'Long running request',
'Request %s "%s" running in thread %s since %ss' % (
req.id,
req.info,
req.threadId,
handlerState.monitorTime - req.startTime,
)
)
| en | 0.229291 | # $Id: Logger.py,v 1.1.1.1 2005-03-04 10:46:51 dieter Exp $ Logging long requests. | 2.060132 | 2 |
HomePage/views.py | MidnightMadne33/Image-Blog | 0 | 6613491 | <reponame>MidnightMadne33/Image-Blog
from django.shortcuts import render, redirect
from .forms import SignUpForm
from UserPage.models import UserProfile
from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm
# Create your views here.
def Index(request):
return render(request, 'HomePage/Index.html')
def SignIn(request):
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
return redirect('UserPage:Profile')
else:
form = AuthenticationForm()
return render(request, 'HomePage/SignIn.html', {'forms':form})
def SignUp(request):
if request.method == 'POST':
form = SignUpForm(data=request.POST)
if form.is_valid():
user = form.save(commit=False)
user.set_password(<PASSWORD>)
user.save()
login(request, user)
if 'next' in request.POST:
return redirect(request.POST.get('next', ''))
else:
return redirect('UserPage:Profile')
else:
form = SignUpForm()
return render(request, 'HomePage/SignUp.html', {'forms':form})
def SignOut(request):
if request.method == 'POST':
logout(request)
return redirect('HomePage:Index')
| from django.shortcuts import render, redirect
from .forms import SignUpForm
from UserPage.models import UserProfile
from django.contrib.auth import login, logout
from django.contrib.auth.forms import AuthenticationForm
# Create your views here.
def Index(request):
return render(request, 'HomePage/Index.html')
def SignIn(request):
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
return redirect('UserPage:Profile')
else:
form = AuthenticationForm()
return render(request, 'HomePage/SignIn.html', {'forms':form})
def SignUp(request):
if request.method == 'POST':
form = SignUpForm(data=request.POST)
if form.is_valid():
user = form.save(commit=False)
user.set_password(<PASSWORD>)
user.save()
login(request, user)
if 'next' in request.POST:
return redirect(request.POST.get('next', ''))
else:
return redirect('UserPage:Profile')
else:
form = SignUpForm()
return render(request, 'HomePage/SignUp.html', {'forms':form})
def SignOut(request):
if request.method == 'POST':
logout(request)
return redirect('HomePage:Index') | en | 0.968116 | # Create your views here. | 2.284795 | 2 |
pysimulator/scenarios/defaults.py | SergeTarasov/pyMHT-simulator | 6 | 6613492 | maxSpeedMS = 22.0
eta2 = 5.99 # 95% confidence (2df)
eta2_ais = 9.45 # 95% confidence (4df)
M_required = 2
N_checks = 4
| maxSpeedMS = 22.0
eta2 = 5.99 # 95% confidence (2df)
eta2_ais = 9.45 # 95% confidence (4df)
M_required = 2
N_checks = 4
| en | 0.456337 | # 95% confidence (2df) # 95% confidence (4df) | 1.130827 | 1 |
src/visualization_helpers.py | edesz/predict-loan-defaults | 0 | 6613493 | <reponame>edesz/predict-loan-defaults
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing import cpu_count
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.metrics as mr
import sklearn.model_selection as ms
import yellowbrick.classifier as ybc
from joblib import Parallel, delayed
from matplotlib.ticker import FuncFormatter
from sklearn.inspection import permutation_importance
from sklearn.metrics import auc, roc_curve
from sklearn.utils.class_weight import compute_sample_weight
import src.ml_helpers as mlh
def customize_splines(ax: plt.axis) -> plt.axis:
    """Style an axis frame: bold black left/bottom spines, thin grey top/right.

    Returns the same axis so calls can be chained.
    """
    for side in ("left", "bottom"):
        spine = ax.spines[side]
        spine.set_edgecolor("black")
        spine.set_linewidth(2)
        spine.set_zorder(3)
    for side in ("top", "right"):
        spine = ax.spines[side]
        spine.set_edgecolor("lightgrey")
        spine.set_linewidth(1)
    return ax
def plot_horiz_bar_chart(data, ptitle, log_scale_x=True, fig_size=(8, 4)):
    """Plot a horizontal bar chart with a bold, left-aligned title.

    Parameters
    ----------
    data : pd.Series or pd.DataFrame
        Values to plot via pandas ``.plot(kind="barh")``.
    ptitle : str
        Plot title.
    log_scale_x : bool
        When True, x tick labels are passed through ``np.expm1`` —
        assumes *data* was log1p-transformed upstream (TODO confirm at
        call sites).
    fig_size : tuple
        Figure size in inches.
    """
    _, ax = plt.subplots(figsize=fig_size)
    data.plot(ax=ax, kind="barh", zorder=3)
    # pandas adds a legend for barh plots; the y-axis labels already
    # identify each bar, so drop it.
    ax.get_legend().remove()
    ax = add_gridlines(ax)
    ax.set_title(ptitle, loc="left", fontweight="bold", fontsize=14)
    for ax_dir in ["x", "y"]:
        ax.tick_params(axis=ax_dir, labelsize=14)
    if log_scale_x:
        # Invert a log1p transform so tick labels show original counts,
        # thousands-separated.
        ax.get_xaxis().set_major_formatter(
            FuncFormatter(lambda x, p: format(int(np.expm1(x)), ","))
        )
    else:
        # Plain thousands-separated integer tick labels.
        ax.get_xaxis().set_major_formatter(
            FuncFormatter(lambda x, p: format(int(x), ","))
        )
    _ = customize_splines(ax)
def plot_verticalbar_pie_chart(
    data, pie_var, bar_title, pie_title, fig_size=(12, 6)
):
    """Show loan-status class balance as a count plot plus a pie chart.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain a ``loan_status`` column (left count plot) and the
        column named by ``pie_var``.
    pie_var : str
        Column whose value counts feed the pie chart; ``explode`` supplies
        exactly two offsets, so this assumes two categories — TODO confirm.
    bar_title, pie_title : str
        Titles for the left (bar) and right (pie) panels.
    fig_size : tuple
        Figure size in inches.
    """
    # Fix: honour the fig_size parameter (the figure was hard-coded to
    # (12, 6), silently ignoring the argument).
    _, axs = plt.subplots(1, 2, figsize=fig_size)
    sns.countplot(
        x="loan_status",
        data=data,
        ax=axs[0],
        palette=["red", "green"],
        zorder=3,
    )
    axs[0].set_title(
        bar_title,
        loc="left",
        fontweight="bold",
        fontsize=14,
    )
    axs[0].set_ylabel(None)
    axs[0].set_xlabel(None)
    axs[0] = add_gridlines(axs[0])
    # Thousands-separated integer y tick labels.
    axs[0].get_yaxis().set_major_formatter(
        FuncFormatter(lambda x, p: format(int(x), ","))
    )
    for ax_dir in ["x", "y"]:
        axs[0].tick_params(axis=ax_dir, labelsize=14)
    data[pie_var].value_counts().plot(
        x=None,
        y=None,
        kind="pie",
        explode=(0, 0.1),
        fontsize=14,
        shadow=False,
        ax=axs[1],
        autopct="%1.2f%%",
        colors=["green", "red"],
    )
    axs[1].set_title(
        pie_title,
        loc="center",
        fontweight="bold",
        fontsize=14,
    )
    axs[1].set_ylabel(None)
    # Only the bar panel gets the custom spines; the pie axis has no frame.
    _ = customize_splines(axs[0])
def add_gridlines(ax: plt.axis):
    """Enable light-grey gridlines on both axes and return the axis."""
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    for axis in (ax.xaxis, ax.yaxis):
        axis.grid(True)
    return ax
def plot_learning_curve(
    estimator,
    title,
    X,
    y,
    cv=None,
    scorer="f1",
    n_jobs=-1,
    train_sizes=np.linspace(0.1, 1.0, 5),
    legend_coords=(0.9, 1.15),
    axis_tick_label_fontsize=12,
    fig_size=(20, 5),
):
    """Plot learning-curve diagnostics in three stacked panels.

    Panels: (1) train/test score vs training size, (2) fit time vs
    training size, (3) test score vs fit time. Shaded bands show +/- one
    standard deviation across CV folds.

    Parameters
    ----------
    estimator : sklearn estimator or Pipeline
        Model evaluated by ``sklearn.model_selection.learning_curve``.
    title : str
        Prefix for the first panel's title.
    X, y : array-like
        Features and labels.
    cv : CV splitter, optional
        Cross-validation strategy.
    scorer : str or callable
        Scoring passed through to ``learning_curve``. Fix: the previous
        default ``"f1_score"`` is not a valid sklearn scoring string
        (it raises ValueError); the valid name is ``"f1"``.
    n_jobs : int
        Parallel jobs (-1 uses all cores).
    train_sizes : array-like
        Relative training-set sizes to evaluate.
    legend_coords : tuple
        ``bbox_to_anchor`` for the first panel's legend.
    axis_tick_label_fontsize : int
        Font size for titles and tick labels.
    fig_size : tuple
        Figure size in inches.
    """
    _, axes = plt.subplots(3, 1, figsize=fig_size)
    axes[0].set_title(
        title + " versus Training size",
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_fontsize,
    )
    axes[0].set_xlabel(None)
    train_sizes, train_scores, test_scores, fit_times, _ = ms.learning_curve(
        estimator,
        X,
        y,
        cv=cv,
        scoring=scorer,
        n_jobs=n_jobs,
        train_sizes=train_sizes,
        return_times=True,
    )
    # Aggregate per-fold results: mean curve plus a +/- 1 std band.
    train_split_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_split_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    fit_times_mean = np.mean(fit_times, axis=1)
    fit_times_std = np.std(fit_times, axis=1)
    # Panel 1: learning curve (score vs training size)
    axes[0].grid()
    axes[0].fill_between(
        train_sizes,
        train_split_scores_mean - train_scores_std,
        train_split_scores_mean + train_scores_std,
        alpha=0.1,
        color="r",
    )
    axes[0].fill_between(
        train_sizes,
        test_split_scores_mean - test_scores_std,
        test_split_scores_mean + test_scores_std,
        alpha=0.1,
        color="g",
    )
    axes[0].plot(
        train_sizes, train_split_scores_mean, "o-", color="r", label="Train"
    )
    axes[0].plot(
        train_sizes, test_split_scores_mean, "o-", color="g", label="Test"
    )
    axes[0].legend(
        loc="upper left",
        bbox_to_anchor=legend_coords,
        ncol=2,
        frameon=False,
        handletextpad=0.3,
        columnspacing=0.4,
    )
    axes[0].xaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
    # Panel 2: scalability (fit time vs training size)
    axes[1].grid()
    axes[1].plot(train_sizes, fit_times_mean, "o-")
    axes[1].fill_between(
        train_sizes,
        fit_times_mean - fit_times_std,
        fit_times_mean + fit_times_std,
        alpha=0.1,
    )
    axes[1].set_xlabel(None)
    axes[1].set_title(
        "Fit times (sec) versus Training size",
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_fontsize,
    )
    axes[1].xaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
    # Panel 3: performance (test score vs fit time)
    axes[2].grid()
    axes[2].plot(fit_times_mean, test_split_scores_mean, "o-")
    axes[2].fill_between(
        fit_times_mean,
        test_split_scores_mean - test_scores_std,
        test_split_scores_mean + test_scores_std,
        alpha=0.1,
    )
    axes[2].set_xlabel(None)
    axes[2].set_title(
        "Test score versus Training time (sec)",
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_fontsize,
    )
    for ax in axes:
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
        _ = customize_splines(ax)
def builtin_plot_permutation_importances(
    pipe,
    X_train,
    X_test,
    y_train,
    y_test,
    scorer,
    n_repeats,
    wspace=0.5,
    fig_title_fontsize=16,
    fig_title_vertical_pos=1.1,
    axis_tick_label_fontsize=12,
    axis_label_fontsize=14,
    box_color="cyan",
    fig_size=(12, 6),
):
    """Plot sklearn permutation importances side by side for train and test.

    Parameters
    ----------
    pipe : sklearn Pipeline
        Fitted pipeline with a ``clf`` step (its class name goes in the
        figure title).
    X_train, X_test : pd.DataFrame
        Feature splits; column names label the y axis.
    y_train, y_test : array-like
        Corresponding labels.
    scorer : sklearn scorer object
        Must expose ``_score_func`` (as built by ``make_scorer``); its
        name, stripped of "_score"/"threshold_", is shown in the title.
    n_repeats : int
        Number of permutation passes per feature.
    wspace : float
        Horizontal spacing between the two panels.
    fig_title_fontsize, fig_title_vertical_pos : number
        Figure suptitle styling.
    axis_tick_label_fontsize, axis_label_fontsize : int
        Font sizes for tick labels and the x-axis label.
    box_color : str
        Box fill colour.
    fig_size : tuple
        Figure size in inches.
    """
    # Derive a short metric name from the scorer, e.g.
    # "threshold_recall_score" -> "recall".
    scorer_name = scorer._score_func.__name__.split("_score")[0].replace(
        "threshold_", ""
    )
    fig_title = (
        f"{scorer_name.upper()} Permutation Importances using "
        f"{type(pipe.named_steps['clf']).__name__}"
    )
    fig = plt.figure(figsize=fig_size)
    fig.suptitle(
        fig_title,
        fontsize=fig_title_fontsize,
        fontweight="bold",
        y=fig_title_vertical_pos,
    )
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    # One panel per split: left = train, right = test.
    for Xs, ys, ax, split_name in zip(
        [X_train, X_test], [y_train, y_test], [ax1, ax2], ["train", "test"]
    ):
        result = permutation_importance(
            pipe,
            Xs,
            ys,
            scoring=scorer,
            n_repeats=n_repeats,
            random_state=42,
            n_jobs=-1,
        )
        # Sort features by mean importance; [::-1] puts the most important
        # feature at the top of the horizontal box plot.
        sorted_idx = result.importances_mean.argsort()
        sns.boxplot(
            data=result.importances[sorted_idx][::-1].T,
            orient="h",
            color=box_color,
            saturation=0.5,
            zorder=3,
            ax=ax,
        )
        # Zero line: features whose box crosses it have unstable importance.
        ax.axvline(x=0, color="k", ls="--")
        ax.set_yticks(range(len(sorted_idx)))
        ax.set_yticklabels(Xs.columns[sorted_idx][::-1])
        ax.set_title(
            f"{split_name.title()}",
            loc="left",
            fontweight="bold",
            fontsize=axis_tick_label_fontsize,
        )
        ax.set_xlabel(
            f"Change in avg. score, over {n_repeats} passes through the data",
            fontsize=axis_label_fontsize,
        )
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
        ax.xaxis.grid(True, which="major", color="lightgrey", zorder=0)
        _ = customize_splines(ax)
def plot_coefs(
    coefs,
    ptitle="Coefficient variability",
    axis_tick_label_font_size=12,
    fig_size=(9, 7),
):
    """Box plot of model coefficients across CV folds.

    Parameters
    ----------
    coefs : pd.DataFrame
        One row per CV fit, one column per feature coefficient.
    ptitle : str
        Plot title.
    axis_tick_label_font_size : int
        Font size for the title and tick labels.
    fig_size : tuple
        Figure size in inches.
    """
    _, ax = plt.subplots(figsize=fig_size)
    sns.boxplot(
        data=coefs, orient="h", color="cyan", saturation=0.5, zorder=3, ax=ax
    )
    # Zero reference line: boxes crossing it flip sign across folds.
    ax.axvline(x=0, color="k", ls="--")
    ax.set_title(
        ptitle,
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_font_size,
    )
    ax.xaxis.set_tick_params(labelsize=axis_tick_label_font_size)
    ax.yaxis.set_tick_params(labelsize=axis_tick_label_font_size)
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    _ = customize_splines(ax)
def iterate_discrimination_thresholds(pipe, X, y, cv, threshold_values_array):
    """Score a pipeline across CV splits for each discrimination threshold.

    Runs ``mlh.fit_predict_threshold`` in parallel — one task per
    (threshold, CV split) pair — and collects the score records.

    Parameters
    ----------
    pipe : sklearn Pipeline
        Estimator fitted on each training split.
    X, y : array-like
        Features and labels given to the CV splitter and the fit helper.
    cv : sklearn CV splitter
        Yields (train, test) index pairs.
    threshold_values_array : np.ndarray
        Discrimination thresholds to sweep.

    Returns
    -------
    pd.DataFrame
        One row per (threshold, split) with the scores returned by the
        helper; ``threshold`` is cast to str for stable groupby keys.
    """
    executor = Parallel(n_jobs=cpu_count(), backend="multiprocessing")
    tasks = [
        delayed(mlh.fit_predict_threshold)(pipe, X, y, train, test, thresh)
        for thresh in np.nditer(threshold_values_array)
        for train, test in cv.split(X, y)
    ]
    scores = executor(tasks)
    df_scores = pd.DataFrame.from_records(scores).astype({"threshold": str})
    return df_scores
def plot_discrimination_threshold(
    scores,
    pipe,
    metrics=("recall", "fpr", "auc"),
    split_types=("test",),
    ax=None,
):
    """Plot mean metric curves (with min-max bands) vs threshold.

    Parameters
    ----------
    scores : pd.DataFrame
        Per-fold scores from ``iterate_discrimination_thresholds``, with a
        ``threshold`` column and ``<split>_<metric>`` score columns.
    pipe : sklearn Pipeline
        Pipeline with a ``clf`` step (used only for the plot title).
    metrics : iterable of str
        Metric names to plot; each must exist as a ``<split>_<metric>``
        column in *scores*.
    split_types : iterable of str
        Data splits to plot ("train" and/or "test").
    ax : matplotlib axis, optional
        Axis to draw on; a new figure is created when omitted.
    """
    grouped_scores = scores.groupby("threshold").agg(
        {
            k: ["mean", "min", "max"]
            for k in [f"{s}_{m}" for m in metrics for s in split_types]
        }
    )
    if ax is None:
        _, ax = plt.subplots(figsize=(8, 4))
    for split_type in split_types:
        curves = {}
        # Fix: iterate the *requested* metrics instead of a hard-coded
        # ["recall", "fpr", "auc"] list, which raised KeyError whenever a
        # caller passed a subset of metrics.
        for metric in metrics:
            for aggfunc in ["mean", "min", "max"]:
                key = f"{split_type}_{metric}_{aggfunc}"
                col = grouped_scores.loc[
                    slice(None), [(f"{split_type}_{metric}", aggfunc)]
                ]
                if aggfunc in ["min", "max"]:
                    curves[key] = col.squeeze()
                else:
                    # Flatten the (column, aggfunc) MultiIndex so the plot
                    # label can be derived from the series name.
                    col.columns = col.columns.map("_".join).str.strip("_")
                    curves[key] = col
        for metric in metrics:
            mean_series = curves[f"{split_type}_{metric}_mean"].squeeze()
            mean_series.plot(
                ax=ax,
                label="_".join(mean_series.name.split("_", 2)[:2]),
            )
            # Fix: reference the min/max curves explicitly — the original
            # indexed with the loop variable `aggfunc` leaked from the
            # aggregation loop above.
            ax.fill_between(
                curves[f"{split_type}_{metric}_min"].index,
                curves[f"{split_type}_{metric}_min"],
                curves[f"{split_type}_{metric}_max"],
                alpha=1.0,
                lw=1,
            )
    ax.legend()
    ax.set_xlabel(None)
    ax.set_title(
        (
            "Discrimination Threshold Plot for "
            f"{type(pipe.named_steps['clf']).__name__}"
        )
    )
def show_yb_grid(
    estimator,
    X_test,
    y_test,
    classes,
    X,
    y,
    threshold_cv,
    threshold_values,
    wspace=0.6,
    hspace=0.6,
    fig_size: tuple = (16, 8),
):
    """Show a 3x2 grid of classifier diagnostic plots.

    Panels: class prediction error, discrimination-threshold sweep,
    confusion matrix, classification report, Yellowbrick ROCAUC, and the
    manually computed ROC curve from ``plot_roc_curve``.

    Parameters
    ----------
    estimator : sklearn estimator or Pipeline
        Classifier; Yellowbrick decides whether to refit
        (``is_fitted="auto"``).
    X_test, y_test : array-like
        Held-out split given to the Yellowbrick visualizers.
    classes : list
        Class display names for the visualizers.
    X, y : array-like
        Full data used for the discrimination-threshold sweep.
    threshold_cv : CV splitter
        Splitter for the threshold sweep.
    threshold_values : np.ndarray
        Thresholds to evaluate.
    wspace, hspace : float
        Grid spacing.
    fig_size : tuple
        Figure size in inches.
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(3, 2, hspace=hspace, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    ax3 = fig.add_subplot(grid[1, 0])
    ax4 = fig.add_subplot(grid[1, 1])
    ax5 = fig.add_subplot(grid[2, 0])
    ax6 = fig.add_subplot(grid[2, 1])
    for k, (viz_func, ax) in enumerate(
        zip(
            [
                ybc.ClassPredictionError,
                ybc.ConfusionMatrix,
                ybc.ClassificationReport,
                ybc.ROCAUC,
            ],
            [ax1, ax3, ax4, ax5],
        )
    ):
        visualizer = viz_func(
            estimator,
            classes=classes,
            is_fitted="auto",
            ax=ax,
        )
        # NOTE(review): visualizers are both fit and scored on the test
        # split here — confirm that is intended (vs fitting on train).
        visualizer.fit(X_test, y_test)
        visualizer.score(X_test, y_test)
        visualizer.finalize()
        if k == 0:
            # Drop the legend only for the class-prediction-error panel.
            ax.get_legend().remove()
    plot_discrimination_threshold(
        iterate_discrimination_thresholds(
            estimator, X, y, threshold_cv, threshold_values
        ),
        estimator,
        ["recall", "fpr", "auc"],
        ["test"],
        ax2,
    )
    plot_roc_curve(estimator, X_test, y_test, ax=ax6)
def plot_cross_validated_coefs(
    pipe,
    numerical_columns,
    nominal_columns,
    X_train,
    X_test,
    y_train,
    y_test,
    scorer,
    n_repeats=5,
    n_splits=5,
    axis_tick_label_fontsize=12,
    fig_size=(8, 12),
):
    """Box-plot the spread of linear-model coefficients over repeated CV.

    Refits *pipe* on ``RepeatedKFold`` splits of the combined train+test
    data, collects each fit's ``clf`` coefficients, and hands them to
    ``plot_coefs``.

    Parameters
    ----------
    pipe : sklearn Pipeline
        Must have a ``preprocessor`` step containing an ``onehot``
        transformer, and a ``clf`` step exposing ``coef_``.
    numerical_columns, nominal_columns : list of str
        Numeric pass-through columns and one-hot-encoded columns; used to
        rebuild the post-transform feature names.
    X_train, X_test, y_train, y_test : array-like
        Splits; they are concatenated before cross-validation.
    scorer : str or sklearn scorer
        Scoring passed to ``cross_validate``.
    n_repeats, n_splits : int
        RepeatedKFold configuration.
    axis_tick_label_fontsize : int
        Forwarded to ``plot_coefs``.
    fig_size : tuple
        Forwarded to ``plot_coefs``.
    """
    # NOTE(review): get_feature_names was removed in newer scikit-learn in
    # favour of get_feature_names_out — confirm the pinned sklearn version.
    feature_names = (
        pipe.named_steps["preprocessor"]
        .named_transformers_["onehot"]
        .get_feature_names(input_features=nominal_columns)
    )
    # Final feature order: numeric columns first, then one-hot columns —
    # must match the order of clf.coef_.
    feature_names = np.concatenate([numerical_columns, feature_names])
    cv_model = ms.cross_validate(
        pipe,
        X=pd.concat([X_train, X_test]),
        y=pd.concat([y_train, y_test]),
        cv=ms.RepeatedKFold(
            n_splits=n_splits, n_repeats=n_repeats, random_state=42
        ),
        scoring=scorer,
        return_train_score=True,
        return_estimator=True,
        n_jobs=-1,
    )
    # One row of coefficients per CV fit.
    coefs = pd.DataFrame(
        [
            est.named_steps["clf"].coef_.flatten()
            for est in cv_model["estimator"]
        ],
        columns=feature_names,
    )
    # Order columns by descending mean coefficient for a readable plot.
    coefs = coefs[coefs.mean(axis=0).sort_values(ascending=False).index]
    plot_coefs(
        coefs, "Coefficient variability", axis_tick_label_fontsize, fig_size
    )
def plot_grouped_bar_chart(
    df, groupby, col_to_plot, wspace=0.5, fig_size=(8, 4)
):
    """Compare a category's share among flagged rows vs the whole data.

    Left panel: percentage breakdown of ``groupby`` among rows where the
    boolean column ``col_to_plot`` is True (e.g. ``misclassified``).
    Right panel: the same breakdown over all rows.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain ``groupby`` (categorical) and ``col_to_plot``
        (boolean mask).
    groupby : str
        Categorical column to break down.
    col_to_plot : str
        Boolean column selecting the subset for the left panel.
    wspace : float
        Horizontal spacing between the two panels.
    fig_size : tuple
        Figure size in inches.
    """
    misclassified_str = "(pct. misclassified)"
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    # Fix: the original sorted descending and then immediately ascending;
    # a single ascending sort produces the identical bar order.
    (
        100 * (df[df[col_to_plot]][groupby].value_counts(normalize=True))
    ).sort_values(ascending=True).plot.barh(ax=ax1, rot=0, zorder=3)
    ax1.set_ylabel(None)
    ax1.set_title(
        f"{groupby.title()} {misclassified_str}", loc="left", fontweight="bold"
    )
    (100 * (df[groupby].value_counts(normalize=True))).sort_values(
        ascending=True
    ).plot.barh(ax=ax2, rot=0, zorder=3)
    ax2.set_title(
        groupby.title() + " (pct. overall)", loc="left", fontweight="bold"
    )
    for ax in [ax1, ax2]:
        ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
        _ = customize_splines(ax)
        _ = add_gridlines(ax)
def plot_grouped_histogram(
    df,
    col_to_plot,
    legend_loc=(0.9, 1.1),
    alpha=0.5,
    wspace=0.2,
    fig_size=(8, 6),
):
    """Overlay histograms of a column for misclassified vs all rows.

    Left panel: defaulted loans (``is_default == 1``); right panel: paid
    on-time loans (``is_default == 0``). Each panel overlays the
    misclassified subset ("M") on the full population ("All").

    Parameters
    ----------
    df : pd.DataFrame
        Must contain boolean ``misclassified``, 0/1 ``is_default``, and
        the numeric column ``col_to_plot``.
    col_to_plot : str
        Column to histogram.
    legend_loc : tuple
        ``bbox_to_anchor`` for each panel's legend.
    alpha : float
        Histogram transparency (the two series overlap).
    wspace : float
        Horizontal spacing between the panels.
    fig_size : tuple
        Figure size in inches.
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    # Fix: compute the misclassified subset once — it was rebuilt,
    # unchanged, before the second panel.
    dfm = df[df["misclassified"]]
    ax1.hist(dfm[dfm["is_default"] == 1][col_to_plot], alpha=alpha, label="M")
    ax1.hist(
        df[df["is_default"] == 1][col_to_plot],
        alpha=alpha,
        label="All",
    )
    ax1.set_title(
        "Defaulted Loans by " + col_to_plot.title(),
        loc="left",
        fontweight="bold",
    )
    ax1.legend(
        loc="upper left",
        ncol=2,
        bbox_to_anchor=legend_loc,
        handletextpad=0.2,
        columnspacing=0.2,
        frameon=False,
    )
    ptitle = f"Paid on-time loans by {col_to_plot.title()}"
    ax2.hist(dfm[dfm["is_default"] == 0][col_to_plot], alpha=alpha, label="M")
    ax2.hist(df[df["is_default"] == 0][col_to_plot], alpha=alpha, label="All")
    ax2.set_title(ptitle, loc="left", fontweight="bold")
    ax2.legend(
        loc="upper left",
        ncol=2,
        bbox_to_anchor=legend_loc,
        handletextpad=0.2,
        columnspacing=0.2,
        frameon=False,
    )
    for ax in [ax1, ax2]:
        _ = add_gridlines(ax)
def plot_roc_curve(
    pipe, X_test, y_test, handletextpad=0.5, ax=None, fig_size=(6, 6)
):
    """Plot per-class ROC curves (class-balanced sample weights) with AUCs.

    Parameters
    ----------
    pipe : sklearn Pipeline
        Fitted pipeline with ``predict_proba`` and a ``clf`` step (class
        name used in the title).
    X_test : array-like
        Features to score.
    y_test : pd.Series
        True labels; one-hot binarized via ``pd.get_dummies``.
    handletextpad : float
        Legend spacing.
    ax : matplotlib axis, optional
        Axis to draw on; a new figure is created when falsy.
    fig_size : tuple
        Figure size in inches (only used when creating a new figure).
    """
    y_score = pipe.predict_proba(X_test)
    y_test_binarized = pd.get_dummies(y_test).to_numpy()
    fpr, tpr, roc_auc = ({} for i in range(3))
    # One ROC curve per class, weighting samples to balance the classes.
    for i in range(y_test.nunique()):
        fpr[i], tpr[i], _ = roc_curve(
            y_test_binarized[:, i],
            y_score[:, i],
            sample_weight=compute_sample_weight(
                class_weight="balanced", y=y_test
            ),
        )
        roc_auc[i] = auc(fpr[i], tpr[i])
    if not ax:
        _, ax = plt.subplots(figsize=fig_size)
    # NOTE(review): the dicts above are keyed 0..n-1 but indexed here by
    # the class *values* — this assumes labels are exactly 0..n-1 (true
    # for 0/1 is_default); confirm before reusing with other label sets.
    for class_val in list(np.sort(y_test.unique())):
        ax.plot(
            fpr[class_val],
            tpr[class_val],
            lw=2,
            label=f"ROC of class {class_val}, AUC = {roc_auc[class_val]:.2f}",
        )
    # Chance diagonal.
    ax.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel(None)
    ax.set_ylabel(None)
    model_name = type(pipe.named_steps["clf"]).__name__
    ax.set_title(f"Manual ROC (TPR vs FPR) Curves for {model_name}")
    ax.legend(
        loc="lower right",
        frameon=False,
        handletextpad=handletextpad,
    )
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    _ = customize_splines(ax)
def plot_pr_roc_curves(
    y_test,
    y_probs,
    est_name,
    axis_tick_label_fontsize=12,
    wspace=0.1,
    legend_position=(0.35, 1.1),
    f2_beta=2,
    fig_size=(12, 4),
):
    """Plot Precision-Recall and ROC curves with best-threshold markers.

    The PR panel marks (and annotates with the threshold value) the point
    maximising the F-beta score; the ROC panel marks the point maximising
    the geometric mean of TPR and (1 - FPR).

    Parameters
    ----------
    y_test : array-like
        True binary labels (positive class == 1).
    y_probs : array-like
        Predicted probabilities for the positive class.
    est_name : str
        Estimator name used in the legends.
    axis_tick_label_fontsize : int
        Tick label font size.
    wspace : float
        Horizontal spacing between the two panels.
    legend_position : tuple
        ``bbox_to_anchor`` for the ROC panel's legend.
    f2_beta : number
        Beta for the F-beta score (2 weights recall over precision).
    fig_size : tuple
        Figure size in inches.
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    p, r, p_thresholds = mr.precision_recall_curve(y_test, y_probs)
    # Fix: the F-beta ratio is 0/0 (NaN) wherever precision and recall are
    # both zero; np.argmax would return the first NaN's index and mark a
    # bogus "best" point. Use nanargmax and silence the expected warnings.
    with np.errstate(divide="ignore", invalid="ignore"):
        f2score = ((1 + (f2_beta ** 2)) * p * r) / (((f2_beta ** 2) * p) + r)
    ix = np.nanargmax(f2score)
    # Baseline: a no-skill classifier's precision equals the positive rate.
    no_skill = len(y_test[y_test == 1]) / len(y_test)
    ax1.plot([0, 1], [no_skill, no_skill], ls="--", label="No Skill")
    ax1.plot(r, p, label=est_name)
    ax1.set_title("Precision-Recall", loc="left", fontweight="bold")
    ax1.annotate(str(np.round(p_thresholds[ix], 3)), (r[ix], p[ix]))
    ax1.scatter(
        r[ix],
        p[ix],
        marker="o",
        color="black",
        label="Best",
        zorder=3,
    )
    fpr, tpr, r_thresholds = mr.roc_curve(y_test, y_probs)
    gmeans = np.sqrt(tpr * (1 - fpr))
    ix = np.argmax(gmeans)
    no_skill = [0, 1]
    ax2.plot(no_skill, no_skill, ls="--", label="No Skill")
    ax2.plot(fpr, tpr, label=est_name)
    ax2.set_title("ROC-AUC", loc="left", fontweight="bold")
    ax2.annotate(str(np.round(r_thresholds[ix], 3)), (fpr[ix], tpr[ix]))
    ax2.scatter(
        fpr[ix],
        tpr[ix],
        marker="o",
        color="black",
        label="Best",
        zorder=3,
    )
    ax2.legend(
        loc="upper left",
        bbox_to_anchor=legend_position,
        columnspacing=0.4,
        handletextpad=0.2,
        frameon=False,
        ncol=3,
    )
    for ax in [ax1, ax2]:
        _ = customize_splines(ax)
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.grid(which="both", axis="both", color="lightgrey", zorder=10)
        ax.xaxis.grid(True)
        ax.yaxis.grid(True)
def plot_lower_corr_heatmap(
    df_corr,
    ptitle,
    lw=1,
    annot_dict=None,
    ptitle_y_loc=1,
    show_cbar=False,
    cbar_shrink_factor=1,
    fig_size=(10, 10),
):
    """Draw the lower triangle of a correlation matrix as a heatmap.

    Parameters
    ----------
    df_corr : pd.DataFrame
        Square correlation matrix (values in [-1, 1]).
    ptitle : str
        Plot title.
    lw : float
        Line width between heatmap cells.
    annot_dict : dict, optional
        Single-item mapping {annotate_cells: format_string}; defaults to
        ``{True: ".2f"}`` (annotate cells with two-decimal values).
    ptitle_y_loc : float
        Vertical position of the title.
    show_cbar : bool
        Whether to draw the colour bar.
    cbar_shrink_factor : float
        Shrink factor for the colour bar.
    fig_size : tuple
        Figure size in inches.
    """
    # Fix: avoid a mutable default argument (the dict was shared across
    # calls); the effective default behaviour is unchanged.
    if annot_dict is None:
        annot_dict = {True: ".2f"}
    _, ax = plt.subplots(figsize=fig_size)
    # Mask the upper triangle (incl. diagonal) so only unique pairs show.
    mask = np.triu(np.ones_like(df_corr, dtype=bool))
    sns.heatmap(
        df_corr,
        mask=mask,
        vmin=-1,
        vmax=1,
        center=0,
        cmap=sns.diverging_palette(220, 10, as_cmap=True),
        square=True,
        ax=ax,
        annot=list(annot_dict.keys())[0],
        cbar=show_cbar,
        linewidths=lw,
        cbar_kws={"shrink": cbar_shrink_factor},
        fmt=list(annot_dict.values())[0],
    )
    ax.set_title(ptitle, loc="left", fontweight="bold", y=ptitle_y_loc)
    ax.tick_params(left=False, bottom=False)
def plot_single_column_histogram(df, colname, ptitle, fig_size=(8, 4)):
    """Histogram of one column with dashed median/mean reference lines."""
    series = df[colname]
    _, ax = plt.subplots(figsize=fig_size)
    series.plot(kind="hist", ax=ax, lw=1.25, edgecolor="w", label="")
    ax.set_title(ptitle, fontweight="bold", loc="left")
    ax.set_ylabel(None)
    # Dashed vertical markers at the median (black) and mean (red).
    for value, label, color in [
        (series.median(), "Median", "k"),
        (series.mean(), "Avg", "r"),
    ]:
        ax.axvline(x=value, label=label, color=color, ls="--")
    ax.xaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
    ax.legend(
        loc="upper left",
        bbox_to_anchor=(0.76, 1.1),
        ncol=2,
        handletextpad=0.2,
        columnspacing=0.2,
    )
def plot_boxplot_using_quantiles(
    boxes,
    ptitle,
    axis_tick_label_fontsize=12,
    fig_size=(6, 4),
):
    """Draw box plots from precomputed quantile stats via ``Axes.bxp``.

    Parameters
    ----------
    boxes : list of dict
        Precomputed box stats in the format ``Axes.bxp`` expects
        (``med``, ``q1``, ``q3``, ``whislo``, ``whishi``, ...).
        NOTE(review): ``positions=[1, 1.5]`` supplies exactly two slots,
        so this assumes len(boxes) == 2 — confirm at call sites.
    ptitle : str
        Plot title.
    axis_tick_label_fontsize : int
        Tick label font size.
    fig_size : tuple
        Figure size in inches.
    """
    _, ax = plt.subplots(figsize=fig_size)
    bxp1 = ax.bxp(
        boxes,
        positions=[1, 1.5],
        widths=0.35,
        showfliers=False,
        patch_artist=True,
        whiskerprops=dict(linewidth=1.25, color="black"),
        capprops=dict(linewidth=1.25, color="black"),
        boxprops=dict(linewidth=1.25),
        medianprops=dict(linewidth=1.5, color="cyan"),
    )
    # patch_artist=True returns fillable patches; colour them uniformly.
    for patch in bxp1["boxes"]:
        patch.set(facecolor="steelblue")
    ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
    ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    _ = customize_splines(ax)
    _ = add_gridlines(ax)
    ax.set_xlabel(None)
    ax.set_title(
        ptitle,
        loc="left",
        fontweight="bold",
    )
    ax.yaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
def plot_multiple_boxplots(
    df,
    x,
    y_s,
    ptitles,
    axis_tick_label_fontsize=12,
    x_ticks_formatter="{x:,.0f}",
    fig_size=(12, 4),
):
    """Draw side-by-side boxplots of several columns against one grouper.

    Args:
        df: DataFrame holding the data.
        x: column used as the x-axis grouping variable.
        y_s: list of column names; one boxplot panel per entry.
        ptitles: per-panel titles (parallel to ``y_s``).
        axis_tick_label_fontsize: tick-label font size; default 12.
        x_ticks_formatter: StrMethodFormatter spec for the y-axis, or a
            falsy value to keep the default formatter; default "{x:,.0f}".
        fig_size: figure size in inches; default (12, 4).
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, len(y_s), wspace=0.2)
    # Iterate over every requested column. This was hard-coded to
    # range(2), which silently ignored columns beyond the first two even
    # though the GridSpec was already sized by len(y_s).
    for c in range(len(y_s)):
        ax = fig.add_subplot(grid[0, c])
        sns.boxplot(x=x, y=y_s[c], ax=ax, data=df)
        ax.set_xlabel(None)
        ax.set_ylabel(None)
        if x_ticks_formatter:
            ax.yaxis.set_major_formatter(
                mtick.StrMethodFormatter(x_ticks_formatter)
            )
        ax.set_title(ptitles[c], loc="left", fontweight="bold")
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.grid(True, color="lightgrey")
        ax.spines["bottom"].set_edgecolor("black")
        ax.spines["bottom"].set_linewidth(1.5)
def plot_multi_catplot(
    df,
    x,
    y,
    cat_columns,
    ptitles,
    x_ticks_formatter="{x:,.0f}",
    plot_color="red",
    axis_tick_label_fontsize=12,
    fig_height=4,
    fig_aspect_ratio=1.25,
):
    """Draw single-colour bar catplots faceted by a categorical column.

    The facet whose title equals the last entry of ``ptitles`` is
    mirrored: its ticks and visible spines move to the top/right edge so
    adjacent panels face each other.

    Args:
        df: DataFrame holding the data.
        x: column plotted on the x-axis.
        y: column plotted on the y-axis.
        cat_columns: column used to split the plot into facets.
        ptitles: per-facet titles (parallel to the facet axes).
        x_ticks_formatter: StrMethodFormatter spec for the y-axis, or a
            falsy value to keep the default; default "{x:,.0f}".
        plot_color: bar colour; default "red".
        axis_tick_label_fontsize: tick-label font size; default 12.
        fig_height: per-facet height in inches; default 4.
        fig_aspect_ratio: per-facet width/height ratio; default 1.25.
    """
    g = sns.catplot(
        data=df,
        kind="bar",
        x=x,
        col=cat_columns,
        y=y,
        sharey=False,
        palette=sns.color_palette([plot_color]),
        alpha=1,
        height=fig_height,
        aspect=fig_aspect_ratio,
        legend=False,
    )
    # Pair every facet in the first row with its title. This was
    # hard-coded to [g.axes[0][0], g.axes[0][1]] and ignored any
    # additional facets; zip still stops at the shorter sequence, so the
    # two-facet behaviour is unchanged.
    for ax, ptitle in zip(g.axes[0], ptitles):
        ax.set_ylabel(None)
        ax.set_xlabel(None)
        ax.set_title(None)
        if x_ticks_formatter:
            ax.yaxis.set_major_formatter(
                mtick.StrMethodFormatter(x_ticks_formatter)
            )
        if ptitle == ptitles[-1]:
            # Mirror the last facet: ticks and spines on top/right.
            ax.xaxis.set_ticks_position("top")
            ax.yaxis.set_ticks_position("right")
            ax.spines["bottom"].set_visible(False)
            ax.spines["left"].set_visible(False)
            ax.spines["top"].set_visible(True)
            ax.spines["right"].set_visible(True)
            ax.spines["top"].set_edgecolor("black")
            ax.spines["top"].set_linewidth(1.5)
        else:
            ax.spines["bottom"].set_edgecolor("black")
            ax.spines["bottom"].set_linewidth(1.5)
        ax.set_title(ptitle, loc="left", fontweight="bold")
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.grid(True, color="lightgrey")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing import cpu_count
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.metrics as mr
import sklearn.model_selection as ms
import yellowbrick.classifier as ybc
from joblib import Parallel, delayed
from matplotlib.ticker import FuncFormatter
from sklearn.inspection import permutation_importance
from sklearn.metrics import auc, roc_curve
from sklearn.utils.class_weight import compute_sample_weight
import src.ml_helpers as mlh
def customize_splines(ax: plt.axis) -> plt.axis:
    """Emphasise the data-side spines and mute the far ones.

    The left/bottom spines become thick black lines drawn above the grid
    (zorder 3); the top/right spines become thin light-grey lines.

    Args:
        ax: axes to restyle.

    Returns:
        The same axes, for chaining.
    """
    for side in ("left", "bottom"):
        spine = ax.spines[side]
        spine.set_edgecolor("black")
        spine.set_linewidth(2)
        spine.set_zorder(3)
    for side in ("top", "right"):
        spine = ax.spines[side]
        spine.set_edgecolor("lightgrey")
        spine.set_linewidth(1)
    return ax
def plot_horiz_bar_chart(data, ptitle, log_scale_x=True, fig_size=(8, 4)):
    """Plot a horizontal bar chart with comma-formatted x-tick labels.

    When ``log_scale_x`` is true, x-tick values are passed through
    ``np.expm1`` before formatting, i.e. the data is assumed to be
    log1p-transformed counts (NOTE(review): confirm against callers).

    Args:
        data: Series/DataFrame exposing a ``plot`` accessor.
        ptitle: plot title (left-aligned, bold).
        log_scale_x: undo a log1p transform when labelling x ticks.
        fig_size: figure size in inches; default (8, 4).
    """
    _, ax = plt.subplots(figsize=fig_size)
    data.plot(ax=ax, kind="barh", zorder=3)
    ax.get_legend().remove()
    ax = add_gridlines(ax)
    ax.set_title(ptitle, loc="left", fontweight="bold", fontsize=14)
    for axis_name in ("x", "y"):
        ax.tick_params(axis=axis_name, labelsize=14)
    # Choose the tick formatter once instead of duplicating the call.
    if log_scale_x:
        def fmt(val, _pos):
            return format(int(np.expm1(val)), ",")
    else:
        def fmt(val, _pos):
            return format(int(val), ",")
    ax.get_xaxis().set_major_formatter(FuncFormatter(fmt))
    _ = customize_splines(ax)
def plot_verticalbar_pie_chart(
    data, pie_var, bar_title, pie_title, fig_size=(12, 6)
):
    """Plot a count bar chart beside a two-slice pie chart.

    Left panel: counts of ``loan_status`` values (NOTE(review): the bar
    column is hard-coded to "loan_status" while the pie column is
    parameterised via ``pie_var`` -- confirm the asymmetry is intended).
    Right panel: value-count pie of ``pie_var`` with the second slice
    exploded.

    Args:
        data: DataFrame holding the columns to plot.
        pie_var: column whose value counts feed the pie chart.
        bar_title: title of the left (bar) panel.
        pie_title: title of the right (pie) panel.
        fig_size: figure size in inches; default (12, 6).
    """
    # Bug fix: fig_size was accepted but ignored; the figure was always
    # created with a hard-coded (12, 6).
    _, axs = plt.subplots(1, 2, figsize=fig_size)
    sns.countplot(
        x="loan_status",
        data=data,
        ax=axs[0],
        palette=["red", "green"],
        zorder=3,
    )
    axs[0].set_title(
        bar_title,
        loc="left",
        fontweight="bold",
        fontsize=14,
    )
    axs[0].set_ylabel(None)
    axs[0].set_xlabel(None)
    axs[0] = add_gridlines(axs[0])
    # Thousands separators on the count axis.
    axs[0].get_yaxis().set_major_formatter(
        FuncFormatter(lambda x, p: format(int(x), ","))
    )
    for ax_dir in ["x", "y"]:
        axs[0].tick_params(axis=ax_dir, labelsize=14)
    data[pie_var].value_counts().plot(
        x=None,
        y=None,
        kind="pie",
        explode=(0, 0.1),
        fontsize=14,
        shadow=False,
        ax=axs[1],
        autopct="%1.2f%%",
        colors=["green", "red"],
    )
    axs[1].set_title(
        pie_title,
        loc="center",
        fontweight="bold",
        fontsize=14,
    )
    axs[1].set_ylabel(None)
    _ = customize_splines(axs[0])
def add_gridlines(ax: plt.axis):
    """Enable light-grey gridlines on both axes and return the axes."""
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    for axis in (ax.xaxis, ax.yaxis):
        axis.grid(True)
    return ax
def plot_learning_curve(
    estimator,
    title,
    X,
    y,
    cv=None,
    scorer="f1_score",
    n_jobs=-1,
    train_sizes=np.linspace(0.1, 1.0, 5),
    legend_coords=(0.9, 1.15),
    axis_tick_label_fontsize=12,
    fig_size=(20, 5),
):
    """Plot score, fit-time and score-vs-time learning curves.

    Runs ``sklearn.model_selection.learning_curve`` and draws three
    stacked panels: (1) train/test score vs. number of training samples
    with +/- one standard-deviation bands, (2) fit time vs. training
    size, and (3) test score vs. fit time.

    Args:
        estimator: estimator or pipeline to evaluate.
        title: prefix for the first panel's title.
        X: feature matrix.
        y: target vector.
        cv: cross-validation splitter or fold count; default None.
        scorer: scoring spec forwarded to ``learning_curve``; default
            "f1_score" (NOTE(review): sklearn's built-in string scorer
            is named "f1" -- confirm callers pass a valid scorer).
        n_jobs: parallel jobs; default -1 (all cores).
        train_sizes: relative training-set sizes to evaluate; default
            five evenly spaced fractions from 0.1 to 1.0.
        legend_coords: bbox anchor for the first panel's legend.
        axis_tick_label_fontsize: font size for ticks and panel titles.
        fig_size: overall figure size in inches; default (20, 5).
    """
    _, axes = plt.subplots(3, 1, figsize=fig_size)
    axes[0].set_title(
        title + " versus Training size",
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_fontsize,
    )
    axes[0].set_xlabel(None)
    train_sizes, train_scores, test_scores, fit_times, _ = ms.learning_curve(
        estimator,
        X,
        y,
        cv=cv,
        scoring=scorer,
        n_jobs=n_jobs,
        train_sizes=train_sizes,
        return_times=True,
    )
    # print(train_scores, cv, scorer, train_sizes)
    # Collapse the per-fold score/time matrices into mean +/- std curves.
    train_split_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_split_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    fit_times_mean = np.mean(fit_times, axis=1)
    fit_times_std = np.std(fit_times, axis=1)
    # Plot learning curve
    axes[0].grid()
    axes[0].fill_between(
        train_sizes,
        train_split_scores_mean - train_scores_std,
        train_split_scores_mean + train_scores_std,
        alpha=0.1,
        color="r",
    )
    axes[0].fill_between(
        train_sizes,
        test_split_scores_mean - test_scores_std,
        test_split_scores_mean + test_scores_std,
        alpha=0.1,
        color="g",
    )
    axes[0].plot(
        train_sizes, train_split_scores_mean, "o-", color="r", label="Train"
    )
    axes[0].plot(
        train_sizes, test_split_scores_mean, "o-", color="g", label="Test"
    )
    axes[0].legend(
        loc="upper left",
        bbox_to_anchor=legend_coords,
        ncol=2,
        frameon=False,
        handletextpad=0.3,
        columnspacing=0.4,
    )
    axes[0].xaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
    # Plot n_samples vs fit_times
    axes[1].grid()
    axes[1].plot(train_sizes, fit_times_mean, "o-")
    axes[1].fill_between(
        train_sizes,
        fit_times_mean - fit_times_std,
        fit_times_mean + fit_times_std,
        alpha=0.1,
    )
    axes[1].set_xlabel(None)
    axes[1].set_title(
        "Fit times (sec) versus Training size",
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_fontsize,
    )
    axes[1].xaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
    # Plot fit_time vs score
    axes[2].grid()
    axes[2].plot(fit_times_mean, test_split_scores_mean, "o-")
    axes[2].fill_between(
        fit_times_mean,
        test_split_scores_mean - test_scores_std,
        test_split_scores_mean + test_scores_std,
        alpha=0.1,
    )
    axes[2].set_xlabel(None)
    axes[2].set_title(
        "Test score versus Training time (sec)",
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_fontsize,
    )
    # Shared cosmetics applied to all three panels.
    for ax in axes:
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
        _ = customize_splines(ax)
def builtin_plot_permutation_importances(
    pipe,
    X_train,
    X_test,
    y_train,
    y_test,
    scorer,
    n_repeats,
    wspace=0.5,
    fig_title_fontsize=16,
    fig_title_vertical_pos=1.1,
    axis_tick_label_fontsize=12,
    axis_label_fontsize=14,
    box_color="cyan",
    fig_size=(12, 6),
):
    """Boxplot permutation importances for the train and test splits.

    Runs ``sklearn.inspection.permutation_importance`` on a fitted
    pipeline and shows, per split, the distribution of score changes
    over ``n_repeats`` shuffles of each feature (features sorted by
    mean importance, most important at the top).

    Args:
        pipe: fitted pipeline whose final step is named "clf".
        X_train, X_test: feature frames (must expose ``.columns``).
        y_train, y_test: matching targets.
        scorer: sklearn scorer object; its ``_score_func`` name is used
            in the figure title.
        n_repeats: number of permutation passes per feature.
        wspace: horizontal spacing between the two panels.
        fig_title_fontsize, fig_title_vertical_pos: suptitle styling.
        axis_tick_label_fontsize, axis_label_fontsize: label styling.
        box_color: boxplot fill colour; default "cyan".
        fig_size: figure size in inches; default (12, 6).
    """
    # Derive a short metric name, e.g. "recall" from
    # "threshold_recall_score".
    scorer_name = scorer._score_func.__name__.split("_score")[0].replace(
        "threshold_", ""
    )
    fig_title = (
        f"{scorer_name.upper()} Permutation Importances using "
        f"{type(pipe.named_steps['clf']).__name__}"
    )
    fig = plt.figure(figsize=fig_size)
    fig.suptitle(
        fig_title,
        fontsize=fig_title_fontsize,
        fontweight="bold",
        y=fig_title_vertical_pos,
    )
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    for Xs, ys, ax, split_name in zip(
        [X_train, X_test], [y_train, y_test], [ax1, ax2], ["train", "test"]
    ):
        result = permutation_importance(
            pipe,
            Xs,
            ys,
            scoring=scorer,
            n_repeats=n_repeats,
            random_state=42,
            n_jobs=-1,
        )
        # Sort by mean importance; the [::-1] below puts the most
        # important feature in the top row of the boxplot.
        sorted_idx = result.importances_mean.argsort()
        sns.boxplot(
            data=result.importances[sorted_idx][::-1].T,
            orient="h",
            color=box_color,
            saturation=0.5,
            zorder=3,
            ax=ax,
        )
        # Zero line: boxes crossing it have no clear effect on the score.
        ax.axvline(x=0, color="k", ls="--")
        ax.set_yticks(range(len(sorted_idx)))
        ax.set_yticklabels(Xs.columns[sorted_idx][::-1])
        ax.set_title(
            f"{split_name.title()}",
            loc="left",
            fontweight="bold",
            fontsize=axis_tick_label_fontsize,
        )
        ax.set_xlabel(
            f"Change in avg. score, over {n_repeats} passes through the data",
            fontsize=axis_label_fontsize,
        )
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
        ax.xaxis.grid(True, which="major", color="lightgrey", zorder=0)
        _ = customize_splines(ax)
def plot_coefs(
    coefs,
    ptitle="Coefficient variability",
    axis_tick_label_font_size=12,
    fig_size=(9, 7),
):
    """Boxplot the spread of model coefficients across fitted models.

    Args:
        coefs: DataFrame with one column per feature and one row per
            fitted model.
        ptitle: plot title; default "Coefficient variability".
        axis_tick_label_font_size: tick and title font size; default 12.
        fig_size: figure size in inches; default (9, 7).
    """
    _, ax = plt.subplots(figsize=fig_size)
    sns.boxplot(
        data=coefs, orient="h", color="cyan", saturation=0.5, zorder=3, ax=ax
    )
    # Zero line separates consistently positive from negative coefficients.
    ax.axvline(x=0, color="k", ls="--")
    ax.set_title(
        ptitle,
        loc="left",
        fontweight="bold",
        fontsize=axis_tick_label_font_size,
    )
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_tick_params(labelsize=axis_tick_label_font_size)
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    _ = customize_splines(ax)
def iterate_discrimination_thresholds(pipe, X, y, cv, threshold_values_array):
    """Score a pipeline at several decision thresholds across CV folds.

    Fans the (threshold x fold) grid out over all CPU cores; each task
    delegates to ``mlh.fit_predict_threshold`` (presumably fitting on
    the fold's train indices and scoring the test indices at the given
    threshold -- confirm in src.ml_helpers).

    Args:
        pipe: estimator/pipeline evaluated per fold.
        X: feature matrix.
        y: target vector.
        cv: cross-validation splitter providing ``split(X, y)``.
        threshold_values_array: array of thresholds to evaluate.

    Returns:
        DataFrame of per-(threshold, fold) score records with the
        "threshold" column cast to str (stable groupby keys downstream).
    """
    executor = Parallel(n_jobs=cpu_count(), backend="multiprocessing")
    tasks = [
        delayed(mlh.fit_predict_threshold)(pipe, X, y, train, test, thresh)
        for thresh in np.nditer(threshold_values_array)
        for train, test in cv.split(X, y)
    ]
    scores = executor(tasks)
    df_scores = pd.DataFrame.from_records(scores).astype({"threshold": str})
    # display(df_scores.head(8))
    return df_scores
def plot_discrimination_threshold(
    scores,
    pipe,
    metrics=["recall", "fpr", "auc"],
    split_types=["test"],
    ax=None,
):
    """Plot mean metric curves (with min/max bands) vs. decision threshold.

    Expects the per-(threshold, fold) score frame produced by
    ``iterate_discrimination_thresholds`` and aggregates each
    ``{split}_{metric}`` column to mean/min/max per threshold.

    NOTE(review): ``metrics`` and ``split_types`` are mutable default
    arguments (never mutated here, but fragile). The extraction loop
    below also hard-codes ["recall", "fpr", "auc"] rather than using the
    ``metrics`` parameter, and the band lookup reuses ``aggfunc`` after
    its loop has finished (always "max") -- confirm behaviour before
    passing non-default metrics.

    Args:
        scores: DataFrame with a "threshold" column and
            ``{split}_{metric}`` score columns.
        pipe: pipeline; only its "clf" step's class name is used in the
            plot title.
        metrics: metric suffixes to plot; default ["recall", "fpr", "auc"].
        split_types: split prefixes to plot; default ["test"].
        ax: axes to draw on; a new 8x4 figure is created when None.
    """
    grouped_scores = scores.groupby("threshold").agg(
        {
            k: ["mean", "min", "max"]
            for k in [f"{s}_{m}" for m in metrics for s in split_types]
        }
    )
    # display(manual_scores_grouped)
    if ax is None:
        _, ax = plt.subplots(figsize=(8, 4))
    for split_type in split_types:
        xyd = {}
        # Extract each (metric, aggregate) series into a flat dict keyed
        # "split_metric_agg".
        for metric in ["recall", "fpr", "auc"]:
            for aggfunc in ["mean", "min", "max"]:
                xyd[f"{split_type}_{metric}_{aggfunc}"] = grouped_scores.loc[
                    slice(None), [(f"{split_type}_{metric}", aggfunc)]
                ]
                if aggfunc in ["min", "max"]:
                    xyd[f"{split_type}_{metric}_{aggfunc}"] = xyd[
                        f"{split_type}_{metric}_{aggfunc}"
                    ].squeeze()
                else:
                    # Flatten the (column, aggfunc) MultiIndex into a
                    # single "split_metric_agg" level.
                    xyd[f"{split_type}_{metric}_{aggfunc}"].columns = (
                        xyd[f"{split_type}_{metric}_{aggfunc}"]
                        .columns.map("_".join)
                        .str.strip("_")
                    )
        for metric in metrics:
            # Mean curve, labelled "split_metric".
            xyd[f"{split_type}_{metric}_mean"].squeeze().plot(
                ax=ax,
                label="_".join(
                    xyd[f"{split_type}_{metric}_mean"]
                    .squeeze()
                    .name.split("_", 2)[:2]
                ),
            )
            # Min/max band around the mean; ``aggfunc`` here is the
            # leftover loop variable from above (== "max").
            ax.fill_between(
                xyd[f"{split_type}_{metric}_{aggfunc}"].squeeze().index,
                xyd[f"{split_type}_{metric}_min"],
                xyd[f"{split_type}_{metric}_max"],
                alpha=1.0,
                lw=1,
            )
    ax.legend()
    ax.set_xlabel(None)
    ax.set_title(
        (
            "Discrimination Threshold Plot for "
            f"{type(pipe.named_steps['clf']).__name__}"
        )
    )
def show_yb_grid(
    estimator,
    X_test,
    y_test,
    classes,
    X,
    y,
    threshold_cv,
    threshold_values,
    wspace=0.6,
    hspace=0.6,
    fig_size: tuple = (16, 8),
):
    """Render a 3x2 grid of classifier diagnostics.

    Panels: class prediction error, discrimination-threshold curves,
    confusion matrix, classification report, Yellowbrick ROC-AUC and a
    manual ROC curve.

    Args:
        estimator: fitted classifier/pipeline.
        X_test, y_test: held-out split used to score the visualizers.
        classes: class labels forwarded to the Yellowbrick visualizers.
        X, y: full data used for the threshold sweep.
        threshold_cv: CV splitter for the threshold sweep.
        threshold_values: array of thresholds to evaluate.
        wspace, hspace: grid spacing; default 0.6 each.
        fig_size: figure size in inches; default (16, 8).
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(3, 2, hspace=hspace, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    ax3 = fig.add_subplot(grid[1, 0])
    ax4 = fig.add_subplot(grid[1, 1])
    ax5 = fig.add_subplot(grid[2, 0])
    ax6 = fig.add_subplot(grid[2, 1])
    # Four Yellowbrick visualizers fill ax1/ax3/ax4/ax5; ax2 and ax6 are
    # reserved for the manual plots added after the loop.
    for k, (viz_func, ax) in enumerate(
        zip(
            [
                ybc.ClassPredictionError,
                ybc.ConfusionMatrix,
                ybc.ClassificationReport,
                ybc.ROCAUC,
            ],
            [ax1, ax3, ax4, ax5],
        )
    ):
        visualizer = viz_func(
            estimator,
            classes=classes,
            is_fitted="auto",
            ax=ax,
        )
        # NOTE(review): both fit and score use the test split here --
        # confirm fitting on X_test is intentional.
        visualizer.fit(X_test, y_test)
        visualizer.score(X_test, y_test)
        visualizer.finalize()
        if k == 0:
            # Drop the ClassPredictionError legend to save space.
            ax.get_legend().remove()
    plot_discrimination_threshold(
        iterate_discrimination_thresholds(
            estimator, X, y, threshold_cv, threshold_values
        ),
        estimator,
        ["recall", "fpr", "auc"],
        ["test"],
        ax2,
    )
    plot_roc_curve(estimator, X_test, y_test, ax=ax6)
def plot_cross_validated_coefs(
    pipe,
    numerical_columns,
    nominal_columns,
    X_train,
    X_test,
    y_train,
    y_test,
    scorer,
    n_repeats=5,
    n_splits=5,
    axis_tick_label_fontsize=12,
    fig_size=(8, 12),
):
    """Boxplot linear-model coefficient variability across repeated CV.

    Refits ``pipe`` on repeated K-fold splits of the combined train+test
    data, collects the final "clf" step's flattened coefficients per
    fit, and hands the resulting frame to :func:`plot_coefs`.

    Args:
        pipe: pipeline with a "preprocessor" step exposing a "onehot"
            transformer and a linear "clf" step with ``coef_``.
        numerical_columns: names of numeric features (kept as-is).
        nominal_columns: names of categorical features (one-hot
            expanded; NOTE(review): uses the pre-1.0 sklearn
            ``get_feature_names`` API -- newer versions require
            ``get_feature_names_out``).
        X_train, X_test, y_train, y_test: splits that are re-combined
            before cross-validation.
        scorer: scoring spec for ``cross_validate``.
        n_repeats, n_splits: RepeatedKFold configuration; default 5x5.
        axis_tick_label_fontsize: font size forwarded to the plot.
        fig_size: figure size in inches; default (8, 12).
    """
    feature_names = (
        pipe.named_steps["preprocessor"]
        .named_transformers_["onehot"]
        .get_feature_names(input_features=nominal_columns)
    )
    # Numeric names first, then the expanded one-hot names.
    feature_names = np.concatenate([numerical_columns, feature_names])
    cv_model = ms.cross_validate(
        pipe,
        X=pd.concat([X_train, X_test]),
        y=pd.concat([y_train, y_test]),
        cv=ms.RepeatedKFold(
            n_splits=n_splits, n_repeats=n_repeats, random_state=42
        ),
        scoring=scorer,
        return_train_score=True,
        return_estimator=True,
        n_jobs=-1,
    )
    # One row per CV fit, one column per (expanded) feature.
    coefs = pd.DataFrame(
        [
            est.named_steps["clf"].coef_.flatten()
            for est in cv_model["estimator"]
        ],
        columns=feature_names,
    )
    # Order columns by descending mean coefficient for a readable plot.
    coefs = coefs[coefs.mean(axis=0).sort_values(ascending=False).index]
    plot_coefs(
        coefs, "Coefficient variability", axis_tick_label_fontsize, fig_size
    )
def plot_grouped_bar_chart(
    df, groupby, col_to_plot, wspace=0.5, fig_size=(8, 4)
):
    """Compare a category's share among misclassified rows vs. overall.

    Left panel: percentage distribution of ``groupby`` restricted to the
    rows where the boolean column ``col_to_plot`` is True (the
    misclassified subset). Right panel: the same distribution over the
    whole frame.

    Args:
        df: DataFrame containing ``groupby`` and the boolean ``col_to_plot``.
        groupby: categorical column to break down.
        col_to_plot: boolean column flagging misclassified rows.
        wspace: horizontal spacing between the panels; default 0.5.
        fig_size: figure size in inches; default (8, 4).
    """
    misclassified_str = "(pct. misclassified)"
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    # Cleanup: a redundant descending sort used to precede the ascending
    # sort and was immediately overwritten; only the ascending sort
    # determines the final bar order.
    (
        100 * (df[df[col_to_plot]][groupby].value_counts(normalize=True))
    ).sort_values(ascending=True).plot.barh(ax=ax1, rot=0, zorder=3)
    ax1.set_ylabel(None)
    ax1.set_title(
        f"{groupby.title()} {misclassified_str}", loc="left", fontweight="bold"
    )
    (100 * (df[groupby].value_counts(normalize=True))).sort_values(
        ascending=True
    ).plot.barh(ax=ax2, rot=0, zorder=3)
    ax2.set_title(
        groupby.title() + " (pct. overall)", loc="left", fontweight="bold"
    )
    for ax in [ax1, ax2]:
        ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
        _ = customize_splines(ax)
        _ = add_gridlines(ax)
def plot_grouped_histogram(
    df,
    col_to_plot,
    legend_loc=(0.9, 1.1),
    alpha=0.5,
    wspace=0.2,
    fig_size=(8, 6),
):
    """Overlay histograms of misclassified vs. all rows per loan outcome.

    Left panel: defaulted loans (``is_default == 1``); right panel: loans
    paid on time (``is_default == 0``). In each panel the misclassified
    subset ("M") is drawn before the full population ("All").

    Args:
        df: DataFrame with ``is_default``, a boolean ``misclassified``
            column, and the column to histogram.
        col_to_plot: numeric column to histogram.
        legend_loc: bbox anchor for both legends; default (0.9, 1.1).
        alpha: bar transparency; default 0.5.
        wspace: horizontal spacing between panels; default 0.2.
        fig_size: figure size in inches; default (8, 6).
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    # Compute the misclassified subset once; it used to be rebuilt
    # identically before each panel.
    dfm = df[df["misclassified"]]
    # Identical legend styling for both panels.
    legend_kwargs = dict(
        loc="upper left",
        ncol=2,
        bbox_to_anchor=legend_loc,
        handletextpad=0.2,
        columnspacing=0.2,
        frameon=False,
    )
    ax1.hist(dfm[dfm["is_default"] == 1][col_to_plot], alpha=alpha, label="M")
    ax1.hist(
        df[df["is_default"] == 1][col_to_plot],
        alpha=alpha,
        label="All",
    )
    ax1.set_title(
        "Defaulted Loans by " + col_to_plot.title(),
        loc="left",
        fontweight="bold",
    )
    ax1.legend(**legend_kwargs)
    ptitle = f"Paid on-time loans by {col_to_plot.title()}"
    ax2.hist(dfm[dfm["is_default"] == 0][col_to_plot], alpha=alpha, label="M")
    ax2.hist(df[df["is_default"] == 0][col_to_plot], alpha=alpha, label="All")
    ax2.set_title(ptitle, loc="left", fontweight="bold")
    ax2.legend(**legend_kwargs)
    for ax in [ax1, ax2]:
        _ = add_gridlines(ax)
def plot_roc_curve(
    pipe, X_test, y_test, handletextpad=0.5, ax=None, fig_size=(6, 6)
):
    """Plot one-vs-rest ROC curves (with AUC) for each class.

    Binarizes ``y_test`` via one-hot dummies and computes a
    balanced-sample-weighted ROC curve per class column.

    NOTE(review): the fpr/tpr dicts are keyed by column position
    (0..n_classes-1) but later indexed by the sorted class *values*;
    this only lines up when the labels are exactly 0..n_classes-1 --
    confirm for non-integer or non-contiguous labels.

    Args:
        pipe: fitted pipeline with ``predict_proba`` and a "clf" step
            (whose class name is used in the title).
        X_test: held-out features.
        y_test: held-out labels (pandas Series).
        handletextpad: legend handle/text padding; default 0.5.
        ax: axes to draw on; a new figure is created when falsy.
        fig_size: figure size when a new figure is created; default (6, 6).
    """
    y_score = pipe.predict_proba(X_test)
    y_test_binarized = pd.get_dummies(y_test).to_numpy()
    fpr, tpr, roc_auc = ({} for i in range(3))
    # One ROC curve per class column of the binarized labels.
    for i in range(y_test.nunique()):
        fpr[i], tpr[i], _ = roc_curve(
            y_test_binarized[:, i],
            y_score[:, i],
            sample_weight=compute_sample_weight(
                class_weight="balanced", y=y_test
            ),
        )
        roc_auc[i] = auc(fpr[i], tpr[i])
    if not ax:
        _, ax = plt.subplots(figsize=fig_size)
    for class_val in list(np.sort(y_test.unique())):
        ax.plot(
            fpr[class_val],
            tpr[class_val],
            lw=2,
            label=f"ROC of class {class_val}, AUC = {roc_auc[class_val]:.2f}",
        )
    # Chance diagonal for reference.
    ax.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel(None)
    ax.set_ylabel(None)
    model_name = type(pipe.named_steps["clf"]).__name__
    ax.set_title(f"Manual ROC (TPR vs FPR) Curves for {model_name}")
    ax.legend(
        loc="lower right",
        frameon=False,
        handletextpad=handletextpad,
    )
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    _ = customize_splines(ax)
def plot_pr_roc_curves(
    y_test,
    y_probs,
    est_name,
    axis_tick_label_fontsize=12,
    wspace=0.1,
    legend_position=(0.35, 1.1),
    f2_beta=2,
    fig_size=(12, 4),
):
    """Plot precision-recall and ROC curves with best-threshold markers.

    Left panel: PR curve with the threshold maximising the F-beta score
    (beta = ``f2_beta``) marked and annotated. Right panel: ROC curve
    with the threshold maximising the geometric mean of TPR and
    (1 - FPR) marked. Both panels include a no-skill baseline.

    NOTE(review): the F-beta expression divides by (beta^2 * p + r) and
    yields NaN/a warning when precision and recall are both zero at some
    threshold -- confirm this cannot occur for the intended inputs.

    Args:
        y_test: true binary labels.
        y_probs: positive-class probabilities.
        est_name: estimator name used in the curve legends.
        axis_tick_label_fontsize: tick-label font size; default 12.
        wspace: spacing between the two panels; default 0.1.
        legend_position: bbox anchor of the ROC panel's legend.
        f2_beta: beta of the F-score used to pick the PR threshold.
        fig_size: figure size in inches; default (12, 4).
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, 2, wspace=wspace)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    p, r, p_thresholds = mr.precision_recall_curve(y_test, y_probs)
    # F-beta score per threshold; argmax picks the best trade-off.
    f2score = ((1 + (f2_beta ** 2)) * p * r) / (((f2_beta ** 2) * p) + r)
    ix = np.argmax(f2score)
    # The no-skill PR baseline sits at the positive-class prevalence.
    no_skill = len(y_test[y_test == 1]) / len(y_test)
    ax1.plot([0, 1], [no_skill, no_skill], ls="--", label="No Skill")
    ax1.plot(r, p, label=est_name)
    ax1.set_title("Precision-Recall", loc="left", fontweight="bold")
    ax1.annotate(str(np.round(p_thresholds[ix], 3)), (r[ix], p[ix]))
    ax1.scatter(
        r[ix],
        p[ix],
        marker="o",
        color="black",
        label="Best",
        zorder=3,
    )
    fpr, tpr, r_thresholds = mr.roc_curve(y_test, y_probs)
    # G-mean balances sensitivity (TPR) against specificity (1 - FPR).
    gmeans = np.sqrt(tpr * (1 - fpr))
    ix = np.argmax(gmeans)
    no_skill = [0, 1]
    ax2.plot(no_skill, no_skill, ls="--", label="No Skill")
    ax2.plot(fpr, tpr, label=est_name)
    ax2.set_title("ROC-AUC", loc="left", fontweight="bold")
    ax2.annotate(str(np.round(r_thresholds[ix], 3)), (fpr[ix], tpr[ix]))
    ax2.scatter(
        fpr[ix],
        tpr[ix],
        marker="o",
        color="black",
        label="Best",
        zorder=3,
    )
    ax2.legend(
        loc="upper left",
        bbox_to_anchor=legend_position,
        columnspacing=0.4,
        handletextpad=0.2,
        frameon=False,
        ncol=3,
    )
    for ax in [ax1, ax2]:
        _ = customize_splines(ax)
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.grid(which="both", axis="both", color="lightgrey", zorder=10)
        ax.xaxis.grid(True)
        ax.yaxis.grid(True)
def plot_lower_corr_heatmap(
    df_corr,
    ptitle,
    lw=1,
    annot_dict={True: ".2f"},
    ptitle_y_loc=1,
    show_cbar=False,
    cbar_shrink_factor=1,
    fig_size=(10, 10),
):
    """Draw the lower triangle of a correlation matrix as a heatmap.

    The upper triangle (diagonal included) is masked out so each
    pairwise correlation appears exactly once.

    Args:
        df_corr: square correlation DataFrame (e.g. ``df.corr()``).
        ptitle: plot title (left-aligned, bold).
        lw: width of the lines separating cells; default 1.
        annot_dict: single-entry mapping {annotate?: format string};
            default {True: ".2f"}.
        ptitle_y_loc: vertical title position; default 1.
        show_cbar: whether to draw the colour bar; default False.
        cbar_shrink_factor: colour-bar shrink factor; default 1.
        fig_size: figure size in inches; default (10, 10).
    """
    _, ax = plt.subplots(figsize=fig_size)
    # Mask hides the redundant upper triangle.
    mask = np.triu(np.ones_like(df_corr, dtype=bool))
    # Unpack the single {annotate?: fmt} pair once instead of indexing
    # keys and values separately.
    annotate, annot_fmt = next(iter(annot_dict.items()))
    sns.heatmap(
        df_corr,
        mask=mask,
        vmin=-1,
        vmax=1,
        center=0,
        cmap=sns.diverging_palette(220, 10, as_cmap=True),
        square=True,
        ax=ax,
        annot=annotate,
        cbar=show_cbar,
        linewidths=lw,
        cbar_kws={"shrink": cbar_shrink_factor},
        fmt=annot_fmt,
    )
    ax.set_title(ptitle, loc="left", fontweight="bold", y=ptitle_y_loc)
    ax.tick_params(left=False, bottom=False)
def plot_single_column_histogram(df, colname, ptitle, fig_size=(8, 4)):
    """Plot a histogram of one DataFrame column.

    Adds dashed vertical reference lines at the column median (black)
    and mean (red), formats x ticks with thousands separators, and
    places a compact two-column legend above the axes.

    Args:
        df: DataFrame holding the data.
        colname: name of the column to plot.
        ptitle: plot title (left-aligned, bold).
        fig_size: figure size in inches; default (8, 4).
    """
    _, ax = plt.subplots(figsize=fig_size)
    series = df[colname]
    series.plot(kind="hist", ax=ax, lw=1.25, edgecolor="w", label="")
    ax.set_title(ptitle, fontweight="bold", loc="left")
    ax.set_ylabel(None)
    # Reference lines marking the distribution's central tendency.
    for stat, lbl, clr in (
        (series.median(), "Median", "k"),
        (series.mean(), "Avg", "r"),
    ):
        ax.axvline(x=stat, label=lbl, color=clr, ls="--")
    ax.xaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
    ax.legend(
        loc="upper left",
        bbox_to_anchor=(0.76, 1.1),
        ncol=2,
        handletextpad=0.2,
        columnspacing=0.2,
    )
def plot_boxplot_using_quantiles(
    boxes,
    ptitle,
    axis_tick_label_fontsize=12,
    fig_size=(6, 4),
):
    """Draw boxplots from pre-computed quantile statistics.

    Args:
        boxes: list of stats dicts accepted by ``Axes.bxp`` (one entry
            per box, each with med/q1/q3/whislo/whishi keys).
        ptitle: plot title (left-aligned, bold).
        axis_tick_label_fontsize: tick-label font size; default 12.
        fig_size: figure size in inches; default (6, 4).
    """
    _, ax = plt.subplots(figsize=fig_size)
    # Space boxes 0.5 apart starting at x=1. Previously hard-coded to
    # [1, 1.5], which broke for anything other than exactly two boxes;
    # for two boxes this produces the same positions as before.
    bxp1 = ax.bxp(
        boxes,
        positions=[1 + 0.5 * k for k in range(len(boxes))],
        widths=0.35,
        showfliers=False,
        patch_artist=True,
        whiskerprops=dict(linewidth=1.25, color="black"),
        capprops=dict(linewidth=1.25, color="black"),
        boxprops=dict(linewidth=1.25),
        medianprops=dict(linewidth=1.5, color="cyan"),
    )
    for patch in bxp1["boxes"]:
        patch.set(facecolor="steelblue")
    ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
    ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
    ax.grid(which="both", axis="both", color="lightgrey", zorder=0)
    _ = customize_splines(ax)
    _ = add_gridlines(ax)
    ax.set_xlabel(None)
    ax.set_title(
        ptitle,
        loc="left",
        fontweight="bold",
    )
    ax.yaxis.set_major_formatter(mtick.StrMethodFormatter("{x:,.0f}"))
def plot_multiple_boxplots(
    df,
    x,
    y_s,
    ptitles,
    axis_tick_label_fontsize=12,
    x_ticks_formatter="{x:,.0f}",
    fig_size=(12, 4),
):
    """Draw side-by-side boxplots of several columns against one grouper.

    Args:
        df: DataFrame holding the data.
        x: column used as the x-axis grouping variable.
        y_s: list of column names; one boxplot panel per entry.
        ptitles: per-panel titles (parallel to ``y_s``).
        axis_tick_label_fontsize: tick-label font size; default 12.
        x_ticks_formatter: StrMethodFormatter spec for the y-axis, or a
            falsy value to keep the default formatter; default "{x:,.0f}".
        fig_size: figure size in inches; default (12, 4).
    """
    fig = plt.figure(figsize=fig_size)
    grid = plt.GridSpec(1, len(y_s), wspace=0.2)
    # Iterate over every requested column. This was hard-coded to
    # range(2), which silently ignored columns beyond the first two even
    # though the GridSpec was already sized by len(y_s).
    for c in range(len(y_s)):
        ax = fig.add_subplot(grid[0, c])
        sns.boxplot(x=x, y=y_s[c], ax=ax, data=df)
        ax.set_xlabel(None)
        ax.set_ylabel(None)
        if x_ticks_formatter:
            ax.yaxis.set_major_formatter(
                mtick.StrMethodFormatter(x_ticks_formatter)
            )
        ax.set_title(ptitles[c], loc="left", fontweight="bold")
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.grid(True, color="lightgrey")
        ax.spines["bottom"].set_edgecolor("black")
        ax.spines["bottom"].set_linewidth(1.5)
def plot_multi_catplot(
    df,
    x,
    y,
    cat_columns,
    ptitles,
    x_ticks_formatter="{x:,.0f}",
    plot_color="red",
    axis_tick_label_fontsize=12,
    fig_height=4,
    fig_aspect_ratio=1.25,
):
    """Draw single-colour bar catplots faceted by a categorical column.

    The facet whose title equals the last entry of ``ptitles`` is
    mirrored: its ticks and visible spines move to the top/right edge so
    adjacent panels face each other.

    Args:
        df: DataFrame holding the data.
        x: column plotted on the x-axis.
        y: column plotted on the y-axis.
        cat_columns: column used to split the plot into facets.
        ptitles: per-facet titles (parallel to the facet axes).
        x_ticks_formatter: StrMethodFormatter spec for the y-axis, or a
            falsy value to keep the default; default "{x:,.0f}".
        plot_color: bar colour; default "red".
        axis_tick_label_fontsize: tick-label font size; default 12.
        fig_height: per-facet height in inches; default 4.
        fig_aspect_ratio: per-facet width/height ratio; default 1.25.
    """
    g = sns.catplot(
        data=df,
        kind="bar",
        x=x,
        col=cat_columns,
        y=y,
        sharey=False,
        palette=sns.color_palette([plot_color]),
        alpha=1,
        height=fig_height,
        aspect=fig_aspect_ratio,
        legend=False,
    )
    # Pair every facet in the first row with its title. This was
    # hard-coded to [g.axes[0][0], g.axes[0][1]] and ignored any
    # additional facets; zip still stops at the shorter sequence, so the
    # two-facet behaviour is unchanged.
    for ax, ptitle in zip(g.axes[0], ptitles):
        ax.set_ylabel(None)
        ax.set_xlabel(None)
        ax.set_title(None)
        if x_ticks_formatter:
            ax.yaxis.set_major_formatter(
                mtick.StrMethodFormatter(x_ticks_formatter)
            )
        if ptitle == ptitles[-1]:
            # Mirror the last facet: ticks and spines on top/right.
            ax.xaxis.set_ticks_position("top")
            ax.yaxis.set_ticks_position("right")
            ax.spines["bottom"].set_visible(False)
            ax.spines["left"].set_visible(False)
            ax.spines["top"].set_visible(True)
            ax.spines["right"].set_visible(True)
            ax.spines["top"].set_edgecolor("black")
            ax.spines["top"].set_linewidth(1.5)
        else:
            ax.spines["bottom"].set_edgecolor("black")
            ax.spines["bottom"].set_linewidth(1.5)
        ax.set_title(ptitle, loc="left", fontweight="bold")
        ax.xaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.set_tick_params(labelsize=axis_tick_label_fontsize)
        ax.yaxis.grid(True, color="lightgrey")
gestcont.py | guchait995/Virtual_Mouse | 0 | 6613494 | import cv2
import numpy as np
from pynput.mouse import Button, Controller
import wx
mouse=Controller()
app=wx.App(False)
(sx,sy)=wx.GetDisplaySize()
(camx,camy)=(320,240)
font=cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX,2,0.5,0,3,1)
lowerBound=np.array([50,80,40])
upperBound=np.array([102,255,255])
cam= cv2.VideoCapture(0)
kernelOpen=np.ones((7,7))
kernelClose=np.ones((15,15))
pinchFlag=0
holdFlag=0
while True:
ret, img=cam.read()
img=cv2.resize(img,(340,220))
imgHSV= cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
mask=cv2.inRange(imgHSV,lowerBound,upperBound)
maskOpen=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernelOpen)
maskClose=cv2.morphologyEx(maskOpen,cv2.MORPH_CLOSE,kernelClose)
maskFinal=maskClose
conts,h=cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
if(len(conts)==2):
holdFlag=0
if(pinchFlag==1):
pinchFlag=0
mouse.release(Button.left)
x1,y1,w1,h1=cv2.boundingRect(conts[0])
x2,y2,w2,h2=cv2.boundingRect(conts[1])
cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1),(255,0,0),2)
cv2.rectangle(img,(x2,y2),(x2+w2,y2+h2),(255,0,0),2)
cx1=x1+w1/2
cy1=y1+h1/2
cx2=x2+w2/2
cy2=y2+h2/2
cx=(cx1+cx2)/2
cy=(cy1+cy2)/2
cv2.line(img, (cx1,cy1),(cx2,cy2),(255,0,0),2)
cv2.circle(img, (cx,cy),2,(0,0,255),2)
mouseLoc=(sx-(cx*sx/camx), cy*sy/camy)
mouse.position=mouseLoc
while mouse.position!=mouseLoc:
pass
elif(len(conts)==1):
holdFlag+=1
x, y, w, h = cv2.boundingRect(conts[0])
if(holdFlag>=80):
mouse.click(Button.left,2)
cv2.cv.PutText(cv2.cv.fromarray(img), "Hold Active", (x, y + h), font, (0, 255, 255))
# holdFlag=0
if(pinchFlag==0):
pinchFlag=1
mouse.press(Button.left)
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cx=x+w/2
cy=y+h/2
cv2.circle(img,(cx,cy),(w+h)/4,(0,0,255),2)
mouseLoc=(sx-(cx*sx/camx), cy*sy/camy)
mouse.position=mouseLoc
while mouse.position!=mouseLoc:
pass
cv2.imshow("cam",img)
cv2.waitKey(5)
| import cv2
import numpy as np
from pynput.mouse import Button, Controller
import wx
mouse=Controller()
app=wx.App(False)
(sx,sy)=wx.GetDisplaySize()
(camx,camy)=(320,240)
font=cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX,2,0.5,0,3,1)
lowerBound=np.array([50,80,40])
upperBound=np.array([102,255,255])
cam= cv2.VideoCapture(0)
kernelOpen=np.ones((7,7))
kernelClose=np.ones((15,15))
pinchFlag=0
holdFlag=0
while True:
ret, img=cam.read()
img=cv2.resize(img,(340,220))
imgHSV= cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
mask=cv2.inRange(imgHSV,lowerBound,upperBound)
maskOpen=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernelOpen)
maskClose=cv2.morphologyEx(maskOpen,cv2.MORPH_CLOSE,kernelClose)
maskFinal=maskClose
conts,h=cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
if(len(conts)==2):
holdFlag=0
if(pinchFlag==1):
pinchFlag=0
mouse.release(Button.left)
x1,y1,w1,h1=cv2.boundingRect(conts[0])
x2,y2,w2,h2=cv2.boundingRect(conts[1])
cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1),(255,0,0),2)
cv2.rectangle(img,(x2,y2),(x2+w2,y2+h2),(255,0,0),2)
cx1=x1+w1/2
cy1=y1+h1/2
cx2=x2+w2/2
cy2=y2+h2/2
cx=(cx1+cx2)/2
cy=(cy1+cy2)/2
cv2.line(img, (cx1,cy1),(cx2,cy2),(255,0,0),2)
cv2.circle(img, (cx,cy),2,(0,0,255),2)
mouseLoc=(sx-(cx*sx/camx), cy*sy/camy)
mouse.position=mouseLoc
while mouse.position!=mouseLoc:
pass
elif(len(conts)==1):
holdFlag+=1
x, y, w, h = cv2.boundingRect(conts[0])
if(holdFlag>=80):
mouse.click(Button.left,2)
cv2.cv.PutText(cv2.cv.fromarray(img), "Hold Active", (x, y + h), font, (0, 255, 255))
# holdFlag=0
if(pinchFlag==0):
pinchFlag=1
mouse.press(Button.left)
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cx=x+w/2
cy=y+h/2
cv2.circle(img,(cx,cy),(w+h)/4,(0,0,255),2)
mouseLoc=(sx-(cx*sx/camx), cy*sy/camy)
mouse.position=mouseLoc
while mouse.position!=mouseLoc:
pass
cv2.imshow("cam",img)
cv2.waitKey(5)
| da | 0.151296 | # holdFlag=0 | 2.480544 | 2 |
vendor/pywork4core/django_app/no_network_test_case.py | olivierlefloch/django-short-urls | 1 | 6613495 | # coding: utf-8
"""
Django test case that doesn't allow networking
"""
from __future__ import unicode_literals
import re
import socket
from unittest import TestCase
from utils.reflect import caller_qualname
class NetworkError(Exception):
"""Exception thrown when you try to use the network (that's bad!)"""
pass
class NoNetworkTestCase(TestCase):
"""
TestCase class that prevents networking requests from non whitelisted modules
Subclasses should set NETWORKING_ALLOWED_PREFIXES to a tuple of allowed
qualified prefixes (module.class.function) that should be allowed to perform
networking requests, for example
NETWORKING_ALLOWED_PREFIXES = ("pyredis", "pymongo.pool.Pool.create_connection")
"""
NETWORKING_ALLOWED_PREFIXES = None
@classmethod
def __guard(cls, *args, **kwargs):
"""
Checks whether the current caller (previous method in stack) should
be allowed to call socket.socket.
"""
caller = caller_qualname()
if not cls.whitelist_re or not cls.whitelist_re.match(caller):
raise NetworkError("I told you not to use the Internet! (called from %s)" % caller)
return cls.__socket_backup(*args, **kwargs)
@classmethod
def setUpClass(cls):
super(NoNetworkTestCase, cls).setUpClass()
# Cache the regular expression so we only build it once
if cls.NETWORKING_ALLOWED_PREFIXES:
cls.whitelist_re = re.compile(r'^%s' % '|'.join(cls.NETWORKING_ALLOWED_PREFIXES).replace('.', r'\.'))
else:
cls.whitelist_re = None
cls.__socket_backup, socket.socket = socket.socket, cls.__guard
@classmethod
def tearDownClass(cls):
socket.socket = cls.__socket_backup
super(NoNetworkTestCase, cls).tearDownClass()
| # coding: utf-8
"""
Django test case that doesn't allow networking
"""
from __future__ import unicode_literals
import re
import socket
from unittest import TestCase
from utils.reflect import caller_qualname
class NetworkError(Exception):
"""Exception thrown when you try to use the network (that's bad!)"""
pass
class NoNetworkTestCase(TestCase):
"""
TestCase class that prevents networking requests from non whitelisted modules
Subclasses should set NETWORKING_ALLOWED_PREFIXES to a tuple of allowed
qualified prefixes (module.class.function) that should be allowed to perform
networking requests, for example
NETWORKING_ALLOWED_PREFIXES = ("pyredis", "pymongo.pool.Pool.create_connection")
"""
NETWORKING_ALLOWED_PREFIXES = None
@classmethod
def __guard(cls, *args, **kwargs):
"""
Checks whether the current caller (previous method in stack) should
be allowed to call socket.socket.
"""
caller = caller_qualname()
if not cls.whitelist_re or not cls.whitelist_re.match(caller):
raise NetworkError("I told you not to use the Internet! (called from %s)" % caller)
return cls.__socket_backup(*args, **kwargs)
@classmethod
def setUpClass(cls):
super(NoNetworkTestCase, cls).setUpClass()
# Cache the regular expression so we only build it once
if cls.NETWORKING_ALLOWED_PREFIXES:
cls.whitelist_re = re.compile(r'^%s' % '|'.join(cls.NETWORKING_ALLOWED_PREFIXES).replace('.', r'\.'))
else:
cls.whitelist_re = None
cls.__socket_backup, socket.socket = socket.socket, cls.__guard
@classmethod
def tearDownClass(cls):
socket.socket = cls.__socket_backup
super(NoNetworkTestCase, cls).tearDownClass()
| en | 0.799851 | # coding: utf-8 Django test case that doesn't allow networking Exception thrown when you try to use the network (that's bad!) TestCase class that prevents networking requests from non whitelisted modules Subclasses should set NETWORKING_ALLOWED_PREFIXES to a tuple of allowed qualified prefixes (module.class.function) that should be allowed to perform networking requests, for example NETWORKING_ALLOWED_PREFIXES = ("pyredis", "pymongo.pool.Pool.create_connection") Checks whether the current caller (previous method in stack) should be allowed to call socket.socket. # Cache the regular expression so we only build it once | 2.692568 | 3 |
commitmonitor/urls.py | tcostam/commitmonitor | 0 | 6613496 | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from core import views as core_views
from django.views.generic import TemplateView
from django.contrib.auth import views as auth_views
from rest_framework import routers
from core import views as api_views
import django_js_reverse.views
urlpatterns = [
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^hooks/$', core_views.hook, name='hook'),
url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),
url(r'^$', core_views.home, name='home'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
# Routers
router = routers.DefaultRouter()
router.register(r'repositories', api_views.RepositoryViewSet)
router.register(r'commits', api_views.CommitViewSet)
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] + urlpatterns
| from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from core import views as core_views
from django.views.generic import TemplateView
from django.contrib.auth import views as auth_views
from rest_framework import routers
from core import views as api_views
import django_js_reverse.views
urlpatterns = [
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^oauth/', include('social_django.urls', namespace='social')),
url(r'^hooks/$', core_views.hook, name='hook'),
url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),
url(r'^$', core_views.home, name='home'),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
# Routers
router = routers.DefaultRouter()
router.register(r'repositories', api_views.RepositoryViewSet)
router.register(r'commits', api_views.CommitViewSet)
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] + urlpatterns
| none | 1 | 1.866853 | 2 | |
lightnet/data/transform/post/_cornernet.py | eavise-kul/lightnet | 6 | 6613497 | #
# Lightnet postprocessing for Anchor based detectors (Darknet)
# Copyright EAVISE
#
import logging
import torch
import torch.nn as nn
from ..util import BaseTransform
__all__ = ['GetCornerBoxes']
log = logging.getLogger(__name__)
class GetCornerBoxes(BaseTransform):
""" Convert output from cornernet networks to bounding box tensor.
.. admonition:: Experimental
This post-processing implementation is still in development
and might not be yielding the same results as the official implementation.
Use at your own risk!
Args:
embedding_thresh (Number): Embedding distance threshold to filter matching corners
conf_thresh (Number [0-1]): Confidence threshold to filter detections
network_stride (Number): Downsampling factor of the network (most lightnet networks have a `inner_stride` attribute)
topk (Number, optional): Number of corners to select from the network output; Default **100**
subsample_kernel (Number, optional): Kernel size to perform maxpool subsampling; Default **0**
Returns:
(Tensor [Boxes x 7]]): **[batch_num, x_tl, y_tl, x_br, y_br, confidence, class_id]** for every bounding box
Note:
If setting the subsample_kernel to **0**, you disable the subsampling.
Otherwise this post-processing will perform maxpooling on the heatmap with the specified kernel.
"""
def __init__(self, embedding_thresh, conf_thresh, network_stride, topk=100, subsample_kernel=0):
super().__init__()
log.experimental(f'"{self.__class__.__name__}" is still in development. Use at your own risk!')
self.embedding_thresh = embedding_thresh
self.conf_thresh = conf_thresh
self.network_stride = network_stride
self.topk = topk
self.subsample_kernel = subsample_kernel
def forward(self, network_output):
device = network_output.device
batch, channels, h, w = network_output.shape
# Split tensor
network_output = network_output.view(batch, 2, -1, h, w) # BATCH, TLBR, NUM_CLASSES+3, H, W
heatmaps = torch.sigmoid(network_output[:, :, :-3]) # BATCH, TLBR, NUM_CLASSES, H, W
embedding = network_output[:, :, -3] # BATCH, TLBR, H, W
offsets = network_output[:, :, -2:] # BATCH, TLBR, XY, H, W
# Subsample heatmaps
if self.subsample_kernel:
maxpool_heat = nn.functional.max_pool2d(heatmaps.view(batch, -1, h, w), self.subsample_kernel, stride=1, padding=(self.subsample_kernel - 1) // 2)
heatmaps *= maxpool_heat.view(batch, 2, -1, h, w) == heatmaps
# Get topK corners
topk_heatmaps, topk_idx = torch.topk(heatmaps.view(batch, 2, -1), self.topk)
topk_classes = topk_idx // (h * w)
topk_idx %= (h * w)
topk_x = (topk_idx % w).float()
topk_y = (topk_idx // w).float()
# Add XY offsets
offset_x = torch.gather(offsets[:, :, 0].reshape(batch, 2, -1), 2, topk_idx)
offset_y = torch.gather(offsets[:, :, 1].reshape(batch, 2, -1), 2, topk_idx)
topk_x = topk_x + offset_x
topk_y = topk_y + offset_y
# Combine TL and BR corners
tl_x = topk_x[:, 0, :, None].expand(-1, self.topk, self.topk)
tl_y = topk_y[:, 0, :, None].expand(-1, self.topk, self.topk)
br_x = topk_x[:, 1, None, :].expand(-1, self.topk, self.topk)
br_y = topk_y[:, 1, None, :].expand(-1, self.topk, self.topk)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=3)
bboxes *= self.network_stride
# Create corner filter
corner_filter = (br_x >= tl_x) & (br_y >= tl_y)
# Create class filter
tl_classes = topk_classes[:, 0, :, None].expand(-1, self.topk, self.topk)
br_classes = topk_classes[:, 1, None, :].expand(-1, self.topk, self.topk)
class_filter = (tl_classes == br_classes)
# Create confidence filter
# NOTE : This is different than the original implementation, where they keep the TOP N detections
confidence = (topk_heatmaps[:, 0, :, None] + topk_heatmaps[:, 1, None, :]) / 2
confidence_filter = confidence > self.conf_thresh
# Create embedding filter
topk_embed = torch.gather(embedding.view(batch, 2, -1), 2, topk_idx)
dist = torch.abs(topk_embed[:, 0, :, None] - topk_embed[:, 1, None, :])
embedding_filter = dist <= self.embedding_thresh
# Get batch number of the detections
total_filter = class_filter & embedding_filter & corner_filter & confidence_filter
nums = torch.arange(0, batch, dtype=torch.uint8, device=total_filter.device)
batch_num = total_filter.view(batch, -1)
batch_num = nums[:, None].expand_as(batch_num)[batch_num]
# Apply filters and combine values
bboxes = bboxes[total_filter, :].view(-1, 4)
confidence = confidence[total_filter].view(-1)
class_idx = tl_classes[total_filter].view(-1)
return torch.cat([batch_num[:, None].float(), bboxes, confidence[:, None], class_idx[:, None].float()], dim=1)
| #
# Lightnet postprocessing for Anchor based detectors (Darknet)
# Copyright EAVISE
#
import logging
import torch
import torch.nn as nn
from ..util import BaseTransform
__all__ = ['GetCornerBoxes']
log = logging.getLogger(__name__)
class GetCornerBoxes(BaseTransform):
""" Convert output from cornernet networks to bounding box tensor.
.. admonition:: Experimental
This post-processing implementation is still in development
and might not be yielding the same results as the official implementation.
Use at your own risk!
Args:
embedding_thresh (Number): Embedding distance threshold to filter matching corners
conf_thresh (Number [0-1]): Confidence threshold to filter detections
network_stride (Number): Downsampling factor of the network (most lightnet networks have a `inner_stride` attribute)
topk (Number, optional): Number of corners to select from the network output; Default **100**
subsample_kernel (Number, optional): Kernel size to perform maxpool subsampling; Default **0**
Returns:
(Tensor [Boxes x 7]]): **[batch_num, x_tl, y_tl, x_br, y_br, confidence, class_id]** for every bounding box
Note:
If setting the subsample_kernel to **0**, you disable the subsampling.
Otherwise this post-processing will perform maxpooling on the heatmap with the specified kernel.
"""
def __init__(self, embedding_thresh, conf_thresh, network_stride, topk=100, subsample_kernel=0):
super().__init__()
log.experimental(f'"{self.__class__.__name__}" is still in development. Use at your own risk!')
self.embedding_thresh = embedding_thresh
self.conf_thresh = conf_thresh
self.network_stride = network_stride
self.topk = topk
self.subsample_kernel = subsample_kernel
def forward(self, network_output):
device = network_output.device
batch, channels, h, w = network_output.shape
# Split tensor
network_output = network_output.view(batch, 2, -1, h, w) # BATCH, TLBR, NUM_CLASSES+3, H, W
heatmaps = torch.sigmoid(network_output[:, :, :-3]) # BATCH, TLBR, NUM_CLASSES, H, W
embedding = network_output[:, :, -3] # BATCH, TLBR, H, W
offsets = network_output[:, :, -2:] # BATCH, TLBR, XY, H, W
# Subsample heatmaps
if self.subsample_kernel:
maxpool_heat = nn.functional.max_pool2d(heatmaps.view(batch, -1, h, w), self.subsample_kernel, stride=1, padding=(self.subsample_kernel - 1) // 2)
heatmaps *= maxpool_heat.view(batch, 2, -1, h, w) == heatmaps
# Get topK corners
topk_heatmaps, topk_idx = torch.topk(heatmaps.view(batch, 2, -1), self.topk)
topk_classes = topk_idx // (h * w)
topk_idx %= (h * w)
topk_x = (topk_idx % w).float()
topk_y = (topk_idx // w).float()
# Add XY offsets
offset_x = torch.gather(offsets[:, :, 0].reshape(batch, 2, -1), 2, topk_idx)
offset_y = torch.gather(offsets[:, :, 1].reshape(batch, 2, -1), 2, topk_idx)
topk_x = topk_x + offset_x
topk_y = topk_y + offset_y
# Combine TL and BR corners
tl_x = topk_x[:, 0, :, None].expand(-1, self.topk, self.topk)
tl_y = topk_y[:, 0, :, None].expand(-1, self.topk, self.topk)
br_x = topk_x[:, 1, None, :].expand(-1, self.topk, self.topk)
br_y = topk_y[:, 1, None, :].expand(-1, self.topk, self.topk)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=3)
bboxes *= self.network_stride
# Create corner filter
corner_filter = (br_x >= tl_x) & (br_y >= tl_y)
# Create class filter
tl_classes = topk_classes[:, 0, :, None].expand(-1, self.topk, self.topk)
br_classes = topk_classes[:, 1, None, :].expand(-1, self.topk, self.topk)
class_filter = (tl_classes == br_classes)
# Create confidence filter
# NOTE : This is different than the original implementation, where they keep the TOP N detections
confidence = (topk_heatmaps[:, 0, :, None] + topk_heatmaps[:, 1, None, :]) / 2
confidence_filter = confidence > self.conf_thresh
# Create embedding filter
topk_embed = torch.gather(embedding.view(batch, 2, -1), 2, topk_idx)
dist = torch.abs(topk_embed[:, 0, :, None] - topk_embed[:, 1, None, :])
embedding_filter = dist <= self.embedding_thresh
# Get batch number of the detections
total_filter = class_filter & embedding_filter & corner_filter & confidence_filter
nums = torch.arange(0, batch, dtype=torch.uint8, device=total_filter.device)
batch_num = total_filter.view(batch, -1)
batch_num = nums[:, None].expand_as(batch_num)[batch_num]
# Apply filters and combine values
bboxes = bboxes[total_filter, :].view(-1, 4)
confidence = confidence[total_filter].view(-1)
class_idx = tl_classes[total_filter].view(-1)
return torch.cat([batch_num[:, None].float(), bboxes, confidence[:, None], class_idx[:, None].float()], dim=1)
| en | 0.708824 | # # Lightnet postprocessing for Anchor based detectors (Darknet) # Copyright EAVISE # Convert output from cornernet networks to bounding box tensor. .. admonition:: Experimental This post-processing implementation is still in development and might not be yielding the same results as the official implementation. Use at your own risk! Args: embedding_thresh (Number): Embedding distance threshold to filter matching corners conf_thresh (Number [0-1]): Confidence threshold to filter detections network_stride (Number): Downsampling factor of the network (most lightnet networks have a `inner_stride` attribute) topk (Number, optional): Number of corners to select from the network output; Default **100** subsample_kernel (Number, optional): Kernel size to perform maxpool subsampling; Default **0** Returns: (Tensor [Boxes x 7]]): **[batch_num, x_tl, y_tl, x_br, y_br, confidence, class_id]** for every bounding box Note: If setting the subsample_kernel to **0**, you disable the subsampling. Otherwise this post-processing will perform maxpooling on the heatmap with the specified kernel. # Split tensor # BATCH, TLBR, NUM_CLASSES+3, H, W # BATCH, TLBR, NUM_CLASSES, H, W # BATCH, TLBR, H, W # BATCH, TLBR, XY, H, W # Subsample heatmaps # Get topK corners # Add XY offsets # Combine TL and BR corners # Create corner filter # Create class filter # Create confidence filter # NOTE : This is different than the original implementation, where they keep the TOP N detections # Create embedding filter # Get batch number of the detections # Apply filters and combine values | 2.540817 | 3 |
regressionAnalysis.py | feghalya/CAMAP_Tools | 0 | 6613498 | #!/usr/bin/env python3
import argparse
import os
from camaptools.Dataset import RegressionDataset
from camaptools.Regression import RegressionMetaManager
from camaptools.EnhancedFutures import EnhancedProcessPoolExecutor, EnhancedMPIPoolExecutor
OUTPUT_FOLDER = "./output"
def add_blcl_peptide_lists(dataset):
train = []
test = []
with open('../CAMAP/peptides_dataset_162/peptides.source.train.txt', 'r') as f:
train.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.nonsource.train.txt', 'r') as f:
train.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.source.test.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.nonsource.test.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.source.validation.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.nonsource.validation.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
dataset.construct_datasets_options(train, test)
def main():
parser=argparse.ArgumentParser()
parser.add_argument("-g", "--genomes", help="genomes [GRCh37.75, etc.]", type=str, default="GRCh37.75,GRCm38.78")
#parser.add_argument("-d", "--dataset", help="dataset [BLCL, EL4, etc.]", type=str, default="BLCL")
parser.add_argument("-c", "--context", help="mRNA context length on each side", type=int, default=162)
parser.add_argument("-r", "--bs_or_rank", help="max binding score (>=100) or rank (<100)", type=int, default=1250)
parser.add_argument("-n", "--np", help="use rank score from naturally processed peptides", action='store_true')
parser.add_argument("-m", "--ncontexts", help="max contexts permitted to keep peptide", type=int, default=10)
parser.add_argument("-a", "--ann_method", help="ANN method to use", type=str, default='SGD')
parser.add_argument("-p", "--ann_parameters", help="ANN training parameters selection", type=str, default='e4000')
parser.add_argument("-x", "--max_replicates", help="ANN replicate runs to keep", type=int, default=None)
parser.add_argument("-w", "--workers", help="number of parallel workers in addition to main", type=int, default=0)
#parser.add_argument("-s", "--subworkers", help="number of parallel subworkers", type=int, default=0)
parser.add_argument("--mpi", help="Parallelize using MPI", action='store_true')
args=parser.parse_args().__dict__
genomes = args['genomes'].split(',')
#ds = args['dataset']
context = args['context']
max_bs_or_rank = args['bs_or_rank']
var = 'Rank_NP' if args['np'] else 'nM'
var = 'Rank' if max_bs_or_rank < 100 and var != 'Rank_NP' else var
max_contexts = args['ncontexts']
method = args['ann_method']
params = args['ann_parameters']
reps = args['max_replicates']
workers = args['workers']
#subworkers = args['subworkers']
mpi = args['mpi']
print('Genomes:', genomes)
#print('Dataset:', ds)
print('Context:', context)
print('Max BS/Rank:', max_bs_or_rank)
print('BS or Rank detected:', var)
print('Max contexts per peptide:', max_contexts)
print('ANN method:', method)
print('ANN parameters:', params)
print('Max CAMAP replicates:', reps)
print('Workers:', workers)
#print('Subworkers:', subworkers)
print('MPI:', mpi)
Executor = EnhancedMPIPoolExecutor if mpi else EnhancedProcessPoolExecutor
out_name = 'genome%s_padding%d_max%s%d_maxContexts%d_ANNMethod%s%s' % (
'genome'.join(genomes),
context,
var.replace('nM', 'BS').replace('_', ''),
max_bs_or_rank,
max_contexts,
method,
params
)
out_dir = os.path.join(OUTPUT_FOLDER, 'metrics', out_name)
genome_datasets = []
for genome in genomes:
if 'GRCh' in genome:
for ds in ['BLCL', 'B721', 'PBMCs']:
genome_datasets.append((genome, ds))
elif 'GRCm' in genome:
for ds in ['EL4', 'CT26']:
genome_datasets.append((genome, ds))
datasets = [RegressionDataset(g, d, context) for g, d in genome_datasets]
ix_blcl = None
for i, (g, d) in enumerate(genome_datasets):
if d == 'BLCL':
ix_blcl = i
if ix_blcl is not None:
add_blcl_peptide_lists(datasets[ix_blcl])
rem = RegressionMetaManager(datasets, out_dir, workers, Executor)
rem.set_load_peptides_options(
max_bs_or_rank=max_bs_or_rank,
var=var,
max_contexts=max_contexts,
step='evaluateDS',
ann_method=method,
ann_params=params,
max_replicates=reps)
rem.run()
rem.join()
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse
import os
from camaptools.Dataset import RegressionDataset
from camaptools.Regression import RegressionMetaManager
from camaptools.EnhancedFutures import EnhancedProcessPoolExecutor, EnhancedMPIPoolExecutor
OUTPUT_FOLDER = "./output"
def add_blcl_peptide_lists(dataset):
train = []
test = []
with open('../CAMAP/peptides_dataset_162/peptides.source.train.txt', 'r') as f:
train.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.nonsource.train.txt', 'r') as f:
train.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.source.test.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.nonsource.test.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.source.validation.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
with open('../CAMAP/peptides_dataset_162/peptides.nonsource.validation.txt', 'r') as f:
test.extend([x.strip() for x in f.readlines() if x.strip()])
dataset.construct_datasets_options(train, test)
def main():
parser=argparse.ArgumentParser()
parser.add_argument("-g", "--genomes", help="genomes [GRCh37.75, etc.]", type=str, default="GRCh37.75,GRCm38.78")
#parser.add_argument("-d", "--dataset", help="dataset [BLCL, EL4, etc.]", type=str, default="BLCL")
parser.add_argument("-c", "--context", help="mRNA context length on each side", type=int, default=162)
parser.add_argument("-r", "--bs_or_rank", help="max binding score (>=100) or rank (<100)", type=int, default=1250)
parser.add_argument("-n", "--np", help="use rank score from naturally processed peptides", action='store_true')
parser.add_argument("-m", "--ncontexts", help="max contexts permitted to keep peptide", type=int, default=10)
parser.add_argument("-a", "--ann_method", help="ANN method to use", type=str, default='SGD')
parser.add_argument("-p", "--ann_parameters", help="ANN training parameters selection", type=str, default='e4000')
parser.add_argument("-x", "--max_replicates", help="ANN replicate runs to keep", type=int, default=None)
parser.add_argument("-w", "--workers", help="number of parallel workers in addition to main", type=int, default=0)
#parser.add_argument("-s", "--subworkers", help="number of parallel subworkers", type=int, default=0)
parser.add_argument("--mpi", help="Parallelize using MPI", action='store_true')
args=parser.parse_args().__dict__
genomes = args['genomes'].split(',')
#ds = args['dataset']
context = args['context']
max_bs_or_rank = args['bs_or_rank']
var = 'Rank_NP' if args['np'] else 'nM'
var = 'Rank' if max_bs_or_rank < 100 and var != 'Rank_NP' else var
max_contexts = args['ncontexts']
method = args['ann_method']
params = args['ann_parameters']
reps = args['max_replicates']
workers = args['workers']
#subworkers = args['subworkers']
mpi = args['mpi']
print('Genomes:', genomes)
#print('Dataset:', ds)
print('Context:', context)
print('Max BS/Rank:', max_bs_or_rank)
print('BS or Rank detected:', var)
print('Max contexts per peptide:', max_contexts)
print('ANN method:', method)
print('ANN parameters:', params)
print('Max CAMAP replicates:', reps)
print('Workers:', workers)
#print('Subworkers:', subworkers)
print('MPI:', mpi)
Executor = EnhancedMPIPoolExecutor if mpi else EnhancedProcessPoolExecutor
out_name = 'genome%s_padding%d_max%s%d_maxContexts%d_ANNMethod%s%s' % (
'genome'.join(genomes),
context,
var.replace('nM', 'BS').replace('_', ''),
max_bs_or_rank,
max_contexts,
method,
params
)
out_dir = os.path.join(OUTPUT_FOLDER, 'metrics', out_name)
genome_datasets = []
for genome in genomes:
if 'GRCh' in genome:
for ds in ['BLCL', 'B721', 'PBMCs']:
genome_datasets.append((genome, ds))
elif 'GRCm' in genome:
for ds in ['EL4', 'CT26']:
genome_datasets.append((genome, ds))
datasets = [RegressionDataset(g, d, context) for g, d in genome_datasets]
ix_blcl = None
for i, (g, d) in enumerate(genome_datasets):
if d == 'BLCL':
ix_blcl = i
if ix_blcl is not None:
add_blcl_peptide_lists(datasets[ix_blcl])
rem = RegressionMetaManager(datasets, out_dir, workers, Executor)
rem.set_load_peptides_options(
max_bs_or_rank=max_bs_or_rank,
var=var,
max_contexts=max_contexts,
step='evaluateDS',
ann_method=method,
ann_params=params,
max_replicates=reps)
rem.run()
rem.join()
if __name__ == '__main__':
main()
| en | 0.213214 | #!/usr/bin/env python3 #parser.add_argument("-d", "--dataset", help="dataset [BLCL, EL4, etc.]", type=str, default="BLCL") #parser.add_argument("-s", "--subworkers", help="number of parallel subworkers", type=int, default=0) #ds = args['dataset'] #subworkers = args['subworkers'] #print('Dataset:', ds) #print('Subworkers:', subworkers) | 2.206879 | 2 |
test_asm_tools.py | veyselharun/Turna | 0 | 6613499 | <gh_stars>0
""" Test the functionality of asm_tools.py
This program tests the following functions of asm_tools.py
get_function_name
get_function_start
get_function_end
"""
# This file tests asm_tools.py
import asm_tools
def main(file_name: str):
"""Main function of this test program.
This function does not return a value.
Parameters
----------
file_name : str
The name of the assembly file to be tested.
"""
content: list = None
fn_name: str = None
fn_starting_point: int = -1
fn_end_point: int = -1
with open(file_name) as f:
content = f.read()
f.closed
assembly_code = content.splitlines()
# Test get_function_name
fn_name = asm_tools.get_function_name("1019c", assembly_code)
print(f"Expected function name: calc. Function start address \"1019c\". "
f"Found function name: {fn_name}.")
# Test get_function_start
fn_starting_point = asm_tools.get_function_start(fn_name, assembly_code)
print(f"Function \"calc\". Expected starting point of function: 97. "
f"Found starting point of function: {fn_starting_point}.")
# Test get_function_end
fn_end_point = asm_tools.get_function_end(fn_name, assembly_code)
print(f"Function \"calc\". Expected end point of function: 112. "
f"Found end point of function: {fn_end_point}.")
# Test error string of get_function_name
fn_name = asm_tools.get_function_name("200000", assembly_code)
print(f"Function should not be found on address \"200000\".")
if __name__ == "__main__":
"""Entry point of the program.
This test pogram tests asm_tools with the loop_test.dump file.
"""
file_name: str = "test_data/loop_test.dump"
print(f"The name of the assembly file to be tested is: {file_name}")
main(file_name) | """ Test the functionality of asm_tools.py
This program tests the following functions of asm_tools.py
get_function_name
get_function_start
get_function_end
"""
# This file tests asm_tools.py
import asm_tools
def main(file_name: str):
"""Main function of this test program.
This function does not return a value.
Parameters
----------
file_name : str
The name of the assembly file to be tested.
"""
content: list = None
fn_name: str = None
fn_starting_point: int = -1
fn_end_point: int = -1
with open(file_name) as f:
content = f.read()
f.closed
assembly_code = content.splitlines()
# Test get_function_name
fn_name = asm_tools.get_function_name("1019c", assembly_code)
print(f"Expected function name: calc. Function start address \"1019c\". "
f"Found function name: {fn_name}.")
# Test get_function_start
fn_starting_point = asm_tools.get_function_start(fn_name, assembly_code)
print(f"Function \"calc\". Expected starting point of function: 97. "
f"Found starting point of function: {fn_starting_point}.")
# Test get_function_end
fn_end_point = asm_tools.get_function_end(fn_name, assembly_code)
print(f"Function \"calc\". Expected end point of function: 112. "
f"Found end point of function: {fn_end_point}.")
# Test error string of get_function_name
fn_name = asm_tools.get_function_name("200000", assembly_code)
print(f"Function should not be found on address \"200000\".")
if __name__ == "__main__":
"""Entry point of the program.
This test pogram tests asm_tools with the loop_test.dump file.
"""
file_name: str = "test_data/loop_test.dump"
print(f"The name of the assembly file to be tested is: {file_name}")
main(file_name) | en | 0.510032 | Test the functionality of asm_tools.py This program tests the following functions of asm_tools.py get_function_name get_function_start get_function_end # This file tests asm_tools.py Main function of this test program. This function does not return a value. Parameters ---------- file_name : str The name of the assembly file to be tested. # Test get_function_name # Test get_function_start # Test get_function_end # Test error string of get_function_name Entry point of the program. This test pogram tests asm_tools with the loop_test.dump file. | 3.236129 | 3 |
boml/load_data/em_utils.py | LongMa319/BOML | 2 | 6613500 | """
Contains some misc utility functions
"""
import collections
import multiprocessing
from collections import OrderedDict, Callable
from functools import reduce
import numpy as np
def as_list(obj):
"""
Makes sure `obj` is a list or otherwise converts it to a list with a single element.
:param obj:
:return: A `list`
"""
return obj if isinstance(obj, list) else [obj]
def maybe_call(obj, *args, **kwargs):
"""
Calls obj with args and kwargs and return its result if obj is callable, otherwise returns obj.
"""
if callable(obj):
return obj(*args, **kwargs)
return obj
def as_tuple_or_list(obj):
"""
Make sure that `obj` is a tuple or a list and eventually converts it into a list with a single element
:param obj:
:return: A `tuple` or a `list`
"""
return obj if isinstance(obj, (list, tuple)) else [obj]
def maybe_get(obj, i):
return obj[i] if hasattr(obj, "__getitem__") else obj
def merge_dicts(*dicts):
return reduce(lambda a, nd: {**a, **nd}, dicts, {})
def flatten_list(lst):
from itertools import chain
return list(chain(*lst))
def filter_vars(var_name, scope):
import tensorflow as tf
return [
v
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope.name if hasattr(scope, "name") else scope,
)
if v.name.endswith("%s:0" % var_name)
]
def name_from_vars(var_dict, *vars_):
"""
Unfortunately this method doesn't return a very specific name....It gets a little messy
:param var_dict:
:param vars_:
:return:
"""
new_k_v = {}
for v in vars_:
for k, vv in var_dict.items():
if v == vv:
new_k_v[k] = v
return name_from_dict(new_k_v)
def name_from_dict(_dict, *exclude_names):
string_dict = {str(k): str(v) for k, v in _dict.items() if k not in exclude_names}
return _tf_string_replace("_".join(flatten_list(list(sorted(string_dict.items())))))
def _tf_string_replace(_str):
"""
Replace chars that are not accepted by tensorflow namings (eg. variable_scope)
:param _str:
:return:
"""
return (
_str.replace("[", "p")
.replace("]", "q")
.replace(",", "c")
.replace("(", "p")
.replace(")", "q")
.replace(" ", "")
)
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
def get_rand_state(rand):
"""
Utility methods for getting a `RandomState` object.
:param rand: rand can be None (new State will be generated),
np.random.RandomState (it will be returned) or an integer (will be treated as seed).
:return: a `RandomState` object
"""
if isinstance(rand, np.random.RandomState):
return rand
elif isinstance(rand, (int, np.ndarray, list)) or rand is None:
return np.random.RandomState(rand)
else:
raise ValueError("parameter rand {} has wrong type".format(rand))
def GPU_CONFIG():
import tensorflow as tf
CONFIG_GPU_GROWTH = tf.ConfigProto(allow_soft_placement=True)
CONFIG_GPU_GROWTH.gpu_options.allow_growth = True
return CONFIG_GPU_GROWTH
# SOME SCORING UTILS FUNCTIONS
half_int = lambda _m: 1.96 * np.std(_m) / np.sqrt(len(_m) - 1)
def mean_std_ci(measures, mul=1.0, tex=False):
"""
Computes mean, standard deviation and 95% half-confidence interval for a list of measures.
:param measures: list
:param mul: optional multiplication coefficient (e.g. for percentage)
:param tex: if True returns mean +- half_conf_interval for latex
:return: a list or a string in latex
"""
measures = np.array(measures) * mul
ms = np.mean(measures), np.std(measures), half_int(measures)
return ms if not tex else r"${:.2f} \pm {:.2f}$".format(ms[0], ms[2])
def leaky_relu(x, alpha, name=None):
"""
Implements leaky relu with negative coefficient `alpha`
"""
import tensorflow as tf
with tf.name_scope(name, "leaky_relu_{}".format(alpha)):
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def execute(target, *args, **kwargs):
pr = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
pr.start()
return pr
def get_global_step(name="GlobalStep", init=0):
import tensorflow as tf
return tf.get_variable(
name,
initializer=init,
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES],
)
class DefaultOrderedDict(OrderedDict):
# Source: http://stackoverflow.com/a/6190500/562769
def __init__(self, default_factory=None, *a, **kw):
if default_factory is not None and not isinstance(default_factory, Callable):
raise TypeError("first argument must be callable")
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = (self.default_factory,)
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory, copy.deepcopy(self.items()))
def __repr__(self):
return "OrderedDefaultDict(%s, %s)" % (
self.default_factory,
OrderedDict.__repr__(self),
)
| """
Contains some misc utility functions
"""
import collections
import multiprocessing
from collections import OrderedDict, Callable
from functools import reduce
import numpy as np
def as_list(obj):
"""
Makes sure `obj` is a list or otherwise converts it to a list with a single element.
:param obj:
:return: A `list`
"""
return obj if isinstance(obj, list) else [obj]
def maybe_call(obj, *args, **kwargs):
"""
Calls obj with args and kwargs and return its result if obj is callable, otherwise returns obj.
"""
if callable(obj):
return obj(*args, **kwargs)
return obj
def as_tuple_or_list(obj):
"""
Make sure that `obj` is a tuple or a list and eventually converts it into a list with a single element
:param obj:
:return: A `tuple` or a `list`
"""
return obj if isinstance(obj, (list, tuple)) else [obj]
def maybe_get(obj, i):
return obj[i] if hasattr(obj, "__getitem__") else obj
def merge_dicts(*dicts):
return reduce(lambda a, nd: {**a, **nd}, dicts, {})
def flatten_list(lst):
from itertools import chain
return list(chain(*lst))
def filter_vars(var_name, scope):
import tensorflow as tf
return [
v
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope.name if hasattr(scope, "name") else scope,
)
if v.name.endswith("%s:0" % var_name)
]
def name_from_vars(var_dict, *vars_):
"""
Unfortunately this method doesn't return a very specific name....It gets a little messy
:param var_dict:
:param vars_:
:return:
"""
new_k_v = {}
for v in vars_:
for k, vv in var_dict.items():
if v == vv:
new_k_v[k] = v
return name_from_dict(new_k_v)
def name_from_dict(_dict, *exclude_names):
string_dict = {str(k): str(v) for k, v in _dict.items() if k not in exclude_names}
return _tf_string_replace("_".join(flatten_list(list(sorted(string_dict.items())))))
def _tf_string_replace(_str):
"""
Replace chars that are not accepted by tensorflow namings (eg. variable_scope)
:param _str:
:return:
"""
return (
_str.replace("[", "p")
.replace("]", "q")
.replace(",", "c")
.replace("(", "p")
.replace(")", "q")
.replace(" ", "")
)
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
def get_rand_state(rand):
    """Coerce `rand` into a `numpy.random.RandomState`.

    :param rand: ``None`` (a fresh state is created), an existing
        ``RandomState`` (returned unchanged), or a seed
        (``int``, ``ndarray`` or ``list``).
    :return: a `numpy.random.RandomState` object
    :raises ValueError: when `rand` is none of the accepted types
    """
    if isinstance(rand, np.random.RandomState):
        return rand
    if rand is None or isinstance(rand, (int, np.ndarray, list)):
        return np.random.RandomState(rand)
    raise ValueError("parameter rand {} has wrong type".format(rand))
def GPU_CONFIG():
    """Return a tf ConfigProto with soft placement and GPU memory growth enabled."""
    import tensorflow as tf
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    return config
# SOME SCORING UTILS FUNCTIONS
def half_int(_m):
    """Half-width of the 95% confidence interval for the sample mean of `_m`."""
    return 1.96 * np.std(_m) / np.sqrt(len(_m) - 1)
def mean_std_ci(measures, mul=1.0, tex=False):
    """Compute mean, standard deviation and 95% half-confidence interval.

    :param measures: iterable of numeric measures
    :param mul: optional multiplication coefficient (e.g. for percentages)
    :param tex: when True, return a latex string ``mean \\pm half_interval``
    :return: a ``(mean, std, half_interval)`` tuple, or a latex string
        when `tex` is True
    """
    scaled = np.array(measures) * mul
    stats = (np.mean(scaled), np.std(scaled), half_int(scaled))
    if tex:
        return r"${:.2f} \pm {:.2f}$".format(stats[0], stats[2])
    return stats
def leaky_relu(x, alpha, name=None):
    """Leaky ReLU activation: identity for positive `x`, slope `alpha` below zero."""
    import tensorflow as tf
    default_scope = "leaky_relu_{}".format(alpha)
    with tf.name_scope(name, default_scope):
        return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def execute(target, *args, **kwargs):
    """Run ``target(*args, **kwargs)`` in a new process.

    :return: the started `multiprocessing.Process` (caller may join it)
    """
    process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
    process.start()
    return process
def get_global_step(name="GlobalStep", init=0):
    """Create (or fetch) a non-trainable global-step variable registered
    in both the GLOBAL_STEP and GLOBAL_VARIABLES collections."""
    import tensorflow as tf
    step_collections = [tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES]
    return tf.get_variable(
        name, initializer=init, trainable=False, collections=step_collections
    )
class DefaultOrderedDict(OrderedDict):
    """An OrderedDict with defaultdict semantics: missing keys are created
    with `default_factory`, and insertion order is preserved.

    Source: http://stackoverflow.com/a/6190500/562769
    """

    def __init__(self, default_factory=None, *a, **kw):
        if default_factory is not None and not isinstance(default_factory, Callable):
            raise TypeError("first argument must be callable")
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory

    def __getitem__(self, key):
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        # Store the freshly created default so later lookups see the same object.
        self[key] = value = self.default_factory()
        return value

    def __reduce__(self):
        if self.default_factory is None:
            args = tuple()
        else:
            args = (self.default_factory,)
        # Bug fix: pickle requires an *iterator* of items as the fifth
        # element; a Python 3 dict view is not an iterator.
        return type(self), args, None, None, iter(self.items())

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        # Shallow copy: same factory, same value objects.
        return type(self)(self.default_factory, self)

    def __deepcopy__(self, memo):
        import copy
        # Bug fix: `self.items()` is a view on Python 3 and cannot be
        # deep-copied directly; materialise it as a list first (and pass
        # `memo` through so shared references are preserved).
        return type(self)(self.default_factory,
                          copy.deepcopy(list(self.items()), memo))

    def __repr__(self):
        return "OrderedDefaultDict(%s, %s)" % (
            self.default_factory,
            OrderedDict.__repr__(self),
        )
| en | 0.726206 | Contains some misc utility functions Makes sure `obj` is a list or otherwise converts it to a list with a single element. :param obj: :return: A `list` Calls obj with args and kwargs and return its result if obj is callable, otherwise returns obj. Make sure that `obj` is a tuple or a list and eventually converts it into a list with a single element :param obj: :return: A `tuple` or a `list` Unfortunately this method doesn't return a very specific name....It gets a little messy :param var_dict: :param vars_: :return: Replace chars that are not accepted by tensorflow namings (eg. variable_scope) :param _str: :return: Utility methods for getting a `RandomState` object. :param rand: rand can be None (new State will be generated), np.random.RandomState (it will be returned) or an integer (will be treated as seed). :return: a `RandomState` object # SOME SCORING UTILS FUNCTIONS Computes mean, standard deviation and 95% half-confidence interval for a list of measures. :param measures: list :param mul: optional multiplication coefficient (e.g. for percentage) :param tex: if True returns mean +- half_conf_interval for latex :return: a list or a string in latex Implements leaky relu with negative coefficient `alpha` # Source: http://stackoverflow.com/a/6190500/562769 | 2.778823 | 3 |
tools/data_spreadsheet.py | despinoza1/DTA-Predictor | 0 | 6613501 | <reponame>despinoza1/DTA-Predictor<filename>tools/data_spreadsheet.py<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import xlsxwriter
import math
def get_meta(csv_file='data/DTC_data.csv', save_xlsx=True):
    """Collect per-compound binding metadata from a DTC csv export.

    Builds, for every compound: the list of target ids, the number of
    targets, and the Kd value measured against each target.  Rows are
    skipped when the unit is not 'NM', the value is NaN, or the
    compound/target id is missing (pandas reads missing ids as float NaN).

    :param csv_file: path of the csv export to read
    :param save_xlsx: when True, write a 'metadata.xlsx' summary
        spreadsheet; otherwise write a plain 'binding_KD.csv' file
    """
    workbook = None
    worksheet = None
    if save_xlsx:
        workbook = xlsxwriter.Workbook('metadata.xlsx', {'nan_inf_to_errors': True})
        worksheet = workbook.add_worksheet()
    dataset = pd.read_csv(csv_file)
    info = (
        [],  # compound ids
        {},  # target ids keyed by compound id
        {},  # number of targets keyed by compound id
        {}   # drug kinase Kd values keyed by compound id
    )
    shape = dataset.shape
    for i in range(shape[0]):
        c_id = dataset['compound_id'][i]
        value, unit = dataset['standard_value'][i], dataset['standard_units'][i]
        t_id = dataset['target_id'][i]
        # Bug fix: `value == float('nan')` is always False because NaN never
        # compares equal to itself; pd.isna() detects missing values properly.
        if unit != 'NM' or pd.isna(value):
            continue
        # Missing ids are parsed as float NaN; skip those rows.
        if type(c_id) is float or type(t_id) is float:
            continue
        # Add compound+target Kd
        info[3].setdefault(c_id, []).append(value)
        # Add compound id
        if c_id not in info[0]:
            info[0].append(c_id)
        # Add target id
        info[1].setdefault(c_id, []).append(t_id)
        # Increase number of targets
        info[2][c_id] = info[2].get(c_id, 0) + 1
    row, col = 1, 0
    if save_xlsx:
        worksheet.write(0, 0, "Compound ID")
        worksheet.write(0, 1, "# of Targets")
        worksheet.write(0, 2, "Target ID")
        worksheet.write(0, 3, "Kd")
        total = 0
        for i in range(len(info[0])):
            worksheet.write(row, col, info[0][i])
            worksheet.write(row, col+1, info[2][info[0][i]])
            col = 2
            for j in range(len(info[1][info[0][i]])):
                try:
                    worksheet.write(row, col, info[1][info[0][i]][j])
                    worksheet.write(row, col+1, info[3][info[0][i]][j])
                except IndexError:
                    # Target/Kd lists diverged in length; report and continue.
                    print(j, info[1][info[0][i]], info[3][info[0][i]], sep=' ')
                col += 2
            total += info[2][info[0][i]]
            row += 1
            col = 0
        print("Total number of compounds: ", len(info[0]))
        print("Total number of targets: ", total)
        workbook.close()
    else:
        with open('binding_KD.csv', 'w') as f:
            for i in range(len(info[0])):
                c_id = info[0][i]
                entry = ''
                for j in range(info[2][c_id]):
                    entry = entry + '{},"{}",{}\n'.format(c_id, info[1][c_id][j], info[3][c_id][j])
                f.write(entry)
def split_data(Type='Kd', csv_file='Data/DTC_data.csv'):
    """Keep only the rows of `csv_file` whose standard_type equals `Type`
    and save them to ``data/<Type>_DTC_data.csv``.

    :param Type: measurement type to keep (e.g. 'Kd', 'Ki', 'IC50')
    :param csv_file: path of the csv export to read
    """
    dataset = pd.read_csv(csv_file)
    # Bug fix: the filter previously hard-coded 'Kd' and ignored `Type`;
    # the default behaviour (Type='Kd') is unchanged.
    dataset.drop(dataset[dataset.standard_type != Type].index, inplace=True)
    dataset.to_csv("data/{}_DTC_data.csv".format(Type), index=False, encoding='utf-8')
def main():
    """Entry point: dump the Kd metadata as a plain csv (no spreadsheet)."""
    get_meta('misc/KD_DTC_data.csv', False)


if __name__ == '__main__':
    main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import xlsxwriter
import math
def get_meta(csv_file='data/DTC_data.csv', save_xlsx=True):
workbook = None
worksheet = None
if save_xlsx:
workbook = xlsxwriter.Workbook('metadata.xlsx', {'nan_inf_to_errors': True})
worksheet = workbook.add_worksheet()
dataset = pd.read_csv(csv_file)
info = (
[], #compound ids
{}, #target ids
{}, #number of targets
{} #drug kinase Kd
)
shape = dataset.shape
for i in range(shape[0]):
c_id = dataset['compound_id'][i]
value, unit = dataset['standard_value'][i], dataset['standard_units'][i]
t_id = dataset['target_id'][i]
if unit != 'NM' or value == float('nan'):
continue
if type(c_id) is float or type(t_id) is float:
continue
#Add compound+target Kd
if c_id in info[3]:
info[3][c_id].append(value)
else:
info[3][c_id] = []
info[3][c_id].append(value)
#Add compound id
if c_id not in info[0]:
info[0].append(c_id)
#Add target id
if c_id in info[1]:
info[1][c_id].append(t_id)
else:
info[1][c_id] = []
info[1][c_id].append(t_id)
#Increase number of targets
if c_id in info[2]:
info[2][c_id] += 1
else:
info[2][c_id] = 1
row, col = 1, 0
if save_xlsx:
worksheet.write(0, 0, "Compound ID")
worksheet.write(0, 1, "# of Targets")
worksheet.write(0, 2, "Target ID")
worksheet.write(0, 3, "Kd")
total = 0
for i in range(len(info[0])):
worksheet.write(row, col, info[0][i])
worksheet.write(row, col+1, info[2][info[0][i]])
col = 2
for j in range(len(info[1][info[0][i]])):
try:
worksheet.write(row, col, info[1][info[0][i]][j])
worksheet.write(row, col+1, info[3][info[0][i]][j])
except IndexError:
print(j, info[1][info[0][i]], info[3][info[0][i]], sep=' ')
col += 2
total += info[2][info[0][i]]
row += 1
col = 0
print("Total number of compounds: ", len(info[0]))
print("Total number of targets: ", total)
workbook.close()
else:
with open('binding_KD.csv', 'w') as f:
for i in range(len(info[0])):
c_id = info[0][i]
entry = ''
#entry = '{}'.format(c_id)
for j in range(info[2][c_id]):
entry = entry + '{},"{}",{}\n'.format(c_id, info[1][c_id][j], info[3][c_id][j])
f.write(entry)
def split_data(Type='Kd', csv_file='Data/DTC_data.csv'):
dataset = pd.read_csv(csv_file)
dataset.drop(dataset[dataset.standard_type != 'Kd'].index, inplace=True)
#shape = dataset.shape
#for i in range(shape[0]):
# if dataset['standard_type'][i].upper() != Type:
# dataset = dataset.drop(i, axis=0)
# i -= 1
dataset.to_csv("data/{}_DTC_data.csv".format(Type), index=False, encoding='utf-8')
def main():
get_meta('misc/KD_DTC_data.csv', False)
#split_data()
if __name__ == '__main__':
main() | en | 0.481443 | #compound ids #target ids #number of targets #drug kinase Kd #Add compound+target Kd #Add compound id #Add target id #Increase number of targets #entry = '{}'.format(c_id) #shape = dataset.shape #for i in range(shape[0]): # if dataset['standard_type'][i].upper() != Type: # dataset = dataset.drop(i, axis=0) # i -= 1 #split_data() | 2.362033 | 2 |
tests/models/test_base.py | cryptk/hourglass | 3 | 6613502 | import pytest
import datetime as dt
from opsy.auth.models import User
from opsy.inventory.models import Host, Group
###############################################################################
# OpsyQuery
###############################################################################
def test_opsy_query(test_user, test_users, test_inventory_bootstrap):
    """Test opsy_query to make sure it works correctly.

    Covers get_or_fail/first_or_fail plus the filter_in mini-language as
    exercised below: ',' separates alternatives, '!' negates, '*' is a
    wildcard, and '___' traverses a relationship (e.g. zone___name).
    NOTE(review): the hard-coded counts assume test_inventory_bootstrap
    creates exactly six hosts (west/central/east x prom/consul) -- confirm
    against the fixture if these fail.
    """
    # Test get_or_fail
    assert User.query.get_or_fail(test_user.id) == test_user
    with pytest.raises(ValueError):
        User.query.get_or_fail('thisisnotarealuser')
    # Test first_or_fail
    assert User.query.filter_by(id=test_user.id).first_or_fail() == test_user
    with pytest.raises(ValueError):
        User.query.filter_by(id='thisisnotarealuser').first_or_fail()
    # Setup for filter_in tests
    westprom = Host.query.filter_by(name='westprom').first()
    westconsul = Host.query.filter_by(name='westconsul').first()
    centralprom = Host.query.filter_by(name='centralprom').first()
    centralconsul = Host.query.filter_by(name='centralconsul').first()
    eastprom = Host.query.filter_by(name='eastprom').first()
    eastconsul = Host.query.filter_by(name='eastconsul').first()
    # Test empty query
    assert len(Host.query.filter_in().all()) == 6
    # Test if we don't give it a string it bypasses the string operations
    eastconsul.update(updated_at=dt.datetime.utcnow())
    assert Host.query.filter_in(
        updated_at=eastconsul.updated_at).first() == eastconsul
    # Test filter_in include
    assert Host.query.filter_in(name='westprom').first() == westprom
    for node in Host.query.filter_in(name='westprom,eastprom').all():
        assert node in [westprom, eastprom]
    # Test filter_in exclude
    assert len(Host.query.filter_in(name='!westprom').all()) == 5
    for node in Host.query.filter_in(name='!westprom').all():
        assert isinstance(node, Host)
        assert node != westprom
    assert len(Host.query.filter_in(name='!westprom,!eastprom').all()) == 4
    for node in Host.query.filter_in(name='!westprom,!eastprom').all():
        assert isinstance(node, Host)
        assert node not in [westprom, eastprom]
    # Test filter_in like
    assert len(Host.query.filter_in(name='*prom').all()) == 3
    for node in Host.query.filter_in(name='*prom').all():
        assert isinstance(node, Host)
        assert node in [westprom, centralprom, eastprom]
    # Test filter_in not like
    assert len(Host.query.filter_in(name='!*prom').all()) == 3
    for node in Host.query.filter_in(name='!*prom').all():
        assert isinstance(node, Host)
        assert node not in [westprom, centralprom, eastprom]
    # Now all of these over again but with relationship filters!
    # We do these with both groups___name and zone___name since they're
    # different types of relationships (many-to-many and one-to-many).
    # Test filter_in include
    assert len(Host.query.filter_in(zone___name='east,west').all()) == 4
    for node in Host.query.filter_in(zone___name='east,west').all():
        assert isinstance(node, Host)
        assert node in [westprom, westconsul, eastprom, eastconsul]
    assert len(Host.query.filter_in(groups___name='prom_nodes').all()) == 3
    for node in Host.query.filter_in(groups___name='prom_nodes').all():
        assert isinstance(node, Host)
        assert node in [westprom, centralprom, eastprom]
    # Test filter_in exclude
    assert len(Host.query.filter_in(zone___name='!west,!east').all()) == 2
    for node in Host.query.filter_in(zone___name='!west,!east').all():
        assert isinstance(node, Host)
        assert node in [centralprom, centralconsul]
    assert len(Host.query.filter_in(groups___name='!prom_nodes').all()) == 3
    for node in Host.query.filter_in(groups___name='!prom_nodes').all():
        assert isinstance(node, Host)
        assert node in [westconsul, centralconsul, eastconsul]
    # Test filter_in like
    assert len(Host.query.filter_in(zone___name='*st').all()) == 4
    for node in Host.query.filter_in(zone___name='*st').all():
        assert isinstance(node, Host)
        assert node in [westprom, westconsul, eastprom, eastconsul]
    assert len(Host.query.filter_in(groups___name='prom*').all()) == 3
    for node in Host.query.filter_in(groups___name='prom*').all():
        assert isinstance(node, Host)
        assert node in [westprom, centralprom, eastprom]
    # Test filter_in not like
    assert len(Host.query.filter_in(zone___name='!*st').all()) == 2
    for node in Host.query.filter_in(zone___name='!*st').all():
        assert isinstance(node, Host)
        assert node in [centralprom, centralconsul]
    assert len(Host.query.filter_in(groups___name='!prom*').all()) == 3
    for node in Host.query.filter_in(groups___name='!prom*').all():
        assert isinstance(node, Host)
        assert node not in [westprom, centralprom, eastprom]
###############################################################################
# BaseModel
###############################################################################
def test_base_model(test_group, test_groups):
    """Exercise the BaseModel CRUD helpers via the Group model."""
    # __repr__ comes from BaseModel; go through super() since Group overrides it.
    expected_repr = f'<{test_group.__class__.__name__} {test_group.id}>'
    assert super(Group, test_group).__repr__() == expected_repr
    # create()
    grp = Group.create(name='new_group')
    assert grp.id is not None
    assert Group.query.filter_by(id=grp.id).first() == grp
    # update()
    grp.update(name='my_new_group')
    assert grp.name == 'my_new_group'
    # delete()
    grp_id = grp.id
    grp.delete()
    assert Group.query.filter_by(id=grp_id).first() is None
    # get_by_id()
    assert Group.get_by_id(test_group.id) == test_group
    with pytest.raises(ValueError):
        Group.get_by_id('thisgroupdoesntexist')
    # update_by_id()
    Group.update_by_id(test_group.id, name='my_test_group')
    assert test_group.name == 'my_test_group'
    with pytest.raises(ValueError):
        Group.update_by_id('thisgroupdoesntexist', name='Cool bois')
    # delete_by_id()
    removed_id = test_group.id
    Group.delete_by_id(test_group.id)
    assert Group.query.filter_by(id=removed_id).first() is None
    with pytest.raises(ValueError):
        Group.delete_by_id('thisgroupdoesntexist')
###############################################################################
# NamedModel
###############################################################################
def test_named_model(test_user, test_users):
    """Exercise the NamedModel helpers (id-or-name lookups) via the User model."""
    # __repr__ uses the name rather than the id.
    assert repr(test_user) == f'<{test_user.__class__.__name__} {test_user.name}>'
    # create()
    created = User.create('new_user')
    assert created.id is not None
    assert User.query.filter_by(id=created.id).first() == created
    # get_by_id_or_name() accepts either identifier
    assert User.get_by_id_or_name(created.id) == created
    assert User.get_by_id_or_name(created.name) == created
    with pytest.raises(ValueError):
        User.get_by_id_or_name('thisuserdoesntexist')
    # update_by_id_or_name()
    User.update_by_id_or_name(created.name, full_name='<NAME>')
    assert created.full_name == '<NAME>'
    User.update_by_id_or_name(created.id, full_name='<NAME>')
    assert created.full_name == '<NAME>'
    with pytest.raises(ValueError):
        User.update_by_id_or_name('thisuserdoesntexist', full_name='<NAME>')
    # delete_by_id_or_name()
    created_id = created.id
    original_id = test_user.id
    User.delete_by_id_or_name(created.id)
    assert User.query.filter_by(id=created_id).first() is None
    User.delete_by_id_or_name(test_user.name)
    assert User.query.filter_by(id=original_id).first() is None
    with pytest.raises(ValueError):
        User.delete_by_id_or_name('thisuserdoesntexist')
| import pytest
import datetime as dt
from opsy.auth.models import User
from opsy.inventory.models import Host, Group
###############################################################################
# OpsyQuery
###############################################################################
def test_opsy_query(test_user, test_users, test_inventory_bootstrap):
"""Test opsy_query to make sure it works correctly."""
# Test get_or_fail
assert User.query.get_or_fail(test_user.id) == test_user
with pytest.raises(ValueError):
User.query.get_or_fail('thisisnotarealuser')
# Test first_or_fail
assert User.query.filter_by(id=test_user.id).first_or_fail() == test_user
with pytest.raises(ValueError):
User.query.filter_by(id='thisisnotarealuser').first_or_fail()
# Setup for filter_in tests
westprom = Host.query.filter_by(name='westprom').first()
westconsul = Host.query.filter_by(name='westconsul').first()
centralprom = Host.query.filter_by(name='centralprom').first()
centralconsul = Host.query.filter_by(name='centralconsul').first()
eastprom = Host.query.filter_by(name='eastprom').first()
eastconsul = Host.query.filter_by(name='eastconsul').first()
# Test empty query
assert len(Host.query.filter_in().all()) == 6
# Test if we don't give it a string it bypasses the string operations
eastconsul.update(updated_at=dt.datetime.utcnow())
assert Host.query.filter_in(
updated_at=eastconsul.updated_at).first() == eastconsul
# Test filter_in include
assert Host.query.filter_in(name='westprom').first() == westprom
for node in Host.query.filter_in(name='westprom,eastprom').all():
assert node in [westprom, eastprom]
# Test filter_in exclude
assert len(Host.query.filter_in(name='!westprom').all()) == 5
for node in Host.query.filter_in(name='!westprom').all():
assert isinstance(node, Host)
assert node != westprom
assert len(Host.query.filter_in(name='!westprom,!eastprom').all()) == 4
for node in Host.query.filter_in(name='!westprom,!eastprom').all():
assert isinstance(node, Host)
assert node not in [westprom, eastprom]
# Test filter_in like
assert len(Host.query.filter_in(name='*prom').all()) == 3
for node in Host.query.filter_in(name='*prom').all():
assert isinstance(node, Host)
assert node in [westprom, centralprom, eastprom]
# Test filter_in not like
assert len(Host.query.filter_in(name='!*prom').all()) == 3
for node in Host.query.filter_in(name='!*prom').all():
assert isinstance(node, Host)
assert node not in [westprom, centralprom, eastprom]
# Now all of these over again but with relationship filters!
# We do these with both groups___name and zone___name since they're
# different types of relationships (many-to-many and one-to-many).
# Test filter_in include
assert len(Host.query.filter_in(zone___name='east,west').all()) == 4
for node in Host.query.filter_in(zone___name='east,west').all():
assert isinstance(node, Host)
assert node in [westprom, westconsul, eastprom, eastconsul]
assert len(Host.query.filter_in(groups___name='prom_nodes').all()) == 3
for node in Host.query.filter_in(groups___name='prom_nodes').all():
assert isinstance(node, Host)
assert node in [westprom, centralprom, eastprom]
# Test filter_in exclude
assert len(Host.query.filter_in(zone___name='!west,!east').all()) == 2
for node in Host.query.filter_in(zone___name='!west,!east').all():
assert isinstance(node, Host)
assert node in [centralprom, centralconsul]
assert len(Host.query.filter_in(groups___name='!prom_nodes').all()) == 3
for node in Host.query.filter_in(groups___name='!prom_nodes').all():
assert isinstance(node, Host)
assert node in [westconsul, centralconsul, eastconsul]
# Test filter_in like
assert len(Host.query.filter_in(zone___name='*st').all()) == 4
for node in Host.query.filter_in(zone___name='*st').all():
assert isinstance(node, Host)
assert node in [westprom, westconsul, eastprom, eastconsul]
assert len(Host.query.filter_in(groups___name='prom*').all()) == 3
for node in Host.query.filter_in(groups___name='prom*').all():
assert isinstance(node, Host)
assert node in [westprom, centralprom, eastprom]
# Test filter_in not like
assert len(Host.query.filter_in(zone___name='!*st').all()) == 2
for node in Host.query.filter_in(zone___name='!*st').all():
assert isinstance(node, Host)
assert node in [centralprom, centralconsul]
assert len(Host.query.filter_in(groups___name='!prom*').all()) == 3
for node in Host.query.filter_in(groups___name='!prom*').all():
assert isinstance(node, Host)
assert node not in [westprom, centralprom, eastprom]
###############################################################################
# BaseModel
###############################################################################
def test_base_model(test_group, test_groups):
"""Test BaseModel to make sure it works correctly."""
# Test __repr__ (have to use super here since Group overrides this)
assert super(Group, test_group).__repr__() == \
f'<{test_group.__class__.__name__} {test_group.id}>'
# Test create
new_group = Group.create(name='new_group')
assert new_group.id is not None
assert Group.query.filter_by(id=new_group.id).first() == new_group
# Test update
new_group.update(name='my_new_group')
assert new_group.name == 'my_new_group'
# Test Delete
new_group_id = new_group.id
new_group.delete()
assert Group.query.filter_by(id=new_group_id).first() is None
# Test get_by_id
assert Group.get_by_id(test_group.id) == test_group
with pytest.raises(ValueError):
Group.get_by_id('thisgroupdoesntexist')
# Test updated_by_id
Group.update_by_id(test_group.id, name='my_test_group')
assert test_group.name == 'my_test_group'
with pytest.raises(ValueError):
Group.update_by_id('thisgroupdoesntexist', name='Cool bois')
# Test delete_by_id
test_group_id = test_group.id
Group.delete_by_id(test_group.id)
assert Group.query.filter_by(id=test_group_id).first() is None
with pytest.raises(ValueError):
Group.delete_by_id('thisgroupdoesntexist')
###############################################################################
# NamedModel
###############################################################################
def test_named_model(test_user, test_users):
"""Test NamedModel to make sure it works correctly."""
# Test __repr__
assert test_user.__repr__() == \
f'<{test_user.__class__.__name__} {test_user.name}>'
# Test create
new_user = User.create('new_user')
assert new_user.id is not None
assert User.query.filter_by(id=new_user.id).first() == new_user
# Test get_by_id_or_name
assert User.get_by_id_or_name(new_user.id) == new_user
assert User.get_by_id_or_name(new_user.name) == new_user
with pytest.raises(ValueError):
User.get_by_id_or_name('thisuserdoesntexist')
# Test update_by_id_or_name
User.update_by_id_or_name(new_user.name, full_name='<NAME>')
assert new_user.full_name == '<NAME>'
User.update_by_id_or_name(new_user.id, full_name='<NAME>')
assert new_user.full_name == '<NAME>'
with pytest.raises(ValueError):
User.update_by_id_or_name('thisuserdoesntexist', full_name='<NAME>')
# Test delete_by_id_or_name
new_user_id = new_user.id
test_user_id = test_user.id
User.delete_by_id_or_name(new_user.id)
assert User.query.filter_by(id=new_user_id).first() is None
User.delete_by_id_or_name(test_user.name)
assert User.query.filter_by(id=test_user_id).first() is None
with pytest.raises(ValueError):
User.delete_by_id_or_name('thisuserdoesntexist')
| en | 0.345961 | ############################################################################### # OpsyQuery ############################################################################### Test opsy_query to make sure it works correctly. # Test get_or_fail # Test first_or_fail # Setup for filter_in tests # Test empty query # Test if we don't give it a string it bypasses the string operations # Test filter_in include # Test filter_in exclude # Test filter_in like # Test filter_in not like # Now all of these over again but with relationship filters! # We do these with both groups___name and zone___name since they're # different types of relationships (many-to-many and one-to-many). # Test filter_in include # Test filter_in exclude # Test filter_in like # Test filter_in not like ############################################################################### # BaseModel ############################################################################### Test BaseModel to make sure it works correctly. # Test __repr__ (have to use super here since Group overrides this) # Test create # Test update # Test Delete # Test get_by_id # Test updated_by_id # Test delete_by_id ############################################################################### # NamedModel ############################################################################### Test NamedModel to make sure it works correctly. # Test __repr__ # Test create # Test get_by_id_or_name # Test update_by_id_or_name # Test delete_by_id_or_name | 2.274361 | 2 |
cgi-bin/buildMenu.py | sugar-activities/4430-activity | 0 | 6613503 | <reponame>sugar-activities/4430-activity
#!/usr/bin/python
#build index.html for a specified level
#
def buildMenu(levelName, added_links=(), added_scripts=()):
    """Return a generic 'index.html' string for the given level.

    :param levelName: one of 'subject', 'course', 'unit', 'lesson',
        'activity'; selects the base css links / scripts for that level
        and the relative path depth used for files under karma/.
    :param added_links: optional extra LINKS keys requested by the caller
        (tuples used as defaults instead of mutable lists)
    :param added_scripts: optional extra SCRIPTS keys requested by the caller
    :return: the assembled html document as a string

    linkList / scriptList hold one list of keys per level.  Paths to css
    and script files under karma/ are prefixed with '../' per level of
    depth; other files are expected to sit next to the generated
    index.html.  Debug output is appended to /tmp/logMenu.
    """
    log = open('/tmp/logMenu', 'w')
    print >> log, 'buildMenu', levelName
    print >> log, 'addl', len(added_links), added_links
    print >> log, 'adds', len(added_scripts), added_scripts
    LEVELS = {'subject': 0, 'course': 1, 'unit': 2, 'lesson': 3, 'activity': 4}
    PREFIXES = ['./', '../', '../../', '../../../', '../../../../']
    HEAD = '<!DOCTYPE html>\n<html>\n<head>\n<meta charset="UTF-8"/><meta http-equiv="Pragma" content="no-cache"/>'
    FOOT = '</head>\n<body>\n<div id="header"></div>\n'
    FOOT = FOOT + '<div id = "content"></div>\n'
    FOOT = FOOT + '<div id = "footer"></div>\n</body>\n</html>\n'
    LINKS = {}
    LINKS['menu'] = 'karma/css/menu.css'
    LINKS['global'] = 'karma/css/global.css'
    LINKS['jquizme'] = 'karma/css/jquizme.css'
    LINKS['lesson'] = 'lesson.css'
    LINKS['crossword'] = 'crossword.css'
    LINKS['hangman'] = 'hangman.css'
    LINKS['identification'] = 'identification.css'
    LINKS['mad-libs'] = 'mad-libs.css'
    LINKS['matching'] = 'matching.css'
    LINKS['matching-pairs'] = 'matching-pairs.css'
    LINKS['multiple-choice-sentences'] = 'multiple-choice-sentences.css'
    LINKS['vocabulary-with-game'] = 'vocabulary-with-game.css'
    LINKS['what-is-this'] = 'what-is-this.css'
    # Bug fix: was 'anagrams' (no extension); every stylesheet entry is a .css file.
    LINKS['anagrams'] = 'anagrams.css'
    priorityLinks = ['menu', 'global', 'jquizme', 'crossword',
                     'hangman', 'identification', 'mad-libs', 'matching',
                     'matching-pairs', 'multiple-choice-sentences',
                     'vocabulary-with-game', 'what-is-this', 'anagrams', 'lesson']
    SCRIPTS = {}
    SCRIPTS['jquery'] = 'karma/js/external/jquery-1.4.2.js'
    SCRIPTS['ui'] = 'karma/js/external/jquery-ui-1.8.2.js'
    SCRIPTS['flash'] = 'karma/js/flash.js'
    SCRIPTS['karma'] = "karma/js/karma.js"
    SCRIPTS['common'] = "karma/js/common.js"
    SCRIPTS['clickable'] = "karma/js/jquery.clickable.js"
    SCRIPTS['i18n'] = "karma/js/jquery.i18n.js"
    SCRIPTS['jquizme'] = "karma/js/jquizme.js"
    SCRIPTS['math'] = "karma/js/math.js"
    SCRIPTS['global'] = "karma/js/global.js"
    SCRIPTS['templates'] = "karma/templates/templates.js"
    SCRIPTS['edit'] = "karma/js/edit.js"
    SCRIPTS['subjects'] = "-0subjects.js"
    SCRIPTS['main'] = "karma/js/main.js"
    # 'course' was assigned twice with the same value; kept once.
    SCRIPTS['course'] = "karma/js/course.js"
    SCRIPTS['unit'] = "karma/js/unit.js"
    SCRIPTS['clock'] = 'karma/js/clock.js'
    SCRIPTS['lessons'] = "karma/js/lesson.js"
    SCRIPTS['lesson'] = "lesson.js"
    SCRIPTS['base'] = 'karma/js/base.js'
    SCRIPTS['milestones'] = "milestones.js"
    SCRIPTS['activities'] = "activities.js"
    SCRIPTS['lesson-karma'] = "lesson-karma.js"
    SCRIPTS['courses'] = "-1courses.js"
    SCRIPTS['quiz'] = "quiz.js"
    SCRIPTS['khan'] = "khan/khan-exercise.js"
    SCRIPTS['ui.scoreboard'] = 'karma/js/ui.scoreboard.js'
    SCRIPTS['crossword'] = "crossword.js"
    SCRIPTS['hangman'] = "hangman.js"
    SCRIPTS['multiple-choice'] = 'karma/js/multiple-choice.js'
    SCRIPTS['identification'] = 'identification.js'
    SCRIPTS['configuration'] = 'configuration.js'
    SCRIPTS['mad-libs'] = 'mad-libs.js'
    SCRIPTS['objects'] = 'objects.js'
    SCRIPTS['matching'] = 'matching.js'
    SCRIPTS['matching-pairs'] = 'matching-pairs.js'
    SCRIPTS['multiple-choice-sentences'] = 'multiple-choice-sentences.js'
    SCRIPTS['label-generator'] = 'label-generator.js'
    SCRIPTS['init'] = 'init.js'
    SCRIPTS['addition'] = 'addition.js'
    SCRIPTS['quick'] = 'quick.js'
    SCRIPTS['vocabulary-with-game'] = 'vocabulary-with-game.js'
    SCRIPTS['what-is-this'] = 'what-is-this.js'
    SCRIPTS['anagrams'] = 'anagrams.js'
    SCRIPTS['start'] = "start.js"
    # Bug fix: this list previously held 'addition.js', 'quick.js' and
    # 'init.js', which are not SCRIPTS keys -- those scripts could never be
    # selected (and requesting the '.js' forms raised KeyError below).
    priorityScripts = ['jquery', 'ui', 'flash', 'karma', 'common', 'clickable',
                       'i18n', 'jquizme', 'math', 'global', 'templates', 'edit', 'subjects',
                       'main', 'course', 'unit', 'lessons', 'base', 'lesson', 'milestones',
                       'activities', 'clock', 'lesson-karma', 'courses',
                       'quiz', 'khan', 'ui.scoreboard', 'multiple-choice',
                       'crossword', 'hangman', 'identification', 'configuration', 'mad-libs',
                       'objects', 'matching', 'matching-pairs', 'multiple-choice-sentences',
                       'addition', 'quick', 'init', 'vocabulary-with-game',
                       'label-generator', 'what-is-this', 'anagrams', 'start']
    linkPrefix = '<link rel="stylesheet" href="'
    linkPostfix = '" type="text/css"/>'
    scriptPrefix = '<script type="text/javascript" src="'
    scriptPostfix = '"></script>'
    linkList = [
        ['menu'],
        ['menu', 'global'],
        ['global'],
        ['global'],
        ['global', 'lesson'],
    ]
    scriptList = [
        ['jquery', 'ui', 'karma', 'clickable', 'i18n', 'subjects', 'main'],
        ['jquery', 'ui', 'karma', 'clickable', 'i18n', 'subjects', 'courses', 'course'],
        ['jquery', 'karma', 'global', 'edit', 'subjects', 'courses', 'milestones', 'unit'],
        ['jquery', 'ui', 'karma', 'global', 'templates', 'edit', 'subjects',
         'courses', 'activities', 'lessons'],
        ['jquery', 'ui', 'karma', 'common', 'clickable',
         'i18n', 'global', 'templates', 'edit', 'subjects', 'base', 'lesson-karma',
         'start'],
    ]
    links = ''
    lnks = []
    level = LEVELS[levelName]
    # Honour the priority ordering while selecting per-level + caller keys.
    for link in priorityLinks:
        if link in linkList[level] or link in added_links:
            lnks.append(link)
    for link in lnks:
        # Files under karma/ need a path prefix matching the level's depth.
        if 'karma' in LINKS[link]:
            pth = PREFIXES[level] + LINKS[link]
        else:
            pth = LINKS[link]
        links = links + linkPrefix + pth + linkPostfix + '\n'
    scrpts = []
    for script in priorityScripts:
        if script in scriptList[level] or script in added_scripts:
            scrpts.append(script)
    print >> log, 'scrpts', len(scrpts), scrpts
    scripts = ''
    for script in scrpts:
        # 'lesson-karma' lives beside index.html despite 'karma' in its name.
        if 'karma' in SCRIPTS[script] and not script == 'lesson-karma':
            pth = PREFIXES[level] + SCRIPTS[script]
        else:
            pth = SCRIPTS[script]
        # '-0'/'-1' markers mean "resolve relative to this level / one level up".
        if '-0' in pth:
            pth = PREFIXES[level] + SCRIPTS[script].replace('-0', '')
        elif '-1' in pth:
            pth = PREFIXES[level - 1] + SCRIPTS[script].replace('-1', '')
        scripts = scripts + scriptPrefix + pth + scriptPostfix + '\n'
    txtout = HEAD + links + scripts + FOOT
    print >> log, 'done'
    print >> log, txtout
    log.close()
    return txtout
| #!/usr/bin/python
#build index.html for a specified level
#
def buildMenu(levelName,added_links=[],added_scripts=[]):
#return a string 'index.html' which is generic for the corresponding level
#the calling script can add additional optional scripts as required
#linkList is a list of lists, one list per level
#ScriptList is a list of lists, one list per level
#the path to the scripts and css files in karma need to be adjusted based on the level
#path to scripts not in karma are in the same folder as the index.html file
log=open('/tmp/logMenu','w')
print >> log,'buildMenu',levelName
print >> log,'addl',len(added_links),added_links
print >> log,'adds',len(added_scripts),added_scripts
LEVELS = {'subject':0, 'course':1, 'unit':2, 'lesson':3, 'activity':4}
PREFIXES = ['./','../','../../','../../../','../../../../']
HEAD = '<!DOCTYPE html>\n<html>\n<head>\n<meta charset="UTF-8"/><meta http-equiv="Pragma" content="no-cache"/>'
FOOT = '</head>\n<body>\n<div id="header"></div>\n'
FOOT = FOOT + '<div id = "content"></div>\n'
FOOT = FOOT + '<div id = "footer"></div>\n</body>\n</html>\n'
LINKS = {}
LINKS['menu'] = 'karma/css/menu.css'
LINKS['global'] = 'karma/css/global.css'
LINKS['jquizme'] = 'karma/css/jquizme.css'
LINKS['lesson'] = 'lesson.css'
LINKS['crossword'] = 'crossword.css'
LINKS['hangman'] = 'hangman.css'
LINKS['identification'] = 'identification.css'
LINKS['mad-libs'] = 'mad-libs.css'
LINKS['matching'] = 'matching.css'
LINKS['matching-pairs'] = 'matching-pairs.css'
LINKS['multiple-choice-sentences'] = 'multiple-choice-sentences.css'
LINKS['vocabulary-with-game'] = 'vocabulary-with-game.css'
LINKS['what-is-this'] = 'what-is-this.css'
LINKS['anagrams'] = 'anagrams'
priorityLinks = ['menu','global','jquizme','crossword',
'hangman','identification','mad-libs','matching',
'matching-pairs','multiple-choice-sentences',
'vocabulary-with-game','what-is-this','anagrams','lesson']
SCRIPTS = {}
SCRIPTS['jquery'] = 'karma/js/external/jquery-1.4.2.js'
SCRIPTS['ui'] = 'karma/js/external/jquery-ui-1.8.2.js'
SCRIPTS['flash'] = 'karma/js/flash.js'
SCRIPTS['karma'] = "karma/js/karma.js"
SCRIPTS['common'] = "karma/js/common.js"
SCRIPTS['clickable'] = "karma/js/jquery.clickable.js"
SCRIPTS['i18n'] = "karma/js/jquery.i18n.js"
SCRIPTS['jquizme'] = "karma/js/jquizme.js"
SCRIPTS['math'] = "karma/js/math.js"
SCRIPTS['global'] = "karma/js/global.js"
SCRIPTS['templates']="karma/templates/templates.js"
SCRIPTS['edit'] = "karma/js/edit.js"
SCRIPTS['subjects'] = "-0subjects.js"
SCRIPTS['main'] = "karma/js/main.js"
SCRIPTS['course'] = "karma/js/course.js"
SCRIPTS['unit'] = "karma/js/unit.js"
SCRIPTS['clock'] = 'karma/js/clock.js'
SCRIPTS['lessons'] = "karma/js/lesson.js"
SCRIPTS['lesson'] = "lesson.js"
SCRIPTS['base'] = 'karma/js/base.js'
SCRIPTS['milestones'] = "milestones.js"
SCRIPTS['activities'] = "activities.js"
SCRIPTS['lesson-karma'] = "lesson-karma.js"
SCRIPTS['courses'] = "-1courses.js"
SCRIPTS['course'] = "karma/js/course.js"
SCRIPTS['quiz']="quiz.js"
SCRIPTS['khan']="khan/khan-exercise.js"
SCRIPTS['ui.scoreboard'] = 'karma/js/ui.scoreboard.js'
SCRIPTS['crossword']="crossword.js"
SCRIPTS['hangman']="hangman.js"
SCRIPTS['multiple-choice'] = 'karma/js/multiple-choice.js'
SCRIPTS['identification'] = 'identification.js'
SCRIPTS['configuration'] = 'configuration.js'
SCRIPTS['mad-libs'] = 'mad-libs.js'
SCRIPTS['objects'] = 'objects.js'
SCRIPTS['matching'] = 'matching.js'
SCRIPTS['matching-pairs'] = 'matching-pairs.js'
SCRIPTS['multiple-choice-sentences'] = 'multiple-choice-sentences.js'
SCRIPTS['label-generator'] = 'label-generator.js'
SCRIPTS['init'] = 'init.js'
SCRIPTS['addition'] = 'addition.js'
SCRIPTS['quick'] = 'quick.js'
SCRIPTS['vocabulary-with-game'] = 'vocabulary-with-game.js'
SCRIPTS['what-is-this'] = 'what-is-this.js'
SCRIPTS['anagrams'] = 'anagrams.js'
SCRIPTS['start'] = "start.js"
priorityScripts = ['jquery','ui','flash','karma','common','clickable',
'i18n','jquizme','math','global','templates','edit','subjects',
'main','course','unit','lessons','base','lesson','milestones',
'activities','clock','lesson-karma','courses',
'quiz','khan','ui.scoreboard','multiple-choice',
'crossword','hangman','identification','configuration','mad-libs',
'objects','matching','matching-pairs','multiple-choice-sentences',
'addition.js','quick.js','init.js','vocabulary-with-game',
'label-generator','what-is-this','anagrams','start']
linkPrefix = '<link rel="stylesheet" href="'
linkPostfix ='" type="text/css"/>'
scriptPrefix = '<script type="text/javascript" src="'
scriptPostfix = '"></script>'
linkList = [
['menu'],
['menu','global'],
['global'],
['global'],
['global', 'lesson'],
]
scriptList = [
['jquery','ui','karma','clickable','i18n','subjects','main'],
['jquery','ui','karma','clickable','i18n','subjects','courses','course'],
['jquery','karma','global','edit','subjects','courses','milestones','unit'],
['jquery','ui','karma','global','templates','edit','subjects',
'courses','activities', 'lessons'],
['jquery','ui','karma','common','clickable',
'i18n','global','templates','edit','subjects','base','lesson-karma',
'start'],
]
links = ''
lnks=[]
level = LEVELS[levelName]
for link in priorityLinks:
if link in linkList[level] or link in added_links:
lnks.append(link)
for link in lnks:
if 'karma' in LINKS[link]:
pth = PREFIXES[level]+LINKS[link]
else:
pth = LINKS[link]
links = links + linkPrefix + pth + linkPostfix + '\n'
scrpts = []
for script in priorityScripts:
if script in scriptList[level] or script in added_scripts:
scrpts.append(script)
print >> log,'scrpts',len(scrpts),scrpts
scripts = ''
for script in scrpts:
if 'karma' in SCRIPTS[script] and not script == 'lesson-karma':
pth = PREFIXES[level]+SCRIPTS[script]
else:
pth = SCRIPTS[script]
if '-0' in pth:
pth = PREFIXES[level]+SCRIPTS[script].replace('-0','')
elif '-1' in pth:
pth = PREFIXES[level-1]+SCRIPTS[script].replace('-1','')
scripts = scripts + scriptPrefix + pth + scriptPostfix + '\n'
txtout = HEAD + links + scripts + FOOT
print >> log, 'done'
print >> log, txtout
log.close()
return txtout | en | 0.837491 | #!/usr/bin/python #build index.html for a specified level # #return a string 'index.html' which is generic for the corresponding level #the calling script can add additional optional scripts as required #linkList is a list of lists, one list per level #ScriptList is a list of lists, one list per level #the path to the scripts and css files in karma need to be adjusted based on the level #path to scripts not in karma are in the same folder as the index.html file | 2.431482 | 2 |
mine/miner_client_p2p.py | Mechasparrow/Sparkles | 0 | 6613504 | import websocket
import threading
from threading import Thread
import time
import json
import random
import copy
import hashlib
import datetime as date
import sys
from miner import BlockMiner
sys.path.append("../CryptoWork")
sys.path.append("../block")
sys.path.append("../node")
sys.path.append("../blockchain_lib")
sys.path.append("../p2p-networking")
from transaction import Transaction
from blockchain import BlockChain
from block import Block
from reward import Reward
import crypto_key_gen
import base64
from peerhttp import PeerHTTP
import peer_search
# Import p2p server
from server_p2p_node import Server_P2P
# Import p2p client
from client_p2p_node import Client_P2P
# Import broadcast protocol
from peer_broadcast import PeerBroadcast
## Setup
## Find peers
EXTERNAL_IP = PeerHTTP.get_external_ip()
## TODO do on LAN
start_port = 3000
PEER_LIST = peer_search.local_search(EXTERNAL_IP)
## Server code
SERVER_IP = PeerHTTP.get_local_ip()
SERVER_PORT = random.randint(start_port, start_port + 3000)
post_peer = PeerHTTP.post_local_peer(EXTERNAL_IP, SERVER_IP, SERVER_PORT)
if (post_peer):
print ("Server posted")
else:
print ("Server not posted")
# Client and Server Handlers
# Public and Private key for transactions
public_key = crypto_key_gen.from_public_pem('./keys/public.pem')
private_key = crypto_key_gen.from_private_pem('./keys/secret.pem')
blockchain = BlockChain([])
miners = []
def load_blockchain():
try:
blockchain = BlockChain.load_blockchain('./blockchain/blockchain.json')
except FileNotFoundError:
blocks = []
genesis_block = Block.load_from_file('./genesis_block/genesis_block.json')
blocks.append(genesis_block)
blockchain = BlockChain(blocks)
return blockchain
def get_miner_address():
pk = crypto_key_gen.from_public_pem('./keys/public.pem')
pk_hex = base64.b16encode(pk.to_string()).decode('utf-8')
return pk_hex
def get_miner_secret():
sk = crypto_key_gen.from_private_pem('./keys/secret.pem')
return sk
# Handle transactions
def transaction_handler(broadcast_message, payload):
transaction_raw = payload['data']
tx = Transaction.from_json(transaction_raw)
if (tx.validate_transaction()):
block_miner = BlockMiner(tx, blockchain, get_miner_address(), get_miner_secret())
block_miner.start()
miners.append(block_miner)
new_block = block_miner.join()
miners.remove(block_miner)
if (new_block == None):
print ("Someone beat you to it")
return
good_block = update_blockchain(new_block)
if (good_block == True):
upload_block(new_block, broadcast_message)
else:
print ("That was a pretty bad block, so were not going to send it out to peers")
def update_blockchain(block):
if (block.valid_block() == True):
temp_blocks = copy.copy(blockchain.blocks)
temp_blocks.append(block)
temp_block_chain = BlockChain(temp_blocks)
if (temp_block_chain.validate_chain() == True):
print ("valid new blockchain")
blockchain.blocks.append(block)
return True
else:
print("invalid chain. Not updated")
return False
else:
print ("invalid block")
return False
blockchain.save_blockchain('./blockchain/blockchain.json')
def upload_block(block, broadcast_message):
block_upload_json = {
"message_type": "block_upload",
"data": str(block)
}
block_upload_message = json.dumps(block_upload_json)
broadcast_message(block_upload_message)
def block_recieve(broadcast_message, payload):
print()
print ("NEW BLOCK RECIEVED")
block_json = payload['data']
try:
block = Block.from_json(block_json)
if (block.valid_block() == True):
temp_blocks = copy.copy(blockchain.blocks)
temp_blocks.append(block)
temp_block_chain = BlockChain(temp_blocks)
print (temp_block_chain)
if (temp_block_chain.validate_chain() == True):
print ("valid new blockchain")
blockchain.blocks.append(block)
for miner in miners:
if (miner.is_active()):
miner.intercept_block(block, blockchain)
else:
print("invalid chain. Not updated")
else:
print ("invalid block")
except json.decoder.JSONDecodeError:
print ("invalid block")
blockchain.save_blockchain('./blockchain/blockchain.json')
print ()
## Request blockchains from peers
def request_blockchain(send_message):
message_json = {
"message_type": "blockchain_request"
}
request_message = json.dumps(message_json)
send_message(request_message)
## Upload blockchain to peer
def upload_blockchain(broadcast_message, payload):
print ("preparing to broadcast message");
blockchain_json = {
"message_type": "blockchain_upload",
"data": str(blockchain)
}
blockchain_message = json.dumps(blockchain_json)
broadcast_message(blockchain_message)
## Sync blockchain
def sync_blockchain(broadcast_message, payload):
print ("preparing for sync")
global blockchain
recv_blockchain_raw = json.loads(payload["data"])
recieved_blockchain = BlockChain.from_json(payload["data"])
synced_blockchain = BlockChain.sync_blockchain(blockchain, recieved_blockchain)
blockchain = synced_blockchain
blockchain.save_blockchain('./blockchain/blockchain.json')
print ("blockchain synced")
print()
def client_loop(send_message):
print ("Welcome to Sparkles 2.0 (Miner)")
upload_blockchain(send_message, [])
request_blockchain(send_message)
while True:
continue
# Spin up the threads
server_thread = Server_P2P(PEER_LIST, SERVER_IP, SERVER_PORT)
#Load blockchain
blockchain = load_blockchain()
# Add handlers
server_thread.add_handler("transaction", transaction_handler)
# For handling blockchain stuff
server_thread.add_handler("blockchain_request", upload_blockchain)
server_thread.add_handler("blockchain_upload", sync_blockchain)
server_thread.add_handler("block_upload", block_recieve)
client_thread = Client_P2P(PEER_LIST, server_thread, client_loop)
server_thread.start()
client_thread.start()
client_thread.join()
server_thread.exit()
| import websocket
import threading
from threading import Thread
import time
import json
import random
import copy
import hashlib
import datetime as date
import sys
from miner import BlockMiner
sys.path.append("../CryptoWork")
sys.path.append("../block")
sys.path.append("../node")
sys.path.append("../blockchain_lib")
sys.path.append("../p2p-networking")
from transaction import Transaction
from blockchain import BlockChain
from block import Block
from reward import Reward
import crypto_key_gen
import base64
from peerhttp import PeerHTTP
import peer_search
# Import p2p server
from server_p2p_node import Server_P2P
# Import p2p client
from client_p2p_node import Client_P2P
# Import broadcast protocol
from peer_broadcast import PeerBroadcast
## Setup
## Find peers
EXTERNAL_IP = PeerHTTP.get_external_ip()
## TODO do on LAN
start_port = 3000
PEER_LIST = peer_search.local_search(EXTERNAL_IP)
## Server code
SERVER_IP = PeerHTTP.get_local_ip()
SERVER_PORT = random.randint(start_port, start_port + 3000)
post_peer = PeerHTTP.post_local_peer(EXTERNAL_IP, SERVER_IP, SERVER_PORT)
if (post_peer):
print ("Server posted")
else:
print ("Server not posted")
# Client and Server Handlers
# Public and Private key for transactions
public_key = crypto_key_gen.from_public_pem('./keys/public.pem')
private_key = crypto_key_gen.from_private_pem('./keys/secret.pem')
blockchain = BlockChain([])
miners = []
def load_blockchain():
try:
blockchain = BlockChain.load_blockchain('./blockchain/blockchain.json')
except FileNotFoundError:
blocks = []
genesis_block = Block.load_from_file('./genesis_block/genesis_block.json')
blocks.append(genesis_block)
blockchain = BlockChain(blocks)
return blockchain
def get_miner_address():
pk = crypto_key_gen.from_public_pem('./keys/public.pem')
pk_hex = base64.b16encode(pk.to_string()).decode('utf-8')
return pk_hex
def get_miner_secret():
sk = crypto_key_gen.from_private_pem('./keys/secret.pem')
return sk
# Handle transactions
def transaction_handler(broadcast_message, payload):
transaction_raw = payload['data']
tx = Transaction.from_json(transaction_raw)
if (tx.validate_transaction()):
block_miner = BlockMiner(tx, blockchain, get_miner_address(), get_miner_secret())
block_miner.start()
miners.append(block_miner)
new_block = block_miner.join()
miners.remove(block_miner)
if (new_block == None):
print ("Someone beat you to it")
return
good_block = update_blockchain(new_block)
if (good_block == True):
upload_block(new_block, broadcast_message)
else:
print ("That was a pretty bad block, so were not going to send it out to peers")
def update_blockchain(block):
if (block.valid_block() == True):
temp_blocks = copy.copy(blockchain.blocks)
temp_blocks.append(block)
temp_block_chain = BlockChain(temp_blocks)
if (temp_block_chain.validate_chain() == True):
print ("valid new blockchain")
blockchain.blocks.append(block)
return True
else:
print("invalid chain. Not updated")
return False
else:
print ("invalid block")
return False
blockchain.save_blockchain('./blockchain/blockchain.json')
def upload_block(block, broadcast_message):
block_upload_json = {
"message_type": "block_upload",
"data": str(block)
}
block_upload_message = json.dumps(block_upload_json)
broadcast_message(block_upload_message)
def block_recieve(broadcast_message, payload):
print()
print ("NEW BLOCK RECIEVED")
block_json = payload['data']
try:
block = Block.from_json(block_json)
if (block.valid_block() == True):
temp_blocks = copy.copy(blockchain.blocks)
temp_blocks.append(block)
temp_block_chain = BlockChain(temp_blocks)
print (temp_block_chain)
if (temp_block_chain.validate_chain() == True):
print ("valid new blockchain")
blockchain.blocks.append(block)
for miner in miners:
if (miner.is_active()):
miner.intercept_block(block, blockchain)
else:
print("invalid chain. Not updated")
else:
print ("invalid block")
except json.decoder.JSONDecodeError:
print ("invalid block")
blockchain.save_blockchain('./blockchain/blockchain.json')
print ()
## Request blockchains from peers
def request_blockchain(send_message):
message_json = {
"message_type": "blockchain_request"
}
request_message = json.dumps(message_json)
send_message(request_message)
## Upload blockchain to peer
def upload_blockchain(broadcast_message, payload):
print ("preparing to broadcast message");
blockchain_json = {
"message_type": "blockchain_upload",
"data": str(blockchain)
}
blockchain_message = json.dumps(blockchain_json)
broadcast_message(blockchain_message)
## Sync blockchain
def sync_blockchain(broadcast_message, payload):
print ("preparing for sync")
global blockchain
recv_blockchain_raw = json.loads(payload["data"])
recieved_blockchain = BlockChain.from_json(payload["data"])
synced_blockchain = BlockChain.sync_blockchain(blockchain, recieved_blockchain)
blockchain = synced_blockchain
blockchain.save_blockchain('./blockchain/blockchain.json')
print ("blockchain synced")
print()
def client_loop(send_message):
print ("Welcome to Sparkles 2.0 (Miner)")
upload_blockchain(send_message, [])
request_blockchain(send_message)
while True:
continue
# Spin up the threads
server_thread = Server_P2P(PEER_LIST, SERVER_IP, SERVER_PORT)
#Load blockchain
blockchain = load_blockchain()
# Add handlers
server_thread.add_handler("transaction", transaction_handler)
# For handling blockchain stuff
server_thread.add_handler("blockchain_request", upload_blockchain)
server_thread.add_handler("blockchain_upload", sync_blockchain)
server_thread.add_handler("block_upload", block_recieve)
client_thread = Client_P2P(PEER_LIST, server_thread, client_loop)
server_thread.start()
client_thread.start()
client_thread.join()
server_thread.exit()
| en | 0.546965 | # Import p2p server # Import p2p client # Import broadcast protocol ## Setup ## Find peers ## TODO do on LAN ## Server code # Client and Server Handlers # Public and Private key for transactions # Handle transactions ## Request blockchains from peers ## Upload blockchain to peer ## Sync blockchain # Spin up the threads #Load blockchain # Add handlers # For handling blockchain stuff | 2.304562 | 2 |
contact.py | melonmanchan/My-Website | 0 | 6613505 | <reponame>melonmanchan/My-Website
# Simple mail server utilizing mailgun.net API
from flask import Flask
from flask import request
import sendgrid
client = sendgrid.SendGridClient("SENDGRID_APIKEY")
app = Flask(__name__)
@app.route("/sendmail", methods=['POST'])
def hello():
email = request.form.get('email')
msg = request.form.get('message')
subject = request.form.get('subject')
send_mail(email, msg, subject)
return 'OK'
def send_mail(email, message, subject):
message = sendgrid.Mail()
message.add_to('<EMAIL>')
message.set_from(email)
message.set_subject(subject)
message.set_html(message)
client.send(message)
if __name__ == "__main__":
app.run(host='0.0.0.0')
| # Simple mail server utilizing mailgun.net API
from flask import Flask
from flask import request
import sendgrid
client = sendgrid.SendGridClient("SENDGRID_APIKEY")
app = Flask(__name__)
@app.route("/sendmail", methods=['POST'])
def hello():
email = request.form.get('email')
msg = request.form.get('message')
subject = request.form.get('subject')
send_mail(email, msg, subject)
return 'OK'
def send_mail(email, message, subject):
message = sendgrid.Mail()
message.add_to('<EMAIL>')
message.set_from(email)
message.set_subject(subject)
message.set_html(message)
client.send(message)
if __name__ == "__main__":
app.run(host='0.0.0.0') | en | 0.228455 | # Simple mail server utilizing mailgun.net API | 2.574627 | 3 |
winappdbg/winapputil/__init__.py | parsiya/Parsia-Code | 21 | 6613506 | """
Just here to make this a package.
"""
from winapputil import WinAppUtil
from utils import *
from winapputil import DebugError
| """
Just here to make this a package.
"""
from winapputil import WinAppUtil
from utils import *
from winapputil import DebugError
| en | 0.879037 | Just here to make this a package. | 1.357921 | 1 |
scripts/sources/S_AgnosticCorrPrior.py | dpopadic/arpmRes | 6 | 6613507 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_AgnosticCorrPrior [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_AgnosticCorrPrior&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=UninfPrior).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
import numpy as np
from numpy import ones, zeros, eye, round, log, tile
from numpy import min as npmin
from numpy.linalg import eig
from numpy.random import rand
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, bar, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from HistogramFP import HistogramFP
# Initialize variables
i_ = 3 # dimension of the correlation matirix
k_ = int(i_ * (i_ - 1) / 2) # number of upper non-diagonal entries
j_ = 10000 # number of simulations
# -
# ## Compute correlations in scenarios
# +
C2 = tile(eye(i_)[..., np.newaxis], (1, 1, j_))
lam = zeros((i_, j_))
Theta = zeros((k_, j_))
j = 1
while j < j_:
Theta_tilde = 2 * rand(k_, 1) - 1 # generate the uninformative correlations
k = 0
for i in range(i_): # build the candidate matrix
for m in range(i + 1, i_):
C2[i, m, j] = Theta_tilde[k]
C2[m, i, j] = C2[i, m, j]
k = k + 1
lam[:, j], _ = eig(C2[:, :, j]) # compute eigenvalues to check positivity
if npmin(lam[:, j]) > 0: # check positivity
Theta[:, [j]] = Theta_tilde # store the correlations
j = j + 1
# -
# ## Create figures
# +
# titles
names = {}
k = 0
for i in range(1, i_ + 1):
for m in range(i + 1, i_ + 1):
names[k] = r'$\Theta_{%d,%d}$' % (i, m)
k = k + 1
# univariate marginals
option = namedtuple('option', 'n_bins')
option.n_bins = round(5 * log(j_))
for k in range(k_):
figure()
p = ones((1, len(Theta[k, :]))) / len(Theta[k, :])
n, x = HistogramFP(Theta[[k], :], p, option)
b = bar(x[:-1], n.flatten(), width=0.95 * (x[1] - x[0]), facecolor=[.7, .7, .7], edgecolor=[1, 1, 1])
title('histogram of {name}'.format(name=names[k]));
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_AgnosticCorrPrior [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_AgnosticCorrPrior&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=UninfPrior).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
import numpy as np
from numpy import ones, zeros, eye, round, log, tile
from numpy import min as npmin
from numpy.linalg import eig
from numpy.random import rand
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, bar, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from HistogramFP import HistogramFP
# Initialize variables
i_ = 3 # dimension of the correlation matirix
k_ = int(i_ * (i_ - 1) / 2) # number of upper non-diagonal entries
j_ = 10000 # number of simulations
# -
# ## Compute correlations in scenarios
# +
C2 = tile(eye(i_)[..., np.newaxis], (1, 1, j_))
lam = zeros((i_, j_))
Theta = zeros((k_, j_))
j = 1
while j < j_:
Theta_tilde = 2 * rand(k_, 1) - 1 # generate the uninformative correlations
k = 0
for i in range(i_): # build the candidate matrix
for m in range(i + 1, i_):
C2[i, m, j] = Theta_tilde[k]
C2[m, i, j] = C2[i, m, j]
k = k + 1
lam[:, j], _ = eig(C2[:, :, j]) # compute eigenvalues to check positivity
if npmin(lam[:, j]) > 0: # check positivity
Theta[:, [j]] = Theta_tilde # store the correlations
j = j + 1
# -
# ## Create figures
# +
# titles
names = {}
k = 0
for i in range(1, i_ + 1):
for m in range(i + 1, i_ + 1):
names[k] = r'$\Theta_{%d,%d}$' % (i, m)
k = k + 1
# univariate marginals
option = namedtuple('option', 'n_bins')
option.n_bins = round(5 * log(j_))
for k in range(k_):
figure()
p = ones((1, len(Theta[k, :]))) / len(Theta[k, :])
n, x = HistogramFP(Theta[[k], :], p, option)
b = bar(x[:-1], n.flatten(), width=0.95 * (x[1] - x[0]), facecolor=[.7, .7, .7], edgecolor=[1, 1, 1])
title('histogram of {name}'.format(name=names[k]));
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) | en | 0.445441 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_AgnosticCorrPrior [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_AgnosticCorrPrior&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=UninfPrior). # ## Prepare the environment # + # Initialize variables # dimension of the correlation matirix # number of upper non-diagonal entries # number of simulations # - # ## Compute correlations in scenarios # + # generate the uninformative correlations # build the candidate matrix # compute eigenvalues to check positivity # check positivity # store the correlations # - # ## Create figures # + # titles # univariate marginals # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) | 2.228836 | 2 |
miprometheus/models/relational_net/functions.py | vincentalbouy/mi-prometheus | 0 | 6613508 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""functions.py: contains implementations of g_theta & f_phi for the Relational Network."""
__author__ = "<NAME>"
import torch
import numpy as np
from torch.nn import Module
from miprometheus.utils.app_state import AppState
class PairwiseRelationNetwork(Module):
    """
    Implementation of the g_theta MLP used in the Relational Network model.

    g_theta scores how (and whether) two regions of the CNN feature maps are
    related, conditioned on the question encoding appended to each pair.
    """

    def __init__(self, input_size):
        """
        Constructor for the g_theta MLP.

        Builds four fully-connected layers of 256 units each; ReLU is applied
        functionally in :py:func:`forward`.

        :param input_size: size of one (region pair + question encoding) vector.
        :type input_size: int

        """
        # call base constructor
        super(PairwiseRelationNetwork, self).__init__()

        self.input_size = input_size

        # Attribute names are kept identical so existing checkpoints still load.
        self.g_fc1 = torch.nn.Linear(in_features=self.input_size, out_features=256)
        self.g_fc2 = torch.nn.Linear(in_features=256, out_features=256)
        self.g_fc3 = torch.nn.Linear(in_features=256, out_features=256)
        self.g_fc4 = torch.nn.Linear(in_features=256, out_features=256)

    def forward(self, inputs):
        """
        Forward pass of the g_theta MLP: four Linear -> ReLU stages.

        :param inputs: tensor of shape [batch_size, -1, input_size], should represent the pairs of regions (in the CNN \
        feature maps) cat with the question encoding.

        :return: tensor of shape [batch_size, -1, 256].

        """
        hidden = inputs
        # Same computation as four explicit fc-then-relu statements.
        for fc_layer in (self.g_fc1, self.g_fc2, self.g_fc3, self.g_fc4):
            hidden = torch.nn.functional.relu(fc_layer(hidden))

        return hidden
class SumOfPairsAnalysisNetwork(Module):
    """
    Implementation of the f_phi MLP used in the Relational Network model.

    For recall, the role of f_phi is to produce the probability
    distribution over all possible answers.
    """

    def __init__(self, output_size):
        """
        Constructor for the f_phi MLP.

        Instantiates 3 linear layers: two hidden layers of 256 nodes and a
        final projection onto the answer classes.

        :param output_size: number of classes for the last layer.
        :type output_size: int

        """
        # call base constructor
        super(SumOfPairsAnalysisNetwork, self).__init__()

        self.output_size = output_size

        self.f_fc1 = torch.nn.Linear(in_features=256, out_features=256)
        self.f_fc2 = torch.nn.Linear(in_features=256, out_features=256)
        self.f_fc3 = torch.nn.Linear(in_features=256, out_features=self.output_size)

    def forward(self, inputs):
        """
        Forward pass of the f_phi MLP.

        :param inputs: tensor of shape [batch_size, -1, 256], should represent the element-wise sum of the outputs of \
        g_theta.

        :return: Predictions over the available classes, tensor of shape [batch_size, -1, output_size]

        """
        x = self.f_fc1(inputs)
        x = torch.nn.functional.relu(x)

        x = self.f_fc2(x)
        x = torch.nn.functional.relu(x)
        # Bug fix: functional dropout defaults to training=True, which kept
        # dropping activations even in eval mode. Tie it to the module state so
        # .eval() actually disables dropout at inference time.
        x = torch.nn.functional.dropout(x, p=0.5, training=self.training)

        x = self.f_fc3(x)

        return x
if __name__ == '__main__':
    """
    Unit Tests for g_theta & f_phi.
    """
    # Dimensions mirror the Relational Network setup: two object vectors of
    # size 24 (+2 coordinates each) concatenated with a 13-dim question.
    feature_dim = (24 + 2) * 2 + 13
    n_samples = 64

    raw_pairs = np.random.binomial(1, 0.5, (n_samples, 3, feature_dim))
    pairs_tensor = torch.from_numpy(raw_pairs).type(AppState().dtype)

    relation_net = PairwiseRelationNetwork(input_size=feature_dim)
    relations = relation_net(pairs_tensor)
    print('g_outputs:', relations.shape)

    analysis_net = SumOfPairsAnalysisNetwork(output_size=10)
    predictions = analysis_net(relations)
    print('f_outputs:', predictions.shape)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""functions.py: contains implementations of g_theta & f_phi for the Relational Network."""
__author__ = "<NAME>"
import torch
import numpy as np
from torch.nn import Module
from miprometheus.utils.app_state import AppState
class PairwiseRelationNetwork(Module):
    """
    Implementation of the g_theta MLP used in the Relational Network model.

    g_theta scores how (and whether) two regions of the CNN feature maps are
    related, conditioned on the question encoding appended to each pair.
    """

    def __init__(self, input_size):
        """
        Constructor for the g_theta MLP.

        Builds four fully-connected layers of 256 units each; ReLU is applied
        functionally in :py:func:`forward`.

        :param input_size: size of one (region pair + question encoding) vector.
        :type input_size: int

        """
        # call base constructor
        super(PairwiseRelationNetwork, self).__init__()

        self.input_size = input_size

        # Attribute names are kept identical so existing checkpoints still load.
        self.g_fc1 = torch.nn.Linear(in_features=self.input_size, out_features=256)
        self.g_fc2 = torch.nn.Linear(in_features=256, out_features=256)
        self.g_fc3 = torch.nn.Linear(in_features=256, out_features=256)
        self.g_fc4 = torch.nn.Linear(in_features=256, out_features=256)

    def forward(self, inputs):
        """
        Forward pass of the g_theta MLP: four Linear -> ReLU stages.

        :param inputs: tensor of shape [batch_size, -1, input_size], should represent the pairs of regions (in the CNN \
        feature maps) cat with the question encoding.

        :return: tensor of shape [batch_size, -1, 256].

        """
        hidden = inputs
        # Same computation as four explicit fc-then-relu statements.
        for fc_layer in (self.g_fc1, self.g_fc2, self.g_fc3, self.g_fc4):
            hidden = torch.nn.functional.relu(fc_layer(hidden))

        return hidden
class SumOfPairsAnalysisNetwork(Module):
"""
Implementation of the f_phi MLP used in the Relational Network model.
For recall, the role of f_phi is to produce the probability
distribution over all possible answers.
"""
def __init__(self, output_size):
"""
Constructor for the f_phi MLP.
Instantiates 3 linear layers, having 256 nodes per layers.
:param output_size: number of classes for the last layer.
:type output_size: int
"""
# call base constructor
super(SumOfPairsAnalysisNetwork, self).__init__()
self.output_size = output_size
self.f_fc1 = torch.nn.Linear(in_features=256, out_features=256)
self.f_fc2 = torch.nn.Linear(in_features=256, out_features=256)
self.f_fc3 = torch.nn.Linear(in_features=256, out_features=self.output_size)
def forward(self, inputs):
"""
forward pass of the f_phi MLP.
:param inputs: tensor of shape [batch_size, -1, 256], should represent the element-wise sum of the outputs of \
g_theta.
:return: Predictions over the available classes, tensor of shape [batch_size, -1, output_size]
"""
x = self.f_fc1(inputs)
x = torch.nn.functional.relu(x)
x = self.f_fc2(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.dropout(x, p=0.5)
x = self.f_fc3(x)
return x
if __name__ == '__main__':
"""
Unit Tests for g_theta & f_phi.
"""
input_size = (24 + 2) * 2 + 13
batch_size = 64
inputs = np.random.binomial(1, 0.5, (batch_size, 3, input_size))
inputs = torch.from_numpy(inputs).type(AppState().dtype)
g_theta = PairwiseRelationNetwork(input_size=input_size)
g_outputs = g_theta(inputs)
print('g_outputs:', g_outputs.shape)
output_size = 10
f_phi = SumOfPairsAnalysisNetwork(output_size=output_size)
f_outputs = f_phi(g_outputs)
print('f_outputs:', f_outputs.shape) | en | 0.802729 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) IBM Corporation 2018 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. functions.py: contains implementations of g_theta & f_phi for the Relational Network. Implementation of the g_theta MLP used in the Relational Network model. For recall, the role of g_theta is to infer the ways in which 2 \ regions of the CNN feature maps are related, or if they are even \ related at all. Constructor for the f_phi MLP. Instantiates 4 linear layers, having 256 nodes per layers. :param input_size: input size. :type input_size: int # call base constructor forward pass of the g_theta MLP. :param inputs: tensor of shape [batch_size, -1, input_size], should represent the pairs of regions (in the CNN \ feature maps) cat with the question encoding. :return: tensor of shape [batch_size, -1, 256]. Implementation of the f_phi MLP used in the Relational Network model. For recall, the role of f_phi is to produce the probability distribution over all possible answers. Constructor for the f_phi MLP. Instantiates 3 linear layers, having 256 nodes per layers. :param output_size: number of classes for the last layer. :type output_size: int # call base constructor forward pass of the f_phi MLP. :param inputs: tensor of shape [batch_size, -1, 256], should represent the element-wise sum of the outputs of \ g_theta. 
:return: Predictions over the available classes, tensor of shape [batch_size, -1, output_size] Unit Tests for g_theta & f_phi. | 2.560279 | 3 |
mcbot.py | nowireless/minebot | 0 | 6613509 | <gh_stars>0
import API2
import telepot
import time
import configuration
def handle_plugins(user, args):
message = ""
for p in api.plugins():
message += p["name"] + "\n"
bot.sendMessage(user["id"], message)
def handle_online(user, args):
players = api.players.online()
count = len(players)
msg = ""
if count == 0:
msg = "No players are online"
else:
for player in players:
print player
msg += player["name"] + "\n"
bot.sendMessage(user["id"], msg)
commands = {
"plugins": handle_plugins,
"online": handle_online
}
def handle_msg(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print content_type, chat_type, chat_id
if 'text' in msg and msg['text'][0] == "/":
text = msg["text"]
user = msg["from"]
elements = text.strip().split(" ")
cmd = elements[0][1:]
args = elements[1:]
print cmd
print args
if cmd in commands:
commands[cmd](user, args)
else:
bot.sendMessage(user["id"], "Invalid Command")
# Load config
config = configuration.load_config()
# Setup server connection
conn = API2.Connection(host=config["json_api"]["host"], username=config["json_api"]["username"], password=config["json_api"]["password"], port=config["json_api"]["port"])
api = API2.JSONAPI(conn)
# Setup telegram connection
bot = telepot.Bot(config["telegram"]["api_token"])
bot.notifyOnMessage(handle_msg)
print "Running"
while True:
time.sleep(30)
| import API2
import telepot
import time
import configuration
def handle_plugins(user, args):
message = ""
for p in api.plugins():
message += p["name"] + "\n"
bot.sendMessage(user["id"], message)
def handle_online(user, args):
players = api.players.online()
count = len(players)
msg = ""
if count == 0:
msg = "No players are online"
else:
for player in players:
print player
msg += player["name"] + "\n"
bot.sendMessage(user["id"], msg)
commands = {
"plugins": handle_plugins,
"online": handle_online
}
def handle_msg(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print content_type, chat_type, chat_id
if 'text' in msg and msg['text'][0] == "/":
text = msg["text"]
user = msg["from"]
elements = text.strip().split(" ")
cmd = elements[0][1:]
args = elements[1:]
print cmd
print args
if cmd in commands:
commands[cmd](user, args)
else:
bot.sendMessage(user["id"], "Invalid Command")
# Load config
config = configuration.load_config()
# Setup server connection
conn = API2.Connection(host=config["json_api"]["host"], username=config["json_api"]["username"], password=config["json_api"]["password"], port=config["json_api"]["port"])
api = API2.JSONAPI(conn)
# Setup telegram connection
bot = telepot.Bot(config["telegram"]["api_token"])
bot.notifyOnMessage(handle_msg)
print "Running"
while True:
time.sleep(30) | en | 0.516457 | # Load config # Setup server connection # Setup telegram connection | 2.643254 | 3 |
spectrum/judge/majority.py | totucuong/spectrum | 12 | 6613510 | from .truthdiscoverer import TruthDiscoverer
import pandas as pd
import numpy as np
class MajorityVoting(TruthDiscoverer):
"""Find truths by majority voting."""
def discover(self, claims, auxiliary_data=None):
return (self._majority_vote(claims), None)
def _majority_vote(self, claims):
"""Perform truth discovery using majority voting
Parameters
----------
claims: pd.DataFrame
a data frame that has columns [source_id, object_id, value]
Returns
-------
discovered_truths: pd.DataFrame
a data frame that has [object_id, value]
"""
c_df = claims[['source_id', 'object_id', 'value']].copy()
discovered_truths = c_df.groupby(['object_id'
]).apply(lambda x: self.elect(x))
discovered_truths = pd.DataFrame(discovered_truths)
discovered_truths = discovered_truths.rename(columns={
0: 'value'
}).reset_index()
return discovered_truths
def elect(self, x):
"""compute the truth value based on voting; the value received the most votes (by sources) is returned
Parameters
----------
x: pd.DataFrame
Returns
-------
discovered_truth: pd.DataFrame
the discovered truth
"""
return x.value.value_counts().idxmax() | from .truthdiscoverer import TruthDiscoverer
import pandas as pd
import numpy as np
class MajorityVoting(TruthDiscoverer):
"""Find truths by majority voting."""
def discover(self, claims, auxiliary_data=None):
return (self._majority_vote(claims), None)
def _majority_vote(self, claims):
"""Perform truth discovery using majority voting
Parameters
----------
claims: pd.DataFrame
a data frame that has columns [source_id, object_id, value]
Returns
-------
discovered_truths: pd.DataFrame
a data frame that has [object_id, value]
"""
c_df = claims[['source_id', 'object_id', 'value']].copy()
discovered_truths = c_df.groupby(['object_id'
]).apply(lambda x: self.elect(x))
discovered_truths = pd.DataFrame(discovered_truths)
discovered_truths = discovered_truths.rename(columns={
0: 'value'
}).reset_index()
return discovered_truths
def elect(self, x):
"""compute the truth value based on voting; the value received the most votes (by sources) is returned
Parameters
----------
x: pd.DataFrame
Returns
-------
discovered_truth: pd.DataFrame
the discovered truth
"""
return x.value.value_counts().idxmax() | en | 0.817208 | Find truths by majority voting. Perform truth discovery using majority voting Parameters ---------- claims: pd.DataFrame a data frame that has columns [source_id, object_id, value] Returns ------- discovered_truths: pd.DataFrame a data frame that has [object_id, value] compute the truth value based on voting; the value received the most votes (by sources) is returned Parameters ---------- x: pd.DataFrame Returns ------- discovered_truth: pd.DataFrame the discovered truth | 3.526485 | 4 |
splitviewfuse/SplitViewFuse.py | seiferma/splitviewfuse | 12 | 6613511 | from fuse import FUSE
from splitviewfuse import SplitViewFuseBase
from splitviewfuse.filehandlecontainers.VirtualFileSegmentFileHandleContainer import VirtualFileSegmentFileHandleContainer
from splitviewfuse.SegmentUtils import SegmentUtils
from math import ceil
import os
import sys
from splitviewfuse.SplitViewFuseBase import ArgumentParserError
from argparse import ArgumentTypeError
class SplitViewFuse(SplitViewFuseBase.SplitViewFuseBase):
def __init__(self, root, maxSegmentSize, loglevel, logfile):
super(SplitViewFuse, self).__init__(root, maxSegmentSize, VirtualFileSegmentFileHandleContainer(maxSegmentSize), loglevel, logfile)
def _SplitViewFuseBase__processReadDirEntry(self, absRootPath, entry):
dirContent = list()
absRootPathEntry = os.path.join(absRootPath, entry)
# split large files
if not os.path.isdir(absRootPathEntry) and os.path.exists(absRootPathEntry):
fileSize = os.path.getsize(absRootPathEntry)
if fileSize > self.maxFileSize:
numberOfParts = int(ceil(fileSize / float(self.maxFileSize)))
for i in range(0, numberOfParts):
dirContent.append(SegmentUtils.joinSegmentPath(entry, i))
return dirContent
# return not splitted entry
dirContent.append(entry)
return dirContent
def main():
try:
args = SplitViewFuseBase.parseArguments(sys.argv, 'Filesystem that splits files into segments of given size. The size is specified in the mount options.')
_ = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize'], args.mountOptions['loglevel'], args.mountOptions['logfile']), args.dir, **args.mountOptions['other'])
#fuse = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize']), args.dir, nothreads=True, foreground=True)
except ArgumentParserError as e:
print('Error during command line parsing: {0}'.format(str(e)))
sys.exit(1)
except ArgumentTypeError as e:
print('Error during command line parsing: {0}'.format(str(e)))
sys.exit(1)
if __name__ == '__main__':
main()
| from fuse import FUSE
from splitviewfuse import SplitViewFuseBase
from splitviewfuse.filehandlecontainers.VirtualFileSegmentFileHandleContainer import VirtualFileSegmentFileHandleContainer
from splitviewfuse.SegmentUtils import SegmentUtils
from math import ceil
import os
import sys
from splitviewfuse.SplitViewFuseBase import ArgumentParserError
from argparse import ArgumentTypeError
class SplitViewFuse(SplitViewFuseBase.SplitViewFuseBase):
def __init__(self, root, maxSegmentSize, loglevel, logfile):
super(SplitViewFuse, self).__init__(root, maxSegmentSize, VirtualFileSegmentFileHandleContainer(maxSegmentSize), loglevel, logfile)
def _SplitViewFuseBase__processReadDirEntry(self, absRootPath, entry):
dirContent = list()
absRootPathEntry = os.path.join(absRootPath, entry)
# split large files
if not os.path.isdir(absRootPathEntry) and os.path.exists(absRootPathEntry):
fileSize = os.path.getsize(absRootPathEntry)
if fileSize > self.maxFileSize:
numberOfParts = int(ceil(fileSize / float(self.maxFileSize)))
for i in range(0, numberOfParts):
dirContent.append(SegmentUtils.joinSegmentPath(entry, i))
return dirContent
# return not splitted entry
dirContent.append(entry)
return dirContent
def main():
try:
args = SplitViewFuseBase.parseArguments(sys.argv, 'Filesystem that splits files into segments of given size. The size is specified in the mount options.')
_ = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize'], args.mountOptions['loglevel'], args.mountOptions['logfile']), args.dir, **args.mountOptions['other'])
#fuse = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize']), args.dir, nothreads=True, foreground=True)
except ArgumentParserError as e:
print('Error during command line parsing: {0}'.format(str(e)))
sys.exit(1)
except ArgumentTypeError as e:
print('Error during command line parsing: {0}'.format(str(e)))
sys.exit(1)
if __name__ == '__main__':
main()
| en | 0.463629 | # split large files # return not splitted entry #fuse = FUSE(SplitViewFuse(args.device, args.mountOptions['segmentsize']), args.dir, nothreads=True, foreground=True) | 2.34973 | 2 |
qrandom/lib/correlation.py | lastmeta/qrandom | 0 | 6613512 | ''' notes on detecting correlation
when they're zero they're moving togethers.
so we could take the inverse around the axis 0 of one and get the areas that are
on average zero or close to zero, if it's close to zero more often than it should
then we've detected a correlation...
'''
>>> import numpy as np
>>> a = [1,2,3,4,5,6,5,6,5,4,3,2,3,2,3,4,5,4,3,2,1]
>>> b = [1,2,1,2,1,2,3,4,3,2,1,0,1,2,3,4,5,6,7,6,7]
>>> np.array(a) - np.array(b)
array([ 0, 0, 2, 2, 4, 4, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, -2, -4, -4, -6])
>>> np.array(b) - np.array(a) # not the inverse, just the reverse
array([ 0, 0, -2, -2, -4, -4, -2, -2, -2, -2, -2, -2, -2, 0, 0, 0, 0, 2, 4, 4, 6])
| ''' notes on detecting correlation
when they're zero they're moving togethers.
so we could take the inverse around the axis 0 of one and get the areas that are
on average zero or close to zero, if it's close to zero more often than it should
then we've detected a correlation...
'''
>>> import numpy as np
>>> a = [1,2,3,4,5,6,5,6,5,4,3,2,3,2,3,4,5,4,3,2,1]
>>> b = [1,2,1,2,1,2,3,4,3,2,1,0,1,2,3,4,5,6,7,6,7]
>>> np.array(a) - np.array(b)
array([ 0, 0, 2, 2, 4, 4, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, -2, -4, -4, -6])
>>> np.array(b) - np.array(a) # not the inverse, just the reverse
array([ 0, 0, -2, -2, -4, -4, -2, -2, -2, -2, -2, -2, -2, 0, 0, 0, 0, 2, 4, 4, 6])
| en | 0.962657 | notes on detecting correlation when they're zero they're moving togethers. so we could take the inverse around the axis 0 of one and get the areas that are on average zero or close to zero, if it's close to zero more often than it should then we've detected a correlation... # not the inverse, just the reverse | 3.51612 | 4 |
sessions/003 session-numbers/exercises/cash_register_video.py | robotlightsyou/pfb-resources | 0 | 6613513 | <reponame>robotlightsyou/pfb-resources<filename>sessions/003 session-numbers/exercises/cash_register_video.py
#! /usr/bin/env python3
'''
write a function that will ask for the
user for input which will be an amount
of money, then return the minimum
number of coins.
'''
user_coins = [25, 10, 5, 1]
# print return of get_change
def main():
print(get_change())
def get_change():
# take input
print("How much change is owed?")
amount = input("> ")
# modify input from integer
amount = int(float(amount) * 100)
# find least number of coins
# coins = 0
# coins += amount // 25 # -> 1
# amount %= 25 # -> 16
# coins += amount // 10 # -> 1
# amount %= 10 # -> 6
# coins += amount // 5 # -> 1
# amount %= 5 # 1
# coins += amount
# return coins
# find least number of coins
total_coins = 0
for coin in user_coins:
total_coins += amount // coin
amount %= coin
return total_coins
if __name__ == '__main__':
main()
'''
.31
[25, 10, 1]
1 '25', 6 '1'
3 '10', 1 '1'
''' | session-numbers/exercises/cash_register_video.py
#! /usr/bin/env python3
'''
write a function that will ask for the
user for input which will be an amount
of money, then return the minimum
number of coins.
'''
user_coins = [25, 10, 5, 1]
# print return of get_change
def main():
print(get_change())
def get_change():
# take input
print("How much change is owed?")
amount = input("> ")
# modify input from integer
amount = int(float(amount) * 100)
# find least number of coins
# coins = 0
# coins += amount // 25 # -> 1
# amount %= 25 # -> 16
# coins += amount // 10 # -> 1
# amount %= 10 # -> 6
# coins += amount // 5 # -> 1
# amount %= 5 # 1
# coins += amount
# return coins
# find least number of coins
total_coins = 0
for coin in user_coins:
total_coins += amount // coin
amount %= coin
return total_coins
if __name__ == '__main__':
main()
'''
.31
[25, 10, 1]
1 '25', 6 '1'
3 '10', 1 '1'
''' | en | 0.566399 | #! /usr/bin/env python3 write a function that will ask for the user for input which will be an amount of money, then return the minimum number of coins. # print return of get_change # take input # modify input from integer # find least number of coins # coins = 0 # coins += amount // 25 # -> 1 # amount %= 25 # -> 16 # coins += amount // 10 # -> 1 # amount %= 10 # -> 6 # coins += amount // 5 # -> 1 # amount %= 5 # 1 # coins += amount # return coins # find least number of coins .31 [25, 10, 1] 1 '25', 6 '1' 3 '10', 1 '1' | 4.041521 | 4 |
hackerearth/Algorithms/Crazy Matrix/solution.py | ATrain951/01.python-com_Qproject | 4 | 6613514 | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import deque
def moves(x, y, size):
for i in (x - 1, x + 2):
if 0 <= i < size:
for j in (y - 1, y + 2):
if 0 <= j < size:
if i != x and j != y:
yield i, j
def check_path_row(x, y, adjacency, size):
visited = {(x, y)}
if adjacency[x][y] != 1:
return False
stack = deque([(x, y)])
while stack:
curr_x, curr_y = stack.pop()
if curr_x == size - 1:
return True
for next_x, next_y in moves(curr_x, curr_y, size):
if (next_x, next_y) not in visited and adjacency[next_x][next_y] == 1:
visited.add((next_x, next_y))
stack.append((next_x, next_y))
return False
def check_path_col(x, y, adjacency, size):
visited = {(x, y)}
if adjacency[x][y] != 2:
return False
stack = deque([(x, y)])
while stack:
curr_x, curr_y = stack.pop()
if curr_y == size - 1:
return True
for next_x, next_y in moves(curr_x, curr_y, size):
if (next_x, next_y) not in visited and adjacency[next_x][next_y] == 2:
visited.add((next_x, next_y))
stack.append((next_x, next_y))
return False
n = int(input())
matrix = []
for _ in range(n):
matrix.append(list(map(int, input().strip().split())))
is_row = False
for col in range(n):
if check_path_row(0, col, matrix, n):
is_row = True
break
is_col = False
for row in range(n):
if check_path_col(row, 0, matrix, n):
is_col = True
break
if is_row:
if is_col:
print('AMBIGUOUS')
else:
print(1)
elif is_col:
print(2)
else:
print(0)
| """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import deque
def moves(x, y, size):
for i in (x - 1, x + 2):
if 0 <= i < size:
for j in (y - 1, y + 2):
if 0 <= j < size:
if i != x and j != y:
yield i, j
def check_path_row(x, y, adjacency, size):
visited = {(x, y)}
if adjacency[x][y] != 1:
return False
stack = deque([(x, y)])
while stack:
curr_x, curr_y = stack.pop()
if curr_x == size - 1:
return True
for next_x, next_y in moves(curr_x, curr_y, size):
if (next_x, next_y) not in visited and adjacency[next_x][next_y] == 1:
visited.add((next_x, next_y))
stack.append((next_x, next_y))
return False
def check_path_col(x, y, adjacency, size):
visited = {(x, y)}
if adjacency[x][y] != 2:
return False
stack = deque([(x, y)])
while stack:
curr_x, curr_y = stack.pop()
if curr_y == size - 1:
return True
for next_x, next_y in moves(curr_x, curr_y, size):
if (next_x, next_y) not in visited and adjacency[next_x][next_y] == 2:
visited.add((next_x, next_y))
stack.append((next_x, next_y))
return False
n = int(input())
matrix = []
for _ in range(n):
matrix.append(list(map(int, input().strip().split())))
is_row = False
for col in range(n):
if check_path_row(0, col, matrix, n):
is_row = True
break
is_col = False
for row in range(n):
if check_path_col(row, 0, matrix, n):
is_col = True
break
if is_row:
if is_col:
print('AMBIGUOUS')
else:
print(1)
elif is_col:
print(2)
else:
print(0)
| en | 0.705749 | # Sample code to perform I/O: name = input() # Reading input from STDIN print('Hi, %s.' % name) # Writing output to STDOUT # Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail # Write your code here | 3.562859 | 4 |
2020/04.py | MastProTech/Advent-of-Code | 1 | 6613515 | <filename>2020/04.py
import re
from runner import read_file
def extract_keys_values(text:str)->list: # Seperates each passport, and then each passport's keys and values
t_list=re.split('\n{2}', text)
t_list=list(map(str.split, t_list))
output=list()
for i in range(len(t_list)):
output.append([])
for j in range(len(t_list[i])):
output[i].append(t_list[i][j].split(':'))
output[i]=dict(output[i])
return output
def return_passport_validity_part1(l:list)->bool:
i=l.keys()
if 'ecl' in i and 'pid' in i and 'eyr' in i and 'hcl' in i and 'byr' in i and 'iyr' in i and 'hgt' in i:
return True
return False
def verify(key:str, val:str)->bool: # Verifies if keys are assigned valid values or not
if key=='byr':
if int(val)>=1920 and int(val)<=2002:
return True
elif key=='iyr':
if int(val)>=2010 and int(val)<=2020:
return True
elif key=='eyr':
if int(val)>=2020 and int(val)<=2030:
return True
elif key=='hgt':
if val[-2:]=='cm':
if int(val[:-2])>=150 and int(val[:-2])<=193:
return True
elif val[-2:]=='in':
if int(val[:-2])>=59 and int(val[:-2])<=76:
return True
elif key=='hcl':
match=re.match('^#[0-9a-f]{6}$', val)
if match is not None:
return True
elif key=='ecl':
ecl=['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if val in ecl:
return True
elif key=='pid':
match=re.match('^[0-9]{9}$', val)
if match is not None:
return True
return False
def return_passport_validity_part2(l:list)->bool:
i=l.keys()
if (
('byr' in i and verify('byr', l['byr'])) and
('iyr' in i and verify('iyr', l['iyr'])) and
('eyr' in i and verify('eyr', l['eyr'])) and
('hgt' in i and verify('hgt', l['hgt'])) and
('hcl' in i and verify('hcl', l['hcl'])) and
('ecl' in i and verify('ecl', l['ecl'])) and
('pid' in i and verify('pid', l['pid']))):
return True
return False
def clone_part2(l:list)->bool: # NOTE: Copied code of this function. Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/
valid=False
fields_required={'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
field_pattern = {'byr': '(^(19)[2-9][0-9]$)|(^(200)[0-2]$)',
'iyr': '(^(201)[0-9]$)|(^(2020)$)',
'eyr': '(^(202)[0-9]$)|(^(2030)$)',
'hgt': '(^((1[5-8][0-9])|((19)[0-3]))cm$)|(^((59)|(6[0-9])|(7[0-6]))in$)',
'hcl': '^#[0-9a-f]{6}$',
'ecl': '(^amb$)|(^blu$)|(^brn$)|(^gry$)|(^grn$)|(^hzl$)|(^oth$)',
'pid': '^[0-9]{9}$',
'cid': '(.*?)'}
if fields_required.issubset(l.keys()):
valid=True
for key in l.keys():
valid=valid and bool(re.match(field_pattern[key], l[key]))
return valid
if __name__=='__main__':
text=read_file('04.txt')
output=extract_keys_values(text)
print('Total Passports:',len(output))
print('Part 1: Valid Passports:',list(map(return_passport_validity_part1, output)).count(True))
print('Part 2: Valid Passports:',list(map(return_passport_validity_part2, output)).count(True))
print('Part 2: (Using another function):',list(map(clone_part2, output)).count(True)) # One of the best solutions I found on the internet. ♥ Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/ | <filename>2020/04.py
import re
from runner import read_file
def extract_keys_values(text:str)->list: # Seperates each passport, and then each passport's keys and values
t_list=re.split('\n{2}', text)
t_list=list(map(str.split, t_list))
output=list()
for i in range(len(t_list)):
output.append([])
for j in range(len(t_list[i])):
output[i].append(t_list[i][j].split(':'))
output[i]=dict(output[i])
return output
def return_passport_validity_part1(l:list)->bool:
i=l.keys()
if 'ecl' in i and 'pid' in i and 'eyr' in i and 'hcl' in i and 'byr' in i and 'iyr' in i and 'hgt' in i:
return True
return False
def verify(key:str, val:str)->bool: # Verifies if keys are assigned valid values or not
if key=='byr':
if int(val)>=1920 and int(val)<=2002:
return True
elif key=='iyr':
if int(val)>=2010 and int(val)<=2020:
return True
elif key=='eyr':
if int(val)>=2020 and int(val)<=2030:
return True
elif key=='hgt':
if val[-2:]=='cm':
if int(val[:-2])>=150 and int(val[:-2])<=193:
return True
elif val[-2:]=='in':
if int(val[:-2])>=59 and int(val[:-2])<=76:
return True
elif key=='hcl':
match=re.match('^#[0-9a-f]{6}$', val)
if match is not None:
return True
elif key=='ecl':
ecl=['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
if val in ecl:
return True
elif key=='pid':
match=re.match('^[0-9]{9}$', val)
if match is not None:
return True
return False
def return_passport_validity_part2(l:list)->bool:
i=l.keys()
if (
('byr' in i and verify('byr', l['byr'])) and
('iyr' in i and verify('iyr', l['iyr'])) and
('eyr' in i and verify('eyr', l['eyr'])) and
('hgt' in i and verify('hgt', l['hgt'])) and
('hcl' in i and verify('hcl', l['hcl'])) and
('ecl' in i and verify('ecl', l['ecl'])) and
('pid' in i and verify('pid', l['pid']))):
return True
return False
def clone_part2(l:list)->bool: # NOTE: Copied code of this function. Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/
valid=False
fields_required={'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
field_pattern = {'byr': '(^(19)[2-9][0-9]$)|(^(200)[0-2]$)',
'iyr': '(^(201)[0-9]$)|(^(2020)$)',
'eyr': '(^(202)[0-9]$)|(^(2030)$)',
'hgt': '(^((1[5-8][0-9])|((19)[0-3]))cm$)|(^((59)|(6[0-9])|(7[0-6]))in$)',
'hcl': '^#[0-9a-f]{6}$',
'ecl': '(^amb$)|(^blu$)|(^brn$)|(^gry$)|(^grn$)|(^hzl$)|(^oth$)',
'pid': '^[0-9]{9}$',
'cid': '(.*?)'}
if fields_required.issubset(l.keys()):
valid=True
for key in l.keys():
valid=valid and bool(re.match(field_pattern[key], l[key]))
return valid
if __name__=='__main__':
text=read_file('04.txt')
output=extract_keys_values(text)
print('Total Passports:',len(output))
print('Part 1: Valid Passports:',list(map(return_passport_validity_part1, output)).count(True))
print('Part 2: Valid Passports:',list(map(return_passport_validity_part2, output)).count(True))
print('Part 2: (Using another function):',list(map(clone_part2, output)).count(True)) # One of the best solutions I found on the internet. ♥ Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/ | en | 0.63581 | # Seperates each passport, and then each passport's keys and values # Verifies if keys are assigned valid values or not #[0-9a-f]{6}$', val) # NOTE: Copied code of this function. Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/ #[0-9a-f]{6}$', # One of the best solutions I found on the internet. ♥ Source: https://www.reddit.com/r/adventofcode/comments/k6e8sw/2020_day_04_solutions/gemhjlu/ | 3.21644 | 3 |
tests/test_modules.py | anshulrai/pytorch-toolbelt | 0 | 6613516 | <reponame>anshulrai/pytorch-toolbelt<filename>tests/test_modules.py
import pytest
import torch
import pytorch_toolbelt.modules.encoders as E
from pytorch_toolbelt.modules.backbone.inceptionv4 import inceptionv4
from pytorch_toolbelt.modules.fpn import HFF
from pytorch_toolbelt.utils.torch_utils import maybe_cuda, count_parameters
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="Cuda is not available")
def test_hff_dynamic_size():
feature_maps = [
torch.randn((4, 3, 512, 512)),
torch.randn((4, 3, 256, 256)),
torch.randn((4, 3, 128, 128)),
torch.randn((4, 3, 64, 64)),
]
hff = HFF(upsample_scale=2)
output = hff(feature_maps)
assert output.size(2) == 512
assert output.size(3) == 512
def test_hff_static_size():
feature_maps = [
torch.randn((4, 3, 512, 512)),
torch.randn((4, 3, 384, 384)),
torch.randn((4, 3, 256, 256)),
torch.randn((4, 3, 128, 128)),
torch.randn((4, 3, 32, 32)),
]
hff = HFF(sizes=[(512, 512), (384, 384), (256, 256), (128, 128), (32, 32)])
output = hff(feature_maps)
assert output.size(2) == 512
assert output.size(3) == 512
| import pytest
import torch
import pytorch_toolbelt.modules.encoders as E
from pytorch_toolbelt.modules.backbone.inceptionv4 import inceptionv4
from pytorch_toolbelt.modules.fpn import HFF
from pytorch_toolbelt.utils.torch_utils import maybe_cuda, count_parameters
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="Cuda is not available")
def test_hff_dynamic_size():
feature_maps = [
torch.randn((4, 3, 512, 512)),
torch.randn((4, 3, 256, 256)),
torch.randn((4, 3, 128, 128)),
torch.randn((4, 3, 64, 64)),
]
hff = HFF(upsample_scale=2)
output = hff(feature_maps)
assert output.size(2) == 512
assert output.size(3) == 512
def test_hff_static_size():
feature_maps = [
torch.randn((4, 3, 512, 512)),
torch.randn((4, 3, 384, 384)),
torch.randn((4, 3, 256, 256)),
torch.randn((4, 3, 128, 128)),
torch.randn((4, 3, 32, 32)),
]
hff = HFF(sizes=[(512, 512), (384, 384), (256, 256), (128, 128), (32, 32)])
output = hff(feature_maps)
assert output.size(2) == 512
assert output.size(3) == 512 | none | 1 | 1.975248 | 2 | |
target/migrations/0003_auto_20190421_1909.py | groundupnews/gu | 19 | 6613517 | <filename>target/migrations/0003_auto_20190421_1909.py<gh_stars>10-100
# Generated by Django 2.1.7 on 2019-04-21 17:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('target', '0002_auto_20190421_1530'),
]
operations = [
migrations.AlterField(
model_name='target',
name='letters',
field=models.CharField(max_length=9, unique=True),
),
migrations.AlterField(
model_name='target',
name='number',
field=models.PositiveSmallIntegerField(default=0),
),
]
| <filename>target/migrations/0003_auto_20190421_1909.py<gh_stars>10-100
# Generated by Django 2.1.7 on 2019-04-21 17:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('target', '0002_auto_20190421_1530'),
]
operations = [
migrations.AlterField(
model_name='target',
name='letters',
field=models.CharField(max_length=9, unique=True),
),
migrations.AlterField(
model_name='target',
name='number',
field=models.PositiveSmallIntegerField(default=0),
),
]
| en | 0.690124 | # Generated by Django 2.1.7 on 2019-04-21 17:09 | 1.291615 | 1 |
nova/tests/unit/objects/test_compute_node.py | bopopescu/nova-token | 0 | 6613518 | <reponame>bopopescu/nova-token
begin_unit
comment|'# Copyright 2013 IBM Corp.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'copy'
newline|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'import'
name|'netaddr'
newline|'\n'
name|'from'
name|'oslo_serialization'
name|'import'
name|'jsonutils'
newline|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'timeutils'
newline|'\n'
name|'from'
name|'oslo_versionedobjects'
name|'import'
name|'base'
name|'as'
name|'ovo_base'
newline|'\n'
name|'from'
name|'oslo_versionedobjects'
name|'import'
name|'exception'
name|'as'
name|'ovo_exc'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'db'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'base'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'compute_node'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'hv_spec'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'service'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_pci_device_pools'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'objects'
name|'import'
name|'test_objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
name|'import'
name|'uuidsentinel'
newline|'\n'
nl|'\n'
DECL|variable|NOW
name|'NOW'
op|'='
name|'timeutils'
op|'.'
name|'utcnow'
op|'('
op|')'
op|'.'
name|'replace'
op|'('
name|'microsecond'
op|'='
number|'0'
op|')'
newline|'\n'
DECL|variable|fake_stats
name|'fake_stats'
op|'='
op|'{'
string|"'num_foo'"
op|':'
string|"'10'"
op|'}'
newline|'\n'
DECL|variable|fake_stats_db_format
name|'fake_stats_db_format'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
name|'fake_stats'
op|')'
newline|'\n'
comment|'# host_ip is coerced from a string to an IPAddress'
nl|'\n'
comment|'# but needs to be converted to a string for the database format'
nl|'\n'
DECL|variable|fake_host_ip
name|'fake_host_ip'
op|'='
string|"'127.0.0.1'"
newline|'\n'
DECL|variable|fake_numa_topology
name|'fake_numa_topology'
op|'='
name|'objects'
op|'.'
name|'NUMATopology'
op|'('
nl|'\n'
DECL|variable|cells
name|'cells'
op|'='
op|'['
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'0'
op|','
name|'memory_usage'
op|'='
number|'0'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|siblings
name|'siblings'
op|'='
op|'['
op|']'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'0'
op|','
name|'memory_usage'
op|'='
number|'0'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|siblings
name|'siblings'
op|'='
op|'['
op|']'
op|')'
op|']'
op|')'
newline|'\n'
DECL|variable|fake_numa_topology_db_format
name|'fake_numa_topology_db_format'
op|'='
name|'fake_numa_topology'
op|'.'
name|'_to_json'
op|'('
op|')'
newline|'\n'
DECL|variable|fake_supported_instances
name|'fake_supported_instances'
op|'='
op|'['
op|'('
string|"'x86_64'"
op|','
string|"'kvm'"
op|','
string|"'hvm'"
op|')'
op|']'
newline|'\n'
DECL|variable|fake_hv_spec
name|'fake_hv_spec'
op|'='
name|'hv_spec'
op|'.'
name|'HVSpec'
op|'('
name|'arch'
op|'='
name|'fake_supported_instances'
op|'['
number|'0'
op|']'
op|'['
number|'0'
op|']'
op|','
nl|'\n'
DECL|variable|hv_type
name|'hv_type'
op|'='
name|'fake_supported_instances'
op|'['
number|'0'
op|']'
op|'['
number|'1'
op|']'
op|','
nl|'\n'
DECL|variable|vm_mode
name|'vm_mode'
op|'='
name|'fake_supported_instances'
op|'['
number|'0'
op|']'
op|'['
number|'2'
op|']'
op|')'
newline|'\n'
DECL|variable|fake_supported_hv_specs
name|'fake_supported_hv_specs'
op|'='
op|'['
name|'fake_hv_spec'
op|']'
newline|'\n'
comment|'# for backward compatibility, each supported instance object'
nl|'\n'
comment|'# is stored as a list in the database'
nl|'\n'
DECL|variable|fake_supported_hv_specs_db_format
name|'fake_supported_hv_specs_db_format'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
op|'['
name|'fake_hv_spec'
op|'.'
name|'to_list'
op|'('
op|')'
op|']'
op|')'
newline|'\n'
DECL|variable|fake_pci
name|'fake_pci'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
name|'fake_pci_device_pools'
op|'.'
name|'fake_pool_list_primitive'
op|')'
newline|'\n'
DECL|variable|fake_compute_node
name|'fake_compute_node'
op|'='
op|'{'
nl|'\n'
string|"'created_at'"
op|':'
name|'NOW'
op|','
nl|'\n'
string|"'updated_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'deleted_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'deleted'"
op|':'
name|'False'
op|','
nl|'\n'
string|"'id'"
op|':'
number|'123'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|','
nl|'\n'
string|"'service_id'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'host'"
op|':'
string|"'fake'"
op|','
nl|'\n'
string|"'vcpus'"
op|':'
number|'4'
op|','
nl|'\n'
string|"'memory_mb'"
op|':'
number|'4096'
op|','
nl|'\n'
string|"'local_gb'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'vcpus_used'"
op|':'
number|'2'
op|','
nl|'\n'
string|"'memory_mb_used'"
op|':'
number|'2048'
op|','
nl|'\n'
string|"'local_gb_used'"
op|':'
number|'512'
op|','
nl|'\n'
string|"'hypervisor_type'"
op|':'
string|"'Hyper-Dan-VM-ware'"
op|','
nl|'\n'
string|"'hypervisor_version'"
op|':'
number|'1001'
op|','
nl|'\n'
string|"'hypervisor_hostname'"
op|':'
string|"'vm.danplanet.com'"
op|','
nl|'\n'
string|"'free_ram_mb'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'free_disk_gb'"
op|':'
number|'256'
op|','
nl|'\n'
string|"'current_workload'"
op|':'
number|'100'
op|','
nl|'\n'
string|"'running_vms'"
op|':'
number|'2013'
op|','
nl|'\n'
string|"'cpu_info'"
op|':'
string|"'Schmintel i786'"
op|','
nl|'\n'
string|"'disk_available_least'"
op|':'
number|'256'
op|','
nl|'\n'
string|"'metrics'"
op|':'
string|"''"
op|','
nl|'\n'
string|"'stats'"
op|':'
name|'fake_stats_db_format'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'numa_topology'"
op|':'
name|'fake_numa_topology_db_format'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_hv_specs_db_format'
op|','
nl|'\n'
string|"'pci_stats'"
op|':'
name|'fake_pci'
op|','
nl|'\n'
string|"'cpu_allocation_ratio'"
op|':'
number|'16.0'
op|','
nl|'\n'
string|"'ram_allocation_ratio'"
op|':'
number|'1.5'
op|','
nl|'\n'
string|"'disk_allocation_ratio'"
op|':'
number|'1.0'
op|','
nl|'\n'
op|'}'
newline|'\n'
comment|'# FIXME(sbauza) : For compatibility checking, to be removed once we are sure'
nl|'\n'
comment|'# that all computes are running latest DB version with host field in it.'
nl|'\n'
DECL|variable|fake_old_compute_node
name|'fake_old_compute_node'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'del'
name|'fake_old_compute_node'
op|'['
string|"'host'"
op|']'
newline|'\n'
comment|'# resources are passed from the virt drivers and copied into the compute_node'
nl|'\n'
DECL|variable|fake_resources
name|'fake_resources'
op|'='
op|'{'
nl|'\n'
string|"'vcpus'"
op|':'
number|'2'
op|','
nl|'\n'
string|"'memory_mb'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'local_gb'"
op|':'
number|'10'
op|','
nl|'\n'
string|"'cpu_info'"
op|':'
string|"'fake-info'"
op|','
nl|'\n'
string|"'vcpus_used'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'memory_mb_used'"
op|':'
number|'512'
op|','
nl|'\n'
string|"'local_gb_used'"
op|':'
number|'4'
op|','
nl|'\n'
string|"'numa_topology'"
op|':'
name|'fake_numa_topology_db_format'
op|','
nl|'\n'
string|"'hypervisor_type'"
op|':'
string|"'fake-type'"
op|','
nl|'\n'
string|"'hypervisor_version'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'hypervisor_hostname'"
op|':'
string|"'fake-host'"
op|','
nl|'\n'
string|"'disk_available_least'"
op|':'
number|'256'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_instances'
nl|'\n'
op|'}'
newline|'\n'
DECL|variable|fake_compute_with_resources
name|'fake_compute_with_resources'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
nl|'\n'
DECL|variable|vcpus
name|'vcpus'
op|'='
name|'fake_resources'
op|'['
string|"'vcpus'"
op|']'
op|','
nl|'\n'
DECL|variable|memory_mb
name|'memory_mb'
op|'='
name|'fake_resources'
op|'['
string|"'memory_mb'"
op|']'
op|','
nl|'\n'
DECL|variable|local_gb
name|'local_gb'
op|'='
name|'fake_resources'
op|'['
string|"'local_gb'"
op|']'
op|','
nl|'\n'
DECL|variable|cpu_info
name|'cpu_info'
op|'='
name|'fake_resources'
op|'['
string|"'cpu_info'"
op|']'
op|','
nl|'\n'
DECL|variable|vcpus_used
name|'vcpus_used'
op|'='
name|'fake_resources'
op|'['
string|"'vcpus_used'"
op|']'
op|','
nl|'\n'
DECL|variable|memory_mb_used
name|'memory_mb_used'
op|'='
name|'fake_resources'
op|'['
string|"'memory_mb_used'"
op|']'
op|','
nl|'\n'
DECL|variable|local_gb_used
name|'local_gb_used'
op|'='
name|'fake_resources'
op|'['
string|"'local_gb_used'"
op|']'
op|','
nl|'\n'
DECL|variable|numa_topology
name|'numa_topology'
op|'='
name|'fake_resources'
op|'['
string|"'numa_topology'"
op|']'
op|','
nl|'\n'
DECL|variable|hypervisor_type
name|'hypervisor_type'
op|'='
name|'fake_resources'
op|'['
string|"'hypervisor_type'"
op|']'
op|','
nl|'\n'
DECL|variable|hypervisor_version
name|'hypervisor_version'
op|'='
name|'fake_resources'
op|'['
string|"'hypervisor_version'"
op|']'
op|','
nl|'\n'
DECL|variable|hypervisor_hostname
name|'hypervisor_hostname'
op|'='
name|'fake_resources'
op|'['
string|"'hypervisor_hostname'"
op|']'
op|','
nl|'\n'
DECL|variable|disk_available_least
name|'disk_available_least'
op|'='
name|'fake_resources'
op|'['
string|"'disk_available_least'"
op|']'
op|','
nl|'\n'
DECL|variable|host_ip
name|'host_ip'
op|'='
name|'netaddr'
op|'.'
name|'IPAddress'
op|'('
name|'fake_resources'
op|'['
string|"'host_ip'"
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|supported_hv_specs
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
op|','
nl|'\n'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|_TestComputeNodeObject
name|'class'
name|'_TestComputeNodeObject'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
DECL|member|supported_hv_specs_comparator
indent|' '
name|'def'
name|'supported_hv_specs_comparator'
op|'('
name|'self'
op|','
name|'expected'
op|','
name|'obj_val'
op|')'
op|':'
newline|'\n'
indent|' '
name|'obj_val'
op|'='
op|'['
name|'inst'
op|'.'
name|'to_list'
op|'('
op|')'
name|'for'
name|'inst'
name|'in'
name|'obj_val'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertJsonEqual'
op|'('
name|'expected'
op|','
name|'obj_val'
op|')'
newline|'\n'
nl|'\n'
DECL|member|pci_device_pools_comparator
dedent|''
name|'def'
name|'pci_device_pools_comparator'
op|'('
name|'self'
op|','
name|'expected'
op|','
name|'obj_val'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'obj_val'
name|'is'
name|'not'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'obj_val'
op|'='
name|'obj_val'
op|'.'
name|'obj_to_primitive'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertJsonEqual'
op|'('
name|'expected'
op|','
name|'obj_val'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected'
op|','
name|'obj_val'
op|')'
newline|'\n'
nl|'\n'
DECL|member|comparators
dedent|''
dedent|''
name|'def'
name|'comparators'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
string|"'stats'"
op|':'
name|'self'
op|'.'
name|'assertJsonEqual'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'self'
op|'.'
name|'str_comparator'
op|','
nl|'\n'
string|"'supported_hv_specs'"
op|':'
name|'self'
op|'.'
name|'supported_hv_specs_comparator'
op|','
nl|'\n'
string|"'pci_device_pools'"
op|':'
name|'self'
op|'.'
name|'pci_device_pools_comparator'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|subs
dedent|''
name|'def'
name|'subs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
string|"'supported_hv_specs'"
op|':'
string|"'supported_instances'"
op|','
nl|'\n'
string|"'pci_device_pools'"
op|':'
string|"'pci_stats'"
op|'}'
newline|'\n'
nl|'\n'
DECL|member|test_get_by_id
dedent|''
name|'def'
name|'test_get_by_id'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_get'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_get'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'uuid'"
op|','
name|'compute'
op|'.'
name|'obj_what_changed'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'objects'
op|'.'
name|'Service'
op|','
string|"'get_by_id'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_get'"
op|')'
newline|'\n'
DECL|member|test_get_by_id_with_host_field_not_in_db
name|'def'
name|'test_get_by_id_with_host_field_not_in_db'
op|'('
name|'self'
op|','
name|'mock_cn_get'
op|','
nl|'\n'
name|'mock_obj_svc_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_compute_node_with_svc_id'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'fake_compute_node_with_svc_id'
op|'['
string|"'service_id'"
op|']'
op|'='
number|'123'
newline|'\n'
name|'fake_compute_node_with_no_host'
op|'='
name|'fake_compute_node_with_svc_id'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'host'
op|'='
name|'fake_compute_node_with_no_host'
op|'.'
name|'pop'
op|'('
string|"'host'"
op|')'
newline|'\n'
name|'fake_service'
op|'='
name|'service'
op|'.'
name|'Service'
op|'('
name|'id'
op|'='
number|'123'
op|')'
newline|'\n'
name|'fake_service'
op|'.'
name|'host'
op|'='
name|'host'
newline|'\n'
nl|'\n'
name|'mock_cn_get'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node_with_no_host'
newline|'\n'
name|'mock_obj_svc_get'
op|'.'
name|'return_value'
op|'='
name|'fake_service'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node_with_svc_id'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_by_service_id
dedent|''
name|'def'
name|'test_get_by_service_id'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_nodes_get_by_service_id'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_nodes_get_by_service_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'456'
op|')'
op|'.'
name|'AndReturn'
op|'('
nl|'\n'
op|'['
name|'fake_compute_node'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_service_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'456'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_get_by_host_and_nodename'"
op|')'
newline|'\n'
DECL|member|test_get_by_host_and_nodename
name|'def'
name|'test_get_by_host_and_nodename'
op|'('
name|'self'
op|','
name|'cn_get_by_h_and_n'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_by_h_and_n'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_host_and_nodename'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
string|"'fake'"
op|','
string|"'vm.danplanet.com'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get_all_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_first_node_by_host_for_old_compat
name|'def'
name|'test_get_first_node_by_host_for_old_compat'
op|'('
nl|'\n'
name|'self'
op|','
name|'cn_get_all_by_host'
op|')'
op|':'
newline|'\n'
indent|' '
name|'another_node'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'another_node'
op|'['
string|"'hypervisor_hostname'"
op|']'
op|'='
string|"'neverland'"
newline|'\n'
name|'cn_get_all_by_host'
op|'.'
name|'return_value'
op|'='
op|'['
name|'fake_compute_node'
op|','
name|'another_node'
op|']'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
op|'('
nl|'\n'
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_first_node_by_host_for_old_compat'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
string|"'fake'"
op|')'
nl|'\n'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ComputeNodeList.get_all_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_first_node_by_host_for_old_compat_not_found
name|'def'
name|'test_get_first_node_by_host_for_old_compat_not_found'
op|'('
nl|'\n'
name|'self'
op|','
name|'cn_get_all_by_host'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_all_by_host'
op|'.'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'ComputeHostNotFound'
op|'('
nl|'\n'
name|'host'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
nl|'\n'
name|'exception'
op|'.'
name|'ComputeHostNotFound'
op|','
nl|'\n'
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_first_node_by_host_for_old_compat'
op|','
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_create
name|'def'
name|'test_create'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_create'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_create'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|"'service_id'"
op|':'
number|'456'
op|','
nl|'\n'
string|"'stats'"
op|':'
name|'fake_stats_db_format'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_hv_specs_db_format'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|','
nl|'\n'
op|'}'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'service_id'
op|'='
number|'456'
newline|'\n'
name|'compute'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
newline|'\n'
name|'compute'
op|'.'
name|'stats'
op|'='
name|'fake_stats'
newline|'\n'
comment|'# NOTE (pmurray): host_ip is coerced to an IPAddress'
nl|'\n'
name|'compute'
op|'.'
name|'host_ip'
op|'='
name|'fake_host_ip'
newline|'\n'
name|'compute'
op|'.'
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
newline|'\n'
name|'with'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'oslo_utils.uuidutils.generate_uuid'"
op|')'
name|'as'
name|'mock_gu'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'mock_gu'
op|'.'
name|'called'
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_create'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'oslo_utils.uuidutils.generate_uuid'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_create_allocates_uuid
name|'def'
name|'test_create_allocates_uuid'
op|'('
name|'self'
op|','
name|'mock_get'
op|','
name|'mock_gu'
op|','
name|'mock_create'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_create'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node'
newline|'\n'
name|'mock_gu'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node'
op|'['
string|"'uuid'"
op|']'
newline|'\n'
name|'obj'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'obj'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'mock_gu'
op|'.'
name|'assert_called_once_with'
op|'('
op|')'
newline|'\n'
name|'mock_create'
op|'.'
name|'assert_called_once_with'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
op|'{'
string|"'uuid'"
op|':'
name|'fake_compute_node'
op|'['
string|"'uuid'"
op|']'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_recreate_fails
name|'def'
name|'test_recreate_fails'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_create'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_create'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
op|'{'
string|"'service_id'"
op|':'
number|'456'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|'}'
op|')'
op|'.'
name|'AndReturn'
op|'('
nl|'\n'
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'service_id'
op|'='
number|'456'
newline|'\n'
name|'compute'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
newline|'\n'
name|'compute'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'ObjectActionError'
op|','
name|'compute'
op|'.'
name|'create'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_save
name|'def'
name|'test_save'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_update'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_update'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|"'vcpus_used'"
op|':'
number|'3'
op|','
nl|'\n'
string|"'stats'"
op|':'
name|'fake_stats_db_format'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_hv_specs_db_format'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|','
nl|'\n'
op|'}'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'vcpus_used'
op|'='
number|'3'
newline|'\n'
name|'compute'
op|'.'
name|'stats'
op|'='
name|'fake_stats'
newline|'\n'
name|'compute'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
newline|'\n'
comment|'# NOTE (pmurray): host_ip is coerced to an IPAddress'
nl|'\n'
name|'compute'
op|'.'
name|'host_ip'
op|'='
name|'fake_host_ip'
newline|'\n'
name|'compute'
op|'.'
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
newline|'\n'
name|'compute'
op|'.'
name|'save'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_update'"
op|')'
newline|'\n'
DECL|member|test_save_pci_device_pools_empty
name|'def'
name|'test_save_pci_device_pools_empty'
op|'('
name|'self'
op|','
name|'mock_update'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_pci'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
nl|'\n'
name|'objects'
op|'.'
name|'PciDevicePoolList'
op|'('
name|'objects'
op|'='
op|'['
op|']'
op|')'
op|'.'
name|'obj_to_primitive'
op|'('
op|')'
op|')'
newline|'\n'
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'compute_dict'
op|'['
string|"'pci_stats'"
op|']'
op|'='
name|'fake_pci'
newline|'\n'
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'compute_dict'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'pci_device_pools'
op|'='
name|'objects'
op|'.'
name|'PciDevicePoolList'
op|'('
name|'objects'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'save'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'compute_dict'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'mock_update'
op|'.'
name|'assert_called_once_with'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|','
op|'{'
string|"'pci_stats'"
op|':'
name|'fake_pci'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_update'"
op|')'
newline|'\n'
DECL|member|test_save_pci_device_pools_null
name|'def'
name|'test_save_pci_device_pools_null'
op|'('
name|'self'
op|','
name|'mock_update'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'compute_dict'
op|'['
string|"'pci_stats'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'compute_dict'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'pci_device_pools'
op|'='
name|'None'
newline|'\n'
name|'compute'
op|'.'
name|'save'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'compute_dict'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'mock_update'
op|'.'
name|'assert_called_once_with'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|','
op|'{'
string|"'pci_stats'"
op|':'
name|'None'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_create'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_get'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_set_id_failure
name|'def'
name|'test_set_id_failure'
op|'('
name|'self'
op|','
name|'mock_get'
op|','
name|'db_mock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
name|'uuid'
op|'='
name|'fake_compute_node'
op|'['
string|"'uuid'"
op|']'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'ovo_exc'
op|'.'
name|'ReadOnlyFieldError'
op|','
name|'setattr'
op|','
nl|'\n'
name|'compute'
op|','
string|"'id'"
op|','
number|'124'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_destroy
dedent|''
name|'def'
name|'test_destroy'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_delete'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_delete'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'destroy'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_all
dedent|''
name|'def'
name|'test_get_all'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_get_all'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
op|'.'
name|'AndReturn'
op|'('
op|'['
name|'fake_compute_node'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_by_hypervisor
dedent|''
name|'def'
name|'test_get_by_hypervisor'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_search_by_hypervisor'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_search_by_hypervisor'
op|'('
name|'self'
op|'.'
name|'context'
op|','
string|"'hyper'"
op|')'
op|'.'
name|'AndReturn'
op|'('
nl|'\n'
op|'['
name|'fake_compute_node'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'get_by_hypervisor'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
string|"'hyper'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_nodes_get_by_service_id'"
op|')'
newline|'\n'
DECL|member|test__get_by_service
name|'def'
name|'test__get_by_service'
op|'('
name|'self'
op|','
name|'cn_get_by_svc_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_by_svc_id'
op|'.'
name|'return_value'
op|'='
op|'['
name|'fake_compute_node'
op|']'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'_get_by_service'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get_all_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_all_by_host
name|'def'
name|'test_get_all_by_host'
op|'('
name|'self'
op|','
name|'cn_get_all_by_host'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_all_by_host'
op|'.'
name|'return_value'
op|'='
op|'['
name|'fake_compute_node'
op|']'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'get_all_by_host'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
string|"'fake'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_numa_topology
dedent|''
name|'def'
name|'test_compat_numa_topology'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'versions'
op|'='
name|'ovo_base'
op|'.'
name|'obj_tree_get_versions'
op|'('
string|"'ComputeNode'"
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.4'"
op|','
nl|'\n'
name|'version_manifest'
op|'='
name|'versions'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'numa_topology'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_supported_hv_specs
dedent|''
name|'def'
name|'test_compat_supported_hv_specs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
newline|'\n'
name|'versions'
op|'='
name|'ovo_base'
op|'.'
name|'obj_tree_get_versions'
op|'('
string|"'ComputeNode'"
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.5'"
op|','
nl|'\n'
name|'version_manifest'
op|'='
name|'versions'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'supported_hv_specs'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_host
dedent|''
name|'def'
name|'test_compat_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.6'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'host'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_pci_device_pools
dedent|''
name|'def'
name|'test_compat_pci_device_pools'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'pci_device_pools'
op|'='
name|'fake_pci_device_pools'
op|'.'
name|'fake_pool_list'
newline|'\n'
name|'versions'
op|'='
name|'ovo_base'
op|'.'
name|'obj_tree_get_versions'
op|'('
string|"'ComputeNode'"
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.8'"
op|','
nl|'\n'
name|'version_manifest'
op|'='
name|'versions'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'pci_device_pools'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.Service.get_by_compute_host'"
op|')'
newline|'\n'
DECL|member|test_compat_service_id
name|'def'
name|'test_compat_service_id'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'objects'
op|'.'
name|'Service'
op|'('
name|'id'
op|'='
number|'1'
op|')'
newline|'\n'
name|'compute'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
name|'host'
op|'='
string|"'fake-host'"
op|','
name|'service_id'
op|'='
name|'None'
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.12'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'primitive'
op|'['
string|"'nova_object.data'"
op|']'
op|'['
string|"'service_id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.Service.get_by_compute_host'"
op|')'
newline|'\n'
DECL|member|test_compat_service_id_compute_host_not_found
name|'def'
name|'test_compat_service_id_compute_host_not_found'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_get'
op|'.'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'ComputeHostNotFound'
op|'('
name|'host'
op|'='
string|"'fake-host'"
op|')'
newline|'\n'
name|'compute'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
name|'host'
op|'='
string|"'fake-host'"
op|','
name|'service_id'
op|'='
name|'None'
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.12'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'-'
number|'1'
op|','
name|'primitive'
op|'['
string|"'nova_object.data'"
op|']'
op|'['
string|"'service_id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver
dedent|''
name|'def'
name|'test_update_from_virt_driver'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# copy in case the update has a side effect'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|'('
name|'resources'
op|')'
newline|'\n'
name|'expected'
op|'='
name|'fake_compute_with_resources'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'base'
op|'.'
name|'obj_equal_prims'
op|'('
name|'expected'
op|','
name|'compute'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver_missing_field
dedent|''
name|'def'
name|'test_update_from_virt_driver_missing_field'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# NOTE(pmurray): update_from_virt_driver does not require'
nl|'\n'
comment|'# all fields to be present in resources. Validation of the'
nl|'\n'
comment|'# resources data structure would be done in a different method.'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'del'
name|'resources'
op|'['
string|"'vcpus'"
op|']'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|'('
name|'resources'
op|')'
newline|'\n'
name|'expected'
op|'='
name|'fake_compute_with_resources'
op|'.'
name|'obj_clone'
op|'('
op|')'
newline|'\n'
name|'del'
name|'expected'
op|'.'
name|'vcpus'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'base'
op|'.'
name|'obj_equal_prims'
op|'('
name|'expected'
op|','
name|'compute'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver_extra_field
dedent|''
name|'def'
name|'test_update_from_virt_driver_extra_field'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# copy in case the update has a side effect'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'resources'
op|'['
string|"'extra_field'"
op|']'
op|'='
string|"'nonsense'"
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|'('
name|'resources'
op|')'
newline|'\n'
name|'expected'
op|'='
name|'fake_compute_with_resources'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'base'
op|'.'
name|'obj_equal_prims'
op|'('
name|'expected'
op|','
name|'compute'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver_bad_value
dedent|''
name|'def'
name|'test_update_from_virt_driver_bad_value'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# copy in case the update has a side effect'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'resources'
op|'['
string|"'vcpus'"
op|']'
op|'='
string|"'nonsense'"
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'ValueError'
op|','
nl|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|','
name|'resources'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios
dedent|''
name|'def'
name|'test_compat_allocation_ratios'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.13'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'cpu_allocation_ratio'"
op|','
name|'primitive'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'ram_allocation_ratio'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_disk_allocation_ratio
dedent|''
name|'def'
name|'test_compat_disk_allocation_ratio'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.15'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'disk_allocation_ratio'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios_old_compute
dedent|''
name|'def'
name|'test_compat_allocation_ratios_old_compute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'cpu_allocation_ratio'
op|'='
number|'2.0'
op|','
name|'ram_allocation_ratio'
op|'='
number|'3.0'
op|','
nl|'\n'
name|'disk_allocation_ratio'
op|'='
number|'0.9'
op|')'
newline|'\n'
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
comment|"# old computes don't provide allocation ratios to the table"
nl|'\n'
name|'compute_dict'
op|'['
string|"'cpu_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'ram_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'disk_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'cls'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
newline|'\n'
name|'compute'
op|'='
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'compute_dict'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'2.0'
op|','
name|'compute'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'3.0'
op|','
name|'compute'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0.9'
op|','
name|'compute'
op|'.'
name|'disk_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios_default_values
dedent|''
name|'def'
name|'test_compat_allocation_ratios_default_values'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
comment|'# new computes provide allocation ratios defaulted to 0.0'
nl|'\n'
name|'compute_dict'
op|'['
string|"'cpu_allocation_ratio'"
op|']'
op|'='
number|'0.0'
newline|'\n'
name|'compute_dict'
op|'['
string|"'ram_allocation_ratio'"
op|']'
op|'='
number|'0.0'
newline|'\n'
name|'compute_dict'
op|'['
string|"'disk_allocation_ratio'"
op|']'
op|'='
number|'0.0'
newline|'\n'
name|'cls'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
newline|'\n'
name|'compute'
op|'='
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'compute_dict'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'16.0'
op|','
name|'compute'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.5'
op|','
name|'compute'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.0'
op|','
name|'compute'
op|'.'
name|'disk_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios_old_compute_default_values
dedent|''
name|'def'
name|'test_compat_allocation_ratios_old_compute_default_values'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
comment|"# old computes don't provide allocation ratios to the table"
nl|'\n'
name|'compute_dict'
op|'['
string|"'cpu_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'ram_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'disk_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'cls'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
newline|'\n'
name|'compute'
op|'='
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'compute_dict'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'16.0'
op|','
name|'compute'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.5'
op|','
name|'compute'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.0'
op|','
name|'compute'
op|'.'
name|'disk_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
dedent|''
name|'class'
name|'TestComputeNodeObject'
op|'('
name|'test_objects'
op|'.'
name|'_LocalTest'
op|','
nl|'\n'
DECL|class|TestComputeNodeObject
name|'_TestComputeNodeObject'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
name|'class'
name|'TestRemoteComputeNodeObject'
op|'('
name|'test_objects'
op|'.'
name|'_RemoteTest'
op|','
nl|'\n'
DECL|class|TestRemoteComputeNodeObject
name|'_TestComputeNodeObject'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
dedent|''
endmarker|''
end_unit
| begin_unit
comment|'# Copyright 2013 IBM Corp.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'copy'
newline|'\n'
name|'import'
name|'mock'
newline|'\n'
name|'import'
name|'netaddr'
newline|'\n'
name|'from'
name|'oslo_serialization'
name|'import'
name|'jsonutils'
newline|'\n'
name|'from'
name|'oslo_utils'
name|'import'
name|'timeutils'
newline|'\n'
name|'from'
name|'oslo_versionedobjects'
name|'import'
name|'base'
name|'as'
name|'ovo_base'
newline|'\n'
name|'from'
name|'oslo_versionedobjects'
name|'import'
name|'exception'
name|'as'
name|'ovo_exc'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'db'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'base'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'compute_node'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'hv_spec'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'objects'
name|'import'
name|'service'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'fake_pci_device_pools'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'objects'
name|'import'
name|'test_objects'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
name|'import'
name|'uuidsentinel'
newline|'\n'
nl|'\n'
DECL|variable|NOW
name|'NOW'
op|'='
name|'timeutils'
op|'.'
name|'utcnow'
op|'('
op|')'
op|'.'
name|'replace'
op|'('
name|'microsecond'
op|'='
number|'0'
op|')'
newline|'\n'
DECL|variable|fake_stats
name|'fake_stats'
op|'='
op|'{'
string|"'num_foo'"
op|':'
string|"'10'"
op|'}'
newline|'\n'
DECL|variable|fake_stats_db_format
name|'fake_stats_db_format'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
name|'fake_stats'
op|')'
newline|'\n'
comment|'# host_ip is coerced from a string to an IPAddress'
nl|'\n'
comment|'# but needs to be converted to a string for the database format'
nl|'\n'
DECL|variable|fake_host_ip
name|'fake_host_ip'
op|'='
string|"'127.0.0.1'"
newline|'\n'
DECL|variable|fake_numa_topology
name|'fake_numa_topology'
op|'='
name|'objects'
op|'.'
name|'NUMATopology'
op|'('
nl|'\n'
DECL|variable|cells
name|'cells'
op|'='
op|'['
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'0'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'1'
op|','
number|'2'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'0'
op|','
name|'memory_usage'
op|'='
number|'0'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|siblings
name|'siblings'
op|'='
op|'['
op|']'
op|')'
op|','
nl|'\n'
name|'objects'
op|'.'
name|'NUMACell'
op|'('
name|'id'
op|'='
number|'1'
op|','
name|'cpuset'
op|'='
name|'set'
op|'('
op|'['
number|'3'
op|','
number|'4'
op|']'
op|')'
op|','
name|'memory'
op|'='
number|'512'
op|','
nl|'\n'
name|'cpu_usage'
op|'='
number|'0'
op|','
name|'memory_usage'
op|'='
number|'0'
op|','
nl|'\n'
name|'mempages'
op|'='
op|'['
op|']'
op|','
name|'pinned_cpus'
op|'='
name|'set'
op|'('
op|'['
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|siblings
name|'siblings'
op|'='
op|'['
op|']'
op|')'
op|']'
op|')'
newline|'\n'
DECL|variable|fake_numa_topology_db_format
name|'fake_numa_topology_db_format'
op|'='
name|'fake_numa_topology'
op|'.'
name|'_to_json'
op|'('
op|')'
newline|'\n'
DECL|variable|fake_supported_instances
name|'fake_supported_instances'
op|'='
op|'['
op|'('
string|"'x86_64'"
op|','
string|"'kvm'"
op|','
string|"'hvm'"
op|')'
op|']'
newline|'\n'
DECL|variable|fake_hv_spec
name|'fake_hv_spec'
op|'='
name|'hv_spec'
op|'.'
name|'HVSpec'
op|'('
name|'arch'
op|'='
name|'fake_supported_instances'
op|'['
number|'0'
op|']'
op|'['
number|'0'
op|']'
op|','
nl|'\n'
DECL|variable|hv_type
name|'hv_type'
op|'='
name|'fake_supported_instances'
op|'['
number|'0'
op|']'
op|'['
number|'1'
op|']'
op|','
nl|'\n'
DECL|variable|vm_mode
name|'vm_mode'
op|'='
name|'fake_supported_instances'
op|'['
number|'0'
op|']'
op|'['
number|'2'
op|']'
op|')'
newline|'\n'
DECL|variable|fake_supported_hv_specs
name|'fake_supported_hv_specs'
op|'='
op|'['
name|'fake_hv_spec'
op|']'
newline|'\n'
comment|'# for backward compatibility, each supported instance object'
nl|'\n'
comment|'# is stored as a list in the database'
nl|'\n'
DECL|variable|fake_supported_hv_specs_db_format
name|'fake_supported_hv_specs_db_format'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
op|'['
name|'fake_hv_spec'
op|'.'
name|'to_list'
op|'('
op|')'
op|']'
op|')'
newline|'\n'
DECL|variable|fake_pci
name|'fake_pci'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
name|'fake_pci_device_pools'
op|'.'
name|'fake_pool_list_primitive'
op|')'
newline|'\n'
DECL|variable|fake_compute_node
name|'fake_compute_node'
op|'='
op|'{'
nl|'\n'
string|"'created_at'"
op|':'
name|'NOW'
op|','
nl|'\n'
string|"'updated_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'deleted_at'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'deleted'"
op|':'
name|'False'
op|','
nl|'\n'
string|"'id'"
op|':'
number|'123'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|','
nl|'\n'
string|"'service_id'"
op|':'
name|'None'
op|','
nl|'\n'
string|"'host'"
op|':'
string|"'fake'"
op|','
nl|'\n'
string|"'vcpus'"
op|':'
number|'4'
op|','
nl|'\n'
string|"'memory_mb'"
op|':'
number|'4096'
op|','
nl|'\n'
string|"'local_gb'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'vcpus_used'"
op|':'
number|'2'
op|','
nl|'\n'
string|"'memory_mb_used'"
op|':'
number|'2048'
op|','
nl|'\n'
string|"'local_gb_used'"
op|':'
number|'512'
op|','
nl|'\n'
string|"'hypervisor_type'"
op|':'
string|"'Hyper-Dan-VM-ware'"
op|','
nl|'\n'
string|"'hypervisor_version'"
op|':'
number|'1001'
op|','
nl|'\n'
string|"'hypervisor_hostname'"
op|':'
string|"'vm.danplanet.com'"
op|','
nl|'\n'
string|"'free_ram_mb'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'free_disk_gb'"
op|':'
number|'256'
op|','
nl|'\n'
string|"'current_workload'"
op|':'
number|'100'
op|','
nl|'\n'
string|"'running_vms'"
op|':'
number|'2013'
op|','
nl|'\n'
string|"'cpu_info'"
op|':'
string|"'Schmintel i786'"
op|','
nl|'\n'
string|"'disk_available_least'"
op|':'
number|'256'
op|','
nl|'\n'
string|"'metrics'"
op|':'
string|"''"
op|','
nl|'\n'
string|"'stats'"
op|':'
name|'fake_stats_db_format'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'numa_topology'"
op|':'
name|'fake_numa_topology_db_format'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_hv_specs_db_format'
op|','
nl|'\n'
string|"'pci_stats'"
op|':'
name|'fake_pci'
op|','
nl|'\n'
string|"'cpu_allocation_ratio'"
op|':'
number|'16.0'
op|','
nl|'\n'
string|"'ram_allocation_ratio'"
op|':'
number|'1.5'
op|','
nl|'\n'
string|"'disk_allocation_ratio'"
op|':'
number|'1.0'
op|','
nl|'\n'
op|'}'
newline|'\n'
comment|'# FIXME(sbauza) : For compatibility checking, to be removed once we are sure'
nl|'\n'
comment|'# that all computes are running latest DB version with host field in it.'
nl|'\n'
DECL|variable|fake_old_compute_node
name|'fake_old_compute_node'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'del'
name|'fake_old_compute_node'
op|'['
string|"'host'"
op|']'
newline|'\n'
comment|'# resources are passed from the virt drivers and copied into the compute_node'
nl|'\n'
DECL|variable|fake_resources
name|'fake_resources'
op|'='
op|'{'
nl|'\n'
string|"'vcpus'"
op|':'
number|'2'
op|','
nl|'\n'
string|"'memory_mb'"
op|':'
number|'1024'
op|','
nl|'\n'
string|"'local_gb'"
op|':'
number|'10'
op|','
nl|'\n'
string|"'cpu_info'"
op|':'
string|"'fake-info'"
op|','
nl|'\n'
string|"'vcpus_used'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'memory_mb_used'"
op|':'
number|'512'
op|','
nl|'\n'
string|"'local_gb_used'"
op|':'
number|'4'
op|','
nl|'\n'
string|"'numa_topology'"
op|':'
name|'fake_numa_topology_db_format'
op|','
nl|'\n'
string|"'hypervisor_type'"
op|':'
string|"'fake-type'"
op|','
nl|'\n'
string|"'hypervisor_version'"
op|':'
number|'1'
op|','
nl|'\n'
string|"'hypervisor_hostname'"
op|':'
string|"'fake-host'"
op|','
nl|'\n'
string|"'disk_available_least'"
op|':'
number|'256'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_instances'
nl|'\n'
op|'}'
newline|'\n'
DECL|variable|fake_compute_with_resources
name|'fake_compute_with_resources'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
nl|'\n'
DECL|variable|vcpus
name|'vcpus'
op|'='
name|'fake_resources'
op|'['
string|"'vcpus'"
op|']'
op|','
nl|'\n'
DECL|variable|memory_mb
name|'memory_mb'
op|'='
name|'fake_resources'
op|'['
string|"'memory_mb'"
op|']'
op|','
nl|'\n'
DECL|variable|local_gb
name|'local_gb'
op|'='
name|'fake_resources'
op|'['
string|"'local_gb'"
op|']'
op|','
nl|'\n'
DECL|variable|cpu_info
name|'cpu_info'
op|'='
name|'fake_resources'
op|'['
string|"'cpu_info'"
op|']'
op|','
nl|'\n'
DECL|variable|vcpus_used
name|'vcpus_used'
op|'='
name|'fake_resources'
op|'['
string|"'vcpus_used'"
op|']'
op|','
nl|'\n'
DECL|variable|memory_mb_used
name|'memory_mb_used'
op|'='
name|'fake_resources'
op|'['
string|"'memory_mb_used'"
op|']'
op|','
nl|'\n'
DECL|variable|local_gb_used
name|'local_gb_used'
op|'='
name|'fake_resources'
op|'['
string|"'local_gb_used'"
op|']'
op|','
nl|'\n'
DECL|variable|numa_topology
name|'numa_topology'
op|'='
name|'fake_resources'
op|'['
string|"'numa_topology'"
op|']'
op|','
nl|'\n'
DECL|variable|hypervisor_type
name|'hypervisor_type'
op|'='
name|'fake_resources'
op|'['
string|"'hypervisor_type'"
op|']'
op|','
nl|'\n'
DECL|variable|hypervisor_version
name|'hypervisor_version'
op|'='
name|'fake_resources'
op|'['
string|"'hypervisor_version'"
op|']'
op|','
nl|'\n'
DECL|variable|hypervisor_hostname
name|'hypervisor_hostname'
op|'='
name|'fake_resources'
op|'['
string|"'hypervisor_hostname'"
op|']'
op|','
nl|'\n'
DECL|variable|disk_available_least
name|'disk_available_least'
op|'='
name|'fake_resources'
op|'['
string|"'disk_available_least'"
op|']'
op|','
nl|'\n'
DECL|variable|host_ip
name|'host_ip'
op|'='
name|'netaddr'
op|'.'
name|'IPAddress'
op|'('
name|'fake_resources'
op|'['
string|"'host_ip'"
op|']'
op|')'
op|','
nl|'\n'
DECL|variable|supported_hv_specs
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
op|','
nl|'\n'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|_TestComputeNodeObject
name|'class'
name|'_TestComputeNodeObject'
op|'('
name|'object'
op|')'
op|':'
newline|'\n'
DECL|member|supported_hv_specs_comparator
indent|' '
name|'def'
name|'supported_hv_specs_comparator'
op|'('
name|'self'
op|','
name|'expected'
op|','
name|'obj_val'
op|')'
op|':'
newline|'\n'
indent|' '
name|'obj_val'
op|'='
op|'['
name|'inst'
op|'.'
name|'to_list'
op|'('
op|')'
name|'for'
name|'inst'
name|'in'
name|'obj_val'
op|']'
newline|'\n'
name|'self'
op|'.'
name|'assertJsonEqual'
op|'('
name|'expected'
op|','
name|'obj_val'
op|')'
newline|'\n'
nl|'\n'
DECL|member|pci_device_pools_comparator
dedent|''
name|'def'
name|'pci_device_pools_comparator'
op|'('
name|'self'
op|','
name|'expected'
op|','
name|'obj_val'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'obj_val'
name|'is'
name|'not'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'obj_val'
op|'='
name|'obj_val'
op|'.'
name|'obj_to_primitive'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertJsonEqual'
op|'('
name|'expected'
op|','
name|'obj_val'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'expected'
op|','
name|'obj_val'
op|')'
newline|'\n'
nl|'\n'
DECL|member|comparators
dedent|''
dedent|''
name|'def'
name|'comparators'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
string|"'stats'"
op|':'
name|'self'
op|'.'
name|'assertJsonEqual'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'self'
op|'.'
name|'str_comparator'
op|','
nl|'\n'
string|"'supported_hv_specs'"
op|':'
name|'self'
op|'.'
name|'supported_hv_specs_comparator'
op|','
nl|'\n'
string|"'pci_device_pools'"
op|':'
name|'self'
op|'.'
name|'pci_device_pools_comparator'
op|','
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|subs
dedent|''
name|'def'
name|'subs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
string|"'supported_hv_specs'"
op|':'
string|"'supported_instances'"
op|','
nl|'\n'
string|"'pci_device_pools'"
op|':'
string|"'pci_stats'"
op|'}'
newline|'\n'
nl|'\n'
DECL|member|test_get_by_id
dedent|''
name|'def'
name|'test_get_by_id'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_get'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_get'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'uuid'"
op|','
name|'compute'
op|'.'
name|'obj_what_changed'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'objects'
op|'.'
name|'Service'
op|','
string|"'get_by_id'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_get'"
op|')'
newline|'\n'
DECL|member|test_get_by_id_with_host_field_not_in_db
name|'def'
name|'test_get_by_id_with_host_field_not_in_db'
op|'('
name|'self'
op|','
name|'mock_cn_get'
op|','
nl|'\n'
name|'mock_obj_svc_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_compute_node_with_svc_id'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'fake_compute_node_with_svc_id'
op|'['
string|"'service_id'"
op|']'
op|'='
number|'123'
newline|'\n'
name|'fake_compute_node_with_no_host'
op|'='
name|'fake_compute_node_with_svc_id'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'host'
op|'='
name|'fake_compute_node_with_no_host'
op|'.'
name|'pop'
op|'('
string|"'host'"
op|')'
newline|'\n'
name|'fake_service'
op|'='
name|'service'
op|'.'
name|'Service'
op|'('
name|'id'
op|'='
number|'123'
op|')'
newline|'\n'
name|'fake_service'
op|'.'
name|'host'
op|'='
name|'host'
newline|'\n'
nl|'\n'
name|'mock_cn_get'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node_with_no_host'
newline|'\n'
name|'mock_obj_svc_get'
op|'.'
name|'return_value'
op|'='
name|'fake_service'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node_with_svc_id'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_by_service_id
dedent|''
name|'def'
name|'test_get_by_service_id'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_nodes_get_by_service_id'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_nodes_get_by_service_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'456'
op|')'
op|'.'
name|'AndReturn'
op|'('
nl|'\n'
op|'['
name|'fake_compute_node'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_service_id'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'456'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_get_by_host_and_nodename'"
op|')'
newline|'\n'
DECL|member|test_get_by_host_and_nodename
name|'def'
name|'test_get_by_host_and_nodename'
op|'('
name|'self'
op|','
name|'cn_get_by_h_and_n'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_by_h_and_n'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_by_host_and_nodename'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
string|"'fake'"
op|','
string|"'vm.danplanet.com'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get_all_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_first_node_by_host_for_old_compat
name|'def'
name|'test_get_first_node_by_host_for_old_compat'
op|'('
nl|'\n'
name|'self'
op|','
name|'cn_get_all_by_host'
op|')'
op|':'
newline|'\n'
indent|' '
name|'another_node'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'another_node'
op|'['
string|"'hypervisor_hostname'"
op|']'
op|'='
string|"'neverland'"
newline|'\n'
name|'cn_get_all_by_host'
op|'.'
name|'return_value'
op|'='
op|'['
name|'fake_compute_node'
op|','
name|'another_node'
op|']'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
op|'('
nl|'\n'
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_first_node_by_host_for_old_compat'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
string|"'fake'"
op|')'
nl|'\n'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.ComputeNodeList.get_all_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_first_node_by_host_for_old_compat_not_found
name|'def'
name|'test_get_first_node_by_host_for_old_compat_not_found'
op|'('
nl|'\n'
name|'self'
op|','
name|'cn_get_all_by_host'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_all_by_host'
op|'.'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'ComputeHostNotFound'
op|'('
nl|'\n'
name|'host'
op|'='
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
nl|'\n'
name|'exception'
op|'.'
name|'ComputeHostNotFound'
op|','
nl|'\n'
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'.'
name|'get_first_node_by_host_for_old_compat'
op|','
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
string|"'fake'"
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_create
name|'def'
name|'test_create'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_create'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_create'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|"'service_id'"
op|':'
number|'456'
op|','
nl|'\n'
string|"'stats'"
op|':'
name|'fake_stats_db_format'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_hv_specs_db_format'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|','
nl|'\n'
op|'}'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'service_id'
op|'='
number|'456'
newline|'\n'
name|'compute'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
newline|'\n'
name|'compute'
op|'.'
name|'stats'
op|'='
name|'fake_stats'
newline|'\n'
comment|'# NOTE (pmurray): host_ip is coerced to an IPAddress'
nl|'\n'
name|'compute'
op|'.'
name|'host_ip'
op|'='
name|'fake_host_ip'
newline|'\n'
name|'compute'
op|'.'
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
newline|'\n'
name|'with'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'oslo_utils.uuidutils.generate_uuid'"
op|')'
name|'as'
name|'mock_gu'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertFalse'
op|'('
name|'mock_gu'
op|'.'
name|'called'
op|')'
newline|'\n'
dedent|''
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_create'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'oslo_utils.uuidutils.generate_uuid'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_create_allocates_uuid
name|'def'
name|'test_create_allocates_uuid'
op|'('
name|'self'
op|','
name|'mock_get'
op|','
name|'mock_gu'
op|','
name|'mock_create'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_create'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node'
newline|'\n'
name|'mock_gu'
op|'.'
name|'return_value'
op|'='
name|'fake_compute_node'
op|'['
string|"'uuid'"
op|']'
newline|'\n'
name|'obj'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'obj'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'mock_gu'
op|'.'
name|'assert_called_once_with'
op|'('
op|')'
newline|'\n'
name|'mock_create'
op|'.'
name|'assert_called_once_with'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
op|'{'
string|"'uuid'"
op|':'
name|'fake_compute_node'
op|'['
string|"'uuid'"
op|']'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_recreate_fails
name|'def'
name|'test_recreate_fails'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_create'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_create'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
op|'{'
string|"'service_id'"
op|':'
number|'456'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|'}'
op|')'
op|'.'
name|'AndReturn'
op|'('
nl|'\n'
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'service_id'
op|'='
number|'456'
newline|'\n'
name|'compute'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
newline|'\n'
name|'compute'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'exception'
op|'.'
name|'ObjectActionError'
op|','
name|'compute'
op|'.'
name|'create'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|','
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_save
name|'def'
name|'test_save'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_update'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_update'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|"'vcpus_used'"
op|':'
number|'3'
op|','
nl|'\n'
string|"'stats'"
op|':'
name|'fake_stats_db_format'
op|','
nl|'\n'
string|"'host_ip'"
op|':'
name|'fake_host_ip'
op|','
nl|'\n'
string|"'supported_instances'"
op|':'
name|'fake_supported_hv_specs_db_format'
op|','
nl|'\n'
string|"'uuid'"
op|':'
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
op|','
nl|'\n'
op|'}'
op|')'
op|'.'
name|'AndReturn'
op|'('
name|'fake_compute_node'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'vcpus_used'
op|'='
number|'3'
newline|'\n'
name|'compute'
op|'.'
name|'stats'
op|'='
name|'fake_stats'
newline|'\n'
name|'compute'
op|'.'
name|'uuid'
op|'='
name|'uuidsentinel'
op|'.'
name|'fake_compute_node'
newline|'\n'
comment|'# NOTE (pmurray): host_ip is coerced to an IPAddress'
nl|'\n'
name|'compute'
op|'.'
name|'host_ip'
op|'='
name|'fake_host_ip'
newline|'\n'
name|'compute'
op|'.'
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
newline|'\n'
name|'compute'
op|'.'
name|'save'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_update'"
op|')'
newline|'\n'
DECL|member|test_save_pci_device_pools_empty
name|'def'
name|'test_save_pci_device_pools_empty'
op|'('
name|'self'
op|','
name|'mock_update'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'fake_pci'
op|'='
name|'jsonutils'
op|'.'
name|'dumps'
op|'('
nl|'\n'
name|'objects'
op|'.'
name|'PciDevicePoolList'
op|'('
name|'objects'
op|'='
op|'['
op|']'
op|')'
op|'.'
name|'obj_to_primitive'
op|'('
op|')'
op|')'
newline|'\n'
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'compute_dict'
op|'['
string|"'pci_stats'"
op|']'
op|'='
name|'fake_pci'
newline|'\n'
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'compute_dict'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'pci_device_pools'
op|'='
name|'objects'
op|'.'
name|'PciDevicePoolList'
op|'('
name|'objects'
op|'='
op|'['
op|']'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'save'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'compute_dict'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'mock_update'
op|'.'
name|'assert_called_once_with'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|','
op|'{'
string|"'pci_stats'"
op|':'
name|'fake_pci'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get'"
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_update'"
op|')'
newline|'\n'
DECL|member|test_save_pci_device_pools_null
name|'def'
name|'test_save_pci_device_pools_null'
op|'('
name|'self'
op|','
name|'mock_update'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
name|'compute_dict'
op|'['
string|"'pci_stats'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'compute_dict'
newline|'\n'
nl|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'pci_device_pools'
op|'='
name|'None'
newline|'\n'
name|'compute'
op|'.'
name|'save'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'compute'
op|','
name|'compute_dict'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'mock_update'
op|'.'
name|'assert_called_once_with'
op|'('
nl|'\n'
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|','
op|'{'
string|"'pci_stats'"
op|':'
name|'None'
op|'}'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_create'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'.'
name|'object'
op|'('
name|'db'
op|','
string|"'compute_node_get'"
op|','
nl|'\n'
name|'return_value'
op|'='
name|'fake_compute_node'
op|')'
newline|'\n'
DECL|member|test_set_id_failure
name|'def'
name|'test_set_id_failure'
op|'('
name|'self'
op|','
name|'mock_get'
op|','
name|'db_mock'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
name|'uuid'
op|'='
name|'fake_compute_node'
op|'['
string|"'uuid'"
op|']'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'ovo_exc'
op|'.'
name|'ReadOnlyFieldError'
op|','
name|'setattr'
op|','
nl|'\n'
name|'compute'
op|','
string|"'id'"
op|','
number|'124'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_destroy
dedent|''
name|'def'
name|'test_destroy'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_delete'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_delete'
op|'('
name|'self'
op|'.'
name|'context'
op|','
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
name|'context'
op|'='
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'id'
op|'='
number|'123'
newline|'\n'
name|'compute'
op|'.'
name|'destroy'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_all
dedent|''
name|'def'
name|'test_get_all'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_get_all'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
op|'.'
name|'AndReturn'
op|'('
op|'['
name|'fake_compute_node'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'get_all'
op|'('
name|'self'
op|'.'
name|'context'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_get_by_hypervisor
dedent|''
name|'def'
name|'test_get_by_hypervisor'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'mox'
op|'.'
name|'StubOutWithMock'
op|'('
name|'db'
op|','
string|"'compute_node_search_by_hypervisor'"
op|')'
newline|'\n'
name|'db'
op|'.'
name|'compute_node_search_by_hypervisor'
op|'('
name|'self'
op|'.'
name|'context'
op|','
string|"'hyper'"
op|')'
op|'.'
name|'AndReturn'
op|'('
nl|'\n'
op|'['
name|'fake_compute_node'
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'mox'
op|'.'
name|'ReplayAll'
op|'('
op|')'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'get_by_hypervisor'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
string|"'hyper'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_nodes_get_by_service_id'"
op|')'
newline|'\n'
DECL|member|test__get_by_service
name|'def'
name|'test__get_by_service'
op|'('
name|'self'
op|','
name|'cn_get_by_svc_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_by_svc_id'
op|'.'
name|'return_value'
op|'='
op|'['
name|'fake_compute_node'
op|']'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'_get_by_service'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
number|'123'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.db.compute_node_get_all_by_host'"
op|')'
newline|'\n'
DECL|member|test_get_all_by_host
name|'def'
name|'test_get_all_by_host'
op|'('
name|'self'
op|','
name|'cn_get_all_by_host'
op|')'
op|':'
newline|'\n'
indent|' '
name|'cn_get_all_by_host'
op|'.'
name|'return_value'
op|'='
op|'['
name|'fake_compute_node'
op|']'
newline|'\n'
name|'computes'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNodeList'
op|'.'
name|'get_all_by_host'
op|'('
name|'self'
op|'.'
name|'context'
op|','
nl|'\n'
string|"'fake'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'len'
op|'('
name|'computes'
op|')'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compare_obj'
op|'('
name|'computes'
op|'['
number|'0'
op|']'
op|','
name|'fake_compute_node'
op|','
nl|'\n'
name|'subs'
op|'='
name|'self'
op|'.'
name|'subs'
op|'('
op|')'
op|','
nl|'\n'
name|'comparators'
op|'='
name|'self'
op|'.'
name|'comparators'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_numa_topology
dedent|''
name|'def'
name|'test_compat_numa_topology'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'versions'
op|'='
name|'ovo_base'
op|'.'
name|'obj_tree_get_versions'
op|'('
string|"'ComputeNode'"
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.4'"
op|','
nl|'\n'
name|'version_manifest'
op|'='
name|'versions'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'numa_topology'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_supported_hv_specs
dedent|''
name|'def'
name|'test_compat_supported_hv_specs'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'supported_hv_specs'
op|'='
name|'fake_supported_hv_specs'
newline|'\n'
name|'versions'
op|'='
name|'ovo_base'
op|'.'
name|'obj_tree_get_versions'
op|'('
string|"'ComputeNode'"
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.5'"
op|','
nl|'\n'
name|'version_manifest'
op|'='
name|'versions'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'supported_hv_specs'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_host
dedent|''
name|'def'
name|'test_compat_host'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.6'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'host'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_pci_device_pools
dedent|''
name|'def'
name|'test_compat_pci_device_pools'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'pci_device_pools'
op|'='
name|'fake_pci_device_pools'
op|'.'
name|'fake_pool_list'
newline|'\n'
name|'versions'
op|'='
name|'ovo_base'
op|'.'
name|'obj_tree_get_versions'
op|'('
string|"'ComputeNode'"
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.8'"
op|','
nl|'\n'
name|'version_manifest'
op|'='
name|'versions'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'pci_device_pools'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.Service.get_by_compute_host'"
op|')'
newline|'\n'
DECL|member|test_compat_service_id
name|'def'
name|'test_compat_service_id'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_get'
op|'.'
name|'return_value'
op|'='
name|'objects'
op|'.'
name|'Service'
op|'('
name|'id'
op|'='
number|'1'
op|')'
newline|'\n'
name|'compute'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
name|'host'
op|'='
string|"'fake-host'"
op|','
name|'service_id'
op|'='
name|'None'
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.12'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1'
op|','
name|'primitive'
op|'['
string|"'nova_object.data'"
op|']'
op|'['
string|"'service_id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'mock'
op|'.'
name|'patch'
op|'('
string|"'nova.objects.Service.get_by_compute_host'"
op|')'
newline|'\n'
DECL|member|test_compat_service_id_compute_host_not_found
name|'def'
name|'test_compat_service_id_compute_host_not_found'
op|'('
name|'self'
op|','
name|'mock_get'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mock_get'
op|'.'
name|'side_effect'
op|'='
name|'exception'
op|'.'
name|'ComputeHostNotFound'
op|'('
name|'host'
op|'='
string|"'fake-host'"
op|')'
newline|'\n'
name|'compute'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
op|'('
name|'host'
op|'='
string|"'fake-host'"
op|','
name|'service_id'
op|'='
name|'None'
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.12'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'-'
number|'1'
op|','
name|'primitive'
op|'['
string|"'nova_object.data'"
op|']'
op|'['
string|"'service_id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver
dedent|''
name|'def'
name|'test_update_from_virt_driver'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# copy in case the update has a side effect'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|'('
name|'resources'
op|')'
newline|'\n'
name|'expected'
op|'='
name|'fake_compute_with_resources'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'base'
op|'.'
name|'obj_equal_prims'
op|'('
name|'expected'
op|','
name|'compute'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver_missing_field
dedent|''
name|'def'
name|'test_update_from_virt_driver_missing_field'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# NOTE(pmurray): update_from_virt_driver does not require'
nl|'\n'
comment|'# all fields to be present in resources. Validation of the'
nl|'\n'
comment|'# resources data structure would be done in a different method.'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'del'
name|'resources'
op|'['
string|"'vcpus'"
op|']'
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|'('
name|'resources'
op|')'
newline|'\n'
name|'expected'
op|'='
name|'fake_compute_with_resources'
op|'.'
name|'obj_clone'
op|'('
op|')'
newline|'\n'
name|'del'
name|'expected'
op|'.'
name|'vcpus'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'base'
op|'.'
name|'obj_equal_prims'
op|'('
name|'expected'
op|','
name|'compute'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver_extra_field
dedent|''
name|'def'
name|'test_update_from_virt_driver_extra_field'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# copy in case the update has a side effect'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'resources'
op|'['
string|"'extra_field'"
op|']'
op|'='
string|"'nonsense'"
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|'('
name|'resources'
op|')'
newline|'\n'
name|'expected'
op|'='
name|'fake_compute_with_resources'
newline|'\n'
name|'self'
op|'.'
name|'assertTrue'
op|'('
name|'base'
op|'.'
name|'obj_equal_prims'
op|'('
name|'expected'
op|','
name|'compute'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_update_from_virt_driver_bad_value
dedent|''
name|'def'
name|'test_update_from_virt_driver_bad_value'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
comment|'# copy in case the update has a side effect'
nl|'\n'
indent|' '
name|'resources'
op|'='
name|'copy'
op|'.'
name|'deepcopy'
op|'('
name|'fake_resources'
op|')'
newline|'\n'
name|'resources'
op|'['
string|"'vcpus'"
op|']'
op|'='
string|"'nonsense'"
newline|'\n'
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertRaises'
op|'('
name|'ValueError'
op|','
nl|'\n'
name|'compute'
op|'.'
name|'update_from_virt_driver'
op|','
name|'resources'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios
dedent|''
name|'def'
name|'test_compat_allocation_ratios'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.13'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'cpu_allocation_ratio'"
op|','
name|'primitive'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'ram_allocation_ratio'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_disk_allocation_ratio
dedent|''
name|'def'
name|'test_compat_disk_allocation_ratio'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute'
op|'='
name|'compute_node'
op|'.'
name|'ComputeNode'
op|'('
op|')'
newline|'\n'
name|'primitive'
op|'='
name|'compute'
op|'.'
name|'obj_to_primitive'
op|'('
name|'target_version'
op|'='
string|"'1.15'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertNotIn'
op|'('
string|"'disk_allocation_ratio'"
op|','
name|'primitive'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios_old_compute
dedent|''
name|'def'
name|'test_compat_allocation_ratios_old_compute'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'cpu_allocation_ratio'
op|'='
number|'2.0'
op|','
name|'ram_allocation_ratio'
op|'='
number|'3.0'
op|','
nl|'\n'
name|'disk_allocation_ratio'
op|'='
number|'0.9'
op|')'
newline|'\n'
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
comment|"# old computes don't provide allocation ratios to the table"
nl|'\n'
name|'compute_dict'
op|'['
string|"'cpu_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'ram_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'disk_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'cls'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
newline|'\n'
name|'compute'
op|'='
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'compute_dict'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'2.0'
op|','
name|'compute'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'3.0'
op|','
name|'compute'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'0.9'
op|','
name|'compute'
op|'.'
name|'disk_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios_default_values
dedent|''
name|'def'
name|'test_compat_allocation_ratios_default_values'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
comment|'# new computes provide allocation ratios defaulted to 0.0'
nl|'\n'
name|'compute_dict'
op|'['
string|"'cpu_allocation_ratio'"
op|']'
op|'='
number|'0.0'
newline|'\n'
name|'compute_dict'
op|'['
string|"'ram_allocation_ratio'"
op|']'
op|'='
number|'0.0'
newline|'\n'
name|'compute_dict'
op|'['
string|"'disk_allocation_ratio'"
op|']'
op|'='
number|'0.0'
newline|'\n'
name|'cls'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
newline|'\n'
name|'compute'
op|'='
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'compute_dict'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'16.0'
op|','
name|'compute'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.5'
op|','
name|'compute'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.0'
op|','
name|'compute'
op|'.'
name|'disk_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_compat_allocation_ratios_old_compute_default_values
dedent|''
name|'def'
name|'test_compat_allocation_ratios_old_compute_default_values'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'compute_dict'
op|'='
name|'fake_compute_node'
op|'.'
name|'copy'
op|'('
op|')'
newline|'\n'
comment|"# old computes don't provide allocation ratios to the table"
nl|'\n'
name|'compute_dict'
op|'['
string|"'cpu_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'ram_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'compute_dict'
op|'['
string|"'disk_allocation_ratio'"
op|']'
op|'='
name|'None'
newline|'\n'
name|'cls'
op|'='
name|'objects'
op|'.'
name|'ComputeNode'
newline|'\n'
name|'compute'
op|'='
name|'cls'
op|'.'
name|'_from_db_object'
op|'('
name|'self'
op|'.'
name|'context'
op|','
name|'cls'
op|'('
op|')'
op|','
name|'compute_dict'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'16.0'
op|','
name|'compute'
op|'.'
name|'cpu_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.5'
op|','
name|'compute'
op|'.'
name|'ram_allocation_ratio'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
number|'1.0'
op|','
name|'compute'
op|'.'
name|'disk_allocation_ratio'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
dedent|''
name|'class'
name|'TestComputeNodeObject'
op|'('
name|'test_objects'
op|'.'
name|'_LocalTest'
op|','
nl|'\n'
DECL|class|TestComputeNodeObject
name|'_TestComputeNodeObject'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
nl|'\n'
nl|'\n'
dedent|''
name|'class'
name|'TestRemoteComputeNodeObject'
op|'('
name|'test_objects'
op|'.'
name|'_RemoteTest'
op|','
nl|'\n'
DECL|class|TestRemoteComputeNodeObject
name|'_TestComputeNodeObject'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
dedent|''
endmarker|''
end_unit | none | 1 | 1.337469 | 1 | |
server/openapi_server/core/controllers/queue_controller.py | Sage-Bionetworks/submission_service | 0 | 6613519 | import connexion
import six
from openapi_server.models.create_queue_request import CreateQueueRequest # noqa: E501
from openapi_server.models.create_queue_response import CreateQueueResponse # noqa: E501
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.list_queue_response import ListQueueResponse # noqa: E501
from openapi_server.models.queue import Queue # noqa: E501
from openapi_server.dbmodels import DbQueue
from openapi_server import util
def create_queue(create_queue_request=None):  # noqa: E501
    """Create a queue

    Creates a queue for storing and running of submissions # noqa: E501

    :param create_queue_request:
    :type create_queue_request: dict | bytes

    :rtype: CreateQueueResponse
    """
    res = None
    status = None
    try:
        try:
            # Re-parse the raw JSON body through the generated model class so
            # validation/deserialization is applied, instead of trusting the
            # connexion-injected argument.
            create_queue_request = CreateQueueRequest.from_dict(
                connexion.request.get_json()
            )
            DbQueue(
                name=create_queue_request.name,
                computeId=create_queue_request.compute_id,
                workflowFiles=create_queue_request.workflow_files,
                workflowInput=create_queue_request.workflow_input,
                submissionType=create_queue_request.submission_type
            ).save()
            # NOTE(review): queue_id is hard-coded to 1 — presumably a
            # placeholder; the id of the saved DbQueue should be returned.
            # TODO confirm.
            res = CreateQueueResponse(queue_id=1)
            status = 201
        # NOTE(review): NotUniqueError is never imported in this module (it
        # lives in mongoengine.errors), so evaluating this except clause
        # raises NameError, which the outer handler turns into a 500 instead
        # of the intended 409. Add the import to fix.
        except NotUniqueError as error:
            status = 409
            res = Error("Conflict", status, str(error))
    except Exception as error:
        status = 500
        res = Error("Internal error", status, str(error))
    return res, status
def delete_queue(queue_id):  # noqa: E501
    """Delete a queue by its ID

    Deletes the queue for a given ID # noqa: E501

    :param queue_id: The ID of the queue
    :type queue_id: str

    :rtype: None
    """
    # Generated stub: deletion is not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def get_queue(queue_id):  # noqa: E501
    """Get a queue by its ID

    Returns the queue for a given ID # noqa: E501

    :param queue_id: The ID of the queue
    :type queue_id: str

    :rtype: Queue
    """
    # Generated stub: lookup is not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def list_queues(limit=None, offset=None):  # noqa: E501
    """List the available queues

    Returns the queues # noqa: E501

    :param limit: Maximum number of results returned
    :type limit: int
    :param offset: Index of the first result that must be returned
    :type offset: int

    :rtype: ListQueueResponse
    """
    # Generated stub: pagination/listing is not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
| import connexion
import six
from openapi_server.models.create_queue_request import CreateQueueRequest # noqa: E501
from openapi_server.models.create_queue_response import CreateQueueResponse # noqa: E501
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.list_queue_response import ListQueueResponse # noqa: E501
from openapi_server.models.queue import Queue # noqa: E501
from openapi_server.dbmodels import DbQueue
from openapi_server import util
def create_queue(create_queue_request=None): # noqa: E501
"""Create a queue
Creates a queue for storing and running of submissions # noqa: E501
:param create_queue_request:
:type create_queue_request: dict | bytes
:rtype: CreateQueueResponse
"""
res = None
status = None
try:
try:
create_queue_request = CreateQueueRequest.from_dict(
connexion.request.get_json()
)
DbQueue(
name=create_queue_request.name,
computeId=create_queue_request.compute_id,
workflowFiles=create_queue_request.workflow_files,
workflowInput=create_queue_request.workflow_input,
submissionType=create_queue_request.submission_type
).save()
res = CreateQueueResponse(queue_id=1)
status = 201
except NotUniqueError as error:
status = 409
res = Error("Conflict", status, str(error))
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
def delete_queue(queue_id): # noqa: E501
"""Delete a queue by its ID
Deletes the queue for a given ID # noqa: E501
:param queue_id: The ID of the queue
:type queue_id: str
:rtype: None
"""
return 'do some magic!'
def get_queue(queue_id): # noqa: E501
"""Get a queue by its ID
Returns the queue for a given ID # noqa: E501
:param queue_id: The ID of the queue
:type queue_id: str
:rtype: Queue
"""
return 'do some magic!'
def list_queues(limit=None, offset=None): # noqa: E501
"""List the available queues
Returns the queues # noqa: E501
:param limit: Maximum number of results returned
:type limit: int
:param offset: Index of the first result that must be returned
:type offset: int
:rtype: ListQueueResponse
"""
return 'do some magic!'
| en | 0.643287 | # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 # noqa: E501 Create a queue Creates a queue for storing and running of submissions # noqa: E501 :param create_queue_request: :type create_queue_request: dict | bytes :rtype: CreateQueueResponse # noqa: E501 Delete a queue by its ID Deletes the queue for a given ID # noqa: E501 :param queue_id: The ID of the queue :type queue_id: str :rtype: None # noqa: E501 Get a queue by its ID Returns the queue for a given ID # noqa: E501 :param queue_id: The ID of the queue :type queue_id: str :rtype: Queue # noqa: E501 List the available queues Returns the queues # noqa: E501 :param limit: Maximum number of results returned :type limit: int :param offset: Index of the first result that must be returned :type offset: int :rtype: ListQueueResponse | 2.481894 | 2 |
test.py | is55555/simple-retry-decorator | 0 | 6613520 | <filename>test.py
from __future__ import print_function
from retry import retry, retry_catch
import random
def fails_the_first_3_times():  # this may be a bit convoluted, fails75percent is a lot simpler
    """Generator that yields False three times (printing "no"), then True once."""
    for _ in range(3):
        print("no")
        yield False
    print("yes")
    yield True
x = fails_the_first_3_times()
@retry(5, loud=True)
def fails3():
    # Pulls from the module-level generator `x`, which yields False three
    # times before True; retry(5) allows enough attempts to reach success.
    return next(x)
@retry(10, initial_delay=0.5, exponential_backoff=1.5, loud=True)
def fails75percent():
    """Succeed (return True) with 25% probability per attempt."""
    roll = random.random()
    return roll >= 0.75
@retry(10, initial_delay=1, exponential_backoff=1.2, loud=True)
def fails75percent_truthy():
    """Return falsy 0 with 75% probability, otherwise a truthy float in (75, 100)."""
    roll = random.random()
    return roll * 100.0 if roll >= 0.75 else 0
@retry_catch(10, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)
def fails75percent_nottruthy_raisy():
    """Raise ValueError with 75% probability; on success return a falsy value."""
    if random.random() < 0.75:
        raise ValueError("oops")
    # Return a falsy value on success to verify falsy results are accepted.
    return 0
@retry_catch(2, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)
def fails75percent_nottruthy_raisy_twice():
    """Raise ValueError with 75% probability; only two attempts, so failure is common."""
    if random.random() < 0.75:
        raise ValueError("oops")
    # Return a falsy value on success to verify falsy results are accepted.
    return 0
# --- Demonstration script: exercise each decorated function once. ---

print("calling fails75percent with retry(10, initial_delay=0.5, exponential_backoff=1.5, loud=True)")
print(fails75percent())
print("calling fails75percent_truthy with retry(10, initial_delay=1, exponential_backoff=1.2, loud=True)")
print(fails75percent_truthy())
print("calling fails3 with retry(5)")
print(fails3())

# The raisy variants raise ValueError on failure; with 10 attempts success is
# very likely, but the original exception propagates if all retries fail.
try:
    print("calling fails75percent_nottruthy_raisy with " +
          "@retry_catch(10, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)")
    print(fails75percent_nottruthy_raisy())
except ValueError:
    print("Failed with the expected exception")

# With only two attempts this one is expected to fail most of the time.
try:
    print("calling fails75percent_nottruthy_raisy_twice with " +
          "@retry_catch(2, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)")
    print(fails75percent_nottruthy_raisy_twice())  # note that this is expected to fail often, and do it with the
    # original exception
except ValueError:
    print("Failed with the expected exception")

print("end")
| <filename>test.py
from __future__ import print_function
from retry import retry, retry_catch
import random
def fails_the_first_3_times(): # this may be a bit convoluted, fails75percent is a lot simpler
n = 3
while n > 0:
print("no")
yield False
n -= 1
print("yes")
yield True
x = fails_the_first_3_times()
@retry(5, loud=True)
def fails3():
return next(x)
@retry(10, initial_delay=0.5, exponential_backoff=1.5, loud=True)
def fails75percent():
if random.random() < 0.75:
return False
else:
return True
@retry(10, initial_delay=1, exponential_backoff=1.2, loud=True)
def fails75percent_truthy():
r = random.random()
if r < 0.75:
return 0
else:
return r * 100.0
@retry_catch(10, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)
def fails75percent_nottruthy_raisy():
r = random.random()
if r < 0.75:
raise ValueError("oops")
else:
return 0 # return a falsy value on success to test this is fine
@retry_catch(2, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)
def fails75percent_nottruthy_raisy_twice():
r = random.random()
if r < 0.75:
raise ValueError("oops")
else:
return 0 # return a falsy value on success to test this is fine
print("calling fails75percent with retry(10, initial_delay=0.5, exponential_backoff=1.5, loud=True)")
print(fails75percent())
print("calling fails75percent_truthy with retry(10, initial_delay=1, exponential_backoff=1.2, loud=True)")
print(fails75percent_truthy())
print("calling fails3 with retry(5)")
print(fails3())
try:
print("calling fails75percent_nottruthy_raisy with " +
"@retry_catch(10, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)")
print(fails75percent_nottruthy_raisy())
except ValueError:
print("Failed with the expected exception")
try:
print("calling fails75percent_nottruthy_raisy_twice with " +
"@retry_catch(2, exception=ValueError, initial_delay=1, exponential_backoff=1.2, loud=True)")
print(fails75percent_nottruthy_raisy_twice()) # note that this is expected to fail often, and do it with the
# original exception
except ValueError:
print("Failed with the expected exception")
print("end")
| en | 0.894341 | # this may be a bit convoluted, fails75percent is a lot simpler # return a falsy value on success to test this is fine # return a falsy value on success to test this is fine # note that this is expected to fail often, and do it with the # original exception | 3.248737 | 3 |
pyqt_hidable_menubar/__init__.py | yjg30737/pyqt-hidable-menubar | 0 | 6613521 | from .hidableMenuBar import HidableMenuBar | from .hidableMenuBar import HidableMenuBar | none | 1 | 1.069852 | 1 | |
regipy/plugins/plugin_template.py | shakedtanchuma/regipy | 0 | 6613522 | <reponame>shakedtanchuma/regipy<filename>regipy/plugins/plugin_template.py
import logbook
from regipy.hive_types import NTUSER_HIVE_TYPE
from regipy.plugins.plugin import Plugin
from regipy.utils import get_subkey_values_from_list
logger = logbook.Logger(__name__)
class TemplatePlugin(Plugin):
    """Skeleton for a new regipy registry plugin: copy, rename, and fill in."""

    # Unique identifier used by the plugin framework.
    NAME = 'template_plugin'
    DESCRIPTION = 'template_description'

    def can_run(self):
        """Return True when this plugin applies to the loaded hive."""
        # TODO: Choose the relevant condition - to determine if the plugin is relevant for the given hive
        return self.registry_hive.hive_type == NTUSER_HIVE_TYPE

    def run(self):
        """Extract and return the plugin's values from the hive."""
        # TODO: Return the relevant values
        raise NotImplementedError
| import logbook
from regipy.hive_types import NTUSER_HIVE_TYPE
from regipy.plugins.plugin import Plugin
from regipy.utils import get_subkey_values_from_list
logger = logbook.Logger(__name__)
class TemplatePlugin(Plugin):
NAME = 'template_plugin'
DESCRIPTION = 'template_description'
def can_run(self):
# TODO: Choose the relevant condition - to determine if the plugin is relevant for the given hive
return self.registry_hive.hive_type == NTUSER_HIVE_TYPE
def run(self):
# TODO: Return the relevant values
raise NotImplementedError | en | 0.44021 | # TODO: Choose the relevant condition - to determine if the plugin is relevant for the given hive # TODO: Return the relevant values | 2.163059 | 2 |
Mathematics/prime_numbers.py | lakshyarawal/pythonPractice | 0 | 6613523 | <gh_stars>0
""" Check if the number is prime """
import math
""" Naive Solution: """
def is_prime(a) -> bool:
    """Return True if ``a`` is prime, by trial division over [2, a) — O(a).

    :param a: integer to test
    :return: True when ``a`` is prime, False otherwise
    """
    # Numbers below 2 (0, 1 and negatives) are not prime by definition; the
    # original implementation fell through its empty loop and reported them
    # as prime.
    if a < 2:
        return False
    for i in range(2, a):
        if a % i == 0:
            # Found a proper divisor: composite.
            return False
    return True
""" Efficient Solution: replace a by int(math.sqrt(a)) as divisors occur in pairs only """
""" More efficient solution: Check basic divisions like 2 and 3 earlier """
def is_prime_eff(a) -> bool:
    """Return True if ``a`` is prime, in O(sqrt(a)) via the 6k±1 optimization.

    Every prime > 3 has the form 6k-1 or 6k+1, so after rejecting multiples
    of 2 and 3 only those candidates need testing.

    :param a: integer to test
    :return: True when ``a`` is prime, False otherwise
    """
    # Numbers below 2 are not prime; the original reported 1 as prime.
    if a < 2:
        return False
    if a == 2 or a == 3:
        return True
    if a % 2 == 0 or a % 3 == 0:
        return False
    # Test divisors 6k-1 (i) and 6k+1 (i+2) up to and INCLUDING sqrt(a).
    # The original used range(5, int(math.sqrt(a)), 6), which excludes the
    # square root itself, so squares of primes such as 25 were reported prime.
    for i in range(5, int(math.sqrt(a)) + 1, 6):
        if a % i == 0 or a % (i + 2) == 0:
            return False
    return True
def main():
    """Read an integer from stdin and report primality using both implementations."""
    value = int(input("Enter your value: "))
    # Run the naive and the optimized checker; both should agree.
    for check in (is_prime, is_prime_eff):
        if check(value):
            print("It is a prime number")
        else:
            print("It is not a prime number")
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| """ Check if the number is prime """
import math
""" Naive Solution: """
def is_prime(a) -> int:
is_prime_num = True
for i in range(2, a):
if a % i == 0:
is_prime_num = False
return is_prime_num
return is_prime_num
""" Efficient Solution: replace a by int(math.sqrt(a)) as divisors occur in pairs only """
""" More efficient solution: Check basic divisions like 2 and 3 earlier """
def is_prime_eff(a) -> int:
is_prime_eff_num = True
if a == 2 or a == 3:
return is_prime_eff_num
if a % 2 == 0 or a % 3 == 0:
is_prime_eff_num = False
return is_prime_eff_num
for i in range(5, int(math.sqrt(a)), 6):
if a % i == 0 or a % (i+2) == 0:
is_prime_eff_num = False
return is_prime_eff_num
return is_prime_eff_num
def main():
val1 = int(input("Enter your value: "))
if is_prime(val1):
print("It is a prime number")
else:
print("It is not a prime number")
if is_prime_eff(val1):
print("It is a prime number")
else:
print("It is not a prime number")
# Using the special variable
# __name__
if __name__ == "__main__":
main() | en | 0.805371 | Check if the number is prime Naive Solution: Efficient Solution: replace a by int(math.sqrt(a)) as divisors occur in pairs only More efficient solution: Check basic divisions like 2 and 3 earlier # Using the special variable # __name__ | 4.189486 | 4 |
supports/pyload/src/pyload/plugins/downloaders/DebridItaliaCom.py | LuckyNicky/pycrawler | 1 | 6613524 | <reponame>LuckyNicky/pycrawler
# -*- coding: utf-8 -*-
import re
from ..base.multi_downloader import MultiDownloader
class DebridItaliaCom(MultiDownloader):
    """Pyload multi-downloader plugin for the debriditalia.com link debrider."""

    __name__ = "DebridItaliaCom"
    __type__ = "downloader"
    __version__ = "0.25"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __pattern__ = r"https?://(?:www\.|s\d+\.)?debriditalia\.com/dl/\d+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", False),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
        ("revert_failed", "bool", "Revert to standard download if fails", True),
    ]

    __description__ = """Debriditalia.com multi-downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("stickell", "<EMAIL>"),
        ("<NAME>", "<EMAIL>"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]

    API_URL = "https://debriditalia.com/api.php"

    def api_response(self, method, **kwargs):
        """Call the debriditalia API with *method* enabled as a flag parameter."""
        params = dict(kwargs)
        params[method] = ""
        return self.load(self.API_URL, get=params)

    def handle_premium(self, pyfile):
        """Ask the API to generate a premium download link for *pyfile*."""
        self.data = self.api_response(
            "generate",
            link=pyfile.url,
            u=self.account.user,
            p=self.account.info["login"]["password"],
        )

        error_match = re.search(r"ERROR:(.*)", self.data)
        if error_match is None:
            # No error marker: the response body is the direct link.
            self.link = self.data
            return

        reason = error_match.group(1).strip()
        if reason in ("not_available", "not_supported"):
            self.offline()
        else:
            self.fail(reason)
| # -*- coding: utf-8 -*-
import re
from ..base.multi_downloader import MultiDownloader
class DebridItaliaCom(MultiDownloader):
__name__ = "DebridItaliaCom"
__type__ = "downloader"
__version__ = "0.25"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.|s\d+\.)?debriditalia\.com/dl/\d+"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", False),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
("revert_failed", "bool", "Revert to standard download if fails", True),
]
__description__ = """Debriditalia.com multi-downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("stickell", "<EMAIL>"),
("<NAME>", "<EMAIL>"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
API_URL = "https://debriditalia.com/api.php"
def api_response(self, method, **kwargs):
kwargs[method] = ""
return self.load(self.API_URL, get=kwargs)
def handle_premium(self, pyfile):
self.data = self.api_response(
"generate",
link=pyfile.url,
u=self.account.user,
p=self.account.info["login"]["password"],
)
m = re.search(r"ERROR:(.*)", self.data)
if m is None:
self.link = self.data
else:
error = m.group(1).strip()
if error in ("not_available", "not_supported"):
self.offline()
else:
self.fail(error) | en | 0.651089 | # -*- coding: utf-8 -*- Debriditalia.com multi-downloader plugin | 2.184592 | 2 |
flax/farmer/pooling/constants.py | felixbrucker/flax-blockchain | 9 | 6613525 | <reponame>felixbrucker/flax-blockchain<filename>flax/farmer/pooling/constants.py
from flax.util.ints import uint64
constants = {
"POOL_SUB_SLOT_ITERS": uint64(36718720)
}
| from flax.util.ints import uint64
constants = {
"POOL_SUB_SLOT_ITERS": uint64(36718720)
} | none | 1 | 1.118526 | 1 | |
filename_trimmer.py | Novaki92/FileName-Trimmer | 0 | 6613526 | <gh_stars>0
import os
# Interactive tool: strip a given prefix (head) and suffix (tail) from every
# file in the current directory that has the chosen extension.
head = input("Enter beginning of file to be trimmed: ")
tail = input("Enter end of file to be trimmed (Do not include file etension) : ")
ext = input("Enter the file extension (.exe, .avi, .jpg, etc...): ")

# Default to .txt and normalize the extension to start with a dot.
if ext == "":
    ext = ".txt"
if "." not in ext:
    ext = "." + ext

# Pass 1: drop the leading `head` from matching file names.
for filename in os.listdir("."):
    if filename.endswith(ext):
        if filename.startswith(head):
            os.rename(filename, filename[len(head):])

# Pass 2: drop the trailing `tail` together with the extension (the
# extension is restored in pass 3).
# NOTE(review): renaming to an empty path typically raises OSError, not
# FileNotFoundError — TODO confirm this guard actually fires as intended.
for filename in os.listdir("."):
    if filename.endswith(ext):
        if filename.endswith(tail + ext):
            try:
                os.rename(filename, filename[:-len(tail + ext)])
            except FileNotFoundError:
                print("Tried to create blank file name...")
                break;

# Pass 3: restore the extension on files stripped bare by pass 2.
for filename in os.listdir("."):
    if "." not in filename:
        os.rename(filename, filename + ext)

# Show the resulting file names.
for filename in os.listdir("."):
    if filename.endswith(ext):
        print(filename)

input("Press any key to exit...")
| import os
head = input("Enter beginning of file to be trimmed: ")
tail = input("Enter end of file to be trimmed (Do not include file etension) : ")
ext = input("Enter the file extension (.exe, .avi, .jpg, etc...): ")
if ext == "":
ext = ".txt"
if "." not in ext:
ext = "." + ext
for filename in os.listdir("."):
if filename.endswith(ext):
if filename.startswith(head):
os.rename(filename, filename[len(head):])
for filename in os.listdir("."):
if filename.endswith(ext):
if filename.endswith(tail + ext):
try:
os.rename(filename, filename[:-len(tail + ext)])
except FileNotFoundError:
print("Tried to create blank file name...")
break;
for filename in os.listdir("."):
if "." not in filename:
os.rename(filename, filename + ext)
for filename in os.listdir("."):
if filename.endswith(ext):
print(filename)
input("Press any key to exit...") | none | 1 | 3.714067 | 4 | |
symbolic_behaviour_benchmark/__init__.py | Near32/SymbolicBehaviourBenchmark | 0 | 6613527 | from .envs import *
from .utils import *
from .rule_based_agents import *
import gym
from gym.envs.registration import register
env_dict = gym.envs.registration.registry.env_specs.copy()
for env in env_dict:
if 'SymbolicBehaviourBenchmark' in env:
del gym.envs.registration.registry.env_specs[env]
register(
id='SymbolicBehaviourBenchmark-ReceptiveConstructiveTestEnv-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_receptive_constructive_test_env'
)
register(
id='SymbolicBehaviourBenchmark-ReceptiveConstructiveTestEnv-2Shots-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_receptive_constructive_test_env_2shots'
)
register(
id='SymbolicBehaviourBenchmark-RecallTestEnv-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_recall_test_env'
)
register(
id='SymbolicBehaviourBenchmark-RecallTestEnv-2Shots-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_recall_test_env_2shots'
)
| from .envs import *
from .utils import *
from .rule_based_agents import *
import gym
from gym.envs.registration import register
env_dict = gym.envs.registration.registry.env_specs.copy()
for env in env_dict:
if 'SymbolicBehaviourBenchmark' in env:
del gym.envs.registration.registry.env_specs[env]
register(
id='SymbolicBehaviourBenchmark-ReceptiveConstructiveTestEnv-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_receptive_constructive_test_env'
)
register(
id='SymbolicBehaviourBenchmark-ReceptiveConstructiveTestEnv-2Shots-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_receptive_constructive_test_env_2shots'
)
register(
id='SymbolicBehaviourBenchmark-RecallTestEnv-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_recall_test_env'
)
register(
id='SymbolicBehaviourBenchmark-RecallTestEnv-2Shots-v0',
entry_point='symbolic_behaviour_benchmark.envs:generate_recall_test_env_2shots'
)
| none | 1 | 1.983778 | 2 | |
cdenn/__init__.py | wallowind/classification-of-depression-by-EEG-signals-using-neural-networks | 4 | 6613528 | <filename>cdenn/__init__.py<gh_stars>1-10
from . import train_scripts
from .lib import *
| <filename>cdenn/__init__.py<gh_stars>1-10
from . import train_scripts
from .lib import *
| none | 1 | 1.162079 | 1 | |
dnd/constants/creatures.py | Saevon/webdnd | 4 | 6613529 | <filename>dnd/constants/creatures.py
"""
Non sorted constants
"""
#TODO: move any existing constants to a fitting module

# (abbreviation, display name) choices for the six standard ability scores,
# in conventional order; suitable for a Django-style "choices" field.
ATTRIBUTES = (
    ("str", "Strength"),
    ("con", "Constitution"),
    ("dex", "Dexterity"),
    ("int", "Intelligence"),
    ("wis", "Wisdom"),
    ("cha", "Charisma"),
)

# (code, display name) choices for creature size categories, smallest first.
SIZES = (
    ("f", "Fine"),
    ("d", "Diminuitive"),
    ("t", "Tiny"),
    ("s", "Small"),
    ("m", "Medium"),
    ("l", "Large"),
    ("h", "Huge"),
    ("g", "Gargantuan"),
    ("c", "Colossal"),
)
| <filename>dnd/constants/creatures.py
"""
Non sorted constants
"""
#TODO: move any existing constants to a fitting module
ATTRIBUTES = (
("str", "Strength"),
("con", "Constitution"),
("dex", "Dexterity"),
("int", "Intelligence"),
("wis", "Wisdom"),
("cha", "Charisma"),
)
SIZES = (
("f", "Fine"),
("d", "Diminuitive"),
("t", "Tiny"),
("s", "Small"),
("m", "Medium"),
("l", "Large"),
("h", "Huge"),
("g", "Gargantuan"),
("c", "Colossal"),
)
| en | 0.354671 | Non sorted constants #TODO: move any existing constants to a fitting module | 1.932788 | 2 |
notebooks/converttxttocsv.py | haithienld/AutoModelImproving | 0 | 6613530 | import os
import pandas as pd
dirpath = 'dataset/'
output = 'poses.csv'
pd.concat(
pd.read_csv(os.path.join(dirpath, fname), sep=',', index_col=0, header=None)
for fname in sorted(os.listdir(dirpath))
).to_csv(output)
| import os
import pandas as pd
dirpath = 'dataset/'
output = 'poses.csv'
pd.concat(
pd.read_csv(os.path.join(dirpath, fname), sep=',', index_col=0, header=None)
for fname in sorted(os.listdir(dirpath))
).to_csv(output)
| none | 1 | 2.696863 | 3 | |
backend/migrations/0002_auto_20210709_1900.py | saad4software/MMSS-Backend | 0 | 6613531 | # Generated by Django 3.2 on 2021-07-09 16:00
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; normally this file should not be edited by hand.
    #
    # NOTE(review): the datetime defaults below were frozen at
    # `makemigrations` time (2021-07-09 19:00). The model fields presumably
    # intended a callable such as django.utils.timezone.now; as written,
    # every row created through these defaults gets the same fixed date.
    # Fix the models and generate a new migration instead of editing this one.

    dependencies = [
        ('backend', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='employee',
            name='gender',
            field=models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True),
        ),
        migrations.AlterField(
            model_name='motivation',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 7, 9, 19, 0, 33, 148579)),
        ),
        migrations.AlterField(
            model_name='payment',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 7, 9, 19, 0, 33, 164210)),
        ),
        # NOTE(review): DateField's first keyword here is verbose_name —
        # passing a datetime as a verbose name looks like a generation
        # artifact of a misdeclared model field. TODO confirm.
        migrations.AlterField(
            model_name='subscription',
            name='from_date',
            field=models.DateField(verbose_name=datetime.datetime(2021, 7, 9, 19, 0, 33, 148579)),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='date',
            field=models.DateField(verbose_name=datetime.datetime(2021, 7, 9, 19, 0, 33, 164210)),
        ),
    ]
| # Generated by Django 3.2 on 2021-07-09 16:00
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='gender',
field=models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True),
),
migrations.AlterField(
model_name='motivation',
name='date',
field=models.DateField(default=datetime.datetime(2021, 7, 9, 19, 0, 33, 148579)),
),
migrations.AlterField(
model_name='payment',
name='date',
field=models.DateField(default=datetime.datetime(2021, 7, 9, 19, 0, 33, 164210)),
),
migrations.AlterField(
model_name='subscription',
name='from_date',
field=models.DateField(verbose_name=datetime.datetime(2021, 7, 9, 19, 0, 33, 148579)),
),
migrations.AlterField(
model_name='transaction',
name='date',
field=models.DateField(verbose_name=datetime.datetime(2021, 7, 9, 19, 0, 33, 164210)),
),
]
| en | 0.851531 | # Generated by Django 3.2 on 2021-07-09 16:00 | 1.768157 | 2 |
lib/Utility/metrics.py | cominger/OCDVAE_ContinualLearning | 1 | 6613532 | import torch
import numpy as np
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self):
self.reset()
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConfusionMeter:
"""
Maintains a confusion matrix for a given calssification problem.
The ConfusionMeter constructs a confusion matrix for a multi-class
classification problems. It does not support multi-label, multi-class problems:
for such problems, please use MultiLabelConfusionMeter.
Parameters:
k (int): number of classes in the classification problem
normalized (boolean): Determines whether or not the confusion matrix
is normalized or not
Copied from https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
to avoid installation of the entire torchnet package!
BSD 3-Clause License
Copyright (c) 2017- <NAME>,
Copyright (c) 2017- <NAME>,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def __init__(self, k, normalized=False):
super(ConfusionMeter, self).__init__()
self.conf = np.ndarray((k, k), dtype=np.int32)
self.normalized = normalized
self.k = k
self.reset()
def reset(self):
self.conf.fill(0)
def add(self, predicted, target):
"""
Computes the confusion matrix of K x K size where K is no of classes
Paramaters:
predicted (tensor): Can be an N x K tensor of predicted scores obtained from
the model for N examples and K classes or an N-tensor of
integer values between 0 and K-1.
target (tensor): Can be a N-tensor of integer values assumed to be integer
values between 0 and K-1 or N x K tensor, where targets are
assumed to be provided as one-hot vectors
"""
predicted = predicted.cpu().numpy()
target = target.cpu().numpy()
assert predicted.shape[0] == target.shape[0], \
'number of targets and predicted outputs do not match'
if np.ndim(predicted) != 1:
assert predicted.shape[1] == self.k, \
'number of predictions does not match size of confusion matrix'
predicted = np.argmax(predicted, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
'predicted values are not between 1 and k'
onehot_target = np.ndim(target) != 1
if onehot_target:
assert target.shape[1] == self.k, \
'Onehot target does not match size of confusion matrix'
assert (target >= 0).all() and (target <= 1).all(), \
'in one-hot encoding, target values should be 0 or 1'
assert (target.sum(1) == 1).all(), \
'multi-label setting is not supported'
target = np.argmax(target, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
'predicted values are not between 0 and k-1'
# hack for bin-counting 2 arrays together
x = predicted + self.k * target
bincount_2d = np.bincount(x.astype(np.int32),
minlength=self.k ** 2)
assert bincount_2d.size == self.k ** 2
conf = bincount_2d.reshape((self.k, self.k))
self.conf += conf
def value(self):
"""
Returns:
Confustion matrix of K rows and K columns, where rows corresponds
to ground-truth targets and columns corresponds to predicted
targets.
"""
if self.normalized:
conf = self.conf.astype(np.float32)
return conf / conf.sum(1).clip(min=1e-12)[:, None]
else:
return self.conf
def accuracy(output, target, topk=(1,)):
"""
Evaluates a model's top k accuracy
Parameters:
output (torch.autograd.Variable): model output
target (torch.autograd.Variable): ground-truths/labels
topk (list): list of integers specifying top-k precisions
to be computed
Returns:
float: percentage of correct predictions
"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
import os
import pathlib
from scipy import linalg
from .inception import InceptionV3
from torch.nn.functional import adaptive_avg_pool2d
class FID:
def __init__(self, device, batch_size, workers, is_gpu, dims = 2048):
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
self.model = InceptionV3([block_idx]).to(device)
self.model.eval()
# self.model = torch.nn.DataParallel(model).to(device)
self.device = device
self.batch_size = batch_size
self.workers = workers
self.is_gpu = is_gpu
self.dims = dims
def _get_features(self, inp):
pred = self.model(inp)[0]
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
return pred
def _get_Dataset_features(self, Dataset):
loader = torch.utils.data.DataLoader(Dataset, batch_size=self.batch_size, shuffle=False,
num_workers=self.workers, pin_memory=self.is_gpu)
act = np.empty((len(Dataset), dims))
with torch.no_grad():
for i, (inp, target) in enumerate(loader):
start = i*self.batch_size
end = start + target.size(0)
inp = inp.to(self.device)
pred = self._get_features(inp)
act[start:end] = pred.cpu().numpy().reshape(pred.size(0), -1)
mu, sigma = self._get_stat(act)
def _get_stat(self, act):
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def _get_FID_Features(self, act1, act2):
mu_gt, sigma_gt = self._get_stat(act1)
mu_gen, sigma_gen = self._get_stat(act2)
fid_value = self.calculate_frechet_distance(mu_gt, sigma_gt, mu_gen, sigma_gen)
return fid_value
def _get_FID_Dataset(self, gt_datasets, gen_datasets):
#Feature Calculations
mu_gt, sigma_gt = self._get_Dataset_features(gt_datasets)
mu_gen, sigma_gen = self._get_Dataset_features(gen_datasets)
fid_value = self.calculate_frechet_distance(mu_gt, sigma_gt, mu_gen, sigma_gen)
return fid_value | import torch
import numpy as np
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self):
self.reset()
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class ConfusionMeter:
"""
Maintains a confusion matrix for a given calssification problem.
The ConfusionMeter constructs a confusion matrix for a multi-class
classification problems. It does not support multi-label, multi-class problems:
for such problems, please use MultiLabelConfusionMeter.
Parameters:
k (int): number of classes in the classification problem
normalized (boolean): Determines whether or not the confusion matrix
is normalized or not
Copied from https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py
to avoid installation of the entire torchnet package!
BSD 3-Clause License
Copyright (c) 2017- <NAME>,
Copyright (c) 2017- <NAME>,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def __init__(self, k, normalized=False):
super(ConfusionMeter, self).__init__()
self.conf = np.ndarray((k, k), dtype=np.int32)
self.normalized = normalized
self.k = k
self.reset()
def reset(self):
self.conf.fill(0)
def add(self, predicted, target):
"""
Computes the confusion matrix of K x K size where K is no of classes
Paramaters:
predicted (tensor): Can be an N x K tensor of predicted scores obtained from
the model for N examples and K classes or an N-tensor of
integer values between 0 and K-1.
target (tensor): Can be a N-tensor of integer values assumed to be integer
values between 0 and K-1 or N x K tensor, where targets are
assumed to be provided as one-hot vectors
"""
predicted = predicted.cpu().numpy()
target = target.cpu().numpy()
assert predicted.shape[0] == target.shape[0], \
'number of targets and predicted outputs do not match'
if np.ndim(predicted) != 1:
assert predicted.shape[1] == self.k, \
'number of predictions does not match size of confusion matrix'
predicted = np.argmax(predicted, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
'predicted values are not between 1 and k'
onehot_target = np.ndim(target) != 1
if onehot_target:
assert target.shape[1] == self.k, \
'Onehot target does not match size of confusion matrix'
assert (target >= 0).all() and (target <= 1).all(), \
'in one-hot encoding, target values should be 0 or 1'
assert (target.sum(1) == 1).all(), \
'multi-label setting is not supported'
target = np.argmax(target, 1)
else:
assert (predicted.max() < self.k) and (predicted.min() >= 0), \
'predicted values are not between 0 and k-1'
# hack for bin-counting 2 arrays together
x = predicted + self.k * target
bincount_2d = np.bincount(x.astype(np.int32),
minlength=self.k ** 2)
assert bincount_2d.size == self.k ** 2
conf = bincount_2d.reshape((self.k, self.k))
self.conf += conf
def value(self):
"""
Returns:
Confustion matrix of K rows and K columns, where rows corresponds
to ground-truth targets and columns corresponds to predicted
targets.
"""
if self.normalized:
conf = self.conf.astype(np.float32)
return conf / conf.sum(1).clip(min=1e-12)[:, None]
else:
return self.conf
def accuracy(output, target, topk=(1,)):
"""
Evaluates a model's top k accuracy
Parameters:
output (torch.autograd.Variable): model output
target (torch.autograd.Variable): ground-truths/labels
topk (list): list of integers specifying top-k precisions
to be computed
Returns:
float: percentage of correct predictions
"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
import os
import pathlib
from scipy import linalg
from .inception import InceptionV3
from torch.nn.functional import adaptive_avg_pool2d
class FID:
def __init__(self, device, batch_size, workers, is_gpu, dims = 2048):
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
self.model = InceptionV3([block_idx]).to(device)
self.model.eval()
# self.model = torch.nn.DataParallel(model).to(device)
self.device = device
self.batch_size = batch_size
self.workers = workers
self.is_gpu = is_gpu
self.dims = dims
def _get_features(self, inp):
pred = self.model(inp)[0]
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
return pred
def _get_Dataset_features(self, Dataset):
loader = torch.utils.data.DataLoader(Dataset, batch_size=self.batch_size, shuffle=False,
num_workers=self.workers, pin_memory=self.is_gpu)
act = np.empty((len(Dataset), dims))
with torch.no_grad():
for i, (inp, target) in enumerate(loader):
start = i*self.batch_size
end = start + target.size(0)
inp = inp.to(self.device)
pred = self._get_features(inp)
act[start:end] = pred.cpu().numpy().reshape(pred.size(0), -1)
mu, sigma = self._get_stat(act)
def _get_stat(self, act):
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def _get_FID_Features(self, act1, act2):
mu_gt, sigma_gt = self._get_stat(act1)
mu_gen, sigma_gen = self._get_stat(act2)
fid_value = self.calculate_frechet_distance(mu_gt, sigma_gt, mu_gen, sigma_gen)
return fid_value
def _get_FID_Dataset(self, gt_datasets, gen_datasets):
#Feature Calculations
mu_gt, sigma_gt = self._get_Dataset_features(gt_datasets)
mu_gen, sigma_gen = self._get_Dataset_features(gen_datasets)
fid_value = self.calculate_frechet_distance(mu_gt, sigma_gt, mu_gen, sigma_gen)
return fid_value | en | 0.7362 | Computes and stores the average and current value Maintains a confusion matrix for a given calssification problem. The ConfusionMeter constructs a confusion matrix for a multi-class classification problems. It does not support multi-label, multi-class problems: for such problems, please use MultiLabelConfusionMeter. Parameters: k (int): number of classes in the classification problem normalized (boolean): Determines whether or not the confusion matrix is normalized or not Copied from https://github.com/pytorch/tnt/blob/master/torchnet/meter/confusionmeter.py to avoid installation of the entire torchnet package! BSD 3-Clause License Copyright (c) 2017- <NAME>, Copyright (c) 2017- <NAME>, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Computes the confusion matrix of K x K size where K is no of classes Paramaters: predicted (tensor): Can be an N x K tensor of predicted scores obtained from the model for N examples and K classes or an N-tensor of integer values between 0 and K-1. target (tensor): Can be a N-tensor of integer values assumed to be integer values between 0 and K-1 or N x K tensor, where targets are assumed to be provided as one-hot vectors # hack for bin-counting 2 arrays together Returns: Confustion matrix of K rows and K columns, where rows corresponds to ground-truth targets and columns corresponds to predicted targets. Evaluates a model's top k accuracy Parameters: output (torch.autograd.Variable): model output target (torch.autograd.Variable): ground-truths/labels topk (list): list of integers specifying top-k precisions to be computed Returns: float: percentage of correct predictions # self.model = torch.nn.DataParallel(model).to(device) Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by <NAME>. Params: -- mu1 : Numpy array containing the activations of a layer of the inception net (like returned by the function 'get_predictions') for generated samples. -- mu2 : The sample mean over activations, precalculated on an representative data set. 
-- sigma1: The covariance matrix over activations for generated samples. -- sigma2: The covariance matrix over activations, precalculated on an representative data set. Returns: -- : The Frechet Distance. # Product might be almost singular # Numerical error might give slight imaginary component #Feature Calculations | 3.122766 | 3 |
src/desktopautomator/__main__.py | vadimtitov/desktop-automator | 0 | 6613533 | """Main file to run desktop automator."""
from desktopautomator.config import config
from desktopautomator.setup import pre_setup
def main() -> None:
"""Run Desktop Automator."""
pre_setup()
print(config)
if __name__ == "__main__":
main()
| """Main file to run desktop automator."""
from desktopautomator.config import config
from desktopautomator.setup import pre_setup
def main() -> None:
"""Run Desktop Automator."""
pre_setup()
print(config)
if __name__ == "__main__":
main()
| en | 0.704 | Main file to run desktop automator. Run Desktop Automator. | 1.466501 | 1 |
app.py | D00Movenok/ctf-participation-bot | 8 | 6613534 | <filename>app.py
import logging
from common.database import engine
from common.models import Base
from config import config
from workers import CtftimeMonitor, EventChecker, TelegramMonitor
def main():
if config['DEBUG']:
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s[%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG)
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO,
format='%(levelname)s[%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
Base.metadata.create_all(engine)
CtftimeMonitor().start()
TelegramMonitor().start()
EventChecker().start()
if __name__ == '__main__':
main()
| <filename>app.py
import logging
from common.database import engine
from common.models import Base
from config import config
from workers import CtftimeMonitor, EventChecker, TelegramMonitor
def main():
if config['DEBUG']:
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s[%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG)
logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO,
format='%(levelname)s[%(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
Base.metadata.create_all(engine)
CtftimeMonitor().start()
TelegramMonitor().start()
EventChecker().start()
if __name__ == '__main__':
main()
| none | 1 | 2.128663 | 2 | |
pier/Parser-py/CompraParser.py | gustavodsf/py_projects | 0 | 6613535 | import CestaDAO
import ColecaoDAO
import CorDAO
import DateDAO
import EstadoDAO
import FaixaEtariaDAO
import GeneroDAO
import LojaDAO
import MaterialDAO
import ModeloDAO
import OperacaoDAO
import TamanhoDAO
import TipoDAO
import ItemDAO
import ReadFile
import re
import CompraDAO
class CompraParser:
def __init__(self):
self.cestaDAO = CestaDAO.CestaDAO()
self.corDAO = CorDAO.CorDAO()
self.colecaoDAO = ColecaoDAO.ColecaoDAO()
self.dateDAO = DateDAO.DateDAO()
self.estadoDAO = EstadoDAO.EstadoDAO()
self.faixaEtariaDAO = FaixaEtariaDAO.FaixaEtariaDAO()
self.generoDAO = GeneroDAO.GeneroDAO()
self.lojaDAO = LojaDAO.LojaDAO()
self.materialDAO = MaterialDAO.MaterialDAO()
self.modeloDAO = ModeloDAO.ModeloDAO()
self.operacaoDAO = OperacaoDAO.OperacaoDAO()
self.itemDAO = ItemDAO.ItemDAO()
self.tamanhoDAO = TamanhoDAO.TamanhoDAO()
self.tipoDAO = TipoDAO.TipoDAO()
self.compraDAO = CompraDAO.CompraDAO()
self.colecaoDAO.retrieveAll()
self.corDAO.retrieveAll()
self.estadoDAO.retrieveAll()
self.faixaEtariaDAO.retrieveAll()
self.generoDAO.retrieveAll()
self.lojaDAO.retrieveAll()
self.materialDAO.retrieveAll()
self.modeloDAO.retrieveAll()
self.operacaoDAO.retrieveAll()
self.itemDAO.retrieveAll()
self.tamanhoDAO.retrieveAll()
self.tipoDAO.retrieveAll()
def parseItem(self,path,counter):
self.cestaDAO.retrieveAll(counter)
self.dateDAO.retrieveAll()
lojaId = self.lojaDAO.lojaDict[counter]
read = ReadFile.ReadFile()
lines = read.readCsvFile(path)
line = lines.readline()
while line:
if "[S]" in line or "[DS]" in line:
operacaoId = self.operacaoDAO.wichOperacao(line)
line = re.sub(' +',' ',line)
line = line.split(' ')
cestaId = self.cestaDAO.cestaDict[int(line[2])]
date = line[0].replace("\"","")
dateId = self.dateDAO.dateDict[date]
if "Código" in line:
line = lines.readline()
while "Destino" not in line:
colecaoId = self.colecaoDAO.wichColecao(line)
corId = self.corDAO.wichCor(line)
estadoId = self.estadoDAO.wichEstado(line)
faixaEtariaId = self.faixaEtariaDAO.wichFaixaEtaria(line)
generoId = self.generoDAO.wichGenero(line)
materialId = self.materialDAO.wichMaterial(line)
modeloId = self.modeloDAO.wichModelo(line)
tamanhoId = self.tamanhoDAO.wichTamanho(line)
itemId = self.itemDAO.wichItem(line)
tipoId = self.tipoDAO.wichTipo(self.itemDAO.wichTipo(itemId))
line = re.sub(' +',' ',line)
line = line.split(' ')
valor=line[len(line)-3]
qtd =line[len(line)-4]
if itemId != 0:
self.compraDAO.saveCompra(lojaId,cestaId,dateId,operacaoId,colecaoId,corId,estadoId,faixaEtariaId,generoId,materialId,modeloId,tamanhoId,itemId,tipoId,valor,qtd)
line = lines.readline()
line = lines.readline()
lines.close()
self.compraDAO.pg.commit() | import CestaDAO
import ColecaoDAO
import CorDAO
import DateDAO
import EstadoDAO
import FaixaEtariaDAO
import GeneroDAO
import LojaDAO
import MaterialDAO
import ModeloDAO
import OperacaoDAO
import TamanhoDAO
import TipoDAO
import ItemDAO
import ReadFile
import re
import CompraDAO
class CompraParser:
def __init__(self):
self.cestaDAO = CestaDAO.CestaDAO()
self.corDAO = CorDAO.CorDAO()
self.colecaoDAO = ColecaoDAO.ColecaoDAO()
self.dateDAO = DateDAO.DateDAO()
self.estadoDAO = EstadoDAO.EstadoDAO()
self.faixaEtariaDAO = FaixaEtariaDAO.FaixaEtariaDAO()
self.generoDAO = GeneroDAO.GeneroDAO()
self.lojaDAO = LojaDAO.LojaDAO()
self.materialDAO = MaterialDAO.MaterialDAO()
self.modeloDAO = ModeloDAO.ModeloDAO()
self.operacaoDAO = OperacaoDAO.OperacaoDAO()
self.itemDAO = ItemDAO.ItemDAO()
self.tamanhoDAO = TamanhoDAO.TamanhoDAO()
self.tipoDAO = TipoDAO.TipoDAO()
self.compraDAO = CompraDAO.CompraDAO()
self.colecaoDAO.retrieveAll()
self.corDAO.retrieveAll()
self.estadoDAO.retrieveAll()
self.faixaEtariaDAO.retrieveAll()
self.generoDAO.retrieveAll()
self.lojaDAO.retrieveAll()
self.materialDAO.retrieveAll()
self.modeloDAO.retrieveAll()
self.operacaoDAO.retrieveAll()
self.itemDAO.retrieveAll()
self.tamanhoDAO.retrieveAll()
self.tipoDAO.retrieveAll()
def parseItem(self,path,counter):
self.cestaDAO.retrieveAll(counter)
self.dateDAO.retrieveAll()
lojaId = self.lojaDAO.lojaDict[counter]
read = ReadFile.ReadFile()
lines = read.readCsvFile(path)
line = lines.readline()
while line:
if "[S]" in line or "[DS]" in line:
operacaoId = self.operacaoDAO.wichOperacao(line)
line = re.sub(' +',' ',line)
line = line.split(' ')
cestaId = self.cestaDAO.cestaDict[int(line[2])]
date = line[0].replace("\"","")
dateId = self.dateDAO.dateDict[date]
if "Código" in line:
line = lines.readline()
while "Destino" not in line:
colecaoId = self.colecaoDAO.wichColecao(line)
corId = self.corDAO.wichCor(line)
estadoId = self.estadoDAO.wichEstado(line)
faixaEtariaId = self.faixaEtariaDAO.wichFaixaEtaria(line)
generoId = self.generoDAO.wichGenero(line)
materialId = self.materialDAO.wichMaterial(line)
modeloId = self.modeloDAO.wichModelo(line)
tamanhoId = self.tamanhoDAO.wichTamanho(line)
itemId = self.itemDAO.wichItem(line)
tipoId = self.tipoDAO.wichTipo(self.itemDAO.wichTipo(itemId))
line = re.sub(' +',' ',line)
line = line.split(' ')
valor=line[len(line)-3]
qtd =line[len(line)-4]
if itemId != 0:
self.compraDAO.saveCompra(lojaId,cestaId,dateId,operacaoId,colecaoId,corId,estadoId,faixaEtariaId,generoId,materialId,modeloId,tamanhoId,itemId,tipoId,valor,qtd)
line = lines.readline()
line = lines.readline()
lines.close()
self.compraDAO.pg.commit() | none | 1 | 2.570066 | 3 | |
protfolio/protfolio/views.py | Tonmoyhridhaan/Django-Learn | 0 | 6613536 | <filename>protfolio/protfolio/views.py<gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
text = {
'name' : '<NAME>',
'age' : 22,
'phone' : '01777303837',
'skills' : ['CP','PS','DEV']
}
return render(request,'index.html',text)
def about(request):
return render(request,'about.html')
def contact(request):
return render(request,'contact.html') | <filename>protfolio/protfolio/views.py<gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
text = {
'name' : '<NAME>',
'age' : 22,
'phone' : '01777303837',
'skills' : ['CP','PS','DEV']
}
return render(request,'index.html',text)
def about(request):
return render(request,'about.html')
def contact(request):
return render(request,'contact.html') | none | 1 | 2.066515 | 2 | |
Strings/205. Isomorphic Strings.py | thewires2/Leetcode | 1 | 6613537 | <reponame>thewires2/Leetcode
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
x={}
seen = []
for i,j in zip(s,t):
if i not in x:
if j not in seen:
x[i]=j
seen.append(j)
else:
return False
else:
if x[i]!=j:
return False
print(x)
return True
| class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
x={}
seen = []
for i,j in zip(s,t):
if i not in x:
if j not in seen:
x[i]=j
seen.append(j)
else:
return False
else:
if x[i]!=j:
return False
print(x)
return True | none | 1 | 3.115067 | 3 | |
src/sim_msb_attitude.py | flucto-gmbh/msb_attitude | 0 | 6613538 | import zmq
import logging
import sys
from os import path
import time
from datetime import datetime
import pickle
import numpy as np
# add ahrs directory to PYTHONPATH
try:
from attitude_config import (init, ATTITUDE_TOPIC, IMU_TOPIC)
except ImportError as e:
print(f'failed to import: {e} - exit')
sys.exit(-1)
def main():
    """Publish simulated attitude samples over ZeroMQ.

    Connects as a PUB socket to the broker's XSUB endpoint taken from the
    config, then loops forever emitting ``[timestamp, 4 random values]``
    messages on ATTITUDE_TOPIC, roughly every 50 ms.
    """
    config = init()
    logging.debug('msb_attitude.py starting up')
    broker_xsub = f'{config["ipc_protocol"]}:{config["broker_xsub"]}'
    ctx = zmq.Context()
    socket_broker_xsub = ctx.socket(zmq.PUB)
    logging.debug(f'trying to connect to {broker_xsub}')
    try:
        socket_broker_xsub.connect(broker_xsub)
    except Exception as e:
        logging.fatal(f'failed to bind to zeromq socket {broker_xsub}: {e}')
        sys.exit(-1)
    logging.debug(f'successfully connected to broker XSUB socket as a publisher')
    while True:
        ts = time.time()
        # data layout: [unix timestamp, 4 simulated attitude values]
        data = np.empty(5)
        data[0] = ts
        data[1:] = np.random.random_sample(size=4)
        if config['print']:
            print(f'att: {data}')
        # save for next step
        socket_broker_xsub.send_multipart(
            [
                ATTITUDE_TOPIC, # topic
                pickle.dumps( # serialize the payload
                    data.tolist()
                )
            ]
        )
        time.sleep(0.05)  # ~20 Hz publish rate
        # recv = zmq_socket_sub.recv_pyobj()
        # [topic, data] = zmq_socket_sub.recv_multipart()
        # topic = topic.decode('utf-8')
        # if config['print']:
        #     print(f'{pickle.loads(data)}')
if __name__ == '__main__':
    main()
| import zmq
import logging
import sys
from os import path
import time
from datetime import datetime
import pickle
import numpy as np
# add ahrs directory to PYTHONPATH
try:
from attitude_config import (init, ATTITUDE_TOPIC, IMU_TOPIC)
except ImportError as e:
print(f'failed to import: {e} - exit')
sys.exit(-1)
def main():
    """Publish simulated attitude samples over ZeroMQ.

    Connects as a PUB socket to the broker's XSUB endpoint taken from the
    config, then loops forever emitting ``[timestamp, 4 random values]``
    messages on ATTITUDE_TOPIC, roughly every 50 ms.
    """
    config = init()
    logging.debug('msb_attitude.py starting up')
    broker_xsub = f'{config["ipc_protocol"]}:{config["broker_xsub"]}'
    ctx = zmq.Context()
    socket_broker_xsub = ctx.socket(zmq.PUB)
    logging.debug(f'trying to connect to {broker_xsub}')
    try:
        socket_broker_xsub.connect(broker_xsub)
    except Exception as e:
        logging.fatal(f'failed to bind to zeromq socket {broker_xsub}: {e}')
        sys.exit(-1)
    logging.debug(f'successfully connected to broker XSUB socket as a publisher')
    while True:
        ts = time.time()
        # data layout: [unix timestamp, 4 simulated attitude values]
        data = np.empty(5)
        data[0] = ts
        data[1:] = np.random.random_sample(size=4)
        if config['print']:
            print(f'att: {data}')
        # save for next step
        socket_broker_xsub.send_multipart(
            [
                ATTITUDE_TOPIC, # topic
                pickle.dumps( # serialize the payload
                    data.tolist()
                )
            ]
        )
        time.sleep(0.05)  # ~20 Hz publish rate
        # recv = zmq_socket_sub.recv_pyobj()
        # [topic, data] = zmq_socket_sub.recv_multipart()
        # topic = topic.decode('utf-8')
        # if config['print']:
        #     print(f'{pickle.loads(data)}')
if __name__ == '__main__':
    main()
| en | 0.396741 | # add ahrs directory to PYTHONPATH # save for next step # topic # serialize the payload # recv = zmq_socket_sub.recv_pyobj() # [topic, data] = zmq_socket_sub.recv_multipart() # topic = topic.decode('utf-8') # if config['print']: # print(f'{pickle.loads(data)}') | 2.090358 | 2 |
utils.py | softwareunderground/careermap | 1 | 6613539 | import difflib
import shelve
from datetime import datetime
import networkx as nx
import base64
from io import BytesIO
import matplotlib.pyplot as plt
from collections import OrderedDict
from collections import defaultdict
VOCAB = ["undergrad", "postgrad", "faculty", "academic",
         "service", "software", "technology",
         "consulting", "sales",
         "natoc", "intoc", "indoc", "junoc",
         "government", "agency", "survey", "localgov",
         "mining",
         "unemployed", "retired",
         "startup", "self-employed",
         "other", 'break',
         ]
def get_info(record):
    """
    Parse one survey response into a career path and per-job year totals.

    *record* is a comma-separated list of "<job> <years>" entries.  Each
    job token is fuzzy-matched against VOCAB (falling back to 'other');
    a garbled or missing year count defaults to 1.
    """
    entries = [tuple(part.strip().split()) for part in record.split(',')]
    path = []
    years = defaultdict(int)
    for entry in entries:
        if not entry:
            continue
        # Fuzzy-match the job token against the known vocabulary.
        matches = difflib.get_close_matches(entry[0], VOCAB, n=1, cutoff=0.5)
        job = matches[0] if matches else 'other'
        path.append(job)
        try:
            spent = float(entry[1])
        except (ValueError, IndexError):  # garbled or missing number
            spent = 1
        years[job] += spent
    return path, years
def store(record):
    """Persist a single survey response into the shelf databases.

    Updates four shelves in the working directory:
      - 'edges': counts of job-to-job transitions,
      - 'nodes': accumulated years per job,
      - 'lasts': counts of final (current) jobs,
      - 'lens' : counts of total career lengths in years.
    """
    _ = store_entry(record)  # append the raw response to the text log first
    path, years = get_info(record)
    with shelve.open('edges') as db:
        # Consecutive jobs in the path form a transition edge.
        for pair in zip(path[:-1], path[1:]):
            count = db.get(','.join(pair), 0)
            db[','.join(pair)] = count + 1
    with shelve.open('nodes') as db:
        for k, v in years.items():
            vi = db.get(k, 0)
            db[k] = vi + v
    with shelve.open('lasts') as db:
        # NOTE(review): assumes path is non-empty; an empty response
        # would raise IndexError here — confirm upstream validation.
        last = path[-1]
        db[last] = db.get(last, 0) + 1
    with shelve.open('lens') as db:
        length = str(int(sum(years.values())))
        db[length] = db.get(length, 0) + 1
    return 'Thank you!'
def store_entry(data):
    """Append the raw response to log.txt, prefixed with a UTC timestamp."""
    with open('log.txt', 'ta') as logfile:
        stamp = datetime.utcnow().isoformat() + '\t'
        logfile.write(stamp + data + '\n')
    return 'Done'
def get_network(years):
    """
    Build the career-transition graph from the 'edges' shelf.

    Nodes come from *years* (job -> total years, stored as 'count');
    edge weights are transition counts read from the shelf.  Jobs with
    no transitions are dropped.
    """
    graph = nx.Graph()
    for job, total in years.items():
        graph.add_node(job, count=total)
    with shelve.open('edges') as db:
        for key, weight in db.items():
            src, dst = key.split(',')
            graph.add_edge(src, dst, weight=weight)
    graph.remove_nodes_from(list(nx.isolates(graph)))
    return graph
def get_years():
    """Return the job -> accumulated-years mapping from the 'nodes' shelf."""
    with shelve.open('nodes') as db:
        return dict(db)
def get_lasts():
    """Return the final-job -> count mapping from the 'lasts' shelf."""
    with shelve.open('lasts') as db:
        return dict(db)
def chunks(lst, n):
    """Yield successive n-sized chunks of *lst*."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
def get_lens():
    """Histogram of total career lengths, bucketed into 5-year bins.

    Returns an OrderedDict mapping bin labels ('<5', '5-9', ..., '>50')
    to response counts read from the 'lens' shelf.
    """
    with shelve.open('lens') as db:
        counts = dict(db)
    # bins[i] holds the string forms of the years covered by labels[i];
    # lengths of 50 or more match no bin and belong in the '>50' bucket.
    bins = list(chunks([str(n) for n in range(50)], 5))
    labels = ['<5', '5-9', '10-14', '15-19', '20-24', '25-29',
              '30-34', '35-39', '40-44', '45-49', '>50']
    data = [0 for _ in labels]
    for length, tally in counts.items():
        for idx, bucket in enumerate(bins):
            if length in bucket:
                break
        else:
            # Fix: lengths >= 50 previously kept the last loop index and
            # were mis-credited to '45-49'; route them to '>50' instead.
            idx = len(labels) - 1
        data[idx] += tally
    return OrderedDict(zip(labels, data))
def plot_network(G, years, scale=10):
    """
    Draw the career graph and return it as a base64-encoded PNG string.

    Node sizes are *scale* times each node's 'count' attribute; edge
    widths follow the transition weights.
    """
    edge_weights = [G[u][v]['weight'] for u, v in G.edges()]
    node_counts = nx.get_node_attributes(G, 'count')
    sizes = [scale * node_counts[u] for u in G.nodes()]
    layout = nx.spring_layout(G)
    fig = plt.figure(figsize=(12, 12))
    nx.draw(
        G,
        layout,
        node_size=sizes,
        with_labels=True,
        verticalalignment='bottom',
        width=edge_weights,
    )
    # Render into an in-memory buffer and base64-encode it for embedding.
    buffer = BytesIO()
    plt.savefig(buffer, format='png', facecolor=fig.get_facecolor())
    plt.close()
    buffer.seek(0)
    return base64.b64encode(buffer.getvalue()).decode('utf8')
def plot_bars(data, drop=False, sort=False, log=False, title=True, lpos=None):
    """
    Generic bar plotting function. Does all the plots.

    Args:
        data (dict): label -> value mapping; mutated in place when *drop* is set.
        drop (bool): remove the non-career categories before plotting.
        sort (bool): order bars by descending value.
        log (bool): use a logarithmic x axis.
        title (bool|str): True for the default summary title, a string for
            a custom title, falsy for none.
        lpos: x position for the per-bar value labels (defaults to min(values)).

    Returns:
        str: base64-encoded PNG of the figure.
    """
    if drop:
        _ = data.pop('undergrad', None)
        _ = data.pop('retired', None)
        _ = data.pop('unemployed', None)
        _ = data.pop('break', None)
    labels = list(data.keys())
    values = list(data.values())
    if sort:
        # Sort labels by their values, both descending.
        labels = [l for _, l in sorted(zip(values, labels), reverse=True)]
        values = sorted(values, reverse=True)
    # NOTE(review): assumes data is non-empty; y[0] below would raise
    # IndexError otherwise.
    y = list(range(len(values)))
    y_min, y_max = y[0]-0.75, y[-1]+0.75
    fig, ax = plt.subplots(figsize=(8, 8))
    _ = ax.barh(y, values, color='orange', align='center', edgecolor='none')
    ax.set_yticks(y)
    if log:
        ax.set_xscale('log')
    ax.set_yticklabels(labels, size=12)
    ax.set_ylim(y_max, y_min)  # Label top-down.
    ax.grid(c='black', alpha=0.15, which='both')
    ax.patch.set_facecolor("white")
    fig.patch.set_facecolor("none")
    if title is True:
        # Default title: 40 years == one person-career.
        t = "{:.2f} person-careers of experience".format(sum(values)/40)
    elif title:
        t = title
    else:
        t = ""
    ax.set_title(t)
    if lpos is None:
        lpos = min(values)
    for i, d in enumerate(values):
        ax.text(lpos, i, "{}".format(int(d)), va='center', size=12)
    plt.tight_layout()
    # Put in memory.
    handle = BytesIO()
    plt.savefig(handle, format='png', facecolor=fig.get_facecolor())
    plt.close()
    # Encode.
    handle.seek(0)
    figdata_png = base64.b64encode(handle.getvalue()).decode('utf8')
    return figdata_png
| import difflib
import shelve
from datetime import datetime
import networkx as nx
import base64
from io import BytesIO
import matplotlib.pyplot as plt
from collections import OrderedDict
from collections import defaultdict
VOCAB = ["undergrad", "postgrad", "faculty", "academic",
         "service", "software", "technology",
         "consulting", "sales",
         "natoc", "intoc", "indoc", "junoc",
         "government", "agency", "survey", "localgov",
         "mining",
         "unemployed", "retired",
         "startup", "self-employed",
         "other", 'break',
         ]
def get_info(record):
    """
    Parse one survey response into a career path and per-job year totals.

    *record* is a comma-separated list of "<job> <years>" entries.  Each
    job token is fuzzy-matched against VOCAB (falling back to 'other');
    a garbled or missing year count defaults to 1.
    """
    entries = [tuple(part.strip().split()) for part in record.split(',')]
    path = []
    years = defaultdict(int)
    for entry in entries:
        if not entry:
            continue
        # Fuzzy-match the job token against the known vocabulary.
        matches = difflib.get_close_matches(entry[0], VOCAB, n=1, cutoff=0.5)
        job = matches[0] if matches else 'other'
        path.append(job)
        try:
            spent = float(entry[1])
        except (ValueError, IndexError):  # garbled or missing number
            spent = 1
        years[job] += spent
    return path, years
def store(record):
    """Persist a single survey response into the shelf databases.

    Updates four shelves in the working directory:
      - 'edges': counts of job-to-job transitions,
      - 'nodes': accumulated years per job,
      - 'lasts': counts of final (current) jobs,
      - 'lens' : counts of total career lengths in years.
    """
    _ = store_entry(record)  # append the raw response to the text log first
    path, years = get_info(record)
    with shelve.open('edges') as db:
        # Consecutive jobs in the path form a transition edge.
        for pair in zip(path[:-1], path[1:]):
            count = db.get(','.join(pair), 0)
            db[','.join(pair)] = count + 1
    with shelve.open('nodes') as db:
        for k, v in years.items():
            vi = db.get(k, 0)
            db[k] = vi + v
    with shelve.open('lasts') as db:
        # NOTE(review): assumes path is non-empty; an empty response
        # would raise IndexError here — confirm upstream validation.
        last = path[-1]
        db[last] = db.get(last, 0) + 1
    with shelve.open('lens') as db:
        length = str(int(sum(years.values())))
        db[length] = db.get(length, 0) + 1
    return 'Thank you!'
def store_entry(data):
    """Append the raw response to log.txt, prefixed with a UTC timestamp."""
    with open('log.txt', 'ta') as logfile:
        stamp = datetime.utcnow().isoformat() + '\t'
        logfile.write(stamp + data + '\n')
    return 'Done'
def get_network(years):
    """
    Build the career-transition graph from the 'edges' shelf.

    Nodes come from *years* (job -> total years, stored as 'count');
    edge weights are transition counts read from the shelf.  Jobs with
    no transitions are dropped.
    """
    graph = nx.Graph()
    for job, total in years.items():
        graph.add_node(job, count=total)
    with shelve.open('edges') as db:
        for key, weight in db.items():
            src, dst = key.split(',')
            graph.add_edge(src, dst, weight=weight)
    graph.remove_nodes_from(list(nx.isolates(graph)))
    return graph
def get_years():
    """Return the job -> accumulated-years mapping from the 'nodes' shelf."""
    with shelve.open('nodes') as db:
        return dict(db)
def get_lasts():
    """Return the final-job -> count mapping from the 'lasts' shelf."""
    with shelve.open('lasts') as db:
        return dict(db)
def chunks(lst, n):
    """Yield successive n-sized chunks of *lst*."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]
def get_lens():
    """Histogram of total career lengths, bucketed into 5-year bins.

    Returns an OrderedDict mapping bin labels ('<5', '5-9', ..., '>50')
    to response counts read from the 'lens' shelf.
    """
    with shelve.open('lens') as db:
        counts = dict(db)
    # bins[i] holds the string forms of the years covered by labels[i];
    # lengths of 50 or more match no bin and belong in the '>50' bucket.
    bins = list(chunks([str(n) for n in range(50)], 5))
    labels = ['<5', '5-9', '10-14', '15-19', '20-24', '25-29',
              '30-34', '35-39', '40-44', '45-49', '>50']
    data = [0 for _ in labels]
    for length, tally in counts.items():
        for idx, bucket in enumerate(bins):
            if length in bucket:
                break
        else:
            # Fix: lengths >= 50 previously kept the last loop index and
            # were mis-credited to '45-49'; route them to '>50' instead.
            idx = len(labels) - 1
        data[idx] += tally
    return OrderedDict(zip(labels, data))
def plot_network(G, years, scale=10):
    """
    Draw the career graph and return it as a base64-encoded PNG string.

    Node sizes are *scale* times each node's 'count' attribute; edge
    widths follow the transition weights.
    """
    edge_weights = [G[u][v]['weight'] for u, v in G.edges()]
    node_counts = nx.get_node_attributes(G, 'count')
    sizes = [scale * node_counts[u] for u in G.nodes()]
    layout = nx.spring_layout(G)
    fig = plt.figure(figsize=(12, 12))
    nx.draw(
        G,
        layout,
        node_size=sizes,
        with_labels=True,
        verticalalignment='bottom',
        width=edge_weights,
    )
    # Render into an in-memory buffer and base64-encode it for embedding.
    buffer = BytesIO()
    plt.savefig(buffer, format='png', facecolor=fig.get_facecolor())
    plt.close()
    buffer.seek(0)
    return base64.b64encode(buffer.getvalue()).decode('utf8')
def plot_bars(data, drop=False, sort=False, log=False, title=True, lpos=None):
    """
    Generic bar plotting function. Does all the plots.

    Args:
        data (dict): label -> value mapping; mutated in place when *drop* is set.
        drop (bool): remove the non-career categories before plotting.
        sort (bool): order bars by descending value.
        log (bool): use a logarithmic x axis.
        title (bool|str): True for the default summary title, a string for
            a custom title, falsy for none.
        lpos: x position for the per-bar value labels (defaults to min(values)).

    Returns:
        str: base64-encoded PNG of the figure.
    """
    if drop:
        _ = data.pop('undergrad', None)
        _ = data.pop('retired', None)
        _ = data.pop('unemployed', None)
        _ = data.pop('break', None)
    labels = list(data.keys())
    values = list(data.values())
    if sort:
        # Sort labels by their values, both descending.
        labels = [l for _, l in sorted(zip(values, labels), reverse=True)]
        values = sorted(values, reverse=True)
    # NOTE(review): assumes data is non-empty; y[0] below would raise
    # IndexError otherwise.
    y = list(range(len(values)))
    y_min, y_max = y[0]-0.75, y[-1]+0.75
    fig, ax = plt.subplots(figsize=(8, 8))
    _ = ax.barh(y, values, color='orange', align='center', edgecolor='none')
    ax.set_yticks(y)
    if log:
        ax.set_xscale('log')
    ax.set_yticklabels(labels, size=12)
    ax.set_ylim(y_max, y_min)  # Label top-down.
    ax.grid(c='black', alpha=0.15, which='both')
    ax.patch.set_facecolor("white")
    fig.patch.set_facecolor("none")
    if title is True:
        # Default title: 40 years == one person-career.
        t = "{:.2f} person-careers of experience".format(sum(values)/40)
    elif title:
        t = title
    else:
        t = ""
    ax.set_title(t)
    if lpos is None:
        lpos = min(values)
    for i, d in enumerate(values):
        ax.text(lpos, i, "{}".format(int(d)), va='center', size=12)
    plt.tight_layout()
    # Put in memory.
    handle = BytesIO()
    plt.savefig(handle, format='png', facecolor=fig.get_facecolor())
    plt.close()
    # Encode.
    handle.seek(0)
    figdata_png = base64.b64encode(handle.getvalue()).decode('utf8')
    return figdata_png
| en | 0.917429 | Take a single response and turn it into a list of careers. Completely ignore the numbers for now. # Get employment. # Get years. # Garbled number # None provided Get the network from the Shelf. Make a networkx plot and convert to base64-encoded string. # Save as base64 string. Generic bar plotting function. Does all the plots. # Label top-down. # Put in memory. # Encode. | 2.59804 | 3 |
aws_works/s3.py | guivl/aws_works | 0 | 6613540 | '''
Aws_works.s3
============
It's a python module with useful actions for those who works with amazon web services through boto3(aws high-level api).
Here we focus in s3 bucket like an object. It means you set a bucket and execute some actions in it.
============
'''
import io
import json
import boto3
import numpy as np
import pandas as pd
client = boto3.client('s3')
def get_address(s3url):
    '''
    Split one s3 url (or a list of them) into bucket and key parts.

    Input:
        s3url[string, list] = Expect an s3 url or a list of s3 urls.
    Return:
        (bucket, path_file) for a single url, ([buckets], [paths]) for a
        list; None (after printing a message) for non-s3 input.
    Example:
        b, p = s3.get_address("s3://your-bucket/folder/file.format")
        print(b)
        "your-bucket"
        print(p)
        "folder/file.format"
    '''
    # Fix: the original called str.startswith on the raw argument before
    # normalizing, so the documented list input crashed with AttributeError.
    urls = [s3url] if isinstance(s3url, str) else list(s3url)
    if not all(url.startswith('s3://') for url in urls):
        print('not a s3 url')
        return None
    buckets, keys = [], []
    for url in urls:
        bucket = url.split('/')[2]
        key = url.replace('s3://{}/'.format(bucket), '')
        buckets.append(bucket)
        keys.append(key)
    if (len(buckets) == 1) & (len(keys) == 1):
        return buckets[0], keys[0]
    return buckets, keys
class s3_bucket:
    '''Bind one S3 bucket and expose read/write helpers for it.

    All methods go through the module-level boto3 ``client``.
    '''
    Bucket = None  # name of the bound bucket
    def __init__(self, name):
        self.Bucket = name #public
        self.__alias = client #private
    def read_csv(self, path):
        '''
        Read a csv object from s3 into a pandas DataFrame.
        Input:
            path[string] = Key of the csv object inside the bucket.
        Return:
            dataframe[pandas.dataframe]
        '''
        obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']
        return pd.read_csv(io.BytesIO(obj.read()))
    def read_log(self, path, output_format='pandas'):
        '''
        Read a json log object from s3.
        Input:
            path[string] = Key of the json object.
            output_format['pandas'|'json'] = Desired return format
                (pandas is default).
        Return:
            pandas.DataFrame or dict depending on output_format; None
            (after printing a message) for any other value.
        '''
        obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']
        jobj = json.loads(obj.read())
        if(output_format=='pandas'):
            return pd.DataFrame(
                data=jobj['data'],
                columns=jobj['columns'],
                index=list(np.arange(0, len(jobj['data'])))
            )
        if(output_format=='json'):
            return jobj
        print('output_format not specified correctly.')
    def write_csv(self, dataframe, path_name):
        '''
        Write a pandas dataframe into s3 as <path_name>.csv.
        Input:
            dataframe[pandas.dataframe] = Data to serialize.
            path_name[string] = Target key; a '.csv' suffix is normalized.
        Return:
            String - confirmation message.
        '''
        # Fix: the original referenced an undefined name ('namefile',
        # the parameter is 'path_name'), raising NameError on every call.
        path_name = path_name.replace('.csv', '')
        buf = io.StringIO()
        dataframe.to_csv(buf, sep=',', index=False)
        client.put_object(Bucket=self.Bucket, Key=path_name+'.csv', Body=buf.getvalue())
        return 'dataframe written into s3.'
    def write_log(self, dictionary, path):
        '''
        Create or append to a json log object in s3.
        The log is a dict shaped as {'columns': [...], 'data': [...]};
        new 'data' rows are appended to an existing log at *path*.
        Input:
            dictionary[dict] = Log payload with 'columns'/'data'.
            path[string] = Key of the json log object.
        Return:
            String - 'log updated' when an existing log was extended,
            'log created' when a new one was written.
        '''
        try:
            obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']
            jobj = json.loads(obj.read())
            jobj['data'] = jobj['data'] + dictionary['data']
            client.put_object(Bucket=self.Bucket, Key=path, Body=json.dumps(jobj))
            return 'log updated'
        except:  # noqa: E722 - any failure (e.g. missing key) falls back to create
            client.put_object(Bucket=self.Bucket, Key=path, Body=json.dumps(dictionary))
            return 'log created'
Aws_works.s3
============
It's a python module with useful actions for those who works with amazon web services through boto3(aws high-level api).
Here we focus in s3 bucket like an object. It means you set a bucket and execute some actions in it.
============
'''
import io
import json
import boto3
import numpy as np
import pandas as pd
client = boto3.client('s3')
def get_address(s3url):
    '''
    Split one s3 url (or a list of them) into bucket and key parts.

    Input:
        s3url[string, list] = Expect an s3 url or a list of s3 urls.
    Return:
        (bucket, path_file) for a single url, ([buckets], [paths]) for a
        list; None (after printing a message) for non-s3 input.
    Example:
        b, p = s3.get_address("s3://your-bucket/folder/file.format")
        print(b)
        "your-bucket"
        print(p)
        "folder/file.format"
    '''
    # Fix: the original called str.startswith on the raw argument before
    # normalizing, so the documented list input crashed with AttributeError.
    urls = [s3url] if isinstance(s3url, str) else list(s3url)
    if not all(url.startswith('s3://') for url in urls):
        print('not a s3 url')
        return None
    buckets, keys = [], []
    for url in urls:
        bucket = url.split('/')[2]
        key = url.replace('s3://{}/'.format(bucket), '')
        buckets.append(bucket)
        keys.append(key)
    if (len(buckets) == 1) & (len(keys) == 1):
        return buckets[0], keys[0]
    return buckets, keys
class s3_bucket:
    '''Bind one S3 bucket and expose read/write helpers for it.

    All methods go through the module-level boto3 ``client``.
    '''
    Bucket = None  # name of the bound bucket
    def __init__(self, name):
        self.Bucket = name #public
        self.__alias = client #private
    def read_csv(self, path):
        '''
        Read a csv object from s3 into a pandas DataFrame.
        Input:
            path[string] = Key of the csv object inside the bucket.
        Return:
            dataframe[pandas.dataframe]
        '''
        obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']
        return pd.read_csv(io.BytesIO(obj.read()))
    def read_log(self, path, output_format='pandas'):
        '''
        Read a json log object from s3.
        Input:
            path[string] = Key of the json object.
            output_format['pandas'|'json'] = Desired return format
                (pandas is default).
        Return:
            pandas.DataFrame or dict depending on output_format; None
            (after printing a message) for any other value.
        '''
        obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']
        jobj = json.loads(obj.read())
        if(output_format=='pandas'):
            return pd.DataFrame(
                data=jobj['data'],
                columns=jobj['columns'],
                index=list(np.arange(0, len(jobj['data'])))
            )
        if(output_format=='json'):
            return jobj
        print('output_format not specified correctly.')
    def write_csv(self, dataframe, path_name):
        '''
        Write a pandas dataframe into s3 as <path_name>.csv.
        Input:
            dataframe[pandas.dataframe] = Data to serialize.
            path_name[string] = Target key; a '.csv' suffix is normalized.
        Return:
            String - confirmation message.
        '''
        # Fix: the original referenced an undefined name ('namefile',
        # the parameter is 'path_name'), raising NameError on every call.
        path_name = path_name.replace('.csv', '')
        buf = io.StringIO()
        dataframe.to_csv(buf, sep=',', index=False)
        client.put_object(Bucket=self.Bucket, Key=path_name+'.csv', Body=buf.getvalue())
        return 'dataframe written into s3.'
    def write_log(self, dictionary, path):
        '''
        Create or append to a json log object in s3.
        The log is a dict shaped as {'columns': [...], 'data': [...]};
        new 'data' rows are appended to an existing log at *path*.
        Input:
            dictionary[dict] = Log payload with 'columns'/'data'.
            path[string] = Key of the json log object.
        Return:
            String - 'log updated' when an existing log was extended,
            'log created' when a new one was written.
        '''
        try:
            obj = client.get_object(Bucket=self.Bucket, Key=path)['Body']
            jobj = json.loads(obj.read())
            jobj['data'] = jobj['data'] + dictionary['data']
            client.put_object(Bucket=self.Bucket, Key=path, Body=json.dumps(jobj))
            return 'log updated'
        except:  # noqa: E722 - any failure (e.g. missing key) falls back to create
            client.put_object(Bucket=self.Bucket, Key=path, Body=json.dumps(dictionary))
            return 'log created'
Log description: To create a log file we highly recomed to create a json structure, such as: { 'columns' : [], 'data' : [] } path[string] = Expect a string with the path to write the json. -------------------------------- Return: String - 'log updated/created' -------------------------------- Example: tmp = { 'columns':['A','B'], 'data':[[0.001, 0.002],[0.003, 0.004]] } s3_bucket.write_log(dictionary=tmp, path='sql_exec/logs/log.json') return: log updated -------------------------------- | 3.420657 | 3 |
lambda/food_script_lambda.py | travis-deshotels/FoodSelector | 0 | 6613541 | <filename>lambda/food_script_lambda.py
import json
import boto3
import os
import random
def choose_food(data):
    """Pick a random restaurant, then one liked dish per person there."""
    selection = {'choices': []}
    restaurant = random.choice(data)
    selection['restaurant'] = restaurant['name']
    for entry in restaurant['choices']:
        selection['choices'].append({
            'person': entry['person'],
            'choice': random.choice(entry['likes']),
        })
    return selection
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Downloads the menu file named by the ``myBucket`` / ``myDataFile``
    environment variables from S3 into /tmp, parses it as JSON, and
    returns a random food selection.
    """
    s3 = boto3.resource('s3')
    s3.Object(os.environ['myBucket'], os.environ['myDataFile']).download_file('/tmp/file.json')
    # NOTE(review): the file handle below is never closed explicitly.
    data = json.loads(open('/tmp/file.json').read())
    return choose_food(data)
| <filename>lambda/food_script_lambda.py
import json
import boto3
import os
import random
def choose_food(data):
    """Pick a random restaurant, then one liked dish per person there."""
    selection = {'choices': []}
    restaurant = random.choice(data)
    selection['restaurant'] = restaurant['name']
    for entry in restaurant['choices']:
        selection['choices'].append({
            'person': entry['person'],
            'choice': random.choice(entry['likes']),
        })
    return selection
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Downloads the menu file named by the ``myBucket`` / ``myDataFile``
    environment variables from S3 into /tmp, parses it as JSON, and
    returns a random food selection.
    """
    s3 = boto3.resource('s3')
    s3.Object(os.environ['myBucket'], os.environ['myDataFile']).download_file('/tmp/file.json')
    # NOTE(review): the file handle below is never closed explicitly.
    data = json.loads(open('/tmp/file.json').read())
    return choose_food(data)
| none | 1 | 2.611533 | 3 | |
benchbuild/utils/wrapping.py | sturmianseq/benchbuild | 11 | 6613542 | <gh_stars>10-100
"""
Wrapper utilities for benchbuild.
This module provides methods to wrap binaries with extensions that are
pickled alongside the original binary.
In place of the original binary a new python module is generated that
loads the pickle and redirects the program call with all its arguments
to it. This allows interception of arbitrary programs for experimentation.
Examples:
TODO
Compiler Wrappers:
The compiler wrappers substitute the compiler call with a script that
produces the expected output from the original compiler call first.
Afterwards the pickle is loaded and the original call is forwarded to the
pickle. This way the user is not obligated to produce valid output during
his own experiment.
Runtime Wrappers:
These directly forward the binary call to the pickle without any execution
of the binary. We cannot guarantee that repeated execution is valid,
therefore, we let the user decide what the program should do.
"""
import logging
import os
import sys
import typing as tp
from typing import TYPE_CHECKING
import dill
import jinja2
import plumbum as pb
from plumbum import local
from plumbum.commands.base import BoundCommand
from benchbuild.settings import CFG
from benchbuild.utils import run
from benchbuild.utils.cmd import chmod, mv
from benchbuild.utils.path import list_to_path
from benchbuild.utils.uchroot import no_llvm as uchroot
PROJECT_BIN_F_EXT = ".bin"
PROJECT_BLOB_F_EXT = ".postproc"
LOG = logging.getLogger(__name__)
if TYPE_CHECKING:
from benchbuild.project import Project
from benchbuild.experiment import Experiment
def strip_path_prefix(ipath: str, prefix: str) -> str:
    """
    Strip prefix from path.

    Args:
        ipath: input path
        prefix: the prefix to remove, if it is found in :ipath:

    Examples:
        >>> strip_path_prefix("/foo/bar", "/bar")
        '/foo/bar'
        >>> strip_path_prefix("/foo/bar", "/")
        'foo/bar'
        >>> strip_path_prefix("/foo/bar", "/foo")
        '/bar'
        >>> strip_path_prefix("/foo/bar", "None")
        '/foo/bar'
    """
    if prefix and ipath.startswith(prefix):
        return ipath[len(prefix):]
    return ipath
def unpickle(pickle_file: str) -> tp.Any:
    """Unpickle a python object from the given path."""
    with open(pickle_file, "rb") as pickle_f:
        payload = dill.load(pickle_f)
    if not payload:
        LOG.error("Could not load python object from file")
    return payload
def __create_jinja_env() -> jinja2.Environment:
    """Build the Jinja environment used to render wrapper templates."""
    loader = jinja2.PackageLoader('benchbuild', 'res')
    return jinja2.Environment(
        trim_blocks=True, lstrip_blocks=True, loader=loader
    )
def wrap(
    name: str,
    project: 'Project',
    sprefix: str = '',
    python: str = sys.executable
) -> pb.commands.ConcreteCommand:
    """ Wrap the binary :name: with the runtime extension of the project.

    This module generates a python tool that replaces :name:
    The function in runner only accepts the replaced binaries
    name as argument. We use the cloudpickle package to
    perform the serialization, make sure :runner: can be serialized
    with it and you're fine.

    Args:
        name: Binary we want to wrap
        project: The project that contains the runtime_extension we want
            to run instead of the binary.
        sprefix: Optional path prefix (e.g. a container root) that is
            stripped from paths used inside the wrapper.
        python: Python interpreter the generated wrapper should run with.

    Returns:
        A plumbum command, ready to launch.
    """
    env = __create_jinja_env()
    template = env.get_template('wrapping/run_static.py.inc')

    name_absolute = os.path.abspath(name)
    real_f = name_absolute + PROJECT_BIN_F_EXT
    # Move the real binary aside; inside a container root we must call the
    # container's own mv on prefix-stripped paths.
    if sprefix:
        _mv = run.watch(uchroot()["/bin/mv"])
        _mv(
            strip_path_prefix(name_absolute, sprefix),
            strip_path_prefix(real_f, sprefix)
        )
    else:
        _mv = run.watch(mv)
        _mv(name_absolute, real_f)

    project_file = persist(project, suffix=".project")

    env = CFG['env'].value
    bin_path = list_to_path(env.get('PATH', []))
    bin_path = list_to_path([bin_path, os.environ["PATH"]])

    bin_lib_path = list_to_path(env.get('LD_LIBRARY_PATH', []))
    # Fix: LD_LIBRARY_PATH is frequently unset; indexing os.environ
    # directly raised KeyError in that case.
    bin_lib_path = list_to_path([
        bin_lib_path, os.environ.get("LD_LIBRARY_PATH", "")
    ])
    home = env.get("HOME", os.getenv("HOME", ""))

    with open(name_absolute, 'w') as wrapper:
        wrapper.write(
            template.render(
                runf=strip_path_prefix(real_f, sprefix),
                project_file=strip_path_prefix(project_file, sprefix),
                path=str(bin_path),
                ld_library_path=str(bin_lib_path),
                home=str(home),
                python=python,
            )
        )

    _chmod = run.watch(chmod)
    _chmod("+x", name_absolute)
    return local[name_absolute]
def wrap_dynamic(
    project: 'Project',
    name: str,
    sprefix: str = '',
    python: str = sys.executable,
    name_filters: tp.Optional[tp.List[str]] = None
) -> BoundCommand:
    """
    Wrap the binary :name with the function :runner.

    This module generates a python tool :name: that can replace
    a yet unspecified binary.
    It behaves similar to the :wrap: function. However, the first
    argument is the actual binary name.

    Args:
        name: name of the python module
        runner: Function that should run the real binary
        sprefix: Prefix that should be used for commands.
        python: The python executable that should be used.
        name_filters:
            List of regex expressions that are used to filter the
            real project name. Make sure to include a match group named
            'name' in the regex, e.g.,
            [
                r'foo(?P<name>.)-flt'
            ]

    Returns: plumbum command, ready to launch.
    """
    env = __create_jinja_env()
    template = env.get_template('wrapping/run_dynamic.py.inc')

    name_absolute = os.path.abspath(name)
    real_f = name_absolute + PROJECT_BIN_F_EXT

    project_file = persist(project, suffix=".project")

    cfg_env = CFG['env'].value
    bin_path = list_to_path(cfg_env.get('PATH', []))
    bin_path = list_to_path([bin_path, os.environ["PATH"]])

    bin_lib_path = \
        list_to_path(cfg_env.get('LD_LIBRARY_PATH', []))
    # Fix: LD_LIBRARY_PATH is frequently unset; indexing os.environ
    # directly raised KeyError in that case.
    bin_lib_path = \
        list_to_path([bin_lib_path, os.environ.get("LD_LIBRARY_PATH", "")])
    home = cfg_env.get("HOME", os.getenv("HOME", ""))

    with open(name_absolute, 'w') as wrapper:
        wrapper.write(
            template.render(
                runf=strip_path_prefix(real_f, sprefix),
                project_file=strip_path_prefix(project_file, sprefix),
                path=str(bin_path),
                ld_library_path=str(bin_lib_path),
                home=str(home),
                python=python,
                name_filters=name_filters
            )
        )
    chmod("+x", name_absolute)
    return local[name_absolute]
def wrap_cc(
    filepath: str,
    compiler: BoundCommand,
    project: 'Project',
    python: str = sys.executable,
    detect_project: bool = False
) -> BoundCommand:
    """
    Replace a compiler binary with a generated wrapper script.

    The script written to *filepath* hides CFLAGS & LDFLAGS handling:
    it first produces the expected compiler output and then forwards
    the call to the pickled real compiler command.

    Args:
        filepath (str): Path to the wrapper script.
        compiler (benchbuild.utils.cmd):
            Real compiler command the script should invoke.
        project (benchbuild.project.Project):
            The project this compiler wrapper belongs to.
        python (str): Python interpreter used by the wrapper script.
        detect_project: Whether the wrapper enables project detection.

    Returns (benchbuild.utils.cmd):
        Command of the new compiler we can call.
    """
    jinja_env = __create_jinja_env()
    template = jinja_env.get_template('wrapping/run_compiler.py.inc')

    cc_fname = local.path(filepath).with_suffix(".benchbuild.cc", depth=0)
    cc_f = persist(compiler, filename=cc_fname)
    project_file = persist(project, suffix=".project")

    script = template.render(
        cc_f=cc_f,
        project_file=project_file,
        python=python,
        detect_project=detect_project
    )
    with open(filepath, 'w') as wrapper:
        wrapper.write(script)

    chmod("+x", filepath)

    LOG.debug(
        "Placed wrapper in: %s for compiler %s", local.path(filepath),
        str(compiler)
    )
    LOG.debug("Placed project in: %s", local.path(project_file))
    LOG.debug("Placed compiler command in: %s", local.path(cc_f))
    return local[filepath]
def persist(id_obj, filename=None, suffix=None):
    """Persist an object in the filesystem.
    This will generate a pickled version of the given obj in the filename path.
    Objects carrying a ``run_uuid`` attribute are identified by it; otherwise
    the ``id()`` builtin of python is used to generate an identifier for you.
    The file will be created, if it does not exist.
    If the file already exists, we will overwrite it.
    Args:
        id_obj (Any): An identifiable object you want to persist in the
            filesystem.
        filename (str): Target path of the pickle; derived from the object's
            identifier and ``suffix`` when omitted.
        suffix (str): Extension used when deriving ``filename``
            (defaults to ".pickle").
    Returns:
        str: Absolute path of the written pickle file.
    """
    if suffix is None:
        suffix = ".pickle"
    if hasattr(id_obj, 'run_uuid'):
        ident = id_obj.run_uuid
    else:
        ident = str(id(id_obj))
    if filename is None:
        filename = "{obj_id}{suffix}".format(obj_id=ident, suffix=suffix)
    with open(filename, 'wb') as obj_file:
        dill.dump(id_obj, obj_file)
    return os.path.abspath(filename)
def load(filename: str) -> tp.Optional[tp.Any]:
    """Load a pickled obj from the filesystem.
    You better know what you expect from the given pickle, because we don't
    check it.
    Args:
        filename (str): The filename we load the object from.
    Returns:
        The object we were able to unpickle, else None.
    """
    if not os.path.exists(filename):
        # Missing file is reported but not raised: callers get None instead.
        LOG.error("load object - File '%s' does not exist.", filename)
        return None
    obj = None
    with open(filename, 'rb') as obj_file:
        obj = dill.load(obj_file)
    return obj
| """
Wrapper utilities for benchbuild.
This module provides methods to wrap binaries with extensions that are
pickled alongside the original binary.
In place of the original binary a new python module is generated that
loads the pickle and redirects the program call with all its arguments
to it. This allows interception of arbitrary programs for experimentation.
Examples:
TODO
Compiler Wrappers:
The compiler wrappers substitute the compiler call with a script that
produces the expected output from the original compiler call first.
Afterwards the pickle is loaded and the original call is forwarded to the
pickle. This way the user is not obligated to produce valid output during
his own experiment.
Runtime Wrappers:
These directly forward the binary call to the pickle without any execution
of the binary. We cannot guarantee that repeated execution is valid,
therefore, we let the user decide what the program should do.
"""
import logging
import os
import sys
import typing as tp
from typing import TYPE_CHECKING
import dill
import jinja2
import plumbum as pb
from plumbum import local
from plumbum.commands.base import BoundCommand
from benchbuild.settings import CFG
from benchbuild.utils import run
from benchbuild.utils.cmd import chmod, mv
from benchbuild.utils.path import list_to_path
from benchbuild.utils.uchroot import no_llvm as uchroot
PROJECT_BIN_F_EXT = ".bin"
PROJECT_BLOB_F_EXT = ".postproc"
LOG = logging.getLogger(__name__)
if TYPE_CHECKING:
from benchbuild.project import Project
from benchbuild.experiment import Experiment
def strip_path_prefix(ipath: str, prefix: str) -> str:
    """
    Strip prefix from path.
    Args:
        ipath: input path
        prefix: the prefix to remove, if it is found in :ipath:
    Examples:
        >>> strip_path_prefix("/foo/bar", "/bar")
        '/foo/bar'
        >>> strip_path_prefix("/foo/bar", "/")
        'foo/bar'
        >>> strip_path_prefix("/foo/bar", "/foo")
        '/bar'
        >>> strip_path_prefix("/foo/bar", "None")
        '/foo/bar'
    """
    # An empty prefix (or one that does not match) leaves the path untouched.
    if prefix and ipath.startswith(prefix):
        return ipath[len(prefix):]
    return ipath
def unpickle(pickle_file: str) -> tp.Any:
    """Unpickle a python object from the given path.

    Args:
        pickle_file: Path of the pickle file to load.

    Returns:
        The unpickled object. An error is logged if nothing was loaded.
    """
    obj = None
    with open(pickle_file, "rb") as pickle_f:
        obj = dill.load(pickle_f)
    # Compare against None instead of truthiness: a legitimately unpickled
    # falsy object ([], 0, "") must not be reported as a load failure.
    if obj is None:
        LOG.error("Could not load python object from file")
    return obj
def __create_jinja_env() -> jinja2.Environment:
    # Shared Jinja environment for all wrapper generators below; templates
    # are looked up inside the installed benchbuild package ('res' folder).
    return jinja2.Environment(
        trim_blocks=True,
        lstrip_blocks=True,
        loader=jinja2.PackageLoader('benchbuild', 'res')
    )
def wrap(
    name: str,
    project: 'Project',
    sprefix: str = '',
    python: str = sys.executable
) -> pb.commands.ConcreteCommand:
    """ Wrap the binary :name: with the runtime extension of the project.
    This module generates a python tool that replaces :name:
    The function in runner only accepts the replaced binaries
    name as argument. We use the cloudpickle package to
    perform the serialization, make sure :runner: can be serialized
    with it and you're fine.
    Args:
        name: Binary we want to wrap
        project: The project that contains the runtime_extension we want
            to run instead of the binary.
        sprefix: Path prefix stripped from the paths written into the
            wrapper; when set, the move is performed through uchroot's
            /bin/mv on the prefix-stripped paths.
        python: Python interpreter the generated wrapper script runs with.
    Returns:
        A plumbum command, ready to launch.
    """
    env = __create_jinja_env()
    template = env.get_template('wrapping/run_static.py.inc')
    name_absolute = os.path.abspath(name)
    # The real binary is moved aside (<name>.bin); the generated python
    # wrapper takes its place under the original name.
    real_f = name_absolute + PROJECT_BIN_F_EXT
    if sprefix:
        # Inside a container, use the container's /bin/mv on paths that are
        # relative to the container root.
        _mv = run.watch(uchroot()["/bin/mv"])
        _mv(
            strip_path_prefix(name_absolute, sprefix),
            strip_path_prefix(real_f, sprefix)
        )
    else:
        _mv = run.watch(mv)
        _mv(name_absolute, real_f)
    project_file = persist(project, suffix=".project")
    # NOTE: 'env' is rebound here from the jinja environment to the
    # configured process environment (CFG['env']).
    env = CFG['env'].value
    bin_path = list_to_path(env.get('PATH', []))
    bin_path = list_to_path([bin_path, os.environ["PATH"]])
    bin_lib_path = list_to_path(env.get('LD_LIBRARY_PATH', []))
    bin_lib_path = list_to_path([bin_lib_path, os.environ["LD_LIBRARY_PATH"]])
    home = env.get("HOME", os.getenv("HOME", ""))
    with open(name_absolute, 'w') as wrapper:
        wrapper.write(
            template.render(
                runf=strip_path_prefix(real_f, sprefix),
                project_file=strip_path_prefix(project_file, sprefix),
                path=str(bin_path),
                ld_library_path=str(bin_lib_path),
                home=str(home),
                python=python,
            )
        )
    _chmod = run.watch(chmod)
    _chmod("+x", name_absolute)
    return local[name_absolute]
def wrap_dynamic(
    project: 'Project',
    name: str,
    sprefix: str = '',
    python: str = sys.executable,
    name_filters: tp.Optional[tp.List[str]] = None
) -> BoundCommand:
    """
    Generate a dynamic wrapper script named :name:.
    This module generates a python tool :name: that can replace
    a yet unspecified binary.
    It behaves similar to the :wrap: function. However, the first
    argument of the generated tool is the actual binary name.
    Args:
        project: The project whose pickled state the wrapper loads.
        name: name of the python module
        sprefix: Prefix that should be used for commands.
        python: The python executable that should be used.
        name_filters:
            List of regex expressions that are used to filter the
            real project name. Make sure to include a match group named
            'name' in the regex, e.g.,
            [
                r'foo(?P<name>.)-flt'
            ]
    Returns: plumbum command, ready to launch.
    """
    env = __create_jinja_env()
    template = env.get_template('wrapping/run_dynamic.py.inc')
    name_absolute = os.path.abspath(name)
    # Unlike wrap(), nothing is moved here: the ".bin" path is only rendered
    # into the template for the wrapper to use at run time.
    real_f = name_absolute + PROJECT_BIN_F_EXT
    project_file = persist(project, suffix=".project")
    cfg_env = CFG['env'].value
    bin_path = list_to_path(cfg_env.get('PATH', []))
    bin_path = list_to_path([bin_path, os.environ["PATH"]])
    bin_lib_path = \
        list_to_path(cfg_env.get('LD_LIBRARY_PATH', []))
    bin_lib_path = \
        list_to_path([bin_lib_path, os.environ["LD_LIBRARY_PATH"]])
    home = cfg_env.get("HOME", os.getenv("HOME", ""))
    with open(name_absolute, 'w') as wrapper:
        wrapper.write(
            template.render(
                runf=strip_path_prefix(real_f, sprefix),
                project_file=strip_path_prefix(project_file, sprefix),
                path=str(bin_path),
                ld_library_path=str(bin_lib_path),
                home=str(home),
                python=python,
                name_filters=name_filters
            )
        )
    # NOTE(review): wrap() routes chmod through run.watch(); this call does
    # not -- confirm whether the omission is intentional.
    chmod("+x", name_absolute)
    return local[name_absolute]
def wrap_cc(
    filepath: str,
    compiler: BoundCommand,
    project: 'Project',
    python: str = sys.executable,
    detect_project: bool = False
) -> BoundCommand:
    """
    Substitute a compiler with a script that hides CFLAGS & LDFLAGS.
    This will generate a wrapper script in the current directory
    and return a complete plumbum command to it.
    Args:
        filepath (str): Path to the wrapper script.
        compiler (benchbuild.utils.cmd):
            Real compiler command we should call in the script.
        project (benchbuild.project.Project):
            The project this compiler will be for.
        python (str): Path to the python interpreter we should use.
        detect_project: Should we enable project detection or not.
    Returns (benchbuild.utils.cmd):
        Command of the new compiler we can call.
    """
    jinja_env = __create_jinja_env()
    tpl = jinja_env.get_template('wrapping/run_compiler.py.inc')
    # Pickle the real compiler command next to the wrapper script, and the
    # project into its own file; the generated script reloads both.
    pickled_cc = persist(
        compiler,
        filename=local.path(filepath).with_suffix(".benchbuild.cc", depth=0)
    )
    pickled_project = persist(project, suffix=".project")
    with open(filepath, 'w') as script:
        script.write(
            tpl.render(
                cc_f=pickled_cc,
                project_file=pickled_project,
                python=python,
                detect_project=detect_project
            )
        )
    chmod("+x", filepath)
    LOG.debug(
        "Placed wrapper in: %s for compiler %s", local.path(filepath),
        str(compiler)
    )
    LOG.debug("Placed project in: %s", local.path(pickled_project))
    LOG.debug("Placed compiler command in: %s", local.path(pickled_cc))
    return local[filepath]
def persist(id_obj, filename=None, suffix=None):
    """Persist an object in the filesystem.

    This will generate a pickled version of the given obj in the filename
    path. The file will be created, if it does not exist. If the file
    already exists, we will overwrite it.

    Args:
        id_obj (Any): An identifiable object you want to persist in the
            filesystem. Objects carrying a ``run_uuid`` attribute are
            identified by it; otherwise python's ``id()`` builtin is used.
        filename (str): Target path of the pickle; derived from the object's
            identifier and ``suffix`` when omitted.
        suffix (str): Extension used when deriving ``filename``
            (defaults to ".pickle").

    Returns:
        str: Absolute path of the written pickle file.
    """
    if suffix is None:
        suffix = ".pickle"
    if filename is None:
        # Derive an identifier only when we actually need a file name.
        if hasattr(id_obj, 'run_uuid'):
            ident = id_obj.run_uuid
        else:
            ident = str(id(id_obj))
        filename = "{obj_id}{suffix}".format(obj_id=ident, suffix=suffix)
    with open(filename, 'wb') as obj_file:
        dill.dump(id_obj, obj_file)
    return os.path.abspath(filename)
def load(filename: str) -> tp.Optional[tp.Any]:
    """Load a pickled obj from the filesystem.
    You better know what you expect from the given pickle, because we don't
    check it.
    Args:
        filename (str): The filename we load the object from.
    Returns:
        The object we were able to unpickle, else None.
    """
    if not os.path.exists(filename):
        # A missing file yields None instead of raising.
        LOG.error("load object - File '%s' does not exist.", filename)
        return None
    with open(filename, 'rb') as obj_file:
        return dill.load(obj_file)
However, the first argument is the actual binary name. Args: name: name of the python module runner: Function that should run the real binary sprefix: Prefix that should be used for commands. python: The python executable that should be used. name_filters: List of regex expressions that are used to filter the real project name. Make sure to include a match group named 'name' in the regex, e.g., [ r'foo(?P<name>.)-flt' ] Returns: plumbum command, readty to launch. Substitute a compiler with a script that hides CFLAGS & LDFLAGS. This will generate a wrapper script in the current directory and return a complete plumbum command to it. Args: filepath (str): Path to the wrapper script. compiler (benchbuild.utils.cmd): Real compiler command we should call in the script. project (benchbuild.project.Project): The project this compiler will be for. python (str): Path to the python interpreter we should use. detect_project: Should we enable project detection or not. Returns (benchbuild.utils.cmd): Command of the new compiler we can call. Persist an object in the filesystem. This will generate a pickled version of the given obj in the filename path. Objects shall provide an id() method to be able to use this persistence API. If not, we will use the id() builtin of python to generate an identifier for you. The file will be created, if it does not exist. If the file already exists, we will overwrite it. Args: id_obj (Any): An identifiable object you want to persist in the filesystem. Load a pickled obj from the filesystem. You better know what you expect from the given pickle, because we don't check it. Args: filename (str): The filename we load the object from. Returns: The object we were able to unpickle, else None. | 2.783812 | 3 |
Snake_game/main.py | Reeju2019/Game | 0 | 6613543 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from game import Game
import sys
def main():
    """Construct the Game, run its main loop, then exit the interpreter."""
    game = Game()
    game.run()
    sys.exit()
if __name__ == '__main__':
main() | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from game import Game
import sys
def main():
    """Run the snake game's main loop, then terminate the interpreter."""
    Game().run()
    sys.exit()
if __name__ == '__main__':
main() | en | 0.804859 | # -*- coding: utf-8 -*- Spyder Editor This is a temporary script file. | 1.683161 | 2 |
python/438_Find_All_Anagrams_in_a_String.py | dvlpsh/leetcode-1 | 4,416 | 6613544 | class Solution(object):
    def findAnagrams(self, s, p):
        """
        :type s: str
        :type p: str
        :rtype: List[int]

        Sliding window over ``s``: ``char_map`` counts how many of each
        character the current window still needs; ``count`` is the number
        of still-missing characters. O(len(s)) time, O(1) extra space
        (fixed 256-entry table).
        """
        res = []
        if s is None or p is None or len(s) == 0 or len(p) == 0:
            return res
        char_map = [0] * 256
        for c in p:
            char_map[ord(c)] += 1
        left, right, count = 0, 0, len(p)
        while right < len(s):
            # Take s[right] into the window; it satisfies a needed
            # character iff its remaining count is still positive.
            if char_map[ord(s[right])] >= 1:
                count -= 1
            char_map[ord(s[right])] -= 1
            right += 1
            if count == 0:
                res.append(left)
            # Window is full (length len(p)): release s[left] before
            # the next iteration.
            if right - left == len(p):
                if char_map[ord(s[left])] >= 0:
                    count += 1
                char_map[ord(s[left])] += 1
                left += 1
        return res
# def findAnagrams(self, s, p):
# if len(s) < len(p):
# return []
# res = []
# p_len = len(p)
# bit_map = []
# for _ in range(26):
# bit_map.append(0)
# for c in p:
# bit_map[ord(c) - ord('a')] += 1
# s_p = str(bit_map)
# for i in range(26):
# bit_map[i] = 0
# for i in range(p_len - 1):
# bit_map[ord(s[i]) - ord('a')] += 1
# for i in range(p_len - 1, len(s)):
# bit_map[ord(s[i]) - ord('a')] += 1
# if i - p_len >= 0:
# bit_map[ord(s[i - p_len]) - ord('a')] -= 1
# if str(bit_map) == s_p:
# res.append(i - p_len + 1)
# return res
# def findAnagrams(self, s, p):
# """
# :type s: str
# :type p: str
# :rtype: List[int]
# """
# res = []
# pCounter = collections.Counter(p)
# sCounter = collections.Counter(s[:len(p)-1])
# for i in range(len(p)-1,len(s)):
# sCounter[s[i]] += 1 # include a new char in the char_map
# if sCounter == pCounter: # This step is O(1), since there are at most 26 English letters
# res.append(i-len(p)+1) # append the starting index
# sCounter[s[i-len(p)+1]] -= 1 # decrease the count of oldest char in the window
# if sCounter[s[i-len(p)+1]] == 0:
# del sCounter[s[i-len(p)+1]] # remove the count if it is 0
# return res
| class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
res = []
if s is None or p is None or len(s) == 0 or len(p) == 0:
return res
char_map = [0] * 256
for c in p:
char_map[ord(c)] += 1
left, right, count = 0, 0, len(p)
while right < len(s):
if char_map[ord(s[right])] >= 1:
count -= 1
char_map[ord(s[right])] -= 1
right += 1
if count == 0:
res.append(left)
if right - left == len(p):
if char_map[ord(s[left])] >= 0:
count += 1
char_map[ord(s[left])] += 1
left += 1
return res
# def findAnagrams(self, s, p):
# if len(s) < len(p):
# return []
# res = []
# p_len = len(p)
# bit_map = []
# for _ in range(26):
# bit_map.append(0)
# for c in p:
# bit_map[ord(c) - ord('a')] += 1
# s_p = str(bit_map)
# for i in range(26):
# bit_map[i] = 0
# for i in range(p_len - 1):
# bit_map[ord(s[i]) - ord('a')] += 1
# for i in range(p_len - 1, len(s)):
# bit_map[ord(s[i]) - ord('a')] += 1
# if i - p_len >= 0:
# bit_map[ord(s[i - p_len]) - ord('a')] -= 1
# if str(bit_map) == s_p:
# res.append(i - p_len + 1)
# return res
# def findAnagrams(self, s, p):
# """
# :type s: str
# :type p: str
# :rtype: List[int]
# """
# res = []
# pCounter = collections.Counter(p)
# sCounter = collections.Counter(s[:len(p)-1])
# for i in range(len(p)-1,len(s)):
# sCounter[s[i]] += 1 # include a new char in the char_map
# if sCounter == pCounter: # This step is O(1), since there are at most 26 English letters
# res.append(i-len(p)+1) # append the starting index
# sCounter[s[i-len(p)+1]] -= 1 # decrease the count of oldest char in the window
# if sCounter[s[i-len(p)+1]] == 0:
# del sCounter[s[i-len(p)+1]] # remove the count if it is 0
# return res
| en | 0.394962 | :type s: str :type p: str :rtype: List[int] # def findAnagrams(self, s, p): # if len(s) < len(p): # return [] # res = [] # p_len = len(p) # bit_map = [] # for _ in range(26): # bit_map.append(0) # for c in p: # bit_map[ord(c) - ord('a')] += 1 # s_p = str(bit_map) # for i in range(26): # bit_map[i] = 0 # for i in range(p_len - 1): # bit_map[ord(s[i]) - ord('a')] += 1 # for i in range(p_len - 1, len(s)): # bit_map[ord(s[i]) - ord('a')] += 1 # if i - p_len >= 0: # bit_map[ord(s[i - p_len]) - ord('a')] -= 1 # if str(bit_map) == s_p: # res.append(i - p_len + 1) # return res # def findAnagrams(self, s, p): # """ # :type s: str # :type p: str # :rtype: List[int] # """ # res = [] # pCounter = collections.Counter(p) # sCounter = collections.Counter(s[:len(p)-1]) # for i in range(len(p)-1,len(s)): # sCounter[s[i]] += 1 # include a new char in the char_map # if sCounter == pCounter: # This step is O(1), since there are at most 26 English letters # res.append(i-len(p)+1) # append the starting index # sCounter[s[i-len(p)+1]] -= 1 # decrease the count of oldest char in the window # if sCounter[s[i-len(p)+1]] == 0: # del sCounter[s[i-len(p)+1]] # remove the count if it is 0 # return res | 3.175392 | 3 |
lorikeet/api_views_test_payment_method.py | excitedleigh/lorikeet | 6 | 6613545 | <gh_stars>1-10
from json import dumps, loads
import pytest
from shop import models as smodels
@pytest.mark.django_db
def test_add_payment_method(client, cart):
resp = client.post(
"/_cart/new-payment-method/",
dumps({"type": "PipeCard", "data": {"card_token": "<PASSWORD>"}}),
content_type="application/json",
)
assert resp.status_code == 201
assert smodels.PipeCard.objects.count() == 1
cart.refresh_from_db()
assert cart.payment_method is not None
@pytest.mark.django_db
def test_add_payment_method_logged_in(admin_user, admin_client, admin_cart):
resp = admin_client.post(
"/_cart/new-payment-method/",
dumps({"type": "PipeCard", "data": {"card_token": "<PASSWORD>"}}),
content_type="application/json",
)
assert resp.status_code == 201
assert smodels.PipeCard.objects.count() == 1
assert smodels.PipeCard.objects.first().user == admin_user
admin_cart.refresh_from_db()
assert admin_cart.payment_method is not None
@pytest.mark.django_db
def test_view_payment_method(client, cart):
cart.payment_method = smodels.PipeCard.objects.create(card_id="Visa4242")
cart.save()
url = "/_cart/payment-method/{}/".format(cart.payment_method_id)
resp = client.get(url)
data = loads(resp.content.decode("utf-8"))
assert data == {
"type": "PipeCard",
"selected": True,
"data": {"brand": "Visa", "last4": "4242"},
"url": url,
}
@pytest.mark.django_db
def test_view_owned_unselected_payment_method(admin_user, admin_client):
pm = smodels.PipeCard.objects.create(card_id="Visa4242", user=admin_user)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.get(url)
data = loads(resp.content.decode("utf-8"))
assert data == {
"type": "PipeCard",
"selected": False,
"data": {"brand": "Visa", "last4": "4242"},
"url": url,
}
@pytest.mark.django_db
def test_view_unowned_payment_method(admin_user, client):
pm = smodels.PipeCard.objects.create(card_id="Visa4242", user=admin_user)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = client.get(url)
assert resp.status_code == 404
@pytest.mark.django_db
def test_view_inactive_payment_method(admin_user, admin_client):
pm = smodels.PipeCard.objects.create(
card_id="Visa4242", user=admin_user, active=False
)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.get(url)
assert resp.status_code == 404
@pytest.mark.django_db
def test_select_payment_method(admin_user, admin_client, admin_cart):
pm = smodels.PipeCard.objects.create(card_id="Visa4242", user=admin_user)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.patch(
url, dumps({"selected": True}), content_type="application/json"
)
assert resp.status_code == 200
admin_cart.refresh_from_db()
assert admin_cart.payment_method_id == pm.id
@pytest.mark.django_db
def test_select_inactive_payment_method(admin_user, admin_client, admin_cart):
pm = smodels.PipeCard.objects.create(
card_id="Visa4242", user=admin_user, active=False
)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.patch(
url, dumps({"selected": True}), content_type="application/json"
)
assert resp.status_code == 404
admin_cart.refresh_from_db()
assert admin_cart.payment_method_id != pm.id
@pytest.mark.django_db
def test_delete_payment_method(client, cart):
pm = smodels.PipeCard.objects.create(card_id="Visa4242")
cart.payment_method = pm
cart.save()
url = "/_cart/payment-method/{}/".format(cart.payment_method_id)
resp = client.delete(url)
assert resp.status_code == 204
cart.refresh_from_db()
assert cart.payment_method is None
pm.refresh_from_db()
assert not pm.active
| from json import dumps, loads
import pytest
from shop import models as smodels
@pytest.mark.django_db
def test_add_payment_method(client, cart):
    # POSTing a new payment method as an anonymous visitor should create
    # the PipeCard row and attach it to the session cart.
    resp = client.post(
        "/_cart/new-payment-method/",
        dumps({"type": "PipeCard", "data": {"card_token": "<PASSWORD>"}}),
        content_type="application/json",
    )
    assert resp.status_code == 201
    assert smodels.PipeCard.objects.count() == 1
    cart.refresh_from_db()
    assert cart.payment_method is not None
@pytest.mark.django_db
def test_add_payment_method_logged_in(admin_user, admin_client, admin_cart):
resp = admin_client.post(
"/_cart/new-payment-method/",
dumps({"type": "PipeCard", "data": {"card_token": "<PASSWORD>"}}),
content_type="application/json",
)
assert resp.status_code == 201
assert smodels.PipeCard.objects.count() == 1
assert smodels.PipeCard.objects.first().user == admin_user
admin_cart.refresh_from_db()
assert admin_cart.payment_method is not None
@pytest.mark.django_db
def test_view_payment_method(client, cart):
cart.payment_method = smodels.PipeCard.objects.create(card_id="Visa4242")
cart.save()
url = "/_cart/payment-method/{}/".format(cart.payment_method_id)
resp = client.get(url)
data = loads(resp.content.decode("utf-8"))
assert data == {
"type": "PipeCard",
"selected": True,
"data": {"brand": "Visa", "last4": "4242"},
"url": url,
}
@pytest.mark.django_db
def test_view_owned_unselected_payment_method(admin_user, admin_client):
pm = smodels.PipeCard.objects.create(card_id="Visa4242", user=admin_user)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.get(url)
data = loads(resp.content.decode("utf-8"))
assert data == {
"type": "PipeCard",
"selected": False,
"data": {"brand": "Visa", "last4": "4242"},
"url": url,
}
@pytest.mark.django_db
def test_view_unowned_payment_method(admin_user, client):
pm = smodels.PipeCard.objects.create(card_id="Visa4242", user=admin_user)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = client.get(url)
assert resp.status_code == 404
@pytest.mark.django_db
def test_view_inactive_payment_method(admin_user, admin_client):
pm = smodels.PipeCard.objects.create(
card_id="Visa4242", user=admin_user, active=False
)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.get(url)
assert resp.status_code == 404
@pytest.mark.django_db
def test_select_payment_method(admin_user, admin_client, admin_cart):
pm = smodels.PipeCard.objects.create(card_id="Visa4242", user=admin_user)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.patch(
url, dumps({"selected": True}), content_type="application/json"
)
assert resp.status_code == 200
admin_cart.refresh_from_db()
assert admin_cart.payment_method_id == pm.id
@pytest.mark.django_db
def test_select_inactive_payment_method(admin_user, admin_client, admin_cart):
pm = smodels.PipeCard.objects.create(
card_id="Visa4242", user=admin_user, active=False
)
url = "/_cart/payment-method/{}/".format(pm.id)
resp = admin_client.patch(
url, dumps({"selected": True}), content_type="application/json"
)
assert resp.status_code == 404
admin_cart.refresh_from_db()
assert admin_cart.payment_method_id != pm.id
@pytest.mark.django_db
def test_delete_payment_method(client, cart):
    # Deleting the selected payment method should detach it from the cart
    # and soft-delete it (active=False) rather than remove the row.
    pm = smodels.PipeCard.objects.create(card_id="Visa4242")
    cart.payment_method = pm
    cart.save()
    url = "/_cart/payment-method/{}/".format(cart.payment_method_id)
    resp = client.delete(url)
    assert resp.status_code == 204
    cart.refresh_from_db()
    assert cart.payment_method is None
    pm.refresh_from_db()
    assert not pm.active
wagtail/search/backends/database/__init__.py | melisayu/wagtail | 8,851 | 6613546 | <reponame>melisayu/wagtail<gh_stars>1000+
from django.db import connection
def SearchBackend(params):
    """
    Returns the appropriate search backend for the current 'default' database system
    """
    if connection.vendor == 'postgresql':
        from .postgres.postgres import PostgresSearchBackend
        return PostgresSearchBackend(params)
    elif connection.vendor == 'mysql':
        from .mysql.mysql import MySQLSearchBackend
        return MySQLSearchBackend(params)
    elif connection.vendor == 'sqlite':
        import sqlite3
        if sqlite3.sqlite_version_info < (3, 19, 0):
            # Prior to version 3.19, SQLite doesn't support FTS5 queries with column filters ('{column_1 column_2} : query'), so we need to fall back to the dummy fallback backend.
            from .fallback import DatabaseSearchBackend
            return DatabaseSearchBackend(params)
        else:
            from .sqlite.sqlite import SQLiteSearchBackend
            return SQLiteSearchBackend(params)
    else:
        # Any other database vendor gets the dummy fallback backend.
        from .fallback import DatabaseSearchBackend
        return DatabaseSearchBackend(params)
| from django.db import connection
def SearchBackend(params):
    """
    Returns the appropriate search backend for the current 'default' database system
    """
    vendor = connection.vendor
    if vendor == 'postgresql':
        from .postgres.postgres import PostgresSearchBackend
        return PostgresSearchBackend(params)
    if vendor == 'mysql':
        from .mysql.mysql import MySQLSearchBackend
        return MySQLSearchBackend(params)
    if vendor == 'sqlite':
        import sqlite3
        # FTS5 queries with column filters ('{column_1 column_2} : query')
        # require SQLite >= 3.19; older versions fall through to the dummy
        # fallback backend below.
        if sqlite3.sqlite_version_info >= (3, 19, 0):
            from .sqlite.sqlite import SQLiteSearchBackend
            return SQLiteSearchBackend(params)
    # Unknown vendors (and old SQLite) get the dummy fallback backend.
    from .fallback import DatabaseSearchBackend
    return DatabaseSearchBackend(params)
pycircuit/circuit/constants.py | michaelnt/pycircuit | 25 | 6613547 | <gh_stars>10-100
# -*- coding: latin-1 -*-
# Copyright (c) 2008 Pycircuit Development Team
# See LICENSE for details.
"""
Physical constants
"""
kboltzmann=1.38e-23 # Boltzmann's constant
eps0 = 8.8542e-12 # Vacuum permittivity
epsRSi = 11.7 # Relative permittivity of Si
epsRSiO2 = 3.9 # Relative permittivity of SiO2
qelectron=1.602e-19 # Elementary charge
| # -*- coding: latin-1 -*-
# Copyright (c) 2008 Pycircuit Development Team
# See LICENSE for details.
"""
Physical constants
"""
kboltzmann=1.38e-23 # Boltzmann's constant
eps0 = 8.8542e-12 # Vacuum permittivity
epsRSi = 11.7 # Relative permittivity of Si
epsRSiO2 = 3.9 # Relative permittivity of SiO2
qelectron=1.602e-19 # Elementary charge | en | 0.734876 | # -*- coding: latin-1 -*- # Copyright (c) 2008 Pycircuit Development Team # See LICENSE for details. Physical constants # Boltzmann's constant # Vacuum permittivity # Relative permittivity of Si # Relative permittivity of SiO2 # Elementary charge | 1.544826 | 2 |
app.py | noreederek/weathernn | 0 | 6613548 | import requests
from flask import Flask, render_template, request, url_for, redirect
app = Flask(__name__)
app.config['DEBUG'] = True
ACCESS_TOKEN = '<KEY>'
DEFAULT_CITY = 'Нижний Новгород'
def return_weather(access_key, city):
params = {
'access_key': access_key,
'query': city
}
error_flag = False
weather = {
'city' : " - ",
'temperature' : "0",
'precip' : " - ",
'pressure' : " - ",
'weather_descriptions' : " - ",
'icon' : " - ",
'clothes' : "else"
}
api_result = requests.get('http://api.weatherstack.com/current', params)
api_response = api_result.json()
try:
temperature = int(api_response['current']['temperature'])
clothes = "else"
def get_temp_description(temp):
return {
temp < -15: 'minus15',
-15 <= temp < 0: 'minus0',
0 <= temp < 10: 'plus10',
10 <= temp < 20: 'plus20',
20 <= temp: 'plus50'
}[True]
try:
clothes = get_temp_description(temperature)
except TypeError:
clothes = "else"
weather = {
'city' : api_response['location']['name'],
'temperature' : api_response['current']['temperature'],
'precip' : api_response['current']['precip'],
'pressure' : api_response['current']['pressure'],
'weather_descriptions' : api_response['current']['weather_descriptions'][0],
'icon' : api_response['current']['weather_icons'][0],
'clothes' : clothes,
}
except KeyError:
error_flag = True
return weather, error_flag
@app.route('/', methods=['GET', 'POST'])
def index():
weather_default, error_flag_default = return_weather(ACCESS_TOKEN, DEFAULT_CITY)
if request.method == 'POST':
city = request.form.get('city')
print(city)
weather, error_flag = return_weather(ACCESS_TOKEN, city)
print(weather)
if error_flag == True:
return render_template('error.html')
else:
return render_template('other_city.html', weather=weather)
if error_flag_default == True:
return render_template('error_on_default.html')
return render_template('index.html', weather=weather_default)
@app.route('/faq', methods=['GET'])
def faq():
return render_template('faq.html')
@app.errorhandler(404)
def error_404(e):
return render_template('404.html'), 404
@app.errorhandler(403)
def error_403(e):
return render_template('403.html'), 403
@app.errorhandler(500)
def error_500(e):
return render_template('500.html'), 500
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True, threaded=True) | import requests
from flask import Flask, render_template, request, url_for, redirect
app = Flask(__name__)
app.config['DEBUG'] = True
ACCESS_TOKEN = '<KEY>'
DEFAULT_CITY = 'Нижний Новгород'
def _clothes_for_temperature(temp):
    """Map a temperature (int) to the clothing-advice bucket the templates
    use: minus15 / minus0 / plus10 / plus20 / plus50."""
    if temp < -15:
        return 'minus15'
    if temp < 0:
        return 'minus0'
    if temp < 10:
        return 'plus10'
    if temp < 20:
        return 'plus20'
    return 'plus50'


def return_weather(access_key, city):
    """Fetch current weather for *city* from weatherstack.

    Args:
        access_key: weatherstack API access key.
        city: City name to query (free-form text from the form).

    Returns:
        tuple(dict, bool): the weather dict consumed by the templates and
        an error flag. The flag is True when the API response lacks the
        expected fields (unknown city, quota error, ...); in that case the
        dict contains placeholder values.
    """
    params = {
        'access_key': access_key,
        'query': city
    }
    error_flag = False
    # Placeholders shown when the API call does not yield usable data.
    weather = {
        'city': " - ",
        'temperature': "0",
        'precip': " - ",
        'pressure': " - ",
        'weather_descriptions': " - ",
        'icon': " - ",
        'clothes': "else"
    }
    api_result = requests.get('http://api.weatherstack.com/current', params)
    api_response = api_result.json()
    try:
        current = api_response['current']
        temperature = int(current['temperature'])
        weather = {
            'city': api_response['location']['name'],
            'temperature': current['temperature'],
            'precip': current['precip'],
            'pressure': current['pressure'],
            'weather_descriptions': current['weather_descriptions'][0],
            'icon': current['weather_icons'][0],
            'clothes': _clothes_for_temperature(temperature),
        }
    except KeyError:
        # The API signals errors by omitting 'current'/'location'.
        error_flag = True
    return weather, error_flag
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: default-city weather on GET, searched city on POST.

    Renders ``error.html`` / ``error_on_default.html`` when the weather
    lookup fails for the searched / default city respectively.
    """
    if request.method == 'POST':
        # City name submitted from the search form.
        city = request.form.get('city')
        weather, error_flag = return_weather(ACCESS_TOKEN, city)
        if error_flag:
            return render_template('error.html')
        return render_template('other_city.html', weather=weather)
    # GET only: the original fetched the default city's weather (an HTTP
    # round trip) on every request, including POSTs that never used it.
    weather_default, error_flag_default = return_weather(ACCESS_TOKEN, DEFAULT_CITY)
    if error_flag_default:
        return render_template('error_on_default.html')
    return render_template('index.html', weather=weather_default)
@app.route('/faq', methods=['GET'])
def faq():
    # Static FAQ page; the template needs no context data.
    return render_template('faq.html')
@app.errorhandler(404)
def error_404(e):
    # Custom "page not found" template, preserving the 404 status code.
    return render_template('404.html'), 404
@app.errorhandler(403)
def error_403(e):
    # Custom "forbidden" template, preserving the 403 status code.
    return render_template('403.html'), 403
@app.errorhandler(500)
def error_500(e):
    # Custom internal-error template, preserving the 500 status code.
    return render_template('500.html'), 500
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # reloader — presumably development-only; confirm before exposing the
    # server on 0.0.0.0.
    app.run(host='0.0.0.0', port=8080, debug=True, threaded=True)
rsvd.py | minohara/MTRA | 0 | 6613549 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
List Reserved Seat in MTRA database
by <NAME>. Takushoku Univ. (2021)
"""
import MySQLdb

conn = MySQLdb.connect(
    user='mtra',
    passwd='<PASSWORD>',
    host='localhost',
    db='mtra',
    charset='utf8mb4')
try:
    # First pass: show the available screens and remember id -> name.
    cursor = conn.cursor()
    cursor.execute("select * from screen;")
    print("ID Name")
    # Keyed by the real screen id: the original used a positional list,
    # which shifts every name if ids are 1-based or non-contiguous.
    scrn_name = {}
    for scrn in cursor.fetchall():
        scrn_name[scrn[0]] = scrn[1]
        print("%2d. %s" % scrn)
    scrn_id = int(input("Input a screen ID: "))
    cursor.close()
    # Second pass: reservation counts per seat for the chosen screen.
    cursor = conn.cursor()
    # Driver-side %s binding escapes the value instead of building SQL by
    # string interpolation.
    cursor.execute("""
    select screen_id,`row`,`column`,count(*)
    from reservation join seat on reservation.seat_id = seat.id
    where screen_id=%s group by seat_id
    order by count(*);
    """, (scrn_id,))
    for seat in cursor.fetchall():
        print("\"%s\"\t%s%d\t%6d" % (scrn_name[seat[0]], seat[1], seat[2], seat[3]))
    cursor.close()
except MySQLdb.Error as e:
    print("MySQLdb.Error: ", e)
finally:
    # Always release the connection, even if a non-MySQLdb exception
    # (e.g. ValueError from int(input())) escapes the try block.
    conn.close()
| # -*- coding: utf-8 -*-
"""
List Reserved Seat in MTRA database
by <NAME>. Takushoku Univ. (2021)
"""
import MySQLdb
conn = MySQLdb.connect(
user='mtra',
passwd='<PASSWORD>',
host='localhost',
db='mtra',
charset='utf8mb4')
try:
cursor=conn.cursor()
cursor.execute("select * from screen;")
print("ID Name")
scrn_name = []
for scrn in cursor.fetchall():
scrn_name.append(scrn[1])
print("%2d. %s" % scrn)
scrn_id = int(input("Input a screen ID: "))
cursor.close()
cursor=conn.cursor()
cursor.execute("""
select screen_id,`row`,`column`,count(*)
from reservation join seat on reservation.seat_id = seat.id
where screen_id=%d group by seat_id
order by count(*);
""" % scrn_id)
for seat in cursor.fetchall():
print("\"%s\"\t%s%d\t%6d" % (scrn_name[seat[0]], seat[1], seat[2], seat[3]))
cursor.close()
except MySQLdb.Error as e:
print("MySQLdb.Error: ", e)
conn.close() | en | 0.834344 | # -*- coding: utf-8 -*- List Reserved Seat in MTRA database by <NAME>. Takushoku Univ. (2021) select screen_id,`row`,`column`,count(*) from reservation join seat on reservation.seat_id = seat.id where screen_id=%d group by seat_id order by count(*); | 3.592488 | 4 |
image_comparison/imagecompare.py | w13b3/do_not_use | 0 | 6613550 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imagecompare.py
import numpy as np
from concurrent import futures
from functools import partial
def gaussian2d(shape: tuple = (5,), sigma: tuple = (1.5,)) -> np.ndarray:
    """Build a 2-D Gaussian kernel.

    A single-element ``shape``/``sigma`` tuple applies to both axes;
    two-element tuples produce an anisotropic (asymmetric) kernel.
    Note the returned array has shape ``(shape[1], shape[0])`` — x is the
    row-length, y the column count, as in the original.
    """
    if len(shape) == 1:
        nx = ny = shape[0]
    else:
        nx, ny = shape[:2]
    if len(sigma) == 1:
        sx = sy = sigma[0]
    else:
        sx, sy = sigma[:2]
    # Coordinates centred on the kernel midpoint (integer division, so even
    # sizes centre on the upper-left of the middle cells).
    xs = np.arange(nx, dtype=float) - nx // 2
    ys = (np.arange(ny, dtype=float) - ny // 2)[:, np.newaxis]
    exponent = xs ** 2 / (2 * sx ** 2) + ys ** 2 / (2 * sy ** 2)
    return 1 / (2 * np.pi * sx * sy) * np.exp(-exponent)
def convolve_array(arr: np.ndarray, conv_filter: np.ndarray) -> np.ndarray:
    """Convolve ``arr`` with ``conv_filter`` across all channels.

    2-D inputs are convolved directly; higher-dimensional inputs are split
    into channel planes, filtered concurrently in a thread pool, and
    stacked back together.

    acknowledgement:
    https://songhuiming.github.io/pages/2017/04/16/convolve-correlate-and-image-process-in-numpy/
    """
    if arr.ndim <= 2:
        # Single channel: nothing to fan out.
        return convolve2d(arr, conv_filter)
    # Bind the filter once so the pool only has to pass each channel plane.
    apply_filter = partial(convolve2d, conv_filter=conv_filter)
    # NOTE(review): planes are taken as range(arr.ndim), which equals the
    # channel count only for 3-D (H, W, C=3) inputs — confirm for other
    # ranks before generalising.
    planes = [arr[:, :, axis] for axis in range(arr.ndim)]
    with futures.ThreadPoolExecutor() as pool:
        filtered = list(pool.map(apply_filter, planes))
    return np.stack(filtered, axis=2)
def convolve2d(arr: np.ndarray, conv_filter: np.ndarray) -> np.ndarray:
    """'valid'-mode 2-D filtering via a strided sliding-window view.

    acknowledgement:
    https://stackoverflow.com/users/7567938/allosteric
    """
    if arr.ndim > 2:
        raise ValueError("Please input the arr with 2 dimensions")
    # Output size for 'valid' mode, then the 4-D window-view shape.
    out_shape = tuple(np.subtract(arr.shape, conv_filter.shape) + 1)
    windows = np.lib.stride_tricks.as_strided(
        arr, shape=out_shape + conv_filter.shape, strides=arr.strides * 2)
    # NOTE(review): the double transpose applies an asymmetric filter
    # transposed — harmless for the symmetric Gaussian used here; confirm
    # before reusing with asymmetric kernels.
    return np.einsum('ij,ijkl->kl', conv_filter, windows.transpose()).transpose()
def structural_similarity(array1: np.ndarray, array2: np.ndarray, filter_size: int = 11, filter_sigma: float = 1.5,
                          k1: float = 0.01, k2: float = 0.03, max_val: int = 255) -> (np.float64, np.ndarray):
    """Compute SSIM between two equally-shaped arrays.

    Returns ``(mssim, ssim)``: the scalar mean SSIM and the per-element
    SSIM map.  ``max_val`` is the dynamic range of the pixel values (255
    for 8-bit images); ``k1``/``k2`` are the usual SSIM stabilisation
    constants.  Raises ValueError if the shapes differ.
    """
    if array1.shape != array2.shape:
        raise ValueError('Input arrays must have the same shape')
    array1 = array1.astype(np.float64)
    array2 = array2.astype(np.float64)
    height, width = array1.shape[:2]
    if filter_size: # is 1 or more
        # filter size can't be larger than height or width of arrays.
        size = min(filter_size, height, width)
        # scale down sigma if a smaller filter size is used.
        sigma = size * filter_sigma / filter_size if filter_size else 0
        window = gaussian2d(shape=(size,), sigma=(sigma,))
        # compute weighted means
        mu1 = convolve_array(array1, window)
        mu2 = convolve_array(array2, window)
        # compute weighted covariances
        sigma_11 = convolve_array(np.multiply(array1, array1), window)
        sigma_22 = convolve_array(np.multiply(array2, array2), window)
        sigma_12 = convolve_array(np.multiply(array1, array2), window)
    else: # Empty blur kernel so no need to convolve.
        mu1, mu2 = array1, array2
        sigma_11 = np.multiply(array1, array1)
        sigma_22 = np.multiply(array2, array2)
        sigma_12 = np.multiply(array1, array2)
    # compute weighted variances using the E[X^2] - E[X]^2 identity
    mu_11 = np.multiply(mu1, mu1)
    mu_22 = np.multiply(mu2, mu2)
    mu_12 = np.multiply(mu1, mu2)
    sigma_11 = np.subtract(sigma_11, mu_11)
    sigma_22 = np.subtract(sigma_22, mu_22)
    sigma_12 = np.subtract(sigma_12, mu_12)
    # constants to avoid numerical instabilities close to zero
    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    v1 = 2.0 * sigma_12 + c2
    v2 = sigma_11 + sigma_22 + c2
    # Numerator of SSIM
    num_ssim = (2 * mu_12 + c1) * v1 # -> np.ndarray
    # Denominator of SSIM
    den_ssim = (mu_11 + mu_22 + c1) * v2 # -> np.ndarray
    # SSIM (contrast sensitivity)
    ssim = num_ssim / den_ssim # -> np.ndarray
    # MeanSSIM
    mssim = np.mean(ssim) # -> np.float64
    return mssim, ssim # -> (np.float64, np.ndarray)
if __name__ == '__main__':
    # Ad-hoc benchmark: time structural_similarity on two sample images.
    print("start\n")
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # logging.getLogger("logging").setLevel(logging.DEBUG)
    logging.captureWarnings(True)
    import cv2
    import timeit
    image1 = "index1.jpeg"
    image2 = "index2.jpeg"
    # Names are rebound from path strings to the decoded BGR arrays.
    image1 = cv2.imread(image1) # to array
    image2 = cv2.imread(image2)
    # from skimage.metrics import structural_similarity as ssim
    # print(ssim(image1, image1, multichannel=True)) # 1.0
    # print(ssim(image1, image2, multichannel=True)) # 0.2996981914517261
    # from __main__ import structural_similarity
    # print(structural_similarity(image1, image1)[0]) # 1.0
    # print(structural_similarity(image1, image2)[0]) # 0.30561782186046865
    loops = 10
    result = timeit.timeit(
        stmt="structural_similarity(image1, image2); print('running')",
        setup="from __main__ import structural_similarity",
        globals=globals(),
        number=loops
    )
    print(f"total time: {result}sec, per loop: {result / loops}")
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imagecompare.py
import numpy as np
from concurrent import futures
from functools import partial
def gaussian2d(shape: tuple = (5,), sigma: tuple = (1.5,)) -> np.ndarray:
    """Build a 2-D Gaussian kernel.

    A single-element ``shape``/``sigma`` tuple applies to both axes;
    two-element tuples produce an anisotropic (asymmetric) kernel.
    Note the returned array has shape ``(shape[1], shape[0])`` — x is the
    row-length, y the column count, as in the original.
    """
    if len(shape) == 1:
        nx = ny = shape[0]
    else:
        nx, ny = shape[:2]
    if len(sigma) == 1:
        sx = sy = sigma[0]
    else:
        sx, sy = sigma[:2]
    # Coordinates centred on the kernel midpoint (integer division, so even
    # sizes centre on the upper-left of the middle cells).
    xs = np.arange(nx, dtype=float) - nx // 2
    ys = (np.arange(ny, dtype=float) - ny // 2)[:, np.newaxis]
    exponent = xs ** 2 / (2 * sx ** 2) + ys ** 2 / (2 * sy ** 2)
    return 1 / (2 * np.pi * sx * sy) * np.exp(-exponent)
def convolve_array(arr: np.ndarray, conv_filter: np.ndarray) -> np.ndarray:
    """Convolve ``arr`` with ``conv_filter`` across all channels.

    2-D inputs are convolved directly; higher-dimensional inputs are split
    into channel planes, filtered concurrently in a thread pool, and
    stacked back together.

    acknowledgement:
    https://songhuiming.github.io/pages/2017/04/16/convolve-correlate-and-image-process-in-numpy/
    """
    if arr.ndim <= 2:
        # Single channel: nothing to fan out.
        return convolve2d(arr, conv_filter)
    # Bind the filter once so the pool only has to pass each channel plane.
    apply_filter = partial(convolve2d, conv_filter=conv_filter)
    # NOTE(review): planes are taken as range(arr.ndim), which equals the
    # channel count only for 3-D (H, W, C=3) inputs — confirm for other
    # ranks before generalising.
    planes = [arr[:, :, axis] for axis in range(arr.ndim)]
    with futures.ThreadPoolExecutor() as pool:
        filtered = list(pool.map(apply_filter, planes))
    return np.stack(filtered, axis=2)
def convolve2d(arr: np.ndarray, conv_filter: np.ndarray) -> np.ndarray:
    """'valid'-mode 2-D filtering via a strided sliding-window view.

    acknowledgement:
    https://stackoverflow.com/users/7567938/allosteric
    """
    if arr.ndim > 2:
        raise ValueError("Please input the arr with 2 dimensions")
    # Output size for 'valid' mode, then the 4-D window-view shape.
    out_shape = tuple(np.subtract(arr.shape, conv_filter.shape) + 1)
    windows = np.lib.stride_tricks.as_strided(
        arr, shape=out_shape + conv_filter.shape, strides=arr.strides * 2)
    # NOTE(review): the double transpose applies an asymmetric filter
    # transposed — harmless for the symmetric Gaussian used here; confirm
    # before reusing with asymmetric kernels.
    return np.einsum('ij,ijkl->kl', conv_filter, windows.transpose()).transpose()
def structural_similarity(array1: np.ndarray, array2: np.ndarray, filter_size: int = 11, filter_sigma: float = 1.5,
                          k1: float = 0.01, k2: float = 0.03, max_val: int = 255) -> (np.float64, np.ndarray):
    """Compute SSIM between two equally-shaped arrays.

    Returns ``(mssim, ssim)``: the scalar mean SSIM and the per-element
    SSIM map.  ``max_val`` is the dynamic range of the pixel values (255
    for 8-bit images); ``k1``/``k2`` are the usual SSIM stabilisation
    constants.  Raises ValueError if the shapes differ.
    """
    if array1.shape != array2.shape:
        raise ValueError('Input arrays must have the same shape')
    array1 = array1.astype(np.float64)
    array2 = array2.astype(np.float64)
    height, width = array1.shape[:2]
    if filter_size: # is 1 or more
        # filter size can't be larger than height or width of arrays.
        size = min(filter_size, height, width)
        # scale down sigma if a smaller filter size is used.
        sigma = size * filter_sigma / filter_size if filter_size else 0
        window = gaussian2d(shape=(size,), sigma=(sigma,))
        # compute weighted means
        mu1 = convolve_array(array1, window)
        mu2 = convolve_array(array2, window)
        # compute weighted covariances
        sigma_11 = convolve_array(np.multiply(array1, array1), window)
        sigma_22 = convolve_array(np.multiply(array2, array2), window)
        sigma_12 = convolve_array(np.multiply(array1, array2), window)
    else: # Empty blur kernel so no need to convolve.
        mu1, mu2 = array1, array2
        sigma_11 = np.multiply(array1, array1)
        sigma_22 = np.multiply(array2, array2)
        sigma_12 = np.multiply(array1, array2)
    # compute weighted variances using the E[X^2] - E[X]^2 identity
    mu_11 = np.multiply(mu1, mu1)
    mu_22 = np.multiply(mu2, mu2)
    mu_12 = np.multiply(mu1, mu2)
    sigma_11 = np.subtract(sigma_11, mu_11)
    sigma_22 = np.subtract(sigma_22, mu_22)
    sigma_12 = np.subtract(sigma_12, mu_12)
    # constants to avoid numerical instabilities close to zero
    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    v1 = 2.0 * sigma_12 + c2
    v2 = sigma_11 + sigma_22 + c2
    # Numerator of SSIM
    num_ssim = (2 * mu_12 + c1) * v1 # -> np.ndarray
    # Denominator of SSIM
    den_ssim = (mu_11 + mu_22 + c1) * v2 # -> np.ndarray
    # SSIM (contrast sensitivity)
    ssim = num_ssim / den_ssim # -> np.ndarray
    # MeanSSIM
    mssim = np.mean(ssim) # -> np.float64
    return mssim, ssim # -> (np.float64, np.ndarray)
if __name__ == '__main__':
    # Ad-hoc benchmark: time structural_similarity on two sample images.
    print("start\n")
    import logging
    logging.basicConfig(level=logging.DEBUG)
    # logging.getLogger("logging").setLevel(logging.DEBUG)
    logging.captureWarnings(True)
    import cv2
    import timeit
    image1 = "index1.jpeg"
    image2 = "index2.jpeg"
    # Names are rebound from path strings to the decoded BGR arrays.
    image1 = cv2.imread(image1) # to array
    image2 = cv2.imread(image2)
    # from skimage.metrics import structural_similarity as ssim
    # print(ssim(image1, image1, multichannel=True)) # 1.0
    # print(ssim(image1, image2, multichannel=True)) # 0.2996981914517261
    # from __main__ import structural_similarity
    # print(structural_similarity(image1, image1)[0]) # 1.0
    # print(structural_similarity(image1, image2)[0]) # 0.30561782186046865
    loops = 10
    result = timeit.timeit(
        stmt="structural_similarity(image1, image2); print('running')",
        setup="from __main__ import structural_similarity",
        globals=globals(),
        number=loops
    )
    print(f"total time: {result}sec, per loop: {result / loops}")
| en | 0.616175 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # imagecompare.py create a gaussian 2d array shape and sigma tuples with diferent values can create an asymmetric gauss array # faster than np.meshgrid Convolves array with conv_filter, over all channels acknowledgement: https://songhuiming.github.io/pages/2017/04/16/convolve-correlate-and-image-process-in-numpy/ # no `depth` and probably 2d array # function is faster with concurent.futures and functools.partial # with futures.ProcessPoolExecutor() as ex: # slow (?) # fast # arr_stack = [ # slow comprehension list # convolve2d(arr[:, :, dim], conv_filter) # for dim in range(arr.ndim) # ] # -> np.ndarray convole2d function acknowledgement: https://stackoverflow.com/users/7567938/allosteric # is 1 or more # filter size can't be larger than height or width of arrays. # scale down sigma if a smaller filter size is used. # compute weighted means # compute weighted covariances # Empty blur kernel so no need to convolve. # compute weighted variances # constants to avoid numerical instabilities close to zero # Numerator of SSIM # -> np.ndarray # Denominator of SSIM # -> np.ndarray # SSIM (contrast sensitivity) # -> np.ndarray # MeanSSIM # -> np.float64 # -> (np.float64, np.ndarray) # logging.getLogger("logging").setLevel(logging.DEBUG) # to array # from skimage.metrics import structural_similarity as ssim # print(ssim(image1, image1, multichannel=True)) # 1.0 # print(ssim(image1, image2, multichannel=True)) # 0.2996981914517261 # from __main__ import structural_similarity # print(structural_similarity(image1, image1)[0]) # 1.0 # print(structural_similarity(image1, image2)[0]) # 0.30561782186046865 | 2.839001 | 3 |