text string | size int64 | token_count int64 |
|---|---|---|
from databases import Database
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
String,
create_engine,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy_to_ormar import sqlalchemy_to_ormar
# Module-level fixtures: declarative base plus a shared SQLite engine/Database
# pair used by both the SQLAlchemy model and the ormar conversion test below.
Base = declarative_base()

# NOTE(review): `Database_URL` would conventionally be DATABASE_URL; kept as-is
# because the name is referenced below.
Database_URL = "sqlite:///test.db"
engine = create_engine(Database_URL)
database = Database(Database_URL)
metadata = MetaData(engine)
class User(Base):
    """Self-referential SQLAlchemy model: each user may point at a parent user."""
    __tablename__ = "user"

    # Upper-case column attribute names are deliberate here (the conversion
    # test below checks how they are handled) — do not "fix" the casing.
    USER_ID = Column(Integer(), primary_key=True)
    FIRST_NAME = Column(String(255))
    LAST_NAME = Column(String(255))
    USERNAME = Column(String(255), index=True)
    PASSWORD = Column(String(40))
    EMAIL = Column(String(255))
    # Self-referencing FK: a user's parent is another row of this same table.
    PARENT_ID = Column(ForeignKey("user.USER_ID"), index=True)  # type: ignore
    parent = relationship("User", remote_side=[USER_ID])
def test_self_relation():
    """Converting a self-referential model must expose both relation ends."""
    converted = sqlalchemy_to_ormar(User, database=database, metadata=metadata)
    related = converted.extract_related_names()
    assert related == {"parent", "users"}
| 1,050 | 356 |
"""Tests the `/api/bot/snake_` endpoints."""
from tests import SiteTest, app
class TestSnakeFactsAPI(SiteTest):
    """GET method - get snake fact"""

    def test_snake_facts(self):
        resp = self.client.get(
            '/bot/snake_facts',
            app.config['API_SUBDOMAIN'],
            headers=app.config['TEST_HEADER'],
        )
        status, payload = resp.status_code, resp.json
        self.assertEqual(status, 200)
        self.assertEqual(type(payload), str)
class TestSnakeIdiomAPI(SiteTest):
    """GET method - get snake idiom"""

    def test_snake_idiom(self):
        resp = self.client.get(
            '/bot/snake_idioms',
            app.config['API_SUBDOMAIN'],
            headers=app.config['TEST_HEADER'],
        )
        status, payload = resp.status_code, resp.json
        self.assertEqual(status, 200)
        self.assertEqual(type(payload), str)
class TestSnakeQuizAPI(SiteTest):
    """GET method - get snake quiz"""

    def test_snake_quiz(self):
        resp = self.client.get(
            '/bot/snake_quiz',
            app.config['API_SUBDOMAIN'],
            headers=app.config['TEST_HEADER'],
        )
        status, payload = resp.status_code, resp.json
        self.assertEqual(status, 200)
        self.assertEqual(type(payload), dict)
class TestSnakeNameAPI(SiteTest):
    """GET method - get a single snake name, or all of them."""

    def test_snake_names(self):
        resp = self.client.get(
            '/bot/snake_names',
            app.config['API_SUBDOMAIN'],
            headers=app.config['TEST_HEADER'],
        )
        status, payload = resp.status_code, resp.json
        self.assertEqual(status, 200)
        self.assertEqual(type(payload), dict)

    def test_snake_names_all(self):
        resp = self.client.get(
            '/bot/snake_names?get_all=True',
            app.config['API_SUBDOMAIN'],
            headers=app.config['TEST_HEADER'],
        )
        status, payload = resp.status_code, resp.json
        self.assertEqual(status, 200)
        self.assertEqual(type(payload), list)
| 1,927 | 623 |
"""Packaging script for yamly, a thin pyyaml wrapper."""
# BUG FIX: `install_requires` is a setuptools keyword; distutils.core.setup
# silently ignores it (and distutils itself is deprecated, removed in 3.12),
# so the pyyaml dependency was never declared. Import setup from setuptools.
from setuptools import setup

setup(
    name='yamly',
    version='0.1',
    py_modules=['yamly'],
    description="pyyaml wrapper",
    author="Pedro Rodrigues",
    author_email="medecau@gmail.com",
    license='MIT',
    url="http://medecau.github.com/pyyaml/",
    install_requires=["pyyaml"],
)
| 341 | 118 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 09:35:35 2018
@author: Branson
"""
import BATrader as ba
from BATrader.market.MarketReader import convert
import pandas as pd
from functools import partial
class Filters(object):
    """
    Filter
    ------------------------------------------
    Filter is used when I can told you immediately when you ask for specific
    group of stock. This Filter operate symlist in postgresql and deliver
    dataframe on row.

    Usage:

        self.df = ba.fr.symlist
        self.df1 = filter_by_mkt_value((0,'1B'))
        self.df2 = filter_by_entry_cost('10K')
        self.df3 = multiple_condition_filter(filter_by_mkt_value(('300M',0)),
                                             filter_by_entry_cost('10K'),
                                             filter_by_over_ma_60())

        d = self.df.query("cap_in_num > 500000000 and (board == 'main' or board == 'gem')")  # using query

        # Return active stock only
        ba.fr.symlist = filter_by_status(1)

    NewStyle (boolean-mask conditions; the keys are Chinese column names,
    gross margin (%) and operating margin (%) respectively):

        cond1 = stock['毛利率(%)'] > 30
        cond2 = stock['營業利益率(%)'] > 30
        stock[cond1 & cond2]
    """

    def __init__(self):
        # Load the full symbol list once; most filters slice this DataFrame.
        self.df = ba.fr.init_symlist_once()

    def filter_by_symlist(self, symlist: list) -> pd.DataFrame:
        """
        Filter by symbol list (faster variant, ~931 µs).
        """
        return self.df[self.df.sym.isin(symlist)]

    def filter_by_symlist2(self, symlist: list) -> pd.DataFrame:
        """
        Filter by symbol list via index alignment (slower, ~4.22 ms).
        """
        return self.df.set_index('sym').loc[symlist].reset_index()

    def filter_by_chi_name_contain(self, chi_word: str) -> pd.DataFrame:
        """
        Filter rows whose Chinese name contains the given substring.
        """
        return self.df[self.df.chi_name.str.contains(chi_word)]

    def filter_by_status(self, status: int) -> pd.DataFrame:
        """
        Filter by stock trading status.

        To simplify: 1 is 'Active', 0 is 'Suspended'.
        """
        dic = {1: 'Active', 0: 'Suspended'}
        return self.df[self.df.status == dic[status]]

    def filter_by_over_ma(self, period) -> pd.DataFrame:
        # Rows whose previous close is above the given moving average column.
        return self.df[self.df.pClose > self.df['ma%s' % period]]

    def filter_by_over_ma_60(self) -> pd.DataFrame:
        return self.filter_by_over_ma(60)

    def filter_by_over_ma_200(self) -> pd.DataFrame:
        return self.filter_by_over_ma(200)

    def filter_by_entry_cost(self, entry_cost) -> pd.DataFrame:
        """
        Filter by per-board-lot entry cost; usually we want rows below a cost
        cap, so a single value (not a range) is the common case.

            entry_cost = boardlot x p.close
            int or str : directly (upper bound)
            tuple : (high, low) range

        NOTE(review): unlike the other filters, this method REASSIGNS self.df
        from ba.fr.symlist and adds an 'entry_cost' column as a side effect.
        """
        self.df = ba.fr.symlist
        self.df['entry_cost'] = self.df.boardlot * self.df.pClose
        self.df = self.df[self.df.entry_cost.notnull()]
        if type(entry_cost) == tuple:
            return self.df[
                (self.df.entry_cost > convert(entry_cost[0])) & (self.df.entry_cost < convert(entry_cost[1]))]
        else:
            return self.df[self.df.entry_cost < convert(entry_cost)]

    def filter_by_board(self, board: str) -> pd.DataFrame:
        """
        Filter by board, e.g. 'main', 'gem' or 'etf'.
        """
        if type(board) != str:
            raise TypeError("main?gem?etf?")
        return self.df[self.df.board == board]

    def filter_by_mkt_value(self, mkt_value: tuple) -> pd.DataFrame:
        """
        Filter by market value in a range.

        Must be a tuple: (0, '500M').
        A 0 in the second slot means "no upper bound", e.g. ('300M', 0).
        """
        if type(mkt_value) != tuple:
            raise TypeError("tuple : high-land, low-land")
        if mkt_value[1] == 0:
            return self.df[self.df.cap_in_num > convert(mkt_value[0])]
        else:
            return self.df[(self.df.cap_in_num > convert(mkt_value[0])) & (self.df.cap_in_num < convert(mkt_value[1]))]

    def filter_by_amount(self, amount: float) -> pd.DataFrame:
        """
        Filter by amount (Turnover = P x Q).

        Must be a float, e.g. 0.0.
        """
        return self.df[(self.df.pAmount > amount)]

    def filter_by_trade_price(self, bigger_or_smaller_sign, trade_price) -> pd.DataFrame:
        """
        Filter by traded price (Last or Close).

        NOTE(review): implicitly returns None when the sign is neither '>'
        nor '<' — callers should pass one of those two strings.
        """
        if type(trade_price) != float and type(trade_price) != int:
            raise ValueError("int or float only : 0.0 or 0")
        if bigger_or_smaller_sign == '>':
            return self.df[(self.df.pClose > trade_price)]
        if bigger_or_smaller_sign == '<':
            return self.df[(self.df.pClose < trade_price)]

    def filter_ot_list(self) -> pd.DataFrame:
        # Rows flagged in the 'ot_and_rubbish' column (stored as string '1').
        return self.df[self.df.ot_and_rubbish == '1']

    # ------ aastocks ------
    def filter_by_aa_sector(self, sector):
        """
        Filter by aastocks sector. Better update the aa sector regularly.
        """
        return self.df.query("aa_sector == '{}'".format(sector))

    def filter_by_aa_business(self, business):
        """
        Filter by aastocks business. Better update the aa sector regularly.
        """
        return self.df.query("aa_business == '{}'".format(business))

    # ------ Other ------
    @staticmethod
    def merge_everything(df1, df2) -> pd.DataFrame:
        # Inner-join on 'sym', bringing in only the columns df1 lacks.
        cols_to_use = df2.columns.difference(df1.columns)
        df_ = pd.merge(df1.set_index('sym'), df2.set_index('sym')[cols_to_use], left_index=True, right_index=True,
                       how='inner').reset_index()
        return df_

    def multiple_condition_filter(self, *df_hub) -> pd.DataFrame:
        """
        Pass in multiple filtered DataFrames and intersect them; this looks
        complex but it's very convenient.

            self.df3 = multiple_condition_filter(filter_by_mkt_value(('300M',0)),
                                                 filter_by_entry_cost('10K'),
                                                 filter_by_over_ma_60())

        NOTE(review): narrows self.df in place as it merges.
        """
        # self.df = self.filter_by_status(1)
        for d in df_hub:
            self.df = self.merge_everything(self.df, d)
        return self.df

    @staticmethod
    def get_the_symlist(df: pd.DataFrame) -> list:
        """
        Get the symbol list from a filtered DataFrame.
        """
        return df.sym.tolist()
if __name__ == '__main__':
    # Manual smoke test: building the filter loads the full symbol list.
    flt = Filters()
| 6,225 | 2,101 |
import graphene
from core.schema.mutations import StoreEvent
class CoreMutation(graphene.ObjectType):
    """Graphene mutation container exposing the StoreEvent mutation field."""
    store_event = StoreEvent.Field()
| 142 | 43 |
from flask import Flask, jsonify
# Minimal customer-service HTTP endpoint.
app = Flask('customer-service')


@app.route('/')
def index():
    """Return a placeholder JSON body for the customer listing."""
    payload = {'message': 'get all customers'}
    return jsonify(payload)


if __name__ == '__main__':
    app.run(port=8002)
| 205 | 77 |
from tkinter import *
from PIL import ImageTk, Image
# Tiny Tk demo: a checkbutton whose string value is echoed into a new label
# each time the button is pressed.
root = Tk()
root.title('Batman')
root.iconbitmap('images/batman.ico')  # assumes the icon exists relative to the CWD
root.geometry('400x400')


def show():
    # Append a label showing the checkbutton's current value ('On' or 'Off').
    # `var` is the module-level StringVar defined below; it exists by the time
    # the button can be clicked.
    myLabel = Label(root, text=var.get())
    myLabel.pack()


var = StringVar()
check = Checkbutton(root, text='Check this box', variable=var, onvalue='On', offvalue='Off')
check.deselect()  # deselect() writes the offvalue ('Off') into var
check.pack()
myButton = Button(root, text="Show Selection", command=show)
myButton.pack()
root.mainloop()
| 461 | 166 |
import tensorflow as tf
import sys
sys.path.append('../../')
from model.model_utils import bfloat16_getter, get_shape_list, gelu, dropout, layer_norm, sequence_xe_loss, \
construct_host_call, get_assignment_map_from_checkpoint, _sequence_xe_loss_noreduce, stack_jagged, \
get_ltr_attention_mask
from data.thor_constants import THOR_OBJECT_TYPE_TO_IND, THOR_AFFORDANCES, THOR_ACTION_TYPE_TO_IND, \
load_instance_attribute_weights
from model.transformer import attention_layer, residual_mlp_layer, _argmax_sample, residual_mlp
import math
from model import optimization
from model.neat_config import NeatConfig
from model.interact.dataloader import names_and_arities
def embed_with_embedding_table(x, embedding_table, flatten=False):
    """
    Embed an int tensor with the embedding table. This ignores -1 things.

    :param x: int tensor of ids, any shape
    :param embedding_table: [vocab_size, embedding_dim]
    :param flatten: keep the flat [-1, embedding_dim] result instead of
        reshaping back to x's shape
    :return: embedded tensor
    """
    orig_shape = get_shape_list(x)
    vocab_size, embedding_dim = get_shape_list(embedding_table, 2)

    # Need to do something weird bc tf.float32_ref exists: only trust the
    # table's dtype when it is one of the two we expect.
    oh_dtype = embedding_table.dtype if embedding_table.dtype in (tf.float32, tf.bfloat16) else tf.float32

    flat_ids = tf.reshape(x, [-1])
    embedded = tf.matmul(tf.one_hot(flat_ids, dtype=oh_dtype, depth=vocab_size), embedding_table)
    if flatten:
        return embedded
    return tf.reshape(embedded, orig_shape + [embedding_dim])
def embed_2d_with_embedding_table(x, embedding_table, flatten=False):
    """
    Embed ints with a stacked per-affordance embedding table.

    :param x: [..., num_affordances] int tensor
    :param embedding_table: [num_affordances, vocab_size, hidden_size]
    :param flatten: keep the flat [-1, num_affordances, hidden_size] result
    :return: embedded tensor
    """
    orig_shape = get_shape_list(x)
    num_affordances, vocab_size, hidden_size = get_shape_list(embedding_table, 3)
    # assert orig_shape[-1] == num_affordances

    flat_ids = tf.reshape(x, [-1, num_affordances])
    one_hot = tf.one_hot(flat_ids, depth=vocab_size, dtype=tf.float32)
    # Each affordance slot is embedded with its own slice of the stacked table.
    embedded = tf.einsum('bav,avh->bah', one_hot, embedding_table)
    if flatten:
        return embedded
    return tf.reshape(embedded, orig_shape + [hidden_size])
def summarize_transformer(object_embs, gt_affordances_embed, affordance_name_embed, num_layers=3,
                          dropout_prob=0.1, initializer_range=0.02):
    """
    Use a transformer to summarize the delta between the GT affordances and the prototype that we'd expect from the object

    :param object_embs: [batch_size, h]
    :param gt_affordances_embed: [batch_size, num_affordances, h]
    :param affordance_name_embed: [num_affordances, h]
    :param num_layers: number of transformer layers
    :param dropout_prob: dropout for attention and MLP layers
    :param initializer_range: stddev for weight init
    :return: [batch_size, h] fixed-size representations for each of the objects!
    """
    batch_size, hidden_size = get_shape_list(object_embs, 2)
    batch_size2, num_affordances, h2 = get_shape_list(gt_affordances_embed, 3)
    num_affordances3, h3 = get_shape_list(affordance_name_embed, 2)
    # 64-dim heads, so hidden size must divide evenly; all inputs share h.
    assert hidden_size % 64 == 0
    assert hidden_size == h2
    assert h2 == h3

    # Sequence layout: [POOL_IDX, OBJECT_NAME, ... attrs ... ] — the object
    # embedding serves as position 0 and is later read back as the summary.
    seq_length = num_affordances + 1
    with tf.variable_scope("summarize_transformer"):
        with tf.variable_scope('embeddings'):
            # starting_embed = tf.get_variable(
            #     name='pooler',
            #     shape=[hidden_size],
            #     initializer=tf.truncated_normal_initializer(stddev=initializer_range),
            # )
            ctx = layer_norm(tf.concat([
                # tf.tile(starting_embed[None, None], [batch_size, 1, 1]),
                object_embs[:, None],
                gt_affordances_embed + affordance_name_embed[None],
            ], 1), name='embed_norm')

        hidden_state = tf.reshape(ctx, [batch_size * seq_length, -1])
        # No masks bc all embeddings are used (full bidirectional attention).
        mask = tf.ones((seq_length, seq_length), dtype=tf.float32)
        for layer_idx in range(num_layers):
            with tf.variable_scope(f'layer{layer_idx:02d}'):
                # [batch_size * seq_length, hidden_size]
                attention_output, _ = attention_layer(
                    hidden_state,
                    mask,
                    batch_size=batch_size,
                    seq_length=seq_length,
                    size_per_head=64,
                    num_attention_heads=hidden_size // 64,
                    initializer_range=initializer_range,
                    hidden_dropout_prob=dropout_prob,
                    attention_probs_dropout_prob=dropout_prob,
                )
                hidden_state = residual_mlp_layer(hidden_state + attention_output,
                                                  intermediate_size=hidden_size * 4,
                                                  hidden_dropout_prob=dropout_prob)
        # Read back position 0 (the object slot) as the pooled summary.
        h0 = tf.reshape(hidden_state, [batch_size, seq_length, -1])[:, 0]
    return h0
def expand_transformer(object_full_state, gt_affordances_embed, affordance_ctx_name_embed,
                       affordance_trg_name_embed, num_layers=3, dropout_prob=0.1, initializer_range=0.02,
                       random_perms=True, reuse=False, layer_cache=None):
    """
    Use a transformer to predict what the actual affordances of the object are, from the state

    # The order will be
    (object hidden state)
    (nullctx, nullctxname, pred0name) -> pred0
    (gt0, gt0name, pred1name) -> pred1
    ...
    (gt{n-1}, gt{n-1}name, predNname) -> predN

    :param object_full_state: [batch_size, h]
    :param gt_affordances_embed: [batch_size, num_affordances, h]
    :param affordance_ctx_name_embed: [num_affordances, h]
    :param affordance_trg_name_embed: [num_affordances, h]
    :param num_layers: number of transformer layers
    :param random_perms: randomly permute the affordance prediction order
    :param reuse: variable-scope reuse flag (tf.AUTO_REUSE during sampling)
    :param layer_cache: cached key/values for incremental decoding; when set,
        only the newest position is processed (seq_length becomes 1)
    :return: hidden size of [batch_size, num_affordances, h] (plus stacked new KVs)
    """
    batch_size, hidden_size = get_shape_list(object_full_state, 2)
    batch_size2, num_affordances, h2 = get_shape_list(gt_affordances_embed, 3)
    num_affordances3, h3 = get_shape_list(affordance_ctx_name_embed, 2)
    num_affordances4, h4 = get_shape_list(affordance_trg_name_embed, 2)
    assert hidden_size % 64 == 0
    assert hidden_size == h2
    assert h2 == h3

    # Sequence layout: [OBJECT_NAME, ... attrs ... ]
    seq_length = num_affordances + 1
    with tf.variable_scope("expand_transformer", reuse=reuse):
        if random_perms:
            # Random permutation per example via argsort of gaussian noise.
            idxs = tf.argsort(tf.random.normal((batch_size, num_affordances)), 1)
        else:
            idxs = tf.tile(tf.range(num_affordances, dtype=tf.int32)[None], [batch_size, 1])

        with tf.variable_scope('embeddings'):
            null_ctx_embed = tf.get_variable(
                name='nullctx',
                shape=[hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=initializer_range),
            )
            # Contexts are shifted: position i sees GT affordance i-1 (or the
            # learned null context at position 0).
            ctx_embeds = tf.concat([
                tf.tile(null_ctx_embed[None, None], [batch_size, 1, 1]),
                tf.gather(gt_affordances_embed + affordance_ctx_name_embed[None], idxs[:, :-1], batch_dims=1),
            ], 1)
            trg_name_embeds = tf.gather(tf.tile(affordance_trg_name_embed[None], [batch_size, 1, 1]),
                                        idxs, batch_dims=1)
            ctx = layer_norm(tf.concat([
                object_full_state[:, None],
                ctx_embeds + trg_name_embeds,
            ], 1), name='embed_norm')

        # don't forget to wear a mask when you go outside!
        if layer_cache is not None:
            # Shrink hidden state and mask accordingly: incremental decode of
            # only the last position against the cached prefix.
            cache_length = get_shape_list(layer_cache, expected_rank=6)[-2]
            seq_length = 1
            ctx = ctx[:, -seq_length:]
            mask = get_ltr_attention_mask(1, 1 + cache_length, dtype=ctx.dtype)
        else:
            mask = get_ltr_attention_mask(seq_length, seq_length, dtype=ctx.dtype)

        hidden_state = tf.reshape(ctx, [batch_size * seq_length, -1])
        new_kvs = []
        for layer_idx in range(num_layers):
            with tf.variable_scope(f'layer{layer_idx:02d}'):
                # [batch_size * seq_length, hidden_size]
                attention_output, new_kv = attention_layer(
                    hidden_state,
                    mask,
                    batch_size=batch_size,
                    seq_length=seq_length,
                    size_per_head=64,
                    num_attention_heads=hidden_size // 64,
                    initializer_range=initializer_range,
                    hidden_dropout_prob=dropout_prob,
                    attention_probs_dropout_prob=dropout_prob,
                    do_cache=True,
                    cache=layer_cache[:, layer_idx] if layer_cache is not None else None,
                )
                new_kvs.append(new_kv)
                hidden_state = residual_mlp_layer(hidden_state + attention_output,
                                                  intermediate_size=hidden_size * 4,
                                                  hidden_dropout_prob=dropout_prob)

        # [batch_size, num_attributes, H]
        if layer_cache is None:
            # Undo the permutation (argsort of idxs inverts it); +1 skips the
            # object slot at position 0.
            hidden_states_per_attr = tf.gather(tf.reshape(hidden_state, [batch_size, seq_length, -1]),
                                               tf.argsort(idxs, 1) + 1, batch_dims=1)
        else:
            hidden_states_per_attr = hidden_state[:, None]
    return hidden_states_per_attr, tf.stack(new_kvs, axis=1)
class StateChangePredictModel(object):
    """Encode object affordances, apply an action, and decode resulting states."""

    def __init__(self, config: NeatConfig, is_training, object_types):
        """
        A model to predict what happens to some objects when you apply an action

        :param config: NeatConfig with a `model` dict (hidden_size, dropout, ...)
        :param is_training: enables dropout when True
        :param object_types: [batch_size, num_objects, (pre,post) aka 2]
        """
        self.config = config
        self.hidden_size = config.model['hidden_size']
        self.is_training = is_training
        if is_training:
            self.dropout_prob = config.model.get('dropout_prob', 0.1)
            tf.logging.info("Is training -> dropout={:.3f}".format(self.dropout_prob))
        else:
            self.dropout_prob = 0.0
        self.activation_fn = tf.nn.tanh if config.model.get('activation', 'tanh') == 'tanh' else tf.identity

        # First embed everything, some of these are static.
        with tf.variable_scope('embeddings'):
            # 1. Embed everything
            object_embedding_table = tf.get_variable(
                name='object_embs',
                shape=[len(THOR_OBJECT_TYPE_TO_IND), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
            # Technically we assume as input
            # [batch_size, num_objects (2), pre post (2)]
            # However those last two dimensions were flattened into [batch_size, 4]
            # Now we're flattening into [batch_size * 4]
            self.batch_size, self.num_objects = get_shape_list(object_types, 2)
            assert self.num_objects == 4
            self.object_embed = embed_with_embedding_table(object_types, object_embedding_table,
                                                           flatten=True)
            affordance_embed_table = []
            for i, (affordance_name, a) in enumerate(names_and_arities):
                if a == len(THOR_OBJECT_TYPE_TO_IND):
                    # Affordances whose vocab IS the object vocab reuse the
                    # object embedding table rather than learning a new one.
                    tf.logging.info(f"For {affordance_name}: i'm copying the object embedding table")
                    affordance_embed_table.append(object_embedding_table)
                else:
                    affordance_embed_table.append(tf.get_variable(
                        name=f'{affordance_name}',
                        shape=[max(a, 2), self.hidden_size],
                        initializer=tf.truncated_normal_initializer(stddev=0.02),
                    ))
            # [num_affordances, vocab_size, hidden_size] — tables of unequal
            # vocab size are padded/stacked by stack_jagged.
            self.affordance_embed_table, self.affordance_embed_table_mask = stack_jagged(affordance_embed_table, 0)
            self.num_affordances, self.affordance_vocab_size, _hsz = get_shape_list(self.affordance_embed_table, 3)
            tf.logging.info(f"Affordance embed table: ({self.num_affordances},{self.affordance_vocab_size},{_hsz})")

            # Separate "name" embeddings for context vs. target positions.
            self.affordance_emb_trg = tf.get_variable(
                name='affordance_embs_trg',
                shape=[len(names_and_arities), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
            self.affordance_emb_ctx = tf.get_variable(
                name='affordance_embs_ctx',
                shape=[len(names_and_arities), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )

    def encode_affordances(self, object_states):
        """
        Encode each object's affordance vector into a fixed-size vector.

        :param object_states: [batch_size, num_objects, num_affordances]
        :return: encoded hidden size. [batch_size, num_objects, hidden_size]
        """
        #######################################################
        # 2. Encoder side
        with tf.variable_scope('encode_affordances'):
            # [batch_size * num_objects, hidden_size]
            gt_affordances_embed_encoder = embed_2d_with_embedding_table(object_states,
                                                                         embedding_table=self.affordance_embed_table,
                                                                         flatten=True)
            gt_affordances_embed_encoder = dropout(gt_affordances_embed_encoder, dropout_prob=self.dropout_prob)
            encoded_h = summarize_transformer(self.object_embed, gt_affordances_embed_encoder,
                                              self.affordance_emb_ctx,
                                              dropout_prob=self.dropout_prob)
            encoded_h = tf.layers.dense(encoded_h, self.hidden_size,
                                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                        name='final_proj_without_ln')
            encoded_h = tf.reshape(encoded_h, [self.batch_size, self.num_objects, self.hidden_size])
        return self.activation_fn(encoded_h)

    def encode_action(self, action_id, action_args):
        """
        Encode the action using a representation of IT as well as a representation of the embedded objects

        :param action_id: [batch_size]
        :param action_args: [batch_size, 2] — indices into (null, obj0, obj1)
        :return: action embed [batch_size, hidden_size]
        """
        batch_size, two_ = get_shape_list(action_args, 2)
        assert two_ == 2
        assert batch_size == self.batch_size

        # Pre and post are the same so just extract pre, doesnt matter
        object_embeds = tf.reshape(self.object_embed, [self.batch_size, 2, 2, self.hidden_size])[:, :, 0]
        with tf.variable_scope('encode_action'):
            # Encode action
            action_embedding_table = tf.get_variable(
                name='action_embs',
                shape=[len(THOR_ACTION_TYPE_TO_IND), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
            self.action_embedding_table = action_embedding_table
            action_embed = embed_with_embedding_table(action_id, action_embedding_table)

            # I originally got action args from
            # action_args = []
            # for k in ['object_name', 'receptacle_name']:
            #     ok = item['action'][k]
            #
            #     if ok is None:
            #         action_args.append(0)
            #
            #     elif ok == item['pre'][0]['index']:
            #         action_args.append(1)
            #     elif ok == item['pre'][1]['index']:
            #         action_args.append(2)
            #     else:
            #         import ipdb
            #         ipdb.set_trace()
            # Index 0 selects a learned "null object" embedding; 1/2 select
            # the two real object embeddings.
            nullctx = tf.tile(tf.get_variable(
                name='nullobj',
                shape=[self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )[None, None], [self.batch_size, 1, 1])
            encoded_pre_and_zero = tf.concat([nullctx, object_embeds], 1)
            object_repr_and_receptacle_repr = tf.gather(encoded_pre_and_zero, action_args, batch_dims=1)
            object_repr_and_receptacle_repr = tf.reshape(object_repr_and_receptacle_repr,
                                                         [self.batch_size, 2 * self.hidden_size])
            action_embed0 = tf.concat([action_embed, object_repr_and_receptacle_repr], 1)
            return self.activation_fn(residual_mlp(action_embed0,
                                                   hidden_size=self.hidden_size,
                                                   final_size=self.hidden_size,
                                                   num_layers=2,
                                                   hidden_dropout_prob=self.dropout_prob
                                                   ))

    def apply_action_mlp(self, action_embed, encoded_h_pre):
        """
        :param action_embed: [batch_size, h]
        :param encoded_h_pre: [batch_size, num_objs (probably 2), h] -- one per thing we will predict.

        We can model this JOINTLY or applying the model to EACH THING
        (controlled by the `fuse_action` config flag).
        :return: predicted post-action hidden states [batch_size, num_objs, h]
        """
        batch_size, num_objs_to_apply, hidden_size = get_shape_list(encoded_h_pre, 3)
        assert batch_size == self.batch_size
        assert hidden_size == self.hidden_size
        if self.config.model.get('fuse_action', True):
            tf.logging.info("Apply action MLP -> Fuse action!")
            # 3. Change the hidden state: all objects + action jointly.
            with tf.variable_scope('apply_action_mlp'):
                mlp_h = tf.concat([action_embed, tf.reshape(encoded_h_pre, [self.batch_size, -1])], 1)
                encoded_h_post_pred = residual_mlp(mlp_h,
                                                   initial_proj=False,
                                                   num_layers=2,
                                                   hidden_size=3*self.hidden_size,
                                                   final_size=num_objs_to_apply * self.hidden_size,
                                                   hidden_dropout_prob=self.dropout_prob)
                encoded_h_post_pred = tf.reshape(encoded_h_post_pred, [self.batch_size, num_objs_to_apply, self.hidden_size])
            return self.activation_fn(encoded_h_post_pred)
        else:
            # 3. Change the hidden state: the same MLP applied per object.
            with tf.variable_scope('apply_action_mlp'):
                mlp_h = tf.concat([tf.tile(action_embed[:, None], [1, num_objs_to_apply, 1]), encoded_h_pre], 2)
                mlp_h_2d = tf.reshape(mlp_h, [self.batch_size * num_objs_to_apply, self.hidden_size + hidden_size])
                encoded_h_post_pred = residual_mlp(mlp_h_2d, hidden_size=self.hidden_size, final_size=self.hidden_size,
                                                   hidden_dropout_prob=self.dropout_prob)
                encoded_h_post_pred = tf.reshape(encoded_h_post_pred, [self.batch_size, num_objs_to_apply, self.hidden_size])
            return self.activation_fn(encoded_h_post_pred)

    def decode_affordances_when_gt_is_provided(self, all_encoded_h, gt_affordances_decoded):
        """
        Teacher-forced decoding of affordances from encoded hidden states.

        :param all_encoded_h: [batch_size, num_objs, hidden_size]
        :param gt_affordances_decoded: [batch_size, num_objs, num_afforadnces]
        :return: [batch_size, num_objs, num_affordances, vocab_size_for_affordances]
        """
        # 4. Predict the states!
        with tf.variable_scope('decoder'):
            batch_size, num_duplicates_x_num_objs, hidden_size = get_shape_list(all_encoded_h, 3)
            assert batch_size == self.batch_size
            # assert num_duplicates_x_num_objs == 6
            assert hidden_size == self.hidden_size
            batch_size_, num_duplicates_x_num_objs_, num_affordances = get_shape_list(gt_affordances_decoded, 3)
            assert num_duplicates_x_num_objs_ == num_duplicates_x_num_objs
            assert batch_size_ == self.batch_size

            all_encoded_h = dropout(tf.reshape(all_encoded_h, [-1, self.hidden_size]), dropout_prob=self.dropout_prob)
            # Get GT affordances -- slightly different because we duplicated the postconditions for 2 losses
            gt_affordances_decoder_embed = embed_2d_with_embedding_table(gt_affordances_decoded,
                                                                        self.affordance_embed_table,
                                                                        flatten=True)
            # [batch_size, num_affordances, hidden_size]
            hidden_states_per_attr, _ = expand_transformer(
                object_full_state=all_encoded_h,
                gt_affordances_embed=gt_affordances_decoder_embed,
                affordance_ctx_name_embed=self.affordance_emb_ctx,
                affordance_trg_name_embed=self.affordance_emb_trg,
                dropout_prob=self.dropout_prob,
                random_perms=self.is_training and self.config.data.get('random_perms', False),
            )
            # GET the predictions: logits are dot products against the
            # (padded) affordance embedding table, plus a per-pair bias.
            affordances_pred = tf.einsum('bah,avh->bav', hidden_states_per_attr, self.affordance_embed_table)
            apb = tf.get_variable(
                name='affordance_pred_bias',
                shape=[len(names_and_arities), len(THOR_OBJECT_TYPE_TO_IND)],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
            affordances_pred += apb[None]
            affordance_pred_by_type = tf.reshape(affordances_pred,
                                                 [batch_size, num_duplicates_x_num_objs,
                                                  len(names_and_arities), len(THOR_OBJECT_TYPE_TO_IND)])
        return affordance_pred_by_type

    def sample_step(self, encoded_h_flat, prev_affordances=None, cache=None, p=0.95):
        """
        One incremental decoding step (greedy argmax over the next affordance).

        :param encoded_h_flat: [Batch_size * num_objs, hidden_size]
        :param prev_affordances: [batch_size * num_objs, num_affordances up until now (maybe None)?
        :param cache: stacked layer KV cache from previous steps, or None
        :param p: nucleus-sampling threshold — unused while _argmax_sample is active
        :return: dict with 'new_tokens', 'new_probs', 'new_cache'
        """
        with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
            batch_size, hidden_size = get_shape_list(encoded_h_flat, 2)
            if prev_affordances is None:
                num_affordances_to_now = 0
                prev_affordances_embed = tf.zeros((batch_size, 0, self.hidden_size))
            else:
                batch_size, num_affordances_to_now = get_shape_list(prev_affordances, 2)
                prev_affordances_embed = embed_2d_with_embedding_table(prev_affordances,
                                                                       self.affordance_embed_table[
                                                                       :num_affordances_to_now],
                                                                       flatten=True)
            # Append a zero slot for the position being predicted this step.
            prev_affordances_embed = tf.concat([prev_affordances_embed, tf.zeros((batch_size, 1, self.hidden_size))], 1)
            hidden_states_per_attr, new_kvs = expand_transformer(
                object_full_state=encoded_h_flat,
                gt_affordances_embed=prev_affordances_embed,
                affordance_ctx_name_embed=self.affordance_emb_ctx[:num_affordances_to_now + 1],
                affordance_trg_name_embed=self.affordance_emb_trg[:num_affordances_to_now + 1],
                dropout_prob=self.dropout_prob,
                random_perms=False,
                reuse=tf.AUTO_REUSE,
                layer_cache=cache
            )
            logits = tf.einsum('bh,vh->bv', hidden_states_per_attr[:, -1],
                               self.affordance_embed_table[num_affordances_to_now])
            apb = tf.get_variable(
                name='affordance_pred_bias',
                shape=[len(names_and_arities), len(THOR_OBJECT_TYPE_TO_IND)],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )[num_affordances_to_now]
            logits += apb[None]

            # Mask out vocab entries beyond this affordance's arity.
            cur_name, cur_arity = names_and_arities[num_affordances_to_now]
            logits_mask = tf.cast(tf.less(tf.range(len(THOR_OBJECT_TYPE_TO_IND)), max(cur_arity, 2)), dtype=tf.float32)
            logits = logits * logits_mask - 1e10 * (1.0 - logits_mask)

            # sample_info = _top_p_sample(logits, num_samples=1, p=p)
            sample_info = _argmax_sample(logits)
            new_tokens = tf.squeeze(sample_info['sample'], 1)
            new_probs = tf.squeeze(tf.batch_gather(sample_info['probs'], sample_info['sample']), 1)
        return {
            'new_tokens': new_tokens,
            'new_probs': new_probs,
            'new_cache': new_kvs
        }

    def sample(self, encoded_h):
        """
        Decode into actual affordances

        :param encoded_h: [batch_size, num_objects, hidden_size]
        :return: dict of 'tokens' and 'probs', each [batch_size, num_objects, num_affordances]
        """
        bsize0, num_objs0, hidden_size = get_shape_list(encoded_h, 3)
        encoded_h_flat = tf.reshape(encoded_h, [-1, self.hidden_size])
        batch_size = get_shape_list(encoded_h_flat, 2)[0]
        with tf.name_scope('sample'):
            h0 = self.sample_step(encoded_h_flat)
            ctx = h0['new_tokens'][:, None]
            cache = h0['new_cache']
            probs = h0['new_probs'][:, None]

            # Technically we don't need tf.while_loop here bc always doing it for the same number of steps
            for t in range(len(names_and_arities) - 1):
                next_outputs = self.sample_step(encoded_h_flat, prev_affordances=ctx, cache=cache)
                # Update everything: extend the KV cache and the sampled context.
                cache = tf.concat([cache, next_outputs['new_cache']], axis=-2)
                ctx = tf.concat([ctx, next_outputs['new_tokens'][:, None]], axis=1)
                probs = tf.concat([probs, next_outputs['new_probs'][:, None]], axis=1)
        return {
            'tokens': tf.reshape(ctx, [bsize0, num_objs0, -1]),
            'probs': tf.reshape(probs, [bsize0, num_objs0, -1]),
        }

    def compute_losses(self, object_states, isvalid_by_type_o1o2,
                       encoded_h_pre,
                       encoded_h_post_gt,
                       encoded_h_post_pred,
                       affordance_pred_by_type,
                       gt_affordances_decoder,
                       isvalid_by_type):
        """
        Compute XE losses for decoded affordances plus diagnostic norms.

        :param object_states: [batch_size, 4, len(names_and_arities)
        :param isvalid_by_type_o1o2: first two objs whteher they're valid [batch_size, 2]
        :return: (losses dict, norms dict)
        """
        batch_size, num_duplicates_x_num_objs, nlen_names_and_arities = get_shape_list(object_states, 3)

        # MAGNITUDE LOSSES
        ###################
        # Check if anything changed
        norms = {}
        losses = {}
        pre_states, post_states = tf.unstack(
            tf.reshape(object_states, [batch_size, 2, 2, len(names_and_arities)]), axis=2)
        did_change = tf.not_equal(pre_states, post_states)
        didchange_weight = tf.cast(tf.reduce_any(did_change, -1), dtype=tf.float32) * isvalid_by_type_o1o2
        nochange_weight = (1.0 - tf.cast(tf.reduce_any(did_change, -1), dtype=tf.float32)) * isvalid_by_type_o1o2

        ### How much did things change
        ###############
        encoded_h_delta = encoded_h_post_pred - encoded_h_pre
        encoded_h_delta_l2 = tf.sqrt(tf.reduce_mean(tf.square(encoded_h_delta), -1))
        norms['didchange_hdelta_l2'] = tf.reduce_sum(encoded_h_delta_l2 * didchange_weight) / (tf.reduce_sum(
            didchange_weight) + 1e-5)
        norms['nochange_hdelta_l2'] = tf.reduce_sum(encoded_h_delta_l2 * nochange_weight) / (tf.reduce_sum(
            nochange_weight) + 1e-5)

        # Delta between pred and GT
        ###
        # gt_mu = tf.stop_gradient(encoded_h_post_gt[:, :, :self.hidden_size])
        # pred_mu = encoded_h_post_pred[:, :, :self.hidden_size]
        #
        # #########################################
        # # VAE loss
        # all_mu, all_logvar = tf.split(tf.reshape(tf.concat([encoded_h_pre,
        #                                                     encoded_h_post_gt,
        #                                                     encoded_h_post_pred], 1),
        #                                          [-1, self.hidden_size * 2]), [self.hidden_size, self.hidden_size],
        #                               axis=-1)
        # kld = -0.5 * tf.reduce_mean(1.0 + all_logvar - tf.square(all_mu) - tf.exp(all_logvar))
        # losses['kld'] = kld
        #########################################
        gt_stop = tf.stop_gradient(encoded_h_post_gt)
        hidden_state_diff_l2 = tf.sqrt(tf.reduce_mean(tf.square(encoded_h_post_pred - gt_stop), -1))
        hidden_state_diff_l1 = tf.reduce_mean(tf.abs(encoded_h_post_pred - gt_stop), -1)
        norms['hidden_state_diff_l2'] = tf.reduce_sum(hidden_state_diff_l2 * isvalid_by_type_o1o2) / (
            tf.reduce_sum(isvalid_by_type_o1o2) + 1e-5)
        norms['hidden_state_diff_l1'] = tf.reduce_sum(hidden_state_diff_l1 * isvalid_by_type_o1o2) / (
            tf.reduce_sum(isvalid_by_type_o1o2) + 1e-5)
        hidden_state_magn_l2 = tf.sqrt(tf.reduce_mean(tf.square(gt_stop), -1))
        norms['hidden_state_magn_l2'] = tf.reduce_sum(hidden_state_magn_l2 * isvalid_by_type_o1o2) / (
            tf.reduce_sum(isvalid_by_type_o1o2) + 1e-5)

        # Upweight changed losses
        # did change: [batch_size, num_objs, num_affordances]
        # NOTE(review): the [:, 0:2] / [:, 2:4] / [:, 4:] slices appear to
        # match the [pre, post_gt, post_pred] ordering built by the caller —
        # confirm against model_fn's concat order.
        for i, (affordance_name, arity_) in enumerate(names_and_arities):
            arity = max(arity_, 2)
            losses[f'state/{affordance_name}_post'] = sequence_xe_loss(
                affordance_pred_by_type[:, 4:, i, :arity],
                gt_affordances_decoder[:, 4:, i],
                label_weights=isvalid_by_type[:, 4:],
            )
            losses[f'state/{affordance_name}_pre'] = sequence_xe_loss(
                affordance_pred_by_type[:, 0:2, i, :arity],
                gt_affordances_decoder[:, 0:2, i],
                label_weights=isvalid_by_type[:, 0:2],  # + tf.cast(did_change[:, :, i], dtype=tf.float32) * 100.0,
            )
            losses[f'state/{affordance_name}_postgt'] = sequence_xe_loss(
                affordance_pred_by_type[:, 2:4, i, :arity],
                gt_affordances_decoder[:, 2:4, i],
                label_weights=isvalid_by_type[:, 2:4],  # + tf.cast(did_change[:, :, i], dtype=tf.float32) * 100.0,
            )
        # # Another way for losses
        # losses_all = _sequence_xe_loss_noreduce(affordance_pred_by_type, gt_affordances_decoder)
        # loss_mask = tf.reshape(tf.tile(isvalid_by_type[:, :, None], [1, 1, len(names_and_arities)]), [-1])
        # losses['state/all'] = tf.reduce_sum(losses_all * loss_mask) / (tf.reduce_sum(loss_mask) + 1e-5)
        return losses, norms
def model_fn_builder(config: NeatConfig):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator.

        Builds a state-change prediction graph: encodes object affordance
        states, applies an action MLP to predict post-action hidden states,
        decodes affordances (teacher-forced), and assembles the training
        loss and logging metrics.  `labels` is unused; supervision comes
        from `features` itself.
        """
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # NOTE(review): `batch_size` and `hidden_size` are computed but not
        # used anywhere below.
        batch_size = get_shape_list(features['actions/action_id'], expected_rank=1)[0]
        hidden_size = config.model['hidden_size']
        # activation_fn = tf.nn.tanh if config.model.get('activation', 'tanh') == 'tanh' else tf.identity
        scp_model = StateChangePredictModel(config,
                                            is_training=is_training,
                                            object_types=features['objects/object_types'],
                                            )

        # Encode the ground-truth object states, then split along axis 1 into
        # the "pre" slices [0, 2] and the "post" slices [1, 3].
        encoded_h = scp_model.encode_affordances(features['objects/object_states'])
        encoded_h_pre = tf.gather(encoded_h, [0, 2], axis=1)
        encoded_h_post_gt = tf.gather(encoded_h, [1, 3], axis=1)

        # Predict the post-action hidden state from the pre state + action.
        action_embed = scp_model.encode_action(features['actions/action_id'], action_args=features['actions/action_args'])
        encoded_h_post_pred = scp_model.apply_action_mlp(action_embed, encoded_h_pre)

        #############################################################
        # Now construct a decoder
        # [batch_size, 3, #objs, hidden_size] -> [batch_size, 3 * objs, hidden_size]
        all_encoded_h = tf.concat([
            encoded_h_pre,  # [0, 2]
            encoded_h_post_gt,  # [1, 3]
            encoded_h_post_pred,  # [1, 3]
        ], 1)
        # Ground truth / validity masks re-ordered to line up with the
        # [pre, post_gt, post_pred] concatenation above.
        gt_affordances_decoder = tf.gather(features['objects/object_states'], [0, 2, 1, 3, 1, 3], axis=1)
        isvalid_by_type = tf.cast(tf.gather(features['objects/is_valid'], [0, 2, 1, 3, 1, 3], axis=1), dtype=tf.float32)

        if mode == tf.estimator.ModeKeys.PREDICT:
            # Inference path: sample decoded affordances and echo the inputs
            # back into the predictions dict.
            predictions = scp_model.sample(all_encoded_h)
            predictions.update(**features)
            return tf.contrib.tpu.TPUEstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT,
                                                   predictions=predictions)

        # Teacher-forced decoding used for the training/eval losses.
        affordance_pred_by_type = scp_model.decode_affordances_when_gt_is_provided(all_encoded_h,
                                                                                   gt_affordances_decoder)
        ######################
        # For losses
        losses, norms = scp_model.compute_losses(
            object_states=features['objects/object_states'],
            isvalid_by_type_o1o2=isvalid_by_type[:, :2],
            encoded_h_pre=encoded_h_pre,
            encoded_h_post_gt=encoded_h_post_gt,
            encoded_h_post_pred=encoded_h_post_pred,
            affordance_pred_by_type=affordance_pred_by_type,
            gt_affordances_decoder=gt_affordances_decoder,
            isvalid_by_type=isvalid_by_type)

        loss = tf.add_n([x for x in losses.values()])
        # Log the norms alongside the losses, and lightly pull the predicted
        # hidden state towards the ground-truth encoding.
        for k, v in norms.items():
            losses[f'norms/{k}'] = v
        loss += 0.1 * norms['hidden_state_diff_l2']
        loss += 0.1 * norms['hidden_state_diff_l1']

        if is_training:
            # NOTE(review): during training this collects ALL global variables
            # (minus global_step) rather than just trainable ones — the
            # branches look swapped relative to convention; confirm intended.
            tvars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'global_step' not in x.name]
        else:
            tvars = tf.trainable_variables()

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)

        train_op, train_metrics = optimization.build_optimizer_from_config(
            loss=loss,
            optimizer_config=config.optimizer,
            device_config=config.device,
        )
        train_metrics.update(losses)

        # Scalar summaries are emitted via a host call (required on TPU).
        host_call = construct_host_call(scalars_to_log=train_metrics,
                                        model_dir=config.device['output_dir'],
                                        iterations_per_loop=config.device.get('iterations_per_loop', 1000))
        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metrics=None,
            host_call=host_call)

    return model_fn
if __name__ == '__main__':
    # Eager-mode smoke test: pull one training batch through the model_fn.
    from model.interact import dataloader

    tf.compat.v1.enable_eager_execution()
    bs = 8
    config = NeatConfig.from_yaml('configs/local_debug.yaml')
    input_fn = dataloader.input_fn_builder(config, is_training=True)
    batch_iter = input_fn(params={'batch_size': bs}).make_one_shot_iterator()
    features, labels = batch_iter.get_next()
    model_fn = model_fn_builder(config)
    spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN, {'batch_size': bs})
| 38,295 | 12,135 |
import os
import re
import time
from os.path import dirname, exists, join
import pytest
import yaml
from alpenhorn import acquisition as ac
from alpenhorn import archive as ar
from alpenhorn import storage as st
# Skip the whole module unless docker tests are explicitly enabled and the
# docker SDK + daemon are actually available.
if ("RUN_DOCKER_TESTS" not in os.environ) and ("PLAYGROUND" not in os.environ):
    pytestmark = pytest.mark.skip(
        reason=(
            "Docker tests must be enabled by setting the RUN_DOCKER_TESTS environment "
            "variable"
        ),
    )
else:
    # Try and import docker.
    try:
        import docker

        client = docker.from_env()
    except Exception:
        # BUG FIX: the two adjacent string literals concatenated to
        # "installed, orrunning." — a trailing space was missing.
        pytestmark = pytest.mark.skip(
            reason=(
                "Docker tests are enabled, but docker doesn't seem to be installed, or "
                "running."
            ),
        )
# ====== Fixtures for controlling Docker ======
@pytest.fixture(scope="module")
def images():
    """Build the images for the tests."""
    import os.path

    # Repo root is two levels above this test file; it is used as the build
    # context so the Dockerfile can copy the package source.
    context = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))
    print("Building docker images from location %s..." % context)

    # Build alpenhorn image
    client.images.build(
        path=context,
        tag="alpenhorn",
        rm=True,
        forcerm=True,
        dockerfile="tests/docker/Dockerfile.alpenhorn",
    )
@pytest.fixture(scope="module")
def network():
    """Set up the network.

    Yields the network *name*; containers join it via `network_mode`.
    """
    # Note to connect to this network you need to pass network_mode=networks to
    # .run(). See https://github.com/docker/docker-py/issues/1433
    print("Setting up the network...")
    network = client.networks.create("alpenhorn-net", driver="bridge")
    yield network.name
    # Teardown: remove the bridge network once the module's tests finish.
    network.remove()
@pytest.fixture(scope="module")
def db(network, images):
    """Set up the database and create the tables for alpenhorn.

    Also connect peewee to this database, so we can query its state."""
    from alpenhorn import db

    print("Creating the database...")

    # Create the database container.  MySQL is exposed on host port 63306 so
    # the test process itself can connect; empty root password for simplicity.
    db_container = client.containers.run(
        "mysql:5.7",
        name="db",
        detach=True,
        network_mode=network,
        ports={"3306/tcp": 63306},
        environment={"MYSQL_ALLOW_EMPTY_PASSWORD": "yes"},
    )

    # Wait until the MySQL instance is properly up
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network=network,
        command="bash -c 'while ! mysqladmin ping -h db --silent; do sleep 3; done'",
    )

    # Create the database
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network=network,
        command="mysql -h db -e 'CREATE DATABASE alpenhorn_db'",
    )

    print("Creating the tables...")
    # Initialise alpenhorn
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network=network,
        command="alpenhorn init",
    )

    # Connect our peewee models to the database
    db._connect(url="mysql://root@127.0.0.1:63306/alpenhorn_db")

    yield db_container

    # Take down the peewee connection
    db.database_proxy.close()

    print("Cleaning up db container...")
    _stop_or_kill(db_container)
    db_container.remove()
@pytest.fixture(scope="module")
def workers(db, network, images, tmpdir_factory):
    """Create a group of alpenhorn entries.

    Spins up three alpenhorn containers (container-0..2), each with its own
    StorageGroup/StorageNode DB row and a host temp dir mounted at /data.
    Only node_0 has auto_import enabled.  Yields a list of dicts with keys
    "node", "container" and "dir".
    """
    workers = []

    for i in range(3):
        hostname = "container-%i" % i
        print("Creating alpenhorn container %s" % hostname)

        # Create db entries for the alpenhorn instance
        group = st.StorageGroup.create(name=("group_%i" % i))
        node = st.StorageNode.create(
            name=("node_%i" % i),
            root="/data",
            username="root",
            group=group,
            host=hostname,
            address=hostname,
            active=True,
            auto_import=(i == 0),
            min_avail_gb=0.0,
        )

        # Create a temporary directory on the host to store the data, which will
        # get mounted into the container
        data_dir = str(tmpdir_factory.mktemp(hostname))
        print("Node directory (on host): %s" % str(data_dir))

        # The ALPENHORN_NODE marker file identifies this mount as the node.
        with open(str(data_dir) + "/ALPENHORN_NODE", "w") as f:
            f.write(node.name)
        print("Created ALPENHORN_NODE file on host: %s" % str(data_dir))

        container = client.containers.run(
            "alpenhorn",
            name=hostname,
            hostname=hostname,
            network_mode=network,
            detach=True,
            volumes={data_dir: {"bind": "/data", "mode": "rw"}},
        )
        workers.append({"node": node, "container": container, "dir": data_dir})

    yield workers

    # Cleanup
    for worker in workers:
        container = worker["container"]
        print("Stopping and removing alpenhorn container %s" % container.name)
        _stop_or_kill(container, timeout=1)
        container.remove()
def _stop_or_kill(container, timeout=10):
    """Stop `container`, falling back to a hard kill if the stop times out.

    Work around for:
    https://github.com/docker/docker-py/issues/1374
    """
    import requests.exceptions

    try:
        container.stop(timeout=timeout)
    except requests.exceptions.ReadTimeout:
        # The daemon didn't stop the container in time; force-kill it.
        container.kill()
# ====== Fixtures for generating test files ======
@pytest.fixture(scope="module")
def test_files():
    """Get a set of test files.

    Read the test files config, and structure it into acquisitions and files,
    labelling each with their respective types.
    """
    # The fixture description lives in tests/fixtures/files.yml.
    files = os.path.normpath(
        os.path.join(os.path.dirname(__file__), "..", "fixtures", "files.yml")
    )

    with open(files, "r") as f:
        fs = yaml.safe_load(f.read())

    acqs = _recurse_acq(fs)
    return acqs
def _recurse_acq(f, root=""):
"""Recurse over a dictionary based tree, and find the acquisitions and their files."""
def _type(x):
if "zab" in x:
return "zab"
elif "quux" in x or x == "x":
return "quux"
else:
return None
acqlist = []
for name, sub in f.items():
new_root = join(root, name)
if _type(new_root) is not None:
acqlist.append(
{
"name": new_root,
"type": _type(new_root),
"files": _recurse_files(sub),
}
)
else:
acqlist += _recurse_acq(sub, root=join(root, name))
return acqlist
def _recurse_files(f, root=""):
"""Recurse over a dictionary tree at the acq root, and get the files."""
def _type(x):
if x[-4:] == ".log":
return "log"
elif x[-4:] == ".zxc" or x == "jim":
return "zxc"
elif x[-5:] == ".lock":
return "lock"
filelist = []
for name, sub in f.items():
new_root = join(root, name)
if "md5" in sub:
fileprop = {"name": new_root, "type": _type(new_root)}
fileprop.update(sub)
filelist.append(fileprop)
else:
filelist += _recurse_files(sub, root=new_root)
return filelist
def _make_files(acqs, base, skip_lock=True):
for acq in acqs:
for file_ in acq["files"]:
path = join(base, acq["name"], file_["name"])
if not exists(dirname(path)):
os.makedirs(dirname(path))
if not skip_lock or file_["type"] != "lock":
with open(path, "w") as fh:
fh.write(file_["contents"])
# ====== Helper routines for checking the database ======
def _verify_db(acqs, copies_on_node=None, wants_on_node="Y", has_on_node="Y"):
    """Verify that files are in the database.

    Parameters
    ----------
    acqs : dict
        Set of acquisitions and files as output by test_files.
    copies_on_node : StorageNode, optional
        Verify that what the database believes is on this node. If
        `None` skip this test.
    has_on_node : str, optional
        'Has' state of files to check for. Default 'Y'.
        `None` to skip test.
    wants_on_node : str, optional
        'Wants' state of files to check for. Default 'Y'.
        `None` to skip test.
    """
    # Loop over all acquisitions and files and check that they have been
    # correctly added to the database
    for acq in acqs:
        # Test that the acquisition exists
        acq_query = ac.ArchiveAcq.select().where(ac.ArchiveAcq.name == acq["name"])
        assert acq_query.count() == 1
        acq_obj = acq_query.get()

        # Test that it has the correct type
        assert acq_obj.type.name == acq["type"]

        for file_ in acq["files"]:
            # Test that the file exists
            file_query = ac.ArchiveFile.select().where(
                ac.ArchiveFile.acq == acq_obj, ac.ArchiveFile.name == file_["name"]
            )

            # Check that we haven't imported types we don't want
            # (untracked files and lock files must NOT be in the database)
            if file_["type"] in [None, "lock"]:
                assert file_query.count() == 0
                continue

            assert file_query.count() == 1
            file_obj = file_query.get()

            # Test that it has the correct type
            assert file_obj.type.name == file_["type"]

            if copies_on_node is not None:
                # Test that this node has a copy
                copy_query = ar.ArchiveFileCopy.select().where(
                    ar.ArchiveFileCopy.file == file_obj,
                    ar.ArchiveFileCopy.node == copies_on_node,
                )
                assert copy_query.count() == 1
                copy_obj = copy_query.get()

                # Optionally check the has/wants flags on the copy row.
                if has_on_node is not None:
                    assert copy_obj.has_file == has_on_node
                if wants_on_node is not None:
                    assert copy_obj.wants_file == wants_on_node
def _verify_files(worker):
    """Verify the files are in place using the alpenhorn verify command."""
    # Run `alpenhorn node verify` inside the worker's container; the wrapped
    # command prints the verify exit status via `echo $?`.
    output = worker["container"].exec_run(
        "bash -c 'alpenhorn node verify %s &> /dev/null; echo $?'" % worker["node"].name
    )
    # The wrapper itself must have run...
    assert not output.exit_code
    # BUG FIX: `output.exit_code` is the status of the `bash -c` wrapper,
    # whose last command is `echo` and is therefore always 0. The actual
    # verify status is the text `echo $?` printed, so check that too.
    assert output.output.decode().strip() == "0"
# ====== Test the auto_import behaviour ======
def test_import(workers, test_files):
    """Files dropped onto the auto_import node must appear in the DB."""
    # Add a bunch of files onto node_0, wait for them to be picked up by the
    # auto_import, and then verify that they all got imported to the db
    # correctly.

    # Create the files (lock files skipped so everything gets imported)
    _make_files(test_files, workers[0]["dir"], skip_lock=True)

    # Wait for the auto_import to catch them
    # NOTE(review): an earlier comment claimed auto_import "polls at 30s
    # intervals" but only 3s are waited here — confirm the daemon reacts
    # faster (e.g. inotify) in the test image.
    time.sleep(3)

    node = workers[0]["node"]
    _verify_db(test_files, copies_on_node=node)
    _verify_files(workers[0])
def test_status(workers, network):
    """Check for #109, `alpenhorn status` failing with MySQL storage"""
    status = client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command="alpenhorn status",
    ).decode()

    # node_0 should report the 9 imported files; the other nodes are empty.
    assert re.search(
        r"^node_0\s+9\s+0.0\s+100\.0\s+100\.0\s+container-0:/data$",
        status,
        re.MULTILINE,
    )
    assert re.search(r"^node_1\s+0\s+0.0\s+container-1:/data$", status, re.MULTILINE)
    assert re.search(r"^node_2\s+0\s+0.0\s+container-2:/data$", status, re.MULTILINE)
# ====== Test that the sync between nodes works ======
def test_sync_all(workers, network, test_files):
    """Syncing everything from node_0 into group_1 must populate node_1."""
    # Request sync onto a different node
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command="alpenhorn sync -f node_0 group_1",
    )
    # Give the daemons time to perform the copies.
    time.sleep(3)

    _verify_db(test_files, copies_on_node=workers[1]["node"])
    _verify_files(workers[1])
def test_sync_acq(workers, network, test_files):
    """Sync acquisitions one at a time from node_0 into group_2 and verify."""
    for acq in test_files:
        # Request sync of a single acq onto a different node
        client.containers.run(
            "alpenhorn",
            remove=True,
            detach=False,
            network_mode=network,
            command=("alpenhorn sync -f node_0 group_2 --acq=%s" % acq["name"]),
        )
    time.sleep(3)

    # Verify that the requested files have been copied onto the group_2 node.
    # BUG FIX: this previously checked workers[1]'s node, which only passed
    # because test_sync_all had already copied everything to node_1; the
    # sync above targets group_2, i.e. workers[2].
    for acq in test_files:
        _verify_db([acq], copies_on_node=workers[2]["node"])
    _verify_files(workers[2])
# ====== Test that the clean command works ======
def _verify_clean(acqs, worker, unclean=False, check_empty=False):
    """Test the clean command.

    Check the command has been executed as expected on the node associated with
    'worker'. If 'unclean' is set to True, check that files are not wanted but still
    present (until additional copies on other archive nodes are found).
    """
    # Check files are set to deleted / not deleted but not wanted in database
    for acq in acqs:
        if unclean:
            _verify_db(
                [acq], copies_on_node=worker["node"], has_on_node="Y", wants_on_node="N"
            )
        else:
            _verify_db(
                [acq], copies_on_node=worker["node"], has_on_node="N", wants_on_node="N"
            )

    # Check files are in fact gone / still there
    for acq in acqs:
        for f in acq["files"]:
            # Ignore files not tracked by the database
            if f["type"] is not None and f["type"] != "lock":
                file_exists = os.path.exists(
                    os.path.join(worker["dir"], acq["name"], f["name"])
                )
                # File must be present iff the node is expected "unclean".
                assert (file_exists and unclean) or (not file_exists and not unclean)

    # If specified, check no files or directories are left over other than the
    # ALPENHORN_NODE file
    if not unclean and check_empty:
        files = os.listdir(worker["dir"])
        assert "ALPENHORN_NODE" in files
        assert len(files) == 1
def test_clean(workers, network, test_files):
    """Exercise `alpenhorn node clean`: mark, force-delete, and the safety
    check that keeps the last archive copy."""
    # Simplest clean request
    node_to_clean = workers[1]["node"]
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command=("alpenhorn node clean -f {}".format(node_to_clean.name)),
    )

    # Check files set to 'M' (marked, but still present and on disk)
    for acq in test_files:
        _verify_db(
            [acq], copies_on_node=node_to_clean, has_on_node="Y", wants_on_node="M"
        )

    # Changed my mind, delete them NOW
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command=("alpenhorn node clean -nf {}".format(node_to_clean.name)),
    )

    # Check files have been deleted
    time.sleep(3)
    _verify_clean(test_files, workers[1])
    # Since no untracked files should be present, check root is empty
    _verify_clean(test_files, workers[1], check_empty=True)

    # Request clean on a node when only one other archive node has a copy
    # Files should not be deleted
    node_to_clean = workers[2]["node"]
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command=("alpenhorn node clean -nf {}".format(node_to_clean.name)),
    )

    # Check files are still present
    time.sleep(3)
    _verify_clean(test_files, workers[2], unclean=True)
# === Test that the node file is being checked successfully
def test_node_active(workers):
    """Renaming the ALPENHORN_NODE marker file deactivates the node; putting
    it back (and re-flagging the row active) brings it back online."""
    data_dir = workers[1]["dir"]
    os.rename(data_dir + "/ALPENHORN_NODE", data_dir + "/DIFFERENT_NAME")
    print("Changed name of ALPENHORN_NODE file in directory", data_dir)
    this_node = workers[1]["node"]
    time.sleep(3)

    refreshed = st.StorageNode.get(name=this_node.name)
    assert not refreshed.active

    os.rename(data_dir + "/DIFFERENT_NAME", data_dir + "/ALPENHORN_NODE")
    refreshed = st.StorageNode.get(name=this_node.name)
    refreshed.active = True
    refreshed.save(only=refreshed.dirty_fields)
    time.sleep(3)

    refreshed = st.StorageNode.get(name=this_node.name)
    assert refreshed.active
@pytest.mark.skipif(
    "PLAYGROUND" not in os.environ,
    reason=("Set PLAYGROUND to leave alpenhorn alive for interactive fun."),
)
def test_playground(workers):
    """Not a real test: keep the docker setup alive for interactive use."""
    print(
        """
To connect the alpenhorn database to this instance run:
>>> from alpenhorn import db
>>> db._connect(url='mysql://root@127.0.0.1:63306/alpenhorn_db')
To interact with the individual alpenhorn instances use docker exec, e.g.
$ docker exec container_0 alpenhorn status
When you are finished playing, press enter to close the docker containers and
clean up everything."""
    )
    # Block until the user presses enter; fixtures tear everything down after.
    input("")
| 16,564 | 5,282 |
# Generate one stub .java file per CodeHS exercise, each containing a
# standard warning header.
a="5.2.6 Our First Array"
b="5.2.7 Array Length"
c="5.2.8 Last Element in Array"
d="5.3.6 Print Array"
e="5.3.7 Print Odd Array Indices"
f="5.3.8 Find the Minimum Index"
g="5.3.12 Find the Median"
h="5.3.13 Top Student"
i="5.4.6 Get First Element"
j="5.4.9 Road Trip"

# Exercise number (1-based) -> exercise name.
nameL = {1: a,
         2: b,
         3: c,
         4: d,
         5: e,
         6: f,
         7: g,
         8: h,
         9: i,
         10: j,
         }

# BUG FIX: the original looped `for i in range(22)` over this 10-entry dict,
# raising KeyError at i == 10.  Iterate exactly over the entries that exist.
for index in range(len(nameL)):
    x = nameL[index + 1]
    # `with` guarantees the file is closed even if a write fails (the
    # original left handles open and also clobbered the name `f`).
    with open(x + ".java", "w+") as out:
        out.write("/*---------------------------------------------------------*|\n")
        out.write("| Created by Tannre Overly - 2018 |\n")
        out.write("|-----------------------------------------------------------|\n")
        out.write("| WARNING - WARNING - WARNING - WARNING - WARNING |\n")
        out.write("|-----------------------------------------------------------|\n")
        out.write("| It is highly recommended that you attempt to solve your |\n")
        out.write("| problems before using work provided here in order to |\n")
        out.write("| actually learn from your mistakes. Try to use this only to|\n")
        out.write("| review or compare your own work. |\n")
        out.write("|_______________________Good_Luck___________________________|*/\n\n")
from django.apps import AppConfig
class ScorecardsConfig(AppConfig):
    """Django AppConfig registering the 'scorecards' application."""

    # Dotted path / label Django uses to identify this app.
    name = 'scorecards'
| 95 | 29 |
from rest_framework import permissions
class IsLoggedInUserOrAdmin(permissions.BasePermission):
    """Object-level permission: grant access to the user's own object, or to
    any user flagged as admin."""

    def has_object_permission(self, request, view, obj):
        if obj == request.user:
            return True
        return request.user.is_admin
class IsAdminUser(permissions.BasePermission):
    """Allow access only to staff users, at both view and object level."""

    def has_permission(self, request, view):
        return request.user and request.user.is_staff

    def has_object_permission(self, request, view, obj):
        # Object access follows exactly the same staff-only rule.
        return self.has_permission(request, view)
# class IsGetOrPatchOrPut(permissions.BasePermission):
# def has_permission(self, request, view):
# allowed_methods = ['GET', 'PATCH', 'PUT']
# if request.method in allowed_methods:
# return True
# else:
# return False
class IsLoggedInUser(permissions.BasePermission):
    """Object-level permission: only the requesting user's own object."""

    def has_object_permission(self, request, view, obj):
        # True only when the target object IS the authenticated user.
        return obj == request.user
| 855 | 267 |
import numpy as np
from scipy.optimize import line_search
import locale
locale.setlocale(locale.LC_ALL, '')
class cd_res(object):
    """Minimal optimisation result holder, mirroring the `x` / `fun`
    attributes of `scipy.optimize.OptimizeResult`."""

    def __init__(self, x, fun):
        # solution vector and objective value at that solution
        self.x = x
        self.fun = fun
print_stop_iteration = 1
def cdl_step(score,
             guess,
             jac,
             val=None,
             aggressiveness=0.1,
             zero_eps=1e2 * np.finfo(float).eps,
             print_path=True,
             decrement=1e-1):
    """Force a single descent step away from `guess` (typically the origin).

    Takes a gradient step clipped to the positive orthant, shrinking it by
    `decrement` until the objective improves.

    Returns
    -------
    (direction, new_val) : the new point and its objective value.

    Raises
    ------
    RuntimeError : if no improving step can be found.

    Note: the array returned by `jac` is modified in place (ascent
    components are zeroed).
    """
    print("[FORCING FIRST STEP]")
    assert 0 < aggressiveness < 1
    assert 0 < decrement < 1
    if val is None:
        val = score(guess)
    grad = jac(guess)
    # (an unused `grad_copy = grad.copy()` was removed)

    # constrain to the positive orthant: zero out ascent components
    grad[grad > 0] = 0
    if (grad >= 0).all():
        # Every remaining component is zero: the clipped gradient offers no
        # descent direction inside the positive orthant.
        # BUG FIX: `raise runtime(...)` was an undefined name (NameError);
        # use RuntimeError as the rest of the module does.  The unreachable
        # `return guess, val` that followed the raise has been dropped.
        raise RuntimeError("Failed to take a step")

    direction = - (aggressiveness * val * grad) / grad.dot(grad.T)
    # THE ABOVE IS EQUIVALENT TO :
    # step_magnitude = aggressiveness*val/np.linalg.norm(grad)
    # direction = -step_magnitude * (grad / np.linalg.norm(grad))

    while True:
        new_val = score(direction)
        if new_val < val:
            return direction, new_val
        # No improvement yet: shrink the step geometrically.
        direction *= decrement
        if sum(direction) < zero_eps:
            # BUG FIX: `runtime` -> RuntimeError here as well.
            raise RuntimeError("Failed to take a step")
def cdl_search(score,
               guess,
               jac,
               tol=1e-4,
               aggressiveness=0.1,  # aggressiveness
               alpha_mult=.9,
               max_iter=3000,
               min_iter=3,
               # TODO: this is a stupid default (I'm using it out of laziness)
               zero_eps=1e2 * np.finfo(float).eps,
               print_path=True,
               print_path_verbose=False,
               preserve_angle=False):
    '''
    Implements coordinate descent with line search with the strong wolf
    conditions. Note, this tends to give nearly identical results as L-BFGS-B,
    and is *much* slower than that the super-fast 40 year old Fortran code
    wrapped by SciPy.
    '''
    # NOTE(review): indentation of this function was reconstructed from a
    # whitespace-mangled source; the nesting of the "stuck at origin" block
    # below should be confirmed against upstream.
    assert 0 < aggressiveness < 1
    assert 0 < alpha_mult < 1
    assert (guess >= 0).all(), "Initial guess (`guess`) should be in the closed positive orthant"

    val_old = None
    grad = None
    x_curr = guess
    alpha_t = 0  # exponent of the adaptive step multiplier alpha_mult ** alpha_t
    val = score(x_curr)

    # Baseline objective at the origin, used for the R^2-style progress print.
    if (x_curr == np.zeros(x_curr.shape[0])).all():
        val0 = val
    else:
        val0 = score(np.zeros(x_curr.shape[0]))

    for _i in range(max_iter):
        if grad is None:
            # (this happens when `constrained == True` or the next point falls beyond zero due to rounding error)
            if print_path_verbose:
                print("[INITIALIZING GRADIENT]")
            grad = jac(x_curr)

        # A positive gradient on a coordinate already at zero would push the
        # iterate out of the positive orthant: those directions are invalid.
        invalid_directions = np.logical_and(grad > 0, x_curr == 0)
        if (grad[np.logical_not(invalid_directions)] == 0).all():
            # this happens when we're stuck at the origin and the gradient is
            # pointing in the all-negative direction
            if print_stop_iteration:
                print("[STOP ITERATION: gradient is zero] i: %s" % (_i,))
            return cd_res(x_curr, val)

        # constrain to the positive orthant
        grad[invalid_directions] = 0

        direction = - (aggressiveness * val * grad) / grad.dot(grad.T)
        # THE ABOVE IS EQUIVALENT TO :
        # step_magnitude = aggressiveness*val/np.linalg.norm(grad)
        # direction = -step_magnitude * (grad / np.linalg.norm(grad))

        # adaptively adjust the step size:
        direction *= (alpha_mult ** alpha_t)

        # constrain the gradient to being non-negative on axis where the
        # current guess is already zero
        if (direction < 0).any() and preserve_angle:
            constrained = True
            alpha_ratios = - direction[direction < 0] / x_curr[direction < 0]
            if (alpha_ratios > 1).any():
                max_alpha = alpha_ratios.max()
            else:
                max_alpha = 1
        else:
            constrained = False
            max_alpha = 1

        if print_path_verbose:
            print("[STARTING LINE SEARCH]")
        # Strong-Wolfe line search along the (possibly rescaled) direction;
        # both f and f' are wrapped to clip the query point at zero.
        res = line_search(f=zed_wrapper(score), myfprime=zed_wrapper(jac), xk=x_curr, pk=direction/max_alpha, gfk=grad, old_fval=val, old_old_fval=val_old)
        if print_path_verbose:
            print("[FINISHED LINE SEARCH]")
        alpha, _, _, _, _, _ = res

        if alpha is not None:
            # adjust the future step size
            if alpha >= 1:
                alpha_t -= 1
            else:
                alpha_t += 1
        elif constrained:
            # Wolfe search failed while constrained: fall back to a crude
            # backtracking search along the raw gradient.
            for j in range(5):  # formerly range(17), but that was excessive;
                # in general, this succeeds when alpha >= 0.1 (super helpful)
                # or alpha <= 1e-14 (super useless)
                if score(x_curr - (.3**j)*grad/max_alpha) < val:
                    # This can occur when the strong wolf condition insists that the
                    # current step size is too small (i.e. the gradient is too
                    # consistent with the function to think that a small step is
                    # optimal for a global (unconstrained) optimization.
                    alpha = (.3**j)
                    if print_stop_iteration:
                        print("[STOP ITERATION: simple line search worked :)] i: %s, alpha: 1e-%s" % (_i,j))
                    break
            else:
                # moving in the direction of the gradient yielded no improvement: stop
                if print_stop_iteration:
                    print("[STOP ITERATION: simple line search failed] i: %s" % (_i,))
                return cd_res(x_curr, val)
        else:
            # moving in the direction of the gradient yielded no improvement: stop
            if print_stop_iteration:
                print("[STOP ITERATION: alpha is None] i: %s, grad: %s, step: %s" % (_i, grad, direction/max_alpha, ))
            return cd_res(x_curr, val)

        # iterate
        if constrained:
            x_next = x_curr + min(1, alpha)*direction/max_alpha
            # grad is cleared (None) so it is re-evaluated next pass.
            x_old, x_curr, val_old, val, grad, old_grad = x_curr, x_next, val, score(x_next), None, grad
        else:
            # Clip at zero: rounding error in the line search can land the
            # next point slightly below the coordinate plane.
            x_next = np.maximum(x_curr + alpha *direction/max_alpha, 0)
            x_old, x_curr, val_old, val, grad, old_grad = x_curr, x_next, val, res[3], res[5], grad
        val_diff = val_old - val

        if print_path:
            print("[Path] i: %s, In Sample R^2: %0.6f, incremental R^2:: %0.6f, learning rate: %0.5f, alpha: %0.5f, zeros: %s" %
                  (_i, 1- val / val0, (val_diff/ val0), aggressiveness * (alpha_mult ** alpha_t), alpha, sum( x_curr == 0)))
            if print_path_verbose:
                print("old_grad: %s,x_curr %s" % (old_grad, x_curr, ))

        if (x_curr == 0).all() and (x_old == 0).all():
            # this happens when we were at the origin and the gradient didn't
            # take us out of the range of zero_eps
            if _i == 0:
                # On the very first iteration, force a step off the origin.
                x_curr, val = cdl_step (score, guess, jac, val, aggressiveness, zero_eps, print_path)
                if (x_curr == 0).all():
                    if print_stop_iteration:
                        print("[STOP ITERATION: Stuck at the origin] iteration: %s"% (_i,))
            if (x_curr == 0).all():
                if print_stop_iteration:
                    print("[STOP ITERATION: Stuck at the origin] iteration: %s"% (_i,))
                return cd_res(x_curr, score(x_curr)) # tricky tricky...

        if (x_curr < 0).any():
            # This shouldn't ever happen if max_alpha is specified properly
            raise RuntimeError("An internal Error Occured: (x_curr < 0).any()")

        if val_diff/val < tol:
            # this a heuristic rule, to be sure, but seems to be useful.
            # TODO: this is kinda stupid without a minimum on the learning rate (i.e. `aggressiveness`).
            if _i > min_iter:
                if print_stop_iteration:
                    print("[STOP ITERATION: val_diff/val < tol] i: %s, val: %s, val_diff: %s" % (_i, val, val_diff, ))
                return cd_res(x_curr, val)

    # returns solution in for loop if successfully converges
    raise RuntimeError('Solution did not converge to default tolerance')
def zed_wrapper(fun):
    """Wrap `fun` so its first argument is clipped into the positive orthant
    (elementwise max with 0) before the call; extra args pass through."""

    def clipped(x, *args, **kwargs):
        return fun(np.maximum(0, x), *args, **kwargs)

    return clipped
| 10,172 | 3,177 |
from ogre import generators
import argparse
from configparser import ConfigParser
from ase.io import read, write
import os
from ogre.utils.utils import print_run_time
#print('################')
#teat_a = read('TETCEN.cif', format= 'cif')
#print(teat_a)
def parse_arguments():
    """Parse command-line options.

    --filename selects the ogre config file (default: 'ogre.config').
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--filename', dest='filename', default='ogre.config', type=str)
    return arg_parser.parse_args()
@print_run_time
def main():
    """Read the ogre config file and cleave crystal surfaces accordingly.

    Reads [io], [parameters] and [methods] sections from the config, expands
    the layer specification (single numbers and 'a-b' ranges), then dispatches
    to the generator matching `cleave_option` (0: single surface, 1: surfaces
    for surface-energy calculations).
    """
    args = parse_arguments()
    filename = args.filename
    config = ConfigParser()
    config.read(filename, encoding='UTF-8')
    io = config['io']
    parameters = config['parameters']
    methods = config['methods']

    structure_path = io['structure_path']
    structure_name = io['structure_name']
    format_string = io['format']
    cleave_option = int(methods['cleave_option'])

    layers_string = parameters['layers']
    miller_index = [int(x) for x in parameters['miller_index'].split(" ")]

    # Expand "1 3-5 8" style layer specs into a flat list of ints.
    list_of_layers = []
    for item in layers_string.split(' '):
        if item:
            if '-' in item:
                # BUG FIX: this split was duplicated on two consecutive lines.
                start, end = item.split('-')
                list_of_layers.extend(list(range(int(start), int(end) + 1)))
            else:
                list_of_layers.append(int(item))

    highest_index = int(parameters['highest_index'])
    vacuum_size = int(parameters['vacuum_size'])
    # A supercell needs all three dimensions; anything shorter means "none".
    supercell_size = parameters['supercell_size'].split(' ')
    supercell_size = None if len(supercell_size) < 3 else [
        int(x) for x in supercell_size]
    desired_num_of_molecules_oneLayer = int(parameters['desired_num_of_molecules_oneLayer'])

    if not os.path.isdir(structure_name):
        os.mkdir(structure_name)

    # NOTE(review): the input is always read as CIF here even though the
    # config exposes a 'format' option (used only for output) — confirm.
    initial_structure = read(structure_path, format='cif')

    if cleave_option == 0:
        print("Cleave single surface")
        generators.atomic_task(structure_name, initial_structure, miller_index,
                               list_of_layers, vacuum_size, supercell_size,
                               format_string, desired_num_of_molecules_oneLayer)
    elif cleave_option == 1:
        print("Cleave surfaces for surface energy calculations")
        generators.cleave_for_surface_energies(
            structure_path, structure_name, vacuum_size, list_of_layers,
            highest_index, supercell_size, format_string,
            desired_num_of_molecules_oneLayer)
# Script entry point.
if __name__ == "__main__":
    main()
| 2,495 | 761 |
import time
from threading import Thread
import cv2
import keyboard
import drawing
import utils
from config import config
from gameplay import GameWithFriendOpenVINO
from utils import log
class DisplayThread(Thread):
    """Background thread that repeatedly takes the newest frame and joint
    detections, draws overlays, feeds the game/GUI logic, and shows the
    result in an OpenCV window at roughly `fps` frames per second."""

    def __init__(self, frame_deque, joints_deque, fps=24, gui=None):
        super().__init__()
        # Set True while run() should keep looping.
        self._keep_running = False
        self.gui = gui
        # Producer threads push frames/joints here; only the newest is shown.
        self.frame_deque = frame_deque
        self.joints_deque = joints_deque
        self.game = None
        self.fps = fps

    def __del__(self):
        # Best-effort cleanup of any OpenCV windows when the object dies.
        cv2.destroyAllWindows()

    def quit_app(self):
        """Log and stop the display loop."""
        log.info('Exiting...')
        self.stop()

    def display_last(self):
        """Render and display the most recent frame, if one is available."""
        # Global hotkey: quit when the configured key is pressed.
        if keyboard.is_pressed(config.app.quit_key):
            self.quit_app()
        if not self.frame_deque:
            log.warning('No frames to display; Output fps may be set too high')
            return

        frame = self.frame_deque[-1]
        if self.joints_deque:
            joints = self.joints_deque[-1]
            # Draw skeletons and per-person limb circles on the frame.
            drawing.draw_joints(frame, joints, skeleton=config.app.model.SKELETON)
            for person_joints in joints:
                drawing.draw_limb_circles(frame, person_joints, config.app.model.BODY_PART_INDEXES)
        else:
            joints = []

        if config.app.flip_image:
            # Mirror the image; joints must be flipped too to stay aligned.
            frame = cv2.flip(frame, 1)
            flipped_joints = [utils.flip_joints(item) for item in joints]
        else:
            flipped_joints = joints

        if self.gui.start_status:
            game_status = True
            if self.gui.countdown != 0:
                # Pre-game countdown overlay.
                self.gui.start_prepare(frame)
            elif type(self.gui.game_mode) != GameWithFriendOpenVINO:
                # Single-player modes take at most one person's joints.
                game_status = self.gui.game_mode.process(frame, flipped_joints[0] if len(flipped_joints) != 0 else [])
            else:
                # The OpenVINO friend mode consumes all detected people.
                game_status = self.gui.game_mode.process(frame, flipped_joints)
            if not game_status:
                # Game signalled it is over; return the GUI to its menu state.
                self.gui.reset()
        else:
            # Menu/idle state: a truthy return from the GUI means "quit".
            q = self.gui.process(frame, flipped_joints)
            if q:
                self.quit_app()

        cv2.imshow(config.app.window_name, frame)
        cv2.waitKey(1)

    def run(self):
        self._keep_running = True
        while self._keep_running:
            self.display_last()
            # TODO Match fps more accurately
            time.sleep(1 / self.fps)

    def stop(self):
        """Ask the run() loop to exit after the current iteration."""
        self._keep_running = False
| 2,405 | 759 |
# Number of training examples — presumably the size of the dataset's
# training split; verify against the data pipeline that consumes it.
TRAIN_SIZE = 23654
| 19 | 14 |
import numpy as np
from numpy import *
import pandas as pd
from pandas import DataFrame, Series
from numpy.random import randn
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
import re
from skimage.io import imread, imshow
from termcolor import colored
import keras
import h5py
import cv2
from scipy import spatial
from keras.layers import Flatten, Dense, Input, concatenate
from keras.layers import MaxPooling2D
from keras.layers import Activation, Dropout
from keras.models import Model
from keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from scipy.fftpack import dct, idct
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r"C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
from pytesseract import *
# Prompt for the image path and load it with OpenCV.
id_card_input = input("Enter name of image file; e.g computer.jpg : ")
id_card = cv2.imread(id_card_input)

# Grayscale + Otsu binarisation. cv2.threshold returns (threshold_value,
# image); the original clobbered `gray` with the float threshold value
# (immediately overwritten anyway) — discard it explicitly instead.
gray = cv2.cvtColor(id_card, cv2.COLOR_RGB2GRAY)
_, img_bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
gray = cv2.bitwise_not(img_bin)

# Light morphological clean-up (erode then dilate) before OCR.
kernel = np.ones((2, 1), np.uint8)
id_card = cv2.erode(gray, kernel, iterations=1)
id_card = cv2.dilate(id_card, kernel, iterations=1)

# Run Tesseract (page-segmentation mode 6: assume a uniform block of text).
exp_txt = pytesseract.image_to_string(id_card, lang='eng', config='--psm 6')
print(exp_txt)

# `with` closes the file; the original redundant file.close() is dropped.
with open("exp.txt", mode="w") as file:
    file.write(exp_txt)

# Read the text back with a context manager (original leaked the handle
# by opening outside the `with` statement).
with open("exp.txt", 'r') as p:
    lines = p.readlines()

imshow(gray)
| 1,524 | 585 |
"""Optional support for sqlalchemy.sql dynamic query generation."""
from asyncmy.sa.connection import SAConnection
from .engine import create_engine, Engine
from .exc import (Error, ArgumentError, InvalidRequestError,
NoSuchColumnError, ResourceClosedError)
from . import result
# Public API of this package.
__all__ = ('create_engine', 'SAConnection', 'Error',
           'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError',
           'ResourceClosedError', 'Engine', 'result')

# Reference the imported names once so linters / static analysers do not
# flag them as unused imports.
(SAConnection, Error, ArgumentError, InvalidRequestError,
 NoSuchColumnError, ResourceClosedError, create_engine, Engine)
| 603 | 159 |
from collections import OrderedDict
import math
from quantile_ml import utils
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_squared_error, make_scorer, brier_score_loss, accuracy_score, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score, log_loss, roc_auc_score
import numpy as np
# String spellings of values treated as missing/invalid in y (NaN/inf in
# several forms, None, empty string, SQL-style NULL, etc.); membership is
# tested via str(val) so it works across numeric and object dtypes.
bad_vals_as_strings = set([str(float('nan')), str(float('inf')), str(float('-inf')), 'None', 'none', 'NaN', 'NAN', 'nan', 'NULL', 'null', '', 'inf', '-inf', 'np.nan', 'numpy.nan'])
def advanced_scoring_classifiers(probas, actuals, name=None):
    """Print diagnostic metrics for classifier probability predictions.

    Reports brier-score-loss, accuracy at a 0.5 threshold, a confusion
    matrix, and per-probability-decile accuracy.

    Parameters
    ----------
    probas : list-like
        Predicted probabilities; either flat (positive-class probability
        per row) or nested ([p_negative, p_positive] per row).
    actuals : list-like
        Ground-truth binary labels (0/1).
    name : str, optional
        Label printed before the report.
    """
    # pandas Series don't play nice here. Make sure our actuals list is indeed a list
    actuals = list(actuals)
    predictions = list(probas)
    print('Here is our brier-score-loss, which is the default value we optimized for while training, and is the value returned from .score() unless you requested a custom scoring metric')
    print('It is a measure of how close the PROBABILITY predictions are.')
    if name is not None:
        print(name)
    # Sometimes we will be given "flattened" probabilities (only the probability of our positive label), while other times we might be given "nested" probabilities (probabilities of both positive and negative, in a list, for each item).
    try:
        probas = [proba[1] for proba in probas]
    except (TypeError, IndexError):
        # Already flat: floats are not subscriptable (TypeError), or rows
        # have fewer than two entries (IndexError).
        pass
    print(format(brier_score_loss(actuals, probas), '.4f'))
    print('\nHere is the trained estimator\'s overall accuracy (when it predicts a label, how frequently is that the correct label?)')
    # Threshold probabilities at 0.5 to obtain hard labels.
    predicted_labels = [1 if pred >= 0.5 else 0 for pred in probas]
    print(format(accuracy_score(y_true=actuals, y_pred=predicted_labels) * 100, '.1f') + '%')
    print('\nHere is a confusion matrix showing predictions and actuals by label')
    #it would make sense to use sklearn's confusion_matrix here but it apparently has no labels
    #took this idea instead from: http://stats.stackexchange.com/a/109015
    conf = pd.crosstab(pd.Series(actuals), pd.Series(predicted_labels), rownames=['v Actual v'], colnames=['Predicted >'], margins=True)
    print(conf)
    print('Here is the accuracy of our trained estimator at each level of predicted probabilities')
    # Bucket actuals by predicted probability decile (0%, 10%, ... 100%).
    summary_dict = OrderedDict()
    for num in range(0, 110, 10):
        summary_dict[num] = []
    for idx, proba in enumerate(probas):
        proba = math.floor(int(proba * 100) / 10) * 10
        summary_dict[proba].append(actuals[idx])
    for k, v in summary_dict.items():
        if v:
            print('Predicted probability: ' + str(k) + '%')
            actual = sum(v) * 1.0 / len(v)
            # Format into a prettier number
            actual = round(actual * 100, 0)
            print('Actual: ' + str(actual) + '%')
            print('# preds: ' + str(len(v)) + '\n')
    print('\n\n')
def calculate_and_print_differences(predictions, actuals, name=None):
    """Print counts and averages of over- and under-predictions.

    A positive difference means prediction > actual; exact matches are
    ignored. Returns None (print-only helper).

    Parameters
    ----------
    predictions, actuals : list-like of numbers, same length.
    name : str, optional
        Label printed before the numbers.
    """
    pos_differences = []
    neg_differences = []
    # Technically, we're ignoring cases where we are spot on
    for idx, pred in enumerate(predictions):
        difference = pred - actuals[idx]
        if difference > 0:
            pos_differences.append(difference)
        elif difference < 0:
            neg_differences.append(difference)
    if name is not None:
        print(name)
    print('Count of positive differences (prediction > actual):')
    print(len(pos_differences))
    print('Count of negative differences:')
    print(len(neg_differences))
    # Guard against division by zero when one side is empty.
    if pos_differences:
        print('Average positive difference:')
        print(sum(pos_differences) * 1.0 / len(pos_differences))
    if neg_differences:
        print('Average negative difference:')
        print(sum(neg_differences) * 1.0 / len(neg_differences))
def advanced_scoring_regressors(predictions, actuals, verbose=2, name=None):
    """Print diagnostic metrics for regression predictions.

    Reports RMSE, means/medians, MAE, median AE, explained variance, R^2
    and over/under-prediction stats; with ``verbose > 2`` also a breakdown
    per decile of the sorted predictions.

    Parameters
    ----------
    predictions, actuals : list-like of numbers, same length.
    verbose : int
        > 2 adds the per-decile breakdown.
    name : str, optional
        Label printed with the report.
    """
    # pandas Series don't play nice here. Make sure our actuals list is indeed a list
    actuals = list(actuals)
    predictions = list(predictions)
    print('\n\n***********************************************')
    if name is not None:
        print(name)
    print('Advanced scoring metrics for the trained regression model on this particular dataset:\n')
    # 1. overall RMSE
    print('Here is the overall RMSE for these predictions:')
    print(mean_squared_error(actuals, predictions)**0.5)
    # 2. overall avg predictions
    print('\nHere is the average of the predictions:')
    print(sum(predictions) * 1.0 / len(predictions))
    # 3. overall avg actuals
    print('\nHere is the average actual value on this validation set:')
    print(sum(actuals) * 1.0 / len(actuals))
    # 2(a). median predictions
    print('\nHere is the median prediction:')
    print(np.median(predictions))
    # 3(a). median actuals
    print('\nHere is the median actual value:')
    print(np.median(actuals))
    # 4. avg differences (not RMSE)
    print('\nHere is the mean absolute error:')
    print(mean_absolute_error(actuals, predictions))
    print('\nHere is the median absolute error (robust to outliers):')
    print(median_absolute_error(actuals, predictions))
    print('\nHere is the explained variance:')
    print(explained_variance_score(actuals, predictions))
    print('\nHere is the R-squared value:')
    print(r2_score(actuals, predictions))
    # 5. pos and neg differences
    calculate_and_print_differences(predictions=predictions, actuals=actuals, name=name)
    actuals_preds = list(zip(actuals, predictions))
    # Sort by PREDICTED value, since this is what we will know at the time we make a prediction
    actuals_preds.sort(key=lambda pair: pair[1])
    actuals_sorted = [act for act, pred in actuals_preds]
    predictions_sorted = [pred for act, pred in actuals_preds]
    if verbose > 2:
        print('Here\'s how the trained predictor did on each successive decile (ten percent chunk) of the predictions:')
        for i in range(1, 10):
            print('\n**************')
            print('Bucket number:')
            print(i)
            # There's probably some fenceposting error here
            min_idx = int((i - 1) / 10.0 * len(actuals_sorted))
            max_idx = int(i / 10.0 * len(actuals_sorted))
            actuals_for_this_decile = actuals_sorted[min_idx:max_idx]
            predictions_for_this_decile = predictions_sorted[min_idx:max_idx]
            print('Avg predicted val in this bucket')
            print(sum(predictions_for_this_decile) * 1.0 / len(predictions_for_this_decile))
            print('Avg actual val in this bucket')
            print(sum(actuals_for_this_decile) * 1.0 / len(actuals_for_this_decile))
            print('RMSE for this bucket')
            print(mean_squared_error(actuals_for_this_decile, predictions_for_this_decile)**0.5)
            calculate_and_print_differences(predictions_for_this_decile, actuals_for_this_decile)
    print('')
    print('\n***********************************************\n\n')
def rmse_func(y, predictions):
    """Root-mean-squared error: square root of sklearn's MSE."""
    mse = mean_squared_error(y, predictions)
    return mse ** 0.5
# Maps user-facing scoring-method names to the metric callables used by
# RegressionScorer / ClassificationScorer below (several aliases point at
# the same sklearn function).
scoring_name_function_map = {
    'rmse': rmse_func
    , 'median_absolute_error': median_absolute_error
    , 'r2': r2_score
    , 'r-squared': r2_score
    , 'mean_absolute_error': mean_absolute_error
    , 'accuracy': accuracy_score
    , 'accuracy_score': accuracy_score
    , 'log_loss': log_loss
    , 'roc_auc': roc_auc_score
    , 'brier_score_loss': brier_score_loss
}
class RegressionScorer(object):
    """Scorer for regression estimators.

    Wraps either a named metric from ``scoring_name_function_map`` (default
    'rmse') or a user-supplied callable. ``score`` returns the negated
    metric value, following the sklearn convention that larger is better.
    """

    def __init__(self, scoring_method=None):
        if scoring_method is None:
            scoring_method = 'rmse'
        # (The original assigned self.scoring_method twice; once suffices.)
        self.scoring_method = scoring_method
        if callable(scoring_method):
            self.scoring_func = scoring_method
        else:
            self.scoring_func = scoring_name_function_map[scoring_method]

    def get(self, prop_name, default=None):
        """dict.get-style attribute access with a default fallback."""
        try:
            return getattr(self, prop_name)
        except AttributeError:
            return default

    def score(self, estimator, X, y, took_log_of_y=False, advanced_scoring=False, verbose=2, name=None):
        """Return the negated metric for ``estimator`` predictions on (X, y).

        Rows whose y value is NaN/inf/None are dropped (with a message) if
        the metric raises; ``took_log_of_y`` exponentiates predictions back
        to the original scale; ``advanced_scoring`` additionally prints the
        full diagnostic report.
        """
        X, y = utils.drop_missing_y_vals(X, y, output_column=None)
        # GradientBoosting cannot consume sparse matrices directly.
        if isinstance(estimator, GradientBoostingRegressor):
            X = X.toarray()
        predictions = estimator.predict(X)
        if took_log_of_y:
            # Undo the log-transform applied to y during training.
            for idx, val in enumerate(predictions):
                predictions[idx] = math.exp(val)
        try:
            score = self.scoring_func(y, predictions)
        except ValueError:
            # Drop rows with invalid y values and retry. Use a set for O(1)
            # membership tests in the comprehensions below.
            bad_val_indices = set()
            for idx, val in enumerate(y):
                if str(val) in bad_vals_as_strings:
                    bad_val_indices.add(idx)
            predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
            y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]
            print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the y values. We will ignore these, and report the score on the rest of the dataset')
            score = self.scoring_func(y, predictions)
        if advanced_scoring:
            if hasattr(estimator, 'name'):
                print(estimator.name)
            advanced_scoring_regressors(predictions, y, verbose=verbose, name=name)
        return -1 * score
class ClassificationScorer(object):
    """Scorer for classification estimators.

    Wraps either a named metric from ``scoring_name_function_map`` (default
    'brier_score_loss') or a user-supplied callable. ``score`` returns the
    negated metric value so that larger is always better.
    """

    def __init__(self, scoring_method=None):
        if scoring_method is None:
            scoring_method = 'brier_score_loss'
        self.scoring_method = scoring_method
        if callable(scoring_method):
            self.scoring_func = scoring_method
        else:
            self.scoring_func = scoring_name_function_map[scoring_method]

    def get(self, prop_name, default=None):
        """dict.get-style attribute access with a default fallback."""
        try:
            return getattr(self, prop_name)
        except AttributeError:
            return default

    def clean_probas(self, probas):
        """Clamp predicted probabilities into [0, 1], warning the user.

        Handles both flat lists of floats and nested per-class lists.
        """
        print('Warning: We have found some values in the predicted probabilities that fall outside the range {0, 1}')
        print('This is likely the result of a model being trained on too little data, or with a bad set of hyperparameters. If you get this warning while doing a hyperparameter search, for instance, you can probably safely ignore it')
        print('We will cap those values at 0 or 1 for the purposes of scoring, but you should be careful to have similar safeguards in place in prod if you use this model')
        if not isinstance(probas[0], list):
            # Flat list of positive-class probabilities.
            probas = [min(max(pred, 0), 1) for pred in probas]
            return probas
        else:
            # Nested per-class probabilities: clamp each entry.
            cleaned_probas = []
            for proba_tuple in probas:
                cleaned_tuple = []
                for item in proba_tuple:
                    cleaned_tuple.append(max(min(item, 1), 0))
                cleaned_probas.append(cleaned_tuple)
            return cleaned_probas

    def score(self, estimator, X, y, advanced_scoring=False):
        """Return the negated metric for ``estimator`` on (X, y).

        Falls back in two stages when the metric raises ValueError: first
        drops rows with invalid y values, then clamps out-of-range
        probabilities. When ``advanced_scoring`` is truthy, returns the
        tuple ``(score, predictions)`` instead of just the score.
        """
        X, y = utils.drop_missing_y_vals(X, y, output_column=None)
        # GradientBoosting cannot consume sparse matrices directly.
        if isinstance(estimator, GradientBoostingClassifier):
            X = X.toarray()
        predictions = estimator.predict_proba(X)
        if self.scoring_method == 'brier_score_loss':
            # At the moment, Microsoft's LightGBM returns probabilities > 1 and < 0, which can break some scoring functions. So we have to take the max of 1 and the pred, and the min of 0 and the pred.
            probas = [max(min(row[1], 1), 0) for row in predictions]
            predictions = probas
        try:
            score = self.scoring_func(y, predictions)
        except ValueError as e:
            # Stage 1: drop rows whose y value is NaN/inf/None and retry.
            bad_val_indices = []
            for idx, val in enumerate(y):
                if str(val) in bad_vals_as_strings:
                    bad_val_indices.append(idx)
            predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
            y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]
            print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the y values. We will ignore these, and report the score on the rest of the dataset')
            try:
                score = self.scoring_func(y, predictions)
            except ValueError:
                # Sometimes, particularly for a badly fit model using either too little data, or a really bad set of hyperparameters during a grid search, we can predict probas that are > 1 or < 0. We'll cap those here, while warning the user about them, because they're unlikely to occur in a model that's properly trained with enough data and reasonable params
                predictions = self.clean_probas(predictions)
                score = self.scoring_func(y, predictions)
        if advanced_scoring:
            return (-1 * score, predictions)
        else:
            return -1 * score
| 13,058 | 3,941 |
import os
from setuptools import setup, find_packages
# Resolve the package version: prefer the VERSION environment variable,
# fall back to the VERSION file in the repository root.
version = os.environ.get('VERSION')
if version is None:
    with open(os.path.join('.', 'VERSION')) as version_file:
        version = version_file.read().strip()

# Read long description and requirements with context managers so the file
# handles are closed deterministically (the original leaked both handles).
with open('README.md') as readme_file:
    long_description = readme_file.read()
with open('requirements.txt') as requirements_file:
    # Strip newlines and skip blank lines instead of passing raw lines.
    install_requires = [line.strip() for line in requirements_file if line.strip()]

setup_options = {
    'name': 'loopchain tools',
    'description': 'CLI tools for loopchain',
    'long_description': long_description,
    'long_description_content_type': 'text/markdown',
    'url': 'https://github.com/icon-project/loopchain_tools',
    'version': version,
    'author': 'ICON foundation',
    'author_email': 't_core@iconloop.com',
    'packages': find_packages(),
    'license': "Apache License 2.0",
    'install_requires': install_requires,
    'classifiers': [
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3 :: Only'
    ]
}
setup(**setup_options)
| 1,150 | 348 |
# -*- coding:utf-8 -*-
"""
@Author:Charles Van
@E-mail: williananjhon@hotmail.com
@Time:2019-08-13 15:52
@Project:InterView_Book
@Filename:stack_4.py
@description:
使用堆栈解决汉诺塔问题
"""
'''
题目描述:
有3根杆子,其中一根上有n快铁饼,铁饼由小到大依次从上往下排列,要求把杆1上的铁饼挪到杆2上,
杆3可以作为铁饼转移的中转站。当转移铁饼时,必须保证小铁饼只能放到大铁饼的上头,请给出移动步骤。
'''
class HanoiMove:
    """Solve the Towers-of-Hanoi move sequence between two of three stacks.

    On construction the full move list (in execution order) is computed into
    ``self.hanoiMove``; :meth:`printMoveSteps` prints and consumes it.
    """

    def __init__(self, stackNum, stackFrom, stackTo):
        """
        :param stackNum: number of rings to move (must be positive).
        :param stackFrom: source stack, 1..3.
        :param stackTo: target stack, 1..3, different from ``stackFrom``.
        :raises RuntimeError: on any invalid parameter.
        """
        # The original check only rejected negative stacks, silently
        # accepting out-of-range values such as 0 or 5; require 1..3.
        if stackNum <= 0 or stackFrom == stackTo \
                or not 1 <= stackFrom <= 3 or not 1 <= stackTo <= 3:
            raise RuntimeError("Invalid parameters")
        self.stackFrom = stackFrom
        self.stackTo = stackTo
        self.hanoiMove = []
        self.moveHanoiStack(self.stackFrom, self.stackTo, 1, stackNum)

    def printMoveSteps(self):
        """Print the recorded moves in order; empties ``self.hanoiMove``."""
        if len(self.hanoiMove) == 1:
            print(self.hanoiMove.pop())
            return
        # Pop from the end, recurse, then print: restores original order.
        s = self.hanoiMove.pop()
        self.printMoveSteps()
        print(s)

    def moveHanoiStack(self, stackFrom, stackTo, top, bottom):
        """Record the moves for rings ``top``..``bottom`` (bottom = largest)
        from ``stackFrom`` to ``stackTo``."""
        s = "Moving ring " + str(bottom) + " from stack " + str(stackFrom) + " to " + str(stackTo)
        if bottom - top == 0:
            # Single ring: move it directly.
            self.hanoiMove.append(s)
            return
        # Find the spare stack (the one that is neither source nor target).
        other = stackFrom
        for i in range(1, 4):
            if i != stackFrom and i != stackTo:
                other = i
                break
        # Classic recursion: n-1 rings to the spare, largest ring to the
        # target, then the n-1 rings from the spare onto it.
        self.moveHanoiStack(stackFrom, other, top, bottom - 1)
        self.hanoiMove.append(s)
        self.moveHanoiStack(other, stackTo, top, bottom - 1)
# Demo: print the move sequence for 3 rings from stack 1 to stack 2.
if __name__ == "__main__":
    hm = HanoiMove(3,1,2)
    hm.printMoveSteps()
import functools
import itertools
import operator
import re
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import toolz
from genopandas import plotting as gplot
from genopandas.util.pandas_ import DfWrapper
from .frame import GenomicDataFrame, GenomicSlice
# Regexes for parsing condensed genomic indices:
# ranged, e.g. "chr1:100-200", and positioned, e.g. "chr1:150".
RANGED_REGEX = r'(?P<chromosome>\w+):(?P<start>\d+)-(?P<end>\d+)'
POSITIONED_REGEX = r'(?P<chromosome>\w+):(?P<position>\d+)'
class AnnotatedMatrix(DfWrapper):
"""AnnotatedMatrix class.
Annotated matrix classes respresent 2D numeric feature-by-sample matrices
(with 'features' along the rows and samples along the columns), which can
be annotated with optional sample_data and feature_data frames that
describe the samples. The type of feature varies between different
sub-classes, examples being genes (for gene expression matrices) and
region-based bins (for copy-number data).
This (base) class mainly contains a variety of methods for querying,
subsetting and combining different annotation matrices. General plotting
methods are also provided (``plot_heatmap``).
Note that the class follows the feature-by-sample convention that is
typically followed in biological packages, rather than the sample-by-feature
orientation. This has the additional advantage of allowing more complex
indices (such as a region-based MultiIndex) for the features, which are
more difficult to use for DataFrame columns than for rows.
Attributes
----------
values : pd.DataFrame or AnnotatedMatrix
Matrix values.
sample_data : pd.DataFrame
DataFrame containing sample annotations, whose index corresponds
with the columns of the matrix.
feature_data : pd.DataFrame
DataFrame containing feature annotations, whose index corresponds
with the rows of the matrix.
"""
def __init__(self, values, sample_data=None, feature_data=None):
    """Create an annotated matrix.

    ``values`` may be a DataFrame (features x samples) or an existing
    AnnotatedMatrix, whose values are copied and whose annotations are
    reused when not given explicitly.
    """
    if isinstance(values, AnnotatedMatrix):
        # Copy values from existing matrix (only reuse its sample/feature
        # data if these are not given explicitly).
        # NOTE: the original used `sample_data or values.sample_data`,
        # which raises "truth value of a DataFrame is ambiguous" whenever
        # an explicit DataFrame is passed; test for None instead.
        if sample_data is None:
            sample_data = values.sample_data
        if feature_data is None:
            feature_data = values.feature_data
        values = values.values.copy()
    else:
        # Create empty annotations if none given.
        if sample_data is None:
            sample_data = pd.DataFrame({}, index=values.columns)
        if feature_data is None:
            feature_data = pd.DataFrame({}, index=values.index)
    # Check if all matrix columns are numeric.
    for col_name, col_values in values.items():
        if not is_numeric_dtype(col_values):
            raise ValueError(
                'Column {} is not numeric'.format(col_name))
    super().__init__(values)
    # Align annotations with the matrix axes.
    self._sample_data = sample_data.reindex(index=values.columns)
    self._feature_data = feature_data.reindex(index=values.index)
def _constructor(self, values):
    """Build a new instance of this class from ``values``, reusing the
    current sample/feature annotations; non-DataFrame inputs pass through
    unchanged."""
    if not isinstance(values, pd.DataFrame):
        return values
    return self.__class__(
        values.copy(),
        sample_data=self._sample_data,
        feature_data=self._feature_data)
@property
def feature_data(self):
    """Per-feature annotation frame (indexed like the matrix rows)."""
    return self._feature_data

@feature_data.setter
def feature_data(self, value):
    # Align the new annotations to the matrix rows before storing.
    self._feature_data = value.reindex(index=self._values.index)
@property
def sample_data(self):
    """Per-sample annotation frame (indexed like the matrix columns)."""
    return self._sample_data

@sample_data.setter
def sample_data(self, value):
    # Align the new annotations to the matrix columns before storing.
    self._sample_data = value.reindex(index=self._values.columns)
@classmethod
def from_csv(cls,
             file_path,
             sample_data=None,
             feature_data=None,
             sample_mapping=None,
             feature_mapping=None,
             drop_cols=None,
             read_data_kws=None,
             **kwargs):
    """Read a matrix (plus optional sample/feature annotations) from csv.

    ``sample_data``/``feature_data`` may be DataFrames or readable
    paths/objects; ``sample_mapping``/``feature_mapping`` rename
    columns/rows; ``drop_cols`` removes unwanted columns. Extra ``kwargs``
    go to ``pd.read_csv`` for the matrix itself.
    """
    default_kwargs = {'index_col': 0}
    kwargs = toolz.merge(default_kwargs, kwargs)
    values = pd.read_csv(str(file_path), **kwargs)
    # If sample/feature_data are not dataframes, assume they are
    # file paths or objects and try to read from them.
    # NOTE: 'sep' is popped here, *after* the matrix read above already
    # consumed it, so the annotation files use the same separator.
    read_data_kws_default = {
        'sep': kwargs.pop('sep', None),
        'index_col': 0
    }
    read_data_kws = toolz.merge(read_data_kws_default, read_data_kws or {})
    if not (sample_data is None or isinstance(sample_data, pd.DataFrame)):
        sample_data = pd.read_csv(sample_data, **read_data_kws)
    if not (feature_data is None
            or isinstance(feature_data, pd.DataFrame)):
        feature_data = pd.read_csv(feature_data, **read_data_kws)
    values = cls._preprocess_values(
        values,
        sample_data=sample_data,
        feature_data=feature_data,
        sample_mapping=sample_mapping,
        feature_mapping=feature_mapping,
        drop_cols=drop_cols)
    return cls(values, sample_data=sample_data, feature_data=feature_data)
@classmethod
def _preprocess_values(cls,
                       values,
                       sample_data=None,
                       feature_data=None,
                       sample_mapping=None,
                       feature_mapping=None,
                       drop_cols=None):
    """Drop, rename and reorder raw matrix values so that they line up
    with the given sample/feature annotation frames."""
    if drop_cols is not None:
        values = values.drop(drop_cols, axis=1)
    if not (sample_mapping is None and feature_mapping is None):
        values = values.rename(
            columns=sample_mapping, index=feature_mapping)
    # Reindex to the annotation order (None leaves an axis untouched).
    sample_order = sample_data.index if sample_data is not None else None
    feature_order = feature_data.index if feature_data is not None else None
    return values.reindex(
        columns=sample_order, index=feature_order, copy=False)
def to_csv(self,
           file_path,
           sample_data_path=None,
           feature_data_path=None,
           **kwargs):
    """Writes matrix values to a csv file, using pandas' to_csv method.

    When ``sample_data_path``/``feature_data_path`` are given, the
    annotation frames are written there using the same separator as the
    matrix (',' if none was given).
    """
    # Write matrix values (kwargs, including any 'sep', apply here).
    self._values.to_csv(file_path, **kwargs)
    # Pop 'sep' exactly once. The original popped it twice, so when both
    # annotation paths were given the feature-data file silently fell back
    # to sep=None; also default to ',' rather than passing None through.
    sep = kwargs.pop('sep', ',')
    if sample_data_path is not None:
        self._sample_data.to_csv(sample_data_path, sep=sep, index=True)
    if feature_data_path is not None:
        self._feature_data.to_csv(feature_data_path, sep=sep, index=True)
def rename(self, index=None, columns=None):
    """Rename features (``index``) and/or samples (``columns``), keeping
    the annotation frames in sync with the matrix labels."""
    renamed_values = self._values.rename(index=index, columns=columns)
    feature_data = (self._feature_data.rename(index=index)
                    if index is not None else self._feature_data)
    sample_data = (self._sample_data.rename(index=columns)
                   if columns is not None else self._sample_data)
    return self.__class__(
        renamed_values, feature_data=feature_data, sample_data=sample_data)
def melt(self,
         with_sample_data=False,
         with_feature_data=False,
         value_name='value'):
    """Melt the matrix into 'tidy' (long) format, optionally joining the
    sample and/or feature annotation columns onto each row."""
    feature_column = self._feature_data.index.name or 'feature'
    sample_column = self._sample_data.index.name or 'sample'

    long_df = pd.melt(
        self._values.rename_axis(feature_column).reset_index(),
        id_vars=feature_column,
        var_name=sample_column,
        value_name=value_name)

    if with_sample_data and self._sample_data.shape[1] > 0:
        sample_annot = (self._sample_data.rename_axis(sample_column)
                        .reset_index())
        long_df = pd.merge(
            long_df, sample_annot, how='left', on=sample_column)

    if with_feature_data and self._feature_data.shape[1] > 0:
        feature_annot = (self._feature_data.rename_axis(feature_column)
                         .reset_index())
        long_df = pd.merge(
            long_df, feature_annot, how='left', on=feature_column)

    return long_df
def query_samples(self, expr):
    """Subset samples by querying ``sample_data`` with a boolean expression.

    Works like ``pandas.DataFrame.query``: samples for which ``expr``
    evaluates to True are kept (outer variables can be referenced by
    prefixing them with '@', e.g. ``@a + b``).

    Parameters
    ----------
    expr : str
        Boolean query string evaluated against the sample annotations.

    Returns
    -------
    AnnotatedMatrix
        Matrix restricted to the matching samples.
    """
    matching = self._sample_data.query(expr)
    subset_values = self._values.reindex(columns=matching.index)
    return self.__class__(
        subset_values,
        sample_data=matching,
        feature_data=self._feature_data)
def dropna_samples(self, subset=None, how='any', thresh=None):
    """Drop samples whose ``sample_data`` rows contain NAs; arguments are
    forwarded to ``DataFrame.dropna``."""
    kept = self._sample_data.dropna(subset=subset, how=how, thresh=thresh)
    kept_values = self._values.reindex(columns=kept.index)
    return self.__class__(
        kept_values, sample_data=kept, feature_data=self._feature_data)
def __eq__(self, other):
    """Equality: same values and same sample/feature annotations.

    The original ``all(self.values == other.values)`` iterated the
    *column labels* of the boolean frame (so it was almost always True)
    and raised for differently-shaped matrices. ``DataFrame.equals``
    compares contents correctly, handles shape mismatches, and treats
    NaNs in the same position as equal.
    """
    if not isinstance(other, AnnotatedMatrix):
        return False
    return (self.values.equals(other.values)
            and self.sample_data.equals(other.sample_data)
            and self.feature_data.equals(other.feature_data))
def plot_heatmap(
        self,
        cmap='RdBu_r',
        sample_cols=None,
        sample_colors=None,
        feature_cols=None,
        feature_colors=None,
        metric='euclidean',
        method='complete',
        transpose=False,
        # legend_kws=None,
        **kwargs):
    """Plots clustered heatmap of matrix values.

    ``sample_cols``/``feature_cols`` select annotation columns rendered as
    color side-bars (palettes optionally fixed via ``sample_colors`` /
    ``feature_colors``); ``metric``/``method`` are forwarded to seaborn's
    clustermap; ``transpose`` puts samples on the rows. Returns the
    seaborn ClusterGrid.
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    # Build color annotations; the second tuple element (the colormap) is
    # unused until legend drawing is implemented (see TODO below).
    if sample_cols is not None:
        sample_annot, _ = gplot.color_annotation(
            self._sample_data[sample_cols], colors=sample_colors)
    else:
        sample_annot, _ = None, None
    if feature_cols is not None:
        feature_annot, _ = gplot.color_annotation(
            self._feature_data[feature_cols], colors=feature_colors)
    else:
        feature_annot, _ = None, None
    clustermap_kws = dict(kwargs)
    # Swap which annotations color the rows/columns when transposing.
    if transpose:
        values = self._values.T
        clustermap_kws['row_colors'] = sample_annot
        clustermap_kws['col_colors'] = feature_annot
        xlabel, ylabel = 'Features', 'Samples'
    else:
        values = self._values
        clustermap_kws['col_colors'] = sample_annot
        clustermap_kws['row_colors'] = feature_annot
        xlabel, ylabel = 'Samples', 'Features'
    cm = sns.clustermap(
        values, cmap=cmap, metric=metric, method=method, **clustermap_kws)
    # Keep y tick labels horizontal for readability.
    plt.setp(cm.ax_heatmap.get_yticklabels(), rotation=0)
    cm.ax_heatmap.set_xlabel(xlabel)
    cm.ax_heatmap.set_ylabel(ylabel)
    # TODO: handle legend drawing.
    #if annot_cmap is not None:
    #    draw_legends(cm, annot_cmap, **(legend_kws or {}))
    return cm
def pca(self,
        n_components=None,
        axis='columns',
        transform=False,
        with_annotation=False):
    """Fit a PCA over samples (axis='columns') or features (axis='index').

    Returns the fitted sklearn PCA object, or ``(pca, transformed)`` when
    ``transform`` is True; ``with_annotation`` joins the corresponding
    annotation frame onto the transformed coordinates.
    """
    try:
        from sklearn.decomposition import PCA
    except ImportError:
        raise ImportError('Scikit-learn must be installed to '
                          'perform PCA analyses')
    # Select orientation and the matching annotation frame.
    if axis in {1, 'columns', 'samples'}:
        data, annotation = self._values.T, self._sample_data
    elif axis in {0, 'index', 'features'}:
        data, annotation = self._values, self._feature_data
    else:
        raise ValueError('Unknown value for axis')

    model = PCA(n_components=n_components)
    model.fit(data.values)

    if not transform:
        return model

    scores = model.transform(data.values)
    score_df = pd.DataFrame(
        scores,
        columns=['pca_{}'.format(i + 1) for i in range(scores.shape[1])],
        index=data.index)
    if with_annotation:
        score_df = pd.concat([score_df, annotation], axis=1)
    return model, score_df
def plot_pca(self, components=(1, 2), axis='columns', ax=None, **kwargs):
    """Scatter-plot two PCA components, annotated with the explained
    variance percentage on each axis label."""
    model, coords = self.pca(
        n_components=max(components),
        axis=axis,
        transform=True,
        with_annotation=True)

    x_col, y_col = ('pca_{}'.format(c) for c in components)
    ax = gplot.scatter_plot(data=coords, x=x_col, y=y_col, ax=ax, **kwargs)

    explained = model.explained_variance_ratio_
    ax.set_xlabel('Component {} ({:3.1f}%)'.format(
        components[0], explained[components[0] - 1] * 100))
    ax.set_ylabel('Component {} ({:3.1f}%)'.format(
        components[1], explained[components[1] - 1] * 100))
    return ax
def plot_pca_variance(self, n_components=None, axis='columns', ax=None):
    """Line-plot of the explained-variance ratio per PCA component."""
    import matplotlib.pyplot as plt
    import seaborn as sns

    model = self.pca(n_components=n_components, axis=axis, transform=False)
    if ax is None:
        _, ax = plt.subplots()

    component_numbers = np.arange(model.n_components_) + 1
    ratios = model.explained_variance_ratio_
    ax.plot(component_numbers[:len(ratios)], ratios)

    ax.set_xlabel('Component')
    ax.set_ylabel('Explained variance')
    sns.despine(ax=ax)
    return ax
def plot_feature(self, feature, group=None, kind='box', ax=None, **kwargs):
    """Plot the value distribution of one feature, optionally grouped by a
    sample-data column (``kind``: 'box', 'swarm' or 'violin')."""
    import seaborn as sns

    if group is not None and self._sample_data.shape[1] == 0:
        raise ValueError('Grouping not possible without sample data')

    # Dispatch table from plot kind to the seaborn function.
    plot_funcs = {
        'box': sns.boxplot,
        'swarm': sns.swarmplot,
        'violin': sns.violinplot
    }
    try:
        plot_func = plot_funcs[kind]
    except KeyError:
        raise ValueError('Unknown plot type {!r}'.format(kind))

    # One row per sample: this feature's values joined with sample_data.
    feature_values = self._values.loc[feature].to_frame(name='value')
    plot_data = pd.concat([feature_values, self._sample_data], axis=1)

    ax = plot_func(data=plot_data, x=group, y='value', ax=ax, **kwargs)
    ax.set_title(feature)
    ax.set_ylabel('Value')
    return ax
@classmethod
def concat(cls, matrices, axis):
    """Concatenate matrices along 'index'/0 (features) or 'columns'/1
    (samples), combining the annotation frames accordingly."""
    value_list = []
    sample_list = []
    feature_list = []
    for matrix in matrices:
        value_list.append(matrix.values)
        sample_list.append(matrix.sample_data)
        feature_list.append(matrix.feature_data)

    values = pd.concat(value_list, axis=axis)

    if axis == 'index' or axis == 0:
        # Stacking features: feature annotations stack, sample
        # annotations gain columns.
        sample_data = pd.concat(sample_list, axis='columns')
        feature_data = pd.concat(feature_list, axis='index')
    elif axis == 'columns' or axis == 1:
        # Stacking samples: the mirror image of the above.
        sample_data = pd.concat(sample_list, axis='index')
        feature_data = pd.concat(feature_list, axis='columns')
    else:
        raise ValueError('Unknown value for axis')

    return cls(values, sample_data=sample_data, feature_data=feature_data)
def drop_duplicate_indices(self, axis='index', keep='first'):
    """Drop duplicated feature labels ('index') or sample labels
    ('columns'); ``keep`` is forwarded to ``Index.duplicated``."""
    if axis == 'index':
        keep_mask = ~self._values.index.duplicated(keep=keep)
        values = self._values.loc[keep_mask]
        sample_data = self._sample_data
        feature_data = self._feature_data.loc[keep_mask]
    elif axis == 'columns':
        keep_mask = ~self._values.columns.duplicated(keep=keep)
        values = self._values.loc[:, keep_mask]
        sample_data = self._sample_data.loc[keep_mask]
        feature_data = self._feature_data
    else:
        raise ValueError('Unknown value for axis')
    return self.__class__(
        values.copy(), sample_data=sample_data, feature_data=feature_data)
class GenomicMatrix(AnnotatedMatrix):
"""Class respresenting matrices indexed by genomic positions."""
def __init__(self, values, sample_data=None, feature_data=None):
    """Wrap ``values`` (coerced to a GenomicDataFrame if needed) together
    with the optional sample/feature annotations."""
    genomic_values = (values if isinstance(values, GenomicDataFrame)
                      else GenomicDataFrame(values))
    super().__init__(
        genomic_values, sample_data=sample_data, feature_data=feature_data)
@classmethod
def from_df(cls, values, chrom_lengths=None, **kwargs):
    """Construct a genomic matrix from a plain DataFrame; an existing
    GenomicDataFrame passes through unchanged."""
    if isinstance(values, GenomicDataFrame):
        return cls(values, **kwargs)
    genomic_values = GenomicDataFrame.from_df(
        values, chrom_lengths=chrom_lengths)
    return cls(genomic_values, **kwargs)
@classmethod
def from_csv(cls,
             file_path,
             index_col,
             sample_data=None,
             feature_data=None,
             sample_mapping=None,
             feature_mapping=None,
             drop_cols=None,
             chrom_lengths=None,
             read_data_kws=None,
             **kwargs):
    """Reads values from a csv file.

    ``index_col`` must name 2 columns (chromosome, position) or 3 columns
    (chromosome, start, end). Other arguments mirror
    ``AnnotatedMatrix.from_csv``, plus ``chrom_lengths`` for the genomic
    index.
    """
    if not 2 <= len(index_col) <= 3:
        # (Message fixed: the original had an unbalanced parenthesis.)
        raise ValueError('index_col should contain 2 entries'
                         ' (for positioned data) or 3 entries'
                         ' (for ranged data)')
    # Chromosome names must be read as strings ('1' vs 1, 'X', ...).
    default_dtype = {index_col[0]: str}
    dtype = toolz.merge(default_dtype, kwargs.pop('dtype', {}))
    values = pd.read_csv(file_path, dtype=dtype, **kwargs)
    values = values.set_index(index_col)
    # If sample/feature_data are not dataframes, assume they are
    # file paths or objects and try to read from them.
    read_data_kws_default = {
        'sep': kwargs.pop('sep', None),
        'index_col': 0
    }
    read_data_kws = toolz.merge(read_data_kws_default, read_data_kws or {})
    if not (sample_data is None or isinstance(sample_data, pd.DataFrame)):
        sample_data = pd.read_csv(sample_data, **read_data_kws)
    if not (feature_data is None
            or isinstance(feature_data, pd.DataFrame)):
        feature_data = pd.read_csv(feature_data, **read_data_kws)
    values = cls._preprocess_values(
        values,
        sample_data=sample_data,
        feature_data=feature_data,
        sample_mapping=sample_mapping,
        feature_mapping=feature_mapping,
        drop_cols=drop_cols)
    return cls.from_df(
        values,
        sample_data=sample_data,
        feature_data=feature_data,
        chrom_lengths=chrom_lengths)
    @classmethod
    def from_csv_condensed(cls,
                           file_path,
                           index_col=0,
                           sample_data=None,
                           feature_data=None,
                           sample_mapping=None,
                           feature_mapping=None,
                           drop_cols=None,
                           chrom_lengths=None,
                           index_regex=RANGED_REGEX,
                           is_one_based=False,
                           is_inclusive=False,
                           read_data_kws=None,
                           **kwargs):
        """Reads values from a csv file with a condensed index.

        The single index column holds condensed position strings matched
        by `index_regex` (default RANGED_REGEX), which are expanded into
        a genomic MultiIndex. `is_one_based`/`is_inclusive` correct the
        coordinate convention of the source file to Python's zero-based,
        half-open convention.
        """
        values = pd.read_csv(file_path, index_col=index_col, **kwargs)

        # Expand the condensed strings into a (chromosome, ...) MultiIndex.
        values.index = cls._expand_condensed_index(
            values.index,
            index_regex,
            is_one_based=is_one_based,
            is_inclusive=is_inclusive)

        # If sample/feature_data are not dataframes, assume they are
        # file paths or objects and try to read from them.
        read_data_kws_default = {
            'sep': kwargs.pop('sep', None),
            'index_col': 0
        }
        read_data_kws = toolz.merge(read_data_kws_default, read_data_kws or {})

        if not (sample_data is None or isinstance(sample_data, pd.DataFrame)):
            sample_data = pd.read_csv(sample_data, **read_data_kws)

        if not (feature_data is None
                or isinstance(feature_data, pd.DataFrame)):
            feature_data = pd.read_csv(feature_data, **read_data_kws)

        # Shared renaming/filtering before construction.
        values = cls._preprocess_values(
            values,
            sample_data=sample_data,
            feature_data=feature_data,
            sample_mapping=sample_mapping,
            feature_mapping=feature_mapping,
            drop_cols=drop_cols)

        return cls.from_df(
            values,
            sample_data=sample_data,
            feature_data=feature_data,
            chrom_lengths=chrom_lengths)
@classmethod
def _expand_condensed_index(cls,
index,
regex_expr,
is_one_based=False,
is_inclusive=False):
"""Expands condensed index into a MultiIndex."""
# Parse entries.
regex = re.compile(regex_expr)
group_dicts = (regex.match(el).groupdict() for el in index)
# Extract chromosome, start, end positions.
if regex.groups == 3:
tups = ((grp['chromosome'], int(grp['start']), int(grp['end']))
for grp in group_dicts)
chrom, starts, ends = zip(*tups)
elif regex.groups == 2:
tups = ((grp['chromosome'], int(grp['position']))
for grp in group_dicts)
chrom, starts = zip(*tups)
ends = None
else:
raise ValueError('Regex should have two or three groups '
'(for positioned/ranged data, respectively)')
# Correct for one-base and inclusive-ness to match Python conventions.
starts = np.array(starts)
if is_one_based:
starts -= 1
if ends is not None and is_inclusive:
ends = np.array(ends)
ends += 1
# Build index.
if ends is None:
index = pd.MultiIndex.from_arrays(
[chrom, starts], names=['chromosome', 'position'])
else:
index = pd.MultiIndex.from_arrays(
[chrom, starts, ends], names=['chromosome', 'start', 'end'])
return index
    @property
    def gloc(self):
        """Genomic-position indexer.

        Used to select rows from the matrix by their genomic position.
        Interface is the same as for the GenomicDataFrame gloc property
        (which this method delegates to).
        """
        # Wrap the underlying indexer so its results are re-wrapped into
        # matrix instances (see GLocWrapper / _gloc_constructor).
        return GLocWrapper(self._values.gloc, self._gloc_constructor)
def _gloc_constructor(self, values):
"""Constructor that attempts to build new instance
from given values."""
if isinstance(values, GenomicDataFrame):
sample_data = self._sample_data.reindex(index=values.columns)
feature_data = self._feature_data.reindex(index=values.index)
return self.__class__(
values.copy(),
sample_data=sample_data,
feature_data=feature_data)
return values
def expand(self):
"""Expands matrix to include values from missing bins.
Assumes rows are regularly spaced with a fixed bin size.
"""
expanded = self._expand(self._values)
feature_data = self._feature_data.reindex(index=expanded.index)
return self.__class__(
expanded, sample_data=self._sample_data, feature_data=feature_data)
    @staticmethod
    def _expand(values):
        # Builds the complete set of (chrom, start, end) bins covering the
        # observed span of each chromosome at the inferred bin size, then
        # reindexes so missing bins appear as NaN rows.
        def _bin_indices(grp, bin_size):
            chrom = grp.index[0][0]
            start = grp.index.get_level_values(1).min()
            end = grp.index.get_level_values(2).max()
            bins = np.arange(start, end + 1, step=bin_size)
            return zip(itertools.cycle([chrom]), bins[:-1], bins[1:])

        # Infer bin size from the first row (end - start).
        bin_size = values.index[0][2] - values.index[0][1]

        # TODO: Warn if bin_size is 1? (Probably positioned data).

        # Check inferred bin size.
        starts = values.index.get_level_values(1)
        ends = values.index.get_level_values(2)

        diffs = ends - starts
        if not all(diffs == bin_size):
            raise ValueError('Bins do not match inferred bin size')

        # Check if following bins match inferred bin size.
        if not all(np.mod(np.diff(starts), bin_size) == 0):
            raise ValueError('Following bins do not match inferred bin size')

        indices = list(
            itertools.chain.from_iterable(
                _bin_indices(grp, bin_size=bin_size)
                for _, grp in values.groupby(level=0)))

        return values.reindex(index=indices)
def impute(self, window=11, min_probes=5, expand=True):
"""Imputes nan values from neighboring bins."""
if expand:
values = self._expand(self._values)
else:
values = self._values
# Calculate median value within window (allowing for
# window - min_probes number of NAs within the window).
rolling = values.rolling(
window=window, min_periods=min_probes, center=True)
avg_values = rolling.median()
# Copy over values for null rows for the imputation.
imputed = values.copy()
mask = imputed.isnull().all(axis=1)
imputed.loc[mask] = avg_values.loc[mask]
# Match feature data to new values.
feature_data = self._feature_data.reindex(index=imputed.index)
return self.__class__(
imputed, sample_data=self._sample_data, feature_data=feature_data)
    def resample(self, bin_size, start=None, agg='mean'):
        """Resamples values at given interval by binning."""
        # Perform resampling per chromosome.
        resampled = pd.concat(
            (self._resample_chromosome(
                grp, bin_size=bin_size, agg=agg, start=start)
             for _, grp in self._values.groupby(level=0)),
            axis=0)  # yapf: disable

        # Restore original index order.
        resampled = resampled.reindex(self._values.gloc.chromosomes, level=0)

        # NOTE: feature_data is not propagated — the resampled bins no
        # longer correspond to the original features.
        return self.__class__(
            GenomicDataFrame(
                resampled, chrom_lengths=self._values.chromosome_lengths),
            sample_data=self._sample_data)
    @staticmethod
    def _resample_chromosome(values, bin_size, start=None, agg='mean'):
        """Resamples the rows of a single chromosome into fixed-size bins."""
        # Bin rows by their centre positions.
        starts = values.index.get_level_values(1)
        ends = values.index.get_level_values(2)
        positions = (starts + ends) // 2

        range_start = starts.min() if start is None else start
        # Extra bin_size ensures the last row's centre falls inside a bin.
        range_end = ends.max() + bin_size
        bins = np.arange(range_start, range_end, bin_size)

        if len(bins) < 2:
            raise ValueError('No bins in range ({}, {}) with bin_size {}'.
                             format(range_start, ends.max(), bin_size))

        binned = pd.cut(positions, bins=bins)

        # Aggregate all rows falling into each bin, then index the result
        # by the new (chromosome, start, end) bins.
        resampled = values.groupby(binned).agg(agg)
        resampled.index = pd.MultiIndex.from_arrays(
            [[values.index[0][0]] * (len(bins) - 1), bins[:-1], bins[1:]],
            names=values.index.names)

        return resampled
def rename_chromosomes(self, mapping):
"""Returns copy of matrix with renamed chromosomes."""
return self.__class__(
values=self._values.rename_chromosomes(mapping),
sample_data=self.sample_data,
feature_data=self.feature_data)
def annotate(self, features, feature_id='gene_id'):
"""Annotates values for given features."""
# Calculate calls.
get_id = operator.attrgetter(feature_id)
annotated_calls = {}
for feature in features.itertuples():
try:
chrom, start, end = feature.Index
overlap = self._values.gloc.search(chrom, start, end)
annotated_calls[get_id(feature)] = overlap.median()
except KeyError:
pass
# Assemble into dataframe.
annotated = pd.DataFrame.from_records(annotated_calls).T
annotated.index.name = feature_id
return AnnotatedMatrix(annotated, sample_data=self._sample_data)
    def plot_sample(self, sample, ax=None, **kwargs):
        """Plots values for given sample along genomic axis.

        Delegates to gplot.genomic_scatter_plot with the sample's column
        selected as y; returns the matplotlib axis.
        """
        ax = gplot.genomic_scatter_plot(
            self._values, y=sample, ax=ax, **kwargs)
        return ax
    def plot_heatmap(self,
                     cmap='RdBu_r',
                     sample_cols=None,
                     sample_colors=None,
                     metric='euclidean',
                     method='complete',
                     transpose=True,
                     cluster=True,
                     **kwargs):
        """Plots heatmap of gene expression over samples.

        Clustering (if enabled) is computed over samples only; the
        resulting linkage is injected into the row or column axis
        depending on `transpose`. Returns the object produced by the
        parent implementation (exposes an `ax_heatmap` axis).
        """
        # Row/col clustering is controlled by `cluster`, never directly.
        if 'row_cluster' in kwargs or 'col_cluster' in kwargs:
            raise ValueError(
                'GenomicMatrices only supports clustering by samples. '
                'Use the \'cluster\' argument to specify whether '
                'clustering should be performed.')

        if cluster:
            from scipy.spatial.distance import pdist
            from scipy.cluster.hierarchy import linkage

            # Do clustering on matrix with only finite values.
            values_clust = self._values.replace([np.inf, -np.inf], np.nan)
            values_clust = values_clust.dropna()

            # Distances between samples (columns), hence the transpose.
            dist = pdist(values_clust.T, metric=metric)
            sample_linkage = linkage(dist, method=method)
        else:
            sample_linkage = None

        # Draw heatmap.
        heatmap_kws = dict(kwargs)
        if transpose:
            # Samples on rows: cluster rows with the precomputed linkage.
            heatmap_kws.update({
                'row_cluster': sample_linkage is not None,
                'row_linkage': sample_linkage,
                'col_cluster': False
            })
        else:
            # Samples on columns: cluster columns instead.
            heatmap_kws.update({
                'col_cluster': sample_linkage is not None,
                'col_linkage': sample_linkage,
                'row_cluster': False
            })

        cm = super().plot_heatmap(
            sample_cols=sample_cols,
            sample_colors=sample_colors,
            cmap=cmap,
            metric=metric,
            method=method,
            transpose=transpose,
            **heatmap_kws)

        # Add chromosome boundaries and labels along the genomic axis.
        self._style_heatmap(cm, transpose=transpose)

        return cm
def _style_heatmap(self, cm, transpose):
chrom_breaks = self._values.groupby(level=0).size().cumsum()
chrom_labels = self._values.gloc.chromosomes
chrom_label_pos = np.concatenate([[0], chrom_breaks])
chrom_label_pos = (chrom_label_pos[:-1] + chrom_label_pos[1:]) / 2
if transpose:
cm.ax_heatmap.set_xticks([])
for loc in chrom_breaks[:-1]:
cm.ax_heatmap.axvline(loc, color='grey', lw=1)
cm.ax_heatmap.set_xticks(chrom_label_pos)
cm.ax_heatmap.set_xticklabels(chrom_labels, rotation=0)
cm.ax_heatmap.set_xlabel('Genomic position')
cm.ax_heatmap.set_ylabel('Samples')
else:
cm.ax_heatmap.set_yticks([])
for loc in chrom_breaks[:-1]:
cm.ax_heatmap.axhline(loc, color='grey', lw=1)
cm.ax_heatmap.set_yticks(chrom_label_pos)
cm.ax_heatmap.set_yticklabels(chrom_labels, rotation=0)
cm.ax_heatmap.set_xlabel('Samples')
cm.ax_heatmap.set_ylabel('Genomic position')
return cm
class GLocWrapper(object):
    """Wrapper class that wraps gloc indexer from given object."""

    def __init__(self, gloc, constructor):
        self._gloc = gloc
        self._constructor = constructor

    def __getattr__(self, name):
        # Callables are wrapped so their results pass through the
        # constructor; plain attributes are returned as-is.
        attr = getattr(self._gloc, name)
        return self._wrap_function(attr) if callable(attr) else attr

    def __getitem__(self, item):
        result = self._gloc[item]
        if isinstance(result, GenomicSlice):
            # Chromosome slice: defer construction until the second
            # subscript is applied (see GLocSliceWrapper).
            return GLocSliceWrapper(
                self._gloc, chromosome=item, constructor=self._constructor)
        return self._constructor(result)

    def _wrap_function(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Wrapper that calls _constructor on returned result."""
            return self._constructor(func(*args, **kwargs))

        return wrapper
class GLocSliceWrapper(object):
    """Wrapper class that wraps slice from gloc indexer on given object."""

    def __init__(self, gloc, chromosome, constructor):
        self._gloc = gloc
        self._chromosome = chromosome
        self._constructor = constructor

    def __getitem__(self, item):
        # Apply the deferred chromosome subscript, then the requested
        # subscript, and wrap the result via the constructor.
        sliced = self._gloc[self._chromosome][item]
        return self._constructor(sliced)
| 34,911 | 9,965 |
#-------------------------------------------------------------------------------
#
# Python GUI - Scrollable objects mixin - Generic
#
#-------------------------------------------------------------------------------
from GUI.Properties import overridable_property
class ScrollableBase(object):
    """Mixin for components that can be configured to have scroll bars."""

    scrolling = overridable_property('scrolling',
        "String containing 'h' for horizontal and 'v' for vertical scrolling.")

    hscrolling = overridable_property('hscrolling',
        "True if horizontal scrolling is enabled.")

    vscrolling = overridable_property('vscrolling',
        "True if vertical scrolling is enabled.")

    def get_scrolling(self):
        # Compose the flag string from the two boolean properties.
        flags = 'h' if self.hscrolling else ''
        if self.vscrolling:
            flags += 'v'
        return flags

    def set_scrolling(self, value):
        # Presence of the character in `value` enables that direction.
        self.hscrolling = 'h' in value
        self.vscrolling = 'v' in value
| 936 | 279 |
from typing import List
from trajectory.trajectory_node import TrajectoryNode
class Trajectory(object):
    def __init__(self):
        # Ordered list of nodes making up the trajectory.
        self._trajectory_nodes: List[TrajectoryNode] = []

    def trajectory_nodes(self) -> List[TrajectoryNode]:
        """Return the accumulated trajectory nodes."""
        return self._trajectory_nodes

    def load(self, file_path: str):
        """Load a trajectory from the given file (not implemented yet)."""
        pass
| 345 | 112 |
import argparse
import better_exceptions
import sys
import time
from pathlib import Path
import zipfile
import bz2
import urllib.request
import dlib
import cv2
# Archive names making up the LAP dataset and their (redacted) download URLs.
zip_names = ["train_1.zip", "train_2.zip", "train_gt.zip", "valid.zip", "valid_gt.zip"]
urls = ["http://***/train_1.zip",
        "http://***/train_2.zip",
        "http://***/train_gt.zip",
        "http://***/valid.zip",
        "http://***/valid_gt.zip"]
# Password for the protected validation ground-truth archive (redacted).
gt_pwd = b"***"

# Directory layout, anchored next to this script.
dataset_root = Path(__file__).resolve().parent.joinpath("dataset")
model_root = Path(__file__).resolve().parent.joinpath("model")
train_image_dir = dataset_root.joinpath("train_images")
validation_image_dir = dataset_root.joinpath("validation_images")
train_crop_dir = dataset_root.joinpath("train_crop")
validation_crop_dir = dataset_root.joinpath("validation_crop")
def get_args():
    """Builds the CLI parser and parses arguments.

    Returns the parser together with the parsed namespace so the caller
    can print help when no subcommand was given.
    """
    parser = argparse.ArgumentParser(
        description="This script downloads the LAP dataset "
                    "and preprocess for training and evaluation",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    subcommands = parser.add_subparsers(help="subcommands", dest="subcommand")
    subcommands.add_parser("download", help="Downdload the LAP dataset")
    subcommands.add_parser("extract", help="Unzip the LAP dataset")
    subcommands.add_parser("crop", help="Crop face regions using dlib")
    return parser, parser.parse_args()
def reporthook(count, block_size, total_size):
    """Progress callback for urllib.request.urlretrieve.

    count: number of blocks transferred so far; block_size: bytes per
    block; total_size: total download size in bytes.
    """
    global start_time
    if count == 0:
        # First call: remember the transfer start for speed estimates.
        start_time = time.time()
        return
    duration = int(time.time() - start_time)
    current_size = count * block_size
    remaining_size = total_size - current_size
    # +1 in the divisor avoids division by zero within the first second.
    speed = int(current_size / (1024 * duration + 1))
    percent = min(int(count * block_size * 100 / total_size), 100)
    remaining_time = int(duration * (remaining_size / current_size))
    # Overwrite the same console line with the updated progress.
    sys.stdout.write("\r{}%, {:6.2f}/{:6.2f}MB, {}KB/s, passed: {}s, remaining: {}s".format(
        percent, current_size / (1024 * 1024), total_size / (1024 * 1024), speed, duration, remaining_time))
    sys.stdout.flush()
def download():
    """Downloads all dataset archives into `dataset_root`."""
    dataset_root.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above
    for zip_name, url in zip(zip_names, urls):
        print("downloading {}".format(zip_name))
        local_path = dataset_root.joinpath(zip_name)
        # reporthook prints transfer progress to stdout.
        urllib.request.urlretrieve(url, str(local_path), reporthook)
def crop():
    """Detects faces with dlib's CNN detector and prints their boxes.

    Downloads and decompresses the detector model on first use.
    NOTE(review): despite the name, nothing is currently written to the
    *_crop directories — only the detected rectangles are printed.
    """
    detector_model_path = model_root.joinpath("mmod_human_face_detector.dat")

    if not detector_model_path.is_file():
        model_root.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above
        detector_model_url = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"
        detector_model_bz2 = str(detector_model_path) + ".bz2"
        print("downloading {}".format(detector_model_path.name))
        urllib.request.urlretrieve(detector_model_url, detector_model_bz2, reporthook)

        # The model ships bz2-compressed; decompress next to it.
        with open(detector_model_bz2, "rb") as source, open(str(detector_model_path), "wb") as dest:
            dest.write(bz2.decompress(source.read()))

    detector = dlib.cnn_face_detection_model_v1(str(detector_model_path))

    for image_dir, crop_dir in [[train_image_dir, train_crop_dir], [validation_image_dir, validation_crop_dir]]:
        for image_path in image_dir.glob("*.jpg"):
            frame = cv2.imread(str(image_path))
            img_h, img_w, _ = frame.shape
            # Downscale so the longest side is 800px before detection.
            factor = 800 / max(img_h, img_w)
            frame_resized = cv2.resize(frame, None, fx=factor, fy=factor)
            # dlib expects RGB; OpenCV loads BGR.
            frame_rgb = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
            dets = detector(frame_rgb, 1)

            if len(dets) != 1:
                print("{} faces were detected for {}".format(len(dets), image_path.name))

            rects = [[d.rect.left(), d.rect.right(), d.rect.top(), d.rect.bottom()] for d in dets]
            print(rects)
def extract():
    """Unzips the downloaded archives into the dataset directories."""
    for zip_name in zip_names:
        zip_path = dataset_root.joinpath(zip_name)
        if not zip_path.is_file():
            raise RuntimeError("{} was not found. Please download the LAP dataset.".format(zip_name))

        # Training/validation images go to their own directories; the
        # ground-truth archives land directly in the dataset root.
        if zip_name in ("train_1.zip", "train_2.zip"):
            extract_path = train_image_dir
        elif zip_name == "valid.zip":
            extract_path = validation_image_dir
        else:
            extract_path = dataset_root

        # Only the validation ground truth is password protected.
        password = gt_pwd if zip_name == "valid_gt.zip" else None

        extract_path.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above
        with zipfile.ZipFile(str(zip_path), "r") as archive:
            archive.extractall(path=str(extract_path), pwd=password)
def main():
    """Dispatches to the selected subcommand, or prints help."""
    parser, args = get_args()
    handlers = {"download": download, "extract": extract, "crop": crop}
    handler = handlers.get(args.subcommand)
    if handler is None:
        parser.print_help()
    else:
        handler()


if __name__ == '__main__':
    main()
| 5,047 | 1,679 |
import FWCore.ParameterSet.Config as cms

# CMSSW configuration fragment defining the CTPPS simulated-hit producer,
# which reads transported beam particles from the LHCTransport collection.
CTPPSSimHits = cms.EDProducer('CTPPSSimHitProducer',
    MCEvent = cms.untracked.InputTag("LHCTransport"),
    Z_Tracker1 = cms.double(203.827),  # first tracker z position in m
    Z_Tracker2 = cms.double(212.550),  # second tracker z position in m
    Z_Timing = cms.double(215.700)     # timing detector z position in m
)
| 364 | 154 |
from schema_tools import yaml
from schema_tools.schema import build
from schema_tools.schema.json import IntegerSchema, StringSchema, TupleItem
def test_allow_for_empty_properties():
    # an object schema with an empty `properties` mapping should still build
    document = """
something:
  type: object
  properties:
"""
    schema = build(yaml.loads(document))
    assert len(schema.something.properties) == 0
def test_allow_for_no_items():
    # a missing `items` entry shouldn't fail building an array schema from AST
    ast = yaml.loads("""
something:
  type: array
""")
    schema = build(ast)
    assert isinstance(schema.something.items, list)
    assert len(schema.something.items) == 0
def test_allow_for_tuple_items():
    # a tuple-style `items` list should build TupleItem entries from AST
    document = """
something:
  type: array
  items:
    - type: integer
    - type: string
"""
    schema = build(yaml.loads(document))
    items = schema.something.items
    assert isinstance(items, list)
    assert len(items) == 2
    assert isinstance(items[0], TupleItem)
    assert isinstance(items[0].definition, IntegerSchema)
    assert isinstance(items[1].definition, StringSchema)
| 1,222 | 363 |
#!/usr/bin/python
#
# cmake_docutil.py
#
import pyhhi.build.app.cmkdocapp
import pyhhi.build.common.util
# Build the CMake documentation utility application and run it with the
# project's default top-level exception handling wrapper.
app = pyhhi.build.app.cmkdocapp.CMakeDocUtilApp()
pyhhi.build.common.util.exec_main_default_try(app)
| 209 | 92 |
from datetime import datetime
class mentions_self:
    """Russian first-person pronoun forms, keyed by grammatical case."""
    nom = 'я'
    gen = ['меня', 'себя']
    dat = ['мне', 'себе']
    acc = ['меня', 'себя']
    ins = ['мной', 'собой']
    abl = ['мне', 'себе']
class mentions_unknown:
    """Russian pronoun forms used for generic third-party mentions."""
    all = 'всех'
    him = 'его'
    her = 'её'
    it = 'это'
    they = 'их'
    them = 'их'
    us = 'нас'
# Grammatical case attribute names used by the mentions_* classes above.
name_cases = ['nom', 'gen', 'dat', 'acc', 'ins', 'abl']
# Aliases that address every chat member.
everyone = ['@everyone', '@all', '@все']
def getDate(time=None) -> str:
    """Return `time` (default: now) formatted as DD.MM.YYYY.

    BUG FIX: the original default `time=datetime.now()` was evaluated
    once at import time, so every later call reused that stale
    timestamp. Using None defers evaluation to call time.
    """
    if time is None:
        time = datetime.now()
    return f'{"%02d" % time.day}.{"%02d" % time.month}.{time.year}'
def getTime(time=None) -> str:
    """Return `time` (default: now) formatted as HH:MM:SS.microseconds.

    BUG FIX: the original default `time=datetime.now()` was evaluated
    once at import time, so every later call reused that stale
    timestamp. Using None defers evaluation to call time.
    """
    if time is None:
        time = datetime.now()
    return f'{"%02d" % time.hour}:{"%02d" % time.minute}:{"%02d" % time.second}.{time.microsecond}'
def getDateTime(time=None) -> str:
    """Return `time` (default: now) as 'DD.MM.YYYY HH:MM:SS.microseconds'.

    BUG FIX: the original default `time=datetime.now()` was evaluated
    once at import time, so every later call reused that stale
    timestamp. Using None defers evaluation to call time.
    """
    if time is None:
        time = datetime.now()
    return getDate(time) + ' ' + getTime(time)
def ischecktype(checklist, checktype) -> bool:
    """Return True if any element of `checklist` matches `checktype`.

    `checktype` may be a single type (isinstance check) or a list of
    types (exact `type()` membership check).
    """
    for element in checklist:
        if isinstance(checktype, list):
            if type(element) in checktype:
                return True
        elif isinstance(checktype, type) and isinstance(element, checktype):
            return True
    return False
| 1,068 | 391 |
"""
DRS De-Registration device resource package.
Copyright (c) 2018-2020 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
from flask import Response, request
from flask_restful import Resource
from marshmallow import ValidationError
from flask_babel import lazy_gettext as _
from app import app, db
from app.api.v1.helpers.error_handlers import DEREG_NOT_FOUND_MSG
from app.api.v1.helpers.response import MIME_TYPES, CODES
from app.api.v1.helpers.utilities import Utilities
from app.api.v1.models.deregdetails import DeRegDetails
from app.api.v1.models.deregdevice import DeRegDevice
from app.api.v1.models.status import Status
from app.api.v1.schema.deregdevice import DeRegRequestSchema, DeRegDeviceSchema, DeRegRequestUpdateSchema
from app.api.v1.models.eslog import EsLog
class DeRegDeviceRoutes(Resource):
    """Class for handling De Registration Device routes."""

    @staticmethod
    def get(dereg_id):
        """GET method handler, returns device of a request.

        Responds 422 when `dereg_id` is not a numeric id of an existing
        de-registration request.
        """
        if not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):
            return Response(app.json_encoder.encode(DEREG_NOT_FOUND_MSG), status=CODES.get("UNPROCESSABLE_ENTITY"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        try:
            schema = DeRegDeviceSchema()
            dereg_devices = DeRegDevice.get_devices_by_dereg_id(dereg_id)
            response = schema.dump(dereg_devices, many=True).data
            return Response(json.dumps(response), status=CODES.get("OK"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        except Exception as e:  # pragma: no cover
            app.logger.exception(e)
            error = {
                'message': [_('Failed to retrieve response, please try later')]
            }
            return Response(app.json_encoder.encode(error), status=CODES.get('INTERNAL_SERVER_ERROR'),
                            mimetype=MIME_TYPES.get('APPLICATION_JSON'))
        finally:
            # Always release the DB session, even on early returns.
            db.session.close()

    @staticmethod
    def post():
        """POST method handler, creates new devices for request."""
        dereg_id = request.form.to_dict().get('dereg_id', None)
        if not dereg_id or not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):
            return Response(app.json_encoder.encode(DEREG_NOT_FOUND_MSG), status=CODES.get("UNPROCESSABLE_ENTITY"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        try:
            schema_request = DeRegRequestSchema()
            device_schema = DeRegDeviceSchema()
            dereg = DeRegDetails.get_by_id(dereg_id)
            args = request.form.to_dict()
            args = DeRegDevice.curate_args(args, dereg)
            # Validate the curated form data before touching the database.
            validation_errors = schema_request.validate(args)
            if validation_errors:
                return Response(app.json_encoder.encode(validation_errors),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            # Every IMEI must already be registered before de-registration.
            imei_tac_map = Utilities.extract_imeis_tac_map(args, dereg)
            imeis_list = Utilities.extract_imeis(imei_tac_map)
            not_registered_imeis = Utilities.get_not_registered_imeis(imeis_list)
            if not_registered_imeis:
                error = {
                    'not_registered_imeis': not_registered_imeis
                }
                return Response(json.dumps(error),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            else:
                old_devices = list(map(lambda x: x.id, dereg.devices))
                created = DeRegDevice.bulk_create(args, dereg)
                device_id_tac_map = Utilities.get_id_tac_map(created)
                devices = device_schema.dump(created, many=True)
                # Initial status depends on whether IMEI checks are automated.
                dereg_status = 'Pending Review' if app.config['AUTOMATE_IMEI_CHECK'] else 'Awaiting Documents'
                dereg.update_status(dereg_status)
                db.session.commit()
                # Audit-trail entry for the creation.
                log = EsLog.new_device_serialize(devices.data, 'Device Deregistration Request', regdetails=dereg,
                                                 imeis=imeis_list, reg_status=dereg_status, method='Post', dereg=True)
                EsLog.insert_log(log)
                DeRegDevice.bulk_insert_imeis(device_id_tac_map, imei_tac_map, old_devices, imeis_list, dereg)
                response = {'devices': devices.data, 'dreg_id': dereg.id}
                return Response(json.dumps(response), status=CODES.get("OK"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        except Exception as e:  # pragma: no cover
            app.logger.exception(e)
            error = {
                'message': [_('Failed to retrieve response, please try later')]
            }
            return Response(app.json_encoder.encode(error), status=CODES.get('INTERNAL_SERVER_ERROR'),
                            mimetype=MIME_TYPES.get('APPLICATION_JSON'))
        finally:
            db.session.close()

    @staticmethod
    def put():
        """PUT method handler, updates devices of the request."""
        dereg_id = request.form.to_dict().get('dereg_id', None)
        if not dereg_id or not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):
            return Response(app.json_encoder.encode(DEREG_NOT_FOUND_MSG), status=CODES.get("UNPROCESSABLE_ENTITY"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        try:
            schema_request = DeRegRequestUpdateSchema()
            device_schema = DeRegDeviceSchema()
            dereg = DeRegDetails.get_by_id(dereg_id)
            args = request.form.to_dict()
            args = DeRegDevice.curate_args(args, dereg)
            validation_errors = schema_request.validate(args)
            if validation_errors:
                return Response(app.json_encoder.encode(validation_errors),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            imei_tac_map = Utilities.extract_imeis_tac_map(args, dereg)
            imeis_list = Utilities.extract_imeis(imei_tac_map)
            not_registered_imeis = Utilities.get_not_registered_imeis(imeis_list)
            if not_registered_imeis:
                error = {
                    'not_registered_imeis': not_registered_imeis
                }
                return Response(json.dumps(error),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            else:
                # Devices are only re-created when the request is still in a
                # reprocessable state or its report generation failed.
                processing_failed = dereg.processing_status in [Status.get_status_id('Failed'),
                                                                Status.get_status_id('New Request'),
                                                                Status.get_status_id('Pending Review')]
                report_failed = dereg.report_status == Status.get_status_id('Failed')
                processing_required = processing_failed or report_failed
                if processing_required:
                    old_devices = list(map(lambda x: x.id, dereg.devices))
                    created = DeRegDevice.bulk_create(args, dereg)
                    device_id_tac_map = Utilities.get_id_tac_map(created)
                    devices = device_schema.dump(created, many=True)
                    status = Status.get_status_type(dereg.status)
                    db.session.commit()
                    log = EsLog.new_device_serialize(devices.data, 'Update Device Deregistration Request',
                                                     regdetails=dereg,
                                                     imeis=imeis_list, method='Put',
                                                     dereg=True, reg_status=status)
                    EsLog.insert_log(log)
                    DeRegDevice.bulk_insert_imeis(device_id_tac_map, imei_tac_map, old_devices, imeis_list, dereg)
                    response = {'devices': devices.data, 'dreg_id': dereg.id}
                else:
                    # Nothing to reprocess: return an empty device list.
                    response = {'devices': [], 'dreg_id': dereg.id}
                return Response(json.dumps(response), status=CODES.get("OK"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        except Exception as e:  # pragma: no cover
            app.logger.exception(e)
            error = {
                'message': [_('Failed to retrieve response, please try later')]
            }
            return Response(app.json_encoder.encode(error), status=CODES.get('INTERNAL_SERVER_ERROR'),
                            mimetype=MIME_TYPES.get('APPLICATION_JSON'))
        finally:
            db.session.close()
| 10,999 | 3,178 |
#!/usr/bin/env python
import csv
from itertools import compress
from struct import Struct
def check_ctl(incsv):
    """Check the Length variable within the control file.

    Length = End - Start + 1 (the original docstring had the operands
    reversed relative to the check below).

    Raises AssertionError on the first inconsistent row.
    """
    # BUG FIX: the 'rU' open mode was removed in Python 3.11; the csv
    # module asks for newline='' instead.
    with open(incsv, newline='') as f:
        csv_reader = csv.DictReader(f)
        for row in csv_reader:
            assert (int(row['End']) - int(row['Start']) + 1) == int(row['Length'])
def import_ctl(incsv):
    """Import the control file that contains the starting and ending values for
    the fixed width file.

    File is structured as:

    Field_Name  Start  End  Length  Format  Notes
    field1      1      12   12      A       field 1
    field2      13     14   2       A       field 2
    field3      15     19   5       N       field 3

    Returns (field_widths, keep_fields): the Length of every field and a
    parallel list of 1/0 flags from the Keep column.
    """
    # BUG FIX: 'rU' mode was removed in Python 3.11; use newline='' as
    # recommended for the csv module.
    with open(incsv, newline='') as f:
        csv_reader = csv.DictReader(f)
        # BUG FIX: the original `field_widths = [], keep_fields = []` was a
        # chained assignment that raised ValueError at runtime (it tried to
        # unpack [] into the target tuple `([], keep_fields)`).
        field_widths = []
        keep_fields = []
        for fw in csv_reader:
            field_widths.append(int(fw['Length']))
            keep_fields.append(int(fw['Keep']))
    return field_widths, keep_fields
def create_fmt(field_widths, keep_fields):
    """Given two lists: 1) the field widths 2) list with a 1 or 0 indicating whether or not to keep a field,
    create a fmt string

    Field Widths - https://docs.python.org/3.4/library/struct.html
    Format     C Type     Python Type     Standard Size
    x          pad byte   no value
    c          char       bytes of length 1    1
    s          char[]     bytes

    BUG FIX: dropped fields (keep == 0) are encoded as *negative* widths
    below, but the original emitted 's' for them because it tested
    `fw == 0`; the pad format 'x' must be used when `fw < 0`, otherwise
    dropped fields were kept in the output.
    """
    # Map keep flags 0 -> -1 so dropped fields get negative widths.
    keep_fields_pos_neg = [-1 if keep == 0 else keep for keep in keep_fields]
    field_widths_pos_neg = [fw * keep for fw, keep in zip(field_widths, keep_fields_pos_neg)]
    fmt_string = ''.join('{}{}'.format(abs(fw), 'x' if fw < 0 else 's')
                         for fw in field_widths_pos_neg)
    return fmt_string
def read_records(record_struct, f):
    """Given a struct instance and a file handle, yield a tuple containing
    all fields (as strings) for a single record.

    BUG FIX: the original called `decode_record(record_struct, line)`,
    but `decode_record` takes a single argument, so iterating this
    generator raised TypeError. The decoding (identical to
    `_decode_record`) is now done inline.
    """
    while True:
        # Read exactly one record's worth of bytes per iteration.
        chunk = f.read(record_struct.size)
        if chunk == b'':
            break
        yield tuple(field.decode() for field in record_struct.unpack_from(chunk))
def _decode_record(record_struct, line):
return tuple(s.decode() for s in record_struct.unpack_from(line))
def decode_record(rec):
    # Decode an already-unpacked tuple of bytes fields to str.
    decoded = [field.decode() for field in rec]
    return tuple(decoded)
if __name__ == '__main__':
    # Will throw an AssertionError if the Length variable within the control file is wrong
    check_ctl('/some/dir/to/keep.csv')
    field_widths, keep_fields = import_ctl('/some/dir/to/keep.csv')
    fmt_string = create_fmt(field_widths, keep_fields)
    record_struct = Struct(fmt_string)
    # Convert the first 10 fixed-width records of the sample file to CSV.
    with open('/some/dir/to/fixedfield/split1_sample', 'rb') as infile:
        with open('/some/dir/to/fixedfield/split1_sample.csv', 'w', newline='') as outfile:
            csv_writer = csv.writer(outfile, delimiter=',')
            for rec in record_struct.iter_unpack(infile.read(record_struct.size*10)):
                # for rec in read_records(record_struct, infile):
                csv_writer.writerow(decode_record(rec))
| 3,193 | 980 |
__doc__ = 'for adapting 3 main types of resp: requests, baidubce & bumblebee'
import json
from requests import Response as RequestsResponse
from utils.bce import BceResponse
from utils import SelfAssemblingClass
class GeneralResp():
    '''
    0. if everything is ok, directly access this for json
    1. if something wrong, giving out the raw resp for debugging
    thus: not isinstance(this, dict)
    '''

    def __new__(self, resp):
        '''
        Adapter factory: returns a SelfAssemblingClass built from the
        given response, or the input itself when it is already adapted.

        :param resp: one of the three type of resps
            (requests.Response, BceResponse, or an already-adapted object)

        NOTE(review): because __new__ returns instances of other classes,
        GeneralResp itself is never actually instantiated, so
        __contains__ below appears unreachable. The final else branch
        also returns None implicitly — confirm whether that is intended.
        '''
        if isinstance(resp, SelfAssemblingClass) or isinstance(resp,
                                                               GeneralResp):
            # Already adapted: pass through unchanged.
            return resp
        elif isinstance(resp, BceResponse):
            print(f'assebmling a BceResponse .obj..')
            # Adapt from the BCE response metadata attributes.
            return SelfAssemblingClass(resp.metadata.__dict__)
        elif isinstance(resp, RequestsResponse):
            print(f'assembling a requests.Response obj...')
            try:
                # Prefer the JSON body when it parses cleanly.
                doc = json.loads(resp.text)
                print('doc seems ok, pass it for assembling...')
                return SelfAssemblingClass(doc)
            except Exception:
                # Fall back to the raw response attributes for debugging.
                print('assembling a requests.Response')
                return SelfAssemblingClass(resp.__dict__)
        else:
            print(
                f'respadapter.GeneralResp: input must be some Response <obj>,\
got a {type(resp)}')

    def __contains__(self, item):
        # Membership test against the instance's attribute names.
        return item in self.__dict__.keys()
| 1,515 | 393 |
from mcpi.minecraft import Minecraft
import math
import time
import random
import pycraft
mc = pycraft.new_minecraft()

# Hide a block (id 57, diamond ore) at a random surface position.
destX = random.randint(-127, 127)
destZ = random.randint(-127, 127)
destY = mc.getHeight(destX, destZ)
block = 57
mc.setBlock(destX, destY, destZ, block)
mc.postToChat("Block set")

# Hot/cold game loop: report how far the player is from the hidden block.
while True:
    pos = mc.player.getPos()
    distance = math.sqrt((pos.x - destX) ** 2 + (pos.z - destZ) ** 2)
    if distance > 100:
        mc.postToChat("Freezing")
    elif distance > 50:
        mc.postToChat("Cold")
    elif distance > 25:
        mc.postToChat("Warm")
    elif distance > 12:
        mc.postToChat("Boiling")
    elif distance > 6:
        mc.postToChat("On fire")
    elif distance < 1:
        # BUG FIX: the original tested `distance == 0`, which almost never
        # holds for a float distance, so the loop could never terminate.
        # Treat "within one block horizontally" as found.
        mc.postToChat("Found it")
        break
    # `time` was imported but never used -- throttle the chat spam.
    time.sleep(1)
# Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
from bugzilla import *
from .base import BaseTracker
class BugzillaTracker(BaseTracker):
    """Issue-tracker backend that talks to a Bugzilla instance."""

    def __init__(self, product, url):
        """Connect to the Bugzilla server at *url* for the given *product*."""
        self.product = product
        self.bzapi = Bugzilla(url)
        # Remove old token and cookie files since they may be outdated.
        for stale in (self.bzapi.tokenfile, self.bzapi.cookiefile):
            if os.path.exists(stale):
                os.remove(stale)

    @property
    def logged_in(self):
        """The authenticated user object (falsy when not logged in)."""
        return self.bzapi.user

    def login(self, username, pwd):
        """Attempt to log in; return True on success, False on failure."""
        try:
            self.bzapi.login(user=username, password=pwd)
        except BugzillaError:
            return False
        return True

    def find_issue(self, query):
        """Return open issues of this product whose summary matches *query*."""
        search = self.bzapi.build_query(product=self.product,
                                        status=['NEW', 'REOPENED', 'ASSIGNED'],
                                        short_desc=query,
                                        include_fields=['id', 'summary', 'weburl'])
        return self.bzapi.query(search)

    def report_issue(self, report_details, test, extension):
        """File a new bug and attach *test* as a file named test.<extension>."""
        bug_fields = self.bzapi.build_createbug(product=report_details['product'],
                                                component=report_details['component'],
                                                summary=report_details['summary'],
                                                version=report_details['version'],
                                                description=report_details['description'],
                                                blocks=report_details['blocks'])
        bug = self.bzapi.createbug(bug_fields)
        test_file = 'test.{ext}'.format(ext=extension)
        with open(test_file, 'wb') as attachment:
            attachment.write(test)
        self.bzapi.attachfile(idlist=bug.bug_id, attachfile=test_file, description='Test', is_patch=False)
        os.remove(test_file)
        return bug

    def __call__(self, issue):
        """No-op hook required by the BaseTracker interface."""
        pass

    def issue_url(self, issue):
        """Return the web URL of *issue*."""
        return issue.weburl
| 2,368 | 661 |
"""
Object name: HorizontalCylinder
Functions: Gr(g,beta,DT,D,nu) gives the Grashoff number based on:
gravity g, thermal expansion coefficient beta, Temperature difference DT,
length scale D, viscosity nu
Ra(g,beta,DT,D,nu,alpha) gives the Rayleigh number where alpha is the thermal conductivity.
"""
import numpy as np
import scipy
import scipy.optimize
class HorizontalCylinder(object):
    """ Natural convection about a horizontal cylinder
    from NewLibraries import HT_natural_convection as natconv
    cyl = natconv.HorizontalCylinder(correlation, Ra, Pr = 0.0)
    where correlation is "Morgan" or "Churchill-Chu"
    cyl = natconv.HorizontalCylinder("Morgan", Ra)
    cyl = natconv.HorizontalCylinder("Churchill-Chu", Ra, Pr = xx)

    Sets self.Nu (the Nusselt number); on any warning path Nu is set to
    +inf so downstream code fails loudly rather than with AttributeError.
    """

    # Morgan correlation coefficients as (upper Ra bound, C, n).
    _MORGAN_TABLE = (
        (1e-2, 0.675, 0.058),
        (1e2, 1.02, 0.148),
        (1e4, 0.85, 0.188),
        (1e7, 0.480, 0.250),
        (1e12, 0.125, 0.333),
    )

    def __init__(self, correlation="Morgan", Ra=0.0, Pr=0.0):
        self.correlation = correlation
        self.Ra = Ra
        if correlation == "Morgan":
            # BUG FIX: the original elif chain left C and n unbound for
            # Ra > 1e12 (NameError); warn and return inf instead.
            for upper_bound, C, n in self._MORGAN_TABLE:
                if Ra <= upper_bound:
                    self.Nu = C * Ra ** n
                    break
            else:
                print("Warning Ra is out of range for the Morgan correlation")
                self.Nu = float('inf')
        elif correlation == "Churchill-Chu":
            if Pr == 0.:
                print("Warning you must specify Pr for Churchill and Chu correlation")
                # Original left self.Nu unset here (AttributeError later).
                self.Nu = float('inf')
            else:
                self.Nu = (0.60+(0.387*Ra**(1./6.))/(1.+(0.559/Pr)**(9./16.))**(8./27.))**2
        else:
            print("Warning wrong correlation name")
            self.Nu = float('inf')
class VerticalEnclosure(object):
    """ Natural convection inside a vertical rectangular enclosure of
    height H and gap width L.

    from NewLibraries import HT_natural_convection as natconv
    enc = natconv.VerticalEnclosure(Ra, Pr, H, L)

    Selects a Nusselt correlation from the aspect ratio H/L and the Rayleigh
    number; self.Nu is set to np.inf (with a printed warning) whenever the
    inputs fall outside every correlation's validity range.

    BUG FIX: the original body tested an undefined name ``correlation``
    (NameError on every call); it is now a keyword parameter defaulting to
    "Morgan" so existing 4-argument callers keep working.
    """
    def __init__(self, Ra, Pr, H, L, correlation="Morgan"):
        self.Ra = Ra
        self.Pr = Pr
        self.H = H
        self.L = L
        if correlation == "Morgan":
            if (H/L) < 2.:
                if Ra*Pr/(0.2+Pr) > 1.e3:
                    self.Nu = 0.18*(Pr/(0.2+Pr)*Ra)**0.29
                else:
                    print('Ra is too low for this correlation')
                    self.Nu = np.inf
            elif H/L < 10:
                if Ra < 1e10:
                    self.Nu = 0.22*(Pr/(0.2+Pr)*Ra)**0.28*(H/L)**(-0.25)
                else:
                    print('Ra is too high for this correlation')
                    self.Nu = np.inf
            elif Ra < 1e4:
                print('Ra is too low for this correlation')
                self.Nu = np.inf
            elif Ra < 1e7:
                if Pr > 0.6 and Pr < 2e4:
                    print('ok')
                    self.Nu = 0.42*Ra**0.25*Pr**0.012*(H/L)**(-0.3)
                else:
                    print('Pr is out of bounds for this correlation')
                    self.Nu = np.inf
            elif Ra < 1e9:
                if Pr > 0.6 and Pr < 20.:
                    self.Nu = 0.46*Ra**(1./3.)
                else:
                    print('Pr is out of bounds for this correlation')
                    self.Nu = np.inf
            else:
                print('Ra is too high, got nothing for you')
                self.Nu = np.inf
        else:
            print('Warning wrong correlation name')
            self.Nu = np.inf
def Gr(g=9.81, beta=0.0, DT=0.0, D=0.0, nu=1.0):
    """Grashof number: buoyancy (g*beta*DT*D^3) over viscous forces (nu^2)."""
    buoyancy = g * beta * DT * D**3
    return buoyancy / nu**2
def Ra(g=9.81, beta=0.0, DT=0.0, D=0.0, nu=1.0, alpha=1.0):
    """Rayleigh number: buoyancy (g*beta*DT*D^3) over (nu * alpha)."""
    buoyancy = g * beta * DT * D**3
    return buoyancy / (nu * alpha)
| 3,741 | 1,297 |
from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(db):
    """Fixture step: snapshot of all contacts currently in the database."""
    return db.get_contacts_list()
@given('a contact with <first_name>, <last_name>, <address>, <home_phone>, <work_phone>, <mobile_phone>, <fax>, <mail_1>, <mail_2> and <mail_3>')
def new_contact(first_name, last_name, address, home_phone, work_phone, mobile_phone, fax, mail_1, mail_2, mail_3):
    """Fixture step: build a Contact from the scenario-outline example values."""
    return Contact(first_name=first_name, last_name=last_name, address=address,
                   home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone, fax=fax, mail_1=mail_1, mail_2=mail_2, mail_3=mail_3)
@when('I add a new contact to the list')
def add_new_contact(app, new_contact):
    """Drive the UI: open the creation form, fill it, submit, return home."""
    app.contacts.New_contact_form()
    app.contacts.Filling_information_form(new_contact)
    app.contacts.Submit_new_contact_creation()
    app.contacts.Open_home_page()
@then('the new contact list is equal to the old list with the added contact')
def verify_contact_added(db, contact_list, new_contact, app):
    """Check that the UI count grew by one and the DB content matches the
    old list plus the new contact."""
    assert app.contacts.Count() == len(contact_list) + 1
    contacts_in_db = db.get_contacts_list()
    contact_list.append(new_contact)
    key = Contact.id_or_max
    assert sorted(contact_list, key=key) == sorted(contacts_in_db, key=key)
@given('a non-empty contact list')
def non_empty_contact_list(db, app):
    """Fixture step: return the current contact list, first creating one
    seed contact through the UI if the database holds none."""
    if not db.get_contacts_list():
        seed_contact = Contact(first_name="Edited first name", last_name="Edited last name", address="Nizhny_Novgorod",
                               home_phone="111", work_phone="222", mobile_phone="333", fax="0000",
                               mail_1="ddd@ya.by", mail_2='fff@wer.us', mail_3="kol@gmail.com")
        app.contacts.New_contact_form()
        app.contacts.Filling_information_form(seed_contact)
        app.contacts.Submit_new_contact_creation()
    return db.get_contacts_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
    """Fixture step: pick an arbitrary existing contact to act on."""
    return random.choice(non_empty_contact_list)
@when('I delete the contact from list')
def delete_contact(app, random_contact):
    """Drive the UI: delete the chosen contact by its database id."""
    app.contacts.delete_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old list without the delete contact')
def verify_contact_delete(db, non_empty_contact_list, random_contact, app, check_ui):
    """Check the UI count dropped by one; when check_ui is set, also compare
    the DB list against the UI list.

    NOTE(review): the expected list (old minus removed) is built but never
    compared against the DB list -- possibly an intentionally count-only
    check, possibly a missing assertion; confirm with the suite's authors.
    """
    contacts_before = non_empty_contact_list
    assert app.contacts.Count() == len(contacts_before) - 1
    contacts_in_db = db.get_contacts_list()
    contacts_before.remove(random_contact)
    if check_ui:
        key = Contact.id_or_max
        assert sorted(contacts_in_db, key=key) == sorted(app.contacts.get_contacts_list(), key=key)
@when('I modify the contact from list')
def contact_list_modification(app, random_contact):
    """Drive the UI: open a random contact for editing and overwrite every
    field with fixed replacement data."""
    replacement = Contact(first_name="Modify first name", last_name="Edited last name", address="Nizhny_Novgorod",
                          home_phone="111", work_phone="222", mobile_phone="333", fax="0000",
                          mail_1="ddd@ya.by", mail_2='fff@wer.us', mail_3="kol@gmail.com")
    app.contacts.edit_contact_by_id(random_contact.id)
    app.contacts.Filling_information_form(replacement)
    app.contacts.Submit_updating_form()
@then('the new contact list is equal to the old list')
def verification_list_groups_are_the_same(db, non_empty_contact_list, check_ui, app):
    """Check the contact count is unchanged; when check_ui is set, compare
    the DB list against the UI list."""
    contacts_in_db = db.get_contacts_list()
    assert app.contacts.Count() == len(non_empty_contact_list)
    app.contacts.Open_home_page()
    if check_ui:
        key = Contact.id_or_max
        assert sorted(contacts_in_db, key=key) == sorted(app.contacts.get_contacts_list(), key=key)
| 3,765 | 1,261 |
'''
Python 2 tutorial script: list/dict/set comprehensions compared against
their map/filter/lambda equivalents.  NOTE: uses Python 2 print statements
and Python 2 map/filter/zip (which return lists) -- do not run under Python 3.
'''
nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# EXAMPLE 1: Print lists built with simple comprehensions:
# ==============================================================================
my_list1 = [n for n in nums]
my_list2 = [n*n for n in nums]
print my_list1
print my_list2
print
# EXAMPLE 2: Same squares using map + lambda:
# ==============================================================================
my_list3 = map(lambda n: n*n, nums)
print my_list3
print
# EXAMPLE 3: Filtering with a comprehension (even numbers only):
# ==============================================================================
my_list4 = [n for n in nums if n%2 == 0]
print my_list4
print
# EXAMPLE 4: Same filter using filter + lambda:
# ==============================================================================
my_list5 = filter(lambda n: n%2 == 0, nums)
print my_list5
print
# EXAMPLE 5: Nested comprehension -- cartesian product of letters and numbers:
# NOTE(review): despite the original "strings lists" heading, this builds
# (letter, number) pairs; names/heros are only used from EXAMPLE 6 on.
# ==============================================================================
names = ['Bruce', 'Clark', 'Peter', 'Logan', 'Wade']
heros = ['Batman', 'Superman', 'Spiderman', 'Wolverine', 'Deadpool']
my_list6 = [(letter, num) for letter in 'abcd' for num in range(4)]
print my_list6
print
# EXAMPLE 6: Dictionary built with a comprehension over zipped lists:
# ==============================================================================
print zip(names, heros) # Both lists should match size or length.
my_dict1 = {name: hero for name, hero in zip(names, heros)}
print my_dict1
print
# EXAMPLE 7: Dictionary comprehension with a filter condition:
# ==============================================================================
my_dict2 = {name: hero for (name, hero) in zip(names, heros) if name != 'Peter'}
print my_dict2
print
# EXAMPLE 8: Sets - similar to a list, but does NOT allow duplicated elements:
# ==============================================================================
my_set = {n for n in nums}
print my_set
print
# EXAMPLE 9: Generator expressions (lazy -- values produced one at a time):
# ==============================================================================
my_gen = (n*n for n in nums)
for i in my_gen:
    print i
# EXAMPLE 10: More examples:
# ==============================================================================
# Create some static values
# S = {x² : x in {0 ... 9}}
# V = (1, 2, 4, 8, ..., 2¹²)
# M = {x | x in S and x even}
S = [x**2 for x in range(10)]
V = [2**i for i in range(13)]
M = [x for x in S if x % 2 == 0]
print S; print V; print M; print
# Sieve-style prime computation: noprimes collects every composite < 50.
noprimes = [j for i in range(2, 8) for j in range(i*2, 50, i)]
primes = [x for x in range(2, 50) if x not in noprimes]
print primes
print
words = 'The quick brown fox jumps over the lazy dog'.split()
print words
# Per-word [UPPER, lower, length] triples, first via comprehension...
stuff = [[w.upper(), w.lower(), len(w)] for w in words]
for i in stuff:
    print i
# ...then the equivalent via map + lambda.
stuff = map(lambda w: [w.upper(), w.lower(), len(w)], words)
for i in stuff:
    print i
from examples.AV.example_runner_mcts_av import runner as mcts_runner
def validate_mcts():
    """Run one short MCTS validation experiment per (variant, stress-test
    mode) combination and return True on completion.

    Variants: 'mcts', 'mctsbv', 'mctsrs'; stress-test modes: 1 and 2.
    Results and tree plots are written under ./data/mcts.

    NOTE: the original re-assigned max_path_length, s_0, env_args['s_0'],
    reward_args['use_heuristic'] and sim_args['max_path_length'] to the
    exact values they already held; those redundant assignments were removed.
    """
    # Overall settings
    max_path_length = 50
    s_0 = [0.0, -4.0, 1.0, 11.17, -35.0]
    base_log_dir = './data'
    # experiment settings
    run_experiment_args = {'snapshot_mode': 'last',
                           'snapshot_gap': 1,
                           'log_dir': None,
                           'exp_name': None,
                           'seed': 0,
                           'n_parallel': 1,
                           'tabular_log_file': 'progress.csv'
                           }
    # runner settings
    runner_args = {'n_epochs': 1,
                   'batch_size': 500,
                   'plot': False
                   }
    # env settings
    env_args = {'id': 'ast_toolbox:GoExploreAST-v1',
                'blackbox_sim_state': True,
                'open_loop': False,
                'fixed_init_state': True,
                's_0': s_0,
                }
    # simulation settings
    sim_args = {'blackbox_sim_state': True,
                'open_loop': False,
                'fixed_initial_state': True,
                'max_path_length': max_path_length
                }
    # reward settings
    reward_args = {'use_heuristic': True}
    # spaces settings
    spaces_args = {}
    sampler_args = {'n_envs': 1,
                    'open_loop': False}
    # MCTS Settings
    mcts_bpq_args = {'N': 10}
    run_experiment_args['log_dir'] = base_log_dir + '/mcts'
    run_experiment_args['exp_name'] = 'mcts'
    for mcts_type in ['mcts', 'mctsbv', 'mctsrs']:
        for stress_test_mode in [1, 2]:
            mcts_algo_args = {'max_path_length': max_path_length,
                              'stress_test_mode': stress_test_mode,
                              'ec': 100.0,
                              'n_itr': 1,
                              'k': 0.5,
                              'alpha': 0.5,
                              'clear_nodes': True,
                              'log_interval': 500,
                              'plot_tree': True,
                              'plot_path': run_experiment_args['log_dir'] + '/' + mcts_type + '_tree',
                              'log_dir': run_experiment_args['log_dir'],
                              }
            mcts_runner(
                mcts_type=mcts_type,
                env_args=env_args,
                run_experiment_args=run_experiment_args,
                sim_args=sim_args,
                reward_args=reward_args,
                spaces_args=spaces_args,
                algo_args=mcts_algo_args,
                bpq_args=mcts_bpq_args,
                runner_args=runner_args,
                sampler_args=sampler_args
            )
    return True
if __name__ == '__main__':
validate_mcts()
| 3,050 | 997 |
import json
import numpy as np
from openfermion import SymbolicOperator
from typing import Union, Dict, Optional, List
from zquantum.core.circuit import (
Circuit,
load_circuit,
load_circuit_template_params,
save_circuit_template_params,
load_parameter_grid,
)
from zquantum.core.cost_function import (
get_ground_state_cost_function,
AnsatzBasedCostFunction,
)
from zquantum.core.estimation import (
estimate_expectation_values_by_averaging,
)
from zquantum.core.serialization import save_optimization_results
from zquantum.core.utils import create_object, load_list
from zquantum.core.typing import Specs
from zquantum.core.openfermion import load_qubit_operator
def optimize_parametrized_circuit_for_ground_state_of_operator(
    optimizer_specs: Specs,
    target_operator: Union[SymbolicOperator, str],
    parametrized_circuit: Union[Circuit, str],
    backend_specs: Specs,
    estimation_method_specs: Optional[Specs] = None,
    estimation_preprocessors_specs: Optional[List[Specs]] = None,
    initial_parameters: Union[str, np.ndarray, List[float]] = None,
    fixed_parameters: Optional[Union[np.ndarray, str]] = None,
    parameter_precision: Optional[float] = None,
    parameter_precision_seed: Optional[int] = None,
    **kwargs
):
    """Optimize the parameters of a parametrized quantum circuit to prepare the ground state of a target operator.

    Every specs/artifact argument may arrive either as an already-parsed
    object or as a string (JSON for specs, a file reference for artifacts);
    strings are deserialized before use.  Side effects: writes
    "optimization-results.json" and "optimized-parameters.json" to the
    working directory.

    Args:
        optimizer_specs: The specs of the optimizer to use to refine the parameter values
        target_operator: The operator of which to prepare the ground state
        parametrized_circuit: The parametrized quantum circuit that prepares trial states
        backend_specs: The specs of the quantum backend (or simulator) to use to run the circuits
        estimation_method_specs: A reference to a callable to use to estimate the expectation value of the operator.
            The default is the estimate_expectation_values_by_averaging function.
        estimation_preprocessors_specs: A list of Specs that describe callable functions that adhere to the
            EstimationPreprocessor protocol.
        initial_parameters: The initial parameter values to begin optimization
        fixed_parameters: values for the circuit parameters that should be fixed.
        parameter_precision: the standard deviation of the Gaussian noise to add to each parameter, if any.
        parameter_precision_seed: seed for randomly generating parameter deviation if using parameter_precision
        kwargs:
            The following key word arguments are handled explicitly when appropriate:
            parameter_grid: A parameter grid artifact that defines a 2D grid for parameter values
    """
    if isinstance(optimizer_specs, str):
        optimizer_specs = json.loads(optimizer_specs)
    parameter_grid = kwargs.pop("parameter_grid", None)
    # Load parameter grid -- attached to the optimizer specs before creation
    # so grid-based optimizers can consume it.
    if parameter_grid is not None:
        parameter_grid = load_parameter_grid(parameter_grid)
        optimizer_specs["grid"] = parameter_grid
    optimizer = create_object(optimizer_specs)
    if isinstance(target_operator, str):
        target_operator = load_qubit_operator(target_operator)
    if isinstance(parametrized_circuit, str):
        parametrized_circuit = load_circuit(parametrized_circuit)
    if isinstance(backend_specs, str):
        backend_specs = json.loads(backend_specs)
    backend = create_object(backend_specs)
    # Fall back to plain expectation-value averaging when no method is given.
    if estimation_method_specs is not None:
        if isinstance(estimation_method_specs, str):
            estimation_method_specs = json.loads(estimation_method_specs)
        estimation_method = create_object(estimation_method_specs)
    else:
        estimation_method = estimate_expectation_values_by_averaging
    estimation_preprocessors = []
    if estimation_preprocessors_specs is not None:
        for estimation_preprocessor_specs in estimation_preprocessors_specs:
            if isinstance(estimation_preprocessor_specs, str):
                estimation_preprocessor_specs = json.loads(
                    estimation_preprocessor_specs
                )
            estimation_preprocessors.append(
                create_object(estimation_preprocessor_specs)
            )
    if initial_parameters is not None:
        if isinstance(initial_parameters, str):
            initial_parameters = load_circuit_template_params(initial_parameters)
    if fixed_parameters is not None:
        if isinstance(fixed_parameters, str):
            fixed_parameters = load_circuit_template_params(fixed_parameters)
    cost_function = get_ground_state_cost_function(
        target_operator,
        parametrized_circuit,
        backend,
        estimation_method=estimation_method,
        estimation_preprocessors=estimation_preprocessors,
        fixed_parameters=fixed_parameters,
        parameter_precision=parameter_precision,
        parameter_precision_seed=parameter_precision_seed,
    )
    optimization_results = optimizer.minimize(cost_function, initial_parameters)
    save_optimization_results(optimization_results, "optimization-results.json")
    save_circuit_template_params(
        optimization_results.opt_params, "optimized-parameters.json"
    )
def optimize_ansatz_based_cost_function(
    optimizer_specs: Specs,
    target_operator: Union[SymbolicOperator, str],
    ansatz_specs: Specs,
    backend_specs: Specs,
    estimation_method_specs: Optional[Specs] = None,
    estimation_preprocessors_specs: Optional[List[Specs]] = None,
    initial_parameters: Union[str, np.ndarray, List[float]] = None,
    fixed_parameters: Optional[Union[np.ndarray, str]] = None,
    parameter_precision: Optional[float] = None,
    parameter_precision_seed: Optional[int] = None,
    **kwargs
):
    """Optimize the parameters of an ansatz circuit to prepare the ground state of a target operator.

    Every specs/artifact argument may arrive either as an already-parsed
    object or as a string; strings are deserialized before use.  Side
    effects: writes "optimization-results.json" and
    "optimized-parameters.json" to the working directory.

    Args:
        optimizer_specs: The specs of the optimizer to use to refine the parameter values
        target_operator: The operator of which to prepare the ground state
        ansatz_specs: The specs describing an Ansatz which will prepare the quantum circuit
        backend_specs: The specs of the quantum backend (or simulator) to use to run the circuits
        estimation_method_specs: A reference to a callable to use to estimate the expectation value of the operator.
            The default is the estimate_expectation_values_by_averaging function.
        estimation_preprocessors_specs: A list of Specs that describe callable functions that adhere to the
            EstimationPreprocessor protocol.
        initial_parameters: The initial parameter values to begin optimization
        fixed_parameters: values for the circuit parameters that should be fixed.
        parameter_precision: the standard deviation of the Gaussian noise to add to each parameter, if any.
        parameter_precision_seed: seed for randomly generating parameter deviation if using parameter_precision
        kwargs:
            The following key word arguments are handled explicitly when appropriate:
            parameter_grid: A parameter grid artifact that defines a 2D grid for parameter values
            thetas: A list of thetas used to initialize the WarmStartQAOAAnsatz
    """
    if isinstance(optimizer_specs, str):
        optimizer_specs = json.loads(optimizer_specs)
    parameter_grid = kwargs.pop("parameter_grid", None)
    # Load parameter grid -- attached to the optimizer specs before creation.
    if parameter_grid is not None:
        parameter_grid = load_parameter_grid(parameter_grid)
        optimizer_specs["grid"] = parameter_grid
    optimizer = create_object(optimizer_specs)
    if isinstance(target_operator, str):
        target_operator = load_qubit_operator(target_operator)
    if isinstance(ansatz_specs, str):
        ansatz_specs = json.loads(ansatz_specs)
        # QAOA-family ansatzes need the cost Hamiltonian (and, for warm
        # starts, the thetas list) injected into their specs before creation.
        if "WarmStartQAOAAnsatz" in ansatz_specs["function_name"]:
            ansatz_specs["thetas"] = np.array(load_list(kwargs.pop("thetas")))
            ansatz_specs["cost_hamiltonian"] = target_operator
        elif "QAOA" in ansatz_specs["function_name"]:
            ansatz_specs["cost_hamiltonian"] = target_operator
    ansatz = create_object(ansatz_specs)
    if isinstance(backend_specs, str):
        backend_specs = json.loads(backend_specs)
    backend = create_object(backend_specs)
    # Fall back to plain expectation-value averaging when no method is given.
    if estimation_method_specs is not None:
        if isinstance(estimation_method_specs, str):
            estimation_method_specs = json.loads(estimation_method_specs)
        estimation_method = create_object(estimation_method_specs)
    else:
        estimation_method = estimate_expectation_values_by_averaging
    estimation_preprocessors = []
    if estimation_preprocessors_specs is not None:
        for estimation_preprocessor_specs in estimation_preprocessors_specs:
            if isinstance(estimation_preprocessor_specs, str):
                estimation_preprocessor_specs = json.loads(
                    estimation_preprocessor_specs
                )
            estimation_preprocessors.append(
                create_object(estimation_preprocessor_specs)
            )
    if initial_parameters is not None:
        if isinstance(initial_parameters, str):
            initial_parameters = load_circuit_template_params(initial_parameters)
    if fixed_parameters is not None:
        if isinstance(fixed_parameters, str):
            fixed_parameters = load_circuit_template_params(fixed_parameters)
    cost_function = AnsatzBasedCostFunction(
        target_operator,
        ansatz,
        backend,
        estimation_method=estimation_method,
        estimation_preprocessors=estimation_preprocessors,
        fixed_parameters=fixed_parameters,
        parameter_precision=parameter_precision,
        parameter_precision_seed=parameter_precision_seed,
    )
    optimization_results = optimizer.minimize(cost_function, initial_parameters)
    save_optimization_results(optimization_results, "optimization-results.json")
    save_circuit_template_params(
        optimization_results.opt_params, "optimized-parameters.json"
    )
| 10,058 | 2,735 |
# Experiment that generates several sets of networks of varying CH-divergence types
# then trains an msbm of a single type in a "consensus" type of way. Then we report the
# average rand_index and average entropy of the z variables, which are indicators of how well
# the algorithm is learning the true model.
import os, sys
import pickle
import numpy as np
sys.path.insert(0, '../..')
import util as ut
import init_msbm_vi as im
import varinf as varinf
def main():
    """Train an MSBM on every dataset file in data/ that has no saved model
    yet, keeping the best of four random initialisations (by final ELBO),
    and pickle the winning moments + ELBO trace into models/."""
    file_list = np.array(sorted(os.listdir('data')))
    # We exclude those already trained (for debugging)
    ignore_list = [stri.replace("model_", "") for stri in sorted(os.listdir('models'))]
    fil = [(name not in ignore_list) for name in file_list]
    file_list = file_list[fil]
    for data_file in file_list:
        # load data
        file_url = os.path.join('data', data_file)
        data = ut.load_data(file_url)
        # symmetric priors for all variational factors
        prior = dict()
        prior['ALPHA_0'] = 0.5
        prior['BETA_0'] = 0.5
        prior['NU_0'] = 0.5
        prior['ZETA_0'] = 0.5
        # assigning hyper-parameters from ground truth (cheating)
        hyper = dict()
        par = dict()
        hyper['M'] = data['M']
        hyper['Q'] = data['Q']
        par['MAX_ITER'] = 1000
        par['TOL_ELBO'] = 1.e-13
        par['ALG'] = 'cavi'
        par['kappas'] = np.ones(par['MAX_ITER'])
        # Best of 4: run variational inference from four seeds and keep the
        # run with the highest final ELBO.
        candidate_moms = []
        candidate_elbos = []
        candidate_score = []
        for r in range(4):
            mom = im.init_moments(data, hyper, seed=r)
            results_mom, elbo_seq = varinf.infer(data, prior, hyper, mom, par)
            candidate_moms.append(results_mom)
            candidate_elbos.append(elbo_seq)
            candidate_score.append(elbo_seq['all'][-1])
        results_mom = candidate_moms[np.argmax(candidate_score)]
        elbo_seq = candidate_elbos[np.argmax(candidate_score)]
        print('Saving file to {:s} ... '.format('models/model_' + data_file))
        out_file_url = os.path.join('models', 'model_' + data_file)
        pickle.dump({'results_mom': results_mom, 'elbo_seq': elbo_seq}, open(out_file_url, 'wb'))
if __name__ == '__main__':
main() | 2,200 | 744 |
from django.test import TestCase
from django.urls import reverse, resolve
from goodbuyDatabase.endpoints import is_in_own_database
from goodbuyDatabase.views import product_list, show_list_of_codes
class TestUrls(TestCase):
    """URL-routing tests: reverse() builds each named URL and resolve()
    checks it is wired to the expected view function."""

    def test_is_in_own_database(self):
        """The is_in_own_database URL (barcode arg) routes to its view."""
        url = reverse('goodbuyDatabase:is_in_own_database', args=['4000582185399'])
        self.assertEqual(resolve(url).func, is_in_own_database)

    def test_product_list(self):
        """The product_list URL routes to the product_list view."""
        url = reverse('goodbuyDatabase:product_list')
        self.assertEqual(resolve(url).func, product_list)

    def test_show_list_of_codes(self):
        """The show_list_of_codes URL (barcode arg) routes to its view."""
        url = reverse('goodbuyDatabase:show_list_of_codes', args=['4000582185399'])
        self.assertEqual(resolve(url).func, show_list_of_codes)
| 833 | 280 |
try:
import logging
import os
except BaseException:
print('Exception got in importing the module.')
class makeLog:
    """Thin wrapper around the stdlib logging module that writes DEBUG-and-up
    records to <cwd>/log_files/logfile.log (truncated on each run)."""

    def __init__(self):
        # Build the log path portably.  The original hard-coded
        # 'log_files\\' which, on non-Windows systems, created a directory
        # literally named "log_files\" that its own listdir() check never
        # matched, so a second run crashed on os.mkdir.
        log_dir = os.path.join(os.getcwd(), 'log_files')
        os.makedirs(log_dir, exist_ok=True)
        logging.basicConfig(filename=os.path.join(log_dir, 'logfile.log'),
                            format='%(asctime)s %(message)s',
                            filemode='w')
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)

    def debug(self, string):
        """Log *string* at DEBUG level."""
        self.logger.debug(string)

    def info(self, string):
        """Log *string* at INFO level."""
        self.logger.info(string)

    def warning(self, string):
        """Log *string* at WARNING level."""
        self.logger.warning(string)

    def error(self, string):
        """Log *string* at ERROR level."""
        self.logger.error(string)

    def critical(self, string):
        """Log *string* at CRITICAL level.

        BUG FIX: the original defined a second debug() that called
        logger.critical(), silently shadowing the real debug() and leaving
        no critical() method at all.
        """
        self.logger.critical(string)
| 1,274 | 391 |
import redis
from sparweltbitool.config import config
from sparweltbitool.singleton import Singleton
@Singleton
class ClientRedis(object):
    """ Operations with Redis (process-wide singleton connection)."""
    def __init__(self):
        # Connection parameters come from the [redis] section of the config.
        # NOTE(review): config.get presumably returns strings; StrictRedis
        # expects an int for db -- confirm the config layer coerces it.
        self.r = redis.StrictRedis(
            host=config.get('redis', 'host'),
            port=config.get('redis', 'port'),
            db=config.get('redis', 'db'))
#!/usr/bin/env python
'''
Copyright 2015 Ivan Sadikov
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# import libs
import unittest
import warnings
# import classes
import analytics.exceptions.exceptions as ex
import analytics.core.processor.processor as processor
import analytics.analyser.analyser as analyser
import analytics.algorithms.rank as rnk
from analytics.algorithms.algorithmsmap import AlgorithmsMap
from analytics.algorithms.relativecomp import RelativeComparison
from analytics.core.map.elementmap import ElementMap
from analytics.core.map.pulsemap import PulseMap
from analytics.core.map.clustermap import ClusterMap
class Analyser_TestSequence(unittest.TestCase):
    """Tests for the analyser module.

    NOTE: Python 2 era code -- it indexes dict .values() directly
    (e.g. ``self.pulses._map.values()[0]``), which only works where
    .values() returns a list.
    """

    def setUp(self):
        self.isStarted = True
        # pulse (property) definitions: name/desc/sample value plus flags
        self._a = [
            {"name": "value", "desc": "value", "sample": 128, "dynamic": True, "priority": -1},
            {"name": "price", "desc": "price", "sample": 245.0, "dynamic": True, "priority": 1},
            {"name": "amount", "desc": "amount", "sample": 3, "dynamic": True}
        ]
        # elements: one row per item, assigned to cluster A or B
        self._b = [
            {"id": "1", "name": "#1", "desc": "", "cluster": "A", "value": 100, "price": 320.0, "amount": 1},
            {"id": "2", "name": "#2", "desc": "", "cluster": "A", "value": 120, "price": 300.0, "amount": 4},
            {"id": "3", "name": "#3", "desc": "", "cluster": "A", "value": 140, "price": 199.0, "amount": 3},
            {"id": "4", "name": "#4", "desc": "", "cluster": "A", "value": 124, "price": 234.0, "amount": 5},
            {"id": "5", "name": "#5", "desc": "", "cluster": "A", "value": 150, "price": 250.0, "amount": 9},
            {"id": "6", "name": "#6", "desc": "", "cluster": "B", "value": 128, "price": 245.0, "amount": 3},
            {"id": "7", "name": "#7", "desc": "", "cluster": "B", "value": 125, "price": 230.0, "amount": 2}
        ]
        # cluster hierarchy: O is the root, A and B its children
        self._c = [
            {"id": "1", "name": "O", "desc": "O", "parent": None},
            {"id": "2", "name": "A", "desc": "A", "parent": "O"},
            {"id": "3", "name": "B", "desc": "B", "parent": "O"}
        ]
        # initialise maps and idmapper
        self.pulses = PulseMap()
        self.elements = ElementMap()
        self.clusters = ClusterMap()
        idmapper = processor.parseClusters(self._c, self.clusters, {})
        idmapper = processor.parseElements(self._b, self.elements, idmapper)
        idmapper = processor.parsePulses(self._a, self.pulses, idmapper)

    def test_analyser_analyseWithErrors(self):
        """An empty algorithms map must raise; a duplicate assign must warn."""
        # algorithms
        algorithms = AlgorithmsMap()
        for alg in analyser.ALGORITHMS.values():
            algorithms.assign(alg)
        # run analyser
        with self.assertRaises(ex.AnalyticsTypeError):
            analyser.analyseUsingMap(
                AlgorithmsMap(),
                self.elements,
                self.pulses,
                False
            )
        # run again to catch warning
        algorithms.assign(RelativeComparison())
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # rank elements
            block = analyser.AnalyseBlock(
                algorithms,
                self.elements,
                self.pulses
            )
            block = analyser.analyseWithBlock(block)
            self.assertEqual(block._algorithm, algorithms.values()[0])
            # warnings assertion
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[0].category, UserWarning))

    def test_analyser_analyseDynamic(self):
        """With >2 dynamic pulses a warning fires; after pinning one static,
        every element gets a defined (non-UND) rank."""
        # algorithms
        algorithms = AlgorithmsMap()
        for alg in analyser.ALGORITHMS.values():
            algorithms.assign(alg)
        # analyse results
        block = analyser.AnalyseBlock(algorithms, self.elements, self.pulses)
        # catch warning about too many dynamic properties
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            block = analyser.analyseWithBlock(block)
            # warnings assertion
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[0].category, UserWarning))
        # leave only two dynamic properties
        self.pulses._map.values()[0].setStatic(True)
        # analyse elements again
        block = analyser.analyseWithBlock(block)
        self.assertEqual(block._isAnalysed, True)
        self.assertEqual(block._algorithm, algorithms.values()[0])
        elements = block._elementmap
        for element in elements._map.values():
            self.assertNotEqual(element.rank()._name, rnk.RSYS.UND_RANK._name)

    def test_analyser_analyseStatic(self):
        """With every pulse static, all elements keep the undefined rank."""
        # algorithms
        algorithms = AlgorithmsMap()
        for alg in analyser.ALGORITHMS.values():
            algorithms.assign(alg)
        # set all properties as static
        for pulse in self.pulses._map.values():
            pulse.setStatic(True)
        # analyse results
        block = analyser.AnalyseBlock(algorithms, self.elements, self.pulses)
        # rank elements
        block = analyser.analyseWithBlock(block)
        self.assertEqual(block._isAnalysed, True)
        self.assertEqual(block._algorithm, algorithms.values()[0])
        elements = block._elementmap
        for element in elements._map.values():
            self.assertEqual(element.rank()._name, rnk.RSYS.UND_RANK._name)
# Registry of the test-case classes this module exposes.
def _suites():
    """Return every test-case class defined in this module."""
    suite_classes = [Analyser_TestSequence]
    return suite_classes
# Aggregate the per-class suites into one global suite.
def loadSuites():
    """Build a single TestSuite covering every class from _suites()."""
    loader = unittest.TestLoader()
    gsuite = unittest.TestSuite()
    for case_class in _suites():
        gsuite.addTest(loader.loadTestsFromTestCase(case_class))
    return gsuite
if __name__ == '__main__':
    # Run the whole suite with verbose output.
    # NOTE: Python 2 print statements -- this module predates Python 3.
    suite = loadSuites()
    print ""
    print "### Running tests ###"
    print "-" * 70
    unittest.TextTestRunner(verbosity=2).run(suite)
| 6,317 | 1,941 |
import os
import argparse
import time
if __name__ == '__main__':
    # Split a file of fixed 160-byte records into three column files:
    # bytes [0:4) -> label, [4:56) -> dense features, [56:160) -> categorical.
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str)
    parser.add_argument('output', type=str)
    args = parser.parse_args()
    os.makedirs(args.output)
    size = os.path.getsize(args.input)
    # Validate with an explicit exception: `assert` is stripped under -O.
    if size % 160 != 0:
        raise ValueError('input size %d is not a multiple of the 160-byte record size' % size)
    num_samples = size // 160
    chunk_size = 1024 * 1024  # records (not bytes) per read
    num_loops = num_samples // chunk_size + 1
    start_time = time.time()
    # Context managers guarantee the four files are closed even on error.
    with open(args.input, 'rb') as inp_f, \
         open(os.path.join(args.output, 'label.bin'), 'wb') as label_f, \
         open(os.path.join(args.output, 'dense.bin'), 'wb') as dense_f, \
         open(os.path.join(args.output, 'category.bin'), 'wb') as category_f:
        for i in range(num_loops):
            t = time.time()
            if i == (num_loops - 1):
                # Last pass handles the remainder; nothing left means we are done.
                batch = min(chunk_size, num_samples % chunk_size)
                if batch == 0:
                    break
            else:
                batch = chunk_size
            raw_buffer = inp_f.read(160 * batch)
            for j in range(batch):
                record = raw_buffer[j * 160:(j + 1) * 160]
                label_f.write(record[0:4])
                dense_f.write(record[4:56])
                category_f.write(record[56:160])
            print('%d/%d batch finished. write %d samples, time: %.2fms, remaining time: %.2f min'%(
                i+1, num_loops, batch, (time.time() - t)*1000, ((time.time() - start_time) / 60) * (num_loops / (i+1) - 1)))
from ml_buff.models.input_data import InputData
from ml_buff.database_helper import create_tables, drop_tables
from ml_buff.models.base_model import database
# Database connection settings (drivername/host/port/credentials/db name).
DATABASE = {
    'drivername': 'postgresql',
    'host': 'localhost',
    'port': '5432',
    'username': 'postgres',
    'password': 'postgres',
    'database': 'ml_buff'
}
# AVA definitions file: one sample per line, column 1 holds the external id.
DATASET_DEFINITIONS = r'../../AVA_dataset/AVA.txt'
drop_tables()
create_tables()
data_source = []
# Context manager closes the file deterministically; also avoid shadowing
# the `file` builtin as the original did.
with open(DATASET_DEFINITIONS) as definitions:
    for line in definitions:
        fields = line.strip().split(' ')
        data_source.append({'external_id': fields[1], 'dataset_name': 'AVA'})
print('datasource built with {0} entries'.format(len(data_source)))
# Insert in batches of 100 inside one transaction for throughput.
with database.atomic():
    for idx in range(0, len(data_source), 100):
        InputData.insert_many(data_source[idx:idx + 100]).execute()
| 836 | 283 |
import pygame
import socket
import errno
import threading
from button import Button
from text import Text, TextFeed
from textbox import TextBox
from message import Message
from instructions import Instruction
from cards import Deck, Card
class TitleScreen:
    """Pygame lobby screen.

    Collects the player's name and the server address, connects over TCP,
    and runs the event loop until the server starts the game.  ``run()``
    returns the "game package": [active_player_id, players, socket, deck].
    """

    UPDATE_FREQUENCY = 1000

    def __init__(self, screen_size=(1280, 720), title="Mongoose", clear_colour=(66, 135, 245)):
        self.screen_size = screen_size
        self.title = title
        self.clear_colour = clear_colour
        pygame.init()
        self.screen = pygame.display.set_mode(screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
        pygame.display.set_caption(title)
        self.clock = pygame.time.Clock()
        self.__title_text = Text(title, 64, text_colour=(255, 255, 255))
        # All widgets register under "title_screen" so the TextBox/Button
        # class-level helpers can update/render them as a group.
        self.__name_input = TextBox((0.5, 0.4), (0.4, 0.06),
                                    Text(font_size=32, font_hierarchy=["Verdana"]),
                                    Text("Name", font_size=32, font_hierarchy=["Verdana"], text_colour=(64, 64, 64)),
                                    register_group="title_screen")
        self.__ip_input = TextBox((0.5, 0.5), (0.4, 0.06),
                                  Text(font_size=32, font_hierarchy=["Verdana"]),
                                  Text("IP Address", font_size=32, font_hierarchy=["Verdana"],
                                       text_colour=(64, 64, 64)),
                                  register_group="title_screen")
        self.__port_input = TextBox((0.5, 0.6), (0.4, 0.06),
                                    Text(font_size=32, font_hierarchy=["Verdana"]),
                                    Text("Port", font_size=32, font_hierarchy=["Verdana"], text_colour=(64, 64, 64)),
                                    register_group="title_screen")
        self.__join_button = Button("Join", (0.5, 0.8), (0.1, 0.08), register_group="title_screen")
        self.__join_button.subscribe_event(self.join_game)
        self.__status_text = Text("Status: Not connected", font_size=28,
                                  font_hierarchy=["Verdana"], text_colour=(255, 0, 0))
        self.__info_feed = TextFeed((0.85, 0.5), (0.3, 0.3))
        self.client_socket = None
        self.__connected_to_server = False
        self.__sync_deck = None
        # Filled by start_game(); run() returns once it is non-empty.
        self.__game_package = []
        self.__join_game_thread = None

    def run(self):
        """Event loop: pump events, render, and poll the server until the
        game package is ready.  Returns the game package."""
        while not self.__game_package:
            pygame.event.pump()
            for event in pygame.event.get():
                if event.type == pygame.VIDEORESIZE:
                    self.screen_size = (event.w, event.h)
                    self.screen = pygame.display.set_mode(self.screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
                if event.type == pygame.QUIT:
                    self.quit()
                TextBox.update_all("title_screen", self.screen_size, event)
            mouse_pos = pygame.mouse.get_pos()
            mouse_pressed = pygame.mouse.get_pressed()
            Button.update_all("title_screen", self.screen_size, mouse_pos, mouse_pressed)
            self.render()
            self.handle_server_io()
            self.clock.tick(60)
        return self.__game_package

    def render(self):
        """Draw the title, widgets, status line and info feed, then flip."""
        self.screen.fill(self.clear_colour)
        self.__title_text.render(self.screen, (0.5, 0.2))
        Button.render_all("title_screen", self.screen)
        TextBox.render_all("title_screen", self.screen)
        self.__status_text.render_from_corner(self.screen, (0.1 * self.screen_size[0], 0.8 * self.screen_size[1]))
        self.__info_feed.render(self.screen)
        pygame.display.flip()

    def join_game(self):
        """Button callback: start the (single) connection attempt thread."""
        if self.__join_game_thread is not None:
            if self.__join_game_thread.is_alive():
                return
        self.__join_game_thread = threading.Thread(target=self.join_game_async)
        self.__join_game_thread.start()

    def join_game_async(self):
        """Connect to ip:port from the inputs and send the player's name.

        Runs on a worker thread so the UI stays responsive; updates the
        status text with the outcome.
        """
        if not self.__port_input.text.isnumeric() or self.__connected_to_server:
            return
        ip = self.__ip_input.text
        port = int(self.__port_input.text)
        try:
            self.__status_text.text = f"Status: Connecting to server..."
            self.__status_text.text_colour = (255, 170, 0)
            self.__status_text.update()
            self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.client_socket.settimeout(10)
            self.client_socket.connect((ip, port))
            # After connecting, the main loop polls with non-blocking reads.
            self.client_socket.setblocking(False)
            self.__status_text.text = f"Status: Connected to {ip}:{port}. Waiting for game..."
            self.__status_text.text_colour = (0, 255, 0)
            self.__status_text.update()
            name_message = Message.new_send_message(
                f"{Instruction.SET_PROPERTY}:'{self.__name_input.text}'".encode("utf-8")
                if False else
                f"{Instruction.SET_PROPERTY}:'name':'{self.__name_input.text}'".encode("utf-8")
            )
            self.client_socket.sendall(name_message.encode())
            self.__connected_to_server = True
        except ConnectionRefusedError:
            self.__status_text.text = f"Status: Connection to {ip}:{port} failed."
            self.__status_text.text_colour = (255, 0, 0)
            self.__status_text.update()
        except socket.timeout:
            self.__status_text.text = f"Status: Connection to {ip}:{port} timed out."
            self.__status_text.text_colour = (255, 0, 0)
            self.__status_text.update()

    def handle_server_io(self):
        """Poll the non-blocking socket once and dispatch a full message."""
        if not self.__connected_to_server:
            return
        try:
            message = Message.new_recv_message()
            buffer = self.client_socket.recv(Message.BUFFER_SIZE)
            if not buffer:
                # An empty read on a TCP socket means the peer closed the
                # connection.  BUG FIX: the original fell through here and
                # recv()'d again on the closed socket; stop instead.
                self.__status_text.text = f"Status: Lost connection to server."
                self.__status_text.text_colour = (255, 0, 0)
                self.__status_text.update()
                self.client_socket.close()
                self.__connected_to_server = False
                return
            while not message.decode(buffer):
                buffer = self.client_socket.recv(Message.BUFFER_SIZE)
            self.decode_instruction(message.message.decode("utf-8"))
        except IOError as e:
            # EAGAIN/EWOULDBLOCK just mean "no data yet" on a non-blocking
            # socket; anything else is a real error and drops the connection.
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                self.__status_text.text = f"Error: {e}"
                self.__status_text.text_colour = (255, 0, 0)
                self.__status_text.update()
                self.client_socket.close()
                self.__connected_to_server = False

    def decode_instruction(self, message):
        """Parse "INSTRUCTION:'op1':'op2':..." and act on the instruction.

        Operands are single-quoted; characters outside quotes other than the
        ':' separators are ignored.
        """
        operands = []
        if ":" in message:
            instruction, operand = message.split(":", 1)
            in_string = False
            cur_operand = ""
            for c in operand:
                if c == "'":
                    in_string = not in_string
                else:
                    if in_string:
                        cur_operand += c
                    elif c == ":":
                        operands.append(cur_operand)
                        cur_operand = ""
            operands.append(cur_operand)
        else:
            instruction = message
        if instruction == Instruction.Update.GAME_RUNNING:
            self.__status_text.text = f"Status: Game already running on server."
            self.__status_text.text_colour = (255, 170, 0)
            self.__status_text.update()
            self.client_socket.close()
            self.__connected_to_server = False
        if instruction == Instruction.START_GAME:
            # Operands: active player id, then alternating name/id pairs.
            active_id = int(operands[0])
            players = []
            _p = []
            for i, o in enumerate(operands[1:]):
                # even: name, odd: id
                if i % 2 == 0:
                    _p = [o]
                else:
                    _p.append(int(o))
                    players.append(_p)
            self.start_game(active_id, sorted(players, key=lambda x: x[1]))
        if instruction == Instruction.Update.PLAYER_JOINED:
            assert len(operands) == 1
            self.__info_feed.add_line(f"Player {operands[0]} joined the game.")
        if instruction == Instruction.Game.SEND_DECK:
            # 52 operands of the form "<suit digit>-<value>".
            assert len(operands) == 52
            suit_map = {"0": "Spades", "1": "Diamonds", "2": "Clubs", "3": "Hearts"}
            cards = []
            for card in operands:
                s, v = card.split("-")
                cards.append(Card(suit_map[s], int(v)))
            self.__sync_deck = Deck(cards)

    def start_game(self, active_id, players):
        """Assemble the game package; run() returns it on its next pass."""
        self.__game_package = [active_id, players, self.client_socket, self.__sync_deck]

    def quit(self):
        """Notify the server (if connected), shut pygame down and exit."""
        if self.__connected_to_server:
            self.client_socket.sendall(Message.new_send_message(Instruction.Update.QUIT_GAME.encode("utf-8")).encode())
        pygame.quit()
        # NOTE(review): relies on the site-provided `quit()` builtin to raise
        # SystemExit; sys.exit() would be the more robust choice.
        quit()
| 9,050 | 2,807 |
# import pytest
from rlj import Judge, JudgeStatus, Config, makeConfig
import os
# Baseline docopt-style argument map shared by all tests; each test copies
# it and fills in '-j' with the source file to judge.
arguments = {
    '--O2': False,
    '--delete': False,
    '--genConfig': False,
    '--help': False,
    '--silent': False,
    '--version': False,
    '-c': 'config.yml',
    '-j': None,
    'FILE': None
}
def getConfig(st):
    """Build a judge Config for the source file ``st + '.cpp'``."""
    overrides = dict(arguments)
    overrides['-j'] = st + '.cpp'
    return makeConfig('config.yml', overrides)
def runTest1(st):
    """Judge solution `st` and verify compilation plus both data points."""
    outcome = list(Judge(getConfig(st)).judge())
    compile_status = outcome[0]
    print(outcome)
    print(compile_status)
    assert compile_status[0] == 'DONE'
    assert compile_status[1] == '编译成功'
    for point, data in ((1, 'test1'), (2, 'test2')):
        expected = (point,
                    ('data/{}.in'.format(data), 'data/{}.ans'.format(data)),
                    JudgeStatus(st, 2, 0.5, 0))
        assert outcome[point] == expected
def test_1():
    """Every runtime verdict should compile and judge as expected."""
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    for verdict in ('AC', 'WA', 'TLE', 'MLE', 'RE'):
        runTest1(verdict)
def runTest2(st, chn):
    """Judge `st` and expect the compile step to end with status/text."""
    compile_status = list(Judge(getConfig(st)).judge())[0]
    assert compile_status[0] == st
    assert compile_status[1] == chn
def test_2():
    """Compile-error and compile-timeout verdicts."""
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    for st, chn in (('ERROR', '编译错误'), ('CTLE', '编译超时')):
        runTest2(st, chn)
| 1,357 | 532 |
########################################################
# Copyright (c) 2015-2017 by European Commission. #
# All Rights Reserved. #
########################################################
"""
This context action deletes all KPI values in the current context
"""
from com.artelys.CrystalEditor.scripting import Crystal
from com.artelys.platform.gui.dialogs import CrystalOptionDialog
def onExecute(action, event):
    """Notify the user, then invalidate the KPI caches of both contexts."""
    Crystal.displayInfoNotification("Invalidate KPI cache", "KPI cache has been invalidated", "Context action query: KPI cache has been invalidated")
    invalidateKPIs(action, event)
def onParametersChange(action, event):
    """Invalidate KPI caches whenever the action's parameters change."""
    invalidateKPIs(action, event)
def onStructureChange(action, event):
    """Invalidate KPI caches whenever the context structure changes."""
    invalidateKPIs(action, event)
def invalidateKPIs(action, event):
    """
    Invalidate the KPIs results of source and destination context of the action
    """
    sourceContext = action.getSourceContext()
    destContext = action.getDestinationContext()
    # NOTE(review): `invalideKPIs` appears to be the platform API's spelling.
    sourceContext.getKPIContainer().invalideKPIs()
    # Identity-safe None test (`is not None` instead of `!= None`); skip the
    # second call when both contexts are the same object.
    if destContext is not None and sourceContext != destContext:
        destContext.getKPIContainer().invalideKPIs()
# Platform flags — presumably control whether the user is prompted before the
# change handlers run; confirm against the Crystal scripting documentation.
askOnParametersChange = False
askOnStructureChange = False
| 1,246 | 338 |
# Puzzle Input ----------
# Both files hold one line of comma-separated lanternfish timers, e.g. "3,4,3,1,2".
with open('Day06-Input.txt', 'r') as file:
    puzzle = list(map(int, file.read().split(',')))
with open('Day06-Test01.txt', 'r') as file:
    test01 = list(map(int, file.read().split(',')))
# Main Code ----------
# Count the first few fish and organize them by age in a dictionary
def count_first_fish(fish_list: list):
    """Tally the starting fish into an {age: count} dictionary."""
    tally = {}
    for age in fish_list:
        if age in tally:
            tally[age] += 1
        else:
            tally[age] = 1
    return tally
# See how many fish there are after n days at sea
def fish_in_n_days(fish_list: list, n: int):
    """Return the lanternfish population after `n` days.

    Only the number of fish at each timer value is tracked, so the cost is
    independent of the (exponentially growing) population size.
    """
    # Tally the starting fish by timer value (inlined counting step).
    counts = {}
    for age in fish_list:
        counts[age] = counts.get(age, 0) + 1
    for _ in range(n):
        next_counts = {}
        # Every fish at 0 spawns a new fish (timer 8) and resets to 6.
        spawning = counts.get(0, 0)
        if spawning:
            next_counts[8] = spawning
            next_counts[6] = spawning
        # All other timers simply tick down by one.
        for age, qty in counts.items():
            if age > 0:
                next_counts[age - 1] = next_counts.get(age - 1, 0) + qty
        counts = next_counts
    return sum(counts.values())
# Tests and Solution ----------
# Part 2: simulate 256 days for both the sample and the real input.
print(fish_in_n_days(test01, 256))
print(fish_in_n_days(puzzle, 256))
| 1,358 | 506 |
# Canonical application name.
APP_NAME = "fossa"
| 19 | 11 |
import boost_histogram
from .._core import storage as store
from .utils import set_module
# Simple mixin to provide a common base class for types
class Storage:
    """Base mixin for storage types; records the owning family object."""

    _family: object

    def __init_subclass__(cls, *, family: object) -> None:
        """Capture the required `family` keyword given at class definition."""
        super().__init_subclass__()
        cls._family = family

    def __repr__(self) -> str:
        return f"{type(self).__name__}()"
@set_module("boost_histogram.storage")
class Int64(store.int64, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.int64`."""

    pass


@set_module("boost_histogram.storage")
class Double(store.double, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.double`."""

    pass


@set_module("boost_histogram.storage")
class AtomicInt64(store.atomic_int64, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.atomic_int64`."""

    pass


@set_module("boost_histogram.storage")
class Unlimited(store.unlimited, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.unlimited`."""

    pass


@set_module("boost_histogram.storage")
class Weight(store.weight, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.weight`."""

    pass


@set_module("boost_histogram.storage")
class Mean(store.mean, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.mean`."""

    pass


@set_module("boost_histogram.storage")
class WeightedMean(store.weighted_mean, Storage, family=boost_histogram):
    """Storage subclass wrapping `store.weighted_mean`."""

    pass
| 1,188 | 400 |
#!/usr/bin/env python3
import itertools
import random
import subprocess
import sys
# ---------------------------------------------------------------------------
# Narrative text.  Templates containing {name} are filled via str.format;
# everything else is printed verbatim.
# ---------------------------------------------------------------------------
WELCOME_TEXT = """Welcome {name} to the land of Pavlisha.
Many have entered this land but few have returned.
Your quest is to slay the Bad King, who has stolen the Clock of Time. Without
the Clock of Time it will be 2016 forever and there will never be another
another Christmas, birthday or holiday again!
"""
ENDING = """
You have defeated the Bad King and recovered the Clock of Time.
You have saved all future Christmases.
Merry Christmas {name}!
Love,
Brian, Kevin, Sophie, Pavel and Alex.
"""
DIRECTION_CHOICE = """This place seems very familiar to another but you can't
put your finger on how...
You are standing in a snow-covered plain. In every direction stretches untracked
miles of treacherous wilderness. Your blood chills at the thought of entering any
of these foreboding landscapes - but enter you must!
To your East lies Mount Doom - a volcano covered in lava and burning embers.
You can smell the sulfur even from here.
To your South lies a nameless forest. You can hear whispers calling you to
enter. They are not kind voices.
To your West lies Swamp Putrid. Its name is well deserved as you can smell the
decaying remains of those who entered before you.
To your feet lies a cave so dark that you can't see into it more than a
sword-length.
Wait...to your North lies a beautiful meadow with a path that winds away from
the terrible danger.
You take a few minutes to rest and then make your choice.
"""
NORTH_TEXT = """You walk north on the idyllic path. You hear bird song, smell the sweet
flowers and see multi-coloured butterflies. The sun is warm and life is good.
Or is it...
"""
NORTH_CONTINUE_TEXT = """Now that the fight is behind you, you continue on the path.
If anything, the flowers smell even sweater than before. Life is great.
Or is it...
"""
EAST_TEXT = """You walk east towards the hellish fires of Mount Doom.
The air reeks of sulfur and you can feel the heat of the lava as you approach.
Occasional pyroclastic blocks fly from the volcano.
"""
EAST_TREE = """At the peak of the volcano you see a single tree. You wonder how it
managed to survive up here.
As your approach, you see that its huge branches have been charred and covered
with a red film. It radiates a sense of potent malevolence.
Just as you are deciding whether to run of not, it charges you and attempts to
crush you with its powerful branches.
"""
EAST_TREE_WIN = """At the base of the tree you spot a golden ingot and a potion.
You put the ingot in your pocket but you aren't sure what to do with the potion.
Oh heck, you are an adventurer, aren't you? You sip the potion and suddenly
feel a bit stronger.
"""
SOUTH_TEXT = """You enter the dark forest.
Your sense of foreboding lessens briefly when you see five small pigs playing
with each other and eating truffles.
Suddenly lightning flashes from the sky and hits the ground near the pigs. The
sight is horrible but it gets even worse as they change before your eyes into
horrible Zombie Pigmen.
They moan their hatred of life in general (and you in particular) and move
towards you to attack.
Fortunately, the forest restricts their movements so that they can only attack
you one at a time.
"""
SOUTH_ALREADY_DONE = """You wonder around the forest for a while but don't
find anything interesting.
You return to the snowy clearing.
"""
SOUTH_END = """You catch your breath amongst the remains of the Zombie Pigmen.
Suddenly, in the corner of your eye, you see a potion laying next to one of
the zombified pigs.
You read the label and it says "Potion of Invisibility". You hide it in your
pack and return to the clearing.
"""
WEST_TEXT = """You walk into the dank swamp hoping not to vomit from the terrible smells.
In the distance you see a huge giant - maybe the smell of decay is coming from
its victims?
As you get closer, you see the Giant is smiling and realize that it is a
Friendly Giant.
You also see that, behind the giant, there is a Crafting Table and various
magical components! If only you could make use of it for a while...
The giant greats you with a wave and says: "Answer my riddle and the Crafting
Table is yours to use. What gets wetter as it dries?".
"""
WEST_COMPLETED = """You inspect the crafting table and realize that you can use it to
make magical armor and weapons.
You start to work immediately.
After some days, you finish your work and your weapon and armor glow brightly
with their new enchantment!
You walk back to the snowy clearing feeling that there is nothing that you
cannot do with your new magical tools.
Certainly you couldn't be crushed by flying rocks.
"""
WEST_ALREADY_COMPLETED = """You wander around the swamp until the smell overwhelms you.
You return to the snowy clearing.
"""
DOWN_COMMON = """You descend into the dark cave.
There is no light at all except for the faint glow coming from your enchanted
armor. You proceed cautiously, the air chilling you to the bone.
Ahead, you see a massive rock chamber. As you approach, you see that it is so
large that it could contain a huge tower. And it does! Guarding the tower is a
nearly infinite number of soldiers.
"""
# The two cave variants share DOWN_COMMON as a prefix.
DOWN_VISIBLE = DOWN_COMMON + """You carefully sneak towards the tower, trying to avoid
the attention of the guards.
"""
DOWN_INVISIBLE = DOWN_COMMON + """You drink your Potion of Invisibility and race towards
the tower. You make it inside just as it wares off!
You climb the circular stairs until the top of the tower. At the top of the
tower you see a medium-sized man sitting in a throne. It is the Bad King!
"Welcome to my tower, {name}." says the Bad King, "I hope that you are
ready to die."
With those words, he picks up his staff and charges towards you.
"""
# Keys used in Character.completed_areas to track finished locations.
EAST_AREA = 'east'
SOUTH_AREA = 'south'
WEST_AREA = 'west'
CAVE_AREA = 'cave'
# Python 2/3 compatibility: on Py2 use raw_input so user input is never eval'd.
if sys.version_info[0] < 3:
    non_clearing_input = raw_input
else:
    non_clearing_input = input
def clear():
    """Clear the terminal via the shell's `clear` command (POSIX only)."""
    subprocess.call('clear', shell=True)


def my_input(*args, **kwargs):
    """Prompt like input(), then clear the screen once answered."""
    x = non_clearing_input(*args, **kwargs)
    clear()
    return x


# Shadow the builtin so every prompt below clears the screen afterwards.
input = my_input
class Character:
    """The player: combat stats, equipment and progress flags."""

    def __init__(self, name, race, dexterity, strength, max_hitpoints):
        self.name = name
        self.race = race
        self.dexterity = dexterity
        self.strength = strength
        self.max_hitpoints = max_hitpoints
        # Start at full health with the basic loadout and nothing collected.
        self.hitpoints = max_hitpoints
        self.weapon = 'Sword'
        self.armor = 'Chain Mail'
        self.completed_areas = set()
        self.inventory = set()

    def get_damage(self):
        """Roll attack damage, scaled by strength (2-20 with the enchanted
        sword, 1-10 otherwise)."""
        roll = (random.randint(2, 20) if self.weapon == 'Enchanted Sword'
                else random.randint(1, 10))
        return int(roll * (self.strength + 50) / 100)

    def __str__(self):
        return """{} the {}
Dexterity: {}
Strength: {}
Hitpoints: {} (of {})
Armor: {}
Weapon: {}
Other Items: {}
""".format(self.name, self.race, self.dexterity, self.strength, self.hitpoints,
           self.max_hitpoints, self.armor, self.weapon,
           ', '.join(sorted(self.inventory)) or '<none>')
class CharacterDeadException(BaseException):
    """Raised when the player's hitpoints reach zero.

    The original constructor silently discarded its argument; keep a
    reference so handlers can inspect the fallen character.
    """

    def __init__(self, character):
        super().__init__()
        self.character = character
def select_character():
    """Interactively pick a race, roll its stats and name the character."""
    print('What race do you want to be?')
    print('')
    print('Elf - Fast but not very strong')
    print('Human - Jack of all trades, master of none')
    print('Orc - Strong but slow')
    print('')
    r = ''
    while not r or r[0] not in 'EHO':
        r = input('Enter (E)lf, (H)uman or (O)rc: ').upper().strip()
    # race -> (label, dexterity range, strength range, hitpoint range, title).
    # Rolls happen in the same order as before: dexterity, strength, hitpoints.
    rolls = {
        'E': ('Elf', (75, 100), (25, 50), (50, 100), 'wise Elf'),
        'H': ('Human', (25, 75), (25, 75), (100, 150), 'bold Human'),
        'O': ('Orc', (25, 50), (75, 100), (150, 200), 'strong Orc'),
    }
    race, dex_range, str_range, hp_range, title = rolls[r[0]]
    dexterity = random.randint(*dex_range)
    strength = random.randint(*str_range)
    hitpoints = random.randint(*hp_range)
    name = input('What is your name, {}? '.format(title))
    character = Character(name, race, dexterity, strength, hitpoints)
    print('')
    print(character)
    print('')
    return character
class Monster:
    """An enemy: combat stats plus the verb phrases used in fight messages."""

    def __init__(self, name, hitpoints, dexterity, hitname, missname,
                 attack_min_damage, attack_max_damage):
        self.name, self.hitpoints, self.dexterity = name, hitpoints, dexterity
        # Flavour text: printed when the monster hits / misses the player.
        self.hitname, self.missname = hitname, missname
        # Inclusive randint bounds for the monster's attack damage.
        self.attack_min_damage = attack_min_damage
        self.attack_max_damage = attack_max_damage
def generate_hit_roll():
    """Roll a d100 attack check with a flat +20 bonus (range 20-120)."""
    return 20 + random.randint(0, 100)
def proceed_after_fight(character, monster):
    """Run the turn-based fight loop between `character` and `monster`.

    Returns True when the monster is killed, False when the player flees.
    Raises CharacterDeadException when the player's hitpoints reach zero.
    Each round: monster attacks first, then the player may attack or flee.
    """
    while True:
        print('')
        # Monster's attack: a roll at or above the player's dexterity hits.
        hit = generate_hit_roll()
        if hit >= character.dexterity:
            damage = random.randint(monster.attack_min_damage,
                                    monster.attack_max_damage)
            character.hitpoints -= damage
            print('The {} {} for {} damage. You have {} hitpoints remaining.'.format(
                monster.name, monster.hitname, damage, character.hitpoints))
        else:
            print('The {} {} - but you dodge away!'.format(monster.name,
                                                           monster.missname))
        if character.hitpoints <= 0:
            raise CharacterDeadException(character)
        c = ''
        while not c or c[0] not in 'AF':
            c = input('What do you want to do? (A)ttack or (F)lee? ').strip().upper()
        if c[0] == 'F':
            print('You cowardly run back to the snowy plains.')
            print('')
            return False
        # Player's attack: a roll at or above the monster's dexterity hits.
        hit = generate_hit_roll()
        if hit >= monster.dexterity:
            damage = character.get_damage()
            monster.hitpoints -= damage
            if monster.hitpoints > 0:
                print('You swing your {} at the {} and hit it for {} damage. It has {} '
                      'hitpoints remaining.'.format(character.weapon, monster.name,
                                                    damage, monster.hitpoints))
            else:
                print("You swing your mighty {} at the {}. It's body will lay as an "
                      'example to others who dare to confront you.'.format(
                          character.weapon, monster.name))
                return True
        else:
            print('You swing your mighty {} at the {} but hit nothing but air! Maybe '
                  "you aren't cut out for adventuring..."
                  .format(character.weapon, monster.name))
def proceed_after_random_fight(character):
    """Pick a random wandering monster and fight it.

    Note all five candidate monsters are constructed (consuming RNG rolls)
    before one is chosen.  Returns proceed_after_fight's result.
    """
    monster = random.choice([
        Monster('Giant Snake',
                random.randint(5, 20),
                random.randint(10, 50), 'slashes you with its giant fangs',
                'strikes at you with its giant fangs', 1, 5),
        Monster('Giant Spider',
                random.randint(1, 10),
                random.randint(1, 10), 'bites you with its poisonous fangs',
                'jumpes to bit you', 5, 20),
        Monster('Skeleton',
                random.randint(1, 10),
                random.randint(10, 20), 'stabs you with its ice sword',
                'swings at you with its ice sword', 2, 10),
        Monster('Zombie',
                random.randint(1, 10),
                random.randint(10, 20), 'cruches you with its decaying arms',
                'tries to grab you with its decaying arms', 2, 10),
        Monster('Orc',
                random.randint(2, 50),
                random.randint(10, 20), 'smashes you with its mace',
                'swings at you with its mace', 20, 50),
    ])
    print('You are attacked by a {}!'.format(monster.name))
    return proceed_after_fight(character, monster)
def go_north(character):
    """Beautiful Meadow: an endless chain of random fights; the only way
    back is to flee (or die)."""
    print(NORTH_TEXT)
    while True:
        if not proceed_after_random_fight(character):
            return
        print('')
        print(NORTH_CONTINUE_TEXT)
def go_east(character):
    """Mount Doom: dodge flying debris, then fight the Evil Tree.

    Winning grants a Golden Ingot and a strength potion and marks the area
    complete; enchanted armor lets debris bounce off harmlessly.
    """
    print(EAST_TEXT)
    for i in range(0, 150, 25):
        # Each step the debris roll gets harder to dodge.
        if random.randint(0, i) > character.dexterity:
            print('A block of pyroclastic debris flies towards you. You attempt to '
                  'dodge but are\ntoo slow.')
            print('')
            if 'Enchanted' in character.armor:
                print('The debris hits your {} and bounces off harmlessly.'.format(
                    character.armor))
                print('')
                break
            else:
                print(
                    'It crushes you into a smoldering pile of bones and burned flesh.')
                print('')
                raise CharacterDeadException(character)
        else:
            print('A block of pyroclastic debris flies towards you but you manage to '
                  'dodge out\nof the way.')
        c = ''
        while not c or c[0] not in 'CF':
            c = input('Do you go (C)ontinue of (F)lee? ').upper().strip()
        if c[0] == 'F':
            print('You cowardly run back to the snowy plains.')
            return
    if EAST_AREA in character.completed_areas:
        print(
            'At the peak of the volcano, you see the evil tree that you previously'
            ' defeated. You walk back to the snowy clearing.')
        print('')
        return
    print('')
    print(EAST_TREE)
    evil_tree = Monster('Evil Tree',
                        random.randint(50, 100),
                        random.randint(0, 5),
                        'cruches you with its huge branches',
                        'swings its huge branches towards you', 5, 15)
    if proceed_after_fight(character, evil_tree):
        print(EAST_TREE_WIN)
        character.inventory.add('Golden Ingot')
        # BUG FIX: the original computed `strength = character.strength +
        # randint(10, 50)` and then did `character.strength += strength`,
        # which more than doubled strength and reported the inflated figure.
        # The potion grants only the rolled bonus.
        gained = random.randint(10, 50)
        character.strength += gained
        print(
            'You finish drinking the potion of strength and gain {} strength. You '
            'now have {} strength.'.format(gained, character.strength))
        print('')
        print('You feel like a titan!')
        print('')
        print('You walk back to the snowly clearing')
        print('')
        character.completed_areas.add(EAST_AREA)
def go_south(character):
    """Forest: fight five increasingly tough Zombie Pigmen one at a time;
    clearing them all yields the Invisibility Potion."""
    if 'Invisibility Potion' in character.inventory:
        print(SOUTH_ALREADY_DONE)
        return
    print(SOUTH_TEXT)
    for wave in range(1, 6):
        pigman = Monster('Zombie Pigman #{}'.format(wave),
                         random.randint(wave * 5, wave * 10),
                         random.randint(25, 75), 'stabs you with its wicked sword',
                         'swings its sword at you', wave, wave * 5)
        if not proceed_after_fight(character, pigman):
            return
    print(SOUTH_END)
    character.inventory.add('Invisibility Potion')
def go_west(character):
    """Swamp: answer the giant's riddle (or beat him) to use the crafting
    table, which enchants the player's weapon and armor."""
    if WEST_AREA in character.completed_areas:
        print(WEST_ALREADY_COMPLETED)
        print('')
        return
    print(WEST_TEXT)
    answer = input('What gets wetter as it dries? ').strip()
    guess = answer.lower()
    if not any(word in guess for word in ('towel', 'sponge')):
        print('"{0}"? "{0}"?! screams the giant. I will smash you into paste!'.
              format(answer))
        print('')
        giant = Monster('Friendly Giant', 500, 50, 'smashes you with a giant fist',
                        'tries to step on you', 15, 50)
        if not proceed_after_fight(character, giant):
            return
    else:
        print(
            """Yes, towels (and sponges) get wetter as they dry, smiles the giant. He walks away humming."""
        )
        print('')
    print(WEST_COMPLETED)
    character.armor = 'Enchanted ' + character.armor
    character.weapon = 'Enchanted ' + character.weapon
    character.completed_areas.add(WEST_AREA)
def go_down(character):
    """Cave: the endgame area.

    Without enchanted (glowing) armor the player takes chip damage forever
    and can only flee or die.  Without the Invisibility Potion the guard
    stream never ends, so the tower is unreachable.  With both, the player
    fights the Bad King; victory prints the ending and exits the program.
    """
    if 'Enchanted' not in character.armor:
        while True:
            character.hitpoints -= 5
            print(
                'The cave is dark and you stubble around until you bump you head on '
                'the ceiling.')
            print('You take {} damage. You have {} hitpoints remaining.'.format(
                5, character.hitpoints))
            if character.hitpoints <= 0:
                raise CharacterDeadException(character)
            c = ''
            while not c or c[0] not in 'CF':
                c = input('Do you want to (C)ontinue or (F)lee? ').upper().strip()
            if c and c[0] == 'F':
                print(
                    'You cowardly run back to the snowy plains after a little bump on '
                    'the head.')
                print('')
                return
    if 'Invisibility Potion' not in character.inventory:
        print(DOWN_VISIBLE)
        # itertools.repeat makes the guard stream endless: without the potion
        # the only exits are fleeing (return) or dying.
        for guard_name in itertools.chain(['Guard', 'Guard', 'Strong Guard'],
                                          itertools.repeat('Elite Guard')):
            print(
                'You are spotted by a {} who immediately rushes to defend his king!'.
                format(guard_name))
            if guard_name == 'Guard':
                guard = Monster(guard_name,
                                random.randint(1, 10),
                                random.randint(25, 50), 'stabs you with his spear',
                                'stabs at you with his spear', 1, 10)
            elif guard_name == 'Strong Guard':
                guard = Monster(guard_name,
                                random.randint(10, 20),
                                random.randint(25, 50), 'hits you with his battle axe',
                                'swings his battle axe at you', 2, 20)
            else:
                guard = Monster(guard_name,
                                random.randint(40, 80),
                                random.randint(50, 100),
                                'smashes you with his war hammer',
                                'swings his war hammer at you', 5, 50)
            if not proceed_after_fight(character, guard):
                return
    # NOTE(review): only reachable via the potion branch — the guard loop
    # above never falls through.
    character.inventory.remove('Invisibility Potion')
    print(DOWN_INVISIBLE.format(name=character.name))
    evil_king = Monster('Bad King', 100, 50, 'hits you with his enchanted staff',
                        'swings at you with his enchanted staff', 5, 10)
    if not proceed_after_fight(character, evil_king):
        return
    print(ENDING.format(name=character.name))
    sys.exit(0)
def select_path(character):
    """Hub loop: heal the player, then dispatch on the chosen direction."""
    destinations = {
        'N': go_north,
        'E': go_east,
        'S': go_south,
        'W': go_west,
        'D': go_down,
    }
    while True:
        print(DIRECTION_CHOICE)
        # Resting in the clearing restores the player to full health.
        character.hitpoints = character.max_hitpoints
        c = ''
        while not c or c[0] not in 'NESWDP':
            c = input('Do you go (N)orth (E)ast (S)outh (W)est (D)own '
                      'or (P)rint Character Information? ').upper().strip()
        choice = c[0]
        if choice == 'P':
            print(character)
            print('')
        else:
            destinations[choice](character)
def main():
    """Create a character and run the game until victory or death."""
    try:
        hero = select_character()
        print(WELCOME_TEXT.format(name=hero.name))
        select_path(hero)
    except CharacterDeadException:
        print("You died. Try again and maybe you'll get lucky.")
# Entry point: clear the terminal before showing the race-selection menu.
if __name__ == '__main__':
    clear()
    main()
| 18,424 | 6,017 |
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""We implement an online learning non-stationary linear regression problem.
We go there progressively by showing how a linear regression problem can be cast
into an online learning problem thanks to the `OnlineSupervisedLearner` module.
Then, in order to tackle a non-stationary linear regression problem (i.e. with a weight that can vary in time)
we reformulate the problem into a reinforcement learning problem that we implement with the `GymFeedBack` module of WAX-ML.
We then need to define an "agent" and an "environment" using simple functions or modules:
- The agent is responsible for learning the weights of its internal linear model.
- The environment is responsible for generating labels and evaluating the agent's reward metric.
We experiment with a non-stationary environment that returns the sign of the linear regression parameters at a given time step,
known only to the environment.
We will see that doing this is very simple with the WAX-ML tools and that the functional workflow it adopts
allows, each time we increase in complexity, to reuse the previously implemented transformations.
In this journey, we will use:
- Haiku basic linear module `hk.Linear`.
- Optax stochastic gradient descent optimizer: `sgd`.
- WAX-ML modules: `OnlineSupervisedLearner`, `Lag`, `GymFeedBack`
- WAX-ML helper functions: `dynamic_unroll`, `jit_init_apply`
"""
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from jax.tree_util import tree_map
from wax.compile import jit_init_apply
from wax.modules import GymFeedback, Lag, OnlineSupervisedLearner
from wax.unroll import unroll
@jit_init_apply
@hk.transform_with_state
def linear_model(x):
    """Single linear layer without bias: y = x @ w (output size 1)."""
    return hk.Linear(output_size=1, with_bias=False)(x)
def test_static_model():
    """Static linear regression: unroll the Haiku linear model over a batch
    and check the (untrained) mean squared loss is positive."""
    seq = hk.PRNGSequence(42)
    T = 100
    N = 3
    X = jax.random.normal(next(seq), (T, N))
    w_true = jnp.ones(N)
    # Initialize parameters from the first observation and smoke-test apply.
    params, state = linear_model.init(next(seq), X[0])
    linear_model.apply(params, state, None, X[0])
    Y_pred = unroll(linear_model, rng=next(seq))(X)
    assert Y_pred.shape == (T, 1)
    noise = jax.random.normal(next(seq), (T,))
    Y = X.dot(w_true) + noise
    # Squeeze the (T, 1) predictions to (T,) before subtracting: the original
    # (T,) - (T, 1) silently broadcast to a (T, T) matrix of cross
    # differences instead of per-sample residuals.
    mean_loss = ((Y - Y_pred.squeeze(axis=-1)) ** 2).mean()
    assert mean_loss > 0
def generate_many_observations(T=300, sigma=1.0e-2, rng=None):
    """Generate T observations of the noisy linear model Y = X @ w_true + noise.

    Parameters
    ----------
    T : int, number of observations.
    sigma : float, noise scale.
    rng : optional jax PRNG key (defaults to PRNGKey(42)).

    Returns
    -------
    (X, Y) with X of shape (T, 3) and Y of shape (T,).
    """
    rng = jax.random.PRNGKey(42) if rng is None else rng
    X = jax.random.normal(rng, (T, 3))
    w_true = jnp.ones(3)
    # The original computed this draw twice with the same key; the two draws
    # were identical, so a single draw preserves the output exactly.
    noise = sigma * jax.random.normal(rng, (T,))
    Y = X.dot(w_true) + noise
    return (X, Y)
def test_online_model():
    """Online learning: wrap the linear model in an OnlineSupervisedLearner
    driven by SGD and unroll it over a generated sequence."""
    opt = optax.sgd(1e-3)

    @jax.jit
    def loss(y_pred, y):
        return jnp.mean(jnp.square(y_pred - y))

    @jit_init_apply
    @hk.transform_with_state
    def learner(x, y):
        return OnlineSupervisedLearner(linear_model, opt, loss)(x, y)

    seq = hk.PRNGSequence(42)
    # generate data
    T = 300
    X, Y = generate_many_observations(T)
    # dynamic unroll the learner over the whole sequence (the original also
    # sliced out x0/y0 with tree_map, but those locals were never used).
    (output, info) = unroll(learner, rng=next(seq))(X, Y)
    assert len(info.loss) == T
    assert len(info.params["linear"]["w"])
def linear_regression_agent(obs):
    """Agent: an online linear-regression learner fed with (x, y) observations."""
    features, target = obs
    optimizer = optax.sgd(1e-3)

    @jax.jit
    def loss(y_pred, y):
        return jnp.mean(jnp.square(y_pred - y))

    learner = OnlineSupervisedLearner(linear_model, optimizer, loss)
    return learner(features, target)
def stationary_linear_regression_env(y_pred, raw_obs):
    """Environment: generates targets from a fixed weight vector and rewards
    the agent with the squared error of its prediction against the lagged
    target.

    Returns (reward, obs, info) where obs = (x, y).
    """
    # Only the environment knows the true value of the parameters
    w_true = -jnp.ones(3)

    # The environment has its proper loss definition
    @jax.jit
    def loss(y_pred, y):
        return jnp.mean(jnp.square(y_pred - y))

    # raw observation contains features and generative noise
    x, noise = raw_obs
    # generate targets
    y = x @ w_true + noise
    obs = (x, y)
    # Lag(1) exposes the previous step's target: the agent's action at step t
    # is a prediction for the target of step t-1 in this feedback loop.
    y_previous = Lag(1)(y)
    # evaluate the prediction made by the agent
    reward = loss(y_pred, y_previous)
    info = {}
    return reward, obs, info
def generate_many_raw_observations(T=300, sigma=1.0e-2, rng=None):
    """Return raw observations (features, noise) for T steps."""
    key = jax.random.PRNGKey(42) if rng is None else rng
    features = jax.random.normal(key, (T, 3))
    noise = sigma * jax.random.normal(key, (T,))
    return (features, noise)
def test_online_recast_as_reinforcement_learning_pb():
    """Recast online supervised learning as a reinforcement-learning loop.

    The agent/environment pair is wired through GymFeedback and unrolled;
    skip_first drops the first transition, hence the T - 1 lengths below.
    """
    # # Online supervised learning recast as a reinforcement learning problem
    # obs = (x, y) are tuple observations.
    # raw_obs = (x, noise) consist in the feature and input noise.
    @hk.transform_with_state
    def gym_fun(raw_obs):
        return GymFeedback(
            linear_regression_agent,
            stationary_linear_regression_env,
            return_action=True,
        )(raw_obs)

    T = 300
    raw_observations = generate_many_raw_observations(T)
    rng = jax.random.PRNGKey(42)
    (gym_output, gym_info) = unroll(gym_fun, rng=rng, skip_first=True)(
        raw_observations,
    )
    assert len(gym_output.reward) == T - 1
    assert len(gym_info.agent.loss) == T - 1
    assert len(gym_info.agent.params["linear"]["w"]) == T - 1
class NonStationaryEnvironment(hk.Module):
    """Environment whose true regression weights flip sign at step 2000.

    A step counter is kept in Haiku state; the reward is the squared error
    of the agent's action against the lagged target.
    """

    def __call__(self, action, raw_obs):
        # Persistent per-instance step counter, starts at 0.
        step = hk.get_state("step", [], init=lambda *_: 0)
        # Only the environment knows the true value of the parameters
        # at step 2000 we flip the sign of the true parameters !
        # NOTE(review): hk.cond is called here with a
        # (pred, operand, true_fn, operand, false_fn) argument layout —
        # confirm this matches the installed haiku version's signature.
        w_true = hk.cond(
            step < 2000,
            step,
            lambda step: -jnp.ones(3),
            step,
            lambda step: jnp.ones(3),
        )

        # The environment has its proper loss definition
        @jax.jit
        def loss(y_pred, y):
            return jnp.mean(jnp.square(y_pred - y))

        # raw observation contains features and generative noise
        x, noise = raw_obs
        # generate targets
        y = x @ w_true + noise
        obs = (x, y)
        # evaluate the prediction made by the agent
        y_previous = Lag(1)(y)
        y_pred = action
        reward = loss(y_pred, y_previous)
        # advance the persistent step counter
        step += 1
        hk.set_state("step", step)
        info = {}
        return reward, obs, info
def test_non_stationary_environement():
    """Run the gym feedback loop against the non-stationary environment.

    With T = 300 the sign flip at step 2000 never triggers; the test only
    checks the unroll plumbing and output lengths (skip_first drops one
    transition, hence T - 1).
    """
    # ## Non-stationary environment
    # Now, let's implement a non-stationary environment
    # Now let's run a gym simulation to see how the agent adapt to the change of environment.
    @hk.transform_with_state
    def gym_fun(raw_obs):
        return GymFeedback(
            linear_regression_agent, NonStationaryEnvironment(), return_action=True
        )(raw_obs)

    T = 300
    raw_observations = generate_many_raw_observations(T)
    rng = jax.random.PRNGKey(42)
    (gym_output, gym_info), final_state = unroll(
        gym_fun, return_final_state=True, skip_first=True, rng=rng
    )(raw_observations)
    assert len(gym_output.reward) == T - 1
    assert len(gym_info.agent.loss) == T - 1
    assert len(gym_info.agent.params["linear"]["w"]) == T - 1
| 7,649 | 2,603 |
"""Functions to create objects to add to the visualizer"""
import numpy as np
import torch
from .vtk_object import VTKObject
def _convert_torch_to_numpy(arr):
"""If arr is torch.Tensor, return the numpy equivalent, else return arr
as it is"""
if isinstance(arr, torch.Tensor):
arr = arr.detach().cpu().numpy()
return arr
def create_point_cloud(xyz: np.ndarray, colors=None, cmap=None, color_norm=None,
                       pt_size=1.0, alpha=1.0):
    """Create a point cloud from a NumPy array (or torch.Tensor).

    Only the first three columns of ``xyz`` are used as X, Y, Z coordinates,
    so arrays of shape Nx3 or wider are accepted.  Colors are supplied
    separately via ``colors`` (optionally mapped through ``cmap`` and
    normalized with ``color_norm``), not as extra columns of ``xyz``.

    Returns: VTKObject() which encapsulates the point sources and actors
    """
    xyz = _convert_torch_to_numpy(xyz)
    obj = VTKObject()
    obj.CreateFromArray(xyz[:, :3])
    if colors is not None:
        obj.SetColors(colors, cmap, color_norm)
    # Opacity 1.0 is VTK's default, so only set it when actually translucent.
    if alpha < 1.0:
        obj.actor.GetProperty().SetOpacity(alpha)
    obj.actor.GetProperty().SetPointSize(pt_size)
    return obj
def create_hedgehog_actor(xyz, normals, scale=1.0):
    """Create a hedgehog actor drawing one normal vector per point,
    scaled by *scale*."""
    hedgehog = VTKObject()
    hedgehog.CreateFromArray(xyz)
    hedgehog.AddNormals(normals)
    hedgehog.SetupPipelineHedgeHog(scale)
    return hedgehog
def create_axes(length):
    """Create coordinate system axes with specified length"""
    axes = VTKObject()
    axes.CreateAxes(length)
    return axes
def create_sphere(origin, r=1.0, color=None):
    """Create a sphere with given origin (x,y,z) and radius r"""
    center = _convert_torch_to_numpy(origin)
    sphere = VTKObject()
    sphere.CreateSphere(center, r, color)
    return sphere
def create_cylinder(origin, r=1.0, h=1.0):
    """Create a cylinder with given origin (x,y,z), radius r and height h"""
    cylinder = VTKObject()
    cylinder.CreateCylinder(origin, r, h)
    return cylinder
def create_plane(normal=None, origin=None):
    """Create a plane (optionally with a given normal vector and origin)
    Note: SetActorScale can be used to scale the extent of the plane"""
    plane = VTKObject()
    plane.CreatePlane(normal, origin)
    return plane
def create_box(bounds):
    """Create a box with the given bounds=[xmin,xmax,ymin,ymax,zmin,zmax]"""
    box = VTKObject()
    box.CreateBox(bounds)
    return box
def create_line(p1, p2):
    """Create a 3D line from p1=[x1,y1,z1] to p2=[x2,y2,z2]"""
    line = VTKObject()
    line.CreateLine(p1, p2)
    return line
def create_lines(lines, line_color=(1.0, 1.0, 1.0), line_width=1):
    """Create multiple 3D lines
    Args:
        lines: List of 3D lines, each element is [x1, y1, z1, x2, y2, z2]
    """
    segments = _convert_torch_to_numpy(lines)
    obj = VTKObject()
    obj.CreateLines(segments, line_color, line_width)
    return obj
| 2,814 | 1,001 |
import sys
import getopt
import trainer
import classifier
def show_help():
    """Print command-line usage for the trainer/classifier tool (Python 2)."""
    print "options: "
    print "-t, --training <training-data-dir>    Enter training mode and use given directory for training data"
    print "-c, --classify <img-dir>              Enter classify mode, using the images in the given directory as input"
    print "-v, --validate                        Perform validation (LOOCV) when clustering in training mode (default: false)"
    print "-o, --output <output-dir>             Location of training data file (in training mode) or classified images (in classify mode) (default: <training-data-dir>/" + trainer.TRAINING_DATA_FILENAME + " training mode, <img-dir>_out in classify mode)"
    print "-a, --algorithm <classifier>          Use either \"bf\" (brute force) or \"hist\" (histogram) as the classifier algorithm (default: \"bf\")"
    print "-d, --data <training-data-file>       Use given file as source of training data"
    print "-r, --results <prefix-value>          [USE WITH -c] Check results after classification by inspecting filenames (filename that starts with the given prefix means it should be classified as a positive)"
def main(argv):
    """Parse command-line arguments and dispatch to training or
    classification.

    argv is sys.argv[1:]; with no arguments the usage text is printed.
    Exit codes: 1 = no mode selected, 2 = bad options, 3 = bad algorithm.
    """
    if len(argv) == 0:
        show_help()
        sys.exit()
    # Parse parameters.
    training_dir = ""
    output_dir = ""
    query_path = ""
    training_db = ""
    training_mode = False
    classify_mode = False
    classify_mode_alg = classifier.CLASSIFIER_ALG_BF
    results_prefix = ""
    validate = False
    try:
        opts, args = getopt.getopt(argv,"t:vo:c:a:d:r:",["training=","validate=","output=", "classify=", "algorithm=", "data=", "results="])
    except getopt.GetoptError:
        show_help()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-t", "--training"):
            training_dir = arg
            training_mode = True
        elif opt in ("-o", "--output"):
            output_dir = arg
        elif opt in ("-c", "--classify"):
            query_path = arg
            classify_mode = True
        elif opt in ("-a", "--algorithm"):
            if arg == "bf":
                classify_mode_alg = classifier.CLASSIFIER_ALG_BF
            elif arg == "hist":
                classify_mode_alg = classifier.CLASSIFIER_ALG_HIST
            else:
                print "Illegal value for -a/--algorithm: " + arg
                sys.exit(3)
        elif opt in ("-d", "--data"):
            training_db = arg
        elif opt in ("-r", "--results"):
            results_prefix = arg
        elif opt in ("-v", "--validate"):
            validate = True
    # At least one of the two modes must have been requested.
    if not classify_mode and not training_mode:
        show_help()
        sys.exit(1)
    # NOTE(review): if both -t and -c are given, classification wins and
    # training is silently skipped — confirm this is intended.
    if classify_mode:
        classifier.classify(query_path, training_db, output_dir, results_prefix, classify_mode_alg)
    elif training_mode:
        trainer.train_classifier(training_dir, output_dir, validate)
# Run the CLI only when executed as a script; importing this module should
# not trigger a training or classification run as the bare call did.
if __name__ == "__main__":
    main(sys.argv[1:])
| 2,980 | 873 |
import time, cv2, json
from vision.monocular import Monocular, test_publisher_pixel_coordinate
def test_func(var):
return
def main():
    """Run the monocular-camera capture loop and display debug frames until
    the user presses 'q'."""
    cam = Monocular(test_publisher_pixel_coordinate)
    #cam = Monocular(test_func)
    cam.start_loop()
    cam.debug_mode = True
    cam.debug_type = "cont"
    # Give the capture loop time to produce a first debug frame before
    # we start displaying it.
    time.sleep(3)
    while True:
        cv2.imshow("Colour Image", cam.debug_frame_output)
        # Press q if you want to end the loop
        time.sleep(0.1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Signal the capture loop to stop.
    cam.loop_running = False
# Script entry point.
if __name__ == "__main__":
    main()
| 622 | 229 |
# Copyright 2020 Aeva Palecek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .textures import *
from ..syntax.grammar import Pipeline, PipelineOutput
class FrameBufferHandles(SyntaxExpander):
    """Emits the JS declaration of the global framebuffer-handle array;
    「count」 is the number of framebuffers used by the program."""
    template = "let FrameBufferHandles = new Array(「count:int」);"
class BindFrameBuffer(SyntaxExpander):
    """Emits a gl.bindFramebuffer call selecting the framebuffer owned by
    the given pipeline (indexed by pipeline.index)."""
    template = "gl.bindFramebuffer(gl.FRAMEBUFFER, FrameBufferHandles[「handle:int」]);"
    def __init__(self, pipeline:Pipeline):
        SyntaxExpander.__init__(self, pipeline.index)
class BindBackBuffer(SyntaxExpander):
    """Emits the call that rebinds the default backbuffer (null framebuffer)."""
    template = "gl.bindFramebuffer(gl.FRAMEBUFFER, null);"
class FrameBufferAttachment(SyntaxExpander):
    """Emits a gl.framebufferTexture2D call attaching one texture mip level
    to the currently bound framebuffer."""
    template = "gl.framebufferTexture2D(gl.FRAMEBUFFER, 「attachment:str」, gl.TEXTURE_2D, TextureHandles[「tex_handle:int」], 「mip:int」);"
    def __init__(self, pipeline:Pipeline, output:PipelineOutput, attachment:str, mip:int):
        SyntaxExpander.__init__(self)
        # NOTE(review): rt_handle is stored but not referenced by the
        # template above — presumably informational; confirm it is needed.
        self.rt_handle = pipeline.index
        self.tex_handle = output.handle
        self.attachment = attachment
        assert(mip >= 0)
        self.mip = mip
class DepthAttachment(SyntaxExpander):
    """Attaches the pipeline's depth target as gl.DEPTH_ATTACHMENT.

    NOTE(review): `CAST` is assumed to come from the star import at the top
    of the file — confirm.
    """
    def __init__(self, pipeline:Pipeline, mip:int = 0):
        FrameBufferAttachment.__init__(self, pipeline, CAST(PipelineOutput, pipeline.depth_target), "gl.DEPTH_ATTACHMENT", mip)
class ColorAttachment(FrameBufferAttachment):
    """Attaches one of the pipeline's color targets as
    gl.COLOR_ATTACHMENT<i>, where i is the output's color index."""
    def __init__(self, pipeline:Pipeline, output:PipelineOutput, mip:int = 0):
        assert(output in pipeline.color_targets)
        FrameBufferAttachment.__init__(self, pipeline, output, f"gl.COLOR_ATTACHMENT{str(output.color_index)}", mip)
class DrawBuffers(SyntaxExpander):
    """Emits a gl.drawBuffers([...]) call listing every attachment the
    pipeline renders to, depth first then color attachments in index order."""
    template = """
gl.drawBuffers([
「attachments」
]);
""".strip()
    indent = ("attachments",)
    def __init__(self, pipeline:Pipeline):
        SyntaxExpander.__init__(self)
        attachments = []
        if pipeline.depth_target:
            attachments.append("gl.DEPTH_ATTACHMENT")
        # Must be an f-string: the original plain string emitted the literal
        # text "gl.COLOR_ATTACHMENT{str(i)}" instead of the attachment index.
        attachments += [f"gl.COLOR_ATTACHMENT{i}" for i in range(len(pipeline.color_targets))]
        self.attachments = attachments
class CreateFrameBuffer(SyntaxExpander):
    """Emits the block that creates and fully configures one framebuffer:
    create, bind, attach color/depth targets, set draw buffers when there
    are multiple color targets, and rebind the backbuffer."""
    template = """
{
	FrameBufferHandles[「handle:int」] = gl.createFramebuffer();
	「expanders」
}
""".strip()
    indent = ("expanders",)
    def __init__(self, pipeline:Pipeline):
        SyntaxExpander.__init__(self)
        self.handle = pipeline.index
        # NOTE(review): `List` is assumed to come from the star import at the
        # top of the file — confirm.
        self.expanders:List[SyntaxExpander] = []
        self.expanders.append(BindFrameBuffer(pipeline))
        for color_target in pipeline.color_targets:
            self.expanders.append(ColorAttachment(pipeline, color_target))
        if pipeline.depth_target:
            self.expanders.append(DepthAttachment(pipeline))
        # gl.drawBuffers is only required when rendering to several targets.
        if len(pipeline.color_targets) > 1:
            self.expanders.append(DrawBuffers(pipeline))
        self.expanders.append(BindBackBuffer())
class RebuildFrameBuffer(CreateFrameBuffer):
    """Same expansion as CreateFrameBuffer but without re-creating the
    handle — used when re-attaching targets after a resize."""
    template = """
{
	// recreate framebuffer "「name:str」"
	「expanders」
}
""".strip()
    indent = ("expanders",)
    def __init__(self, pipeline:Pipeline):
        CreateFrameBuffer.__init__(self, pipeline)
        self.name = pipeline.name
class SetupFrameBuffers(SyntaxExpander):
    """Emits the framebuffer-creation blocks for every pipeline that does
    not render directly to the backbuffer."""
    template = "「wrapped」"
    def __init__(self, env:Program):
        SyntaxExpander.__init__(self)
        self.wrapped:List[SyntaxExpander] = [CreateFrameBuffer(pipeline) for pipeline in env.pipelines.values() if not pipeline.uses_backbuffer]
class ResizeFrameBuffers(SyntaxExpander):
    """Emits the code run on canvas resize: resize every render-target
    texture, then rebuild each framebuffer that attaches to them."""
    template = """
「wrapped」
""".strip()
    def __init__(self, env:Program):
        SyntaxExpander.__init__(self)
        self.wrapped:List[SyntaxExpander] = []
        # (The original also computed `pipelines` and `texture_names` here,
        # but never used either — removed.)
        # Resize the textures first so the rebuilt framebuffers attach to
        # the freshly sized textures.
        for texture in env.all_target_textures:
            self.wrapped.append(ResizeTexture(texture))
        for pipeline in env.pipelines.values():
            if not pipeline.uses_backbuffer:
                self.wrapped.append(RebuildFrameBuffer(pipeline))
| 4,562 | 1,426 |
import webbrowser as wb
from modules import converter
class UserInput(converter.LinkConverter):
    """Dispatches a user-supplied URL or DOI name to the link converter and
    opens the resulting access link in the default web browser."""

    def __init__(self, libs, url):
        self.libs = libs
        self.url = url

    def _invalid_input(self):
        """Report an unusable input and explain the accepted formats."""
        print(f"\x1b[0;31mERROR: \x1b[0m{self.url} is invalid URL\n")
        print("Please, enter a valid url or doi name.")
        print("The url should start with 'http'")
        print("If it is a doi name, the prefix should start with '10.'")

    def _open(self, link):
        # Convert the link for the configured libraries and open it.
        access_link = converter.LinkConverter(link, self.libs)
        wb.open(access_link.convert_url())

    def _convert_doi(self):
        """Resolve a bare DOI name through doi.org and open it."""
        self._open('https://doi.org/' + self.url)

    def _convert_http(self):
        """Open a full http(s) URL directly."""
        self._open(self.url)

    def _convert_url(self):
        """Dispatch on the input's prefix: DOI, URL, or invalid."""
        if self.url.startswith('10'):
            self._convert_doi()
        elif self.url.startswith('http'):
            self._convert_http()
        else:
            self._invalid_input()
            exit()

    def _is_empty_input(self):
        """Prompt interactively when no URL/DOI was supplied."""
        self.url = input("Enter url/doi:")
        self._convert_url()

    def check_user_input(self):
        """Entry point: convert the stored URL, prompting first if missing."""
        if self.url is None:
            self._is_empty_input()
        else:
            self._convert_url()
import os
import unittest
import mock
from chat_unifier.file_iterators import trillian_xml
class TrillianXmlFileIteratorTest(unittest.TestCase):
    """Checks which files the Trillian XML log iterator yields."""

    def setUp(self):
        self.maxDiff = None

    def test_picks_correct_trillian_log_files(self):
        # Only the per-contact .xml logs should be yielded; the companion
        # -assets.xml / .log files and unrelated files are skipped.
        fake_tree = [
            ('/logs', ('AIM', 'junk'), ('README.txt',)),
            ('/logs/AIM', ('Query',), ()),
            ('/logs/AIM/Query', (),
             ('DummyBuddy123.xml', 'DummyBuddy123-assets.xml',
              'DummyBuddy123.log', 'DummyBuddy234.xml',
              'DummyBuddy234-assets.xml', 'DummyBuddy234.log')),
            ('/logs/junk', (), ('junk.png',)),
        ]
        with mock.patch.object(os, 'walk') as mock_walk:
            mock_walk.return_value = fake_tree
            found = list(trillian_xml.iterate_files('/logs'))
        self.assertEqual(
            ['/logs/AIM/Query/DummyBuddy123.xml',
             '/logs/AIM/Query/DummyBuddy234.xml'],
            found)
| 959 | 342 |
# -*- coding: utf-8 -*-
__version__ = '0.1.dev0'
from .multigp import MultiGP
from .pca import PCA
from .util import *
from . import priors
from george import kernels
| 169 | 66 |
import os
import math
from PIL import Image
def pack_directory(path):
    """Pack every numbered ``image N.png`` in *path* into one sprite sheet
    (32 columns) written to ``<path>/output.png``.

    Each source image is centered in a cell whose width/height is the
    maximum over all images.  Does nothing when no matching file exists
    (the original crashed on an empty directory).
    """
    def name_comparer(entry):
        # Strip the 'image ' prefix and '.png' suffix to obtain the frame number.
        return int(entry.name.strip('image .png'))

    files = [e for e in os.scandir(path) if e.name.endswith('png') and not e.name.startswith('output')]
    if not files:
        return
    # os.scandir order is arbitrary: sort numerically using name_comparer
    # (defined but never applied in the original), then reverse as before.
    files.sort(key=name_comparer)
    files.reverse()
    images = [Image.open(e.path) for e in files]
    cell_w = max(i.size[0] for i in images)
    cell_h = max(i.size[1] for i in images)
    column = 32
    row = math.ceil(len(images) / column)
    output = Image.new('RGBA', (cell_w * column, cell_h * row))
    for count, img in enumerate(images):
        c = count % column
        r = count // column
        # Center of the target cell.
        x_center = c * cell_w + cell_w // 2
        y_center = r * cell_h + cell_h // 2
        width, height = img.size
        output.paste(img, (x_center - width // 2, y_center - height // 2))
    output.save(os.path.join(path, 'output.png'))
if __name__ == '__main__':
    # Guarded so importing this module does not trigger a pack run.
    # Hard-coded export directory; change it here to repack another folder.
    path = r'E:\export\fuwen'
    pack_directory(path)
"""Modules for graph embedding methods."""
import logging
import gensim
import networkx as nx
import numpy as np
import pandas as pd
# For GCN
import stellargraph as sg
import tensorflow as tf
from graph_embeddings import samplers, utils
from scipy import sparse
from sklearn import model_selection
from stellargraph.data import UnsupervisedSampler
from stellargraph.layer import GraphSAGE, link_classification
from stellargraph.mapper import GraphSAGELinkGenerator, GraphSAGENodeGenerator
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.callbacks import EarlyStopping
logger = logging.getLogger(__name__)
try:
import glove
except ImportError:
print(
"Ignore this message if you do not use Glove. Otherwise, install glove python package by 'pip install glove_python_binary' "
)
#
# Base class
#
class NodeEmbeddings:
    """Super class for node embedding class."""

    def __init__(self):
        self.in_vec = None
        self.out_vec = None

    def fit(self):
        """Estimating the parameters for embedding."""
        pass

    def transform(self, dim, return_out_vector=False):
        """Compute the coordinates of nodes in the embedding space of the
        prescribed dimensions."""
        # Recompute on the first call, or whenever the requested dimension
        # differs from the cached embedding.
        needs_update = self.out_vec is None or self.out_vec.shape[1] != dim
        if needs_update:
            self.update_embedding(dim)
        return self.out_vec if return_out_vector else self.in_vec

    def update_embedding(self, dim):
        """Update embedding."""
        pass
class Node2Vec(NodeEmbeddings):
    """A python class for the node2vec.
    Parameters
    ----------
    num_walks : int (optional, default 5)
        Number of walks per node
    walk_length : int (optional, default 40)
        Length of walks
    window_length : int (optional, default 10)
    restart_prob : float (optional, default 0)
        Restart probability of a random walker.
    p : node2vec parameter (TODO: Write doc)
    q : node2vec parameter (TODO: Write doc)
    """
    def __init__(
        self,
        num_walks=5,
        walk_length=40,
        window_length=10,
        restart_prob=0,
        p=1.0,
        q=1.0,
        verbose=False,
        random_teleport=False,
    ):
        self.in_vec = None  # In-vector
        self.out_vec = None  # Out-vector
        self.sampler = samplers.SimpleWalkSampler(
            num_walks,
            walk_length,
            window_length,
            restart_prob,
            p,
            q,
            sample_center_context_pairs=False,
            verbose=False,
            random_teleport=random_teleport,
        )
        self.sentences = None
        self.model = None
        self.verbose = verbose
        # Skip-gram word2vec settings; "window" and "vector_size" are filled
        # in later by update_embedding.
        self.w2vparams = {
            "sg": 1,
            "min_count": 0,
            "epochs": 1,
            "workers": 4,
        }
    def fit(self, net):
        """Estimating the parameters for embedding.
        Parameters
        ---------
        net : nx.Graph object
            Network to be embedded. The graph type can be anything if
            the graph type is supported for the node samplers.
        Return
        ------
        self : Node2Vec
        """
        A = utils.to_adjacency_matrix(net)
        self.sampler.sampling(A)
        return self
    def update_embedding(self, dim):
        # Update the dimension and train the model
        # Sample the sequence of nodes using a random walk
        self.w2vparams["window"] = self.sampler.window_length
        self.sentences = utils.walk2gensim_sentence(
            self.sampler.walks, self.sampler.window_length
        )
        self.w2vparams["vector_size"] = dim
        self.model = gensim.models.Word2Vec(sentences=self.sentences, **self.w2vparams)
        num_nodes = len(self.model.wv.key_to_index)
        self.in_vec = np.zeros((num_nodes, dim))
        self.out_vec = np.zeros((num_nodes, dim))
        for i in range(num_nodes):
            # Nodes absent from the word2vec vocabulary keep zero vectors.
            if "%d" % i not in self.model.wv:
                continue
            self.in_vec[i, :] = self.model.wv["%d" % i]
            # syn1neg stores the output (context) vectors of the
            # negative-sampling model.
            self.out_vec[i, :] = self.model.syn1neg[
                self.model.wv.key_to_index["%d" % i]
            ]
class DeepWalk(Node2Vec):
    """Node2Vec variant trained with CBOW and hierarchical softmax,
    matching the original DeepWalk training setup."""

    def __init__(self, **params):
        super().__init__(**params)
        # CBOW with hierarchical softmax instead of skip-gram.
        self.w2vparams.update({"sg": 0, "hs": 1})
class Glove:
    """GloVe embedding trained on center-context co-occurrence counts
    collected by random walks.

    NOTE(review): requires the optional `glove` package
    (glove_python_binary); unlike its siblings this class does not inherit
    NodeEmbeddings and re-implements transform() with the same caching
    logic.
    """
    def __init__(
        self,
        num_walks=5,
        walk_length=40,
        window_length=10,
        restart_prob=0,
        p=1.0,
        q=1.0,
        verbose=False,
    ):
        self.in_vec = None  # In-vector
        self.out_vec = None  # Out-vector
        self.sampler = samplers.SimpleWalkSampler(
            num_walks,
            walk_length,
            window_length,
            restart_prob,
            p,
            q,
            sample_center_context_pairs=True,
            verbose=False,
        )
        self.learning_rate = 0.05
        self.w2vparams = {"epochs": 25, "no_threads": 4}
    def fit(self, net):
        """Sample walks and build the sparse center-context co-occurrence
        matrix used for GloVe training; returns self."""
        A = utils.to_adjacency_matrix(net)
        self.sampler.sampling(A)
        center, context, freq = self.sampler.get_center_context_pairs()
        center = center.astype(int)
        context = context.astype(int)
        N = self.sampler.num_nodes
        self.cooccur = sparse.coo_matrix(
            (freq, (center, context)), shape=(N, N), dtype="double"
        )
        return self
    def transform(self, dim, return_out_vector=False):
        # Update the in-vector and out-vector if
        # (i) this is the first to compute the vectors or
        # (ii) the dimension is different from that
        # for the previous call of transform function
        update_embedding = False
        if self.out_vec is None:
            update_embedding = True
        elif self.out_vec.shape[1] != dim:
            update_embedding = True
        # Update the dimension and train the model
        if update_embedding:
            self.model = glove.Glove(
                no_components=dim, learning_rate=self.learning_rate
            )
            self.model.fit(self.cooccur, **self.w2vparams)
            # GloVe factorizes symmetrically, so in- and out-vectors coincide.
            self.in_vec = self.model.word_vectors
            self.out_vec = self.model.word_vectors
        if return_out_vector:
            return self.out_vec
        else:
            return self.in_vec
class Fairwalk(Node2Vec):
    """Fairness-aware walk embedding: edge weights are rebalanced so a
    walker is equally likely to move to each group of neighbors, then the
    usual walk sampler runs on the reweighted graph."""
    def __init__(self, group_membership=None, **params):
        Node2Vec.__init__(self, **params)
        # group_membership[i] is node i's group id; defaults to a grouping
        # by node degree (computed in fit).
        self.group_membership = group_membership
        self.w2vparams = {
            "sg": 0,
            "hs": 1,
            "min_count": 0,
            "workers": 4,
        }
    def fit(self, net):
        """Reweight transition probabilities per group and sample walks.

        Returns self.
        """
        A = utils.to_adjacency_matrix(net)
        if self.group_membership is None:  # default is degree
            self.group_membership = np.unique(
                np.array(A.sum(axis=1)).reshape(-1), return_inverse=True
            )[1]
        # balance transition probability
        # NOTE(review): the indices/indptr walk below assumes CSR format
        # from utils.to_adjacency_matrix — confirm.
        Ahat = A.copy()
        num_nodes = A.shape[0]
        for i in range(num_nodes):
            # Compute the out-deg
            node_ids = A.indices[A.indptr[i] : A.indptr[i + 1]]
            _, gids, freq = np.unique(
                self.group_membership[node_ids], return_inverse=True, return_counts=True
            )
            # Each neighbor's weight is inverse to its group's frequency, so
            # every group receives equal total transition mass.
            w = 1 / freq[gids]
            Ahat.data[A.indptr[i] : A.indptr[i + 1]] = w
        self.sampler.sampling(Ahat)
        return self
class LaplacianEigenMap(NodeEmbeddings):
    """Laplacian EigenMap embedding from the normalized adjacency matrix."""

    def __init__(self):
        self.in_vec = None
        self.L = None
        self.deg = None

    def fit(self, G):
        """Compute and cache the symmetrically normalized adjacency matrix.

        :param G: network to embed (anything utils.to_adjacency_matrix accepts)
        :return: self
        """
        A = utils.to_adjacency_matrix(G)
        # Compute the (inverse) normalized laplacian matrix
        deg = np.array(A.sum(axis=1)).reshape(-1)
        Dsqrt = sparse.diags(1 / np.maximum(np.sqrt(deg), 1e-12), format="csr")
        L = Dsqrt @ A @ Dsqrt
        self.L = L
        self.deg = deg
        return self

    def transform(self, dim, return_out_vector=False):
        if self.in_vec is None:
            self.update_embedding(dim)
        elif self.in_vec.shape[1] != dim:
            self.update_embedding(dim)
        # Honour return_out_vector like the other embedding classes (the
        # original accepted the flag but always returned the in-vector,
        # leaving the parameter dead).
        return self.out_vec if return_out_vector else self.in_vec

    def update_embedding(self, dim):
        # Request one extra singular vector, then drop the trivial leading one.
        u, s, _ = utils.rSVD(self.L, dim + 1)
        order = np.argsort(s)[::-1][1:]
        u = u[:, order]
        Dsqrt = sparse.diags(1 / np.maximum(np.sqrt(self.deg), 1e-12), format="csr")
        self.in_vec = Dsqrt @ u
        self.out_vec = u
class NetMF(NodeEmbeddings):
    """NetMF.
    Alias of LevyWord2Vec
    """
    def __init__(self, window_length=10, num_neg_samples=1, h=256, **params):
        self.in_vec = None  # In-vector
        self.out_vec = None  # Out-vector
        self.window_length = window_length
        # b: number of negative samples in the factorized objective.
        self.b = num_neg_samples
        # h: rank of the low-rank approximation of the normalized adjacency.
        self.h = h
    def fit(self, net):
        """Estimating the parameters for embedding.
        Parameters
        ---------
        net : nx.Graph object
            Network to be embedded. The graph type can be anything if
            the graph type is supported for the node samplers.
            NOTE(review): this method indexes `net.shape` and `net.sum`,
            i.e. it appears to expect an adjacency matrix rather than an
            nx.Graph — confirm against callers.
        Return
        ------
        self : NetMF
        """
        if self.h is None:
            self.h = np.power(net.shape[0], 0.66)
        if self.h > net.shape[0]:
            self.h = net.shape[0]
        if self.window_length > 4:
            # Long windows: approximate M via a rank-h SVD of the normalized
            # adjacency, averaging its powers over the window.
            logger.debug("Approximiating Mhat")
            deg = np.array(net.sum(axis=0)).reshape(-1)
            Dsqinv = sparse.diags(np.sqrt(1 / np.maximum(deg, 1)))
            Uh, Lamh, Vh = utils.rSVD(Dsqinv @ net @ Dsqinv, self.h)
            S = np.sign(Lamh)
            # out_vec = np.einsum("ij,i->ij", Vh, S)
            Lamh = Lamh * S
            averaged_Lamh = Lamh.copy()
            for t in range(2, self.window_length):
                averaged_Lamh += np.power(Lamh, t)
            averaged_Lamh /= self.window_length
            logger.debug("Computing Mhat")
            Uh = Dsqinv @ Uh @ sparse.diags(np.sqrt(averaged_Lamh))
            self.Mhat = net.sum() * (Uh @ Uh.T) / self.b
        else:
            # Short windows: compute the random-walk matrix power exactly.
            deg = np.array(net.sum(axis=0)).reshape(-1)
            self.Mhat = utils.calc_rwr(utils.to_trans_mat(net), 0, self.window_length)
            self.Mhat = self.Mhat @ np.diag(1 / np.maximum(deg, 1)) * np.sum(deg)
        logger.debug("Thresholding")
        # Shifted PMI: clip at 1 before the log so entries are nonnegative.
        self.Mhat = np.log(np.maximum(self.Mhat, 1))
        return self
    def update_embedding(self, dim):
        # Update the dimension and train the model
        # Sample the sequence of nodes using a random walk
        logger.debug("SVD")
        in_vec, val, out_vec = utils.rSVD(self.Mhat, dim)
        # in_vec, val, out_vec = sparse.linalg.svds(self.Mhat, dim)
        # in_vec, val, out_vec = utils.rSVD(self.Mhat + sparse.diags(np.ones()), dim)
        order = np.argsort(val)[::-1]
        val = val[order]
        # Split the singular values evenly between the two factor matrices.
        alpha = 0.5
        self.in_vec = in_vec[:, order] @ np.diag(np.power(val, alpha))
        self.out_vec = out_vec[order, :].T @ np.diag(np.power(val, 1 - alpha))
class GAT(NodeEmbeddings):
    """A python class for GAT (graph attention network) node embeddings.

    When no node features are supplied, default features are derived from a
    truncated SVD of the normalized adjacency matrix plus node degrees; the
    network is trained to reconstruct those features and the last GAT
    layer's activations form the embedding.
    """

    def __init__(
        self,
        number_of_walks=1,
        batch_size=512,
        epochs=200,
        lr=1e-2,
        num_samples=None,
        layer_sizes=None,
        num_default_features=None,
    ):
        self.in_vec = None  # In-vector
        self.out_vec = None  # Out-vector
        self.number_of_walks = number_of_walks
        self.batch_size = batch_size
        # The original used mutable default arguments here; update_embedding
        # mutates layer_sizes[-1], which would have corrupted the shared
        # default list across instances.  Defaults are now created per call.
        self.num_samples = [25, 10] if num_samples is None else num_samples
        self.epochs = epochs
        self.layer_sizes = [256, 256] if layer_sizes is None else list(layer_sizes)
        self.lr = lr
        self.num_default_features = num_default_features

    def fit(self, net, node_features=None):
        """Prepare the graph for embedding.

        Parameters
        ---------
        net : nx.Graph object
            Network to be embedded. The graph type can be anything if
            the graph type is supported for the node samplers.
        node_features : pandas.DataFrame, optional
            Per-node features; when None, defaults are derived lazily in
            update_embedding.

        Return
        ------
        self : GAT
        """
        # (A dead inner helper `find_blocks_by_sbm`, which was never called
        # and referenced the unimported `faiss` module, was removed.)
        logger.debug("sampling - start")
        A = utils.to_adjacency_matrix(net)
        Gnx = nx.from_scipy_sparse_matrix(A)
        deg = np.array(A.sum(axis=1)).reshape(-1)
        self.deg = np.array([deg[i] for i in Gnx.nodes])
        self.Gnx = Gnx
        self.num_nodes = len(Gnx.nodes)
        self.A = A
        self.node_features = node_features
        return self

    def update_embedding(self, dim):
        """Train the GAT and cache dim-dimensional embeddings in
        in_vec/out_vec."""
        self.layer_sizes[-1] = dim
        if self.node_features is None:
            if self.num_default_features is None:
                self.num_default_features = dim
            # Default features: truncated SVD of the symmetrically
            # normalized adjacency, plus node degree, column-normalized.
            d = np.maximum(1, np.array(self.A.sum(axis=1)).reshape(-1))
            dsqrt = np.sqrt(d)
            L = sparse.diags(1 / dsqrt) @ self.A @ sparse.diags(1 / dsqrt)
            X, s, _ = utils.rSVD(L, self.num_default_features)
            node_features = pd.DataFrame(X)
            node_features["deg"] = self.deg
            X = node_features.values
            X = X @ np.diag(1 / np.maximum(np.linalg.norm(X, axis=0), 1e-12))
            node_features = pd.DataFrame(X)
        else:
            node_features = self.node_features
        self.G = sg.StellarGraph.from_networkx(self.Gnx, node_features=node_features)
        # 50/25/25 train/validation/test split over the feature rows.
        self.train_targets, self.test_targets = model_selection.train_test_split(
            node_features, train_size=0.5
        )
        self.val_targets, self.test_targets = model_selection.train_test_split(
            self.test_targets, train_size=0.5, test_size=None
        )
        generator = sg.mapper.FullBatchNodeGenerator(self.G, method="gat")
        gat = sg.layer.GAT(
            layer_sizes=self.layer_sizes,
            activations=["elu", "softmax"],
            attn_heads=8,
            in_dropout=0.5,
            attn_dropout=0.5,
            normalize=None,
            generator=generator,
        )
        x_inp, x_out = gat.in_out_tensors()
        # Linear head trained to reconstruct the node features.
        predictions = tf.keras.layers.Dense(
            units=self.train_targets.shape[1]
        )(x_out)
        model = tf.keras.Model(inputs=x_inp, outputs=predictions)
        model.compile(
            keras.optimizers.Adam(lr=self.lr),
            loss=tf.keras.losses.MeanSquaredError(),
        )
        all_gen = generator.flow(np.arange(self.num_nodes))
        train_gen = generator.flow(self.train_targets.index, self.train_targets)
        val_gen = generator.flow(self.val_targets.index, self.val_targets)
        es_callback = EarlyStopping(
            monitor="val_loss", patience=100, restore_best_weights=True
        )
        model.fit(
            train_gen,
            epochs=self.epochs,
            validation_data=val_gen,
            shuffle=False,  # shuffling here would shuffle the whole graph
            callbacks=[es_callback],
        )
        # The embedding is the output of the last GAT layer.
        embedding_model = Model(inputs=x_inp, outputs=x_out)
        emb = embedding_model.predict(all_gen).reshape((self.num_nodes, dim))
        self.in_vec = emb.copy()
        self.out_vec = emb.copy()
        return self
class GCN(NodeEmbeddings):
"""A python class for GCN."""
    def __init__(
        self,
        number_of_walks=1,
        batch_size=512,
        epochs=200,
        lr=1e-2,
        num_samples=[25, 10],
        layer_sizes=[256, 256],
        num_default_features=None,
    ):
        # NOTE(review): num_samples and layer_sizes are mutable default
        # arguments shared across instances, and update_embedding mutates
        # layer_sizes[-1] — consider None defaults with per-call lists.
        self.in_vec = None  # In-vector
        self.out_vec = None  # Out-vector
        self.number_of_walks = number_of_walks
        self.batch_size = batch_size
        self.num_samples = num_samples
        self.epochs = epochs
        self.layer_sizes = layer_sizes
        self.lr = lr
        self.import_lib()
        self.num_default_features = num_default_features
    def import_lib(self):
        """Import the optional heavy dependencies at construction time.

        NOTE(review): all names imported here are function-local and
        discarded when the method returns — this effectively only verifies
        the packages are installed (the module-level imports at the top of
        the file are what the rest of the class actually uses).
        """
        import stellargraph as sg
        import tensorflow as tf
        from sklearn import model_selection, preprocessing
        from sklearn.linear_model import LinearRegression, LogisticRegression
        from stellargraph.data import UnsupervisedSampler
        from stellargraph.layer import GraphSAGE, link_classification
        from stellargraph.mapper import (
            FullBatchNodeGenerator,
            GraphSAGELinkGenerator,
            GraphSAGENodeGenerator,
        )
        from tensorflow import keras
        from tensorflow.keras import Model, layers, losses, metrics, optimizers
        from tensorflow.keras.callbacks import EarlyStopping
def fit(self, net, node_features=None):
"""Estimating the parameters for embedding.
Parameters
---------
net : nx.Graph object
Network to be embedded. The graph type can be anything if
the graph type is supported for the node samplers.
Return
------
self : Node2Vec
"""
def find_blocks_by_sbm(A, K, directed=False):
"""Jiashun Jin. Fast community detection by SCORE.
:param A: scipy sparse matrix
:type A: sparse.csr_matrix
:param K: number of communities
:type K: int
:param directed: whether to cluster directed or undirected, defaults to False
:type directed: bool, optional
:return: [description]
:rtype: [type]
"""
if K >= (A.shape[0] - 1):
cids = np.arange(A.shape[0])
return cids
u, s, v = utils.rSVD(A, dim=K)
u = np.ascontiguousarray(u, dtype=np.float32)
if directed:
v = np.ascontiguousarray(v.T, dtype=np.float32)
u = np.hstack([u, v])
norm = np.linalg.norm(u, axis=1)
denom = 1 / np.maximum(norm, 1e-5)
denom[np.isclose(norm, 0)] = 0
u = np.einsum("ij,i->ij", u, denom)
if (u.shape[0] / K) < 10:
niter = 1
else:
niter = 10
km = faiss.Kmeans(u.shape[1], K, niter=niter, spherical=True)
km.train(u)
_, cids = km.index.search(u, 1)
cids = np.array(cids).reshape(-1)
return np.array(cids).reshape(-1)
logger.debug("sampling - start")
A = utils.to_adjacency_matrix(net)
Gnx = nx.from_scipy_sparse_matrix(A)
deg = np.array(A.sum(axis=1)).reshape(-1)
self.deg = np.array([deg[i] for i in Gnx.nodes])
self.Gnx = Gnx
self.num_nodes = len(Gnx.nodes)
self.A = A
self.node_features = node_features
return self
def update_embedding(self, dim):
    """Train a two-layer GCN to reconstruct node features and store embeddings.

    The targets are the node-feature table itself (split into train /
    validation / test halves), so the network learns representations that
    reproduce the features from graph structure. The resulting
    ``dim``-dimensional vectors are copied into ``self.in_vec`` and
    ``self.out_vec``.

    :param dim: dimensionality of the produced embedding
    """
    # Final GCN layer emits the requested embedding dimension.
    self.layer_sizes[-1] = dim

    if self.node_features is None:
        # No features supplied: derive defaults from the spectrum of the
        # symmetrically normalized adjacency matrix, plus the degree.
        if self.num_default_features is None:
            self.num_default_features = dim
        degs = np.maximum(1, np.array(self.A.sum(axis=1)).reshape(-1))
        inv_sqrt = sparse.diags(1 / np.sqrt(degs))
        L = inv_sqrt @ self.A @ inv_sqrt
        X, s, _ = utils.rSVD(L, self.num_default_features)
        feats = pd.DataFrame(X)
        feats["deg"] = self.deg
        X = feats.values
        # Scale each feature column to (at most) unit norm.
        X = X @ np.diag(1 / np.maximum(np.linalg.norm(X, axis=0), 1e-12))
        feats = pd.DataFrame(X)
    else:
        feats = self.node_features

    self.G = sg.StellarGraph.from_networkx(self.Gnx, node_features=feats)

    # Split the feature table into train / validation / test targets.
    self.train_targets, self.test_targets = model_selection.train_test_split(
        feats, train_size=0.5
    )
    self.val_targets, self.test_targets = model_selection.train_test_split(
        self.test_targets, train_size=0.5, test_size=None
    )

    node_generator = sg.mapper.FullBatchNodeGenerator(self.G, method="gcn")
    gcn = sg.layer.GCN(
        layer_sizes=self.layer_sizes,
        generator=node_generator,
        activations=["relu", "relu"],
        dropout=0.5,
    )
    x_inp, x_out = gcn.in_out_tensors()
    # Linear head that reconstructs the node-feature table.
    predictions = tf.keras.layers.Dense(
        units=self.train_targets.shape[1]
    )(x_out)
    model = tf.keras.Model(inputs=x_inp, outputs=predictions)
    model.compile(
        keras.optimizers.Adam(lr=self.lr),
        loss=tf.keras.losses.MeanSquaredError(),
    )

    all_flow = node_generator.flow(np.arange(self.num_nodes))
    train_flow = node_generator.flow(self.train_targets.index, self.train_targets)
    val_flow = node_generator.flow(self.val_targets.index, self.val_targets)
    stopper = EarlyStopping(
        monitor="val_loss", patience=100, restore_best_weights=True
    )
    model.fit(
        train_flow,
        epochs=self.epochs,
        validation_data=val_flow,
        # Shuffling here would shuffle the whole graph; keep order fixed.
        shuffle=False,
        callbacks=[stopper],
    )

    embedder = Model(inputs=x_inp, outputs=x_out)
    emb = embedder.predict(all_flow).reshape((self.num_nodes, dim))
    self.in_vec = emb.copy()
    self.out_vec = emb.copy()
    return self
class GraphSage(GCN):
    """GraphSAGE-based node embedding.

    Trains GraphSAGE with the unsupervised link-prediction objective
    (random-walk context pairs vs. negative samples) and stores the
    resulting node embeddings in ``in_vec`` / ``out_vec``.
    """

    def __init__(
        self,
        length=2,
        number_of_walks=1,
        batch_size=512,
        epochs=5,
        lr=1e-2,
        num_samples=None,
        layer_sizes=None,
        num_default_features=None,
    ):
        """Set up the sampler and model hyperparameters.

        :param length: length of each random walk used for context pairs
        :param number_of_walks: number of walks started per node
        :param batch_size: minibatch size for link training
        :param epochs: number of training epochs
        :param lr: Adam learning rate
        :param num_samples: neighbor sample sizes per GraphSAGE hop
            (defaults to [25, 10])
        :param layer_sizes: hidden layer sizes (defaults to [256, 256];
            the last entry is overwritten with the embedding dim)
        :param num_default_features: number of spectral default features
            to generate when no node features are supplied
        """
        self.in_vec = None  # In-vector
        self.out_vec = None  # Out-vector
        self.length = length
        self.number_of_walks = number_of_walks
        self.batch_size = batch_size
        # Copy list arguments: `update_embedding` mutates
        # `layer_sizes[-1]`, so sharing a default (or caller-owned) list
        # across instances would leak state — the classic
        # mutable-default-argument pitfall.
        self.num_samples = [25, 10] if num_samples is None else list(num_samples)
        self.epochs = epochs
        self.layer_sizes = [256, 256] if layer_sizes is None else list(layer_sizes)
        self.lr = lr
        self.import_lib()
        self.num_default_features = num_default_features

    def fit(self, net, node_features=None):
        """Prepare the graph data for embedding.

        Parameters
        ----------
        net : nx.Graph object
            Network to be embedded. The graph type can be anything if
            the graph type is supported for the node samplers.
        node_features : array-like or pd.DataFrame, optional
            Per-node feature table. When None, default features are
            derived from the normalized adjacency spectrum at embedding
            time.

        Returns
        -------
        self : GraphSage
        """
        # NOTE(review): the previous version defined a nested helper
        # `find_blocks_by_sbm` here that was never called — removed as
        # dead code.
        logger.debug("sampling - start")
        A = utils.to_adjacency_matrix(net)
        Gnx = nx.from_scipy_sparse_matrix(A)
        deg = np.array(A.sum(axis=1)).reshape(-1)
        # Re-order degrees to follow networkx's node iteration order.
        self.deg = np.array([deg[i] for i in Gnx.nodes])
        self.Gnx = Gnx
        # Kept consistent with GCN.fit, which also records the node count.
        self.num_nodes = len(Gnx.nodes)
        self.A = A
        self.node_features = node_features
        return self

    def update_embedding(self, dim):
        """Train unsupervised GraphSAGE and store the node embeddings.

        :param dim: dimensionality of the produced embedding
        """
        if self.node_features is None:
            # No features supplied: derive defaults from the spectrum of
            # the symmetrically normalized adjacency matrix, plus degree.
            if self.num_default_features is None:
                self.num_default_features = dim
            d = np.maximum(1, np.array(self.A.sum(axis=1)).reshape(-1))
            dsqrt = np.sqrt(d)
            L = sparse.diags(1 / dsqrt) @ self.A @ sparse.diags(1 / dsqrt)
            X, s, _ = utils.rSVD(L, self.num_default_features)
            node_features = pd.DataFrame(X)
            node_features["deg"] = self.deg
            X = node_features.values
            # Scale each feature column to (at most) unit norm.
            X = X @ np.diag(1 / np.maximum(np.linalg.norm(X, axis=0), 1e-12))
            node_features = pd.DataFrame(X)
        else:
            node_features = self.node_features
        self.G = sg.StellarGraph.from_networkx(
            self.Gnx, node_features=node_features
        )
        # Positive (context) pairs come from short random walks.
        unsupervised_samples = UnsupervisedSampler(
            self.G,
            nodes=list(self.Gnx.nodes),
            length=self.length,
            number_of_walks=self.number_of_walks,
        )
        generator = GraphSAGELinkGenerator(self.G, self.batch_size, self.num_samples)
        train_gen = generator.flow(unsupervised_samples)
        # Final layer emits the requested embedding dimension.
        self.layer_sizes[-1] = dim
        graphsage = GraphSAGE(
            layer_sizes=self.layer_sizes,
            generator=generator,
            bias=True,
            dropout=0.0,
            normalize="l2",
        )
        x_inp, x_out = graphsage.in_out_tensors()
        # Binary link classifier on inner products of pair embeddings.
        prediction = link_classification(
            output_dim=1, output_act="sigmoid", edge_embedding_method="ip"
        )(x_out)
        model = keras.Model(inputs=x_inp, outputs=prediction)
        model.compile(
            optimizer=keras.optimizers.Adam(lr=self.lr),
            loss=keras.losses.binary_crossentropy,
            metrics=[keras.metrics.binary_accuracy],
        )
        model.fit(
            train_gen,
            epochs=self.epochs,
            verbose=1,
            use_multiprocessing=False,
            workers=3,
            shuffle=True,
        )
        # The link model takes interleaved (src, dst) inputs; keep only
        # the source half to obtain a single-node embedding model.
        x_inp_src = x_inp[0::2]
        x_out_src = x_out[0]
        embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)
        node_gen = GraphSAGENodeGenerator(
            self.G, self.batch_size, self.num_samples
        ).flow(list(self.Gnx.nodes))
        node_embeddings = embedding_model.predict(node_gen, workers=4, verbose=1)
        self.in_vec = node_embeddings.copy()
        self.out_vec = node_embeddings.copy()
        return self
| 28,226 | 9,224 |
from django import forms
from django.core.validators import MaxValueValidator, MinValueValidator
from .additionals.metaData import *
from .models import UserCredentials, UserTasks, UserDepartment
from django.contrib.auth.forms import AuthenticationForm
class AuthForm(AuthenticationForm, forms.ModelForm):
class Meta:
model = UserCredentials
fields = ['username', 'password']
username = forms.CharField(min_length=2, max_length=150, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'validationUsername', 'placeholder': 'Required'}))
password = forms.CharField(min_length=2, max_length=128, required=True, widget=forms.PasswordInput(attrs={'type': 'password', 'class': 'form-control', 'id': 'validationPassword', 'placeholder': 'Required'}))
class UserTaskAdditionForm(forms.ModelForm):
class Meta:
model = UserTasks
fields = '__all__'
exclude = ['Task_UUID', 'Task_CreateDate', 'Task_Owner']
Task_Name = forms.CharField(min_length=2, max_length=150, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'addDataTaskName', 'placeholder': 'Required'}))
Task_Description = forms.CharField(min_length=2, max_length=512, required=True, widget=forms.Textarea(attrs={'class': 'form-control', 'id':'addTaskDescription', 'placeholder': 'Required'}))
Task_Type = forms.ChoiceField(choices=TaskTypes, required=True, widget=forms.Select(attrs={'class': 'form-control', 'id':'addDataTaskType', 'placeholder': 'Required'}))
Task_StartTime = forms.DateTimeField(input_formats=['%Y-%m-%dT%H:%M'], required=True, widget=forms.DateTimeInput(attrs={'class': 'form-control', 'id':'addTaskStartTime', 'placeholder': 'Start DateTime', 'type': 'datetime-local', 'min': '2020/04/01T000:00', 'max': '2099/12/31T000:00' }))
Task_EndTime = forms.DateTimeField(input_formats=['%Y-%m-%dT%H:%M'], required=True, widget=forms.DateTimeInput(attrs={'class': 'form-control', 'id':'addTaskEndTime', 'placeholder': 'End DateTime', 'type': 'datetime-local', 'min': '2020/04/01T000:00', 'max': '2099/12/31T000:00' }))
class UserTaskUpdateForm(forms.ModelForm):
class Meta:
model = UserTasks
fields = '__all__'
exclude = ['Task_UUID', 'Task_CreateDate', 'Task_Owner']
Task_Name = forms.CharField(min_length=2, max_length=150, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'modifyDataTaskName', 'placeholder': 'Required'}))
Task_Description = forms.CharField(min_length=2, max_length=512, required=True, widget=forms.Textarea(attrs={'class': 'form-control', 'id': 'modifyTaskDescription', 'placeholder': 'Required'}))
Task_Type = forms.ChoiceField(choices=TaskTypes, required=True, widget=forms.Select(attrs={'class': 'form-control', 'id': 'modifyDataTaskType', 'placeholder': 'Required'}))
Task_StartTime = forms.DateTimeField(input_formats=['%Y-%m-%dT%H:%M'], required=True, widget=forms.DateTimeInput(attrs={'class': 'form-control', 'id':'modifyTaskStartTime', 'placeholder': 'Start DateTime', 'type': 'datetime-local', 'min': '2020/04/01T000:00', 'max': "2099/12/31T000:00" }))
Task_EndTime = forms.DateTimeField(input_formats=['%Y-%m-%dT%H:%M'], required=True, widget=forms.DateTimeInput(attrs={'class': 'form-control', 'id':'modifyTaskEndTime', 'placeholder': 'End DateTime', 'type': 'datetime-local', 'min': '2020/04/01T000:00', 'max': "2099/12/31T000:00" }))
def clean(self):
return super(UserTaskUpdateForm, self).clean()
class RegistrationForm(forms.ModelForm):
class Meta:
model = UserCredentials
fields = [
'first_name',
'middle_name',
'last_name',
'email',
'dept_residence',
'username',
'password',
'avatar'
]
username = forms.CharField(min_length=2, max_length=150, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'validationUsername', 'placeholder': 'Required'}))
password = forms.CharField(min_length=2, max_length=128, required=True, widget=forms.PasswordInput(attrs={'type': 'password', 'class': 'form-control', 'id': 'validationPassword', 'placeholder': 'Required'}))
confirm_password = forms.CharField(min_length=2, max_length=128, required=True, widget=forms.PasswordInput(attrs={'type': 'password', 'class': 'form-control', 'id': 'validationPassword', 'placeholder': 'Required'}))
first_name = forms.CharField(min_length=2, max_length=50, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Required'}))
middle_name = forms.CharField(min_length=2, max_length=50, required=False, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Optional'}))
last_name = forms.CharField(min_length=2, max_length=50, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Required'}))
email = forms.CharField(max_length=50, required=True, widget=forms.TextInput(attrs={'class': 'form-control', 'type': 'email', 'placeholder': 'Required'}))
avatar = forms.ImageField(required=False, allow_empty_file=True, widget=forms.FileInput(attrs={'accept': 'image/jpg, image/jpeg, image/png', 'class':'form-control-file', 'type':'file'}))
dept_residence = forms.ModelChoiceField(queryset=UserDepartment.objects.all(), widget=forms.Select(attrs={'class': 'custom-select', 'id':'requireDeptResidence', 'placeholder': 'Required'}), to_field_name='Department_Name', empty_label="Nothing Selected...")
def clean(self, *args, **kwargs):
cleaned_data = super(RegistrationForm, self).clean()
password = cleaned_data.get("password")
confirm_password = cleaned_data.get("confirm_password")
objectDuplicateCheck = UserCredentials.objects.filter(
username=cleaned_data['username'],
first_name=cleaned_data['first_name'],
last_name=cleaned_data['last_name'],
email=cleaned_data['email']
).count()
if objectDuplicateCheck:
raise forms.ValidationError("Some of your credentials are conflicting with the existing accounts! Did you register before? Ask the developer please.")
if password != confirm_password:
raise forms.ValidationError("Your Password and Password Confirmation Field does not match!")
return cleaned_data | 6,410 | 2,024 |
import collections.abc
import json
import os
import os.path
import pathlib
import subprocess
def parse_version_string():
path = pathlib.Path(__file__).resolve().parent # go up one level, from repo/basedir.py to repo, where README.md is located
try:
version = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=str(path)).decode('utf-8').strip('\n')
if version == 'master':
try:
with (path / 'README.md').open() as readme:
for line in readme.read().splitlines():
if line.startswith('This is `python-xdg-basedir` version '):
return line.split(' ')[4]
except:
pass
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=str(path)).decode('utf-8').strip('\n')
except:
pass
__version__ = parse_version_string()
class BaseDirFile(collections.abc.Sequence):
def __init__(self, paths, filename, flags='r'):
self.paths = [pathlib.Path(p) for p in paths]
self.filename = filename
self.flags = 'r'
def __enter__(self):
self.fobj = (self.path / self.filename).open(self.flags)
return self.fobj
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.fobj.close()
self.fobj = None
else:
try:
self.fobj.close()
finally:
pass
return False
def __getitem__(self, value):
if isinstance(value, slice):
return [path / self.filename for path in self.paths[value]]
else:
return self.paths[value] / self.filename
def __iter__(self):
for path in self.paths:
yield path / self.filename
def __len__(self):
return len(self.paths)
def __str__(self):
return ':'.join(str(path / self.filename) for path in self.paths)
def lazy_json(self, existing_only=False, readable_only=False, writeable_only=False, default=None, *, init=False):
"""Return a lazyjson object representing the file(s). Requires the lazyjson module.
Optional arguments:
existing_only -- If true, exclude files from the multifile which don't exist at the time of the call. Defaults to False.
readable_only -- If true, exclude files from the multifile for which opening in read mode fails at the time of the call. Defaults to False.
writeable_only -- If true, exclude files from the multifile for which opening in write mode fails at the time of the call. Defaults to False.
default -- A JSON-encodable Python object which is appended to the end of the multifile as a lazyjson.PythonFile, and can be used to provide default values for config files. Defaults to None.
Keyword-only arguments:
init -- If true, create the file on the first path if none of the files exists, and write the “default” argument to it. Defaults to False.
Returns:
A lazyjson.MultiFile created from the paths of this file.
Raises:
ImportError for lazyjson.
"""
import lazyjson
paths = []
for path in self.paths:
if existing_only and not (path / self.filename).exists():
continue
if readable_only:
try:
(path / self.filename).open().close()
except IOError:
continue
if writeable_only:
try:
(path / self.filename).open('a').close()
except IOError:
continue
paths.append(path / self.filename)
if init and not any((path / self.filename).exists() for path in self.paths):
for path in self.paths:
try:
path.mkdir(parents=True, exist_ok=True)
with (path / self.filename).open('w') as f:
json.dump(default, f, indent=4, sort_keys=True)
print(file=f)
except IOError:
continue
else:
break
paths.append(lazyjson.PythonFile(default))
return lazyjson.MultiFile(*paths)
def json(self, base=None):
def patch_json(base, new):
new_json = json.load(new)
if type(new_json) is dict:
if type(base) is not dict:
return new_json
base.update(new_json)
return base
elif type(new_json) is list:
if type(base) is not list:
return new_json
return base + new_json
else:
return new_json
return self.read(patch=patch_json, base=base)
async def json_async(self, base=None):
async def patch_json_async(base, new):
new_json = json.loads(await new.read())
if type(new_json) is dict:
if type(base) is not dict:
return new_json
base.update(new_json)
return base
elif type(new_json) is list:
if type(base) is not list:
return new_json
return base + new_json
else:
return new_json
return await self.read_async(patch=patch_json_async, base=base)
@property
def path(self):
for iter_path in self.paths:
if (iter_path / self.filename).exists():
return iter_path
def read(self, patch=None, base=None):
"""If patch is None (the default), this returns the contents of the first found file.
If patch is not None, it must be a function of the form patch(base, new). This function will then read all existing files in reverse order, and call the patch function with the results of the last call as the first argument, and a file object representing the current file as the second argument. The end result is returned.
"""
if patch is None:
for path in self.paths:
if (path / self.filename).exists():
with (path / self.filename).open() as f:
return f.read()
else:
for path in reversed(self.paths):
if (path / self.filename).exists():
with (path / self.filename).open() as new:
base = patch(base, new)
return base
async def read_async(self, patch=None, base=None):
"""If patch is None (the default), this returns the contents of the first found file.
If patch is not None, it must be a coroutine of the form patch(base, new). This coroutine will then read all existing files in reverse order, and call the patch coroutine with the results of the last call as the first argument, and an aiofiles async file object representing the current file as the second argument. The end result is returned.
"""
import aiofiles
if patch is None:
for path in self.paths:
if (path / self.filename).exists():
async with aiofiles.open(path / self.filename) as f:
return await f.read()
else:
for path in reversed(self.paths):
if (path / self.filename).exists():
async with aiofiles.open(path / self.filename) as new:
base = await patch(base, new)
return base
class BaseDir:
def __call__(self, filename, flags='r'):
return BaseDirFile([self.path], filename, flags=flags)
def __init__(self, envar, default):
self.path = pathlib.Path(os.environ.get(envar) or default)
def __str__(self):
return str(self.path)
def config(self, filename):
return Config(self(filename))
class BaseDirs:
def __call__(self, filename, flags='r'):
return BaseDirFile([self.home] + self.paths, filename, flags=flags)
def __init__(self, envar, default, home):
if isinstance(home, BaseDir):
self.home = home.path
else:
self.home = pathlib.Path(home)
self.paths = os.environ.get(envar) or default
if isinstance(self.paths, str):
self.paths = [pathlib.Path(p) for p in self.paths.split(':')]
def __iter__(self):
yield self.home
for path in self.paths:
yield path
def __str__(self, include_home=False):
paths = ([self.home] if include_home else []) + list(self.paths)
return ':'.join(str(p) for p in paths)
def config(self, filename):
return Config(self(filename))
data_home = BaseDir('XDG_DATA_HOME', pathlib.Path.home() / '.local' / 'share')
config_home = BaseDir('XDG_CONFIG_HOME', pathlib.Path.home() / '.config')
data_dirs = BaseDirs('XDG_DATA_DIRS', ['/usr/local/share', '/usr/share'], data_home)
config_dirs = BaseDirs('XDG_CONFIG_DIRS', ['/etc/xdg'], config_home)
cache_home = BaseDir('XDG_CACHE_HOME', pathlib.Path.home() / '.cache')
| 9,184 | 2,495 |
#!/usr/bin/env python3
import argparse
import collections
import csv
import tensorflow as tf
import numpy as np
from google_research.bert.tokenizer import FullTokenizer
def main():
MAX_SEQUENCE_LENGTH = 512
parser = argparse.ArgumentParser()
parser.add_argument('--do_lower_case', action='store_true')
parser.add_argument('--input_file', help='A file containing many lines for tokenization',
type=str)
parser.add_argument('--output_training_file', help='The output TF Record file',
type=str)
parser.add_argument('--output_validation_file', help='The output TF Record file',
type=str)
parser.add_argument('--validation_ratio', help='The output TF Record file',
type=float)
parser.add_argument('--vocab_file', help='A file containing the dictionary for tokenization',
type=str, default='models/vocab.txt')
args = parser.parse_args()
tokenizer = FullTokenizer(args.vocab_file, args.do_lower_case)
training_writer = tf.python_io.TFRecordWriter(args.output_training_file)
validation_writer = tf.python_io.TFRecordWriter(args.output_validation_file)
with open(args.input_file) as f:
for i, row in enumerate(csv.reader(f)):
if i == 0: continue
tokens = []
tokens.append("[CLS]")
tokens.extend(tokenizer.tokenize(row[1])[0:(MAX_SEQUENCE_LENGTH-2)])
tokens.append("[SEP]")
input_ids = tokenizer.convert_tokens_to_ids(tokens)
mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
while len(input_ids) < MAX_SEQUENCE_LENGTH:
input_ids.append(0)
mask.append(0)
segment_ids.append(0)
targets = list([int(i) for i in row[2:]])
features = collections.OrderedDict()
features["input_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(input_ids)))
features["input_mask"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(mask)))
features["segment_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(segment_ids)))
features["label_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(targets)))
features["is_real_example"] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
if np.random.random() > args.validation_ratio:
training_writer.write(tf_example.SerializeToString())
else:
validation_writer.write(tf_example.SerializeToString())
training_writer.close()
validation_writer.close()
if __name__ == '__main__':
main()
| 2,861 | 866 |
# -*- coding: utf-8 -*-
import os, sys
# env
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/')
sys.path.append('/data2/django_projects/')
sys.path.append('/data2/django_third/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djusagi.settings")
from django.conf import settings
from djusagi.contacts.manager import ContactsManager
from djusagi.adminsdk.manager.admin import AdminManager
import argparse
# set up command-line options
desc = """
Accepts as input the email address of a google domain user
whose contact information we should update for each user in the domain,
and a python list with the values in quotes for the following fields
in the contact object:
title, name.fullName, name.givenName, name.additionalName, name.familyName
e.g. ['Rosa Luxemburg', 'Rosa Luxemburg', 'Rosa', 'Maria', 'Luxemburg']
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
"-e", "--email",
required=True,
help="email address of user",
dest="email"
)
parser.add_argument(
"-n", "--names",
type=str,
nargs='*',
required=True,
help="python list of values for the new name",
dest='names'
)
parser.add_argument(
"--test",
action='store_true',
help="Dry run?",
dest="test"
)
def main():
'''
Fetch all users from the Google API, go through their
contacts, and update their contact record for the given
email for the following fields:
title
fullName
givenName
familyName
'''
am = AdminManager()
user_list = []
page_token = None
while True:
results = am.service().users().list(
domain=email.split('@')[1],
maxResults=500,
pageToken=page_token,
orderBy='email', viewType='domain_public'
).execute()
for r in results["users"]:
user_list.append(r)
page_token = results.get('nextPageToken')
if not page_token:
break
print "length of user_list: {}".format(len(user_list))
count = 0
for user in user_list:
user_email = user["primaryEmail"]
cm = ContactsManager(user_email)
contacts = cm.contacts(settings.CONTACTS_MAX_RESULTS)
# loop through all contacts in the user's collection
for entry in contacts.entry:
# loop through all emails for any given contact
for e in entry.email:
if e.address == email:
# loop through the various links
for l in entry.link:
if l.type == 'application/atom+xml' and l.rel == 'edit':
if test:
print user_email
print "\n{}".format(l.href)
contact = cm.get_contact(l.href)
if test:
print contact
if contact.name:
new_contact = cm.set_name(
contact, names[0].split(',')
)
if test:
print new_contact
cm.save(new_contact)
count += 1
print "number of accounts updated: {}".format(count)
######################
# shell command line
######################
if __name__ == "__main__":
args = parser.parse_args()
email = args.email
names = args.names
test = args.test
if test:
print args
sys.exit(main())
| 3,733 | 1,057 |
from typing import Optional
import numpy as np
from sklearn.decomposition import KernelPCA, PCA
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, PolynomialFeatures, StandardScaler
from fedot.core.data.data import InputData
from fedot.core.data.data import data_has_categorical_features, divide_data_categorical_numerical, str_columns_check
from fedot.core.operations.evaluation.operation_implementations. \
implementation_interfaces import DataOperationImplementation, EncodedInvariantImplementation
class ComponentAnalysisImplementation(DataOperationImplementation):
""" Class for applying PCA and kernel PCA models form sklearn
:param params: optional, dictionary with the arguments
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
self.pca = None
self.params = None
self.amount_of_features = None
def fit(self, input_data):
"""
The method trains the PCA model
:param input_data: data with features, target and ids for PCA training
:return pca: trained PCA model (optional output)
"""
self.amount_of_features = np.array(input_data.features).shape[1]
if self.amount_of_features > 1:
self.check_and_correct_params()
self.pca.fit(input_data.features)
return self.pca
def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):
"""
Method for transformation tabular data using PCA
:param input_data: data with features, target and ids for PCA applying
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
:return input_data: data with transformed features attribute
"""
if self.amount_of_features > 1:
transformed_features = self.pca.transform(input_data.features)
else:
transformed_features = input_data.features
# Update features
output_data = self._convert_to_output(input_data,
transformed_features)
return output_data
def check_and_correct_params(self) -> None:
""" Method check if amount of features in data enough for n_components
parameter in PCA or not. And if not enough - fixes it
"""
current_parameters = self.pca.get_params()
if type(current_parameters['n_components']) == int:
if current_parameters['n_components'] > self.amount_of_features:
current_parameters['n_components'] = self.amount_of_features
self.pca.set_params(**current_parameters)
self.params = current_parameters
def get_params(self):
return self.pca.get_params()
class PCAImplementation(ComponentAnalysisImplementation):
""" Class for applying PCA from sklearn
:param params: optional, dictionary with the hyperparameters
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
if not params:
# Default parameters
self.pca = PCA(svd_solver='full', n_components='mle')
else:
self.pca = PCA(**params)
self.params = params
self.amount_of_features = None
class KernelPCAImplementation(ComponentAnalysisImplementation):
""" Class for applying kernel PCA from sklearn
:param params: optional, dictionary with the hyperparameters
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
if not params:
# Default parameters
self.pca = KernelPCA()
else:
self.pca = KernelPCA(**params)
self.params = params
class OneHotEncodingImplementation(DataOperationImplementation):
""" Class for automatic categorical data detection and one hot encoding """
def __init__(self, **params: Optional[dict]):
super().__init__()
default_params = {
'drop': 'if_binary'
}
if not params:
# Default parameters
self.encoder = OneHotEncoder(**default_params)
else:
self.encoder = OneHotEncoder(**{**params, **default_params})
self.categorical_ids = None
self.non_categorical_ids = None
def fit(self, input_data: InputData):
""" Method for fit encoder with automatic determination of categorical features
:param input_data: data with features, target and ids for encoder training
:return encoder: trained encoder (optional output)
"""
features = input_data.features
categorical_ids, non_categorical_ids = str_columns_check(features)
# Indices of columns with categorical and non-categorical features
self.categorical_ids = categorical_ids
self.non_categorical_ids = non_categorical_ids
if len(categorical_ids) == 0:
pass
else:
categorical_features = np.array(features[:, categorical_ids])
self.encoder.fit(categorical_features)
def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):
"""
The method that transforms the categorical features in the original
dataset, but does not affect the rest features
:param input_data: data with features, target and ids for transformation
:param is_fit_pipeline_stage: is this fit or predict stage for pipeline
:return output_data: output data with transformed features table
"""
features = input_data.features
if len(self.categorical_ids) == 0:
# If there are no categorical features in the table
transformed_features = features
else:
# If categorical features are exists
transformed_features = self._make_new_table(features)
# Update features
output_data = self._convert_to_output(input_data,
transformed_features)
return output_data
def _make_new_table(self, features):
"""
The method creates a table based on categorical and real features
:param features: tabular data for processing
:return transformed_features: transformed features table
"""
categorical_features = np.array(features[:, self.categorical_ids])
self._check_same_categories(categorical_features)
transformed_categorical = self.encoder.transform(categorical_features).toarray()
# If there are non-categorical features in the data
if len(self.non_categorical_ids) == 0:
transformed_features = transformed_categorical
else:
# Stack transformed categorical and non-categorical data
non_categorical_features = np.array(features[:, self.non_categorical_ids])
frames = (non_categorical_features, transformed_categorical)
transformed_features = np.hstack(frames)
return transformed_features
def _check_same_categories(self, categorical_features):
encoder_unique_categories = sorted(list(np.hstack(self.encoder.categories_)))
features_unique_categories = sorted(np.unique(np.array(categorical_features)))
if encoder_unique_categories != features_unique_categories:
raise ValueError('Category in test data was not exist in train.')
def get_params(self):
return self.encoder.get_params()
class PolyFeaturesImplementation(EncodedInvariantImplementation):
""" Class for application of PolynomialFeatures operation on data,
where only not encoded features (were not converted from categorical using
OneHot encoding) are used
:param params: optional, dictionary with the arguments
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
if not params:
# Default parameters
self.operation = PolynomialFeatures(include_bias=False)
else:
# Checking the appropriate params are using or not
poly_params = {k: params[k] for k in
['degree', 'interaction_only']}
self.operation = PolynomialFeatures(include_bias=False,
**poly_params)
self.params = params
def get_params(self):
return self.operation.get_params()
class ScalingImplementation(EncodedInvariantImplementation):
""" Class for application of Scaling operation on data,
where only not encoded features (were not converted from categorical using
OneHot encoding) are used
:param params: optional, dictionary with the arguments
"""
def __init__(self, **params: Optional[dict]):
super().__init__()
if not params:
# Default parameters
self.operation = StandardScaler()
else:
self.operation = StandardScaler(**params)
self.params = params
def get_params(self):
return self.operation.get_params()
class NormalizationImplementation(EncodedInvariantImplementation):
    """ Class for application of MinMax normalization operation on data,
    where only not encoded features (were not converted from categorical using
    OneHot encoding) are used

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        # Fall back to a default-configured MinMaxScaler when no
        # parameters were supplied.
        self.operation = MinMaxScaler(**params) if params else MinMaxScaler()
        self.params = params

    def get_params(self):
        """Return the parameters of the wrapped MinMaxScaler operation."""
        return self.operation.get_params()
class ImputationImplementation(DataOperationImplementation):
    """ Class for applying imputation on tabular data

    :param params: optional, dictionary with the arguments
    """

    def __init__(self, **params: Optional[dict]):
        super().__init__()
        default_params_categorical = {'strategy': 'most_frequent'}
        # Categorical imputation always uses 'most_frequent'; the default
        # entries deliberately override any user-supplied 'strategy'.
        self.params_cat = {**params, **default_params_categorical}
        self.params_num = params
        if not params:
            # Default parameters
            self.imputer_cat = SimpleImputer(**default_params_categorical)
            self.imputer_num = SimpleImputer()
        else:
            self.imputer_cat = SimpleImputer(**self.params_cat)
            self.imputer_num = SimpleImputer(**self.params_num)

    @staticmethod
    def _replace_inf_with_nan(input_data: InputData):
        """Replace +/-inf in the features with NaN (in place) so that
        SimpleImputer treats them as missing values."""
        input_data.features = np.where(np.isin(input_data.features,
                                               [np.inf, -np.inf]),
                                       np.nan,
                                       input_data.features)

    @staticmethod
    def _as_2d(features):
        """Reshape a 1-D feature array into a single column because
        SimpleImputer accepts only 2-D input; 2-D input is returned as-is."""
        if len(features.shape) == 1:
            return features.reshape(-1, 1)
        return features

    def fit(self, input_data: InputData):
        """
        The method trains the SimpleImputer(s) in place.

        :param input_data: data with features
        """
        self._replace_inf_with_nan(input_data)
        if data_has_categorical_features(input_data):
            numerical, categorical = divide_data_categorical_numerical(input_data)
            self.imputer_cat.fit(self._as_2d(categorical.features))
            self.imputer_num.fit(self._as_2d(numerical.features))
        else:
            self.imputer_num.fit(self._as_2d(input_data.features))

    def transform(self, input_data, is_fit_pipeline_stage: Optional[bool] = None):
        """
        Method for transformation tabular data using SimpleImputer

        :param input_data: data with features
        :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return input_data: data with transformed features attribute
        """
        self._replace_inf_with_nan(input_data)
        if data_has_categorical_features(input_data):
            numerical, categorical = divide_data_categorical_numerical(input_data)
            categorical_features = self.imputer_cat.transform(self._as_2d(categorical.features))
            numerical_features = self.imputer_num.transform(self._as_2d(numerical.features))
            # Categorical columns first, matching the split used during fit
            transformed_features = np.hstack((categorical_features, numerical_features))
        else:
            transformed_features = self.imputer_num.transform(self._as_2d(input_data.features))
        output_data = self._convert_to_output(input_data, transformed_features, data_type=input_data.data_type)
        return output_data

    def fit_transform(self, input_data, is_fit_pipeline_stage: Optional[bool] = None):
        """
        Method for training and transformation tabular data using SimpleImputer

        :param input_data: data with features
        :param is_fit_pipeline_stage: is this fit or predict stage for pipeline
        :return input_data: data with transformed features attribute
        """
        self.fit(input_data)
        output_data = self.transform(input_data)
        return output_data

    def get_params(self) -> dict:
        """Return the configuration of both the categorical and numerical imputers."""
        return {'imputer_categorical': self.params_cat, 'imputer_numerical': self.params_num}
| 14,246 | 3,773 |
from django.urls import include, path

from allauth.utils import import_attribute


def default_urlpatterns(provider):
    """Build the login/ACS URL patterns for a SAML provider, namespaced
    under the provider's slug."""
    package = provider.get_package()
    login_view = import_attribute(package + ".views.saml2_login")
    acs_view = import_attribute(package + ".views.saml2_acs")

    provider_patterns = [
        path("login/", login_view, name=provider.id + "_login"),
        # TODO: change to login/acs to keep allauth convention
        path("acs/", acs_view, name=provider.id + "_acs"),
    ]
    return [path(provider.get_slug() + "/", include(provider_patterns))]
| 559 | 175 |
import json
def extract(filename):
    ''' (str) -> list of object

    Given the filename of a JSON file in the current directory, extract and return
    its data.

    :param filename: name of the JSON file:
    :return data: data extracted from the JSON file:
    '''
    # Context manager guarantees the file handle is closed even on error
    # (the original left it open).
    with open(filename, 'r') as file:
        json_decode = json.load(file)
    # Iterating a dict yields its keys, so list() matches the original
    # element-by-element copy for both lists and dicts.
    return list(json_decode)
| 421 | 121 |
# coding: utf-8

# ## This notebook is to analyse the practice indicators available from PHE dataset

# In[1]:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# In[2]:

# General Practice Indicators from PHE (It includes England, CCG level indicators as well)
# NOTE(review): hard-coded relative Windows path — run from the notebook's own
# directory and adjust separators on other platforms.
GPIndicators = pd.read_csv('..\..\..\..\MAIN_PROJECT\Data\PHE\PracticeProfileIndicators.csv', sep=',', encoding = "ISO-8859-1")
GPIndicators.head()

# In[3]:

# Column overview of the raw indicator table
GPIndicators.columns

# In[4]:

# types of areas
GPIndicators['Area Type'].unique()

# In[9]:

# types of indicators data available (human-readable names)
GPIndicators['Indicator Name'].unique()

# In[10]:

# types of indicators data available (numeric IDs)
GPIndicators['Indicator ID'].unique()

# In[7]:

# Time periods available
GPIndicators['Time period'].unique()

# In[26]:

# AreaTypeFilters: boolean masks selecting rows by area granularity
GPFilter = GPIndicators['Area Type'] == 'GP'
CCGFilter = GPIndicators['Area Type'] == 'CCGs (since 4/2017)'
EnglandFilter = GPIndicators['Area Type'] == 'Country'

# In[27]:

# Data by AreaType
GP_Data = GPIndicators[GPFilter]
CCG_Data = GPIndicators[CCGFilter]
England_Data = GPIndicators[EnglandFilter]

# In[28]:

# Save practice-level rows to CSV
GP_Data.to_csv('PHE_GP_Indicators.csv', sep=',')

# In[29]:

# Save CCG-level rows to CSV
CCG_Data.to_csv('PHE_CCG_Indicators.csv', sep=',')

# In[30]:

# Save England-level rows to CSV
England_Data.to_csv('PHE_ENG_Indicators.csv', sep=',')

# In[36]:

# Extract IMD 2015 data from GP Data
GP_IMD_2k15_Filter = GP_Data['Indicator Name'] == 'Deprivation score (IMD 2015)'
GP_IMD_2k15_Data = GP_Data[GP_IMD_2k15_Filter]
GP_IMD_2k15_Data.to_csv('PHE_GP_IMD_2k15_Indicators.csv', sep=',')

# In[49]:

# Scatter plot of the IMD 2015 deprivation score per practice.
# NOTE(review): this filter repeats the one above so it keeps every row —
# presumably retained only for notebook-cell independence.
IMDColumnFilter = GP_IMD_2k15_Data['Indicator Name'] == 'Deprivation score (IMD 2015)'
IMDData = GP_IMD_2k15_Data[IMDColumnFilter]
x = np.arange(0, IMDData['Indicator Name'].count(), 1)
y = IMDData['Value']
plt.scatter(x, y)
plt.xlabel("Practice Index")
plt.ylabel("IMD Score")
plt.show()
| 1,990 | 858 |
#- Import packages
import random
'''
@module: Travel Dataset Generator
@authors: Leonardo Mauro <leomaurodesenv>
@link: https://github.com/Argo-Solutions/travel-dataset-generator GitHub
@license: Creative Commons BY License
@copyright: 2019 Argo Solutions
@access: public
'''
#- Functions
def funcAgencyGenerator(flightTypes):
    '''
    Generate random agency services, based on predefinitions.
    - flightTypes: types of flight
    :return agency: dict with key 'types' holding a random, non-empty,
      duplicate-free subset of the flight type names
    '''
    agency = dict()
    # list(...keys()) already builds a fresh list, so the .copy() in the
    # original was redundant and has been dropped.
    types = list(flightTypes.keys())
    random.shuffle(types)
    # Pick at least one and at most all of the available flight types.
    typesMany = random.randint(1, len(types))
    agency['types'] = types[:typesMany]
    return agency
| 656 | 212 |
from django.contrib import admin
from . import models
# Register your models here.
# Register your models here.
# Expose the app's models in the Django admin with default ModelAdmin options.
admin.site.register(models.UserProfile)
admin.site.register(models.Candidate)
admin.site.register(models.Company)
admin.site.register(models.CompanyImage)
'''
Basic functions used throughout the entirety
of the project.
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
v1.0
@dirctedbyshawn
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
---------------------------------------------------
'''
import time, random, os, hashlib, six, base64
from termcolor import colored
def print_lines(lines):
    '''
    Prints horizontal separator lines in the console to space out information.

    Parameters:
        lines (int) : number of separator lines to print in console
    '''
    # input validation (type() check deliberately rejects bool as well)
    if type(lines) is not int:
        raise TypeError(colored("ERROR: Only pass ints to the print lines function.", "red"))
    print("")
    for _ in range(lines):
        print("-----------------------------------------------")
        print("")
def cls():
    '''
    Clears the console ("cls" on Windows, "clear" elsewhere).
    '''
    clear_command = "cls" if os.name == "nt" else "clear"
    os.system(clear_command)
def sha256(string):
    '''
    Returns the SHA-256 hash of the string provided

    Parameters:
        string (str) : string to be hashed

    Returns:
        hashed_string (str) : hex digest of the hashed string
    '''
    # input validation
    if type(string) is not str:
        raise TypeError("ERROR: Only pass strings to hash function.")
    # hash the UTF-8 encoding of the string and return its hex digest
    return hashlib.sha256(string.encode("utf-8")).hexdigest()
def only_letters_and_nums(string):
    '''
    Determines if a string only contains ASCII letters and numbers.

    Parameters:
        string (str) : string to be tested

    Returns:
        value (bool) : True when every character is an ASCII letter or digit
    '''
    # input validation
    if type(string) is not str:
        raise TypeError(colored("ERROR: Only pass strings to the only letters and numbers function.", "red"))
    # isascii() + isalnum() accepts exactly [0-9A-Za-z], matching the
    # hand-rolled character tables this replaces, and all() short-circuits
    # on the first offending character instead of scanning the whole string.
    return all(char.isascii() and char.isalnum() for char in string)
def encode(key, string):
    '''
    Encodes string using the vignere cipher algo.

    Parameters:
        key (str) : key to encode string
        string (str) : string to be encoded

    Returns:
        encoded_string (str) : encoded string
    '''
    # input validation
    if type(key) is not str or type(string) is not str:
        raise TypeError(colored("ERROR: Only pass strings to encode method.", "red"))
    # shift every character by the matching (cycled) key character
    # NOTE(review): % binds tighter than +, so this is
    # ord(char) + (ord(key_c) % 256); decode() compensates — confirm before
    # changing the precedence.
    shifted = [
        chr(ord(char) + ord(key[index % len(key)]) % 256)
        for index, char in enumerate(string)
    ]
    cipher_text = ''.join(shifted)
    # latin-1 keeps one byte per character before base64 on Python 3
    cipher_text = cipher_text.encode('latin') if six.PY3 else cipher_text
    cipher_text = base64.urlsafe_b64encode(cipher_text).rstrip(b'=')
    # bytes -> str for the caller
    return cipher_text.decode("utf-8")
def decode(key, string):
    '''
    Decodes string using the vignere cipher algo.

    Parameters:
        key (str) : key to decode string
        string (str) : string to be decoded

    Returns:
        decoded_string (str) : decoded string
    '''
    # input validation
    if type(key) is not str or type(string) is not str:
        raise TypeError(colored("ERROR: Only pass strings to decode method.", "red"))
    # restore the base64 padding stripped by encode(), then undo base64
    raw = base64.urlsafe_b64decode(string.encode("utf-8") + b'===')
    raw = raw.decode('latin') if six.PY3 else raw
    # reverse the per-character shift applied by encode()
    plain_chars = [
        chr((ord(char) - ord(key[index % len(key)]) + 256) % 256)
        for index, char in enumerate(raw)
    ]
    return ''.join(plain_chars)
def wait(seconds):
    '''
    Causes the program to stop for a variable amount of time.

    Parameters:
        seconds (int) : approximate number of seconds the program should stop for
    '''
    # input validation
    if type(seconds) is not int:
        raise TypeError(colored("ERROR: Only pass ints to wait function.", "red"))
    # exactly 1 second for small values, otherwise a randomised duration
    # close to the request (seconds - 1 .. seconds + 2)
    duration = 1 if seconds < 2 else random.randint(seconds - 1, seconds + 2)
    time.sleep(duration)
| 5,965 | 1,727 |
# Sets up IOPORT IN/OUT references for the Program
#@category Instructions
# Before running this script, you should have created an OVERLAY memory
# space called IOMEM, starting at address 0, size 0x10000.
#
# Note: This script is rather sloppy and should probably be cleaned up.
from ghidra.program.model.lang.OperandType import SCALAR, REGISTER
from ghidra.program.model.symbol.RefType import READ, WRITE
from ghidra.program.model.symbol.SourceType import *
def add_io_reference(instruction, opIndex, refType):
    """Creates an I/O memory reference for the given scalar operand
    of the given instruction.

    instruction: the IN/OUT (or port-setting MOV) instruction to annotate
    opIndex: index of the operand holding the scalar port number
    refType: READ for IN instructions, WRITE for OUT instructions

    Uses the module-level refMgr/addrFactory globals defined below.
    """
    # delete all current references from the port address field
    for ref in refMgr.getReferences(instruction.address, opIndex):
        print " Deleting reference to address", ref.toAddress
        refMgr.delete(ref)
    # must use int() to avoid creating a long...
    # we only have 16 bits of I/O address space, and a long
    # would append an 'L' to the hex address
    ioAddr = int(instruction.getOpObjects(opIndex)[0].value)
    # resolve the port number inside the IOMEM overlay space (see file header)
    ioAddress = addrFactory.getAddress("IOMEM::" + hex(ioAddr))
    print " Adding", refType, "reference from", instruction.address, \
        "to address", ioAddress
    # from, to, type, sourceType, opIndex
    refMgr.addMemoryReference(instruction.address, ioAddress,
                              refType, ANALYSIS, opIndex)
refMgr = currentProgram.referenceManager
addrFactory = currentProgram.addressFactory

# True means min->max as opposed to listing the Program backwards...
instructions = currentProgram.listing.getInstructions(True)

# Walk every instruction; for IN/OUT with an immediate port, annotate it
# directly. Otherwise look at the preceding instruction for a MOV that
# loaded the port number into EDX.
for instruction in instructions:
    if instruction.mnemonicString == "IN":
        #print "IN @", instruction.address
        if (instruction.getOperandType(1) & SCALAR) != 0:
            add_io_reference(instruction, 1, READ)
        # no absolute address? okay, let's see if it was set above
        prevInstructionAddr = instruction.fallFrom
        if prevInstructionAddr is None:
            # could be the first instruction in a function, for example
            continue
        prevInstruction = getInstructionAt(prevInstructionAddr)
        if prevInstruction.mnemonicString == "MOV":
            # did we move an absolute address into EDX?
            if (prevInstruction.getOperandType(1) & SCALAR) != 0:
                # we moved a scalar...
                if (prevInstruction.getOperandType(0) & REGISTER) != 0:
                    # okay, we moved into a register...
                    register = prevInstruction.getOpObjects(0)[0]
                    if register.getBaseRegister().name == "EDX":
                        # hooray!
                        add_io_reference(prevInstruction, 1, READ)
    elif instruction.mnemonicString == "OUT":
        #print "OUT @", instruction.address
        if (instruction.getOperandType(0) & SCALAR) != 0:
            add_io_reference(instruction, 0, WRITE)
        # no absolute address? okay, let's see if it was set above
        prevInstructionAddr = instruction.fallFrom
        if prevInstructionAddr is None:
            # could be the first instruction in a function, for example
            continue
        prevInstruction = getInstructionAt(prevInstructionAddr)
        if prevInstruction.mnemonicString == "MOV":
            # did we move an absolute address into EDX?
            if (prevInstruction.getOperandType(1) & SCALAR) != 0:
                # we moved a scalar...
                if (prevInstruction.getOperandType(0) & REGISTER) != 0:
                    # okay, we moved into a register...
                    register = prevInstruction.getOpObjects(0)[0]
                    if register.getBaseRegister().name == "EDX":
                        # hooray!
                        add_io_reference(prevInstruction, 1, WRITE)
                    elif register.getBaseRegister().name == "EAX":
                        # d'oh, we were writing to EAX (the value to write to
                        # the port)! one more try...
                        # look one instruction further back for the MOV EDX, imm
                        try:
                            prevInstr = getInstructionAt(prevInstruction.fallFrom)
                            if prevInstr.mnemonicString == "MOV":
                                # did we move an absolute address into EDX?
                                if (prevInstr.getOperandType(1) & SCALAR) != 0:
                                    # we moved a scalar...
                                    if (prevInstr.getOperandType(0) &
                                            REGISTER) != 0:
                                        # okay, we moved into a register...
                                        register = prevInstr.getOpObjects(0)[0]
                                        if register.getBaseRegister().name == \
                                                "EDX":
                                            # hooray!
                                            add_io_reference(prevInstr, 1, WRITE)
                        except:
                            # oh well
                            pass
| 5,059 | 1,341 |
#LPHW ex13
# NOTE: Python 2 print-statement syntax throughout this script.
from sys import argv

# Unpack the script name plus exactly three command-line arguments;
# raises ValueError if a different number is supplied.
script, first, second, third = argv

print "The script is called:", script
print "First Var:", first
print "Second Var:", second
print "Third Var:", third
import pytest
from indy import error
from indy.anoncreds import issuer_create_credential
# noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_issuer_create_credential_works(wallet_handle, prepopulated_wallet):
    # Placeholder body — the test currently only exercises the fixtures;
    # no additional assertions are made here.
    pass
# noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_issuer_create_credential_works_for_credential_values_not_correspond_to_credential_req(
        wallet_handle, prepopulated_wallet, xyz_cred_values_json):
    # Take the credential offer and request from the fixture tuple; the
    # remaining items are not needed here.
    _, cred_offer, cred_req, _, _ = prepopulated_wallet

    # Credential values that do not correspond to the credential request must
    # be rejected by libindy with CommonInvalidStructure.
    with pytest.raises(error.CommonInvalidStructure):
        await issuer_create_credential(wallet_handle, cred_offer, cred_req, xyz_cred_values_json, None, None)
| 672 | 233 |
class WatermarksSettings(object):
    """Settings mixin that appends the 'watermarks' app (and its 'imagekit'
    dependency, when missing) to the inherited INSTALLED_APPS."""

    @property
    def INSTALLED_APPS(self):
        installed = super().INSTALLED_APPS + ['watermarks']
        if 'imagekit' not in installed:
            installed.append('imagekit')
        return installed


# Default settings class exposed by this module.
default = WatermarksSettings
| 259 | 84 |
from .primitive import Primitive
from .shape import Polygon
from .text import Text, Font
from .colour import Colour

# Public API of the package: the re-exported drawing primitives.
__all__ = ["Primitive",
           "Polygon", "Text", "Font", "Colour"
           ]
| 179 | 59 |
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
import os
import operator
from test.cli.depend_commands import DependTestBase
from packageship.application.core.depend import DispatchDepend
from packageship.application.serialize.validate import validate
from packageship.application.serialize.dependinfo import DependSchema
# Mapping from test-scenario name to the JSON fixture file (in the ./data
# folder) holding the request parameters and expected graph edges.
FILES = {
    "level-0-binary-installdep": "judy-judy-level-0-binary-installdep.json",
    "level-0-source-builddep": "judy-judy-level-0-source-builddep.json",
    "level-0-binary-builddep": "judy-gcc-level-0-binary-builddep.json",
    "selfdep-info": "judy-info-selfdep.json",
    "selfdep-info-binary": "judy-info-selfdep-1.json",
    "selfdep-info-self-build": "judy-judy-selfdep-2.json",
    "selfdep-info-subpack": "judy-judy-selfdep-3.json",
    "selfdep-info-subpack-binary": "judy-judy-selfdep-4.json",
    "selfdep-info-self-build-packtype": "judy-judy-selfdep-5.json",
    "selfdep-info-self-build-subpack": "judy-judy-selfdep-6.json",
    "bedep-packtype": "judy-judy-bedep-1.json",
    "bedep-subpack": "judy-judy-bedep-2.json",
    "bedep-search-type": "judy-judy-bedep-3.json",
    "bedep-search-type-packtype": "judy-judy-bedep-4.json",
    "bedep-search-type-subpack": "judy-judy-bedep-5.json",
    "bedep-search-type-supack-packtype": "judy-judy-bedep-6.json"
}
class BaseGraph:
    """
    Shared helpers for the graph-dependency test cases below.
    """
    data_folder = os.path.join(os.path.dirname(__file__), "data")

    def _extract_edges_paramter(self, data):
        """
        Split a loaded fixture into its request parameters and expected edges.
        """
        return data.get("request"), data.get("edges")

    def get_depend_result(self, path):
        """
        Load a fixture and return the validated request plus expected edges.
        """
        fixture = self.read_file_content(path=os.path.join(
            self.data_folder, path))
        request_param, edges = self._extract_edges_paramter(data=fixture)
        validated_param, _ = validate(DependSchema, request_param, load=True)
        return validated_param, edges

    def _get_graph_data(self, request_param):
        # node_name/node_type select what to render and are not part of the
        # dependency query itself, so pop them before dispatching.
        node_name = request_param.pop('node_name')
        node_type = request_param.pop('node_type')
        depend = DispatchDepend.execute(**request_param)
        graph = depend.depend_info_graph(
            source=node_name, package_type=node_type)
        return graph["edges"]

    def _order_by(self, graph, key="sourceID"):
        """
        Sort edges by targetID, with ties broken by *key* (sort is stable).
        """
        by_secondary = sorted(graph, key=operator.itemgetter(key))
        return sorted(by_secondary, key=operator.itemgetter("targetID"))

    def _comparison_results(self, edges, request_param):
        actual_edges = self._get_graph_data(request_param=request_param)
        self.assertListEqual(self._order_by(actual_edges), self._order_by(edges))
class TestInstalldepGraph(DependTestBase, BaseGraph):
    """
    The installation depends on the graph test
    """
    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"

    # NOTE: the redundant setUp() that only called super() was removed;
    # unittest invokes the inherited DependTestBase.setUp automatically.

    def test_level_0_binary_installdep(self):
        """
        Install-dependency graph test for a binary package node
        """
        request_param, edges = self.get_depend_result(
            path=FILES["level-0-binary-installdep"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_level_0_source_installdep(self):
        """
        Install-dependency graph for a source node: expects an empty edge list
        """
        request_param, _ = validate(DependSchema, {
            "packagename": [
                "Judy"
            ],
            "depend_type": "installdep",
            "node_name": "Judy",
            "node_type": "source",
            "parameter": {
                "db_priority": [
                    "os-version"
                ]
            }
        }, load=True)
        self._comparison_results(edges=[], request_param=request_param)
class TestBuilddepGraph(DependTestBase, BaseGraph):
    """
    Compile the dependency graph test
    """
    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"
    source_file = "os-version-source.json"

    # NOTE: the redundant setUp() that only called super() was removed;
    # unittest invokes the inherited DependTestBase.setUp automatically.

    def test_level_0_binary_builddep(self):
        """
        Build-dependency graph test for a binary package node
        """
        request_param, edges = self.get_depend_result(
            path=FILES["level-0-binary-builddep"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_level_0_source_builddep(self):
        """
        Build-dependency graph test for a source package node
        """
        request_param, edges = self.get_depend_result(
            path=FILES["level-0-source-builddep"])
        self._comparison_results(edges=edges, request_param=request_param)
class TestSelfdepGraph(DependTestBase, BaseGraph):
    """
    Self dependent graph testing
    """
    binary_file = "os-version-binary.json"
    component_file = "os-version-binary-component.json"
    source_file = "os-version-source.json"
    package_source_file = "os-version-source-package.json"

    # NOTE: the redundant setUp() that only called super() was removed;
    # unittest invokes the inherited DependTestBase.setUp automatically.

    def test_selfdep_info(self):
        """
        Self dependent data testing
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_binary(self):
        """
        Self dependent binary packet data testing
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-binary"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_self_build(self):
        """
        Self dependent selfbuild to true data test
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-self-build"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_subpack(self):
        """
        Self dependent subpack to true data test
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_subpack_binary(self):
        """
        Self dependent binary package data test with subpack being true
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-subpack-binary"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_self_build_packtype(self):
        """
        Self dependent data test with selfbuild being true and packtype set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-self-build-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_selfdep_info_self_build_subpack(self):
        """
        Self dependent data test with both selfbuild and subpack being true
        """
        request_param, edges = self.get_depend_result(
            path=FILES["selfdep-info-self-build-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)
class TestBedepGraph(DependTestBase, BaseGraph):
    """
    Graph tests for the bedep fixtures (see FILES)
    """
    binary_file = "os-version-bedepend.json"
    source_file = "os-version-source-bedepend.json"

    # NOTE: the redundant setUp() that only called super() was removed;
    # unittest invokes the inherited DependTestBase.setUp automatically.

    def test_bedep_packtype(self):
        """
        Bedep graph test with the packtype parameter set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_subpack(self):
        """
        Bedep graph test with the subpack parameter set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type(self):
        """
        Bedep graph test with the search type parameter set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type_packtype(self):
        """
        Bedep graph test with search type and packtype parameters set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type_subpack(self):
        """
        Bedep graph test with search type and subpack parameters set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type-subpack"])
        self._comparison_results(edges=edges, request_param=request_param)

    def test_bedep_search_type_supack_packtype(self):
        """
        Bedep graph test with search type, subpack and packtype parameters set
        """
        request_param, edges = self.get_depend_result(
            path=FILES["bedep-search-type-supack-packtype"])
        self._comparison_results(edges=edges, request_param=request_param)
| 9,891 | 3,101 |
from output.models.ms_data.simple_type.st_e065_xsd.st_e065 import Root

# Re-export the generated Root model as this package's public API.
__all__ = [
    "Root",
]
| 98 | 46 |
import os
import random
import souvlaki as sv
class WordSource():
    """Supplies random words for name generation from fixed word lists."""

    def __init__(self, nouns, adjectives, prefixes):
        self.nouns = nouns
        self.adjectives = adjectives
        self.prefixes = prefixes

    @staticmethod
    def _pick(words):
        # Uniform random choice from a non-empty word list.
        return random.choice(words)

    def noun(self):
        """Return a random noun."""
        return self._pick(self.nouns)

    def adjective(self):
        """Return a random adjective."""
        return self._pick(self.adjectives)

    def prefix(self):
        """Return a random prefix."""
        return self._pick(self.prefixes)
def main():
    """Load the three word lists stored next to this module and print the
    generated rocket names."""
    base_dir = os.path.abspath(os.path.dirname(__file__))

    def load_words(filename):
        # Each word list is a plain text file with one entry per line.
        with open(os.path.join(base_dir, filename)) as handle:
            return handle.read().splitlines()

    source = WordSource(
        load_words('rocket_nouns.txt'),
        load_words('rocket_adjectives.txt'),
        load_words('rocket_prefixes.txt'),
    )
    # Ten names following the "$Adj $Adj $Preadj $Noun" template.
    for name in sv.generate('10 $Adj $Adj $Preadj $Noun', source):
        print(name)


if __name__ == "__main__":
    main()
| 1,066 | 381 |
from api.drivers.student import student_drivers
from api.schemas.student.request_schemas import student_request_schemas
from api.utils.exceptions import exceptions
async def update(
    request: student_request_schemas.StudentPersonalInfoSchema,
    authorization
):
    """Updates student's info.

    Raises UnauthorizedUser when the token does not belong to the student
    being updated; returns True when the driver reports success.
    """
    # Students may only update their own record.
    if authorization["token"] != request.student_id:
        raise exceptions.UnauthorizedUser(authorization["token"], "update info")

    # Persist the personal-info changes through the student driver.
    updated = await student_drivers.Student().update_general_info(request)
    if updated:
        return True
async def update_skill_info(request):
    # TODO: not implemented yet — skill-info updates are currently a stub.
    pass
| 716 | 195 |
## See the html directory in this repo
import re
def get_name_and_number(input):
    """Extract the organism name and protein count from a line of the form
    'Organism <name> has <count> ...'.

    :param input: the line to parse (parameter name kept for compatibility,
        although it shadows the builtin)
    :return: (name, count) tuple of strings, or None when the line does not match
    """
    # The literal words 'Organism'/'has' no longer waste capture groups, and
    # the None check uses the idiomatic 'is None' instead of '== None'.
    m = re.match(r"Organism (?P<name>.*) has (?P<count>\d+)", input)
    if m is None:
        return None
    return (m.group("name"), m.group("count"))
# Sample lines scraped from the overview HTML pages; the last entry has no
# trailing ',overview.*.html' suffix and still matches.
testary = [
    "Organism Enterococcus sp. 8G7_MSG3316 has 2625 proteins with network connections.,overview.1834191.html",
    "Organism Lachnoclostridium sp. YL32 has 4965 proteins with network connections.,overview.1834196.html",
    "Organism Hungateiclostridiaceae bacterium KB18 has 2724 proteins with network connections.,overview.1834198.html",
    "Organism Burkholderiales bacterium YL45 has 2039 proteins with network connections.,overview.1834205.html",
    "Organism Erysipelotrichaceae bacterium I46 has 3171 proteins with network connections.,overview.1834207.html",
    "Organism Frankia sp. BMG5.36 has 6664 proteins with network connections.,overview.1834512.html",
    "Organism Frankia asymbiotica has 5603 proteins with network connections.,overview.1834516.html",
    "Organism Fibrella sp. ES10-3-2-2 has 4013 proteins with network connections.,overview.1834519.html",
    "Organism Saccharothrix sp. CB00851 has 6645 proteins with network connections.,overview.1835005.html",
    "Organism Polynucleobacter duraquae has 1813 proteins with network connections.,overview.1835254.html",
    "Organism Penicillium arizonense has 6768 proteins with network connections.,overview.1835702.html",
    "Organism secondary endosymbiont of Trabutina mannipara has 231 proteins with network connections.,overview.11.html",
    "Organism Arthrobacter sp. OY3WO11 has 3385 proteins with network connections.,overview.1835723.html",
    "Organism Maribacter sp. T28 has 3118 proteins with network connections.,overview.1836467.html",
    "Organism Phytoplasma sp. Vc33 has 369 proteins with network connections.,overview.1836886.html",
    "Organism Shewanella colwelliana has 3467 proteins with network connections.",
]

# Smoke test: print the (name, count) tuple parsed from every sample line.
for i in testary:
    print(get_name_and_number(i))
| 2,074 | 802 |
import numpy as np
from gym.spaces import Box
from ..core import PhysicalSystem
from ..physical_systems import electric_motors as em, mechanical_loads as ml, converters as cv, \
voltage_supplies as vs, noise_generators as ng, solvers as sv
from ..utils import instantiate, set_state_array
class SCMLSystem(PhysicalSystem):
    """
    The SCML(Supply-Converter-Motor-Load)-System is used for the simulation of a technical setting consisting of these
    components as well as a noise generator and a solver for the electrical ODE of the motor and mechanical ODE of the
    load.
    """

    # Positions of named quantities within the state vector.
    OMEGA_IDX = 0
    TORQUE_IDX = 1
    # NOTE(review): these appear to be placeholders populated by concrete
    # subclasses — confirm against the subclass implementations.
    CURRENTS_IDX = []
    VOLTAGES_IDX = []
    U_SUP_IDX = -1
    @property
    def limits(self):
        """Absolute physical limits of each state variable (set in __init__)."""
        return self._limits

    @property
    def nominal_state(self):
        """Nominal value of each state variable (set in __init__)."""
        return self._nominal_state

    @property
    def supply(self):
        """
        The voltage supply instance in the physical system
        """
        return self._supply

    @property
    def converter(self):
        """
        The power electronic converter instance in the system
        """
        return self._converter

    @property
    def electrical_motor(self):
        """
        The electrical motor instance of the system
        """
        return self._electrical_motor

    @property
    def mechanical_load(self):
        """
        The mechanical load instance in the system
        """
        return self._mechanical_load
    def __init__(self,
                 converter,
                 motor,
                 load=None,
                 supply='IdealVoltageSupply',
                 ode_solver='euler', solver_kwargs=None,
                 noise_generator=None,
                 tau=1e-4, **kwargs):
        """
        Args:
            converter(PowerElectronicConverter): Converter for the physical system
            motor(ElectricMotor): Motor of the system
            load(MechanicalLoad): Mechanical Load of the System
            supply(VoltageSupply): Voltage Supply
            ode_solver(OdeSolver): Ode Solver to use in this setting
            solver_kwargs(dict): Special keyword arguments to be passed to the solver
            noise_generator(NoiseGenerator): Noise generator
            tau(float): discrete time step of the system
            kwargs(dict): Further arguments to pass to the modules while instantiation
        """
        # Instantiation order matters: the motor must exist before the supply
        # (its voltage limit is the default supply voltage) and before the
        # load (which needs the rotor inertia below).
        self._converter = instantiate(cv.PowerElectronicConverter, converter, tau=tau, **kwargs)
        self._electrical_motor = instantiate(em.ElectricMotor, motor, tau=tau, **kwargs)
        load = load or ml.PolynomialStaticLoad(tau=tau, **kwargs)
        self._mechanical_load = instantiate(ml.MechanicalLoad, load, tau=tau, **kwargs)
        # Supply voltage: explicit u_sup keyword wins, otherwise the motor's
        # voltage limit is used.
        if 'u_sup' in kwargs.keys():
            u_sup = kwargs['u_sup']
        else:
            u_sup = self._electrical_motor.limits['u']
        self._supply = instantiate(vs.VoltageSupply, supply, u_nominal=u_sup, tau=tau, **kwargs)
        self._noise_generator = noise_generator or ng.GaussianWhiteNoiseGenerator(tau=tau, **kwargs)
        # State names come from the concrete subclass (_build_state_names).
        state_names = self._build_state_names()
        self._noise_generator.set_state_names(state_names)
        solver_kwargs = solver_kwargs or {}
        self._ode_solver = instantiate(sv.OdeSolver, ode_solver, **solver_kwargs)
        self._ode_solver.set_system_equation(self._system_equation)
        self._mechanical_load.set_j_rotor(self._electrical_motor.motor_parameter['j_rotor'])
        self._t = 0
        self._set_indices()
        state_space = self._build_state_space(state_names)
        super().__init__(self._converter.action_space, state_space, state_names, tau)
        # Limits/nominal values are filled per state after the base class has
        # stored the state names and positions.
        self._limits = np.zeros_like(state_names, dtype=float)
        self._nominal_state = np.zeros_like(state_names, dtype=float)
        self._set_limits()
        self._set_nominal_state()
        self._noise_generator.set_signal_power_level(self._nominal_state)
def _set_limits(self):
"""
Method to set the physical limits from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_lim = self._electrical_motor.limits.get(state, np.inf)
mechanical_lim = self._mechanical_load.limits.get(state, np.inf)
self._limits[ind] = min(motor_lim, mechanical_lim)
self._limits[self._state_positions['u_sup']] = self.supply.u_nominal
def _set_nominal_state(self):
"""
Method to set the nominal values from the modules.
"""
for ind, state in enumerate(self._state_names):
motor_nom = self._electrical_motor.nominal_values.get(state, np.inf)
mechanical_nom = self._mechanical_load.nominal_values.get(state, np.inf)
self._nominal_state[ind] = min(motor_nom, mechanical_nom)
self._nominal_state[self._state_positions['u_sup']] = self.supply.u_nominal
    def _build_state_space(self, state_names):
        """
        Method to build the normalized state space (i.e. the maximum and minimum possible values for each state variable
        normalized by the limits).

        Args:
            state_names(list(str)): list of the names of each state.

        Raises:
            NotImplementedError: Always; concrete subclasses must override this.
        """
        raise NotImplementedError
    def _build_state_names(self):
        """
        Setting of the state names in the physical system.

        Raises:
            NotImplementedError: Always; concrete subclasses must override this.
        """
        raise NotImplementedError
def _set_indices(self):
"""
Setting of indices to faster access the arrays during integration.
"""
self._omega_ode_idx = self._mechanical_load.OMEGA_IDX
self._load_ode_idx = list(range(len(self._mechanical_load.state_names)))
self._ode_currents_idx = list(range(
self._load_ode_idx[-1] + 1, self._load_ode_idx[-1] + 1 + len(self._electrical_motor.CURRENTS)
))
self._motor_ode_idx = self._ode_currents_idx
    def simulate(self, action, *_, **__):
        # Docstring of superclass
        transformed_action = self._action_transformation(action)
        state = self._ode_solver.y
        # Motor input currents mapped back to converter coordinates.
        i_in = self._backward_transform(self._electrical_motor.i_in(state[self._ode_currents_idx]), state)
        switching_times = self._converter.set_action(transformed_action, self._t)
        i_sup = self._converter.i_sup(i_in)
        u_sup = self._supply.get_voltage(self._t, i_sup)
        # Integrate piecewise between the converter's switching instants,
        # refreshing the applied voltage after each sub-interval.
        for t in switching_times[:-1]:
            u_in = self._converter.convert(i_in, self._ode_solver.t)
            u_in = [u * u_sup for u in u_in]
            u_transformed = self._forward_transform(u_in, state)
            self._ode_solver.set_f_params(u_transformed)
            state = self._ode_solver.integrate(t)
            i_in = self._backward_transform(self._electrical_motor.i_in(state[self._ode_currents_idx]), state)
        # Final sub-interval up to the end of this sampling period (t + tau).
        u_in = self._converter.convert(i_in, self._ode_solver.t)
        u_in = [u * u_sup for u in u_in]
        u_transformed = self._forward_transform(u_in, state)
        self._ode_solver.set_f_params(u_transformed)
        ode_state = self._ode_solver.integrate(self._t + self._tau)
        self._t = self._ode_solver.t
        self._k += 1
        torque = self._electrical_motor.torque(ode_state[self._motor_ode_idx])
        noise = self._noise_generator.noise()
        # Normalize by the physical limits before returning.
        return (self._build_state(ode_state, torque, u_in, u_sup) + noise) / self._limits
def _system_equation(self, t, state, u_in, **__):
"""
Systems differential equation system.
It is a concatenation of the motors electrical ode system and the mechanical ode system.
Args:
t(float): Current systems time
state(ndarray(float)): Current systems ODE-State
u_in(list(float)): Input voltages from the converter
Returns:
ndarray(float): The derivatives of the ODE-State. Based on this, the Ode Solver calculates the next state.
"""
motor_derivative = self._electrical_motor.electrical_ode(
state[self._motor_ode_idx], u_in, state[self._omega_ode_idx]
)
torque = self._electrical_motor.torque(state[self._motor_ode_idx])
load_derivative = self._mechanical_load.mechanical_ode(t, state[self._load_ode_idx], torque)
return np.concatenate((load_derivative, motor_derivative))
def reset(self, *_):
"""
Reset all the systems modules to an initial state.
Returns:
The new state of the system.
"""
motor_state = self._electrical_motor.reset()
load_state = self._mechanical_load.reset()
state = np.concatenate((load_state, motor_state))
u_sup = self.supply.reset()
u_in = self.converter.reset()
u_in = [u * u_sup for u in u_in]
torque = self.electrical_motor.torque(motor_state)
noise = self._noise_generator.reset()
self._t = 0
self._k = 0
self._ode_solver.set_initial_value(state, self._t)
return (self._build_state(state, torque, u_in, u_sup) + noise) / self._limits
    def _forward_transform(self, quantities, motor_state):
        """
        Transformation to transform from the physical systems state to the ode-state

        Identity by default; coordinate-transforming subclasses override it.
        """
        return quantities
    def _backward_transform(self, quantities, motor_state):
        """
        Transformation to transform from the ode-state to the systems-state

        Identity by default; coordinate-transforming subclasses override it.
        """
        return quantities
    def _build_state(self, motor_state, torque, u_in, u_sup):
        """
        Based on the ode-state and the further input quantities the new systems state is built.

        Raises:
            NotImplementedError: Always; concrete subclasses must override this.
        """
        raise NotImplementedError
    @staticmethod
    def _action_transformation(action):
        """
        Placeholder for the option to use different representations for the synchronous motor in the future.

        Currently returns the action unchanged.
        """
        return action
class DcMotorSystem(SCMLSystem):
    """
    SCML-System that can be used for all DC Motors.

    System state layout: [load states..., torque, currents..., voltages..., u_sup].
    """

    def _set_indices(self):
        # Docstring of superclass
        super()._set_indices()
        n_load = len(self.mechanical_load.state_names)
        n_currents = len(self._electrical_motor.CURRENTS)
        n_voltages = len(self._electrical_motor.VOLTAGES)
        self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
        self.TORQUE_IDX = n_load
        first_current = self.TORQUE_IDX + 1
        self.CURRENTS_IDX = list(range(first_current, first_current + n_currents))
        first_voltage = first_current + n_currents
        self.VOLTAGES_IDX = list(range(first_voltage, first_voltage + n_voltages))
        self.U_SUP_IDX = first_voltage + n_voltages

    def _build_state_names(self):
        # Docstring of superclass
        names = list(self._mechanical_load.state_names)
        names.append('torque')
        names.extend(self._electrical_motor.CURRENTS)
        names.extend(self._electrical_motor.VOLTAGES)
        names.append('u_sup')
        return names

    def _build_state_space(self, state_names):
        # Docstring of superclass
        low, high = self._electrical_motor.get_state_space(self._converter.currents, self._converter.voltages)
        low_mechanical, high_mechanical = self._mechanical_load.get_state_space((low['omega'], high['omega']))
        low.update(low_mechanical)
        high.update(high_mechanical)
        supply_low = self._supply.supply_range[0]
        supply_high = self._supply.supply_range[1]
        high['u_sup'] = supply_high / self._supply.u_nominal
        # A constant supply gets a lower bound of 0 instead of its fixed value.
        low['u_sup'] = supply_low / self._supply.u_nominal if supply_low != supply_high else 0
        return Box(set_state_array(low, state_names), set_state_array(high, state_names))

    def _build_state(self, motor_state, torque, u_in, u_sup):
        # Docstring of superclass
        n_load = len(self._mechanical_load.state_names)
        state = np.zeros_like(self.state_names, dtype=float)
        state[:n_load] = motor_state[:n_load]
        state[self.TORQUE_IDX] = torque
        state[self.CURRENTS_IDX] = motor_state[self._electrical_motor.CURRENTS_IDX]
        state[self.VOLTAGES_IDX] = u_in
        state[self.U_SUP_IDX] = u_sup
        return state
class SynchronousMotorSystem(SCMLSystem):
    """
    SCML-System that can be used with all Synchronous Motors
    """
    def _build_state_space(self, state_names):
        # Docstring of superclass
        # All normalized states live in [-1, 1]; the supply voltage is non-negative.
        low = -1 * np.ones_like(state_names, dtype=float)
        low[self.U_SUP_IDX] = 0.0
        high = np.ones_like(state_names, dtype=float)
        return Box(low, high)

    def _build_state_names(self):
        # Docstring of superclass
        return (
            self._mechanical_load.state_names
            + ['torque']
            + ['i_a'] + ['i_b'] + ['i_c']
            + ['u_a'] + ['u_b'] + ['u_c']
            + ['epsilon']
            + ['u_sup']
        )

    def _set_indices(self):
        # Docstring of superclass
        super()._set_indices()
        # Append the rotor angle epsilon to the motor ODE state. CAUTION: after
        # super()._set_indices(), _motor_ode_idx and _ode_currents_idx are the
        # SAME list object, so this in-place `+=` extends both before
        # _ode_currents_idx is rebound to the currents-only slice below.
        self._motor_ode_idx += [self._motor_ode_idx[-1] + 1]
        self._ode_currents_idx = self._motor_ode_idx[:-1]
        self.OMEGA_IDX = self.mechanical_load.OMEGA_IDX
        self.TORQUE_IDX = len(self.mechanical_load.state_names)
        currents_lower = self.TORQUE_IDX + 1
        currents_upper = currents_lower + 3  # three phase currents i_a, i_b, i_c
        self.CURRENTS_IDX = list(range(currents_lower, currents_upper))
        voltages_lower = currents_upper
        voltages_upper = voltages_lower + 3  # three phase voltages u_a, u_b, u_c
        self.VOLTAGES_IDX = list(range(voltages_lower, voltages_upper))
        self.EPSILON_IDX = voltages_upper
        self.U_SUP_IDX = self.EPSILON_IDX + 1
        self._ode_epsilon_idx = self._motor_ode_idx[-1]

    def _forward_transform(self, quantities, motor_state):
        # Docstring of superclass
        # abc phase quantities -> rotor reference frame; the result is reversed
        # — presumably to match the motor model's internal ordering. TODO confirm.
        motor_quantity = self._electrical_motor.q_inv(
            self._electrical_motor.t_23(quantities), motor_state[self._ode_epsilon_idx]
        )
        return motor_quantity[::-1]

    def _backward_transform(self, quantities, motor_state):
        # Docstring of superclass
        # Inverse of _forward_transform: rotor frame back to abc phase values.
        return list(self._electrical_motor.t_32(
            self._electrical_motor.q(quantities[::-1], motor_state[self._ode_epsilon_idx])
        ))

    def _build_state(self, motor_state, torque, u_in, u_sup):
        # Docstring of superclass
        mechanical_state = motor_state[self._load_ode_idx]
        currents = list(
            self._backward_transform(motor_state[self._ode_currents_idx], motor_state)
        )
        # Wrap the rotor angle into (-pi, pi].
        epsilon = motor_state[self._ode_epsilon_idx] % (2 * np.pi)
        if epsilon > np.pi:
            epsilon -= 2 * np.pi
        return np.array(
            list(mechanical_state)
            + [torque]
            + currents
            + u_in
            + [epsilon]
            + [u_sup]
        )
| 15,070 | 4,935 |
class MultiDict(dict):
    """A dict whose values are always lists; assigning to an existing key
    appends instead of overwriting."""

    def __setitem__(self, key, value):
        if key not in self:
            # First assignment: store lists as-is, wrap scalars in a list.
            merged = value if isinstance(value, list) else [value]
        else:
            current = self[key]
            if isinstance(current, list):
                if isinstance(value, list):
                    merged = current + value
                else:
                    # Append in place, preserving the identity of the stored list.
                    current.append(value)
                    merged = current
            else:
                merged = [current] + value if isinstance(value, list) else [current, value]
        super().__setitem__(key, merged)
| 648 | 164 |
#Essential Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Dashboard Modules
import dash
import dash_table
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output
import dash_table
import plotly.express as px
import datetime as dt
from io import BytesIO
from wordcloud import WordCloud
from collections import deque
import pybase64
import os
import json
import sqlite3
from unidecode import unidecode
import time
from application import app
# app=dash.Dash(__name__,external_stylesheets=[dbc.themes.BOOTSTRAP])
# Dashboard card: a header plus a tweets table that is re-rendered every
# second by the `recent-table-update` interval via the callback below.
layout=dbc.Card(
    [
        dbc.CardBody(
            [
                html.H4("Expert Insights", className="card-title"),
                html.Div(html.Div(id="recent-tweets-table")),
                dcc.Interval(
                    id='recent-table-update',
                    interval=1*1000  # milliseconds
                )
            ]
        ),
    ],
)
def generate_table(dataframe, max_rows=10):
    """Render the last *max_rows* rows of *dataframe* as a Bootstrap table.

    Fix: the previous implementation hard-coded ``tail(10)`` and silently
    ignored the ``max_rows`` argument.
    """
    return dbc.Table.from_dataframe(dataframe.tail(max_rows), bordered=True, striped=True)
@app.callback(Output('recent-tweets-table', 'children'),
              [Input('recent-table-update', 'n_intervals')])
def update_recent_tweets(input_data):
    """Reload the `discussion` table from SQLite and re-render it.

    Args:
        input_data: tick count from the `recent-table-update` interval (unused).

    Returns:
        A Bootstrap table of the most recent rows.
    """
    # Open per call and always close: the old version leaked a connection on
    # every interval tick (i.e. once per second).
    conn = sqlite3.connect('twitter.db', check_same_thread=False)
    try:
        df = pd.read_sql("SELECT * FROM discussion", conn)
    finally:
        conn.close()
    return generate_table(df, max_rows=10)
#
# if __name__=="__main__":
# app.run_server(debug=False)
| 1,538 | 480 |
from fontbakery.callable import check
from fontbakery.status import WARN, PASS
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@check(
    id = 'com.google.fonts/check/dsig',
    rationale = """
    Microsoft Office 2013 and below products expect fonts to have a digital signature declared in a DSIG table in order to implement OpenType features. The EOL date for Microsoft Office 2013 products is 4/11/2023. This issue does not impact Microsoft Office 2016 and above products.
    As we approach the EOL date, it is now considered better to completely remove the table.
    But if you still want your font to support OpenType features on Office 2013, then you may find it handy to add a fake signature on a placeholder DSIG table by running one of the helper scripts provided at https://github.com/googlefonts/gftools
    Reference: https://github.com/googlefonts/fontbakery/issues/1845
    """,
    proposal = ['legacy:check/045',
                'https://github.com/googlefonts/fontbakery/issues/3398']
)
def com_google_fonts_check_dsig(ttFont):
    """Does the font have a DSIG table?"""
    if "DSIG" in ttFont:
        # Presence of the table triggers a warning; its removal is recommended.
        warning = Message("found-DSIG",
                          "This font has a digital signature (DSIG table) which"
                          " is only required - even if only a placeholder"
                          " - on old programs like MS Office 2013 in order to"
                          " work properly.\n"
                          "The current recommendation is to completely"
                          " remove the DSIG table.")
        yield WARN, warning
    else:
        yield PASS, "ok"
| 1,768 | 493 |
#!/usr/bin/env python3
from slackbot.bot import Bot
import logging
import socket
import uuid
import os
# If you are running multiple homeops bots on the same server enable
# unique device names.
unique_device_names = False
if unique_device_names:
    # e.g. "myhost-6f1c..." so each bot instance is distinguishable.
    os.environ['DEVICE_NAME'] = '{}-{}'.format(socket.gethostname(),
                                               uuid.uuid4())
else:
    os.environ['DEVICE_NAME'] = socket.gethostname()
# Enabling debug will reveal which Troupe device is responding to a request
os.environ['DEBUG'] = 'False'
os.environ['TARGET_DEVICE'] = 'all'
# Paths below are resolved relative to this script's directory.
os.environ['BASE_DIR'] = os.path.join(
    os.path.dirname(os.path.realpath(__file__)))
os.environ['ONLAUNCH'] = os.environ['BASE_DIR'] + '/.onlaunch'
os.environ['NEXTLAUNCH'] = os.environ['BASE_DIR'] + '/.nextlaunch'
# NOTE(review): API_TOKEN is set to an empty string here — presumably
# injected elsewhere (or meant to be filled in); confirm before deploying.
os.environ['API_TOKEN'] = ''
logging.basicConfig()
def main():
    """Instantiate the Slack bot and enter its (blocking) event loop."""
    Bot().run()


if __name__ == "__main__":
    main()
| 961 | 317 |
# Classe Circulo: Crie uma classe que modele uma bola:
#
# Atributos: Cor, raio, material
# Métodos: trocaCor, mostraCor, area (math.pi*raio**2)
# para acessar o pi:
# import math
# math.pi
# Classe Quadrado: Crie uma classe que modele um quadrado:
#
# Atributos: Tamanho do lado
# Métodos: Mudar valor do Lado, Retornar valor do Lado e calcular Área;
# area = lateral*lateral
# Classe Retangulo: Crie uma classe que modele um retangulo:
#
# Atributos: LadoA, LadoB (ou Comprimento e Largura, ou Base e Altura, a escolher)
# Métodos: Mudar valor dos lados, Retornar valor dos lados, calcular Área;
# area comprimento*largura
# crie testes para validar cada uma destas classes!
class Circulo:
    """Model a ball/circle with a color, a radius and a material.

    Fix: the three methods were empty ``pass`` stubs; they are now implemented
    as required by the assignment (change color, show color, compute area).
    """

    def __init__(self, cor, raio, material):
        self.cor = cor          # color
        self.raio = raio        # radius
        self.material = material

    def set_Cor(self, cor):
        """Change the ball's color (trocaCor)."""
        self.cor = cor

    def get_Cor(self):
        """Return the current color (mostraCor)."""
        return self.cor

    def calculo_area(self):
        """Return the area: math.pi * raio ** 2."""
        from math import pi  # local import: the file does not import math at top level
        return pi * self.raio ** 2
| 943 | 341 |
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
# Fail fast when executed directly: these tests are only runnable through the
# main test_jit.py entry point.
if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"
        "\tpython test/test_jit.py TESTNAME\n\n"
        "instead."
    )
# Tests for IValue-related Tensor methods (e.g. qscheme) under TorchScript
class TestIValue(JitTestCase):
    """Scripting tests for IValue-related Tensor methods."""

    def test_qscheme_ivalue(self):
        def qscheme(t: torch.Tensor):
            return t.qscheme()

        inp = torch.rand(2, 2)
        # checkScript compares eager vs. scripted execution of `qscheme`.
        self.checkScript(qscheme, (inp,))
| 713 | 264 |
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.io.wavfile import read as read_wav
from scipy.io.wavfile import write as write_wav
from scipy.fftpack import fft, fftshift
from scipy.signal import lfilter, firwin
def filt(sig, Fc=0.5, NFIR=101):
    """Low-pass filter *sig* with an NFIR-tap Blackman-Harris windowed FIR.

    Fc is the normalized cutoff frequency passed straight to firwin.
    """
    taps = firwin(NFIR, Fc, window=('blackmanharris'))
    return lfilter(taps, 1.0, sig)
def interpolate(sig, INTERP_ORDER=10, NFIR=101):
    """Upsample *sig* by INTERP_ORDER: zero-stuff, low-pass, compensate gain."""
    upsampled = np.zeros(int(len(sig) * INTERP_ORDER))
    upsampled[::INTERP_ORDER] = sig
    # Gain factor compensates the energy spread over the inserted zeros.
    return INTERP_ORDER * filt(upsampled, 1 / INTERP_ORDER, NFIR)
def decimate(sig, DECIM_ORDER=10, NFIR=101):
    """Downsample *sig* by DECIM_ORDER after anti-alias low-pass filtering."""
    return filt(sig, 1 / DECIM_ORDER, NFIR)[::DECIM_ORDER]
def plot_spectrum(sig, Fs=100e3, NFFT=8192, title='Spectrum'):
    """Plot the power spectrum of *sig* in dBm on a new figure."""
    freqs = np.linspace(-int(NFFT/2), int(NFFT/2)-1, int(NFFT)) * Fs / NFFT
    spectrum_db = 20 * np.log10(np.abs(fftshift(fft(sig, NFFT) / NFFT)))
    plt.figure()
    plt.plot(freqs, spectrum_db)
    plt.ylabel('Power (dBm)')
    plt.xlabel('Frequency (Hz)')
    plt.title(title)
    plt.ylim([-150, 0])
    plt.grid(True)
def fm_demod(sig, Fs=100e3, FM_dev=2e3):
    """Phase-discriminator FM demodulation of a complex baseband signal.

    The instantaneous phase difference between consecutive samples is unwrapped
    into (-pi, pi] and scaled by Fs / (2*pi*FM_dev).
    """
    demodulated = np.zeros(len(sig))
    prev_angle = 0.0
    prev_sample = 0.0
    for idx, value in enumerate(sig):
        angle = math.atan2(np.imag(value), np.real(value))
        delta = angle - prev_angle
        # Unwrap the phase step into (-pi, pi].
        if delta > math.pi:
            delta -= 2 * math.pi
        elif delta < -math.pi:
            delta += 2 * math.pi
        prev_angle = angle
        sample = delta * Fs / (2 * math.pi * FM_dev)
        if abs(sample) >= 1:
            # some unexpectedly big angle change happened
            sample = prev_sample
        prev_sample = sample
        demodulated[idx] = sample
    return demodulated
def fm_mod(sig, Fc=5e3, Fs=100e3, FM_dev=2e3):
    """FM-modulate *sig* onto a carrier at Fc.

    The message is first normalized to [-1, 1], then its value drives the
    instantaneous frequency deviation around Fc.
    """
    length = len(sig)
    message = np.array(sig)
    message = message / np.amax(np.absolute(message))  # normalize to [-1, 1]
    inst_freq = Fc + message * FM_dev
    modulated = np.zeros(length)
    phase = 0
    # Sequential phase accumulation (kept as a loop to preserve exact float order).
    for idx in range(length):
        phase = phase + 2 * np.pi * inst_freq[idx] / Fs
        modulated[idx] = np.cos(phase)
    return modulated
def create_harmonic(Fc=1e3, Fs=20e3, Amp=1, N=2e1):
    """Generate N samples of Amp*cos(2*pi*Fc/Fs*n); returns (Fs, t, signal)."""
    sample_idx = np.arange(N)
    t = sample_idx / Fs
    sig = Amp * np.cos((2 * np.pi * Fc / Fs) * sample_idx)
    return Fs, t, sig
def create_complex_exponent(Fc=1e3, Fs=20e3, Amp=1, N=2e1):
    """Generate N samples of Amp * exp(j*2*pi*Fc/Fs*n)."""
    sample_idx = np.arange(N)
    phase = (2 * np.pi * Fc / Fs) * sample_idx
    # Real and imaginary parts built separately, exactly as cos + j*sin.
    return Amp * np.cos(phase) + 1j * (Amp * np.sin(phase))
def create_from_wav(file_name, N=float('inf')):
    """Load up to N samples from a WAV file; returns (Fs, time_values, samples)."""
    Fs, samples = read_wav(file_name)
    count = min(N, len(samples))
    if len(samples.shape) > 1:
        samples = samples[0:int(count), 0]  # Selecting one audio channel
    else:
        samples = samples[0:int(count)]
    time_values = np.arange(count) / Fs
    return Fs, time_values, samples
| 3,457 | 1,479 |
import numpy as np
import matplotlib.pyplot as plt
import sys, os
sys.path.append('../../galference/utils/')
import tools
import diagnostics as dg
def callback(model, ic, bs, losses=None):
    """Diagnostic figure: truth / sample / difference maps plus cross-correlation
    r_c, transfer function t_f and (optionally) the loss history.

    Args:
        model: object exposing a `sample_linear` attribute yielding fresh samples.
        ic: reference initial-condition field; indexed as ic[0] — assumes a
            leading batch axis of 1 (TODO confirm).
        bs: box size forwarded to tools.power.
        losses: optional sequence of (logl, logp, logq, elbo) entries.
    """
    fig, ax = plt.subplots(1, 6, figsize=(15, 3))
    im = ax[0].imshow(ic[0].sum(axis=0))
    plt.colorbar(im, ax=ax[0])
    ax[0].set_title('Truth')
    #
    sample = model.sample_linear
    im = ax[1].imshow((sample).numpy()[0].sum(axis=0))
    plt.colorbar(im, ax=ax[1])
    ax[1].set_title('Sample')
    #
    diff = sample - ic
    im = ax[2].imshow(diff.numpy()[0].sum(axis=0))
    plt.colorbar(im, ax=ax[2])
    ax[2].set_title('Differnce')
    #2pt functions
    k, p0 = tools.power(ic[0]+1, boxsize=bs)
    ps, rc, ratios = [], [], []
    # Accumulate 2-pt statistics over 20 fresh samples from the model.
    for i in range(20):
        sample = model.sample_linear
        i0 = (sample).numpy()[0]
        k, p1 = tools.power(i0+1, boxsize=bs)
        k, p1x = tools.power(i0+1, ic[0]+1, boxsize=bs)
        ps.append([p1, p1x])
        rc.append(p1x/(p1*p0)**0.5)
        ratios.append((p1/p0)**0.5)
    rc = np.array(rc)
    ratios = np.array(ratios)
    ax = ax[3:]
    ax[0].plot(k, rc.T, 'C1', alpha=0.2)
    ax[0].plot(k, rc.mean(axis=0))
    ax[0].semilogx()
    ax[0].set_ylim(0., 1.05)
    ax[0].set_title('$r_c$', fontsize=12)
    ax[1].plot(k, ratios.T, 'C1', alpha=0.2)
    ax[1].plot(k, ratios.mean(axis=0))
    ax[1].semilogx()
    ax[1].set_ylim(0.8, 1.2)
    ax[1].set_title('$t_f$', fontsize=12)
    # if losses is not None: ax[2].plot(losses)
    if losses is not None:
        # Negate so all curves are positive (-log terms) for the log-log plot.
        losses = -1. * np.array(losses)
        ax[2].plot(losses[:, 0], label='-logl')
        ax[2].plot(losses[:, 1], label='-logp')
        ax[2].plot(losses[:, 2], label='-logq')
        ax[2].plot(losses[:, 3], 'k', label='-elbo')
        ax[2].loglog()
        ax[2].set_title('-ELBO', fontsize=12)
        ax[2].legend()
    for axis in ax: axis.grid(which='both')
    plt.tight_layout()
    return fig
def callback_fvi(model, ic, bs, losses=None, zoomin=True):
    """Variant of `callback` for forward-VI runs: plots -logq instead of the
    ELBO decomposition; `zoomin=False` widens the t_f y-axis.

    Args mirror `callback`; `losses` is plotted directly (assumed -logq values).
    """
    fig, ax = plt.subplots(1, 6, figsize=(15, 3))
    im = ax[0].imshow(ic[0].sum(axis=0))
    plt.colorbar(im, ax=ax[0])
    ax[0].set_title('Truth')
    #
    sample = model.sample_linear
    im = ax[1].imshow((sample).numpy()[0].sum(axis=0))
    plt.colorbar(im, ax=ax[1])
    ax[1].set_title('Sample')
    #
    diff = sample - ic
    im = ax[2].imshow(diff.numpy()[0].sum(axis=0))
    plt.colorbar(im, ax=ax[2])
    ax[2].set_title('Differnce')
    #2pt functions
    k, p0 = tools.power(ic[0]+1, boxsize=bs)
    ps, rc, ratios = [], [], []
    # Accumulate 2-pt statistics over 20 fresh samples from the model.
    for i in range(20):
        sample = model.sample_linear
        i0 = (sample).numpy()[0]
        k, p1 = tools.power(i0+1, boxsize=bs)
        k, p1x = tools.power(i0+1, ic[0]+1, boxsize=bs)
        ps.append([p1, p1x])
        rc.append(p1x/(p1*p0)**0.5)
        ratios.append((p1/p0)**0.5)
    rc = np.array(rc)
    ratios = np.array(ratios)
    ax = ax[3:]
    ax[0].plot(k, rc.T, 'C1', alpha=0.2)
    ax[0].plot(k, rc.mean(axis=0))
    ax[0].semilogx()
    ax[0].set_ylim(0., 1.05)
    ax[0].set_title('$r_c$', fontsize=12)
    ax[1].plot(k, ratios.T, 'C1', alpha=0.2)
    ax[1].plot(k, ratios.mean(axis=0))
    ax[1].semilogx()
    if zoomin: ax[1].set_ylim(0.8, 1.2)
    else: ax[1].set_ylim(0.0, 1.5)
    ax[1].set_title('$t_f$', fontsize=12)
    ax[2].plot(losses)
    ax[2].loglog()
    ax[2].set_title('-logq', fontsize=12)
    # NOTE(review): nothing on this axis is plotted with a label, so legend()
    # emits a "no handles" warning — confirm whether it can be dropped.
    ax[2].legend()
    for axis in ax: axis.grid(which='both')
    plt.tight_layout()
    return fig
def callback_sampling(samples, ic, bs):
    """Diagnostic figure for a set of posterior samples: truth/sample/difference
    maps plus per-sample r_c, t_f and power-spectrum curves.

    Fix: removed a leftover debug ``print(diff.shape)``.

    Args:
        samples: sequence of tensors with a ``.numpy()`` method; each either a
            single 3D field or a batch of 4D fields.
        ic: reference initial-condition field; indexed as ic[0] — assumes a
            leading batch axis of 1 (TODO confirm).
        bs: box size forwarded to tools.power.
    """
    fig, axar = plt.subplots(2, 3, figsize=(12, 8))
    ax = axar[0]
    im = ax[0].imshow(ic[0].sum(axis=0))
    plt.colorbar(im, ax=ax[0])
    ax[0].set_title('Truth')
    # Show one randomly chosen sample next to the truth.
    sample = samples[np.random.randint(len(samples))].numpy()
    im = ax[1].imshow((sample)[0].sum(axis=0))
    plt.colorbar(im, ax=ax[1])
    ax[1].set_title('Sample')
    #
    diff = sample - ic
    im = ax[2].imshow(diff[0].sum(axis=0))
    plt.colorbar(im, ax=ax[2])
    ax[2].set_title('Differnce')
    #2pt functions
    k, p0 = tools.power(ic[0]+1, boxsize=bs)
    ps, rc, ratios = [], [], []
    for i in range(len(samples)):
        sample = samples[i].numpy()
        if len(sample.shape) == 4:
            # Batched samples: accumulate statistics for every batch element.
            for j in range(sample.shape[0]):
                i0 = (sample)[j]
                k, p1 = tools.power(i0+1, boxsize=bs)
                k, p1x = tools.power(i0+1, ic[0]+1, boxsize=bs)
                ps.append([p1, p1x])
                rc.append(p1x/(p1*p0)**0.5)
                ratios.append((p1/p0)**0.5)
        elif len(sample.shape) == 3:
            i0 = sample.copy()
            k, p1 = tools.power(i0+1, boxsize=bs)
            k, p1x = tools.power(i0+1, ic[0]+1, boxsize=bs)
            ps.append([p1, p1x])
            rc.append(p1x/(p1*p0)**0.5)
            ratios.append((p1/p0)**0.5)
    rc = np.array(rc)
    ratios = np.array(ratios)
    ax = axar[1]
    ax[0].plot(k, rc.T, alpha=0.3)
    ax[0].plot(k, rc.mean(axis=0))
    ax[0].semilogx()
    ax[0].set_ylim(0., 1.05)
    ax[0].set_title('$r_c$', fontsize=12)
    ax[1].plot(k, ratios.T, alpha=0.3)
    ax[1].plot(k, ratios.mean(axis=0))
    ax[1].semilogx()
    ax[1].set_ylim(0.8, 1.2)
    ax[1].set_title('$t_f$', fontsize=12)
    ax[2].plot(k, p0, 'k', alpha=0.8)
    for ip in ps:
        ax[2].plot(k, ip[0], alpha=0.3)
    ax[2].loglog()
    for axis in ax: axis.grid(which='both')
    plt.tight_layout()
    return fig
def datafig(ic, fin, data, bs, dnoise, shotnoise=None):
    """2x2 overview figure for an LPT setup: IC, final and data fields plus
    their power spectra and noise levels.

    Args:
        ic, fin, data: fields indexed as field[0] — assumes a leading batch
            axis of 1 (TODO confirm); ic.shape[-1] gives the mesh size nc.
        bs: box size forwarded to tools.power.
        dnoise: std-dev of Gaussian data noise, or None to skip that spectrum.
        shotnoise: optional shot-noise level drawn as a dashed line.
    """
    nc = ic.shape[-1]
    k, pic = tools.power(ic[0], boxsize=bs)
    k, pf = tools.power(fin[0], boxsize=bs)
    k, pd = tools.power(data[0], boxsize=bs)
    k, pn = tools.power(1+data[0]-fin[0], boxsize=bs)
    if dnoise is not None:
        # NOTE(review): pn2 is computed but never plotted — confirm intent.
        k, pn2 = tools.power(1+np.random.normal(0, dnoise, nc**3).reshape(fin.shape)[0], boxsize=bs)
    # plt.plot(k, pd/pf)
    # plt.semilogx()
    fig, axar = plt.subplots(2, 2, figsize=(8, 8))
    im = axar[0, 0].imshow(ic[0].sum(axis=0))
    plt.colorbar(im, ax=axar[0, 0])
    axar[0, 0].set_title('IC')
    im = axar[0, 1].imshow(fin[0].sum(axis=0))
    plt.colorbar(im, ax=axar[0, 1])
    axar[0, 1].set_title('Final')
    im = axar[1, 0].imshow(data[0].sum(axis=0))
    plt.colorbar(im, ax=axar[1, 0])
    axar[1, 0].set_title('Data')
    ax = axar[1]
    ax[1].plot(k, pic, label='IC')
    ax[1].plot(k, pf, label='Final')
    ax[1].plot(k, pd, label='Data')
    ax[1].plot(k, pn, label='Noise')
    # Horizontal reference line at the cell volume.
    ax[1].axhline((bs**3/nc**3))
    if shotnoise is not None: ax[1].axhline(shotnoise, color='k', ls="--")
    ax[1].loglog()
    ax[1].grid(which='both')
    ax[1].legend()
    ax[1].set_xlabel('k (h/Mpc)')
    ax[1].set_ylabel('P(k)')
    plt.suptitle('LPT: Boxsize=%d, Nmesh=%d'%(bs, nc))
    return fig
| 6,829 | 3,127 |
"""
TODO: Move the functions to the correct location
"""
import logging as log
import os
# Root directory of the audio dataset on disk.
DATASET_LOCATION = "/home/mickael/Documents/programming/dj-tracks-switch-points/"
# Where cached annotations are stored, relative to this module.
CACHE_LOCATION = "../annotations/"
CACHE_LEVEL = 0
LOG_LEVEL = log.DEBUG
log.getLogger().setLevel(LOG_LEVEL)
def k_fold_split(X, Y, k=10, shuffleDataset=True):
    """
    Split both list X and Y into k folds
    random will shuffle the data before, so two calls would not return the same folds
    ex: print(k_fold_split(["A", "B", "C", "D", "E", "F", "G"], ["a", "b", "c", "d", "e", "f", "g"], k=3, shuffleDataset=0))
    [[('A', 'a'), ('B', 'b')], [('C', 'c'), ('D', 'd')], [('E', 'e'), ('F', 'f'), ('G', 'g')]]
    """
    from random import shuffle
    assert len(X) == len(Y) and k <= len(X)

    def split_evenly(seq, num):
        # Float stepping reproduces the original chunk boundaries exactly.
        step = len(seq) / float(num)
        chunks = []
        cursor = 0.0
        while cursor < len(seq):
            chunks.append(seq[int(cursor):int(cursor + step)])
            cursor += step
        return chunks

    order = list(range(len(X)))
    if shuffleDataset:
        shuffle(order)
    return [[(X[i], Y[i]) for i in fold] for fold in split_evenly(order, k)]
def _getFilename(path):
file, ext = os.path.splitext(os.path.basename(path))
if ext != ".mp3" and ext != ".jams": # in case that we give a file without ext but still contain a "." in the name
return file + ext
else:
return file
def _getFileType(path):
"""
return the extension of the file based on the path
i.e.: 'MP3' or 'WAVE'
"""
ext = path.split("/")[-1].split(".")[-1]
if ext == "mp3":
return 'MP3'
if ext == "wav":
return "WAVE"
if ext == "jams":
return "JAMS"
else:
return ext
def getFolderFiles(directory):
    """
    returns the paths located in this folder

    Only entries whose type is in the known list are kept, in sorted order.
    """
    known_types = {"MP3", "WAVE", "mp4", "m4a", "JAMS"}
    return [
        os.path.join(directory, entry)
        for entry in sorted(os.listdir(directory))
        if _getFileType(entry) in known_types
    ]
def GET_PAOLO_FULL(checkCompletude=True, sets=["paolo1", "paolo2", "paolo3", "paolo4", "paolo5", "paolo6", "paolo7"]):
    """
    return the path of the audio files (.mp3) and the anotation files (.jams)
    if checkCompletude si True, erase the tracks without annotations and erase annotation without tracks

    NOTE: the mutable default for `sets` is never mutated here, so it is safe,
    but callers should still prefer passing their own list.
    """
    tracksPaths = []
    # `subset` instead of `set`: the previous loop variable shadowed the builtin.
    for subset in sets:
        tracksPaths += getFolderFiles(DATASET_LOCATION + str(subset) + "/audio/")
    gtTrackPaths = getFolderFiles(DATASET_LOCATION + "clean/annotations/")
    if checkCompletude:
        tracksPaths, gtTrackPaths = CHECK_COMPLETUDE(tracksPaths, gtTrackPaths)
    return tracksPaths, gtTrackPaths
def CHECK_COMPLETUDE(tracksPaths, gtTrackPaths):
    """
    Check if all the files are annotated and each annotation has a file

    Returns both lists sorted by basename and restricted to the basenames
    present on both sides.
    """
    tracksPaths = sorted(tracksPaths, key=_getFilename)
    gtTrackPaths = sorted(gtTrackPaths, key=_getFilename)
    # Precomputed name sets: the previous version rebuilt a list comprehension
    # for every membership test (quadratic in the number of files).
    gt_names = {_getFilename(t) for t in gtTrackPaths}
    track_names = {_getFilename(t) for t in tracksPaths}
    newTracksPaths = [track for track in tracksPaths if _getFilename(track) in gt_names]
    newgtTrackPaths = [track for track in gtTrackPaths if _getFilename(track) in track_names]
    if len(newTracksPaths) != len(tracksPaths):
        log.info(("Becareful all the tracks are not annotated", len(newTracksPaths), len(tracksPaths)))
        log.debug("\n".join(track for track in tracksPaths if _getFilename(track) not in gt_names))
        log.debug("\n".join(track for track in gtTrackPaths if _getFilename(track) not in track_names))
    return newTracksPaths, newgtTrackPaths
| 3,769 | 1,345 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Update : 2020-09-05 09:46:01
# @Author : Chenghao Mou (mouchenghao@gmail.com)
"""Sequence taggers."""
from old_fashioned_nlp.tagging.token import CharTfidfTagger
__all__ = ['CharTfidfTagger'] | 255 | 117 |
import graphene
from django.conf import settings
from django.core.exceptions import ValidationError
from ...rider import models as rider_models
from ...subshop import models as subshop_models
from ...account import models as account_models
from ..account.validation import isPhoneNumber, isCnic
from ..core.mutations import ModelMutation
from ..core.types.common import RiderError
from ..shop.types import SubShop
from .types import Rider
from .utils import riderGroupAdd
class RiderCreateInput(graphene.InputObjectType):
    """GraphQL input payload for creating a rider; all fields are required."""
    name = graphene.String(description="Rider Name", required=True)
    city = graphene.String(description="City of Rider", required=True)
    cnic = graphene.String(description="Rider cnic", required=True)
    shopid = graphene.String(description="Enter shop id", required=True)
    phone = graphene.String(description="Rider Phone Number", required=True)
    password = graphene.String(description="Rider Password", required=True)
class RiderCreate(ModelMutation):
    """Mutation that creates a Rider row and links (or creates) the backing
    user account."""

    class Arguments:
        input = RiderCreateInput(
            description="Fields required to create a rider.", required=True
        )

    class Meta:
        description = "Create A New Rider"
        permissions = ("rider.manage_riders",)
        model = rider_models.Rider
        error_type_class = RiderError
        error_type_field = "rider_errors"

    @classmethod
    def perform_mutation(cls, root, info, **data):
        """Validate the input, create the rider and attach it to a user.

        Raises:
            ValidationError: if the phone/cnic are malformed (via the
                validators) or the phone already belongs to a rider.
        """
        data = data["input"]
        # Field-level validation; these raise on invalid input.
        isPhoneNumber(data["phone"])
        isCnic(data["cnic"])
        user = account_models.User.objects.filter(phone=data["phone"])
        if len(user) > 0 and user[0].riderid:
            raise ValidationError({"phone": "User with phone already is a rider"})
        shopid = graphene.Node.get_node_from_global_id(
            info, data["shopid"], SubShop)
        rider = rider_models.Rider.objects.create(
            name=data["name"], city=data["city"], cnic=data["cnic"], shopid=shopid)
        if len(user) > 0:
            # Existing account: promote it to a rider account.
            user.update(riderid=rider, is_rider=True)
            user = user[0]
        else:
            # No account yet: create an active, pre-verified rider user.
            user = account_models.User.objects.create_user(
                password=data["password"], phone=data["phone"], is_active=True, phone_verified=True, riderid=rider, is_rider=True)
        riderGroupAdd(user)
        return cls.success_response(rider)
| 2,342 | 684 |
'''
Copyright 2021 Kerry Johnson
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import subprocess
def ip(remove='10.'):
    """Return the host's first IP address as reported by `hostname -I`.

    Addresses starting with *remove* (e.g. Docker/Resin 10.x ranges) are
    filtered out unless remove is None. Returns None when nothing remains.
    """
    raw = subprocess.check_output(['hostname', '-I'])
    addresses = [token.decode('utf-8') for token in raw.split()]
    if remove is not None:
        # Remove (e.g.) Docker/Resin IPs
        addresses = [addr for addr in addresses if not addr.startswith(remove)]
    return addresses[0] if addresses else None
# Numeric area IDs mapped to area names (these match Super Metroid's
# region list — confirm against the consuming code).
Areas = {
    0x00: 'Crateria',
    0x01: 'Brinstar',
    0x02: 'Norfair',
    0x03: 'WreckedShip',
    0x04: 'Maridia',
    0x05: 'Tourian',
    0x06: 'Ceres',
    0x07: 'Debug',
}
| 166 | 102 |
#!/usr/bin/python
"""
I wrote this in a couple afternoons while watching Netflix, so it can probably be better.
-jmag
"""
from slackclient import SlackClient
import sys, json, sqlite3, time, re, datetime
MENTION_REGEX = "^<@(|[WU][A-Z0-9]+?)>(.*)"
class ConfigException(Exception):
    """Raised when the bot configuration file cannot be read or parsed."""
    pass
class ConnectionException(Exception):
    """Raised when the Slack RTM connection cannot be established."""
    pass
class YegsecDatabase:
    """Thin wrapper around the SQLite database storing meetup RSVPs.

    Tables used: users(user_id), meetups(meetup_id, day_id, month_id, year_id)
    and confirmations(user_id, meetup_id, pizza_pref).
    """

    def __init__(self, db_path):
        """Open a connection to the SQLite database at *db_path*."""
        self.path = db_path
        self.conn = sqlite3.connect(db_path)
        self.cursor = self.conn.cursor()

    def confirm_user(self, user, month, year, pref):
        """Register *user* for the meetup in *month*/*year*.

        Creates the user row on first contact. *pref* is truthy for a
        vegetarian pizza preference.

        Returns:
            True if a new confirmation was recorded, False if the meetup does
            not exist or the user already confirmed.
        """
        self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
        if not self.cursor.fetchone():
            self.cursor.execute("INSERT INTO users (user_id) VALUES (?)", (user,))
        self.cursor.execute(
            "SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
        meeting_row = self.cursor.fetchone()
        if not meeting_row:
            return False
        meeting_id = meeting_row[0]
        veg_bool = 1 if pref else 0  # replaces the old redundant if/else
        self.cursor.execute(
            "SELECT * FROM confirmations WHERE meetup_id = ? AND user_id = ?", (meeting_id, user))
        if self.cursor.fetchone():
            return False
        self.cursor.execute(
            "INSERT INTO confirmations (user_id, meetup_id, pizza_pref) VALUES (?, ?, ?)",
            (user, meeting_id, veg_bool))
        self.yegsec_commit()
        return True

    def remove_confirm_user(self, user, month, year):
        """Withdraw *user*'s confirmation for the *month*/*year* meetup.

        Returns:
            False when the user or the meetup is unknown; None on success
            (kept as-is for backward compatibility with existing callers).
        """
        self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
        if not self.cursor.fetchone():
            # A user cannot remove a confirmation if they don't exist already.
            return False
        self.cursor.execute(
            "SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
        meeting_row = self.cursor.fetchone()
        if not meeting_row:
            return False
        self.cursor.execute(
            "DELETE FROM confirmations WHERE user_id = ? AND meetup_id = ?", (user, meeting_row[0]))
        self.yegsec_commit()

    def yegsec_commit(self):
        """Flush pending writes to the database."""
        self.conn.commit()

    def get_summary(self):
        """Return per-meetup pizza counts keyed by meetup_id.

        Each value is a dict with 'veg', 'other', 'day', 'month', 'year'.
        (Replaces the old unused-variable / fetchone-loop implementation.)
        """
        self.cursor.execute("SELECT meetup_id FROM meetups")
        meetup_ids = [row[0] for row in self.cursor.fetchall()]
        results = {}
        for meetup_id in meetup_ids:
            self.cursor.execute(
                "SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 1",
                (meetup_id,))
            veg_count = self.cursor.fetchone()[0]
            self.cursor.execute(
                "SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 0",
                (meetup_id,))
            other_count = self.cursor.fetchone()[0]
            self.cursor.execute(
                "SELECT day_id, month_id, year_id FROM meetups WHERE meetup_id = ?", (meetup_id,))
            day, month, year = self.cursor.fetchone()
            results[meetup_id] = {
                "veg": veg_count,
                "other": other_count,
                "day": day,
                "month": month,
                "year": year,
            }
        return results
class YegsecBot:
    """Slack bot that collects pizza counts (and vegetarian preferences)
    for upcoming meetups via direct @-mentions."""
    def __init__(self, config):
        """Load the JSON config at ``config``, connect to Slack and run the
        RTM event loop.

        Raises ConnectionException when the RTM connection fails.
        """
        db, token, rtm_delay = self.read_config(config)
        self.db = YegsecDatabase(db)
        self.bot = SlackClient(token)
        self.rtm_delay = rtm_delay
        if self.bot.rtm_connect(with_team_state=False):
            self.bot_id = self.bot.api_call("auth.test")["user_id"]
            try:
                self.start()
            except KeyboardInterrupt:
                # Make sure pending writes hit disk on Ctrl-C shutdown.
                self.db.yegsec_commit()
        else:
            raise ConnectionException("Connection to Slack failed.")
    def read_config(self, config_path):
        """Return (database, token, rtm_delay) from the JSON config file.

        Raises ConfigException when the file cannot be read or parsed.
        (The original leaked the open file handle and used a bare except.)
        """
        try:
            with open(config_path) as f:
                frj = json.load(f)
        except (OSError, ValueError):
            raise ConfigException("Unable to read provided configuration: {}".format(config_path))
        return frj['database'], frj['token'], frj['rtm_delay']
    # Source: https://www.fullstackpython.com/blog/build-first-slack-bot-python.html
    def parse_bot_commands(self, slack_events):
        """
        Parses a list of events coming from the Slack RTM API to find bot commands.
        If a bot command is found, this function returns a tuple of
        (command, channel, user).  If none is found, returns (None, None, None).
        """
        for event in slack_events:
            if event["type"] == "message" and "subtype" not in event:
                user_id, message = self.parse_direct_mention(event["text"])
                if user_id == self.bot_id:
                    return message, event["channel"], event["user"]
        return None, None, None
    def parse_direct_mention(self, message_text):
        """
        Finds a direct mention (a mention that is at the beginning) in message
        text and returns (user_id, remaining_message), or (None, None) when
        there is no direct mention.
        """
        matches = re.search(MENTION_REGEX, message_text)
        # Group 1 is the mentioned user id, group 2 the remaining message.
        return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
    def get_next_meet(self):
        """Return (month, year) of the next meetup.  Currently hard-coded."""
        return 3, 2019
    @staticmethod
    def _parse_pref(pref_text):
        """Map a pizza-preference string ('vegetarian'/'any') to
        (vegetarian_bool, human-readable label)."""
        if "VEG" in pref_text.upper():
            return True, "vegetarian"
        return False, "non-vegetarian"
    def add_user(self, command, channel, user):
        """
        Main function of the bot: registers ``user``'s pizza count and
        vegetarian preference for a meetup in the database.  Returns a reply
        string, or None when the command does not match.
        """
        rs = re.findall("add me for ([0-9]{1,2}), ?([0-9]{4}) (vegetarian|any)", command, re.IGNORECASE)
        rsm = re.findall("add me next (vegetarian|any)", command, re.IGNORECASE)
        if len(rs) == 1 or len(rsm) == 1:
            try:
                if len(rs) == 1:
                    month = int(rs[0][0])
                    year = int(rs[0][1])
                    pref_text = rs[0][2]
                else:
                    month, year = self.get_next_meet()
                    # rsm's pattern has a single capture group, so findall
                    # returns plain strings: rsm[0] IS the preference text.
                    # (BUG FIX: the original read rs[0][2] here, which indexed
                    # a single character of that string and silently registered
                    # every "add me next vegetarian" as non-vegetarian.)
                    pref_text = rsm[0]
                month_str = datetime.datetime(year, month, 1).strftime("%B")
                vegetarian, resp_veg = self._parse_pref(pref_text)
                if self.db.confirm_user(user, month, year, vegetarian):
                    return(":pizza::pizza::pizza:Thank you <@{}>, I will add you to the pizza numbers for the month {} for the year {} as a {} option:pizza::pizza::pizza:".format(user, month_str, year, resp_veg))
                else:
                    return(":pizza::pizza::pizza:Sorry, <@{}> it looks like you've already been added for that month.:pizza::pizza::pizza:".format(user))
            except Exception:
                # Bad month/year etc. -- reply instead of crashing the loop.
                return("Sorry, I tried to add you with that command, but I couldn't quite understand it. Please try again.")
    def remove_user(self, command, channel, user):
        """
        Removes ``user``'s confirmation (and preference) for a meetup from
        the database.  Returns a reply string, or None when the command does
        not match.
        """
        rs = re.findall("remove me for ([0-9]{1,2}), ?([0-9]{4})", command, re.IGNORECASE)
        rsm = re.findall("remove me next", command, re.IGNORECASE)
        if len(rs) == 1 or len(rsm) == 1:
            try:
                if len(rs) == 1:
                    month = int(rs[0][0])
                    year = int(rs[0][1])
                else:
                    month, year = self.get_next_meet()
                month_str = datetime.datetime(year, month, 1).strftime("%B")
                self.db.remove_confirm_user(user, month, year)
                # Message typo fixed: "remove you from", not "remove you to".
                return(":pizza::pizza::pizza:Thank you <@{}>, I will remove you from the pizza numbers for the month {} for the year {}:pizza::pizza::pizza:".format(user, month_str, year))
            except Exception:
                return("Sorry, I tried to remove you with that command, but I couldn't quite understand it. Please try again.")
    def get_summary(self):
        """Return a human-readable pizza-count summary for every meetup."""
        response = ""
        for meetup_id, meetup in self.db.get_summary().items():
            total_pizza_count = meetup['other'] + meetup['veg']
            response += "*Summary*\nMeetup Date: `{}/{}/{}`\nTotal Pizza Count: `{}`\nNon-Vegetarian: `{}`\nVegetarian: `{}`\n\n".format(meetup['day'], meetup['month'], meetup['year'], total_pizza_count, meetup['other'], meetup['veg'])
        return response
    def get_help(self):
        """Return the help text listing the supported commands."""
        return "You can send me the following commands:\n\
To get added to the next meetup's pizza count do: `add me next [any|vegetarian]`\n\
To get added to a future meetup's pizza count do: `add me for [month],[year]`\n\
To get removed from the next meetup's pizza count do: `remove me next`\n\
To be removed from a future meetup's pizza count do: `remove me [month],[year]`"
    def handle_command(self, command, channel, user):
        """
        Executes the bot command if it is known, otherwise replies with a
        default hint.  The reply is ephemeral: only ``user`` sees it.
        """
        print("Received command: {}".format(command))
        # Default response is help text for the user.
        default_response = "Not sure what you mean. Try `{}`".format("help")
        response = None
        if command.startswith("add me for") or command.startswith("add me next"):
            response = self.add_user(command, channel, user)
        elif command.startswith("remove me for") or command.startswith("remove me next"):
            response = self.remove_user(command, channel, user)
        elif command.startswith("summary"):
            response = self.get_summary()
        elif command.startswith("help"):
            response = self.get_help()
        # Ephemeral post so only the requesting user sees the response.
        self.bot.api_call(
            "chat.postEphemeral",
            channel=channel,
            user=user,
            text=response or default_response,
            as_user=True,
        )
    def start(self):
        """Main RTM loop: poll Slack events and dispatch bot commands."""
        while True:
            command, channel, user = self.parse_bot_commands(self.bot.rtm_read())
            if command:
                self.handle_command(command, channel, user)
            time.sleep(self.rtm_delay)
if __name__ == "__main__":
    # Script entry point: run the bot against the local JSON configuration.
    # YegsecBot.__init__ blocks in the RTM loop until interrupted.
    bot = YegsecBot("config.json")
| 11,458 | 3,295 |
from django.conf import settings
# Public API: only the ``track`` decorator is exported from this module.
__all__ = ['track']
if 'workon.contrib.tracker' not in settings.INSTALLED_APPS:
    # The tracker app is not installed: expose a stub that fails loudly
    # when used instead of silently tracking nothing.
    def track(*args, **kwargs):
        raise Exception('workon.contrib.tracker missing from settings.INSTALLED_APPS')
else:
    import time
    import datetime
    from operator import itemgetter
    def track(*fields, save=True):
        """
        Class decorator that tracks value changes of the given model fields.

        The stored snapshot of tracked values is refreshed on model
        initialization and on save; ``track_changes()`` persists each pending
        change as a ``TrackEvent`` row when ``save`` is True.

        >>> @track('name')
        >>> class Post(models.Model):
        >>>     name = models.CharField(...)
        >>>
        >>> post.name = "new name"
        >>> post.save()
        >>> post.track_changes()
        >>>
        >>> post.get_tracked_events()
        """
        # Sentinel: instance has no primary key yet, so no baseline exists.
        UNSAVED = dict()
        from django.db.models.signals import post_init, post_save, m2m_changed
        from django.db.models import Model, ManyToManyField
        from django.utils import timezone
        from django.contrib.contenttypes.fields import GenericRelation
        from workon.contrib.tracker.models import TrackEvent
        def _store(self):
            "Updates a local copy of attributes values"
            # NOTE(review): '__initial_data' is NOT name-mangled here (plain
            # function, not a class body), which keeps the attribute name
            # consistent with the getattr(instance, '__initial_data', ...)
            # lookups in _post_save below.
            if self.id:
                self.__initial_data = dict((f, getattr(self, f)) for f in fields)
            else:
                self.__initial_data = UNSAVED
        def inner(cls):
            def get_tracked_events(self, group_by=None, **kwargs):
                # Events for this instance, newest first; with
                # group_by='timestamp' they are bucketed into a dict instead.
                # content_type = ContentType.objects.get_for_model(self)
                # queryset = TrackEvent.objects.filter(content_type__pk=content_type.id, object_id=self.pk).filter(**kwargs)
                queryset = self.generic_tracked_events.filter(**kwargs)
                if group_by == 'timestamp':
                    events = {}
                    for event in queryset.order_by('-tracked_at'):
                        events.setdefault(event.timestamp, []).append(event)
                    return events
                else:
                    return queryset.order_by('-tracked_at')
            cls.get_tracked_events = get_tracked_events
            # Reverse generic relation so events can be queried per instance.
            cls.add_to_class('generic_tracked_events', GenericRelation(
                TrackEvent,
                content_type_field='object_content_type',
                object_id_field='object_id',
            ))
            cls.__initial_data = {}
            def track_changes(self, user=None):
                # Persist queued changes (attributed to ``user``) and refresh
                # the baseline snapshot.  Returns the dict of changes.
                ts = time.time()
                changes = self.get_tracked_changes()
                if changes:
                    if save:
                        for field, change in changes.items():
                            change.user = user
                            change.save()
                    _store(self)
                return changes
            cls.track_changes = track_changes
            def get_tracked_changes(self):
                # Unsaved TrackEvent objects queued by the signal handlers.
                return getattr(self, '__tracked_changes', dict())
            cls.get_tracked_changes = get_tracked_changes
            def _post_init(sender, instance, **kwargs):
                # Snapshot field values as soon as the instance is built.
                _store(instance)
            post_init.connect(_post_init, sender=cls, weak=False)
            def _post_save(sender, instance, **kwargs):
                # Diff current values against the snapshot and queue a
                # TrackEvent for every tracked field that changed.
                ts = time.time()
                changes = instance.get_tracked_changes()
                for field_name, old_value in getattr(instance, '__initial_data', dict()).items():
                    if old_value != getattr(instance, field_name):
                        changes[field_name] = TrackEvent(
                            object=instance,
                            field_name=field_name,
                            action='field_post_save',
                            old_value=old_value,
                            new_value=getattr(instance, field_name)
                        )
                setattr(instance, '__tracked_changes', changes)
            post_save.connect(_post_save, sender=cls, weak=False)
            for f in fields:
                if isinstance(cls._meta.get_field(f), ManyToManyField):
                    # Factory binds field_name per iteration, avoiding the
                    # late-binding-closure pitfall inside this loop.
                    def get_m2m_changed(field_name):
                        def _m2m_changed(sender, instance, action, **kwargs):
                            ts = time.time()
                            changes = instance.get_tracked_changes()
                            # Only the post_* phases carry the final pk_set.
                            if action.startswith('post_'):
                                changes[field_name] = TrackEvent(
                                    object=instance,
                                    field_name=field_name,
                                    action=f'm2m_{action}',
                                    m2m_pk_set=list(kwargs.get('pk_set')),
                                    m2m_model=kwargs.get('model')
                                )
                            setattr(instance, '__tracked_changes', changes)
                        return _m2m_changed
                    m2m_changed.connect(get_m2m_changed(f), sender=getattr(cls, f).through, weak=False)
            return cls
        return inner