import discord
from discord.ext import commands
import traceback
import sys
import asyncio
import random
from datetime import datetime
from storage import *
pat_gifs = [
"https://cdn.discordapp.com/attachments/670153232039018516/674299983117156362/1edd1db645f55aa7f2923838b5afabfc863fc109_hq.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299989152890881/7MPC.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299989559738378/2e27d5d124bc2a62ddeb5dc9e7a73dd8.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299990386016257/48f70b7f0f0858254d0e50d68ef4bc4f443b74a7_hq.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299995922628628/anime-head-pat-gif.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674299997248028712/a.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300008031322114/e3e2588fbae9422f2bd4813c324b1298.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300013492437014/giphy_1.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300014427766801/FlimsyDeafeningGrassspider-small.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300013509214228/giphy.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300026150977563/tenor_1.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300032303759360/tenor.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300033440415754/unnamed.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300032366804992/giphy_2.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300037924126743/tumblr_n9g05o77tU1ttu8odo1_500.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300047004925952/c0c1c5d15f8ad65a9f0aaf6c91a3811e.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300051438305368/giphy_3.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300056601362454/tenor_2.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300062024597514/B7g8Vh.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300069696241684/source_1.gif",
"https://cdn.discordapp.com/attachments/670153232039018516/674300074557177892/source.gif"
]
@bot.command(aliases=["pet"])
async def pat(ctx, user: discord.Member):
embed = discord.Embed(description="**{.message.author.display_name}** pats **{.display_name}**. <a:pat:691589024774750228>".format(ctx, user), color=0xFFFFFF, timestamp=datetime.utcnow())
embed.set_image(url=random.choice(pat_gifs))
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send(embed=embed)
@pat.error
async def pat_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(description="**babi** pats **{.message.author.display_name}**. <a:pat:691589024774750228>".format(ctx), color=0xFFFFFF, timestamp=datetime.utcnow())
embed.set_image(url=random.choice(pat_gifs))
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send(embed=embed)
elif isinstance(error, commands.BadArgument):
embed = discord.Embed(description="**babi** pats **{.message.author.display_name}**. <a:pat:691589024774750228>".format(ctx), color=0xFFFFFF, timestamp=datetime.utcnow())
embed.set_image(url=random.choice(pat_gifs))
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send(f"**{ctx.message.author.display_name}** member not found, I patted you instead", embed=embed)
else:
        print('Ignoring exception in command pat:', file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
embed = discord.Embed(description="{}".format(error), color=0x000000)
embed.set_footer(text="© MommyBot by Shiki.", icon_url=bot.user.avatar_url)
await ctx.send("An error has occured. Detailed information below:", embed=embed)
# --- nilq/baby-python ---
import Tkinter as tk
import warnings
VAR_TYPES = {
int: tk.IntVar,
float: tk.DoubleVar,
str: tk.StringVar
}
class ParameterController(tk.Frame):
def __init__(self,parent, key, value):
tk.Frame.__init__(self, parent)
self.value_type = type(value)
self._var = VAR_TYPES[self.value_type]()
self._var.set(value)
self._label = tk.Label(self,text=key,justify=tk.LEFT,width=20)
self._label.pack(side=tk.LEFT,padx=5,anchor="e",fill=tk.BOTH)
validator = self.register(self.validator)
self._entry = tk.Entry(self,textvariable=self._var, validate='all',
validatecommand=(validator, '%P', '%s'))
self._entry.pack(side=tk.RIGHT,expand=1)
def set_bg(self,colour):
        try:
            self._entry.config(bg=colour)
        except tk.TclError:
            pass
    def validator(self, value, last_value=None):
if not value.strip() and not self.value_type == str:
self.set_bg('red')
self.bell()
return True
else:
try:
self.value_type(value)
except Exception as error:
return False
else:
self.set_bg('white')
return True
def get(self):
return self._var.get()
def set(self,value):
if self.validator(value):
self._var.set(self.value_type(value))
class DictController(tk.Frame):
def __init__(self, parent, dict_):
tk.Frame.__init__(self, parent)
self._dict = {}
self.update(dict_)
    def update(self, new_dict):
        # Build controllers only for the newly supplied keys; existing entries
        # in self._dict are already ParameterController instances.
        for key, val in sorted(new_dict.items()):
            controller = ParameterController(self, key, val)
            controller.pack()
            self._dict[key] = controller
def __getitem__(self,key):
return self._dict[key].get()
def __setitem__(self,key,value):
self._dict[key].set(value)
def as_dict(self):
output = {}
for key,val in self._dict.items():
try:
output[key] = val.get()
except ValueError:
raise ValueError("Invalid value for key '%s'"%key)
return output
if __name__ == "__main__":
test_dict = {
"Test1":"node name",
"Test2":90,
"Test3":123.
}
root = tk.Tk()
c = DictController(root,test_dict)
c.pack()
def print_vals():
    try:
        print c.as_dict()
    except ValueError as error:
        warnings.warn(repr(error))
    root.after(1000, print_vals)
root.after(4000,print_vals)
root.mainloop()
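# Extension note: supporting another scalar type should only require extending
# VAR_TYPES, e.g. VAR_TYPES[bool] = tk.BooleanVar -- though (assumption,
# untested) bool('False') is truthy, so validator() would need a special case.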
# --- nilq/baby-python ---
import factory
import factory.fuzzy
from user.models import User
from company.tests.factories import CompanyFactory
class UserFactory(factory.django.DjangoModelFactory):
sso_id = factory.Iterator(range(99999999))
name = factory.fuzzy.FuzzyText(length=12)
company_email = factory.LazyAttribute(
lambda supplier: '%s@example.com' % supplier.name)
company = factory.SubFactory(CompanyFactory)
is_company_owner = True
class Meta:
model = User
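# Minimal usage sketch (assumes a configured Django test settings module and
# the CompanyFactory imported above; build() makes an unsaved instance):
# user = UserFactory.build()
# assert user.company_email == '%s@example.com' % user.name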
# --- nilq/baby-python ---
from typing import List


class Solution:
    def combine(self, n: int, k: int) -> List[List[int]]:
        # DFS over increasing candidates; each path of length k is one combination.
        def dfs(nums, path=None, res=None):
            if path is None:
                path = []
            if res is None:
                res = []
            if len(path) == k:
                res.append(path)
                return res
            for idx, num in enumerate(nums):
                dfs(nums[idx + 1:], path + [num], res)
            return res
        return dfs(list(range(1, n + 1)))
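# Example: Solution().combine(4, 2)
# -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]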
# --- nilq/baby-python ---
from typing import Optional
import requests
from libgravatar import Gravatar
from bs4 import BeautifulSoup
def get_gravatar_image(email) -> Optional[str]:
"""Only will return a url if the user exists and is correct on gravatar, otherwise None"""
g = Gravatar(email)
profile_url = g.get_profile()
res = requests.get(profile_url)
if res.status_code == 200:
return g.get_image()
return None
def get_github_repositories(github_username):
"""Only will return a url if the user exists and will return the number of repositories,
even if there are none will return 0"""
url = f'https://github.com/{github_username}'
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
css_selector = 'div.UnderlineNav > nav > a:nth-child(2) > span'
try:
repositories_info = soup.select_one(css_selector)
return int(repositories_info.text)
        except AttributeError:
            pass
    return None
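# Usage sketch (live network access assumed; the CSS selector above tracks
# GitHub's current markup and may silently break when the page changes):
# print(get_gravatar_image('someone@example.com'))
# print(get_github_repositories('octocat'))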
# --- nilq/baby-python ---
patterns = ['you cannot perform this operation as root']
def match(command):
if command.script_parts and command.script_parts[0] != 'sudo':
return False
for pattern in patterns:
if pattern in command.output.lower():
return True
return False
def get_new_command(command):
return ' '.join(command.script_parts[1:])
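# Quick self-test with a minimal stand-in for thefuck's Command type
# (hypothetical; the real class lives in thefuck.types):
if __name__ == '__main__':
    from collections import namedtuple
    Command = namedtuple('Command', ['script_parts', 'output'])
    cmd = Command(['sudo', 'yadm', 'clone'],
                  'You cannot perform this operation as root')
    assert match(cmd)
    assert get_new_command(cmd) == 'yadm clone'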
# --- nilq/baby-python ---
import json
import os
from py2neo import Graph
class GraphInstanceFactory:
def __init__(self, config_file_path):
"""
        Init the graph factory from a config file path.
        Example of the config JSON file format:
[
{
"server_name": "LocalHostServer",
"server_id": 1,
"host": "localhost",
"user": "neo4j",
"password": "123456",
"http_port": 7474,
"https_port": 7473,
"bolt_port": 7687
},
...
]
:param config_file_path: the config file path
"""
        if not os.path.exists(config_file_path):
            raise IOError("Neo4j config file does not exist")
        if not os.path.isfile(config_file_path):
            raise IOError("Neo4j config path is not a file")
        if not config_file_path.endswith(".json"):
            raise IOError("Neo4j config file is not a JSON file")
self.config_file_path = config_file_path
with open(self.config_file_path, 'r') as f:
self.configs = json.load(f)
        # TODO: add more JSON format checks; raise an exception on duplicate server names or ids
def create_py2neo_graph_by_server_name(self, server_name):
"""
:param server_name: the server name in config file, can be used to find a unique neo4j graph instance location
:return: the Graph object in py2neo, None if create fail
"""
for config in self.configs:
if config["server_name"] == server_name:
return self.__create_py2neo_graph_by_config(config)
return None
def create_py2neo_graph_by_server_id(self, server_id):
"""
:param server_id: the server id in config file, can be used to find a unique neo4j graph instance location
:return: the Graph object in py2neo, None if create fail
"""
for config in self.configs:
if config["server_id"] == server_id:
return self.__create_py2neo_graph_by_config(config)
return None
def get_configs(self):
"""
get the config server list
:return: a list of config
"""
return self.configs
def get_config_file_path(self):
"""
get the config file path
:return: a string for config file path
"""
return self.config_file_path
    def __create_py2neo_graph_by_config(self, config):
        # Try the py2neo v4-style keyword arguments first; fall back to the
        # URI + auth signature used by newer py2neo releases.
        try:
            return Graph(host=config['host'],
                         port=config['bolt_port'],
                         scheme="bolt",
                         user=config['user'],
                         password=config['password'])
        except Exception:
            return Graph('bolt://%s:%s' % (config['host'], config['bolt_port']),
                         auth=(config['user'], config['password']))
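# Usage sketch (the path and server name are placeholders for your own config):
# factory = GraphInstanceFactory('neo4j_config.json')
# graph = factory.create_py2neo_graph_by_server_name('LocalHostServer')
# if graph is not None:
#     print(graph.run('RETURN 1').evaluate())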
# --- nilq/baby-python ---
from datetime import datetime
class mentions_self:
nom = 'я'; gen = ['меня', 'себя']; dat = ['мне', 'себе']
acc = ['меня', 'себя']; ins = ['мной', 'собой']; abl = ['мне','себе']
class mentions_unknown:
all = 'всех'
him = 'его'; her = 'её'; it = 'это'
they = 'их'; them = 'их'; us = 'нас'
name_cases = ['nom', 'gen', 'dat', 'acc', 'ins', 'abl']
everyone = ['@everyone', '@all', '@все']
def getDate(time=None) -> str:
    time = time or datetime.now()  # default computed per call, not once at import
    return f'{time.day:02d}.{time.month:02d}.{time.year}'
def getTime(time=None) -> str:
    time = time or datetime.now()
    return f'{time.hour:02d}:{time.minute:02d}:{time.second:02d}.{time.microsecond}'
def getDateTime(time=None) -> str:
    time = time or datetime.now()
    return getDate(time) + ' ' + getTime(time)
def ischecktype(checklist, checktype) -> bool:
for i in checklist:
if isinstance(checktype, list) and type(i) in checktype:
return True
elif isinstance(checktype, type) and isinstance(i, checktype):
return True
return False
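# Examples:
#   ischecktype(['a', 1], int)            -> True  (1 is an int)
#   ischecktype(['a', 'b'], [int, float]) -> False (no element's type is listed)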
# --- nilq/baby-python ---
from flask import (
Blueprint,
render_template,
)
from sqlalchemy import desc, func, or_, text
from .. import db
from ..models import (
Video,
Vote,
GamePeriod,
Reward,
)
game = Blueprint(
'game',
__name__,
template_folder='templates'
)
@game.route('/')
def index():
q = """
SELECT *, rewards/videos AS rpv
FROM top_creators_30_days
ORDER BY rewards DESC
LIMIT :limit;
"""
rs = db.session.execute(q, {
"limit": 10,
})
leaderboard = [dict(zip(rs.keys(), item)) for item in rs.fetchall()]
return render_template(
'index.html',
leaderboard=leaderboard
)
@game.route('/periods')
def list_periods():
periods = \
(db.session.query(GamePeriod)
.order_by(desc(GamePeriod.end))
.limit(1000)
.all())
return render_template(
'periods.html',
periods=periods,
)
@game.route('/rewards')
def list_rewards():
rewards = \
(db.session.query(Reward)
.filter_by(creator_payable=True)
.order_by(desc(Reward.period_id))
.limit(1000)
.all())
return render_template(
'rewards.html',
rewards=rewards,
)
@game.route('/period/<int:period_id>')
def period_rewards(period_id):
period = db.session.query(GamePeriod).filter_by(id=period_id).one()
rewards_summary = \
(db.session.query(
Reward.video_id,
func.count(Reward.id),
func.sum(Reward.creator_reward).label('creator_rewards'),
func.sum(Reward.voter_reward))
.filter_by(period_id=period_id)
.group_by(Reward.video_id)
.order_by(text("creator_rewards desc"))
.all())
rewards = \
(db.session.query(Reward, Vote)
.filter_by(period_id=period_id)
.from_self()
.join(Vote, Vote.id == Reward.vote_id)
.order_by(desc(Reward.creator_reward))
.all())
return render_template(
'period_rewards.html',
period=period,
rewards=rewards,
rewards_summary=rewards_summary,
)
@game.route('/payment/<string:txid>')
def explain_payment(txid):
rewards = \
(db.session.query(Reward)
.filter(or_(Reward.creator_txid == txid, Reward.voter_txid == txid))
.order_by(desc(Reward.period_id))
.all())
return render_template(
'payment.html',
txid=txid,
rewards=rewards,
)
@game.route('/votes/<string:video_id>')
def video_votes(video_id: str):
video = db.session.query(Video).filter_by(id=video_id).one()
votes = \
(db.session.query(Vote)
.filter_by(video_id=video_id)
.order_by(desc(Vote.token_amount))
.all())
rewards = \
(db.session.query(Reward, Vote)
.filter_by(video_id=video_id)
.join(Vote)
.order_by(desc(Reward.creator_reward))
.all())
period = None
summary = None
if rewards:
period_id = rewards[0][0].period_id
period = db.session.query(GamePeriod).filter_by(id=period_id).one()
summary = \
(db.session.query(
func.count(Reward.id).label('rewards_count'),
func.sum(Reward.creator_reward).label('creator_rewards'),
func.sum(Reward.voter_reward).label('voter_rewards'))
.filter_by(video_id=video_id, creator_payable=True)
.one())
return render_template(
'video_votes.html',
video=video,
votes=votes,
rewards=rewards,
period=period,
summary=summary,
)
@game.route('/voter/<string:eth_address>')
def voter_activity(eth_address: str):
votes = \
(db.session.query(Vote)
.filter_by(eth_address=eth_address)
.order_by(desc(Vote.created_at))
.limit(100)
.all())
return render_template(
'voter.html',
eth_address=eth_address,
votes=votes,
)
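# Registration sketch (an app factory elsewhere in the package is assumed):
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(game, url_prefix='/game')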
# --- nilq/baby-python ---
import torch
import torch.nn.utils.rnn as rnn
import numpy as np
import pandas
from torch.utils.data import Dataset
from sklearn.preprocessing import LabelEncoder
from parsers.spacy_wrapper import spacy_whitespace_parser as spacy_ws
from common.symbols import SPACY_POS_TAGS
import json
import transformers
from transformers import BertForTokenClassification, BertConfig, BertTokenizer
class OpenIE_CONLL_Dataset(Dataset):
def __init__(self, file_path, emb, sep='\t', sent_maxlen=300, label_map=None):
'''
data is a list of triples (according to data keys)
label is a list of int
'''
self.file_path = file_path
self.sep = sep
self.emb = emb
self.sent_maxlen = sent_maxlen
self.label_map = label_map
if label_map is None:
self.label_map = LabelEncoder()
self.classes = set()
self.data = []
self.labels = []
self.data_keys = ["word_inputs", "predicate_inputs", "postags_inputs"]
self.build()
def __getitem__(self, i):
x = []
for key in self.data_keys:
datum = self.data[key][i]
x.append(datum)
return x, self.labels[i]
def __len__(self):
return len(self.labels)
def collate(self, data):
x = [[],[],[]]
y = []
for i in data:
for j in range(len(i[0])):
x[j].append(torch.LongTensor(i[0][j]))
y.append(torch.LongTensor(i[1]))
return x, y
def build(self):
"""
Load a supervised OIE dataset from file
"""
df = pandas.read_csv(self.file_path,
sep = self.sep,
header = 0,
keep_default_na = False)
self.label_map.fit(df.label.values)
# Split according to sentences and encode
sents = self.get_sents_from_df(df)
self.data = self.encode_inputs(sents)
self.labels = self.encode_outputs(sents)
def get_sents_from_df(self, df):
"""
        Split a data frame by rows according to the sentences
"""
return [df[df.run_id == run_id]
for run_id
in sorted(set(df.run_id.values))]
def encode_inputs(self, sents):
"""
Given a dataframe which is already split to sentences,
encode inputs for rnn classification.
Should return a dictionary of sequences of sample of length maxlen.
"""
word_inputs = []
pred_inputs = []
pos_inputs = []
# Preproc to get all preds per run_id
# Sanity check - make sure that all sents agree on run_id
assert(all([len(set(sent.run_id.values)) == 1
for sent in sents]))
run_id_to_pred = dict([(int(sent.run_id.values[0]),
self.get_head_pred_word(sent))
for sent in sents])
# Construct a mapping from running word index to pos
word_id_to_pos = {}
for sent in sents:
indices = sent.index.values
words = sent.word.values
for index, word in zip(indices,
spacy_ws(" ".join(words))):
word_id_to_pos[index] = word.tag_
        fixed_size_sents = sents  # NOTE: fixed-size padding removed in this reimplementation
for sent in fixed_size_sents:
assert(len(set(sent.run_id.values)) == 1)
word_indices = sent.index.values
sent_words = sent.word.values
sent_str = " ".join(sent_words)
pos_tags_encodings = [(SPACY_POS_TAGS.index(word_id_to_pos[word_ind]) \
if word_id_to_pos[word_ind] in SPACY_POS_TAGS \
else 0)
for word_ind
in word_indices]
            # Debug guard: flag POS-tag encodings outside the expected range
            for hh in pos_tags_encodings:
                if hh > 55:
                    print(pos_tags_encodings)
word_encodings = [self.emb.get_word_index(w)
for w in sent_words]
# Same pred word encodings for all words in the sentence
pred_word = run_id_to_pred[int(sent.run_id.values[0])]
pred_word_encodings = [self.emb.get_word_index(pred_word)
for _ in sent_words]
word_inputs.append(word_encodings)
pred_inputs.append(pred_word_encodings)
pos_inputs.append(pos_tags_encodings)
# Pad / truncate to desired maximum length
# NOTE: removed pad in reimplementation
ret = {}
for name, sequence in zip(["word_inputs", "predicate_inputs", "postags_inputs"],
[word_inputs, pred_inputs, pos_inputs]):
ret[name] = []
for samples in truncate_sequences(sequence,
maxlen = self.sent_maxlen):
ret[name].append(samples)
return {k: np.array(v) for k, v in ret.items()}
def encode_outputs(self, sents):
"""
Given a dataframe split to sentences, encode outputs for rnn classification.
Should return a list sequence of sample of length maxlen.
"""
output_encodings = []
# Encode outputs
for sent in sents:
output_encodings.append(list(self.transform_labels(sent.label.values)))
return truncate_sequences(output_encodings, maxlen=self.sent_maxlen)
def transform_labels(self, labels):
"""
Encode a list of textual labels
"""
# Fallback:
return self.label_map.transform(labels)
def num_of_classes(self):
if self.label_map is not None:
return len(self.label_map.classes_)
else:
print("encoder not instantiated for num of classes")
return 0
def get_head_pred_word(self, full_sent):
"""
Get the head predicate word from a full sentence conll.
"""
assert(len(set(full_sent.head_pred_id.values)) == 1) # Sanity check
pred_ind = full_sent.head_pred_id.values[0]
return full_sent.word.values[pred_ind] \
if pred_ind != -1 \
else full_sent.pred.values[0].split(" ")[0]
class OIE_BERT_Dataset(Dataset):
def __init__(self, file_path, sep='\t', sent_maxlen=300, label_map=None, bert_model='bert-base-uncased'):
'''
data is a list of triples (according to data keys)
label is a list of int
'''
self.file_path = file_path
self.sep = sep
self.sent_maxlen = sent_maxlen
self.label_map = label_map
self.bert_model = bert_model
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model)
if label_map is None:
self.label_map = LabelEncoder()
self.classes = set()
self.data = []
self.labels = []
self.data_keys = ["word_inputs", "predicate_inputs", "postags_inputs"]
self.build()
def __getitem__(self, i):
x = {}
for key in self.data.keys():
x[key] = self.data[key][i]
return x, self.labels[i]
def __len__(self):
return len(self.labels)
def collate(self, data):
x = {}
y = []
batch_max_len = 0
for i in data:
for key in self.data.keys():
x[key] = x.get(key, [])
if key == 'word_inputs':
x[key].append(i[0][key])
batch_max_len = max(batch_max_len, len(i[0][key]))
else:
x[key].append(torch.LongTensor(i[0][key]))
y.append(torch.LongTensor(i[1]))
x['predicate_inputs'] = torch.LongTensor(x['predicate_inputs'])
bert_in = self.tokenizer.batch_encode_plus(x['word_inputs'],
return_tensors='pt', pad_to_max_length=True,
max_length=batch_max_len, return_lengths=True,
add_special_tokens = False)
x['bert_inputs'] = bert_in
return x, y
def build(self):
"""
Load a supervised OIE dataset from file
"""
df = pandas.read_csv(self.file_path,
sep = self.sep,
header = 0,
keep_default_na = False)
self.label_map.fit(df.label.values)
# Split according to sentences and encode
sents = self.get_sents_from_df(df)
data, labels = self.encode_data(sents)
self.data = data
self.labels = labels
def get_sents_from_df(self, df):
"""
        Split a data frame by rows according to the sentences
"""
return [df[df.run_id == run_id]
for run_id in sorted(set(df.run_id.values))]
def encode_data(self, sents):
"""
Given a dataframe which is already split to sentences,
        Should return a tuple of (sequences of samples of length maxlen, sequences of labels).
"""
word_inputs = []
pred_inputs = []
pos_inputs = []
output_encodings = []
# Preproc to get all preds per run_id
# Sanity check - make sure that all sents agree on run_id
assert(all([len(set(sent.run_id.values)) == 1
for sent in sents]))
run_id_to_pred = dict([(int(sent.run_id.values[0]),
self.get_head_pred_id(sent))
for sent in sents])
# Construct a mapping from running word index to pos
word_id_to_pos = {}
for sent in sents:
indices = sent.index.values
words = sent.word.values
for index, word in zip(indices, spacy_ws(" ".join(words))):
word_id_to_pos[index] = word.tag_
for sent in sents:
assert(len(set(sent.run_id.values)) == 1)
word_indices = sent.index.values
sent_words = sent.word.values
pos_tags_encodings = [(SPACY_POS_TAGS.index(word_id_to_pos[word_ind]) \
if word_id_to_pos[word_ind] in SPACY_POS_TAGS \
else 0)
for word_ind in word_indices]
# Same pred word encodings for all words in the sentence
word_encodings = sent_words.tolist()
pred_id = run_id_to_pred[int(sent.run_id.values[0])]
pred_word_encodings = [pred_id]
if pred_id != -1:
word_inputs.append(word_encodings)
pred_inputs.append(pred_word_encodings)
pos_inputs.append(pos_tags_encodings)
output_encodings.append(list(self.transform_labels(sent.label.values)))
x = {}
for name, sequence in zip(self.data_keys,
[word_inputs, pred_inputs, pos_inputs]):
x[name] = []
for samples in truncate_sequences(sequence, maxlen = self.sent_maxlen):
x[name].append(samples)
y = truncate_sequences(output_encodings, maxlen=self.sent_maxlen)
return x, y
def transform_labels(self, labels):
"""
Encode a list of textual labels
"""
# Fallback:
return self.label_map.transform(labels)
def num_of_classes(self):
if self.label_map is not None:
return len(self.label_map.classes_)
else:
print("encoder not instantiated for num of classes")
return 0
def get_head_pred_word(self, full_sent):
"""
Get the head predicate word from a full sentence conll.
"""
assert(len(set(full_sent.head_pred_id.values)) == 1) # Sanity check
pred_ind = full_sent.head_pred_id.values[0]
return full_sent.word.values[pred_ind] \
if pred_ind != -1 \
else full_sent.pred.values[0].split(" ")[0]
def get_head_pred_id(self, full_sent):
# only get the id
assert(len(set(full_sent.head_pred_id.values)) == 1) # Sanity check
pred_ind = full_sent.head_pred_id.values[0]
if pred_ind == -1:
pred_word = full_sent.pred.values[0].split(" ")[0]
words = full_sent.word.values.tolist()
if pred_word in words:
pred_ind = words.index(pred_word) # might not capture the second or later occurrence
else:
pred_ind = -1 # will be filtered out
return pred_ind
def truncate_sequences(sequences, maxlen=None):
    # Truncate each sequence to maxlen; pass sequences through unchanged when maxlen is None.
    if maxlen is None:
        return list(sequences)
    return [seq[:maxlen] for seq in sequences]
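# Usage sketch (hypothetical file path and embedding object; `emb` must expose
# get_word_index() as assumed above):
# ds = OpenIE_CONLL_Dataset('train.oie.conll', emb=my_emb)
# loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate)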
# --- nilq/baby-python ---
# coding=utf-8
from selenium.webdriver.common.by import By
from view_models import certification_services, sidebar, ss_system_parameters
import re
import time
def test_ca_cs_details_view_cert(case, profile_class=None):
'''
:param case: MainController object
:param profile_class: string The fully qualified name of the Java class
:return:
'''
self = case
def view_cert():
'''Open "Certification services"'''
self.wait_until_visible(self.by_css(sidebar.CERTIFICATION_SERVICES_CSS)).click()
self.wait_jquery()
view_cert_data(self, profile_class=profile_class)
return view_cert
def view_cert_data(self, profile_class=None):
'''Get approved CA row'''
service_row = self.wait_until_visible(type=By.XPATH, element=certification_services.LAST_ADDED_CERT_XPATH)
'''Double click on approved CA row'''
self.double_click(service_row)
'''Click on "Edit button"'''
self.by_id(certification_services.DETAILS_BTN_ID).click()
    self.log('UC TRUST_04: 1. CS administrator selects to view the settings of a certification service.')
self.wait_until_visible(type=By.XPATH, element=certification_services.CA_SETTINGS_TAB_XPATH).click()
self.wait_jquery()
self.log(
'UC TRUST_04: 2.System displays the following settings. Usage restrictions for the certificates issued by the certification service.')
    auth_checkbox_enabled = self.wait_until_visible(certification_services.EDIT_CA_AUTH_ONLY_CHECKBOX_XPATH,
                                                    By.XPATH).is_enabled()
    self.is_true(auth_checkbox_enabled, msg='Authentication checkbox not found')
'''Click on authentication checkbox'''
self.wait_until_visible(certification_services.EDIT_CA_AUTH_ONLY_CHECKBOX_XPATH, By.XPATH).click()
self.log(
'UC TRUST_04: 2.System displays the following settings. The fully qualified name of the Java class that describes the certificate profile for certificates issued by the certification service.')
'''Get profile info'''
profile_info_area = self.wait_until_visible(type=By.XPATH,
element=certification_services.EDIT_CERTIFICATE_PROFILE_INFO_AREA_XPATH)
profile_info = profile_info_area.get_attribute("value")
'''Verify profile info'''
self.is_equal(profile_info, profile_class,
msg='The name of the Java class that describes the certificate profile is wrong')
self.log(
'UC TRUST_04: 2. The following user action options are displayed:edit the settings of the certification service')
'''Verify "Save" button'''
save_button_id = self.wait_until_visible(type=By.ID,
element=certification_services.SAVE_CA_SETTINGS_BTN_ID).is_enabled()
self.is_true(save_button_id, msg='"Save" button not found')
# --- nilq/baby-python ---
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Thursday, January 31st 2019, 1:15:46 pm
from pathlib import Path
import argparse
import ibllib.io.params as params
import oneibl.params
from alf.one_iblrig import create
from poop_count import main as poop
IBLRIG_DATA = Path().cwd().parent.parent.parent.parent / 'iblrig_data' / 'Subjects' # noqa
def main():
pfile = Path(params.getfile('one_params'))
if not pfile.exists():
oneibl.params.setup_alyx_params()
create(IBLRIG_DATA, dry=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create session in Alyx')
    parser.add_argument(
        '--patch', help='Ask for a poop count before registering',
        required=False, default=True,
        # argparse's type=bool treats any non-empty string as True, so parse
        # common false-y spellings explicitly instead
        type=lambda x: str(x).lower() not in ('0', 'false', 'no'))
args = parser.parse_args()
if args.patch:
poop()
main()
else:
main()
print('done')
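# Invocation sketch (script name is whatever this file is saved as):
#   python <this_script>.py              -> poop count prompt, then create session
#   python <this_script>.py --patch no   -> skip the poop count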
# --- nilq/baby-python ---
"""
Flask-Limiter extension for rate limiting
"""
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .errors import ConfigurationError, RateLimitExceeded
from .extension import Limiter, HEADERS
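# Typical wiring sketch (assumes a Flask app; key_func picks the rate-limit key.
# Note the Limiter constructor signature varies across Flask-Limiter versions):
# from flask import Flask
# from flask_limiter.util import get_remote_address
# app = Flask(__name__)
# limiter = Limiter(app, key_func=get_remote_address)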
# --- nilq/baby-python ---
from foldrm import Classifier
import numpy as np
def acute():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
nums = ['a1']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/acute/acute.csv')
print('\n% acute dataset', np.shape(data))
return model, data
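# Every loader below follows the same pattern: declare the attribute schema,
# build a Classifier, load the CSV. A training sketch (assumes the foldrm
# package's fit/predict API; method names are an assumption here):
# model, data = acute()
# model.fit(data)
# model.print_asp()  # assumption: prints the learned FOLD-RM rules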
def exercise():
attrs = ["age","gender","height_cm","weight_kg","body fat_%","diastolic","systolic","gripForce","sit and bend forward_cm","sit-ups counts","broad jump_cm"]
nums = ["age","height_cm","weight_kg","body fat_%","diastolic","systolic","gripForce","sit and bend forward_cm","sit-ups counts","broad jump_cm"]
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/exercise/exercise.csv')
print('\n% exercise dataset', np.shape(data))
return model, data
def data_science():
attrs = ["HOURS_DATASCIENCE","HOURS_BACKEND","HOURS_FRONTEND","NUM_COURSES_BEGINNER_DATASCIENCE","NUM_COURSES_BEGINNER_BACKEND","NUM_COURSES_BEGINNER_FRONTEND","NUM_COURSES_ADVANCED_DATASCIENCE","NUM_COURSES_ADVANCED_BACKEND","NUM_COURSES_ADVANCED_FRONTEND","AVG_SCORE_DATASCIENCE","AVG_SCORE_BACKEND","AVG_SCORE_FRONTEND"]
nums = ["HOURS_DATASCIENCE","HOURS_BACKEND","HOURS_FRONTEND","NUM_COURSES_BEGINNER_DATASCIENCE","NUM_COURSES_BEGINNER_BACKEND","NUM_COURSES_BEGINNER_FRONTEND","NUM_COURSES_ADVANCED_DATASCIENCE","NUM_COURSES_ADVANCED_BACKEND","NUM_COURSES_ADVANCED_FRONTEND","AVG_SCORE_DATASCIENCE","AVG_SCORE_BACKEND","AVG_SCORE_FRONTEND"]
model = Classifier(attrs=attrs, numeric=nums, label='PROFILE')
data = model.load_data('data/data_science/data_science.csv')
print('\n% data_science dataset', np.shape(data))
return model, data
def air():
attrs = ["year","month","day","hour","PM2.5","PM10","SO2","NO2","CO","O3","TEMP","PRES","DEWP","RAIN","wd","WSPM"]
nums = ["year","month","day","hour","PM2.5","PM10","SO2","NO2","CO","O3","TEMP","PRES","DEWP","RAIN","WSPM"]
model = Classifier(attrs=attrs, numeric=nums, label='station')
data = model.load_data('data/air/air3.csv')
print('\n% air dataset', np.shape(data))
return model, data
def adult():
attrs = ['age','workclass','fnlwgt','education','education_num','marital_status','occupation','relationship',
'race','sex','capital_gain','capital_loss','hours_per_week','native_country']
nums = ['age','fnlwgt','education_num','capital_gain','capital_loss','hours_per_week']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/adult/adult.csv')
print('\n% adult dataset', np.shape(data))
return model, data
def autism():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'age', 'gender', 'ethnicity', 'jaundice',
'pdd', 'used_app_before', 'relation']
nums = ['age']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/autism/autism.csv')
print('\n% autism dataset', np.shape(data))
return model, data
def breastw():
attrs = ['clump_thickness', 'cell_size_uniformity', 'cell_shape_uniformity', 'marginal_adhesion',
'single_epi_cell_size', 'bare_nuclei', 'bland_chromatin', 'normal_nucleoli', 'mitoses']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/breastw/breastw.csv')
print('\n% breastw dataset', np.shape(data))
return model, data
def cars():
attrs = ['buying', 'maint', 'doors', 'persons', 'lugboot', 'safety']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/cars/cars.csv')
print('\n% cars dataset', np.shape(data))
return model, data
def credit():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15']
nums = ['a2', 'a3', 'a8', 'a11', 'a14', 'a15']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/credit/credit.csv')
print('\n% credit dataset', np.shape(data))
return model, data
def heart():
attrs = ['age', 'sex', 'chest_pain', 'blood_pressure', 'serum_cholestoral', 'fasting_blood_sugar',
'resting_electrocardiographic_results', 'maximum_heart_rate_achieved', 'exercise_induced_angina', 'oldpeak',
'slope', 'major_vessels', 'thal']
nums = ['age', 'blood_pressure', 'serum_cholestoral', 'maximum_heart_rate_achieved', 'oldpeak']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/heart/heart.csv')
print('\n% heart dataset', np.shape(data))
return model, data
def kidney():
attrs = ['age', 'bp', 'sg', 'al', 'su', 'rbc', 'pc', 'pcc', 'ba', 'bgr', 'bu', 'sc', 'sod', 'pot', 'hemo', 'pcv',
'wbcc', 'rbcc', 'htn', 'dm', 'cad', 'appet', 'pe', 'ane']
nums = ['age', 'bp', 'sg', 'bgr', 'bu', 'sc', 'sod', 'pot', 'hemo', 'pcv', 'wbcc', 'rbcc']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/kidney/kidney.csv')
print('\n% kidney dataset', np.shape(data))
return model, data
def krkp():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16',
'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27', 'a28', 'a29', 'a30', 'a31', 'a32',
'a33', 'a34', 'a35', 'a36']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/krkp/krkp.csv')
print('\n% krkp dataset', np.shape(data))
return model, data
def mushroom():
attrs = ['cap_shape', 'cap_surface', 'cap_color', 'bruises', 'odor', 'gill_attachment', 'gill_spacing',
'gill_size', 'gill_color', 'stalk_shape', 'stalk_root', 'stalk_surface_above_ring', 'stalk_surface_below_ring',
'stalk_color_above_ring', 'stalk_color_below_ring', 'veil_type', 'veil_color', 'ring_number', 'ring_type',
'spore_print_color', 'population', 'habitat']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/mushroom/mushroom.csv')
print('\n% mushroom dataset', np.shape(data))
return model, data
def sonar():
attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16',
'a17', 'a18', 'a19', 'a20', 'a21', 'a22', 'a23', 'a24', 'a25', 'a26', 'a27', 'a28', 'a29', 'a30', 'a31', 'a32',
'a33', 'a34', 'a35', 'a36', 'a37', 'a38', 'a39', 'a40', 'a41', 'a42', 'a43', 'a44', 'a45', 'a46', 'a47', 'a48',
'a49', 'a50', 'a51', 'a52', 'a53', 'a54', 'a55', 'a56', 'a57', 'a58', 'a59', 'a60']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/sonar/sonar.csv')
print('\n% sonar dataset', np.shape(data))
return model, data
def voting():
attrs = ['handicapped_infants', 'water_project_cost_sharing', 'budget_resolution', 'physician_fee_freeze',
'el_salvador_aid', 'religious_groups_in_schools', 'anti_satellite_test_ban', 'aid_to_nicaraguan_contras',
'mx_missile', 'immigration', 'synfuels_corporation_cutback', 'education_spending', 'superfund_right_to_sue',
'crime', 'duty_free_exports', 'export_administration_act_south_africa']
model = Classifier(attrs=attrs, numeric=[], label='label')
data = model.load_data('data/voting/voting.csv')
print('\n% voting dataset', np.shape(data))
return model, data
def ecoli():
attrs = ['sn','mcg','gvh','lip','chg','aac','alm1','alm2']
nums = ['mcg','gvh','lip','chg','aac','alm1','alm2']
model = Classifier(attrs=attrs, numeric=nums, label='label')
data = model.load_data('data/ecoli/ecoli.csv')
print('\n% ecoli dataset', np.shape(data))
return model, data
def ionosphere():
attrs = ['c1','c2','c3','c4','c5','c6','c7','c8','c9','c10','c11','c12','c13','c14','c15','c16','c17','c18','c19',
'c20','c21','c22','c23','c24','c25','c26','c27','c28','c29','c30','c31','c32','c33','c34']
model = Classifier(attrs=attrs, numeric=attrs, label='label')
data = model.load_data('data/ionosphere/ionosphere.csv')
print('\n% ionosphere dataset', np.shape(data))
return model, data
def wine():
attrs = ['alcohol','malic_acid','ash','alcalinity_of_ash','magnesium','tot_phenols','flavanoids',
'nonflavanoid_phenols','proanthocyanins','color_intensity','hue','OD_of_diluted','proline']
model = Classifier(attrs=attrs, numeric=attrs, label='label')
data = model.load_data('data/wine/wine.csv')
print('\n% wine dataset', np.shape(data))
return model, data
def credit_card():
attrs = ['LIMIT_BAL','SEX','EDUCATION','MARRIAGE','AGE','PAY_0','PAY_2','PAY_3','PAY_4','PAY_5','PAY_6',
'BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6','PAY_AMT1','PAY_AMT2','PAY_AMT3','PAY_AMT4',
'PAY_AMT5','PAY_AMT6']
nums = ['LIMIT_BAL','AGE','BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6','PAY_AMT1',
'PAY_AMT2','PAY_AMT3','PAY_AMT4','PAY_AMT5','PAY_AMT6']
model = Classifier(attrs=attrs, numeric=nums, label='DEFAULT_PAYMENT')
data = model.load_data('data/credit_card/credit_card.csv')
print('\n% credit card dataset', np.shape(data))
return model, data
def rain():
attrs = ['Month','Day','Location','MinTemp','MaxTemp','Rainfall','Evaporation','Sunshine','WindGustDir','WindGustSpeed','WindDir9am','WindDir3pm','WindSpeed9am','WindSpeed3pm','Humidity9am','Humidity3pm','Pressure9am','Pressure3pm','Cloud9am','Cloud3pm','Temp9am','Temp3pm','RainToday']
nums = ['Month','Day','MinTemp','MaxTemp','Rainfall','WindDir9am','WindDir3pm','WindSpeed9am','WindSpeed3pm','Humidity9am','Humidity3pm','Pressure9am','Pressure3pm','Temp9am','Temp3pm']
model = Classifier(attrs=attrs, numeric=nums, label='RainTomorrow')
data = model.load_data('data/rain/rain.csv')
print('\n% rain dataset', np.shape(data))
return model, data
def heloc():
attrs = ['ExternalRiskEstimate','MSinceOldestTradeOpen','MSinceMostRecentTradeOpen','AverageMInFile','NumSatisfactoryTrades','NumTrades60Ever2DerogPubRec','NumTrades90Ever2DerogPubRec','PercentTradesNeverDelq','MSinceMostRecentDelq','MaxDelq2PublicRecLast12M','MaxDelqEver','NumTotalTrades','NumTradesOpeninLast12M','PercentInstallTrades','MSinceMostRecentInqexcl7days','NumInqLast6M','NumInqLast6Mexcl7days','NetFractionRevolvingBurden','NetFractionInstallBurden','NumRevolvingTradesWBalance','NumInstallTradesWBalance','NumBank2NatlTradesWHighUtilization','PercentTradesWBalance']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='RiskPerformance')
data = model.load_data('data/heloc/heloc_dataset_v1.csv')
    print('\n% heloc dataset', np.shape(data))
return model, data
def avila():
attrs = ['f1','f2','f3','f4','f5','f6','f7','f8','f9','f10']
nums = ['f1','f2','f3','f4','f5','f6','f7','f8','f9','f10']
model = Classifier(attrs=attrs, numeric=nums, label='class')
data_train = model.load_data('data/avila/train.csv')
data_test = model.load_data('data/avila/test.csv')
print('\n% avila dataset train', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def titanic():
attrs = ['Sex', 'Age', 'Number_of_Siblings_Spouses', 'Number_Of_Parents_Children', 'Fare', 'Class', 'Embarked']
nums = ['Age', 'Number_of_Siblings_Spouses', 'Number_Of_Parents_Children', 'Fare']
model = Classifier(attrs=attrs, numeric=nums, label='Survived')
data_train = model.load_data('data/titanic/train.csv')
data_test = model.load_data('data/titanic/test.csv')
print('\n% titanic dataset train', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def anneal():
attrs = ['family', 'product_type', 'steel', 'carbon', 'hardness', 'temper_rolling', 'condition', 'formability',
'strength', 'non_ageing', 'surface_finish', 'surface_quality', 'enamelability', 'bc', 'bf', 'bt', 'bw_me', 'bl',
'm', 'chrom', 'phos', 'cbond', 'marvi', 'exptl', 'ferro', 'corr', 'blue_bright_varn_clean', 'lustre', 'jurofm',
's', 'p', 'shape', 'thick', 'width', 'len', 'oil', 'bore', 'packing']
nums = ['thick', 'width', 'len']
model = Classifier(attrs=attrs, numeric=nums, label='classes')
data_train = model.load_data('data/anneal/anneal_train.csv')
data_test = model.load_data('data/anneal/anneal_test.csv')
print('\n% anneal dataset train', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def weight_lifting():
attrs = ['new_window','num_window','roll_belt','pitch_belt','yaw_belt','total_accel_belt','kurtosis_roll_belt','kurtosis_picth_belt','kurtosis_yaw_belt','skewness_roll_belt','skewness_roll_belt','skewness_yaw_belt','max_roll_belt','max_picth_belt','max_yaw_belt','min_roll_belt','min_pitch_belt','min_yaw_belt','amplitude_roll_belt','amplitude_pitch_belt','amplitude_yaw_belt','var_total_accel_belt','avg_roll_belt','stddev_roll_belt','var_roll_belt','avg_pitch_belt','stddev_pitch_belt','var_pitch_belt','avg_yaw_belt','stddev_yaw_belt','var_yaw_belt','gyros_belt_x','gyros_belt_y','gyros_belt_z','accel_belt_x','accel_belt_y','accel_belt_z','magnet_belt_x','magnet_belt_y','magnet_belt_z','roll_arm','pitch_arm','yaw_arm','total_accel_arm','var_accel_arm','avg_roll_arm','stddev_roll_arm','var_roll_arm','avg_pitch_arm','stddev_pitch_arm','var_pitch_arm','avg_yaw_arm','stddev_yaw_arm','var_yaw_arm','gyros_arm_x','gyros_arm_y','gyros_arm_z','accel_arm_x','accel_arm_y','accel_arm_z','magnet_arm_x','magnet_arm_y','magnet_arm_z','kurtosis_roll_arm','kurtosis_picth_arm','kurtosis_yaw_arm','skewness_roll_arm','skewness_pitch_arm','skewness_yaw_arm','max_roll_arm','max_picth_arm','max_yaw_arm','min_roll_arm','min_pitch_arm','min_yaw_arm','amplitude_roll_arm','amplitude_pitch_arm','amplitude_yaw_arm','roll_dumbbell','pitch_dumbbell','yaw_dumbbell','kurtosis_roll_dumbbell','kurtosis_picth_dumbbell','kurtosis_yaw_dumbbell','skewness_roll_dumbbell','skewness_pitch_dumbbell','skewness_yaw_dumbbell','max_roll_dumbbell','max_picth_dumbbell','max_yaw_dumbbell','min_roll_dumbbell','min_pitch_dumbbell','min_yaw_dumbbell','amplitude_roll_dumbbell','amplitude_pitch_dumbbell','amplitude_yaw_dumbbell','total_accel_dumbbell','var_accel_dumbbell','avg_roll_dumbbell','stddev_roll_dumbbell','var_roll_dumbbell','avg_pitch_dumbbell','stddev_pitch_dumbbell','var_pitch_dumbbell','avg_yaw_dumbbell','stddev_yaw_dumbbell','var_yaw_dumbbell','gyros_dumbbell_x','gyros_dumbbell_y','gyros_dumbbell_z','accel_dumbbell_x','accel_dumbbell_y','accel_dumbbell_z','magnet_dumbbell_x','magnet_dumbbell_y','magnet_dumbbell_z','roll_forearm','pitch_forearm','yaw_forearm','kurtosis_roll_forearm','kurtosis_picth_forearm','kurtosis_yaw_forearm','skewness_roll_forearm','skewness_pitch_forearm','skewness_yaw_forearm','max_roll_forearm','max_picth_forearm','max_yaw_forearm','min_roll_forearm','min_pitch_forearm','min_yaw_forearm','amplitude_roll_forearm','amplitude_pitch_forearm','amplitude_yaw_forearm','total_accel_forearm','var_accel_forearm','avg_roll_forearm','stddev_roll_forearm','var_roll_forearm','avg_pitch_forearm','stddev_pitch_forearm','var_pitch_forearm','avg_yaw_forearm','stddev_yaw_forearm','var_yaw_forearm','gyros_forearm_x','gyros_forearm_y','gyros_forearm_z','accel_forearm_x','accel_forearm_y','accel_forearm_z','magnet_forearm_x','magnet_forearm_y','magnet_forearm_z']
nums = ['num_window','roll_belt','pitch_belt','yaw_belt','total_accel_belt','kurtosis_roll_belt','kurtosis_picth_belt','kurtosis_yaw_belt','skewness_roll_belt','skewness_roll_belt','skewness_yaw_belt','max_roll_belt','max_picth_belt','max_yaw_belt','min_roll_belt','min_pitch_belt','min_yaw_belt','amplitude_roll_belt','amplitude_pitch_belt','amplitude_yaw_belt','var_total_accel_belt','avg_roll_belt','stddev_roll_belt','var_roll_belt','avg_pitch_belt','stddev_pitch_belt','var_pitch_belt','avg_yaw_belt','stddev_yaw_belt','var_yaw_belt','gyros_belt_x','gyros_belt_y','gyros_belt_z','accel_belt_x','accel_belt_y','accel_belt_z','magnet_belt_x','magnet_belt_y','magnet_belt_z','roll_arm','pitch_arm','yaw_arm','total_accel_arm','var_accel_arm','avg_roll_arm','stddev_roll_arm','var_roll_arm','avg_pitch_arm','stddev_pitch_arm','var_pitch_arm','avg_yaw_arm','stddev_yaw_arm','var_yaw_arm','gyros_arm_x','gyros_arm_y','gyros_arm_z','accel_arm_x','accel_arm_y','accel_arm_z','magnet_arm_x','magnet_arm_y','magnet_arm_z','kurtosis_roll_arm','kurtosis_picth_arm','kurtosis_yaw_arm','skewness_roll_arm','skewness_pitch_arm','skewness_yaw_arm','max_roll_arm','max_picth_arm','max_yaw_arm','min_roll_arm','min_pitch_arm','min_yaw_arm','amplitude_roll_arm','amplitude_pitch_arm','amplitude_yaw_arm','roll_dumbbell','pitch_dumbbell','yaw_dumbbell','kurtosis_roll_dumbbell','kurtosis_picth_dumbbell','kurtosis_yaw_dumbbell','skewness_roll_dumbbell','skewness_pitch_dumbbell','skewness_yaw_dumbbell','max_roll_dumbbell','max_picth_dumbbell','max_yaw_dumbbell','min_roll_dumbbell','min_pitch_dumbbell','min_yaw_dumbbell','amplitude_roll_dumbbell','amplitude_pitch_dumbbell','amplitude_yaw_dumbbell','total_accel_dumbbell','var_accel_dumbbell','avg_roll_dumbbell','stddev_roll_dumbbell','var_roll_dumbbell','avg_pitch_dumbbell','stddev_pitch_dumbbell','var_pitch_dumbbell','avg_yaw_dumbbell','stddev_yaw_dumbbell','var_yaw_dumbbell','gyros_dumbbell_x','gyros_dumbbell_y','gyros_dumbbell_z','accel_dumbbell_x','accel_dumbbell_y','accel_dumbbell_z','magnet_dumbbell_x','magnet_dumbbell_y','magnet_dumbbell_z','roll_forearm','pitch_forearm','yaw_forearm','kurtosis_roll_forearm','kurtosis_picth_forearm','kurtosis_yaw_forearm','skewness_roll_forearm','skewness_pitch_forearm','skewness_yaw_forearm','max_roll_forearm','max_picth_forearm','max_yaw_forearm','min_roll_forearm','min_pitch_forearm','min_yaw_forearm','amplitude_roll_forearm','amplitude_pitch_forearm','amplitude_yaw_forearm','total_accel_forearm','var_accel_forearm','avg_roll_forearm','stddev_roll_forearm','var_roll_forearm','avg_pitch_forearm','stddev_pitch_forearm','var_pitch_forearm','avg_yaw_forearm','stddev_yaw_forearm','var_yaw_forearm','gyros_forearm_x','gyros_forearm_y','gyros_forearm_z','accel_forearm_x','accel_forearm_y','accel_forearm_z','magnet_forearm_x','magnet_forearm_y','magnet_forearm_z']
model = Classifier(attrs=attrs, numeric=nums, label='classe')
data = model.load_data('data/weight_lifting/weight_lifting.csv')
print('\n% weight lifting dataset', np.shape(data))
return model, data
def yeast():
attrs = ['sequence','mcg','gvh','alm','mit','erl','pox','vac','nuc']
nums = ['mcg','gvh','alm','mit','erl','pox','vac','nuc']
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/yeast/yeast.csv')
print('\n% yeast dataset', np.shape(data))
return model, data
def drug():
attrs = ['Age','Gender','Education','Country','Ethnicity','Nscore','Escore','Oscore','Ascore','Cscore','Impulsive','SS']
nums = attrs
output = ['Alcohol','Amphet','Amyl','Benzos','Caff','Cannabis','Choc','Code','Crack','Ecstasy','Heroin','Ketamine','Legalh','LSD','Meth','Mushrooms','Nicotine','Semer','VSA']
    model = Classifier(attrs=attrs, numeric=nums, label=output[17])  # output[17] == 'Semer'
data = model.load_data('data/drug/drug.csv')
print('\n% drug consumption dataset', np.shape(data))
return model, data
def dry_bean():
attrs = ['Area','Perimeter','MajorAxisLength','MinorAxisLength','AspectRation','Eccentricity','ConvexArea','EquivDiameter','Extent','Solidity','roundness','Compactness','ShapeFactor1','ShapeFactor2','ShapeFactor3','ShapeFactor4']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='Class')
data = model.load_data('data/dry_bean/dry_bean.csv')
print('\n% dry bean dataset', np.shape(data))
return model, data
def eeg():
attrs = ['AF3','F7','F3','FC5','T7','P7','O1','O2','P8','T8','FC6','F4','F8','AF4']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='eyeDetection')
data = model.load_data('data/eeg/eeg.csv')
print('\n% eeg dataset', np.shape(data))
return model, data
def nursery():
attrs = ['parents','has_nurs','form','children','housing','finance','social','health']
nums = []
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/nursery/nursery.csv')
print('\n% nursery dataset', np.shape(data))
return model, data
def intention():
attrs = ['Administrative','Administrative_Duration','Informational','Informational_Duration','ProductRelated','ProductRelated_Duration','BounceRates','ExitRates','PageValues','SpecialDay','Month','OperatingSystems','Browser','Region','TrafficType','VisitorType','Weekend']
nums = ['Administrative','Administrative_Duration','Informational','Informational_Duration','ProductRelated','ProductRelated_Duration','BounceRates','ExitRates','PageValues','SpecialDay']
model = Classifier(attrs=attrs, numeric=nums, label='Revenue')
data = model.load_data('data/intention/intention.csv')
print('\n% online shoppers intention dataset', np.shape(data))
return model, data
def page_blocks():
attrs = ['height','lenght','area','eccen','p_black','p_and','mean_tr','blackpix','blackand','wb_trans']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/page_blocks/page_blocks.csv')
print('\n% page blocks dataset', np.shape(data))
return model, data
def parkinson():
attrs = ['gender','PPE','DFA','RPDE','numPulses','numPeriodsPulses','meanPeriodPulses','stdDevPeriodPulses','locPctJitter','locAbsJitter','rapJitter','ppq5Jitter','ddpJitter','locShimmer','locDbShimmer','apq3Shimmer','apq5Shimmer','apq11Shimmer','ddaShimmer','meanAutoCorrHarmonicity','meanNoiseToHarmHarmonicity','meanHarmToNoiseHarmonicity','minIntensity','maxIntensity','meanIntensity','f1','f2','f3','f4','b1','b2','b3','b4','GQ_prc5_95','GQ_std_cycle_open','GQ_std_cycle_closed','GNE_mean','GNE_std','GNE_SNR_TKEO','GNE_SNR_SEO','GNE_NSR_TKEO','GNE_NSR_SEO','VFER_mean','VFER_std','VFER_entropy','VFER_SNR_TKEO','VFER_SNR_SEO','VFER_NSR_TKEO','VFER_NSR_SEO','IMF_SNR_SEO','IMF_SNR_TKEO','IMF_SNR_entropy','IMF_NSR_SEO','IMF_NSR_TKEO','IMF_NSR_entropy','mean_Log_energy','mean_MFCC_0th_coef','mean_MFCC_1st_coef','mean_MFCC_2nd_coef','mean_MFCC_3rd_coef','mean_MFCC_4th_coef','mean_MFCC_5th_coef','mean_MFCC_6th_coef','mean_MFCC_7th_coef','mean_MFCC_8th_coef','mean_MFCC_9th_coef','mean_MFCC_10th_coef','mean_MFCC_11th_coef','mean_MFCC_12th_coef','mean_delta_log_energy','mean_0th_delta','mean_1st_delta','mean_2nd_delta','mean_3rd_delta','mean_4th_delta','mean_5th_delta','mean_6th_delta','mean_7th_delta','mean_8th_delta','mean_9th_delta','mean_10th_delta','mean_11th_delta','mean_12th_delta','mean_delta_delta_log_energy','mean_delta_delta_0th','mean_1st_delta_delta','mean_2nd_delta_delta','mean_3rd_delta_delta','mean_4th_delta_delta','mean_5th_delta_delta','mean_6th_delta_delta','mean_7th_delta_delta','mean_8th_delta_delta','mean_9th_delta_delta','mean_10th_delta_delta','mean_11th_delta_delta','mean_12th_delta_delta','std_Log_energy','std_MFCC_0th_coef','std_MFCC_1st_coef','std_MFCC_2nd_coef','std_MFCC_3rd_coef','std_MFCC_4th_coef','std_MFCC_5th_coef','std_MFCC_6th_coef','std_MFCC_7th_coef','std_MFCC_8th_coef','std_MFCC_9th_coef','std_MFCC_10th_coef','std_MFCC_11th_coef','std_MFCC_12th_coef','std_delta_log_energy','std_0th_delta','std_1st_delta','std_2nd_delta','std_3rd_delta','std_4th_delta','std_5th_delta','std_6th_delta','std_7th_delta','std_8th_delta','std_9th_delta','std_10th_delta','std_11th_delta','std_12th_delta','std_delta_delta_log_energy','std_delta_delta_0th','std_1st_delta_delta','std_2nd_delta_delta','std_3rd_delta_delta','std_4th_delta_delta','std_5th_delta_delta','std_6th_delta_delta','std_7th_delta_delta','std_8th_delta_delta','std_9th_delta_delta','std_10th_delta_delta','std_11th_delta_delta','std_12th_delta_delta','Ea','Ed_1_coef','Ed_2_coef','Ed_3_coef','Ed_4_coef','Ed_5_coef','Ed_6_coef','Ed_7_coef','Ed_8_coef','Ed_9_coef','Ed_10_coef','det_entropy_shannon_1_coef','det_entropy_shannon_2_coef','det_entropy_shannon_3_coef','det_entropy_shannon_4_coef','det_entropy_shannon_5_coef','det_entropy_shannon_6_coef','det_entropy_shannon_7_coef','det_entropy_shannon_8_coef','det_entropy_shannon_9_coef','det_entropy_shannon_10_coef','det_entropy_log_1_coef','det_entropy_log_2_coef','det_entropy_log_3_coef','det_entropy_log_4_coef','det_entropy_log_5_coef','det_entropy_log_6_coef','det_entropy_log_7_coef','det_entropy_log_8_coef','det_entropy_log_9_coef','det_entropy_log_10_coef','det_TKEO_mean_1_coef','det_TKEO_mean_2_coef','det_TKEO_mean_3_coef','det_TKEO_mean_4_coef','det_TKEO_mean_5_coef','det_TKEO_mean_6_coef','det_TKEO_mean_7_coef','det_TKEO_mean_8_coef','det_TKEO_mean_9_coef','det_TKEO_mean_10_coef','det_TKEO_std_1_coef','det_TKEO_std_2_coef','det_TKEO_std_3_coef','det_TKEO_std_4_coef','det_TKEO_std_5_coef','det_TKEO_std_6_coef','det_TKEO_std_7_coef','det_TKEO_std_8_coef','det_TKEO_std_9_c
oef','det_TKEO_std_10_coef','app_entropy_shannon_1_coef','app_entropy_shannon_2_coef','app_entropy_shannon_3_coef','app_entropy_shannon_4_coef','app_entropy_shannon_5_coef','app_entropy_shannon_6_coef','app_entropy_shannon_7_coef','app_entropy_shannon_8_coef','app_entropy_shannon_9_coef','app_entropy_shannon_10_coef','app_entropy_log_1_coef','app_entropy_log_2_coef','app_entropy_log_3_coef','app_entropy_log_4_coef','app_entropy_log_5_coef','app_entropy_log_6_coef','app_entropy_log_7_coef','app_entropy_log_8_coef','app_entropy_log_9_coef','app_entropy_log_10_coef','app_det_TKEO_mean_1_coef','app_det_TKEO_mean_2_coef','app_det_TKEO_mean_3_coef','app_det_TKEO_mean_4_coef','app_det_TKEO_mean_5_coef','app_det_TKEO_mean_6_coef','app_det_TKEO_mean_7_coef','app_det_TKEO_mean_8_coef','app_det_TKEO_mean_9_coef','app_det_TKEO_mean_10_coef','app_TKEO_std_1_coef','app_TKEO_std_2_coef','app_TKEO_std_3_coef','app_TKEO_std_4_coef','app_TKEO_std_5_coef','app_TKEO_std_6_coef','app_TKEO_std_7_coef','app_TKEO_std_8_coef','app_TKEO_std_9_coef','app_TKEO_std_10_coef','Ea2','Ed2_1_coef','Ed2_2_coef','Ed2_3_coef','Ed2_4_coef','Ed2_5_coef','Ed2_6_coef','Ed2_7_coef','Ed2_8_coef','Ed2_9_coef','Ed2_10_coef','det_LT_entropy_shannon_1_coef','det_LT_entropy_shannon_2_coef','det_LT_entropy_shannon_3_coef','det_LT_entropy_shannon_4_coef','det_LT_entropy_shannon_5_coef','det_LT_entropy_shannon_6_coef','det_LT_entropy_shannon_7_coef','det_LT_entropy_shannon_8_coef','det_LT_entropy_shannon_9_coef','det_LT_entropy_shannon_10_coef','det_LT_entropy_log_1_coef','det_LT_entropy_log_2_coef','det_LT_entropy_log_3_coef','det_LT_entropy_log_4_coef','det_LT_entropy_log_5_coef','det_LT_entropy_log_6_coef','det_LT_entropy_log_7_coef','det_LT_entropy_log_8_coef','det_LT_entropy_log_9_coef','det_LT_entropy_log_10_coef','det_LT_TKEO_mean_1_coef','det_LT_TKEO_mean_2_coef','det_LT_TKEO_mean_3_coef','det_LT_TKEO_mean_4_coef','det_LT_TKEO_mean_5_coef','det_LT_TKEO_mean_6_coef','det_LT_TKEO_mean_7_coef','det_LT_TKEO_mean_8_coef','det_LT_TKEO_mean_9_coef','det_LT_TKEO_mean_10_coef','det_LT_TKEO_std_1_coef','det_LT_TKEO_std_2_coef','det_LT_TKEO_std_3_coef','det_LT_TKEO_std_4_coef','det_LT_TKEO_std_5_coef','det_LT_TKEO_std_6_coef','det_LT_TKEO_std_7_coef','det_LT_TKEO_std_8_coef','det_LT_TKEO_std_9_coef','det_LT_TKEO_std_10_coef','app_LT_entropy_shannon_1_coef','app_LT_entropy_shannon_2_coef','app_LT_entropy_shannon_3_coef','app_LT_entropy_shannon_4_coef','app_LT_entropy_shannon_5_coef','app_LT_entropy_shannon_6_coef','app_LT_entropy_shannon_7_coef','app_LT_entropy_shannon_8_coef','app_LT_entropy_shannon_9_coef','app_LT_entropy_shannon_10_coef','app_LT_entropy_log_1_coef','app_LT_entropy_log_2_coef','app_LT_entropy_log_3_coef','app_LT_entropy_log_4_coef','app_LT_entropy_log_5_coef','app_LT_entropy_log_6_coef','app_LT_entropy_log_7_coef','app_LT_entropy_log_8_coef','app_LT_entropy_log_9_coef','app_LT_entropy_log_10_coef','app_LT_TKEO_mean_1_coef','app_LT_TKEO_mean_2_coef','app_LT_TKEO_mean_3_coef','app_LT_TKEO_mean_4_coef','app_LT_TKEO_mean_5_coef','app_LT_TKEO_mean_6_coef','app_LT_TKEO_mean_7_coef','app_LT_TKEO_mean_8_coef','app_LT_TKEO_mean_9_coef','app_LT_TKEO_mean_10_coef','app_LT_TKEO_std_1_coef','app_LT_TKEO_std_2_coef','app_LT_TKEO_std_3_coef','app_LT_TKEO_std_4_coef','app_LT_TKEO_std_5_coef','app_LT_TKEO_std_6_coef','app_LT_TKEO_std_7_coef','app_LT_TKEO_std_8_coef','app_LT_TKEO_std_9_coef','app_LT_TKEO_std_10_coef','tqwt_energy_dec_1','tqwt_energy_dec_2','tqwt_energy_dec_3','tqwt_energy_dec_4','tqwt_energy_dec_5','tqwt_energy_dec_6','tqwt_en
ergy_dec_7','tqwt_energy_dec_8','tqwt_energy_dec_9','tqwt_energy_dec_10','tqwt_energy_dec_11','tqwt_energy_dec_12','tqwt_energy_dec_13','tqwt_energy_dec_14','tqwt_energy_dec_15','tqwt_energy_dec_16','tqwt_energy_dec_17','tqwt_energy_dec_18','tqwt_energy_dec_19','tqwt_energy_dec_20','tqwt_energy_dec_21','tqwt_energy_dec_22','tqwt_energy_dec_23','tqwt_energy_dec_24','tqwt_energy_dec_25','tqwt_energy_dec_26','tqwt_energy_dec_27','tqwt_energy_dec_28','tqwt_energy_dec_29','tqwt_energy_dec_30','tqwt_energy_dec_31','tqwt_energy_dec_32','tqwt_energy_dec_33','tqwt_energy_dec_34','tqwt_energy_dec_35','tqwt_energy_dec_36',
'tqwt_entropy_shannon_dec_1','tqwt_entropy_shannon_dec_2','tqwt_entropy_shannon_dec_3','tqwt_entropy_shannon_dec_4','tqwt_entropy_shannon_dec_5','tqwt_entropy_shannon_dec_6','tqwt_entropy_shannon_dec_7','tqwt_entropy_shannon_dec_8','tqwt_entropy_shannon_dec_9','tqwt_entropy_shannon_dec_10','tqwt_entropy_shannon_dec_11','tqwt_entropy_shannon_dec_12','tqwt_entropy_shannon_dec_13','tqwt_entropy_shannon_dec_14','tqwt_entropy_shannon_dec_15','tqwt_entropy_shannon_dec_16','tqwt_entropy_shannon_dec_17','tqwt_entropy_shannon_dec_18','tqwt_entropy_shannon_dec_19','tqwt_entropy_shannon_dec_20','tqwt_entropy_shannon_dec_21','tqwt_entropy_shannon_dec_22','tqwt_entropy_shannon_dec_23','tqwt_entropy_shannon_dec_24','tqwt_entropy_shannon_dec_25','tqwt_entropy_shannon_dec_26','tqwt_entropy_shannon_dec_27','tqwt_entropy_shannon_dec_28','tqwt_entropy_shannon_dec_29','tqwt_entropy_shannon_dec_30','tqwt_entropy_shannon_dec_31','tqwt_entropy_shannon_dec_32','tqwt_entropy_shannon_dec_33','tqwt_entropy_shannon_dec_34','tqwt_entropy_shannon_dec_35','tqwt_entropy_shannon_dec_36',
'tqwt_entropy_log_dec_1','tqwt_entropy_log_dec_2','tqwt_entropy_log_dec_3','tqwt_entropy_log_dec_4','tqwt_entropy_log_dec_5','tqwt_entropy_log_dec_6','tqwt_entropy_log_dec_7','tqwt_entropy_log_dec_8','tqwt_entropy_log_dec_9','tqwt_entropy_log_dec_10','tqwt_entropy_log_dec_11','tqwt_entropy_log_dec_12','tqwt_entropy_log_dec_13','tqwt_entropy_log_dec_14','tqwt_entropy_log_dec_15','tqwt_entropy_log_dec_16','tqwt_entropy_log_dec_17','tqwt_entropy_log_dec_18','tqwt_entropy_log_dec_19','tqwt_entropy_log_dec_20','tqwt_entropy_log_dec_21','tqwt_entropy_log_dec_22','tqwt_entropy_log_dec_23','tqwt_entropy_log_dec_24','tqwt_entropy_log_dec_25','tqwt_entropy_log_dec_26','tqwt_entropy_log_dec_27','tqwt_entropy_log_dec_28','tqwt_entropy_log_dec_29','tqwt_entropy_log_dec_30','tqwt_entropy_log_dec_31','tqwt_entropy_log_dec_32','tqwt_entropy_log_dec_33','tqwt_entropy_log_dec_34','tqwt_entropy_log_dec_35','tqwt_entropy_log_dec_36',
'tqwt_TKEO_mean_dec_1','tqwt_TKEO_mean_dec_2','tqwt_TKEO_mean_dec_3','tqwt_TKEO_mean_dec_4','tqwt_TKEO_mean_dec_5','tqwt_TKEO_mean_dec_6','tqwt_TKEO_mean_dec_7','tqwt_TKEO_mean_dec_8','tqwt_TKEO_mean_dec_9','tqwt_TKEO_mean_dec_10','tqwt_TKEO_mean_dec_11','tqwt_TKEO_mean_dec_12','tqwt_TKEO_mean_dec_13','tqwt_TKEO_mean_dec_14','tqwt_TKEO_mean_dec_15','tqwt_TKEO_mean_dec_16','tqwt_TKEO_mean_dec_17','tqwt_TKEO_mean_dec_18','tqwt_TKEO_mean_dec_19','tqwt_TKEO_mean_dec_20','tqwt_TKEO_mean_dec_21','tqwt_TKEO_mean_dec_22','tqwt_TKEO_mean_dec_23','tqwt_TKEO_mean_dec_24','tqwt_TKEO_mean_dec_25','tqwt_TKEO_mean_dec_26','tqwt_TKEO_mean_dec_27','tqwt_TKEO_mean_dec_28','tqwt_TKEO_mean_dec_29','tqwt_TKEO_mean_dec_30','tqwt_TKEO_mean_dec_31','tqwt_TKEO_mean_dec_32','tqwt_TKEO_mean_dec_33','tqwt_TKEO_mean_dec_34','tqwt_TKEO_mean_dec_35','tqwt_TKEO_mean_dec_36',
'tqwt_TKEO_std_dec_1','tqwt_TKEO_std_dec_2','tqwt_TKEO_std_dec_3','tqwt_TKEO_std_dec_4','tqwt_TKEO_std_dec_5','tqwt_TKEO_std_dec_6','tqwt_TKEO_std_dec_7','tqwt_TKEO_std_dec_8','tqwt_TKEO_std_dec_9','tqwt_TKEO_std_dec_10','tqwt_TKEO_std_dec_11','tqwt_TKEO_std_dec_12','tqwt_TKEO_std_dec_13','tqwt_TKEO_std_dec_14','tqwt_TKEO_std_dec_15','tqwt_TKEO_std_dec_16','tqwt_TKEO_std_dec_17','tqwt_TKEO_std_dec_18','tqwt_TKEO_std_dec_19','tqwt_TKEO_std_dec_20','tqwt_TKEO_std_dec_21','tqwt_TKEO_std_dec_22','tqwt_TKEO_std_dec_23','tqwt_TKEO_std_dec_24','tqwt_TKEO_std_dec_25','tqwt_TKEO_std_dec_26','tqwt_TKEO_std_dec_27','tqwt_TKEO_std_dec_28','tqwt_TKEO_std_dec_29','tqwt_TKEO_std_dec_30','tqwt_TKEO_std_dec_31','tqwt_TKEO_std_dec_32','tqwt_TKEO_std_dec_33','tqwt_TKEO_std_dec_34','tqwt_TKEO_std_dec_35','tqwt_TKEO_std_dec_36',
'tqwt_medianValue_dec_1','tqwt_medianValue_dec_2','tqwt_medianValue_dec_3','tqwt_medianValue_dec_4','tqwt_medianValue_dec_5','tqwt_medianValue_dec_6','tqwt_medianValue_dec_7','tqwt_medianValue_dec_8','tqwt_medianValue_dec_9','tqwt_medianValue_dec_10','tqwt_medianValue_dec_11','tqwt_medianValue_dec_12','tqwt_medianValue_dec_13','tqwt_medianValue_dec_14','tqwt_medianValue_dec_15','tqwt_medianValue_dec_16','tqwt_medianValue_dec_17','tqwt_medianValue_dec_18','tqwt_medianValue_dec_19','tqwt_medianValue_dec_20','tqwt_medianValue_dec_21','tqwt_medianValue_dec_22','tqwt_medianValue_dec_23','tqwt_medianValue_dec_24','tqwt_medianValue_dec_25','tqwt_medianValue_dec_26','tqwt_medianValue_dec_27','tqwt_medianValue_dec_28','tqwt_medianValue_dec_29','tqwt_medianValue_dec_30','tqwt_medianValue_dec_31','tqwt_medianValue_dec_32','tqwt_medianValue_dec_33','tqwt_medianValue_dec_34','tqwt_medianValue_dec_35','tqwt_medianValue_dec_36',
'tqwt_meanValue_dec_1','tqwt_meanValue_dec_2','tqwt_meanValue_dec_3','tqwt_meanValue_dec_4','tqwt_meanValue_dec_5','tqwt_meanValue_dec_6','tqwt_meanValue_dec_7','tqwt_meanValue_dec_8','tqwt_meanValue_dec_9','tqwt_meanValue_dec_10','tqwt_meanValue_dec_11','tqwt_meanValue_dec_12','tqwt_meanValue_dec_13','tqwt_meanValue_dec_14','tqwt_meanValue_dec_15','tqwt_meanValue_dec_16','tqwt_meanValue_dec_17','tqwt_meanValue_dec_18','tqwt_meanValue_dec_19','tqwt_meanValue_dec_20','tqwt_meanValue_dec_21','tqwt_meanValue_dec_22','tqwt_meanValue_dec_23','tqwt_meanValue_dec_24','tqwt_meanValue_dec_25','tqwt_meanValue_dec_26','tqwt_meanValue_dec_27','tqwt_meanValue_dec_28','tqwt_meanValue_dec_29','tqwt_meanValue_dec_30','tqwt_meanValue_dec_31','tqwt_meanValue_dec_32','tqwt_meanValue_dec_33','tqwt_meanValue_dec_34','tqwt_meanValue_dec_35','tqwt_meanValue_dec_36',
'tqwt_stdValue_dec_1','tqwt_stdValue_dec_2','tqwt_stdValue_dec_3','tqwt_stdValue_dec_4','tqwt_stdValue_dec_5','tqwt_stdValue_dec_6','tqwt_stdValue_dec_7','tqwt_stdValue_dec_8','tqwt_stdValue_dec_9','tqwt_stdValue_dec_10','tqwt_stdValue_dec_11','tqwt_stdValue_dec_12','tqwt_stdValue_dec_13','tqwt_stdValue_dec_14','tqwt_stdValue_dec_15','tqwt_stdValue_dec_16','tqwt_stdValue_dec_17','tqwt_stdValue_dec_18','tqwt_stdValue_dec_19','tqwt_stdValue_dec_20','tqwt_stdValue_dec_21','tqwt_stdValue_dec_22','tqwt_stdValue_dec_23','tqwt_stdValue_dec_24','tqwt_stdValue_dec_25','tqwt_stdValue_dec_26','tqwt_stdValue_dec_27','tqwt_stdValue_dec_28','tqwt_stdValue_dec_29','tqwt_stdValue_dec_30','tqwt_stdValue_dec_31','tqwt_stdValue_dec_32','tqwt_stdValue_dec_33','tqwt_stdValue_dec_34','tqwt_stdValue_dec_35','tqwt_stdValue_dec_36',
'tqwt_minValue_dec_1','tqwt_minValue_dec_2','tqwt_minValue_dec_3','tqwt_minValue_dec_4','tqwt_minValue_dec_5','tqwt_minValue_dec_6','tqwt_minValue_dec_7','tqwt_minValue_dec_8','tqwt_minValue_dec_9','tqwt_minValue_dec_10','tqwt_minValue_dec_11','tqwt_minValue_dec_12','tqwt_minValue_dec_13','tqwt_minValue_dec_14','tqwt_minValue_dec_15','tqwt_minValue_dec_16','tqwt_minValue_dec_17','tqwt_minValue_dec_18','tqwt_minValue_dec_19','tqwt_minValue_dec_20','tqwt_minValue_dec_21','tqwt_minValue_dec_22','tqwt_minValue_dec_23','tqwt_minValue_dec_24','tqwt_minValue_dec_25','tqwt_minValue_dec_26','tqwt_minValue_dec_27','tqwt_minValue_dec_28','tqwt_minValue_dec_29','tqwt_minValue_dec_30','tqwt_minValue_dec_31','tqwt_minValue_dec_32','tqwt_minValue_dec_33','tqwt_minValue_dec_34','tqwt_minValue_dec_35','tqwt_minValue_dec_36',
'tqwt_maxValue_dec_1','tqwt_maxValue_dec_2','tqwt_maxValue_dec_3','tqwt_maxValue_dec_4','tqwt_maxValue_dec_5','tqwt_maxValue_dec_6','tqwt_maxValue_dec_7','tqwt_maxValue_dec_8','tqwt_maxValue_dec_9','tqwt_maxValue_dec_10','tqwt_maxValue_dec_11','tqwt_maxValue_dec_12','tqwt_maxValue_dec_13','tqwt_maxValue_dec_14','tqwt_maxValue_dec_15','tqwt_maxValue_dec_16','tqwt_maxValue_dec_17','tqwt_maxValue_dec_18','tqwt_maxValue_dec_19','tqwt_maxValue_dec_20','tqwt_maxValue_dec_21','tqwt_maxValue_dec_22','tqwt_maxValue_dec_23','tqwt_maxValue_dec_24','tqwt_maxValue_dec_25','tqwt_maxValue_dec_26','tqwt_maxValue_dec_27','tqwt_maxValue_dec_28','tqwt_maxValue_dec_29','tqwt_maxValue_dec_30','tqwt_maxValue_dec_31','tqwt_maxValue_dec_32','tqwt_maxValue_dec_33','tqwt_maxValue_dec_34','tqwt_maxValue_dec_35','tqwt_maxValue_dec_36',
'tqwt_skewnessValue_dec_1','tqwt_skewnessValue_dec_2','tqwt_skewnessValue_dec_3','tqwt_skewnessValue_dec_4','tqwt_skewnessValue_dec_5','tqwt_skewnessValue_dec_6','tqwt_skewnessValue_dec_7','tqwt_skewnessValue_dec_8','tqwt_skewnessValue_dec_9','tqwt_skewnessValue_dec_10','tqwt_skewnessValue_dec_11','tqwt_skewnessValue_dec_12','tqwt_skewnessValue_dec_13','tqwt_skewnessValue_dec_14','tqwt_skewnessValue_dec_15','tqwt_skewnessValue_dec_16','tqwt_skewnessValue_dec_17','tqwt_skewnessValue_dec_18','tqwt_skewnessValue_dec_19','tqwt_skewnessValue_dec_20','tqwt_skewnessValue_dec_21','tqwt_skewnessValue_dec_22','tqwt_skewnessValue_dec_23','tqwt_skewnessValue_dec_24','tqwt_skewnessValue_dec_25','tqwt_skewnessValue_dec_26','tqwt_skewnessValue_dec_27','tqwt_skewnessValue_dec_28','tqwt_skewnessValue_dec_29','tqwt_skewnessValue_dec_30','tqwt_skewnessValue_dec_31','tqwt_skewnessValue_dec_32','tqwt_skewnessValue_dec_33','tqwt_skewnessValue_dec_34','tqwt_skewnessValue_dec_35','tqwt_skewnessValue_dec_36',
'tqwt_kurtosisValue_dec_1','tqwt_kurtosisValue_dec_2','tqwt_kurtosisValue_dec_3','tqwt_kurtosisValue_dec_4','tqwt_kurtosisValue_dec_5','tqwt_kurtosisValue_dec_6','tqwt_kurtosisValue_dec_7','tqwt_kurtosisValue_dec_8','tqwt_kurtosisValue_dec_9','tqwt_kurtosisValue_dec_10','tqwt_kurtosisValue_dec_11','tqwt_kurtosisValue_dec_12','tqwt_kurtosisValue_dec_13','tqwt_kurtosisValue_dec_14','tqwt_kurtosisValue_dec_15','tqwt_kurtosisValue_dec_16','tqwt_kurtosisValue_dec_17','tqwt_kurtosisValue_dec_18','tqwt_kurtosisValue_dec_19','tqwt_kurtosisValue_dec_20','tqwt_kurtosisValue_dec_21','tqwt_kurtosisValue_dec_22','tqwt_kurtosisValue_dec_23','tqwt_kurtosisValue_dec_24','tqwt_kurtosisValue_dec_25','tqwt_kurtosisValue_dec_26','tqwt_kurtosisValue_dec_27','tqwt_kurtosisValue_dec_28','tqwt_kurtosisValue_dec_29','tqwt_kurtosisValue_dec_30','tqwt_kurtosisValue_dec_31','tqwt_kurtosisValue_dec_32','tqwt_kurtosisValue_dec_33','tqwt_kurtosisValue_dec_34','tqwt_kurtosisValue_dec_35','tqwt_kurtosisValue_dec_36']
nums = attrs[1:]
model = Classifier(attrs=attrs, numeric=nums, label='class')
data = model.load_data('data/parkison_disease/parkison_disease.csv')
print('\n% parkinson disease dataset', np.shape(data))
return model, data
def pendigits():
attrs = ['a1','a2','a3','a4','a5','a6','a7','a8','a9','a10','a11','a12','a13','a14','a15','a16']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='class')
data_train = model.load_data('data/pendigits/train.csv')
data_test = model.load_data('data/pendigits/test.csv')
print('\n% pendigits train dataset', np.shape(data_train), 'test', np.shape(data_test))
return model, data_train, data_test
def wall_robot():
attrs = ['US1','US2','US3','US4','US5','US6','US7','US8','US9','US10','US11','US12','US13','US14','US15','US16','US17','US18','US19','US20','US21','US22','US23','US24']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='Class')
data = model.load_data('data/wall_following_robot/wall_following_robot.csv')
print('\n% wall_following_robot dataset', np.shape(data))
return model, data
def glass():
attrs = ['RI','Na','Mg','Al','Si','K','Ca','Ba','Fe']
nums = attrs
model = Classifier(attrs=attrs, numeric=nums, label='Type')
data = model.load_data('data/glass/glass.csv')
print('\n% glass dataset', np.shape(data))
return model, data
def flags():
attrs = ['name','landmass','zone','area','population','language','bars','stripes','colours','red','green','blue','gold','white','black','orange','mainhue','circles','crosses','saltires','quarters','sunstars','crescent','triangle','icon','animate','text','topleft','botright']
nums = ['area','population','stripes','colours','sunstars']
model = Classifier(attrs=attrs, numeric=nums, label='religion')
data = model.load_data('data/flags/flags.csv')
print('\n% flags dataset', np.shape(data))
return model, data
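# Hedged usage sketch: how these loader functions are presumably consumed.
# `Classifier` and `load_data` are defined earlier in this file; the sketch
# assumes the CSV files exist under data/ exactly as referenced above.
# model, data = glass()
# model, data_train, data_test = pendigits()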
|
nilq/baby-python
|
python
|
import tensorflow as tf
# This section mainly covers placeholders
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
# The original tutorial used mul; the version I am using calls it multiply
output = tf.multiply(input1, input2)
with tf.Session() as sess:
print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
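# Hedged compatibility note: the placeholder/Session API above is TensorFlow
# 1.x. On TensorFlow 2.x the same script can run through the v1 compatibility
# layer (a sketch, not part of the original tutorial):
#     import tensorflow.compat.v1 as tf
#     tf.disable_v2_behavior()
# or, in eager TF 2.x, the multiplication needs no graph at all:
#     print(tf.multiply([7.], [2.]).numpy())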
|
nilq/baby-python
|
python
|
'''Statistical tests for NDVars
Common Attributes
-----------------
The following attributes are always present. For ANOVA, they are lists with the
corresponding items for different effects.
t/f/... : NDVar
Map of the statistical parameter.
p_uncorrected : NDVar
Map of uncorrected p values.
p : NDVar | None
Map of corrected p values (None if no correction was applied).
clusters : Dataset | None
Table of all the clusters found (None if no clusters were found, or if no
clustering was performed).
n_samples : None | int
The actual number of permutations. If ``samples = -1``, i.e. a complete set
of permutations is performed, then ``n_samples`` indicates the actual
number of permutations that constitute the complete set.
'''
from datetime import datetime, timedelta
from functools import reduce, partial
from itertools import chain, repeat
from math import ceil
from multiprocessing import Process, Event, SimpleQueue
from multiprocessing.sharedctypes import RawArray
import logging
import operator
import os
import re
import socket
from time import time as current_time
from typing import Union
import numpy as np
import scipy.stats
from scipy import ndimage
from tqdm import trange
from .. import fmtxt, _info, _text
from ..fmtxt import FMText
from .._celltable import Celltable
from .._config import CONFIG
from .._data_obj import (
CategorialArg, CellArg, IndexArg, ModelArg, NDVarArg, VarArg,
Dataset, Var, Factor, Interaction, NestedEffect,
NDVar, Categorial, UTS,
ascategorial, asmodel, asndvar, asvar, assub,
cellname, combine, dataobj_repr)
from .._exceptions import OldVersionError, WrongDimension, ZeroVariance
from .._utils import LazyProperty, user_activity
from .._utils.numpy_utils import FULL_AXIS_SLICE
from . import opt, stats, vector
from .connectivity import Connectivity, find_peaks
from .connectivity_opt import merge_labels, tfce_increment
from .glm import _nd_anova
from .permutation import (
_resample_params, permute_order, permute_sign_flip, random_seeds,
rand_rotation_matrices)
from .t_contrast import TContrastRel
from .test import star, star_factor
__test__ = False
def check_for_vector_dim(y: NDVar) -> None:
for dim in y.dims:
if dim._connectivity_type == 'vector':
raise WrongDimension(f"{dim}: mass-univariate methods are not suitable for vectors. Consider using vector norm as test statistic, or using a testnd.Vector test function.")
def check_variance(x):
if x.ndim != 2:
x = x.reshape((len(x), -1))
if opt.has_zero_variance(x):
raise ZeroVariance("y contains data column with zero variance")
class NDTest:
"""Baseclass for testnd test results
Attributes
----------
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_common = ('y', 'match', 'sub', 'samples', 'tfce', 'pmin', '_cdist',
'tstart', 'tstop', '_dims')
_state_specific = ()
_statistic = None
_statistic_tail = 0
@property
def _attributes(self):
return self._state_common + self._state_specific
def __init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop):
self.y = y.name
self.match = dataobj_repr(match) if match else match
self.sub = sub
self.samples = samples
self.tfce = tfce
self.pmin = pmin
self._cdist = cdist
self.tstart = tstart
self.tstop = tstop
self._dims = y.dims[1:]
def __getstate__(self):
return {name: getattr(self, name, None) for name in self._attributes}
def __setstate__(self, state):
# backwards compatibility:
if 'Y' in state:
state['y'] = state.pop('Y')
if 'X' in state:
state['x'] = state.pop('X')
for k, v in state.items():
setattr(self, k, v)
# backwards compatibility:
if 'tstart' not in state:
cdist = self._first_cdist
self.tstart = cdist.tstart
self.tstop = cdist.tstop
if '_dims' not in state: # 0.17
if 't' in state:
self._dims = state['t'].dims
elif 'r' in state:
self._dims = state['r'].dims
elif 'f' in state:
self._dims = state['f'][0].dims
else:
raise RuntimeError("Error recovering old test results dims")
self._expand_state()
def __repr__(self):
args = self._repr_test_args()
if self.sub is not None:
if isinstance(self.sub, np.ndarray):
sub_repr = '<array>'
else:
sub_repr = repr(self.sub)
args.append(f'sub={sub_repr}')
if self._cdist:
args += self._repr_cdist()
else:
args.append('samples=0')
return f"<{self.__class__.__name__} {', '.join(args)}>"
def _repr_test_args(self):
"""List of strings describing parameters unique to the test
Will be joined with ``", ".join(repr_args)``
"""
raise NotImplementedError()
def _repr_cdist(self):
"""List of results (override for MultiEffectResult)"""
return (self._cdist._repr_test_args(self.pmin) +
self._cdist._repr_clusters())
def _expand_state(self):
"Override to create secondary results"
cdist = self._cdist
if cdist is None:
self.tfce_map = None
self.p = None
self._kind = None
else:
self.tfce_map = cdist.tfce_map
self.p = cdist.probability_map
self._kind = cdist.kind
def _desc_samples(self):
if self.samples == -1:
return f"a complete set of {self.n_samples} permutations"
elif self.samples is None:
return "no permutations"
else:
return f"{self.n_samples} random permutations"
def _desc_timewindow(self):
tstart = self._time_dim.tmin if self.tstart is None else self.tstart
tstop = self._time_dim.tstop if self.tstop is None else self.tstop
return f"{_text.ms(tstart)} - {_text.ms(tstop)} ms"
def _asfmtext(self):
p = self.p.min()
max_stat = self._max_statistic()
return FMText((fmtxt.eq(self._statistic, max_stat, 'max', stars=p), ', ', fmtxt.peq(p)))
def _default_plot_obj(self):
raise NotImplementedError
def _iter_cdists(self):
yield (None, self._cdist)
@property
def _first_cdist(self):
return self._cdist
def _plot_model(self):
"Determine x for plotting categories"
return None
def _plot_sub(self):
if isinstance(self.sub, str) and self.sub == "<unsaved array>":
raise RuntimeError("The sub parameter was not saved for previous "
"versions of Eelbrain. Please recompute this "
"result with the current version.")
return self.sub
def _assert_has_cdist(self):
if self._cdist is None:
raise RuntimeError("This method only applies to results of tests "
"with threshold-based clustering and tests with "
"a permutation distribution (samples > 0)")
def masked_parameter_map(self, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
return self._cdist.masked_parameter_map(pmin, **sub)
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
return self._cdist.cluster(cluster_id)
@LazyProperty
def clusters(self):
if self._cdist is None:
return None
else:
return self.find_clusters(None, True)
def find_clusters(self, pmin=None, maps=False, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
return self._cdist.clusters(pmin, maps, **sub)
def find_peaks(self):
"""Find peaks in a threshold-free cluster distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
return self._cdist.find_peaks()
def compute_probability_map(self, **sub):
"""Compute a probability map
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
return self._cdist.compute_probability_map(**sub)
def info_list(self, computation=True):
"List with information about the test"
out = fmtxt.List("Mass-univariate statistics:")
out.add_item(self._name())
dimnames = [dim.name for dim in self._dims]
dimlist = out.add_sublist(f"Over {_text.enumeration(dimnames)}")
if 'time' in dimnames:
dimlist.add_item(f"Time interval: {self._desc_timewindow()}.")
cdist = self._first_cdist
if cdist is None:
out.add_item("No inferential statistics")
return out
# inference
l = out.add_sublist("Inference:")
if cdist.kind == 'raw':
l.add_item("Based on maximum statistic")
elif cdist.kind == 'tfce':
l.add_item("Based on maximum statistic with threshold-"
"free cluster enhancement (Smith & Nichols, 2009)")
elif cdist.kind == 'cluster':
l.add_item("Based on maximum cluster mass statistic")
sl = l.add_sublist("Cluster criteria:")
for dim in dimnames:
if dim == 'time':
sl.add_item(f"Minimum cluster duration {_text.ms(cdist.criteria.get('mintime', 0))} ms")
elif dim == 'source':
sl.add_item(f"At least {cdist.criteria.get('minsource', 0)} contiguous sources.")
elif dim == 'sensor':
sl.add_item(f"At least {cdist.criteria.get('minsensor', 0)} contiguous sensors.")
else:
value = cdist.criteria.get(f'min{dim}', 0)
sl.add_item(f"Minimum number of contiguous elements in {dim}: {value}")
# n samples
l.add_item(f"In {self._desc_samples()}")
# computation
if computation:
out.add_item(cdist.info_list())
return out
@property
def _statistic_map(self):
return getattr(self, self._statistic)
def _max_statistic(self):
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(self._statistic_map, self.p, tail)
@staticmethod
def _max_statistic_from_map(stat_map: NDVar, p_map: NDVar, tail: int):
if tail == 0:
func = stat_map.extrema
elif tail == 1:
func = stat_map.max
else:
func = stat_map.min
if p_map:
mask = p_map <= .05 if p_map.min() <= .05 else None
else:
mask = None
return func() if mask is None else func(mask)
@property
def n_samples(self):
if self.samples == -1:
return self._first_cdist.samples
else:
return self.samples
@property
def _time_dim(self):
for dim in self._first_cdist.dims:
if isinstance(dim, UTS):
return dim
return None
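# Hedged note on the shared result interface defined by NDTest: each test
# class below is instantiated like a function and the instance carries the
# results (names in this sketch are hypothetical):
#     res = ttest_rel('uts', 'A', 'a1', 'a0', match='subject', ds=ds)
#     res.p                       # corrected p-map (None without permutations)
#     res.find_clusters(0.05)     # Dataset of clusters/regions with p <= .05
#     res.masked_parameter_map()  # statistic map masked by significance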
class t_contrast_rel(NDTest):
"""Mass-univariate contrast based on t-values
Parameters
----------
y : NDVar
Dependent variable.
x : categorial
Model containing the cells which are compared with the contrast.
contrast : str
Contrast specification: see Notes.
match : Factor
Match cases for a repeated measures test.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value for a related samples t-test (with df =
len(match.cells) - 1).
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Notes
-----
A contrast specifies the steps to calculate a map based on *t*-values.
Contrast definitions can contain:
- Comparisons using ``>`` or ``<`` and data cells to compute *t*-maps.
For example, ``"cell1 > cell0"`` will compute a *t*-map of the comparison
of ``cell1`` and ``cell0``, being positive where ``cell1`` is greater than
``cell0`` and negative where ``cell0`` is greater than ``cell1``.
If the data is defined based on an interaction, cells are specified with
``|``, e.g. ``"a1 | b1 > a0 | b0"``. Cells can contain ``*`` to average
multiple cells. Thus, if the second factor in the model has cells ``b1``
and ``b0``, ``"a1 | * > a0 | *"`` would compare ``a1`` to ``a0``
while averaging ``b1`` and ``b0`` within ``a1`` and ``a0``.
- Unary numpy functions ``abs`` and ``negative``, e.g.
``"abs(cell1 > cell0)"``.
- Binary numpy functions ``subtract`` and ``add``, e.g.
``"add(a>b, a>c)"``.
- Numpy functions for multiple arrays ``min``, ``max`` and ``sum``,
e.g. ``min(a>d, b>d, c>d)``.
Cases with zero variance are set to t=0.
Examples
--------
To find cluster where both of two pairwise comparisons are reliable,
i.e. an intersection of two effects, one could use
``"min(a > c, b > c)"``.
To find a specific kind of interaction, where a is greater than b, and
this difference is greater than the difference between c and d, one
could use ``"(a > b) - abs(c > d)"``.
"""
_state_specific = ('x', 'contrast', 't', 'tail')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: CategorialArg,
contrast: str,
match: CategorialArg = None,
sub: CategorialArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
if match is None:
raise TypeError("The `match` parameter needs to be specified for repeated measures test t_contrast_rel")
ct = Celltable(y, x, match, sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
check_variance(ct.y.x)
# setup contrast
t_contrast = TContrastRel(contrast, ct.cells, ct.data_indexes)
# original data
tmap = t_contrast.map(ct.y.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
df = len(ct.match.cells) - 1
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(
ct.y, samples, threshold, tfce, tail, 't', "t-contrast",
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(len(ct.y), samples, unit=ct.match)
run_permutation(t_contrast, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = ('%'.join(ct.x.base_names) if isinstance(ct.x, Interaction) else
ct.x.name)
self.contrast = contrast
self.tail = tail
self.tmin = tmin
self.t = t
self._expand_state()
def _name(self):
if self.y:
return "T-Contrast: %s ~ %s" % (self.y, self.contrast)
else:
return "T-Contrast: %s" % self.contrast
def _plot_model(self):
return self.x
def _repr_test_args(self):
args = [repr(self.y), repr(self.x), repr(self.contrast)]
if self.tail:
args.append("tail=%r" % self.tail)
if self.match:
args.append('match=%r' % self.match)
return args
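# Hedged usage sketch for t_contrast_rel (dataset and factor names are
# hypothetical; `ds` is assumed to hold an NDVar 'uts', a factor 'A' with
# cells 'a0'/'a1'/'a2', and a subject factor 'subject'):
#     res = t_contrast_rel('uts', 'A', "min(a1 > a0, a2 > a0)",
#                          match='subject', ds=ds, samples=1000, pmin=0.05)
#     res.clusters  # table of clusters where both comparisons hold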
class corr(NDTest):
"""Mass-univariate correlation
Parameters
----------
y : NDVar
Dependent variable.
x : continuous
The continuous predictor variable.
norm : None | categorial
Categories in which to normalize (z-score) x.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an r-value equivalent to an
uncorrected p-value.
rmin : None | scalar
Threshold for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : None | categorial
When permuting data, only shuffle the cases within the categories
of match.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
r : NDVar
Map of correlation values (with threshold contours).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_specific = ('x', 'norm', 'n', 'df', 'r')
_statistic = 'r'
@user_activity
def __init__(
self,
y: NDVarArg,
x: VarArg,
norm: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
rmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: CategorialArg = None,
parc: str = None,
**criteria):
sub = assub(sub, ds)
y = asndvar(y, sub=sub, ds=ds, dtype=np.float64)
check_for_vector_dim(y)
if not y.has_case:
raise ValueError("Dependent variable needs case dimension")
x = asvar(x, sub=sub, ds=ds)
if norm is not None:
norm = ascategorial(norm, sub, ds)
if match is not None:
match = ascategorial(match, sub, ds)
name = "%s corr %s" % (y.name, x.name)
# Normalize by z-scoring the data for each subject
# normalization is done before the permutation b/c we are interested in
# the variance associated with each subject for the z-scoring.
y = y.copy()
if norm is not None:
for cell in norm.cells:
idx = (norm == cell)
y.x[idx] = scipy.stats.zscore(y.x[idx], None)
# subtract the mean from y and x so that this can be omitted during
# permutation
y -= y.summary('case')
x = x - x.mean()
n = len(y)
df = n - 2
rmap = stats.corr(y.x, x.x)
n_threshold_params = sum((pmin is not None, rmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, rmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.rtest_r(pmin, df)
elif rmin is not None:
threshold = abs(rmin)
else:
threshold = None
cdist = NDPermutationDistribution(
y, samples, threshold, tfce, 0, 'r', name,
tstart, tstop, criteria, parc)
cdist.add_original(rmap)
if cdist.do_permutation:
iterator = permute_order(n, samples, unit=match)
run_permutation(stats.corr, cdist, iterator, x.x)
# compile results
info = _info.for_stat_map('r', threshold)
r = NDVar(rmap, y.dims[1:], info, name)
# store attributes
NDTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = x.name
self.norm = None if norm is None else norm.name
self.rmin = rmin
self.n = n
self.df = df
self.r = r
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
r = self.r
# uncorrected probability
pmap = stats.rtest_p(r.x, self.df)
info = _info.for_p_map()
p_uncorrected = NDVar(pmap, r.dims, info, 'p_uncorrected')
self.p_uncorrected = p_uncorrected
self.r_p = [[r, self.p]] if self.samples else None
def _name(self):
if self.y and self.x:
return "Correlation: %s ~ %s" % (self.y, self.x)
else:
return "Correlation"
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.norm:
args.append('norm=%r' % self.norm)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_parameter_map()
else:
return self.r
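# Hedged usage sketch for corr (hypothetical names; 'Y' is assumed to be a
# continuous Var in `ds` and 'subject' a factor to z-score within):
#     res = corr('uts', 'Y', norm='subject', ds=ds, samples=1000, pmin=0.05)
#     res.r              # correlation map
#     res.p_uncorrected  # uncorrected p-values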
class NDDifferenceTest(NDTest):
difference = None
def _get_mask(self, p=0.05):
self._assert_has_cdist()
if not 1 >= p > 0:
raise ValueError(f"p={p}: needs to be between 1 and 0")
if p == 1:
if self._cdist.kind != 'cluster':
raise ValueError(f"p=1 is only a valid mask for threshold-based cluster tests")
mask = self._cdist.cluster_map == 0
else:
mask = self.p > p
return self._cdist.uncrop(mask, self.difference, True)
def masked_difference(self, p=0.05):
"""Difference map masked by significance
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.difference.mask(mask)
class NDMaskedC1Mixin:
def masked_c1(self, p=0.05):
"""``c1`` map masked by significance of the ``c1``-``c0`` difference
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.c1_mean.mask(mask)
class ttest_1samp(NDDifferenceTest):
"""Mass-univariate one sample t-test
Parameters
----------
y : NDVar
Dependent variable.
popmean : scalar
Value to compare y against (default is 0).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
The difference value entering the test (``y`` if popmean is 0).
n : int
Number of cases.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Data points with zero variance are set to t=0.
"""
_state_specific = ('popmean', 'tail', 'n', 'df', 't', 'difference')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
popmean: float = 0,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
n = len(ct.y)
df = n - 1
y = ct.y.summary()
tmap = stats.t_1samp(ct.y.x)
if popmean:
raise NotImplementedError("popmean != 0")
diff = y - popmean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
else:
diff = y
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
if popmean:
y_perm = ct.y - popmean
else:
y_perm = ct.y
n_samples, samples = _resample_params(len(y_perm), samples)
cdist = NDPermutationDistribution(
y_perm, n_samples, threshold, tfce, tail, 't', '1-Sample t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.popmean = popmean
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.difference = diff
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
def _expand_state(self):
NDTest._expand_state(self)
t = self.t
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map(t.info)
p_uncorr = NDVar(pmap, t.dims, info, 'p')
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "One-Sample T-Test: %s" % self.y
else:
return "One-Sample T-Test"
def _repr_test_args(self):
args = [repr(self.y)]
if self.popmean:
args.append(repr(self.popmean))
if self.match:
args.append('match=%r' % self.match)
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_difference()
else:
return self.difference
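# Hedged usage sketch for ttest_1samp (hypothetical names; tests the NDVar
# 'uts' against 0 with TFCE-based correction):
#     res = ttest_1samp('uts', ds=ds, samples=1000, tfce=True)
#     res.masked_difference()  # mean map masked at p <= .05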
def _independent_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for independent measures tests (2 different argspecs)"
if isinstance(x, str):
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
y0 = asndvar(x, sub, ds)
y = combine((y1, y0))
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar, dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
x_name = ct.x.name
match = ct.match
y = ct.y
y1 = ct.data[c1]
y0 = ct.data[c0]
return y, y1, y0, c1, c0, match, x_name, c1_name, c0_name
class ttest_ind(NDDifferenceTest):
"""Mass-univariate independent samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold p value for forming clusters. None for threshold-free
cluster enhancement.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Cases with zero variance are set to t=0.
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n1', 'n0', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y)
n1 = len(y1)
n = len(y)
n0 = n - n1
df = n - 2
groups = np.arange(n) < n1
groups.dtype = np.int8
tmap = stats.t_ind(y.x, groups)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(y, samples, threshold, tfce, tail, 't', 'Independent Samples t-Test', tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(n, samples)
run_permutation(stats.t_ind, cdist, iterator, groups)
# store attributes
NDDifferenceTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n1 = n1
self.n0 = n0
self.df = df
self.tail = tail
info = _info.for_stat_map('t', threshold, tail=tail, old=y.info)
self.t = NDVar(tmap, y.dims[1:], info, 't')
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(self.t.x, self.df, self.tail)
info = _info.for_p_map(self.t.info)
p_uncorr = NDVar(pmap, self.t.dims, info, 'p')
self.p_uncorrected = p_uncorr
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Independent-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Independent-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
if self.c1 is None:
args = [f'{self.y!r} (n={self.n1})', f'{self.x!r} (n={self.n0})']
else:
args = [f'{self.y!r}', f'{self.x!r}', f'{self.c1!r} (n={self.n1})', f'{self.c0!r} (n={self.n0})']
if self.match:
args.append(f'match={self.match!r}')
if self.tail:
args.append(f'tail={self.tail}')
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
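# Hedged usage sketch for ttest_ind (hypothetical names; compares cells
# 'a1' and 'a0' of factor 'A' between independent groups):
#     res = ttest_ind('uts', 'A', 'a1', 'a0', ds=ds, samples=1000, pmin=0.05)
#     res.difference  # c1_mean - c0_mean; see also res.masked_difference()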
def _related_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for related measures tests (2 different argspecs)"
if isinstance(x, str):
if ds is None:
raise TypeError(f"x={x!r} specified as str without specifying ds")
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
n = len(y1)
y0 = asndvar(x, sub, ds, n)
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
elif match is None:
raise TypeError("The `match` argument needs to be specified for related measures tests")
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar,
dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
if not ct.all_within:
raise ValueError(f"conditions {c1!r} and {c0!r} do not have the same values on {dataobj_repr(ct.match)}")
n = len(ct.y) // 2
y1 = ct.y[:n]
y0 = ct.y[n:]
x_name = ct.x.name
match = ct.match
return y1, y0, c1, c0, match, n, x_name, c1_name, c0_name
class ttest_rel(NDMaskedC1Mixin, NDDifferenceTest):
"""Mass-univariate related samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed, default);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
n : int
Number of cases.
Notes
-----
In the permutation cluster test, permutations are done within the
categories of ``match``.
Cases with zero variance are set to t=0.
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y1, y0, c1, c0, match, n, x_name, c1_name, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y1)
if n <= 2:
raise ValueError("Not enough observations for t-test (n=%i)" % n)
df = n - 1
diff = y1 - y0
tmap = stats.t_1samp(diff.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
n_samples, samples = _resample_params(len(diff), samples)
cdist = NDPermutationDistribution(
diff, n_samples, threshold, tfce, tail, 't', 'Related Samples t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=y1.info)
t = NDVar(tmap, y1.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, y1, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
cdist = self._cdist
t = self.t
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map()
self.p_uncorrected = NDVar(pmap, t.dims, info, 'p')
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Related-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Related-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.c1 is not None:
args.extend((repr(self.c1), repr(self.c0), repr(self.match)))
args[-1] += " (n=%i)" % self.n
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
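# Hedged usage sketch for ttest_rel (hypothetical names; a within-subject
# comparison, so permutations sign-flip within 'subject'):
#     res = ttest_rel('uts', 'A', 'a1', 'a0', match='subject', ds=ds,
#                     samples=1000, pmin=0.05)
#     res.masked_c1(0.05)  # c1 mean masked by the significant difference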
class MultiEffectNDTest(NDTest):
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.match is not None:
args.append('match=%r' % self.match)
return args
def _repr_cdist(self):
args = self._cdist[0]._repr_test_args(self.pmin)
for cdist in self._cdist:
effect_args = cdist._repr_clusters()
args.append("%r: %s" % (cdist.name, ', '.join(effect_args)))
return args
def _asfmtext(self):
table = fmtxt.Table('llll')
table.cells('Effect', fmtxt.symbol(self._statistic, 'max'), fmtxt.symbol('p'), 'sig')
table.midrule()
for i, effect in enumerate(self.effects):
table.cell(effect)
table.cell(fmtxt.stat(self._max_statistic(i)))
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
def _expand_state(self):
self.effects = tuple(e.name for e in self._effects)
# clusters
cdists = self._cdist
if cdists is None:
self._kind = None
else:
self.tfce_maps = [cdist.tfce_map for cdist in cdists]
self.p = [cdist.probability_map for cdist in cdists]
self._kind = cdists[0].kind
def _effect_index(self, effect: Union[int, str]):
if isinstance(effect, str):
return self.effects.index(effect)
else:
return effect
def _iter_cdists(self):
for cdist in self._cdist:
yield cdist.name.capitalize(), cdist
@property
def _first_cdist(self):
if self._cdist is None:
return None
else:
return self._cdist[0]
def _max_statistic(self, effect: Union[str, int]):
i = self._effect_index(effect)
stat_map = self._statistic_map[i]
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(stat_map, self.p[i], tail)
def cluster(self, cluster_id, effect=0):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
effect : int | str
Index or name of the effect from which to retrieve a cluster
(default is the first effect).
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].cluster(cluster_id)
def compute_probability_map(self, effect=0, **sub):
"""Compute a probability map
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map
(default is the first effect).
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].compute_probability_map(**sub)
def masked_parameter_map(self, effect=0, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map.
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].masked_parameter_map(pmin, **sub)
def find_clusters(self, pmin=None, maps=False, effect=None, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
effect : int | str
Index or name of the effect from which to find clusters (default is
all effects).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
if effect is not None:
i = self._effect_index(effect)
return self._cdist[i].clusters(pmin, maps, **sub)
dss = []
info = {}
for cdist in self._cdist:
ds = cdist.clusters(pmin, maps, **sub)
ds[:, 'effect'] = cdist.name
if 'clusters' in ds.info:
info['%s clusters' % cdist.name] = ds.info.pop('clusters')
dss.append(ds)
out = combine(dss)
out.info.update(info)
return out
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
dss = []
for cdist in self._cdist:
ds = cdist.find_peaks()
ds[:, 'effect'] = cdist.name
dss.append(ds)
return combine(dss)
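# Hedged usage sketch for the multi-effect interface above (hypothetical
# names; 'subject' is assumed to be a random factor, which makes `match`
# automatic in the `anova` class defined next):
#     res = anova('uts', 'A * B * subject', ds=ds, samples=1000, pmin=0.05)
#     res.effects                           # e.g. ('A', 'B', 'A x B')
#     res.find_clusters(0.05, effect='A')   # clusters for one effect
#     res.masked_parameter_map(effect='A')  # F-map masked by significance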
class anova(MultiEffectNDTest):
"""Mass-univariate ANOVA
Parameters
----------
y : NDVar
Dependent variable.
x : Model
Independent variables.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an f-value equivalent to an
uncorrected p-value.
fmin : scalar
Threshold for forming clusters as f-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
replacement : bool
Whether random samples should be drawn with or without replacement.
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : categorial | False
When permuting data, only shuffle the cases within the categories
of match. By default, ``match`` is determined automatically based on
the random effects structure of ``x``.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
effects : tuple of str
Names of the tested effects, in the same order as in other attributes.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
f : list of NDVar
Maps of F values.
p : list of NDVar | None
Maps of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : list of NDVar
Maps of p-values uncorrected for multiple comparison.
tfce_maps : list of NDVar | None
Maps of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Examples
--------
For information on model specification see the univariate
:func:`~eelbrain.test.anova` examples.
"""
_state_specific = ('x', 'pmin', '_effects', '_dfs_denom', 'f')
_statistic = 'f'
_statistic_tail = 1
@user_activity
def __init__(
self,
y: NDVarArg,
x: ModelArg,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
fmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: Union[CategorialArg, bool] = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
x_arg = x
sub_arg = sub
sub = assub(sub, ds)
y = asndvar(y, sub, ds, dtype=np.float64)
check_for_vector_dim(y)
x = asmodel(x, sub, ds)
if match is None:
random_effects = [e for e in x.effects if e.random]
if not random_effects:
match = None
elif len(random_effects) > 1:
raise NotImplementedError(
"Automatic match parameter for model with more than one "
"random effect. Set match manually.")
else:
match = random_effects[0]
elif match is not False:
match = ascategorial(match, sub, ds)
lm = _nd_anova(x)
effects = lm.effects
dfs_denom = lm.dfs_denom
fmaps = lm.map(y.x)
n_threshold_params = sum((pmin is not None, fmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
cdists = None
thresholds = tuple(repeat(None, len(effects)))
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, fmin and tfce can be specified")
else:
if pmin is not None:
thresholds = tuple(stats.ftest_f(pmin, e.df, df_den) for e, df_den in zip(effects, dfs_denom))
elif fmin is not None:
thresholds = tuple(repeat(abs(fmin), len(effects)))
else:
thresholds = tuple(repeat(None, len(effects)))
cdists = [
NDPermutationDistribution(
y, samples, thresh, tfce, 1, 'f', e.name,
tstart, tstop, criteria, parc, force_permutation)
for e, thresh in zip(effects, thresholds)]
# Find clusters in the actual data
do_permutation = 0
for cdist, fmap in zip(cdists, fmaps):
cdist.add_original(fmap)
do_permutation += cdist.do_permutation
if do_permutation:
iterator = permute_order(len(y), samples, unit=match)
run_permutation_me(lm, cdists, iterator)
# create ndvars
dims = y.dims[1:]
f = []
for e, fmap, df_den, f_threshold in zip(effects, fmaps, dfs_denom, thresholds):
info = _info.for_stat_map('f', f_threshold, tail=1, old=y.info)
f.append(NDVar(fmap, dims, info, e.name))
# store attributes
MultiEffectNDTest.__init__(self, y, match, sub_arg, samples, tfce, pmin,
cdists, tstart, tstop)
self.x = x_arg if isinstance(x_arg, str) else x.name
self._effects = effects
self._dfs_denom = dfs_denom
self.f = f
self._expand_state()
def _expand_state(self):
# backwards compatibility
if hasattr(self, 'effects'):
self._effects = self.effects
MultiEffectNDTest._expand_state(self)
# backwards compatibility
if hasattr(self, 'df_den'):
df_den_temp = {e.name: df for e, df in self.df_den.items()}
del self.df_den
self._dfs_denom = tuple(df_den_temp[e] for e in self.effects)
# f-maps with clusters
pmin = self.pmin or 0.05
if self.samples:
f_and_clusters = []
for e, fmap, df_den, cdist in zip(self._effects, self.f,
self._dfs_denom, self._cdist):
# create f-map with cluster threshold
f0 = stats.ftest_f(pmin, e.df, df_den)
info = _info.for_stat_map('f', f0)
f_ = NDVar(fmap.x, fmap.dims, info, e.name)
# add overlay with cluster
if cdist.probability_map is not None:
f_and_clusters.append([f_, cdist.probability_map])
else:
f_and_clusters.append([f_])
self.f_probability = f_and_clusters
# uncorrected probability
p_uncorr = []
for e, f, df_den in zip(self._effects, self.f, self._dfs_denom):
info = _info.for_p_map()
pmap = stats.ftest_p(f.x, e.df, df_den)
p_ = NDVar(pmap, f.dims, info, e.name)
p_uncorr.append(p_)
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "ANOVA: %s ~ %s" % (self.y, self.x)
else:
return "ANOVA: %s" % self.x
def _plot_model(self):
return '%'.join(e.name for e in self._effects if isinstance(e, Factor) or
(isinstance(e, NestedEffect) and isinstance(e.effect, Factor)))
def _plot_sub(self):
return super(anova, self)._plot_sub()
def _default_plot_obj(self):
if self.samples:
return [self.masked_parameter_map(e) for e in self.effects]
else:
return self._statistic_map
def table(self):
"""Table with effects and smallest p-value"""
table = fmtxt.Table('rlr' + ('' if self.p is None else 'rl'))
table.cells('#', 'Effect', 'f_max')
if self.p is not None:
table.cells('p', 'sig')
table.midrule()
for i in range(len(self.effects)):
table.cell(i)
table.cell(self.effects[i])
table.cell(fmtxt.stat(self.f[i].max()))
if self.p is not None:
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
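# Hypothetical usage sketch (``ds``, 'uts', 'A', 'B' and 'rm' are assumed
# example names, not part of this module): a repeated-measures ANOVA with
# cluster-based correction:
#
#     res = anova('uts', 'A * B * rm', ds=ds, samples=1000, pmin=0.05)
#     print(res.table())    # largest F and smallest p per effect
#     clusters = res.find_clusters(0.05, maps=True)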
class Vector(NDDifferenceTest):
"""Test a vector field for vectors with non-random direction
Parameters
----------
y : NDVar
Dependent variable (needs to include one vector dimension).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step size of the TFCE levels (with ``tfce=True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
difference : NDVar
The vector field averaged across cases.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
Notes
-----
Vector tests are based on the Hotelling T-Square statistic. Computation of
the T-Square statistic relies on [1]_.
References
----------
.. [1] Kopp, J. (2008). Efficient numerical diagonalization of hermitian 3 x
3 matrices. International Journal of Modern Physics C, 19(3), 523-548.
`10.1142/S0129183108012303 <https://doi.org/10.1142/S0129183108012303>`_
"""
_state_specific = ('difference', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
n = len(ct.y)
cdist = NDPermutationDistribution(ct.y, samples, tmin, tfce, 1, 'norm', 'Vector test', tstart, tstop, criteria, parc, force_permutation)
v_dim = ct.y.dimnames[cdist._vector_ax + 1]
v_mean = ct.y.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(ct.y)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self._v_dim = v_dim
self.n = n
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
@property
def _statistic(self):
return 'norm' if self.t2 is None else 't2'
def _name(self):
if self.y:
return f"Vector test: {self.y}"
else:
return "Vector test"
def _repr_test_args(self):
args = []
if self.y:
args.append(repr(self.y))
if self.match:
args.append(f'match={self.match!r}')
return args
@staticmethod
def _vector_perm(y, out, seed, use_norm):
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
rotation = rand_rotation_matrices(n_cases, seed)
if use_norm:
return vector.mean_norm_rotated(y, rotation, out)
else:
return vector.t2_stat_rotated(y, rotation, out)
@staticmethod
def _vector_t2_map(y):
dimnames = y.get_dimnames(first=('case', 'space'))
x = y.get_data(dimnames)
t2_map = stats.t2_1samp(x)
if y.ndim == 2:
return np.float64(t2_map)
else:
dims = y.get_dims(dimnames[2:])
return NDVar(t2_map, dims)
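# Hypothetical usage sketch (assumes ``ds`` contains an NDVar 'v' with a
# vector (space) dimension and a 'subject' Factor):
#
#     res = Vector('v', match='subject', ds=ds, samples=1000, tfce=True)
#     res.difference    # mean vector field
#     res.p             # corrected p-values (None without permutations)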
class VectorDifferenceIndependent(Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step size of the TFCE levels (with ``tfce=True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Total number of cases.
n1 : int
Number of cases in ``c1``.
n0 : int
Number of cases in ``c0``.
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
"""
    _state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
_statistic = 'norm'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
self.n1 = len(y1)
self.n0 = len(y0)
self.n = len(y)
cdist = NDPermutationDistribution(y, samples, tmin, tfce, 1, 'norm', 'Vector test (independent)', tstart, tstop, criteria, parc, force_permutation)
self._v_dim = v_dim = y.dimnames[cdist._vector_ax + 1]
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self.difference = self.c1_mean - self.c0_mean
self.difference.name = 'difference'
v_mean_norm = self.difference.norm(v_dim)
if not use_norm:
raise NotImplementedError("t2 statistic not implemented for VectorDifferenceIndependent")
else:
cdist.add_original(v_mean_norm.x if self.difference.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator, self.n1)
NDTest.__init__(self, y, match, sub, samples, tfce, None, cdist, tstart, tstop)
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (independent): {self.y}"
else:
return "Vector test (independent)"
@staticmethod
def _vector_perm(y, n1, out, seed, use_norm):
assert use_norm
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
# randomize directions
rotation = rand_rotation_matrices(n_cases, seed)
# randomize groups
cases = np.arange(n_cases)
np.random.shuffle(cases)
# group 1
mean_1 = np.zeros((n_dims, n_tests))
for case in cases[:n1]:
mean_1 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_1 /= n1
# group 0
mean_0 = np.zeros((n_dims, n_tests))
for case in cases[n1:]:
mean_0 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_0 /= (n_cases - n1)
# difference
mean_1 -= mean_0
norm = scipy.linalg.norm(mean_1, 2, axis=0)
if out is not None:
out[:] = norm
return norm
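# The permutation above randomizes both direction (one random rotation matrix
# per case) and group assignment (shuffled case order), then takes the norm of
# the difference between the rotated group means. A minimal numpy-only sketch
# of the same idea (illustrative only; rotations omitted):
#
#     rng = np.random.RandomState(0)
#     y = rng.randn(20, 3, 5)               # cases x space x tests
#     order = rng.permutation(20)
#     diff = y[order[:10]].mean(0) - y[order[10:]].mean(0)
#     stat = scipy.linalg.norm(diff, 2, axis=0)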
class VectorDifferenceRelated(NDMaskedC1Mixin, Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step size of the TFCE levels (with ``tfce=True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
c1_mean : NDVar
Mean in the ``c1`` condition.
c0_mean : NDVar
Mean in the ``c0`` condition.
difference : NDVar
Difference between the mean in condition ``c1`` and condition ``c0``.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
See Also
--------
Vector : One-sample vector test, notes on vector test implementation
"""
    _state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
        y1, y0, c1, c0, match, n, x_name, c1_name, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
difference = y1 - y0
difference.name = 'difference'
n_samples, samples = _resample_params(n, samples)
cdist = NDPermutationDistribution(difference, n_samples, tmin, tfce, 1, 'norm', 'Vector test (related)', tstart, tstop, criteria, parc, force_permutation)
v_dim = difference.dimnames[cdist._vector_ax + 1]
v_mean = difference.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(difference)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(n_samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, difference, match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._v_dim = v_dim
self.n = n
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (related): {self.y}"
else:
return "Vector test (related)"
def flatten(array, connectivity):
"""Reshape SPM buffer array to 2-dimensional map for connectivity processing
Parameters
----------
array : ndarray
N-dimensional array (with non-adjacent dimension at first position).
connectivity : Connectivity
N-dimensional connectivity.
Returns
-------
flat_array : ndarray
The input array reshaped if necessary, making sure that input and output
arrays share the same underlying data buffer.
"""
if array.ndim == 2 or not connectivity.custom:
return array
else:
out = array.reshape((array.shape[0], -1))
assert out.base is array
return out
def flatten_1d(array):
if array.ndim == 1:
return array
else:
out = array.ravel()
assert out.base is array
return out
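# Both helpers return views that share the input's data buffer (enforced by
# the asserts above), so cluster labels written into the flat array are
# visible in the n-dimensional buffer. Sketch with a C-contiguous array:
#
#     a = np.zeros((2, 3, 4))
#     flat = a.reshape((a.shape[0], -1))
#     flat[0, 0] = 1.
#     assert a[0, 0, 0] == 1.    # same memory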
def label_clusters(stat_map, threshold, tail, connectivity, criteria):
"""Label clusters
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
Returns
-------
cmap : np.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
cmap = np.empty(stat_map.shape, np.uint32)
bin_buff = np.empty(stat_map.shape, np.bool8)
cmap_flat = flatten(cmap, connectivity)
if tail == 0:
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
else:
int_buff = int_buff_flat = None
cids = _label_clusters(stat_map, threshold, tail, connectivity, criteria,
cmap, cmap_flat, bin_buff, int_buff, int_buff_flat)
return cmap, cids
def _label_clusters(stat_map, threshold, tail, conn, criteria, cmap, cmap_flat,
bin_buff, int_buff, int_buff_flat):
"""Find clusters on a statistical parameter map
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
cmap : array of int
Buffer for the cluster id map (will be modified).
Returns
-------
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
# compute clusters
if tail >= 0:
bin_map_above = np.greater(stat_map, threshold, bin_buff)
cids = _label_clusters_binary(bin_map_above, cmap, cmap_flat, conn,
criteria)
if tail <= 0:
bin_map_below = np.less(stat_map, -threshold, bin_buff)
if tail < 0:
cids = _label_clusters_binary(bin_map_below, cmap, cmap_flat, conn,
criteria)
else:
cids_l = _label_clusters_binary(bin_map_below, int_buff,
int_buff_flat, conn, criteria)
x = cmap.max()
int_buff[bin_map_below] += x
cids_l += x
cmap += int_buff
cids = np.concatenate((cids, cids_l))
return cids
def label_clusters_binary(bin_map, connectivity, criteria=None):
"""Label clusters in a boolean map
Parameters
----------
bin_map : numpy.ndarray
Binary map.
connectivity : Connectivity
Connectivity corresponding to ``bin_map``.
criteria : dict
Cluster criteria.
Returns
-------
cmap : numpy.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : numpy.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
cmap = np.empty(bin_map.shape, np.uint32)
cmap_flat = flatten(cmap, connectivity)
cids = _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria)
return cmap, cids
def _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria):
"""Label clusters in a binary array
Parameters
----------
bin_map : np.ndarray
Binary map of where the parameter map exceeds the threshold for a
cluster (non-adjacent dimension on the first axis).
cmap : np.ndarray
Array in which to label the clusters.
    cmap_flat : np.ndarray
        Flat view of cmap (ndim=2; only used with custom connectivity).
    connectivity : Connectivity
        Connectivity.
    criteria : None | list
        Cluster size criteria, list of ``(axes, v)`` tuples: collapse over
        ``axes`` and require a minimum extent of ``v``.
Returns
-------
cluster_ids : np.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
# find clusters
n = ndimage.label(bin_map, connectivity.struct, cmap)
if n <= 1:
# in older versions, n is 1 even when no cluster is found
if n == 0 or cmap.max() == 0:
return np.array((), np.uint32)
else:
cids = np.array((1,), np.uint32)
elif connectivity.custom:
cids = merge_labels(cmap_flat, n, *connectivity.custom[0])
else:
cids = np.arange(1, n + 1, 1, np.uint32)
# apply minimum cluster size criteria
if criteria and cids.size:
for axes, v in criteria:
cids = np.setdiff1d(cids,
[i for i in cids if np.count_nonzero(np.equal(cmap, i).any(axes)) < v],
True)
if cids.size == 0:
break
return cids
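# Sketch of the core labelling step (illustrative): scipy.ndimage.label
# assigns a distinct integer to each connected region of a binary map:
#
#     bin_map = np.array([[1, 1, 0, 0],
#                         [0, 0, 0, 1]], bool)
#     cmap = np.empty(bin_map.shape, np.uint32)
#     n = ndimage.label(bin_map, output=cmap)
#     # cmap == [[1, 1, 0, 0], [0, 0, 0, 2]]; n == 2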
def tfce(stat_map, tail, connectivity, dh=0.1):
tfce_im = np.empty(stat_map.shape, np.float64)
tfce_im_1d = flatten_1d(tfce_im)
bin_buff = np.empty(stat_map.shape, np.bool8)
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
int_buff_1d = flatten_1d(int_buff)
return _tfce(stat_map, tail, connectivity, tfce_im, tfce_im_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh)
def _tfce(stat_map, tail, conn, out, out_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh=0.1, e=0.5, h=2.0):
"Threshold-free cluster enhancement"
out.fill(0)
# determine slices
if tail == 0:
hs = chain(np.arange(-dh, stat_map.min(), -dh),
np.arange(dh, stat_map.max(), dh))
elif tail < 0:
hs = np.arange(-dh, stat_map.min(), -dh)
else:
hs = np.arange(dh, stat_map.max(), dh)
# label clusters in slices at different heights
# fill each cluster with total section value
# each point's value is the vertical sum
for h_ in hs:
if h_ > 0:
np.greater_equal(stat_map, h_, bin_buff)
h_factor = h_ ** h
else:
np.less_equal(stat_map, h_, bin_buff)
h_factor = (-h_) ** h
c_ids = _label_clusters_binary(bin_buff, int_buff, int_buff_flat, conn, None)
tfce_increment(c_ids, int_buff_1d, out_1d, e, h_factor)
return out
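# TFCE accumulates, for every sample point v, cluster extent over thresholds:
#
#     TFCE(v) ~ sum over heights 0 < h_ <= stat_map[v] of e(h_)**e * h_**h * dh
#
# where e(h_) is the extent of the cluster containing v at height h_; the
# defaults e=0.5 and h=2.0 follow Smith & Nichols (2009). Each pass of the
# loop above labels one threshold slice and adds its contribution through
# tfce_increment.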
class StatMapProcessor:
def __init__(self, tail, max_axes, parc):
"""Reduce a statistical map to the relevant maximum statistic"""
self.tail = tail
self.max_axes = max_axes
self.parc = parc
def max_stat(self, stat_map):
if self.tail == 0:
v = np.abs(stat_map, stat_map).max(self.max_axes)
elif self.tail > 0:
v = stat_map.max(self.max_axes)
else:
v = -stat_map.min(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class TFCEProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, dh):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.dh = dh
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._int_buff = np.empty(shape, np.uint32)
self._tfce_im = np.empty(shape, np.float64)
self._tfce_im_1d = flatten_1d(self._tfce_im)
self._int_buff_flat = flatten(self._int_buff, connectivity)
self._int_buff_1d = flatten_1d(self._int_buff)
def max_stat(self, stat_map):
v = _tfce(
stat_map, self.tail, self.connectivity, self._tfce_im, self._tfce_im_1d,
self._bin_buff, self._int_buff, self._int_buff_flat, self._int_buff_1d,
self.dh,
).max(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class ClusterProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, threshold,
criteria):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.threshold = threshold
self.criteria = criteria
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._cmap = np.empty(shape, np.uint32)
self._cmap_flat = flatten(self._cmap, connectivity)
if tail == 0:
self._int_buff = np.empty(shape, np.uint32)
self._int_buff_flat = flatten(self._int_buff, connectivity)
else:
self._int_buff = self._int_buff_flat = None
def max_stat(self, stat_map, threshold=None):
if threshold is None:
threshold = self.threshold
cmap = self._cmap
cids = _label_clusters(stat_map, threshold, self.tail, self.connectivity,
self.criteria, cmap, self._cmap_flat,
self._bin_buff, self._int_buff,
self._int_buff_flat)
if self.parc is not None:
v = []
for idx in self.parc:
clusters_v = ndimage.sum(stat_map[idx], cmap[idx], cids)
if len(clusters_v):
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
v.append(clusters_v.max())
else:
v.append(0)
return v
elif len(cids):
clusters_v = ndimage.sum(stat_map, cmap, cids)
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
return clusters_v.max()
else:
return 0
def get_map_processor(kind, *args):
if kind == 'tfce':
return TFCEProcessor(*args)
elif kind == 'cluster':
return ClusterProcessor(*args)
elif kind == 'raw':
return StatMapProcessor(*args)
else:
raise ValueError("kind=%s" % repr(kind))
class NDPermutationDistribution:
"""Accumulate information on a cluster statistic.
Parameters
----------
y : NDVar
Dependent variable.
samples : int
Number of permutations.
threshold : scalar > 0
Threshold-based clustering.
tfce : bool | scalar
Threshold-free cluster enhancement.
tail : 1 | 0 | -1
Which tail(s) of the distribution to consider. 0 is two-tailed,
whereas 1 only considers positive values and -1 only considers
negative values.
meas : str
Label for the parameter measurement (e.g., 't' for t-values).
name : None | str
Name for the comparison.
tstart, tstop : None | scalar
Restrict the time window for finding clusters (None: use the whole
epoch).
criteria : dict
Dictionary with threshold criteria for cluster size: 'mintime'
(seconds) and 'minsource' (n_sources).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation : bool
Conduct permutations regardless of whether there are any clusters.
Notes
-----
Use of the NDPermutationDistribution proceeds in 3 steps:
- initialize the NDPermutationDistribution object: ``cdist = NDPermutationDistribution(...)``
- use a copy of y cropped to the time window of interest:
``y = cdist.Y_perm``
- add the actual statistical map with ``cdist.add_original(pmap)``
- if any clusters are found (``if cdist.n_clusters``):
- proceed to add statistical maps from permuted data with
``cdist.add_perm(pmap)``.
Permutation data shape: case, [vector, ][non-adjacent, ] ...
internal shape: [non-adjacent, ] ...
"""
tfce_warning = None
def __init__(self, y, samples, threshold, tfce=False, tail=0, meas='?', name=None,
tstart=None, tstop=None, criteria={}, parc=None, force_permutation=False):
assert y.has_case
assert parc is None or isinstance(parc, str)
if tfce and threshold:
raise RuntimeError(f"threshold={threshold!r}, tfce={tfce!r}: mutually exclusive parameters")
elif tfce:
if tfce is not True:
tfce = abs(tfce)
kind = 'tfce'
elif threshold:
threshold = float(threshold)
kind = 'cluster'
assert threshold > 0
else:
kind = 'raw'
# vector: will be removed for stat_map
vector = [d._connectivity_type == 'vector' for d in y.dims[1:]]
has_vector_ax = any(vector)
if has_vector_ax:
vector_ax = vector.index(True)
else:
vector_ax = None
# prepare temporal cropping
if (tstart is None) and (tstop is None):
y_perm = y
self._crop_for_permutation = False
self._crop_idx = None
else:
t_ax = y.get_axis('time') - 1
y_perm = y.sub(time=(tstart, tstop))
# for stat-maps
if vector_ax is not None and vector_ax < t_ax:
t_ax -= 1
t_slice = y.time._array_index(slice(tstart, tstop))
self._crop_for_permutation = True
self._crop_idx = FULL_AXIS_SLICE * t_ax + (t_slice,)
dims = list(y_perm.dims[1:])
if has_vector_ax:
del dims[vector_ax]
# custom connectivity: move non-adjacent connectivity to first axis
custom = [d._connectivity_type == 'custom' for d in dims]
n_custom = sum(custom)
if n_custom > 1:
raise NotImplementedError("More than one axis with custom connectivity")
nad_ax = None if n_custom == 0 else custom.index(True)
if nad_ax:
swapped_dims = list(dims)
swapped_dims[0], swapped_dims[nad_ax] = dims[nad_ax], dims[0]
else:
swapped_dims = dims
connectivity = Connectivity(swapped_dims, parc)
assert connectivity.vector is None
# cluster map properties
ndim = len(dims)
# prepare cluster minimum size criteria
if criteria:
criteria_ = []
for k, v in criteria.items():
                m = re.match(r'min(\w+)', k)
if m:
dimname = m.group(1)
if not y.has_dim(dimname):
raise TypeError(
"%r is an invalid keyword argument for this testnd "
"function (no dimension named %r)" % (k, dimname))
ax = y.get_axis(dimname) - 1
if dimname == 'time':
v = int(ceil(v / y.time.tstep))
else:
raise TypeError("%r is an invalid keyword argument for this testnd function" % (k,))
if nad_ax:
if ax == 0:
ax = nad_ax
elif ax == nad_ax:
ax = 0
axes = tuple(i for i in range(ndim) if i != ax)
criteria_.append((axes, v))
if kind != 'cluster':
# here so that invalid keywords raise explicitly
err = ("Can not use cluster size criteria when doing "
"threshold free cluster evaluation")
raise ValueError(err)
else:
criteria_ = None
# prepare distribution
samples = int(samples)
if parc:
for parc_ax, parc_dim in enumerate(swapped_dims):
if parc_dim.name == parc:
break
else:
raise ValueError("parc=%r (no dimension named %r)" % (parc, parc))
if parc_dim._connectivity_type == 'none':
parc_indexes = np.arange(len(parc_dim))
elif kind == 'tfce':
raise NotImplementedError(
f"TFCE for parc={parc!r} ({parc_dim.__class__.__name__} dimension)")
elif parc_dim._connectivity_type == 'custom':
if not hasattr(parc_dim, 'parc'):
raise NotImplementedError(f"parc={parc!r}: dimension has no parcellation")
parc_indexes = tuple(np.flatnonzero(parc_dim.parc == cell) for
cell in parc_dim.parc.cells)
parc_dim = Categorial(parc, parc_dim.parc.cells)
else:
raise NotImplementedError(f"parc={parc!r}")
dist_shape = (samples, len(parc_dim))
dist_dims = ('case', parc_dim)
max_axes = tuple(chain(range(parc_ax), range(parc_ax + 1, ndim)))
else:
dist_shape = (samples,)
dist_dims = None
max_axes = None
parc_indexes = None
# arguments for the map processor
shape = tuple(map(len, swapped_dims))
if kind == 'raw':
map_args = (kind, tail, max_axes, parc_indexes)
elif kind == 'tfce':
dh = 0.1 if tfce is True else tfce
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, dh)
else:
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, threshold, criteria_)
self.kind = kind
self.y_perm = y_perm
self.dims = tuple(dims) # external stat map dims (cropped time)
self.shape = shape # internal stat map shape
self._connectivity = connectivity
self.samples = samples
self.dist_shape = dist_shape
self._dist_dims = dist_dims
self._max_axes = max_axes
self.dist = None
self.threshold = threshold
self.tfce = tfce
self.tail = tail
self._nad_ax = nad_ax
self._vector_ax = vector_ax
self.tstart = tstart
self.tstop = tstop
self.parc = parc
self.meas = meas
self.name = name
self._criteria = criteria_
self.criteria = criteria
self.map_args = map_args
self.has_original = False
self.do_permutation = False
self.dt_perm = None
self._finalized = False
self._init_time = current_time()
self._host = socket.gethostname()
self.force_permutation = force_permutation
from .. import __version__
self._version = __version__
def _crop(self, im):
"Crop an original stat_map"
if self._crop_for_permutation:
return im[self._crop_idx]
else:
return im
def uncrop(
self,
ndvar: NDVar, # NDVar to uncrop
to: NDVar, # NDVar that has the target time dimensions
default: float = 0, # value to fill in uncropped area
):
if self.tstart is None and self.tstop is None:
return ndvar
target_time = to.get_dim('time')
t_ax = ndvar.get_axis('time')
dims = list(ndvar.dims)
dims[t_ax] = target_time
shape = list(ndvar.shape)
shape[t_ax] = len(target_time)
t_slice = target_time._array_index(slice(self.tstart, self.tstop))
x = np.empty(shape, ndvar.x.dtype)
x.fill(default)
x[FULL_AXIS_SLICE * t_ax + (t_slice,)] = ndvar.x
return NDVar(x, dims, ndvar.info, ndvar.name)
def add_original(self, stat_map):
"""Add the original statistical parameter map.
Parameters
----------
stat_map : array
Parameter map of the statistic of interest (uncropped).
"""
if self.has_original:
raise RuntimeError("Original pmap already added")
logger = logging.getLogger(__name__)
logger.debug("Adding original parameter map...")
# crop/reshape stat_map
stat_map = self._crop(stat_map)
if self._nad_ax:
stat_map = stat_map.swapaxes(0, self._nad_ax)
# process map
if self.kind == 'tfce':
dh = 0.1 if self.tfce is True else self.tfce
self.tfce_warning = max(stat_map.max(), -stat_map.min()) < dh
cmap = tfce(stat_map, self.tail, self._connectivity, dh)
cids = None
n_clusters = cmap.max() > 0
elif self.kind == 'cluster':
cmap, cids = label_clusters(stat_map, self.threshold, self.tail,
self._connectivity, self._criteria)
n_clusters = len(cids)
# clean original cluster map
idx = np.in1d(cmap, cids, invert=True).reshape(self.shape)
cmap[idx] = 0
else:
cmap = stat_map
cids = None
n_clusters = True
self._t0 = current_time()
self._original_cluster_map = cmap
self._cids = cids
self.n_clusters = n_clusters
self.has_original = True
self.dt_original = self._t0 - self._init_time
self._original_param_map = stat_map
if self.force_permutation or (self.samples and n_clusters):
self._create_dist()
self.do_permutation = True
else:
self.dist_array = None
self.finalize()
def _create_dist(self):
"Create the distribution container"
if CONFIG['n_workers']:
n = reduce(operator.mul, self.dist_shape)
dist_array = RawArray('d', n)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = self.dist_shape
else:
dist_array = None
dist = np.zeros(self.dist_shape)
self.dist_array = dist_array
self.dist = dist
def _aggregate_dist(self, **sub):
"""Aggregate permutation distribution to one value per permutation
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
dist : array, shape = (samples,)
Maximum value for each permutation in the given region.
"""
dist = self.dist
if sub:
if self._dist_dims is None:
raise TypeError("NDPermutationDistribution does not have parcellation")
dist_ = NDVar(dist, self._dist_dims)
dist_sub = dist_.sub(**sub)
dist = dist_sub.x
if dist.ndim > 1:
axes = tuple(range(1, dist.ndim))
dist = dist.max(axes)
return dist
def __repr__(self):
items = []
if self.has_original:
dt = timedelta(seconds=round(self.dt_original))
items.append("%i clusters (%s)" % (self.n_clusters, dt))
if self.samples > 0 and self.n_clusters > 0:
if self.dt_perm is not None:
dt = timedelta(seconds=round(self.dt_perm))
items.append("%i permutations (%s)" % (self.samples, dt))
else:
items.append("no data")
return "<NDPermutationDistribution: %s>" % ', '.join(items)
def __getstate__(self):
if not self._finalized:
raise RuntimeError("Cannot pickle cluster distribution before all "
"permutations have been added.")
state = {
name: getattr(self, name) for name in (
'name', 'meas', '_version', '_host', '_init_time',
# settings ...
'kind', 'threshold', 'tfce', 'tail', 'criteria', 'samples', 'tstart', 'tstop', 'parc',
# data properties ...
'dims', 'shape', '_nad_ax', '_vector_ax', '_criteria', '_connectivity',
# results ...
'dt_original', 'dt_perm', 'n_clusters', '_dist_dims', 'dist', '_original_param_map', '_original_cluster_map', '_cids',
)}
state['version'] = 3
return state
def __setstate__(self, state):
# backwards compatibility
version = state.pop('version', 0)
if version == 0:
if '_connectivity_src' in state:
del state['_connectivity_src']
del state['_connectivity_dst']
if '_connectivity' in state:
del state['_connectivity']
if 'N' in state:
state['samples'] = state.pop('N')
if '_version' not in state:
state['_version'] = '< 0.11'
if '_host' not in state:
state['_host'] = 'unknown'
if '_init_time' not in state:
state['_init_time'] = None
if 'parc' not in state:
if state['_dist_dims'] is None:
state['parc'] = None
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
elif isinstance(state['parc'], tuple):
if len(state['parc']) == 0:
state['parc'] = None
elif len(state['parc']) == 1:
state['parc'] = state['parc'][0]
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
nad_ax = state['_nad_ax']
state['dims'] = dims = state['dims'][1:]
state['_connectivity'] = Connectivity(
(dims[nad_ax],) + dims[:nad_ax] + dims[nad_ax + 1:],
state['parc'])
if version < 2:
state['_vector_ax'] = None
if version < 3:
            state['tfce'] = state['kind'] == 'tfce'
for k, v in state.items():
setattr(self, k, v)
self.has_original = True
self.finalize()
def _repr_test_args(self, pmin):
"Argument representation for TestResult repr"
args = ['samples=%r' % self.samples]
if pmin is not None:
args.append(f"pmin={pmin!r}")
elif self.kind == 'tfce':
arg = f"tfce={self.tfce!r}"
if self.tfce_warning:
arg = f"{arg} [WARNING: The TFCE step is larger than the largest value in the data]"
args.append(arg)
if self.tstart is not None:
args.append(f"tstart={self.tstart!r}")
if self.tstop is not None:
args.append(f"tstop={self.tstop!r}")
for k, v in self.criteria.items():
args.append(f"{k}={v!r}")
return args
def _repr_clusters(self):
info = []
if self.kind == 'cluster':
if self.n_clusters == 0:
info.append("no clusters")
else:
info.append("%i clusters" % self.n_clusters)
if self.n_clusters and self.samples:
info.append(f"{fmtxt.peq(self.probability_map.min())}")
return info
def _package_ndvar(self, x, info=None, external_shape=False):
"Generate NDVar from map with internal shape"
if not self.dims:
if isinstance(x, np.ndarray):
return x.item()
return x
if not external_shape and self._nad_ax:
x = x.swapaxes(0, self._nad_ax)
if info is None:
info = {}
return NDVar(x, self.dims, info, self.name)
def finalize(self):
"Package results and delete temporary data"
if self.dt_perm is None:
self.dt_perm = current_time() - self._t0
# original parameter map
param_contours = {}
if self.kind == 'cluster':
if self.tail >= 0:
param_contours[self.threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-self.threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
self.parameter_map = self._package_ndvar(self._original_param_map, info)
# TFCE map
if self.kind == 'tfce':
self.tfce_map = self._package_ndvar(self._original_cluster_map)
else:
self.tfce_map = None
# cluster map
if self.kind == 'cluster':
self.cluster_map = self._package_ndvar(self._original_cluster_map)
else:
self.cluster_map = None
self._finalized = True
def data_for_permutation(self, raw=True):
"""Retrieve data flattened for permutation
Parameters
----------
raw : bool
Return a RawArray and a shape tuple instead of a numpy array.
"""
# get data in the right shape
x = self.y_perm.x
if self._vector_ax:
x = np.moveaxis(x, self._vector_ax + 1, 1)
if self._nad_ax is not None:
dst = 1
src = 1 + self._nad_ax
if self._vector_ax is not None:
dst += 1
if self._vector_ax > self._nad_ax:
src += 1
if dst != src:
x = x.swapaxes(dst, src)
# flat y shape
ndims = 1 + (self._vector_ax is not None)
n_flat = 1 if x.ndim == ndims else reduce(operator.mul, x.shape[ndims:])
y_flat_shape = x.shape[:ndims] + (n_flat,)
if not raw:
return x.reshape(y_flat_shape)
n = reduce(operator.mul, y_flat_shape)
ra = RawArray('d', n)
ra[:] = x.ravel() # OPT: don't copy data
return ra, y_flat_shape, x.shape[ndims:]
def _cluster_properties(self, cluster_map, cids):
"""Create a Dataset with cluster properties
Parameters
----------
cluster_map : NDVar
NDVar in which clusters are marked by bearing the same number.
cids : array_like of int
Numbers specifying the clusters (must occur in cluster_map) which
should be analyzed.
Returns
-------
cluster_properties : Dataset
Cluster properties. Which properties are included depends on the
dimensions.
"""
ndim = cluster_map.ndim
n_clusters = len(cids)
# setup compression
compression = []
for ax, dim in enumerate(cluster_map.dims):
extents = np.empty((n_clusters, len(dim)), dtype=np.bool_)
axes = tuple(i for i in range(ndim) if i != ax)
compression.append((ax, dim, axes, extents))
# find extents for all clusters
c_mask = np.empty(cluster_map.shape, np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
for ax, dim, axes, extents in compression:
np.any(c_mask, axes, extents[i])
# prepare Dataset
ds = Dataset()
ds['id'] = Var(cids)
for ax, dim, axes, extents in compression:
properties = dim._cluster_properties(extents)
if properties is not None:
ds.update(properties)
return ds
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
if self.kind != 'cluster':
raise RuntimeError(
f'Only cluster-based tests have clusters with stable ids, this '
f'is a {self.kind} distribution. Use the .find_clusters() '
f'method instead with maps=True.')
elif cluster_id not in self._cids:
raise ValueError(f'No cluster with id {cluster_id!r}')
out = self.parameter_map * (self.cluster_map == cluster_id)
properties = self._cluster_properties(self.cluster_map, (cluster_id,))
for k in properties:
out.info[k] = properties[0, k]
return out
def clusters(self, pmin=None, maps=True, **sub):
"""Find significant clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value for clusters (for thresholded cluster tests the
default is 1, for others 0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default True).
[dimname] : index
Limit the data for the distribution.
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
if pmin is None:
if self.samples > 0 and self.kind != 'cluster':
pmin = 0.05
elif self.samples == 0:
msg = ("Can not determine p values in distribution without "
"permutations.")
if self.kind == 'cluster':
msg += " Find clusters with pmin=None."
raise RuntimeError(msg)
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if self.kind == 'cluster':
if sub:
cluster_map = self.cluster_map.sub(**sub)
cids = np.setdiff1d(cluster_map.x, [0])
else:
cluster_map = self.cluster_map
cids = np.array(self._cids)
if len(cids):
# measure original clusters
cluster_v = ndimage.sum(param_map.x, cluster_map.x, cids)
# p-values
if self.samples:
# p-values: "the proportion of random partitions that
# resulted in a larger test statistic than the observed
# one" (179)
dist = self._aggregate_dist(**sub)
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
# select clusters
if pmin is not None:
idx = cluster_p <= pmin
cids = cids[idx]
cluster_p = cluster_p[idx]
cluster_v = cluster_v[idx]
# p-value corrected across parc
if sub:
dist = self._aggregate_dist()
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p_corr = n_larger / self.samples
else:
cluster_v = cluster_p = cluster_p_corr = []
ds = self._cluster_properties(cluster_map, cids)
ds['v'] = Var(cluster_v)
if self.samples:
ds['p'] = Var(cluster_p)
if sub:
ds['p_parc'] = Var(cluster_p_corr)
threshold = self.threshold
else:
p_map = self.compute_probability_map(**sub)
bin_map = np.less_equal(p_map.x, pmin)
# threshold for maps
if maps:
values = np.abs(param_map.x)[bin_map]
if len(values):
threshold = values.min() / 2
else:
threshold = 1.
# find clusters (reshape to internal shape for labelling)
if self._nad_ax:
bin_map = bin_map.swapaxes(0, self._nad_ax)
if sub:
raise NotImplementedError("sub")
# need to subset connectivity!
c_map, cids = label_clusters_binary(bin_map, self._connectivity)
if self._nad_ax:
c_map = c_map.swapaxes(0, self._nad_ax)
# Dataset with cluster info
cluster_map = NDVar(c_map, p_map.dims, {}, "clusters")
ds = self._cluster_properties(cluster_map, cids)
ds.info['clusters'] = cluster_map
min_pos = ndimage.minimum_position(p_map.x, c_map, cids)
ds['p'] = Var([p_map.x[pos] for pos in min_pos])
if 'p' in ds:
ds['sig'] = star_factor(ds['p'])
# expand clusters
if maps:
shape = (ds.n_cases,) + param_map.shape
c_maps = np.empty(shape, dtype=param_map.x.dtype)
c_mask = np.empty(param_map.shape, dtype=np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map.x, cid, c_mask)
np.multiply(param_map.x, c_mask, c_maps[i])
# package ndvar
dims = ('case',) + param_map.dims
param_contours = {}
if self.tail >= 0:
param_contours[threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
info['summary_func'] = np.sum
ds['cluster'] = NDVar(c_maps, dims, info)
else:
ds.info['clusters'] = self.cluster_map
return ds
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
if self.kind == 'cluster':
raise RuntimeError("Not a threshold-free distribution")
param_map = self._original_param_map
probability_map = self.probability_map.x
if self._nad_ax:
probability_map = probability_map.swapaxes(0, self._nad_ax)
peaks = find_peaks(self._original_cluster_map, self._connectivity)
peak_map, peak_ids = label_clusters_binary(peaks, self._connectivity)
ds = Dataset()
ds['id'] = Var(peak_ids)
v = ds.add_empty_var('v')
if self.samples:
p = ds.add_empty_var('p')
bin_buff = np.empty(peak_map.shape, np.bool8)
for i, id_ in enumerate(peak_ids):
idx = np.equal(peak_map, id_, bin_buff)
v[i] = param_map[idx][0]
if self.samples:
p[i] = probability_map[idx][0]
return ds
def compute_probability_map(self, **sub):
"""Compute a probability map
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
probability : NDVar
Map of p-values.
"""
if not self.samples:
raise RuntimeError("Can't compute probability without permutations")
if self.kind == 'cluster':
cpmap = np.ones(self.shape)
if self.n_clusters:
cids = self._cids
dist = self._aggregate_dist(**sub)
cluster_map = self._original_cluster_map
param_map = self._original_param_map
# measure clusters
cluster_v = ndimage.sum(param_map, cluster_map, cids)
# p-values: "the proportion of random partitions that resulted
# in a larger test statistic than the observed one" (179)
n_larger = np.sum(dist >= np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
c_mask = np.empty(self.shape, dtype=np.bool8)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
cpmap[c_mask] = cluster_p[i]
# revert to original shape
if self._nad_ax:
cpmap = cpmap.swapaxes(0, self._nad_ax)
dims = self.dims
else:
if self.kind == 'tfce':
stat_map = self.tfce_map
else:
if self.tail == 0:
stat_map = self.parameter_map.abs()
elif self.tail < 0:
stat_map = -self.parameter_map
else:
stat_map = self.parameter_map
if sub:
stat_map = stat_map.sub(**sub)
dims = stat_map.dims if isinstance(stat_map, NDVar) else None
cpmap = np.zeros(stat_map.shape) if dims else 0.
if self.dist is None: # flat stat-map
cpmap += 1
else:
dist = self._aggregate_dist(**sub)
idx = np.empty(stat_map.shape, dtype=np.bool8)
actual = stat_map.x if self.dims else stat_map
for v in dist:
cpmap += np.greater_equal(v, actual, idx)
cpmap /= self.samples
if dims:
return NDVar(cpmap, dims, _info.for_cluster_pmap(), self.name)
else:
return cpmap
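    # In the non-cluster branch above, the p-value at each point is the
    # proportion of permutations whose maximum statistic reaches the observed
    # value, i.e. conceptually ``p = np.sum(dist >= actual) / samples``; the
    # loop over ``dist`` accumulates this one permutation at a time to avoid
    # allocating a (samples,) x map-shaped array.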
def masked_parameter_map(self, pmin=0.05, name=None, **sub):
"""Parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map, masked with
p <= pmin.
"""
if not 1 >= pmin > 0:
raise ValueError(f"pmin={pmin}: needs to be between 1 and 0")
if name is None:
name = self.parameter_map.name
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if pmin == 1:
if self.kind != 'cluster':
raise ValueError(f"pmin=1 is only a valid mask for threshold-based cluster tests")
mask = self.cluster_map == 0
else:
probability_map = self.compute_probability_map(**sub)
mask = probability_map > pmin
return param_map.mask(mask, name)
@LazyProperty
def probability_map(self):
if self.samples:
return self.compute_probability_map()
else:
return None
@LazyProperty
def _default_plot_obj(self):
if self.samples:
return [[self.parameter_map, self.probability_map]]
else:
return [[self.parameter_map]]
def info_list(self, title="Computation Info"):
"List with information on computation"
l = fmtxt.List(title)
l.add_item("Eelbrain version: %s" % self._version)
l.add_item("Host Computer: %s" % self._host)
if self._init_time is not None:
l.add_item("Created: %s" % datetime.fromtimestamp(self._init_time)
.strftime('%y-%m-%d %H:%M'))
l.add_item("Original time: %s" % timedelta(seconds=round(self.dt_original)))
l.add_item("Permutation time: %s" % timedelta(seconds=round(self.dt_perm)))
return l
class _MergedTemporalClusterDist:
"""Merge permutation distributions from multiple tests"""
def __init__(self, cdists):
if isinstance(cdists[0], list):
self.effects = [d.name for d in cdists[0]]
self.samples = cdists[0][0].samples
dist = {}
for i, effect in enumerate(self.effects):
if any(d[i].n_clusters for d in cdists):
dist[effect] = np.column_stack([d[i].dist for d in cdists if d[i].dist is not None])
if len(dist):
dist = {c: d.max(1) for c, d in dist.items()}
else:
self.samples = cdists[0].samples
if any(d.n_clusters for d in cdists):
dist = np.column_stack([d.dist for d in cdists if d.dist is not None])
dist = dist.max(1)
else:
dist = None
self.dist = dist
def correct_cluster_p(self, res):
clusters = res.find_clusters()
keys = list(clusters.keys())
if not clusters.n_cases:
return clusters
if isinstance(res, MultiEffectNDTest):
keys.insert(-1, 'p_parc')
cluster_p_corr = []
for cl in clusters.itercases():
n_larger = np.sum(self.dist[cl['effect']] > np.abs(cl['v']))
cluster_p_corr.append(float(n_larger) / self.samples)
else:
keys.append('p_parc')
vs = np.array(clusters['v'])
n_larger = np.sum(self.dist > np.abs(vs[:, None]), 1)
cluster_p_corr = n_larger / self.samples
clusters['p_parc'] = Var(cluster_p_corr)
clusters = clusters[keys]
return clusters
def distribution_worker(dist_array, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = dist_shape
samples = dist_shape[0]
for i in trange(samples, desc="Permutation test", unit=' permutations',
disable=CONFIG['tqdm']):
dist[i] = in_queue.get()
if kill_beacon.is_set():
return
def permutation_worker(in_queue, out_queue, y, y_flat_shape, stat_map_shape,
test_func, args, map_args, kill_beacon):
"Worker for 1 sample t-test"
if CONFIG['nice']:
os.nice(CONFIG['nice'])
n = reduce(operator.mul, y_flat_shape)
y = np.frombuffer(y, np.float64, n).reshape(y_flat_shape)
stat_map = np.empty(stat_map_shape)
stat_map_flat = stat_map.ravel()
map_processor = get_map_processor(*map_args)
while not kill_beacon.is_set():
perm = in_queue.get()
if perm is None:
break
test_func(y, *args, stat_map_flat, perm)
max_v = map_processor.max_stat(stat_map)
out_queue.put(max_v)
def run_permutation(test_func, dist, iterator, *args):
if CONFIG['n_workers']:
workers, out_queue, kill_beacon = setup_workers(test_func, dist, args)
try:
for perm in iterator:
out_queue.put(perm)
for _ in range(len(workers) - 1):
out_queue.put(None)
logger = logging.getLogger(__name__)
for w in workers:
w.join()
logger.debug("worker joined")
except KeyboardInterrupt:
kill_beacon.set()
raise
else:
y = dist.data_for_permutation(False)
map_processor = get_map_processor(*dist.map_args)
stat_map = np.empty(dist.shape)
stat_map_flat = stat_map.ravel()
for i, perm in enumerate(iterator):
test_func(y, *args, stat_map_flat, perm)
dist.dist[i] = map_processor.max_stat(stat_map)
dist.finalize()
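# Pipeline used by the multiprocessing path (set up below): the main process
# feeds permutation indices into a queue, several permutation workers compute
# the maximum statistic per permutation, and a single distribution worker
# writes the results into the shared RawArray:
#
#     iterator -> permutation_queue -> permutation_worker (x n_workers)
#              -> dist_queue -> distribution_worker -> dist (shared memory)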
def setup_workers(test_func, dist, func_args):
"Initialize workers for permutation tests"
logger = logging.getLogger(__name__)
logger.debug("Setting up %i worker processes..." % CONFIG['n_workers'])
permutation_queue = SimpleQueue()
dist_queue = SimpleQueue()
kill_beacon = Event()
# permutation workers
y, y_flat_shape, stat_map_shape = dist.data_for_permutation()
args = (permutation_queue, dist_queue, y, y_flat_shape, stat_map_shape,
test_func, func_args, dist.map_args, kill_beacon)
workers = []
for _ in range(CONFIG['n_workers']):
w = Process(target=permutation_worker, args=args)
w.start()
workers.append(w)
# distribution worker
args = (dist.dist_array, dist.dist_shape, dist_queue, kill_beacon)
w = Process(target=distribution_worker, args=args)
w.start()
workers.append(w)
return workers, permutation_queue, kill_beacon
def run_permutation_me(test, dists, iterator):
dist = dists[0]
if dist.kind == 'cluster':
thresholds = tuple(d.threshold for d in dists)
else:
thresholds = None
if CONFIG['n_workers']:
workers, out_queue, kill_beacon = setup_workers_me(test, dists, thresholds)
try:
for perm in iterator:
out_queue.put(perm)
for _ in range(len(workers) - 1):
out_queue.put(None)
logger = logging.getLogger(__name__)
for w in workers:
w.join()
logger.debug("worker joined")
except KeyboardInterrupt:
kill_beacon.set()
raise
else:
y = dist.data_for_permutation(False)
map_processor = get_map_processor(*dist.map_args)
stat_maps = test.preallocate(dist.shape)
if thresholds:
stat_maps_iter = tuple(zip(stat_maps, thresholds, dists))
else:
stat_maps_iter = tuple(zip(stat_maps, dists))
for i, perm in enumerate(iterator):
test.map(y, perm)
if thresholds:
for m, t, d in stat_maps_iter:
if d.do_permutation:
d.dist[i] = map_processor.max_stat(m, t)
else:
for m, d in stat_maps_iter:
if d.do_permutation:
d.dist[i] = map_processor.max_stat(m)
for d in dists:
if d.do_permutation:
d.finalize()
def setup_workers_me(test_func, dists, thresholds):
"Initialize workers for permutation tests"
logger = logging.getLogger(__name__)
logger.debug("Setting up %i worker processes..." % CONFIG['n_workers'])
permutation_queue = SimpleQueue()
dist_queue = SimpleQueue()
kill_beacon = Event()
# permutation workers
dist = dists[0]
y, y_flat_shape, stat_map_shape = dist.data_for_permutation()
args = (permutation_queue, dist_queue, y, y_flat_shape, stat_map_shape,
test_func, dist.map_args, thresholds, kill_beacon)
workers = []
for _ in range(CONFIG['n_workers']):
w = Process(target=permutation_worker_me, args=args)
w.start()
workers.append(w)
# distribution worker
args = ([d.dist_array for d in dists], dist.dist_shape, dist_queue, kill_beacon)
w = Process(target=distribution_worker_me, args=args)
w.start()
workers.append(w)
return workers, permutation_queue, kill_beacon
def permutation_worker_me(in_queue, out_queue, y, y_flat_shape, stat_map_shape,
test, map_args, thresholds, kill_beacon):
if CONFIG['nice']:
os.nice(CONFIG['nice'])
n = reduce(operator.mul, y_flat_shape)
y = np.frombuffer(y, np.float64, n).reshape(y_flat_shape)
iterator = test.preallocate(stat_map_shape)
if thresholds:
iterator = tuple(zip(iterator, thresholds))
else:
iterator = tuple(iterator)
map_processor = get_map_processor(*map_args)
while not kill_beacon.is_set():
perm = in_queue.get()
if perm is None:
break
test.map(y, perm)
if thresholds:
max_v = [map_processor.max_stat(m, t) for m, t in iterator]
else:
max_v = [map_processor.max_stat(m) for m in iterator]
out_queue.put(max_v)
def distribution_worker_me(dist_arrays, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dists = [d if d is None else np.frombuffer(d, np.float64, n).reshape(dist_shape)
for d in dist_arrays]
samples = dist_shape[0]
for i in trange(samples, desc="Permutation test", unit=' permutations',
disable=CONFIG['tqdm']):
for dist, v in zip(dists, in_queue.get()):
if dist is not None:
dist[i] = v
if kill_beacon.is_set():
return
# Backwards compatibility for pickling
_ClusterDist = NDPermutationDistribution
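# A minimal, self-contained sketch (not part of this module) of the
# producer/consumer pattern used above: the main process feeds work through a
# queue, each worker pushes results to a second queue, and one None sentinel
# per worker signals shutdown. All names here are illustrative only.
#
#     from multiprocessing import Process, SimpleQueue
#
#     def _worker(in_queue, out_queue):
#         while True:
#             item = in_queue.get()
#             if item is None:
#                 break
#             out_queue.put(item * item)
#
#     if __name__ == '__main__':
#         in_q, out_q = SimpleQueue(), SimpleQueue()
#         procs = [Process(target=_worker, args=(in_q, out_q)) for _ in range(2)]
#         for p in procs:
#             p.start()
#         for item in range(10):
#             in_q.put(item)
#         for _ in procs:          # one sentinel per worker
#             in_q.put(None)
#         results = [out_q.get() for _ in range(10)]
#         for p in procs:
#             p.join()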
|
nilq/baby-python
|
python
|
class SelectionSort:
    @staticmethod
    def sort(a):
        # For each position i, swap the minimum of a[i:] into place.
        for i in range(len(a)):
            minimum = i
            for j in range(i + 1, len(a)):
                if a[j] < a[minimum]:
                    minimum = j
            a[i], a[minimum] = a[minimum], a[i]
original = [
325432, 989, 547510, 3, -93, 189019, 5042, 123,
597, 42, 7506, 184, 184, 2409, 45, 824,
4, -2650, 9, 662, 3928, -170, 45358, 395,
842, 7697, 110, 14, 99, 221
]
SelectionSort.sort(original)
sorted_ = [
-2650, -170, -93, 3, 4, 9, 14, 42, 45, 99, 110,
123, 184, 184, 221, 395, 597, 662, 824, 842, 989,
2409, 3928, 5042, 7506, 7697, 45358, 189019, 325432, 547510
]
assert original == sorted_
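# Quick illustrative check (not from the original): selection sort should
# agree with Python's built-in sorted() on arbitrary data.
import random
extra = [random.randint(-10**6, 10**6) for _ in range(200)]
expected = sorted(extra)
SelectionSort.sort(extra)
assert extra == expected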
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2019/1/18 15:40
|
nilq/baby-python
|
python
|
# Implement a queue that stores integers, then process the commands given on input.
# There are six commands in total:
# push X: push the integer X onto the queue.
# pop: remove the frontmost integer from the queue and print it; print -1 if the queue is empty.
# size: print the number of integers in the queue.
# empty: print 1 if the queue is empty, 0 otherwise.
# front: print the frontmost integer; print -1 if the queue is empty.
# back: print the rearmost integer; print -1 if the queue is empty.
import sys
from collections import deque
t=int(input())
q=deque()
for _ in range(t):
ql=len(q)
s = sys.stdin.readline().rstrip().split()
if(len(s)==2):
q.append(s[1])
elif s[0]=='front':
if(ql!=0):
print(q[0])
else:
print(-1)
elif s[0]=='back':
if(ql!=0):
print(q[ql-1])
else:
print(-1)
elif s[0]=='size':
print(ql)
elif s[0]=='empty':
if(ql!=0):
print(0)
else:
print(1)
else:
if(ql!=0):
print(q.popleft())
else:
print(-1)
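# Illustrative session (input on stdin, expected output on the right):
#   6
#   push 1
#   push 2
#   front    -> 1
#   back     -> 2
#   size     -> 2
#   pop      -> 1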
|
nilq/baby-python
|
python
|
from clickhouse_orm import migrations
from ..test_migrations import *
operations = [migrations.AlterIndexes(ModelWithIndex2, reindex=True)]
|
nilq/baby-python
|
python
|
from functools import partial
import pytest
from stp_core.loop.eventually import eventuallyAll
from plenum.test import waits
from plenum.test.helper import checkReqNack
whitelist = ['discarding message']
class TestVerifier:
@staticmethod
def verify(operation):
assert operation['amount'] <= 100, 'amount too high'
@pytest.fixture(scope="module")
def restrictiveVerifier(nodeSet):
for n in nodeSet:
n.opVerifiers = [TestVerifier()]
@pytest.fixture(scope="module")
def request1(wallet1):
op = {"type": "buy",
"amount": 999}
req = wallet1.signOp(op)
return req
@pytest.mark.skip(reason="old style plugin")
def testRequestFullRoundTrip(restrictiveVerifier,
client1,
sent1,
looper,
nodeSet):
update = {'reason': 'client request invalid: InvalidClientRequest() '
'[caused by amount too high\nassert 999 <= 100]'}
coros2 = [partial(checkReqNack, client1, node, sent1.identifier,
sent1.reqId, update)
for node in nodeSet]
timeout = waits.expectedReqAckQuorumTime()
looper.run(eventuallyAll(*coros2, totalTimeout=timeout))
|
nilq/baby-python
|
python
|
import torch
from torch import autograd
def steptaker(data, critic, step, num_step = 1):
"""Applies gradient descent (GD) to data using critic
Inputs
- data; data to apply GD to
- critic; critic to compute gradients of
- step; how large of a step to take
    - num_step; how finely to discretize the flow; taken as 1 in TTC
Outputs
- data with gradient descent applied
"""
for j in range(num_step):
gradients = grad_calc(data, critic)
data = (data - (step/num_step)*gradients).detach()
return data.detach()
def rk4(data, critic, step, num_step = 1):
"""Assumes data is a batch of images, critic is a Kantorovich potential,
and step is desired step size. Applies fourth order Runge-Kutta to the data num_step times
with stepsize step/num_step. Unused in TTC"""
h = step/num_step
for j in range(num_step):
data_0 = data.detach().clone()
k = grad_calc(data_0, critic)
data += (h/6)*k
k = grad_calc(data_0 + (h/2)*k, critic)
data += (h/3)*k
k = grad_calc(data_0 + (h/2)*k, critic)
data += (h/3)*k
        k = grad_calc(data_0 + h*k, critic)  # fourth RK4 stage evaluates at a full step h
data += (h/6)*k
data = data.detach()
return data
def grad_calc(data, critic):
"""Returns the gradients of critic at data"""
data = data.detach().clone()
data.requires_grad = True
Dfake = critic(data)
gradients = autograd.grad(outputs = Dfake, inputs = data,
grad_outputs = torch.ones(Dfake.size()).cuda(), only_inputs=True)[0]
return gradients.detach()
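# Minimal usage sketch (illustrative; 'toy_critic' is a stand-in for the
# Kantorovich potential used in TTC). grad_calc allocates grad_outputs on the
# GPU, so this only runs when a CUDA device is available.
if torch.cuda.is_available():
    toy_critic = torch.nn.Sequential(
        torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 1)).cuda()
    toy_batch = torch.randn(4, 3, 32, 32).cuda()
    toy_batch = steptaker(toy_batch, toy_critic, step=0.1)  # one descent step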
|
nilq/baby-python
|
python
|
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import pickle
import pprint
import datefinder
# What the program can access within Calendar
# See more at https://developers.google.com/calendar/auth
scopes = ["https://www.googleapis.com/auth/calendar"]
flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", scopes=scopes)
# Use this to pull the user's credentials into a pickle file
#credentials = flow.run_console()
#pickle.dump(credentials, open("token.pkl", "wb"))
# Read the credentials from a saved pickle file
credentials = pickle.load(open("token.pkl", "rb"))
# Build the calendar resource
service = build("calendar", "v3", credentials=credentials)
# Store a list of Calendars on the account
result = service.calendarList().list().execute()
calendar_id = result["items"][0]["id"]
result = service.events().list(calendarId=calendar_id).execute()
def create_event(my_event):
"""
Create a Google Calendar Event
Args:
my_event: CalendarEvent object
"""
print("Created Event for " + str(my_event.date))
event = {
"summary": my_event.summary,
"location": my_event.location,
"description": my_event.description,
"start": {
"dateTime": my_event.start_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"end": {
"dateTime": my_event.end_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"reminders": {
"useDefault": False,
},
}
return service.events().insert(calendarId=calendar_id, body=event, sendNotifications=True).execute()
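# Illustrative call (commented out; CalendarEvent is not defined in this file,
# so a SimpleNamespace stand-in with the attributes create_event reads is
# sketched here -- running it would insert a real event):
#     from types import SimpleNamespace
#     from datetime import datetime
#     my_event = SimpleNamespace(
#         summary="Dentist", location="High Street", description="Check-up",
#         date=datetime(2021, 6, 1).date(),
#         start_date_time=datetime(2021, 6, 1, 9, 0),
#         end_date_time=datetime(2021, 6, 1, 10, 0))
#     create_event(my_event)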
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""binomial_mix.
Chen Qiao: cqiao@connect.hku.hk
"""
import sys
import warnings
import numpy as np
from scipy.special import gammaln, logsumexp
from scipy.stats import binom
from .model_base import ModelBase
class MixtureBinomial(ModelBase):
"""Mixture of Binomial Models
This class implements EM algorithm for parameter estimation of Mixture
of Binomial models.
Attributes:
n_components (int): number of mixtures.
        tor (float): tolerance threshold for early stopping.
params (numpy float array): parameters of the model, [p_1, p_2, ...,
p_K, pi_1, pi_2, ..., pi_K],
None before parameter estimation.
losses (list): list of negative loglikelihood losses of the training
process, None before parameter estimation.
model_scores (dict): scores for the model, including "BIC" and "ICL" scores
Notes
-----
Because M-step has analytical solution, parameter estimation is fast.
Usage:
em_mb = MixtureBinomial(
n_components=2,
tor=1e-6)
        params = em_mb.fit((ys, ns), max_iters=250, early_stop=True)
Simulation experiment:
import numpy as np
from scipy.stats import bernoulli, binom
from bbmix.models import MixtureBinomial
n_samples = 2000
n_trials = 1000
pis = [0.6, 0.4]
p1, p2 = 0.4, 0.8
gammars = bernoulli.rvs(pis[0], size=n_samples)
n_pos_events = sum(gammars)
n_neg_events = n_samples - n_pos_events
ys_of_type1 = binom.rvs(n_trials, p1, size=n_pos_events)
ys_of_type2 = binom.rvs(n_trials, p2, size=n_neg_events)
ys = np.concatenate((ys_of_type1, ys_of_type2))
        ns = np.ones(n_samples, dtype=int) * n_trials
em_mb = MixtureBinomial(
n_components=2,
tor=1e-20)
params = em_mb.fit((ys, ns), max_iters=250, early_stop=True)
print(params)
print(p1, p2, pis)
print(em_mb.model_scores)
"""
def __init__(self,
n_components=2,
tor=1e-6
):
"""Initialization method
Args:
n_components (int): number of mixtures. Defaults to 2.
            tor (float): tolerance threshold for early-stop training.
Defaults to 1e-6.
"""
super(MixtureBinomial, self).__init__()
self.n_components = n_components
self.tor = tor
def E_step(self, y, n, params):
"""Expectation step
Args:
y (np.array): number of positive events
n (np.array): number of total trials
params (np.array): model parameters
Returns:
np.array: expectation of the latent variables
"""
E_gammas = [None] * self.n_components
for k in range(self.n_components):
p_k, pi_k = params[k], params[k + self.n_components]
E_gammas[k] = y * np.log(p_k) + (n - y) * \
np.log(1 - p_k) + np.log(pi_k)
        # normalize in log space (they haven't been yet)
E_gammas = E_gammas - logsumexp(E_gammas, axis=0)
return np.exp(E_gammas)
def M_step(self, y, n, E_gammas, params):
"""Maximization step
Args:
y (np.array): number of positive events
n (np.array): number of total trials
E_gammas (np.array): results of E step
params (np.array): model parameters
Returns:
np.array: updated model parameters
"""
N_samples = len(n)
for k in range(self.n_components):
E_gammas[k][E_gammas[k] == 0] = 1e-20
params[k] = np.sum(y * E_gammas[k]) / np.sum(n * E_gammas[k])
params[k + self.n_components] = np.sum(E_gammas[k]) / N_samples
return params
def log_likelihood_binomial(self, y, n, p, pi=1.0):
"""log likelihood of data under binomial distribution
Args:
y (np.array): number of positive events
n (np.array): number of total trials
p (float): probability of positive event
pi (float): weight of mixture component
Returns:
np.array: log likelihood of data
"""
return gammaln(n + 1) - (gammaln(y + 1) + gammaln(n - y + 1)) \
+ y * np.log(p) + (n - y) * np.log(1 - p) + np.log(pi)
def log_likelihood_mixture_bin(self, y, n, params):
"""log likelihood of dataset under mixture of binomial distribution
Args:
y (np.array): number of positive events
n (np.array): number of total trials
params (np.array): parameters of the model
Returns:
float: log likelihood of the dataset
"""
        logLik_mat = np.zeros((len(n), self.n_components), dtype=float)
for k in range(self.n_components):
p_k, pi_k = params[k], params[k + self.n_components]
logLik_mat[:, k] = self.log_likelihood_binomial(y, n, p_k, pi_k)
return logsumexp(logLik_mat, axis=1).sum()
def EM(self, y, n, params, max_iters=250, early_stop=False, n_tolerance=10,
verbose=False):
"""EM algorithim
Args:
y (np.array): number of positive events
n (np.array): total number of trials respectively
params (list): init model params
max_iters (int, optional): maximum number of iterations for EM. Defaults to 250.
early_stop (bool, optional): whether early stop training. Defaults to False.
n_tolerance (int): the max number of violations to trigger early stop.
verbose (bool, optional): whether print training information. Defaults to False.
Returns:
np.array: trained parameters
"""
n_tol = n_tolerance
losses = [sys.maxsize]
for ith in range(max_iters):
# E step
E_gammas = self.E_step(y, n, params)
# M step
params = self.M_step(y, n, E_gammas, params)
# record current NLL loss
losses.append(-self.log_likelihood_mixture_bin(y, n, params))
if verbose:
print("=" * 10, "Iteration {}".format(ith + 1), "=" * 10)
print("Current params: {}".format(params))
print("Negative LogLikelihood Loss: {}".format(losses[-1]))
print("=" * 25)
improvement = losses[-2] - losses[-1]
if early_stop:
if improvement < self.tor:
n_tol -= 1
else:
n_tol = n_tolerance
if n_tol == 0:
if verbose:
print("Improvement halts, early stop training.")
break
self.score_model(len(params), len(y), losses[-1], E_gammas)
self.params = params
self.losses = losses[1:]
return params
def _param_init(self, y, n):
"""Initialziation of model parameters
Args:
y (np.array): number of positive events
n (np.array): number of total trials
Returns:
np.array: initialized model parameters
"""
return np.concatenate([np.random.uniform(0.49, 0.51, self.n_components),
np.random.uniform(0.4, 0.6, self.n_components)])
def fit(self, data, max_iters=250, early_stop=False, pseudocount=0.1,
n_tolerance=10, verbose=False):
"""Fit function
Args:
data (tuple of arrays): y, n: number of positive events and total number of trials respectively
max_iters (int, optional): maximum number of iterations for EM. Defaults to 250.
early_stop (bool, optional): whether early stop training. Defaults to False.
pseudocount (float) : add pseudocount if data is zero
n_tolerance (int): the max number of violations to trigger early stop.
verbose (bool, optional): whether print training information. Defaults to False.
Returns:
np.array: trained parameters
"""
y, n = data
self.nzero_prop = np.sum(y > 0)/np.shape(y)[0]
y, n = self._preprocess(data, pseudocount)
init_params = self._param_init(y, n)
if verbose:
print("=" * 25)
print("Init params: {}".format(init_params))
print("=" * 25)
params = self.EM(y, n, init_params, max_iters=max_iters,
early_stop=early_stop, verbose=verbose,
n_tolerance=n_tolerance)
        if self.n_components == 2 and np.abs(params[0] - params[1]) < 1e-4 and verbose:
            print("Collapsed to one component; please check the proportion of non-zero counts.")
return params
def sample(self, n_trials):
"""Generate data from fitted parameters
n_trails :
Args:
n_trails (array_like): total number of trials
Returns:
np.array: ys generated from the fitted distribution
"""
        if not hasattr(self, 'params'):
            raise Exception("Error: please fit the model or set params before sample()")
mus = self.params[:self.n_components]
pis = self.params[self.n_components: 2 * self.n_components]
labels = np.random.choice(self.n_components, size=n_trials.shape, p=pis)
ys_out = np.zeros(n_trials.shape, dtype=int)
for i in range(self.n_components):
_idx = np.where(labels == i)
ys_out[_idx] = binom.rvs(n_trials[_idx].astype(np.int32), mus[i])
return ys_out
if __name__ == "__main__":
import numpy as np
from scipy.stats import bernoulli, binom
from bbmix.models import MixtureBinomial
n_samples = 2000
n_trials = 1000
pis = [0.6, 0.4]
p1, p2 = 0.4, 0.8
gammars = bernoulli.rvs(pis[0], size=n_samples)
n_pos_events = sum(gammars)
n_neg_events = n_samples - n_pos_events
ys_of_type1 = binom.rvs(n_trials, p1, size=n_pos_events)
ys_of_type2 = binom.rvs(n_trials, p2, size=n_neg_events)
ys = np.concatenate((ys_of_type1, ys_of_type2))
    ns = np.ones(n_samples, dtype=int) * n_trials
em_mb = MixtureBinomial(
n_components=2,
tor=1e-20)
params = em_mb.fit((ys, ns), max_iters=250, early_stop=True)
print(params)
print(p1, p2, pis)
print(em_mb.model_scores)
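    # Illustrative follow-up (assumes the fit above succeeded): draw synthetic
    # counts from the fitted mixture with the same trial counts.
    ys_sim = em_mb.sample(ns)
    print("simulated counts:", ys_sim[:10])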
|
nilq/baby-python
|
python
|
from keras.layers import Conv2D, SeparableConv2D, MaxPooling2D, Flatten, Dense
from keras.layers import Dropout, Input, BatchNormalization, Activation, add, GlobalAveragePooling2D
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.utils import plot_model
from keras import callbacks
from keras import models
from keras.applications import Xception
from utils_datagen import TrainValTensorBoard
from utils_basic import chk_n_mkdir
from models.base_model import BaseModel
class XCEPTION_APP(BaseModel):
def __init__(self, output_directory, input_shape, n_classes, verbose=False):
self.output_directory = output_directory + '/xception_kapp'
chk_n_mkdir(self.output_directory)
self.model = self.build_model(input_shape, n_classes)
if verbose:
self.model.summary()
self.verbose = verbose
self.model.save_weights(self.output_directory + '/model_init.hdf5')
def build_model(self, input_shape, n_classes):
        # Load the Xception convolutional base
xception_conv = Xception(weights='imagenet', include_top=False, input_shape=input_shape)
        # Freeze all layers of the convolutional base
for layer in xception_conv.layers:
layer.trainable = False
# Create the model
model = models.Sequential()
        # Add the Xception convolutional base
model.add(xception_conv)
# Add new layers
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax', name='predictions'))
        # summarize and visualize the assembled model
model.summary()
plot_model(model, to_file=self.output_directory + '/model_graph.png', show_shapes=True, show_layer_names=True)
model.compile(loss=categorical_crossentropy, optimizer=Adam(lr=0.01), metrics=['acc'])
# model save
file_path = self.output_directory + '/best_model.hdf5'
model_checkpoint = callbacks.ModelCheckpoint(filepath=file_path, monitor='loss', save_best_only=True)
# Tensorboard log
log_dir = self.output_directory + '/tf_logs'
chk_n_mkdir(log_dir)
tb_cb = TrainValTensorBoard(log_dir=log_dir)
self.callbacks = [model_checkpoint, tb_cb]
return model
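# Illustrative construction (commented out; the path, input shape, and class
# count are placeholders, and training data/generators are project-specific):
#     clf = XCEPTION_APP('./output', input_shape=(299, 299, 3), n_classes=5,
#                        verbose=True)
#     # clf.model.fit(x_train, y_train, callbacks=clf.callbacks, epochs=10)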
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from ..models import PermissionModel as Model
from ..models import GroupPermissionModel
from .base_dao import BaseDao
class PermissionDao(BaseDao):
def add_permission(self, permission):
        # If a permission with the same app and codename exists, update it;
        # otherwise insert a new one.
        new = Model.from_dict(permission)
        existing = self.get_permission_by_app_and_codename(
            new.app, new.codename)
        if existing:
            new.id = existing.id
self.session.merge(new)
self.session.commit()
else:
self.session.add(new)
self.session.commit()
def get_permission_list(self):
query = self.session.query(Model)
return [_.to_dict() for _ in query.all()]
def delete_permission_list(self):
query = self.session.query(Model)
query.delete()
self.session.commit()
def get_permission_by_app_and_codename(self, app, codename):
query = self.session.query(Model)
permission = query.filter(
Model.app == app, Model.codename == codename).first()
return permission
def delete_permission_by_app_and_codename(self, app, codename):
permission = self.get_permission_by_app_and_codename(app, codename)
if permission:
self.session.delete(permission)
self.session.commit()
def count(self):
query = self.session.query(Model)
return query.count()
class GroupPermissionDao(BaseDao):
def add_group_permission(self, group_id, codename, app='nebula', extra_settings=''):
        # Look up the permission by app and codename, then add the group permission
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
group_permission = GroupPermissionModel.from_dict(dict(
group_id=group_id,
permission_id=permission.id,
extra_settings=extra_settings
))
self.session.add(group_permission)
self.session.commit()
def update_group_permission(self, group_id, codename, app='nebula', extra_settings=''):
        # Look up the permission by app and codename, then update the group permission
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
group_permission = GroupPermissionModel.from_dict(dict(
group_id=group_id,
permission_id=permission.id,
extra_settings=extra_settings
))
query = self.session.query(GroupPermissionModel)
existing = query.filter(GroupPermissionModel.group_id == group_id,
GroupPermissionModel.permission_id == permission.id).first()
if existing:
group_permission.id = existing.id
self.session.merge(group_permission)
self.session.commit()
else:
self.session.add(group_permission)
self.session.commit()
def get_group_permission(self, group_id, codename, app='nebula'):
        # Look up the permission by app and codename, then fetch the group-permission record
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
query = self.session.query(GroupPermissionModel)
group_permission = query.filter(
GroupPermissionModel.group_id == group_id, GroupPermissionModel.permission_id == permission.id).first()
return group_permission
def add_group_strategy_block(self, be_blocked_id, blocked_id):
        # Save the strategy-view blocklist: be_blocked_id is the id of the group barred from viewing, blocked_id is the id of the group that blocks other groups from viewing its strategies
group_permission = self.get_group_permission(
be_blocked_id, 'view_strategy')
if group_permission:
extra_settings = json.loads(group_permission.extra_settings)
be_blocked_settings = extra_settings.get('be_blocked', [])
if blocked_id not in be_blocked_settings:
be_blocked_settings.append(blocked_id)
extra_settings['be_blocked'] = be_blocked_settings
self.update_group_permission(
be_blocked_id, 'view_strategy', extra_settings=json.dumps(extra_settings))
else:
extra_settings = {'be_blocked': [blocked_id]}
self.add_group_permission(
be_blocked_id, 'view_strategy', extra_settings=json.dumps(extra_settings))
def delete_group_strategy_block(self, be_blocked_id, blocked_id):
        # Remove an entry from the strategy-view blocklist: be_blocked_id is the id of the group barred from viewing, blocked_id is the id of the group that blocks other groups from viewing its strategies
group_permission = self.get_group_permission(
be_blocked_id, 'view_strategy')
if group_permission:
extra_settings = json.loads(group_permission.extra_settings)
be_blocked_settings = extra_settings.get('be_blocked', [])
if blocked_id in be_blocked_settings:
be_blocked_settings.remove(blocked_id)
extra_settings['be_blocked'] = be_blocked_settings
self.update_group_permission(
be_blocked_id, 'view_strategy', extra_settings=json.dumps(extra_settings))
def get_group_strategy_block(self, group_id):
        # Strategy-view blocklist for this group
view_strategy = self.get_group_extra_settings(
group_id, 'view_strategy', app='nebula')
return json.loads(view_strategy) if view_strategy else {}
def get_group_extra_settings(self, group_id, codename, app='nebula'):
permission = PermissionDao().get_permission_by_app_and_codename(app, codename)
if permission:
query = self.session.query(GroupPermissionModel)
group_permission = query.filter(
GroupPermissionModel.group_id == group_id, GroupPermissionModel.permission_id == permission.id).first()
if group_permission:
return group_permission.extra_settings
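# Illustrative use (commented out; session wiring comes from BaseDao, and the
# dict keys assume PermissionModel has 'app' and 'codename' fields):
#     dao = PermissionDao()
#     dao.add_permission({'app': 'nebula', 'codename': 'view_strategy'})
#     gp_dao = GroupPermissionDao()
#     gp_dao.add_group_strategy_block(be_blocked_id=2, blocked_id=1)
#     print(gp_dao.get_group_strategy_block(2))  # -> {'be_blocked': [1]}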
|
nilq/baby-python
|
python
|
from itertools import permutations
with open("input.txt") as f:
data = [int(i) for i in f.read().split("\n")]
preamble = 25
for d in range(preamble, len(data)):
    # check the `preamble` numbers immediately before data[d]
    numbers = data[d - preamble:d]
target = data[d]
sol = [nums for nums in permutations(numbers, 2) if sum(nums) == target]
if not sol:
print(f"Target is: {target}")
|
nilq/baby-python
|
python
|
import collections
import pathlib
import time
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from omegaconf import OmegaConf
from gdsfactory import components
from gdsfactory.config import CONFIG, logger
from gdsfactory.doe import get_settings_list
from gdsfactory.placer import (
build_components,
doe_exists,
load_doe_component_names,
save_doe,
)
from gdsfactory.types import PathType
from gdsfactory.write_doe import write_doe_metadata
factory = {
i: getattr(components, i)
for i in dir(components)
if not i.startswith("_") and callable(getattr(components, i))
}
def separate_does_from_templates(dicts: Dict[str, Any]) -> Any:
type_to_dict = {}
does = {}
for name, d in dicts.items():
if "type" in d.keys():
template_type = d.pop("type")
if template_type not in type_to_dict:
type_to_dict[template_type] = {}
type_to_dict[template_type][name] = d
else:
does[name] = d
return does, type_to_dict
def update_dicts_recurse(
target_dict: Dict[
str, Union[List[int], str, Dict[str, List[int]], Dict[str, str], bool]
],
default_dict: Dict[str, Union[bool, Dict[str, Union[int, str]], int, str]],
) -> Dict[str, Any]:
target_dict = target_dict.copy()
default_dict = default_dict.copy()
for k, v in default_dict.items():
if k not in target_dict:
target_dict[k] = v
else:
if isinstance(target_dict[k], (dict, collections.OrderedDict)):
target_dict[k] = update_dicts_recurse(target_dict[k], default_dict[k])
return target_dict
def save_doe_use_template(doe, doe_root_path=None) -> None:
"""Write a "content.txt" pointing to the DOE used as a template"""
doe_name = doe["name"]
doe_template = doe["doe_template"]
doe_root_path = doe_root_path or CONFIG["cache_doe_directory"]
doe_dir = doe_root_path / doe_name
doe_dir.mkdir(exist_ok=True)
content_file = doe_dir / "content.txt"
with open(content_file, "w") as fw:
fw.write(f"TEMPLATE: {doe_template}")
def write_doe(
doe,
component_factory=factory,
doe_root_path: Optional[PathType] = None,
doe_metadata_path: Optional[PathType] = None,
overwrite: bool = False,
precision: float = 1e-9,
**kwargs,
) -> None:
doe_name = doe["name"]
list_settings = doe["list_settings"]
    # Generate each component using the component factory
component_type = doe["component"]
components = build_components(
component_type, list_settings, component_factory=component_factory
)
component_names = [c.name for c in components]
save_doe(doe_name, components, doe_root_path=doe_root_path, precision=precision)
write_doe_metadata(
doe_name=doe["name"],
cell_names=component_names,
list_settings=doe["list_settings"],
doe_settings=kwargs,
doe_metadata_path=doe_metadata_path,
)
def load_does(
filepath: PathType, defaults: Optional[Dict[str, bool]] = None
) -> Tuple[Any, Any]:
"""Load_does from file."""
does = {}
defaults = defaults or {"do_permutation": True, "settings": {}}
data = OmegaConf.load(filepath)
data = OmegaConf.to_container(data)
mask = data.pop("mask")
for doe_name, doe in data.items():
for k in defaults:
if k not in doe:
doe[k] = defaults[k]
does[doe_name] = doe
return does, mask
def generate_does(
filepath: PathType,
component_factory: Dict[str, Callable] = factory,
doe_root_path: PathType = CONFIG["cache_doe_directory"],
doe_metadata_path: PathType = CONFIG["doe_directory"],
n_cores: int = 8,
overwrite: bool = False,
precision: float = 1e-9,
cache: bool = False,
) -> None:
"""Generates a DOEs of components specified in a yaml file
allows for each DOE to have its own x and y spacing (more flexible than method1)
similar to write_doe
"""
doe_root_path = pathlib.Path(doe_root_path)
doe_metadata_path = pathlib.Path(doe_metadata_path)
doe_root_path.mkdir(parents=True, exist_ok=True)
doe_metadata_path.mkdir(parents=True, exist_ok=True)
dicts, mask_settings = load_does(filepath)
does, templates_by_type = separate_does_from_templates(dicts)
dict_templates = (
templates_by_type["template"] if "template" in templates_by_type else {}
)
default_use_cached_does = (
mask_settings["cache"] if "cache" in mask_settings else cache
)
list_args = []
for doe_name, doe in does.items():
doe["name"] = doe_name
component = doe["component"]
if component not in component_factory:
raise ValueError(f"{component} not in {component_factory.keys()}")
if "template" in doe:
# The keyword template is used to enrich the dictionary from the template
templates = doe["template"]
if not isinstance(templates, list):
templates = [templates]
for template in templates:
try:
doe = update_dicts_recurse(doe, dict_templates[template])
except Exception:
print(template, "does not exist")
raise
do_permutation = doe.pop("do_permutation")
settings = doe["settings"]
doe["list_settings"] = get_settings_list(do_permutation, **settings)
list_args += [doe]
does_running = []
start_times = {}
finish_times = {}
doe_name_to_process = {}
while list_args:
while len(does_running) < n_cores:
if not list_args:
break
doe = list_args.pop()
doe_name = doe["name"]
# Only launch a build process if we do not use the cache
# Or if the DOE is not built
list_settings = doe["list_settings"]
use_cached_does = (
default_use_cached_does if "cache" not in doe else doe["cache"]
)
_doe_exists = False
if "doe_template" in doe:
# this DOE points to another existing component
_doe_exists = True
logger.info("Using template - {}".format(doe_name))
save_doe_use_template(doe)
elif use_cached_does:
_doe_exists = doe_exists(doe_name, list_settings)
if _doe_exists:
logger.info("Cached - {}".format(doe_name))
if overwrite:
component_names = load_doe_component_names(doe_name)
write_doe_metadata(
doe_name=doe["name"],
cell_names=component_names,
list_settings=doe["list_settings"],
doe_metadata_path=doe_metadata_path,
)
if not _doe_exists:
start_times[doe_name] = time.time()
p = Process(
target=write_doe,
args=(doe, component_factory),
kwargs={
"doe_root_path": doe_root_path,
"doe_metadata_path": doe_metadata_path,
"overwrite": overwrite,
"precision": precision,
},
)
doe_name_to_process[doe_name] = p
does_running += [doe_name]
try:
p.start()
except Exception:
print("Issue starting process for {}".format(doe_name))
print(type(component_factory))
raise
to_rm = []
for i, doe_name in enumerate(does_running):
_p = doe_name_to_process[doe_name]
if not _p.is_alive():
to_rm += [i]
finish_times[doe_name] = time.time()
dt = finish_times[doe_name] - start_times[doe_name]
line = "Done - {} ({:.1f}s)".format(doe_name, dt)
logger.info(line)
for i in to_rm[::-1]:
does_running.pop(i)
time.sleep(0.001)
while does_running:
to_rm = []
for i, _doe_name in enumerate(does_running):
_p = doe_name_to_process[_doe_name]
if not _p.is_alive():
to_rm += [i]
for i in to_rm[::-1]:
does_running.pop(i)
time.sleep(0.05)
if __name__ == "__main__":
filepath = CONFIG["samples_path"] / "mask" / "does.yml"
generate_does(filepath, precision=2e-9)
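# Sketch of the YAML layout load_does() expects (names and values are
# illustrative; 'mask' is required, and every other top-level key defines a DOE):
#     mask:
#       cache: true
#     width_sweep:
#       component: straight
#       do_permutation: true
#       settings:
#         width: [0.4, 0.5]
#         length: [5, 10]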
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
import logging
import requests
import time
from extensions import valid_tagging_extensions
from readSettings import ReadSettings
from autoprocess import plex
from tvdb_mp4 import Tvdb_mp4
from mkvtomp4 import MkvtoMp4
from post_processor import PostProcessor
from logging.config import fileConfig
logpath = '/var/log/sickbeard_mp4_automator'
if os.environ.get('sonarr_eventtype') == "Test":
sys.exit(0)
if os.name == 'nt':
logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
try:
os.mkdir(logpath)
    except Exception:
logpath = os.path.dirname(sys.argv[0])
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("SonarrPostConversion")
log.info("Sonarr extra script post processing started.")
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
inputfile = os.environ.get('sonarr_episodefile_path')
original = os.environ.get('sonarr_episodefile_scenename')
tvdb_id = int(os.environ.get('sonarr_series_tvdbid'))
season = int(os.environ.get('sonarr_episodefile_seasonnumber'))
try:
episode = int(os.environ.get('sonarr_episodefile_episodenumbers'))
except Exception:
episode = int(os.environ.get('sonarr_episodefile_episodenumbers').split(",")[0])
converter = MkvtoMp4(settings)
log.debug("Input file: %s." % inputfile)
log.debug("Original name: %s." % original)
log.debug("TVDB ID: %s." % tvdb_id)
log.debug("Season: %s episode: %s." % (season, episode))
if converter.validSource(inputfile):
log.info("Processing %s." % inputfile)
output = converter.process(inputfile, original=original)
if output:
# Tag with metadata
if settings.tagfile and output['output_extension'] in valid_tagging_extensions:
log.info("Tagging %s with ID %s season %s episode %s." % (inputfile, tvdb_id, season, episode))
try:
tagmp4 = Tvdb_mp4(tvdb_id, season, episode, original, language=settings.taglanguage)
tagmp4.setHD(output['x'], output['y'])
tagmp4.writeTags(output['output'], settings.artwork, settings.thumbnail)
            except Exception:
log.error("Unable to tag file")
# Copy to additional locations
output_files = converter.replicate(output['output'])
# Update Sonarr to continue monitored status
try:
host = settings.Sonarr['host']
port = settings.Sonarr['port']
webroot = settings.Sonarr['web_root']
apikey = settings.Sonarr['apikey']
if apikey != '':
try:
ssl = int(settings.Sonarr['ssl'])
                except Exception:
ssl = 0
if ssl:
protocol = "https://"
else:
protocol = "http://"
seriesID = os.environ.get('sonarr_series_id')
log.debug("Sonarr host: %s." % host)
log.debug("Sonarr port: %s." % port)
log.debug("Sonarr webroot: %s." % webroot)
log.debug("Sonarr apikey: %s." % apikey)
log.debug("Sonarr protocol: %s." % protocol)
log.debug("Sonarr sonarr_series_id: %s." % seriesID)
headers = {'X-Api-Key': apikey}
# First trigger rescan
payload = {'name': 'RescanSeries', 'seriesId': seriesID}
url = protocol + host + ":" + port + webroot + "/api/command"
r = requests.post(url, json=payload, headers=headers)
rstate = r.json()
log.info("Sonarr response: ID %d %s." % (rstate['id'], rstate['state']))
log.info(str(rstate)) # debug
# Then wait for it to finish
url = protocol + host + ":" + port + webroot + "/api/command/" + str(rstate['id'])
log.info("Requesting episode information from Sonarr for series ID %s." % seriesID)
r = requests.get(url, headers=headers)
command = r.json()
attempts = 0
while command['state'].lower() not in ['complete', 'completed'] and attempts < 6:
log.info(str(command['state']))
time.sleep(10)
r = requests.get(url, headers=headers)
command = r.json()
attempts += 1
log.info("Command completed")
log.info(str(command))
# Then get episode information
url = protocol + host + ":" + port + webroot + "/api/episode?seriesId=" + seriesID
log.info("Requesting updated episode information from Sonarr for series ID %s." % seriesID)
r = requests.get(url, headers=headers)
payload = r.json()
sonarrepinfo = None
for ep in payload:
if int(ep['episodeNumber']) == episode and int(ep['seasonNumber']) == season:
sonarrepinfo = ep
break
sonarrepinfo['monitored'] = True
# Then set that episode to monitored
log.info("Sending PUT request with following payload:") # debug
log.info(str(sonarrepinfo)) # debug
url = protocol + host + ":" + port + webroot + "/api/episode/" + str(sonarrepinfo['id'])
r = requests.put(url, json=sonarrepinfo, headers=headers)
success = r.json()
log.info("PUT request returned:") # debug
log.info(str(success)) # debug
log.info("Sonarr monitoring information updated for episode %s." % success['title'])
else:
log.error("Your Sonarr API Key can not be blank. Update autoProcess.ini.")
        except Exception:
log.exception("Sonarr monitor status update failed.")
# Run any post process scripts
if settings.postprocess:
post_processor = PostProcessor(output_files, log)
post_processor.setTV(tvdb_id, season, episode)
post_processor.run_scripts()
plex.refreshPlex(settings, 'show', log)
sys.exit(0)
|
nilq/baby-python
|
python
|
def do_print():
    print("hello world")
def add(a, b):
return a + b
def names_of_three_people(a, b, c):
return a['name'] + " and " + b['name'] + " and " + c['name']
def divide(a, b):
return a / b
def float_divide(a, b):
return float(a) / float(b)
def func_return_struct(name, age, hobby1, hobby2):
return {
"name": name,
"age": age,
"hobby": [
hobby1,
hobby2
]
}
def first_param_and_other_params(first, **other):
total = other
total['first'] = first
return total
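# Illustrative calls (not from the original):
#     add(1, 2)                                   # -> 3
#     float_divide(1, 3)                          # -> 0.3333...
#     first_param_and_other_params(1, a=2, b=3)   # -> {'a': 2, 'b': 3, 'first': 1}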
|
nilq/baby-python
|
python
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields, post_load, validate
from typing import Optional
from ..schema import PatchedSchemaMeta
from ..fields import ArmVersionedStr
from azure.ml.constants import AssetType
class InputEntry:
def __init__(self, *, mode: Optional[str] = None, data: str):
self.data = data
self.mode = mode
INPUT_MODE_MOUNT = "Mount"
INPUT_MODE_DOWNLOAD = "Download"
INPUT_MODES = [INPUT_MODE_MOUNT, INPUT_MODE_DOWNLOAD]
class InputEntrySchema(metaclass=PatchedSchemaMeta):
mode = fields.Str(validate=validate.OneOf(INPUT_MODES))
data = ArmVersionedStr(asset_type=AssetType.DATA)
@post_load
def make(self, data, **kwargs):
return InputEntry(**data)
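# Illustrative load (commented out; standard marshmallow usage, but the exact
# accepted 'data' string format depends on ArmVersionedStr and is an
# assumption here):
#     entry = InputEntrySchema().load({"mode": "Mount", "data": "azureml:mydata:1"})
#     assert isinstance(entry, InputEntry)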
|
nilq/baby-python
|
python
|
from typing import Union, List, Dict, Optional
from py_expression_eval import Parser # type: ignore
import math
from . import error
def add(num1: Union[int, float], num2: Union[int, float], *args) -> Union[int, float]:
"""Adds given numbers"""
sum: Union[int, float] = num1 + num2
for num in args:
sum += num
return sum
def subtract(
num1: Union[int, float], num2: Union[int, float], *args
) -> Union[int, float]:
"""Subtracts given numbers"""
sub: Union[int, float] = num1 - num2
for num in args:
sub -= num
return sub
def multiply(num1: Union[int, float], *args) -> Union[int, float]:
"""Multiplies given numbers"""
product: Union[int, float] = num1
for num in args:
product = product * num
return product
def divide(
num1: Union[int, float], num2: Union[int, float], type: str
) -> Union[int, float]:
"""Divides given numbers"""
if type.lower() == "int":
int_quotient: Union[int, float] = num1 / num2
return int_quotient
if type.lower() == "float":
float_quotient: Union[int, float] = num1 // num2
return float_quotient
raise error.UnknownDivisionTypeError(type)
def floatDiv(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Divides given numbers"""
quotient: Union[int, float] = num1 / num2
return quotient
def intDiv(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Divides given numbers and returns rounded off integer as result"""
quotient: Union[int, float] = num1 // num2
return quotient
def expo(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Raises given number to given power and returns result"""
expo: Union[int, float] = num1 ** num2
return expo
def mod(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
"""Returns remainder of a division"""
remain: Union[int, float] = num1 % num2
return remain
def evalExp(exp: str, vars_: Optional[Dict[str, int]] = None):
    """Evaluates given mathematical expression"""
    vars_ = vars_ if vars_ is not None else {}
    parser = Parser()
solution: Union[int, float] = parser.parse(exp).evaluate(vars_)
return solution
def avg(listOfNos: Union[List[int], List[float]]) -> float:
"""Return average of given numbers"""
avg: float = 0.0
for num in listOfNos:
avg += num
avg /= len(listOfNos)
return avg
def factorial(num: int) -> int:
"""Returns factorial of a number"""
factorial: int = 1
    for i in range(1, num + 1):
factorial *= i
return factorial
def ceil(num: Union[int, float]) -> int:
"""Returns the number rounded up"""
ceil: int = math.ceil(num)
return ceil
def floor(num: Union[int, float]) -> int:
"""Returns the number rounded down"""
floor: int = math.floor(num)
return floor
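# Minimal sanity checks (illustrative, not part of the original module; they
# only run when the module is executed directly):
if __name__ == "__main__":
    assert add(1, 2, 3) == 6
    assert divide(7, 2, "int") == 3
    assert expo(2, 10) == 1024
    assert factorial(5) == 120
    assert avg([1, 2, 3]) == 2.0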
|
nilq/baby-python
|
python
|
#
# This file is part of DroneBridge: https://github.com/seeul8er/DroneBridge
#
# Copyright 2017 Wolfgang Christl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import mmap
import time
from shmemctypes import ShmemRawArray
class wifi_adapter_rx_status_t(ctypes.Structure):
_fields_ = [
('received_packet_cnt', ctypes.c_uint32),
('wrong_crc_cnt', ctypes.c_uint32),
('current_signal_dbm', ctypes.c_int8),
('type', ctypes.c_int8)
]
class WBC_RX_Status(ctypes.Structure):
_fields_ = [
('last_update', ctypes.c_int32),
('received_block_cnt', ctypes.c_uint32),
('damaged_block_cnt', ctypes.c_uint32),
('lost_packet_cnt', ctypes.c_uint32),
('received_packet_cnt', ctypes.c_uint32),
('tx_restart_cnt', ctypes.c_uint32),
('kbitrate', ctypes.c_uint32),
('wifi_adapter_cnt', ctypes.c_uint32),
('adapter', wifi_adapter_rx_status_t * 8)
]
def open_shm():
f = open("/wifibroadcast_rx_status_0", "r+b")
return mmap.mmap(f.fileno(), 0)
def read_wbc_status(mapped_structure):
    wbc_status = WBC_RX_Status.from_buffer(mapped_structure)
    print(f"{wbc_status.kbitrate} kbit/s, {wbc_status.damaged_block_cnt} damaged blocks")
def main():
print("DB_WBC_STATUSREADER: starting")
shared_data = ShmemRawArray(WBC_RX_Status, 0, "/wifibroadcast_rx_status_0", False)
#mymap = open_shm()
    while True:
for d in shared_data:
print(str(d.received_block_cnt))
time.sleep(1)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import errno
import gc
# from collections import namedtuple
import math
import os
import os.path
import time
from functools import lru_cache
from pathlib import Path
import numpy as np
import pandas as pd
import artistools as at
@lru_cache(maxsize=8)
def get_modeldata(inputpath=Path(), dimensions=None, get_abundances=False, derived_cols=False):
"""
Read an artis model.txt file containing cell velocities, density, and abundances of radioactive nuclides.
Arguments:
- inputpath: either a path to model.txt file, or a folder containing model.txt
- dimensions: number of dimensions in input file, or None for automatic
- get_abundances: also read elemental abundances (abundances.txt) and
merge with the output DataFrame
Returns (dfmodel, t_model_init_days)
- dfmodel: a pandas DataFrame with a row for each model grid cell
- t_model_init_days: the time in days at which the snapshot is defined
"""
assert dimensions in [1, 3, None]
inputpath = Path(inputpath)
if os.path.isdir(inputpath):
modelpath = inputpath
filename = at.firstexisting(['model.txt.xz', 'model.txt.gz', 'model.txt'], path=inputpath)
elif os.path.isfile(inputpath): # passed in a filename instead of the modelpath
filename = inputpath
modelpath = Path(inputpath).parent
elif not inputpath.exists() and inputpath.parts[0] == 'codecomparison':
modelpath = inputpath
_, inputmodel, _ = modelpath.parts
filename = Path(at.config['codecomparisonmodelartismodelpath'], inputmodel, 'model.txt')
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), inputpath)
headerrows = 0
with at.misc.zopen(filename, 'rt') as fmodel:
gridcellcount = int(fmodel.readline())
t_model_init_days = float(fmodel.readline())
headerrows += 2
t_model_init_seconds = t_model_init_days * 24 * 60 * 60
filepos = fmodel.tell()
# if the next line is a single float then the model is 3D
try:
vmax_cmps = float(fmodel.readline()) # velocity max in cm/s
xmax_tmodel = vmax_cmps * t_model_init_seconds # xmax = ymax = zmax
headerrows += 1
if dimensions is None:
print("Detected 3D model file")
dimensions = 3
elif dimensions != 3:
print(f" {dimensions} were specified but file appears to be 3D")
assert False
except ValueError:
if dimensions is None:
print("Detected 1D model file")
dimensions = 1
elif dimensions != 1:
print(f" {dimensions} were specified but file appears to be 1D")
assert False
fmodel.seek(filepos) # undo the readline() and go back
columns = None
filepos = fmodel.tell()
line = fmodel.readline()
if line.startswith('#'):
headerrows += 1
columns = line.lstrip('#').split()
else:
fmodel.seek(filepos) # undo the readline() and go back
ncols_file = len(fmodel.readline().split())
if dimensions > 1:
# columns split over two lines
ncols_file += len(fmodel.readline().split())
if columns is not None:
assert ncols_file == len(columns)
elif dimensions == 1:
columns = ['inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56',
'X_Co56', 'X_Fe52', 'X_Cr48', 'X_Ni57', 'X_Co57'][:ncols_file]
elif dimensions == 3:
columns = ['inputcellid', 'inputpos_a', 'inputpos_b', 'inputpos_c', 'rho',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48', 'X_Ni57', 'X_Co57'][:ncols_file]
# number of grid cell steps along an axis (same for xyz)
ncoordgridx = int(round(gridcellcount ** (1. / 3.)))
ncoordgridy = int(round(gridcellcount ** (1. / 3.)))
ncoordgridz = int(round(gridcellcount ** (1. / 3.)))
assert (ncoordgridx * ncoordgridy * ncoordgridz) == gridcellcount
if dimensions == 1:
dfmodel = pd.read_csv(
filename, delim_whitespace=True, header=None, names=columns, skiprows=headerrows, nrows=gridcellcount)
else:
dfmodel = pd.read_csv(
filename, delim_whitespace=True, header=None,
skiprows=lambda x: x < headerrows or (x - headerrows - 1) % 2 == 0, names=columns[:5],
nrows=gridcellcount)
dfmodeloddlines = pd.read_csv(
filename, delim_whitespace=True, header=None,
skiprows=lambda x: x < headerrows or (x - headerrows - 1) % 2 == 1, names=columns[5:],
nrows=gridcellcount)
assert len(dfmodel) == len(dfmodeloddlines)
dfmodel = dfmodel.merge(dfmodeloddlines, left_index=True, right_index=True)
del dfmodeloddlines
if len(dfmodel) > gridcellcount:
dfmodel = dfmodel.iloc[:gridcellcount]
assert len(dfmodel) == gridcellcount
dfmodel.index.name = 'cellid'
# dfmodel.drop('inputcellid', axis=1, inplace=True)
if dimensions == 1:
dfmodel['velocity_inner'] = np.concatenate([[0.], dfmodel['velocity_outer'].values[:-1]])
dfmodel.eval(
'cellmass_grams = 10 ** logrho * 4. / 3. * 3.14159265 * (velocity_outer ** 3 - velocity_inner ** 3)'
'* (1e5 * @t_model_init_seconds) ** 3', inplace=True)
vmax_cmps = dfmodel.velocity_outer.max() * 1e5
elif dimensions == 3:
wid_init = at.misc.get_wid_init_at_tmodel(modelpath, gridcellcount, t_model_init_days, xmax_tmodel)
dfmodel.eval('cellmass_grams = rho * @wid_init ** 3', inplace=True)
if 'pos_x_min' in dfmodel.columns:
print("Cell positions in model.txt are defined in the header")
else:
cellid = dfmodel.index.values
xindex = cellid % ncoordgridx
yindex = (cellid // ncoordgridx) % ncoordgridy
zindex = (cellid // (ncoordgridx * ncoordgridy)) % ncoordgridz
dfmodel['pos_x_min'] = -xmax_tmodel + 2 * xindex * xmax_tmodel / ncoordgridx
dfmodel['pos_y_min'] = -xmax_tmodel + 2 * yindex * xmax_tmodel / ncoordgridy
dfmodel['pos_z_min'] = -xmax_tmodel + 2 * zindex * xmax_tmodel / ncoordgridz
def vectormatch(vec1, vec2):
xclose = np.isclose(vec1[0], vec2[0], atol=xmax_tmodel / ncoordgridx)
yclose = np.isclose(vec1[1], vec2[1], atol=xmax_tmodel / ncoordgridy)
zclose = np.isclose(vec1[2], vec2[2], atol=xmax_tmodel / ncoordgridz)
return all([xclose, yclose, zclose])
posmatch_xyz = True
posmatch_zyx = True
# important cell numbers to check for coordinate column order
indexlist = [0, ncoordgridx - 1, (ncoordgridx - 1) * (ncoordgridy - 1),
(ncoordgridx - 1) * (ncoordgridy - 1) * (ncoordgridz - 1)]
for index in indexlist:
cell = dfmodel.iloc[index]
if not vectormatch([cell.inputpos_a, cell.inputpos_b, cell.inputpos_c],
[cell.pos_x_min, cell.pos_y_min, cell.pos_z_min]):
posmatch_xyz = False
if not vectormatch([cell.inputpos_a, cell.inputpos_b, cell.inputpos_c],
[cell.pos_z_min, cell.pos_y_min, cell.pos_x_min]):
posmatch_zyx = False
assert posmatch_xyz != posmatch_zyx # one option must match
if posmatch_xyz:
print("Cell positions in model.txt are consistent with calculated values when x-y-z column order")
if posmatch_zyx:
print("Cell positions in model.txt are consistent with calculated values when z-y-x column order")
if get_abundances:
if dimensions == 3:
print('Getting abundances')
abundancedata = get_initialabundances(modelpath)
dfmodel = dfmodel.merge(abundancedata, how='inner', on='inputcellid')
if derived_cols:
add_derived_cols_to_modeldata(dfmodel, derived_cols, dimensions, t_model_init_seconds, wid_init, modelpath)
return dfmodel, t_model_init_days, vmax_cmps
def add_derived_cols_to_modeldata(dfmodel, derived_cols, dimensions=None, t_model_init_seconds=None, wid_init=None,
modelpath=None):
"""add columns to modeldata using e.g. derived_cols = ('velocity', 'Ye')"""
if dimensions is None:
dimensions = get_dfmodel_dimensions(dfmodel)
if dimensions == 3 and 'velocity' in derived_cols:
dfmodel['vel_x_min'] = dfmodel['pos_x_min'] / t_model_init_seconds
dfmodel['vel_y_min'] = dfmodel['pos_y_min'] / t_model_init_seconds
dfmodel['vel_z_min'] = dfmodel['pos_z_min'] / t_model_init_seconds
dfmodel['vel_x_max'] = (dfmodel['pos_x_min'] + wid_init) / t_model_init_seconds
dfmodel['vel_y_max'] = (dfmodel['pos_y_min'] + wid_init) / t_model_init_seconds
dfmodel['vel_z_max'] = (dfmodel['pos_z_min'] + wid_init) / t_model_init_seconds
dfmodel['vel_x_mid'] = (dfmodel['pos_x_min'] + (0.5 * wid_init)) / t_model_init_seconds
dfmodel['vel_y_mid'] = (dfmodel['pos_y_min'] + (0.5 * wid_init)) / t_model_init_seconds
dfmodel['vel_z_mid'] = (dfmodel['pos_z_min'] + (0.5 * wid_init)) / t_model_init_seconds
dfmodel.eval('vel_mid_radial = sqrt(vel_x_mid ** 2 + vel_y_mid ** 2 + vel_z_mid ** 2)', inplace=True)
    if dimensions == 3 and ('pos_mid' in derived_cols or 'angle_bin' in derived_cols):
dfmodel['pos_x_mid'] = (dfmodel['pos_x_min'] + (0.5 * wid_init))
dfmodel['pos_y_mid'] = (dfmodel['pos_y_min'] + (0.5 * wid_init))
dfmodel['pos_z_mid'] = (dfmodel['pos_z_min'] + (0.5 * wid_init))
if 'angle_bin' in derived_cols:
get_cell_angle(dfmodel, modelpath)
if 'Ye' in derived_cols and os.path.isfile(modelpath / 'Ye.txt'):
dfmodel['Ye'] = at.inputmodel.opacityinputfile.get_Ye_from_file(modelpath)
if 'Q' in derived_cols and os.path.isfile(modelpath / 'Q_energy.txt'):
dfmodel['Q'] = at.inputmodel.energyinputfiles.get_Q_energy_from_file(modelpath)
return dfmodel
def get_cell_angle(dfmodel, modelpath):
"""get angle between cell midpoint and axis"""
syn_dir = at.get_syn_dir(modelpath)
cos_theta = np.zeros(len(dfmodel))
i = 0
for _, cell in dfmodel.iterrows():
mid_point = [cell['pos_x_mid'], cell['pos_y_mid'], cell['pos_z_mid']]
cos_theta[i] = (
at.dot(mid_point, syn_dir)) / (at.vec_len(mid_point) * at.vec_len(syn_dir))
i += 1
dfmodel['cos_theta'] = cos_theta
cos_bins = [-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1] # including end bin
labels = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90] # to agree with escaping packet bin numbers
dfmodel['cos_bin'] = pd.cut(dfmodel['cos_theta'], cos_bins, labels=labels)
# dfmodel['cos_bin'] = np.searchsorted(cos_bins, dfmodel['cos_theta'].values) -1
return dfmodel
def get_mean_cell_properties_of_angle_bin(dfmodeldata, vmax_cmps, modelpath=None):
if 'cos_bin' not in dfmodeldata:
get_cell_angle(dfmodeldata, modelpath)
    dfmodeldata.loc[dfmodeldata['rho'] == 0, 'rho'] = None
cell_velocities = np.unique(dfmodeldata['vel_x_min'].values)
cell_velocities = cell_velocities[cell_velocities >= 0]
velocity_bins = np.append(cell_velocities, vmax_cmps)
mid_velocities = np.unique(dfmodeldata['vel_x_mid'].values)
mid_velocities = mid_velocities[mid_velocities >= 0]
mean_bin_properties = {}
for bin_number in range(10):
mean_bin_properties[bin_number] = pd.DataFrame({'velocity': mid_velocities,
'mean_rho': np.zeros_like(mid_velocities, dtype=float),
'mean_Ye': np.zeros_like(mid_velocities, dtype=float),
'mean_Q': np.zeros_like(mid_velocities, dtype=float)})
# cos_bin_number = 90
for bin_number in range(10):
cos_bin_number = bin_number * 10
# get cells with bin number
dfanglebin = dfmodeldata.query('cos_bin == @cos_bin_number', inplace=False)
binned = pd.cut(dfanglebin['vel_mid_radial'], velocity_bins, labels=False, include_lowest=True)
i = 0
        for binindex, mean_rho in dfanglebin.groupby(binned)['rho'].mean().items():
i += 1
mean_bin_properties[bin_number]['mean_rho'][binindex] += mean_rho
i = 0
if 'Ye' in dfmodeldata.keys():
            for binindex, mean_Ye in dfanglebin.groupby(binned)['Ye'].mean().items():
i += 1
mean_bin_properties[bin_number]['mean_Ye'][binindex] += mean_Ye
if 'Q' in dfmodeldata.keys():
            for binindex, mean_Q in dfanglebin.groupby(binned)['Q'].mean().items():
i += 1
mean_bin_properties[bin_number]['mean_Q'][binindex] += mean_Q
return mean_bin_properties
def get_2d_modeldata(modelpath):
filepath = os.path.join(modelpath, 'model.txt')
    with open(filepath) as fmodelfile:
        num_lines = sum(1 for _ in fmodelfile)
skiprowlist = [0, 1, 2]
skiprowlistodds = skiprowlist + [i for i in range(3, num_lines) if i % 2 == 1]
skiprowlistevens = skiprowlist + [i for i in range(3, num_lines) if i % 2 == 0]
model1stlines = pd.read_csv(filepath, delim_whitespace=True, header=None, skiprows=skiprowlistevens)
model2ndlines = pd.read_csv(filepath, delim_whitespace=True, header=None, skiprows=skiprowlistodds)
model = pd.concat([model1stlines, model2ndlines], axis=1)
column_names = ['inputcellid', 'cellpos_mid[r]', 'cellpos_mid[z]', 'rho_model',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48']
model.columns = column_names
return model
def get_3d_model_data_merged_model_and_abundances_minimal(args):
"""Get 3D data without generating all the extra columns in standard routine.
    Needed for large (e.g. 200^3) models"""
model = get_3d_modeldata_minimal(args.modelpath)
abundances = get_initialabundances(args.modelpath[0])
with open(os.path.join(args.modelpath[0], 'model.txt'), 'r') as fmodelin:
fmodelin.readline() # npts_model3d
args.t_model = float(fmodelin.readline()) # days
args.vmax = float(fmodelin.readline()) # v_max in [cm/s]
print(model.keys())
merge_dfs = model.merge(abundances, how='inner', on='inputcellid')
del model
del abundances
gc.collect()
merge_dfs.info(verbose=False, memory_usage="deep")
return merge_dfs
def get_3d_modeldata_minimal(modelpath):
"""Read 3D model without generating all the extra columns in standard routine.
    Needed for large (e.g. 200^3) models"""
model = pd.read_csv(os.path.join(modelpath[0], 'model.txt'),
delim_whitespace=True, header=None, skiprows=3, dtype=np.float64)
columns = ['inputcellid', 'cellpos_in[z]', 'cellpos_in[y]', 'cellpos_in[x]', 'rho_model',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48']
model = pd.DataFrame(model.values.reshape(-1, 10))
model.columns = columns
print('model.txt memory usage:')
model.info(verbose=False, memory_usage="deep")
return model
def save_modeldata(
dfmodel, t_model_init_days, filename=None, modelpath=None, vmax=None, dimensions=1, radioactives=True):
"""Save a pandas DataFrame and snapshot time into ARTIS model.txt"""
timestart = time.perf_counter()
assert dimensions in [1, 3, None]
if dimensions == 1:
standardcols = ['inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52',
'X_Cr48']
elif dimensions == 3:
dfmodel.rename(columns={'gridindex': 'inputcellid'}, inplace=True)
griddimension = int(round(len(dfmodel) ** (1. / 3.)))
print(f' grid size: {len(dfmodel)} ({griddimension}^3)')
assert griddimension ** 3 == len(dfmodel)
standardcols = [
'inputcellid', 'pos_x_min', 'pos_y_min', 'pos_z_min', 'rho',
'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48']
# these two columns are optional, but position is important and they must appear before any other custom cols
if 'X_Ni57' in dfmodel.columns:
standardcols.append('X_Ni57')
if 'X_Co57' in dfmodel.columns:
standardcols.append('X_Co57')
dfmodel['inputcellid'] = dfmodel['inputcellid'].astype(int)
customcols = [col for col in dfmodel.columns if col not in standardcols and col.startswith('X_')]
customcols.sort(key=lambda col: at.get_z_a_nucname(col)) # sort columns by atomic number, mass number
# set missing radioabundance columns to zero
for col in standardcols:
if col not in dfmodel.columns and col.startswith('X_'):
dfmodel[col] = 0.0
assert modelpath is not None or filename is not None
if filename is None:
filename = 'model.txt'
if modelpath is not None:
modelfilepath = Path(modelpath, filename)
else:
modelfilepath = Path(filename)
with open(modelfilepath, 'w') as fmodel:
fmodel.write(f'{len(dfmodel)}\n')
fmodel.write(f'{t_model_init_days}\n')
if dimensions == 3:
fmodel.write(f'{vmax}\n')
if customcols:
fmodel.write(f'#{" ".join(standardcols)} {" ".join(customcols)}\n')
abundcols = [*[col for col in standardcols if col.startswith('X_')], *customcols]
# for cell in dfmodel.itertuples():
# if dimensions == 1:
# fmodel.write(f'{cell.inputcellid:6d} {cell.velocity_outer:9.2f} {cell.logrho:10.8f} ')
# elif dimensions == 3:
# fmodel.write(f"{cell.inputcellid:6d} {cell.posx} {cell.posy} {cell.posz} {cell.rho}\n")
#
# fmodel.write(" ".join([f'{getattr(cell, col)}' for col in abundcols]))
#
# fmodel.write('\n')
if dimensions == 1:
for cell in dfmodel.itertuples(index=False):
fmodel.write(f'{cell.inputcellid:6d} {cell.velocity_outer:9.2f} {cell.logrho:10.8f} ')
fmodel.write(" ".join([f'{getattr(cell, col)}' for col in abundcols]))
fmodel.write('\n')
elif dimensions == 3:
zeroabund = ' '.join(['0.0' for _ in abundcols])
for inputcellid, posxmin, posymin, poszmin, rho, *massfracs in dfmodel[
['inputcellid', 'pos_x_min', 'pos_y_min', 'pos_z_min', 'rho', *abundcols]
].itertuples(index=False, name=None):
fmodel.write(f"{inputcellid:6d} {posxmin} {posymin} {poszmin} {rho}\n")
fmodel.write(" ".join([f'{abund}' for abund in massfracs]) if rho > 0. else zeroabund)
fmodel.write('\n')
print(f'Saved {filename} (took {time.perf_counter() - timestart:.1f} seconds)')
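# Round-trip sketch (illustrative paths): read a model directory and write it
# back out with the same snapshot time and dimensions.
#     dfmodel, t_model_init_days, vmax_cmps = get_modeldata('my_model_dir')
#     save_modeldata(dfmodel, t_model_init_days, modelpath='out_dir',
#                    vmax=vmax_cmps, dimensions=get_dfmodel_dimensions(dfmodel))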
def get_mgi_of_velocity_kms(modelpath, velocity, mgilist=None):
"""Return the modelgridindex of the cell whose outer velocity is closest to velocity.
If mgilist is given, then chose from these cells only"""
modeldata, _, _ = get_modeldata(modelpath)
velocity = float(velocity)
if not mgilist:
mgilist = [mgi for mgi in modeldata.index]
arr_vouter = modeldata['velocity_outer'].values
else:
arr_vouter = np.array([modeldata['velocity_outer'][mgi] for mgi in mgilist])
index_closestvouter = np.abs(arr_vouter - velocity).argmin()
    if np.isnan(velocity):
        return float('nan')
    if velocity < arr_vouter[index_closestvouter] or index_closestvouter + 1 >= len(mgilist):
        return mgilist[index_closestvouter]
    elif velocity < arr_vouter[index_closestvouter + 1]:
        return mgilist[index_closestvouter + 1]
    else:
        print(f"Can't find cell with velocity of {velocity}. Velocity list: {arr_vouter}")
        assert False
@lru_cache(maxsize=8)
def get_initialabundances(modelpath):
"""Return a list of mass fractions."""
abundancefilepath = at.firstexisting(
['abundances.txt.xz', 'abundances.txt.gz', 'abundances.txt'], path=modelpath)
abundancedata = pd.read_csv(abundancefilepath, delim_whitespace=True, header=None)
abundancedata.index.name = 'modelgridindex'
abundancedata.columns = [
'inputcellid', *['X_' + at.get_elsymbol(x) for x in range(1, len(abundancedata.columns))]]
if len(abundancedata) > 100000:
print('abundancedata memory usage:')
abundancedata.info(verbose=False, memory_usage="deep")
return abundancedata
def save_initialabundances(dfelabundances, abundancefilename):
"""Save a DataFrame (same format as get_initialabundances) to abundances.txt.
columns must be:
- inputcellid: integer index to match model.txt (starting from 1)
    - X_El: mass fraction of element with symbol 'El' (e.g., X_H, X_He, X_Li, ...)
"""
timestart = time.perf_counter()
if Path(abundancefilename).is_dir():
abundancefilename = Path(abundancefilename) / 'abundances.txt'
dfelabundances['inputcellid'] = dfelabundances['inputcellid'].astype(int)
atomic_numbers = [at.get_atomic_number(colname[2:])
for colname in dfelabundances.columns if colname.startswith('X_')]
elcolnames = [f'X_{at.get_elsymbol(Z)}' for Z in range(1, 1 + max(atomic_numbers))]
# set missing elemental abundance columns to zero
for col in elcolnames:
if col not in dfelabundances.columns:
dfelabundances[col] = 0.0
with open(abundancefilename, 'w') as fabund:
for row in dfelabundances.itertuples(index=False):
fabund.write(f' {row.inputcellid:6d} ')
fabund.write(" ".join([f'{getattr(row, colname, 0.)}' for colname in elcolnames]))
fabund.write("\n")
print(f'Saved {abundancefilename} (took {time.perf_counter() - timestart:.1f} seconds)')
def save_empty_abundance_file(ngrid, outputfilepath='.'):
"""Dummy abundance file with only zeros"""
Z_atomic = np.arange(1, 31)
abundancedata = {'cellid': range(1, ngrid + 1)}
for atomic_number in Z_atomic:
abundancedata[f'Z={atomic_number}'] = np.zeros(ngrid)
# abundancedata['Z=28'] = np.ones(ngrid)
abundancedata = pd.DataFrame(data=abundancedata)
abundancedata = abundancedata.round(decimals=5)
abundancedata.to_csv(Path(outputfilepath) / 'abundances.txt', header=False, sep='\t', index=False)
def get_dfmodel_dimensions(dfmodel):
if 'pos_x_min' in dfmodel.columns:
return 3
return 1
def sphericalaverage(dfmodel, t_model_init_days, vmax, dfelabundances=None, dfgridcontributions=None):
"""Convert 3D Cartesian grid model to 1D spherical"""
t_model_init_seconds = t_model_init_days * 24 * 60 * 60
xmax = vmax * t_model_init_seconds
ngridpoints = len(dfmodel)
ncoordgridx = round(ngridpoints ** (1. / 3.))
wid_init = 2 * xmax / ncoordgridx
print(f'Spherically averaging 3D model with {ngridpoints} cells...')
timestart = time.perf_counter()
# dfmodel = dfmodel.query('rho > 0.').copy()
dfmodel = dfmodel.copy()
celldensity = {cellindex: rho for cellindex, rho in dfmodel[['inputcellid', 'rho']].itertuples(index=False)}
dfmodel = add_derived_cols_to_modeldata(
dfmodel, ['velocity'], dimensions=3, t_model_init_seconds=t_model_init_seconds, wid_init=wid_init)
# print(dfmodel)
# print(dfelabundances)
km_to_cm = 1e5
velocity_bins = [vmax * n / ncoordgridx for n in range(ncoordgridx + 1)] # cm/s
outcells = []
outcellabundances = []
outgridcontributions = []
# cellidmap_3d_to_1d = {}
highest_active_radialcellid = -1
for radialcellid, (velocity_inner, velocity_outer) in enumerate(zip(velocity_bins[:-1], velocity_bins[1:]), 1):
assert velocity_outer > velocity_inner
matchedcells = dfmodel.query(
'vel_mid_radial > @velocity_inner and vel_mid_radial <= @velocity_outer')
matchedcellrhosum = matchedcells.rho.sum()
# cellidmap_3d_to_1d.update({cellid_3d: radialcellid for cellid_3d in matchedcells.inputcellid})
if len(matchedcells) == 0:
rhomean = 0.
else:
shell_volume = (4 * math.pi / 3) * (
(velocity_outer * t_model_init_seconds) ** 3 - (velocity_inner * t_model_init_seconds) ** 3)
rhomean = matchedcellrhosum * wid_init ** 3 / shell_volume
# volumecorrection = len(matchedcells) * wid_init ** 3 / shell_volume
# print(radialcellid, volumecorrection)
if rhomean > 0. and dfgridcontributions is not None:
dfcellcont = dfgridcontributions.query('cellindex in @matchedcells.inputcellid.values')
for particleid, dfparticlecontribs in dfcellcont.groupby('particleid'):
frac_of_cellmass_avg = sum([
(row.frac_of_cellmass *
celldensity[row.cellindex])
for row in dfparticlecontribs.itertuples(index=False)]) / matchedcellrhosum
frac_of_cellmass_includemissing_avg = sum([
(row.frac_of_cellmass_includemissing *
celldensity[row.cellindex])
for row in dfparticlecontribs.itertuples(index=False)]) / matchedcellrhosum
outgridcontributions.append({
'particleid': particleid,
'cellindex': radialcellid,
'frac_of_cellmass': frac_of_cellmass_avg,
'frac_of_cellmass_includemissing': frac_of_cellmass_includemissing_avg,
})
if rhomean > 0.:
highest_active_radialcellid = radialcellid
logrho = math.log10(max(1e-99, rhomean))
dictcell = {
'inputcellid': radialcellid,
'velocity_outer': velocity_outer / km_to_cm,
'logrho': logrho,
}
for column in matchedcells.columns:
if column.startswith('X_'):
if rhomean > 0.:
massfrac = np.dot(matchedcells[column], matchedcells.rho) / matchedcellrhosum
else:
massfrac = 0.
dictcell[column] = massfrac
outcells.append(dictcell)
if dfelabundances is not None:
if rhomean > 0.:
abund_matchedcells = dfelabundances.loc[matchedcells.index]
else:
abund_matchedcells = None
dictcellabundances = {'inputcellid': radialcellid}
for column in dfelabundances.columns:
if column.startswith('X_'):
if rhomean > 0.:
massfrac = np.dot(abund_matchedcells[column], matchedcells.rho) / matchedcellrhosum
else:
massfrac = 0.
dictcellabundances[column] = massfrac
outcellabundances.append(dictcellabundances)
dfmodel1d = pd.DataFrame(outcells[:highest_active_radialcellid])
dfabundances1d = (
pd.DataFrame(outcellabundances[:highest_active_radialcellid]) if outcellabundances else None)
dfgridcontributions1d = pd.DataFrame(outgridcontributions) if outgridcontributions else None
print(f' took {time.perf_counter() - timestart:.1f} seconds')
return dfmodel1d, dfabundances1d, dfgridcontributions1d
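# Usage sketch (added for illustration; the return order of get_modeldata is
# assumed from its call sites above, so treat these names as a hedged example):
#   dfmodel3d, t_model_init_days, vmax = get_modeldata('run_3d')
#   dfmodel1d, dfabund1d, _ = sphericalaverage(dfmodel3d, t_model_init_days, vmax)
#   save_modeldata(dfmodel1d, t_model_init_days, modelpath='run_1d', dimensions=1)
#   if dfabund1d is not None:
#       save_initialabundances(dfabund1d, 'run_1d')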
|
nilq/baby-python
|
python
|
# Codecademy's Madlibs
from datetime import datetime
now = datetime.now()
print(now)
story = "%s wrote this story on a %s line train to test Python strings. Python is better than %s but worse than %s -------> written by %s on %02d/%02d/%02d at %02d:%02d"
story_name = raw_input("Enter a name: ")
story_line = raw_input("Enter a tube line: ")
story_programme_one = raw_input("Enter a programme: ")
story_programme_two = raw_input("Enter another programme: ")
print story % (story_name, story_line, story_programme_one, story_programme_two, story_name, now.day, now.month, now.year, now.hour, now.minute)
|
nilq/baby-python
|
python
|
import logging
import os
import json
from pprint import pformat
import pysftp
from me4storage.common.exceptions import ApiError
logger = logging.getLogger(__name__)
def save_logs(host, port, username, password, output_file):
cnopts = pysftp.CnOpts(knownhosts=os.path.expanduser(os.path.join('~','.ssh','known_hosts')))
cnopts.hostkeys = None
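    # note: hostkeys=None disables host key verification entirely, so the
    # known_hosts file loaded above is effectively ignored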
logger.info(f"Downloading log bundle from {host} to "
f"{output_file} ... This can take a few minutes.")
with pysftp.Connection(host,
port=int(port),
username=username,
password=password,
cnopts=cnopts,
) as sftp:
sftp.get(remotepath='/logs', localpath=output_file)
return True
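# Hedged usage sketch (added): host, credentials and paths are placeholders.
#   save_logs(host='192.0.2.10', port=22, username='manage',
#             password='secret', output_file='./controller_logs.zip')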
|
nilq/baby-python
|
python
|
import builtins
import traceback
from os.path import relpath
def dprint(*args, **kwargs):
"""Pre-pends the filename and linenumber to the print statement"""
stack = traceback.extract_stack()[:-1]
i = -1
last = stack[i]
if last.name in ('clearln', 'finish'):
return builtins.__dict__['oldprint'](*args, **kwargs)
# Handle print wrappers in pytorch_classification/utils/progress/progress/helpers.py
    while last.name in ('writeln', 'write', 'update'):
i = i - 1
last = stack[i]
# Handle different versions of the traceback module
if hasattr(last, 'filename'):
out_str = "{}:{} ".format(relpath(last.filename), last.lineno)
else:
out_str = "{}:{} ".format(relpath(last[0]), last[1])
# Prepend the filename and linenumber
return builtins.__dict__['oldprint'](out_str, *args, **kwargs)
def enable():
if 'oldprint' not in builtins.__dict__:
builtins.__dict__['oldprint'] = builtins.__dict__['print']
builtins.__dict__['print'] = dprint
def disable():
if 'oldprint' in builtins.__dict__:
builtins.__dict__['print'] = builtins.__dict__['oldprint']
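# Minimal usage sketch (added; assumes this module is importable as `dprint`):
#   import dprint
#   dprint.enable()   # every print() is now prefixed with "<file>:<line> "
#   print('hello')    # -> e.g. "example.py:3 hello"
#   dprint.disable()  # restores the builtin print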
|
nilq/baby-python
|
python
|
import config_cosmos
import azure.cosmos.cosmos_client as cosmos_client
import json
from dateutil import parser
def post_speech(speech_details, category):
speech_details = speech_details.copy()
collection_link = "dbs/speakeasy/colls/" + category
speech_details["id"] = speech_details["user_name"] + "_" + speech_details["speech_name"]
client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST, auth={'masterKey': config_cosmos.COSMOSDB_KEY})
client.CreateItem(collection_link, speech_details)
return True
def get_speech_details(speech_name, user_name, category):
collection_link = "dbs/speakeasy/colls/" + category
client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST, auth={'masterKey': config_cosmos.COSMOSDB_KEY})
query = "SELECT * FROM %s WHERE %s.speech_name ='%s' AND %s.user_name='%s'" %(category, category, speech_name, category, user_name)
data = list(client.QueryItems(collection_link, query, config_cosmos.OPTIONS))
return data[0]
def get_all_speeches(user_name):
categories = ["gaze", "speech", "gestures"]
final = []
for category in categories:
collection_link = "dbs/speakeasy/colls/" + category
client = cosmos_client.CosmosClient(url_connection=config_cosmos.COSMOSDB_HOST, auth={'masterKey': config_cosmos.COSMOSDB_KEY})
query = "SELECT * FROM %s WHERE %s.user_name='%s'" %(category, category, user_name)
data = list(client.QueryItems(collection_link, query, config_cosmos.OPTIONS))
for item in data:
final.append({"speech_name": item["speech_name"], "timestamp": item["timestamp"], "category": category})
final = sorted(final, key=lambda x: parser.parse(" ".join(x["timestamp"].split(" ")[:-4])))[::-1]
return final
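# Note (added): the string-formatted queries above are open to injection. The
# same client also accepts parameterized queries; a hedged sketch assuming the
# azure-cosmos 3.x QueryItems API used above:
#   query = {
#       'query': 'SELECT * FROM c WHERE c.user_name = @user',
#       'parameters': [{'name': '@user', 'value': user_name}]
#   }
#   data = list(client.QueryItems(collection_link, query, config_cosmos.OPTIONS))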
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Outputs the relative error in a particular stat for deg1 and deg2 FEM.
# Output columns:
# mesh_num medianEdgeLength deg1Error deg2Error
import sys, os, re, numpy as np
from numpy.linalg import norm
resultDir, stat = sys.argv[1:]
# Input data columns
meshInfo = ["mesh_num", "corner_angle", "medianEdgeLength"]
strains = ["strain"]
displacements = ["u_x", "u_y"] # per sample
numSamples = 3
columnNames = meshInfo
columnNames += strains
for s in range(numSamples):
columnNames += map(lambda n: "%s[%i]" % (n, s), displacements)
for s in range(numSamples):
columnNames += map(lambda n: "mathematica %s[%i]" % (n, s), displacements)
def read_table_sorted(path):
data = map(lambda s: s.strip().split('\t'), file(path))
return sorted(data, key=lambda r: int(r[0]))
def validateColumnCount(table, numColumns):
for row in table:
if (len(row) != numColumns):
raise Exception("Invalid number of columns: %i (expected %i)" % (len(row), numColumns))
deg1Table = read_table_sorted(resultDir + "/deg_1.txt")
deg2Table = read_table_sorted(resultDir + "/deg_2.txt")
validateColumnCount(deg1Table, len(columnNames))
validateColumnCount(deg2Table, len(columnNames))
if (len(deg1Table) != len(deg2Table)):
raise Exception("Data tables for deg1 and deg2 differ in length")
groundTruth = np.array(map(float, deg2Table[-1]))
for (d1, d2) in zip(deg1Table, deg2Table):
msh_num, medianEdgeLength = [d1[0], d1[2]];
relErrors = []
if stat in columnNames:
cidx = columnNames.index(stat)
relErrors = [ abs(float(d1[cidx]) - groundTruth[cidx]) / abs(groundTruth[cidx]),
abs(float(d2[cidx]) - groundTruth[cidx]) / abs(groundTruth[cidx])]
elif (stat.replace("norm", "x") in columnNames):
xidx = columnNames.index(stat.replace("norm", "x"))
yidx = columnNames.index(stat.replace("norm", "y"))
d1Vec = np.array(map(float, [d1[xidx], d1[yidx]]))
d2Vec = np.array(map(float, [d2[xidx], d2[yidx]]))
groundTruthVec = groundTruth[[xidx, yidx]]
        relErrors = [norm(d1Vec - groundTruthVec) / norm(groundTruthVec),
                     norm(d2Vec - groundTruthVec) / norm(groundTruthVec)]
else: raise Exception("Unknown stat %s" % stat)
# mesh_num medianEdgeLength deg1Error deg2Error
print "\t".join([msh_num, medianEdgeLength] + map(str, relErrors))
|
nilq/baby-python
|
python
|
"""
Here we implement some simple policies that
one can use directly in simple tasks.
More complicated policies can also be created
by inheriting from the Policy class
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal, Bernoulli
class Policy(nn.Module):
def __init__(self, fn_approximator):
super().__init__()
self.fn_approximator = fn_approximator
def forward(self, state):
raise NotImplementedError('Must be implemented.')
class RandomPolicy(Policy):
"""
A random policy that just takes one of output_dim actions randomly
"""
def __init__(self, output_dim=2):
super().__init__(None)
self.output_dim = output_dim
self.p = nn.Parameter(torch.IntTensor([0]), requires_grad=False)
def forward(self, state):
batch_size = state.size()[0]
probs = torch.ones(batch_size, self.output_dim) / self.output_dim
stochastic_policy = Categorical(probs)
actions = stochastic_policy.sample()
log_probs = stochastic_policy.log_prob(actions)
return actions, log_probs
class CategoricalPolicy(Policy):
"""
Used to pick from a range of actions.
```
fn_approximator = MLP_factory(input_size=4, output_size=3)
    policy = policies.CategoricalPolicy(fn_approximator)
the actions will be a number in [0, 1, 2]
```
"""
def forward(self, state):
policy_log_probs = self.fn_approximator(state)
probs = F.softmax(policy_log_probs, dim=1)
stochastic_policy = Categorical(probs)
# sample discrete actions
actions = stochastic_policy.sample()
# get log probs
log_probs = stochastic_policy.log_prob(actions)
return actions, log_probs
def log_prob(self, state, action):
policy_log_probs = self.fn_approximator(state)
probs = F.softmax(policy_log_probs, dim=1)
stochastic_policy = Categorical(probs)
return stochastic_policy.log_prob(action)
class MultinomialPolicy(CategoricalPolicy):
def __init__(self, fn_approximator):
super().__init__(fn_approximator)
logging.warning('Use `CategoricalPolicy` since `MultinomialPolicy` will soon be deprecated.')
class GaussianPolicy(Policy):
"""
    Used to take actions in continuous spaces
```
fn_approximator = MLP_factory(input_size=4, output_size=2)
policy = policies.GaussianPolicy(fn_approximator)
```
"""
def forward(self, state):
policy_mu, policy_sigma = self.fn_approximator(state)
policy_sigma = F.softplus(policy_sigma)
stochastic_policy = Normal(policy_mu, policy_sigma)
actions = stochastic_policy.sample()
log_probs = stochastic_policy.log_prob(actions)
return actions, log_probs
def log_prob(self, state, action):
raise NotImplementedError('Not implemented yet')
class BernoulliPolicy(Policy):
"""
Used to take binary actions.
This can also be used when each action consists of
a many binary actions, for example:
```
fn_approximator = MLP_factory(input_size=4, output_size=5)
policy = policies.BernoulliPolicy(fn_approximator)
```
this will result in each action being composed of 5 binary actions.
"""
def forward(self, state):
policy_p = self.fn_approximator(state)
        policy_p = torch.sigmoid(policy_p)
try:
stochastic_policy = Bernoulli(policy_p)
actions = stochastic_policy.sample()
log_probs = stochastic_policy.log_prob(actions)
        except RuntimeError:
            logging.debug('Runtime error occurred. policy_p was {}'.format(policy_p))
            logging.debug('State was: {}'.format(state))
            logging.debug('Function approximator return was: {}'.format(self.fn_approximator(state)))
            logging.debug('This has occurred before when parameters of the network became NaNs.')
            logging.debug('Check learning rate, or change eps in adaptive gradient descent methods.')
            raise RuntimeError('BernoulliPolicy returned NaN information. Logger level DEBUG will have more '
                               'information')
return actions, log_probs
def log_prob(self, state, action):
policy_p = self.fn_approximator(state)
        policy_p = torch.sigmoid(policy_p)
stochastic_policy = Bernoulli(policy_p)
return stochastic_policy.log_prob(action)
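if __name__ == '__main__':
    # Hedged demo (added): `MLP_factory` from the docstrings is not defined in
    # this module, so a plain nn.Sequential stands in for the approximator.
    net = nn.Sequential(nn.Linear(4, 32), nn.ReLU(), nn.Linear(32, 3))
    policy = CategoricalPolicy(net)
    state = torch.randn(8, 4)           # batch of 8 four-dimensional states
    actions, log_probs = policy(state)  # actions are integers in {0, 1, 2}
    print(actions.shape, log_probs.shape)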
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('person', '__first__'),
]
operations = [
migrations.CreateModel(
name='Attendee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('state', models.CharField(max_length=4, choices=[(b'yes', b'yes'), (b'no', b'no')])),
('event', models.ForeignKey(to='person.Person')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('from_dt', models.DateTimeField()),
('to_dt', models.DateTimeField()),
('title', models.CharField(max_length=128)),
('text', models.TextField()),
('price', models.IntegerField()),
],
),
]
|
nilq/baby-python
|
python
|
import math
class GPSlocation:
    """used to translate the location system"""
    _prop_ = 'GPSlocation'
    # math is imported at module level: an `import math` inside the class body
    # would bind it as a class attribute, invisible to the bare `math.` lookups
    # in the methods below
    pi = 3.1415926535897932384626
    a = 6378245.0
    ee = 0.00669342162296594323
def gcj02_to_wgs84(self, lng, lat):
"""GCJ02 system to WGS1984 system"""
dlat = self._transformlat(lng - 105.0, lat - 35.0)
dlng = self._transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * self.pi
magic = math.sin(radlat)
magic = 1 - self.ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((self.a * (1 - self.ee)) / (magic * sqrtmagic) * self.pi)
dlng = (dlng * 180.0) / (self.a / sqrtmagic * math.cos(radlat) * self.pi)
mglat = lat + dlat
mglng = lng + dlng
return [lng * 2 - mglng, lat * 2 - mglat]
def wgs84_to_gcj02(self, lng, lat):
"""WGS1984 system to GCJ02 system"""
dlat = self._transformlat(lng - 105.0, lat - 35.0)
dlng = self._transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * self.pi
magic = math.sin(radlat)
magic = 1 - self.ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((self.a * (1 - self.ee)) / (magic * sqrtmagic) * self.pi)
dlng = (dlng * 180.0) / (self.a / sqrtmagic * math.cos(radlat) * self.pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat]
def _transformlat(self, lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + 0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * self.pi) + 20.0 * math.sin(2.0 * lng * self.pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lat * self.pi) + 40.0 * math.sin(lat / 3.0 * self.pi)) * 2.0 / 3.0
ret += (160.0 * math.sin(lat / 12.0 * self.pi) + 320 * math.sin(lat * self.pi / 30.0)) * 2.0 / 3.0
return ret
def _transformlng(self, lng, lat):
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + 0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * self.pi) + 20.0 * math.sin(2.0 * lng * self.pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lng * self.pi) + 40.0 * math.sin(lng / 3.0 * self.pi)) * 2.0 / 3.0
ret += (150.0 * math.sin(lng / 12.0 * self.pi) + 300.0 * math.sin(lng / 30.0 * self.pi)) * 2.0 / 3.0
return ret
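if __name__ == '__main__':
    # Hedged demo (added): arbitrary sample coordinates near Beijing; the round
    # trip should land close to the original point.
    converter = GPSlocation()
    gcj = converter.wgs84_to_gcj02(116.397, 39.908)
    back = converter.gcj02_to_wgs84(gcj[0], gcj[1])
    print(gcj, back)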
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -
import re
import copy
import urllib
import urllib3
import string
import dateutil.parser
from iso8601 import parse_date
from robot.libraries.BuiltIn import BuiltIn
from datetime import datetime, timedelta
import pytz
TZ = pytz.timezone('Europe/Kiev')
def get_library():
return BuiltIn().get_library_instance('Selenium2Library')
def get_webdriver_instance():
return get_library()._current_browser()
# return whether variable is None
def get_variable_is_none(variable):
    return variable is None
# run specified keyword if condition is not none type
def run_keyword_if_condition_is_not_none(condition, name, *args):
    if not get_variable_is_none(condition):
BuiltIn().run_keyword(name, *args)
# run specified keyword if condition is none type
def run_keyword_if_condition_is_none(condition, name, *args):
    if get_variable_is_none(condition):
BuiltIn().run_keyword(name, *args)
# return value for *keys (nested) in `element` (dict).
def get_from_dictionary_by_keys(element, *keys):
if not isinstance(element, dict):
raise AttributeError('keys_exists() expects dict as first argument.')
if len(keys) == 0:
raise AttributeError('keys_exists() expects at least two arguments, one given.')
_element = element
for key in keys:
try:
_element = _element[key]
except KeyError:
return None
return _element
# returns if element exists on page. optimization
def get_is_element_exist(locator):
jquery_locator = convert_locator_to_jquery(locator)
if get_variable_is_none(jquery_locator) == False:
jquery_locator = jquery_locator.replace('"', '\\"')
length = get_webdriver_instance().execute_script('return $("' + jquery_locator + '").length;')
return length > 0
try:
get_library()._element_find(locator, None, True)
except Exception:
return False
return True
# click
def js_click_element(locator):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $el = jQuery(arguments[0]); if($el.length) $el.click();',
element
)
# convert locator to jquery locator
def convert_locator_to_jquery(locator):
locator_params = locator.split('=', 1)
if locator_params[0] == 'id':
return '#' + locator_params[1]
if locator_params[0] == 'jquery':
return locator_params[1]
if locator_params[0] == 'css':
return locator_params[1]
return None
# set scroll to element in view
def set_element_scroll_into_view(locator):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $el = jQuery(arguments[0]); if($el.length) $el.get(0).scrollIntoView();',
element
)
# return text/value by specified locator
def get_value_by_locator(locator):
element = get_library()._element_find(locator, None, True)
text = get_webdriver_instance().execute_script(
'var $element = jQuery(arguments[0]);'
'if($element.is("input[type=checkbox]")) return $element.is(":checked") ? "1":"0";'
'if($element.is("input,textarea,select")) return $element.val();'
'return $element.text();',
element
)
return text
# input text to hidden input
def input_text_to_hidden_input(locator, text):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'jQuery(arguments[0]).val("' + text.replace('"', '\\"') + '");',
element
)
# select option by label for hidden select
def select_from_hidden_list_by_label(locator, label):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $option = jQuery("option:contains(' + label.replace('"', '\\"') + ')", arguments[0]);' +
'if($option.length) jQuery(arguments[0]).val($option.attr("value"));',
element
)
# trigger change event for input by locator
def trigger_input_change_event(locator):
element = get_library()._element_find(locator, None, True)
get_webdriver_instance().execute_script(
'var $el = jQuery(arguments[0]); if($el.length) $el.trigger("change");',
element
)
# convert numbers to strings
def convert_float_to_string(number):
return repr(float(number))
def convert_esco__float_to_string(number):
return '{0:.5f}'.format(float(number))
def convert_float_to_string_3f(number):
return '{0:.3f}'.format(float(number))
# convert any variable to specified type
def convert_to_specified_type(value, type):
value = "%s" % (value)
if type == 'integer':
value = value.split()
value = ''.join(value)
print(value)
value = int(value)
if type == 'float':
value = value.split()
value = ''.join(value)
print(value)
value = float(value)
return value
# prepare isodate in needed format
def isodate_format(isodate, format):
iso_dt = parse_date(isodate)
return iso_dt.strftime(format)
def procuring_entity_name(tender_data):
tender_data.data.procuringEntity['name'] = u"ТОВ \"ПабликБид\""
tender_data.data.procuringEntity['name_en'] = u"TOV \"publicbid\""
tender_data.data.procuringEntity.identifier['id'] = u"1234567890-publicbid"
tender_data.data.procuringEntity.identifier['legalName'] = u"ТОВ \"ПабликБид\""
tender_data.data.procuringEntity.identifier['legalName_en'] = u"TOV \"publicbid\""
if 'address' in tender_data.data.procuringEntity:
tender_data.data.procuringEntity.address['region'] = u"м. Київ"
tender_data.data.procuringEntity.address['postalCode'] = u"123123"
tender_data.data.procuringEntity.address['locality'] = u"Київ"
tender_data.data.procuringEntity.address['streetAddress'] = u"address"
if 'contactPoint' in tender_data.data.procuringEntity:
tender_data.data.procuringEntity.contactPoint['name'] = u"Test ЗамовникОборони"
tender_data.data.procuringEntity.contactPoint['name_en'] = u"Test"
tender_data.data.procuringEntity.contactPoint['email'] = u"chuzhin@mail.ua"
tender_data.data.procuringEntity.contactPoint['telephone'] = u"+3801111111111"
tender_data.data.procuringEntity.contactPoint['url'] = u"https://public-bid.com.ua"
if 'buyers' in tender_data.data:
tender_data.data.buyers[0]['name'] = u"ТОВ \"ПабликБид\""
tender_data.data.buyers[0].identifier['id'] = u"1234567890-publicbid"
tender_data.data.buyers[0].identifier['legalName'] = u"ТОВ \"ПабликБид\""
return tender_data
# prepare data
def prepare_procuring_entity_data(data):
try:
data['name'] = u"publicbid"
data.identifier['id'] = u"publicbid"
data.identifier['legalName'] = u"publicbid"
data.identifier['scheme'] = u"UA-EDR"
if 'name_en' in data:
data['name_en'] = u"publicbid"
if 'legalName_en' in data.identifier:
data.identifier['legalName_en'] = u"publicbid"
if 'address' in data:
data.address['countryName'] = u"Україна"
data.address['locality'] = u"Київ"
data.address['postalCode'] = u"01111"
data.address['region'] = u"місто Київ"
data.address['streetAddress'] = u"вулиця Тестова, 220, 8"
if 'contactPoint' in data:
data.contactPoint['email'] = u"chuzhin@mail.ua"
data.contactPoint['faxNumber'] = u"+3801111111111"
data.contactPoint['telephone'] = u"+3801111111111"
data.contactPoint['name'] = u"Test"
if 'name_en' in data.contactPoint:
data.contactPoint['name_en'] = u"Test"
data.contactPoint['url'] = u"https://public-bid.com.ua"
except Exception:
raise Exception('data is not a dictionary')
# prepare data
def prepare_buyers_data(data):
if type(data) is not list:
raise Exception('data is not a list')
# preventing console errors about changing buyer data in cases
if len(data) != 1:
return
item = next(iter(data), None)
item['name'] = u"publicbid"
item.identifier['id'] = u"publicbid"
item.identifier['legalName'] = u"publicbid"
item.identifier['scheme'] = u"UA-EDR"
# prepare dictionary from field path + value
def generate_dictionary_from_field_path_and_value(path, value):
data = dict()
path_keys_list = path.split('.')
if len(path_keys_list) > 1:
key = path_keys_list.pop(0)
value = generate_dictionary_from_field_path_and_value('.'.join(path_keys_list), value)
indexRegex = re.compile(r'(\[(\d+)\]$)')
matchObj = indexRegex.search(key)
        print(matchObj)
if matchObj:
key = indexRegex.sub('', key)
value['list_index'] = matchObj.group(2)
value = [value]
data[key] = value
else:
data = dict()
data[path] = value
return data
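# Worked example (added for illustration):
#   generate_dictionary_from_field_path_and_value('data.items[0].id', 42)
#   -> {'data': {'items': [{'id': 42, 'list_index': '0'}]}}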
# Percentage conversion
def multiply_hundred(number):
return number * 100
# prepares data for filling form in easiest way
def prepare_tender_data(data_original):
# preventing change data in global view
data = copy.deepcopy(data_original)
# check if data is for multilot
if 'lots' not in data:
return data
# moves features to its related items
if 'features' in data:
i = 0
l = len(data['features'])
while i < l:
if data['features'][i]['featureOf'] == 'lot':
for lot in data['lots']:
if lot['id'] == data['features'][i]['relatedItem']:
if 'features' not in lot:
lot['features'] = []
lot['features'].append(data['features'].pop(i))
l = l - 1
i = i - 1
break
if data['features'][i]['featureOf'] == 'item':
for item in data['items']:
if item['id'] == data['features'][i]['relatedItem']:
if 'features' not in item:
item['features'] = []
item['features'].append(data['features'].pop(i))
l = l - 1
i = i - 1
break
i = i + 1
if 'features' in data:
if len(data['features']) == 0:
del data['features']
# moves items to its related lots
i = 0
l = len(data['items'])
while i < l:
for lot in data['lots']:
if lot['id'] == data['items'][i]['relatedLot']:
if 'items' not in lot:
lot['items'] = []
lot['items'].append(data['items'].pop(i))
l = l - 1
i = i - 1
break
i = i + 1
del data['items']
if 'milestones' not in data:
return data
# moves milestones to its related lots
i = 0
l = len(data['milestones'])
while i < l:
for lot in data['lots']:
if lot['id'] == data['milestones'][i]['relatedLot']:
if 'milestones' not in lot:
lot['milestones'] = []
lot['milestones'].append(data['milestones'].pop(i))
l = l - 1
i = i - 1
break
i = i + 1
del data['milestones']
return data
def split_agreementDuration(str, type):
    if type == 'year':
        year_temp = str.split('Y', 1)
        value = year_temp[0].split('P', 1)
    elif type == 'month':
        month_temp = str.split('M', 1)
        value = month_temp[0].split('Y', 1)
    else:
        day_temp = str.split('D', 1)
        value = day_temp[0].split('M', 1)
    return value[1]
def convert_date_to_string_contr(date):
date = dateutil.parser.parse(date)
date = date.strftime("%d.%m.%Y %H:%M:%S")
return date
def get_value_minimalStepPercentage(value):
value = value / 100
return value
def set_value_minimalStepPercentage(value):
value = value * 100
return value
def convert_string_to_float(number):
return float(number)
def download_file(url, file_name, output_dir):
urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def parse_complaintPeriod_date(date_string):
date_str = datetime.strptime(date_string, "%d.%m.%Y %H:%M")
date = datetime(date_str.year, date_str.month, date_str.day, date_str.hour, date_str.minute, date_str.second,
date_str.microsecond)
date = TZ.localize(date).isoformat()
return date
def parse_deliveryPeriod_date1(date):
date = dateutil.parser.parse(date)
date = date.strftime("%d.%m.%Y")
return date
def parse_deliveryPeriod_date(date_string):
# date_str = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S+03:00")
if '+03' in date_string:
date_str = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S+03:00")
else:
date_str = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S+02:00")
date = datetime(date_str.year, date_str.month, date_str.day)
date = date.strftime("%d.%m.%Y")
return date
def split_joinvalue(str_value):
str_value = str_value.split()
str_value = ''.join(str_value)
print(str_value)
return str_value
|
nilq/baby-python
|
python
|
import sys
sys.path.append("C:\Program Files\Vicon\Nexus2.1\SDK\Python")
import ViconNexus
import numpy as np
import smooth
vicon = ViconNexus.ViconNexus()
subject = vicon.GetSubjectNames()[0]
print 'Gap filling for subject ', subject
markers = vicon.GetMarkerNames(subject)
frames = vicon.GetFrameCount()
# Get data from nexus
print 'Populating data matrix'
rawData = np.zeros((frames,len(markers)*3))
for i in range(len(markers)):
    # the original indexed columns 3*i-3..3*i-1, relying on negative-index
    # wraparound for i == 0; the direct 3*i..3*i+2 mapping is used instead
    rawData[:, 3 * i], rawData[:, 3 * i + 1], rawData[:, 3 * i + 2], E = vicon.GetTrajectory(subject, markers[i])
    # mark occluded frames (existence flag 0) as gaps
    rawData[np.asarray(E) == 0, 3 * i] = np.nan
    rawData[np.asarray(E) == 0, 3 * i + 1] = np.nan
    rawData[np.asarray(E) == 0, 3 * i + 2] = np.nan
# Run low dimensional smoothing
Y = smooth.smooth(rawData,tol =1e-2,sigR=1e-3,keepOriginal=True)
print 'Writing new trajectories'
# Create new smoothed trajectories
for i in range(len(markers)):
    E = np.ones((frames, 1)).tolist()
    vicon.SetTrajectory(subject, markers[i], Y[:, 3 * i].tolist(), Y[:, 3 * i + 1].tolist(), Y[:, 3 * i + 2].tolist(), E)
print 'Done'
|
nilq/baby-python
|
python
|
from jinja2 import DictLoader, Environment
import argparse
import json
import importlib
import random
import string
HEADER = """
#pragma once
#include <rapidjson/rapidjson.h>
#include <rapidjson/writer.h>
#include <rapidjson/reader.h>
#include <iostream>
#include <string>
#include <vector>
#include <map>
struct {{ schema["title"] }}
{
{{ schema["title"] }}()
{
{%- for property_name, property_dict in schema["properties"].items() %}
PropertyMap["{{ property_dict["title"] }}"] = &{{ property_dict["title"] }};
{%- endfor %}
}
template<typename OutputStream>
void Write(rapidjson::Writer<OutputStream>& writer)
{
writer.StartObject();
{%- for property_name, property_dict in schema["properties"].items() %}
writer.Key("{{ property_dict["title"] }}");
{{ get_writer_code(property_dict) }}
{%- endfor %}
writer.EndObject();
}
{%- for property_name, property_dict in schema["properties"].items() %}
{{ get_property_type(property_dict) }} {{ property_dict["title"] }};
{%- endfor %}
bool operator==(const {{ schema["title"] }}& rhs) const
{
bool equals = true;
{%- for property_name, property_dict in schema["properties"].items() %}
equals = equals && {{ property_dict["title"] }} == rhs.{{ property_dict["title"] }};
{%- endfor %}
return equals;
}
std::map<std::string, void*> PropertyMap;
};
struct {{ schema["title"] }}Handler
{
{{ schema["title"] }}Handler( {{ schema["title"] }}* ParseObject)
{
Object = ParseObject;
}
template<typename T>
void WriteProperty(const T& Value)
{
T& Property = *reinterpret_cast<T*>(CurrentProperty);
Property = Value;
CurrentProperty = nullptr;
CurrentPropertyName = "";
}
template<typename T>
void WriteArray(const T& Value)
{
std::vector<T>& PropertyArray = *reinterpret_cast<std::vector<T>*>(CurrentProperty);
PropertyArray.push_back(Value);
}
template<typename T>
bool WriteType(const T& Value)
{
if(!CurrentProperty)
{
std::cerr << "WriteType no CurrentProperty" << std::endl; return true;
return false;
}
if(CurrentArray)
{
WriteArray(Value);
return true;
}
else
{
WriteProperty(Value);
return true;
}
return false;
}
bool Null() { std::cout << "Null()" << std::endl; return true; }
bool Bool(bool b)
{
return WriteType(b);
}
bool Int(int i)
{
return WriteType(i);
}
bool Uint(unsigned u)
{
return WriteType(u);
}
bool Int64(int64_t i)
{
return WriteType(i);
}
bool Uint64(uint64_t u)
{
return WriteType(u);
}
bool Double(double d)
{
return WriteType(d);
}
bool RawNumber(const char* str, rapidjson::SizeType length, bool copy)
{
std::cout << "Number(" << str << ", " << length << ", " << "boolalpha" << copy << ")" << std::endl;
return true;
}
bool String(const char* str, rapidjson::SizeType length, bool copy)
{
if(!CurrentProperty)
{
std::cerr << "String no CurrentProperty" << std::endl; return true;
return false;
}
if(CurrentArray)
{
            // the original shadowed the `str` parameter and initialized the
            // local from itself, which is undefined behavior
            std::string value(str, length);
            WriteArray(value);
return true;
}
else
{
std::string& PropertyString = *reinterpret_cast<std::string*>(CurrentProperty);
PropertyString = std::string(str, length);
CurrentProperty = nullptr;
CurrentPropertyName = "";
}
return true;
}
bool Key(const char* str, rapidjson::SizeType length, bool copy)
{
const auto it = Object->PropertyMap.find(str);
if(it != Object->PropertyMap.end())
{
CurrentProperty = it->second;
CurrentPropertyName = str;
return true;
}
else
{
std::cerr << "Key Property Not Found:" << str << std::endl; return true;
return false;
}
}
bool StartObject() { std::cout << "StartObject()" << std::endl; return true; }
bool EndObject(rapidjson::SizeType memberCount) { std::cout << "EndObject(" << memberCount << ")" << std::endl; return true; }
bool StartArray()
{
if(CurrentPropertyName.empty())
{
std::cerr << "StartArray Property " << CurrentPropertyName << "not found!" << std::endl;
return false;
}
const auto it = Object->PropertyMap.find(CurrentPropertyName);
if(it != Object->PropertyMap.end())
{
CurrentArray = it->second;
return true;
}
else
{
std::cerr << "StartArray Property " << CurrentPropertyName << "not found!" << std::endl;
return false;
}
}
bool EndArray(rapidjson::SizeType elementCount)
{
CurrentProperty = nullptr;
CurrentArray = nullptr;
return true;
}
{{ schema["title"] }}* Object = nullptr;
void* CurrentProperty = nullptr;
void* CurrentArray= nullptr;
std::string CurrentPropertyName;
};
"""
TEST = """
#include "Json{{ schema["title"] }}.h"
int main(int argc, char** argv)
{
{{ schema["title"] }} WriteObject;
{%- for property_name, property_dict in schema["properties"].items() %}
WriteObject.{{ property_dict["title"] }} = {{ get_random_property(property_dict) }};
{%- endfor %}
{{ schema["title"] }} ReadObject;
rapidjson::StringBuffer StringBuf;
rapidjson::Writer<rapidjson::StringBuffer> Writer(StringBuf);
WriteObject.Write(Writer);
{{ schema["title"] }}Handler Handler(&ReadObject);
rapidjson::Reader Reader;
rapidjson::StringStream StringStream(StringBuf.GetString());
Reader.Parse(StringStream, Handler);
bool Equals = WriteObject == ReadObject;
if(!Equals)
{
std::cerr << "Objects not equals." << std::endl;
return 1;
}
else
{
std::cout << "Objects are equals." << std::endl;
}
return 0;
}
"""
writer_function_map = {
"integer" : "Int",
"number" : "Double",
"boolean" : "Bool"
}
def get_writer_code(prop : dict, title = None):
type_name = prop["type"]
    if title is None: title = prop["title"]
if type_name in writer_function_map:
return "writer." + writer_function_map[type_name] + "(" + title + ");"
elif type_name == "string":
return "writer.String("+ prop["title"] + ".c_str());"
elif type_name == "array":
write_array = "writer.StartArray();\n"
write_array += " for( auto it = " + title + ".begin(); it != " + title + ".end(); ++it)\n"
write_array += " {\n"
write_array += " " + get_writer_code(prop["items"], "(*it)") + "\n"
write_array += " }\n"
write_array += " writer.EndArray(" + title + ".size());"
return write_array
return None
# types
basic_type_map = {
"integer" : "int32_t",
"string" : "std::string",
"number" : "double",
"boolean" : "bool"
}
def get_property_type(prop : dict):
type_name = prop["type"]
if type_name in basic_type_map:
return basic_type_map[type_name]
if type_name == "array":
return "std::vector<" + get_property_type(prop["items"]) + ">"
return "void"
# test methods
def random_string(len=10):
letters = string.ascii_lowercase
s = ''.join(random.choice(letters) for i in range(len))
return "\"" + s + "\""
def random_int():
return random.randint(0,1024)
def random_double():
return random.randint(0,1024)
def random_bool():
return random.choice(["true", "false"])
random_function_map = {
"integer" : random_int,
"string" : random_string,
"number" : random_double,
"boolean" : random_bool
}
def get_random_property(prop):
type_name = prop["type"]
if type_name in random_function_map:
return random_function_map[type_name]()
if type_name == "array":
array =[str(get_random_property(prop["items"])) for i in range(10)]
return "{" +",".join(array) + "}"
return "void"
templates = Environment(loader=DictLoader(globals()))
def generate_header(schema_class):
print(schema_class.schema_json())
template = templates.get_template("HEADER")
schema = json.loads(schema_class.schema_json())
rendered = template.render(
{ "schema" : schema,
"get_property_type" : get_property_type,
"get_writer_code" : get_writer_code,
}
)
header = open("Json"+schema["title"]+".h", "w+")
header.write(rendered)
header.close()
def generate_test(schema_class):
template = templates.get_template("TEST")
schema = json.loads(schema_class.schema_json())
rendered = template.render(
{ "schema" : schema,
"get_property_type" : get_property_type,
"get_random_property" : get_random_property
}
)
test = open("Json"+schema["title"]+"Test.cpp", "w+")
test.write(rendered)
test.close()
if __name__== "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--package", help="Package that needs to be loaded to access your type")
parser.add_argument("--typename", help="Name of the type to generate code from.")
args = parser.parse_args();
module = None
if args.package != None:
print("Loading %s" %(args.package))
module = importlib.import_module(args.package)
if args.typename != None:
generate_header(getattr(module,args.typename))
generate_test(getattr(module,args.typename))
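# Hedged usage sketch (added): assumes a pydantic-style class exposing
# schema_json(); the module and type names below are hypothetical.
#   # mymodels.py
#   #   from pydantic import BaseModel
#   #   class Point(BaseModel):
#   #       x: float
#   #       y: float
#   $ python generate.py --package mymodels --typename Point
#   # -> writes JsonPoint.h and JsonPointTest.cpp next to the script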
|
nilq/baby-python
|
python
|
def summary(p, c=10, x=5):
    print('-' * 30)
    print('Value Summary'.center(30))
    print('-' * 30)
    print(f'Analyzed price: \t{coins(p)}')
    print(f'Half price: \t{half(p, True)}')
    print(f'Double price: \t{double(p, True)}')
    print(f'{c}% increase: \t{increase(p, c, True)}')
    print(f'{x}% reduction: \t{reduction(p, x, True)}')
    print('-' * 30)
def increase(p=0, por=0, formato=False):
    # increases the price by the desired %
    """
    => Function that increases the price by the desired percentage
    :param p: original price
    :param por: desired percentage
    :param formato: formatting if desired
    :return: returns the price to the variable
    """
    p = ((p / 100) * por) + p
    return p if formato is False else coins(p)
def reduction(p = 0, por= 0, formato=False):
"""
=> Function that decreases the price by the desired percentage
:param p: Original price
    :param por: desired percentage
:param formato: formatting if desired
:return: returns the price to the variable
"""
p = p - ((p / 100) * por)
return p if not formato else coins(p)
# reduces the price by the desired %
def double(p = 0, formato=False):
"""
=> Function that doubles the price
:param p: Original price
:param formato: formatting if desired
:return: returns the price to the variable
"""
p = p * 2
return p if not formato else coins(p)
# doubles the price
def half(p = 0, formato=False):
"""
=> Function that cuts the price in half
:param p: Original price
:param formato: formatting if desired
:return: returns the price to the variable
"""
p = p / 2
# Half-Price
return p if formato is False else coins(p)
def coins(p = 0, moeda = 'R$'):
"""
=> Formatting function
:param p: Original price
:param moeda: currency
:return: returns the formatted price
"""
return f'{moeda}{p:>.2f}'.replace('.',',')
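if __name__ == '__main__':
    # Hedged demo (added): an arbitrary sample price.
    summary(100.0, c=10, x=5)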
|
nilq/baby-python
|
python
|
from ._sha512 import sha384
|
nilq/baby-python
|
python
|
from app import app, api
from flask import request
from flask_restful import Resource
import json
import pprint
import os
import subprocess
import traceback
import logging
class WelcomeController(Resource):
def get(self):
return {'welcome': "welcome, stranger!"}
api.add_resource(WelcomeController, '/')
|
nilq/baby-python
|
python
|
import os
from dotenv import dotenv_values
config = {
**dotenv_values(os.path.join(os.getcwd(), ".env")),
**os.environ
}
VERSION = "0.0.0-alfa"
APP_HOST = config['APP_HOST']
APP_PORT = int(config['APP_PORT'])
# bool() on any non-empty string is True, so parse the debug flag explicitly
APP_DEBUG = config['APP_DEBUG'].strip().lower() in ('1', 'true', 'yes')
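# Hedged example .env (added; values are placeholders):
#   APP_HOST=127.0.0.1
#   APP_PORT=8000
#   APP_DEBUG=true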
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
""" Views for the stats application. """
# standard library
# django
# models
from .models import Stat
# views
from base.views import BaseCreateView
from base.views import BaseDeleteView
from base.views import BaseDetailView
from base.views import BaseListView
from base.views import BaseUpdateView
# forms
from .forms import StatForm
class StatListView(BaseListView):
"""
View for displaying a list of stats.
"""
model = Stat
template_name = 'stats/list.pug'
permission_required = 'stats.view_stat'
class StatCreateView(BaseCreateView):
"""
A view for creating a single stat
"""
model = Stat
form_class = StatForm
template_name = 'stats/create.pug'
permission_required = 'stats.add_stat'
class StatDetailView(BaseDetailView):
"""
A view for displaying a single stat
"""
model = Stat
template_name = 'stats/detail.pug'
permission_required = 'stats.view_stat'
class StatUpdateView(BaseUpdateView):
"""
A view for editing a single stat
"""
model = Stat
form_class = StatForm
template_name = 'stats/update.pug'
permission_required = 'stats.change_stat'
class StatDeleteView(BaseDeleteView):
"""
A view for deleting a single stat
"""
model = Stat
permission_required = 'stats.delete_stat'
template_name = 'stats/delete.pug'
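# Hedged usage sketch (added): hypothetical URL wiring for these views.
#   from django.urls import path
#   urlpatterns = [
#       path('', StatListView.as_view(), name='stat_list'),
#       path('create/', StatCreateView.as_view(), name='stat_create'),
#       path('<int:pk>/', StatDetailView.as_view(), name='stat_detail'),
#   ]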
|
nilq/baby-python
|
python
|
"""Session class and utility functions used in conjunction with the session."""
from .session import Session
from .session_manager import SessionManager
__all__ = ["Session", "SessionManager"]
|
nilq/baby-python
|
python
|
''' 046 Write a program that shows an on-screen countdown for a fireworks launch, going from 10 down to 0,
with a 1-second pause between numbers '''
from time import sleep
for c in range(10, -1, -1):
print(c)
sleep(1)
print('Fireworks!!!!!')
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Shun Arahata
"""
Imitation learning environment
"""
import pathlib
# import cupy as xp
import sys
import numpy as xp
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append(str(current_dir) + '/../mpc')
sys.path.append(str(current_dir) + '/../')
from box_ddp import BoxDDP
from pendulum import PendulumDx
from chainer import functions as F
from util import QuadCost, chainer_diag
class IL_Env:
"""
    Imitation learning environment class
"""
def __init__(self, env, lqr_iter=500, mpc_T=20):
"""
:param env:
:param lqr_iter:
:param mpc_T:
"""
self.env = env
if self.env == 'pendulum':
self.true_dx = PendulumDx()
else:
assert False
self.lqr_iter = lqr_iter
self.mpc_T = mpc_T
self.train_data = None
self.val_data = None
self.test_data = None
@staticmethod
def sample_xinit(n_batch=1):
""" random sampling x_init
:param n_batch:
:return:
"""
def uniform(shape, low, high):
"""
:param shape:
:param low:
:param high:
:return:
"""
r = high - low
return xp.random.rand(shape) * r + low
th = uniform(n_batch, -(1 / 2) * xp.pi, (1 / 2) * xp.pi)
# th = uniform(n_batch, -xp.pi, xp.pi)
thdot = uniform(n_batch, -1., 1.)
xinit = xp.stack((xp.cos(th), xp.sin(th), thdot), axis=1)
return xinit
def populate_data(self, n_train, n_val, n_test, seed=0):
"""
:param n_train:
:param n_val:
:param n_test:
:param seed:
:return:
"""
xp.random.seed(seed)
n_data = n_train + n_val + n_test
xinit = self.sample_xinit(n_batch=n_data)
print(xinit.shape)
# for (1,0,0) into the dataset
'''
n_init_zero = int(n_train/4)
xinit[n_init_zero][0] = 1.0
xinit[n_init_zero][1] = 0.0
xinit[n_init_zero][2] = 0.0
'''
true_q, true_p = self.true_dx.get_true_obj()
# self.mpc defined later
true_x_mpc, true_u_mpc = self.mpc(self.true_dx, xinit, true_q, true_p, update_dynamics=True)
true_x_mpc = true_x_mpc.array
true_u_mpc = true_u_mpc.array
tau = xp.concatenate((true_x_mpc, true_u_mpc), axis=2)
tau = xp.transpose(tau, (1, 0, 2))
self.train_data = tau[:n_train]
self.val_data = tau[n_train:n_train + n_val]
self.test_data = tau[-n_test:]
def mpc(self, dx, xinit, q, p, u_init=None, eps_override=None,
lqr_iter_override=None, update_dynamics=False):
"""
:param dx:
:param xinit:
:param q:
:param p:
:param u_init:
:param eps_override:
:param lqr_iter_override:
:return:
"""
n_batch = xinit.shape[0]
n_sc = self.true_dx.n_state + self.true_dx.n_ctrl
Q = chainer_diag(q)
Q = F.expand_dims(Q, axis=0)
Q = F.expand_dims(Q, axis=0)
Q = F.repeat(Q, self.mpc_T, axis=0)
Q = F.repeat(Q, n_batch, axis=1)
p = F.expand_dims(p, axis=0)
p = F.expand_dims(p, axis=0)
p = F.repeat(p, self.mpc_T, axis=0)
p = F.repeat(p, n_batch, axis=1)
if eps_override:
eps = eps_override
else:
eps = self.true_dx.mpc_eps
if lqr_iter_override:
lqr_iter = lqr_iter_override
else:
lqr_iter = self.lqr_iter
assert len(Q.shape) == 4
assert len(p.shape) == 3
solver = BoxDDP(
T=self.mpc_T, u_lower=self.true_dx.lower, u_upper=self.true_dx.upper,
n_batch=n_batch, n_state=self.true_dx.n_state, n_ctrl=self.true_dx.n_ctrl,
u_init=u_init, eps=eps, max_iter=lqr_iter, verbose=False,
exit_unconverged=False, detach_unconverged=True,
line_search_decay=self.true_dx.linesearch_decay,
max_line_search_iter=self.true_dx.max_linesearch_iter,
update_dynamics=update_dynamics
)
x_mpc, u_mpc, objs_mpc = solver((xinit, QuadCost(Q, p), dx))
'''
g = c.build_computational_graph(u_mpc)
with open('graph.dot', 'w') as o:
o.write(g.dump())
assert False
'''
return x_mpc, u_mpc
def mpc_Q(self, dx, xinit, Q, p, u_init=None, eps_override=None,
lqr_iter_override=None, update_dynamics=False):
"""
:param dx:
:param xinit:
        :param Q:
:param p:
:param u_init:
:param eps_override:
:param lqr_iter_override:
:return:
"""
n_batch = xinit.shape[0]
n_sc = self.true_dx.n_state + self.true_dx.n_ctrl
Q = F.expand_dims(Q, axis=0)
Q = F.expand_dims(Q, axis=0)
Q = F.repeat(Q, self.mpc_T, axis=0)
Q = F.repeat(Q, n_batch, axis=1)
p = F.expand_dims(p, axis=0)
p = F.expand_dims(p, axis=0)
p = F.repeat(p, self.mpc_T, axis=0)
p = F.repeat(p, n_batch, axis=1)
if eps_override:
eps = eps_override
else:
eps = self.true_dx.mpc_eps
if lqr_iter_override:
lqr_iter = lqr_iter_override
else:
lqr_iter = self.lqr_iter
assert len(Q.shape) == 4
assert len(p.shape) == 3
solver = BoxDDP(
T=self.mpc_T, u_lower=self.true_dx.lower, u_upper=self.true_dx.upper,
n_batch=n_batch, n_state=self.true_dx.n_state, n_ctrl=self.true_dx.n_ctrl,
u_init=u_init, eps=eps, max_iter=lqr_iter, verbose=False,
exit_unconverged=False, detach_unconverged=True,
line_search_decay=self.true_dx.linesearch_decay,
max_line_search_iter=self.true_dx.max_linesearch_iter,
update_dynamics=update_dynamics
)
x_mpc, u_mpc, objs_mpc = solver((xinit, QuadCost(Q, p), dx))
'''
g = c.build_computational_graph(u_mpc)
with open('graph.dot', 'w') as o:
o.write(g.dump())
assert False
'''
return x_mpc, u_mpc
|
nilq/baby-python
|
python
|
# -*- Python -*-
# Copyright 2021 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bazel rule to wrap sh_test with a wrapper loading runfiles library prior to execution
"""
def sh_test_with_runfiles_lib(name, srcs, size, args, data, deps = []):
"""sh_test wrapper that loads bazel's runfiles library before calling the test.
This is necessary because on Windows, runfiles are not symlinked like on Unix and
are thus not available from the path returned by $(location Label). The runfiles
    library provides the rlocation function, which converts a runfile path (from $location)
    to the full path of the file.
Args:
name: sh_test's name
srcs: sh_test's srcs, must be an array of a single file
size: sh_test's size
args: sh_test's args
data: sh_test's data
deps: sh_test's deps
"""
if len(srcs) > 1:
fail("you must specify exactly one file in 'srcs'")
# Add the runfiles library to dependencies
if len(deps) == 0:
deps = ["@bazel_tools//tools/bash/runfiles"]
else:
deps.append("@bazel_tools//tools/bash/runfiles")
# Replace first arguments with location of the main script to run
# and add script to run to sh_test's data
args = ["$(location " + srcs[0] + ")"] + args
data += srcs
native.sh_test(
name = name,
srcs = ["//bazel:sh_test_with_runfiles_lib.sh"],
size = size,
args = args,
data = data,
deps = deps,
)
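# Hedged usage sketch (added): a hypothetical BUILD entry using this macro;
# the load path and targets are placeholders.
#   load("//bazel:sh_test_with_runfiles_lib.bzl", "sh_test_with_runfiles_lib")
#   sh_test_with_runfiles_lib(
#       name = "my_tool_test",
#       srcs = ["my_tool_test.sh"],
#       size = "small",
#       args = ["--flag"],
#       data = ["//:my_tool"],
#   )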
|
nilq/baby-python
|
python
|
# https://github.com/ArtemNikolaev/gb-hw/issues/23
def run(array):
return [
array[i]
for i in range(1, len(array))
if array[i] > array[i-1]
]
test_input = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
print(run(test_input))
|
nilq/baby-python
|
python
|
import glob
from hdf5_getters import *
import os
import numpy as np
from collections import Counter
from music_utils import *
tags_list = []
data_path = "/mnt/snap/data/"
count = 0
for root, dirs, files in os.walk(data_path):
files = glob.glob(os.path.join(root, '*h5'))
#if count > 1000: break
for f in files:
h5 = open_h5_file_read(f)
tags = get_artist_mbtags(h5).tolist()
tags_list += tags
#count += 1
h5.close()
print Counter(tags_list).most_common(100)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import time
from data_output import DataOutput
from html_downloader import HtmlDownloader
from html_parser import HtmlParser
__author__ = 'Aollio Hou'
__email__ = 'aollio@outlook.com'
class Spider:
def __init__(self):
self.downloader = HtmlDownloader()
self.parser = HtmlParser()
self.output = DataOutput()
def crawl(self, root_url):
content = self.downloader.download(root_url)
urls = self.parser.parse_url(root_url, content)
for url in urls:
try:
# http://service.library.mtime.com/Movie.api
# ?Ajax_CallBack=true
# &Ajax_CallBackType=Mtime.Library.Services
# &Ajax_CallBackMethod=GetMovieOverviewRating
# &Ajax_CrossDomain=1
# &Ajax_RequestUrl=http%3A%2F%2Fmovie.mtime.com%2F246526%2F&t=201710117174393728&Ajax_CallBackArgument0=246526
t = time.strftime('%Y%m%d%H%M%S3282', time.localtime())
rank_url = 'http://service.library.mtime.com/Movie.api' \
'?Ajax_CallBack=true' \
'&Ajax_CallBackType=Mtime.Library.Services' \
'&Ajax_CallBackMethod=GetMovieOverviewRating' \
'&Ajax_CrossDomain=1' \
'&Ajax_RequestUrl=%s' \
'&t=%s' \
'&Ajax_CallbackArgument0=%s' % (url[0].replace('://', '%3A%2F%2F')[:-1], t, url[1])
rank_content = self.downloader.download(rank_url)
if rank_content is None:
print('None')
data = self.parser.parse_json(rank_url, rank_content)
self.output.store_data(data)
except Exception as e:
raise e
# print(e)
# print('Crawl failed')
self.output.output_end()
print('Crawl finish')
def main():
spider = Spider()
spider.crawl('http://theater.mtime.com/China_Beijing/')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from django import forms
from .models import Project
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ["title", "describe", "technology"]
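# Hedged usage sketch (added): typical view-side handling of this form.
#   form = ProjectForm(request.POST or None)
#   if form.is_valid():
#       project = form.save()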
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""Terminal UI for histdata_downloader project."""
import os
import sys
import logging
import subprocess
from datetime import date
import time
import npyscreen
from histdata_downloader.logger import log_setup
from histdata_downloader.histdata_downloader import load_available_pairs
logger = logging.getLogger(__name__)
class TestApp(npyscreen.NPSAppManaged):
def onStart(self):
logger.debug("On start")
self.registerForm("MAIN", MainForm())
def onCleanExit(self):
logger.debug("onCleanExit called")
class MainForm(npyscreen.ActionFormV2):
def create(self):
logger.debug("main form method called.")
self.type = self.add(npyscreen.TitleSelectOne, name='type',
max_height=2, values=['M1', 'ticks'],
scroll_exit=True)
self.date_start = self.add(npyscreen.TitleDateCombo, name="Date start")
self.date_start.value = date(2019, 1, 1)
self.date_end = self.add(npyscreen.TitleDateCombo, name="Date end")
self.instruments = self.add(npyscreen.TitleMultiSelect,
name='instruments', max_height=5,
values=load_available_pairs(),
scroll_exit=True)
self.select_all = self.add(SelectAllButton,
name='select all', relx=20)
self.unselect_all = self.add(UnselectAllButton,
name='unselect all', relx=20)
self.output_path = self.add(npyscreen.TitleFilenameCombo,
name="Output path", label=True)
self.verbosity = self.add(npyscreen.TitleSelectOne, name='verbosity',
max_height=3, values=['DEBUG',
'INFO',
'WARNING'],
scroll_exit=True, value=1)
self.command = self.add(npyscreen.TitleFixedText, name="cmd",
editable=False,
value='histdata_downloader download')
        self.launch_button = self.add(LaunchButton, name='Run', relx=50)
self.log = self.add(Output, name='Output',
editable=True, scroll_exit=True,
values=['Waiting...'])
def while_editing(self, *args):
verb = self.selected_verbosity[0]
cmd = "histdata_downloader -v {} download".format(verb)
if self.type.value:
cmd += " -t %s " % self.selected_type[0]
if self.date_end.value:
cmd += " -ds {} -de {}".format(self.date_start.value,
self.date_end.value)
if self.output_path.value:
cmd += " -o {}".format(self.output_path.value)
if self.instruments.value:
sub_cmd = ' '.join(['-i %s' % i for i in self.selected_instruments])
cmd += ' ' + sub_cmd
self.command.value = cmd
self.command.update()
def afterEditing(self):
self.parentApp.setNextForm(None)
def return_as_config(self):
logger.debug('return_as_config method called.')
config = {'type' : self.type.values[self.type.value[0]],
'date_start': self.date_start.value,
'date_end': self.date_end.value,
'instruments': self.selected_instruments,
'output_path': self.output_path.value}
return config
@property
def selected_instruments(self):
name_field = lambda idx : self.instruments.values[idx]
return list(map(name_field, self.instruments.value))
@property
def selected_type(self):
name_field = lambda idx : self.type.values[idx]
return list(map(name_field, self.type.value))
@property
def selected_verbosity(self):
name_field = lambda idx : self.verbosity.values[idx]
return list(map(name_field, self.verbosity.value))
def perform(cmd, log):
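    # Note: this drains stdout completely before touching stderr; if the child
    # writes enough to stderr while stdout is still being read, the pipe buffer
    # can fill and the child will block.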
with subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
for line in iter(proc.stdout.readline, b''):
log.values.append(line.decode('ascii'))
log.display()
for line in iter(proc.stderr.readline, b''):
log.values.append(line.decode('ascii'))
log.display()
class LaunchButton(npyscreen.ButtonPress):
def whenPressed(self):
self.parent.log.values = ['Executing %s.' % self.parent.command.value]
self.parent.log.display()
perform(self.parent.command.value, self.parent.log)
class SelectAllButton(npyscreen.ButtonPress):
def whenPressed(self):
instr = self.parent.instruments
instr.value = [x for x in range(len(instr.values))]
instr.display()
class UnselectAllButton(npyscreen.ButtonPress):
def whenPressed(self):
instr = self.parent.instruments
instr.value = []
        instr.display()
class Output(npyscreen.BoxTitle):
_contained_widget = npyscreen.MultiLine
if __name__ == "__main__":
App = TestApp()
App.run()
|
nilq/baby-python
|
python
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Jython example AMF server and client with Swing interface.
@see: U{Jython<http://pyamf.org/wiki/JythonExample>} wiki page.
@since: 0.5
"""
import logging
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from pyamf.remoting.gateway.wsgi import WSGIGateway
from pyamf.remoting.client import RemotingService
import java.lang as lang
import javax.swing as swing
import java.awt as awt
class AppGUI(object):
"""
Swing graphical user interface.
"""
def __init__(self, title, host, port, service):
# create window
win = swing.JFrame(title, size=(800, 480))
win.setDefaultCloseOperation(swing.JFrame.EXIT_ON_CLOSE)
win.contentPane.layout = awt.BorderLayout(10, 10)
# add scrollable textfield
status = swing.JTextPane(preferredSize=(780, 400))
status.setAutoscrolls(True)
status.setEditable(False)
status.setBorder(swing.BorderFactory.createEmptyBorder(20, 20, 20, 20))
paneScrollPane = swing.JScrollPane(status)
paneScrollPane.setVerticalScrollBarPolicy(
swing.JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED)
win.contentPane.add(paneScrollPane, awt.BorderLayout.CENTER)
# add server button
self.started = "Start Server"
self.stopped = "Stop Server"
self.serverButton = swing.JButton(self.started, preferredSize=(150, 20),
actionPerformed=self.controlServer)
# add client button
self.clientButton = swing.JButton("Invoke Method", preferredSize=(150, 20),
actionPerformed=self.runClient)
self.clientButton.enabled = False
# position buttons
buttonPane = swing.JPanel()
buttonPane.setLayout(swing.BoxLayout(buttonPane, swing.BoxLayout.X_AXIS))
buttonPane.setBorder(swing.BorderFactory.createEmptyBorder(0, 10, 10, 10))
buttonPane.add(swing.Box.createHorizontalGlue())
buttonPane.add(self.serverButton)
buttonPane.add(swing.Box.createRigidArea(awt.Dimension(10, 0)))
buttonPane.add(self.clientButton)
win.contentPane.add(buttonPane, awt.BorderLayout.SOUTH)
# add handler that writes log messages to the status textfield
txtHandler = TextFieldLogger(status)
logger = logging.getLogger("")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
txtHandler.setFormatter(formatter)
logger.addHandler(txtHandler)
# setup server
self.service_name = service
self.url = "http://%s:%d" % (host, port)
self.server = ThreadedAmfServer(host, port, self.service_name)
# center and display window on the screen
win.pack()
us = win.getSize()
them = awt.Toolkit.getDefaultToolkit().getScreenSize()
newX = (them.width - us.width) / 2
newY = (them.height - us.height) / 2
win.setLocation(newX, newY)
win.show()
def controlServer(self, event):
"""
Handler for server button clicks.
"""
if event.source.text == self.started:
logging.info("Created AMF gateway at %s" % self.url)
event.source.text = self.stopped
self.clientButton.enabled = True
self.server.start()
else:
logging.info("Terminated AMF gateway at %s\n" % self.url)
event.source.text = self.started
self.clientButton.enabled = False
self.server.stop()
def runClient(self, event):
"""
Invoke a method on the server using an AMF client.
"""
self.client = ThreadedAmfClient(self.url, self.service_name)
self.client.invokeMethod("Hello World!")
class ThreadedAmfClient(object):
"""
Threaded AMF client that doesn't block the Swing GUI.
"""
def __init__(self, url, serviceName):
self.gateway = RemotingService(url, logger=logging)
self.service = self.gateway.getService(serviceName)
def invokeMethod(self, param):
"""
Invoke a method on the AMF server.
"""
class ClientThread(lang.Runnable):
"""
Create a thread for the client.
"""
def run(this):
try:
self.service(param)
except lang.InterruptedException:
return
swing.SwingUtilities.invokeLater(ClientThread())
class ThreadedAmfServer(object):
"""
Threaded WSGI server that doesn't block the Swing GUI.
"""
def __init__(self, host, port, serviceName):
services = {serviceName: self.echo}
gw = WSGIGateway(services, logger=logging)
self.httpd = WSGIServer((host, port),
ServerRequestLogger)
self.httpd.set_app(gw)
def start(self):
"""
Start the server.
"""
class WSGIThread(lang.Runnable):
"""
Create a thread for the server.
"""
def run(this):
try:
self.httpd.serve_forever()
except lang.InterruptedException:
return
self.thread = lang.Thread(WSGIThread())
self.thread.start()
def stop(self):
"""
Stop the server.
"""
self.thread = None
def echo(self, data):
"""
Just return data back to the client.
"""
return data
class ServerRequestLogger(WSGIRequestHandler):
"""
Request handler that logs WSGI server messages.
"""
def log_message(self, format, *args):
"""
Log message with debug level.
"""
logging.debug("%s - %s" % (self.address_string(), format % args))
class TextFieldLogger(logging.Handler):
"""
Logging handler that displays PyAMF log messages in the status text field.
"""
def __init__(self, textfield, *args, **kwargs):
self.status = textfield
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
msg = '%s\n' % self.format(record)
doc = self.status.getStyledDocument()
doc.insertString(doc.getLength(), msg, doc.getStyle('regular'))
self.status.setCaretPosition(self.status.getStyledDocument().getLength())
host = "localhost"
port = 8000
service_name = "echo"
title = "PyAMF server/client using Jython with Swing"
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--port", default=port,
dest="port", help="port number [default: %default]")
parser.add_option("--host", default=host,
dest="host", help="host address [default: %default]")
(opt, args) = parser.parse_args()
app = AppGUI(title, opt.host, int(opt.port), service_name)
|
nilq/baby-python
|
python
|
from autodisc.systems.lenia.classifierstatistics import LeniaClassifierStatistics
from autodisc.systems.lenia.isleniaanimalclassifier import IsLeniaAnimalClassifier
from autodisc.systems.lenia.lenia import *
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (c) 2017 Anders Steen Christensen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import os
import numpy as np
import qml
import qml.data
from qml.ml.kernels import laplacian_kernel
from qml.ml.math import cho_solve
from qml.ml.representations import get_slatm_mbtypes
from qml.ml.kernels import get_local_kernels_gaussian
from qml.ml.kernels import get_local_kernels_laplacian
def get_energies(filename):
""" Returns a dictionary with heats of formation for each xyz-file.
"""
f = open(filename, "r")
lines = f.readlines()
f.close()
energies = dict()
for line in lines:
tokens = line.split()
xyz_name = tokens[0]
hof = float(tokens[1])
energies[xyz_name] = hof
return energies
def test_krr_gaussian_local_cmat():
test_dir = os.path.dirname(os.path.realpath(__file__))
# Parse file containing PBE0/def2-TZVP heats of formation and xyz filenames
data = get_energies(test_dir + "/data/hof_qm7.txt")
# Generate a list of qml.data.Compound() objects"
mols = []
for xyz_file in sorted(data.keys())[:1000]:
# Initialize the qml.data.Compound() objects
mol = qml.data.Compound(xyz=test_dir + "/qm7/" + xyz_file)
# Associate a property (heat of formation) with the object
mol.properties = data[xyz_file]
# This is a Molecular Coulomb matrix sorted by row norm
mol.generate_atomic_coulomb_matrix(size=23, sorting="row-norm")
mols.append(mol)
# Shuffle molecules
np.random.seed(666)
np.random.shuffle(mols)
# Make training and test sets
n_test = 100
n_train = 200
training = mols[:n_train]
test = mols[-n_test:]
X = np.concatenate([mol.representation for mol in training])
Xs = np.concatenate([mol.representation for mol in test])
N = np.array([mol.natoms for mol in training])
Ns = np.array([mol.natoms for mol in test])
# List of properties
Y = np.array([mol.properties for mol in training])
Ys = np.array([mol.properties for mol in test])
# Set hyper-parameters
sigma = 724.0
llambda = 10**(-6.5)
K = get_local_kernels_gaussian(X, X, N, N, [sigma])[0]
assert np.allclose(K, K.T), "Error in local Gaussian kernel symmetry"
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# K_test = np.loadtxt(test_dir + "/data/K_local_gaussian.txt")
# assert np.allclose(K, K_test), "Error in local Gaussian kernel (vs. reference)"
# Solve alpha
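    # Kernel ridge regression closed form: alpha = (K + llambda*I)^-1 Y,
    # computed via the Cholesky-based solver below.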
K[np.diag_indices_from(K)] += llambda
alpha = cho_solve(K,Y)
# Calculate prediction kernel
Ks = get_local_kernels_gaussian(Xs, X, Ns, N, [sigma])[0]
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# Ks_test = np.loadtxt(test_dir + "/data/Ks_local_gaussian.txt")
# assert np.allclose(Ks, Ks_test), "Error in local Gaussian kernel (vs. reference)"
Yss = np.dot(Ks, alpha)
mae = np.mean(np.abs(Ys - Yss))
print(mae)
assert abs(19.0 - mae) < 1.0, "Error in local Gaussian kernel-ridge regression"
def test_krr_laplacian_local_cmat():
test_dir = os.path.dirname(os.path.realpath(__file__))
# Parse file containing PBE0/def2-TZVP heats of formation and xyz filenames
data = get_energies(test_dir + "/data/hof_qm7.txt")
# Generate a list of qml.data.Compound() objects"
mols = []
for xyz_file in sorted(data.keys())[:1000]:
# Initialize the qml.data.Compound() objects
mol = qml.data.Compound(xyz=test_dir + "/qm7/" + xyz_file)
# Associate a property (heat of formation) with the object
mol.properties = data[xyz_file]
# This is a Molecular Coulomb matrix sorted by row norm
mol.generate_atomic_coulomb_matrix(size=23, sorting="row-norm")
mols.append(mol)
# Shuffle molecules
np.random.seed(666)
np.random.shuffle(mols)
# Make training and test sets
n_test = 100
n_train = 200
training = mols[:n_train]
test = mols[-n_test:]
X = np.concatenate([mol.representation for mol in training])
Xs = np.concatenate([mol.representation for mol in test])
N = np.array([mol.natoms for mol in training])
Ns = np.array([mol.natoms for mol in test])
# List of properties
Y = np.array([mol.properties for mol in training])
Ys = np.array([mol.properties for mol in test])
# Set hyper-parameters
sigma = 10**(3.6)
llambda = 10**(-12.0)
K = get_local_kernels_laplacian(X, X, N, N, [sigma])[0]
assert np.allclose(K, K.T), "Error in local Laplacian kernel symmetry"
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# K_test = np.loadtxt(test_dir + "/data/K_local_laplacian.txt")
# assert np.allclose(K, K_test), "Error in local Laplacian kernel (vs. reference)"
# Solve alpha
K[np.diag_indices_from(K)] += llambda
alpha = cho_solve(K,Y)
# Calculate prediction kernel
Ks = get_local_kernels_laplacian(Xs, X, Ns, N, [sigma])[0]
    # Test below will sometimes fail, since sorting occasionally differs due to close row-norms
# Ks_test = np.loadtxt(test_dir + "/data/Ks_local_laplacian.txt")
# assert np.allclose(Ks, Ks_test), "Error in local Laplacian kernel (vs. reference)"
Yss = np.dot(Ks, alpha)
mae = np.mean(np.abs(Ys - Yss))
assert abs(8.7 - mae) < 1.0, "Error in local Laplacian kernel-ridge regression"
if __name__ == "__main__":
test_krr_gaussian_local_cmat()
test_krr_laplacian_local_cmat()
|
nilq/baby-python
|
python
|
import dash, os, itertools, flask
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from pandas_datareader import data as web
from datetime import datetime as dt
import plotly.graph_objs as go
import pandas as pd
from random import randint
import plotly.plotly as py
server = flask.Flask(__name__)
server.secret_key = os.environ.get('secret_key', 'secret')
app = dash.Dash(name = __name__, server = server)
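# NOTE: early Dash releases really did spell this config attribute "supress";
# later versions renamed it to suppress_callback_exceptions.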
app.config.supress_callback_exceptions = True
#Data variables
cli = pd.read_pickle('Climate_full.p')
models_list = ['GFDL-CM3', 'GISS-E2-R', 'NCAR-CCSM4', 'IPSL-CM5A-LR','MRI-CGCM3']
web = 'https://www.snap.uaf.edu/webshared/jschroder/db/CSV/'
metrics = [ 'avg_fire_size','number_of_fires','total_area_burned']
# Function updating plot #1 => the Alfresco plot
def get_data( models , scenarios, metric, domain, cumsum ) :
metric = str(metric)
domain = str(domain)
def _get_metric_cumsum(lnk , cumsum ):
#Extract, average and cumsum the raw data to a dataframe
_df = pd.read_csv(lnk, index_col=0)
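        # .ix indexing only exists in older pandas releases; newer code would use .loc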
_df = _df.ix[2006:].mean(axis = 1)
if 'cumsum' in cumsum :
_df = _df.cumsum(axis=0)
else : pass
return pd.Series.to_frame(_df)
    # Build the models' full names and the links to the CSVs <= TODO: build a decent database, but this will do for now
selection = [a[0]+ '_' + a[1] for a in itertools.product(models,scenarios)]
if type(selection) is str : selection = [selection]
rmt = [os.path.join(web, metric, "_".join(['alfresco', metric.replace('_',''), domain.title(), model, '1902_2100.csv' ])) for model in selection]
#Extract dataframe and concat them together
df_list = [_get_metric_cumsum(lnk , cumsum) for lnk in rmt]
df = pd.concat(df_list,axis=1)
df.columns=selection
return df
#Functions used to update #2 and #3 with climate data
def get_cli_data(models, scenarios, dictionary):
date = pd.date_range('2006','2101',freq='A-DEC')
def _get_climate_annual(_df) :
_df = _df[(_df.index.month >= 3 ) & (_df.index.month <= 9 )]
_df1 = _df.resample("A-DEC").mean()["Boreal"]
_df2 = pd.DataFrame(['NaN'] * len(date),date)
_df3 = pd.concat([_df1 , _df2],axis=1)["Boreal"]
return pd.Series.to_frame(_df3)
#Build the full models name and extract the dataframe
selection = [a[0]+ '_' + a[1] for a in itertools.product(models,scenarios)]
if type(selection) is str : selection = [selection]
    df_list = [_get_climate_annual(dictionary[model]) for model in selection]
df = pd.concat(df_list,axis=1)
df.columns=selection
return df
app.css.append_css({'external_url': 'https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css'}) # noqa: E501
app.layout = html.Div(
[
html.Div(
[
html.H1(
'ALFRESCO Post Processing Outputs',
className='eight columns',
),
html.Img(
src="https://www.snap.uaf.edu/sites/all/themes/snap_bootstrap/logo.png",
className='one columns',
style={
'height': '80',
'width': '225',
'float': 'right',
'position': 'relative',
},
),
],
className='row'
),
html.Div(
[
html.Div(
[
html.P('Scenarios Selection :'),
dcc.Dropdown(
id='rcp',
options=[
{'label': 'RCP 45 ', 'value': 'rcp45'},
{'label': 'RCP 60 ', 'value': 'rcp60'},
{'label': 'RCP 85 ', 'value': 'rcp85'}
],
multi=True,
value=[]
),
html.P('Models Selection :'),
dcc.Dropdown(
id='model',
options=[{'label': a , 'value' : a} for a in models_list],
multi=True,
value=[]
),
dcc.Checklist(
id='cumsum',
options=[
{'label': 'Cumulative Sum', 'value': 'cumsum'}
],
values=[],
)
],
className='six columns'
),
html.Div(
[
html.P('Metric Selection:'),
dcc.Dropdown(
id='metric',
options=[{'label': a.replace('_',' ').title() , 'value' : a} for a in metrics],
value=None
),
html.P('Domains Selection :'),
dcc.Dropdown(
id='domains',
options=[
{'label': 'Boreal', 'value': 'boreal'},
{'label': 'Tundra', 'value': 'tundra'}
],
value=None
),
],
className='six columns'
),
],
className='row'
),
html.Div(
[
html.Div(
[
dcc.Graph(id='ALF')
],
className='eleven columns'
),
],
),
html.Div(
[
html.Div(
[
dcc.Graph(id='climate_tas')
],
className='eleven columns'
),
],
),
html.Div(
[
html.Div(
[
dcc.Graph(id='climate_pr')
],
className='eleven columns'
),
],
),
],
className='ten columns offset-by-one'
)
@app.callback(
Output('ALF', 'figure'),
[Input('model', 'value'),
Input('rcp', 'value'),
Input('metric', 'value'),
Input('domains', 'value'),
Input('cumsum', 'values')]
)
def update_graph(models, rcp, met_value, domain, cumsum):
if (len(models) > 0 and len(rcp) > 0 and domain is not None and met_value is not None):
df = get_data(models, rcp, met_value, domain, cumsum)
if str(met_value) in ['total_area_burned','avg_fire_size'] :
label = 'Area (km\u00b2)'
else : label = 'Number of fires'
return {
'data': [{
'x': df.index,
'y': df[col],
'name':col,
} for col in df.columns],
'layout' : go.Layout(
height=300,
margin= {'t': 20,'b':30 },
xaxis = {
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
yaxis = {
'title' : label,
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
showlegend=False)
}
@app.callback(
Output('climate_tas', 'figure'),
[Input('model', 'value'),
Input('rcp', 'value')
])
def update_tas(models, rcp):
if (len(models) > 0 and len(rcp) > 0):
df = get_cli_data(models, rcp, cli['tas'])
return {
'data': [{
'x': df.index,
'y': df[col],
'name':col,
} for col in df.columns],
'layout' : go.Layout(
height=200,
margin= {'t': 20,'b':30 },
xaxis = {
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
yaxis = {
'title' : "Temperature (\xb0C)",
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
showlegend=False)
}
@app.callback(
Output('climate_pr', 'figure'),
[Input('model', 'value'),
Input('rcp', 'value')
])
def update_pr(models, rcp):
if (len(models) > 0 and len(rcp) > 0):
df = get_cli_data(models, rcp, cli['pr'])
return {
'data': [{
'x': df.index,
'y': df[col],
'name':col
}
for col in df.columns],
'layout' : go.Layout(
height=200,
margin= {'t': 20,'b':30 },
xaxis = {
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False,
'zerolinewidth' : 0
},
yaxis = {
'title' : 'Precipitation (mm)',
'ticks' : 'outside',
'ticklen' : 5,
'showgrid' : False,
'linewidth' : 1,
'zeroline' : False
},
showlegend=False)
}
# Run the Dash app
if __name__ == '__main__':
app.server.run()
|
nilq/baby-python
|
python
|
import tanjun
import typing
from hikari import Embed
from modules import package_fetcher
component = tanjun.Component()
@component.with_command
@tanjun.with_argument("repo_n", default="main")
@tanjun.with_argument("arch_n", default="aarch64")
@tanjun.with_argument("pkg_n", default=None)
@tanjun.with_parser
@tanjun.as_message_command("pkg", "apt")
async def pkg_msg(ctx: tanjun.abc.MessageContext, pkg_n: str, arch_n: str, repo_n: str) -> None:
if repo_n not in ["main", "root", "x11"] or arch_n not in ["aarch64", "arm", "i686", "x86_64"]:
await ctx.respond(embed=Embed(
description="the Arch or Repo name are Wrong!",
color="#ff0000"
))
return
await pkg(ctx, pkg_n, arch_n, repo_n)
@component.with_slash_command
@tanjun.with_str_slash_option("repo_name", "The repo name", choices=["main", "root", "x11"], default="main")
@tanjun.with_str_slash_option("arch", "The arch name", choices=["aarch64", "arm", "i686", "x86_64"], default="aarch64")
@tanjun.with_str_slash_option("package_name", "The package name", default=None)
@tanjun.as_slash_command("pkg", "show package details")
async def pkg_slash(ctx: tanjun.abc.SlashContext, package_name: typing.Optional[str], arch: typing.Optional[str], repo_name: typing.Optional[str]) -> None:
await pkg(ctx, package_name, arch, repo_name)
async def pkg(ctx: tanjun.abc.Context, pkg_n, arch_n, repo_n) -> None:
if pkg_n:
await ctx.respond(embed=Embed(
description="Connecting to the repository...",
color="#ffff00"
))
r = package_fetcher.fetch(arch_n, repo_n)
    # Truncation helper: clip x to y characters, appending "..." when clipped.
    ct = lambda x, y: x[:y-3] + "..." if len(x) > y else x
if not r:
await ctx.edit_last_response(embed=Embed(
description="Failed to connect to the repository!",
color="#ff0000"
))
elif pkg_n in r and pkg_n != "_host":
pkg_embed = Embed(color="#00ff00")
pkg_embed.add_field(name="Package name:", value=r[pkg_n]["Package"])
pkg_embed.add_field(name="Description:", value=ct(r[pkg_n]["Description"], 500))
pkg_embed.add_field(name="Version:", value=ct(r[pkg_n]["Version"], 200))
if "Depends" in r[pkg_n]:
pkg_embed.add_field(name="Dependencies:", value=ct(", ".join(f"`{x}`" for x in r[pkg_n]["Depends"].split(", ")), 2500))
pkg_embed.add_field(name="Size:", value=f"{int(r[pkg_n]['Size'])/1024/1024:.2f} MB")
pkg_embed.add_field(name="Maintainer:", value=ct(r[pkg_n]["Maintainer"], 300))
pkg_embed.add_field(name="Installation:", value=f"```\napt install {r[pkg_n]['Package']}\n```")
pkg_embed.add_field(name="Links:", value=f"[Homepage]({r[pkg_n]['Homepage']}) | [Download .deb]({r['_host']['url']}/{r[pkg_n]['Filename']})")
pkg_embed.set_footer(text=f"Connected to {r['_host']['host_name']}")
await ctx.edit_last_response(embed=pkg_embed)
else:
await ctx.edit_last_response(embed=Embed(
description=f"Unable to locate package `{pkg_n}`",
color="#ff0000"
))
else:
await ctx.respond(embed=Embed(
description="Please enter the package name!",
color="#ff0000"
))
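# make_loader() returns the loader callable tanjun uses to attach this
# component when the module is loaded into a client.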
load_command = component.make_loader()
|
nilq/baby-python
|
python
|
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import unittest
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
import fastestimator as fe
from fastestimator.test.unittest_util import check_img_similar, fig_to_rgb_array, img_to_rgb_array
class TestShowImage(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.color_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_color.png")))
cls.hw_ratio_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_height_width.png")))
cls.bb_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_bounding_box.png")))
cls.mixed_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_mixed.png")))
cls.text_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_text.png")))
cls.title_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_title.png")))
cls.float_img_ans = img_to_rgb_array(
os.path.abspath(os.path.join(__file__, "..", "resources", "test_show_image_check_float.png")))
def setUp(self) -> None:
self.old_backend = matplotlib.get_backend()
matplotlib.use("Agg")
def tearDown(self) -> None:
matplotlib.use(self.old_backend)
def test_show_image_color_np(self):
img = np.zeros((90, 90, 3), dtype=np.uint8)
img[:, 0:30, :] = np.array([255, 0, 0])
img[:, 30:60, :] = np.array([0, 255, 0])
img[:, 60:90, :] = np.array([0, 0, 255])
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
# Now we can save it to a numpy array.
obj1 = fig_to_rgb_array(fig)
obj2 = self.color_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_color_torch(self):
img = np.zeros((90, 90, 3), dtype=np.uint8)
img[:, 0:30, :] = np.array([255, 0, 0])
img[:, 30:60, :] = np.array([0, 255, 0])
img[:, 60:90, :] = np.array([0, 0, 255])
img = torch.from_numpy(img.transpose((2, 0, 1)))
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.color_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_color_tf(self):
img = np.zeros((90, 90, 3), dtype=np.uint8)
img[:, 0:30, :] = np.array([255, 0, 0])
img[:, 30:60, :] = np.array([0, 255, 0])
img[:, 60:90, :] = np.array([0, 0, 255])
img = tf.convert_to_tensor(img)
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.color_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_check_float_0_to_1_np(self):
img = np.zeros((256, 256, 3), dtype=np.float32)
for x in range(256):
img[x, :, :] = x / 255
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.float_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_check_float_neg_1_to_1_np(self):
img = np.zeros((256, 256, 3), dtype=np.float32)
for x in range(256):
img[x, :, :] = (x - 127.5) / 127.5
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.float_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_color_arbitrary_range_np(self):
img = np.zeros((256, 256, 3), dtype=np.float32)
for x in range(256):
img[x, :, :] = x * 0.2
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.float_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_height_width_np(self):
img = np.zeros((150, 100))
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.hw_ratio_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_text_np(self):
text = "apple"
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(text, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.text_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_bounding_box_np(self):
bg_img = np.zeros((150, 150))
boxes = np.array([[0, 0, 10, 20, "apple"], [10, 20, 30, 50, "dog"], [40, 70, 200, 200, "cat"],
[0, 0, 0, 0, "shouldn't shown"], [0, 0, -50, -30, "shouldn't shown2"]])
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(bg_img, fig=fig, axis=axis)
fe.util.show_image(boxes, fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.bb_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_mixed_figure_layer_np(self):
bg_img = np.ones((150, 150, 3), dtype=np.uint8) * 255
boxes = np.array([[0, 0, 10, 20], [10, 20, 30, 50], [40, 70, 200, 200]])
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(bg_img, fig=fig, axis=axis)
fe.util.show_image(boxes, fig=fig, axis=axis)
fe.util.show_image("apple", fig=fig, axis=axis)
obj1 = fig_to_rgb_array(fig)
obj2 = self.mixed_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
def test_show_image_title_np(self):
img = np.ones((150, 150), dtype=np.uint8) * 255
fig, axis = plt.subplots(1, 1, figsize=(6.4, 4.8))
fe.util.show_image(img, fig=fig, axis=axis, title="test title")
obj1 = fig_to_rgb_array(fig)
obj2 = self.title_img_ans
self.assertTrue(check_img_similar(obj1, obj2))
|
nilq/baby-python
|
python
|
import data
from base import nbprint
from tokenizer.main import run_tokenizer
def check_requirements(info):
# Check if tokens file exists
if not data.tokenized_document_exists(info):
# Run Tokenizer
nbprint('Tokens missing.')
run_tokenizer(info)
        # Check if it was successful
return data.tokenized_document_exists(info)
return True
class VocabItem:
def __init__(self, token, total = 0, document = 0):
self.token = token
self.total = total
self.document = document
def increase_total(self, count = 1):
self.total += count
def increase_document(self, count = 1):
self.document += count
class Vectorizer:
def __init__(self, info):
self.info = info
def build_vocab(self):
self.counts = []
def get_vocab(self):
return [{'id': id, 'token': vi.token, 'total': vi.total, 'document': vi.document}
for id, vi in enumerate(self.counts)]
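
# Minimal usage sketch of the counters above (illustrative values, not part of
# the pipeline):
#
# vi = VocabItem("apple")
# vi.increase_total(3)    # token seen three more times overall
# vi.increase_document()  # token seen in one more document
# assert (vi.total, vi.document) == (3, 1)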
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from robotender_flexbe_behaviors.multiple_cups_pour_behavior_using_containers_sm import multiplecupspourbehaviorusingcontainersSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Nov 02 2017
@author: Davis Catherman, Shannon Enders
'''
class multiplecuponloopSM(Behavior):
'''
loooped
'''
def __init__(self):
super(multiplecuponloopSM, self).__init__()
self.name = 'multiple cup on loop'
# parameters of this behavior
# references to used behaviors
self.add_behavior(multiplecupspourbehaviorusingcontainersSM, 'multiple cups pour behavior using containers')
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        # x:947 y:100, x:618 y:382
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        # Userdata is defined here, after the state machine exists; assigning it
        # in __init__ would raise a NameError because _state_machine is local to create().
        _state_machine.userdata.joint_names = ["m1n6s200_joint_1", "m1n6s200_joint_2", "m1n6s200_joint_3", "m1n6s200_joint_4", "m1n6s200_joint_5", "m1n6s200_joint_6"]
        _state_machine.userdata.center_values = [4.825370393837993, 4.804768712277358, 1.7884682005958692, 2.781744729201632, 1.7624776125694588, 2.5668808924540394]
        _state_machine.userdata.prep_pour_to_left = [4.8484381625680415, 4.172889801498073, 1.372345285529353, 3.0126762157540004, 1.4690217615247554, 2.627620406383804]
        _state_machine.userdata.pour_to_left = [4.610045297589599, 4.293199701639057, 1.419019181003809, 3.012844793851002, 1.4674078859041673, 4.845438377916176]
        _state_machine.userdata.post_pour_to_left = [4.8484381625680415, 4.172889801498073, 1.372345285529353, 3.0126762157540004, 1.4690217615247554, 2.627620406383804]
        _state_machine.userdata.left_values = [4.501794723496712, 4.784133474886988, 1.6909002314255626, 2.766800400744653, 1.8037183931040444, 2.543646143523643]
        _state_machine.userdata.prep_pour_to_center = [4.4696588912549435, 4.2865780179046835, 1.371823705429861, 2.7555946178259263, 1.6906042210704002, 2.5960829864389763]
        _state_machine.userdata.pour_to_center = [4.700331784865464, 4.265325726089742, 1.4461706409493849, 2.7535296027166787, 1.4171899888090882, 0.5029200288136196]
        _state_machine.userdata.post_pour_to_center = [4.4696588912549435, 4.2865780179046835, 1.371823705429861, 2.7555946178259263, 1.6906042210704002, 2.5960829864389763]
        _state_machine.userdata.OPEN = [0,0]
        _state_machine.userdata.CLOSE = [5000,5000]
        _state_machine.userdata.pre_grab_left = [4.616985495390345, 4.361768642857545, 0.8309522662125534, 2.772490244413607, 1.7511775537481435, 2.6507113446153356]
        _state_machine.userdata.back_off_center = [4.8380550301100405, 4.49428940291265, 1.2147491327564424, 2.784340512316133, 1.7494544885228622, 2.530367888644617]

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]
with _state_machine:
# x:212 y:48
OperatableStateMachine.add('multiple cups pour behavior using containers',
self.use_behavior(multiplecupspourbehaviorusingcontainersSM, 'multiple cups pour behavior using containers'),
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
nilq/baby-python
|
python
|
import numpy as np
a = input("enter the matrix with ; after each row : ")
m =np.matrix(a)
b = input("enter the matrix 2 with row matching with matrix 1 : ")
n =np.matrix(b)
print(m)
print(n)
m3 = np.dot(m,n)
print(m3)
|
nilq/baby-python
|
python
|
#//////////////#####///////////////
#
# ANU u6325688 Yangyang Xu
# Supervisor: Dr.Penny Kyburz
# The SPP used in this script adopts some methods from:
# https://github.com/yueruchen/sppnet-pytorch/blob/master/cnn_with_spp.py
#//////////////#####///////////////
"""
Policy Generator
"""
import torch.nn as nn
from GAIL.SPP import SPP
from commons.DataInfo import DataInfo
from torch.distributions import Categorical
import torch.nn.functional as F
import torch
from torch.distributions import Normal, Beta
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Generator(nn.Module):
def __init__(self, datainfo:DataInfo):
super(Generator, self).__init__()
self.inChannel = datainfo.generatorIn #state space size
self.outChannel = datainfo.generatorOut #action space size
self.maxAction = datainfo.maxAction
self.criticScore = 0
        self.hidden = torch.nn.Linear(self.inChannel, self.inChannel*2)
        self.out = torch.nn.Linear(self.inChannel*2, self.outChannel)
        # Submodules are defined once here: building layers inside forward()
        # would re-initialize their weights on every call and hide them from
        # the optimizer.
        self.softmax = nn.Softmax(dim=1)
        self.criticFC = nn.Linear(self.inChannel*2, 1)  # critic head over the hidden features

    def forward(self, input):
        mid = self.hidden(input)
        hOut = torch.sigmoid(mid)  # F.sigmoid is deprecated
        out = self.out(hOut)
        # Critic's score from the shared hidden representation
        self.criticScore = self.criticFC(mid)
        # Generator's action distribution
        actionDistribution = self.softmax(out)
        action = actionDistribution.argmax(1)
        # Guard against all-zero rows before building the Categorical
        for x in range(actionDistribution.shape[0]):
            if sum(actionDistribution[x]) == 0:
                actionDistribution[x] = actionDistribution[x] + 1e-8
        tmp = Categorical(actionDistribution)
        actionDistribution = tmp.log_prob(action)
        entropy = tmp.entropy()
        return actionDistribution, action.detach(), entropy
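
# Minimal usage sketch (assumptions: datainfo is a populated DataInfo and states
# are flattened to size datainfo.generatorIn; the batch size of 8 is illustrative):
#
# gen = Generator(datainfo).to(device)
# logp, actions, entropy = gen(torch.randn(8, datainfo.generatorIn).to(device))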
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import os
import time
sys.path.append('..')
import childmgt.ChildMgt
def create_children(num_children=5):
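    # Fork num_children children that exit with status 0, then num_children
    # more that exit with status 1, so the reaper sees both outcomes.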
for child_num in range(0, num_children):
child_pid = os.fork()
if child_pid == 0:
time.sleep(3)
sys.exit(0)
for child_num in range(0, num_children):
child_pid = os.fork()
if child_pid == 0:
time.sleep(3)
sys.exit(1)
def main():
result = 0
create_children()
yyy = childmgt.ChildMgt.ChildMgt()
print("Checking Count Zombies=",yyy.countZombiedChild())
print("Sleeping wait for children to exit.")
time.sleep(30)
print("back from sleep")
print("Count Zombies=",yyy.countZombiedChild())
print("Reaping Status.")
child_status = yyy.reapZombieChildStatus()
for key in child_status.keys():
if os.WIFEXITED(child_status[key]) is True:
print("pid:", key, "status:",os.WEXITSTATUS(child_status[key]))
else:
print("pid:", key, "status:",child_status[key])
print("Child status: ",child_status)
print("Sleeping for 120 seconds")
time.sleep(120)
return result
if __name__ == "__main__":
result = main()
sys.exit(result)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import rospy
from week2.srv import roboCmd, roboCmdResponse
import math
class Unicycle:
def __init__(self, x, y, theta, dt=0.05):
self.x = x
self.y = y
self.theta = theta
self.dt = dt
self.x_points = [self.x]
self.y_points = [self.y]
def step(self, v, w, n=50):
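        # Forward-Euler integration of the unicycle model:
        #   theta' = w, x' = v*cos(theta), y' = v*sin(theta)
        # Each call advances n steps of length dt and records the path.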
for i in range(n):
            self.theta += w*self.dt                    # theta = theta + angular_velocity * dt
            self.x += v*math.cos(self.theta)*self.dt   # x = x + horizontal_velocity * dt
            self.y += v*math.sin(self.theta)*self.dt   # y = y + vertical_velocity * dt
self.x_points.append(self.x)
self.y_points.append(self.y)
return self.x_points, self.y_points
def handle_return_traj(req):
uni = Unicycle(req.x, req.y, req.theta)
resp = uni.step(req.v, req.w)
return roboCmdResponse(resp[0], resp[1])
def return_traj_server():
rospy.init_node('return_traj_server')
s = rospy.Service('return_traj', roboCmd, handle_return_traj)
rospy.loginfo('Available to return Trajectory')
rospy.spin()
if __name__ == "__main__":
return_traj_server()
|
nilq/baby-python
|
python
|
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
Get Jc, RA, etc from measured parameter DB
BB, 2015
"""
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
# display units
unit_i = 1e-6 # uA
unit_v = 1e-6 # uV
unit_r = 1 # Ohm
unit_i1 = 1e-3 # mA; control I
unit_v1 = 1e-3 # mV; control V
unit_h = 10 # mT
def setplotparams():
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['legend.frameon'] = False
def plot_svjj(filenames, **kwargs):
    whichplot = kwargs.get('whichplot', 'hic')
if whichplot == 'hic': # H vs Ic
ix = 1; iy = 3
    # for fn in filenames:
# data = np.loadtxt(filename,
class LinFitSVJJ():
def __init__(self, filename='svjj.db'):
self.conn = sqlite3.connect(filename)
self.c = self.conn.cursor()
setplotparams()
def get_area(self, row):
if row[0] == 'circle':
return np.pi*row[1]**2/4
elif row[0] == 'ellipse':
return np.pi*row[1]*row[2]/4
elif row[0] == 'rectangle':
return row[1]*row[2]
def select_chip(self, wafer, chip):
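        # NOTE: the wafer and chip arguments are currently unused; the JOIN
        # below returns rows for every device in the database.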
self.c.execute('''
SELECT shape.shape, shape.dim1, shape.dim2,
josephson.ic_p, josephson.ic_ap, josephson.r_p, josephson.r_ap
FROM shape JOIN josephson
ON shape.wafer=josephson.wafer AND shape.chip=josephson.chip
AND shape.device=josephson.device''')
self.chipdata = self.c.fetchall()
self.areas = []
self.ic_p = []
self.ic_ap = []
self.r_p = []
for row in self.chipdata:
self.areas += [self.get_area(row)]
self.ic_p += [row[3]]
self.ic_ap += [row[4]]
self.r_p += [row[5]]
def print_chip(self):
print(self.chipdata)
def plot_chip(self):
fig = plt.figure(0, (12,6))
# plot Ic's
ax1 = fig.add_subplot(121)
ax1.plot(self.areas, self.ic_p, 's')
ax1.plot(self.areas, self.ic_ap, 'o')
# plot R's
ax2 = fig.add_subplot(122)
ax2.plot(self.areas, 1/np.array(self.r_p), 's')
print(self.ic_p)
plt.show()
# main shell interface (run SVJJDBInteract class)
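# NOTE: SVJJDBInteract is not defined in this file; app() assumes it is
# provided elsewhere.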
def app(argv):
"""Execute in system shell
"""
if len(argv) < 2:
print("Usage: python %s <command> <table>\n"
" <command>: print, insert, delete, or edit\n"
" <table>: barrier, shape, or josephson\n" % argv[0])
sys.exit(0)
db = SVJJDBInteract()
methodname = argv[1]
print(argv[2:])
getattr(db, methodname)(*argv[2:])
db.close()
print('Bye!')
def test(argv):
lf = LinFitSVJJ()
lf.select_chip('B150323a', '56')
lf.print_chip()
lf.plot_chip()
if __name__ == '__main__':
import sys
print(sys.version)
test(sys.argv)
print('Bye!')
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module ZHONE-COM-IP-DHCP-SERVER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZHONE-COM-IP-DHCP-SERVER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:40:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
sysObjectID, = mibBuilder.importSymbols("SNMPv2-MIB", "sysObjectID")
ModuleIdentity, Counter64, Counter32, IpAddress, ObjectIdentity, Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, iso, Bits, MibIdentifier, Unsigned32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "Counter32", "IpAddress", "ObjectIdentity", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "iso", "Bits", "MibIdentifier", "Unsigned32", "NotificationType")
TruthValue, TextualConvention, DisplayString, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString", "PhysAddress")
cardPostResults, cardMfgSerialNumber = mibBuilder.importSymbols("ZHONE-CARD-RESOURCES-MIB", "cardPostResults", "cardMfgSerialNumber")
ZhoneRDIndex, rdEntry = mibBuilder.importSymbols("ZHONE-COM-IP-RD-MIB", "ZhoneRDIndex", "rdEntry")
ipIfAddr, ipIfLgId, ipIfVpi, ipIfVci = mibBuilder.importSymbols("ZHONE-COM-IP-REC-MIB", "ipIfAddr", "ipIfLgId", "ipIfVpi", "ipIfVci")
zhoneShelfNumber, pportNumber, zhoneSlotNumber, subPortNumber = mibBuilder.importSymbols("ZHONE-INTERFACE-TRANSLATION-MIB", "zhoneShelfNumber", "pportNumber", "zhoneSlotNumber", "subPortNumber")
zhoneSysCardSwSpecificVers, = mibBuilder.importSymbols("ZHONE-SYSTEM-MIB", "zhoneSysCardSwSpecificVers")
zhoneModules, zhoneIp = mibBuilder.importSymbols("Zhone", "zhoneModules", "zhoneIp")
ZhoneShelfValue, ZhoneRowStatus, ZhoneFileName, ZhoneSlotValue, ZhoneAdminString = mibBuilder.importSymbols("Zhone-TC", "ZhoneShelfValue", "ZhoneRowStatus", "ZhoneFileName", "ZhoneSlotValue", "ZhoneAdminString")
comIpDhcpServer = ModuleIdentity((1, 3, 6, 1, 4, 1, 5504, 6, 61))
comIpDhcpServer.setRevisions(('2003-09-10 10:47', '2003-04-18 10:10', '2000-12-03 14:00', '2000-11-28 15:00', '2000-12-05 12:11', '2000-10-02 12:05', '2000-09-15 16:50', '2000-09-11 15:41',))
if mibBuilder.loadTexts: comIpDhcpServer.setLastUpdated('200309101500Z')
if mibBuilder.loadTexts: comIpDhcpServer.setOrganization('Zhone Technologies, Inc.')
dhcpServer = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11))
if mibBuilder.loadTexts: dhcpServer.setStatus('current')
dhcpServerTraps = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0))
if mibBuilder.loadTexts: dhcpServerTraps.setStatus('current')
dhcpTrapZhoneCpeDetected = NotificationType((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 1)).setObjects(("ZHONE-INTERFACE-TRANSLATION-MIB", "zhoneShelfNumber"), ("ZHONE-INTERFACE-TRANSLATION-MIB", "zhoneSlotNumber"), ("ZHONE-INTERFACE-TRANSLATION-MIB", "pportNumber"), ("ZHONE-INTERFACE-TRANSLATION-MIB", "subPortNumber"), ("ZHONE-COM-IP-REC-MIB", "ipIfVpi"), ("ZHONE-COM-IP-REC-MIB", "ipIfVci"), ("ZHONE-COM-IP-REC-MIB", "ipIfLgId"), ("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpTrapZhoneCpeSysObjectID"), ("ZHONE-CARD-RESOURCES-MIB", "cardMfgSerialNumber"), ("ZHONE-CARD-RESOURCES-MIB", "cardPostResults"), ("ZHONE-SYSTEM-MIB", "zhoneSysCardSwSpecificVers"), ("ZHONE-COM-IP-REC-MIB", "ipIfAddr"))
if mibBuilder.loadTexts: dhcpTrapZhoneCpeDetected.setStatus('current')
dhcpTrapZhoneCpeSysObjectID = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 2), ObjectIdentifier()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: dhcpTrapZhoneCpeSysObjectID.setStatus('current')
dhcpTrapZhoneIpAddressUpdate = NotificationType((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 3)).setObjects(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpTrapZhoneIpInterfaceIndex"), ("ZHONE-COM-IP-REC-MIB", "ipIfAddr"))
if mibBuilder.loadTexts: dhcpTrapZhoneIpAddressUpdate.setStatus('current')
dhcpTrapZhoneIpInterfaceIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 0, 4), InterfaceIndex()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: dhcpTrapZhoneIpInterfaceIndex.setStatus('current')
dhcpServerDefaultLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultLeaseTime.setStatus('current')
dhcpServerDefaultMinLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultMinLeaseTime.setStatus('current')
dhcpServerDefaultMaxLeaseTime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultMaxLeaseTime.setStatus('current')
dhcpServerDefaultReserveStart = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultReserveStart.setStatus('current')
dhcpServerDefaultReserveEnd = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerDefaultReserveEnd.setStatus('current')
dhcpServerLeaseTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6), )
if mibBuilder.loadTexts: dhcpServerLeaseTable.setStatus('current')
dhcpServerLeaseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpLeaseDomain"), (0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpLeaseIpAddress"))
if mibBuilder.loadTexts: dhcpServerLeaseEntry.setStatus('current')
dhcpLeaseDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 1), ZhoneRDIndex())
if mibBuilder.loadTexts: dhcpLeaseDomain.setStatus('current')
dhcpLeaseIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 2), IpAddress())
if mibBuilder.loadTexts: dhcpLeaseIpAddress.setStatus('current')
dhcpLeaseStarts = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 3), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseStarts.setStatus('current')
dhcpLeaseEnds = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 4), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseEnds.setStatus('current')
dhcpLeaseHardwareAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 5), PhysAddress().subtype(subtypeSpec=ValueSizeConstraint(0, 16)).clone(hexValue="0000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseHardwareAddress.setStatus('current')
dhcpLeaseFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 6), Bits().clone(namedValues=NamedValues(("static", 0), ("bootp", 1), ("unused2", 2), ("unused3", 3), ("abandoned", 4), ("zhoneCPE", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseFlags.setStatus('current')
dhcpLeaseClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseClientId.setStatus('current')
dhcpLeaseClientHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 8), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseClientHostname.setStatus('current')
dhcpLeaseHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseHostname.setStatus('current')
dhcpLeaseDDNSFwdName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 10), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseDDNSFwdName.setStatus('current')
dhcpLeaseDDNSRevName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 11), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseDDNSRevName.setStatus('current')
dhcpLeaseRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 6, 1, 12), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpLeaseRowStatus.setStatus('current')
dhcpServerNextGroupIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpServerNextGroupIndex.setStatus('current')
dhcpServerGroupTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8), )
if mibBuilder.loadTexts: dhcpServerGroupTable.setStatus('current')
dhcpServerGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpGroupIndex"))
if mibBuilder.loadTexts: dhcpServerGroupEntry.setStatus('current')
dhcpGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dhcpGroupIndex.setStatus('current')
dhcpGroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 2), ZhoneAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupName.setStatus('current')
dhcpGroupDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 3), ZhoneRDIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupDomain.setStatus('current')
dhcpGroupVendorMatchString = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupVendorMatchString.setStatus('current')
dhcpGroupVendorMatchOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupVendorMatchOffset.setStatus('current')
dhcpGroupVendorMatchLength = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupVendorMatchLength.setStatus('current')
dhcpGroupClientMatchString = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupClientMatchString.setStatus('current')
dhcpGroupClientMatchOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupClientMatchOffset.setStatus('current')
dhcpGroupClientMatchLength = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupClientMatchLength.setStatus('current')
dhcpGroupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 8, 1, 10), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpGroupRowStatus.setStatus('current')
dhcpServerGroupOptionTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9), )
if mibBuilder.loadTexts: dhcpServerGroupOptionTable.setStatus('current')
dhcpServerGroupOptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1), )
dhcpServerGroupEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpServerGroupOptionEntry"))
dhcpServerGroupOptionEntry.setIndexNames(*dhcpServerGroupEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpServerGroupOptionEntry.setStatus('current')
dhcpGroupOptionDefaultLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionDefaultLeaseTime.setStatus('current')
dhcpGroupOptionMinLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionMinLeaseTime.setStatus('current')
dhcpGroupOptionMaxLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionMaxLeaseTime.setStatus('current')
dhcpGroupOptionBootFile = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 4), ZhoneFileName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionBootFile.setStatus('current')
dhcpGroupOptionBootServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionBootServer.setStatus('current')
dhcpGroupOptionDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionDefaultRouter.setStatus('current')
dhcpGroupOptionPrimaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionPrimaryNameServer.setStatus('current')
dhcpGroupOptionSecondaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionSecondaryNameServer.setStatus('current')
dhcpGroupOptionDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 9, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpGroupOptionDomainName.setStatus('current')
dhcpServerNextSubnetIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpServerNextSubnetIndex.setStatus('current')
dhcpServerSubnetTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11), )
if mibBuilder.loadTexts: dhcpServerSubnetTable.setStatus('current')
dhcpServerSubnetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpSubnetIndex"))
if mibBuilder.loadTexts: dhcpServerSubnetEntry.setStatus('current')
dhcpSubnetIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dhcpSubnetIndex.setStatus('current')
dhcpSubnetNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetNetwork.setStatus('current')
dhcpSubnetNetmask = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetNetmask.setStatus('current')
dhcpSubnetDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 4), ZhoneRDIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetDomain.setStatus('current')
dhcpSubnetRange1Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange1Start.setStatus('current')
dhcpSubnetRange1End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 6), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange1End.setStatus('current')
dhcpSubnetRange2Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 7), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange2Start.setStatus('current')
dhcpSubnetRange2End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 8), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange2End.setStatus('current')
dhcpSubnetRange3Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 9), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange3Start.setStatus('current')
dhcpSubnetRange3End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 10), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange3End.setStatus('current')
dhcpSubnetRange4Start = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 11), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange4Start.setStatus('current')
dhcpSubnetRange4End = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 12), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRange4End.setStatus('current')
dhcpSubnetRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 13), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetRowStatus.setStatus('current')
dhcpSubnetGroup2 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 14), Integer32().clone(0)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetGroup2.setStatus('current')
dhcpStickyAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 15), TruthValue().clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpStickyAddr.setStatus('current')
dhcpSubnetExternalServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 16), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetExternalServer.setStatus('current')
dhcpSubnetExternalServerAlt = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 11, 1, 17), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpSubnetExternalServerAlt.setStatus('current')
dhcpServerSubnetOptionTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12), )
if mibBuilder.loadTexts: dhcpServerSubnetOptionTable.setStatus('current')
dhcpServerSubnetOptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1), )
dhcpServerSubnetEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpServerSubnetOptionEntry"))
dhcpServerSubnetOptionEntry.setIndexNames(*dhcpServerSubnetEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpServerSubnetOptionEntry.setStatus('current')
dhcpSubnetOptionDefaultLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionDefaultLeaseTime.setStatus('current')
dhcpSubnetOptionMinLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionMinLeaseTime.setStatus('current')
dhcpSubnetOptionMaxLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionMaxLeaseTime.setStatus('current')
dhcpSubnetOptionBootFile = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 4), ZhoneFileName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionBootFile.setStatus('current')
dhcpSubnetOptionBootServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionBootServer.setStatus('current')
dhcpSubnetOptionDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionDefaultRouter.setStatus('current')
dhcpSubnetOptionPrimaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionPrimaryNameServer.setStatus('current')
dhcpSubnetOptionSecondaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionSecondaryNameServer.setStatus('current')
dhcpSubnetOptionDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 12, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpSubnetOptionDomainName.setStatus('current')
dhcpServerNextHostIndex = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpServerNextHostIndex.setStatus('current')
dhcpServerHostTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14), )
if mibBuilder.loadTexts: dhcpServerHostTable.setStatus('current')
dhcpServerHostEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1), ).setIndexNames((0, "ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpHostIndex"))
if mibBuilder.loadTexts: dhcpServerHostEntry.setStatus('current')
dhcpHostIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: dhcpHostIndex.setStatus('current')
dhcpHostHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 2), ZhoneAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostHostname.setStatus('current')
dhcpHostDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 3), ZhoneRDIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostDomain.setStatus('current')
dhcpHostHardwareAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 4), PhysAddress().subtype(subtypeSpec=ValueSizeConstraint(0, 16)).clone(hexValue="0000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostHardwareAddress.setStatus('current')
dhcpHostClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostClientId.setStatus('current')
dhcpHostIpAddress1 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 6), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress1.setStatus('current')
dhcpHostIpAddress2 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 7), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress2.setStatus('current')
dhcpHostIpAddress3 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 8), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress3.setStatus('current')
dhcpHostIpAddress4 = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 9), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostIpAddress4.setStatus('current')
dhcpHostRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 14, 1, 10), ZhoneRowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dhcpHostRowStatus.setStatus('current')
dhcpServerHostOptionTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15), )
if mibBuilder.loadTexts: dhcpServerHostOptionTable.setStatus('current')
dhcpServerHostOptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1), )
dhcpServerHostEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpServerHostOptionEntry"))
dhcpServerHostOptionEntry.setIndexNames(*dhcpServerHostEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpServerHostOptionEntry.setStatus('current')
dhcpHostOptionDefaultLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionDefaultLeaseTime.setStatus('current')
dhcpHostOptionMinLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionMinLeaseTime.setStatus('current')
dhcpHostOptionMaxLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionMaxLeaseTime.setStatus('current')
dhcpHostOptionBootFile = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 4), ZhoneFileName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionBootFile.setStatus('current')
dhcpHostOptionBootServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionBootServer.setStatus('current')
dhcpHostOptionDefaultRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionDefaultRouter.setStatus('current')
dhcpHostOptionPrimaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionPrimaryNameServer.setStatus('current')
dhcpHostOptionSecondaryNameServer = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionSecondaryNameServer.setStatus('current')
dhcpHostOptionDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 15, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpHostOptionDomainName.setStatus('current')
dhcpServerStatistics = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16))
if mibBuilder.loadTexts: dhcpServerStatistics.setStatus('current')
serverSystem = ObjectIdentity((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1))
if mibBuilder.loadTexts: serverSystem.setStatus('current')
serverSystemDescr = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 1), ZhoneAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverSystemDescr.setStatus('current')
serverSystemObjectID = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverSystemObjectID.setStatus('current')
serverUptime = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverUptime.setStatus('current')
serverActiveShelf = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 4), ZhoneShelfValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverActiveShelf.setStatus('current')
serverActiveSlot = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 5), ZhoneSlotValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverActiveSlot.setStatus('current')
serverStandbyShelf = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 6), ZhoneShelfValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverStandbyShelf.setStatus('current')
serverStandbySlot = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 1, 7), ZhoneSlotValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serverStandbySlot.setStatus('current')
bootpCountersTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2), )
if mibBuilder.loadTexts: bootpCountersTable.setStatus('current')
bootpCountersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1), )
rdEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "bootpCountersEntry"))
bootpCountersEntry.setIndexNames(*rdEntry.getIndexNames())
if mibBuilder.loadTexts: bootpCountersEntry.setStatus('current')
bootpCountRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountRequests.setStatus('current')
bootpCountInvalids = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountInvalids.setStatus('current')
bootpCountReplies = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountReplies.setStatus('current')
bootpCountDroppedUnknownClients = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountDroppedUnknownClients.setStatus('current')
bootpCountDroppedNotServingSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bootpCountDroppedNotServingSubnet.setStatus('current')
dhcpCountersTable = MibTable((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3), )
if mibBuilder.loadTexts: dhcpCountersTable.setStatus('current')
dhcpCountersEntry = MibTableRow((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1), )
rdEntry.registerAugmentions(("ZHONE-COM-IP-DHCP-SERVER-MIB", "dhcpCountersEntry"))
dhcpCountersEntry.setIndexNames(*rdEntry.getIndexNames())
if mibBuilder.loadTexts: dhcpCountersEntry.setStatus('current')
dhcpCountDiscovers = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDiscovers.setStatus('current')
dhcpCountRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountRequests.setStatus('current')
dhcpCountReleases = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountReleases.setStatus('current')
dhcpCountDeclines = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDeclines.setStatus('current')
dhcpCountInforms = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountInforms.setStatus('current')
dhcpCountInvalids = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountInvalids.setStatus('current')
dhcpCountOffers = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountOffers.setStatus('current')
dhcpCountAcks = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountAcks.setStatus('current')
dhcpCountNacks = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountNacks.setStatus('current')
dhcpCountDroppedUnknownClient = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDroppedUnknownClient.setStatus('current')
dhcpCountDroppedNotServingSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 16, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dhcpCountDroppedNotServingSubnet.setStatus('current')
dhcpServerConfigurationVersion = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 17), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerConfigurationVersion.setStatus('deprecated')
dhcpServerRestart = MibScalar((1, 3, 6, 1, 4, 1, 5504, 4, 1, 11, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dhcpServerRestart.setStatus('current')
mibBuilder.exportSymbols("ZHONE-COM-IP-DHCP-SERVER-MIB", dhcpGroupOptionMinLeaseTime=dhcpGroupOptionMinLeaseTime, dhcpSubnetRange3Start=dhcpSubnetRange3Start, dhcpSubnetExternalServer=dhcpSubnetExternalServer, dhcpGroupVendorMatchString=dhcpGroupVendorMatchString, dhcpSubnetOptionBootFile=dhcpSubnetOptionBootFile, dhcpSubnetRange1End=dhcpSubnetRange1End, dhcpServerGroupTable=dhcpServerGroupTable, serverActiveShelf=serverActiveShelf, dhcpServer=dhcpServer, bootpCountDroppedNotServingSubnet=bootpCountDroppedNotServingSubnet, dhcpGroupClientMatchString=dhcpGroupClientMatchString, dhcpGroupOptionSecondaryNameServer=dhcpGroupOptionSecondaryNameServer, dhcpSubnetRange1Start=dhcpSubnetRange1Start, dhcpHostOptionDefaultRouter=dhcpHostOptionDefaultRouter, serverSystemObjectID=serverSystemObjectID, dhcpSubnetNetmask=dhcpSubnetNetmask, dhcpGroupClientMatchOffset=dhcpGroupClientMatchOffset, dhcpGroupIndex=dhcpGroupIndex, dhcpServerDefaultReserveStart=dhcpServerDefaultReserveStart, dhcpGroupVendorMatchOffset=dhcpGroupVendorMatchOffset, dhcpCountersTable=dhcpCountersTable, dhcpServerNextGroupIndex=dhcpServerNextGroupIndex, dhcpHostOptionBootServer=dhcpHostOptionBootServer, dhcpHostOptionPrimaryNameServer=dhcpHostOptionPrimaryNameServer, dhcpTrapZhoneIpAddressUpdate=dhcpTrapZhoneIpAddressUpdate, dhcpServerTraps=dhcpServerTraps, dhcpLeaseIpAddress=dhcpLeaseIpAddress, dhcpSubnetRange4End=dhcpSubnetRange4End, dhcpSubnetRange2End=dhcpSubnetRange2End, dhcpHostDomain=dhcpHostDomain, dhcpLeaseHardwareAddress=dhcpLeaseHardwareAddress, dhcpLeaseRowStatus=dhcpLeaseRowStatus, bootpCountersEntry=bootpCountersEntry, dhcpHostOptionMinLeaseTime=dhcpHostOptionMinLeaseTime, PYSNMP_MODULE_ID=comIpDhcpServer, dhcpServerGroupOptionEntry=dhcpServerGroupOptionEntry, dhcpGroupRowStatus=dhcpGroupRowStatus, dhcpSubnetOptionSecondaryNameServer=dhcpSubnetOptionSecondaryNameServer, dhcpSubnetOptionDefaultLeaseTime=dhcpSubnetOptionDefaultLeaseTime, dhcpServerSubnetOptionEntry=dhcpServerSubnetOptionEntry, dhcpSubnetRange4Start=dhcpSubnetRange4Start, dhcpSubnetOptionBootServer=dhcpSubnetOptionBootServer, dhcpLeaseDDNSFwdName=dhcpLeaseDDNSFwdName, dhcpSubnetNetwork=dhcpSubnetNetwork, dhcpCountOffers=dhcpCountOffers, comIpDhcpServer=comIpDhcpServer, dhcpGroupVendorMatchLength=dhcpGroupVendorMatchLength, dhcpGroupOptionDefaultLeaseTime=dhcpGroupOptionDefaultLeaseTime, dhcpServerRestart=dhcpServerRestart, dhcpSubnetExternalServerAlt=dhcpSubnetExternalServerAlt, dhcpHostIpAddress4=dhcpHostIpAddress4, dhcpServerConfigurationVersion=dhcpServerConfigurationVersion, dhcpGroupName=dhcpGroupName, dhcpTrapZhoneCpeDetected=dhcpTrapZhoneCpeDetected, dhcpSubnetOptionMinLeaseTime=dhcpSubnetOptionMinLeaseTime, dhcpServerNextSubnetIndex=dhcpServerNextSubnetIndex, dhcpSubnetIndex=dhcpSubnetIndex, dhcpServerDefaultMinLeaseTime=dhcpServerDefaultMinLeaseTime, bootpCountDroppedUnknownClients=bootpCountDroppedUnknownClients, dhcpServerLeaseEntry=dhcpServerLeaseEntry, serverSystemDescr=serverSystemDescr, dhcpServerDefaultReserveEnd=dhcpServerDefaultReserveEnd, dhcpGroupOptionDomainName=dhcpGroupOptionDomainName, dhcpGroupOptionMaxLeaseTime=dhcpGroupOptionMaxLeaseTime, dhcpServerSubnetTable=dhcpServerSubnetTable, dhcpLeaseClientHostname=dhcpLeaseClientHostname, dhcpHostIpAddress2=dhcpHostIpAddress2, dhcpServerSubnetEntry=dhcpServerSubnetEntry, dhcpLeaseEnds=dhcpLeaseEnds, dhcpSubnetOptionMaxLeaseTime=dhcpSubnetOptionMaxLeaseTime, dhcpSubnetGroup2=dhcpSubnetGroup2, dhcpGroupClientMatchLength=dhcpGroupClientMatchLength, dhcpCountNacks=dhcpCountNacks, 
dhcpHostOptionDomainName=dhcpHostOptionDomainName, dhcpTrapZhoneCpeSysObjectID=dhcpTrapZhoneCpeSysObjectID, serverActiveSlot=serverActiveSlot, dhcpSubnetRowStatus=dhcpSubnetRowStatus, dhcpServerNextHostIndex=dhcpServerNextHostIndex, dhcpServerLeaseTable=dhcpServerLeaseTable, dhcpStickyAddr=dhcpStickyAddr, dhcpSubnetOptionPrimaryNameServer=dhcpSubnetOptionPrimaryNameServer, dhcpCountReleases=dhcpCountReleases, dhcpTrapZhoneIpInterfaceIndex=dhcpTrapZhoneIpInterfaceIndex, dhcpSubnetRange2Start=dhcpSubnetRange2Start, dhcpServerSubnetOptionTable=dhcpServerSubnetOptionTable, bootpCountInvalids=bootpCountInvalids, dhcpGroupOptionPrimaryNameServer=dhcpGroupOptionPrimaryNameServer, dhcpHostIndex=dhcpHostIndex, dhcpHostOptionBootFile=dhcpHostOptionBootFile, dhcpHostClientId=dhcpHostClientId, dhcpHostOptionMaxLeaseTime=dhcpHostOptionMaxLeaseTime, dhcpLeaseDDNSRevName=dhcpLeaseDDNSRevName, serverStandbySlot=serverStandbySlot, dhcpHostHostname=dhcpHostHostname, dhcpServerGroupEntry=dhcpServerGroupEntry, dhcpServerDefaultLeaseTime=dhcpServerDefaultLeaseTime, dhcpHostOptionSecondaryNameServer=dhcpHostOptionSecondaryNameServer, serverUptime=serverUptime, dhcpServerDefaultMaxLeaseTime=dhcpServerDefaultMaxLeaseTime, dhcpGroupOptionDefaultRouter=dhcpGroupOptionDefaultRouter, bootpCountReplies=bootpCountReplies, dhcpServerHostOptionTable=dhcpServerHostOptionTable, dhcpHostRowStatus=dhcpHostRowStatus, dhcpHostHardwareAddress=dhcpHostHardwareAddress, dhcpCountDroppedUnknownClient=dhcpCountDroppedUnknownClient, dhcpHostIpAddress1=dhcpHostIpAddress1, dhcpHostIpAddress3=dhcpHostIpAddress3, dhcpServerHostOptionEntry=dhcpServerHostOptionEntry, dhcpCountAcks=dhcpCountAcks, dhcpServerGroupOptionTable=dhcpServerGroupOptionTable, serverSystem=serverSystem, dhcpGroupOptionBootServer=dhcpGroupOptionBootServer, bootpCountRequests=bootpCountRequests, dhcpSubnetDomain=dhcpSubnetDomain, dhcpCountRequests=dhcpCountRequests, dhcpCountInvalids=dhcpCountInvalids, dhcpSubnetOptionDefaultRouter=dhcpSubnetOptionDefaultRouter, dhcpLeaseFlags=dhcpLeaseFlags, dhcpLeaseDomain=dhcpLeaseDomain, dhcpCountDeclines=dhcpCountDeclines, dhcpGroupOptionBootFile=dhcpGroupOptionBootFile, dhcpLeaseStarts=dhcpLeaseStarts, dhcpHostOptionDefaultLeaseTime=dhcpHostOptionDefaultLeaseTime, dhcpServerHostTable=dhcpServerHostTable, dhcpGroupDomain=dhcpGroupDomain, dhcpLeaseClientId=dhcpLeaseClientId, dhcpSubnetRange3End=dhcpSubnetRange3End, dhcpSubnetOptionDomainName=dhcpSubnetOptionDomainName, dhcpLeaseHostname=dhcpLeaseHostname, dhcpCountersEntry=dhcpCountersEntry, dhcpCountDroppedNotServingSubnet=dhcpCountDroppedNotServingSubnet, serverStandbyShelf=serverStandbyShelf, bootpCountersTable=bootpCountersTable, dhcpCountDiscovers=dhcpCountDiscovers, dhcpCountInforms=dhcpCountInforms, dhcpServerStatistics=dhcpServerStatistics, dhcpServerHostEntry=dhcpServerHostEntry)
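# Usage sketch (not part of the generated module; the path and symbol choice
# are assumptions): resolve one of the exported objects with pysnmp's
# MibBuilder, assuming this compiled module sits in the current directory.
if __name__ == '__main__':
    from pysnmp.smi import builder
    mib_builder = builder.MibBuilder()
    mib_builder.addMibSources(builder.DirMibSource('.'))
    mib_builder.loadModules('ZHONE-COM-IP-DHCP-SERVER-MIB')
    (subnet_network,) = mib_builder.importSymbols(
        'ZHONE-COM-IP-DHCP-SERVER-MIB', 'dhcpSubnetNetwork')
    print(subnet_network.getName())  # prints the column's full OID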
|
nilq/baby-python
|
python
|
"""Implementation classes that are used as application configuration containers
parsed from files.
"""
__author__ = 'Paul Landes'
from typing import Dict, Set
import logging
import re
import collections
from zensols.persist import persisted
from . import Configurable, ConfigurableError
logger = logging.getLogger(__name__)
class StringConfig(Configurable):
"""A simple string based configuration. This takes a single comma delimited
key/value pair string in the format:
``<section>.<name>=<value>[,<section>.<name>=<value>,...]``
    A dot (``.``) is used to separate the section from the option instead of
    a colon (``:``), which is used by the more sophisticated interpolation in
    :class:`configparser.ExtendedInterpolation`. The dot is used here to make
    other section interpolation easier.
"""
KEY_VAL_REGEX = re.compile(r'^(?:([^.]+?)\.)?([^=]+?)=(.+)$')
def __init__(self, config_str: str, option_sep: str = ',',
default_section: str = None):
"""Initialize with a string given as described in the class docs.
:param config_str: the configuration
        :param option_sep: the string used to delimit each key/value pair
        :param default_section: used as the default section when none is given
                                to the get methods such as :meth:`get_option`
"""
super().__init__(default_section)
self.config_str = config_str
self.option_sep = option_sep
@persisted('_parsed_config')
def _get_parsed_config(self) -> Dict[str, str]:
"""Parse the configuration string given in the initializer (see class docs).
"""
conf = collections.defaultdict(lambda: {})
for kv in self.config_str.split(self.option_sep):
m = self.KEY_VAL_REGEX.match(kv)
if m is None:
raise ConfigurableError(f'unexpected format: {kv}')
sec, name, value = m.groups()
sec = self.default_section if sec is None else sec
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'section={sec}, name={name}, value={value}')
conf[sec][name] = value
return conf
@property
@persisted('_sections')
def sections(self) -> Set[str]:
return set(self._get_parsed_config().keys())
    def has_option(self, name: str, section: str = None) -> bool:
        section = self.default_section if section is None else section
        return name in self._get_parsed_config().get(section, {})
    def get_options(self, section: str = None) -> Dict[str, str]:
        section = self.default_section if section is None else section
        conf = self._get_parsed_config()
        if section not in conf:
            raise ConfigurableError(f'no section: {section}')
        return conf[section]
def __str__(self) -> str:
return self.__class__.__name__ + ': config=' + self.config_str
def __repr__(self) -> str:
return f'<{self.__str__()}>'
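# Usage sketch (section/option names are assumptions for illustration): the
# comma delimited format described in the class docstring parses like this.
if __name__ == '__main__':
    sc = StringConfig('db.host=localhost,db.port=5432,ttl=60',
                      default_section='default')
    print(sc.sections)            # {'db', 'default'}
    print(sc.get_options('db'))   # {'host': 'localhost', 'port': '5432'}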
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from test_utils.label_to_str_voc import convert_label_to_str
def render_boxs_info_for_display(image, net_out, select_index, net_score, image_size, label_out = None):
valid_box = net_out[select_index]
valid_score = net_score[select_index]
for index, value in enumerate(select_index):
        if net_score[index] > 0.5 and value:
valid_box = net_out[index]
valid_score = net_score[index]
print("current box info is " + str(valid_box))
print("current box scores is " + str(valid_score))
            if label_out is not None:
                print("current label is %s" % convert_label_to_str(label_out[index]))
ymin = int(valid_box[0] * image_size)
xmin = int(valid_box[1] * image_size)
ymax = int(valid_box[2] * image_size)
xmax = int(valid_box[3] * image_size)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), thickness=1,color=(0,0,255))
return image
def render_rectangle_box(image, box, colour = (255, 255, 255), offset = 0, thickness = 1):
"""
:param image: 需要显示的图片
:param box: box信息
:param colour: 颜色信息
:param offset: box偏移
:param thickness: 线条宽度
:return:
"""
height,width, channel = image.shape
y_start = int(height * box[0]) + offset
x_start = int(width * box[1]) + offset
y_end = int(height * box[2]) + offset
x_end = int(width * box[3]) + offset
image = cv2.rectangle(image,(x_start,y_start), (x_end,y_end), color=colour, thickness= thickness)
return image
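# Usage sketch (the output file name is an assumption): draw one normalized
# [ymin, xmin, ymax, xmax] box on a blank canvas with the helper above.
if __name__ == '__main__':
    import numpy as np
    canvas = np.zeros((200, 200, 3), dtype=np.uint8)
    canvas = render_rectangle_box(canvas, [0.1, 0.1, 0.9, 0.9], colour=(0, 255, 0))
    cv2.imwrite('box_demo.png', canvas)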
|
nilq/baby-python
|
python
|
import unittest
import numpy as np
from sca.analysis import nicv
class TestNicvUnit(unittest.TestCase):
def test_calculate_mean_x_given_y_matrix(self):
""" Tests whether the calculations of means work properly"""
traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])
plain = np.array([[1], [2], [1], [2]])
keys = plain
resulting_matrix = np.zeros((9, 3))
resulting_matrix[4] = [3.5, 2.6, 7.5]
calculated_matrix = nicv.NICV.calculate_mean_x_given_y_matrix(plain, traces, 0, keys)
print(calculated_matrix)
self.assertTrue(np.allclose(calculated_matrix, resulting_matrix))
def test_calculate_single_nicv(self):
""" Tests whether the calculation of a single nicv value works properly"""
mean_x_given_y = np.array([[-0.01, 0.01, 0, 0.014]])
y = np.array([[0.1, -0.01, 0.03, 0.1]])
resulting_nicv = 0.03898876404494381
calculated_nicv = nicv.NICV.calculate_single_nicv(mean_x_given_y, y)
self.assertAlmostEqual(calculated_nicv, resulting_nicv)
def test_get_points_of_interest_indices(self):
""" Tests if the point of interest selection works properly"""
traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])
plain = np.array([[1], [2], [1], [2]])
keys = plain
resulting_points_of_interest_indices = [1, 2]
calculated_points_of_interest_indices = nicv.NICV.get_points_of_interest_indices(plain, traces, 2, 0, keys)
print(calculated_points_of_interest_indices)
self.assertTrue(np.allclose(resulting_points_of_interest_indices, calculated_points_of_interest_indices))
def test_get_points_of_interest(self):
""" Tests if the point of interest selection works properly"""
traces = np.array([[1, 2, 3], [4, 5, 6], [7, 0.4, 9], [2, 3, 12]])
plain = np.array([[1], [2], [1], [2]])
keys = plain
resulting_points_of_interest = [[2, 3], [5, 6], [0.4, 9], [3, 12]]
calculated_points_of_interest = nicv.NICV.get_points_of_interest(plain, traces, 2, 0, keys)
self.assertTrue(np.allclose(resulting_points_of_interest, calculated_points_of_interest))
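# Standard unittest entry point so the suite above can be run directly.
if __name__ == '__main__':
    unittest.main()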
|
nilq/baby-python
|
python
|
# coding: utf-8
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import tree
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
def maybe_load_loan_data(threshold=1, path='../input/loan.csv', force='n'):
def load_data():
data = pd.read_csv(path, low_memory=False)
        t = len(data) / threshold
        data = data.dropna(thresh=t, axis=1)  # keep columns with at least len(data)/threshold non-null values
return data
    # conditionally load the data; a NameError from a not-yet-defined `df`
    # falls through to the except branch and triggers a fresh load
    try:
        if df.empty or force == 'y':
data = load_data()
else:
return df
except:
data = load_data()
return data
df = maybe_load_loan_data(2)
# In[ ]:
df.columns
# In[ ]:
def show_stats(df):
print ("Number of records {}".format(len(df)))
print ("Dataset Shape {}".format(df.shape))
sns.distplot(df['loan_amnt'].astype(int))
show_stats(df)
# In[ ]:
# Understand data correlations
numeric_features = df.select_dtypes(include=[np.number])
print(numeric_features.describe())
categoricals = df.select_dtypes(exclude=[np.number])
print(categoricals.describe())
corr = numeric_features.corr()
print (corr['loan_amnt'].sort_values(ascending=False)[:10], '\n')
print (corr['loan_amnt'].sort_values(ascending=False)[-10:])
''' move this to model evaluation section
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)'''
# In[ ]:
def show_dictionary(path='../input/LCDataDictionary.xlsx'):
data_dictionary = pd.read_excel(path)
print(data_dictionary.shape[0])
print(data_dictionary.columns.tolist())
data_dictionary.rename(columns={'Name': 'name',
'Description': 'description'})
return data_dictionary
data_dict = show_dictionary()  # avoid shadowing the built-in `dict`
data_dict.set_index('LoanStatNew', inplace=True)
data_dict.loc[:]
# In[ ]:
data_dict.loc[data_dict.index.intersection(categoricals.columns)]  # descriptions of the categorical features
# In[ ]:
from pandas.plotting import scatter_matrix  # pandas.tools.plotting was removed in pandas 0.20+
attributes = ['annual_inc', 'loan_amnt', 'revol_util', 'dti', 'open_acc', 'revol_bal', 'total_rec_int']
# 'recoveries','acc_now_delinq','delinq_2yrs','emp_length','int_rate','funded_amnt'
scatter_matrix(df[attributes], figsize=(12,8))
# In[ ]:
def print_data_shape(df):
    print("No rows: {}".format(df.shape[0]))
    print("No cols: {}".format(df.shape[1]))
    print(df.head(1).values)
    print("Columns: " + ", ".join(df.columns))
# In[ ]:
def proc_emp_length():
df.replace('n/a', np.nan, inplace=True)
df.emp_length.fillna(value=0, inplace=True)
df['emp_length'].replace(to_replace='[^0-9]+', value='', inplace=True, regex=True)
df['emp_length'] = df['emp_length'].astype(int)
#df.emp_length.head()
# In[ ]:
df.revol_bal.head()
#df.revol_util = pd.Series(df.revol_util).str.replace('%', '').astype(float)
# In[ ]:
print (df.emp_title.value_counts().head())
print (df.emp_title.value_counts().tail())
df.emp_title.unique().shape
# In[ ]:
df.verification_status.value_counts()
# In[ ]:
def proc_desc_len():
    # fill with '' (not 0) so .str.len() returns a number for every row
    df['desc_length'] = df['desc'].fillna('').str.len()
    #df.desc_length
# In[ ]:
def proc_issue_d():
df['issue_month'], df['issue_year'] = zip(*df.issue_d.str.split('-'))
df.drop(['issue_d'], 1, inplace=True)
# In[ ]:
def proc_zip_code():
df['zip_code'] = df['zip_code'].str.rstrip('x')
# In[ ]:
print (df.purpose.value_counts())
print ('')
print (df.title.value_counts().head())
# In[ ]:
#df = maybe_load_loan_data(threshold=2)
df.plot(kind='barh', x='purpose', y='int_rate')
# In[ ]:
print_data_shape(df)
# In[ ]:
def proc_loan_status(df):
#mapping_dict = {'loan_status':{'Fully Paid':0, 'Charged Off': 1, 'Default': 1, 'Current': 0}}
mapping_dict = {'loan_status':{'Fully Paid':0, 'Charged Off': 1}}
df = df.replace(mapping_dict)
df = df[(df['loan_status'] == 1) | (df['loan_status'] == 0)]
return df
# In[ ]:
def show_nulls(df):
nulls = pd.DataFrame(df.isnull().sum().sort_values(ascending=False)[:25])
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
return nulls
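# In[ ]:
# Usage sketch (assumes the cells above ran in order and `df` is loaded):
# chain the cleaning helpers defined above and inspect what is still missing.
df = proc_loan_status(df)
proc_emp_length()
proc_desc_len()
proc_issue_d()
proc_zip_code()
show_nulls(df)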
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as torch_models
class PerceptualLoss(nn.Module):
def __init__(self, rank):
super(PerceptualLoss, self).__init__()
self.rank = rank
self.vgg19 = torch_models.vgg19(pretrained=True)
self.vgg19_relu_5_2 = nn.Sequential(*list(self.vgg19.features.children())[:-5]).eval()
for p in self.vgg19_relu_5_2.parameters():
p.requires_grad = False
self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def forward(self, input_, target):
input_ = (input_ - self.mean) / self.std
target = (target - self.mean) / self.std
input_ = F.interpolate(input_, mode='bilinear', size=(224, 224), align_corners=False)
target = F.interpolate(target, mode='bilinear', size=(224, 224), align_corners=False)
input_vgg = self.vgg19_relu_5_2(input_)
target_vgg = self.vgg19_relu_5_2(target)
loss = F.l1_loss(input_vgg, target_vgg)
return loss
class Color2EmbedLoss(nn.Module):
def __init__(self, rank, lambda_reconstruction=1, lambda_perceptual=0.1):
super(Color2EmbedLoss, self).__init__()
self.lambda_reconstruction = lambda_reconstruction
self.lambda_perceptual = lambda_perceptual
self.reconstruction_loss = nn.SmoothL1Loss()
self.perceptual_loss = PerceptualLoss(rank)
def forward(self, pab, gtab, prgb, gtrgb):
l_rec = self.reconstruction_loss(pab, gtab)
l_per = self.perceptual_loss(prgb, gtrgb)
return self.lambda_reconstruction * l_rec + self.lambda_perceptual * l_per, l_per, l_rec
if __name__ == '__main__':
batch = 4
pab = torch.rand(batch, 2, 256, 256)
gtab = torch.rand(batch, 2, 256, 256)
prgb = torch.rand(batch, 3, 256, 256)
gtrgb = torch.rand(batch, 3, 256, 256)
    loss = Color2EmbedLoss(rank=0)  # rank is only stored by the loss; 0 is fine for a CPU smoke test
print(loss(pab, gtab, prgb, gtrgb))
# print(mm(torch.rand(5, 3, 256, 256).to(0)).shape)
# summary(loss.vgg19, (3, 224, 224))
|
nilq/baby-python
|
python
|
"""
Generates a powershell script to install Windows agent - dcos_install.ps1
"""
import os
import os.path
import gen.build_deploy.util as util
import gen.template
import gen.util
import pkgpanda
import pkgpanda.util
def generate(gen_out, output_dir):
print("Generating Powershell configuration files for DC/OS")
make_powershell(gen_out, output_dir)
def make_powershell(gen_out, output_dir):
"""Build powershell deployment script and store this at Bootstrap serve"""
output_dir = output_dir + '/windows/'
pkgpanda.util.make_directory(output_dir)
bootstrap_url = gen_out.arguments['bootstrap_url']
if gen_out.arguments['master_discovery'] == 'static':
master_list = gen_out.arguments['master_list']
elif gen_out.arguments['master_discovery'] == 'master_http_loadbalancer':
master_list = gen_out.arguments['exhibitor_address'] + ':2181'
else:
master_list = 'zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,zk-5.zk:2181'
powershell_template_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'powershell/dcos_install.ps1.in')
with open(powershell_template_path, 'r') as f:
powershell_template = f.read()
powershell_script = gen.template.parse_str(powershell_template).render({
'dcos_image_commit': util.dcos_image_commit,
'generation_date': util.template_generation_date,
'bootstrap_url': bootstrap_url,
'master_list': master_list,
})
# Output the dcos install ps1 script
install_script_filename = 'dcos_install.ps1'
pkgpanda.util.write_string(install_script_filename, powershell_script)
pkgpanda.util.write_string(output_dir + install_script_filename, powershell_script)
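# Usage sketch (illustrative stand-in): `gen_out` normally comes from the
# DC/OS gen pipeline; a minimal object carrying the arguments read above is
# enough to exercise the generator locally. All values are assumptions.
if __name__ == '__main__':
    class _FakeGenOut:
        arguments = {
            'bootstrap_url': 'http://bootstrap.example:8080',
            'master_discovery': 'static',
            'master_list': '10.0.0.1,10.0.0.2',
        }
    generate(_FakeGenOut(), '/tmp/dcos-serve')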
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
#Self Written Module to Decrypt Files
#=========================================================
#This Module is Written to Reverse the Attack of Ransomware
#=========================================================
# Reverse_Attack
# |____*****TAKES 1 ARGUMENT, i.e. KEY *****
# |____Initiate Decryption Process
from pathlib import Path #Used to Find the Home Path
import threading #Using Threads to Boost Search Process BY Searching Diff. Drive on Diff. Thread
from os.path import expanduser
from Crypto import Random
from Crypto.Cipher import AES
import os
import hashlib, base64
class Reverse:
def __init__(self, key):
self.decryption_key = key
self.list_of_files = []
def start(self):
home = self.get_home_dir()
target1 = home + "Pictures"
target2 = home + "Music"
target3 = home + "Downloads"
target4 = home + "Documents"
target5 = home + "Desktop"
t1 = threading.Thread(target=self.run_locate_class, args=[target1,])
t2 = threading.Thread(target=self.run_locate_class, args=[target2,])
t3 = threading.Thread(target=self.run_locate_class, args=[target3,])
t4 = threading.Thread(target=self.run_locate_class, args=[target4,])
t5 = threading.Thread(target=self.run_locate_class, args=[target5,])
        # Start every thread first, then join them all; starting and joining
        # one-by-one would serialize the search and defeat the threading
        # described in the module comment above.
        threads = [t1, t2, t3, t4, t5]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
for files in self.list_of_files:
decrypt = Decryptor(self.decryption_key, files)
decrypt.decrypt_file() #Starting Decryption of Each File One-BY-One
def get_home_dir(self):
return str(Path.home()) + '\\'
def run_locate_class(self, drive_name):
'''
Function to make Object of LocateTargetFiles Class
'''
starting = LocateEncryptedFiles()
list_of_files = starting.start(drive_name)
self.list_of_files.extend(list_of_files)
return True
class LocateEncryptedFiles:
def __init__(self, exclude = None):
self.files_on_system = []
self.target_extension = ['enc',]
self.exclude_dir = []
if exclude != None:
self.exclude_dir.extend(exclude)
def start(self, root_dir):
self.locate_files(root_dir)
return self.files_on_system
def locate_files(self, root_dir):
for root, _, files in os.walk(root_dir):
for f in files:
abs_file_path = os.path.join(root, f)
self.filter(self.target_extension, abs_file_path)
def filter(self, target_extension, abs_file_path):
if self.is_excluded_dir(abs_file_path) == False:
# Filtering Files On the basics of file extension
if abs_file_path.split('.')[-1] in self.target_extension:
self.files_on_system.append(abs_file_path)
else:
pass
def is_excluded_dir(self, path):
'''
@summary: Checks whether the specified path should be excluded from encryption
@param path: The path to check
@return: True if the path should be excluded from encryption, otherwise False
'''
for dir_to_exclude in self.exclude_dir:
            length = len(dir_to_exclude)
            if path[:length] == dir_to_exclude:
return True
return False
class Decryptor:
def __init__(self, key, file_name):
self.key = hashlib.sha256(key.encode('utf-8')).digest()
self.file_name = file_name
def pad(self, s):
return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
def decrypt(self, ciphertext, key):
iv = ciphertext[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return plaintext.rstrip(b"\0")
def decrypt_file(self):
with open(self.file_name, 'rb') as fo:
ciphertext = fo.read()
dec = self.decrypt(ciphertext, self.key)
with open(self.file_name[:-4], 'wb') as fo:
fo.write(dec)
os.remove(self.file_name)
if __name__ == '__main__':
key = input("Enter Key : ")
warning = input("\n!!!Warning!!! \nIs This Key Correct [Wrong KEY Will Just Destroy The Data] y/n: ")
if warning.lower() == 'y':
print("\n[*] Reversing Attack ...")
print("\n[*] Initiating Decryption Process ...")
test = Reverse(key)
test.start()
print("\n[+] Completed Successfully : )")
elif warning.lower() == 'n':
print("\nPlease Try Later With Correct KEY !")
else:
print("\n[!] Invaid Argument : (")
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************
# @author: Haifeng CHEN - optical.dlz@gmail.com
# @date (created): 2019-12-12 09:07
# @file: memory_monitor.py
# @brief: A tool to monitor memory usage of given process
# @internal:
# revision: 14
# last modified: 2020-03-06 12:24:48
# *****************************************************
import os
import sys
import psutil
import random
import sqlite3
import logging
import datetime
import collections
import numpy as np
import pandas as pd
from typing import Union, Tuple
from qtpy import QtCore, QtWidgets, QtGui
from utils.qapp import setHighDPI, setDarkStyle, loadQIcon
from utils.qapp import checkQLineEditValidatorState
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from parse_log import parse_memory_log
__version__ = '1.2.3'
__revision__ = 14
__app_title__ = 'MemoryUsageMonitor'
class MemoryLogParserRunnable(QtCore.QObject):
""" Runnable object for parsing memory log """
queue = QtCore.Signal()
ev = QtCore.Signal(object)
def __init__(self, fpath, p_name=None):
super().__init__()
self._fpath = fpath
self._p_name = p_name
self.queue.connect(self.run)
@QtCore.Slot()
def run(self):
self.ev.emit({'progress_init': ('Parsing ...', 200, 0, 0)})
try:
d = parse_memory_log(self._fpath, self._p_name)
self.ev.emit({'progress_reset': 1})
self.ev.emit({'memory_log': d})
except Exception as e:
error_msg = 'Failed to parse memory log {}. Error message is {}'.format(self._fpath, repr(e))
logging.error(error_msg)
self.ev.emit({'progress_reset': 1})
self.ev.emit({'error': error_msg})
class TreeItemsSelector(QtWidgets.QDialog):
""" A common item selector using tree widget """
def __init__(self, items: list, title='Items Selector', item_cat='Features', parent=None):
super().__init__(parent)
self.setWindowTitle(title)
self.setMinimumSize(400, 200)
self._items = {}
self._init_ui(items, item_cat)
def _init_ui(self, items, item_cat):
""" Initialize the user interface """
tree = QtWidgets.QTreeWidget()
tree.setColumnCount(1)
# tree.setHeaderHidden(True)
tree.setHeaderLabel(item_cat)
# parent = QtWidgets.QTreeWidgetItem(tree)
# parent.setText(0, '{}'.format(item_cat))
# parent.setFlags(parent.flags() | QtCore.Qt.ItemIsTristate | QtCore.Qt.ItemIsUserCheckable)
for item in items:
tree_item = QtWidgets.QTreeWidgetItem(tree)
tree_item.setText(0, '{}'.format(item))
tree_item.setFlags(tree_item.flags() | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable)
tree_item.setCheckState(0, QtCore.Qt.Unchecked)
tree.itemChanged.connect(self._on_item_toggled)
btn_box = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
)
btn_box.accepted.connect(self.accept)
btn_box.rejected.connect(self.reject)
vbox_layout = QtWidgets.QVBoxLayout()
vbox_layout.addWidget(tree)
vbox_layout.addWidget(btn_box)
self.setLayout(vbox_layout)
    def _on_item_toggled(self, item, column):
        if item.checkState(column) == QtCore.Qt.Checked:
            checked = True
        elif item.checkState(column) == QtCore.Qt.Unchecked:
            checked = False
        else:
            return  # partially checked: nothing to record
        self._items[item.text(column)] = checked
@property
def items(self) -> Tuple:
items = [k for k, v in self._items.items() if v]
return tuple(items)
class MemoryUsageMonitor(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self._settings = QtCore.QSettings(QtCore.QSettings.NativeFormat,
QtCore.QSettings.UserScope,
'HF_AIO', 'MemoryUsageMonitor')
self._pid = None
self._ct = ''
self._dq = collections.deque(maxlen=self._settings.value('dq_maxlen', 120, type=int))
self._progress = QtWidgets.QProgressDialog(self)
self._progress.setCancelButton(None)
        self._progress.setWindowTitle(__app_title__)
self._progress.setWindowModality(QtCore.Qt.WindowModal)
self._progress.setMinimumWidth(300)
self._progress.reset()
self._worker_thread = QtCore.QThread()
self._worker_thread.start()
self._log_parse_runnable = None # type: Union[None, QtCore.QObject]
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._init_ui()
self._setup_shortcuts()
def _init_ui(self):
self.setMinimumSize(800, 600)
self.setWindowTitle("{0} ({1}.{2})".format(
            __app_title__, __version__, __revision__))
# self.setWindowIcon(loadQIcon('icons/app_icon.png'))
# The main widget
widget = QtWidgets.QWidget()
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
widget.setSizePolicy(size_policy)
# create widgets ... # the first row
ctrl_layout = self._create_main_ctrls()
# create matplotlib widgets and components
canvas = self._setup_mpl_widget()
main_layout = QtWidgets.QVBoxLayout()
main_layout.addWidget(canvas)
main_layout.addLayout(ctrl_layout)
widget.setLayout(main_layout)
self.setCentralWidget(widget)
self.statusBar().showMessage('Launched ...', 1000)
def _setup_plot_frame(self, monitor=True):
self._mpl_ax.spines['bottom'].set_color('w')
self._mpl_ax.spines['top'].set_color('w')
self._mpl_ax.spines['right'].set_color('w')
self._mpl_ax.spines['left'].set_color('w')
# white text, ticks
self._mpl_ax.set_title('Memory Usage Monitor',
color='w', fontdict={'fontsize': 10})
self._mpl_ax.set_ylabel('Usage (MB)', color='w')
self._mpl_ax.tick_params(axis='both', color='w')
self._mpl_ax.tick_params(colors='w', labelsize=8)
# dark background
color = self.palette().color(QtGui.QPalette.Window).getRgbF()
self._mpl_ax.figure.patch.set_facecolor(color)
color = self.palette().color(QtGui.QPalette.Base).getRgbF()
self._mpl_ax.set_facecolor(color)
if monitor:
x = np.linspace(0, 10 * np.pi, 100)
self.line_rss = self._mpl_ax.plot(x, np.sin(x), '-', label='Mem Usage')[0]
self.line_vms = self._mpl_ax.plot(
x, np.sin(random.random() * np.pi + x), '--', label='VM Size')[0]
self._mpl_ax.legend()
self._mpl_ax.set_xlabel('Date', color='w')
else:
self._mpl_ax.grid(True)
self._mpl_ax.set_xlabel('Elapsed Hours', color='w')
def _setup_mpl_widget(self):
canvas = FigureCanvas(Figure(figsize=(5, 3)))
self._mpl_ax = canvas.figure.subplots()
canvas.figure.set_tight_layout(True)
self.addToolBar(
QtCore.Qt.TopToolBarArea,
NavigationToolbar(self._mpl_ax.figure.canvas, self)
)
self._setup_plot_frame()
return canvas
def _create_main_ctrls(self):
layout = QtWidgets.QHBoxLayout()
label1 = QtWidgets.QLabel('Interval (second)')
interval = QtWidgets.QLineEdit()
interval.setValidator(QtGui.QIntValidator(0, 1000000000))
interval.setObjectName('interval')
interval.setAlignment(QtCore.Qt.AlignCenter)
interval.setToolTip('Data sampling interval')
interval.setText(self._settings.value('interval', '10', type=str))
interval.textEdited[str].connect(self._update_settings)
interval.textChanged.connect(self._check_validator_state)
layout.addWidget(label1)
layout.addWidget(interval)
label2 = QtWidgets.QLabel('Process name')
p_name = QtWidgets.QLineEdit()
p_name.setObjectName('process_name')
p_name.setAlignment(QtCore.Qt.AlignCenter)
        p_name.setToolTip('Name of the process including the extension.'
                          ' It is case sensitive, and duplicated names are not well supported!')
p_name.setText(self._settings.value('process_name', '', type=str))
p_name.textEdited[str].connect(self._update_settings)
layout.addWidget(label2)
layout.addWidget(p_name)
label3 = QtWidgets.QLabel('Buffered data length*')
dq_maxlen = QtWidgets.QLineEdit()
dq_maxlen.setValidator(QtGui.QIntValidator(0, 9999))
dq_maxlen.setObjectName('dq_maxlen')
dq_maxlen.setAlignment(QtCore.Qt.AlignCenter)
        dq_maxlen.setToolTip('Maximum length of the buffered data points; press Enter to apply the change on the fly!')
dq_maxlen.setText(self._settings.value('dq_maxlen', '120', type=str))
dq_maxlen.editingFinished.connect(self._on_buffer_size_changed)
dq_maxlen.textEdited[str].connect(self._update_settings)
dq_maxlen.textChanged.connect(self._check_validator_state)
layout.addWidget(label3)
layout.addWidget(dq_maxlen)
self._start_btn = QtWidgets.QPushButton('Start')
self._start_btn.clicked.connect(self._on_start)
self._start_btn.setEnabled(True)
self._stop_btn = QtWidgets.QPushButton('Stop')
self._stop_btn.clicked.connect(self._on_stop)
self._stop_btn.setEnabled(False)
layout.addWidget(self._start_btn)
layout.addWidget(self._stop_btn)
return layout
def _setup_shortcuts(self):
shortcut_t = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_T), self)
shortcut_t.activated.connect(self._toggle_window_on_top)
shortcut_s = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_S), self)
shortcut_s.activated.connect(self._toggle_start_stop)
shortcut_o = QtWidgets.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_O), self)
shortcut_o.activated.connect(self._open_memory_log)
def _on_buffer_size_changed(self):
try:
val = self._settings.value('dq_maxlen', 120, type=int)
self._dq = collections.deque(reversed(self._dq), maxlen=val)
self._dq.reverse()
msg = 'New buffer max length is {}, current size is {}'.format(val, len(self._dq))
self.statusBar().showMessage(msg, 1000)
except Exception as e:
self.statusBar().showMessage(repr(e), 1000)
def _toggle_window_on_top(self):
self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowStaysOnTopHint)
self.show()
if self.windowFlags() & QtCore.Qt.WindowStaysOnTopHint:
msg = 'Stays On Top: ON'
else:
msg = 'Stays On Top: OFF'
self.statusBar().showMessage(msg, 1000)
def _toggle_start_stop(self):
if self._timer.isActive():
self._on_stop()
else:
self._on_start()
def _on_start(self):
self._stop_btn.setEnabled(True)
self._start_btn.setEnabled(False)
interval = self._settings.value('interval', 10, type=int)
p_name = self._settings.value('process_name', '', type=str)
msg = 'Start monitor: [interval: {}, process name {}]'.format(interval, p_name)
logging.debug(msg)
self.statusBar().showMessage(msg, 1000)
# start timer
self._dq.clear()
self._pid = None
self._ct = ''
self._timer.start(interval * 1000)
self._mpl_ax.clear()
self._setup_plot_frame()
def _on_stop(self):
self._stop_btn.setEnabled(False)
self._start_btn.setEnabled(True)
msg = 'Stop monitor: [pid: {}, create time: {}]'.format(self._pid, self._ct)
logging.debug(msg)
self.statusBar().showMessage(msg, 1000)
# stop timer
self._timer.stop()
def _update_settings(self, q_str):
w = self.sender()
if isinstance(w, QtWidgets.QCheckBox):
if w.checkState() == QtCore.Qt.Checked:
self._settings.setValue(w.objectName(), '1')
else:
self._settings.setValue(w.objectName(), '0')
elif isinstance(w, QtWidgets.QLineEdit):
self._settings.setValue(w.objectName(), w.text())
elif isinstance(w, QtWidgets.QComboBox):
self._settings.setValue(w.objectName(),
'{}'.format(w.currentIndex()))
def _check_validator_state(self):
checkQLineEditValidatorState(self.sender(), self.palette().color(QtGui.QPalette.Base))
def closeEvent(self, event):
super().closeEvent(event)
def _update_process_id(self, p_name):
# try to check whether this id is still valid
if self._pid is not None:
try:
p = psutil.Process(self._pid)
if p.name() != p_name:
self._pid = None
self._ct = ''
except Exception:
msg = 'Process [{}]-[{}] is Dead'.format(self._pid, self._ct)
logging.info(msg)
self.statusBar().showMessage(msg, 1000)
self._pid, self._ct = None, ''
self._dq.clear()
self._mpl_ax.set_title(
'Memory Usage Monitor ({} Not Found)'.format(p_name),
color='w', fontdict={'fontsize': 10})
self._mpl_ax.figure.canvas.draw_idle()
# try to get a new pid
if self._pid is None:
for proc in psutil.process_iter(attrs=['pid', 'name']):
if proc.info['name'] == p_name:
self._pid = proc.info['pid']
self._ct = datetime.datetime.fromtimestamp(
proc.create_time()).strftime('%Y-%m-%d %H:%M:%S')
self._mpl_ax.set_title('Memory Usage Monitor ({} - {})'.format(p_name, self._ct),
color='w', fontdict={'fontsize': 10})
msg = 'New process [{}]-[{}] found'.format(self._pid, self._ct)
logging.info(msg)
self.statusBar().showMessage(msg, 1000)
break
def _on_timer(self):
p_name = self._settings.value('process_name', '', type=str)
self._update_process_id(p_name)
if self._pid is not None:
process = psutil.Process(self._pid)
memory_usage = process.memory_info()
logging.info('[{}]-[{}]-[{}] - [{}, {}]'.format(
self._pid, p_name, self._ct, memory_usage.rss, memory_usage.vms))
ts = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self._dq.appendleft((ts, memory_usage.rss, memory_usage.vms))
x = np.arange(0, len(self._dq))
self.line_rss.set_xdata(x)
rss = np.array([x[1] / 1024 / 1024 for x in self._dq])
self.line_rss.set_ydata(rss)
self.line_vms.set_xdata(x)
vms = np.array([x[2] / 1024 / 1024 for x in self._dq])
self.line_vms.set_ydata(vms)
self._mpl_ax.set_ylim(0, max(np.max(vms), np.max(rss)) * 1.1)
self._mpl_ax.set_xlim(
0,
min(max(len(x) * 1.2, self._dq.maxlen // 4), self._dq.maxlen)
)
ts = [x[0] for x in self._dq]
labels = []
for pos in self._mpl_ax.get_xticks():
pos = int(pos)
if pos < len(ts):
labels.append(ts[pos][5:])
else:
labels.append('')
self._mpl_ax.set_xticklabels(labels)
self._mpl_ax.figure.canvas.draw()
@QtCore.Slot(object)
def _on_assist_worker_thread_event(self, d):
""" d is python dict """
if 'error' in d:
error_msg = d['error']
            QtWidgets.QMessageBox.critical(self, __app_title__, error_msg)
elif 'warn' in d:
warn_msg = d['warn']
            QtWidgets.QMessageBox.warning(self, __app_title__, warn_msg)
elif 'progress_init' in d:
txt, duration, pos_min, pos_max = d['progress_init']
self._progress.setLabelText(txt)
self._progress.setMinimumDuration(duration)
self._progress.setRange(pos_min, pos_max)
self._progress.setValue(pos_min)
elif 'progress_update' in d:
self._progress.setValue(d['progress_update'])
elif 'progress_reset' in d:
self._progress.reset()
elif 'memory_log' in d:
self._draw_memory_log(d['memory_log'])
def _draw_memory_log(self, d: pd.DataFrame):
if d.empty:
p_name = self._settings.value('process_name', '', type=str)
            QtWidgets.QMessageBox.warning(self, __app_title__,
'Memory usage log of process `{}` is not found!'.format(p_name))
return
g = d.groupby(['Process'])
items = list(g.groups.keys())
if len(items) != 1:
dlg = TreeItemsSelector(items, title='Select items to draw', item_cat='Process Information', parent=self)
if dlg.exec() == QtWidgets.QDialog.Accepted:
items = dlg.items
else:
return
if not items:
return
n = len(items)
self._progress.setRange(0, n)
self._progress.setValue(0)
self._mpl_ax.clear()
self._setup_plot_frame(False)
interval = self._settings.value('interval', 10, type=int)
length_lim = self._settings.value('length_limit', 100, type=int)
convert_to_hours = 60 * 60 / interval
not_empty_plot = False
for key, grp in g:
if key not in items or len(grp['rss']) < length_lim:
logging.warning('{} dropped, not selected or not enough length'.format(key))
else:
not_empty_plot = True
self._mpl_ax.plot(np.arange(len(grp['rss'])) / convert_to_hours, grp['rss'] / 1024 / 1024, label=key)
self._progress.setValue(self._progress.value() + 1)
if not_empty_plot:
self._mpl_ax.legend()
self._mpl_ax.figure.canvas.draw()
self._progress.reset()
def _open_memory_log(self):
log_path, _filter = QtWidgets.QFileDialog.getOpenFileName(
self, 'Select Memory Log file',
directory=self._settings.value('prev_log_dir', '.', type=str),
filter='Memory Log (*.log)')
if not log_path:
return
self._settings.setValue('prev_log_dir', os.path.dirname(log_path))
# firstly stop monitor
self._on_stop()
p_name = self._settings.value('process_name', '', type=str)
if self._log_parse_runnable is not None:
self._log_parse_runnable.ev.disconnect(self._on_assist_worker_thread_event)
# pass image to worker
self._log_parse_runnable = MemoryLogParserRunnable(log_path, p_name)
self._log_parse_runnable.moveToThread(self._worker_thread)
self._log_parse_runnable.ev.connect(self._on_assist_worker_thread_event)
self._log_parse_runnable.queue.emit()
def center(self):
frame_gm = self.frameGeometry()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
center_pt = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
frame_gm.moveCenter(center_pt)
self.move(frame_gm.topLeft())
if __name__ == "__main__":
# enable logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
# file output to record memory usage
fh = logging.FileHandler('memory.log')
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
# we also need stream output for debugging
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.setLevel(logging.WARNING)
# add the handlers to logger
logger.addHandler(fh)
logger.addHandler(ch)
# logging end
setHighDPI()
# create Qt Application
app = QtWidgets.QApplication(sys.argv)
app.setWindowIcon(loadQIcon('icons/app_icon.png'))
try:
import qtmodern.styles
qtmodern.styles.dark(app)
except ModuleNotFoundError:
setDarkStyle(app)
# update default font for Windows 10
if sys.platform == "win32":
font = QtGui.QFont("Segoe UI", 9)
app.setFont(font)
# create the MainForm
form = MemoryUsageMonitor()
form.center()
try:
import qtmodern.windows
mw = qtmodern.windows.ModernWindow(form)
mw.show()
except ModuleNotFoundError:
form.show()
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
## Sequence unpacking
person = {"name": "jim", "age": "1", "sex": "male"}
key, value = person.popitem()
print(key, value)
# Use * to collect the remaining values
a, b, *rest = [1, 2, 3, 4]
print(a, b, rest)
## Chained assignment: x = y = somefunction()
## Augmented assignment: x += 1
### Code blocks
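# Runnable demo of the two assignment forms noted above (a quick sketch).
x = y = 10   # chained assignment: both names bind to the same value
x += 1       # augmented assignment rebinds x only
print(x, y)  # -> 11 10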
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pydoc
import subprocess
import sys
import signal
from pkg_resources import get_distribution
from termcolor import colored
from projects import config
from projects import gui
from projects import paths
from projects import projectfile
__version__ = get_distribution('projects').version
help_text = '''\
===============================================================================
_ _
(_) | |
_ __ _ __ ___ _ ___ ___| |_ ___
| '_ \| '__/ _ \| |/ _ \/ __| __/ __|
| |_) | | | (_) | | __/ (__| |_\__ \\
| .__/|_| \___/| |\___|\___|\__|___/
| | _/ |
|_| |__/
===============================================================================
i n t u i t i v e p r o j e c t m a n a g e m e n t
===============================================================================
<projects> is an easy to use project navigation tool and a Makefile-like
scripting engine. Its main purpose is to provide a simpler scripting interface
with a built in man page generator. You can define your commands with inline
documentation in Projectfiles. You can have one Projectfile in every directory
inside your project; <projects> will process them recursively.
<projects> works on every UNIX system with Python 2.7+ or 3.x installed.
<projects> is not a replacement for Makefile or CMake; it is an optional
wrapper for them.
Features:
- quick project navigation with minimal typing
- Projectfile based recursive scripting system
- command concatenation and recursive separation
- automatic manual page generation
Configuration
When projects starts up for the first time, it creates its configuration
file (only if it doesn't exist already) inside your home directory: ~/.prc
By default it contains the following options in YAML format:
╔═══════════════════════════════════════════════════════════════════════════╗
║ $ cat ~/.prc ║
║ max-doc-width: 80 ║
║ projects-path: ~/projects ║
╚═══════════════════════════════════════════════════════════════════════════╝
projects-path [mandatory]
Its value tells projects where it can find your projects' repositories.
max-doc-width [optional]
The maximum width of the generated manual pages. If not defined, it will be
set to 80. <projects> will adapt to narrower terminals.
Usage:
p
p p
p <command>
p (-h|--help)
p (-v|--version)
p (-i|--init)
p (-w|--walk)
p (-l|--list) <command>
p (-md|--markdown) [<file_name>]
p
This command is the main trigger for projects. It behaves differently
depending on your current working directory.
OUTSIDE your projects directory, it opens the project selector screen, where
you can select your project by typing the project's name or by using the
arrow keys.
INSIDE any of your projects (inside the repository root directory) this
command will show the manual generated from the Projectfiles.
p p
This command behaves the same as the previous "p" command but it will always
display the project selector screen. This could be handy if you want to
switch projects quickly.
This is the only reserved command name; you cannot use it for your own
commands.
p <command>
This is the command for executing commands defined in the Projectfiles. By
convention, all defined commands should start with an alphanumeric character.
Commands starting with a dash are reserved for <projects> itself.
The <command> keyword can be anything except the already taken keywords:
p, -h, --help, -v, --version, -i, --init, -w, --walk, -l, --list
p (-h|--help)
Brings up this help screen.
p (-v|--version)
Prints out the current <projects> version.
p (-i|--init)
Generates a template Projectfile into the current directory.
p (-w|--walk)
Lists out all directories in your project in the walk order <projects> will
follow. It marks the directories that contain a Projectfile.
p (-l|--list) <command>
Lists out the processed command bodies for the given command.
p (-md|--markdown) [<file_name>]
Generates a Markdown file from your processed Projectfiles. You can
optionally specify a name for the generated file. The default name is
README.md.
===============================================================================
_____ _ _ __ _ _
| __ \ (_) | | / _(_) |
| |__) | __ ___ _ ___ ___| |_| |_ _| | ___
| ___/ '__/ _ \| |/ _ \/ __| __| _| | |/ _ \\
| | | | | (_) | | __/ (__| |_| | | | | __/
|_| |_| \___/| |\___|\___|\__|_| |_|_|\___|
_/ |
|__/
===============================================================================
Projectfiles are the files you create in order to define commands that will be
executed with the "p <command>". Projectfiles provide a powerful and self
explanatory way to interact with your project.
You can create an example Projectfile with the "p (-i|--init)" command. The
generated Projectfile will demonstrate all provided functionality except the
recursive command concatenation since it will generate only one Projectfile.
There are mandatory and optional features you can add to a Projectfile.
Mandatory:
- <projects> version
- at least one command definition header
- command body
Optional:
- main description
- variables
- command alternatives
- command dependency list
- command description
- recursive separator
Feature order:
There is a strict order in which you can place each feature. Between
features, an arbitrary number of empty lines is allowed. The order is the
following:
1. version
2. main description
3. variables
4. command header
5. command description
6. command body (pre, separator and post)
version [mandatory]
╔═══════════════════════════════════════════════════════════════════════════╗
║ from v{version} ║
║ ... ║
╚═══════════════════════════════════════════════════════════════════════════╝
This feature will define the earliest version that is compatible with the
used Projectfile format. All <projects> versions greater or equal to the
defined one will be compatible with the format, but earlier versions may have
problems with future features. The first release version is v1.0.0.
If there are more Projectfiles in your project and the defined versions are
different, the smallest version will be used to maximize the functionality.
main description [optional]
╔═══════════════════════════════════════════════════════════════════════════╗
║ ... ║
║ """ ║
║ Description for the whole project. ║
║ """ ║
║ ... ║
╚═══════════════════════════════════════════════════════════════════════════╝
After the version you can define a global description of the whole project.
You can write long lines, <projects> will wrap them according to the defined
"max-doc-width" key in the ~/.prc configuration file. Single line breaks
won't break the lines in the generated manual. You have to use an empty line
in order to add a line break.
If you have multiple Projectfiles created, the main descriptions will be
concatenated with empty lines according to the walk order.
variables [optional]
╔═══════════════════════════════════════════════════════════════════════════╗
║ ... ║
║ variable = 42 ║
║ other_variable = "This is a string with spaces" ║
║ yet_another_variable = Quotes are optional. This is still valid. ║
║ ... ║
╚═══════════════════════════════════════════════════════════════════════════╝
You can define variables as well. Each variable will be used as a string. No
other variable format is currently supported. You can omit the quotes if you
want; <projects> will use the entire string you write after the "=" sign.
To use the variables you need to escape them:
$variable
${{variable}}
Both escape forms are interpreted equally.
Defined variables go to the global variable pool. You cannot assign a
variable more than once, hence you cannot redefine a variable in a later
Projectfile (a Projectfile that is processed later according to the walk
order). Redefining a variable will raise an error. Since every variable goes
to the global variable pool, you can use the variables in any Projectfile
independently of which Projectfile defined them. It is possible to use a
variable in the root level Projectfile that is defined in a later
Projectfile.
command header [mandatory]
╔═══════════════════════════════════════════════════════════════════════════╗
║ ... ║
║ my_command|alternative1|alt2: [dependency1, dependency2] ║
║ ... ║
╚═══════════════════════════════════════════════════════════════════════════╝
The command header feature allows you to define a command, its alternatives
and its dependent commands. The first keyword is the default keyword
for the command. Alternatives are separated with the pipe "|" character.
After the keyword definitions, a colon ":" closes the command header. After
the colon, you can define a list of other commands that are executed in the
given order before the current command itself executes.
According to the given example you can invoke your command with the following
syntax inside your project directory:
p my_command
p alternative1
p alt2
All of these will execute the same command body after the dependent commands
(dependency1 and dependency2) are executed first in the given order.
A command cannot be redefined in the same Projectfile twice. If you redefine
a command in another Projectfile, the commands' bodies will be appended to
each other according to the path relationship of these files.
command description [optional]
╔═══════════════════════════════════════════════════════════════════════════╗
║ ... ║
║ my_command: ║
║ """ ║
║ This is a command description. ║
║ """ ║
║ ... ║
╚═══════════════════════════════════════════════════════════════════════════╝
The command description will be added to the generated manual. It behaves the
same as the main description, except it requires some form of indentation
(space or tab; the amount doesn't matter).
If a command is redefined in another Projectfile, the command descriptions
will be appended according to the path relationship of these files.
command body [mandatory]
╔═══════════════════════════════════════════════════════════════════════════╗
║ ... ║
║ my_command: ║
║ command1 ║
║ command2 ║
║ ... ║
╚═══════════════════════════════════════════════════════════════════════════╝
The command body defines what commands <projects> needs to execute if you
invoke the given command with the "p <command>" syntax inside your project
directory. Commands need to be indented in some way (at least one space).
<projects> will execute all given commands line by line.
Template Projectfile
The following Projectfile can be generated with the `p (-i|--init)` command:
╔═══════════════════════════════════════════════════════════════════════════╗
║ from v1.0.0 ║
║ ║
║ """ ║
║ This is a template Projectfile you have created with the 'p (-i|--init)'  ║
║ command. You can use the provided commands 'hello' and 'answer' or its  ║
║ shorter alternatives 'h' and 'ans' or 'a'. ie.: p <command>. ║
║ ║
║ You can start a new paragraph in the descriptions by inserting an empty ║
║ line like this. ║
║ ║
║ Descriptions are useful as they provide a searchable automatically ║
║ generated manual for your project for free. You can invoke this manual ║
║ with the "p" command if you are inside your project directory. ║
║ """ ║
║ ║
║ magic = 42 # variables goes to the global variable space ║
║ ║
║ hello|h: [a] ║
║ """ ║
║ This command will greet you. ║
║ ║
║ There is a shorter alternative "h" for the command. It depends      ║
║ on the "a" command which is the alternative of the "answer" command. ║
║ ║
║ If you execute a command with dependencies, its dependencies will be  ║
║ executed first in the defined order. ║
║ """ ║
║ echo "Hi! This is my very own Projectfile." ║
║ ║
║ answer|ans|a: ║
║ """ ║
║ This command will give you the answer for every question. ║
║ ║
║ You can use the long "answer" keyword as well as the shorter "ans" or ║
║ "a" to execute this command. ║
║ ║
║ Inside the Projectfile, you can also refer to a command in another ║
║ command's dependency list by any of its alternatives.  ║
║ """ ║
║ echo "The answer for everything is $magic!" ║
║ # you can also use the ${{magic}} form ║
║ ║
╚═══════════════════════════════════════════════════════════════════════════╝
If you use the "p" command inside your project's root directory,projects will
generate a manual page from the Projectfiles you created. The previously
listed Projectfile will result the following manual page assuming that your
project is called "example" (the project name is picked from it's containing
directory's name):
╔═══════════════════════════════════════════════════════════════════════════╗
║ ========================================================================= ║
║ E X A M P L E ║
║ ========================================================================= ║
║ ║
║ This is a template Projectfile you have created with the "p (-i|--init)"  ║
║ command. You can use the provided commands 'hello' and 'answer' or its  ║
║ shorter alternatives 'h' and 'ans' or 'a'. ie.: p <command>. ║
║ ║
║ You can start a new paragraph in the descriptions by inserting an empty ║
║ line like this. ║
║ ║
║ Descriptions are useful as they provide a searchable automatically ║
║ generated manual for your project for free. You can invoke this manual ║
║ with the "p" command if you are inside your project directory. ║
║ ║
║ ║
║ answer|ans|a: ║
║ ║
║ This command will give you the answer for every question. ║
║ ║
║ You can use the long "answer" keyword as well as the shorter "ans" or ║
║ "a" to execute this command. ║
║ ║
║ Inside the Projectfile, you can also refer to a command in another ║
║ command's dependency list by any of its alternatives.  ║
║ ║
║ ║
║ hello|h: [a] ║
║ ║
║ This command will greet you. ║
║ ║
║ There is a shorter alternative "h" for the command. It depends      ║
║ on the "a" command which is the alternative of the "answer" command. ║
║ ║
║ If you execute a command with dependencies, its dependencies will be  ║
║ executed first in the defined order. ║
║ ║
╚═══════════════════════════════════════════════════════════════════════════╝
This manual is displayed in a pager, so you can exit with the "q" key.
Advanced Projectfile examples
Command concatenation
If you have multiple Projectfiles in your project and there are command
headers that are defined in more than one Projectfile, the command bodies
will be appended according to the path relationship of these files.
╔═════════════════════════════════════╦═════════════════════════════════════╗
║ $ cat ./Projectfile ║ $ cat ./dir/Projectfile ║
║ from v{version} ║ from v{version} ║
║ my_command: ║ my_command: ║
║ echo "This is the root." ║ echo "This is a subdir." ║
╠═════════════════════════════════════╩═════════════════════════════════════╣
║ $ p --walk ║
║ [x] . ║
║ [x] dir ║
╠═══════════════════════════════════════════════════════════════════════════╣
║ $ p --list my_command ║
║ cd /home/user/projects/example ║
║ echo "This is the root." ║
║ cd /home/user/projects/example/dir ║
║ echo "This is the a subdir." ║
╠═══════════════════════════════════════════════════════════════════════════╣
║ $ p my_command ║
║ This is the root. ║
║ This is a subdir. ║
╚═══════════════════════════════════════════════════════════════════════════╝
What you can notice in this example:
1. You can use the "(-w|--walk)" and "(-l|--list)" commands to get
information about the commands will be executed by <projects>.
2. The command listing shows that the command bodies were concatenated
according to the walk order (you can check with the "(-w|--walk)"
command).
3. The concatenated command list contains directory change commands (cd)
so every command defined in a Projectfile gets executed in the same
directory level as it's Projectfile's directory level.
4. Thus the directory change commands, you can notice that each command
will execute in the same execution context regardless of the command's
length (number of lines). This is different than the Makefile
conventions, and provide a much more simpler script writing.
More complex example
There is another feature that can be used for post configuration, e.g.
executing commands after all lower order command bodies were executed. This
feature is called the recursive separator ("==="). If you place this separator
inside a command's body, and there are other lower level Projectfiles in your
project, the command bodies will be appended in a special, recursive order.
In a Projectfile, all commands before the separator are called the "pre"
commands, and all the commands after the separator are called the "post"
commands. The separator in every command body is optional. If there is no
separator, all the command lines in the command body will be handled as a
"pre" command block. Similarly, if the command body starts with a separator,
the whole body will be used as a post block.
If there are no lower level Projectfiles, and you have a command with a
separated body, the separation will be ignored.
If you have lower level Projectfiles, the base level pre commands will be
executed first, then the execution will jump to the lower level Projectfile.
After the lower level Projectfile's command script gets executed, the
execution will jump back after the base level separator, and the base post
block will be executed.
If the lower level Projectfile has separated command bodies, and there is
yet another lower level Projectfile, the execution will jump down
recursively until the last possible separation is executed.
The following example will demonstrate this behavior:
╔═════════════════════════════════════╦═════════════════════════════════════╗
║ $ cat ./Projectfile ║ $ cat ./A/Projectfile ║
║ from v{version} ║ from v{version} ║
║ my_command: ║ my_command: ║
║ echo "pre root" ║ echo "pre A" ║
║ === ║ === ║
║ echo "post root" ║ echo "post A" ║
╠═════════════════════════════════════╬═════════════════════════════════════╣
║ $ cat ./A/B/Projectfile ║ $ cat ./C/Projectfile ║
║ from v{version} ║ from v{version} ║
║ my_command: ║ my_command: ║
║ echo "listing inside A/B" ║ echo "pre C" ║
║ ls -1 ║ === ║
║ echo "done" ║ echo "post C" ║
╠═════════════════════════════════════╩═════════════════════════════════════╣
║ $ ls -1 A/B ║
║ Projectfile ║
║ file1 ║
║ file2 ║
╠═══════════════════════════════════════════════════════════════════════════╣
║ $ p --walk ║
║ [x] . ║
║ [x] A ║
║ [x] A/B ║
║ [x] C ║
╠═══════════════════════════════════════════════════════════════════════════╣
║ $ p --list my_command ║
║ cd /home/user/projects/example ║
║ echo "pre root" ║
║ cd /home/user/projects/example/A ║
║ echo "pre A" ║
║ cd /home/user/projects/example/A/B ║
║ echo "listing inside A/B" ║
║ ls -1 ║
║ echo "done" ║
║ cd /home/user/projects/example/A ║
║ echo "post A" ║
║ cd /home/user/projects/example/C ║
║ echo "pre C" ║
║ echo "post C" ║
║ cd /home/user/projects/example ║
║ echo "post root" ║
╠═══════════════════════════════════════════════════════════════════════════╣
║ $ p my_command ║
║ pre root ║
║ pre A ║
║ listing inside A/B ║
║ Projectfile ║
║ file1 ║
║ file2 ║
║ done ║
║ post A ║
║ pre C ║
║ post C ║
║ post root ║
╚═══════════════════════════════════════════════════════════════════════════╝
What you can notice in this example:
1. The recursive separators work as described. The post commands are
   executed after the pre commands for that level and all the recursive
   lower level commands have executed.
2. Commands get executed at the same level where the Projectfile they are
   defined in is located.
3. Automatic directory changing command insertion is smart enough to insert
   only the absolutely necessary directory changing commands. If there are
   no lower level commands, but the recursive separator exists, no directory
   changing will be inserted before the post commands. If there are no pre
   commands, no directory changing will happen before the recursive
   separator's content. The same goes for the post commands. If there are no
   post commands, no directory changing commands will be inserted after the
   recursive separator's content is executed.
TIP: You can always create a template Projectfile with the "(-i|--init)"
command.
'''.format(version=__version__)
return_path = ''
def path_setting_callback(path):
global return_path
return_path = path
def process_command(command_name, data):
command = data['commands'][command_name]
if 'alias' in command:
command = data['commands'][command['alias']]
if 'dependencies' in command:
for dep in command['dependencies']:
process_command(dep, data)
echoed_commands = []
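    # wrap each command line so that, when executed, it first prints a colored
    # marker: a green ">" for echo lines, a blue "@ <dir>" for cd lines, and a
    # yellow "$ <command>" banner for everything else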
for line in command['script']:
if '&&' in line:
line = line.split('&&')
line = [l.strip() for l in line]
else:
line = [line.strip()]
for l in line:
if l.startswith('echo'):
echoed_commands.append('printf "\033[1;32m> " && {0} && printf "\033[0m"'.format(l))
elif l.startswith('cd'):
p = l.split('cd')
p = p[1].strip()
echoed_commands.append('printf "\033[0;34m@ {0}\033[0m\n" && {1}'.format(p, l))
else:
echoed_commands.append('printf "\033[1;33m$ {0}\033[0m\n" && {0}'.format(l))
concatenated_commands = ' && '.join(echoed_commands)
execute_call(concatenated_commands)
def execute_call(command):
    # universal_newlines=True so process.stdout yields text (not bytes) under Python 3
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() is not None:
break
sys.stdout.write(nextline)
sys.stdout.flush()
output, error = process.communicate()
exit_code = process.returncode
if exit_code != 0:
sys.stderr.write('\r\033[1;31m[ERROR {}]\033[0;31m Error during execution!\033[0m\n'.format(exit_code))
def execute(args, data, conf):
if args:
for command_name in args:
if command_name in data['commands']:
try:
process_command(command_name, data)
                except KeyboardInterrupt:
sigterm_handle(None, None)
else:
pass
else:
gui.show_project_details(data, conf['max-doc-width'])
def sigterm_handle(signal, frame):
sys.stderr.write('\r\r\033[1;31m[!]\033[0;31m User interrupt..\033[0m\n')
sys.exit(1)
def main(args):
signal.signal(signal.SIGTSTP, sigterm_handle)
try:
conf = config.get()
if not os.path.isdir(conf['projects-path']):
os.mkdir(conf['projects-path'])
print("Projects root was created: {}".format(conf['projects-path']))
print("You can put your projects here.")
with open(os.path.join(os.path.expanduser('~'), '.p-path'), 'w+') as f:
f.write(conf['projects-path'])
return
else:
if not os.listdir(conf['projects-path']):
print("Your projects directory is empty. Nothing to do..")
with open(os.path.join(os.path.expanduser('~'), '.p-path'), 'w+') as f:
f.write(conf['projects-path'])
return
args = args[2:]
if len(args) == 1:
if args[0] in ['-v', '--version']:
print(__version__)
return
elif args[0] in ['-i', '--init']:
if paths.inside_project(conf['projects-path']):
if os.path.isfile('Projectfile'):
print('You already have a Projectfile in this directory.. Nothing to do ;)')
else:
projectfile_content = projectfile.DEFAULT_PROJECTFILE.format(__version__)
with open('Projectfile', 'w+') as f:
f.write(projectfile_content)
print('Projectfile created. Use the "p" command to invoke the manual.')
else:
print('You are not inside any of your projects. Use the "p" command to navigate into one.')
return
elif args[0] in ['-h', '--help']:
pydoc.pager(help_text)
return
elif args[0] in ['-w', '--walk']:
if paths.inside_project(conf['projects-path']):
print(projectfile.get_walk_order(os.getcwd()))
else:
print('You are not inside any of your projects. Use the "p" command to navigate into one.')
return
elif args[0] in ['p']:
handle_project_selection(conf)
return
elif args[0] in ['-l', '--list']:
print('Command name missing after this option. Cannot list the command body..\np (-l|--list) <command>')
return
elif args[0] in ['-md', '--markdown']:
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
data['name'] = project_root['name']
md_content = gui.generate_markdown(data)
with open(os.path.join(project_root['path'], 'README.md'), 'w+') as f:
f.write(md_content)
print("README.md file was generated into your project's root.")
return
if len(args) == 2:
if args[0] in ['-l', '--list']:
command = args[1]
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
if command in data['commands']:
if 'alias' in data['commands'][command]:
command = data['commands'][command]['alias']
for line in data['commands'][command]['script']:
print(line)
else:
print('Invalid command: "{}"\nAvailable commands:'.format(command))
for c in data['commands']:
print(c)
return
elif args[0] in ['-md', '--markdown']:
name = args[1]
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
data['name'] = project_root['name']
md_content = gui.generate_markdown(data)
with open(os.path.join(project_root['path'], name), 'w+') as f:
f.write(md_content)
print("A markdown file named \"{}\" was generated into your project's root.".format(name))
return
if paths.inside_project(conf['projects-path']):
handle_inside_project(args, conf)
else:
handle_project_selection(conf)
except projectfile.error.ProjectfileError as e:
error = e.args[0]
message = 'Projectfile error!\n{}'.format(error['error'])
if 'path' in error:
message = '{}\nPath: {}/Projectfile'.format(message, error['path'])
if 'line' in error:
message = '{}\nLine: {}'.format(message, error['line'])
print(colored(message, 'red'))
sys.exit(-1)
except config.ConfigError as e:
error = e.args[0]
message = 'Config error!\n{}'.format(error)
print(colored(message, 'red'))
sys.exit(-1)
def handle_project_selection(conf):
gui.select_project(
paths.list_dir_for_path(conf['projects-path']),
path_setting_callback
)
if return_path:
with open(os.path.join(os.path.expanduser('~'), '.p-path'), 'w+') as f:
f.write(os.path.join(os.path.expanduser(conf['projects-path']), return_path))
def handle_inside_project(args, conf):
project_root = paths.get_project_root(conf['projects-path'], os.getcwd())
data = projectfile.get_data_for_root(project_root['path'])
data['name'] = project_root['name']
execute(args, data, conf)
|
nilq/baby-python
|
python
|
"""Frigate API client."""
from __future__ import annotations
import asyncio
import logging
import socket
from typing import Any, Dict, List, cast
import aiohttp
import async_timeout
from yarl import URL
TIMEOUT = 10
_LOGGER: logging.Logger = logging.getLogger(__name__)
HEADERS = {"Content-type": "application/json; charset=UTF-8"}
# ==============================================================================
# Please do not add HomeAssistant specific imports/functionality to this module,
# so that this library can be optionally moved to a different repo at a later
# date.
# ==============================================================================
class FrigateApiClientError(Exception):
"""General FrigateApiClient error."""
class FrigateApiClient:
"""Frigate API client."""
def __init__(self, host: str, session: aiohttp.ClientSession) -> None:
"""Construct API Client."""
self._host = host
self._session = session
async def async_get_version(self) -> str:
"""Get data from the API."""
return cast(
str,
await self.api_wrapper(
"get", str(URL(self._host) / "api/version"), decode_json=False
),
)
async def async_get_stats(self) -> dict[str, Any]:
"""Get data from the API."""
return cast(
Dict[str, Any],
await self.api_wrapper("get", str(URL(self._host) / "api/stats")),
)
async def async_get_events(
self,
camera: str | None = None,
label: str | None = None,
zone: str | None = None,
after: int | None = None,
before: int | None = None,
limit: int | None = None,
has_clip: bool | None = None,
has_snapshot: bool | None = None,
) -> list[dict[str, Any]]:
"""Get data from the API."""
params = {
"camera": camera,
"label": label,
"zone": zone,
"after": after,
"before": before,
"limit": limit,
"has_clip": int(has_clip) if has_clip is not None else None,
"has_snapshot": int(has_snapshot) if has_snapshot is not None else None,
}
return cast(
List[Dict[str, Any]],
await self.api_wrapper(
"get",
str(
URL(self._host)
/ "api/events"
% {k: v for k, v in params.items() if v is not None}
),
),
)
async def async_get_event_summary(
self,
has_clip: bool | None = None,
has_snapshot: bool | None = None,
) -> list[dict[str, Any]]:
"""Get data from the API."""
params = {
"has_clip": int(has_clip) if has_clip is not None else None,
"has_snapshot": int(has_snapshot) if has_snapshot is not None else None,
}
return cast(
List[Dict[str, Any]],
await self.api_wrapper(
"get",
str(
URL(self._host)
/ "api/events/summary"
% {k: v for k, v in params.items() if v is not None}
),
),
)
async def async_get_config(self) -> dict[str, Any]:
"""Get data from the API."""
return cast(
Dict[str, Any],
await self.api_wrapper("get", str(URL(self._host) / "api/config")),
)
async def async_get_path(self, path: str) -> Any:
"""Get data from the API."""
return await self.api_wrapper("get", str(URL(self._host) / f"{path}/"))
async def api_wrapper(
self,
method: str,
url: str,
data: dict | None = None,
headers: dict | None = None,
decode_json: bool = True,
) -> Any:
"""Get information from the API."""
if data is None:
data = {}
if headers is None:
headers = {}
try:
            # the explicit loop argument is deprecated in async_timeout and not needed
            async with async_timeout.timeout(TIMEOUT):
if method == "get":
response = await self._session.get(
url, headers=headers, raise_for_status=True
)
if decode_json:
return await response.json()
return await response.text()
if method == "put":
await self._session.put(url, headers=headers, json=data)
elif method == "patch":
await self._session.patch(url, headers=headers, json=data)
elif method == "post":
await self._session.post(url, headers=headers, json=data)
except asyncio.TimeoutError as exc:
_LOGGER.error(
"Timeout error fetching information from %s: %s",
url,
exc,
)
raise FrigateApiClientError from exc
except (KeyError, TypeError) as exc:
_LOGGER.error(
"Error parsing information from %s: %s",
url,
exc,
)
raise FrigateApiClientError from exc
except (aiohttp.ClientError, socket.gaierror) as exc:
_LOGGER.error(
"Error fetching information from %s: %s",
url,
exc,
)
raise FrigateApiClientError from exc
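# Added usage sketch (not part of the original module): one way a caller might
# exercise the client; the Frigate host URL below is a made-up example.
async def _demo() -> None:
    async with aiohttp.ClientSession() as session:
        client = FrigateApiClient("http://frigate.local:5000", session)
        print(await client.async_get_version())
        print(await client.async_get_stats())

if __name__ == "__main__":
    asyncio.run(_demo())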
|
nilq/baby-python
|
python
|
import jieba
import jieba.posseg as pseg #part-of-speech tagging
import jieba.analyse as anls #keyword extraction
class Fenci:
def __init__(self):
pass
    # full mode (cut_all=True) vs. precise mode (cut_all=False)
    def cut(self, word, cut_all=True):
        return jieba.cut(word, cut_all=cut_all)
    # search engine mode (used by the __main__ block below)
    def cut_for_search(self, word):
        return jieba.cut_for_search(word)
if __name__ == "__main__":
seg_list = Fenci().cut("你一点也不好看")
print("【cut】:" + "/ ".join(seg_list))
seg_list = Fenci().cut_for_search("你一点也不好看")
print("【cut for search】:" + "/ ".join(seg_list))
|
nilq/baby-python
|
python
|
'''
Unit tests for the environments.py module.
'''
import boto3
import json
import pytest
from mock import patch
from moto import ( mock_ec2,
mock_s3 )
from deployer.exceptions import ( EnvironmentExistsException,
InvalidCommandException)
import deployer.environments as env
import deployer.tests.MyBoto3 as MyBoto3
fake_boto3 = MyBoto3.MyBoto3()
def mock_run_cmd(args, cwd=None):
print("CWD: {}, Running command: {}".format(cwd, " ".join(args)))
return 0
def mock_inst_is_running(instance_id):
return True
@pytest.fixture(scope="function")
def mock_config():
return {
"terraform": "git@gitlab.org:group/project.git?branch=made_up_branch",
"aws_profile": "tests-random",
"aws_region": "us-east-1",
"availability_zones": [
'us-east-1b',
'us-east-1c',
'us-east-1d',
'us-east-1e'
],
"account_id": "123456789012",
"environment": {
"name": "myenvname",
"version": "a",
},
'tags': {
'system_type' : 'mock_product'
},
"env_name": "myenvname-a",
"tf_state": "myenvname-a.tfstate",
"tf_state_bucket": "123456789012-myproj-tfstate",
"project_config": "123456789012-myproj-data",
"project": 'myproj',
"tfvars" : '/tmp/test_tmp_dir/vars.tf',
"tf_root": '/tmp/test_tmp_dir/terraform',
"tmpdir" : '/tmp/test_tmp_dir'
}
@mock_ec2
def mock_vpcs():
ec2c = boto3.client('ec2',
region_name='us-east-1',
aws_access_key_id='',
aws_secret_access_key='',
aws_session_token='')
vpc1 = ec2c.create_vpc(CidrBlock='10.1.0.0/16').get('Vpc').get('VpcId')
vpc2 = ec2c.create_vpc(CidrBlock='10.2.0.0/16').get('Vpc').get('VpcId')
vpc3 = ec2c.create_vpc(CidrBlock='10.3.0.0/16').get('Vpc').get('VpcId')
ec2c.create_tags(Resources = [ vpc1 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-a'},
{'Key':'env',
'Value' : 'myenvname-a'} ])
ec2c.create_tags(Resources = [ vpc2 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-b'},
{'Key':'env',
'Value' : 'myenvname-b'} ])
ec2c.create_tags(Resources = [ vpc3 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-c'},
{'Key':'env',
'Value' : 'myenvname-c'} ])
return ec2c
@mock_s3
@mock_ec2
def test_create_env_exists(mock_config):
expected_arn = [ "arn:aws:ec2:us-east-1:419934374614:instance/i-c3bef428" ]
expected_msg = "\n\nAn environment with the name {} already exists."
expected_msg += "\nPlease tear it down before trying to rebuild."
expected_msg += "\n\n{}".format(json.dumps(expected_arn, indent=4))
env_name = mock_config['env_name']
if 'tags' in mock_config and 'system_type' in mock_config['tags']:
env_name = "-".join([mock_config['tags']['system_type'], env_name ])
s3client = boto3.client('s3')
s3client.create_bucket(Bucket="123456789012-myproj-tfstate")
with pytest.raises(EnvironmentExistsException) as e:
ec2c = boto3.client('ec2')
vpc1 = ec2c.create_vpc(CidrBlock='10.1.0.0/16').get('Vpc').get('VpcId')
ec2c.create_tags(Resources = [ vpc1 ],
Tags=[ {'Key':'Name',
'Value' : 'myproj-myenvname-a'},
{'Key':'env',
'Value' : 'myenvname-a'},
{'Key' : 'system_type',
'Value' : 'mock_product'} ])
with patch('deployer.aws.instance_is_running', mock_inst_is_running):
with patch('deployer.utils.run_command', mock_run_cmd):
with patch('deployer.aws.boto3', fake_boto3):
env.create(mock_config)
from termcolor import colored
assert(e.value.args[0] == colored(expected_msg.format(env_name), 'red'))
return
@mock_s3
@mock_ec2
def test_create_env_does_not_exist(mock_config):
mock_config['environment']['name'] = 'myotherenvname'
mock_config['environment']['version'] = 'z'
s3client = boto3.client('s3')
s3client.create_bucket(Bucket="123456789012-myproj-tfstate")
with patch('deployer.utils.run_command', mock_run_cmd):
with patch('deployer.aws.boto3', fake_boto3):
assert env.create(mock_config)
return
def test_precheck_valid_keys(mock_config):
actions = [ 'create', 'destroy' ]
for action in actions:
with patch('deployer.utils.run_command', mock_run_cmd):
env._precheck(mock_config, action)
return
def test_precheck_invalid_key(mock_config):
with patch('deployer.utils.run_command', mock_run_cmd):
with pytest.raises(InvalidCommandException):
env._precheck(mock_config, 'invalid_command')
return
@mock_ec2
def test_list_deployed_environment_versions(mock_config):
mock_vpcs()
env_name = mock_config['environment']['name']
with patch('deployer.aws.boto3', fake_boto3):
existing_env_versions = env.list_deployed_environment_versions(env_name)
assert existing_env_versions == [ 'a', 'b', 'c' ]
return
@mock_ec2
def test_get_next_env_version(mock_config):
mock_vpcs()
env_name = mock_config['environment']['name']
expected = 'd'
with patch('deployer.aws.boto3', fake_boto3):
with patch('deployer.aws.instance_is_running', mock_inst_is_running):
next_version = env.get_next_version(env_name)
assert expected == next_version
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: utf-8
# Boilerplate so that print does not raise encoding errors on Japanese output
import sys
import io
sys.stdout = io.TextIOWrapper( sys.stdout.buffer, encoding='utf-8' )
# Library for extracting form data when run as a CGI script
import cgi
form_data = cgi.FieldStorage( keep_blank_values = True )
# Library for connecting to the MySQL database
import MySQLdb
con = None
cur = None
# Outputs the HTML for the top page
def print_html():
    # start html
print( '<!DOCTYPE html>' )
print( '<html>' )
    # output head
print( '<head>' )
print( '<meta charset="UTF-8">' )
print( '</head>' )
    # start body
print( '<body>' )
print( '<p>ひとこと掲示板</p>' )
    # output the posting form
print( '<form action="" method="POST">' )
print( '<input type="hidden" name="method_type" value="tweet">' )
print( '<input type="text" name="poster_name" value="" placeholder="なまえ">' )
print( '<br>' )
print( '<textarea name="body_text" value="" placeholder="本文"></textarea>' )
print( '<input type="submit" value="投稿">' )
print( '</form>' )
    # output a horizontal rule
    print( '<hr>' )
    # build the SQL statement that fetches the list of posts
sql = "select * from posts"
    # execute the SQL
cur.execute( sql )
    # fetch every record of the retrieved posts
rows = cur.fetchall()
    # loop over the records one at a time
for row in rows:
print( '<div class="meta">' )
print( '<span class="id">' + row[ 'id' ] + '</span>' )
print( '<span class="name">' + row[ 'name' ] + '</span>' )
print( '<span class="date">' + row[ 'created_at' ] + '</span>' )
print( '</div>' )
print( '<div class="message"><span>' + row[ 'body' ] + '</span></div>' )
    # close body
    print( '</body>' )
    # close html
    print( '</html>' )
# Handles requests that come in through the form
def proceed_methods():
    # get the form type (currently only posting)
    method = form_data[ 'method_type' ].value
    # if it is a tweet (a post)
    if( method == 'tweet' ):
        # extract the poster's name
        poster_name = form_data[ 'poster_name' ].value
        # extract the post body
        body_text = form_data[ 'body_text' ].value
        # build the SQL statement that writes the post to the database
sql = 'insert into posts ( name, body ) values ( %s, %s )'
        # execute the SQL with the extracted name and body
cur.execute( sql, ( poster_name, body_text ) )
con.commit()
        # on success, output a page that redirects back to the top page
print( '<!DOCTYPE html>' )
print( '<html>' )
print( ' <head>' )
print( ' <meta http-equiv="refresh" content="5; url=./">' )
print( ' </head>' )
print( ' <body>' )
print( ' 処理が成功しました。5秒後に元のページに戻ります。' )
print( ' </body>' )
print( '</html>' )
# Runs the main processing
def main():
    # boilerplate required for running as CGI
    print( 'Content-Type: text/html; charset=utf-8' )
    print( '' )
    # connect to the database up front
global con, cur
try:
con = MySQLdb.connect(
host = 'xxx.xxx.xxx.xxx',
user = 'yourname',
passwd = 'yourpassword',
db = 'yourdbname',
use_unicode = True,
charset = 'utf8'
)
except MySQLdb.Error as e:
print( 'データベース接続に失敗しました。' )
print( e )
        # if the database connection failed, stop here
exit()
cur = con.cursor( MySQLdb.cursors.DictCursor )
    # determine whether the request came in through the form
    if( 'method_type' in form_data ):
        # if so, handle it according to the form type
        proceed_methods()
    else:
        # otherwise, display the normal top page
print_html()
    # finally, disconnect from the database once everything is done
cur.close()
con.close()
# Run only when executed as a Python script
if __name__ == "__main__":
    # run main()
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: utf-8
import os
import sys
import re
import numpy as np
#==============================================================================#
def atomgroup_header(atomgroup):
"""
Return a string containing info about the AtomGroup
containing the total number of atoms,
the including residues and the number of residues.
Useful for writing output file headers.
"""
unq_res, n_unq_res = np.unique(
atomgroup.residues.resnames, return_counts=True)
return "{} atom(s): {}".format(
atomgroup.n_atoms, ", ".join(
"{} {}".format(*i) for i in np.vstack([n_unq_res, unq_res]).T))
def fill_template(template, vars, s = "<", e = ">"):
"""
Search and replace tool for filling template files.
Replaces text bounded by the delimiters `s` and `e`
with values found in the lookup dictionary `vars`.
"""
exp = s + "\w*" + e
matches = re.findall(exp, template)
for m in matches:
key = m[1:-1]
template = template.replace(m, str(vars.get(key, m)))
return template
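# Added usage note (not part of the original module): with the default "<" and
# ">" delimiters, known keys are substituted and unknown keys are left intact:
#   fill_template("run <steps> steps at <temp> K", {"steps": 1000})
#   -> 'run 1000 steps at <temp> K'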
def save_path(prefix = ""):
"""Returns a formatted output location for a given file prefix."""
if prefix != "" and prefix[-1] != "/":
prefix += "_"
output = prefix if os.path.dirname(prefix) else os.path.join(os.getcwd(), prefix)
if not os.path.exists(os.path.dirname(output)):
        os.makedirs(os.path.dirname(output))
return output
#==============================================================================#
def nearest_power_two(n):
"""
Select the closest i such that n<=2**i.
"""
current_exp = int(np.ceil(np.log2(n+1)))
if n == 2**current_exp:
n_fft = n
if n < 2**current_exp:
n_fft = 2**current_exp
elif n > 2**current_exp:
n_fft = 2**(current_exp+1)
return n_fft
def zero_pad(x, n):
"""
Pad an array to length `n` with zeros.
If the original array length is greater than `n`,
a copy of the original array is returned with it's length unchanged.
"""
nx = len(x)
if n < nx:
n = nx
new = np.zeros((n, *x.shape[1:]), dtype = x.dtype)
new[:nx] = x
return new
def bin_data(arr, nbins, after = 1, log = True):
"""
Averages array values in bins for easier plotting.
"""
# Determine indices to average between
if log:
bins = np.logspace(np.log10(after), np.log10(len(arr)-1), nbins+1).astype(int)
else:
bins = np.linspace(after, len(arr), nbins+1).astype(int)
bins = np.unique(np.append(np.arange(after), bins))
avg = np.zeros(len(bins)-1, dtype = arr.dtype)
for i in range(len(bins)-1):
avg[i] = np.mean(arr[bins[i]:bins[i+1]])
return avg
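# Added usage sketch (not part of the original module): padding a signal to
# the nearest power of two, as is typical before an FFT.
if __name__ == "__main__":
    x = np.arange(100, dtype=float)
    n_fft = nearest_power_two(len(x))       # 100 <= 2**7 = 128
    print(n_fft, zero_pad(x, n_fft).shape)  # 128 (128,)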
|
nilq/baby-python
|
python
|
from pydantic import BaseModel
class ConsumerResponse(BaseModel):
topic: str
timestamp: str
product_name: str
product_id: int
success: bool
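# Added usage sketch (not part of the original module): pydantic validates and
# coerces field values on construction, e.g. the string "42" below becomes an
# int because product_id is annotated as int.
if __name__ == "__main__":
    resp = ConsumerResponse(
        topic="products",
        timestamp="2021-01-01T00:00:00Z",
        product_name="widget",
        product_id="42",  # coerced to int
        success=True,
    )
    print(resp.json())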
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `te_python` module."""
import pytest
import requests
from te_python import te_python
def test_te_python_initialization():
response = te_python.email_get_details('a53b7747d6bd3f59076d63469d92924e00f407ff472e5a539936453185ecca6c')
assert isinstance(response, dict)
def test_update_api_url():
# make a request to localhost (which should fail)... this makes sure that the base_api_url is being properly used
with pytest.raises(requests.ConnectionError):
te_python.email_get_details('a53b7747d6bd3f59076d63469d92924e00f407ff472e5a539936453185ecca6c')
|
nilq/baby-python
|
python
|
from django.db import models
class Person(models.Model):
first_name = models.CharField(max_length=64)
surname = models.CharField(max_length=64)
class Meta:
app_label = 'person'
db_table = 'person'
ordering = ('surname', 'first_name')
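# Added usage sketch (not part of the original module): with the app installed
# and a database configured, the model is used through the standard ORM
# manager; result ordering follows the Meta.ordering option above.
#
#   Person.objects.create(first_name="Ada", surname="Lovelace")
#   Person.objects.filter(surname__startswith="L")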
|
nilq/baby-python
|
python
|
from util.Tile import Tile
from util.Button import Button
from util.Maze import Maze
from algorithms.BFS import BFS
from algorithms.DFS import DFS
from algorithms.GFS import GFS
from algorithms.AStar import AStar
from math import floor
import pygame
class Grid:
def __init__(self, width, height, tile_w, colorPalette,
line_w = 1, menuOffset = 0, txtSize = 42, nSolutions = 4, fpsFast = 45, fpsSlow = 10):
self.WIDTH = width
self.HEIGHT = height
self.colorPalette = colorPalette
self.Y_OFFSET = menuOffset
self.LINE_W = line_w
self.nSolutions = nSolutions
self.fpsFast = fpsFast
self.fpsSlow = fpsSlow
        self.TILE_W = tile_w # width and height should be divisible by tile_w so the tiles fill the grid exactly
self.RECT_OFF = floor(self.LINE_W / 2) # offset due to the line's width
# array of tiles
self.tiles = []
for h in range(0, self.HEIGHT, self.TILE_W):
row = []
for w in range(0, self.WIDTH, self.TILE_W):
# menu offset is included in the height
row.append(Tile(w, h + self.Y_OFFSET, self.TILE_W, self.RECT_OFF))
self.tiles.append(row)
# array of buttons
pygame.font.init()
bigFont = pygame.font.SysFont('Calibri', txtSize)
self.algButtons = [
Button(110, 50, "Depth FS", bigFont, self.colorPalette),
Button(315, 50, "Breadth FS", bigFont, self.colorPalette),
Button(530, 50, "Greedy FS", bigFont, self.colorPalette),
Button(740, 50, "A-Star", bigFont, self.colorPalette)
]
smallFont = pygame.font.SysFont('Calibri', floor(txtSize / 2))
self.otherButtons = {
"Maze" : Button(110, 150, "Generate Maze", smallFont, self.colorPalette),
"Clear" : Button(280, 150, "Clear", smallFont, self.colorPalette),
"Slow" : Button(725, 150, "Slow", smallFont, self.colorPalette),
"Fast" : Button(800, 150, "Fast", smallFont, self.colorPalette)
}
self.otherButtons["Slow"].highlightTrue()
self.FPS = self.fpsSlow
# origin and target tile -> for dragging them
self.originTile = self.tiles[0][0]
self.originTile.updateState("origin")
self.targetTile = self.tiles[-1][-1]
self.targetTile.updateState("target")
# maze generator
self.mazeGen = None
# for mouse dragging
self.leftBeingClicked = False
self.rightBeingClicked = False
self.originDragged = False
self.targetDragged = False
# algorithm selected stores the selected button representing the choice of algorithm
self.algorithmSelected = None
self.updateAlgorithm(self.algButtons[0])
# algorithm is the algorithm object in itself
self.algorithm = None
self.solved = False
def draw(self, screen):
self.drawGrid(screen)
self.drawTiles(screen)
self.drawButtons(screen)
def update(self, state):
# DRAW STATE
if state == "draw":
(x, y) = pygame.mouse.get_pos()
(xGrid, yGrid) = self.pixelsToGrid(x, y)
if y > self.Y_OFFSET:
clickedTile = self.tiles[yGrid][xGrid]
if self.leftBeingClicked and clickedTile != self.originTile and clickedTile != self.targetTile:
clickedTile.updateState("wall")
elif self.rightBeingClicked and clickedTile != self.originTile and clickedTile != self.targetTile:
clickedTile.updateState("tile")
elif self.originDragged and clickedTile != self.targetTile:
self.originTile.updateState("tile")
self.originTile = clickedTile
self.originTile.updateState("origin")
elif self.targetDragged and clickedTile != self.originTile:
self.targetTile.updateState("tile")
self.targetTile = clickedTile
self.targetTile.updateState("target")
# SOLVE STATE
elif state == "solve":
if self.algorithm.stepSearch() == 1:
self.solved = True
self.updateTilesState(self.algorithm.seen, "seen")
self.updateTilesState(self.algorithm.path, "path")
self.updateTilesState([self.algorithm.getCurrent()], "current")
def drawGrid(self, screen):
# + 1 so that the last lines are included
for w in range(0, self.WIDTH + 1, self.TILE_W):
pygame.draw.line(screen, self.colorPalette["DARKBLUE"], (w, self.Y_OFFSET), (w, self.HEIGHT + self.Y_OFFSET), self.LINE_W)
for h in range(self.Y_OFFSET, self.HEIGHT + self.Y_OFFSET + 1, self.TILE_W):
pygame.draw.line(screen, self.colorPalette["DARKBLUE"], (0, h), (self.WIDTH, h), self.LINE_W)
def drawTiles(self, screen):
for row in self.tiles:
for tile in row:
state = tile.getState()
color = self.colorPalette["GRAY"] # default is gray
                # with Python 3.10+ a match statement would work here
if state == "wall":
color = self.colorPalette["DARKBLUE"]
elif state == "seen":
color = self.colorPalette["BLUE"]
elif state == "path":
color = self.colorPalette["MINT"]
elif state == "current":
color = self.colorPalette["ORANGE"]
elif state == "origin":
color = self.colorPalette["GREEN"]
elif state == "target":
color = self.colorPalette["RED"]
pygame.draw.rect(screen, color, tile.getRect())
def drawButtons(self, screen):
for button in self.algButtons:
button.draw(screen)
for key in self.otherButtons:
self.otherButtons[key].draw(screen)
    def clickDown(self, x, y, left, state): # update tiles according to a click down at the x,y coords of the mouse
# left argument is true if it was a left click, false if it was a right click
if (y < self.Y_OFFSET):
self.menuClick(x, y, state)
elif state == "draw":
if self.originTile.wasItClicked(x, y):
self.originDragged = True
elif self.targetTile.wasItClicked(x, y):
self.targetDragged = True
elif left:
self.leftBeingClicked = True
else:
self.rightBeingClicked = True
def menuClick(self, x, y, state):
if state == "draw":
for button in self.algButtons:
if button.clicked(x, y):
self.updateAlgorithm(button)
return
if state == "draw":
if self.otherButtons["Maze"].clicked(x, y): # generate a maze
self.mazeGen = Maze(
len(self.tiles[0]),
len(self.tiles),
self.pixelsToGrid(*self.originTile.getPosition()),
self.pixelsToGrid(*self.targetTile.getPosition())
)
newMap = self.mazeGen.createMaze(self.nSolutions)
self.changeToNewMap(newMap)
elif self.otherButtons["Clear"].clicked(x, y):
self.changeToNewMap() # leave empty to clear it
if self.otherButtons["Slow"].clicked(x, y):
self.FPS = self.fpsSlow
self.otherButtons["Slow"].highlightTrue()
self.otherButtons["Fast"].highlightFalse()
elif self.otherButtons["Fast"].clicked(x, y):
self.FPS = self.fpsFast
self.otherButtons["Fast"].highlightTrue()
self.otherButtons["Slow"].highlightFalse()
def changeToNewMap(self, newMap = None):
        if newMap is None:
for h in range(len(self.tiles)):
for w in range(len(self.tiles[0])):
if self.tiles[h][w] != self.originTile and self.tiles[h][w] != self.targetTile:
self.tiles[h][w].updateState("tile")
else:
for h in range(len(newMap)):
for w in range(len(newMap[h])):
if self.tiles[h][w] != self.originTile and self.tiles[h][w] != self.targetTile:
if newMap[h][w]:
self.tiles[h][w].updateState("wall")
else:
self.tiles[h][w].updateState("tile")
def updateAlgorithm(self, newAlgorithm):
for button in self.algButtons:
button.highlightFalse()
newAlgorithm.highlightTrue()
self.algorithmSelected = newAlgorithm.text
def clickUp(self):
self.leftBeingClicked = False
self.rightBeingClicked = False
self.originDragged = False
self.targetDragged = False
def defineAlgorithm(self):
# the map is not solved
self.solved = False
# if this is not the first time running an algorithm we have to clean all non wall / tile tiles
self.removePathGrid()
originPos = self.pixelsToGrid(*self.originTile.getPosition())
targetPos = self.pixelsToGrid(*self.targetTile.getPosition())
if self.algorithmSelected == "Breadth FS":
self.algorithm = BFS(originPos, targetPos, self.getGrid())
elif self.algorithmSelected == "Depth FS":
self.algorithm = DFS(originPos, targetPos, self.getGrid())
elif self.algorithmSelected == "Greedy FS":
self.algorithm = GFS(originPos, targetPos, self.getGrid())
elif self.algorithmSelected == "A-Star":
self.algorithm = AStar(originPos, targetPos, self.getGrid())
def removePathGrid(self):
for row in self.tiles:
for tile in row:
tmp = tile.getState()
if tmp != "wall" and tmp != "origin" and tmp != "target":
tile.updateState("tile")
def getGrid(self):
grid = []
for row in self.tiles:
boolRow = []
for tile in row:
if tile.getState() == "wall":
boolRow.append(True)
else:
boolRow.append(False)
grid.append(boolRow)
return grid
def updateTilesState(self, coords, state):
for coord in coords:
(x, y) = coord
if self.tiles[y][x] != self.originTile and self.tiles[y][x] != self.targetTile:
self.tiles[y][x].updateState(state)
def pixelsToGrid(self, x, y):
return (floor(x / self.TILE_W), floor((y - self.Y_OFFSET) / self.TILE_W))
|
nilq/baby-python
|
python
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import operator
import os
import time
from google.api_core.exceptions import ResourceExhausted
from google.cloud import bigquery_datatransfer_v1
from google.protobuf.timestamp_pb2 import Timestamp
RETRY_DELAY = 10
class TimeoutError(Exception):
"""Raised when the BQ transfer jobs haven't all finished within the allotted time"""
pass
def main(
source_project_id: str,
source_bq_dataset: str,
target_project_id: str,
target_bq_dataset: str,
service_account: str,
timeout: int,
):
client = bigquery_datatransfer_v1.DataTransferServiceClient()
transfer_config_name = f"{source_project_id}-{source_bq_dataset}-copy"
existing_config = find_existing_config(
client, target_project_id, transfer_config_name
)
if not existing_config:
existing_config = create_transfer_config(
client,
source_project_id,
source_bq_dataset,
target_project_id,
target_bq_dataset,
transfer_config_name,
service_account,
)
trigger_config(client, existing_config)
wait_for_completion(client, existing_config, timeout)
def find_existing_config(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
gcp_project: str,
transfer_config_name: str,
) -> bigquery_datatransfer_v1.types.TransferConfig:
all_transfer_configs = client.list_transfer_configs(
request=bigquery_datatransfer_v1.types.ListTransferConfigsRequest(
parent=f"projects/{gcp_project}"
)
)
return next(
(
config
for config in all_transfer_configs
if config.display_name == transfer_config_name
),
None,
)
def wait_for_completion(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
running_config: bigquery_datatransfer_v1.types.TransferConfig,
timeout: int,
) -> None:
_start = int(time.time())
while True:
latest_runs = []
latest_runs.append(latest_transfer_run(client, running_config))
logging.info(f"States: {[str(run.state) for run in latest_runs]}")
# Mark as complete when all runs have succeeded
if all([str(run.state) == "TransferState.SUCCEEDED" for run in latest_runs]):
return
# Stop the process when it's longer than the allotted time
if int(time.time()) - _start > timeout:
raise TimeoutError
time.sleep(RETRY_DELAY)
def latest_transfer_run(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
config: bigquery_datatransfer_v1.types.TransferConfig,
) -> bigquery_datatransfer_v1.types.TransferRun:
transfer_runs = client.list_transfer_runs(parent=config.name)
return max(transfer_runs, key=operator.attrgetter("run_time"))
def create_transfer_config(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
source_project_id: str,
source_dataset_id: str,
target_project_id: str,
target_dataset_id: str,
display_name: str,
service_account: str,
) -> bigquery_datatransfer_v1.types.TransferConfig:
transfer_config = bigquery_datatransfer_v1.TransferConfig(
destination_dataset_id=target_dataset_id,
display_name=display_name,
data_source_id="cross_region_copy",
dataset_region="US",
params={
"overwrite_destination_table": True,
"source_project_id": source_project_id,
"source_dataset_id": source_dataset_id,
},
schedule_options=bigquery_datatransfer_v1.ScheduleOptions(
disable_auto_scheduling=True
),
)
request = bigquery_datatransfer_v1.types.CreateTransferConfigRequest(
parent=client.common_project_path(target_project_id),
transfer_config=transfer_config,
service_account_name=service_account,
)
return client.create_transfer_config(request=request)
def trigger_config(
client: bigquery_datatransfer_v1.DataTransferServiceClient,
config: bigquery_datatransfer_v1.types.TransferConfig,
) -> None:
now = time.time()
seconds = int(now)
nanos = int((now - seconds) * pow(10, 9))
try:
client.start_manual_transfer_runs(
request=bigquery_datatransfer_v1.types.StartManualTransferRunsRequest(
parent=config.name,
requested_run_time=Timestamp(seconds=seconds, nanos=nanos),
)
)
except ResourceExhausted:
logging.info(
f"Transfer job is currently running for config ({config.display_name}) {config.name}."
)
return
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_project_id=os.environ["SOURCE_PROJECT_ID"],
source_bq_dataset=os.environ["SOURCE_BQ_DATASET"],
target_project_id=os.environ["TARGET_PROJECT_ID"],
target_bq_dataset=os.environ["TARGET_BQ_DATASET"],
service_account=os.environ["SERVICE_ACCOUNT"],
timeout=int(os.getenv("TIMEOUT", 1200)),
)
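# Added usage note (not part of the original script): everything is driven by
# environment variables, e.g. (made-up values; script file name assumed):
#
#   SOURCE_PROJECT_ID=src-proj SOURCE_BQ_DATASET=raw \
#   TARGET_PROJECT_ID=dst-proj TARGET_BQ_DATASET=raw_copy \
#   SERVICE_ACCOUNT=copier@dst-proj.iam.gserviceaccount.com \
#   TIMEOUT=1800 python copy_bq_dataset.py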
|
nilq/baby-python
|
python
|
from discord.ext import commands
from discord_bot.bot import Bot
class Admin(commands.Cog):
"""Admin commands that only bot owner can run"""
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(name="shutdown", hidden=True)
@commands.is_owner()
    async def shutdown(self, ctx: commands.Context):
"""Closes all connections and shuts down the bot"""
await ctx.send("Shutting down the bot...")
await self.bot.close()
@commands.group(name="extension", aliases=["ext"], hidden=True)
@commands.is_owner()
async def ext(self, ctx: commands.Context):
"""A command to load, reload, unload extensions."""
if ctx.invoked_subcommand is None:
await ctx.reply("This command requires a subcommand to be passed")
@ext.command(name="load", aliases=["l"])
async def load(self, ctx: commands.Context, arg: str):
"""A command to load extensions."""
try:
self.bot.load_extension(f"discord_bot.cogs.{arg}")
await ctx.reply(f"Successfully loaded extension {arg}")
except Exception as e:
await ctx.reply(f"Failed to load ext {arg}\n{e}")
@ext.command(name="unload", aliases=["u"])
async def unload(self, ctx: commands.Context, arg: str):
"""A command to unload extensions"""
try:
self.bot.unload_extension(f"discord_bot.cogs.{arg}")
await ctx.reply(f"Successfully unloaded extension {arg}")
except Exception as e:
await ctx.reply(f"Failed to unload ext {arg}\n{e}")
@ext.command(name="reload", aliases=["r"])
async def reload(self, ctx: commands.Context, arg: str):
"""A command to reload extensions."""
try:
self.bot.reload_extension(f"discord_bot.cogs.{arg}")
await ctx.reply(f"Successfully reloaded extension {arg}")
except Exception as e:
await ctx.reply(f"Failed to reload ext {arg}\n{e}")
def setup(bot: Bot):
bot.add_cog(Admin(bot))
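# Added usage note (not part of the original module): setup() above is the
# entry point discord.py calls when the extension is loaded, e.g. (module
# path assumed):
#
#   bot.load_extension("discord_bot.cogs.admin")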
|
nilq/baby-python
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceMetadata(Model):
"""Represents a Resource metadata.
:param kind: Possible values include: 'bearerAuthenticationConnection',
'sshKeyAuthenticationConnection', 'apiKeyAuthenticationConnection',
'basicAuthenticationConnection', 'firstPartyADConnection',
'amazonS3Connection', 'adlsGen2', 'd365Sales', 'd365Marketing',
'attachCds', 'ftp', 'facebookAds', 'amlWorkspace', 'mlStudioWebservice',
'adRoll', 'rollWorks', 'constantContact', 'campaignMonitor', 'http',
'dotDigital', 'mailchimp', 'linkedIn', 'googleAds', 'marketo',
'microsoftAds', 'omnisend', 'sendGrid', 'sendinblue', 'activeCampaign',
'autopilot', 'klaviyo', 'snapchat', 'powerBI', 'azureSql', 'synapse'
:type kind: str or ~dynamics.customerinsights.api.models.enum
:param resource_id: Gets the Id of the resource.
:type resource_id: str
:param operation_id: Gets the Id of the operation being performed on the
resource.
:type operation_id: str
:param name: Gets the Name of the resource.
:type name: str
:param description: Gets the Description of the resource.
:type description: str
:param key_vault_metadata_id: MetadataId for Linked KeyVaultMetadata
:type key_vault_metadata_id: str
:param mapped_secrets:
:type mapped_secrets:
~dynamics.customerinsights.api.models.MappedSecretMetadata
:param version: Version number of this object.
:type version: long
:param updated_by: UPN of the user who last updated this record.
:type updated_by: str
:param updated_utc: Time this object was last updated.
:type updated_utc: datetime
:param created_by: Email address of the user who created this record.
:type created_by: str
:param created_utc: Time this object was initially created.
:type created_utc: datetime
:param instance_id: Customer Insights instance id associated with this
object.
:type instance_id: str
"""
_attribute_map = {
'kind': {'key': 'kind', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'operation_id': {'key': 'operationId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'key_vault_metadata_id': {'key': 'keyVaultMetadataId', 'type': 'str'},
'mapped_secrets': {'key': 'mappedSecrets', 'type': 'MappedSecretMetadata'},
'version': {'key': 'version', 'type': 'long'},
'updated_by': {'key': 'updatedBy', 'type': 'str'},
'updated_utc': {'key': 'updatedUtc', 'type': 'iso-8601'},
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_utc': {'key': 'createdUtc', 'type': 'iso-8601'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
}
def __init__(self, *, kind=None, resource_id: str=None, operation_id: str=None, name: str=None, description: str=None, key_vault_metadata_id: str=None, mapped_secrets=None, version: int=None, updated_by: str=None, updated_utc=None, created_by: str=None, created_utc=None, instance_id: str=None, **kwargs) -> None:
super(ResourceMetadata, self).__init__(**kwargs)
self.kind = kind
self.resource_id = resource_id
self.operation_id = operation_id
self.name = name
self.description = description
self.key_vault_metadata_id = key_vault_metadata_id
self.mapped_secrets = mapped_secrets
self.version = version
self.updated_by = updated_by
self.updated_utc = updated_utc
self.created_by = created_by
self.created_utc = created_utc
self.instance_id = instance_id
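# Hedged round-trip sketch using msrest's generic Serializer/Deserializer;
# the field values below are illustrative, not from the source:
#
#   from msrest import Serializer, Deserializer
#   client_models = {'ResourceMetadata': ResourceMetadata}
#   body = Serializer(client_models).body(
#       ResourceMetadata(name='conn1', resource_id='123'), 'ResourceMetadata')
#   obj = Deserializer(client_models)('ResourceMetadata', body)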
|
nilq/baby-python
|
python
|
from ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
    for (int i = 0; i < len; i++)  /* exactly len chars; the NUL terminator stays untouched */
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
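# Hedged aside (not in the original tests): declaring a prototype lets ctypes
# type-check the call; POINTER(c_char), unlike c_char_p, signals a mutable
# buffer to the FFI layer:
#
#   from ctypes import POINTER, c_char, c_int
#   lib.upper.argtypes = [POINTER(c_char), c_int]
#   lib.upper.restype = None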
|
nilq/baby-python
|
python
|
from gui import GUI
program = GUI()
program.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# RoarCanvasCommandsEdit.py
# Copyright (c) 2018, 2019 Lucio Andrés Illanes Albornoz <lucio@lucioillanes.de>
#
from GuiFrame import GuiCommandDecorator, GuiCommandListDecorator, GuiSelectDecorator
import wx
class RoarCanvasCommandsEdit():
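    # Two patterns below: plain @GuiCommandDecorator handlers bound directly,
    # and factory methods whose returned closures carry their menu/toolbar
    # metadata in an attrDict attribute consumed by the decorator framework.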
@GuiCommandDecorator("Hide assets window", "Hide assets window", ["toolHideAssetsWindow.png"], None, False)
def canvasAssetsWindowHide(self, event):
self.parentFrame.assetsWindow.Show(False)
self.parentFrame.menuItemsById[self.canvasAssetsWindowHide.attrDict["id"]].Enable(False)
self.parentFrame.menuItemsById[self.canvasAssetsWindowShow.attrDict["id"]].Enable(True)
toolBar = self.parentFrame.toolBarItemsById[self.canvasAssetsWindowHide.attrDict["id"]][0]
toolBar.EnableTool(self.canvasAssetsWindowHide.attrDict["id"], False)
toolBar.EnableTool(self.canvasAssetsWindowShow.attrDict["id"], True)
toolBar.Refresh()
@GuiCommandDecorator("Show assets window", "Show assets window", ["toolShowAssetsWindow.png"], None, False)
def canvasAssetsWindowShow(self, event):
self.parentFrame.assetsWindow.Show(True)
self.parentFrame.menuItemsById[self.canvasAssetsWindowHide.attrDict["id"]].Enable(True)
self.parentFrame.menuItemsById[self.canvasAssetsWindowShow.attrDict["id"]].Enable(False)
toolBar = self.parentFrame.toolBarItemsById[self.canvasAssetsWindowHide.attrDict["id"]][0]
toolBar.EnableTool(self.canvasAssetsWindowHide.attrDict["id"], True)
toolBar.EnableTool(self.canvasAssetsWindowShow.attrDict["id"], False)
toolBar.Refresh()
@GuiSelectDecorator(0, "Solid brush", "Solid brush", None, None, True)
def canvasBrush(self, f, idx):
def canvasBrush_(self, event):
pass
setattr(canvasBrush_, "attrDict", f.attrList[idx])
setattr(canvasBrush_, "isSelect", True)
return canvasBrush_
@GuiCommandListDecorator(0, "Decrease brush width", "Decrease brush width", ["toolDecrBrushW.png"], None, None)
@GuiCommandListDecorator(1, "Decrease brush height", "Decrease brush height", ["toolDecrBrushH.png"], None, None)
@GuiCommandListDecorator(2, "Decrease brush size", "Decrease brush size", ["toolDecrBrushHW.png"], [wx.ACCEL_CTRL, ord("-")], None)
@GuiCommandListDecorator(3, "Increase brush width", "Increase brush width", ["toolIncrBrushW.png"], None, None)
@GuiCommandListDecorator(4, "Increase brush height", "Increase brush height", ["toolIncrBrushH.png"], None, None)
@GuiCommandListDecorator(5, "Increase brush size", "Increase brush size", ["toolIncrBrushHW.png"], [wx.ACCEL_CTRL, ord("+")], None)
def canvasBrushSize(self, f, dimension, incrFlag):
def canvasBrushSize_(event):
if (dimension < 2) and not incrFlag:
if self.parentCanvas.brushSize[dimension] > 1:
self.parentCanvas.brushSize[dimension] -= 1
self.update(brushSize=self.parentCanvas.brushSize)
elif (dimension < 2) and incrFlag:
self.parentCanvas.brushSize[dimension] += 1
self.update(brushSize=self.parentCanvas.brushSize)
elif dimension == 2:
                for dimension_ in [0, 1]:
                    self.canvasBrushSize(f, dimension_, incrFlag)(None)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasBrushSize_, "attrDict", f.attrList[dimension + (0 if not incrFlag else 3)])
return canvasBrushSize_
@GuiCommandListDecorator(0, "Decrease canvas height", "Decrease canvas height", ["toolDecrCanvasH.png"], [wx.ACCEL_CTRL, wx.WXK_UP], None)
@GuiCommandListDecorator(1, "Decrease canvas width", "Decrease canvas width", ["toolDecrCanvasW.png"], [wx.ACCEL_CTRL, wx.WXK_LEFT], None)
@GuiCommandListDecorator(2, "Decrease canvas size", "Decrease canvas size", ["toolDecrCanvasHW.png"], None, None)
@GuiCommandListDecorator(3, "Increase canvas height", "Increase canvas height", ["toolIncrCanvasH.png"], [wx.ACCEL_CTRL, wx.WXK_DOWN], None)
@GuiCommandListDecorator(4, "Increase canvas width", "Increase canvas width", ["toolIncrCanvasW.png"], [wx.ACCEL_CTRL, wx.WXK_RIGHT], None)
@GuiCommandListDecorator(5, "Increase canvas size", "Increase canvas size", ["toolIncrCanvasHW.png"], None, None)
def canvasCanvasSize(self, f, dimension, incrFlag):
def canvasCanvasSize_(event):
if (dimension < 2) and not incrFlag:
if dimension == 0:
if self.parentCanvas.canvas.size[1] > 1:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0], self.parentCanvas.canvas.size[1] - 1])
elif dimension == 1:
if self.parentCanvas.canvas.size[0] > 1:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0] - 1, self.parentCanvas.canvas.size[1]])
elif (dimension < 2) and incrFlag:
if dimension == 0:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0], self.parentCanvas.canvas.size[1] + 1])
elif dimension == 1:
self.parentCanvas.resize([self.parentCanvas.canvas.size[0] + 1, self.parentCanvas.canvas.size[1]])
elif dimension == 2:
                for dimension_ in [0, 1]:
                    self.canvasCanvasSize(f, dimension_, incrFlag)(None)
setattr(canvasCanvasSize_, "attrDict", f.attrList[dimension + (0 if not incrFlag else 3)])
return canvasCanvasSize_
@GuiSelectDecorator(0, "Colour #00", "Colour #00 (Bright White)", None, [wx.ACCEL_CTRL, ord("0")], False)
@GuiSelectDecorator(1, "Colour #01", "Colour #01 (Black)", None, [wx.ACCEL_CTRL, ord("1")], False)
@GuiSelectDecorator(2, "Colour #02", "Colour #02 (Blue)", None, [wx.ACCEL_CTRL, ord("2")], False)
@GuiSelectDecorator(3, "Colour #03", "Colour #03 (Green)", None, [wx.ACCEL_CTRL, ord("3")], False)
@GuiSelectDecorator(4, "Colour #04", "Colour #04 (Red)", None, [wx.ACCEL_CTRL, ord("4")], False)
@GuiSelectDecorator(5, "Colour #05", "Colour #05 (Light Red)", None, [wx.ACCEL_CTRL, ord("5")], False)
@GuiSelectDecorator(6, "Colour #06", "Colour #06 (Pink)", None, [wx.ACCEL_CTRL, ord("6")], False)
@GuiSelectDecorator(7, "Colour #07", "Colour #07 (Yellow)", None, [wx.ACCEL_CTRL, ord("7")], False)
@GuiSelectDecorator(8, "Colour #08", "Colour #08 (Light Yellow)", None, [wx.ACCEL_CTRL, ord("8")], False)
@GuiSelectDecorator(9, "Colour #09", "Colour #09 (Light Green)", None, [wx.ACCEL_CTRL, ord("9")], False)
@GuiSelectDecorator(10, "Colour #10", "Colour #10 (Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("0")], False)
@GuiSelectDecorator(11, "Colour #11", "Colour #11 (Light Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("1")], False)
@GuiSelectDecorator(12, "Colour #12", "Colour #12 (Light Blue)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("2")], False)
@GuiSelectDecorator(13, "Colour #13", "Colour #13 (Light Pink)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("3")], False)
@GuiSelectDecorator(14, "Colour #14", "Colour #14 (Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("4")], False)
@GuiSelectDecorator(15, "Colour #15", "Colour #15 (Light Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("5")], False)
def canvasColour(self, f, idx):
def canvasColour_(event):
if event.GetEventType() == wx.wxEVT_TOOL:
self.parentCanvas.brushColours[0] = idx
elif event.GetEventType() == wx.wxEVT_TOOL_RCLICKED:
self.parentCanvas.brushColours[1] = idx
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColour_, "attrDict", f.attrList[idx])
setattr(canvasColour_, "isSelect", True)
return canvasColour_
@GuiSelectDecorator(0, "Transparent colour", "Transparent colour", None, [wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord("6")], False)
def canvasColourAlpha(self, f, idx):
def canvasColourAlpha_(event):
if event.GetEventType() == wx.wxEVT_TOOL:
self.parentCanvas.brushColours[0] = -1
elif event.GetEventType() == wx.wxEVT_TOOL_RCLICKED:
self.parentCanvas.brushColours[1] = -1
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColourAlpha_, "attrDict", f.attrList[idx])
setattr(canvasColourAlpha_, "isSelect", True)
return canvasColourAlpha_
@GuiSelectDecorator(0, "Transparent colour", "Transparent colour", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("6")], False)
def canvasColourAlphaBackground(self, f, idx):
def canvasColourAlphaBackground_(event):
self.parentCanvas.brushColours[1] = -1
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColourAlphaBackground_, "attrDict", f.attrList[idx])
setattr(canvasColourAlphaBackground_, "isSelect", True)
return canvasColourAlphaBackground_
@GuiSelectDecorator(0, "Colour #00", "Colour #00 (Bright White)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("0")], False)
@GuiSelectDecorator(1, "Colour #01", "Colour #01 (Black)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("1")], False)
@GuiSelectDecorator(2, "Colour #02", "Colour #02 (Blue)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("2")], False)
@GuiSelectDecorator(3, "Colour #03", "Colour #03 (Green)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("3")], False)
@GuiSelectDecorator(4, "Colour #04", "Colour #04 (Red)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("4")], False)
@GuiSelectDecorator(5, "Colour #05", "Colour #05 (Light Red)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("5")], False)
@GuiSelectDecorator(6, "Colour #06", "Colour #06 (Pink)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("6")], False)
@GuiSelectDecorator(7, "Colour #07", "Colour #07 (Yellow)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("7")], False)
@GuiSelectDecorator(8, "Colour #08", "Colour #08 (Light Yellow)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("8")], False)
@GuiSelectDecorator(9, "Colour #09", "Colour #09 (Light Green)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT, ord("9")], False)
@GuiSelectDecorator(10, "Colour #10", "Colour #10 (Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("0")], False)
@GuiSelectDecorator(11, "Colour #11", "Colour #11 (Light Cyan)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("1")], False)
@GuiSelectDecorator(12, "Colour #12", "Colour #12 (Light Blue)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("2")], False)
@GuiSelectDecorator(13, "Colour #13", "Colour #13 (Light Pink)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("3")], False)
@GuiSelectDecorator(14, "Colour #14", "Colour #14 (Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("4")], False)
@GuiSelectDecorator(15, "Colour #15", "Colour #15 (Light Grey)", None, [wx.ACCEL_CTRL | wx.ACCEL_ALT | wx.ACCEL_SHIFT, ord("5")], False)
def canvasColourBackground(self, f, idx):
def canvasColourBackground_(event):
self.parentCanvas.brushColours[1] = idx
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
setattr(canvasColourBackground_, "attrDict", f.attrList[idx])
setattr(canvasColourBackground_, "isSelect", True)
return canvasColourBackground_
@GuiCommandDecorator("Flip colours", "Flip colours", ["toolColoursFlip.png"], [wx.ACCEL_CTRL, ord("I")], True)
def canvasColoursFlip(self, event):
self.parentCanvas.brushColours = [self.parentCanvas.brushColours[1], self.parentCanvas.brushColours[0]]
self.update(colours=self.parentCanvas.brushColours)
viewRect = self.parentCanvas.GetViewStart()
eventDc = self.parentCanvas.backend.getDeviceContext(self.parentCanvas.GetClientSize(), self.parentCanvas, viewRect)
self.parentCanvas.applyTool(eventDc, True, None, None, None, self.parentCanvas.brushPos, *self.parentCanvas.lastMouseState, self.currentTool, viewRect, force=True)
@GuiCommandDecorator("Copy", "&Copy", ["", wx.ART_COPY], None, False)
def canvasCopy(self, event):
pass
@GuiCommandDecorator("Cut", "Cu&t", ["", wx.ART_CUT], None, False)
def canvasCut(self, event):
pass
@GuiCommandDecorator("Delete", "De&lete", ["", wx.ART_DELETE], None, False)
def canvasDelete(self, event):
pass
@GuiCommandDecorator("Paste", "&Paste", ["", wx.ART_PASTE], None, False)
def canvasPaste(self, event):
pass
@GuiCommandDecorator("Redo", "&Redo", ["", wx.ART_REDO], [wx.ACCEL_CTRL, ord("Y")], False)
def canvasRedo(self, event):
        self.parentCanvas.undo(redo=True)
        self.update(size=self.parentCanvas.canvas.size, undoLevel=self.parentCanvas.canvas.patchesUndoLevel)
@GuiCommandDecorator("Undo", "&Undo", ["", wx.ART_UNDO], [wx.ACCEL_CTRL, ord("Z")], False)
def canvasUndo(self, event):
        self.parentCanvas.undo()
        self.update(size=self.parentCanvas.canvas.size, undoLevel=self.parentCanvas.canvas.patchesUndoLevel)
# vim:expandtab foldmethod=marker sw=4 ts=4 tw=0
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
Module installation file
"""
from setuptools import Extension
from setuptools import setup
extension = Extension(
name='fipv',
include_dirs=['include'],
sources=['fipv/fipv.c'],
extra_compile_args=['-O3'],
)
setup(ext_modules=[extension])
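# Build sketch (generic setuptools invocations, not from the original file):
#
#   python setup.py build_ext --inplace   # compile fipv.c in place
#   pip install .                         # or build and install the package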
|
nilq/baby-python
|
python
|
import os
import time
import hashlib
import gzip
import shutil
from subprocess import run
from itertools import chain
CACHE_DIRECTORY = "downloads_cache"
def decompress_gzip_file(filepath):
decompressed = filepath + ".decompressed"
if not os.path.exists(decompressed):
with gzip.open(filepath, 'rb') as f_in:
with open(decompressed, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return decompressed
def download_if_modified(url):
"""Download a file only if is has been modified via curl, see https://superuser.com/a/1159510"""
url_hash = hashlib.md5(url.encode()).hexdigest()
curr_dir = os.path.dirname(os.path.realpath(__file__))
    cache_dir = f'{curr_dir}/{CACHE_DIRECTORY}'
    os.makedirs(cache_dir, exist_ok=True)  # curl -o fails (silently, given -s) if the directory is missing
    filename = f'{cache_dir}/{url_hash}'
print(f'Download {url} if it has been modified, destination is {filename}')
# If file exists and was modified today do not check for update
check_for_update = True
if os.path.exists(filename):
file_stat = os.stat(filename)
file_age_seconds = (time.time() - file_stat.st_mtime)
if file_age_seconds < 60 * 60 * 24:
check_for_update = False
print('File on disk is less than a day old, do not check for update.')
if check_for_update:
run(chain(
('curl', '-s', url),
('-o', filename),
('-z', filename) if os.path.exists(filename) else (),
))
filepath = os.path.abspath(filename)
# Auto decompress gzip files
if url.endswith('.gz'):
return decompress_gzip_file(filepath)
return filepath
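# Usage sketch (the URL is illustrative, not from the source):
#
#   if __name__ == '__main__':
#       path = download_if_modified('https://example.com/dataset.csv.gz')
#       print(f'Local, decompressed copy at {path}')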
|
nilq/baby-python
|
python
|