| id | content |
|---|---|
11550828
|
import discord
from .base import BaseRule
MAX_WORDS_KEY = "max_words"
class MaxWordsRule(BaseRule):
def __init__(
self, config,
):
super().__init__(config)
self.name = "MaxWordsRule"
async def get_max_words_length(
self, guild: discord.Guild,
):
"""Method to get the max words allowed / set"""
try:
return await self.config.guild(guild).get_raw(self.rule_name, MAX_WORDS_KEY)
except KeyError:
return None
async def set_max_words_length(self, guild: discord.Guild, max_length: int):
"""Set the max words length into config - this overrides :)"""
await self.config.guild(guild).set_raw(self.rule_name, MAX_WORDS_KEY, value=max_length)
@staticmethod
async def message_is_max_length(message_content: str, max_length) -> bool:
"""
        Check whether the message's word count meets or exceeds the given threshold
Parameters
----------
message_content
The message content to test
max_length
            The maximum number of words allowed
"""
        words = message_content.split()
        return len(words) >= max_length
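    # Illustrative check (not part of the original rule, just an example of the
    # word-count logic above): "one two three" splits into 3 words, so with
    # max_length=3 the coroutine returns True, while max_length=4 returns False.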
async def is_offensive(self, message: discord.Message):
guild = message.guild
max_length = await self.get_max_words_length(guild)
if not max_length:
return False
return await self.message_is_max_length(message.content, max_length)
|
11550884
|
from .trainer import TOCTrainer
from .default_args import ARGS_TOC_TRAIN, ARGS_TOC_EVAl, ARGS_TOC_TEST
name = "happytransformer.toc"
|
11550904
|
import base64
import json
import os
from typing import Dict, Tuple, TypeVar
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import Encoding
import const
import rogue_ca
T = TypeVar('T')
C = TypeVar('C')
def modify_jws_and_forge_signature(raw_jws: bytes, payload_modify_function=None) -> bytes:
"""Take in a JWS in raw form (concatenated URL-safe base64), modify its payload
using payload_modify_function, and then forge a signature with a new CA that will be attached
to the cert chain.
Arguments:
raw_jws: string -- the raw JWS value (from SafetyNet or similar)
payload_modify_function (optional): function -- will be applied to the payload
before forging signature
"""
# Step 1: parse out the header, payload, signature
header, payload, signature = decode_jws_parts(raw_jws)
_header_parsed = parse_jws_header(header)
# Step 2a: create a new rogue CA and add it to the x5c chain
_rogue_ca_pub, rogue_ca_priv, rogue_ca_cert = rogue_ca.generate_ca('chain-of-fools rogue CA')
# Step 2b: create a leaf cert issued by the CA
leaf_cert, leaf_cert_priv = rogue_ca.generate_leaf_cert('attest.android.com', rogue_ca_cert, rogue_ca_priv)
# Step 3: transform the payload
if payload_modify_function:
payload = payload_modify_function(payload)
# Step 4: package it back up
b64_header, b64_payload, b64_signature = rogue_sign_jws(
leaf_cert_priv, rogue_ca_cert, leaf_cert, header, payload, signature)
return b'.'.join([b64_header, b64_payload, b64_signature])
def _fix_base64_padding(data: bytes) -> bytes:
"""Extend the base64 padding until it's correct. This is needed for some
forms of URL-safe base64 which do not include padding.
"""
missing_padding = len(data) % 4
if missing_padding:
data += b'=' * (4 - missing_padding)
return data
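# Illustrative example (not part of the original script): unpadded URL-safe base64
# decodes cleanly once padding is restored, e.g.
#   base64.urlsafe_b64decode(_fix_base64_padding(b'aGVsbG8')) == b'hello'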
def _urlsafe_b64encode_without_padding(data: bytes) -> bytes:
"""urlsafe_b64encode but remove the padding.
"""
return base64.urlsafe_b64encode(data).rstrip(b'=')
def decode_jws_parts(raw_jws: bytes) -> Tuple[bytes, bytes, bytes]:
"""Take a raw JWS string and base64 decode it, returning the header, payload, and signature.
"""
jws_parts = raw_jws.split(b".")
if len(jws_parts) != 3:
raise Exception("JWS input does not appear to be valid JWS")
header_b64, payload_b64, signature_b64 = map(_fix_base64_padding, jws_parts)
header, payload, signature = map(base64.urlsafe_b64decode, (header_b64, payload_b64, signature_b64))
return header, payload, signature
def parse_jws_header(header: bytes) -> Dict[str, T]:
return json.loads(header)
# ================
def set_safetynet_passing(payload: bytes) -> bytes:
"""Modify the SafetyNet JSON payload to set integrity / profile match to True.
"""
decoded_payload = json.loads(payload)
decoded_payload['basicIntegrity'] = True
decoded_payload['ctsProfileMatch'] = True
return json.dumps(decoded_payload).encode()
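# Example of the transformation above (illustrative; a minimal payload is assumed):
#   set_safetynet_passing(b'{"basicIntegrity": false, "ctsProfileMatch": false}')
#   == b'{"basicIntegrity": true, "ctsProfileMatch": true}'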
def rogue_sign_jws(leaf_cert_priv: T, rogue_ca_cert: C, leaf_cert: C, header: bytes, payload: bytes, signature: bytes) -> Tuple[bytes, bytes, bytes]:
    '''Splice the rogue leaf and CA certificates (base64-encoded DER, non URL-safe) into the
    x5c header chain and re-sign the header/payload with the rogue leaf private key.
    '''
encoded_rogue_ca_cert = base64.b64encode(rogue_ca_cert.public_bytes(Encoding.DER)).decode('utf-8')
encoded_leaf_cert = base64.b64encode(leaf_cert.public_bytes(Encoding.DER)).decode('utf-8')
h = json.loads(header)
h['x5c'] = [encoded_leaf_cert, encoded_rogue_ca_cert] + h['x5c']
header = bytes(json.dumps(h), 'utf-8')
b64_header = _urlsafe_b64encode_without_padding(header)
b64_payload = _urlsafe_b64encode_without_padding(payload)
signature = leaf_cert_priv.sign(
b64_header + b'.' + b64_payload,
padding.PKCS1v15(),
hashes.SHA256()
)
return b64_header, b64_payload, _urlsafe_b64encode_without_padding(signature)
if __name__ == '__main__':
ret = modify_jws_and_forge_signature(const.RAW_JWS, set_safetynet_passing)
print('# Encoded Modified JWS')
print(ret.decode('utf-8'))
|
11550926
|
from gen import Tree, gentree
from operator import lt, gt
from sys import stdout, maxint
minint = -maxint - 1
class DrawTree:
def __init__(self, tree, depth=-1):
self.x = -1
self.y = depth
self.tree = tree
self.children = []
self.thread = None
self.mod = 0
def left(self):
return self.thread or len(self.children) and self.children[0]
def right(self):
return self.thread or len(self.children) and self.children[-1]
# Traverse to the bottom of the tree and place the leaves at an arbitrary
# x coordinate.
# If a node is a parent, draw its subtrees, then shift the right subtree as
# close to the left one as possible.
# Place the parent in the middle of the two subtrees.
def layout(tree):
dt = reingold_tilford(tree)
return addmods(dt)
def addmods(tree, mod=0):
tree.x += mod
for c in tree.children:
addmods(c, mod+tree.mod)
return tree
def reingold_tilford(tree, depth=0):
dt = DrawTree(tree, depth)
if len(tree) == 0:
dt.x = 0
return dt
if len(tree) == 1:
dt.children = [reingold_tilford(tree[0], depth+1)]
dt.x = dt.children[0].x
return dt
left = reingold_tilford(tree[0], depth+1)
right = reingold_tilford(tree[1], depth+1)
dt.children = [left, right]
dt.x = fix_subtrees(left, right)
return dt
#place the right subtree as close to the left subtree as possible
def fix_subtrees(left, right):
li, ri, diff, loffset, roffset, lo, ro \
= contour(left, right)
diff += 1
diff += (right.x + diff + left.x) % 2 #stick to the integers
right.mod = diff
right.x += diff
if right.children:
roffset += diff
#right was deeper than left
if ri and not li:
lo.thread = ri
lo.mod = roffset - loffset
#left was deeper than right
elif li and not ri:
ro.thread = li
ro.mod = loffset - roffset
return (left.x + right.x) / 2
def contour(left,
right,
max_offset=None,
loffset=0,
roffset=0,
left_outer=None,
right_outer=None):
if not max_offset \
or left.x + loffset - (right.x + roffset) > max_offset:
max_offset = left.x + loffset - (right.x + roffset)
if not left_outer:
left_outer = left
if not right_outer:
right_outer = right
lo = left_outer.left()
li = left.right()
ri = right.left()
ro = right_outer.right()
if li and ri:
loffset += left.mod
roffset += right.mod
return contour(li, ri, max_offset, loffset, roffset, lo, ro)
return li, ri, max_offset, loffset, roffset, left_outer, right_outer
#given an array of nodes, print them out reasonably on one line
def printrow(level):
x = dict((t.x, t.tree) for t in level)
for i in range(max(x.keys())+1):
try: stdout.write(str(x[i])[:4])
except: stdout.write(" ")
def p(tree):
level = [tree]
while 1:
newlevel = []
printrow(level)
for t in level:
newlevel.extend(t.children[:2])
print
if not newlevel: break
level = newlevel
if __name__ == "__main__":
def mirror(t):
if len(t.children) > 1:
t.children = (t.children[1], t.children[0])
for c in t.children:
mirror(c)
return t
from demo_trees import trees
layout(mirror(trees[10]))
#root = gentree("/Users/llimllib/Movies")
#root.children.reverse()
#drawtree = reingold_tilford(root)
#p(drawtree)
|
11550962
|
from lego.apps.users.constants import GROUP_COMMITTEE, GROUP_GRADE
from lego.apps.users.models import AbakusGroup
from lego.utils.functions import insert_abakus_groups
# isort:skip
"""
The structure of the tree is key and a list of two dicts.
The first dict is the parameters of the current group
and the second dict are the children of the current group.
E.g. Abakus: [
{
description: 'ABAKUSGRUPPE',
permissions: ['/sudo/...']
...
},
{
'Webkom': [{
description: 'WEBKOMGRUPPE',
permissions: ['/sudo/']
...
}, {}]
}
]
"""
initial_tree = {
"Users": [{"description": "Brukere på Abakus.no"}, {}],
"Abakus": [
{
"description": "Medlemmer av Abakus",
"permissions": [
"/sudo/admin/meetings/create",
"/sudo/admin/meetinginvitations/create",
"/sudo/admin/registrations/create/",
"/sudo/admin/events/payment/",
"/sudo/admin/comments/create",
],
},
{
"Abakom": [
{
"description": "Medlemmer av Abakom",
"permissions": [
"/sudo/admin/events/",
"/sudo/admin/pools/",
"/sudo/admin/registrations/",
"/sudo/admin/companies/",
"/sudo/admin/joblistings/",
],
},
{
"Arrkom": [
{"type": GROUP_COMMITTEE, "logo_id": "abakus_arrkom.png"},
{},
],
"backup": [
{"type": GROUP_COMMITTEE, "logo_id": "abakus_backup.png"},
{},
],
"Bedkom": [
{
"type": GROUP_COMMITTEE,
"logo_id": "abakus_bedkom.png",
"permissions": [
"/sudo/admin/companyinterest/",
"/sudo/admin/surveys/",
"/sudo/admin/submissions/",
],
},
{},
],
"Fagkom": [
{
"type": GROUP_COMMITTEE,
"logo_id": "abakus_fagkom.png",
"permissions": [
"/sudo/admin/companyinterest/",
"/sudo/admin/surveys/",
"/sudo/admin/submissions/",
],
},
{},
],
"Koskom": [
{"type": GROUP_COMMITTEE, "logo_id": "abakus_koskom.png"},
{},
],
"LaBamba": [
{"type": GROUP_COMMITTEE, "logo_id": "abakus_labamba.png"},
{},
],
"PR": [{"type": GROUP_COMMITTEE, "logo_id": "abakus_pr.png"}, {}],
"readme": [
{"type": GROUP_COMMITTEE, "logo_id": "abakus_readme.png"},
{},
],
"Webkom": [
{
"type": GROUP_COMMITTEE,
"logo_id": "abakus_webkom.png",
"permissions": ["/sudo/"],
"text": "hei",
},
{},
],
"Hovedstyret": [
{
"logo_id": "abakus_hs.png",
"permissions": ["/sudo/admin/"],
"contact_email": "<EMAIL>",
},
{},
],
},
],
"Interessegrupper": [
{"description": "Super-gruppe for alle interessegrupper i Abakus"},
{},
],
},
],
"Students": [
{},
{
"Datateknologi": [
{},
{
"1. klasse Datateknologi": [{"type": GROUP_GRADE}, {}],
"2. klasse Datateknologi": [{"type": GROUP_GRADE}, {}],
"3. klasse Datateknologi": [{"type": GROUP_GRADE}, {}],
"4. klasse Datateknologi": [{"type": GROUP_GRADE}, {}],
"5. klasse Datateknologi": [{"type": GROUP_GRADE}, {}],
},
],
"Kommunikasjonsteknologi": [
{},
{
"1. klasse Kommunikasjonsteknologi": [{"type": GROUP_GRADE}, {}],
"2. klasse Kommunikasjonsteknologi": [{"type": GROUP_GRADE}, {}],
"3. klasse Kommunikasjonsteknologi": [{"type": GROUP_GRADE}, {}],
"4. klasse Kommunikasjonsteknologi": [{"type": GROUP_GRADE}, {}],
"5. klasse Kommunikasjonsteknologi": [{"type": GROUP_GRADE}, {}],
},
],
},
],
}
def load_abakus_groups():
insert_abakus_groups(initial_tree)
AbakusGroup.objects.rebuild()
|
11550996
|
import os
import numpy as np
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.utils.file_helper import get_create_path
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import NormalizeEhpi, \
RemoveJointsOutsideImgEhpi
from torch.utils.data import DataLoader, ConcatDataset
from torchvision.transforms import transforms
from ehpi_action_recognition.config import data_dir, models_dir, ehpi_dataset_path
from ehpi_action_recognition.tester_ehpi import TesterEhpi
from ehpi_action_recognition.paper_reproduction_code.datasets.ehpi_lstm_dataset import EhpiLSTMDataset
from ehpi_action_recognition.paper_reproduction_code.models.ehpi_lstm import EhpiLSTM
def get_test_set_lab(dataset_path: str, image_size: ImageSize):
num_joints = 15
datasets = [
EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE01_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_03_TEST_VUE02_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
]
for dataset in datasets:
dataset.print_label_statistics()
return ConcatDataset(datasets)
def get_test_set_office(dataset_path: str, image_size: ImageSize):
num_joints = 15
dataset = EhpiLSTMDataset(os.path.join(dataset_path, "JOURNAL_2019_04_TEST_EVAL2_30FPS"),
transform=transforms.Compose([
RemoveJointsOutsideImgEhpi(image_size),
# ScaleEhpi(image_size),
# TranslateEhpi(image_size),
# FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
NormalizeEhpi(image_size)
]), num_joints=num_joints, dataset_part=DatasetPart.TEST)
dataset.print_label_statistics()
return dataset
if __name__ == '__main__':
model_names = [
"ehpi_journal_2019_03_gt_seed_0_cp0200",
"ehpi_journal_2019_03_gt_seed_104_cp0200",
"ehpi_journal_2019_03_gt_seed_123_cp0200",
"ehpi_journal_2019_03_gt_seed_142_cp0200",
"ehpi_journal_2019_03_gt_seed_200_cp0200",
#
"ehpi_journal_2019_03_pose_seed_0_cp0200",
"ehpi_journal_2019_03_pose_seed_104_cp0200",
"ehpi_journal_2019_03_pose_seed_123_cp0200",
"ehpi_journal_2019_03_pose_seed_142_cp0200",
"ehpi_journal_2019_03_pose_seed_200_cp0200",
#
"ehpi_journal_2019_03_both_seed_0_cp0200",
"ehpi_journal_2019_03_both_seed_104_cp0200",
"ehpi_journal_2019_03_both_seed_123_cp0200",
"ehpi_journal_2019_03_both_seed_142_cp0200",
"ehpi_journal_2019_03_both_seed_200_cp0200",
]
# Test set
test_set = get_test_set_lab(ehpi_dataset_path, ImageSize(1280, 720))
result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "lab"))
# test_set = get_test_set_office(ImageSize(1280, 720))
# result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "office"))
test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
for model_name in model_names:
print("Model name: {}".format(model_name))
weights_path = os.path.join(models_dir, "{}.pth".format(model_name))
tester = TesterEhpi()
ehpi_results, seq_results = tester.test(test_loader, weights_path, model=EhpiLSTM(15, 5))
ehpi_results_np = np.array(ehpi_results, dtype=np.uint32)
seq_results_np = np.array(seq_results, dtype=np.uint32)
np.save(os.path.join(result_path, "{}_ehpis".format(model_name)), ehpi_results_np)
np.save(os.path.join(result_path, "{}_seqs".format(model_name)), seq_results_np)
|
11551065
|
from collections import Counter
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
from .mapview import MapView
from sompy.visualization.plot_tools import plot_hex_map
class BmuHitsView(MapView):
def _set_labels(self, cents, ax, labels, onlyzeros, fontsize, hex=False):
for i, txt in enumerate(labels):
            if onlyzeros:
if txt > 0:
txt = ""
c = cents[i] if hex else (cents[i, 1] + 0.5, cents[-(i + 1), 0] + 0.5)
ax.annotate(txt, c, va="center", ha="center", size=fontsize)
def show(self, som, anotate=True, onlyzeros=False, labelsize=7, cmap="jet", logaritmic = False):
org_w = self.width
org_h = self.height
(self.width, self.height, indtoshow, no_row_in_plot, no_col_in_plot,
axis_num) = self._calculate_figure_params(som, 1, 1)
self.width /= (self.width/org_w) if self.width > self.height else (self.height/org_h)
self.height /= (self.width / org_w) if self.width > self.height else (self.height / org_h)
counts = Counter(som._bmu[0])
counts = [counts.get(x, 0) for x in range(som.codebook.mapsize[0] * som.codebook.mapsize[1])]
mp = np.array(counts).reshape(som.codebook.mapsize[0],
som.codebook.mapsize[1])
if not logaritmic:
norm = matplotlib.colors.Normalize(
vmin=0,
vmax=np.max(mp.flatten()),
clip=True)
else:
norm = matplotlib.colors.LogNorm(
vmin=1,
vmax=np.max(mp.flatten()))
msz = som.codebook.mapsize
cents = som.bmu_ind_to_xy(np.arange(0, msz[0] * msz[1]))
self.prepare()
if som.codebook.lattice == "rect":
ax = plt.gca()
if anotate:
self._set_labels(cents, ax, counts, onlyzeros, labelsize)
pl = plt.pcolor(mp[::-1], norm=norm, cmap=cmap)
plt.axis([0, som.codebook.mapsize[1], 0, som.codebook.mapsize[0]])
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.colorbar(pl)
#plt.show()
elif som.codebook.lattice == "hexa":
ax, cents = plot_hex_map(mp[::-1], colormap=cmap, fig=self._fig)
if anotate:
self._set_labels(cents, ax, reversed(counts), onlyzeros, labelsize, hex=True)
#plt.show()
#return ax, cents
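# Usage sketch (an assumption based on typical SOMPY usage, not part of this module):
#   from sompy.visualization.bmuhits import BmuHitsView
#   vhts = BmuHitsView(10, 10, "Hits Map", text_size=12)
#   vhts.show(som, anotate=True, onlyzeros=False, labelsize=12, cmap="Greys", logaritmic=False)
# where `som` is a trained SOMPY SOM instance.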
|
11551095
|
import os
import re
import json
import tqdm
import torch
import sqlite3
import converter
import argparse
import embeddings as E
from vocab import Vocab
from collections import defaultdict, Counter
from transformers import DistilBertTokenizer
from eval_scripts import evaluation
import editsql_preprocess
import editsql_postprocess
BERT_MODEL = 'cache/bert'
SQL_PRIMITIVES = {'select', 'from', 'not', 'in', 'where', 'max', 'min', 'avg'}
bad_query_replace = [
('ryan___goodwin', 'rylan___goodwin'),
('distric_', 'district_'),
('districtt_', 'district_'),
('northcarolina', 'north___carolina'),
('beetle___!', 'beetle'),
('caribbean', 'carribean'),
('noth', 'north'),
('asstprof', 'assistant___professor'),
('parallax', 'puzzling'),
('region0', 'bay___area'),
('timothy', 'timmothy'),
('engineering', 'engineer'),
('goergia', 'georgia'),
('director_name0', 'kevin___spacey'),
('actor_name0', 'kevin___spacey'),
('category_category_name0', 'mgm___grand___buffet'),
]
bad_question_replace = bad_query_replace
bad_question_replace += [
('_one_', '_1_'),
('_two_', '_2_'),
('_three_', '_3_'),
('_four_', '_4_'),
('_five_', '_5_'),
('_internation_', '_international_'),
]
value_replace = {'_'+k+'_': '_'+v+'_' for k, v in {
'usa': 'us',
'africa': 'african',
'europe': 'european',
'asia': 'asian',
'france': 'french',
'italy': 'italian',
'2014': '2013',
'cat': 'cats',
'dog': 'dogs',
'male': 'males',
'female': 'females',
'student': 'students',
'engineer': 'engineers',
'states': 'us',
'united': 'us',
'y': 'yes',
'n': 'no',
'herbs': 'herb',
'canada': 'canadian',
'la': 'louisiana',
'##ie': '##ies',
'fl': 'florida',
'australia': 'australian',
'professor': 'professors',
'drive': 'drives',
'usa': 'united',
'instructor': 'instructors',
'completed': 'complete',
'nominated': 'nomination',
'game': 'games',
'card': 'cards',
'park': 'parking',
'room': 'rooms',
}.items()}
class ValueAlignmentException(Exception):
pass
class QueryBuildError(Exception):
pass
class SQLDataset:
def __init__(self):
pass
@classmethod
def align_values(cls, no_value, yes_value):
if yes_value[-1] == ';':
yes_value.pop()
yes_value = '___'.join(yes_value)
for f, t in bad_query_replace:
yes_value = yes_value.replace(f, t)
yes_value = yes_value.split('___')
def find_match(no_value, i, yes_value):
before = None if i == 0 else no_value[i-1].lower()
after = None if i+1 == len(no_value) else no_value[i+1].lower()
candidates = []
for j in range(len(yes_value)):
mybefore = None if j == 0 else yes_value[j-1].lower()
if mybefore == before:
for k in range(j, len(yes_value)):
yk = yes_value[k].lower()
# if '_' in yk and 'mk_man' not in yk and 'pu_man' not in yk or 't1' in yk or 't2' in yk or 't3' in yk or 't4' in yk:
# break
myafter = None if k+1 == len(yes_value) else yes_value[k+1].lower()
if myafter == after:
candidates.append((j, k+1))
break
if len(candidates) == 0:
raise ValueAlignmentException('Cannot align values: {}'.format(yes_value))
candidates.sort(key=lambda x: x[1] - x[0])
return candidates[0]
values = []
num_slots = 0
for i, t in enumerate(no_value):
t = t.lower()
if t in {'value', 'limit_value'}:
start, end = find_match(no_value, i, yes_value)
values.append(yes_value[start:end])
num_slots += 1
if num_slots != len(values):
raise Exception('Found {} values for {} slots'.format(len(values), num_slots))
return values
@classmethod
def execute(cls, db, p_str, p_sql, remap=True):
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
rmap[key] = [r[idx] for r in res]
return rmap
if remap:
p_val_units = [unit[1] for unit in p_sql['select'][1]]
return res_map(p_res, p_val_units)
else:
return p_res
except Exception as e:
return []
@classmethod
def build_sql(cls, schema, p_str, kmap):
try:
p_sql = evaluation.get_sql(schema, p_str)
except Exception as e:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = evaluation.EMPTY_QUERY.copy()
p_valid_col_units = evaluation.build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql_val = evaluation.rebuild_sql_val(p_sql)
p_sql_col = evaluation.rebuild_sql_col(p_valid_col_units, p_sql_val, kmap)
return p_sql_col
@classmethod
def strip_aliases(cls, query_toks):
final_sql = []
invalid = False
for query_tok in query_toks:
if query_tok != '.' and '.' in query_tok:
# invalid sql; didn't use table alias in join
final_sql.extend(query_tok.replace('.',' . ').split())
invalid = True
else:
final_sql.append(query_tok)
if 'from' in final_sql:
sel = final_sql[:final_sql.index('from')]
all_aliases = Counter([t for t in final_sql if re.match(r't\d+', t)])
sel_aliases = Counter([t for t in sel if re.match(r't\d+', t)])
if '*' in sel and len(all_aliases) > len(sel_aliases):
m = all_aliases.most_common()[-1][0]
final_sql[final_sql.index('*')] = '{}.*'.format(m)
return final_sql, invalid
@classmethod
def tokenize_question(cls, orig_question_toks, bert):
question = '___'.join(orig_question_toks).lower()
for f, t in bad_question_replace:
question = question.replace(f, t)
question = ' '.join(question.split('___'))
question_toks = bert.tokenize(question)
return question_toks
@classmethod
def build_contexts(cls, question_toks, db, bert):
query_context = []
for table_id, (to, t) in enumerate(zip(db['table_names_original'] + ['NULL'], db['table_names'] + ['NULL'] + [{}])):
keys = set(db['primary_keys'])
for a, b in db['foreign_keys']:
keys.add(a)
keys.add(b)
# insert a NULL table at the end
columns = [{'oname': '*', 'name': '*', 'type': 'all', 'key': '{}.*'.format(to).replace('NULL.', '').lower()}]
for i, ((tid, co), (_, c), ct) in enumerate(zip(db['column_names_original'], db['column_names'], db['column_types'])):
ct = ct if i not in keys else 'key'
if tid == table_id:
columns.append({
'oname': co, 'name': c, 'type': ct,
'key': '{}.{}'.format(to, co).lower(),
})
query_cols = [c.copy() for c in columns]
# context for generating queries
query_context_toks = [bert.cls_token] + question_toks + [bert.sep_token] + bert.tokenize(t) + [bert.sep_token]
for col in query_cols:
col['start'] = len(query_context_toks)
query_context_toks.extend(bert.tokenize('{} : {}'.format(col['type'], col['name'])) + [bert.sep_token])
col['end'] = len(query_context_toks)
col['table_id'] = table_id
query_context.append({
'oname': to,
'name': t,
'columns': query_cols,
'toks': query_context_toks[:512],
})
return query_context
@classmethod
def make_column_cands(cls, context):
cands = []
for tab in context:
for col in tab['columns']:
cands.append(col)
return cands
@classmethod
def make_sup_query(cls, norm_query_toks, cands, values, voc, bert, train=True):
query = {}
query['column_pointer'] = pointer = []
query['column_toks'] = toks = []
query['value_toks'] = []
for v in values:
for t in v:
query['value_toks'].extend(bert.tokenize(t))
query['value_toks'].append('SEP')
query['value_toks'].append('EOS')
for t in [t.lower() for t in norm_query_toks]:
matched = False
if t not in SQL_PRIMITIVES:
for i, c in enumerate(cands):
if t == c['key'] and t:
toks.append('pointer')
pointer.append(i)
matched = True
break
if not matched and (train or t in voc._word2index):
toks.append(t)
pointer.append(None)
toks.append('EOS')
pointer.append(None)
voc.word2index(toks, train=True)
return query
@classmethod
def make_example(cls, ex, bert, sql_voc, kmaps, conv, train=False, execute=True, evaluation=False):
db_id = ex['db_id']
db_path = os.path.join('data', 'database', db_id, db_id + ".sqlite")
invalid = False
if evaluation:
query_norm = query_norm_toks = em = g_sql = g_query = query_recov = g_values = None
else:
try:
# normalize query
query_norm = conv.convert_tokens(ex['query_toks'], ex['query_toks_no_value'], db_id)
except Exception as e:
print('preprocessing error')
print(ex['query'])
return None
if query_norm is None:
return None
query_recov = query_norm_toks = g_values = None
try:
query_recov = conv.recover(query_norm, db_id)
query_norm_toks = query_norm.split()
em, g_sql, r_sql = conv.match(ex['query'], query_recov, db_id)
if not em:
invalid = True
g_values = cls.align_values(ex['query_toks_no_value'], ex['query_toks'])
except ValueAlignmentException as e:
print(ex['query'])
print(repr(e))
invalid = True
except QueryBuildError as e:
print(ex['query'])
print(repr(e))
invalid = True
except Exception as e:
print(e)
invalid = True
raise
g_sql = conv.build_sql(ex['query'], db_id)
g_query = ex['query']
# make utterance
question_toks = cls.tokenize_question(ex['question_toks'], bert)
# print(bert.convert_tokens_to_string(question_toks))
# encode tables
query_context = cls.build_contexts(question_toks, conv.database_schemas[db_id], bert)
# print(bert.convert_tokens_to_string(query_context[0]['toks']))
new = dict(
id=ex['id'],
question=ex['question'],
db_id=db_id,
g_question_toks=question_toks,
g_sql=g_sql,
query=g_query,
g_query_norm=query_norm,
g_query_recov=query_recov,
g_values=g_values,
value_context=[bert.cls_token] + question_toks + [bert.sep_token],
query_context=query_context,
invalid=invalid,
cands_query=cls.make_column_cands(query_context),
)
if train and not invalid:
new['sup_query'] = cls.make_sup_query(query_norm_toks, new['cands_query'], g_values, sql_voc, bert)
# print(new['sup_query']['column_toks'])
return new
@classmethod
def recover_slots(cls, pointer, candidates, eos, key='key'):
if eos in pointer:
pointer = pointer[:pointer.index(eos)+1]
toks = []
for i, p in enumerate(pointer):
c = candidates[p]
if isinstance(c, dict):
c = c[key]
toks.append(c)
if 'EOS' in toks:
toks = toks[:toks.index('EOS')]
return toks
@classmethod
def recover_query(cls, pointer, candidates, value_pointer, value_candidates, voc):
toks = cls.recover_slots(pointer, candidates, key='key', eos=voc.word2index('EOS'))
value = [value_candidates[p] for p in value_pointer]
if 'EOS' in value:
value = value[:value.index('EOS')]
return toks, value
@classmethod
def make_cands(cls, ex, sql_voc):
query_cands = sql_voc._index2word + ex['cands_query']
value_cands = sql_voc._index2word + ex['value_context']
return query_cands, value_cands
@classmethod
def make_query_pointer(cls, sup_query, query_cands, value_cands, sql_voc):
# map slots
pointer = []
for w, p in zip(sup_query['column_toks'], sup_query['column_pointer']):
if p is None:
# this is a vocab word
pointer.append(sql_voc.word2index(w))
else:
# this is a column, need to add offset for vocab candidates
pointer.append(p + len(sql_voc))
for i in pointer:
assert i < len(query_cands)
# map values
value_pointer = []
for w in sup_query['value_toks']:
if w not in value_cands:
if w in value_replace:
w = value_replace[w]
else:
w = w + 's'
if w not in value_cands:
# print('OOV word in value {}:\n{}\n{}'.format(w, ex['utterance']['toks'], ex['query_toks']))
continue
value_pointer.append(value_cands.index(w))
# print(cls.recover_query(pointer, cands, value_pointer, value_cands, voc=sql_voc))
return pointer, value_pointer
@classmethod
def from_file(cls, root, dcache, debug=False):
conv = converter.Converter()
kmaps = evaluation.build_foreign_key_map_from_json(os.path.join(root, 'tables.json'))
splits = {}
for k in ['train', 'dev']:
with open(os.path.join(root, '{}.json'.format(k)), 'rb') as f:
splits[k] = []
for ex in json.load(f):
ex['query_orig'] = ex['query']
splits[k].append(ex)
if debug and len(splits[k]) > 100:
break
tokenizer = DistilBertTokenizer.from_pretrained(BERT_MODEL, cache_dir=dcache)
sql_voc = Vocab(['PAD', 'EOS', 'GO', 'SEP', '`', "'", '1', '%', 'yes', '2', '.', '5', 'f', 'm', 'name', 'song', 't', 'l'])
# make contexts and populate vocab
for s, data in splits.items():
proc = []
for i, ex in enumerate(tqdm.tqdm(data, desc='preprocess {}'.format(s))):
ex['id'] = '{}/{}'.format(ex['db_id'], i)
new = cls.make_example(ex, tokenizer, sql_voc, kmaps, conv, train=s=='train')
if new is not None and (s != 'train' or not new['invalid']):
proc.append(new)
splits[s] = proc
# make candidate list using vocab
for s, data in splits.items():
for ex in data:
ex['cands_query'], ex['cands_value'] = cls.make_cands(ex, sql_voc)
splits[s] = data
# make pointers for training data
for ex in splits['train']:
ex['pointer_query'], ex['pointer_value'] = cls.make_query_pointer(ex['sup_query'], ex['cands_query'], ex['cands_value'], sql_voc)
# look up pretrained word embeddings
emb = E.ConcatEmbedding([E.GloveEmbedding(), E.KazumaCharEmbedding()], default='zero')
sql_emb = torch.tensor([emb.emb(w) for w in sql_voc._index2word])
ext = dict(sql_voc=sql_voc, sql_emb=sql_emb)
return splits, ext
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--data', default='spider')
args = parser.parse_args()
proc = SQLDataset.from_file(os.path.join('data', args.data), 'cache', debug=args.debug)
torch.save(proc, 'cache/data_nl2sql_spider.debug.pt' if args.debug else 'cache/data_nl2sql_spider.pt')
|
11551105
|
import unittest
from solution import findMissing
class TestRanges(unittest.TestCase):
def test1(self):
# Testing input with integer missing other than zero
assert findMissing([0,1,2,4]) == 3
def test2(self):
# Testing input with zero missing
assert findMissing([1,2,3]) == 0
if __name__ == '__main__':
unittest.main()
|
11551120
|
import tensorflow as tf
import numpy as np
import random
import gym
import gym_gazebo
import math
import matplotlib
import matplotlib.pyplot as plt
env = None
class LivePlot(object):
def __init__(self, outdir, data_key='episode_rewards', line_color='blue'):
"""
Liveplot renders a graph of either episode_rewards or episode_lengths
Args:
            outdir (str): Monitor output file location used to populate the graph
            data_key (Optional[str]): The key in the json to graph (episode_rewards or episode_lengths).
            line_color (Optional[str]): Color of the plot.
"""
self.outdir = outdir
self._last_data = None
self.data_key = data_key
self.line_color = line_color
#styling options
matplotlib.rcParams['toolbar'] = 'None'
plt.style.use('ggplot')
plt.xlabel("")
plt.ylabel(data_key)
fig = plt.gcf().canvas.set_window_title('simulation_graph')
def plot(self, reward):
# results = monitoring.load_results(self.outdir)
# print(results)
# if(results==None): return
data = reward
#only update plot if data is different (plot calls are expensive)
# if data != self._last_data:
self._last_data = data
plt.plot(data, color=self.line_color)
# pause so matplotlib will display
# may want to figure out matplotlib animation or use a different library in the future
plt.pause(0.05)
def softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def policy_gradient():
with tf.variable_scope("policy"):
params = tf.get_variable("policy_parameters",[4,2])
state = tf.placeholder("float",[None,4])
actions = tf.placeholder("float",[None,2])
advantages = tf.placeholder("float",[None,1])
linear = tf.matmul(state,params)
probabilities = tf.nn.softmax(linear)
good_probabilities = tf.reduce_sum(tf.multiply(probabilities, actions),reduction_indices=[1])
eligibility = tf.log(good_probabilities) * advantages
loss = -tf.reduce_sum(eligibility)
optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)
return probabilities, state, actions, advantages, optimizer
def value_gradient():
with tf.variable_scope("value"):
state = tf.placeholder("float",[None,4])
newvals = tf.placeholder("float",[None,1])
w1 = tf.get_variable("w1",[4,10])
b1 = tf.get_variable("b1",[10])
h1 = tf.nn.relu(tf.matmul(state,w1) + b1)
w2 = tf.get_variable("w2",[10,1])
b2 = tf.get_variable("b2",[1])
calculated = tf.matmul(h1,w2) + b2
diffs = calculated - newvals
loss = tf.nn.l2_loss(diffs)
optimizer = tf.train.AdamOptimizer(0.1).minimize(loss)
return calculated, state, newvals, optimizer, loss
def run_episode(env, policy_grad, value_grad, sess):
pl_calculated, pl_state, pl_actions, pl_advantages, pl_optimizer = policy_grad
vl_calculated, vl_state, vl_newvals, vl_optimizer, vl_loss = value_grad
observation = env.reset()
totalreward = 0
states = []
actions = []
advantages = []
transitions = []
update_vals = []
for _ in range(20000):
# calculate policy
obs_vector = np.expand_dims(observation, axis=0)
probs = sess.run(pl_calculated,feed_dict={pl_state: obs_vector})
action = 0 if random.uniform(0,1) < probs[0][0] else 1
# record the transition
states.append(observation)
print("angle: ", observation[2]*180/3.14)
actionblank = np.zeros(2)
actionblank[action] = 1
actions.append(actionblank)
# take the action in the environment
old_observation = observation
observation, reward, done, info = env.step(action)
transitions.append((old_observation, action, reward))
totalreward += reward
if done:
break
for index, trans in enumerate(transitions):
obs, action, reward = trans
# calculate discounted monte-carlo return
future_reward = 0
future_transitions = len(transitions) - index
decrease = 1
for index2 in range(future_transitions):
future_reward += transitions[(index2) + index][2] * decrease
decrease = decrease * 0.97
obs_vector = np.expand_dims(obs, axis=0)
currentval = sess.run(vl_calculated,feed_dict={vl_state: obs_vector})[0][0]
# advantage: how much better was this action than normal
advantages.append(future_reward - currentval)
# update the value function towards new return
update_vals.append(future_reward)
# update value function
update_vals_vector = np.expand_dims(update_vals, axis=1)
sess.run(vl_optimizer, feed_dict={vl_state: states, vl_newvals: update_vals_vector})
# real_vl_loss = sess.run(vl_loss, feed_dict={vl_state: states, vl_newvals: update_vals_vector})
advantages_vector = np.expand_dims(advantages, axis=1)
sess.run(pl_optimizer, feed_dict={pl_state: states, pl_advantages: advantages_vector, pl_actions: actions})
return totalreward
env = gym.make('GazeboCartPole-v0')
outdir = '/tmp/gazebo_gym_experiments'
plotter = LivePlot(outdir)
import time
env = gym.wrappers.Monitor(env, outdir, force=True)
policy_grad = policy_gradient()
value_grad = value_gradient()
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
list_rewards = []
for i in range(2000):
print("Episode ", i)
reward = run_episode(env, policy_grad, value_grad, sess)
list_rewards.append(reward)
if(i%20==0):
plotter.plot(list_rewards)
# if reward == 200:
# print "reward 200"
# print i
# break
time.sleep(0.05)
t = 0
for _ in range(1000):
reward = run_episode(env, policy_grad, value_grad, sess)
t += reward
print (t / 1000)
print ("END!")
|
11551127
|
import numpy
import pytest
from aydin.analysis.image_metrics import (
mutual_information,
joint_information,
spectral_mutual_information,
spectral_psnr,
)
from aydin.io.datasets import camera, add_noise, normalise
def test_spectral_psnr():
    camera_image = normalise(camera()).astype(float)
camera_image_with_noise_high = add_noise(camera_image)
camera_image_with_noise_low = add_noise(
camera_image, intensity=10, variance=0.1, sap=0.000001
)
ji_high = spectral_psnr(camera_image, camera_image_with_noise_high)
ji_low = spectral_psnr(camera_image, camera_image_with_noise_low)
assert ji_high > ji_low
def test_mutual_information():
camera_image = camera()
camera_image_with_noise = add_noise(camera())
mi = mutual_information(camera_image, camera_image, normalised=False)
mi_n = mutual_information(camera_image, camera_image_with_noise, normalised=False)
assert mi > mi_n
def test_normalised_mutual_information():
camera_image = camera()
camera_image_with_noise = add_noise(camera())
assert pytest.approx(
mutual_information(camera_image, camera_image, normalised=True), 1
)
assert pytest.approx(
mutual_information(
camera_image_with_noise, camera_image_with_noise, normalised=True
),
1,
)
assert (
mutual_information(camera_image, camera_image_with_noise, normalised=True) < 1
)
def test_spectral_mutual_information():
camera_image = camera()
camera_image_with_noise = add_noise(camera())
smi = spectral_mutual_information(camera_image, camera_image)
smi_n = spectral_mutual_information(camera_image, camera_image_with_noise)
assert smi_n < smi
def test_joint_information():
camera_image = camera()
camera_image_with_noise = add_noise(camera(), intensity=5, variance=3)
ji = joint_information(camera_image, camera_image)
ji_n = joint_information(camera_image, camera_image_with_noise)
assert ji < ji_n
|
11551131
|
from pathlib import Path
from typing import List
from asciimatics.exceptions import ResizeScreenError # type: ignore
from asciimatics.scene import Scene # type: ignore
from asciimatics.screen import Screen # type: ignore
from hesiod.cfg.cfghandler import CFG_T
from hesiod.ui.tui.baseform import BaseForm
from hesiod.ui.tui.editform import EditForm
from hesiod.ui.tui.recapform import RecapForm
from hesiod.ui.ui import UI
class TUI(UI):
def __init__(
self,
template_cfg: CFG_T,
base_cfg_dir: Path,
) -> None:
"""Create a new terminal user interface (TUI).
Args:
            template_cfg: the template configuration.
base_cfg_dir: path to the base configs directory.
"""
UI.__init__(self, template_cfg, base_cfg_dir)
self.run_cfg: CFG_T = {}
@staticmethod
def run(screen: Screen, scene: Scene, tui: "TUI") -> None:
"""Define the sequence of forms to be shown and play them.
Args:
screen: the screen where the TUI will be displayed.
scene: the start scene.
tui: the TUI instance.
"""
scenes: List[Scene] = []
scenes.append(Scene([EditForm(screen, tui)], duration=-1, name=BaseForm.EDIT_FORM))
scenes.append(Scene([RecapForm(screen, tui)], duration=-1, name=BaseForm.RECAP_FORM))
screen.play(scenes, stop_on_resize=True, start_scene=scene)
def show(self) -> CFG_T:
"""Show the terminal user interface.
Returns:
The run configuration selected by the user.
"""
last_scene = None
while True:
try:
Screen.wrapper(TUI.run, arguments=[last_scene, self])
break
except ResizeScreenError as e:
last_scene = e.scene
return self.run_cfg
|
11551208
|
import os, glob
import numpy as np
import tempfile
import tables as tb
from pandas.util.testing import assert_frame_equal
from flaky import flaky
from isochrones.mist import MIST_Isochrone
from isochrones import StarModel
from isochrones.starfit import starfit
from isochrones.logger import getLogger
mnest = True
try:
import pymultinest
except:
getLogger().warning("No PyMultiNest; fits will use emcee")
mnest = False
chainsdir = tempfile.gettempdir()
props = dict(Teff=(5800, 100), logg=(4.5, 0.1), J=(3.58, 0.05), K=(3.22, 0.05))
def test_fitting():
mod_mist = _check_fitting(StarModel(MIST_Isochrone, **props))
_check_saving(mod_mist)
@flaky
def test_starfit():
rootdir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
testdir = os.path.join(rootdir, "star1")
if mnest:
basename = "{}/{}-".format(chainsdir, np.random.randint(1000000))
kwargs = dict(n_live_points=20, max_iter=100, basename=basename, verbose=False)
getLogger().info("Testing starfit function with multinest...")
else:
kwargs = dict(nburn=20, niter=20, ninitial=10)
getLogger().info("Testing starfit function with emcee...")
mod, _ = starfit(testdir, overwrite=True, use_emcee=not mnest, no_plots=True, **kwargs)
mod.samples
if mnest:
files = glob.glob("{}*".format(basename))
for f in files:
os.remove(f)
###############
def _check_saving(mod):
filename = os.path.join(chainsdir, "{}.h5".format(np.random.randint(1000000)))
mod.save_hdf(filename)
assert len(tb.file._open_files.get_handlers_by_name(filename)) == 0
newmod = StarModel.load_hdf(filename)
assert len(tb.file._open_files.get_handlers_by_name(filename)) == 0
assert_frame_equal(mod.samples, newmod.samples)
assert mod.ic.bands == newmod.ic.bands
os.remove(filename)
def _check_fitting(mod):
_fit_emcee(mod)
if mnest:
_fit_mnest(mod)
return mod
def _fit_mnest(mod):
basename = "{}/{}-".format(chainsdir, np.random.randint(1000000))
mod.fit_multinest(n_live_points=5, max_iter=50, basename=basename, verbose=False)
foo = mod.mnest_analyzer
files = glob.glob("{}*".format(basename))
for f in files:
os.remove(f)
def _fit_emcee(mod):
mod.use_emcee = True
mod.fit_mcmc(nburn=20, niter=20, ninitial=20)
mod.samples
|
11551245
|
from appdaemontestframework.hass_mocks import HassMocks
import datetime
class TimeTravelWrapper:
"""
AppDaemon Test Framework Utility to simulate going forward in time
"""
def __init__(self, hass_mocks: HassMocks):
self._hass_mocks = hass_mocks
def fast_forward(self, duration):
"""
Simulate going forward in time.
It calls all the functions that have been registered with AppDaemon
        for a later schedule run. A function is only called if its scheduled
        time is before or at the simulated time.
You can chain the calls and call `fast_forward` multiple times in a single test
Format:
> time_travel.fast_forward(10).minutes()
> # Or
> time_travel.fast_forward(30).seconds()
"""
return UnitsWrapper(duration, self._fast_forward_seconds)
def assert_current_time(self, expected_current_time):
"""
Assert the current time is as expected
Expected current time is expressed as a duration from T = 0
Format:
> time_travel.assert_current_time(10).minutes()
> # Or
> time_travel.assert_current_time(30).seconds()
"""
return UnitsWrapper(expected_current_time, self._assert_current_time_seconds)
def _fast_forward_seconds(self, seconds_to_fast_forward):
self._hass_mocks.AD.sched.sim_fast_forward(datetime.timedelta(seconds=seconds_to_fast_forward))
def _assert_current_time_seconds(self, expected_seconds_from_start):
sched = self._hass_mocks.AD.sched
elapsed_seconds = (sched.get_now_sync() - sched.sim_get_start_time()).total_seconds()
assert elapsed_seconds == expected_seconds_from_start
class UnitsWrapper:
def __init__(self, duration, function_with_arg_in_seconds):
self.duration = duration
self.function_with_arg_in_seconds = function_with_arg_in_seconds
def minutes(self):
self.function_with_arg_in_seconds(self.duration * 60)
def seconds(self):
self.function_with_arg_in_seconds(self.duration)
|
11551270
|
import requests
import string
import random
import base64
import time
import json
from io import BytesIO
from requests_toolbelt import MultipartEncoder
'''
<form method="post" action="http://2captcha.com/in.php">
<input type="hidden" name="method" value="base64">
<input type="hidden" name="coordinatescaptcha" value="1">
Your key:
<input type="text" name="key" value="YOUR_APIKEY">
ClickCaptcha file body in base64 format:
<textarea name="body">BASE64_CLICKCAPTCHA_FILE</textarea>
</form>
'''
key = None
def send_base64_image(img):
if key is None:
return None
url = 'http://2captcha.com/in.php'
mp_encoder = MultipartEncoder(
fields={
'method': 'base64',
'coordinatescaptcha': '1',
'key': key,
'body': img
}
)
headers = {'Content-Type': mp_encoder.content_type}
resp = requests.post(url, data=mp_encoder, headers=headers)
resp_text_arr = resp.text.split('|')
if resp_text_arr[0] == 'OK':
print("Receive tid: {}".format(resp_text_arr[1]))
return resp_text_arr[1]
else:
raise RuntimeError('2Captcha Error: {}'.format(resp.text))
def get_answer(tid):
if key is None:
return None
url = 'http://2captcha.com/res.php?key={}&action=get&id={}&json=1'.format(key, tid)
resp_text = requests.get(url).text
return resp_text
def refund(tid):
url = "http://2captcha.com/res.php?key={}&action=reportbad&id={}".format(key, tid)
resp_text = requests.get(url).text
def solve_verification(img):
if key is None:
return None
img = img.quantize(colors=64, method=2)
buffered = BytesIO()
img.save(buffered, format="PNG", optimize=True, quality=5)
img_base64 = base64.b64encode(buffered.getvalue())
tid = send_base64_image(img_base64)
time.sleep(5)
ans = None
while ans is None or ans['status'] != 1:
ans = json.loads(get_answer(tid))
if ans['request'] == 'CAPCHA_NOT_READY':
time.sleep(5)
elif ans['status'] == 0:
raise RuntimeError('2Captcha Error: {}'.format(ans['request']))
points = []
try:
for p in ans['request']:
points.append([int(p['x']), int(p['y'])])
except:
refund(tid)
return points
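# Usage sketch (illustrative; requires a valid 2Captcha API key and a PIL.Image):
#   import captcha_solver  # hypothetical module name for this file
#   captcha_solver.key = 'YOUR_APIKEY'
#   points = captcha_solver.solve_verification(img)  # [[x1, y1], [x2, y2], ...] click coordinates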
|
11551274
|
from django.forms.forms import BoundField
class BaseForm(object):
"""
    This is the main implementation of all the Form logic. Note that this
    class is different from Form. See the comments by the Form class for more
information. Any improvements to the form API should be made to *this*
class, not to the Form class.
"""
def __iter__(self):
"""
:rtype: collections.Iterator[BoundField]
"""
pass
def __getitem__(self, index):
"""
:rtype: BoundField
"""
pass
|
11551276
|
def plot_kinematics(signal, background, nbins=100,
mass_range=(50., 110.), pt_range=(200., 500.),
mass_pad=10, pt_pad=50,
linewidth=1, title=None):
import numpy as np
from matplotlib import pyplot as plt
import h5py
pt_min, pt_max = pt_range
mass_min, mass_max = mass_range
plt.style.use('seaborn-white')
signal_h5file_events = h5py.File(signal, 'r')
signal_aux = signal_h5file_events['auxvars']
background_h5file_events = h5py.File(background, 'r')
background_aux = background_h5file_events['auxvars']
signal_selection = ((signal_aux['mass_trimmed'] > mass_min) &
(signal_aux['mass_trimmed'] < mass_max) &
(signal_aux['pt_trimmed'] > pt_min) &
(signal_aux['pt_trimmed'] < pt_max))
background_selection = ((background_aux['mass_trimmed'] > mass_min) &
(background_aux['mass_trimmed'] < mass_max) &
(background_aux['pt_trimmed'] > pt_min) &
(background_aux['pt_trimmed'] < pt_max))
#signal_selection = slice(0, None)
#background_selection = slice(0, None)
if 'weights' in signal_aux.dtype.names:
signal_weights = signal_aux['weights']
else:
signal_weights = np.ones(len(signal_aux))
if 'weights' in background_aux.dtype.names:
background_weights = background_aux['weights']
else:
background_weights = np.ones(len(background_aux))
signal_weights = signal_weights[signal_selection]
background_weights = background_weights[background_selection]
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
if title is not None:
fig.suptitle(title, fontsize=16)
vals1, _, _ = ax[0, 0].hist(signal_aux['pt_trimmed'][signal_selection],
bins=np.linspace(pt_min - pt_pad, pt_max + pt_pad, nbins),
histtype='stepfilled', facecolor='none', edgecolor='blue', normed=1,
linewidth=linewidth,
label=r'Signal', weights=signal_weights)
vals2, _, _ = ax[0, 0].hist(background_aux['pt_trimmed'][background_selection],
bins=np.linspace(pt_min - pt_pad, pt_max + pt_pad, nbins),
histtype='stepfilled', facecolor='none', edgecolor='black', normed=1,
linestyle='dotted', linewidth=linewidth,
label='QCD Background', weights=background_weights)
ax[0, 0].set_ylim((0, 1.3 * max(np.max(vals1), np.max(vals2))))
ax[0, 0].set_ylabel('Normalized to Unity')
ax[0, 0].set_xlabel(r'Trimmed $p_{T}$ [GeV]', fontsize=12)
p1, = ax[0, 0].plot([0, 0], label='Signal', color='blue')
p2, = ax[0, 0].plot([0, 0], label='QCD Background', color='black', linestyle='dotted')
ax[0, 0].legend([p1, p2], ['Signal', 'QCD Background'], frameon=False, handlelength=3)
ax[0, 0].set_xlim((pt_min - pt_pad, pt_max + pt_pad))
ax[0, 0].ticklabel_format(style='sci', scilimits=(0,0), axis='y')
vals1, _, _ = ax[0, 1].hist(signal_aux['mass_trimmed'][signal_selection],
bins=np.linspace(mass_min - mass_pad, mass_max + mass_pad, nbins),
histtype='stepfilled', facecolor='none', edgecolor='blue', normed=1,
linewidth=linewidth,
label=r'Signal', weights=signal_weights)
vals2, _, _ = ax[0, 1].hist(background_aux['mass_trimmed'][background_selection],
bins=np.linspace(mass_min - mass_pad, mass_max + mass_pad, nbins),
histtype='stepfilled', facecolor='none', edgecolor='black', normed=1,
linestyle='dotted', linewidth=linewidth,
label='QCD Background', weights=background_weights)
ax[0, 1].set_ylim((0, 1.3 * max(np.max(vals1), np.max(vals2))))
ax[0, 1].set_ylabel('Normalized to Unity')
ax[0, 1].set_xlabel(r'Trimmed Mass [GeV]', fontsize=12)
p1, = ax[0, 1].plot([0, 0], label='Signal', color='blue')
p2, = ax[0, 1].plot([0, 0], label='QCD Background', color='black', linestyle='dotted')
ax[0, 1].legend([p1, p2], ['Signal', 'QCD Background'], frameon=False, handlelength=3)
ax[0, 1].set_xlim((mass_min - mass_pad, mass_max + mass_pad))
signal_tau32 = np.true_divide(signal_aux['tau_3'], signal_aux['tau_2'])[signal_selection]
background_tau32 = np.true_divide(background_aux['tau_3'], background_aux['tau_2'])[background_selection]
# remove NaN infinity and zero
signal_tau32_nonan = ~np.isnan(signal_tau32) & ~np.isinf(signal_tau32) & (signal_tau32 != 0)
background_tau32_nonan = ~np.isnan(background_tau32) & ~np.isinf(background_tau32) & (background_tau32 != 0)
vals1, _, _ = ax[1, 0].hist(signal_tau32[signal_tau32_nonan],
bins=np.linspace(0, 1, nbins),
histtype='stepfilled', facecolor='none', edgecolor='blue', normed=1,
linewidth=linewidth,
label=r'W jets', weights=signal_weights[signal_tau32_nonan])
vals2, _, _ = ax[1, 0].hist(background_tau32[background_tau32_nonan],
bins=np.linspace(0, 1, nbins),
histtype='stepfilled', facecolor='none', edgecolor='black', normed=1,
linestyle='dotted', linewidth=linewidth,
label='QCD jets', weights=background_weights[background_tau32_nonan])
ax[1, 0].set_ylim((0, 1.3 * max(np.max(vals1), np.max(vals2))))
ax[1, 0].set_ylabel('Normalized to Unity')
ax[1, 0].set_xlabel(r'$\tau_{32}$', fontsize=12)
p1, = ax[1, 0].plot([0, 0], label='W jets', color='blue')
p2, = ax[1, 0].plot([0, 0], label='QCD jets', color='black', linestyle='dotted')
ax[1, 0].legend([p1, p2], ['W jets', 'QCD jets'], frameon=False, handlelength=3)
ax[1, 0].set_xlim((0, 1))
vals1, _, _ = ax[1, 1].hist(signal_aux['subjet_dr'][signal_selection],
bins=np.linspace(0, 1.2, nbins),
histtype='stepfilled', facecolor='none', edgecolor='blue', normed=1,
linewidth=linewidth,
label=r'W jets', weights=signal_weights)
vals2, _, _ = ax[1, 1].hist(background_aux['subjet_dr'][background_selection],
bins=np.linspace(0, 1.2, nbins),
histtype='stepfilled', facecolor='none', edgecolor='black', normed=1,
linestyle='dotted', linewidth=linewidth,
label='QCD jets', weights=background_weights)
ax[1, 1].set_ylim((0, 1.3 * max(np.max(vals1), np.max(vals2))))
ax[1, 1].set_ylabel('Normalized to Unity')
ax[1, 1].set_xlabel(r'Subjets $\Delta R$', fontsize=12)
p1, = ax[1, 1].plot([0, 0], label='W jets', color='blue')
p2, = ax[1, 1].plot([0, 0], label='QCD jets', color='black', linestyle='dotted')
ax[1, 1].legend([p1, p2], ['W jets', 'QCD jets'], frameon=False, handlelength=3)
ax[1, 1].set_xlim((0, 1.2))
fig.tight_layout()
if title is not None:
plt.subplots_adjust(top=0.93)
return fig
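# Usage sketch (illustrative; the HDF5 file names are assumptions, and each file must
# contain an 'auxvars' table with the fields read above):
#   fig = plot_kinematics('signal.h5', 'background.h5', title='W vs. QCD kinematics')
#   fig.savefig('kinematics.png')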
|
11551288
|
import ptypes
from ptypes import *
from ..__base__ import stackable
layers = {
2 : 0x0800
}
class header(pstruct.type, stackable):
_fields_ = [
(pint.littleendian(pint.uint32_t), 'family'),
]
def nextlayer_id(self):
res = self['family'].li.int()
return layers[res]
|
11551340
|
balance = 4213
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
monthlyInterestRate = annualInterestRate / 12
annualPayment = 0.0
for month in range(1, 13):
minimumMonthlyPayment = monthlyPaymentRate * balance
updatedBalanceEachMonth = (balance - minimumMonthlyPayment) * (1 + monthlyInterestRate)
annualPayment += minimumMonthlyPayment
print("Month: {0}".format(month))
print("Minimum monthly payment: {0:.2f}".format(minimumMonthlyPayment))
print("Remaining balance: {0:.2f}".format(updatedBalanceEachMonth))
balance = updatedBalanceEachMonth
print("Total paid: {0:.2f}".format(annualPayment))
print("Remaining balance: {0:.2f}".format(balance))
|
11551347
|
from Proceso import Proceso, crea_procesos, medias_procesos, llega_proceso
"""
SPN (Shortest Process Next) scheduling algorithm.
"""
def genera_lista_procesos():
lista = []
p1 = Proceso(0,3,"A")
p2 = Proceso(1,5,"B")
p3 = Proceso(3,2,"C")
p4 = Proceso(9,5,"D")
p5 = Proceso(12,5,"E")
lista.append(p1)
lista.append(p2)
lista.append(p3)
lista.append(p4)
lista.append(p5)
return lista
def SPN(lista_procesos):
print("SPN")
t_sistema = 0
    # Number of processes that still need to be served
num_procesos = len(lista_procesos)
    # The process currently executing
proceso_actual = None
cola_procesos = []
def proximo_mas_corto(cola_procesos):
min = cola_procesos[0].duracion_trabajo
indice = 0
for i in range(0,len(cola_procesos) ):
if(cola_procesos[i].duracion_trabajo <= min):
min = cola_procesos[i].duracion_trabajo
indice = i
return indice
while(num_procesos != 0):
cola_llegada = llega_proceso(lista_procesos, t_sistema)
if(len(cola_llegada) != 0):
if(len(cola_procesos) == 0):
p = cola_llegada.pop(0)
cola_procesos.append(p)
proceso_actual = cola_procesos[0]
proceso_actual.ejecucion = True
for p in cola_llegada:
cola_procesos.append(p)
else:
for p in cola_llegada:
cola_procesos.append(p)
if(len(cola_procesos) != 0 ):
            # Find the process with the shortest duration and move it to the front of the queue,
            # then hand execution to that shortest process
if(proceso_actual == None):
indice = proximo_mas_corto(cola_procesos)
proceso_actual = cola_procesos[indice]
cola_procesos.pop(indice)
cola_procesos.insert(0,proceso_actual)
proceso_actual.ejecucion = True
            # All processes do their work; those without the CPU simply wait
for proceso in cola_procesos:
proceso.trabaja()
            # If the executing process has finished, give execution to the next process in the queue
if(proceso_actual.terminado == True):
cola_procesos.pop(0)
num_procesos -= 1
proceso_actual = None
        # System time keeps advancing
t_sistema += 1
medias = medias_procesos(lista_procesos)
print("")
print(medias)
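# Usage sketch (illustrative; relies on the Proceso helpers imported above):
if __name__ == "__main__":
    SPN(genera_lista_procesos())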
|
11551356
|
import unittest
from ctree.c.nodes import Constant, String
class TestConstants(unittest.TestCase):
"""Check that all constants convert properly."""
def test_float_00(self):
assert str(Constant(0)) == "0"
def test_float_01(self):
assert str(Constant(1)) == "1"
def test_float_02(self):
assert str(Constant(1.2)) == "1.2"
def test_int_00(self):
assert str(Constant(0)) == "0"
def test_int_01(self):
assert str(Constant(1)) == "1"
def test_int_02(self):
assert str(Constant(12)) == "12"
def test_char_00(self):
assert str(Constant("a")) == "'a'"
def test_char_01(self):
assert str(Constant("A")) == "'A'"
def test_char_02(self):
assert str(Constant("!")) == "'!'"
class TestStrings(unittest.TestCase):
"""Check that strings work."""
def test_string_full(self):
self.assertEqual(str(String("foo")), '"foo"')
def test_string_empty(self):
self.assertEqual(str(String("")), '""')
def test_string_newline(self):
self.assertEqual(str(String(r"\n")), r'"\n"')
def test_string_tab(self):
self.assertEqual(str(String(r"\t")), r'"\t"')
def test_string_multi_two(self):
self.assertEqual(str(String("foo", "bar")), '"foo" "bar"')
def test_string_multi_three(self):
self.assertEqual(str(String("foo", "bar", "baz")), '"foo" "bar" "baz"')
def test_string_none(self):
self.assertEqual(str(String()), '""')
|
11551442
|
from .connection import Connection
from .http_connection import HTTPConnection
from .lockfile import Lockfile
from .websocket import WebSocket
|
11551462
|
import os
import sys
import cv2
from PIL import Image
import numpy as np
import paddle
import torch
from reprod_log import ReprodLogger, ReprodDiffHelper
def build_paddle_transform():
sys.path.insert(0, "./AlexNet_paddle/")
import AlexNet_paddle.presets as presets
paddle_transform = presets.ClassificationPresetEval(
crop_size=224,
resize_size=256, )
sys.path.pop(0)
return paddle_transform
def build_torch_transform():
sys.path.insert(0, "./AlexNet_torch/")
import AlexNet_torch.presets as presets
torch_transform = presets.ClassificationPresetEval(
crop_size=224,
resize_size=256, )
sys.path.pop(0)
return torch_transform
def build_paddle_data_pipeline():
sys.path.insert(0, "./AlexNet_paddle/")
import AlexNet_paddle.presets as presets
import AlexNet_paddle.paddlevision as paddlevision
dataset_test = paddlevision.datasets.ImageFolder(
"./lite_data/val/",
presets.ClassificationPresetEval(
crop_size=224, resize_size=256))
test_sampler = paddle.io.SequenceSampler(dataset_test)
test_batch_sampler = paddle.io.BatchSampler(
sampler=test_sampler, batch_size=4)
data_loader_test = paddle.io.DataLoader(
dataset_test, batch_sampler=test_batch_sampler, num_workers=0)
sys.path.pop(0)
return dataset_test, data_loader_test
def build_torch_data_pipeline():
sys.path.insert(0, "./AlexNet_torch")
import AlexNet_torch.presets as presets
import AlexNet_torch.torchvision as torchvision
dataset_test = torchvision.datasets.ImageFolder(
"./lite_data/val/",
presets.ClassificationPresetEval(
crop_size=224, resize_size=256))
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=4,
sampler=test_sampler,
num_workers=0,
pin_memory=True)
sys.path.pop(0)
return dataset_test, data_loader_test
def test_transform():
paddle_transform = build_paddle_transform()
torch_transform = build_torch_transform()
img = Image.open("./lite_data/val/n12057211/ILSVRC2012_val_00021765.JPEG")
paddle_img = paddle_transform(img)
torch_img = torch_transform(img)
np.testing.assert_allclose(paddle_img, torch_img)
def test_data_pipeline():
diff_helper = ReprodDiffHelper()
paddle_dataset, paddle_dataloader = build_paddle_data_pipeline()
torch_dataset, torch_dataloader = build_torch_data_pipeline()
logger_paddle_data = ReprodLogger()
logger_torch_data = ReprodLogger()
logger_paddle_data.add("length", np.array(len(paddle_dataset)))
logger_torch_data.add("length", np.array(len(torch_dataset)))
# random choose 5 images and check
for idx in range(5):
rnd_idx = np.random.randint(0, len(paddle_dataset))
logger_paddle_data.add(f"dataset_{idx}",
paddle_dataset[rnd_idx][0].numpy())
logger_torch_data.add(f"dataset_{idx}",
torch_dataset[rnd_idx][0].detach().cpu().numpy())
for idx, (paddle_batch, torch_batch
) in enumerate(zip(paddle_dataloader, torch_dataloader)):
if idx >= 5:
break
logger_paddle_data.add(f"dataloader_{idx}", paddle_batch[0].numpy())
logger_torch_data.add(f"dataloader_{idx}",
torch_batch[0].detach().cpu().numpy())
diff_helper.compare_info(logger_paddle_data.data, logger_torch_data.data)
diff_helper.report()
if __name__ == "__main__":
test_data_pipeline()
|
11551475
|
from typing import List
import time
from pathlib import Path
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import TimeoutException as SeleniumTimeoutException
from build_assets.selenium_runner.SeleniumRunner import SeleniumRunner
from build_assets.selenium_runner.enums import IcomoonPage, IcomoonAlerts, IcomoonOptionState
class BuildSeleniumRunner(SeleniumRunner):
def build_icons(self, icomoon_json_path: str,
zip_path: Path, svgs: List[str], screenshot_folder: str):
self.upload_icomoon(icomoon_json_path)
# necessary so we can take screenshot of only the
# recently uploaded icons later
self.deselect_all_icons_in_top_set()
self.upload_svgs(svgs, screenshot_folder)
self.take_icon_screenshot(screenshot_folder)
self.download_icomoon_fonts(zip_path)
def upload_icomoon(self, icomoon_json_path: str):
"""
Upload the icomoon.json to icomoon.io.
        :param icomoon_json_path: a path to the icomoon.json.
:raises TimeoutException: happens when elements are not found.
"""
print("Uploading icomoon.json file...", file=self.log_output)
# find the file input and enter the file path
import_btn = self.driver.find_element_by_css_selector(
SeleniumRunner.GENERAL_IMPORT_BUTTON_CSS
)
import_btn.send_keys(icomoon_json_path)
try:
confirm_btn = WebDriverWait(self.driver, SeleniumRunner.MED_WAIT_IN_SEC).until(
ec.element_to_be_clickable((By.XPATH, "//div[@class='overlay']//button[text()='Yes']"))
)
confirm_btn.click()
except SeleniumTimeoutException as e:
raise Exception("Cannot find the confirm button when uploading the icomoon.json" \
"Ensure that the icomoon.json is in the correct format for Icomoon.io")
print("JSON file uploaded.", file=self.log_output)
def upload_svgs(self, svgs: List[str], screenshot_folder: str):
"""
        Upload the SVGs provided in svgs, one at a time, handling any alerts raised along the way.
:param svgs: a list of svg Paths that we'll upload to icomoon.
:param screenshot_folder: the name of the screenshot_folder.
"""
print("Uploading SVGs...", file=self.log_output)
import_btn = self.driver.find_element_by_css_selector(
SeleniumRunner.SET_IMPORT_BUTTON_CSS
)
# there could be at most 2 alerts when we upload an SVG.
possible_alerts_amount = 2
err_messages = []
for i in range(len(svgs)):
import_btn.send_keys(svgs[i])
print(f"Uploading {svgs[i]}", file=self.log_output)
# see if there are stroke messages or replacing icon message
# there should be none of the second kind
for j in range(possible_alerts_amount):
alert = self.test_for_possible_alert(self.SHORT_WAIT_IN_SEC)
if alert == None:
pass # all good
elif alert == IcomoonAlerts.STROKES_GET_IGNORED_WARNING:
message = f"SVG contained strokes: {svgs[i]}."
err_messages.append(message)
self.click_alert_button(self.ALERTS[alert]["buttons"]["DISMISS"])
elif alert == IcomoonAlerts.REPLACE_OR_REIMPORT_ICON:
message = f"Duplicated SVG: {svgs[i]}."
err_messages.append(message)
self.click_alert_button(self.ALERTS[alert]["buttons"]["REIMPORT"])
else:
raise Exception(f"Unexpected alert found: {alert}")
self.edit_svg()
print(f"Finished editing icon.", file=self.log_output)
print("Finished uploading all files.", file=self.log_output)
if err_messages != []:
message = "BuildSeleniumRunner - Issues found when uploading SVGs:\n"
raise Exception(message + '\n'.join(err_messages))
# take a screenshot of the svgs that were just added
# select the latest icons
self.switch_toolbar_option(IcomoonOptionState.SELECT)
self.select_all_icons_in_top_set()
new_svgs_path = str(Path(screenshot_folder, "new_svgs.png").resolve())
self.driver.save_screenshot(new_svgs_path)
print("Finished uploading the svgs...", file=self.log_output)
def take_icon_screenshot(self, screenshot_folder: str):
"""
Take the overview icon screenshot of the uploaded icons.
:param screenshot_folder: the name of the screenshot_folder.
"""
# take pictures
print("Taking screenshot of the new icons...", file=self.log_output)
self.go_to_generate_font_page()
# take an overall screenshot of the icons that were just added
# also include the glyph count
new_icons_path = str(Path(screenshot_folder, "new_icons.png").resolve())
main_content_xpath = "/html/body/div[4]/div[2]/div/div[1]"
main_content = self.driver.find_element_by_xpath(main_content_xpath)
# wait a bit for all the icons to load before we take a pic
time.sleep(SeleniumRunner.MED_WAIT_IN_SEC)
main_content.screenshot(new_icons_path)
print("Saved screenshot of the new icons...", file=self.log_output)
def go_to_generate_font_page(self):
"""
Go to the generate font page. Also handles the "Deselect Icons
with Strokes" alert.
"""
self.go_to_page(IcomoonPage.GENERATE_FONT)
alert = self.test_for_possible_alert(self.MED_WAIT_IN_SEC)
if alert == None:
pass # all good
elif alert == IcomoonAlerts.DESELECT_ICONS_CONTAINING_STROKES:
message = f"One of SVGs contained strokes. This should not happen."
raise Exception(message)
else:
raise Exception(f"Unexpected alert found: {alert}")
def download_icomoon_fonts(self, zip_path: Path):
"""
Download the icomoon.zip from icomoon.io. Also take a picture of
what the icons look like.
:param zip_path: the path to the zip file after it's downloaded.
"""
print("Downloading Font files...", file=self.log_output)
if self.current_page != IcomoonPage.SELECTION:
self.go_to_page(IcomoonPage.SELECTION)
self.select_all_icons_in_top_set()
self.go_to_generate_font_page()
download_btn = WebDriverWait(self.driver, SeleniumRunner.LONG_WAIT_IN_SEC).until(
ec.presence_of_element_located((By.CSS_SELECTOR, "button.btn4 span"))
)
download_btn.click()
if self.wait_for_zip(zip_path):
print("Font files downloaded.", file=self.log_output)
else:
raise TimeoutError(f"Couldn't find {zip_path} after download button was clicked.")
def wait_for_zip(self, zip_path: Path) -> bool:
"""
Wait for the zip file to be downloaded by checking for its existence
in the download path. Wait time is self.LONG_WAIT_IN_SEC and check time
is 1 sec.
:param zip_path: the path to the zip file after it's
downloaded.
:return: True if the file is found within the allotted time, else
False.
"""
end_time = time.time() + self.LONG_WAIT_IN_SEC
while time.time() <= end_time:
if zip_path.exists():
return True
time.sleep(1) # wait so we don't waste sys resources
return False
|
11551482
|
TRAINING_DATA = [
(
"去年アスムテルダムに行った。運河がきれいだった。",
{"entities": [(2, 9, "TOURIST_DESTINATION")]},
),
(
"人生で一度はパリに行くべきだけど、エッフェル塔はちょっとつまらないな。",
{"entities": [(6, 8, "TOURIST_DESTINATION")]},
),
("アーカンソーにもパリはあるw", {"entities": []}),
(
"ベルリンは夏が最高!公園がたくさんあって、夜遊びが充実していて、ビールが安い!",
{"entities": [(0, 4, "TOURIST_DESTINATION")]},
),
]
|
11551504
|
from kafka import KafkaProducer
import json
import random
from datetime import datetime
# BROKERS = "b-1.stocks.8e6izk.c12.kafka.us-east-1.amazonaws.com:9092,b-2.stocks.8e6izk.c12.kafka.us-east-1.amazonaws.com:9092"
BROKERS = "localhost:9092"
producer = KafkaProducer(
bootstrap_servers=BROKERS,
value_serializer=lambda v: json.dumps(v).encode('utf-8'),
key_serializer=str.encode,
retry_backoff_ms=500,
request_timeout_ms=20000,
security_protocol='PLAINTEXT')
def getReferrer():
data = {}
now = datetime.now()
str_now = now.strftime("%Y-%m-%d %H:%M:%S")
data['event_time'] = str_now
data['ticker'] = random.choice(['AAPL', 'AMZN', 'MSFT', 'INTC', 'TBV'])
price = random.random() * 100
data['price'] = round(price, 2)
return data
while True:
    data = getReferrer()
# print(data)
try:
future = producer.send("stocktopic", value=data,key=data['ticker'])
producer.flush()
record_metadata = future.get(timeout=10)
print("sent event to Kafka! topic {} partition {} offset {}".format(record_metadata.topic, record_metadata.partition, record_metadata.offset))
except Exception as e:
        print(e)
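# A minimal consumer sketch for the topic produced above, left commented out
# because this script never leaves its producer loop. It assumes the same
# local broker and the kafka-python package already imported at the top.
#
# from kafka import KafkaConsumer
# consumer = KafkaConsumer(
#     "stocktopic",
#     bootstrap_servers=BROKERS,
#     value_deserializer=lambda v: json.loads(v.decode("utf-8")),
#     auto_offset_reset="earliest")
# for record in consumer:
#     print(record.key, record.value)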
|
11551536
|
import abc
from typing import Dict
import numpy as np
from robogym.observation.common import Observation, ObservationProvider
class ImageObservationProvider(ObservationProvider, abc.ABC):
"""
Interface for observation provider which can provide rendered image.
"""
@property
@abc.abstractmethod
def images(self) -> Dict[str, np.ndarray]:
pass
class ImageObservation(Observation[ImageObservationProvider]):
"""
Observation class which provides image observation.
"""
def get(self):
return self.provider.images
class MobileImageObservationProvider(ObservationProvider, abc.ABC):
"""
Interface for observation provider for mobile camera images.
"""
@property
@abc.abstractmethod
def mobile_images(self) -> Dict[str, np.ndarray]:
pass
class MobileImageObservation(Observation[MobileImageObservationProvider]):
"""
Observation class which provides mobile image observation.
"""
def get(self):
return self.provider.mobile_images
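# A hedged sketch of a concrete provider satisfying the `images` property
# above. The camera name and frame shape are illustrative, and it assumes
# ImageObservationProvider adds no other abstract members that would need
# overriding in a real environment.
class DummyImageObservationProvider(ImageObservationProvider):
    """Example provider returning a single static dummy frame."""

    @property
    def images(self) -> Dict[str, np.ndarray]:
        return {"dummy_camera": np.zeros((64, 64, 3), dtype=np.uint8)}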
|
11551537
|
saveFrames = False
waitForClick = False
frameLimit = 1200
n = 20
rings = []
rate = 10
h = 0
class Ring:
def __init__(self, centerRadius, thickness, pattern=[1], c=color(255,255,255,128), rotation=0, zoom=0):
self.cR = centerRadius
self.t = thickness
self.pat = pattern
self.c = c
self.r = rotation
self.z = zoom
self.angle = 0
def display(self):
sweep = 2 * PI / len(self.pat)
s = self.angle
e = s + sweep
with pushStyle():
stroke(self.c)
strokeWeight(self.t)
fill(0,0,0,0)
for p in self.pat:
if p:
arc(0,0, self.cR,self.cR, s,e)
s = e
e += sweep
def step(self):
self.angle += self.r
self.cR += self.z
def setup():
if saveFrames:
size(1920, 1080, "processing.core.PGraphicsRetina2D")
else:
size(1920, 1080)
background(0)
if waitForClick:
noLoop()
def randomPattern():
    pattern = [1] + [int(random(2)) for i in range(int(random(10)) + 1)]
#pattern = [1, 0] * int(random(10) + 1) #+ [0] * int(random(5))
#pattern = [1]
print(pattern)
return pattern
def draw():
translate(width/2, height/2) # center origin
rotate(-PI/2) # 0 radians is up
background(0)
for ring in rings:
ring.display()
ring.step()
maxR = dist(0,0, width,height)
r = maxR / n
radius = r / 2
v = 128 + int(random(128))
c = color(255, 255, 255, v)
if random(10) < 1:
h = (h + random(30)) % 360
rings.append(Ring(r, random(1) * r, randomPattern(), c, random(PI/12) - PI/24, (1 + random(2)) * r / rate))
radius += r
rings = [r for r in rings if r.cR < maxR]
if saveFrames:
saveFrame("frames/####.png")
print("Frame {} Rings {}".format(frameCount, len(rings)))
if frameLimit and frameCount >= frameLimit:
noLoop()
print("{}\n".format(frameRate));
def mouseClicked():
if waitForClick:
redraw()
|
11551539
|
import shodan
import requests
import time
import sys
from colorama import Fore, Back, Style
def vulnscan(host, api_key):
try:
print(f'[{Fore.YELLOW}?{Style.RESET_ALL}] Vulnerability scanning on {Fore.YELLOW}{host}{Style.RESET_ALL}...')
target = host
api = shodan.Shodan(api_key)
dnsResolve = f'https://api.shodan.io/dns/resolve?hostnames={target}&key={api_key}'
# Resolve target domain to an IP
resolved = requests.get(dnsResolve)
hostIP = resolved.json()[target]
# Do a Shodan search on that IP
host = api.host(hostIP)
print(f'[{Fore.GREEN}+{Style.RESET_ALL}] Target: {target}') #\n
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] IP: {host['ip_str']}")
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] Organization: {host.get('org', 'n/a')}")
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] Operating System: {host.get('os', 'n/a')}\n")
# Print all banners
for item in host['data']:
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] Port: {Fore.GREEN}{item['port']}{Style.RESET_ALL}")
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] Banner: {Fore.GREEN}{item['data']}{Style.RESET_ALL}")
# Print vulnerability information
if 'vulns' in host and len(host['vulns']) > 0:
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] {len(host['vulns'])} vulnerability(ies) found on {Fore.YELLOW}{target}{Style.RESET_ALL}")
for item in host['vulns']:
CVE = item.replace('!','')
print(f"\n[{Fore.GREEN}+{Style.RESET_ALL}] Vulnerability: {Fore.GREEN} {item} {Style.RESET_ALL}")
# Wait a second
time.sleep(1)
exploits = api.exploits.search(CVE)
for item in exploits['matches']:
print(item.get('description'))
#print('\n')
else:
print(f"[{Fore.GREEN}+{Style.RESET_ALL}] No vulnerabilities found on {Fore.YELLOW}{target}{Style.RESET_ALL}.\n{Fore.YELLOW}Disclaimer{Style.RESET_ALL}: This doesn't mean that the host isn't vulnerable.\n")
except KeyboardInterrupt:
sys.exit('^C\n')
except Exception as e:
print(f'[{Fore.RED}!{Style.RESET_ALL}] Error: {Fore.RED}{e}{Style.RESET_ALL}\n')
|
11551576
|
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import x2paddle.torch2paddle as init
import pickle
import numpy as np
import os
import time
from constants import *
"""
step4: generate the POI id embedding files for each city. These embeddings are fixed during training.
TODO: according to the paper, you may pre-train the POI id embeddings by NeuMF model on the data
collected before the dataset time span. Here we use random embeddings for simplicity.
"""
class NullEmbedder(nn.Layer):
def __init__(self, poi_size, embed_dim):
super(NullEmbedder, self).__init__()
self.poi_embedding = nn.Embedding(poi_size, embed_dim, padding_idx=0, weight_attr=paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform()))
# init.xavier_uniform_(self.poi_embedding.weight)
def forward(self, save_file):
w = self.poi_embedding.weight.detach().numpy()
np.save(save_file, w)
def get_null_id_emb(city):
EMBED_DIM = 50
poiid_pkl_file = '{}{}/poiid_to_id.pkl'.format(pkl_path, city)
poiid_pkl = pickle.load(open(poiid_pkl_file, 'rb'))
poi_size = len(poiid_pkl)
null_embedder = NullEmbedder(poi_size, EMBED_DIM)
save_file = '{}{}_poiid_embed.npy'.format(save_path, city)
null_embedder.forward(save_file)
if __name__ == '__main__':
pkl_path = root_path + 'pkls/'
save_path = root_path + 'id_embs/'
if not os.path.exists(save_path):
os.mkdir(save_path)
cities = get_cities('base') + get_cities('valid') + get_cities('target')
for city in cities:
get_null_id_emb(city)
|
11551594
|
from elasticsearch_dsl import Integer, DocType, Text, Boolean
from .index_aliases import tracker_index_alias
from search.analyzers import autocomplete, autocomplete_search, text_analyzer, text_search_analyzer
@tracker_index_alias.doc_type
class AttachmentFileDocType(DocType):
id = Integer()
crid = Text(analyzer=autocomplete, search_analyzer=autocomplete_search)
title = Text(analyzer=autocomplete, search_analyzer=autocomplete_search)
text_content = Text(analyzer=text_analyzer, search_analyzer=text_search_analyzer)
show = Boolean()
class Meta:
doc_type = 'attachment_file_doc_type'
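# A hedged usage sketch, kept commented out so importing this module has no
# side effects; it assumes an elasticsearch_dsl connection is configured
# elsewhere in the project. The field values are illustrative only.
#
# doc = AttachmentFileDocType(meta={'id': 1}, id=1, crid='1051117',
#                             title='Arrest report', show=True)
# doc.save()
# hits = AttachmentFileDocType.search().query('match', title='arrest').execute()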
|
11551602
|
import sys
sys.path.insert(0, "Core")
sys.path.insert(0, "unit_Tests")
from constants import *
import pwmServo as servo
import kinematics as ik
import time
import math
from helpers import *
import JGpio as gpio
import IMU as imu
class Quadruped:
def __init__(self,servoIndexes=None):
servo.init() #Open Port and Set Baud Rate
self.Legs = [Leg(),Leg(),Leg(),Leg()]
if servoIndexes!=None:
self.setID(servoIndexes)
# Setup the GAIT Pattern Variables
self.setDefaults()
def setDefaults(self):
self.creep = Creep()
self.trot = Trot()
def setID(self,ID):
for i in range(0,4):
self.Legs[i].setIDs(ID[i*3:i*3+3])
def setParams(self,dirParams,fixedPtsParams):
for i in range(0,4):
self.Legs[i].setParams(dirParams[i*3:i*3+3],fixedPtsParams[i*3:i*3+3])
self.Legs[A].setKit(kit2)
self.Legs[B].setKit(kit2)
self.Legs[C].setKit(kit1)
self.Legs[D].setKit(kit1)
def stanceBackward(self,totalShift,diffFactor=None):
        # Push the legs backwards together, i.e. push the bot forward (creep gait)
# If Differential_Factor -> 1 then there is no push from either side of the Leg
# If Differential_Factor -> 0 then there is slow Differential Turning from either Side of the Leg
# Calculate the Destination Y Position
# For Non Turning Part (differential Factor == 0)
if diffFactor==None:
self.creep.currentYa = self.creep.currentYa - totalShift
self.creep.currentYb = self.creep.currentYb - totalShift
self.creep.currentYc = self.creep.currentYc - totalShift
self.creep.currentYd = self.creep.currentYd - totalShift
else: # For Differential Turning ( -1 <-> +1)
if diffFactor>=0:
#Turn Left
self.creep.currentYa = self.creep.currentYa - totalShift
self.creep.currentYb = self.creep.currentYb - totalShift*(1-abs(diffFactor))
self.creep.currentYc = self.creep.currentYc - totalShift*(1-abs(diffFactor))
self.creep.currentYd = self.creep.currentYd - totalShift
else:
#Turn Right
self.creep.currentYa = self.creep.currentYa - totalShift*(1-abs(diffFactor))
self.creep.currentYb = self.creep.currentYb - totalShift
self.creep.currentYc = self.creep.currentYc - totalShift
self.creep.currentYd = self.creep.currentYd - totalShift*(1-abs(diffFactor))
# Write the Input Position to the Legs
self.Legs[A].setLegPos(self.creep.DEFAULT_X ,self.creep.currentYa , self.Legs[A].z)
self.Legs[B].setLegPos(self.creep.DEFAULT_X ,self.creep.currentYb , self.Legs[B].z)
self.Legs[C].setLegPos(self.creep.DEFAULT_X ,self.creep.currentYc , self.Legs[C].z)
self.Legs[D].setLegPos(self.creep.DEFAULT_X ,self.creep.currentYd ,self.Legs[D].z)
def go2CreepStartPosition(self):
# Starting Position for Creep Position
self.Legs[A].setLegPos(self.creep.DEFAULT_X, self.creep.Y_MIN,self.creep.DEFAULT_Z)
self.Legs[B].setLegPos(self.creep.DEFAULT_X,-self.creep.Y_MIN,self.creep.DEFAULT_Z)
self.Legs[C].setLegPos(self.creep.DEFAULT_X,-self.creep.Y_MEAN,self.creep.DEFAULT_Z)
self.Legs[D].setLegPos(self.creep.DEFAULT_X,self.creep.Y_MEAN,self.creep.DEFAULT_Z)
self.creep.currentYa = self.creep.Y_MIN
self.creep.currentYb = -self.creep.Y_MIN
self.creep.currentYc = -self.creep.Y_MEAN
self.creep.currentYd = self.creep.Y_MEAN
self.Legs[A].z=self.creep.DEFAULT_Z
self.Legs[B].z=self.creep.DEFAULT_Z
self.Legs[C].z=self.creep.DEFAULT_Z
self.Legs[D].z=self.creep.DEFAULT_Z
def Trot(self,diffFactor=None,direction=1):
if diffFactor!=None:
if diffFactor>=0: #Turn Right
left_Y_MAX = self.trot.Y_MAX
left_Y_MIN = self.trot.Y_MIN
right_Y_MAX = self.trot.Y_MAX*(1.0-diffFactor)/2
right_Y_MIN = self.trot.Y_MIN*(1.0-diffFactor)/2
else: #Turn Left
left_Y_MAX = self.trot.Y_MAX*(1.0+diffFactor)/2
left_Y_MIN = self.trot.Y_MIN*(1.0+diffFactor)/2
right_Y_MAX = self.trot.Y_MAX
right_Y_MIN = self.trot.Y_MIN
else:
left_Y_MAX = self.trot.Y_MAX
left_Y_MIN = self.trot.Y_MIN
right_Y_MAX = self.trot.Y_MAX
right_Y_MIN = self.trot.Y_MIN
if direction <0: # Change the Direction of Motion
# Swap the right_Y and Left Y
right_Y_MAX,right_Y_MIN = right_Y_MIN,right_Y_MAX
left_Y_MAX,left_Y_MIN = left_Y_MIN,left_Y_MAX
# Step 1 - Step Leg B And D Forward and PushBack Leg A and C Back
# 1.Pickup the Leg
self.Legs[B].setLegPos(self.trot.DEFAULT_X,right_Y_MIN,self.trot.Z_STEP_UP_HEIGHT)
self.Legs[D].setLegPos(self.trot.DEFAULT_X,-left_Y_MAX,self.trot.Z_STEP_UP_HEIGHT)
time.sleep(self.trot.trotDelay)
# 1.Rotate Top
self.Legs[B].setLegPos(self.trot.DEFAULT_X,right_Y_MAX,self.trot.Z_STEP_UP_HEIGHT)
self.Legs[D].setLegPos(self.trot.DEFAULT_X,-left_Y_MIN,self.trot.Z_STEP_UP_HEIGHT)
self.Legs[A].setLegPos(self.trot.DEFAULT_X,left_Y_MIN,self.trot.DEFAULT_Z)
self.Legs[C].setLegPos(self.trot.DEFAULT_X,-right_Y_MAX,self.trot.DEFAULT_Z)
time.sleep(self.trot.trotDelay)
# 1.Drop Down the Leg
self.Legs[B].setLegPos(self.trot.DEFAULT_X,right_Y_MAX,self.trot.DEFAULT_Z)
self.Legs[D].setLegPos(self.trot.DEFAULT_X,-left_Y_MIN,self.trot.DEFAULT_Z)
time.sleep(self.trot.trotDelay)
# Step 2 - Step Leg A And C Forward and PushBack Leg B and D Back
# 2.Pickup the Leg
self.Legs[A].setLegPos(self.trot.DEFAULT_X,left_Y_MIN,self.trot.Z_STEP_UP_HEIGHT)
self.Legs[C].setLegPos(self.trot.DEFAULT_X,-right_Y_MAX,self.trot.Z_STEP_UP_HEIGHT)
time.sleep(self.trot.trotDelay)
# 2.Rotate Top
self.Legs[A].setLegPos(self.trot.DEFAULT_X,left_Y_MAX,self.trot.Z_STEP_UP_HEIGHT)
self.Legs[C].setLegPos(self.trot.DEFAULT_X,-right_Y_MIN,self.trot.Z_STEP_UP_HEIGHT)
self.Legs[B].setLegPos(self.trot.DEFAULT_X,right_Y_MIN,self.trot.DEFAULT_Z)
self.Legs[D].setLegPos(self.trot.DEFAULT_X,-left_Y_MAX,self.trot.DEFAULT_Z)
time.sleep(self.trot.trotDelay)
# 2.Drop Down the Leg
self.Legs[A].setLegPos(self.trot.DEFAULT_X,left_Y_MAX,self.trot.DEFAULT_Z)
self.Legs[C].setLegPos(self.trot.DEFAULT_X,-right_Y_MIN,self.trot.DEFAULT_Z)
time.sleep(self.trot.trotDelay)
def Creep(self,diffFactor=None):
if diffFactor==None:
left_Y_MAX=self.creep.Y_MAX
left_Y_MIN=self.creep.Y_MIN
right_Y_MAX=self.creep.Y_MAX
right_Y_MIN=self.creep.Y_MIN
else:
if diffFactor>=0: #Turn Right
left_Y_MAX=self.creep.Y_MAX
left_Y_MIN=self.creep.Y_MIN
right_Y_MAX=self.creep.Y_MAX*(1.0-diffFactor)/2
right_Y_MIN=self.creep.Y_MIN*(1.0-diffFactor)/2
else: #Turn Left
left_Y_MAX=self.creep.Y_MAX*(1.0+diffFactor)/2
left_Y_MIN=self.creep.Y_MIN*(1.0+diffFactor)/2
right_Y_MAX=self.creep.Y_MAX
right_Y_MIN=self.creep.Y_MIN
# Step 2.1 - Step Leg A Forward
# input("Step A Forward")
self.Legs[A].StepInY(left_Y_MIN,left_Y_MAX)
self.creep.currentYa = left_Y_MAX
time.sleep(0.1)
# input("Press Any Key to PushBack1")
# Step 1.2 - Push Forward
self.stanceBackward(self.creep.totalShiftSize,diffFactor)
time.sleep(0.1)
# Step 3 - Step Leg C Forward
# input("Step C Forward")
self.Legs[C].StepInY(-right_Y_MAX,-right_Y_MIN)
self.creep.currentYc = -right_Y_MIN
time.sleep(0.1)
# Step 2 - Step Leg D Forward
# input("Step D Forward")
self.Legs[D].StepInY(left_Y_MIN,left_Y_MAX)
self.creep.currentYd = left_Y_MAX
time.sleep(0.1)
# input("Press Any Key to PushBack2")
# Step 2.2 - Push Forward
self.stanceBackward(self.creep.totalShiftSize,diffFactor)
# Step 1 - Step Leg B Forward
# input("Step B Forward")
self.Legs[B].StepInY(-right_Y_MAX,-right_Y_MIN)
self.creep.currentYb = -right_Y_MIN
time.sleep(0.1)
def Creep_w_bump(self,diffFactor=None):
if diffFactor==None:
left_Y_MAX=self.creep.Y_MAX
left_Y_MIN=self.creep.Y_MIN
right_Y_MAX=self.creep.Y_MAX
right_Y_MIN=self.creep.Y_MIN
else:
if diffFactor>=0: #Turn Right
left_Y_MAX=self.creep.Y_MAX
left_Y_MIN=self.creep.Y_MIN
right_Y_MAX=self.creep.Y_MAX*(1.0-diffFactor)/2
right_Y_MIN=self.creep.Y_MIN*(1.0-diffFactor)/2
else: #Turn Left
left_Y_MAX=self.creep.Y_MAX*(1.0+diffFactor)/2
left_Y_MIN=self.creep.Y_MIN*(1.0+diffFactor)/2
right_Y_MAX=self.creep.Y_MAX
right_Y_MIN=self.creep.Y_MIN
# Step 2.1 - Step Leg A Forward
# input("Step A Forward")
self.Legs[A].StepInY_feedback(left_Y_MIN,left_Y_MAX)
self.creep.currentYa = left_Y_MAX
time.sleep(0.1)
# input("Press Any Key to PushBack1")
# Step 1.2 - Push Forward
self.stanceBackward(self.creep.totalShiftSize,diffFactor)
time.sleep(0.1)
# Step 3 - Step Leg C Forward
# input("Step C Forward")
self.Legs[C].StepInY_feedback(-right_Y_MAX,-right_Y_MIN)
self.creep.currentYc = -right_Y_MIN
time.sleep(0.1)
# Step 2 - Step Leg D Forward
# input("Step D Forward")
self.Legs[D].StepInY_feedback(left_Y_MIN,left_Y_MAX)
self.creep.currentYd = left_Y_MAX
time.sleep(0.1)
# input("Press Any Key to PushBack2")
# Step 2.2 - Push Forward
self.stanceBackward(self.creep.totalShiftSize,diffFactor)
# Step 1 - Step Leg B Forward
# input("Step B Forward")
self.Legs[B].StepInY_feedback(-right_Y_MAX,-right_Y_MIN)
self.creep.currentYb = -right_Y_MIN
time.sleep(0.1)
def walk(self,Mode,diffFactor=None):
self.go2CreepStartPosition()
# input("Press Enter")
while True:
if Mode == CREEP:
self.Creep(diffFactor)
elif Mode == TROT:
self.Trot(diffFactor)
elif Mode == TROT_BACK:
self.Trot(diffFactor,direction=-1)
elif Mode == CREEP_DYN:
self.Creep_w_bump(diffFactor)
else:
print("Walking Mode is Not Specified")
quit()
def walkOnce(self,Mode,command=None,diffFactor=None):
if Mode == CREEP:
self.Creep(diffFactor)
elif Mode == TROT:
self.Trot(diffFactor)
elif Mode == TROT_BACK:
self.Trot(diffFactor,direction=-1)
elif Mode == CREEP_DYN:
self.Creep_w_bump(diffFactor)
else:
print("Walking Mode is Not Specified")
class Leg:
def __init__(self,ID = None):
self.joints = [servo.pwmServo(),servo.pwmServo(),servo.pwmServo()]
if (ID != None):
self.setIDs(ID)
self.Z_STEP_UP_HEIGHT = -13
self.STEP_UP_DELAY = 0.25
self.BUMP_STATUS=False
self.bumpPin=None
def setIDs(self,ID):
self.joints[TOP].setIndex(ID[TOP])
self.joints[MIDDLE].setIndex(ID[MIDDLE])
self.joints[BOTTOM].setIndex(ID[BOTTOM])
def setParams(self,dirParams,fixedPointParams):
self.joints[TOP].setParams(dirParams[TOP],fixedPointParams[TOP])
self.joints[MIDDLE].setParams(dirParams[MIDDLE],fixedPointParams[MIDDLE])
self.joints[BOTTOM].setParams(dirParams[BOTTOM],fixedPointParams[BOTTOM])
self.doOnce = True
def setKit(self,kit):
for x in self.joints:
x.setKit(kit)
x.setPWM(500,2500)
def setBumpPin(self,pin):
        self.bumpPin = pin
gpio.setInput(pin)
def setLegPos(self,x,y,z):
t1,t2,t3,isPossible = ik.getInverse(x,y,z)
if isPossible:
# Store the Current Value of X,Y,Z
if self.doOnce:
self.doOnce=False
self.x = x
self.y = y
self.z = z
self.joints[TOP].writeAngle(t1)
self.joints[MIDDLE].writeAngle(t2)
self.joints[BOTTOM].writeAngle(t3)
else:
print("Inverse Not Possible")
def updateBumpStatus(self):
pass
def StepInY(self,from_y,to_y):
# input("Press Any Key:Leg Pickup")
# Pickup the Leg
self.setLegPos(self.x,from_y,self.Z_STEP_UP_HEIGHT)
time.sleep(self.STEP_UP_DELAY)
# input("Press Any Key:Leg Rotate")
# Rotate Top
self.setLegPos(self.x,to_y,self.Z_STEP_UP_HEIGHT)
time.sleep(self.STEP_UP_DELAY)
self.y = to_y
# input("Press Any Key:Leg Drop")
# Drop Down the Leg
self.setLegPos(self.x,to_y,self.z)
time.sleep(self.STEP_UP_DELAY)
def StepInY_feedback(self,from_y,to_y):
# input("Press Any Key:Leg Pickup")
# Pickup the Leg
self.setLegPos(self.x,from_y,self.Z_STEP_UP_HEIGHT)
time.sleep(self.STEP_UP_DELAY)
# input("Press Any Key:Leg Rotate")
# Rotate Top
self.setLegPos(self.x,to_y,self.Z_STEP_UP_HEIGHT)
time.sleep(self.STEP_UP_DELAY)
self.y = to_y
self.setLegPos(self.x,to_y,self.Z_STEP_UP_HEIGHT-2)
time.sleep(0.1)
initialYPR =imu.read()
ERROR_THRESH = 1.0
Kp = 0.1
# Check Drop Down Leg
currentZ = self.Z_STEP_UP_HEIGHT-2
while True:
currentYPR = imu.read()
error = ((initialYPR[1]-currentYPR[1])**2 + (initialYPR[2]-currentYPR[2])**2)**0.5
print("Error:",error," , currentZ:",currentZ)
# if(error>ERROR_THRESH):
# break
if(error>ERROR_THRESH):
break
print("Looking for Base Unstability")
self.setLegPos(self.x,to_y,currentZ)
# input("Press Any Key to Drop Down")
currentZ-=dropDownIncrements
time.sleep(dropDownDelay)
Kp=0.3
ERROR_THRESH=0.4
while True:
currentYPR = imu.read()
error = ((initialYPR[1]-currentYPR[1])**2 + (initialYPR[2]-currentYPR[2])**2)**0.5
correction = Kp*error
print("Error:",error," , currentZ:",currentZ," , correction:",correction)
if(error<ERROR_THRESH):
break
# print("Stabilizing Base")
self.setLegPos(self.x,to_y,currentZ)
# input("Stabilizing Base ,Press Any key to go Up")
currentZ+=correction
time.sleep(dropDownDelay)
self.z=currentZ
# input("Press Any Key:Leg Drop")
# Drop Down the Leg
self.setLegPos(self.x,to_y,self.z)
input("DropDown Complete")
time.sleep(self.STEP_UP_DELAY)
if __name__=="__main__":
imu.start()
venom = Quadruped(servoId)
venom.setParams(dirVector,FixedPoints)
venom.go2CreepStartPosition()
input("Press Enter")
venom.walk(CREEP_DYN)
|
11551620
|
import gzip
import json
import os
import shutil
import time
from pathlib import Path
import dns.resolver
import geoip2.database
import requests
from folium import Map, Marker, Popup
from core.utils import DOMAIN, Helpers, logger
helpers = Helpers()
# Working program directories
prog_root = Path(os.path.dirname(os.path.dirname(__file__)))
geomap_root = prog_root / "geomap"
# Create the geomap directory
if not os.path.exists(geomap_root):
os.mkdir(geomap_root)
# Working files
gl_zipped = geomap_root / "GeoLite2-City.mmdb.gz"
gl_file = geomap_root / "GeoLite2-City.mmdb"
ip_map_file = os.path.join(geomap_root, "ip_map.html")
url = "https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz"
def geo_query_map(QRY):
# Check if Geolite file exists
geolite_check()
# Used to resolve domains to ip address
resolver = dns.resolver.Resolver()
resolver.nameservers = ["1.1.1.1", "8.8.8.8", "8.8.4.4"]
if DOMAIN.findall(QRY):
try:
response = resolver.query(QRY, "A")
QRY = response.rrset[-1]
map_maxmind(str(QRY))
except dns.resolver.NoAnswer as err:
logger.error(f"[error] {err}")
else:
map_maxmind(QRY)
# ---[ GeoLite File Check/Download ]---
def geolite_check():
if os.path.exists(gl_zipped):
print(f"{gl_zipped} exists, unzipping...")
with gzip.open(gl_zipped, "rb") as f_in:
with open(gl_file, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(gl_zipped)
if not os.path.exists(gl_file):
print("-" * 80)
logger.warning(f"[-] {gl_file} does not exist.")
geoip_download = input("\n[+] Would you like to download the GeoLite2-City file (yes/no)? ")
if geoip_download.lower() == "yes":
os.chdir(geomap_root)
helpers.download_file(url)
with gzip.open(gl_zipped, "rb") as f_in, open(gl_file, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(gl_zipped)
# ---[ Geolocate and Map IP Address ]---
# Ref: https://github.com/maxmind/GeoIP2-python
def map_maxmind(QRY):
try:
geo_reader = geoip2.database.Reader(gl_file)
ip_map = Map([40, -5], tiles="OpenStreetMap", zoom_start=3)
response = geo_reader.city(QRY)
if response.location:
lat = response.location.latitude
lon = response.location.longitude
popup = Popup(QRY)
Marker([lat, lon], popup=popup).add_to(ip_map)
ip_map.save(ip_map_file)
except geoip2.errors.AddressNotFoundError:
logger.warning(f"[-] Address {QRY} is not in the geoip database.")
except FileNotFoundError:
logger.info("\n[*] Please download the GeoLite2-City database file: ")
print(" --> https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz")
time.sleep(2)
def map_free_geo(QRY):
ip_map = Map([40, -5], tiles="OpenStreetMap", zoom_start=3)
freegeoip = f"https://freegeoip.live/json/{QRY}"
try:
req = requests.get(freegeoip)
req.raise_for_status()
    except requests.exceptions.RequestException as err:
logger.warning(f"[error] {err}\n")
else:
if req.status_code == 200:
data = json.loads(req.content.decode("utf-8"))
lat = data["latitude"]
lon = data["longitude"]
Marker([lat, lon], popup=QRY).add_to(ip_map)
ip_map.save(ip_map_file)
def multi_map(input_file):
os.chdir(geomap_root)
# Check if Geolite file exists
geolite_check()
file_path = os.path.abspath(os.pardir)
input_file = f"{file_path}/{input_file}"
with open(input_file) as f:
line = [line.strip() for line in f.readlines()]
ip_map = Map([40, -5], tiles="OpenStreetMap", zoom_start=3)
try:
geo_reader = geoip2.database.Reader("GeoLite2-City.mmdb")
for addr in line:
response = geo_reader.city(addr)
if response.location:
logger.success(f"[+] Mapping {addr}")
lat = response.location.latitude
lon = response.location.longitude
Marker([lat, lon], popup=addr).add_to(ip_map)
ip_map.save("multi_map.html")
except ValueError as err:
print(f"[error] {err}")
except geoip2.errors.AddressNotFoundError:
logger.warning("[-] Address is not in the geoip database.")
except FileNotFoundError:
geolite_check()
|
11551635
|
from datetime import timedelta
from backend.common.decorators import cached_public
from backend.web.profiled_render import render_template
@cached_public(ttl=timedelta(weeks=1))
def apidocs_trusted_v1() -> str:
template_values = {
"title": "Trusted APIv1",
"swagger_url": "/swagger/api_trusted_v1.json",
}
return render_template("apidocs_swagger.html", template_values)
@cached_public(ttl=timedelta(weeks=1))
def apidocs_v3() -> str:
template_values = {
"title": "APIv3",
"swagger_url": "/swagger/api_v3.json",
}
return render_template("apidocs_swagger.html", template_values)
|
11551641
|
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# Load all of the BBS data into a DataFrame
bbs = pd.merge(
pd.read_csv("data/cleaned/bbs.csv"),
pd.read_csv("data/cleaned/clean_routes.csv"),
how="left",
on="route_id",
)
# Create dictionaries that map categories to integers
categorical_covariates = ["english", "genus", "family", "order", "L1_KEY"]
cat_ix = {}
for cat in categorical_covariates:
cat_dict = {}
category_series = bbs[cat].unique()
for i, item in enumerate(category_series):
cat_dict[item] = i
cat_ix[cat] = cat_dict
class BBSData(Dataset):
"""North American Breeding Bird Survey data."""
def __init__(self, df):
"""
Args:
df (pandas.DataFrame): data frame with bbs data.
"""
self.df = df
self.cat_ix = cat_ix
self.bbs_y = self.get_cont("^[0-9]{4}$", df)
self.x_p = torch.stack(
(
self.get_cont("^StartTemp_", df),
self.get_cont("^StartWind_", df),
self.get_cont("^EndTemp_", df),
self.get_cont("^EndWind_", df),
self.get_cont("^StartSky_", df),
self.get_cont("^EndSky_", df),
self.get_cont("^duration_", df),
),
-1,
)
self.bbs_species = self.get_cat("english", df)
self.bbs_genus = self.get_cat("genus", df)
self.bbs_family = self.get_cat("family", df)
self.bbs_order = self.get_cat("order", df)
self.bbs_l1 = self.get_cat("L1_KEY", df)
# x is a covariate vector, e.g., PC1, PC2, ...
self.bbs_x = torch.tensor(
df.filter(regex="^PC|^c_", axis=1).values, dtype=torch.float64
)
def __len__(self):
return len(self.df.index)
def get_cont(self, regex, df):
""" Extract continuous valued data from columns matching regex. """
res = torch.tensor(
df.filter(regex=regex, axis=1).values, dtype=torch.float64
)
return res
def get_cat(self, name, df):
""" Find an integer index for a particular category. """
res = [self.cat_ix[name][i[0]] for i in self.df[[name]].values]
        res = np.array(res, dtype=np.int64)
return torch.tensor(res, dtype=torch.long)
def __getitem__(self, idx):
""" Get an item from the data.
This is one training example: a route X species time series with feats
"""
y = self.bbs_y[idx, :].squeeze(0)
x_p = self.x_p[idx, :, :].squeeze(0)
x = self.bbs_x[idx, :].squeeze(0)
species = self.bbs_species[idx]
genus = self.bbs_genus[idx]
family = self.bbs_family[idx]
order = self.bbs_order[idx]
l1 = self.bbs_l1[idx]
return species, genus, family, order, l1, x, x_p, y
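# A minimal usage sketch, assuming the merged `bbs` frame loaded above is the
# data to batch; DataLoader collates each tuple from __getitem__ into batched
# tensors, so `y` below has shape (batch_size, number_of_year_columns).
if __name__ == "__main__":
    dataset = BBSData(bbs)
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    species, genus, family, order, l1, x, x_p, y = next(iter(loader))
    print(y.shape)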
|
11551676
|
from pathlib import Path
from urllib.parse import urlparse
import click
import requests
import database_connection # noqa: F401
from matrix_connection import get_download_url
from schema import Message
def download_stem(message, prefer_thumbnails):
image_url = (message.thumbnail_url if prefer_thumbnails else None) \
or message.image_url
return urlparse(image_url).path.lstrip('/')
def run_downloads(messages, download_dir, prefer_thumbnails):
"""Run downloads
:param messages: List of messages
:param download_dir: Location where the images shall be stored
:param prefer_thumbnails: Whether to prefer thumbnails than full images.
"""
s = requests.Session()
for msg in messages:
image_url = (msg.thumbnail_url if prefer_thumbnails else None) or msg.image_url
try:
download_url = get_download_url(image_url)
try:
res = s.head(download_url)
res.raise_for_status()
mtype, subtype = res.headers['content-type'].split('/', 2)
if mtype != 'image':
print(f"Skipping {image_url}: {res.headers['content-type']}")
continue
except requests.exceptions.RequestException as e:
print("{} Skipping...".format(e))
continue
try:
res = s.get(download_url)
res.raise_for_status()
filename = (download_dir / download_stem(msg, prefer_thumbnails)
).with_suffix('.' + subtype)
print('Downloading', image_url, '->', filename)
with open(filename, 'wb') as fp:
fp.write(res.content)
except requests.exceptions.RequestException as e:
print("{} Skipping...".format(e))
except AssertionError:
print('Assertion Error in get_download_url("{}"). Skipping...'.format(image_url))
@click.command()
@click.option('--thumbnails/--no-thumbnails', default=True)
@click.argument('output', required=False)
def download_images(thumbnails, output):
"""Download thumbnails."""
noun = 'thumbnails' if thumbnails else 'images'
download_dir = Path(output or noun)
messages = [msg for msg in Message.objects
if msg.content.get('msgtype') == 'm.image']
download_dir.mkdir(exist_ok=True)
current_stems = {p.stem for p in download_dir.glob('*')}
new_messages = [msg for msg in messages
if download_stem(msg, thumbnails)
not in current_stems]
skip_count = len(messages) - len(new_messages)
if skip_count:
print(f"Skipping {skip_count} already-downloaded {noun}")
if new_messages:
print(f"Downloading {len(new_messages)} new {noun}...")
else:
print("Nothing to do")
run_downloads(new_messages, download_dir, prefer_thumbnails=thumbnails)
if __name__ == '__main__':
download_images()
|
11551691
|
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
app = FastAPI()
async def fake_video_streamer():
for i in range(10):
yield b"some fake video bytes"
@app.get("/")
async def main():
return StreamingResponse(fake_video_streamer())
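# A minimal client-side sketch, assuming the app above is served locally
# (for example with `uvicorn main:app`); requests' stream mode reads the
# generator output chunk by chunk rather than buffering the whole body.
#
# import requests
# with requests.get("http://127.0.0.1:8000/", stream=True) as resp:
#     for chunk in resp.iter_content(chunk_size=None):
#         print(chunk)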
|
11551734
|
from typing import Mapping
from colorama import Fore, Style
from typing_extensions import Literal
SupportedColorName = Literal[
"RED", "YELLOW", "GREEN", "CYAN", "GRAY", "MAGENTA", "RESET", "BLUE"
]
class LogColorProvider:
def __init__(self):
self.is_ci_mode = False
def get_color(
self,
color_name: SupportedColorName,
) -> str:
if self.is_ci_mode:
return ""
mapping: Mapping[SupportedColorName, str] = {
"RED": Fore.RED,
"YELLOW": Fore.YELLOW,
"GREEN": Fore.GREEN,
"CYAN": Fore.CYAN,
"GRAY": Fore.LIGHTBLACK_EX,
"MAGENTA": Fore.LIGHTMAGENTA_EX,
"RESET": Fore.RESET,
}
if color_name in mapping:
return mapping[color_name]
return ""
def colorize(self, color_name: SupportedColorName, content: str):
return f"{self.get_color(color_name)}{content}{self.get_color('RESET')}"
# pylint: disable=no-self-use
def bold(self, content: str):
return f"{Style.BRIGHT}{content}{Style.RESET_ALL}"
log_color_provider = LogColorProvider()
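# A small usage sketch of the shared instance above; when `is_ci_mode` is set
# the color codes collapse to empty strings and plain text is printed instead.
if __name__ == "__main__":
    print(log_color_provider.colorize("GREEN", "build succeeded"))
    print(log_color_provider.bold("summary"))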
|
11551745
|
import sys, os
curmodulepath = os.path.dirname(os.path.abspath(__file__))
if hasattr(sys, 'pypy_version_info'):
sys.path.insert(0, os.path.abspath(os.path.join(curmodulepath, '..', 'pypylib')))
sys.path.insert(0, os.path.abspath(os.path.join(curmodulepath, '..')))
sys.path.insert(0, os.path.abspath(os.path.join(curmodulepath, '..', 'lib')))
|
11551753
|
import os, re
import pyric.pyw as pyw
ROOT = os.geteuid() == 0
MWINTERFACES = [x for x in pyw.winterfaces() if pyw.modeget(x) == "monitor"]
INTERFACES = [x for x in pyw.interfaces()]
BAD_MAC = [
"ff:ff:ff:ff:ff:ff",
"00:00:00:00:00:00", # Multicast
"01:80:c2:00:00:00", # Multicast
"01:00:5e", # Multicast
"01:80:c2", # Multicast
"33:33", # Multicast
]
MACFILTER = "[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
WPS_QUERY = {
b"\x00\x10\x18": "Broadcom", # Broadcom */
b"\x00\x03\x7f": "AtherosC", # Atheros Communications */
b"\x00\x0c\x43": "RalinkTe", # Ralink Technology, Corp. */
b"\x00\x17\xa5": "RalinkTe", # Ralink Technology Corp */
b"\x00\xe0\x4c": "RealtekS", # Realtek Semiconductor Corp. */
b"\x00\x0a\x00": "Mediatek", # Mediatek Corp. */
b"\x00\x0c\xe7": "Mediatek", # Mediatek MediaTek Inc. */
b"\x00\x1c\x51": "CelenoCo", # Celeno Communications */
b"\x00\x50\x43": "MarvellS", # Marvell Semiconductor, Inc. */
b"\x00\x26\x86": "Quantenn", # Quantenna */
b"\x00\x09\x86": "LantiqML", # Lantiq/MetaLink */
b"\x00\x50\xf2": "Microsof"
}
# This is for the future ;)
WPS_ATTRIBUTES = {
0x104A : {'name' : 'Version ', 'type' : 'hex'},
0x1044 : {'name' : 'WPS State ', 'type' : 'hex'},
0x1057 : {'name' : 'AP Setup Locked ', 'type' : 'hex'},
0x1041 : {'name' : 'Selected Registrar ', 'type' : 'hex'},
0x1012 : {'name' : 'Device Password ID ', 'type' : 'hex'},
0x1053 : {'name' : 'Selected Registrar Config Methods', 'type' : 'hex'},
0x103B : {'name' : 'Response Type ', 'type' : 'hex'},
0x1047 : {'name' : 'UUID-E ', 'type' : 'hex'},
0x1021 : {'name' : 'Manufacturer ', 'type' : 'str'},
0x1023 : {'name' : 'Model Name ', 'type' : 'str'},
0x1024 : {'name' : 'Model Number ', 'type' : 'str'},
0x1042 : {'name' : 'Serial Number ', 'type' : 'str'},
0x1054 : {'name' : 'Primary Device Type ', 'type' : 'hex'},
0x1011 : {'name' : 'Device Name ', 'type' : 'str'},
0x1008 : {'name' : 'Config Methods ', 'type' : 'hex'},
0x103C : {'name' : 'RF Bands ', 'type' : 'hex'},
0x1045 : {'name' : 'SSID ', 'type' : 'str'},
0x102D : {'name' : 'OS Version ', 'type' : 'str'}
}
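# A hedged sketch of how the constants above are typically combined: validate
# a candidate MAC against MACFILTER and discard broadcast/multicast prefixes
# listed in BAD_MAC. The helper name is illustrative, not part of this module.
def _is_usable_mac(mac):
    mac = mac.lower()
    if not re.match(MACFILTER, mac):
        return False
    return not any(mac.startswith(bad) for bad in BAD_MAC)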
|
11551771
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from imlib.basic import *
from imlib.dtype import *
from imlib.encode import *
from imlib.transform import *
|
11551775
|
import os
import tempfile
import unittest
from io import BytesIO
import odil
class TestWriter(unittest.TestCase):
def test_constructor_1(self):
stream = odil.iostream(BytesIO())
writer = odil.Writer(stream, odil.ByteOrdering.LittleEndian, False)
self.assertEqual(writer.byte_ordering, odil.ByteOrdering.LittleEndian)
self.assertFalse(writer.explicit_vr)
self.assertEqual(
writer.item_encoding, odil.Writer.ItemEncoding.ExplicitLength)
self.assertFalse(writer.use_group_length)
def test_constructor_2(self):
stream = odil.iostream(BytesIO())
writer = odil.Writer(stream, odil.registry.ExplicitVRBigEndian)
self.assertEqual(writer.byte_ordering, odil.ByteOrdering.BigEndian)
self.assertTrue(writer.explicit_vr)
self.assertEqual(
writer.item_encoding, odil.Writer.ItemEncoding.ExplicitLength)
self.assertFalse(writer.use_group_length)
def test_write_data_set(self):
data_set = odil.DataSet()
data_set.add("PatientName", ["Foo^Bar"])
data_set.add("PatientID", ["FOO"])
string_io = BytesIO()
stream = odil.iostream(string_io)
writer = odil.Writer(stream, odil.registry.ExplicitVRLittleEndian)
writer.write_data_set(data_set)
self.assertEqual(
string_io.getvalue(),
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar "
b"\x10\x00\x20\x00" b"LO" b"\x04\x00" b"FOO "
)
def test_write_tag(self):
string_io = BytesIO()
stream = odil.iostream(string_io)
writer = odil.Writer(stream, odil.registry.ExplicitVRLittleEndian)
writer.write_tag(odil.registry.PatientID)
self.assertEqual(string_io.getvalue(), b"\x10\x00\x20\x00")
def test_write_element(self):
string_io = BytesIO()
stream = odil.iostream(string_io)
writer = odil.Writer(stream, odil.registry.ExplicitVRLittleEndian)
writer.write_element(odil.Element(["Foo^Bar"], odil.VR.PN))
self.assertEqual(string_io.getvalue(), b"PN\x08\x00Foo^Bar ")
def test_write_file_stream(self):
data_set = odil.DataSet()
data_set.add("SOPClassUID", ["1.2.3.4"])
data_set.add("SOPInstanceUID", ["1.2.3.4.5"])
data_set.add("PatientName", ["Foo^Bar"])
string_io = BytesIO()
stream = odil.iostream(string_io)
odil.Writer.write_file(
data_set, stream, odil.DataSet(),
odil.registry.ExplicitVRLittleEndian)
data = (
128*b"\0"+b"DICM"+
b"\x02\x00\x00\x00" b"UL" b"\x04\x00" b"\x80\x00\x00\x00"
b"\x02\x00\x01\x00" b"OB" b"\x00\x00" b"\x02\x00\x00\x00" b"\x00\x01"
b"\x02\x00\x02\x00" b"UI" b"\x08\x00" b"1.2.3.4\x00"
b"\x02\x00\x03\x00" b"UI" b"\x0a\x00" b"1.2.3.4.5\x00"
b"\x02\x00\x10\x00" b"UI" b"\x14\x00" b"1.2.840.10008.1.2.1\x00"
b"\x02\x00\x12\x00" b"UI" b"\x1e\x00" b"1.2.826.0.1.3680043.9.55600.0\x00"
b"\x02\x00\x13\x00" b"SH" b"\x06\x00" b"Odil 0"
b"\x08\x00\x16\x00" b"UI" b"\x08\x00" b"1.2.3.4\x00"
b"\x08\x00\x18\x00" b"UI" b"\x0a\x00" b"1.2.3.4.5\x00"
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar ")
self.assertEqual(string_io.getvalue(), data)
def test_write_file_path(self):
data_set = odil.DataSet()
data_set.add("SOPClassUID", ["1.2.3.4"])
data_set.add("SOPInstanceUID", ["1.2.3.4.5"])
data_set.add("PatientName", ["Foo^Bar"])
fd, path = tempfile.mkstemp()
os.close(fd)
string_io = BytesIO()
stream = odil.iostream(string_io)
odil.Writer.write_file(
data_set, path, odil.DataSet(),
odil.registry.ExplicitVRLittleEndian)
data = (
128*b"\0"+b"DICM"+
b"\x02\x00\x00\x00" b"UL" b"\x04\x00" b"\x80\x00\x00\x00"
b"\x02\x00\x01\x00" b"OB" b"\x00\x00" b"\x02\x00\x00\x00" b"\x00\x01"
b"\x02\x00\x02\x00" b"UI" b"\x08\x00" b"1.2.3.4\x00"
b"\x02\x00\x03\x00" b"UI" b"\x0a\x00" b"1.2.3.4.5\x00"
b"\x02\x00\x10\x00" b"UI" b"\x14\x00" b"1.2.840.10008.1.2.1\x00"
b"\x02\x00\x12\x00" b"UI" b"\x1e\x00" b"1.2.826.0.1.3680043.9.55600.0\x00"
b"\x02\x00\x13\x00" b"SH" b"\x06\x00" b"Odil 0"
b"\x08\x00\x16\x00" b"UI" b"\x08\x00" b"1.2.3.4\x00"
b"\x08\x00\x18\x00" b"UI" b"\x0a\x00" b"1.2.3.4.5\x00"
b"\x10\x00\x10\x00" b"PN" b"\x08\x00" b"Foo^Bar ")
contents = open(path, "rb").read()
os.remove(path)
self.assertEqual(contents, data)
if __name__ == "__main__":
unittest.main()
|
11551784
|
from waltz_ducktape.services.cli.base_cli import Cli
class ClientCli(Cli):
"""
    ClientCli is a utility class to interact with com.wepay.waltz.tools.client.ClientCli.
"""
def __init__(self, cli_config_path):
"""
Construct a new 'ClientCli' object.
:param cli_config_path: The path to client cli config file
"""
super(ClientCli, self).__init__(cli_config_path)
def validate_txn_cmd(self, log_file_path, num_active_partitions, txn_per_client, num_clients, interval):
"""
Return validation cli command to submit and validate transactions, which
includes validating high water mark, transaction data and optimistic lock.
java com.wepay.waltz.tools.client.ClientCli \
validate \
--txn-per-client <number of transactions per client> \
--num-clients <number of total clients> \
--interval <average interval(millisecond) between transactions> \
--cli-config-path <client cli config file path> \
--num-active-partitions <number of partitions to interact with>
"""
cmd_arr = [
"java -Dlog4j.configuration=file:{}".format(log_file_path), self.java_cli_class_name(),
"validate",
"--txn-per-client", txn_per_client,
"--num-clients", num_clients,
"--interval", interval,
"--cli-config-path", self.cli_config_path,
"--num-active-partitions {}".format(num_active_partitions) if num_active_partitions is not None else ""
]
return self.build_cmd(cmd_arr)
def create_producer_cmd(self, log_file_path, txn_per_client, interval, num_active_partitions):
"""
Return producer cli command to submit client transactions in a single process,
which includes validation of each and every transaction successful completion.
java com.wepay.waltz.tools.client.ClientCli \
create-producer \
--txn-per-client <number of transactions to be generated by this producer> \
--interval <average interval(millisecond) between transactions> \
--cli-config-path <client cli config file path>
"""
cmd_arr = [
"java -Dlog4j.configuration=file:{}".format(log_file_path), self.java_cli_class_name(),
"create-producer",
"--txn-per-client", txn_per_client,
"--interval", interval,
"--num-active-partitions {}".format(num_active_partitions) if num_active_partitions is not None else "",
"--cli-config-path", self.cli_config_path
]
return self.build_cmd(cmd_arr)
def create_consumer_cmd(self, log_file_path, txn_per_client, num_active_partitions):
"""
Return consumer cli command to create a consumer client process, which reads
transactions stored in waltz. Validation is successful if consumer consumes
specified number of transactions.
java com.wepay.waltz.tools.client.ClientCli \
create-consumer \
--txn-per-client <number of transactions to be consumed by this consumer> \
--interval <average interval(millisecond) between transactions> \
--cli-config-path <client cli config file path>
"""
cmd_arr = [
"java -Dlog4j.configuration=file:{}".format(log_file_path), self.java_cli_class_name(),
"create-consumer",
"--txn-per-client", txn_per_client,
"--num-active-partitions {}".format(num_active_partitions) if num_active_partitions is not None else "",
"--cli-config-path", self.cli_config_path
]
return self.build_cmd(cmd_arr)
def java_cli_class_name(self):
return "com.wepay.waltz.tools.client.ClientCli"
|
11551785
|
import prettytable
from schedule import Schedule
from genetic import GeneticOptimize
def vis(schedule):
"""visualization Class Schedule.
Arguments:
schedule: List, Class Schedule
"""
col_labels = ['week/slot', '1', '2', '3', '4', '5']
table_vals = [[i + 1, '', '', '', '', ''] for i in range(5)]
table = prettytable.PrettyTable(col_labels, hrules=prettytable.ALL)
for s in schedule:
weekDay = s.weekDay
slot = s.slot
text = 'course: {} \n class: {} \n room: {} \n teacher: {}'.format(s.courseId, s.classId, s.roomId, s.teacherId)
table_vals[weekDay - 1][slot] = text
for row in table_vals:
table.add_row(row)
print(table)
if __name__ == '__main__':
schedules = []
# add schedule
schedules.append(Schedule(201, 1201, 11101))
schedules.append(Schedule(201, 1201, 11101))
schedules.append(Schedule(202, 1201, 11102))
schedules.append(Schedule(202, 1201, 11102))
schedules.append(Schedule(203, 1201, 11103))
schedules.append(Schedule(203, 1201, 11103))
schedules.append(Schedule(206, 1201, 11106))
schedules.append(Schedule(206, 1201, 11106))
schedules.append(Schedule(202, 1202, 11102))
schedules.append(Schedule(202, 1202, 11102))
schedules.append(Schedule(204, 1202, 11104))
schedules.append(Schedule(204, 1202, 11104))
schedules.append(Schedule(206, 1202, 11106))
schedules.append(Schedule(206, 1202, 11106))
schedules.append(Schedule(203, 1203, 11103))
schedules.append(Schedule(203, 1203, 11103))
schedules.append(Schedule(204, 1203, 11104))
schedules.append(Schedule(204, 1203, 11104))
schedules.append(Schedule(205, 1203, 11105))
schedules.append(Schedule(205, 1203, 11105))
schedules.append(Schedule(206, 1203, 11106))
schedules.append(Schedule(206, 1203, 11106))
# optimization
ga = GeneticOptimize(popsize=50, elite=10, maxiter=500)
res = ga.evolution(schedules, 3)
# visualization
vis_res = []
for r in res:
if r.classId == 1203:
vis_res.append(r)
vis(vis_res)
|
11551802
|
from compileall import compile_dir
from ._base import DanubeCloudCommand, CommandOption
class Command(DanubeCloudCommand):
help = 'Recursively byte-compile all modules in ERIGONES_HOME.'
options = (
CommandOption('-q', '--que', '--node', action='store_true', dest='que_only', default=False,
help='Byte-compile only compute node related stuff.'),
)
def handle(self, que_only=False, **options):
if que_only:
target_folders = [self._path(self.PROJECT_DIR, i) for i in ('envs', 'core', 'que')]
else:
target_folders = [self.PROJECT_DIR]
quiet = int(int(options.get('verbosity', self.default_verbosity)) <= self.default_verbosity)
for folder in target_folders:
self.display('Byte-compiling all modules in %s' % folder, color='white')
rc = compile_dir(folder, maxlevels=20, quiet=quiet)
if rc:
self.display('Byte-compiled all modules in %s' % folder, color='green')
else:
self.display('Error while byte-compiling some modules in %s' % folder, color='yellow')
|
11551807
|
import numpy as np
import pytest
from pymoo.factory import get_problem
from pymoo.indicators.kktpm import KKTPM
from pymoo.problems.autodiff import AutomaticDifferentiation
from pymoo.problems.bounds_as_constr import BoundariesAsConstraints
from tests.util import path_to_test_resource
SETUP = {
"bnh": {'utopian_eps': 0.0, "ideal": np.array([-0.05, -0.05]), "rho": 0.0},
"zdt1": {'utopian_eps': 1e-3},
"zdt2": {'utopian_eps': 1e-4},
"zdt3": {'utopian_eps': 1e-4, "ideal": np.array([0.0, -1.0])},
# "osy": {'utopian_eps': 0.0, "ideal": np.array([-300, -0.05]), "rho": 0.0}
}
@pytest.mark.parametrize('str_problem,params', SETUP.items())
def test_kktpm_correctness(str_problem, params):
problem = BoundariesAsConstraints(AutomaticDifferentiation(get_problem(str_problem)))
# problem = AutomaticDifferentiation(BoundariesAsConstraints(get_problem(str_problem)))
def load_file(f):
return np.loadtxt(path_to_test_resource("kktpm", "%s_%s.txt" % (str_problem, f)))
X = load_file("x")
_F, _G, _dF, _dG = problem.evaluate(X, return_values_of=["F", "G", "dF", "dG"])
# the gradient needs to be implemented again!
assert _dF is not None and _dG is not None
F, G, dF, dG = load_file("f"), load_file("g"), load_file("df"), load_file("dg")
dF = dF.reshape(_dF.shape)
np.testing.assert_almost_equal(F, _F, decimal=5)
np.testing.assert_almost_equal(dF, _dF, decimal=5)
if problem.n_constr > 0:
G = G[:, :problem.n_constr]
dG = dG[:, :problem.n_constr * problem.n_var].reshape(_dG.shape)
np.testing.assert_almost_equal(G, _G, decimal=5)
np.testing.assert_almost_equal(dG, _dG, decimal=5)
# indices = np.random.permutation(X.shape[0])[:100]
indices = np.arange(X.shape[0])
# load the correct results
kktpm = load_file("kktpm")[indices]
# calculate the KKTPM measure
# _kktpm, _ = KKTPM(var_bounds_as_constraints=True).calc(np.array([[4.8, 3.0]]), problem, **params)
# _kktpm, _ = KKTPM(var_bounds_as_constraints=True).calc(X[[55]], problem, rho=0, **params)
_kktpm = KKTPM().calc(X[indices], problem, **params)
error = np.abs(_kktpm[:, 0] - kktpm)
for i in range(len(error)):
if error[i] > 0.0001:
print("Error for ", str_problem)
print("index: ", i)
print("Error: ", error[i])
print("X", ",".join(np.char.mod('%f', X[i])))
print("Python: ", _kktpm[i])
print("Correct: ", kktpm[i])
# os._exit(1)
# make sure the results are almost equal
np.testing.assert_almost_equal(kktpm, _kktpm[:, 0], decimal=4)
print(str_problem, error.mean())
|
11551831
|
from globals import *
from percent_change import percent_change
def current_pattern(average_line, pattern_for_recognition: list):
"""
Create a pattern that will be compared to in-memory patterns formed by the last dots_for_pattern entries of the data
"""
# This pattern will be based on the last 10 elements of the data
for index in reversed(range(1, dots_for_pattern + 1)):
pattern = percent_change(average_line[- dots_for_pattern - 1],
average_line[- index])
pattern_for_recognition.append(pattern)
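# For reference, a common definition of the imported helper above -- an
# assumption, since the real `percent_change` lives in its own module: the
# change of `current` relative to `baseline`, expressed as a percentage.
def percent_change_sketch(baseline, current):
    if baseline == 0:
        return 0.0
    return (current - baseline) / abs(baseline) * 100.0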
|
11551839
|
from random import choice, random, sample
import numpy as np
import networkx as nx
from BanditAlg.BanditAlgorithms import ArmBaseStruct
import datetime
class LinUCBUserStruct:
def __init__(self, featureDimension,lambda_, userID, RankoneInverse = False):
self.userID = userID
self.d = featureDimension
self.A = lambda_*np.identity(n = self.d)
self.b = np.zeros(self.d)
self.AInv = np.linalg.inv(self.A)
self.UserTheta = np.zeros(self.d)
self.RankoneInverse = RankoneInverse
self.pta_max = 1
def updateParameters(self, updated_A, updated_b):
self.A += updated_A
self.b += updated_b
self.AInv = np.linalg.inv(self.A)
self.UserTheta = np.dot(self.AInv, self.b)
def getTheta(self):
return self.UserTheta
def getA(self):
return self.A
def getProb(self, alpha, article_FeatureVector):
mean = np.dot(self.UserTheta, article_FeatureVector)
var = np.sqrt(np.dot(np.dot(article_FeatureVector, self.AInv), article_FeatureVector))
pta = mean + alpha * var
if pta > self.pta_max:
pta = self.pta_max
#print self.UserTheta
#print article_FeatureVector
#print pta, mean, alpha*var
# if mean >0:
# print 'largerthan0', mean
return pta
class N_LinUCBAlgorithm:
def __init__(self, parameter, node_list, seed_size, oracle, dimension, alpha, lambda_ , feedback = 'edge'):
self.param = parameter
self.node_list = node_list
self.oracle = oracle
self.seed_size = seed_size
self.dimension = dimension
self.alpha = alpha
self.lambda_ = lambda_
self.feedback = feedback
self.users = [] #Nodes
self.Theta = np.zeros((len(node_list), dimension))
for idx, u in enumerate(self.node_list):
self.users.append(LinUCBUserStruct(dimension, lambda_ , u))
self.Theta[idx, :] = self.users[-1].UserTheta
def decide(self):
n = len(self.node_list)
MG = np.zeros((n, 2))
MG[:, 0] = np.arange(n)
influence_UCB = np.matmul(self.Theta, self.param[:, :self.dimension].T)
np.fill_diagonal(influence_UCB, 1)
        influence_UCB = np.clip(influence_UCB, 0, 1)
MG[:, 1] = np.sum(influence_UCB, axis=1)
# print('initialize time', datetime.datetime.now() - startTime)
S = []
args = []
temp = np.zeros(n)
prev_spread = 0
for k in range(self.seed_size):
MG = MG[MG[:,1].argsort()]
for i in range(0, n-k-1):
iStartTime = datetime.datetime.now()
select_node = int(MG[-1, 0])
MG[-1, 1] = np.sum(np.maximum(influence_UCB[select_node, :], temp)) - prev_spread
if MG[-1, 1] >= MG[-2, 1]:
prev_spread = prev_spread + MG[-1, 1]
break
else:
val = MG[-1, 1]
idx = np.searchsorted(MG[:, 1], val)
MG_new = np.zeros(MG.shape)
MG_new[:idx, :] = MG[:idx, :]
MG_new[idx, :] = MG[-1, :]
MG_new[idx+1: , :] = MG[idx:-1, :]
MG = MG_new
args.append(int(MG[-1, 0]))
S.append(self.node_list[int(MG[-1, 0])])
temp = np.amax(influence_UCB[np.array(args), :], axis=0)
MG[-1, 1] = -1
return S
def updateParameters(self, S, live_nodes, live_edges, _iter):
A_item = np.array([self.node_list.index(x) for x in self.node_list if x not in S])
b_item = np.array([self.node_list.index(x) for x in live_nodes if x not in S])
update_A = self.param[A_item, :self.dimension]
add_A = np.sum(np.matmul(update_A[:, :, np.newaxis], update_A[:, np.newaxis,:]), axis=0)
add_b = np.sum(self.param[b_item, :self.dimension], axis=0)
for u in S:
u_idx = self.node_list.index(u)
self.users[u_idx].updateParameters(add_A, add_b)
self.Theta[u_idx, :] = self.users[u_idx].UserTheta
def getCoTheta(self, userID):
return self.users[userID].UserTheta
def getP(self):
return self.currentP
class LinUCBAlgorithm:
def __init__(self, G, seed_size, oracle, dimension, alpha, lambda_ , FeatureDic, feedback = 'edge'):
self.G = G
self.oracle = oracle
self.seed_size = seed_size
self.dimension = dimension
self.alpha = alpha
self.lambda_ = lambda_
self.FeatureDic = FeatureDic
self.feedback = feedback
self.currentP =nx.DiGraph()
self.USER = LinUCBUserStruct(dimension, lambda_ , 0)
for u in self.G.nodes():
for v in self.G[u]:
self.currentP.add_edge(u,v, weight=0)
def decide(self):
S = self.oracle(self.G, self.seed_size, self.currentP)
return S
def updateParameters(self, S, live_nodes, live_edges):
for u in S:
for (u, v) in self.G.edges(u):
featureVector = self.FeatureDic[(u,v)]
if (u,v) in live_edges:
reward = live_edges[(u,v)]
else:
reward = 0
                featureVector = np.asarray(featureVector)
                # The user struct takes A/b increments, so build them from the edge feature and its reward.
                self.USER.updateParameters(np.outer(featureVector, featureVector), reward * featureVector)
self.currentP[u][v]['weight'] = self.USER.getProb(self.alpha, featureVector)
def getCoTheta(self, userID):
return self.USER.UserTheta
def getP(self):
return self.currentP
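# Minimal sketch of the per-node LinUCB update/score cycle used by the classes
# above. The toy feature vector and reward are hypothetical, not part of the
# original experiments.
if __name__ == "__main__":
    d = 4
    user = LinUCBUserStruct(featureDimension=d, lambda_=1.0, userID=0)
    x = np.ones(d) / d
    reward = 1.0
    # The struct takes the A/b increments directly: the outer product of the
    # feature vector and the reward-weighted feature vector.
    user.updateParameters(np.outer(x, x), reward * x)
    print(user.getProb(alpha=0.1, article_FeatureVector=x))  # UCB score, capped at pta_max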
|
11551852
|
import asyncio
from typing import AsyncGenerator, Any, Type, List
from datetime import timedelta
from deepdiff import DeepDiff
from pytest import fixture, mark
from core.message_bus import MessageBus, Message, Event, Action, ActionDone, ActionError
from core.model.typed_model import to_js, from_js
from core.util import AnyT, utc, first
@fixture
def message_bus() -> MessageBus:
return MessageBus()
@fixture
async def all_events(message_bus: MessageBus) -> AsyncGenerator[List[Message], None]:
events: List[Message] = []
async def gather_events() -> None:
async with message_bus.subscribe("test") as event_queue:
while True:
events.append(await event_queue.get())
run_gather = asyncio.create_task(gather_events())
try:
yield events
finally:
run_gather.cancel()
async def wait_for_message(
all_events: List[Message], message_type: str, t: Type[AnyT], timeout: timedelta = timedelta(seconds=1)
) -> AnyT:
stop_at = utc() + timeout
async def find() -> AnyT:
result = first(lambda m: isinstance(m, t) and m.message_type == message_type, all_events) # type: ignore
if result:
return result # type: ignore
elif utc() > stop_at:
raise TimeoutError()
else:
await asyncio.sleep(0.1)
return await find()
return await find()
@mark.asyncio
async def test_handler(message_bus: MessageBus) -> None:
foos: List[Message] = []
blas: List[Message] = []
async def emit() -> None:
await message_bus.emit(Event("foo"))
await message_bus.emit(Event("foo"))
await message_bus.emit(Event("bla"))
await message_bus.emit(Event("bar"))
async def wait_for(name: str, list: List[Message]) -> None:
async with message_bus.subscribe("test", [name]) as events:
while True:
list.append(await events.get())
foo_t = asyncio.create_task(wait_for("foo", foos))
bla_t = asyncio.create_task(wait_for("bla", blas))
await asyncio.sleep(0.1)
await emit()
await asyncio.sleep(0.1)
assert len(foos) == 2
assert len(blas) == 1
foo_t.cancel()
await emit()
await asyncio.sleep(0.1)
assert len(foos) == 2
assert len(blas) == 2
bla_t.cancel()
def test_message_serialization() -> None:
roundtrip(Event("test", {"a": "b", "c": 1, "d": "bla"}))
roundtrip(Action("test", "123", "step_name"))
roundtrip(Action("test", "123", "step_name", {"test": 1}))
roundtrip(ActionDone("test", "123", "step_name", "sub"))
roundtrip(ActionDone("test", "123", "step_name", "sub", {"test": 1}))
roundtrip(ActionError("test", "123", "step_name", "sub", "oops"))
roundtrip(ActionError("test", "123", "step_name", "sub", "oops", {"test": 23}))
def roundtrip(obj: Any) -> None:
js = to_js(obj)
again = from_js(js, type(obj))
assert DeepDiff(obj, again) == {}, f"Json: {js} serialized as {again}"
|
11551861
|
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import Conv2d, ShapeSpec, cat, get_norm
from detectron2.utils.registry import Registry
import numpy as np
import fvcore.nn.weight_init as weight_init
from .cross_fetaure_affinity_pooling import CrossFeatureAffinityPooling
from .spatial_attention import SpatialAttention
ROI_CONTACT_HEAD_REGISTRY = Registry("ROI_CONTACT_HEAD")
ROI_CONTACT_HEAD_REGISTRY.__doc__ = """
Registry for contact heads, which make contact predictions from per-region features.
The registered object will be called with obj(cfg, input_shape).
"""
def contact_loss(pred_raw_scores, instances, pos_weight, device):
"""
Compute the binary cross-entropy loss (Multi-label class loss)
Args:
pred_raw_scores (Tensor): A tensor of shape (B, num_cats), where B is the
total number of predicted contacts in all the images, num_cats is the total
number of possible contact states.
instances (list[Instances]): A list of N Instances, where N is the number of images
in the batch. These instances are in 1:1
correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask,
cats) associated with each instance are stored in fields.
Returns:
contact_loss (Tensor): A scalar tensor containing the loss.
"""
gt_cats = []
for instances_per_image in instances:
if len(instances_per_image) == 0:
continue
gt_cats.append(instances_per_image.gt_cats)
if len(gt_cats) == 0:
return pred_raw_scores.sum() * 0
gt_cats = cat(gt_cats, dim=0)
# Do not consider categories marked "unsure (marked by 2)"
row_mask = (gt_cats < 2).sum(dim=1) == 4
gt_cats = gt_cats[row_mask, :]
pred_raw_scores = pred_raw_scores[row_mask, :]
if not gt_cats.shape[0]:
return pred_raw_scores.sum() * 0
if pos_weight:
pos_weight = torch.FloatTensor(pos_weight).to(device)
else:
pos_weight = None
contact_loss = F.binary_cross_entropy_with_logits(pred_raw_scores, gt_cats, pos_weight=pos_weight)
return contact_loss
def contact_head_inference(pred_raw_scores, pred_instances):
"""
Convert the raw scores of the contact head to sigmoid scores and add new
"pred_cats" field to pred_instances.
Args:
pred_raw_scores (Tensor): A tensor of shape (B, num_cats), where B is the
total number of predicted contact states in all the images, num_cats
is the total number of possible contact states.
pred_instances (list[Instances]): A list of N Instances, where N is the
number of images in the batch.
Returns:
None. pred_instances will contain an extra "pred_cats" field storing a Tensor
of shape (num_cats) for predicted class
"""
pred_cats = pred_raw_scores.sigmoid()
num_boxes_per_image = [len(i) for i in pred_instances]
pred_cats = pred_cats.split(num_boxes_per_image, dim=0)
for cats, instances in zip(pred_cats, pred_instances):
instances.pred_cats = cats
@ROI_CONTACT_HEAD_REGISTRY.register()
class ContactHead(nn.Module):
"""
A head with several fc layers (each followed by relu if there is more than one FC)
"""
def __init__(self, cfg, input_shape: ShapeSpec):
super(ContactHead, self).__init__()
hand_fcs = cfg.MODEL.ROI_CONTACT_HEAD.HAND_FCS
hand_object_fcs = cfg.MODEL.ROI_CONTACT_HEAD.HAND_OBJECT_FCS
cross_attention_fcs = cfg.MODEL.ROI_CONTACT_HEAD.CROSS_ATTENTION_FCS
project_dims = cfg.MODEL.ROI_CONTACT_HEAD.PROJECT_DIMS
num_spatial_attns = cfg.MODEL.ROI_CONTACT_HEAD.NUM_SPATIAL_ATTENTION
self.pos_weight = cfg.MODEL.ROI_CONTACT_HEAD.POS_WEIGHT
self.device = cfg.MODEL.DEVICE
assert len(hand_fcs) > 0
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
self.hand_fcs = []
for k, fc_dim in enumerate(hand_fcs):
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("fc_hand_{}".format(k + 1), fc)
self.hand_fcs.append(fc)
self._output_size = fc_dim
for layer in self.hand_fcs:
weight_init.c2_xavier_fill(layer)
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
self.hand_object_fcs = []
for k, fc_dim in enumerate(hand_object_fcs):
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("fc_hand_object_{}".format(k + 1), fc)
self.hand_object_fcs.append(fc)
self._output_size = fc_dim
for layer in self.hand_object_fcs:
weight_init.c2_xavier_fill(layer)
self.cross_attention = CrossFeatureAffinityPooling(input_shape.channels)
self._output_size = (input_shape.channels, input_shape.height, input_shape.width)
self.cross_attention_fcs = []
for k, fc_dim in enumerate(cross_attention_fcs):
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("cross_attention_fc{}".format(k + 1), fc)
self.cross_attention_fcs.append(fc)
self._output_size = fc_dim
for layer in self.cross_attention_fcs:
weight_init.c2_xavier_fill(layer)
self.spatial_attention = SpatialAttention(input_shape.channels, num_spatial_attns)
self.project_fcs = []
for k, fc_dim in enumerate(project_dims):
if k==0:
fc = nn.Linear(np.prod(self._output_size*3), fc_dim)
else:
fc = nn.Linear(np.prod(self._output_size), fc_dim)
self.add_module("project_fc{}".format(k + 1), fc)
self.project_fcs.append(fc)
self._output_size = fc_dim
for layer in self.project_fcs:
weight_init.c2_xavier_fill(layer)
        self.classifier = nn.Linear(self._output_size, 4)
weight_init.c2_xavier_fill(self.classifier)
def forward(self, hand_features, hand_object_features, instances):
"""
Returns:
A dict of losses in training. The predicted "instances" in inference.
"""
K_cross_attention_hands = []
num_hands = hand_features.shape[0]
for i in range(num_hands):
if hand_object_features[i].shape[0]:
hand_ftr = hand_features[i:i+1].repeat(hand_object_features[i].shape[0], 1, 1, 1) # [M_i, 7, 7, 256]
hand_obj_ftr = hand_object_features[i] #[M_i, 7, 7, 256]
cross_attn_ftrs = self.cross_attention(hand_ftr, hand_obj_ftr) #[M_i, 7, 7, 256]
K_cross_attention_hands.append(cross_attn_ftrs)
else:
hand_ftr = hand_features[i:i+1]
K_cross_attention_hands.append(hand_ftr)
K_cross_attention_hands_fcs = []
for feature in K_cross_attention_hands:
feature = torch.flatten(feature, start_dim=1) #[M_i, -1]
for layer in self.cross_attention_fcs:
feature = F.relu(layer(feature))
K_cross_attention_hands_fcs.append(feature)
hand_features = torch.flatten(hand_features, start_dim=1) #[K, -1]
for layer in self.hand_fcs:
hand_features = F.relu(layer(hand_features))
K_spatial_attention_scores = []
for feature in hand_object_features:
scores = self.spatial_attention(feature) #[M_i, 4]
K_spatial_attention_scores.append(scores)
K_hand_object_features = []
for feature in hand_object_features:
feature = torch.flatten(feature, start_dim=1) #[M, -1]
for layer in self.hand_object_fcs:
feature = F.relu(layer(feature))
K_hand_object_features.append(feature)
num_hands = hand_features.shape[0]
K_scores = []
for i in range(num_hands):
if hand_object_features[i].shape[0]:
hand_ftr = hand_features[i:i+1].repeat(hand_object_features[i].shape[0], 1) #[M_i, -1]
projected_features = torch.cat(
[
hand_ftr, K_cross_attention_hands_fcs[i], K_hand_object_features[i]
], dim=1
) #[M_i, -1]
for layer in self.project_fcs:
projected_features = layer(projected_features)
scores = self.classifier(projected_features) #[M_i, 4]
scores = scores + K_spatial_attention_scores[i]
                scores = torch.max(scores, dim=0).values.unsqueeze(0) # MIL with max operation, shape [1, 4]
K_scores.append(scores)
else:
projected_features = torch.cat([hand_features[i:i+1] for j in range(3)], dim=1)
for layer in self.project_fcs:
projected_features = layer(projected_features)
scores = self.classifier(projected_features)
K_scores.append(scores)
if num_hands:
out = torch.cat(K_scores, dim=0)
else:
out = hand_features.view(0, 4)
if self.training:
return {"loss_contact": contact_loss(out, instances, self.pos_weight, self.device)}
else:
contact_head_inference(out, instances)
return instances
def build_contact_head(cfg, input_shape):
"""
Build a contact head defined by `cfg.MODEL.ROI_CONTACT_HEAD.NAME`
"""
name = cfg.MODEL.ROI_CONTACT_HEAD.NAME
return ROI_CONTACT_HEAD_REGISTRY.get(name)(cfg, input_shape)
|
11551862
|
from selenium import webdriver
import requests
import os
import hashlib
from PIL import Image
import time
import io
def get_image_urls(wd:webdriver, search_url):
def scroll_to_end(wd):
wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
wd.get(search_url)
time.sleep(2)
image_urls = []
count = 0
time.sleep(15)
scroll_to_end(wd)
tmp = ['key']
    # keep scrolling while a "load more" button is present
while(len(tmp)!=0):
tmp = wd.find_elements_by_class_name("loadMore")
time.sleep(15)
thumbnail_results = wd.find_elements_by_class_name("image-placeholder")
num_results = len(thumbnail_results)
print(f"Found {num_results} search result. Getting source of {num_results}:..")
for img in thumbnail_results:
count += 1
img_link = img.get_attribute('src')
print(f"image: {img_link}")
img_link = img_link[:img_link.index("_d")] + ".jpeg"
if img_link not in image_urls:
image_urls.append(img_link)
print(f"{count+1}:{img.get_attribute('src')}")
tmp = wd.find_elements_by_class_name("loadMore")
if len(tmp)!=0:
tmp[0].click()
scroll_to_end(wd)
return image_urls
def persist_image(folder_path, query, url, count):
try:
image_content = requests.get(url).content
    except Exception as e:
        print(f"ERROR - Could not download {url} - {e}")
        return
try:
image_file = io.BytesIO(image_content)
image = Image.open(image_file).convert('RGB')
# folder_path = os.path.join(folder_path)
if os.path.exists(folder_path):
file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
else:
os.mkdir(folder_path)
file_path = os.path.join(folder_path,hashlib.sha1(image_content).hexdigest()[:10] + '.jpg')
with open(file_path, 'wb') as f:
image.save(f, "JPEG", quality=85)
print(f"SUCCESS - saved {url} - as {file_path}")
except Exception as e:
print(f"ERROR - Could not save {url} - {e}")
if __name__ == '__main__':
# wd = webdriver.Chrome(executable_path='/chromedriver')
query = input("Enter query to download...")
# add your path to chrome webdriver here
wd = webdriver.Chrome(executable_path="/usr/lib/chromium-browser/chromedriver")
links = get_image_urls(wd, query)
print(f"image ulrls: {links}")
wd.quit()
if not os.path.exists('pics/'):
os.makedirs("pics")
count = 0
for img in links:
count += 1
persist_image("pics/", query, img, count)
wd.quit()
|
11551872
|
import nbformat
def empty_notebook(fname):
with open(fname, 'r', encoding='utf-8') as fp:
nb = nbformat.read(fp, as_version=4)
for cell in nb.cells:
if cell['cell_type'] == 'code':
source = cell['source']
if '# aeropython: preserve' in source:
continue
elif 'Image(url=' in source:
continue
elif 'HTML(' in source:
continue
else:
# Don't preserve cell
cell['outputs'].clear()
cell['execution_count'] = None
cell['source'] = '\n'.join([l for l in source.splitlines() if l.startswith('#')])
return nb
if __name__ == '__main__':
import glob
import os.path
if os.path.isdir('notebooks_completos'):
prepath = '.'
elif os.path.isdir(os.path.join('..','notebooks_completos')):
prepath = '..'
else: raise OSError('Carpeta de notebooks no encontrada')
vacios_path = os.path.join(prepath , 'notebooks_vacios')
completos_path = os.path.join(prepath , 'notebooks_completos')
if not os.path.isdir(vacios_path):
os.makedirs(vacios_path)
for fname in glob.glob(os.path.join(completos_path , '*.ipynb')):
new_fname = os.path.join(vacios_path, os.path.basename(fname))
with open(new_fname, 'w', encoding='utf-8') as fp:
nbformat.write(empty_notebook(fname), fp)
|
11551890
|
import os
import random, math
import torch
import numpy as np
import glob
import cv2
from tqdm import tqdm
from skimage import io
from ISP_implement import ISP
if __name__ == '__main__':
isp = ISP()
source_dir = './source/'
target_dir = './target/'
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
fns = glob.glob(os.path.join(source_dir, '*.png'))
patch_size = 256
for fn in tqdm(fns):
img_rgb = cv2.imread(fn)[:, :, ::-1] / 255.0
H = img_rgb.shape[0]
W = img_rgb.shape[1]
H_s = H // patch_size
W_s = W // patch_size
patch_id = 0
for i in range(H_s):
for j in range(W_s):
yy = i * patch_size
xx = j * patch_size
patch_img_rgb = img_rgb[yy:yy+patch_size, xx:xx+patch_size, :]
gt, noise, sigma = isp.noise_generate_srgb(patch_img_rgb)
sigma = np.uint8(np.round(np.clip(sigma * 15 , 0, 1) * 255)) # store in uint8
filename = os.path.basename(fn)
foldername = filename.split('.')[0]
out_folder = os.path.join(target_dir, foldername)
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
io.imsave(os.path.join(out_folder, 'GT_SRGB_%d_%d.png' % (i, j)), gt)
io.imsave(os.path.join(out_folder, 'NOISY_SRGB_%d_%d.png' % (i, j)), noise)
io.imsave(os.path.join(out_folder, 'SIGMA_SRGB_%d_%d.png' % (i, j)), sigma)
|
11551924
|
from django.test import TestCase
from django.core.exceptions import ValidationError
from wagtail.core.models import Site
from wagtailmenus.tests.models import LinkPage
class TestLinkPage(TestCase):
fixtures = ['test.json']
def setUp(self):
        # Create a few link pages for testing
site = Site.objects.select_related('root_page').get(is_default_site=True)
self.site = site
linkpage_to_page = LinkPage(
title='Find out about Spiderman',
link_page_id=30,
url_append='?somevar=value'
)
site.root_page.add_child(instance=linkpage_to_page)
# Check that the above page was saved and has field values we expect
self.assertTrue(linkpage_to_page.id)
self.assertTrue(linkpage_to_page.show_in_menus)
self.assertTrue(linkpage_to_page.show_in_menus_custom())
self.assertEqual(linkpage_to_page.get_sitemap_urls(), [])
self.linkpage_to_page = linkpage_to_page
linkpage_to_url = LinkPage(
title='Do a google search',
link_url="https://www.google.co.uk",
url_append='?somevar=value',
extra_classes='google external',
)
site.root_page.add_child(instance=linkpage_to_url)
# Check that the above page was saved and has field values we expect
self.assertTrue(linkpage_to_url.id)
self.assertTrue(linkpage_to_url.show_in_menus)
self.assertTrue(linkpage_to_url.show_in_menus_custom())
self.assertEqual(linkpage_to_url.get_sitemap_urls(), [])
self.linkpage_to_url = linkpage_to_url
linkpage_to_non_routable_page = LinkPage(
title='Go to this unroutable page',
link_page_id=2,
url_append='?somevar=value'
)
site.root_page.add_child(instance=linkpage_to_non_routable_page)
self.linkpage_to_non_routable_page = linkpage_to_non_routable_page
def test_url_methods(self):
# When linking to a page
self.assertEqual(
self.linkpage_to_page.relative_url(self.site),
'/superheroes/marvel-comics/spiderman/?somevar=value'
)
self.assertEqual(
self.linkpage_to_page.full_url,
'http://www.wagtailmenus.co.uk:8000/superheroes/marvel-comics/spiderman/?somevar=value'
)
# When linking to a non-routable page
self.assertEqual(self.linkpage_to_non_routable_page.relative_url(self.site), '')
self.assertEqual(self.linkpage_to_non_routable_page.full_url, '')
# When linking to a custom url
self.assertEqual(
self.linkpage_to_url.relative_url(self.site), 'https://www.google.co.uk?somevar=value'
)
self.assertEqual(
self.linkpage_to_url.full_url, 'https://www.google.co.uk?somevar=value'
)
def test_linkpage_visibility(self):
page_link_html = (
'<a href="/superheroes/marvel-comics/spiderman/?somevar=value">Find out about Spiderman</a>'
)
url_link_html = (
'<li class="google external"><a href="https://www.google.co.uk?somevar=value">Do a google search</a></li>'
)
# When the target page is live, both the 'Spiderman' and 'Google' link
# should appear
response = self.client.get('/')
self.assertContains(response, page_link_html, html=True)
self.assertContains(response, url_link_html, html=True)
# When the target page is not live, the linkpage shouldn't appear
target_page = self.linkpage_to_page.link_page
target_page.live = False
target_page.save()
response = self.client.get('/')
self.assertNotContains(response, page_link_html, html=True)
# When the target page isn't set to appear in menus, the linkpage
# shouldn't appear
target_page.live = True
target_page.show_in_menus = False
target_page.save()
response = self.client.get('/')
self.assertNotContains(response, page_link_html, html=True)
# When the target page is 'expired', the linkpage shouldn't appear
target_page.show_in_menus = True
target_page.expired = True
target_page.save()
response = self.client.get('/')
self.assertNotContains(response, page_link_html, html=True)
def test_linkpage_clean(self):
linkpage = self.linkpage_to_page
linkpage.link_url = 'https://www.rkh.co.uk/'
self.assertRaisesMessage(
ValidationError,
"Linking to both a page and custom URL is not permitted",
linkpage.clean
)
linkpage.link_url = ''
linkpage.link_page = None
self.assertRaisesMessage(
ValidationError,
"Please choose an internal page or provide a custom URL",
linkpage.clean
)
linkpage.link_page = linkpage
self.assertRaisesMessage(
ValidationError,
"A link page cannot link to another link page",
linkpage.clean
)
def test_linkpage_redirects_when_served(self):
response = self.client.get('/find-out-about-spiderman/')
self.assertRedirects(
response,
'/superheroes/marvel-comics/spiderman/?somevar=value'
)
|
11551948
|
import numpy as np
import tensorflow as tf
def deepfuse_triple_untied(inp1, inp2, inp3):
layers = tf.layers
leaky_relu=tf.nn.leaky_relu
with tf.variable_scope('DeepFuse'):
prepooled_feats = [[],[],[]]
num_downsample_layers = 4
branch_enc = []
num_feats = [32,64,128,256]
for inp, name, idx in zip([inp1, inp2, inp3], ['inp1_', 'inp2_', 'inp3_'], [0,1,2]):
conv = inp
for i in range(num_downsample_layers):
conv = layers.conv2d(conv, num_feats[i], kernel_size = 3, strides = 1, padding = 'SAME', activation = leaky_relu,
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'enc_conv_'+name+str(i))
prepooled_feats[idx].append(conv)
conv = layers.conv2d(conv, num_feats[i], kernel_size = 3, strides = 2, padding = 'SAME',activation = leaky_relu,
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'downsample_'+name+str(i))
branch_enc.append(conv)
merge=tf.concat([branch_enc[0],branch_enc[1], branch_enc[2]],axis=-1)
conv = layers.conv2d(merge, 256, kernel_size = 3, strides = 1, padding = 'SAME', activation = leaky_relu,
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'dec_conv1')
conv = tf.keras.layers.UpSampling2D((2,2))(conv)
merge = tf.concat([conv, prepooled_feats[0][-1],prepooled_feats[1][-1], prepooled_feats[2][-1]], axis=-1)
conv = layers.conv2d(merge, 128, kernel_size = 3, strides = 1, padding = 'SAME', activation = leaky_relu,
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'dec_conv2')
conv = tf.keras.layers.UpSampling2D((2,2))(conv)
merge = tf.concat([conv, prepooled_feats[0][-2],prepooled_feats[1][-2], prepooled_feats[2][-2]], axis=-1)
conv = layers.conv2d(merge, 64, kernel_size = 3, strides = 1, padding = 'SAME', activation = leaky_relu,
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'dec_conv3')
conv = tf.keras.layers.UpSampling2D((2,2))(conv)
merge = tf.concat([conv, prepooled_feats[0][-3],prepooled_feats[1][-3], prepooled_feats[2][-3]], axis=-1)
conv = layers.conv2d(merge, 32, kernel_size = 3, strides = 1, padding = 'SAME', activation = leaky_relu,
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'dec_conv4')
conv = tf.keras.layers.UpSampling2D((2,2))(conv)
merge = tf.concat([conv, prepooled_feats[0][-4],prepooled_feats[1][-4], prepooled_feats[2][-4]], axis=-1)
# conv = layers.conv2d(merge, 8, kernel_size = 3, strides = 1, padding = 'SAME', activation = leaky_relu,
# kernel_initializer = tf.contrib.layers.variance_scaling_initializer(), name = 'dec_conv5')
conv = layers.conv2d(merge,3,kernel_size=3,strides=1,padding='SAME',activation=tf.nn.sigmoid,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),name='fusion_output')
return tf.clip_by_value(conv, 0.0, 1.0)
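# Hypothetical graph-construction sketch. It assumes TensorFlow 1.x (the layers
# above rely on tf.variable_scope and tf.contrib initializers) and input sides
# divisible by 16 because of the four stride-2 downsampling convolutions.
if __name__ == "__main__":
    inp1 = tf.placeholder(tf.float32, [None, 256, 256, 3])
    inp2 = tf.placeholder(tf.float32, [None, 256, 256, 3])
    inp3 = tf.placeholder(tf.float32, [None, 256, 256, 3])
    fused = deepfuse_triple_untied(inp1, inp2, inp3)
    print(fused.shape)  # expected (?, 256, 256, 3)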
|
11552008
|
from . import encode
import numpy
def pygame_play(data, rate=44100):
''' Send audio array to pygame for playback
'''
import pygame
pygame.mixer.init(rate, -16, 1, 1024)
    sound = pygame.sndarray.make_sound(encode.as_int16(data))
length = sound.get_length()
sound.play()
pygame.time.wait(int(length * 1000))
pygame.mixer.quit()
def pygame_supported():
    ''' Return True if pygame playback is supported
    '''
    try:
        import pygame
    except ImportError:
        return False
    return True
def oss_play(data, rate=44100):
''' Send audio array to oss for playback
'''
import ossaudiodev
audio = ossaudiodev.open('/dev/audio','w')
formats = audio.getfmts()
if ossaudiodev.AFMT_S16_LE & formats:
# Use 16 bit if available
audio.setfmt(ossaudiodev.AFMT_S16_LE)
data = encode.as_int16(data)
elif ossaudiodev.AFMT_U8 & formats:
# Otherwise use 8 bit
audio.setfmt(ossaudiodev.AFMT_U8)
data = encode.as_uint8(data)
audio.speed(rate)
while len(data):
audio.write(data[:1024])
data = data[1024:]
audio.flush()
audio.sync()
audio.close()
def oss_supported():
    ''' Return True if oss playback is supported
    '''
    try:
        import ossaudiodev
    except ImportError:
        return False
    return True
def pyaudio_play(data, rate=44100):
''' Send audio array to pyaudio for playback
'''
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=rate, output=1)
    stream.write(data.astype(numpy.float32).tobytes())
stream.close()
p.terminate()
def pyaudio_supported():
    ''' Return True if pyaudio playback is supported
    '''
    try:
        import pyaudio
    except ImportError:
        return False
    return True
def play(data, rate=44100):
''' Send audio to first available playback method
'''
if pygame_supported():
return pygame_play(data, rate)
elif oss_supported():
return oss_play(data, rate)
elif pyaudio_supported():
return pyaudio_play(data, rate)
else:
raise Exception("No supported playback method found")
|
11552019
|
import numpy as np
def print_array(data):
datas = []
for i in data:
datas.append(i)
print(datas)
x = np.array([1, 2, 3])
print_array(x)
y = np.copy(x)
print_array(y)
x[0] = 10
print_array(x)
print_array(y)
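# Additional sketch: unlike np.copy, plain assignment and basic slicing share
# memory with the original array, so in-place edits show up through both names.
z = x        # same underlying buffer as x
v = x[:2]    # a view into x
x[1] = 20
print_array(z)  # reflects the change
print_array(v)  # reflects the change
print_array(y)  # the earlier copy is unaffected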
|
11552030
|
from .base import *
import os
import sys
DEBUG = False
ADMINS = (("ch1huizong", "<EMAIL>"),)
allowed_hosts = os.getenv('ALLOWED_HOSTS')
if allowed_hosts:
ALLOWED_HOSTS = allowed_hosts.split(",")
else:
print("ERROR ! Please Input ALLOWED_HOSTS env settings !")
sys.exit(1)
db_host = os.getenv("DB_HOST")
db_name = os.getenv("DB_NAME")
db_user = os.getenv("DB_USER")
db_password = os.getenv("DB_PASSWORD")
if db_host and db_name and db_user and db_password:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": db_name,
"HOST": db_host,
"USER": db_user,
"PASSWORD": <PASSWORD>,
}
}
else:
print("ERROR ! Check DB SETTINGS !")
sys.exit(1)
SECURE_SSL_REDIRECT = True
CSRF_COOKIE_SECURE = True
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
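# Example environment expected by this settings module (hypothetical values):
#   ALLOWED_HOSTS="example.com,www.example.com"
#   DB_HOST="db" DB_NAME="app" DB_USER="app" DB_PASSWORD="..."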
|
11552053
|
import copy
from typing import Any, Iterator, List, Union
import numpy as np
import torch
from detectron2.layers.roi_align import ROIAlign
from torchvision.ops import RoIPool
class MyMaps(object):
"""# NOTE: This class stores the maps (NOCS, coordinates map, pvnet vector
maps, offset maps, heatmaps) for all objects in one image, support cpu_only
option.
Attributes:
        tensor: float Tensor of N,C,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray], cpu_only: bool = True):
"""
Args:
tensor: float Tensor of N,C,H,W, representing N instances in the image.
cpu_only: keep the maps on cpu even when to(device) is called
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
assert tensor.dim() == 4, tensor.size()
self.image_size = tensor.shape[-2:]
self.tensor = tensor
self.cpu_only = cpu_only
def to(self, device: str, **kwargs) -> "MyMaps":
if not self.cpu_only:
return MyMaps(self.tensor.to(device, **kwargs), cpu_only=False)
else:
return MyMaps(self.tensor.to("cpu", **kwargs), cpu_only=True)
def to_device(self, device: str = "cuda", **kwargs) -> "MyMaps":
# force to device
return MyMaps(self.tensor.to(device, **kwargs), cpu_only=False)
def crop_and_resize(
self,
boxes: torch.Tensor,
map_size: int,
interpolation: str = "bilinear",
) -> torch.Tensor:
"""# NOTE: if self.cpu_only, convert boxes to cpu
Crop each map by the given box, and resize results to (map_size, map_size).
This can be used to prepare training targets.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each map
map_size (int): the size of the rasterized map.
interpolation (str): bilinear | nearest
Returns:
Tensor:
                A float tensor of shape (N, C, map_size, map_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
if self.cpu_only:
device = "cpu"
else:
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes.to(device)], dim=1) # Nx5
maps = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
# on cpu, speed compared to cv2?
if interpolation == "nearest":
op = RoIPool((map_size, map_size), 1.0)
elif interpolation == "bilinear":
op = ROIAlign((map_size, map_size), 1.0, 0, aligned=True)
else:
raise ValueError(f"Unknown interpolation type: {interpolation}")
output = op.forward(maps, rois)
return output
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "MyMaps":
"""
Returns:
MyMaps: Create a new :class:`MyMaps` by indexing.
The following usage are allowed:
1. `new_maps = maps[3]`: return a `MyMaps` which contains only one map.
2. `new_maps = maps[2:10]`: return a slice of maps.
3. `new_maps = maps[vector]`, where vector is a torch.BoolTensor
with `length = len(maps)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
        if isinstance(item, int):
            return MyMaps(self.tensor[item].unsqueeze(0), cpu_only=self.cpu_only)
        m = self.tensor[item]
        assert m.dim() == 4, "Indexing on MyMaps with {} returns a tensor with shape {}!".format(item, m.shape)
        return MyMaps(m, cpu_only=self.cpu_only)
def __iter__(self) -> torch.Tensor:
yield from self.tensor
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""Find maps that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each map is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
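# Hypothetical usage sketch: wrap per-instance maps in MyMaps and crop/resize
# them with the matching boxes (toy CPU tensors, not part of the original file).
if __name__ == "__main__":
    maps = MyMaps(torch.rand(2, 3, 64, 64), cpu_only=True)
    boxes = torch.tensor([[0.0, 0.0, 32.0, 32.0], [16.0, 16.0, 48.0, 48.0]])
    crops = maps.crop_and_resize(boxes, map_size=28, interpolation="bilinear")
    print(crops.shape)  # torch.Size([2, 3, 28, 28])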
|
11552070
|
import asyncio
import dragonfly as df
import title_menu, menu_utils, server, df_utils, game, letters
LOAD_GAME_MENU = 'loadGameMenu'
def validate_load_game_menu(menu):
return title_menu.get_submenu(menu, LOAD_GAME_MENU)
async def load_game(menu, game_idx: int):
button_index = game_idx
try:
btn = menu['slotButtons'][button_index]
except IndexError:
return
await menu_utils.click_component(btn)
async def delete_game(menu, game_idx: int):
button_index = game_idx
try:
btn = menu['deleteButtons'][button_index]
except IndexError:
return
await menu_utils.click_component(btn)
mapping = {
"[go] back": menu_utils.simple_click("backButton"),
"(yes | ok)": menu_utils.simple_click("okDeleteButton"),
"(no | cancel)": menu_utils.simple_click("cancelDeleteButton"),
"(load [game] | [load] game) <positive_index>": df_utils.async_action(load_game, "positive_index"),
"delete [game] <positive_index>": df_utils.async_action(delete_game, "positive_index"),
**menu_utils.scroll_commands()
}
def load_grammar():
grammar = menu_utils.build_menu_grammar("load_game_menu", mapping, validate_load_game_menu, extras=[df_utils.positive_index, df_utils.positive_num])
grammar.load()
|
11552097
|
from typing import Any, Dict, List, Optional
from dirty_models import ModelField, HashMapField, ArrayField, IntegerField, StringIdField
from . import BaseModelManager, BaseCollectionManager
from .contact import Contact, ContactManager
from ..models import BaseModel, DateTimeField
from ..driver import BaseWhalesongDriver
from ..results import Result, IteratorResult
class StatusV3(BaseModel):
"""
StatusV3 model
"""
unread_count = IntegerField()
"""
Unread statuses
"""
expire_ts = DateTimeField()
"""
Status expiration date
"""
contact = ModelField(model_class=Contact)
"""
Contact object
"""
last_received_key = StringIdField()
"""
Last encryption key received (¿?).
"""
read_keys = HashMapField(field_type=StringIdField())
class StatusV3Manager(BaseModelManager[StatusV3]):
"""
    StatusV3 manager. Allows managing a WhatsApp status.
.. attribute:: msgs
:class: `~whalesong.managers.message.MessageCollectionManager`
StatusV3 message collection manager.
.. attribute:: contact
:class: `~whalesong.managers.contact.ContactManager`
StatusV3 contact manager.
"""
MODEL_CLASS = StatusV3
def __init__(self, driver: BaseWhalesongDriver, manager_path: str = ''):
super(StatusV3Manager, self).__init__(driver=driver, manager_path=manager_path)
from .message import MessageCollectionManager
self.add_submanager('msgs', MessageCollectionManager(driver=self._driver,
manager_path=self._build_command('msgs')))
self.add_submanager('contact', ContactManager(driver=self._driver,
manager_path=self._build_command('contact')))
def send_read_status(self, message_id: str) -> Result[bool]:
"""
Mark a statusV3 as read.
:param message_id: Message serialized ID to be marked
"""
params = {
'messageId': message_id,
}
return self._execute_command('sendReadStatus', params)
class StatusV3CollectionManager(BaseCollectionManager):
"""
Manage a collection of StatusV3.
"""
MODEL_MANAGER_CLASS = StatusV3Manager
def get_unexpired(self, unread: bool = True) -> IteratorResult[StatusV3]:
"""
Get the read or unread StatusV3 collection
:param unread: List read or unread statuses
:return: List of StatusV3
"""
params = {
'unread': unread
}
return self._execute_command('getUnexpired', params, result_class=self.get_iterator_result_class())
def sync(self) -> Result[None]:
"""
Sync Statuses
:return: None
"""
return self._execute_command('sync')
def get_my_status(self) -> Result[StatusV3]:
"""
Get the own user StatusV3
:return: StatusV3 object
"""
return self._execute_command('getMyStatus', result_class=self.get_item_result_class())
|
11552181
|
import os
import pytest
import pandas as pd
from unittest.mock import patch, ANY
from pyrestcli.exceptions import ServerErrorException
from cartoframes.auth import Credentials
from cartoframes.data.observatory.catalog.entity import CatalogList
from cartoframes.data.observatory.catalog.dataset import Dataset
from cartoframes.data.observatory.catalog.repository.variable_repo import VariableRepository
from cartoframes.data.observatory.catalog.repository.variable_group_repo import VariableGroupRepository
from cartoframes.data.observatory.catalog.repository.dataset_repo import DatasetRepository
from cartoframes.data.observatory.catalog.subscription_info import SubscriptionInfo
from cartoframes.data.observatory.catalog.repository.constants import DATASET_FILTER
from .examples import (
test_dataset1, test_datasets, test_variables, test_variables_groups, db_dataset1, test_dataset2,
db_dataset2, test_subscription_info
)
from carto.do_dataset import DODataset
class TestDataset(object):
@patch.object(DatasetRepository, 'get_by_id')
def test_get_dataset_by_id(self, mocked_repo):
# Given
mocked_repo.return_value = test_dataset1
# When
dataset = Dataset.get(test_dataset1.id)
# Then
assert isinstance(dataset, object)
assert isinstance(dataset, Dataset)
assert dataset == test_dataset1
@patch.object(VariableRepository, 'get_all')
def test_get_variables_by_dataset(self, mocked_repo):
# Given
mocked_repo.return_value = test_variables
# When
variables = test_dataset1.variables
# Then
mocked_repo.assert_called_once_with({DATASET_FILTER: test_dataset1.id})
assert isinstance(variables, list)
assert isinstance(variables, CatalogList)
assert variables == test_variables
@patch.object(VariableGroupRepository, 'get_all')
def test_get_variables_groups_by_dataset(self, mocked_repo):
# Given
mocked_repo.return_value = test_variables_groups
# When
variables_groups = test_dataset1.variables_groups
# Then
mocked_repo.assert_called_once_with({DATASET_FILTER: test_dataset1.id})
assert isinstance(variables_groups, list)
assert isinstance(variables_groups, CatalogList)
assert variables_groups == test_variables_groups
def test_dataset_properties(self):
# Given
dataset = Dataset(db_dataset1)
# When
dataset_id = dataset.id
slug = dataset.slug
name = dataset.name
description = dataset.description
provider = dataset.provider
category = dataset.category
data_source = dataset.data_source
country = dataset.country
language = dataset.language
geography = dataset.geography
temporal_aggregation = dataset.temporal_aggregation
time_coverage = dataset.time_coverage
update_frequency = dataset.update_frequency
version = dataset.version
is_public_data = dataset.is_public_data
summary = dataset.summary
# Then
assert dataset_id == db_dataset1['id']
assert slug == db_dataset1['slug']
assert name == db_dataset1['name']
assert description == db_dataset1['description']
assert provider == db_dataset1['provider_id']
assert category == db_dataset1['category_id']
assert data_source == db_dataset1['data_source_id']
assert country == db_dataset1['country_id']
assert language == db_dataset1['lang']
assert geography == db_dataset1['geography_id']
assert temporal_aggregation == db_dataset1['temporal_aggregation']
assert time_coverage == db_dataset1['time_coverage']
assert update_frequency == db_dataset1['update_frequency']
assert version == db_dataset1['version']
assert is_public_data == db_dataset1['is_public_data']
assert summary == db_dataset1['summary_json']
def test_dataset_is_exported_as_series(self):
# Given
dataset = test_dataset1
# When
dataset_series = dataset.to_series()
# Then
assert isinstance(dataset_series, pd.Series)
assert dataset_series['id'] == dataset.id
def test_dataset_is_exported_as_dict(self):
# Given
dataset = Dataset(db_dataset1)
excluded_fields = ['summary_json']
expected_dict = {key: value for key, value in db_dataset1.items() if key not in excluded_fields}
# When
dataset_dict = dataset.to_dict()
# Then
assert isinstance(dataset_dict, dict)
assert dataset_dict == expected_dict
def test_dataset_is_represented_with_classname_and_slug(self):
# Given
dataset = Dataset(db_dataset1)
# When
dataset_repr = repr(dataset)
# Then
assert dataset_repr == "<Dataset.get('{id}')>".format(id=db_dataset1['slug'])
def test_dataset_is_printed_with_classname(self):
# Given
dataset = Dataset(db_dataset1)
# When
dataset_str = str(dataset)
# Then
assert dataset_str == "<Dataset.get('{id}')>".format(id=db_dataset1['slug'])
def test_summary_values(self):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.summary
# Then
assert summary == dataset.data['summary_json']
def test_summary_head(self):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.head()
# Then
assert isinstance(summary, pd.DataFrame)
def test_summary_tail(self):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.tail()
# Then
assert isinstance(summary, pd.DataFrame)
def test_summary_counts(self):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.counts()
# Then
assert isinstance(summary, pd.Series)
def test_summary_fields_by_type(self):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.fields_by_type()
# Then
assert isinstance(summary, pd.Series)
@patch.object(pd, 'set_option')
@patch.object(VariableRepository, 'get_all')
def test_summary_describe(self, mocked_repo, mocked_set):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.describe()
# Then
assert isinstance(summary, pd.DataFrame)
mocked_set.assert_called_once_with('display.float_format', ANY)
@patch.object(pd, 'set_option')
@patch.object(VariableRepository, 'get_all')
def test_summary_describe_custom_format(self, mocked_repo, mocked_set):
# Given
dataset = Dataset(db_dataset2)
# When
summary = dataset.describe(autoformat=False)
# Then
assert isinstance(summary, pd.DataFrame)
mocked_set.assert_not_called()
@patch.object(DatasetRepository, 'get_all')
def test_get_all_datasets(self, mocked_repo):
# Given
mocked_repo.return_value = test_datasets
# When
datasets = Dataset.get_all()
# Then
assert isinstance(datasets, list)
assert isinstance(datasets, CatalogList)
assert datasets == test_datasets
@patch.object(DatasetRepository, 'get_all')
def test_get_all_datasets_credentials(self, mocked_repo):
# Given
mocked_repo.return_value = test_datasets
credentials = Credentials('fake_user', '1<PASSWORD>')
# When
datasets = Dataset.get_all(credentials=credentials)
# Then
mocked_repo.assert_called_once_with(None, credentials)
assert isinstance(datasets, list)
assert isinstance(datasets, CatalogList)
assert datasets == test_datasets
@patch.object(DatasetRepository, 'get_all')
def test_get_all_datasets_credentials_without_do_enabled(self, mocked_repo):
# Given
def raise_exception(a, b):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mocked_repo.side_effect = raise_exception
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
Dataset.get_all(credentials=credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
def test_dataset_list_is_printed_with_classname_and_slugs(self):
# Given
datasets = CatalogList([test_dataset1, test_dataset2])
# When
datasets_str = str(datasets)
# Then
assert datasets_str == "[<Dataset.get('{id1}')>, <Dataset.get('{id2}')>]"\
.format(id1=db_dataset1['slug'], id2=db_dataset2['slug'])
def test_dataset_list_is_represented_with_classname_and_slugs(self):
# Given
datasets = CatalogList([test_dataset1, test_dataset2])
# When
datasets_repr = repr(datasets)
# Then
assert datasets_repr == "[<Dataset.get('{id1}')>, <Dataset.get('{id2}')>]"\
.format(id1=db_dataset1['slug'], id2=db_dataset2['slug'])
def test_datasets_items_are_obtained_as_dataset(self):
# Given
datasets = test_datasets
# When
dataset = datasets[0]
# Then
assert isinstance(dataset, Dataset)
assert dataset == test_dataset1
def test_datasets_are_exported_as_dataframe(self):
# Given
datasets = test_datasets
dataset = datasets[0]
expected_dataset_df = dataset.to_series()
del expected_dataset_df['summary_json']
# When
dataset_df = datasets.to_dataframe()
sliced_dataset = dataset_df.iloc[0]
# Then
assert isinstance(dataset_df, pd.DataFrame)
assert isinstance(sliced_dataset, pd.Series)
assert sliced_dataset.equals(expected_dataset_df)
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch.object(DatasetRepository, 'get_by_id')
@patch.object(DODataset, 'download_stream')
def test_dataset_download(self, mock_download_stream, mock_get_by_id, mock_subscription_ids):
# Given
mock_get_by_id.return_value = test_dataset1
dataset = Dataset.get(test_dataset1.id)
mock_download_stream.return_value = []
mock_subscription_ids.return_value = [test_dataset1.id]
credentials = Credentials('fake_user', '<PASSWORD>')
# Then
dataset.to_csv('fake_path', credentials)
os.remove('fake_path')
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch.object(DatasetRepository, 'get_by_id')
def test_dataset_not_subscribed_download_not_subscribed(self, mock_get_by_id, mock_subscription_ids):
# Given
mock_get_by_id.return_value = test_dataset2 # is private
dataset = Dataset.get(test_dataset2.id)
mock_subscription_ids.return_value = []
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
dataset.to_csv('fake_path', credentials)
# Then
assert str(e.value) == (
'You are not subscribed to this Dataset yet. '
'Please, use the subscribe method first.')
@patch.object(DatasetRepository, 'get_by_id')
@patch.object(DODataset, 'download_stream')
def test_dataset_download_not_subscribed_but_public(self, mock_download_stream, mock_get_by_id):
# Given
mock_get_by_id.return_value = test_dataset1 # is public
dataset = Dataset.get(test_dataset1.id)
mock_download_stream.return_value = []
credentials = Credentials('fake_user', '<PASSWORD>')
dataset.to_csv('fake_path', credentials)
os.remove('fake_path')
@patch.object(DatasetRepository, 'get_by_id')
@patch.object(DODataset, 'download_stream')
def test_dataset_download_without_do_enabled(self, mock_download_stream, mock_get_by_id):
# Given
mock_get_by_id.return_value = test_dataset1
dataset = Dataset.get(test_dataset1.id)
def raise_exception(limit=None, order_by=None, sql_query=None, add_geom=None, is_geography=None):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mock_download_stream.side_effect = raise_exception
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
dataset.to_csv('fake_path', credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
@patch('cartoframes.data.observatory.catalog.utils.display_existing_subscription_message')
def test_dataset_subscribe(self, mock_display_message, mock_display_form, mock_subscription_ids):
# Given
expected_id = db_dataset1['id']
expected_subscribed_ids = []
mock_subscription_ids.return_value = expected_subscribed_ids
credentials = Credentials('fake_user', '<PASSWORD>')
dataset = Dataset(db_dataset1)
# When
dataset.subscribe(credentials)
# Then
mock_subscription_ids.assert_called_once_with(credentials, 'dataset')
mock_display_form.assert_called_once_with(expected_id, 'dataset', credentials)
assert not mock_display_message.called
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
@patch('cartoframes.data.observatory.catalog.utils.display_existing_subscription_message')
def test_dataset_subscribe_existing(self, mock_display_message, mock_display_form, mock_subscription_ids):
# Given
expected_id = db_dataset1['id']
expected_subscribed_ids = [expected_id]
mock_subscription_ids.return_value = expected_subscribed_ids
credentials = Credentials('fake_user', '<PASSWORD>')
dataset = Dataset(db_dataset1)
# When
dataset.subscribe(credentials)
# Then
mock_subscription_ids.assert_called_once_with(credentials, 'dataset')
mock_display_message.assert_called_once_with(expected_id, 'dataset')
assert not mock_display_form.called
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
@patch('cartoframes.auth.defaults.get_default_credentials')
def test_dataset_subscribe_default_credentials(self, mock_credentials, mock_display_form, mock_subscription_ids):
# Given
expected_credentials = Credentials('fake_user', '1234')
mock_credentials.return_value = expected_credentials
dataset = Dataset(db_dataset1)
# When
dataset.subscribe()
# Then
mock_subscription_ids.assert_called_once_with(expected_credentials, 'dataset')
mock_display_form.assert_called_once_with(db_dataset1['id'], 'dataset', expected_credentials)
def test_dataset_subscribe_wrong_credentials(self):
# Given
wrong_credentials = 1234
dataset = Dataset(db_dataset1)
# When
with pytest.raises(ValueError) as e:
dataset.subscribe(wrong_credentials)
# Then
assert str(e.value) == ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
@patch('cartoframes.data.observatory.catalog.subscriptions.get_subscription_ids')
@patch('cartoframes.data.observatory.catalog.utils.display_subscription_form')
def test_dataset_subscribe_without_do_enabled(self, mock_display_form, mock_subscription_ids):
# Given
def raise_exception(a, b, c):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mock_display_form.side_effect = raise_exception
dataset = Dataset(db_dataset1)
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
dataset.subscribe(credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
@patch('cartoframes.data.observatory.catalog.subscription_info.fetch_subscription_info')
def test_dataset_subscription_info(self, mock_fetch):
# Given
mock_fetch.return_value = test_subscription_info
credentials = Credentials('fake_user', '<PASSWORD>')
dataset = Dataset(db_dataset1)
# When
info = dataset.subscription_info(credentials)
# Then
mock_fetch.assert_called_once_with(db_dataset1['id'], 'dataset', credentials)
assert isinstance(info, SubscriptionInfo)
assert info.id == test_subscription_info['id']
assert info.estimated_delivery_days == test_subscription_info['estimated_delivery_days']
assert info.subscription_list_price == test_subscription_info['subscription_list_price']
assert info.tos == test_subscription_info['tos']
assert info.tos_link == test_subscription_info['tos_link']
assert info.licenses == test_subscription_info['licenses']
assert info.licenses_link == test_subscription_info['licenses_link']
assert info.rights == test_subscription_info['rights']
assert str(info) == 'Properties: id, estimated_delivery_days, ' + \
'subscription_list_price, tos, tos_link, ' + \
'licenses, licenses_link, rights'
@patch('cartoframes.data.observatory.catalog.subscription_info.fetch_subscription_info')
@patch('cartoframes.auth.defaults.get_default_credentials')
def test_dataset_subscription_info_default_credentials(self, mock_credentials, mock_fetch):
# Given
expected_credentials = Credentials('fake_user', '<PASSWORD>')
mock_credentials.return_value = expected_credentials
dataset = Dataset(db_dataset1)
# When
dataset.subscription_info()
# Then
mock_fetch.assert_called_once_with(db_dataset1['id'], 'dataset', expected_credentials)
def test_dataset_subscription_info_wrong_credentials(self):
# Given
wrong_credentials = 1234
dataset = Dataset(db_dataset1)
# When
with pytest.raises(ValueError) as e:
dataset.subscription_info(wrong_credentials)
# Then
assert str(e.value) == ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
@patch('cartoframes.data.observatory.catalog.subscription_info.fetch_subscription_info')
def test_dataset_subscription_info_without_do_enabled(self, mock_fetch):
# Given
def raise_exception(a, b, c):
raise ServerErrorException(['The user does not have Data Observatory enabled'])
mock_fetch.side_effect = raise_exception
dataset = Dataset(db_dataset1)
credentials = Credentials('fake_user', '<PASSWORD>')
# When
with pytest.raises(Exception) as e:
dataset.subscription_info(credentials)
# Then
assert str(e.value) == (
'We are sorry, the Data Observatory is not enabled for your account yet. '
'Please contact your customer success manager or send an email to '
'<EMAIL> to request access to it.')
|
11552186
|
import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="type", parent_name="layout.updatemenu", **kwargs):
super(TypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["dropdown", "buttons"]),
**kwargs
)
|
11552217
|
import base64
import hashlib
import logging
import random
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.urls import reverse
from accounts.events import post_group_membership_updates
from . import SESSION_KEY
logger = logging.getLogger("zentral.realms.utils")
try:
random = random.SystemRandom()
except NotImplementedError:
logger.warning('No secure pseudo random number generator available.')
def get_realm_user_mapped_groups(realm_user):
mapped_groups = set([])
claims = realm_user.claims
if "ava" in claims:
# special case for SAML
claims = claims["ava"]
for realm_group_mapping in realm_user.realm.realmgroupmapping_set.select_related("group").all():
claim_values = claims.get(realm_group_mapping.claim)
if not isinstance(claim_values, list):
claim_values = [claim_values]
for v in claim_values:
if not isinstance(v, str):
v = str(v)
if v == realm_group_mapping.value:
mapped_groups.add(realm_group_mapping.group)
break
return mapped_groups
def _update_remote_user_groups(request, realm_user):
user = request.user
if not user.is_remote:
return
mapped_groups = get_realm_user_mapped_groups(realm_user)
current_groups = set(user.groups.all())
if current_groups != mapped_groups:
user.groups.set(mapped_groups)
post_group_membership_updates(request, mapped_groups - current_groups, current_groups - mapped_groups)
def login_callback(request, realm_authentication_session, next_url=None):
"""
Realm authorization session callback used to log realm users in,
as Zentral users
"""
# login
realm_user = realm_authentication_session.user
user = authenticate(request=request, realm_user=realm_user)
if not user:
raise ValueError("Could not authenticate realm user")
# update session
# need to update the session before the login to be able to get the information from the auth signal
request.session[SESSION_KEY] = str(realm_authentication_session.pk)
login(request, user)
request.session.set_expiry(realm_authentication_session.computed_expiry())
# apply realm group mappings
_update_remote_user_groups(request, realm_user)
return next_url or settings.LOGIN_REDIRECT_URL
def test_callback(request, realm_authentication_session):
"""
Realm authorization session callback used to test the realm
"""
return reverse("realms:authentication_session",
args=(realm_authentication_session.realm.pk,
realm_authentication_session.pk))
def build_password_hash_dict(password):
# see https://developer.apple.com/documentation/devicemanagement/setautoadminpasswordcommand/command
# for the compatibility
    password = password.encode("utf-8")
salt = bytearray(random.getrandbits(8) for i in range(32))
iterations = 39999
# see https://github.com/micromdm/micromdm/blob/master/pkg/crypto/password/password.go macKeyLen !!!
# Danke github.com/groob !!!
dklen = 128
dk = hashlib.pbkdf2_hmac("sha512", password, salt, iterations, dklen=dklen)
return {
"SALTED-SHA512-PBKDF2": {
"entropy": base64.b64encode(dk).decode("ascii").strip(),
"salt": base64.b64encode(salt).decode("ascii").strip(),
"iterations": iterations
}
}
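# Hypothetical usage sketch (comment only, since this module lives inside a
# Django project): build_password_hash_dict("hunter2") returns a dict shaped like
# {"SALTED-SHA512-PBKDF2": {"entropy": "<b64 derived key>", "salt": "<b64 salt>", "iterations": 39999}},
# which mirrors the Apple SetAutoAdminPassword payload referenced above.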
|
11552227
|
from .response import Response
from .driver import Driver
__all__ = ["Response", "Driver"]
__version__ = "0.2.2"
|
11552286
|
import numpy as np
from dataset_specifications.real_dataset import RealDataset
from sklearn import preprocessing as skpp
import sklearn.datasets as skl_ds
class HousingSet(RealDataset):
def __init__(self):
super().__init__()
self.name = "housing"
self.requires_path = False
self.x_dim = 8
self.support = (0.,1.)
self.val_percent = 0.10
self.test_percent = 0.10
# California housing dataset
# Full dataset is available at http://lib.stat.cmu.edu/datasets/houses.zip
    # Here it is loaded from the scikit-learn library, so no file has to
# be manually downloaded
def preprocess(self, file_path):
x,y = skl_ds.fetch_california_housing(return_X_y=True)
loaded_data = np.concatenate((x, np.expand_dims(y, axis=1)), axis=1)
# Standardize all data
loaded_data = skpp.StandardScaler().fit(loaded_data).transform(loaded_data)
return loaded_data
|
11552301
|
hyper = {
"GCN": {
"model": {
"name": "GCN",
"inputs": [
{"shape": [None, 8710], "name": "node_attributes", "dtype": "float32", "ragged": True},
{"shape": [None, 1], "name": "edge_weights", "dtype": "float32", "ragged": True},
{"shape": [None, 2], "name": "edge_indices", "dtype": "int64", "ragged": True}],
"input_embedding": {"node": {"input_dim": 95, "output_dim": 64},
"edge": {"input_dim": 10, "output_dim": 64}},
"gcn_args": {"units": 140, "use_bias": True, "activation": "relu"},
"depth": 3, "verbose": 10,
"output_embedding": "node",
"output_mlp": {"use_bias": [True, True, False], "units": [140, 70, 70],
"activation": ["relu", "relu", "softmax"]},
},
"training": {
"fit": {
"batch_size": 1,
"epochs": 300,
"validation_freq": 10,
"verbose": 2,
"callbacks": [
{
"class_name": "kgcnn>LinearLearningRateScheduler", "config": {
"learning_rate_start": 1e-03, "learning_rate_stop": 1e-04, "epo_min": 260, "epo": 300,
"verbose": 0
}
}
]
},
"compile": {
"optimizer": {"class_name": "Adam", "config": {"lr": 1e-03}},
"loss": "categorical_crossentropy",
"weighted_metrics": ["categorical_accuracy"]
},
"cross_validation": {"class_name": "KFold",
"config": {"n_splits": 5, "random_state": None, "shuffle": True}},
},
"data": {
"dataset": {"class_name": "CoraDataset", "config": {}},
"methods": {
"make_undirected_edges": {},
"add_edge_self_loops": {},
"normalize_edge_weights_sym": {}
}
},
"info": {
"postfix": "",
"kgcnn_version": "2.0.0"
}
}
}
|
11552312
|
import lemoncheesecake.api as lcc
@lcc.suite("Suite 2")
class suite_2:
@lcc.test("Test 1")
@lcc.prop("priority", "low")
def test_1(self, fixt_7):
pass
@lcc.test("Test 2")
@lcc.prop("priority", "low")
def test_2(self, fixt_4):
pass
@lcc.test("Test 3")
@lcc.prop("priority", "high")
def test_3(self, fixt_7):
pass
@lcc.test("Test 4")
@lcc.prop("priority", "medium")
def test_4(self):
pass
@lcc.test("Test 5")
@lcc.prop("priority", "low")
def test_5(self, fixt_2):
pass
@lcc.test("Test 6")
@lcc.prop("priority", "low")
def test_6(self, fixt_2):
pass
@lcc.test("Test 7")
@lcc.prop("priority", "medium")
def test_7(self, fixt_3):
pass
@lcc.test("Test 8")
@lcc.prop("priority", "low")
@lcc.tags("slow")
@lcc.link("http://example.com/1234", "#1234")
def test_8(self, fixt_9):
pass
@lcc.test("Test 9")
@lcc.prop("priority", "medium")
@lcc.tags("slow")
def test_9(self):
pass
|
11552386
|
load("@bazel_skylib//:lib.bzl", "asserts", "unittest")
load("//lib:json_parser.bzl", "json_parse")
load(":json_parse_test_data.bzl", "get_pkg_jsons")
def _valid_json_parse_test(ctx):
env = unittest.begin(ctx)
asserts.equals(env, json_parse("[]"), [])
asserts.equals(
env,
json_parse('["x", "y", 22, [7], {"z": 1, "y": null}]'),
["x", "y", 22, [7], {"z": 1, "y": None}],
)
asserts.equals(
env,
" ".join(reversed(json_parse('["plain", "the", "on", "mainly", "falls", "spain", "in", "rain", "the"]'))),
"the rain in spain falls mainly on the plain",
)
asserts.equals(
env,
json_parse('["a", "b", "c", [1, 2, 3, [4], [5], [6]]]'),
["a", "b", "c", [1, 2, 3, [4], [5], [6]]],
)
asserts.equals(env, json_parse("{}"), {})
asserts.equals(env, json_parse('{"a" : "b"}'), {"a": "b"})
asserts.equals(
env,
json_parse('{"key1": [1, 2, ["nested"]], "key2": "val2", "key3": {"nested_key1" : [null, true, false]}}'),
{
"key1": [1, 2, ["nested"]],
"key2": "val2",
"key3": {
"nested_key1": [None, True, False],
},
},
)
asserts.equals(
env,
json_parse('{"key:with:colon" : [{ "nested:with:colon" : true }]}'),
{"key:with:colon": [{"nested:with:colon": True}]},
)
expected_escapes = {"escaped": r'\"quotes\"'}
# Ughh... need to double escape the escape.
asserts.equals(env, expected_escapes, json_parse('{"escaped" : "\\"quotes\\""}'))
# Unless it's a raw literal
asserts.equals(env, expected_escapes, json_parse(r'{"escaped" : "\"quotes\""}'))
asserts.equals(
env,
expected_escapes,
json_parse(r'''
{"escaped" : "\"quotes\""}
'''),
)
unittest.end(env)
def _scalar_types_test(ctx):
env = unittest.begin(ctx)
asserts.equals(env, json_parse('[""]')[0], "")
asserts.equals(env, json_parse('["a string"]')[0], "a string")
asserts.equals(env, json_parse("[true]")[0], True)
asserts.equals(env, json_parse("[false]")[0], False)
asserts.equals(env, json_parse("[null]")[0], None)
asserts.equals(env, json_parse("[100]")[0], 100)
asserts.equals(env, json_parse("[-100]")[0], -100)
unittest.end(env)
def _number_parse_test(ctx):
env = unittest.begin(ctx)
# :( this sucks, but technically it's legal JSON:
# https://tools.ietf.org/html/rfc8259#section-6
# "This specification allows implementations to set limits on the range
# and precision of numbers accepted."
asserts.equals(env, 2147483647, json_parse("[99e100]")[0]) # MAX int
asserts.equals(env, -2147483647, json_parse("[-99e100]")[0]) # MIN int
asserts.equals(env, 0, json_parse("[99e-10]")[0])
asserts.equals(env, 9, json_parse("[999e-2]")[0])
asserts.equals(env, 43, json_parse("[43.11]")[0])
asserts.equals(env, 0, json_parse("[0.12345]")[0])
asserts.equals(env, -120, json_parse("[-120.12345]")[0])
unittest.end(env)
def _max_depth_json_parse_test(ctx):
env = unittest.begin(ctx)
asserts.equals(
env,
json_parse("[[[[[[[[[[[[[[[[[[[[20]]]]]]]]]]]]]]]]]]]]"),
[[[[[[[[[[[[[[[[[[[[20]]]]]]]]]]]]]]]]]]]],
)
asserts.equals(
env,
json_parse('[[["too_deep"]]]', fail_on_invalid = False, max_depth = 2),
None,
)
unittest.end(env)
def _package_json_parse_test(ctx):
env = unittest.begin(ctx)
pkg_jsons_for_testing = get_pkg_jsons()
# Use rollup to spot check specific values.
rollup_pkg_json = json_parse(pkg_jsons_for_testing["rollup"])
asserts.equals(env, "0.57.1", rollup_pkg_json["version"])
asserts.equals(env, "dist/rollup.browser.js", rollup_pkg_json["files"][0])
asserts.equals(env, "<NAME> <<EMAIL>>", rollup_pkg_json["contributors"][0])
for project in pkg_jsons_for_testing:
print("checking %s/package.json" % project)
parsed_pkg_json = json_parse(pkg_jsons_for_testing[project])
asserts.equals(env, "dict", type(parsed_pkg_json))
unittest.end(env)
valid_json_parse_test = unittest.make(_valid_json_parse_test)
scalar_types_test = unittest.make(_scalar_types_test)
number_parse_test = unittest.make(_number_parse_test)
max_depth_json_parse_test = unittest.make(_max_depth_json_parse_test)
package_json_parse_test = unittest.make(_package_json_parse_test)
def json_parse_test_suite():
"""Creates the test targets and test suite for //lib:json_parse.bzl."""
unittest.suite(
"json_parse_tests",
valid_json_parse_test,
scalar_types_test,
number_parse_test,
max_depth_json_parse_test,
package_json_parse_test,
)
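# A hedged usage sketch (the package path below is an assumption, not taken from
# this repository): the suite is instantiated from a BUILD file, e.g.
#
#   load("//lib:json_parse_test.bzl", "json_parse_test_suite")
#
#   json_parse_test_suite()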
|
11552466
|
class Solution:
def minAddToMakeValid(self, S: str) -> int:
stack = [] # Creating a stack
if len(S)==0:
return 0
stack = [S[0]]
for i in range(1,len(S)): # Updating stacks according to the input
if stack and stack[-1]=='(' and S[i]==')':
stack.pop()
else:
stack.append(S[i])
return len(stack) # Returning result
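# A quick illustrative check (added note): every unmatched parenthesis left on
# the stack requires one insertion.
# >>> Solution().minAddToMakeValid("())")
# 1
# >>> Solution().minAddToMakeValid("(((")
# 3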
|
11552469
|
import unittest
from pynYNAB.schema import BudgetVersion, MasterCategory, SubCategory, Payee
from tests.common_mock import factory, MockConnection
class TestCommonMock(unittest.TestCase):
def setUp(self):
self.client = factory.create_client(budget_name='TestBudget',
connection=MockConnection(),
sync=False)
session = self.client.session
budget_version = BudgetVersion(version_name='TestBudget')
master_category = MasterCategory(name='master')
subcategory = SubCategory(name='Immediate Income',
internal_name='Category/__ImmediateIncome__',
entities_master_category=master_category)
payee = Payee(name='Starting Balance Payee', internal_name='StartingBalancePayee')
session.add(master_category)
session.add(subcategory)
session.add(payee)
self.client.catalog.ce_budget_versions.append(budget_version)
self.client.budget.be_master_categories.append(master_category)
self.client.budget.be_subcategories.append(subcategory)
self.client.budget.be_payees.append(payee)
session.commit()
self.client.budgetClient.clear_changed_entities()
self.client.catalogClient.clear_changed_entities()
self.client.budgetClient.device_knowledge_of_server = 0
self.client.catalogClient.device_knowledge_of_server = 0
self.client.budgetClient.current_device_knowledge = 0
self.client.catalogClient.current_device_knowledge = 0
|
11552477
|
import numpy as np
import math
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import backend as K
import gpflow
import pickle
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import NMF
from .graphnn import models as molan_model
from .graphnn import training
from .graphnn import mol2graph
import torch
from torch_geometric.data import DataLoader
from collections import OrderedDict
from rdkit import Chem
class DNN(keras.Model):
_n_layers = 1
_layer_size = 16
batch_size = 32
learning_rate = 0.0001
epochs = 500
seed = 9700
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.generate_fcn()
def generate_fcn(self):
self.pipeline = []
        for _ in range(self.n_layers):
self.pipeline.append(layers.BatchNormalization())
self.pipeline.append(layers.Dense(self.layer_size, activation='relu'))
self.pipeline.append(layers.BatchNormalization())
self.pipeline.append(layers.Dense(1, activation='linear'))
@property
def n_layers(self):
return self._n_layers
@n_layers.setter
def n_layers(self, value):
self._n_layers = value
self.generate_fcn()
@property
def layer_size(self):
return self._layer_size
@layer_size.setter
def layer_size(self, value):
self._layer_size = value
self.generate_fcn()
def call(self, inputs):
x = inputs
for layer in self.pipeline:
x = layer(x)
return x
def fit(self, x_train, y_train, **kwargs):
tf.random.set_seed(self.seed)
adam = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
super().build(input_shape=x_train.shape)
super().compile(optimizer=adam, loss='mse', metrics=['mse', 'mae'])
super().fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size, **kwargs)
class DNN_Mordred(DNN):
_n_layers = 4
_layer_size = 256
batch_size = 256
learning_rate = 0.01
epochs = 1000
class DNN_ECFP(DNN):
_n_layers = 1
_layer_size = 2048
batch_size = 512
learning_rate = 0.001
epochs = 1000
class RF:
    estimator = None  # set by fit() or load_weights(); predict() requires it
    seed = 9700
    n_estimators = 4096
    max_depth = 32
    min_samples_split = 2
    min_samples_leaf = 1
def fit(self, x_train, y_train):
np.random.seed(self.seed)
self.estimator = RandomForestRegressor(n_estimators = self.n_estimators,
max_depth = self.max_depth,
min_samples_split = self.min_samples_split,
min_samples_leaf = self.min_samples_leaf,
n_jobs=-1)
self.estimator.fit(x_train, y_train.ravel())
return self
def predict(self, x):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(x).reshape(-1,1)
def save_weights(self, fn):
with open(fn, 'wb') as file:
pickle.dump(self.estimator, file)
def load_weights(self, fn):
with open(fn, 'rb') as file:
self.estimator = pickle.load(file)
class RF_NMF_ECFP(RF):
def fit(self, x_train, y_train, seed=9700):
self.estimator = make_pipeline(
NMF(n_components=12, solver='mu', init='random', max_iter=500, random_state=0, alpha=.1, l1_ratio=.5),
RandomForestRegressor(min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_depth = self.max_depth,
n_estimators=self.n_estimators,
n_jobs=-1)
)
self.estimator.fit(x_train, y_train.ravel())
return self
class GP:
def fit(self, x_train, y_train):
kernels = []
i = 0
for no, (x, reducer, k) in enumerate(zip(x_train,
self.rf_feature_selectors,
self.rf_feature_reduce_to)):
indices = (-reducer.estimator.feature_importances_).argsort()[:k]
x_train[no] = x[:,indices]
kernels.append(gpflow.kernels.RBF(active_dims=i+np.arange(k)))
i += k
x_train = np.hstack(x_train)
kernel = gpflow.kernels.Sum(kernels)
self.model = gpflow.models.GPR(data=(x_train.astype(np.float64), y_train.astype(np.float64)), kernel=kernel,
mean_function=None)
opt = gpflow.optimizers.Scipy()
opt.minimize(lambda: -self.model.log_marginal_likelihood(), self.model.trainable_variables,
options={'maxiter': 500})
def predict(self, x_in):
for no, (x, reducer, k) in enumerate(zip(x_in,
self.rf_feature_selectors,
self.rf_feature_reduce_to)):
indices = (-reducer.estimator.feature_importances_).argsort()[:k]
x_in[no] = x[:,indices]
x = np.hstack(x_in)
return self.model.predict_y(x.astype(np.float64))[0]
def save_weights(self, fn):
checkpoint = tf.train.Checkpoint(a=self.model)
manager = tf.train.CheckpointManager(checkpoint, fn, max_to_keep=9999)
manager.save()
class SN_Mordred:
batch_size = 256
learning_rate = 0.004663515283240011
epochs = 1000
seed = 9700
def __init__(self, input_shape=None):
tf.random.set_seed(self.seed)
np.random.seed(self.seed)
x = layers.Input(shape=input_shape)
body = layers.BatchNormalization()(x)
body = layers.Dense(128, activation='relu')(body)
body = layers.BatchNormalization()(body)
body = layers.Dense(128, activation='relu')(body)
body = layers.BatchNormalization()(body)
body = layers.Dense(128, activation='relu')(body)
body = layers.BatchNormalization()(body)
prediction = layers.Dense(1, activation='linear', name='prediction')(body)
selection = layers.Dense(16, activation='relu')(body)
selection = layers.BatchNormalization()(selection)
selection = layers.Dense(1, activation='sigmoid', name='selection')(selection)
selection_out = layers.Concatenate(axis=1, name='selection_head')([prediction, selection])
auxiliary_out = layers.Dense(1, activation='linear', name='auxiliary_head')(body)
self.model = tf.keras.models.Model(inputs=x, outputs=[selection_out, auxiliary_out, body])
def fit(self, x_train, y_train, **kwargs):
tf.random.set_seed(self.seed)
np.random.seed(self.seed)
adam = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
def coverage(y_true, y_pred):
return K.mean(K.round(y_pred[:,1]))
def empirical_risk(y_true, y_pred):
loss = (y_true[:,0] - y_pred[:,0])**2
mse = K.mean(loss * y_pred[:,1])
emp_risk_num = mse
emp_risk_denom = K.mean(y_pred[:,1]) #K.mean(K.round(y_pred[:,1]))
return emp_risk_num / emp_risk_denom
def selective_loss(y_true, y_pred):
emp_risk = empirical_risk(y_true, y_pred)
cov = K.mean(y_pred[:,1])
            lamda = 32  # large penalty so coverage converges towards the target later in training
            # note: self.c_coverage (the target coverage) must be set on the instance before fit()
            loss = emp_risk + (lamda * K.maximum(self.c_coverage - cov, 0) ** 2)
return loss
self.model.compile(optimizer=adam, loss=[selective_loss, 'mse'], loss_weights=[0.5, 0.5],
metrics={'selection_head': [selective_loss, empirical_risk, coverage]})
self.model.fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size, **kwargs)
def predict(self, *args):
return self.model.predict(*args)
def save_weights(self, fn):
return self.model.save_weights(fn)
def load_weights(self, fn):
return self.model.load_weights(fn)
class GCN:
# Some code here taken directly from MOLAN
seed = 9700
conv_n_layers = 5
conv_base_size = 64
conv_ratio = 1.25
conv_batchnorm = True
conv_act = 'relu'
emb_dim = 100
emb_set2set = False
emb_act = 'softmax'
mlp_layers = 2
mlp_dim_ratio = 0.5
mlp_dropout = 0.15306049825909776
mlp_act = 'relu'
mlp_batchnorm = True
residual = False
learning_rate = 0.008117123009364938
batch_size = 64
epochs = 500
node_dim = mol2graph.n_atom_features()
edge_dim = mol2graph.n_bond_features()
def fit(self, x_train, y_train):
torch.manual_seed(self.seed)
hparams = OrderedDict([('conv_n_layers', self.conv_n_layers), ('conv_base_size', self.conv_base_size),
('conv_ratio', self.conv_ratio), ('conv_batchnorm', self.conv_batchnorm),
('conv_act', self.conv_act), ('emb_dim', self.emb_dim),
('emb_set2set', self.emb_set2set), ('emb_act', self.emb_act),
('mlp_layers', self.mlp_layers), ('mlp_dim_ratio', self.mlp_dim_ratio),
('mlp_dropout', self.mlp_dropout), ('mlp_act', self.mlp_act),
('mlp_batchnorm', self.mlp_batchnorm), ('residual', self.residual)])
hparams['lr'] = self.learning_rate
hparams['batch_size'] = self.batch_size
hparams['model'] = 'GCN'
x_train = [mol2graph.mol2torchdata(Chem.MolFromSmiles(smile)) for smile in x_train.flatten()]
for data, y in zip(x_train, y_train):
data.y = torch.tensor(y, dtype=torch.float)
loader = DataLoader(x_train, batch_size=self.batch_size,
shuffle=False, drop_last=True)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = molan_model.GCN(hparams, self.node_dim, self.edge_dim).to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=hparams['lr'])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode = 'min',
factor = 0.5,
patience = 20,
verbose = True)
for i in range(self.epochs):
print('Step %d/%d' % (i+1, self.epochs))
training.train_step(self.model, loader, optimizer, scheduler, self.device)
def predict(self, x_in):
# should drop_last=False
x_in = [mol2graph.mol2torchdata(Chem.MolFromSmiles(smile)) for smile in x_in.flatten()]
loader = DataLoader(x_in, batch_size=1,
shuffle=False, drop_last=False)
results = []
with torch.no_grad():
self.model.eval()
for data in loader:
data = data.to(self.device)
output = self.model(data)
results.extend(output.cpu().numpy())
return np.array(results).reshape(-1,1)
def save_weights(self, fn):
torch.save(self.model.state_dict(), fn)
def load_weights(self, fn):
hparams = OrderedDict([('conv_n_layers', self.conv_n_layers), ('conv_base_size', self.conv_base_size),
('conv_ratio', self.conv_ratio), ('conv_batchnorm', self.conv_batchnorm),
('conv_act', self.conv_act), ('emb_dim', self.emb_dim),
('emb_set2set', self.emb_set2set), ('emb_act', self.emb_act),
('mlp_layers', self.mlp_layers), ('mlp_dim_ratio', self.mlp_dim_ratio),
('mlp_dropout', self.mlp_dropout), ('mlp_act', self.mlp_act),
('mlp_batchnorm', self.mlp_batchnorm), ('residual', self.residual)])
hparams['lr'] = self.learning_rate
hparams['batch_size'] = self.batch_size
hparams['model'] = 'GCN'
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = molan_model.GCN(hparams, self.node_dim, self.edge_dim)
self.model.load_state_dict(torch.load(fn))
|
11552522
|
import asyncio
import json
from typing import IO, Any, Callable, Dict, List, Optional, Tuple, Type, Union
import aiohttp
import requests
API = 'https://api.zhconvert.org'
class FanhuajiEngine():
"""繁化姬轉換引擎"""
def _request(self, endpoint: str, payload: dict):
with requests.get(f'{API}{endpoint}', data=payload) as response:
if response.status_code == 200:
response.encoding = 'utf-8'
return json.loads(response.text)
raise RequestError(
f'zhconvert Request error. status code: {response.status_code}')
    async def _async_request(self, session, endpoint: str, payload: dict):
        async with session.get(f'{API}{endpoint}', data=payload) as response:
            if response.status == 200:
                # the shared session is left open here; the caller owns its lifetime
                return await response.json()
            raise AsyncRequestError(
                f'zhconvert AsyncRequest error. status code: {response.status}')
def _slice(self, content: str) -> Optional[List[str]]:
"""文字內容每 50,000 字進行一次切片處理
Args:
content ([str]): 文字內容
Returns:
Optional[List[str]]: 回傳為 list 且裡面為切片後的 str
"""
chunks = []
chunks_count = len(content)//50_000+1
for i in range(0, chunks_count):
chunks.append(content[50_000*i:50_000*(i+1)])
return chunks
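    # Illustrative behaviour (added note, not in the original source): a
    # 120,000-character string is cut into three chunks of 50,000, 50,000 and
    # 20,000 characters.
    # >>> [len(c) for c in FanhuajiEngine()._slice('x' * 120_000)]
    # [50000, 50000, 20000]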
def convert(self, **kwargs):
"""繁化姬轉換
API doc : https://docs.zhconvert.org/api/convert/
Arguments:
text : 欲轉換的文字\n\n
converter : 所要使用的轉換器。有 Simplified (簡體化)、 Traditional (繁體化)、
China (中國化)、 Taiwan (台灣化)、WikiSimplified (維基簡體化)、
WikiTraditional (維基繁體化)。\n\n
ignoreTextStyles : 由那些不希望被繁化姬處理的 "樣式" 以逗號分隔所組成的字串。
通常用於保護特效字幕不被轉換,
例如字幕組的特效字幕若是以 OPJP 與 OPCN 作為樣式名。
可以設定 "OPJP,OPCN" 來作保護。\n\n
jpTextStyles : 告訴繁化姬哪些樣式要當作日文處理(預設為伺服器端自動猜測)。
若要自行設定,則必須另外再加入 *noAutoJpTextStyles 這個樣式。
所有樣式以逗號分隔組成字串,
例如: "OPJP,EDJP,*noAutoJpTextStyles" 表示不讓伺服器自動猜測,
並指定 OPJP 與 EDJP 為日文樣式。\n\n
jpStyleConversionStrategy : 對於日文樣式該如何處理。
"none" 表示 無(當成中文處理) 、 "protect" 表示 保護 、
"protectOnlySameOrigin" 表示 僅保護原文與日文相同的字 、
"fix" 表示 修正 。\n\n
jpTextConversionStrategy : 對於繁化姬自己發現的日文區域該如何處理。
"none" 表示 無(當成中文處理) 、 "protect" 表示 保護 、
"protectOnlySameOrigin" 表示 僅保護原文與日文相同的字 、
"fix" 表示 修正 。\n\n
modules : 強制設定模組啟用/停用 。 -1 / 0 / 1 分別表示 自動 / 停用 / 啟用 。
字串使用 JSON 格式編碼。使用 * 可以先設定所有模組的狀態。
例如:{"*":0,"Naruto":1,"Typo":1} 表示停用所有模組,
但啟用 火影忍者 與 錯別字修正 模組。\n\n
userPostReplace : 轉換後再進行的額外取代。
格式為 "搜尋1=取代1\\n搜尋2=取代2\\n..." 。
搜尋1 會在轉換後再被取代為 取代1 。\n\n
userPreReplace : 轉換前先進行的額外取代。
格式為 "搜尋1=取代1\\n搜尋2=取代2\\n..." 。
搜尋1 會在轉換前先被取代為 取代1 。\n\n
userProtectReplace : 保護字詞不被繁化姬修改。
格式為 "保護1\\n保護2\\n..." 。
保護1 、 保護2 等字詞將不會被繁化姬修改。
"""
ALLOW_KEYS = [
'text',
'converter',
'ignoreTextStyles',
'jpTextStyles',
'jpStyleConversionStrategy',
'jpTextConversionStrategy',
'modules',
'userPostReplace',
'userPreReplace',
'userProtectReplace',
]
error_keys = [key for key in kwargs.keys() if key not in ALLOW_KEYS]
if error_keys:
raise FanhuajiInvalidKey(f"Invalid key: {', '.join(error_keys)}")
        if kwargs.get('text', None) is None or kwargs.get('converter', None) is None:
            raise FanhuajiMissNecessarykey("Missing required key: both 'text' and 'converter' are needed")
response = self._request('/convert', kwargs)
return self._text(response)
def _text(self, response) -> Union[None, str]:
if response['code'] != 0:
return None
return response['data']['text']
async def async_convert(self, **kwargs):
ALLOW_KEYS = [
'text',
'converter',
'ignoreTextStyles',
'jpTextStyles',
'jpStyleConversionStrategy',
'jpTextConversionStrategy',
'modules',
'userPostReplace',
'userPreReplace',
'userProtectReplace',
]
error_keys = [key for key in kwargs.keys() if key not in ALLOW_KEYS]
if error_keys:
raise FanhuajiInvalidKey(f"Invalid key: {', '.join(error_keys)}")
content = kwargs.get('text', None)
converter = kwargs.get('converter', None)
if content is None or converter is None:
raise FanhuajiMissNecessarykey(f"Miss necessary key")
        async with aiohttp.ClientSession() as session:
            chunks = self._slice(content)
            texts = []
            for chunk in chunks:
                payload = {
                    'text': chunk,
                    'converter': converter
                }
                response = await self._async_request(session, '/convert', payload)
                texts.append(self._text(response))
        return ''.join(texts)
class RequestError(Exception):
pass
class AsyncRequestError(Exception):
pass
class FanhuajiInvalidKey(Exception):
pass
class FanhuajiMissNecessarykey(Exception):
pass
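# A minimal usage sketch (illustrative only; it performs a live request to the
# zhconvert.org API, and the sample text/output show what a simplified-to-traditional
# conversion would be expected to return):
# >>> engine = FanhuajiEngine()
# >>> engine.convert(text='简体中文', converter='Traditional')
# '簡體中文'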
|
11552539
|
from dataclasses import dataclass, field
from typing import Optional
import os
from dotenv import load_dotenv, find_dotenv
@dataclass(frozen=True)
class Env:
"""Loads all environment variables into a predefined set of properties
"""
dotenv_path = find_dotenv()
print(f"find_dotenv returns: {dotenv_path}")
load_dotenv(dotenv_path)
app_insights_connection_string: Optional[str] = os.environ.get("APPLICATIONINSIGHTS_CONNECTION_STRING") # NOQA: E501
log_to_console: Optional[bool] = os.environ.get("LOG_TO_CONSOLE", "true").lower().strip() == "true" # NOQA: E501
log_text_to_aml: bool = field(init=False)
log_level: Optional[str] = os.environ.get("LOG_LEVEL", "WARNING") # NOQA: E501
log_sampling_rate: float = float(os.environ.get("LOG_SAMPLING_RATE", 1.0)) # NOQA: E501
trace_sampling_rate: float = float(os.environ.get("TRACE_SAMPLING_RATE", 1.0)) # NOQA: E501
metrics_export_interval: int = int(os.environ.get("METRICS_EXPORT_INTERVAL", 15)) # NOQA: E501
enable_standard_metrics: Optional[bool] = os.environ.get("ENABLE_STANDARD_METRICS", "false").lower().strip() == "true" # NOQA: E501
build_id: Optional[str] = str(os.environ.get("BUILD_ID", "local")) # NOQA: E501
def __post_init__(self):
# aml and console both print messages
object.__setattr__(self, 'log_text_to_aml', not self.log_to_console)
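# A minimal usage sketch (illustrative; the values shown are the defaults used
# when none of the corresponding environment variables are set):
# >>> env = Env()
# >>> env.log_level
# 'WARNING'
# >>> env.log_to_console, env.log_text_to_aml
# (True, False)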
|
11552595
|
from __future__ import absolute_import
from django.db import models
from .. import exc
class ProtocolType(models.Model):
"""
Representation of protocol types (e.g. bgp, is-is, ospf, etc.)
"""
name = models.CharField(
max_length=16, db_index=True,
help_text='Name of this type of protocol (e.g. OSPF, BGP, etc.)',
)
description = models.CharField(
max_length=255, default='', blank=True, null=False,
help_text='A description for this ProtocolType',
)
required_attributes = models.ManyToManyField(
'Attribute', db_index=True, related_name='protocol_types',
help_text=(
'All Attributes which are required by this ProtocolType. If a'
' Protocol of this type is saved and is missing one of these'
' attributes, a ValidationError will be raised.'
)
)
site = models.ForeignKey(
'Site', db_index=True, related_name='protocol_types',
on_delete=models.PROTECT, verbose_name='Site',
help_text='Unique ID of the Site this ProtocolType is under.'
)
def __unicode__(self):
return u'%s' % self.name
class Meta:
unique_together = ('site', 'name')
def get_required_attributes(self):
"""Return a list of the names of ``self.required_attributes``."""
# FIXME(jathan): These should probably cached on the model and updated
# on write. Revisit after we see how performance plays out in practice.
return list(self.required_attributes.values_list('name', flat=True))
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'description': self.description,
'required_attributes': self.get_required_attributes(),
'site': self.site_id,
}
# Signals
def required_attributes_changed(sender, instance, action, reverse, model,
pk_set, **kwargs):
"""
Signal handler that disallows anything but Protocol attributes to be added
to a ProtocolType.required_attributes.
"""
if action == 'pre_add':
# First filter in Protocol attributes.
attrs = model.objects.filter(pk__in=pk_set)
if attrs.exclude(resource_name='Protocol').exists():
raise exc.ValidationError({
'required_attributes': 'Only Protocol attributes are allowed'
})
# Then make sure that they match the site of the incoming instance.
wrong_site = attrs.exclude(site_id=instance.site_id)
if wrong_site.exists():
bad_attrs = [str(w) for w in wrong_site]
raise exc.ValidationError({
'required_attributes': (
'Attributes must share the same site as '
'ProtocolType.site. Got: %s' % bad_attrs
)
})
# Register required_attributes_changed -> ProtocolType.required_attributes
models.signals.m2m_changed.connect(
required_attributes_changed,
sender=ProtocolType.required_attributes.through
)
|
11552606
|
from django.contrib.auth.models import User
from nose.tools import ok_, eq_
from airmozilla.base.tests.testbase import DjangoTestCase
from airmozilla.search.models import SavedSearch
from airmozilla.main.models import Event, Tag, Channel
class SavedSearchTestCase(DjangoTestCase):
def test_get_events(self):
user = User.objects.create(username='bob')
savedsearch = SavedSearch.objects.create(
user=user,
filters={
'title': {'include': 'firefox'},
'tags': {'include': [], 'exclude': []},
'channels': {'include': [], 'exclude': []},
}
)
eq_(savedsearch.get_events().count(), 0)
event = Event.objects.get(title='Test event')
savedsearch.filters['title']['include'] = 'EVENT'
savedsearch.save()
ok_(event in savedsearch.get_events())
savedsearch.filters['title']['exclude'] = 'TEST'
savedsearch.save()
ok_(event not in savedsearch.get_events())
tag = Tag.objects.create(name='tag')
tag2 = Tag.objects.create(name='tag2')
event.tags.add(tag)
event.tags.add(tag2)
savedsearch.filters['title']['include'] = ''
savedsearch.filters['title']['exclude'] = ''
savedsearch.filters['tags']['include'] = [tag.id]
savedsearch.save()
ok_(event in savedsearch.get_events())
savedsearch.filters['tags']['exclude'] = [tag2.id]
savedsearch.save()
ok_(event not in savedsearch.get_events())
channel = Channel.objects.create(name='channel', slug='c')
channel2 = Channel.objects.create(name='channel2', slug='c2')
event.channels.add(channel)
event.channels.add(channel2)
savedsearch.filters['tags']['include'] = []
savedsearch.filters['tags']['exclude'] = []
savedsearch.filters['channels']['include'] = [channel.id]
savedsearch.save()
ok_(event in savedsearch.get_events())
savedsearch.filters['channels']['exclude'] = [channel2.id]
savedsearch.save()
ok_(event not in savedsearch.get_events())
assert event.privacy == Event.PRIVACY_PUBLIC
savedsearch.filters['channels']['include'] = []
savedsearch.filters['channels']['exclude'] = []
savedsearch.filters['privacy'] = [Event.PRIVACY_PUBLIC]
savedsearch.save()
ok_(event in savedsearch.get_events())
savedsearch.filters['privacy'] = [Event.PRIVACY_COMPANY]
savedsearch.save()
ok_(event not in savedsearch.get_events())
|
11552631
|
from cloudinary.forms import CloudinaryFileField
from django import forms
integerfields = {
'max_quantity_available': True,
'original_price': True,
'quorum': True,
'current_price': True,
}
class DealForm(forms.Form):
"""
Handles verification of form inputs
"""
active = forms.BooleanField(label='Is active?', required=False)
current_price = forms.IntegerField(label='Current price', required=False)
max_quantity_available = forms.IntegerField(
label='Max. quantity available', required=False
)
original_price = forms.IntegerField(label='Original price', required=False)
image = CloudinaryFileField(required=False)
quorum = forms.IntegerField(label='Quorum', required=False)
title = forms.CharField(label='Title', required=False, max_length=200)
address = forms.CharField(label='Address', required=False, max_length=200)
@staticmethod
def construct_int(value):
"""Contructs an integer value from a string or a comma separated list
"""
if value == '' or value is None:
return 0
        print(value)
        index = value.find(',')
        if index == -1:
            return int(value)
        else:
            return int(value[:index] + value[index+1:])
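    # Illustrative behaviour (added note, not in the original form class): a single
    # thousands separator is stripped before the cast, and empty input maps to 0.
    # >>> DealForm.construct_int('1,500')
    # 1500
    # >>> DealForm.construct_int('')
    # 0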
def is_valid(self):
"""
Checks if form data is valid.
"""
super(DealForm, self).is_valid()
for key, value in self.data.items():
if value is None or value == '':
if key != 'quorum':
continue
if integerfields.get(key, False):
if type(str(value)) is str:
value = DealForm.construct_int(value)
# remove errors upon normalization of uncleaned data
self.errors.pop(key, None)
# update value of cleaned data
self.cleaned_data[key] = value
        return len(self.errors) == 0
def save(self, deal):
"""
Updates information about a deal
"""
for key, value in self.cleaned_data.items():
if value is None or value == '':
# skip the quorum field
if key != 'quorum':
continue
setattr(deal, key, value)
deal.save()
|
11552635
|
from datasets.base.storage_engine.memory_mapped import ListMemoryMapped
from datasets.types.incompatible_error import IncompatibleError
from datasets.base.common.operator.filters import filter_list_deserialize
from datasets.base.common.dataset_context_dao import DatasetContextDAO
from datasets.types.data_split import DataSplit
class LazyAttributesLoader:
def __init__(self, storage, index):
self.storage = storage
self.index = index
self.attributes = None
def _try_load_attributes(self):
if self.attributes is None:
self.attributes = self.storage[self.index]
assert self.attributes is not None
def get_attribute(self, key):
self._try_load_attributes()
return self.attributes[key]
def has_attribute(self, key):
self._try_load_attributes()
return key in self.attributes
def get_all_attribute_name(self):
self._try_load_attributes()
return self.attributes.keys()
def get_attribute_tree_query(self, key):
self._try_load_attributes()
value = self.attributes
for param in key:
value = value[param]
return value
def has_attribute_tree_query(self, key):
self._try_load_attributes()
value = self.attributes
        for sub_key in key:
            if sub_key not in value:
                return False
            value = value[sub_key]
return True
def get_all_attribute_name_tree_query(self, key):
self._try_load_attributes()
value = self.get_attribute_tree_query(key)
return value.keys()
class DummyAttributesLoader:
def get_attribute(self, _):
raise KeyError
def has_attribute(self, _):
return False
def get_all_attribute_name(self):
return None
def get_attribute_tree_query(self, _):
raise KeyError
def has_attribute_tree_query(self, _):
return False
def get_all_attribute_name_tree_query(self, _):
return None
class MemoryMappedDataset:
def __init__(self, root_path: str, storage: ListMemoryMapped, schema_version, dataset_type_name):
self.root_path = root_path
self.storage = storage
'''
dataset_attributes:
{
'name': str
'category_id_name_map': dict[nullable], mapping category_id => category_name, null means no category info
...
}
'''
self.dataset_attributes: dict = self.storage[0]
if self.dataset_attributes['version'][0] != schema_version or self.dataset_attributes['type'] != dataset_type_name:
del self.dataset_attributes
del self.storage
raise IncompatibleError
self.index_matrix = self.storage[1].copy()
self.context = DatasetContextDAO(self.dataset_attributes)
@staticmethod
def load_storage(path: str):
return ListMemoryMapped(path)
def has_category_id_name_map(self):
return 'category_id_name_map' in self.dataset_attributes
def get_category_id_name_map(self):
return self.dataset_attributes['category_id_name_map']
def get_category_name_by_id(self, id_: int):
return self.dataset_attributes['category_id_name_map'][id_]
def get_data_split(self):
return DataSplit[self.dataset_attributes['split']]
def get_version(self):
return self.dataset_attributes['version'][1]
def set_root_path(self, root_path: str):
self.root_path = root_path
def get_root_path(self):
return self.root_path
def get_applied_filter_list(self):
if 'filters' in self.dataset_attributes:
return filter_list_deserialize(self.dataset_attributes['filters'])
return None
def get_attribute(self, name):
return self.dataset_attributes[name]
def has_attribute(self, name):
return name in self.dataset_attributes
def get_all_attribute_name(self):
return self.dataset_attributes.keys()
def get_name(self):
return self.dataset_attributes['name']
def __len__(self):
return self.index_matrix.shape[0]
def get_adhoc_manipulator(self):
from datasets.base.common.manipulator import SimpleAdHocManipulator
return SimpleAdHocManipulator(self.dataset_attributes)
def get_bounding_box_format(self):
return self.context.get_bounding_box_format()
def get_pixel_definition(self):
return self.context.get_pixel_definition()
def get_pixel_coordinate_system(self):
return self.context.get_pixel_coordinate_system()
def get_bounding_box_coordinate_system(self):
return self.context.get_bounding_box_coordinate_system()
def get_bounding_box_data_type(self):
return self.context.get_bounding_box_data_type()
def get_unique_id(self):
import hashlib
from miscellanies.slugify import slugify
m = hashlib.md5()
m.update(bytes(self.get_name(), encoding='utf-8'))
dataset_filters = self.get_applied_filter_list()
if dataset_filters is not None:
m.update(bytes(str(dataset_filters), encoding='utf-8'))
unique_id = f'{slugify(self.get_name())}-{str(self.get_data_split())}-{m.digest().hex()}'
return unique_id
|
11552645
|
from discord.ext.commands import Context, check
from discord.ext.commands.errors import CheckFailure
class NotModerator(CheckFailure):
"""Exception raised when the command invoker isn't on the moderator list."""
def __init__(self):
super().__init__(
"You are not a moderator on this server. See the `moderators` command."
)
def is_moderator():
async def predicate(ctx: Context):
if not ctx.bot.cache.sismember(
f"{ctx.guild.id}:moderators",
ctx.author.id,
):
raise NotModerator()
return True
return check(predicate)
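# A minimal usage sketch (assumed, not part of the original module): the check is
# meant to be stacked on a discord.py command, for example:
# >>> from discord.ext import commands
# >>> @commands.command()
# ... @is_moderator()
# ... async def purge(ctx):
# ...     await ctx.send("Only moderators reach this point.")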
|
11552651
|
from typing import List, Dict, Optional
from src.metrics.partial_match_eval.utils import get_recursively, flatten
class JSQLReader:
@staticmethod
def parse_sql_to_parsed_body(parsed_sql: Dict, anonymize_values: bool, parse_on_clause: bool) -> Dict:
select_bodies = []
for key, value in parsed_sql.items():
if key == "selectBody":
if "selects" in value:
for select in value["selects"]:
select_bodies.append(select)
else:
select_bodies.append(value)
elif key == "withItemsList":
for with_value in parsed_sql["withItemsList"]:
select_body = with_value["selectBody"]
if "selects" in select_body:
for select in select_body["selects"]:
select_bodies.append(select)
else:
select_bodies.append(select_body)
parsed_dict = {"select_body_{}".format(i): [] for i in range(len(select_bodies))}
for num, body in enumerate(select_bodies):
while isinstance(body, list):
body = body[0]
body_dict = JSQLReader._get_parse_body(
body, anonymize_values=anonymize_values, parse_on_clause=parse_on_clause
)
parsed_dict["select_body_{}".format(num)].append(body_dict)
return parsed_dict
@staticmethod
def _get_parse_body(body: Dict, anonymize_values: bool, parse_on_clause: bool) -> Dict:
# select
select_items = JSQLReader._get_select_items(body.get("selectItems", []), anonymize_values, parse_on_clause)
# top
top_items = JSQLReader._get_top_clause(body.get("top", {}), body.get("limit", {}))
# from
from_items = JSQLReader._get_from_clause(
body.get("fromItem", {}), body.get("joins", []), anonymize_values, parse_on_clause
)
# where
where_items = JSQLReader._get_all_where_items(body.get("where"), anonymize_values, parse_on_clause)
# order by
order_by_items = JSQLReader._get_all_order_items(
body.get("orderByElements", []), anonymize_values, parse_on_clause
)
# group by
group_by_items = JSQLReader._get_all_group_by_columns(
body.get("groupBy", {}), anonymize_values, parse_on_clause
)
# having
having_items = JSQLReader._get_having_items(body.get("having", {}), anonymize_values, parse_on_clause)
body_dict = {
"select_items": select_items,
"top_items": top_items,
"from_items": from_items,
"where_items": where_items,
"order_items": order_by_items,
"groupby_items": group_by_items,
"having_items": having_items,
}
return body_dict
# pylint: disable=too-many-branches
@staticmethod
def _get_column_items_inner(
expression: Dict, anonymize_values: bool, parse_on_clause: bool, left_expression: bool
) -> List:
if not expression:
return []
items = []
left_right_items = JSQLReader._get_left_right_expressions(expression, anonymize_values, parse_on_clause)
if left_right_items:
items.extend(left_right_items)
inner_sql_items = JSQLReader._get_items_from_inner_sql(expression, anonymize_values, parse_on_clause)
if inner_sql_items:
items.extend(inner_sql_items)
# add terminals
if "leftExpression" not in expression and "rightExpression" not in expression:
column_name = JSQLReader._get_terminal(expression, "columnName")
if column_name:
if anonymize_values and not left_expression:
items.append("terminal")
else:
items.append(column_name)
string_expression = JSQLReader._get_terminal(expression, "stringExpression")
if string_expression:
if anonymize_values:
items.append("terminal")
else:
items.append(string_expression)
value_value = JSQLReader._get_terminal(expression, "value")
if value_value:
if anonymize_values:
items.append("terminal")
else:
items.append(value_value)
if "allColumns" in expression and expression["allColumns"]:
items.append("*")
aggregator = JSQLReader._get_aggregator(expression)
if aggregator:
items.append(aggregator)
not_expr = JSQLReader._get_not(expression)
if not_expr:
items.append(not_expr)
return items
@staticmethod
def _get_not(expression: Dict) -> Optional[str]:
not_expr = None
first_not_expr = expression.get("not", None)
if first_not_expr:
if expression["not"]:
not_expr = "not"
return not_expr
@staticmethod
def _get_aggregator(expression: Dict) -> Optional[str]:
return expression.get("name", None)
@staticmethod
def _get_when_items(expression: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
when_clauses = expression.get("whenClauses", [])
if not when_clauses:
return []
for when_clause in when_clauses:
items.append("case")
when_expression = when_clause.get("whenExpression", {})
if when_expression:
when_expression_items = JSQLReader._get_items_from_expression(
when_expression, anonymize_values, parse_on_clause
)
items.extend(when_expression_items)
then_expression = when_clause.get("thenExpression", {})
if then_expression:
then_expression_items = JSQLReader._get_items_from_expression(
then_expression, anonymize_values, parse_on_clause
)
items.extend(then_expression_items)
return items
# pylint: disable=too-many-branches
@staticmethod
def _get_items_from_expression(expression: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
if "parameters" in expression:
parameters_items = []
for parameter_expression in expression["parameters"]["expressions"]:
parameters_items.extend(
JSQLReader._get_items_from_expression(parameter_expression, anonymize_values, parse_on_clause)
)
column_name = parameters_items
else:
column_name = expression.get("columnName")
if column_name:
if isinstance(column_name, list):
if len(column_name) == 1:
column_name = column_name[0]
items.append(column_name)
else:
items.extend(column_name)
else:
items.append(column_name)
inner_sql_items = JSQLReader._get_items_from_inner_sql(expression, anonymize_values, parse_on_clause)
items.extend(inner_sql_items)
when_expressions_items = JSQLReader._get_when_items(expression, anonymize_values, parse_on_clause)
items.extend(when_expressions_items)
aggregator = JSQLReader._get_aggregator(expression)
if aggregator:
items.append(aggregator)
if column_name:
items.append([aggregator, column_name])
else:
if "allColumns" in expression and expression["allColumns"]:
items.append("*")
items.append([aggregator, "*"])
left_right_expression = JSQLReader._get_left_right_expressions(expression, anonymize_values, parse_on_clause)
items.extend(left_right_expression)
if "type" in expression and expression["type"] == "OVER":
items.append("OVER")
order_by_items = JSQLReader._get_all_order_items(
expression.get("orderByElements", []), anonymize_values, parse_on_clause
)
if order_by_items:
items.extend(order_by_items)
return items
@staticmethod
def _get_left_right_expressions(expression: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
# leftExpression
left_expression_items = JSQLReader._get_column_items_inner(
expression.get("leftExpression"), anonymize_values, parse_on_clause=parse_on_clause, left_expression=True
)
if left_expression_items:
items.extend(left_expression_items)
# rightExpression
right_expression_items = JSQLReader._get_column_items_inner(
expression.get("rightExpression"), anonymize_values, parse_on_clause=parse_on_clause, left_expression=False
)
if right_expression_items:
items.extend(right_expression_items)
# rightItemsList
right_items_list_dict = expression.get("rightItemsList", {})
if right_items_list_dict:
items.append("IN")
for right_item_expression in right_items_list_dict.get("expressions", []):
right_items_expression_items = JSQLReader._get_column_items_inner(
right_item_expression, anonymize_values, parse_on_clause=parse_on_clause, left_expression=False
)
if right_items_expression_items:
items.extend(right_items_expression_items)
string_expression = expression.get("stringExpression")
if string_expression:
items.append(string_expression)
if left_expression_items and right_expression_items:
items.append([left_expression_items, right_expression_items, string_expression])
return items
@staticmethod
def _get_column_items(
select_item: Dict, anonymize_values: bool, parse_on_clause: bool, add_alias: bool = False
) -> List:
if len(select_item.keys()) == 1: # select * from ..
return ["*"]
expression = select_item.get("expression")
if not expression:
return []
items = JSQLReader._get_items_from_expression(expression, anonymize_values, parse_on_clause)
if "alias" in select_item and select_item["alias"]:
alias = select_item["alias"]["name"]
if add_alias:
items.append(alias)
return items
@staticmethod
def _get_terminal(body: Dict, item: str) -> Optional[str]:
item = body.get(item)
if item:
while isinstance(item, list):
item = item[0]
return str(item)
return None
# pylint: disable=too-many-nested-blocks
@staticmethod
def _get_items_from_inner_sql(expression: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
inner_select_body_items = JSQLReader.parse_sql_to_parsed_body(
expression, anonymize_values=anonymize_values, parse_on_clause=parse_on_clause
)
if inner_select_body_items:
for select_body_list in inner_select_body_items.values():
select_body_items_dict = select_body_list[0]
for clause_items in select_body_items_dict.values():
if clause_items:
items.extend(clause_items)
return items
@staticmethod
def _get_top_clause(top_item: Dict, limit_item: Dict) -> List:
items = []
expression = top_item.get("expression")
if expression:
if "stringValue" in expression:
items.append(expression["stringValue"])
elif "name" in expression:
items.append(expression["name"])
# add LIMIT clause
if limit_item:
row_count = limit_item["rowCount"]
items.append(row_count["stringValue"])
return items
@staticmethod
def get_all_tables(parsed: Dict) -> Dict[str, str]:
alias_to_table_name_dict: Dict[str, str] = {}
table_items = flatten(get_recursively(parsed, ["fromItem"]))
join_items = get_recursively(parsed, ["joins"])
join_items = flatten(join_items)
for join_item in join_items:
table_items.extend(flatten(get_recursively(join_item, ["rightItem"])))
for from_item in table_items:
table_name = None
if from_item.get("name"):
if from_item.get("database"):
table_name = from_item.get("name")
table_alias = None
if from_item.get("alias", {}).get("name"):
table_alias = from_item.get("alias", {}).get("name")
if table_name:
if not table_alias:
table_alias = table_name
alias_to_table_name_dict[table_alias] = table_name
return alias_to_table_name_dict
@staticmethod
def _get_from_clause(from_dict: Dict, join_list: List, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
if "fullyQualifiedName" in from_dict:
items.append(from_dict["fullyQualifiedName"])
elif "multipartName" in from_dict:
items.append(from_dict["multipartName"])
elif "name" in from_dict:
items.append(from_dict["name"])
inner_sql_items = JSQLReader._get_items_from_inner_sql(from_dict, anonymize_values, parse_on_clause)
items.extend(inner_sql_items)
join_items = JSQLReader._get_join_clause(join_list, anonymize_values, parse_on_clause)
if join_items:
items.extend(join_items)
if len(items) > 1:
items.append(items.copy())
return items
@staticmethod
def _get_select_items(select_items: List[Dict], anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
for select_item in select_items:
column_items = JSQLReader._get_column_items(
select_item, anonymize_values=anonymize_values, parse_on_clause=parse_on_clause
)
items.extend(column_items)
if len(items) > 1:
items.append(items.copy())
return items
@staticmethod
def _get_items_from_join(join_dict: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
if "rightItem" in join_dict:
right_item = join_dict["rightItem"]
if "name" in right_item:
items.append(right_item["name"])
# inner sql in join
if "selectBody" in right_item:
inner_sql_items = JSQLReader._get_items_from_inner_sql(right_item, anonymize_values, parse_on_clause)
items.extend(inner_sql_items)
if "onExpression" in join_dict and parse_on_clause:
on_expression = join_dict["onExpression"]
left_right_expression = JSQLReader._get_left_right_expressions(
on_expression, anonymize_values, parse_on_clause
)
items.extend(left_right_expression)
# inner sql in on expression
if "selectBody" in on_expression:
inner_sql_items = JSQLReader._get_items_from_inner_sql(on_expression, anonymize_values, parse_on_clause)
items.extend(inner_sql_items)
if len(items) > 1:
items.append(items.copy())
return items
@staticmethod
def _get_join_clause(join_items: List[Dict], anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
for join_dict in join_items:
join_clause_items = JSQLReader._get_items_from_join(join_dict, anonymize_values, parse_on_clause)
items.extend(join_clause_items)
return items
@staticmethod
def _get_items_from_group_by_expression(group_by_expression: Dict, anonymize_values: bool) -> List:
items = []
string_value = group_by_expression.get("stringValue")
if string_value:
if anonymize_values:
items.append(["terminal"])
else:
items.append(string_value)
return items
@staticmethod
def _get_all_group_by_columns(group_by_dict: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
group_by_expressions = group_by_dict.get("groupByExpressions", [])
for group_by_expression in group_by_expressions:
group_by_expression_items = JSQLReader._get_items_from_group_by_expression(
group_by_expression, anonymize_values
)
if group_by_expression_items:
items.extend(group_by_expression_items)
group_by_expression_more_items = JSQLReader._get_items_from_expression(
group_by_expression, anonymize_values, parse_on_clause
)
if group_by_expression_more_items:
items.extend(group_by_expression_more_items)
if len(items) > 1:
items.append(items.copy())
return items
@staticmethod
def _get_all_order_items(order_by_elements: List[Dict], anonymize_values: bool, parse_on_clause: bool) -> List:
items = []
for order_by_element in order_by_elements:
expression = order_by_element.get("expression")
if expression:
expression_items = JSQLReader._get_items_from_expression(expression, anonymize_values, parse_on_clause)
if "asc" in order_by_element and order_by_element["asc"]:
order = "asc"
else:
order = "desc"
expression_items.append(order)
items.extend(expression_items)
if len(expression_items) > 1:
items.append(expression_items)
if len(items) > 1:
items.append(items.copy())
return items
@staticmethod
def _get_all_where_items(where_dict: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
if not where_dict:
return []
left_right_items = JSQLReader._get_left_right_expressions(where_dict, anonymize_values, parse_on_clause)
return left_right_items
@staticmethod
def _get_having_items(having_dict: Dict, anonymize_values: bool, parse_on_clause: bool) -> List:
if not having_dict:
return []
left_right_items = JSQLReader._get_left_right_expressions(having_dict, anonymize_values, parse_on_clause)
return left_right_items
|
11552675
|
import os
import threading
import warnings
from collections import OrderedDict
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from matplotlib.backends.backend_pdf import PdfPages
class Collect(object):
""" Collect figures and export to a file
Args:
        args (Callable): an optional callable; not required
Returns:
obj (Collect)
Example:
>>> import random
>>>
>>> collect_files = Collect()
>>>
>>> def show(plot_data):
>>> plt.figure()
>>> plt.plot(plot_data)
>>> plt.show()
>>> data = [random.randint(0, 10) for _ in range(1000)]
>>> show(data)
>>> name = plt.get_figlabels()[-1]
>>> ff = plt.gcf()
>>> collect_files.update_info(name, ff)
>>> collect_files.export('./result', save_mode='folder')
"""
_infos = OrderedDict()
_instance_lock = threading.Lock()
def check_name(self, name):
if name in self._infos:
# if the name has been collected, it will be replaced
# by the newest one,
self._infos.pop(name)
def update_info(self, name, res):
# collect the name and corresponding figure
self.check_name(name)
self._infos[name] = res
def __new__(cls, *args, **kwargs):
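        # Double-checked locking singleton: only the first caller creates the
        # shared instance, so every Collect() call returns the same object.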
if not hasattr(Collect, "_instance"):
with Collect._instance_lock:
if not hasattr(Collect, "_instance"):
Collect._instance = object.__new__(cls)
return Collect._instance
def export(self, save_path, save_mode=None, exist_ok=True, *args, **kwargs):
""" Export the saved files into a folder, pdf or a png.
Args:
save_path (str): File path or a dir path.
save_mode (str, None): Save mode, 'pdf', 'png' or 'folder'.
exist_ok (bool): If True, the exist file will be covered, else not export.
"""
if len(self._infos) == 0:
            warnings.warn('Currently, there is no figure collected, '
                          'so nothing will be exported.',
                          category=UserWarning)
return
if save_mode is None:
save_mode = 'folder'
assert save_mode in ['pdf', 'png', 'folder'], "support export mode" \
" are ['pdf', 'png', 'folder']"
dir_path = save_path if save_mode == 'folder' else os.path.dirname(save_path)
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path, exist_ok=exist_ok)
if save_mode == 'pdf':
self.export_pdf(save_path, *args, **kwargs)
elif save_mode == 'png':
self.export_png(save_path, *args, **kwargs)
else:
self.export_to_folder(save_path, *args, **kwargs)
def figure_to_array(self, name=None):
""" Transfer plt.figure to np.ndarray
Args:
name (list, optional): Transfer {name} file to np.ndarray if name is not None,
else transfer all files to np.ndarray.
Returns:
shape (tuple): The max shape of figures.
tmp_file (list): Transferred array of plt.figure.
tmp_name (list): Corresponding names of tmp_file.
"""
shape = [0, 0, 4]
tmp_file = []
tmp_name = []
for key, value in self._infos.items():
if name is not None and key not in name:
continue
buffer_ = BytesIO()
value.savefig(buffer_, format='png')
buffer_.seek(0)
img = Image.open(buffer_)
data = np.asarray(img)
shape = list(map(lambda x: max(x), zip(shape, data.shape)))
tmp_file.append(data)
tmp_name.append(key)
for k in tmp_name:
self.clear(k)
return shape, tmp_file, tmp_name
def export_to_folder(self, save_path, name=None, cover=False):
""" save the plt.figure into a folder
Args:
save_path (str): The dir path.
name (str): If name is not None, export the {name} file, else export all.
cover (bool): If True, the existed file will be covered,
else generate a new file name.
"""
_, tmp_file, tmp_name = self.figure_to_array(name)
if tmp_file:
for idx, (d, n) in enumerate(zip(tmp_file, tmp_name)):
ll = Image.fromarray(d)
s_i = str(idx) + '.png'
while os.path.isfile(os.path.join(save_path, s_i)) and not cover:
s_i = s_i[:-4]
s_i = s_i + '_new.png'
ll.save(os.path.join(save_path, s_i))
def export_png(self, save_path, name=None):
""" combine all the plt.figure as a png file and export to {save_path}
Args:
save_path (str): The file path.
name (str): If name is not None, export the {name} file, else export all.
"""
shape, tmp_file, _ = self.figure_to_array(name)
if tmp_file:
canvas = np.zeros((shape[0] * len(tmp_file), shape[1], shape[2]), dtype=np.uint8)
start = 0
for idx, d in enumerate(tmp_file):
h_, w_, c_ = d.shape
end = start + h_
# canvas[idx * shape[0]:idx * shape[0] + h_, :w_, :c_] = d
canvas[start:end, :w_, :c_] = d
start = end
canvas = canvas[:end, :, :]
ll = Image.fromarray(canvas)
ll.save(save_path)
def export_pdf(self, save_path, name=None):
""" export the plt.figure to a pdf file
Args:
save_path (str): The file path.
name (str): If name is not None, export the {name} file, else export all.
"""
with PdfPages(save_path) as pdf:
if name is None:
for key, value in self._infos.items():
pdf.savefig(value)
self.clear()
else:
for n in name:
if self._infos.get(n, False):
ff = self._infos.get(n)
pdf.savefig(ff)
self.clear(n)
def clear(self, v=None):
if v is None:
self._infos.clear()
else:
self._infos.pop(v)
def __len__(self):
return len(self._infos)
def get_collect():
"""
Returns:
obj (Collect)
"""
return Collect()
def get_fig():
""" get the current plt.figure and saved to obj (Collect)
Examples:
>>> import random
>>> def show():
>>> plt.figure('example')
>>> plt.hist([random.randint(0,100) for _ in range(100)])
>>> get_fig()
>>> show()
>>> cc = get_collect()
>>> cc._infos.get('example')
"""
plt.tight_layout()
collect = get_collect()
name = plt.get_figlabels()[-1]
ff = plt.gcf()
collect.update_info(name, ff)
|
11552678
|
import os
import subprocess
import pandas as pd
import copy
import yaml
from .factories.mysql import MySqlFactory
from .factories.pandas import PandasFactory
from .factories.postgres import PostgresFactory
from .factories.spark import SparkFactory
from .factories.sqlserver import SqlServerFactory
root_url = subprocess.check_output("git rev-parse --show-toplevel".split(" ")).decode("utf-8").strip()
pums_schema_path = os.path.join(root_url,"datasets", "PUMS.yaml")
pums_large_schema_path = os.path.join(root_url,"datasets", "PUMS_large.yaml")
pums_pid_schema_path = os.path.join(root_url,"datasets", "PUMS_pid.yaml")
pums_dup_schema_path = os.path.join(root_url,"datasets", "PUMS_dup.yaml")
pums_null_schema_path = os.path.join(root_url,"datasets", "PUMS_dup.yaml")
class DbCollection:
# Collection of test databases keyed by engine and database name.
# Automatically connects to databases listed in connections-unit.yaml
def __init__(self):
from snsql.metadata import Metadata
self.metadata = {
'PUMS': Metadata.from_file(pums_schema_path),
'PUMS_large': Metadata.from_file(pums_large_schema_path),
'PUMS_pid': Metadata.from_file(pums_pid_schema_path),
'PUMS_dup': Metadata.from_file(pums_dup_schema_path),
'PUMS_null': Metadata.from_file(pums_dup_schema_path)
}
self.engines = {}
home = os.path.expanduser("~")
p = os.path.join(home, ".smartnoise", "connections-unit.yaml")
if not os.environ.get('SKIP_PANDAS'):
self.engines['pandas'] = PandasFactory()
else:
print("Skipping pandas database tests")
if os.environ.get('TEST_SPARK'):
self.engines['spark'] = SparkFactory()
else:
print("TEST_SPARK not set, so skipping Spark tests")
if not os.path.exists(p):
print ("No config file at ~/.smartnoise/connections-unit.yaml")
else:
with open(p, 'r') as stream:
conns = yaml.safe_load(stream)
if conns is None:
print("List of installed test engines is empty")
else:
for engine in conns:
eng = conns[engine]
host = conns[engine]["host"]
port = conns[engine]["port"]
user = conns[engine]["user"]
if 'databases' in eng:
raise ValueError(f"connections-unit.yaml has a 'databases' section for engine {engine}. Please update to use 'datasets' syntax.")
datasets = eng['datasets']
if engine == "postgres":
self.engines[engine] = PostgresFactory(engine, user, host, port, datasets)
elif engine == "sqlserver":
self.engines[engine] = SqlServerFactory(engine, user, host, port, datasets)
elif engine == "mysql":
self.engines[engine] = MySqlFactory(engine, user, host, port, datasets)
def __str__(self):
description = ""
for engine in self.engines:
eng = self.engines[engine]
description += f"{eng.user}@{engine}://{eng.host}:{eng.port}\n"
for dataset in eng.datasets:
dbdest = eng.datasets[dataset]
connected = "(connected)" if dataset in eng.connections else ""
description += f"\t{dataset} -> {dbdest} {connected}\n"
return description
def get_private_readers(self, *ignore, metadata=None, privacy, database, engine=None, overrides={}, **kwargs):
readers = []
if metadata is None and database in self.metadata:
metadata = self.metadata[database]
if metadata is None:
print(f"No metadata available for {database}")
return []
if isinstance(metadata, str):
from snsql.metadata import Metadata
metadata = Metadata.from_file(metadata)
if len(overrides) > 0:
# make a copy
metadata = copy.deepcopy(metadata)
# apply overrides to only the first table in the metadata
table_name = list(metadata.m_tables)[0]
table = metadata.m_tables[table_name]
for propname in overrides:
propval = overrides[propname]
if propname == 'censor_dims':
table.censor_dims = propval
elif propname == 'clamp_counts':
table.clamp_counts = propval
elif propname == 'clamp_columns':
table.clamp_columns = propval
elif propname == 'max_ids' or propname == 'max_contrib':
table.max_ids = propval
else:
print(f"Unable to set override for {propname}={propval}")
if engine is not None:
engines = [engine]
else:
engines = [eng for eng in self.engines]
for engine in engines:
if engine in self.engines:
eng = self.engines[engine]
reader = None
try:
reader = eng.get_private_reader(metadata=metadata, privacy=privacy, dataset=database)
except Exception as e:
print(str(e))
raise ValueError(f"Unable to get a private reader for dataset {database} using {engine}")
finally:
if reader:
readers.append(reader)
return readers
def get_private_reader(self, *ignore, metadata=None, privacy, database, engine, overrides={}, **kwargs):
readers = self.get_private_readers(metadata=metadata, privacy=privacy, database=database, engine=engine, overrides=overrides)
return None if len(readers) == 0 else readers[0]
def get_connection(self, *ignore, database, engine, **kwargs):
if engine in self.engines:
eng = self.engines[engine]
return eng.get_connection(database=database)
else:
return None
def get_dialect(self, *ignore, engine, **kwargs):
if engine in self.engines:
eng = self.engines[engine]
return eng.dialect
else:
return None
def to_tuples(self, rowset):
if hasattr(rowset, 'toLocalIterator'): # it's RDD
if hasattr(rowset, 'columns'):
colnames = rowset.columns
try:
return [colnames] + [[c for c in r] for r in rowset.toLocalIterator()]
except:
return [colnames]
else:
return [[c for c in r] for r in rowset.collect()]
else:
return rowset
def download_data_files():
from dataloader.download_reddit import download_reddit
from dataloader.download_pums import download_pums
from dataloader.make_sqlite import make_sqlite
download_reddit()
download_pums()
make_sqlite()
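# A commented usage sketch (assuming snsql's Privacy class; the epsilon value and
# query below are illustrative only):
#
#   from snsql import Privacy
#   dbs = DbCollection()
#   print(dbs)  # list configured engines and datasets
#   reader = dbs.get_private_reader(privacy=Privacy(epsilon=1.0), database='PUMS', engine='pandas')
#   if reader:
#       res = reader.execute('SELECT COUNT(*) FROM PUMS.PUMS')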
|
11552714
|
import time
import pytest
from dockupdater.lib.config import OptionRegex
from dockupdater.update.container import Container
from dockupdater.update.service import Service
def prepare_containers(scanner):
print("creating containers and services")
if not scanner.client.swarm.attrs:
scanner.client.swarm.init(force_new_cluster=True)
try:
scanner.client.services.create(
"busybox:latest",
name="TestService1",
tty=True,
labels={
"dockupdater.wait": "1",
},
container_labels={
"dockupdater.disable": "true",
},
)
scanner.client.services.create(
"busybox",
tty=True,
name="TestService2",
container_labels={
"dockupdater.disable": "true",
},
)
scanner.client.containers.run(
"busybox",
tty=True,
detach=True,
name="Test1",
)
scanner.client.containers.run(
"busybox",
tty=True,
detach=True,
name="Test2",
labels={
"dockupdater.disable": "true",
"dockupdater.stops": "Test1",
"dockupdater.starts": "Test1",
},
)
scanner.client.containers.run(
"busybox",
tty=True,
detach=True,
name="Test3",
labels={
"dockupdater.enable": "false",
},
)
scanner.client.containers.run(
"busybox",
tty=True,
detach=True,
name="Test4",
labels={
"dockupdater.enable": "true",
},
)
except:
print("Tests containers already exist")
print("Done")
@pytest.mark.docker
def test_scanner_get_containers(scanner):
prepare_containers(scanner)
containers = scanner.get_containers(OptionRegex("Test[1-2]"))
assert len(containers) == 2
containers = scanner.get_containers(OptionRegex("Nomatch"))
assert len(containers) == 0
@pytest.mark.docker
def test_scanner_get_services(scanner):
prepare_containers(scanner)
services = scanner.get_services(OptionRegex("TestService[2-3]"))
assert len(services) == 1
services = scanner.get_services(OptionRegex("Nomatch"))
assert len(services) == 0
@pytest.mark.docker
def test_scanner_starts_stops_before_update(docker_client, scanner):
prepare_containers(scanner)
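# Test2 carries dockupdater.stops/starts labels that point at Test1 (see
# prepare_containers above), so the scanner is expected to stop Test1 before
# updating Test2 and start it again afterwards.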
container = Container(docker_client, scanner.get_containers(OptionRegex("Test2"))[0])
container.load_new_config()
assert container.name == "Test2"
assert container.config.stops[0].regex == "Test1"
assert container.config.starts[0].regex == "Test1"
assert scanner.get_containers(OptionRegex("Test1"))[0].status == "running"
scanner.stops_before_update(container)
assert scanner.get_containers(OptionRegex("Test1"))[0].status != "running"
scanner.starts_after_update(container)
assert scanner.get_containers(OptionRegex("Test1"))[0].status == "running"
@pytest.mark.docker
def test_scanner_scan_monitored(scanner):
prepare_containers(scanner)
monitored = scanner.scan_monitored()
assert len([object.name for object in monitored if object.name in ["Test1", "Test3", "Test4"]]) == 3
assert len([object.name for object in monitored if object.name in ["Test2"]]) == 0
assert len([object.name for object in monitored if object.name in ["TestService1", "TestService2"]]) == 2
scanner.config.label = True
monitored = scanner.scan_monitored()
assert len([object.name for object in monitored if object.name in ["Test4"]]) == 1
assert len([object.name for object in monitored if object.name in ["Test1", "Test2", "Test3"]]) == 0
assert len([object.name for object in monitored if object.name in ["TestService1", "TestService2"]]) == 0
@pytest.mark.docker
@pytest.mark.slow
def test_scanner_update(scanner, mocker, monkeypatch):
prepare_containers(scanner)
mocker.patch("dockupdater.update.container.Container.update")
mocker.patch("dockupdater.update.service.Service.update")
monkeypatch.setattr(time, 'sleep', lambda s: None)
scanner.update()
Service.update.assert_any_call()
Container.update.assert_not_called()
|
11552718
|
import os, tempfile, time
from os import getenv as _
from utils import uploader
from dotenv import load_dotenv
load_dotenv()
os.environ['UPLOAD_DRIVE'] = os.sys.argv[1]
handle, params = uploader().handle, uploader().params()
def upload(size):
fd, path = tempfile.mkstemp()
with open(path, 'wb') as f:
f.write(os.urandom(size * 1048576 - params['padding']))
r = handle(path)
os.close(fd)
os.unlink(path)
return r
def test(curr, step):
maps = {}
reve = False
reve_inc = None
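# Search strategy: grow the upload size by `step` until an upload fails (reve
# flips to True), then walk up/down in 1 MiB increments to pin down the largest
# size that still succeeds (reported as reve_inc).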
print(f'Starting test {_("UPLOAD_DRIVE")}:')
while True:
if curr in maps:
result = maps[curr]
else:
result = maps[curr] = upload(curr)
print('%dM\t%s\t%s' % (curr, 'OK' if result else 'FAIL', result))
if not result:
reve = True
if not reve and curr > 20:
step = 20
if not reve and curr > 50:
step = 30
if not result and reve_inc is not None:
print(f'\n---\nFinally ... {reve_inc}M')
exit(0)
if reve and result:
reve_inc = curr
curr += 1
elif reve and not result:
if (curr - 1) % 5 == 0:
curr -= 1
else:
step //= 2
curr -= max(1, step)
if curr < 1:
curr = 1
reve_inc = 0
elif not reve:
curr += step
if __name__ == '__main__':
# print(handle('/Users/ika/Desktop/test/9913509E9DE4492E0E903B4C2C66E98D.gif'))
# print(handle('/Users/ika/Desktop/test/ACFC928140EE4FA072F4D6EB7CB35245.jpg'))
# print(handle('/Users/ika/Desktop/test/out00006.ts'))
test(1, 10)
|
11552844
|
import re, nltk, sys
from nltk.tokenize import StanfordTokenizer
tokenizer = StanfordTokenizer(r'../common/stanford-postagger-2015-04-20/stanford-postagger.jar')
for line in open("TEST_FILE.txt"):
m = re.match(r'^([0-9]+)\s"(.+)"$', line.strip())
if m is not None:
txtfile = open("test/%s.txt" % m.group(1), 'w')
annfile = open("test/%s.ann" % m.group(1), 'w')
line = m.group(2)
text = []
t = line.split("<e1>")
text.append(t[0])
e1start = len(t[0])
t = t[1].split("</e1>")
e1 = t[0]
text.append(t[0])
e1end = len(t[0])+e1start
t = t[1].split("<e2>")
text.append(t[0])
e2start = len(t[0])+e1end
t = t[1].split("</e2>")
text.append(t[0])
e2 = t[0]
e2end = len(t[0])+e2start
text.append(t[1])
text = " ".join(tokenizer.tokenize("".join(text)))
txtfile.write(text)
txtfile.write("\n")
offset = 0
err = False
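# The Stanford tokenizer may insert or move spaces, so the original character
# offsets of <e1>/<e2> can drift; scan forward until the entity string lines up
# again, re-tokenizing the entity itself if the raw span can no longer be found.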
while e1 != text[e1start+offset:e1end+offset]:
offset += 1
if e1end+offset > len(text):
break
if e1end+offset > len(text):
offset = 0
e1 = " ".join(tokenizer.tokenize(e1))
e1end = e1start + len(e1)
while e1 != text[e1start+offset:e1end+offset]:
offset += 1
if e1end+offset > len(text):
print("%d\t%s" % (m.group(1), text))
err = True
break
if not err:
annfile.write("T1\tTerm %d %d\t%s\n" % (e1start+offset, e1end+offset, e1))
err = False
offset = 0
while e2 != text[e2start+offset:e2end+offset]:
offset+=1
if e2end+offset > len(text):
break
if e2end+offset > len(text):
offset = 0
e2 = " ".join(tokenizer.tokenize(e2))
e2end = e2start + len(e2)
while e2 != text[e2start+offset:e2end+offset]:
offset += 1
if e2end+offset > len(text):
print("%d\t%s" % (m.group(1), text))
err = True
break
if not err:
annfile.write("T2\tTerm %d %d\t%s\n" % (e2start+offset, e2end+offset, e2))
txtfile.close()
annfile.close()
|
11552857
|
from misc.utils import *
from misc.priority_queue import PriorityQueue
Node = namedtuple('Node', ['cost', 'operators'])
"""
class Node(Set, object):
def __init__(self, operators):
self.operators = operators
self.cost = self._cost()
def __contains__(self, operator):
return operator in self.operators
def __iter__(self):
return iter(self.operators)
def __len__(self):
return len(self.operators)
def _cost(self):
return sum(operator.cost for operator in self.operators)
# https://docs.python.org/2/library/stdtypes.html#set.union
# set.union
class UnitNode(Node):
def _cost(self):
return len(self.operators)
"""
# TODO - merge process that attempts to reason about operators that accomplish each other's goals by extending union
def relaxed_plan(state, goal, operators, unit=False, greedy=True):
variable_nodes = defaultdict(dict)
operator_nodes = {}
conditions = defaultdict(lambda: defaultdict(list))
unsatisfied = {}
queue = PriorityQueue()
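# Dijkstra-style sweep over (variable, value) pairs: an operator becomes
# applicable once every one of its conditions has been reached, and its
# supporting set is the union of the operators that achieved those conditions,
# costed additively (or by operator count when unit=True).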
def union(nodes):
operators = set(flatten(node.operators for node in nodes))
cost = sum(operator.cost for operator in operators) if not unit else len(operators)
return Node(cost, operators)
def applicable_operator(operator):
operator_nodes[operator] = union(variable_nodes[v][val] for v, val in operator.cond())
if operator != goal:
variable_node = union([operator_nodes[operator], Node(None, {operator})])
for var2, value2 in operator.eff():
if value2 not in variable_nodes[var2] or variable_node.cost < variable_nodes[var2][value2].cost:
variable_nodes[var2][value2] = variable_node
queue.push(variable_node.cost, (var2, value2))
for operator in operators + [goal]:
unsatisfied[operator] = len(operator.conditions)
for var, value in operator.cond(): # TODO - store this in memory
conditions[var][value].append(operator)
if unsatisfied[operator] == 0:
applicable_operator(operator)
if greedy and operator == goal:
return operator_nodes, variable_nodes
for var in conditions:
for value in conditions[var]:
if state[var] == value:
variable_nodes[var][value] = Node(0, {})
queue.push(variable_nodes[var][value].cost, (var, value))
processed = defaultdict(set)
while not queue.empty():
var, value = queue.pop()
if value in processed[var]: continue
processed[var].add(value)
for operator in conditions[var][value]:
unsatisfied[operator] -= 1
if unsatisfied[operator] == 0:
applicable_operator(operator)
if greedy and operator == goal:
return operator_nodes, variable_nodes
return operator_nodes, variable_nodes
# TODO - propagate relaxed plan backwards to find goals
def none(*args):
return []
def applicable(goal, operator_nodes, *args):
return list(filter(lambda o: len(operator_nodes[o].operators) == 0, operator_nodes))
def any_goals(goal, operator_nodes, variable_nodes):
goals = set(flatten(o.cond() for o in (operator_nodes[goal].operators | {goal})))
return list(filter(lambda o: any(e in goals for e in o.eff()), applicable(goal, operator_nodes, variable_nodes)))
def first_goals(goal, operator_nodes, variable_nodes):
goals = list(filter(lambda item: len(variable_nodes[item[0]][item[1]].operators) == 1,
set(flatten(o.cond() for o in (operator_nodes[goal].operators | {goal})))))
return list(filter(lambda o: o != goal and any(e in goals for e in o.eff()),
applicable(goal, operator_nodes, variable_nodes)))
def first_operators(goal, operator_nodes, *args):
return [operator for operator in operator_nodes[goal].operators if len(operator_nodes[operator].operators) == 0]
def first_combine(goal, operator_nodes, variable_nodes):
one = first_operators(goal, operator_nodes, variable_nodes)
two = first_goals(goal, operator_nodes, variable_nodes) # TODO - include applicable
return list(one) + list(set(two) - set(one))
def sa(state, goal, operators, helpful_actions, unit=False):
operator_nodes, variable_nodes = relaxed_plan(state, goal, operators, unit=unit)
if goal not in operator_nodes: return None, []
return operator_nodes[goal].cost, helpful_actions(goal, operator_nodes, variable_nodes)
def sa_fn(helpful_actions, unit=False):
return lambda s, g, o: sa(s, g, o, helpful_actions, unit=unit)
def h_sa(state, goal, operators):
operator_costs = relaxed_plan(state, goal, operators)[0]
return operator_costs[goal].cost if goal in operator_costs else None
def h_sa_unit(state, goal, operators):
operator_costs = relaxed_plan(state, goal, operators, unit=True)[0]
return operator_costs[goal].cost if goal in operator_costs else None
|
11552875
|
from crudlfap import shortcuts as crudlfap
from .models import Post
class AuthBackend:
def authenticate(self, *args):
return None # prevent auth from this backend
def has_perm(self, user_obj, perm, obj=None):
view = obj
if view.model != Post:
return False
user = user_obj
code = view.permission_shortcode
if code in ('list', 'detail'):
return True
elif code == 'add':
return user.is_authenticated
elif code == 'change':
return view.object.editable(user)
elif code == 'delete':
if hasattr(view, 'object'):
return view.object.editable(user)
# DeleteObjects relies on get_queryset to secure runtime
return user.is_authenticated
return super().has_perm(user_obj, perm, obj)
class PostMixin:
def get_exclude(self):
if not self.request.user.is_staff:
return ['owner']
return super().get_exclude()
class PostCreateView(PostMixin, crudlfap.CreateView):
def form_valid(self):
self.form.instance.owner = self.request.user
return super().form_valid()
class PostUpdateView(PostMixin, crudlfap.UpdateView):
pass
class PostListView(crudlfap.ListView):
def get_filter_fields(self):
if self.request.user.is_staff:
return ['owner']
return []
class PostRouter(crudlfap.Router):
fields = '__all__'
icon = 'book'
model = Post
views = [
crudlfap.DeleteObjectsView,
crudlfap.DeleteView,
PostUpdateView,
PostCreateView,
crudlfap.DetailView,
PostListView.clone(
search_fields=['name'],
),
]
def get_queryset(self, view):
qs = self.model.objects.get_queryset()
if view.request.user.is_superuser:
return qs
elif view.permission_shortcode in ('change', 'delete'):
return qs.editable(view.request.user)
elif view.permission_shortcode in ('list', 'detail'):
return qs.readable(view.request.user)
return qs.none()
PostRouter().register()
|
11552889
|
from reaction_filters import ReactionFiltersEnum
from reaction_filters.base_reaction_filter import BaseReactionFilter
from reaction_filters.non_selective_filter import NonSelectiveFilter
from reaction_filters.selective_filter import SelectiveFilter
from running_modes.configurations.reaction_filter_configuration import ReactionFilterConfiguration
class ReactionFilter:
def __new__(cls, configuration: ReactionFilterConfiguration) -> BaseReactionFilter:
enum = ReactionFiltersEnum()
if configuration.type == enum.NON_SELECTIVE:
return NonSelectiveFilter(configuration)
elif configuration.type == enum.SELECTIVE:
return SelectiveFilter(configuration)
else:
raise TypeError(f"Requested filter type: '{configuration.type}' is not implemented.")
|
11552965
|
import fire
import os
import re
import torch
from models import *
from utils.audio import *
from utils.display import simple_table
from hparams import hparams
use_cuda = torch.cuda.is_available()
batch_size = 1
def _pad_2d(x, max_len, constant_values=0):
x = np.pad(x, [(0, max_len - len(x)), (0, 0)],
mode="constant",
constant_values=constant_values)
return x
def get_output_base_path(checkpoint_path):
base_dir = os.path.dirname(checkpoint_path)
match = re.compile(r'.*checkpoint_step([0-9]+)\.pth').match(
checkpoint_path)
name = 'eval-%d' % int(match.group(1)) if match else 'eval'
return os.path.join(base_dir, name)
def gen_from_file(model, mel, save_path, batched, target, overlap):
if isinstance(mel, list):
upsample = int(hparams.sample_rate * hparams.frame_shift_ms / 1000)
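# Samples generated per mel frame (the hop size); used below to trim each
# padded batch item back to its true length.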
for i in range(0, len(mel), batch_size):
inputs = mel[i:min(i + batch_size, len(mel))]
input_lengths = [x.shape[0] for x in inputs]
max_length = max(input_lengths)
inputs = [_pad_2d(x, max_length, -4) for x in inputs]
inputs = torch.tensor(np.stack(inputs)).permute(0, 2, 1)
inputs = inputs.cuda() if use_cuda else inputs
samples = model.generate(inputs, batched, target, overlap,
hparams.mu_law)
for bi in range(inputs.size(0)):
input_length = input_lengths[bi] * upsample
output = samples[bi, :input_length]
#if hparams.preemphasis > 0:
# output = inv_preemphasis(output)
save_wav(output, save_path[i + bi])
else:
mel = np.load(mel).T
mel = torch.tensor(mel).unsqueeze(0)
mel = mel.cuda() if use_cuda else mel
samples = model.generate(mel, batched, target, overlap, hparams.mu_law)
output = samples[0]
#if hparams.preemphasis > 0:
# output = inv_preemphasis(output)
save_wav(output, save_path)
def main(ckpt_path, input_path, output_path=None, list_path=None, config=''):
hparams.parse(config)
Model = get_model(hparams)
batched = hparams.batched
samples = hparams.gen_at_checkpoint
target = hparams.target
overlap = hparams.overlap
if output_path is None:
output_path = get_output_base_path(ckpt_path)
os.makedirs(output_path, exist_ok=True)
checkpoint = torch.load(ckpt_path, map_location='cpu')
if list_path is not None:
with open(list_path) as fin:
fids = [line.strip() for line in fin.readlines()]
else:
fids = []
for filename in os.listdir(input_path):
if '.npy' in filename:
fids.append(filename.split('.')[0])
mel, output = [], []
for fid in fids:
mel.append(np.load(os.path.join(input_path, fid + '.npy')))
output.append(os.path.join(output_path, fid + '.wav'))
print('\nInitialising Model...\n')
hop_length = int(hparams.frame_shift_ms * hparams.sample_rate / 1000)
model = Model(rnn_dims=hparams.rnn_dims,
fc_dims=hparams.fc_dims,
bits=hparams.bits,
pad=hparams.pad,
upsample_factors=hparams.upsample_factors,
feat_dims=hparams.num_mels,
compute_dims=hparams.compute_dims,
res_out_dims=hparams.res_out_dims,
res_blocks=hparams.res_blocks,
hop_length=hop_length,
sample_rate=hparams.sample_rate,
mode=hparams.mode)
if use_cuda:
model = model.cuda()
model.load_state_dict(checkpoint["state_dict"])
with torch.no_grad():
gen_from_file(model, mel, output, batched, target, overlap)
if __name__ == '__main__':
fire.Fire(main)
print('\nDone!\n')
|
11552978
|
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from nipype.interfaces.ants import registration, segmentation
from nipype.interfaces.ants.segmentation import Atropos
from nipype.interfaces.ants import Registration, ApplyTransforms
from nipype.interfaces.utility import Rename
from nipype.interfaces.ants.registration import CompositeTransformUtil, CompositeTransformUtilInputSpec
from nipype.interfaces.ants.resampling import ApplyTransformsInputSpec
from nipype.interfaces.base import InputMultiPath
from src.utils import splitext, cmd
from scipy.io import loadmat
from scipy.ndimage import center_of_mass
import numpy as np
import nibabel as nib
import nipype.pipeline.engine as pe
import SimpleITK as sitk
import os
import re
class APPIANCompositeTransformUtilInputSpec(CompositeTransformUtilInputSpec) :
in_file_1 = traits.File()
in_file_2 = traits.File()
in_file = InputMultiPath(File(exists=True), argstr='%s...', position=3, desc='Input transform file(s)')
class APPIANCompositeTransformUtil(CompositeTransformUtil):
input_spec = APPIANCompositeTransformUtilInputSpec
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
self.inputs.in_file = [self.inputs.in_file_1, self.inputs.in_file_2]
self.inputs.out_file = os.getcwd()+os.sep+"composite.h5"
return super(APPIANCompositeTransformUtil, self)._parse_inputs(skip=skip)
class APPIANApplyTransformsInputSpec(BaseInterfaceInputSpec) :
transform_1 = traits.File()
transform_2 = traits.File()
transform_3 = traits.File()
invert_1 = traits.Bool(default_value=False, usedefault=True)
invert_2 = traits.Bool(default_value=False, usedefault=True)
invert_3 = traits.Bool(default_value=False, usedefault=True)
reference_image=traits.File(mandatory=True, exists=True)
input_image=traits.File(mandatory=True, exists=True)
output_image = traits.File()
output_image_inverse = traits.File()
target_space=traits.Str(default_value="undefined", usedefault=True)
source_space=traits.Str(default_value="undefined", usedefault=True)
interpolation = traits.Str(usedefault=True, default_value='BSpline')
class APPIANApplyTransformsOutputSpec(TraitedSpec) :
output_image = traits.File(exists=True)
inverse_output_image = traits.File(exists=True)
class APPIANApplyTransforms(BaseInterface):
input_spec = APPIANApplyTransformsInputSpec
output_spec = APPIANApplyTransformsOutputSpec
def _run_interface(self, runtime):
transforms = []
invert_transform_flags = []
if isdefined(self.inputs.transform_1) :
transforms.append(self.inputs.transform_1)
invert_transform_flags.append(self.inputs.invert_1)
if isdefined(self.inputs.transform_2) :
transforms.append(self.inputs.transform_2)
invert_transform_flags.append(self.inputs.invert_2)
if isdefined(self.inputs.transform_3) :
transforms.append(self.inputs.transform_3)
invert_transform_flags.append(self.inputs.invert_3)
flip = lambda x : 0 if x == 1 else 1
flipped_invert_transform_flags = map(flip, invert_transform_flags)
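# The flipped flags drive the second antsApplyTransforms call below, which
# resamples in the opposite direction (reference -> input space) to produce the
# inverse-warped output.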
#output files
split =splitext(os.path.basename( self.inputs.input_image))
self.inputs.output_image =os.getcwd() + os.sep + split[0] + split[1]
if '_space-' in self.inputs.output_image :
self.inputs.output_image = re.sub('_space-[A-z]*_',"_space-"+self.inputs.target_space+"_", self.inputs.output_image)
self.inputs.output_image_inverse = re.sub('_space-[A-z]*_',"_space-"+self.inputs.source_space+"_", self.inputs.output_image)
#combine transformation files and output flags
transforms_zip = zip(transforms, invert_transform_flags)
flipped_transforms_zip = zip(transforms, flipped_invert_transform_flags)
transform_string = ' '.join( [ '-t [ '+str(t)+' , '+str(int(f))+' ]' for t, f in transforms_zip if t != None ])
flipped_transform_string = ' '.join( [ '-t [ '+str(t)+' , '+str(int(f))+' ]' for t, f in flipped_transforms_zip if t != None ])
# apply forward transform
cmdline = "antsApplyTransforms --float -v 1 -e 3 -d 3 -n "+ self.inputs.interpolation + " -i "+self.inputs.input_image+" "+ transform_string +" -r "+self.inputs.reference_image+" -o "+self.inputs.output_image
cmd(cmdline)
# apply inverse transform
cmdline = "antsApplyTransforms --float -v 1 -e 3 -d 3 -n "+ self.inputs.interpolation + " -r "+self.inputs.input_image+" "+ flipped_transform_string +" -i "+self.inputs.reference_image+" -o "+self.inputs.output_image_inverse
cmd(cmdline)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["output_image"] = self.inputs.output_image
outputs["inverse_output_image"] = self.inputs.output_image_inverse
return outputs
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
return super(APPIANApplyTransforms, self)._parse_inputs(skip=skip)
class APPIANConcatenateTransformsInputSpec(BaseInterfaceInputSpec) :
transform_1 = traits.File(mandatory=True, exists=True)
transform_2 = traits.File(mandatory=True, exists=True)
#reference_image = traits.File()
out_file = traits.File(desc="Composite transorfmation matrix")
class APPIANConcatenateTransformsOutputSpec(TraitedSpec):
out_file = traits.File(desc="Composite transorfmation matrix")
class APPIANConcatenateTransforms(BaseInterface):
input_spec = APPIANConcatenateTransformsInputSpec
output_spec= APPIANConcatenateTransformsOutputSpec
def _run_interface(self, runtime):
#Get extension for input transformation files
ext_1=splitext(self.inputs.transform_1)[1]
ext_2=splitext(self.inputs.transform_2)[1]
if ext_1 in ['.mat','.txt'] and ext_2 in ['.mat','.txt']:
self.inputs.out_file=os.getcwd()+os.sep+'composite_affine.mat'
elif ext_1 == '.h5' or ext_2 == '.h5':
self.inputs.out_file=os.getcwd()+os.sep+'composite_warp.h5'
cmd("CompositeTransformUtil --assemble " + ' '.join([self.inputs.out_file, self.inputs.transform_1, self.inputs.transform_2]) )
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = self.inputs.out_file
return outputs
class APPIANRegistrationInputs(BaseInterfaceInputSpec):
fixed_image = traits.File(mandatory=True, exists=True, desc="Fixed Image")
fixed_image_mask = traits.File(desc="Mask for fixed image")
moving_image = traits.File(mandatory=True, exists=True, desc="Moving Image")
moving_image_mask = traits.File(desc="Mask for moving image")
warped_image = traits.File(desc="Warped image")
inverse_warped_image = traits.File(desc="Inverse warped image")
composite_transform = traits.File(desc="Composite transformation matrix")
inverse_composite_transform = traits.File(desc="Inverse composite transformation matrix")
user_ants_command = traits.File(desc="User provided normalization file")
normalization_type = traits.Str(desc="Type of registration: rigid, affine, nl", usedefault=True, default_value="nl")
moving_image_space = traits.Str(desc="Name of coordinate space for moving image", usedefault=True, default_value="source")
fixed_image_space = traits.Str(desc="Name of coordinate space for fixed image", usedefault=True, default_value="target")
interpolation = traits.Str(desc="Type of interpolation: Linear, NearestNeighbor, MultiLabel[<sigma=imageSpacing>,<alpha=4.0>], Gaussian[<sigma=imageSpacing>,<alpha=1.0>], BSpline[<order=3>], CosineWindowedSinc, WelchWindowedSinc, HammingWindowedSinc, LanczosWindowedSinc, GenericLabel", usedefault=True, default_value="Linear")
#misalign_matrix = traits.Str(desc="Misalignment matrix", usedefault=True, default_value=" ")
rotation_error = traits.List( desc="Rotation Error")
translation_error = traits.List(desc="Translation Error" )
out_matrix = traits.File(desc="Composite transformation matrix")
out_matrix_inverse = traits.File(desc="Composite transformation matrix")
class APPIANRegistrationOutputs(TraitedSpec):
warped_image = traits.File(desc="Warped image")
inverse_warped_image = traits.File(desc="Inverse warped image")
composite_transform = traits.File(desc="Composite transformation matrix")
out_matrix = traits.File(desc="Composite transformation matrix")
out_matrix_inverse = traits.File(desc="Composite transformation matrix")
inverse_composite_transform = traits.File(desc="Inverse composite transformation matrix")
class APPIANRegistration(BaseInterface):
input_spec = APPIANRegistrationInputs
output_spec= APPIANRegistrationOutputs
def read_user_command_line(self) :
cmdline=''
if not os.path.exists(self.inputs.user_ants_command) :
print("Error : could not read --user-ants-command file specified by user ", self.inputs.user_ants_command)
exit(1)
else :
with open(self.inputs.user_ants_command) as f:
for l in f.readlines():
print('read', l)
cmdline += ' ' + l.rstrip("\n")
if 'SyN' in cmdline :
normalization_type = 'nl'
elif 'Affine' in cmdline :
normalization_type = 'affine'
else :
normalization_type = 'rigid'
return cmdline, normalization_type
def replace_user_command_line(self, cmdline):
replacement=[ ['fixed_image',self.inputs.fixed_image],
['moving_image',self.inputs.moving_image],
['fixed_image_mask', self.inputs.fixed_image_mask],
['moving_image_mask', self.inputs.moving_image_mask],
['composite_transform', self.inputs.composite_transform],
['inverse_composite_transform', self.inputs.inverse_composite_transform],
['inverse_warped_image', self.inputs.inverse_warped_image],
#Warning, inverse_warped_image must come before warped_image
['warped_image', self.inputs.warped_image],
['interpolation_method', self.inputs.interpolation]
]
for string, variable in replacement :
if isdefined(variable) :
cmdline = re.sub(string, variable, cmdline)
print("User provided ANTs command line")
return cmdline
def default_command_line(self):
# If user has not specified their own file with an ANTs command line argument
# create a command line argument based on whether the normalization type is set to
# rigid, affine, or non-linear.
mask_string=""
if isdefined(self.inputs.fixed_image_mask) and isdefined(self.inputs.moving_image_mask) :
if os.path.exists(self.inputs.fixed_image_mask) and os.path.exists(self.inputs.moving_image_mask) :
mask_string=" --masks ["+self.inputs.fixed_image_mask+","+self.inputs.moving_image_mask+"] "
### Base Options
cmdline="antsRegistration --verbose 1 --float --collapse-output-transforms 1 --dimensionality 3 "+mask_string+" --initial-moving-transform [ "+self.inputs.fixed_image+", "+self.inputs.moving_image+", 1 ] --initialize-transforms-per-stage 0 --interpolation "+self.inputs.interpolation+' '
### Rigid
cmdline+=" --transform Rigid[ 0.1 ] --metric Mattes[ "+self.inputs.fixed_image+", "+self.inputs.moving_image+", 1, 32, Regular, 0.3 ] --convergence [ 500x250x200x100, 1e-08, 20 ] --smoothing-sigmas 8.0x4.0x2.0x1.0vox --shrink-factors 8x4x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 0 "
#output = " --output [ transform ] "
### Affine
if self.inputs.normalization_type == 'affine' or self.inputs.normalization_type == 'nl':
cmdline += " --transform Affine[ 0.1 ] --metric Mattes[ "+self.inputs.fixed_image+", "+self.inputs.moving_image+", 1, 32, Regular, 0.3 ] --convergence [ 500x400x300 , 1e-08, 20 ] --smoothing-sigmas 4.0x2.0x1.0vox --shrink-factors 4x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 0 "
### Non-linear
if self.inputs.normalization_type == 'nl':
#cmdline += " --transform SyN[ 0.1, 3.0, 0.0] --metric Mattes[ "+self.inputs.fixed_image+", "+self.inputs.moving_image+", 0.5, 64, None ] --convergence [ 100x100x100x100, 1e-6,10 ] --smoothing-sigmas 4.0x2.0x1.0x0.0vox --shrink-factors 4x2x1x1 --winsorize-image-intensities [ 0.005, 0.995 ] --write-composite-transform 1 "
cmdline += " --transform SyN[ 0.1, 3.0, 0.0] --metric Mattes[ "+self.inputs.fixed_image+", "+self.inputs.moving_image+", 0.5, 64, None ] --convergence [ 500x400x300x200, 1e-6,10 ] --smoothing-sigmas 4.0x2.0x1.0x0.0vox --shrink-factors 4x2x1x1 --winsorize-image-intensities [ 0.005, 0.995 ] --write-composite-transform 1 "
output = " --output [ transform, "+self.inputs.warped_image+", "+self.inputs.inverse_warped_image+" ] "
cmdline += output
return cmdline
def apply_misalignment(self) :
com = center_of_mass( nib.load(self.inputs.fixed_image).get_data() )
img = nib.load(self.inputs.fixed_image)
com_world = [img.affine[0,3]+com[0] * img.affine[0,2],
img.affine[1,3]+com[1] * img.affine[1,1],
img.affine[2,3]+com[2] * img.affine[2,0]
]
tfm = sitk.VersorRigid3DTransform()
rotations_radians = list(np.pi * np.array(self.inputs.rotation_error)/180.)
tfm.SetParameters(rotations_radians + self.inputs.translation_error)
tfm.SetFixedParameters(com_world)
print('Center of Mass :', com_world)
print(tfm.GetParameters())
print(tfm.GetFixedParameters())
misalign_matrix=os.getcwd()+os.sep+'misalignment_rot_x-{}_y-{}_z-{}_trans_x-{}_y-{}_z-{}.tfm'.format(*self.inputs.rotation_error,*self.inputs.translation_error)
sitk.WriteTransform(tfm, misalign_matrix)
print('Warning: misaligning PET to MRI alignment using file', misalign_matrix)
cmdline = "antsApplyTransforms -e 3 -d 3 -n Linear -i "+self.inputs.moving_image+" -t "+ misalign_matrix+" "+self.inputs.out_matrix +" -r "+self.inputs.fixed_image+" -o Linear["+self.inputs.out_matrix+",0]"
print(cmdline)
cmd( cmdline )
cmdline = "antsApplyTransforms -e 3 -d 3 -n Linear -i "+self.inputs.moving_image+" -t "+ misalign_matrix+" "+self.inputs.out_matrix +" -r "+self.inputs.fixed_image+" -o Linear["+self.inputs.out_matrix_inverse+",1]"
print(cmdline)
cmd( cmdline )
cmdline = "antsApplyTransforms -e 3 -d 3 -n Linear -i "+self.inputs.moving_image+" -t "+self.inputs.out_matrix +" -r "+self.inputs.fixed_image+" -o "+self.inputs.warped_image
print(cmdline)
cmd( cmdline )
cmdline = "antsApplyTransforms -e 3 -d 3 -n Linear -i "+self.inputs.fixed_image+" -t "+self.inputs.out_matrix_inverse +" -r "+self.inputs.moving_image+" -o "+self.inputs.inverse_warped_image
print(cmdline)
cmd( cmdline )
def apply_linear_transforms(self):
#Command line to apply the computed linear transform (and derive its inverse) with antsApplyTransforms
if not os.path.exists(self.inputs.warped_image) :
cmdline = "antsApplyTransforms -e 3 -d 3 -n Linear -i "+self.inputs.moving_image+" -t "+ self.inputs.out_matrix +" -r "+self.inputs.fixed_image+" -o "+self.inputs.warped_image
print(cmdline)
cmd( cmdline )
if not os.path.exists(self.inputs.out_matrix_inverse) :
cmdline = "antsApplyTransforms -e 3 -d 3 -n Linear -i "+self.inputs.moving_image+" -t "+self.inputs.out_matrix +" -r "+self.inputs.fixed_image+" -o Linear["+self.inputs.out_matrix_inverse+",1]"
print(cmdline)
cmd( cmdline )
def mat2txt(self, ii_fn, oo_fn):
print(ii_fn, oo_fn)
tfm=sitk.ReadTransform(ii_fn)
sitk.WriteTransform( tfm, oo_fn )
return 0
def _run_interface(self, runtime):
normalization_type = self.inputs.normalization_type
#Setup ANTs command line arguments
if isdefined(self.inputs.user_ants_command):
cmdline, self.inputs.normalization_type = self.read_user_command_line()
self._set_outputs()
cmdline = self.replace_user_command_line(cmdline)
else :
self._set_outputs()
cmdline = self.default_command_line()
print(self.inputs);
#Run antsRegistration on command line
print("Ants command line:\n", cmdline)
p = cmd(cmdline)
if self.inputs.normalization_type in ['rigid', 'affine']:
#Convert linear transforms from .mat to .txt. antsRegistration produces .mat file based on output
#prefix, but this format seems to be harder to work with / lead to downstream errors
#If linear transform, then have to apply transformations to input image
self.apply_linear_transforms()
if isdefined( self.inputs.rotation_error) or isdefined( self.inputs.translation_error ) :
if self.inputs.rotation_error != [0,0,0] and self.inputs.translation_error != [0,0,0] :
print('Warning: Applying misalignment')
print("\tRotation:",self.inputs.rotation_error)
print("\tTranslation:",self.inputs.translation_error)
self.apply_misalignment()
return runtime
def _create_output_file(self, fn, space):
basefn = os.path.basename(fn)
if not '_space-' in basefn :
basefn_split = splitext(basefn)
return basefn_split[0] + '_space-' + space + basefn_split[1]
else :
return '_'.join( [ f if not 'space-' in f else 'space-'+space for f in basefn.split('_') ] )
def _set_outputs(self):
self.inputs.warped_image=os.getcwd()+os.sep+ self._create_output_file(self.inputs.moving_image,self.inputs.fixed_image_space )
self.inputs.inverse_warped_image=os.getcwd()+os.sep+self._create_output_file(self.inputs.fixed_image, self.inputs.moving_image_space )
if self.inputs.normalization_type == 'nl' :
self.inputs.composite_transform=os.getcwd()+os.sep+'transformComposite.h5'
self.inputs.inverse_composite_transform=os.getcwd()+os.sep+'transformInverseComposite.h5'
else :
self.inputs.out_matrix=os.getcwd()+os.sep+'transform0GenericAffine.mat'
self.inputs.out_matrix_inverse=os.getcwd()+os.sep+'transform0GenericAffine_inverse.mat'
def _list_outputs(self):
outputs = self.output_spec().get()
self._set_outputs()
if isdefined(self.inputs.warped_image):
outputs["warped_image"] = self.inputs.warped_image
if isdefined(self.inputs.inverse_warped_image):
outputs["inverse_warped_image"] = self.inputs.inverse_warped_image
if isdefined(self.inputs.composite_transform):
outputs["composite_transform"]=self.inputs.composite_transform
if isdefined(self.inputs.out_matrix):
outputs["out_matrix"]=self.inputs.out_matrix
if isdefined(self.inputs.out_matrix_inverse):
outputs["out_matrix_inverse"]=self.inputs.out_matrix_inverse
if isdefined(self.inputs.inverse_composite_transform):
outputs["inverse_composite_transform"]= self.inputs.inverse_composite_transform
return outputs
|
11552991
|
import logging
from fcntl import ioctl
import v4l2
class V4L2Ctrls:
def __init__(self, device, fd):
self.device = device
self.fd = fd
self.get_device_cap()
self.get_device_controls()
def setup_v4l2_ctrls(self, params):
for k, v in params.items():
if k in ['width', 'height', 'fps', 'auto_sleep', 'rotation',
'capture_format', 'capture_memory',
'decoder', 'decoder_input_format', 'decoder_memory',
'encoder', 'encoder_input_format', 'encoder_memory',
] or k.startswith('uvcx_'):
continue
ctrl = find_by_name(self.ctrls, k)
if ctrl == None:
logging.warning(f'Can\'t find {k} v4l2 control')
continue
intvalue = 0
if ctrl.type == v4l2.V4L2_CTRL_TYPE_INTEGER:
intvalue = int(v)
elif ctrl.type == v4l2.V4L2_CTRL_TYPE_BOOLEAN:
intvalue = int(bool(v))
elif ctrl.type == v4l2.V4L2_CTRL_TYPE_MENU:
menu = find_by_name(ctrl.menus, v)
if menu == None:
logging.warning(f'Can\'t find {v} in {[str(c.name, "utf-8") for c in ctrl.menus]}')
continue
intvalue = menu.index
elif ctrl.type == v4l2.V4L2_CTRL_TYPE_INTEGER_MENU:
menu = find_by_value(ctrl.menus, int(v))
if menu == None:
logging.warning(f'Can\'t find {v} in {[c.value for c in ctrl.menus]}')
continue
intvalue = menu.index
else:
logging.warning(f'Can\'t set {k} to {v} (Unsupported control type {ctrl.type})')
continue
try:
new_ctrl = v4l2.v4l2_control(ctrl.id, intvalue)
ioctl(self.fd, v4l2.VIDIOC_S_CTRL, new_ctrl)
if new_ctrl.value != intvalue:
logging.warning(f'Can\'t set {k} to {v} using {new_ctrl.value} instead of {intvalue}')
continue
ctrl.value = intvalue
except Exception as e:
logging.warning(f'Can\'t set {k} to {v} ({e})')
def get_device_cap(self):
cap = v4l2.v4l2_capability()
try:
ioctl(self.fd, v4l2.VIDIOC_QUERYCAP, cap)
except Exception as e:
logging.warning(f'v4l2ctrls: VIDIOC_QUERYCAP failed: ({e})')
self.card = str(cap.card, 'utf-8')
self.driver = str(cap.driver, 'utf-8')
def get_device_controls(self):
ctrls = []
strtrans = bytes.maketrans(b' -', b'__')
next_fl = v4l2.V4L2_CTRL_FLAG_NEXT_CTRL | v4l2.V4L2_CTRL_FLAG_NEXT_COMPOUND
qctrl = v4l2.v4l2_queryctrl(next_fl)
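# Enumerate the driver's controls by repeatedly asking for the "next" control
# after the last returned ID; the loop terminates when VIDIOC_QUERYCTRL fails.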
while True:
try:
ioctl(self.fd, v4l2.VIDIOC_QUERYCTRL, qctrl)
except:
break
if qctrl.type in [v4l2.V4L2_CTRL_TYPE_INTEGER, v4l2.V4L2_CTRL_TYPE_BOOLEAN,
v4l2.V4L2_CTRL_TYPE_MENU,v4l2.V4L2_CTRL_TYPE_INTEGER_MENU]:
try:
ctrl = v4l2.v4l2_control(qctrl.id)
ioctl(self.fd, v4l2.VIDIOC_G_CTRL, ctrl)
qctrl.value = ctrl.value
except:
logging.warning(f'Can\'t get ctrl {qctrl.name} value')
qctrl.name = qctrl.name.lower().translate(strtrans, delete = b',&(.)').replace(b'__', b'_')
if qctrl.type in [v4l2.V4L2_CTRL_TYPE_MENU, v4l2.V4L2_CTRL_TYPE_INTEGER_MENU]:
qctrl.menus = []
for i in range(qctrl.minimum, qctrl.maximum + 1):
try:
qmenu = v4l2.v4l2_querymenu(qctrl.id, i)
ioctl(self.fd, v4l2.VIDIOC_QUERYMENU, qmenu)
except:
continue
qctrl.menus.append(qmenu)
ctrls.append(qctrl)
qctrl = v4l2.v4l2_queryctrl(qctrl.id | next_fl)
self.ctrls = ctrls
def print_ctrls(self):
print(f'Device: {self.device}')
print(f'Name: {self.card}')
print(f'Driver: {self.driver}')
print(f'\nControls')
for c in self.ctrls:
if c.type == v4l2.V4L2_CTRL_TYPE_CTRL_CLASS:
print('\n' + str(c.name, 'utf-8')+'\n')
else:
print(str(c.name, 'utf-8'), end = ' = ')
if c.type in [v4l2.V4L2_CTRL_TYPE_MENU, v4l2.V4L2_CTRL_TYPE_INTEGER_MENU]:
defmenu = None
valmenu = None
for m in c.menus:
if m.index == c.value:
valmenu = m
if m.index == c.default:
defmenu = m
if valmenu:
print(f'{str(valmenu.name, "utf-8") if c.type == v4l2.V4L2_CTRL_TYPE_MENU else valmenu.value}\t(', end = ' ')
if defmenu:
print(f'default: {str(defmenu.name, "utf-8") if c.type == v4l2.V4L2_CTRL_TYPE_MENU else defmenu.value}', end = ' ')
print('values:', end = ' ')
for m in c.menus:
print('%a' % (str(m.name, 'utf-8') if c.type == v4l2.V4L2_CTRL_TYPE_MENU else m.value),
end = ' ')
print(')')
elif c.type in [v4l2.V4L2_CTRL_TYPE_INTEGER, v4l2.V4L2_CTRL_TYPE_BOOLEAN]:
print('%a\t(' % c.value, 'default:', c.default, 'min:', c.minimum, 'max:', c.maximum, end = '')
if c.step != 1:
print(' step:', c.step, end = '')
print(')')
else:
print()
def request_key_frame(self):
try:
ctrl = v4l2.v4l2_control(v4l2.V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0)
ioctl(self.fd, v4l2.VIDIOC_S_CTRL, ctrl)
except:
logging.warning(f'{self.device} can\'t request keyframe')
def find_by_name(ctrls, name):
for c in ctrls:
if name == str(c.name, 'utf-8'):
return c
return None
def find_by_value(menus, value):
for m in menus:
if value == m.value:
return m
return None
|
11552994
|
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
# from pprint import pprint as p
# p(sys.path)
from exploredata.order import ExploreOrder
from exploredata.traffic import ExploreTraffic
from exploredata.weather import ExploreWeather
from prepareholdoutset import PrepareHoldoutSet
from utility.datafilepath import g_singletonDataFilePath
from utility.dumpload import DumpLoad
import numpy as np
import pandas as pd
from splittrainvalidation import SplitTrainValidation
from splittrainvalidation import HoldoutSplitMethod
from preprocess.historicaldata import HistoricalData
from preparegapcsv import prepareGapCsvForPrediction
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from exploredata.poi import ExplorePoi
class PrepareData(ExploreOrder, ExploreWeather, ExploreTraffic, PrepareHoldoutSet, SplitTrainValidation,HistoricalData, prepareGapCsvForPrediction,ExplorePoi):
""" Aggreate all data source, transform them into a big table with features and labels that can be fed into learnig algorithms
This class keep all the feature engineerining/trnasformation transparent to cleint. Cient can call any of its public method to get data/cv folds, it will start preprocessing
the data, stoe them in dictionalry (res_data_dict), and return the result. Next time when the client try fetching data/cv folds, this class will look up the dictionary and
return the result.
"""
def __init__(self):
ExploreOrder.__init__(self)
# self.usedFeatures = []
# self.usedFeatures = [101,102,103,104,105,106,107,
# 201, 202, 203,204,205,206,
# 301, 302,
# 401,402,
# 501,502,503,504,505,506,507,
# 601,602,603,604,605,606,
# 8801,8802
# ]
# self.usedFeatures = [101,102,103]
self.usedFeatures = [101,102,103,104,105,106,107,
201, 203,204,205,206,
301,
401,402,
501,502,503,504,505,506,507,
601,602,603,604,605,606,
8801,8802
] # Features actually used by the learning algorithm
# self.override_used_features = ['gap1', 'time_id', 'gap2', 'gap3', 'traffic2', 'traffic1', 'traffic3',
# 'preweather', 'start_district_id_28', 'start_district_id_8',
# 'start_district_id_7', 'start_district_id_48']
self.usedLabel = 'gap'
self.excludeZerosActual = True # whether to exclude rows whose gap is zero
# the resultant data dictionary after preprocessing
self.res_data_dict = {} # dictionary caching preprocessed data, keyed by data directory
# self.randomSate = None
# self.test_size = 0.25
self.holdout_split = HoldoutSplitMethod.IMITTATE_TEST2_PLUS2 # cross validation split strategy
# self.holdout_split = HoldoutSplitMethod.KFOLD_BYDATE
# self.holdout_split = HoldoutSplitMethod.kFOLD_FORWARD_CHAINING
self.train_validation_foldid = -2
return
def __get_all_features_dict(self):
"""return all the featurs engineered """
featureDict ={}
# preGaps = ['gap1', 'gap2', 'gap3']
districtids = ['start_district_id_' + str(i + 1) for i in range(66)]
timeids = ['time_id_' + str(i + 1) for i in range(144)]
# gap features
featureDict[101] = ['gap1']
featureDict[102] = ['gap2']
featureDict[103] = ['gap3']
featureDict[104] = ['gap_diff1']
featureDict[105] = ['gap_diff2']
featureDict[106] = ['gap_mean']
featureDict[107] = ['gap_std']
#district features
featureDict[201] = ['start_district_id']
featureDict[202] = districtids
featureDict[203] = ['start_district_id_51', 'start_district_id_23','start_district_id_8','start_district_id_37']
featureDict[204] = ['district_gap_sum']
featureDict[205] = self.get_district_type_list()
featureDict[206] = ['poi_sum']
#time features
featureDict[301] = ['time_id']
featureDict[302] = timeids
#weather features
featureDict[401] = ['preweather']
featureDict[402] = ["rain_check"]
# Traffic features
featureDict[501] = ['traffic1']
featureDict[502] = ['traffic2']
featureDict[503] = ['traffic3']
featureDict[504] = ['traffic_diff1']
featureDict[505] = ['traffic_diff2']
featureDict[506] = ['traffic_mean']
featureDict[507] = ['traffic_std']
#historical features
featureDict[601] = ['history_mean']
featureDict[602] = ['history_median']
featureDict[603] = ['history_mode']
featureDict[604] = ['history_plus_mean']
featureDict[605] = ['history_plus_median']
featureDict[606] = ['history_plus_mode']
#cross features
featureDict[8801] = ['district_time']
featureDict[8802] = ['weather_time']
return featureDict
def __get_label_encode_dict(self):
le_dict = {}
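# Tuple keys are concatenated into new "cross" features (then label encoded);
# plain string keys are only label encoded in place.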
le_dict[('start_district_id', 'time_id')] = 'district_time'
le_dict[('preweather', 'time_id')] = 'weather_time'
le_dict[('time_id')] = 'time_id'
le_dict[('start_district_id')] = 'start_district_id'
return le_dict
# def __translate_used_features(self):
# if hasattr(self, 'override_used_features'):
# self.usedFeatures = self.override_used_features
# return
#
# res = []
# featureDict = self.__get_all_features_dict()
# [res.extend(featureDict[fea]) for fea in self.usedFeatures]
#
# return res
def get_used_features(self):
res = []
featureDict = self.__get_all_features_dict()
[res.extend(featureDict[fea]) for fea in self.usedFeatures]
return res
def add_pre_gaps(self, data_dir):
dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_prevgap.df.pickle'
dumpload = DumpLoad(dumpfile_path)
if dumpload.isExisiting():
df = dumpload.load()
else:
gap_dict = self.get_gap_dict(data_dir)
df = self.X_y_Df[['start_district_id', 'time_slotid']].apply(self.find_prev_gap, axis = 1, pre_num = 3, gap_dict = gap_dict)
dumpload.dump(df)
self.X_y_Df = pd.concat([self.X_y_Df, df], axis=1)
return
def add_rain_check(self):
rain_dict ={1:1, 2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0}
self.X_y_Df["rain_check"] = self.X_y_Df["preweather"].map(rain_dict)
return
def add_prev_weather(self, data_dir):
dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_prevweather.df.pickle'
dumpload = DumpLoad(dumpfile_path)
if dumpload.isExisiting():
df = dumpload.load()
else:
weather_dict = self.get_weather_dict(data_dir)
df = self.X_y_Df['time_slotid'].apply(self.find_prev_weather_mode, weather_dict=weather_dict)
dumpload.dump(df)
self.X_y_Df = pd.concat([self.X_y_Df, df], axis=1)
self.add_rain_check()
return
def add_prev_traffic(self, data_dir):
dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_prevtraffic.df.pickle'
dumpload = DumpLoad(dumpfile_path)
if dumpload.isExisiting():
df = dumpload.load()
else:
traffic_dict = self.get_traffic_dict(data_dir)
df = self.X_y_Df[['start_district_id', 'time_slotid']].apply(self.find_prev_traffic,axis = 1, traffic_dict=traffic_dict, pre_num = 3)
dumpload.dump(df)
self.X_y_Df = pd.concat([self.X_y_Df, df], axis=1)
return
def __add_poi(self, data_dir):
dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_poi.df.pickle'
dumpload = DumpLoad(dumpfile_path)
if dumpload.isExisiting():
df = dumpload.load()
else:
poi_dict = self.get_district_type_dict()
df = self.X_y_Df[['start_district_id']].apply(self.find_poi,axis = 1, poi_dict=poi_dict)
dumpload.dump(df)
self.X_y_Df = pd.concat([self.X_y_Df, df], axis=1)
return
def remove_zero_gap(self):
if not 'gap' in self.X_y_Df.columns:
# when we perform validation on test set, we do not expect to have 'gap' column
return
if self.excludeZerosActual:
bNonZeros = self.X_y_Df['gap'] != 0
self.X_y_Df = self.X_y_Df[bNonZeros]
return
def __add_gap_statistics(self):
self.X_y_Df['gap_diff1'] = self.X_y_Df['gap2'] - self.X_y_Df['gap1']
self.X_y_Df['gap_diff2'] = self.X_y_Df['gap3'] - self.X_y_Df['gap2']
self.X_y_Df['gap_mean'] = self.X_y_Df[['gap1','gap2','gap3']].mean(axis=1)
self.X_y_Df['gap_std'] = self.X_y_Df[['gap1','gap2','gap3']].std(axis=1)
return
def __add_traffic_statistics(self):
self.X_y_Df['traffic_diff1'] = self.X_y_Df['traffic2'] - self.X_y_Df['traffic1']
self.X_y_Df['traffic_diff2'] = self.X_y_Df['traffic3'] - self.X_y_Df['traffic2']
self.X_y_Df['traffic_mean'] = self.X_y_Df[['traffic1','traffic2','traffic3']].mean(axis=1)
self.X_y_Df['traffic_std'] = self.X_y_Df[['traffic1','traffic2','traffic3']].std(axis=1)
return
def add_district_gap_sum(self):
dumpfile_path = '../data_preprocessed/' +'training_data_district_gap_sum.dict.pickle'
dumpload = DumpLoad(dumpfile_path)
if dumpload.isExisiting():
district_gap_sum_dict = dumpload.load()
else:
district_gap_sum_dict = self.X_y_Df.groupby('start_district_id')['gap'].sum().to_dict()
dumpload.dump(district_gap_sum_dict)
self.X_y_Df["district_gap_sum"] = self.X_y_Df["start_district_id"].map(district_gap_sum_dict)
return
def add_history_data(self,data_dir):
dumpfile_path = '../data_preprocessed/' + data_dir.split('/')[-2] + '_history_data.df.pickle'
dumpload = DumpLoad(dumpfile_path)
if dumpload.isExisiting():
df = dumpload.load()
else:
temp_dict = self.get_history_data_dict()
df = self.X_y_Df[['start_district_id', 'time_id']].apply(self.find_history_data, axis = 1, history_dict = temp_dict)
dumpload.dump(df)
self.X_y_Df = pd.concat([self.X_y_Df, df], axis=1)
return
def __add_cross_features(self):
cross_feature_dict = self.__get_label_encode_dict()
for exising_feature_names, new_feature_name in cross_feature_dict.iteritems():
if isinstance(exising_feature_names, basestring):
# such items in the dict only need to do label encoding, and not cross feature
continue
self.__add_cross_feature(exising_feature_names, new_feature_name)
return
def __add_cross_feature(self, exising_feature_names, new_feature_name):
for i in range(len(exising_feature_names)):
if i ==0:
self.X_y_Df[new_feature_name] = self.X_y_Df[exising_feature_names[i]].astype(str)
continue
self.X_y_Df[new_feature_name] = self.X_y_Df[new_feature_name] + '_' + self.X_y_Df[exising_feature_names[i]].astype(str)
return
def __add_poi_sum(self):
self.X_y_Df['poi_sum'] = self.X_y_Df[self.get_district_type_list()].sum(axis = 1)
return
def __engineer_feature(self, data_dir = None):
self.add_pre_gaps(data_dir)
self.add_district_gap_sum()
self.add_prev_weather(data_dir)
self.add_prev_traffic(data_dir)
self.__add_gap_statistics()
self.__add_traffic_statistics()
self.__add_poi(data_dir)
self.__add_poi_sum()
self.add_history_data(data_dir)
self.remove_zero_gap()
self.__add_cross_features()
return
def get_train_validationset(self):
data_dir = g_singletonDataFilePath.getTrainDir()
self.__do_prepare_data()
df, cv = self.res_data_dict[data_dir]
folds = []
for train_index, test_index in cv:
folds.append((train_index, test_index))
train_index = folds[self.train_validation_foldid][0]
test_index = folds[self.train_validation_foldid][1]
X_train = df.iloc[train_index][self.get_used_features()]
y_train = df.iloc[train_index][self.usedLabel]
X_test = df.iloc[test_index][self.get_used_features()]
y_test = df.iloc[test_index][self.usedLabel]
return X_train, y_train,X_test,y_test
def getFeaturesLabel(self):
data_dir = g_singletonDataFilePath.getTrainDir()
self.__do_prepare_data()
df, cv = self.res_data_dict[data_dir]
return df[self.get_used_features()], df[self.usedLabel],cv
return
def __get_feature_label(self):
data_dir = g_singletonDataFilePath.getTrainDir()
self.X_y_Df = self.load_gapdf(data_dir)
self.__engineer_feature(data_dir)
if self.holdout_split == HoldoutSplitMethod.kFOLD_FORWARD_CHAINING:
cv = self.get_kfold_forward_chaining(self.X_y_Df)
elif self.holdout_split == HoldoutSplitMethod.KFOLD_BYDATE:
cv = self.get_kfold_bydate(self.X_y_Df)
else:
cv = self.get_imitate_testset2(self.X_y_Df, split_method = self.holdout_split)
self.res_data_dict[data_dir] = self.X_y_Df,cv
return
def __get_feature_for_test_set(self,data_dir):
self.X_y_Df = self.load_prediction_csv(data_dir)
self.__engineer_feature(data_dir)
self.res_data_dict[data_dir] = self.X_y_Df
return
def getFeaturesforTestSet(self, data_dir):
self.__do_prepare_data()
return self.res_data_dict[data_dir]
def __do_label_encoding(self):
df_train, _ = self.res_data_dict[g_singletonDataFilePath.getTrainDir()]
df_testset1 = self.res_data_dict[g_singletonDataFilePath.getTest1Dir()]
df_testset2 = self.res_data_dict[g_singletonDataFilePath.getTest2Dir()]
le = LabelEncoder()
cross_feature_dict = self.__get_label_encode_dict()
for _, new_feature_name in cross_feature_dict.iteritems():
to_be_stacked = [df_train[new_feature_name], df_testset1[new_feature_name], df_testset2[new_feature_name]]
le.fit(pd.concat(to_be_stacked, axis=0))
df_train[new_feature_name] = le.transform(df_train[new_feature_name])
df_testset1[new_feature_name] = le.transform(df_testset1[new_feature_name])
df_testset2[new_feature_name] = le.transform(df_testset2[new_feature_name])
return
def __save_final_data(self):
df_train, _ = self.res_data_dict[g_singletonDataFilePath.getTrainDir()]
df_testset1 = self.res_data_dict[g_singletonDataFilePath.getTest1Dir()]
df_testset2 = self.res_data_dict[g_singletonDataFilePath.getTest2Dir()]
df_train.to_csv('temp/df_train_final.csv')
df_testset1.to_csv('temp/df_testset1_final.csv')
df_testset2.to_csv('temp/df_testset2_final.csv')
return
def __get_expanded_col_names(self, cols, sub_cols):
"""
helper method to generate expanded columns after one hot encoding
cols, original column names ['a', 'b', 'c']
sub_cols, one-hot code length for each original column [2,3,4]
res, the new column names, ['a_1', 'a_2', 'b_1', 'b_2', 'b_3', 'c_1', 'c_2', 'c_3', 'c_4']
"""
res = []
if len(cols) != len(sub_cols):
raise "cols and expanded sub columns are not consistent"
for i in range(len(cols)):
prefix = cols[i]
sub_num = sub_cols[i]
for j in range(sub_num):
res.append(prefix + '_' + str(j + 1))
return res
def __filter_too_big_onehot_encoding(self, enc, to_be_encoded_old, df_train, df_testset1, df_testset2):
print "Filter out too big one hot encoding (>=200)", np.array(to_be_encoded_old)[enc.n_values_ >= 200]
to_be_encoded = np.array(to_be_encoded_old)[enc.n_values_ < 200]
to_be_stacked_df = pd.concat([df_train[to_be_encoded], df_testset1[to_be_encoded], df_testset2[to_be_encoded]], axis = 0)
enc.fit(to_be_stacked_df)
return enc, to_be_encoded
def __do_one_hot_encodings(self):
df_train, cv = self.res_data_dict[g_singletonDataFilePath.getTrainDir()]
df_testset1 = self.res_data_dict[g_singletonDataFilePath.getTest1Dir()]
df_testset2 = self.res_data_dict[g_singletonDataFilePath.getTest2Dir()]
enc = OneHotEncoder(sparse=False)
cross_feature_dict = self.__get_label_encode_dict()
to_be_encoded = []
for _, new_feature_name in cross_feature_dict.iteritems():
to_be_encoded.append(new_feature_name)
#fix all data source
to_be_stacked_df = pd.concat([df_train[to_be_encoded], df_testset1[to_be_encoded], df_testset2[to_be_encoded]], axis = 0)
enc.fit(to_be_stacked_df)
enc, to_be_encoded = self.__filter_too_big_onehot_encoding(enc, to_be_encoded, df_train, df_testset1, df_testset2)
# transform on separate data sources
self.res_data_dict[g_singletonDataFilePath.getTrainDir()] = self.__do_one_hot_encoding(df_train, enc, to_be_encoded),cv
self.res_data_dict[g_singletonDataFilePath.getTest1Dir()] = self.__do_one_hot_encoding(df_testset1,enc, to_be_encoded)
self.res_data_dict[g_singletonDataFilePath.getTest2Dir()] = self.__do_one_hot_encoding(df_testset2, enc, to_be_encoded)
return
def __do_one_hot_encoding(self, df, enc, to_be_encoded):
arr = enc.transform(df[to_be_encoded])
new_col_names = self.__get_expanded_col_names(to_be_encoded, enc.n_values_)
df_res = pd.DataFrame(arr, columns=new_col_names)
df = pd.concat([df, df_res], axis = 1)
return df
def __do_prepare_data(self):
if len(self.res_data_dict) != 0:
# the data has already been preprocessed
return
self.__get_feature_label()
self.__get_feature_for_test_set(g_singletonDataFilePath.getTest2Dir())
self.__get_feature_for_test_set(g_singletonDataFilePath.getTest1Dir())
self.__do_label_encoding()
self.__do_one_hot_encodings()
return
def run(self):
self.getFeaturesLabel()
self.getFeaturesforTestSet(g_singletonDataFilePath.getTest2Dir())
self.getFeaturesforTestSet(g_singletonDataFilePath.getTest1Dir())
self.get_train_validationset()
self.__save_final_data()
return
if __name__ == "__main__":
obj= PrepareData()
obj.run()
|
11553037
|
from os import path
from os.path import dirname, abspath
import sys
import numpy as np
from math import pi
from scipy.stats import norm
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
try:
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
except IndexError:
pass
from agents.learning.model import Model
from agents.learning.model import GMM
# Load data from a file
def load_data(file_name):
train_path = path.join(dirname(abspath(__file__)), file_name)
states = None
if path.exists(train_path):
with open(train_path, 'rb') as f:
states = np.loadtxt(f, delimiter=",")
states = np.atleast_2d(states)
else:
print("%s does not exist."% file_name)
return states
# Show some of the data set
def show_data_set(driver, file_name, index_range):
    states = load_data(driver + "/" + file_name + "_states.csv")
    states = states[index_range[0]:index_range[1], :]
model = Model()
model._state_list = states.tolist()
if file_name == "target_speed":
model.update_target_speed(debug=True)
if file_name == "safe_distance":
model.update_safe_distance(debug=True)
if file_name == "GMM":
model.update_sin_param(debug=True)
# Plot comparison
def plot_comparison(file_name):
d1 = load_data("Driver1/" + file_name + "_train_data.csv")
d2 = load_data("Driver2/" + file_name + "_train_data.csv")
d3 = load_data("Driver3/" + file_name + "_train_data.csv")
d = [d1, d2, d3]
plt.figure()
for driver_num, driver_data in enumerate(d):
mean = driver_data[:,0]
cov = driver_data[:,1]
order = np.sort(mean)
        for i in range(mean.size):
            x = np.linspace(order[0] - 3, order[-1] + 3, 300).reshape(-1, 1)
            y = norm.pdf(x, mean[i], np.sqrt(cov[i]))
            if i == int(mean.size) - 1:
                plt.plot(x, y, c="C" + str(driver_num), label='Driver ' + str(driver_num + 1))
            else:
                plt.plot(x, y, c="C" + str(driver_num))
plt.xlabel(file_name)
plt.ylabel("density of probability")
plt.legend()
plt.show()
# GMM train and predict
def gmm_train_and_predict(driver, standard_case):
gmm = GMM()
data = load_data(driver + "/GMM_train_data.csv")
gmm.train(data)
long_v = np.sum(data[:,5])
if gmm.GMM_model is not None:
GMM_v = standard_case
dt = gmm.predict_value(GMM_v)[0][0]
if np.isnan(dt) or dt < 0:
print("GMM model failed, send dt = 4")
else:
print("Predict dt: %s from GMM" % (dt))
t = np.linspace(0, dt, 200)
x = np.linspace(0, long_v*dt, 200)
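        # Lateral profile of the lane change: with lane offset D = -3.5 m,
        # y(t) = -(D / (2*pi)) * sin(2*pi*t/dt) + D*t/dt, a sinusoidal profile that starts and
        # ends with zero lateral velocity over the predicted duration dt.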
y = -(-3.5)/(2*pi) * np.sin(2*pi * t/dt) + (-3.5) * t/dt
return t, x, y
# Plot lane change comparison
def plot_gmm_comparison(standard_case=np.array([[10, -3.5, 15, -12]])):
t1, x1, y1 = gmm_train_and_predict("Driver1", standard_case)
t2, x2, y2 = gmm_train_and_predict("Driver2", standard_case)
t3, x3, y3 = gmm_train_and_predict("Driver3", standard_case)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(t1, y1, c='r', label="Driver1")
ax1.plot(t2, y2, c='g', label="Driver2")
ax1.plot(t3, y3, c='b', label="Driver3")
ax1.set_xlabel("Time s")
ax1.set_ylabel("Lateral distance m")
ax2 = fig.add_subplot(212)
ax2.plot(t1, x1, c='r', label="Driver1")
ax2.plot(t2, x2, c='g', label="Driver2")
ax2.plot(t3, x3, c='b', label="Driver3")
ax2.set_xlabel("Time s")
ax2.set_ylabel("Longitudinal distance m")
plt.legend()
plt.show()
if __name__ == "__main__":
show_data_set("Driver1", "target_speed", [0, 630])
show_data_set("Driver1", "safe_distance", [0, 665])
show_data_set("Driver1", "GMM", [0, 310])
plot_comparison("target_speed")
plot_comparison("safe_distance")
plot_gmm_comparison()
|
11553060
|
from securityheaders.models import Keyword
class FeaturePolicyKeyword(Keyword):
SELF = "'self'"
NONE = "'none'"
STAR = "*"
@staticmethod
def isKeyword(keyword):
""" Checks whether a given string is a FeaturePolicyKeyword.
Args:
keyword (str): the string to validate
"""
return hasattr(FeaturePolicyKeyword, keyword)
@staticmethod
def isValue(keyword):
return keyword in list(map(str, FeaturePolicyKeyword))
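# Usage sketch (assuming a Keyword member stringifies to its directive value):
#   FeaturePolicyKeyword.isKeyword("SELF")   -> True   (checks the attribute name)
#   FeaturePolicyKeyword.isValue("'self'")   -> True   (checks the serialized value)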
|
11553072
|
import re
from collections import OrderedDict
def _replace_end(string, end, new_end):
if string == end:
return new_end
if string.endswith('.' + end):
return string[:-len('.' + end)] + '.' + new_end
return string
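# Illustration of the key normalisation done in replace_tf_key below (variable names are hypothetical):
#   'dense/kernel:0'        -> 'dense.weight'
#   'bn/moving_variance:0'  -> 'bn.running_var'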
def zip_th_tf_parameters(torch_module, tf_module, permute_torch=False):
def replace_tf_key(key):
        key = key.replace('/', '.')
        if key.endswith(':0'):
            key = key[:-2]
key = _replace_end(key, 'moving_mean', 'running_mean')
key = _replace_end(key, 'moving_variance', 'running_var')
key = _replace_end(key, 'kernel', 'weight')
key = _replace_end(key, 'beta', 'bias')
key = _replace_end(key, 'gamma', 'weight')
return key
def endswith(string, end):
if string == end:
return True
if string.endswith('.' + end):
return True
return False
weights = torch_module.state_dict()
variables = tf_module.variables
ignored = set()
if hasattr(tf_module, '_metrics'):
# Remove metrics from weights
delete_vars = {v.ref() for x in tf_module._metrics for v in x.variables}
variables = [v for v in variables if v.ref() not in delete_vars]
tf_weights = {replace_tf_key(v.name): v for v in variables}
# print([x.name for x in tf_module.variables])
try:
tf_model_name, _ = next(iter(tf_weights.keys())).split('.', 1)
if all(x.startswith(f'{tf_model_name}.') for x in tf_weights.keys()):
tf_weights = {k[len(f'{tf_model_name}.'):]: v for k, v in tf_weights.items()}
except StopIteration:
pass
if hasattr(tf_module, '_ignore_checkpoint_attributes'):
pattern = re.compile('|'.join(map(lambda x: f'({x})', tf_module._ignore_checkpoint_attributes)))
ignored = {k for k in weights.keys() if pattern.match(k)}
# print([x for x in tf_weights.keys()])
# print([x for x in weights.keys()])
unmatched = set(tf_weights.keys()).difference(set(weights.keys()) - ignored)
# print(unmatched)
assert len(unmatched) == 0, f'There are some unmatched keys in model parameters ({len(unmatched)}), e.g., ' + ', '.join(list(unmatched)[:4])
for k, val in weights.items():
if k in ignored:
continue
if endswith(k, 'num_batches_tracked'):
# This property is ignored in tensorflow
continue
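        # PyTorch stores conv kernels as (out_ch, in_ch, kH, kW) while TensorFlow/Keras expects
        # (kH, kW, in_ch, out_ch); linear weights are (out, in) vs (in, out). The permutes below
        # reorder the torch tensors accordingly before they are yielded for assignment.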
if permute_torch:
if endswith(k, 'weight') and len(val.shape) == 4:
                val = val.permute(2, 3, 1, 0)
elif endswith(k, 'weight') and len(val.shape) == 2:
val = val.permute(1, 0)
assert k in tf_weights, f'There are some weights ({k}) not found on the tf_module'
matching_weight = tf_weights[k]
assert matching_weight.shape == val.shape, f'Shape does not match for parameter {k}, {matching_weight.shape} != {val.shape}'
yield val, matching_weight
def convert_weights_th_to_tf(torch_module, tf_module):
for th_weight, tf_variable in zip_th_tf_parameters(torch_module, tf_module, permute_torch=True):
tf_variable.assign(th_weight.clone().numpy())
def convert_model_th_to_tf(torch_module, checkpoint):
import onnx
from onnx_tf.backend import prepare
import tensorflow as tf
class TFStoredModel:
def __init__(self, model, output_type, output_names):
self.model = model
self.output_type = output_type
self.output_names = output_names
@staticmethod
def _nhwc_to_nchw(x):
if isinstance(x, list):
return [TFStoredModel._nhwc_to_nchw(y) for y in x]
if isinstance(x, tuple):
return tuple(TFStoredModel._nhwc_to_nchw(y) for y in x)
if isinstance(x, dict):
return x.__class__([(k, TFStoredModel._nhwc_to_nchw(y)) for k, y in x.items()])
if len(tf.shape(x)) == 4 and tf.shape(x)[-1] == 3:
return tf.transpose(x, (0, 3, 1, 2))
return x
@staticmethod
def _nchw_to_nhwc(x):
if isinstance(x, list):
return [TFStoredModel._nchw_to_nhwc(y) for y in x]
if isinstance(x, tuple):
return tuple(TFStoredModel._nchw_to_nhwc(y) for y in x)
if isinstance(x, dict):
return {k: TFStoredModel._nchw_to_nhwc(y) for k, y in x.items()}
if len(tf.shape(x)) == 4 and tf.shape(x)[-3] == 3:
return tf.transpose(x, (0, 2, 3, 1))
return x
def __call__(self, *args, **kwargs):
args = self._nhwc_to_nchw(args)
kwargs = self._nhwc_to_nchw(kwargs)
output = self.model.signatures['serving_default'](*args, **kwargs)
output = self._nchw_to_nhwc(output)
len_output = len(output.keys())
output = tuple(output[f'output_{i}'] for i in range(len_output))
if self.output_type == list:
return list(output)
elif self.output_type is None:
return output[0]
elif self.output_type == tuple:
return output
else:
assert self.output_type in {dict, OrderedDict}
return self.output_type(zip(self.output_names, output))
onnx_path = f'{checkpoint}.onnx'
output_type = torch_module.to_onnx(onnx_path, export_params=True)
onnx_model = onnx.load(onnx_path)
output_names = [x.name for x in onnx_model.graph.output]
if output_type is None and len(output_names) > 0:
output_type = tuple
assert output_type in {OrderedDict, dict, tuple, list, None}
model = prepare(onnx_model, auto_cast=True)
model.export_graph(f'{checkpoint}.pb')
path = f'{checkpoint}.pb'
model = tf.saved_model.load(path)
model = TFStoredModel(model, output_type=output_type, output_names=output_names)
if hasattr(torch_module, 'config'):
setattr(model, 'config', torch_module.config)
return model
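# Minimal usage sketch (model and checkpoint names are hypothetical):
#   tf_model = build_tf_model()
#   convert_weights_th_to_tf(torch_model, tf_model)            # copy weights layer by layer
#   tf_fn = convert_model_th_to_tf(torch_model, 'ckpt/model')  # export via ONNX -> SavedModel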
|
11553076
|
from capnpy.util import extend
from capnpy.enum import BaseEnum
@extend(nullable)
class nullable:
def check(self, m):
field = self.target
assert field.is_group()
name = m.py_field_name(field)
def error(msg):
raise ValueError('%s: %s' % (name, msg))
#
group = field.group.get_node(m)
        if len(group.struct.fields) != 2:
            error('nullable groups must have exactly two fields: '
                  '"isNull" and "value"')
        f_is_null, f_value = group.struct.fields
        if (f_is_null.name != b'isNull' or
                f_value.name != b'value'):
            error('the two fields of a nullable group must be named '
                  '"isNull" and "value"')
if f_value.is_pointer():
error('cannot use pointer types for nullable values. '
'Pointers are already nullable.')
return name, f_is_null, f_value
@extend(group)
class group:
def check(self, m):
field = self.target
assert field.is_void()
@extend(BoolOption)
class BoolOption:
def __bool__(self):
if self == BoolOption.notset:
raise ValueError("Cannot get the truth value of a 'notset'")
return bool(int(self))
__nonzero__ = __bool__ # for Python2.7
@extend(TextType)
class TextType:
@classmethod
def parse(cls, s):
if s == 'bytes':
return cls.bytes
elif s == 'unicode':
return cls.unicode
else:
raise ValueError('Unknown TextType: %s' % s)
@Options.__extend__
class Options:
FIELDS = ('version_check', 'convert_case', 'text_type', 'include_reflection_data')
@classmethod
def from_dict(cls, d):
"""
Create an Options instance from the given dict.
Each option is expressed as either a normal bool or a string; strings
are parsed (e.g. "bytes" becomes TextType.bytes)
"""
kwargs = {}
for key, value in d.items():
if key in ('version_check', 'convert_case', 'include_reflection_data'):
kwargs[key] = value
elif key == 'text_type':
kwargs[key] = TextType.parse(value)
else:
raise ValueError("Unknown option: %s" % key)
return cls(**kwargs)
def combine(self, other):
"""
Combine the options of ``self`` and ``other``. ``other``'s options take
the precedence, if they are set.
"""
values = {}
for fname in self.FIELDS:
values[fname] = getattr(self, fname)
otherval = getattr(other, fname)
enumcls = otherval.__class__
assert issubclass(enumcls, BaseEnum), 'Only Enums supported for now'
assert hasattr(enumcls, 'notset'), 'An Option enum must have a "notset" field'
if otherval != enumcls.notset:
values[fname] = otherval
return self.__class__(**values)
|
11553108
|
import os
import sys
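# Convert a LibriSpeech-style tree of FLAC files to WAV with ffmpeg, copying the *trans.txt
# transcripts alongside. Example invocation (script name and paths are hypothetical):
#   python flac_to_wav.py LibriSpeech/train-clean-100 wav_out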
try:
    flac_dir = sys.argv[1]
    output_wav_dir = sys.argv[2]
except IndexError:
    print("Need two command line parameters.")
    sys.exit(1)
sub_dirs = os.listdir(flac_dir)
for sub_dir in sub_dirs:
if sub_dir == ".DS_Store":
continue
for s_sub_dir in os.listdir(os.path.join(flac_dir, sub_dir)):
if s_sub_dir == ".DS_Store":
continue
output_dir = os.path.join(output_wav_dir, flac_dir.split("/")[-1], sub_dir, s_sub_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for file in os.listdir(os.path.join(flac_dir, sub_dir, s_sub_dir)):
if "trans.txt" in file:
os.system("cp " + os.path.join(flac_dir, sub_dir, s_sub_dir, file) + " " + os.path.join(output_dir, file))
else:
os.system("ffmpeg -i " + os.path.join(flac_dir, sub_dir, s_sub_dir, file) + " " + os.path.join(output_dir, file.split(".")[0] + ".wav"))
|
11553148
|
from . import openmath as om
from lxml.etree import QName
openmath_ns = "http://www.openmath.org/OpenMath"
omtags = {
"OMOBJ": om.OMObject,
"OMR": om.OMReference,
"OMI": om.OMInteger,
"OMF": om.OMFloat,
"OMSTR": om.OMString,
"OMB": om.OMBytes,
"OMS": om.OMSymbol,
"OMV": om.OMVariable,
"OMFOREIGN": om.OMForeign,
"OMA": om.OMApplication,
"OMATTR": om.OMAttribution,
"OMATP": om.OMAttributionPairs,
"OMBIND": om.OMBinding,
"OMBVAR": om.OMBindVariables,
"OME": om.OMError
}
inv_omtags = dict((v,k) for k,v in omtags.items())
def tag_to_object(tag, check_ns=False):
q = QName(tag)
if check_ns and q.namespace != openmath_ns:
raise ValueError('Invalid namespace')
return omtags[q.localname]
def object_to_tag(obj, ns=True):
tpl = '{%(ns)s}%(tag)s' if ns else '%(tag)s'
# FR: I changed this to allow for other classes that extend an OMXXX class.
# tag = inv_omtags[obj.__class__]
    tag = None
    for t, c in omtags.items():
        if isinstance(obj, c):
            tag = t
    if tag is None:
        raise ValueError('Cannot map %r to an OpenMath tag' % (obj,))
    return tpl % {"ns": openmath_ns, "tag": tag}
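# Round-trip sketch (assuming om.OMInteger can be constructed from an int as its name suggests):
#   tag_to_object('{http://www.openmath.org/OpenMath}OMI')  -> om.OMInteger
#   object_to_tag(om.OMInteger(42))                         -> '{http://www.openmath.org/OpenMath}OMI'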
|
11553171
|
from bilibiliuploader.bilibiliuploader import BilibiliUploader
from bilibiliuploader.core import VideoPart
import datetime
import os
import glob
import json
import time
from moviepy.editor import *
# Change the file's MD5 by appending a marker string (presumably to dodge duplicate-content checks)
def fileAppend(filename):
    with open(filename, 'a') as myfile:
        myfile.write("###&&&&#&&&&########&&&&&")
# add the begin and end to the video
def fileadd_begin_end(video_path, v_name, dir):
    if v_name.endswith('addedflag'):
        cover = dir + '/' + v_name + '.jpg'
        return video_path, v_name, cover if os.path.exists(cover) else ''
L = []
begin = VideoFileClip('begin.mp4')
L.append(begin)
video = VideoFileClip(video_path)
L.append(video)
end = VideoFileClip('ending.mp4')
L.append(end)
final_clip = concatenate_videoclips(L)
# final_clip.to_videofile(dir + '/' + v_name + 'addedflag.mp4', remove_temp=False)
final_clip.write_videofile(dir + '/' + v_name + 'addedflag.mp4', audio=True, audio_codec='aac')
os.remove(video_path)
os.rename(dir + '/' + v_name + '.info.json',dir + '/' + v_name + 'addedflag.info.json')
## since there are some covers' tail is not jpg
if(os.path.exists(dir + '/' + v_name + '.jpg')):
os.rename(dir + '/' + v_name + '.jpg',dir + '/' + v_name + 'addedflag.jpg')
v_cover = dir + '/' + v_name + 'addedflag.jpg'
else:
v_cover = ''
return dir + '/' + v_name + 'addedflag.mp4', v_name + 'addedflag', v_cover
# upload the video
def upload_f(uploader, video_path=None, v_title=None, v_desc=None, v_cover=None,
v_tag=None, v_url=None):
for i in range(len(v_tag)):
if(len(v_tag[i]) > 19):
v_tag[i] = v_tag[i][:19]
    if len(v_tag) < 1:
        v_tag = ['studying']
print(v_tag)
if(len(v_title) >=80):
v_title = v_title[:80]
# processing video file
parts = []
parts.append(VideoPart(
path=video_path,
title=v_title,
desc=v_title
))
'''
parts.append(VideoPart(
path="C:/Users/xxx/Videos/2.mp4",
title="",
desc=""
))
'''
# upload
    # copyright: 2 = reposted, 1 = self-made
# tid = category
avid, bvid = uploader.upload(
parts=parts,
copyright=2,
title=v_title,
tid=208,
tag=",".join(v_tag),
desc=v_title,
source=v_url,
cover=v_cover,
thread_pool_workers=1,
)
# tmp = [video_path, v_title, v_desc, ",".join(v_tag), v_url]
# print(tmp)
if __name__ == '__main__':
uploader = BilibiliUploader()
# login
user_id = ''
user_passwd = ''
uploader.login(user_id, user_passwd)
# uploader = None
record_file = 'up_history.json'
record_id_list = []
record_title_list = []
record_js = []
if(os.path.exists(record_file)):
record_js = json.load(open(record_file))
for item in record_js:
record_id_list.append(item['id'])
record_title_list.append(item['title'])
date = datetime.datetime.now()
date = date.strftime('%Y%m%d')
print('date---------'+date)
dir = 'video_file/file_' + date
video_list = os.listdir(dir)
for v in video_list:
v_type = v[-4:]
v_name = v[:-4]
# print(v_name)
# print(v_type)
if(v_type != '.mp4'):
continue
meta_file = v_name + '.info.json'
        with open(dir + '/' + meta_file, 'r', encoding='utf8') as js:
meta_info = json.load(js)
v_id = meta_info['id']
v_title = meta_info['title']
v_url = meta_info['uploader_url']
v_tag = meta_info['tags'][:3]
v_desc = v_title
video_path = dir + '/'+v
v_cover = dir + '/' + v_name + '.jpg'
# fileAppend(video_path)
if(v_id in record_id_list):
continue
record_id_list.append(v_id)
record_title_list.append(v_title)
record_js.append({'id':v_id, 'title':v_title, 'url':v_url,
'time':datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
print('uploading ' + v)
fileAppend(video_path)
# video_path, v_name, v_cover = fileadd_begin_end(video_path, v_name, dir)
upload_f(uploader=uploader, video_path=video_path, v_title=v_title,
v_desc=v_desc, v_cover=v_cover, v_tag=v_tag, v_url=v_url)
        with open(record_file, "w") as dump_f:
            json.dump(record_js, dump_f)
# seconds
time.sleep(30)
# print(record_js)
|
11553190
|
from collections import defaultdict
import numpy as np
import pandas as pd
from hypothesis import given
from hypothesis.extra.numpy import arrays as h_arrays
from hypothesis.strategies import characters as h_char
from hypothesis.strategies import floats as h_float
from iglovikov_helper_functions.utils.tabular_utils import (
CyclicEncoder,
GeneralEncoder,
LabelEncoderUnseen,
MinMaxScaler,
)
MIN_VALUE = -11
MAX_VALUE = 17
ARRAY_SHAPE = 3
@given(x=h_arrays(dtype=float, shape=ARRAY_SHAPE, elements=h_float(-MIN_VALUE, MAX_VALUE)))
def test_cyclic_day_hours(x):
amplitude = MAX_VALUE - MIN_VALUE
encoder = CyclicEncoder(amplitude)
transformed = encoder.fit_transform(x)
assert transformed.shape == (len(x), 2)
encoder2 = CyclicEncoder(amplitude)
encoder2.fit(x)
transformed2 = encoder2.transform(x)
assert transformed.shape == (len(x), 2)
assert np.array_equal(transformed, transformed2)
reverse_transformed = encoder.inverse_transform(transformed)
assert x.shape == reverse_transformed.shape
assert np.allclose(x, reverse_transformed)
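# The cyclic encoder presumably maps each value to a (sin, cos)-style pair on a circle of the
# given amplitude, which is why the transform is two columns wide yet still invertible above.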
@given(x=h_arrays(dtype=float, shape=ARRAY_SHAPE, elements=h_float(-MIN_VALUE, MAX_VALUE)))
def test_min_max_scaler(x):
feature_range = (-1, 1)
encoder = MinMaxScaler(feature_range)
transformed = encoder.fit_transform(x)
assert transformed.shape == x.shape
encoder2 = MinMaxScaler(feature_range)
encoder2.fit(x)
transformed2 = encoder2.transform(x.T)
assert transformed2.shape == x.T.shape
assert np.array_equal(transformed, transformed2.T)
reverse_transformed = encoder.inverse_transform(transformed)
assert x.shape == reverse_transformed.shape
assert np.allclose(x, reverse_transformed)
@given(
x=h_arrays(
dtype="object", shape=ARRAY_SHAPE, elements=h_char(whitelist_categories=["Lu", "Ll", "Lt", "Lm", "Lo", "Nl"])
)
)
def test_label_encoder_unseen(x):
e = LabelEncoderUnseen()
e.fit(x)
transformed_1 = e.transform(x)
transformed = e.fit_transform(x)
assert np.all(transformed == transformed_1), f"{transformed} {transformed_1}"
assert np.all(x == e.inverse_transform(transformed))
transformed_2 = e.transform(list(x) + ["qwe"])
assert np.all(transformed_2 == list(transformed) + list(e.transform([np.nan])))
@given(numerical=h_arrays(dtype=float, shape=(ARRAY_SHAPE, 2), elements=h_float(-MIN_VALUE, MAX_VALUE)))
def test_encoder_numerical(numerical):
columns_map = defaultdict(list)
result = {}
category_type = "numerical"
joined = {}
for i in range(numerical.shape[1]):
column_name = f"{category_type} {i}"
result[column_name] = numerical[:, i]
columns_map[category_type] += [column_name]
joined[column_name] = [column_name + "1", column_name + "2"]
result[column_name + "1"] = numerical[:, i] / 2
result[column_name + "2"] = numerical[:, i] + 10
columns_map["joined_encoders"] = joined
df = pd.DataFrame(result)
encoder = GeneralEncoder(columns_map)
transformed = encoder.fit_transform(df)
assert set(transformed.keys()) == {category_type}
assert set(columns_map.keys()) == set(transformed.keys())
inverse_transform = encoder.inverse_transform(transformed)
assert list(df.columns) == list(inverse_transform.columns)
for i in range(numerical.shape[1]):
column_name = f"{category_type} {i}"
assert encoder.encoders[column_name + "1"] == encoder.encoders[column_name]
assert encoder.encoders[column_name + "2"] == encoder.encoders[column_name]
for column_name in df.columns:
assert np.allclose(df[column_name].values, inverse_transform[column_name].values)
assert np.allclose(df.values, inverse_transform.values), f"{df.values - inverse_transform.values}"
@given(cyclical=h_arrays(dtype=float, shape=(ARRAY_SHAPE, 5), elements=h_float(-MIN_VALUE, MAX_VALUE)))
def test_encoder_cyclical(cyclical):
columns_map = defaultdict(list)
result = {}
category_type = "cyclical"
for i in range(cyclical.shape[1]):
column_name = f"{category_type} {i}"
result[column_name] = cyclical[:, i]
element = (column_name, MAX_VALUE - MIN_VALUE)
columns_map[category_type] += [element]
df = pd.DataFrame(result)
encoder = GeneralEncoder(columns_map)
transformed = encoder.fit_transform(df)
assert set(transformed.keys()) == {category_type}
assert set(columns_map.keys()) == set(transformed.keys())
inverse_transform = encoder.inverse_transform(transformed)
assert inverse_transform.shape == df.shape
assert np.all(inverse_transform.columns == df.columns)
assert np.all(df.dtypes == inverse_transform.dtypes)
assert np.allclose(df.values, inverse_transform.values)
@given(
categorical=h_arrays(
dtype="object",
shape=(ARRAY_SHAPE, 7),
elements=h_char(whitelist_categories=["Lu", "Ll", "Lt", "Lm", "Lo", "Nl"]),
)
)
def test_encoder_categorical(categorical):
columns_map = defaultdict(list)
result = {}
category_type = "categorical"
joined_encoders = {}
for i in range(categorical.shape[1]):
column_name = f"{category_type} {i}"
result[column_name] = categorical[:, i]
element = column_name
columns_map[category_type] += [element]
joined_encoders[column_name] = [column_name + "1", column_name + "2"]
result[column_name + "1"] = np.random.permutation(categorical[:, i])
result[column_name + "2"] = np.random.permutation(
np.concatenate([categorical[:5, i], ["Vladimir" + x for x in categorical[5:i]]])
)
columns_map["joined_encoders"] = joined_encoders
df = pd.DataFrame(result)
encoder = GeneralEncoder(columns_map)
transformed = encoder.fit_transform(df)
# We add "unknow category" => number of categories in encoder should be +1 to the ones in df
for column in columns_map["categorical"]:
assert df[column].nunique() + 1 == len(encoder.encoders[column].set_classes)
    # We know that the number of unique categories should be equal to max + 1
for column in transformed["categorical"]:
assert len(set(column)) <= max(column) + 1, f"{len(set(column))} {max(column)}"
assert set(transformed.keys()) == {category_type}
assert set(transformed.keys()).intersection(columns_map.keys()) == set(transformed.keys())
inverse_transform = encoder.inverse_transform(transformed)
assert df.equals(inverse_transform)
@given(
numerical=h_arrays(dtype=float, shape=(ARRAY_SHAPE, 5), elements=h_float(-MIN_VALUE, MAX_VALUE)),
cyclical=h_arrays(dtype=float, shape=(ARRAY_SHAPE, 2), elements=h_float(-MIN_VALUE, MAX_VALUE)),
categorical=h_arrays(
dtype="object",
shape=(ARRAY_SHAPE, 3),
elements=h_char(whitelist_categories=["Lu", "Ll", "Lt", "Lm", "Lo", "Nl"]),
),
)
def test_encoder(numerical, cyclical, categorical):
columns_map = defaultdict(list)
result = {}
joined_encoders = {}
for feature, category_type in [(numerical, "numerical"), (cyclical, "cyclical"), (categorical, "categorical")]:
for i in range(feature.shape[1]):
column_name = f"{category_type} {i}"
result[column_name] = feature[:, i]
if category_type == "numerical":
joined_encoders[column_name] = [column_name + "1", column_name + "2"]
result[column_name + "1"] = feature[:, i] / 2
result[column_name + "2"] = feature[:, i] + 10
element = column_name
elif category_type == "categorical":
joined_encoders[column_name] = [column_name + "1", column_name + "2"]
result[column_name + "1"] = np.random.permutation(categorical[:, i])
result[column_name + "2"] = np.random.permutation(
np.concatenate([categorical[:5, i], ["Vladimir" + x for x in categorical[5:i]]])
)
element = column_name
else:
element = (column_name, MAX_VALUE - MIN_VALUE)
columns_map[category_type] += [element]
columns_map["joined_encoders"] = joined_encoders
df = pd.DataFrame(result)
encoder = GeneralEncoder(columns_map)
transformed = encoder.fit_transform(df)
# We add "unknow category" => number of categories in encoder should be +1 to the ones in df
for column in columns_map["categorical"]:
assert df[column].nunique() + 1 == len(encoder.encoders[column].set_classes)
for category_type in encoder.category_types:
for encoded in transformed[category_type]:
if category_type == "cyclical":
assert (df.shape[0], 2) == encoded.shape, f"{category_type} {encoded.shape}"
else:
assert (df.shape[0],) == encoded.shape, f"{category_type} {encoded.shape} {df.values.shape}"
assert set(columns_map.keys()) == set(transformed.keys()), f"{transformed.keys()} {columns_map.keys()}"
inverse_transform = encoder.inverse_transform(transformed)
assert inverse_transform.shape == df.shape
assert np.all(inverse_transform.columns == df.columns)
assert np.all(df.dtypes == inverse_transform.dtypes)
@given(
numerical=h_arrays(dtype=float, shape=(ARRAY_SHAPE, 5), elements=h_float(-MIN_VALUE, MAX_VALUE)),
cyclical=h_arrays(dtype=float, shape=(ARRAY_SHAPE, 2), elements=h_float(-MIN_VALUE, MAX_VALUE)),
categorical=h_arrays(
dtype="object",
shape=(ARRAY_SHAPE, 3),
elements=h_char(whitelist_categories=["Lu", "Ll", "Lt", "Lm", "Lo", "Nl"]),
),
)
def test_encoder_on_nonfull(numerical, cyclical, categorical):
"""The test checks that if the dataframe has more columns than columns map - extra columns are ignored."""
columns_map = defaultdict(list)
result = {}
for feature, category_type in [(numerical, "numerical"), (cyclical, "cyclical"), (categorical, "categorical")]:
for i in range(feature.shape[1]):
column_name = f"{category_type} {i}"
result[column_name] = feature[:, i]
if category_type == "numerical":
result[column_name] = feature[:, i]
element = column_name
elif category_type == "categorical":
result[column_name] = feature[:, i]
element = column_name
else:
element = (column_name, MAX_VALUE - MIN_VALUE)
columns_map[category_type] += [element]
for category_type in columns_map:
columns_map[category_type].pop()
df = pd.DataFrame(result)
encoder = GeneralEncoder(columns_map)
transformed = encoder.fit_transform(df)
# We add "unknow category" => number of categories in encoder should be +1 to the ones in df
for column in columns_map["categorical"]:
assert df[column].nunique() + 1 == len(encoder.encoders[column].set_classes)
for category_type in encoder.category_types:
for encoded in transformed[category_type]:
if category_type == "cyclical":
assert (df.shape[0], 2) == encoded.shape, f"{category_type} {encoded.shape}"
else:
assert (df.shape[0],) == encoded.shape, f"{category_type} {encoded.shape} {df.values.shape}"
assert set(columns_map.keys()) == set(transformed.keys()), f"{transformed.keys()} {columns_map.keys()}"
inverse_transform = encoder.inverse_transform(transformed)
assert len(df.columns) == len(inverse_transform.columns) + len(columns_map)
|