1681990
from functools import reduce
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import F, Sum
from . import exceptions
class QuotaLimitField(models.IntegerField):
""" Django virtual model field.
Can be used to manage quotas transparently as model fields.
"""
concrete = False
def __init__(self, quota_field=None, *args, **kwargs):
super(QuotaLimitField, self).__init__(*args, **kwargs)
self._quota_field = quota_field
def db_type(self, connection):
# virtual field -- ignore in migrations
return None
def contribute_to_class(self, cls, name):
self.model = cls
self.name = self.attname = name
# setting column to None tells Django not to treat this as a concrete field
self.column = None
# connect myself as the descriptor for this field
setattr(cls, name, property(self._get_func(), self._set_func()))
cls._meta.add_field(self, private=True)
def deconstruct(self, *args, **kwargs):
name, path, args, kwargs = super(QuotaLimitField, self).deconstruct(*args, **kwargs)
return (name, path, args, {'default': kwargs.get('default'), 'to': None})
def _get_func(self):
# retrieve quota limit from related object
def func(instance, quota_field=self._quota_field):
if instance is None:
raise AttributeError("Can only be accessed via instance")
try:
return instance.quotas.get(name=quota_field).limit
except instance.quotas.model.DoesNotExist:
return quota_field.default_limit
return func
def _set_func(self):
# store quota limit as related object
def func(instance, value, quota_field=self._quota_field):
# a hook to properly init quota after object saved to DB
quota_field.scope_default_limit(instance, value)
instance.set_quota_limit(quota_field, value)
return func
class FieldsContainerMeta(type):
""" Initiates quota fields names.
Quotas fields should be located in class with FieldsContainerMeta metaclass.
Example:
example_quota = QuotaField() # this quota field will have name 'example_quota'
"""
def __new__(self, name, bases, attrs):
for key in attrs:
if isinstance(attrs[key], QuotaField):
attrs[key].name = key
return type.__new__(self, name, bases, attrs)
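# Illustrative usage sketch (hypothetical container class; in the real project
# this likely lives inside a QuotaModelMixin):
#
#     class Quotas(metaclass=FieldsContainerMeta):
#         storage = QuotaField(default_limit=1024)
#
#     # After class creation, Quotas.storage.name == 'storage'.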
class QuotaField:
""" Base quota field.
Links quota to its scope right after its creation.
Allows defining:
- default_limit
- default_usage
- is_backend - whether the quota represents a backend limitation. Backend quotas cannot be modified.
- creation_condition - a function that receives the quota scope and returns True if the quota
  should be created for that scope. The quota is created automatically if creation_condition is None.
The default limit and usage can also be given as callables.
Example:
quota_name = QuotaField(default_limit=lambda scope: scope.attr)
"""
def __init__(
self,
default_limit=-1,
default_usage=0,
is_backend=False,
creation_condition=None,
):
self.default_limit = default_limit
self.default_usage = default_usage
self.is_backend = is_backend
self.creation_condition = creation_condition
def is_connected_to_scope(self, scope):
if self.creation_condition is None:
return True
return self.creation_condition(scope)
def scope_default_limit(self, scope, value=None):
attr_name = '_default_quota_limit_%s' % self.name
if value is not None:
setattr(scope, attr_name, value)
try:
return getattr(scope, attr_name)
except AttributeError:
return (
self.default_limit(scope)
if callable(self.default_limit)
else self.default_limit
)
def get_or_create_quota(self, scope):
if not self.is_connected_to_scope(scope):
raise exceptions.CreationConditionFailedQuotaError(
'Wrong scope: Cannot create quota "%s" for scope "%s".'
% (self.name, scope)
)
defaults = {
'limit': self.scope_default_limit(scope),
'usage': self.default_usage(scope)
if callable(self.default_usage)
else self.default_usage,
}
return scope.quotas.get_or_create(name=self.name, defaults=defaults)
def get_aggregator_quotas(self, quota):
""" Fetch ancestors quotas that have the same name and are registered as aggregator quotas. """
ancestors = quota.scope.get_quota_ancestors()
aggregator_quotas = []
for ancestor in ancestors:
for ancestor_quota_field in ancestor.get_quotas_fields(
field_class=AggregatorQuotaField
):
if ancestor_quota_field.get_child_quota_name() == quota.name:
aggregator_quotas.append(
ancestor.quotas.get(name=ancestor_quota_field.name)
)
return aggregator_quotas
def __str__(self):
return self.name
def recalculate(self, scope):
if not self.is_connected_to_scope(scope):
return
self.recalculate_usage(scope)
def recalculate_usage(self, scope):
pass
class CounterQuotaField(QuotaField):
""" Provides limitation on target models instances count.
Automatically increases/decreases usage on target instances creation/deletion.
By default usage is increased by 1. You may tweak this delta by defining get_delta function,
which accepts target instance and returns number.
Example:
# This quota will increase/decrease own values on any resource creation/deletion
nc_resource_count = CounterQuotaField(
target_models=lambda: Resource.get_all_models(), # a list, or a function that returns a list, of target models
path_to_scope='project', # path from target model to scope
)
It is possible to define a more complex calculation by passing a `get_current_usage`
function as a parameter (see the illustrative sketch right after this docstring).
The function should accept two parameters:
- models - list of target models
- scope - quota scope
and return the current usage count.
"""
def __init__(
self,
target_models,
path_to_scope,
get_current_usage=None,
get_delta=None,
**kwargs
):
self._raw_target_models = target_models
self._raw_get_current_usage = get_current_usage
self._raw_get_delta = get_delta
self.path_to_scope = path_to_scope
super(CounterQuotaField, self).__init__(**kwargs)
def get_delta(self, target_instance):
if not self._raw_get_delta:
return 1
return self._raw_get_delta(target_instance)
def get_current_usage(self, models, scope):
if self._raw_get_current_usage is not None:
return self._raw_get_current_usage(models, scope)
else:
filter_path_to_scope = self.path_to_scope.replace('.', '__')
return sum(
[
m.objects.filter(**{filter_path_to_scope: scope}).count()
for m in models
]
)
@property
def target_models(self):
if not hasattr(self, '_target_models'):
self._target_models = (
self._raw_target_models()
if callable(self._raw_target_models)
else self._raw_target_models
)
return self._target_models
def recalculate_usage(self, scope):
current_usage = self.get_current_usage(self.target_models, scope)
scope.set_quota_usage(self.name, current_usage)
def add_usage(self, target_instance, delta):
try:
scope = self._get_scope(target_instance)
except ObjectDoesNotExist:
# ignore as scope has been deleted
return
delta *= self.get_delta(target_instance)
if self.is_connected_to_scope(scope):
scope.add_quota_usage(self.name, delta, validate=True)
def _get_scope(self, target_instance):
return reduce(getattr, self.path_to_scope.split('.'), target_instance)
class TotalQuotaField(CounterQuotaField):
"""
This field aggregates the sum of a given field across child objects.
For example, it allows computing the total volume size of a project.
class Quotas(quotas_models.QuotaModelMixin.Quotas):
nc_volume_size = quotas_fields.TotalQuotaField(
target_models=lambda: Volume.get_all_models(),
path_to_scope='project',
target_field='size',
)
"""
def __init__(self, target_models, path_to_scope, target_field):
self.target_field = target_field
super(TotalQuotaField, self).__init__(target_models, path_to_scope)
def get_current_usage(self, models, scope):
total_usage = 0
filter_path_to_scope = self.path_to_scope.replace('.', '__')
query = {filter_path_to_scope: scope}
for model in models:
resources = model.objects.filter(**query)
subtotal = resources.values(self.target_field).aggregate(
total_usage=Sum(self.target_field)
)['total_usage']
if subtotal:
total_usage += subtotal
return total_usage
def get_delta(self, target_instance):
return getattr(target_instance, self.target_field)
class AggregatorQuotaField(QuotaField):
""" Aggregates sum of quota scope children with the same name.
Automatically increases/decreases usage if corresponding child quota <aggregation_field> changed.
Example:
# This quota will store sum of all customer projects resources
nc_resource_count = quotas_fields.UsageAggregatorQuotaField(
get_children=lambda customer: customer.projects.all(),
)
"""
aggregation_field = NotImplemented
def __init__(self, get_children, child_quota_name=None, **kwargs):
self.get_children = get_children
self._child_quota_name = child_quota_name
super(AggregatorQuotaField, self).__init__(**kwargs)
def get_child_quota_name(self):
return (
self._child_quota_name if self._child_quota_name is not None else self.name
)
def recalculate_usage(self, scope):
children = self.get_children(scope)
current_usage = 0
for child in children:
child_quota = child.quotas.get(name=self.get_child_quota_name())
current_usage += getattr(child_quota, self.aggregation_field)
scope.set_quota_usage(self.name, current_usage)
def post_child_quota_save(self, scope, child_quota, created=False):
current_value = getattr(child_quota, self.aggregation_field)
if created:
diff = current_value
else:
diff = current_value - child_quota.tracker.previous(self.aggregation_field)
if diff:
scope.quotas.filter(name=self.name).update(usage=F('usage') + diff)
def pre_child_quota_delete(self, scope, child_quota):
diff = getattr(child_quota, self.aggregation_field)
if diff:
scope.quotas.filter(name=self.name).update(usage=F('usage') - diff)
class UsageAggregatorQuotaField(AggregatorQuotaField):
""" Aggregates sum children quotas usages.
Note! It is impossible to aggregate usage of another usage aggregator quotas.
This restriction was added to avoid calls duplications on quota usage field update.
"""
aggregation_field = 'usage'
class LimitAggregatorQuotaField(AggregatorQuotaField):
""" Aggregates sum children quotas limits. """
aggregation_field = 'limit'
# TODO: Implement GlobalQuotaField and GlobalCounterQuotaField
1682003
import sqlite3
conn = sqlite3.connect('spider.sqlite')
cur = conn.cursor()
cur.execute('SELECT * FROM Twitter')
count = 0
for row in cur:
    print(row)
    count = count + 1
print(count, 'rows.')
cur.close()
conn.close()
1682012
from __future__ import division, print_function
from climlab.process.energy_budget import EnergyBudget
from climlab import constants as const
class SimpleAbsorbedShortwave(EnergyBudget):
'''A class for the shortwave radiation process in a one-layer EBM.
The basic assumption is that all the shortwave absorption occurs at the surface.
Computes the diagnostic ``ASR`` (absorbed shortwave radiation)
from the formula ``self.ASR = (1-self.albedo) * self.insolation``
and applies this as a tendency on the surface temperature ``self.Ts``
``albedo`` and ``insolation`` are given as inputs.
These should either be scalars
or have the same dimensions as the state variable ``Ts``
User can supply constants, or link to diagnostics of specific insolation
and albedo processes.
'''
def __init__(self,
insolation=const.S0/4,
albedo=0.3,
**kwargs):
super(SimpleAbsorbedShortwave, self).__init__(**kwargs)
self.add_input('albedo', albedo)
self.add_input('insolation', insolation)
self.add_diagnostic('ASR', 0.*self.Ts)
self.topdown = False # call subprocess compute methods first
def _compute_heating_rates(self):
self.ASR = (1-self.albedo) * self.insolation
self.heating_rate['Ts'] = self.ASR
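# Usage sketch (illustrative; assumes a climlab surface state dictionary with
# a 'Ts' variable, e.g. state = climlab.surface_state(num_lat=90)):
#
#     sw = SimpleAbsorbedShortwave(state=state, albedo=0.32)
#     sw.step_forward()
#     print(sw.ASR)  # diagnostic: absorbed shortwave radiation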
1682043
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class HighscoresDialog(QDialog):
def __init__(self, scorelist, parent=None):
super(HighscoresDialog, self).__init__(parent)
self.setWindowTitle('High Scores')
frame = QFrame(self)
frame.setFrameStyle(QFrame.Panel | QFrame.Sunken)
grid = QGridLayout()
for i in range(10):
name, score = ('', '') if i >= len(scorelist) else scorelist[i]
place_label = QLabel('%3s.' % (i + 1))
name_label = QLabel('%-50s' % name)
score_label = QLabel('%7s' % score)
score_label.setAlignment(Qt.AlignRight)
grid.addWidget(place_label, i, 0)
grid.addWidget(name_label, i, 1)
grid.addWidget(score_label, i, 2)
frame.setLayout(grid)
#
# Dialog layout
#
okbutton = QPushButton('&OK')
self.connect(okbutton, SIGNAL('clicked()'), self, SLOT('accept()'))
bbox = QHBoxLayout()
bbox.addStretch()
bbox.addWidget(okbutton)
bbox.addStretch()
layout = QVBoxLayout()
layout.addWidget(frame)
layout.addLayout(bbox)
self.setLayout(layout)
if __name__ == "__main__":
import sys
import highscores
FILENAME = 'pyqtris_highscores'
ds = highscores.HighScores(10)
ds.load_from_file(FILENAME)
app = QApplication(sys.argv)
dialog = HighscoresDialog(ds.get_list())
dialog.exec_()
1682069
import math
import os
from typing import List
import torch
import sys
import json
import h5py
import numpy as np
import time
def cur_time():
return time.strftime('%Y,%b,%d,%X')
def log_important(message, log_file):
print(message, cur_time())
with open(log_file, 'a') as f:
print(message, cur_time(), file=f)
def extract_deps_from_weights_file(file_path):
weight_dic = read_hdf5(file_path)
if 'deps' in weight_dic:
return weight_dic['deps']
else:
return None
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def read_hdf5(file_path):
result = {}
with h5py.File(file_path, 'r') as f:
for k in f.keys():
value = np.asarray(f[k])
if representsInt(k):
result[int(k)] = value
else:
result[str(k).replace('+','/')] = value
print('read {} arrays from {}'.format(len(result), file_path))
return result
def save_hdf5(numpy_dict, file_path):
with h5py.File(file_path, 'w') as f:
for k,v in numpy_dict.items():
f.create_dataset(str(k).replace('/','+'), data=v)
print('saved {} arrays to {}'.format(len(numpy_dict), file_path))
def start_exp():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--try_arg", type=str, default='')
args = parser.parse_args()
try_arg = args.try_arg
print('the try_arg is ', try_arg)
print('we have {} torch devices'.format(torch.cuda.device_count()),
'the allocated GPU memory is {}'.format(torch.cuda.memory_allocated()))
return try_arg
def torch_accuracy(output, target, topk=(1,)) -> List[torch.Tensor]:
'''
param output, target: torch tensors (logits of shape [batch, num_classes] and integer labels of shape [batch])
'''
# assert isinstance(output, torch.cuda.Tensor), 'expecting Torch Tensor'
# assert isinstance(target, torch.Tensor), 'expecting Torch Tensor'
# print(type(output))
topn = max(topk)
batch_size = output.size(0)
_, pred = output.topk(topn, 1, True, True)
pred = pred.t()
is_correct = pred.eq(target.view(1, -1).expand_as(pred))
ans = []
for i in topk:
is_correct_i = is_correct[:i].view(-1).float().sum(0, keepdim=True)
ans.append(is_correct_i.mul_(100.0 / batch_size))
return ans
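# Usage sketch (illustrative):
#
#     logits = torch.randn(8, 10)
#     labels = torch.randint(0, 10, (8,))
#     top1, top5 = torch_accuracy(logits, labels, topk=(1, 5))
#     # each result is a 1-element tensor holding accuracy in percent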
class AvgMeter(object):
'''
Computing mean
'''
name = 'No name'
def __init__(self, name='No name', fmt = ':.2f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.sum = 0
self.mean = 0
self.num = 0
self.now = 0
def update(self, mean_var, count=1):
if math.isnan(mean_var):
mean_var = 1e6
print('AvgMeter got NaN; clamping to 1e6')
self.now = mean_var
self.num += count
self.sum += mean_var * count
self.mean = float(self.sum) / self.num
def __str__(self):
print_str = self.name + '-{' + self.fmt + '}'
return print_str.format(self.mean)
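# Usage sketch (illustrative):
#
#     meter = AvgMeter('loss', ':.4f')
#     meter.update(0.5, count=32)
#     meter.update(0.3, count=32)
#     print(meter)  # -> loss-0.4000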
def save_args(args, save_dir = None):
if save_dir is None:
param_path = os.path.join(args.resume, "params.json")
else:
param_path = os.path.join(save_dir, 'params.json')
#logger.info("[*] MODEL dir: %s" % args.resume)
#logger.info("[*] PARAM path: %s" % param_path)
with open(param_path, 'w') as fp:
json.dump(args.__dict__, fp, indent=4, sort_keys=True)
def mkdir(path):
if not os.path.exists(path):
print('creating dir {}'.format(path))
os.mkdir(path)
# def save_checkpoint(cur_iters, net, optimizer, lr_scheduler, file_name):
# checkpoint = {'cur_iters': cur_iters,
# 'state_dict': net.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'lr_scheduler_state_dict':lr_scheduler.state_dict()}
# if os.path.exists(file_name):
# print('Overwriting {}'.format(file_name))
# torch.save(checkpoint, file_name)
# link_name = os.path.join('/', *file_name.split(os.path.sep)[:-1], 'last.checkpoint')
# #print(link_name)
# make_symlink(source = file_name, link_name=link_name)
def load_checkpoint(file_name, net=None, optimizer=None, lr_scheduler=None):
if os.path.isfile(file_name):
print("=> loading checkpoint '{}'".format(file_name))
check_point = torch.load(file_name)
if net is not None:
print('Loading network state dict')
net.load_state_dict(check_point['state_dict'])
if optimizer is not None:
print('Loading optimizer state dict')
optimizer.load_state_dict(check_point['optimizer_state_dict'])
if lr_scheduler is not None:
print('Loading lr_scheduler state dict')
lr_scheduler.load_state_dict(check_point['lr_scheduler_state_dict'])
return check_point['cur_iters']
else:
print("=> no checkpoint found at '{}'".format(file_name))
def make_symlink(source, link_name):
'''
Note: overwriting enabled!
'''
if os.path.exists(link_name):
#print("Link name already exist! Removing '{}' and overwriting".format(link_name))
os.remove(link_name)
if os.path.exists(source):
os.symlink(source, link_name)
return
else:
print('Source path does not exist')
#print('SymLink Wrong!')
def add_path(path):
if path not in sys.path:
print('Adding {}'.format(path))
sys.path.append(path)
def format_metric_dict_to_line(metric_dict):
msg = ''
for key, value in metric_dict.items():
msg += '{}={:.5f},'.format(key, value)
return msg
1682124
import tensorflow as tf
from tensorflow import keras
@tf.function
def create_ids(ind, bag_size):
ids = ind + bag_size * tf.range(ind.shape[-1], dtype=tf.int32)
return tf.reshape(ids, [ind.shape[0], -1])
@tf.function
def unique_with_inverse(x):
y, idx = tf.unique(x)
num_segments = tf.shape(y)[0]
num_elems = tf.shape(x)[0]
return (y, idx, tf.math.unsorted_segment_max(tf.range(num_elems),
idx, num_segments))
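# Illustrative example: for x = [3, 1, 3], unique_with_inverse returns
# y = [3, 1], idx = [0, 1, 0], and [2, 1] (the last index in x at which each
# unique value occurs).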
#@tf.function
def reduced_score(inp, relation_unit, training):
solo_fea, ids0, ids1 = inp
u_ids0, idx_map0, inverse0 = unique_with_inverse(ids0)
u_ids1, idx_map1, inverse1 = unique_with_inverse(ids1)
reduced_fea0 = tf.gather(solo_fea[0], u_ids0)
reduced_fea1 = tf.gather(solo_fea[1], u_ids1)
# [1, m', l', 1]
reduced_scores = relation_unit([reduced_fea0[tf.newaxis,...],
reduced_fea1[tf.newaxis,...]], training=training)
reduced_scores_shape = tf.shape(reduced_scores)
nscores = reduced_scores_shape[1]*reduced_scores_shape[2]
# [m', l', 1]
reduced_scores = tf.reshape(reduced_scores, reduced_scores_shape[1:])
# [m, l', 1]
scores_0 = tf.gather(reduced_scores, idx_map0)
# [m, l, 1]
scores = tf.gather(scores_0, idx_map1, axis=1)
return scores, nscores
@tf.function
def sum_kxk_patches(inp, k):
'''
inp: A tensor with shape [m, h, w, c]
k: A scalar tensor
'''
# [m, h, w, c]
inp_shape = tf.shape(inp)
hk = tf.cast(inp_shape[1]/k, dtype=tf.int32)
wk = tf.cast(inp_shape[2]/k, dtype=tf.int32)
k = tf.cast(k, dtype=tf.int32)
# Reshape to [m, h/k, k, w/k, k, c]
inp = tf.reshape(inp, [inp_shape[0], hk, k, wk, k, inp_shape[3]])
return tf.reduce_sum(inp, [2, 4])
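# Illustrative example: with k = 2, an input of shape [m, 4, 4, c] is reduced
# to [m, 2, 2, c], where each output cell is the sum over one 2x2 patch.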
@tf.function
def get_pairs(N, m, l):
# [m, l, 2]
pairs = tf.stack(tf.meshgrid(tf.range(m),
tf.range(l), indexing='ij'),
axis=-1)
# [N, m, l, 2]
pairs = tf.tile(pairs[tf.newaxis], [N, 1, 1, 1])
# [N, m*l, 2]
pairs_shape = tf.shape(pairs)
pairs = tf.reshape(pairs, [pairs_shape[0],
pairs_shape[1]*pairs_shape[2],
pairs_shape[3]])
return pairs
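# Illustrative example: get_pairs(N=1, m=2, l=2) yields, for the single batch
# element, the index pairs [[0, 0], [0, 1], [1, 0], [1, 1]].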
class PairwiseCrossSimilarity(keras.layers.Layer):
def __init__(self, relation_unit):
'''
Args:
relation_unit: a RelationModule object used to compare pairwise features
'''
super(PairwiseCrossSimilarity, self).__init__()
self._relation_unit = relation_unit
#@tf.function
def call(self, inputs, training=None):
'''
Args:
inputs: a list of [orig_fea, ind0, ind1, k] tensors, where
orig_fea: a tensor with shape [MBS*K, B, D] representing the original features
ind0, ind1: have shape [MBS*ncobj, num_top, cobj_size] representing the current subtree
k: the size of the output cobj_size
'''
assert(isinstance(inputs,list) and len(inputs) == 4
), 'inputs must be a list of 4 tensors'
orig_fea, ind0, ind1, k = inputs
bag_size = tf.shape(orig_fea)[1]
ids0 = create_ids(ind0, bag_size)
ids1 = create_ids(ind1, bag_size)
num = tf.shape(ids0)[0]
rsolo_fea = tf.reshape(orig_fea, [num, 2, -1, tf.shape(orig_fea)[2]])
#from IPython import embed;embed()
scores = tf.TensorArray(tf.float32, size=num)
nscores = tf.TensorArray(tf.int32, size=num)
for i in tf.range(num):
ret = reduced_score((rsolo_fea[i], ids0[i], ids1[i]),
self._relation_unit, training)
scores = scores.write(i, ret[0])
nscores = nscores.write(i, ret[1])
scores = scores.stack()
nscores = nscores.stack()
#PairwiseCrossSimilarity.n_unique_scores += tf.reduce_sum(nscores)
m, l = tf.shape(ind0)[1], tf.shape(ind1)[1]
# [N, m, l, 1]
scores = sum_kxk_patches(scores, k//2)
# [N, m*l, 1]
scores = tf.reshape(scores, [num, m*l, 1])
pairs = get_pairs(num, m, l)
return scores, pairs
1682132
import sys
import matplotlib.pyplot
import pandas
import numpy
import struct
import os
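# Reads a binary file of uint64 frame times in microseconds (first sample
# skipped, outliers above 100 ms dropped), converts them to instantaneous FPS,
# and renders several fixed-scale and smoothed FPS-over-time plots.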
data = None
with open(sys.argv[1], "rb") as f:
data = f.read()
data = struct.unpack("{}Q".format(len(data) // 8), data)
data = numpy.array(data, dtype=numpy.uint64)[1:]
data = numpy.array([x for x in data if x < 100000])
rt = numpy.cumsum(data) / 1000000
lTime = rt[-1]
lTime += 5
lScalar = ((lTime // 60) + 1)
lTime = lScalar * 60
data = 1000000 / data
highest = numpy.max(data)
vertScale = ((highest) // 300) + 1
print(vertScale)
#pd = pandas.DataFrame(data)
print(lTime)
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, 2 * vertScale))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
#ax = pd.plot()
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
ax.plot([-5,lTime], [300,300])
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=vertScale*300, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
name = os.path.basename(sys.argv[1])
ax.get_figure().savefig("FullSize_{}.png".format(name))
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, min((lScalar * 81 / 16), 16)))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=300, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("CapSize_300_{}.png".format(name))
print((lScalar*9, min((lScalar * 81 / 16), 16)))
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, min((lScalar * 81 / 16), 16)))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=150, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("CapSize_150_{}.png".format(name))
print((lScalar*9, min((lScalar * 81 / 16), 16)))
fig = matplotlib.pyplot.figure(figsize=(16, 9))
ax = matplotlib.pyplot.axes()
ax.plot(rt, data)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=vertScale*300, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("CapSize_500_{}.png".format(name))
smoothfactor = 60
# 60-sample moving average of FPS (computed via convolution)
comp = numpy.convolve(data, numpy.ones((smoothfactor,))/smoothfactor, mode='valid')
print(lScalar*9, min((lScalar * 81 / 16), max(16, (lScalar * 18 / 16))))
fig = matplotlib.pyplot.figure(figsize=(lScalar*9, min((lScalar * 81 / 16), max(16, (lScalar * 9 / 16)))))
ax = matplotlib.pyplot.axes()
ax.plot(rt[:-(smoothfactor-1)], comp)
ax.plot([-5,lTime], [30,30])
ax.plot([-5,lTime], [60,60])
#ax = pd.plot()
ax.set_xlabel("Time (Seconds)")
ax.set_ylabel("Frames Per Second")
ax.set_ylim(top=500, bottom=-1)
ax.set_xlim(left=-5, right=lTime)
ax.get_figure().savefig("Sliding_{}.png".format(name))
1682176
import pytest
from wemake_python_styleguide.violations.refactoring import (
ImplicitPrimitiveViolation,
)
from wemake_python_styleguide.visitors.ast.functions import (
UselessLambdaDefinitionVisitor,
)
@pytest.mark.parametrize('code', [
'lambda x: 0',
'lambda *x: []',
'lambda **x: ()',
'lambda x, y: 0',
'lambda: 1',
'lambda x=1: 0',
'lambda: [0]',
'lambda: [some]',
'lambda: None',
'lambda: True',
'lambda: "a"',
'lambda: b"a"',
'lambda: (1, 2)',
'lambda: name',
])
def test_correct_lambda(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that isinstance is callable with correct types."""
tree = parse_ast_tree(code)
visitor = UselessLambdaDefinitionVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
'lambda: 0',
'lambda: 0.0',
'lambda: 0j',
'lambda: b""',
'lambda: ""',
'lambda: []',
'lambda: ()',
'lambda: False',
'lambda: lambda: ""',
'lambda: {}', # noqa: P103
])
def test_wrong_lambda(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that isinstance is callable with correct types."""
tree = parse_ast_tree(code)
visitor = UselessLambdaDefinitionVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ImplicitPrimitiveViolation])
1682187
import FWCore.ParameterSet.Config as cms
tauRegionalPixelSeedGenerator = cms.EDProducer("SeedGeneratorFromRegionHitsEDProducer",
OrderedHitsFactoryPSet = cms.PSet(
ComponentName = cms.string('StandardHitPairGenerator'),
SeedingLayers = cms.InputTag('PixelLayerPairs')
),
SeedComparitorPSet = cms.PSet(
ComponentName = cms.string('none')
),
RegionFactoryPSet = cms.PSet(
ComponentName = cms.string('TauRegionalPixelSeedGenerator'),
RegionPSet = cms.PSet(
precise = cms.bool(True),
deltaPhiRegion = cms.double(0.1),
originHalfLength = cms.double(0.2),
originRadius = cms.double(0.2),
deltaEtaRegion = cms.double(0.1),
ptMin = cms.double(5.0),
JetSrc = cms.InputTag("icone5Tau1"),
originZPos = cms.double(0.0),
vertexSrc = cms.InputTag("pixelVertices"),
howToUseMeasurementTracker = cms.string("ForSiStrips"),
measurementTrackerName = cms.InputTag("MeasurementTrackerEvent"),
)
),
TTRHBuilder = cms.string('WithTrackAngle')
)
1682202
import torch
import torch.nn.functional as F
def rot90(x, k=1):
"""rotate batch of images by 90 degrees k times"""
return torch.rot90(x, k, (2, 3))
def hflip(x):
"""flip batch of images horizontally"""
return x.flip(3)
def vflip(x):
"""flip batch of images vertically"""
return x.flip(2)
def sum(x1, x2):
"""sum of two tensors"""
return x1 + x2
def add(x, value):
"""add value to tensor"""
return x + value
def max(x1, x2):
"""compare 2 tensors and take max values"""
return torch.max(x1, x2)
def min(x1, x2):
"""compare 2 tensors and take min values"""
return torch.min(x1, x2)
def multiply(x, factor):
"""multiply tensor by factor"""
return x * factor
def scale(x, scale_factor, interpolation="nearest", align_corners=None):
"""scale batch of images by `scale_factor` with given interpolation mode"""
h, w = x.shape[2:]
new_h = int(h * scale_factor)
new_w = int(w * scale_factor)
return F.interpolate(
x, size=(new_h, new_w), mode=interpolation, align_corners=align_corners
)
def resize(x, size, interpolation="nearest", align_corners=None):
"""resize batch of images to given spatial size with given interpolation mode"""
return F.interpolate(x, size=size, mode=interpolation, align_corners=align_corners)
def crop(x, x_min=None, x_max=None, y_min=None, y_max=None):
"""perform crop on batch of images"""
return x[:, :, y_min:y_max, x_min:x_max]
def crop_lt(x, crop_h, crop_w):
"""crop left top corner"""
return x[:, :, 0:crop_h, 0:crop_w]
def crop_lb(x, crop_h, crop_w):
"""crop left bottom corner"""
return x[:, :, -crop_h:, 0:crop_w]
def crop_rt(x, crop_h, crop_w):
"""crop right top corner"""
return x[:, :, 0:crop_h, -crop_w:]
def crop_rb(x, crop_h, crop_w):
"""crop right bottom corner"""
return x[:, :, -crop_h:, -crop_w:]
def center_crop(x, crop_h, crop_w):
"""make center crop"""
center_h = x.shape[2] // 2
center_w = x.shape[3] // 2
half_crop_h = crop_h // 2
half_crop_w = crop_w // 2
y_min = center_h - half_crop_h
y_max = center_h + half_crop_h + crop_h % 2
x_min = center_w - half_crop_w
x_max = center_w + half_crop_w + crop_w % 2
return x[:, :, y_min:y_max, x_min:x_max]
def _disassemble_keypoints(keypoints):
x = keypoints[..., 0]
y = keypoints[..., 1]
return x, y
def _assemble_keypoints(x, y):
return torch.stack([x, y], dim=-1)
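# Note: the keypoint helpers below assume coordinates normalized to [0, 1].
# Illustrative example: keypoints_hflip on (x, y) = (0.2, 0.7) yields (0.8, 0.7).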
def keypoints_hflip(keypoints):
x, y = _disassemble_keypoints(keypoints)
return _assemble_keypoints(1. - x, y)
def keypoints_vflip(keypoints):
x, y = _disassemble_keypoints(keypoints)
return _assemble_keypoints(x, 1. - y)
def keypoints_rot90(keypoints, k=1):
if k not in {0, 1, 2, 3}:
raise ValueError("Parameter k must be one of {0, 1, 2, 3}")
if k == 0:
return keypoints
x, y = _disassemble_keypoints(keypoints)
if k == 1:
xy = [y, 1. - x]
elif k == 2:
xy = [1. - x, 1. - y]
elif k == 3:
xy = [1. - y, x]
return _assemble_keypoints(*xy)
1682223
load("@rules_pkg//:pkg.bzl", "pkg_zip")
def copy_file(name, src, out):
native.genrule(
name = name,
srcs = [src],
outs = [out],
cmd = "cp $< $@"
)
def pkg_asset(name, srcs = [], **kwargs):
"""Package MediaPipe assets
This task renames asset files so that they can be added to an AssetBundle (e.g. x.tflite -> x.bytes) and zips them.
Args:
name: the name of the output zip file
srcs: files to be packaged
"""
rename_target = "normalize_%s_exts" % name
_normalize_exts(name = rename_target, srcs = srcs)
pkg_zip(
name = name,
srcs = [":" + rename_target],
**kwargs,
)
def _normalize_exts_impl(ctx):
output_files = []
for src in ctx.files.srcs:
ext = "bytes" if src.extension in ctx.attr.bytes_exts else ("txt" if src.extension in ctx.attr.txt_exts else src.extension)
if ext == src.extension:
output_files.append(src)
else:
dest = ctx.actions.declare_file(src.path[:-1 * len(src.extension)] + ext)
ctx.actions.run_shell(
inputs = [src],
outputs = [dest],
arguments = [src.path, dest.path],
command = "test $1 != $2 && cp $1 $2",
progress_message = "Copying {} to {}...".format(src.path, dest.path),
)
output_files.append(dest)
return [
DefaultInfo(files = depset(output_files)),
]
_normalize_exts = rule(
implementation = _normalize_exts_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
"bytes_exts": attr.string_list(default = ["binarypb", "jpg", "png", "tflite", "uuu"]),
"txt_exts": attr.string_list(default = ["pbtxt"]),
},
)
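# Usage sketch (hypothetical BUILD file; the bzl file name and asset names are illustrative):
#
#     load(":assets.bzl", "pkg_asset")
#
#     pkg_asset(
#         name = "mediapipe_assets",
#         srcs = ["face_detection.tflite", "pose_landmark.pbtxt"],
#     )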
1682228
from buffalo import utils
from playerConsole import PlayerConsole
from playerConsole import EventText
from playerConsole import TextWrapper
import pygame
utils.init()
PlayerConsole.init()
ipsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum lobortis ac enim vel feugiat. Duis ac metus id est lobortis euismod. Vestibulum finibus est eget odio rhoncus consequat. Vestibulum sed lectus justo. Aenean fringilla mi et ultricies condimentum. Donec sapien quam, congue vitae felis at, bibendum tincidunt purus. Nulla in nunc consequat, laoreet nibh non, pulvinar eros. Quisque at justo mauris."
def test_event_text():
e = EventText("test",(1,2,3,4))
assert e.text == 'test'
assert e.color == (1,2,3,4)
def test_text_wrapper():
myfont = pygame.font.SysFont("monospace", 15)
global ipsum
assert len(TextWrapper.wrap_line(ipsum,myfont,10)) == 414
assert len(TextWrapper.wrap_line(ipsum,myfont,100)) == 50
assert len(TextWrapper.wrap_line(ipsum,myfont,250)) == 17
def test_register_text():
currLen = len(PlayerConsole.TEXT_EVENTS)
PlayerConsole.registerNewEvent("test")
assert len(PlayerConsole.TEXT_EVENTS) == currLen + 1
assert isinstance(PlayerConsole.TEXT_EVENTS[0], EventText)
def test_flash_on():
PlayerConsole.flashOn()
assert PlayerConsole.ALPHA == 255
assert PlayerConsole.ALPHA_COUNTER == 0
def test_update():
PlayerConsole.flashOn()
PlayerConsole.update()
assert PlayerConsole.ALPHA_COUNTER == 1
tray_size = PlayerConsole.tray.surface.get_size()
PlayerConsole.ALPHA_COUNTER = PlayerConsole.ALPHA_WAIT + 1
PlayerConsole.update()
#Test tray 'deletion' upon timeout
assert PlayerConsole.tray.surface.get_size() != tray_size
def test_render_labels_one_text():
PlayerConsole.flashOn()
PlayerConsole.clearTextEvents()
PlayerConsole.registerNewEvent('test')
texts, height = PlayerConsole.renderTextLabels()
assert len(texts) == 1
assert height <= PlayerConsole.tray.surface.get_height()
assert PlayerConsole.renderTextsToSurface(texts,height).get_size() == (PlayerConsole.tray.surface.get_width(),height)
def test_render_labels_overload():
PlayerConsole.flashOn()
PlayerConsole.clearTextEvents()
global ipsum
for x in range(5):
PlayerConsole.registerNewEvent(ipsum)
texts, height = PlayerConsole.renderTextLabels()
assert len(texts) == 14
print(PlayerConsole.tray.surface.get_height())
assert height <= PlayerConsole.tray.surface.get_height()
assert PlayerConsole.renderTextsToSurface(texts,height).get_size()[0] == PlayerConsole.tray.surface.get_width()
assert PlayerConsole.renderTextsToSurface(texts,height).get_size()[1] <= PlayerConsole.tray.surface.get_height()
1682237
from django_markup.filter import MarkupFilter
class MarkdownMarkupFilter(MarkupFilter):
"""
Applies Markdown conversion to a string, and returns the HTML.
"""
title = 'Markdown'
kwargs = {'safe_mode': True}
def render(self, text, **kwargs):
    # Merge per-call kwargs into a local dict so they don't mutate the
    # shared class-level defaults.
    render_kwargs = dict(self.kwargs, **kwargs)
    from markdown import markdown
    text = markdown(text, **render_kwargs)
    # Markdown's safe_mode is deprecated. We replace it with Bleach
    # to keep it backwards compatible.
    # https://python-markdown.github.io/change_log/release-2.6/#safe_mode-deprecated
    if render_kwargs.get('safe_mode') is True:
from bleach import clean
# fmt: off
markdown_tags = [
"h1", "h2", "h3", "h4", "h5", "h6",
"b", "i", "strong", "em", "tt",
"p", "br",
"span", "div", "blockquote", "pre", "code", "hr",
"ul", "ol", "li", "dd", "dt",
"img",
"a",
"sub", "sup",
]
markdown_attrs = {
"*": ["id"],
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
# fmt: on
text = clean(text, markdown_tags, markdown_attrs)
return text
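# Usage sketch (illustrative; exact output depends on the installed
# markdown/bleach versions):
#
#     f = MarkdownMarkupFilter()
#     f.render('**hello** <script>alert(1)</script>')
#     # -> markdown-rendered HTML with the disallowed <script> tag escaped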
1682299
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import TestObj, setup
class IfTagTests(SimpleTestCase):
@setup({'if-tag01': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag01(self):
output = self.engine.render_to_string('if-tag01', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag02': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag02(self):
output = self.engine.render_to_string('if-tag02', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag03': '{% if foo %}yes{% else %}no{% endif %}'})
def test_if_tag03(self):
output = self.engine.render_to_string('if-tag03')
self.assertEqual(output, 'no')
@setup({'if-tag04': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag04(self):
output = self.engine.render_to_string('if-tag04', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag05': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag05(self):
output = self.engine.render_to_string('if-tag05', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag06': '{% if foo %}foo{% elif bar %}bar{% endif %}'})
def test_if_tag06(self):
output = self.engine.render_to_string('if-tag06')
self.assertEqual(output, '')
@setup({'if-tag07': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag07(self):
output = self.engine.render_to_string('if-tag07', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag08': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag08(self):
output = self.engine.render_to_string('if-tag08', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag09': '{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}'})
def test_if_tag09(self):
output = self.engine.render_to_string('if-tag09')
self.assertEqual(output, 'nothing')
@setup({'if-tag10': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag10(self):
output = self.engine.render_to_string('if-tag10', {'foo': True})
self.assertEqual(output, 'foo')
@setup({'if-tag11': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag11(self):
output = self.engine.render_to_string('if-tag11', {'bar': True})
self.assertEqual(output, 'bar')
@setup({'if-tag12': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag12(self):
output = self.engine.render_to_string('if-tag12', {'baz': True})
self.assertEqual(output, 'baz')
@setup({'if-tag13': '{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}'})
def test_if_tag13(self):
output = self.engine.render_to_string('if-tag13')
self.assertEqual(output, 'nothing')
# Filters
@setup({'if-tag-filter01': '{% if foo|length == 5 %}yes{% else %}no{% endif %}'})
def test_if_tag_filter01(self):
output = self.engine.render_to_string('if-tag-filter01', {'foo': 'abcde'})
self.assertEqual(output, 'yes')
@setup({'if-tag-filter02': '{% if foo|upper == \'ABC\' %}yes{% else %}no{% endif %}'})
def test_if_tag_filter02(self):
output = self.engine.render_to_string('if-tag-filter02')
self.assertEqual(output, 'no')
# Equality
@setup({'if-tag-eq01': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq01(self):
output = self.engine.render_to_string('if-tag-eq01')
self.assertEqual(output, 'yes')
@setup({'if-tag-eq02': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq02(self):
output = self.engine.render_to_string('if-tag-eq02', {'foo': 1})
self.assertEqual(output, 'no')
@setup({'if-tag-eq03': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq03(self):
output = self.engine.render_to_string('if-tag-eq03', {'foo': 1, 'bar': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-eq04': '{% if foo == bar %}yes{% else %}no{% endif %}'})
def test_if_tag_eq04(self):
output = self.engine.render_to_string('if-tag-eq04', {'foo': 1, 'bar': 2})
self.assertEqual(output, 'no')
@setup({'if-tag-eq05': '{% if foo == \'\' %}yes{% else %}no{% endif %}'})
def test_if_tag_eq05(self):
output = self.engine.render_to_string('if-tag-eq05')
self.assertEqual(output, 'no')
# Inequality
@setup({'if-tag-noteq01': '{% if foo != bar %}yes{% else %}no{% endif %}'})
def test_if_tag_noteq01(self):
output = self.engine.render_to_string('if-tag-noteq01')
self.assertEqual(output, 'no')
@setup({'if-tag-noteq02': '{% if foo != bar %}yes{% else %}no{% endif %}'})
def test_if_tag_noteq02(self):
output = self.engine.render_to_string('if-tag-noteq02', {'foo': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-noteq03': '{% if foo != bar %}yes{% else %}no{% endif %}'})
def test_if_tag_noteq03(self):
output = self.engine.render_to_string('if-tag-noteq03', {'foo': 1, 'bar': 1})
self.assertEqual(output, 'no')
@setup({'if-tag-noteq04': '{% if foo != bar %}yes{% else %}no{% endif %}'})
def test_if_tag_noteq04(self):
output = self.engine.render_to_string('if-tag-noteq04', {'foo': 1, 'bar': 2})
self.assertEqual(output, 'yes')
@setup({'if-tag-noteq05': '{% if foo != "" %}yes{% else %}no{% endif %}'})
def test_if_tag_noteq05(self):
output = self.engine.render_to_string('if-tag-noteq05')
self.assertEqual(output, 'yes')
# Comparison
@setup({'if-tag-gt-01': '{% if 2 > 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gt_01(self):
output = self.engine.render_to_string('if-tag-gt-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-gt-02': '{% if 1 > 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gt_02(self):
output = self.engine.render_to_string('if-tag-gt-02')
self.assertEqual(output, 'no')
@setup({'if-tag-gte-01': '{% if 1 >= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_gte_01(self):
output = self.engine.render_to_string('if-tag-gte-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-gte-02': '{% if 1 >= 2 %}yes{% else %}no{% endif %}'})
def test_if_tag_gte_02(self):
output = self.engine.render_to_string('if-tag-gte-02')
self.assertEqual(output, 'no')
@setup({'if-tag-lt-01': '{% if 1 < 2 %}yes{% else %}no{% endif %}'})
def test_if_tag_lt_01(self):
output = self.engine.render_to_string('if-tag-lt-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-lt-02': '{% if 1 < 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lt_02(self):
output = self.engine.render_to_string('if-tag-lt-02')
self.assertEqual(output, 'no')
@setup({'if-tag-lte-01': '{% if 1 <= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lte_01(self):
output = self.engine.render_to_string('if-tag-lte-01')
self.assertEqual(output, 'yes')
@setup({'if-tag-lte-02': '{% if 2 <= 1 %}yes{% else %}no{% endif %}'})
def test_if_tag_lte_02(self):
output = self.engine.render_to_string('if-tag-lte-02')
self.assertEqual(output, 'no')
# Contains
@setup({'if-tag-in-01': '{% if 1 in x %}yes{% else %}no{% endif %}'})
def test_if_tag_in_01(self):
output = self.engine.render_to_string('if-tag-in-01', {'x': [1]})
self.assertEqual(output, 'yes')
@setup({'if-tag-in-02': '{% if 2 in x %}yes{% else %}no{% endif %}'})
def test_if_tag_in_02(self):
output = self.engine.render_to_string('if-tag-in-02', {'x': [1]})
self.assertEqual(output, 'no')
@setup({'if-tag-not-in-01': '{% if 1 not in x %}yes{% else %}no{% endif %}'})
def test_if_tag_not_in_01(self):
output = self.engine.render_to_string('if-tag-not-in-01', {'x': [1]})
self.assertEqual(output, 'no')
@setup({'if-tag-not-in-02': '{% if 2 not in x %}yes{% else %}no{% endif %}'})
def test_if_tag_not_in_02(self):
output = self.engine.render_to_string('if-tag-not-in-02', {'x': [1]})
self.assertEqual(output, 'yes')
# AND
@setup({'if-tag-and01': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and01(self):
output = self.engine.render_to_string('if-tag-and01', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-and02': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and02(self):
output = self.engine.render_to_string('if-tag-and02', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and03': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and03(self):
output = self.engine.render_to_string('if-tag-and03', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-and04': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and04(self):
output = self.engine.render_to_string('if-tag-and04', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and05': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and05(self):
output = self.engine.render_to_string('if-tag-and05', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and06': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and06(self):
output = self.engine.render_to_string('if-tag-and06', {'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-and07': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and07(self):
output = self.engine.render_to_string('if-tag-and07', {'foo': True})
self.assertEqual(output, 'no')
@setup({'if-tag-and08': '{% if foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_and08(self):
output = self.engine.render_to_string('if-tag-and08', {'bar': True})
self.assertEqual(output, 'no')
# OR
@setup({'if-tag-or01': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or01(self):
output = self.engine.render_to_string('if-tag-or01', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or02': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or02(self):
output = self.engine.render_to_string('if-tag-or02', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-or03': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or03(self):
output = self.engine.render_to_string('if-tag-or03', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or04': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or04(self):
output = self.engine.render_to_string('if-tag-or04', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or05': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or05(self):
output = self.engine.render_to_string('if-tag-or05', {'foo': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or06': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or06(self):
output = self.engine.render_to_string('if-tag-or06', {'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-or07': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or07(self):
output = self.engine.render_to_string('if-tag-or07', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or08': '{% if foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_or08(self):
output = self.engine.render_to_string('if-tag-or08', {'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-or09': '{% if foo or bar or baz %}yes{% else %}no{% endif %}'})
def test_if_tag_or09(self):
"""
multiple ORs
"""
output = self.engine.render_to_string('if-tag-or09', {'baz': True})
self.assertEqual(output, 'yes')
# NOT
@setup({'if-tag-not01': '{% if not foo %}no{% else %}yes{% endif %}'})
def test_if_tag_not01(self):
output = self.engine.render_to_string('if-tag-not01', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not02': '{% if not not foo %}no{% else %}yes{% endif %}'})
def test_if_tag_not02(self):
output = self.engine.render_to_string('if-tag-not02', {'foo': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not06': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not06(self):
output = self.engine.render_to_string('if-tag-not06')
self.assertEqual(output, 'no')
@setup({'if-tag-not07': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not07(self):
output = self.engine.render_to_string('if-tag-not07', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not08': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not08(self):
output = self.engine.render_to_string('if-tag-not08', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not09': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not09(self):
output = self.engine.render_to_string('if-tag-not09', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not10': '{% if foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not10(self):
output = self.engine.render_to_string('if-tag-not10', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not11': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not11(self):
output = self.engine.render_to_string('if-tag-not11')
self.assertEqual(output, 'no')
@setup({'if-tag-not12': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not12(self):
output = self.engine.render_to_string('if-tag-not12', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not13': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not13(self):
output = self.engine.render_to_string('if-tag-not13', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not14': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not14(self):
output = self.engine.render_to_string('if-tag-not14', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not15': '{% if not foo and bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not15(self):
output = self.engine.render_to_string('if-tag-not15', {'foo': False, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not16': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not16(self):
output = self.engine.render_to_string('if-tag-not16')
self.assertEqual(output, 'yes')
@setup({'if-tag-not17': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not17(self):
output = self.engine.render_to_string('if-tag-not17', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not18': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not18(self):
output = self.engine.render_to_string('if-tag-not18', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not19': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not19(self):
output = self.engine.render_to_string('if-tag-not19', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not20': '{% if foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not20(self):
output = self.engine.render_to_string('if-tag-not20', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not21': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not21(self):
output = self.engine.render_to_string('if-tag-not21')
self.assertEqual(output, 'yes')
@setup({'if-tag-not22': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not22(self):
output = self.engine.render_to_string('if-tag-not22', {'foo': True, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not23': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not23(self):
output = self.engine.render_to_string('if-tag-not23', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not24': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not24(self):
output = self.engine.render_to_string('if-tag-not24', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not25': '{% if not foo or bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not25(self):
output = self.engine.render_to_string('if-tag-not25', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not26': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not26(self):
output = self.engine.render_to_string('if-tag-not26')
self.assertEqual(output, 'yes')
@setup({'if-tag-not27': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not27(self):
output = self.engine.render_to_string('if-tag-not27', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not28': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not28(self):
output = self.engine.render_to_string('if-tag-not28', {'foo': True, 'bar': False})
self.assertEqual(output, 'no')
@setup({'if-tag-not29': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not29(self):
output = self.engine.render_to_string('if-tag-not29', {'foo': False, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not30': '{% if not foo and not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not30(self):
output = self.engine.render_to_string('if-tag-not30', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not31': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not31(self):
output = self.engine.render_to_string('if-tag-not31')
self.assertEqual(output, 'yes')
@setup({'if-tag-not32': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not32(self):
output = self.engine.render_to_string('if-tag-not32', {'foo': True, 'bar': True})
self.assertEqual(output, 'no')
@setup({'if-tag-not33': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not33(self):
output = self.engine.render_to_string('if-tag-not33', {'foo': True, 'bar': False})
self.assertEqual(output, 'yes')
@setup({'if-tag-not34': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not34(self):
output = self.engine.render_to_string('if-tag-not34', {'foo': False, 'bar': True})
self.assertEqual(output, 'yes')
@setup({'if-tag-not35': '{% if not foo or not bar %}yes{% else %}no{% endif %}'})
def test_if_tag_not35(self):
output = self.engine.render_to_string('if-tag-not35', {'foo': False, 'bar': False})
self.assertEqual(output, 'yes')
# Various syntax errors
@setup({'if-tag-error01': '{% if %}yes{% endif %}'})
def test_if_tag_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error01')
@setup({'if-tag-error02': '{% if foo and %}yes{% else %}no{% endif %}'})
def test_if_tag_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error02', {'foo': True})
@setup({'if-tag-error03': '{% if foo or %}yes{% else %}no{% endif %}'})
def test_if_tag_error03(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error03', {'foo': True})
@setup({'if-tag-error04': '{% if not foo and %}yes{% else %}no{% endif %}'})
def test_if_tag_error04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error04', {'foo': True})
@setup({'if-tag-error05': '{% if not foo or %}yes{% else %}no{% endif %}'})
def test_if_tag_error05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-error05', {'foo': True})
@setup({'if-tag-error06': '{% if abc def %}yes{% endif %}'})
def test_if_tag_error06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error06')
@setup({'if-tag-error07': '{% if not %}yes{% endif %}'})
def test_if_tag_error07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error07')
@setup({'if-tag-error08': '{% if and %}yes{% endif %}'})
def test_if_tag_error08(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error08')
@setup({'if-tag-error09': '{% if or %}yes{% endif %}'})
def test_if_tag_error09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error09')
@setup({'if-tag-error10': '{% if == %}yes{% endif %}'})
def test_if_tag_error10(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error10')
@setup({'if-tag-error11': '{% if 1 == %}yes{% endif %}'})
def test_if_tag_error11(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error11')
@setup({'if-tag-error12': '{% if a not b %}yes{% endif %}'})
def test_if_tag_error12(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('if-tag-error12')
@setup({'else-if-tag-error01': '{% if foo is bar %} yes {% else if foo is not bar %} no {% endif %}'})
def test_else_if_tag_error01(self):
error_message = 'Malformed template tag at line 1: "else if foo is not bar"'
with self.assertRaisesMessage(TemplateSyntaxError, error_message):
self.engine.get_template('else-if-tag-error01')
@setup({'if-tag-shortcircuit01': '{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}'})
def test_if_tag_shortcircuit01(self):
"""
If evaluations are shortcircuited where possible
"""
output = self.engine.render_to_string('if-tag-shortcircuit01', {'x': TestObj()})
self.assertEqual(output, 'yes')
@setup({'if-tag-shortcircuit02': '{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}'})
def test_if_tag_shortcircuit02(self):
"""
The is_bad() function should not be evaluated. If it is, an
exception is raised.
"""
output = self.engine.render_to_string('if-tag-shortcircuit02', {'x': TestObj()})
self.assertEqual(output, 'no')
@setup({'if-tag-badarg01': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg01(self):
"""
Non-existent args
"""
output = self.engine.render_to_string('if-tag-badarg01')
self.assertEqual(output, '')
@setup({'if-tag-badarg02': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg02(self):
output = self.engine.render_to_string('if-tag-badarg02', {'y': 0})
self.assertEqual(output, '')
@setup({'if-tag-badarg03': '{% if x|default_if_none:y %}yes{% endif %}'})
def test_if_tag_badarg03(self):
output = self.engine.render_to_string('if-tag-badarg03', {'y': 1})
self.assertEqual(output, 'yes')
@setup({'if-tag-badarg04': '{% if x|default_if_none:y %}yes{% else %}no{% endif %}'})
def test_if_tag_badarg04(self):
output = self.engine.render_to_string('if-tag-badarg04')
self.assertEqual(output, 'no')
@setup({'if-tag-single-eq': '{% if foo = bar %}yes{% else %}no{% endif %}'})
def test_if_tag_single_eq(self):
# A single equals sign is a syntax error.
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('if-tag-single-eq', {'foo': 1})
@setup({'template': '{% if foo is True %}yes{% else %}no{% endif %}'})
def test_if_is_match(self):
output = self.engine.render_to_string('template', {'foo': True})
self.assertEqual(output, 'yes')
@setup({'template': '{% if foo is True %}yes{% else %}no{% endif %}'})
def test_if_is_no_match(self):
output = self.engine.render_to_string('template', {'foo': 1})
self.assertEqual(output, 'no')
@setup({'template': '{% if foo is bar %}yes{% else %}no{% endif %}'})
def test_if_is_variable_missing(self):
output = self.engine.render_to_string('template', {'foo': 1})
self.assertEqual(output, 'no')
@setup({'template': '{% if foo is bar %}yes{% else %}no{% endif %}'})
def test_if_is_both_variables_missing(self):
output = self.engine.render_to_string('template', {})
self.assertEqual(output, 'yes')
@setup({'template': '{% if foo is not None %}yes{% else %}no{% endif %}'})
def test_if_is_not_match(self):
# For this to act as a regression test, it's important not to use
# foo=True because True is (not None)
output = self.engine.render_to_string('template', {'foo': False})
self.assertEqual(output, 'yes')
@setup({'template': '{% if foo is not None %}yes{% else %}no{% endif %}'})
def test_if_is_not_no_match(self):
output = self.engine.render_to_string('template', {'foo': None})
self.assertEqual(output, 'no')
@setup({'template': '{% if foo is not bar %}yes{% else %}no{% endif %}'})
def test_if_is_not_variable_missing(self):
output = self.engine.render_to_string('template', {'foo': False})
self.assertEqual(output, 'yes')
@setup({'template': '{% if foo is not bar %}yes{% else %}no{% endif %}'})
def test_if_is_not_both_variables_missing(self):
output = self.engine.render_to_string('template', {})
self.assertEqual(output, 'no')
|
1682310
|
import os
import time
import tensorflow as tf
import numpy as np
from scipy.stats import spearmanr
from sklearn.metrics import r2_score
import mnist_input
import multi_mnist_cnn
from sinkhorn import sinkhorn_operator
import util
import random
os.environ['TF_CUDNN_DETERMINISTIC'] = 'true'
tf.set_random_seed(94305)
random.seed(94305)
np.random.seed(94305)
flags = tf.app.flags
flags.DEFINE_integer('M', 1, 'batch size')
flags.DEFINE_integer('n', 3, 'number of elements to compare at a time')
flags.DEFINE_integer('l', 5, 'number of digits')
flags.DEFINE_integer('repetition', 0, 'number of repetition')
flags.DEFINE_float('pow', 1, 'softsort exponent for pairwise difference')
flags.DEFINE_float('tau', 5, 'temperature (dependent meaning)')
flags.DEFINE_string('method', 'deterministic_neuralsort',
'which method to use?')
flags.DEFINE_integer('n_s', 5, 'number of samples')
flags.DEFINE_integer('num_epochs', 200, 'number of epochs to train')
flags.DEFINE_float('lr', 1e-4, 'initial learning rate')
FLAGS = flags.FLAGS
n_s = FLAGS.n_s
NUM_EPOCHS = FLAGS.num_epochs
M = FLAGS.M
n = FLAGS.n
l = FLAGS.l
repetition = FLAGS.repetition
power = FLAGS.pow
tau = FLAGS.tau
method = FLAGS.method
initial_rate = FLAGS.lr
train_iterator, val_iterator, test_iterator = mnist_input.get_iterators(
l, n, 10 ** l - 1, minibatch_size=M)
false_tensor = tf.convert_to_tensor(False)
evaluation = tf.placeholder_with_default(false_tensor, ())
temp = tf.cond(evaluation,
false_fn=lambda: tf.convert_to_tensor(tau, dtype=tf.float32),
true_fn=lambda: tf.convert_to_tensor(1e-10, dtype=tf.float32)
)
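# During evaluation the temperature is driven to ~0 so the relaxed (soft)
# permutation matrices collapse to near-hard permutations.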
experiment_id = 'median-%s-M%d-n%d-l%d-t%d-p%.2f' % (method, M, n, l, tau * 10, power)
checkpoint_path = 'checkpoints/%s/' % experiment_id
predictions_path = 'predictions/'
handle = tf.placeholder(tf.string, ())
X_iterator = tf.data.Iterator.from_string_handle(
handle,
(tf.float32, tf.float32, tf.float32, tf.float32),
((M, n, l * 28, 28), (M,), (M, n), (M, n))
)
X, y, median_scores, true_scores = X_iterator.get_next()
true_scores = tf.expand_dims(true_scores, 2)
P_true = util.neuralsort(true_scores, 1e-10)
n_prime = n
def get_median_probs(P):
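    # Row n // 2 of the (relaxed) permutation matrix holds each element's
    # weight for the middle rank; renormalizing turns it into a probability
    # distribution over which element is the median.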
median_strip = P[:, n // 2, :]
median_total = tf.reduce_sum(median_strip, axis=1, keepdims=True)
probs = median_strip / median_total
return probs
if method == 'vanilla':
with tf.variable_scope("phi"):
representations = multi_mnist_cnn.deepnn(l, X, 10)
representations = tf.reshape(representations, [M, n * 10])
fc1 = tf.layers.dense(representations, 10, tf.nn.relu)
fc2 = tf.layers.dense(fc1, 10, tf.nn.relu)
fc3 = tf.layers.dense(fc2, 10, tf.nn.relu)
y_hat = tf.layers.dense(fc3, 1)
y_hat = tf.squeeze(y_hat)
loss_phi = tf.reduce_sum(tf.squared_difference(y_hat, y))
loss_theta = loss_phi
prob_median_eval = 0
elif method == 'sinkhorn':
with tf.variable_scope('phi'):
representations = multi_mnist_cnn.deepnn(l, X, n)
pre_sinkhorn = tf.reshape(representations, [M, n, n])
with tf.variable_scope('theta'):
regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
regression_candidates = tf.reshape(
regression_candidates, [M, n])
P_hat = sinkhorn_operator(pre_sinkhorn, temp=temp)
prob_median = get_median_probs(P_hat)
point_estimates = tf.reduce_sum(
prob_median * regression_candidates, axis=1)
exp_loss = tf.squared_difference(y, point_estimates)
loss_phi = tf.reduce_mean(exp_loss)
loss_theta = loss_phi
P_hat_eval = sinkhorn_operator(pre_sinkhorn, temp=1e-20)
prob_median_eval = get_median_probs(P_hat_eval)
elif method == 'gumbel_sinkhorn':
with tf.variable_scope('phi'):
representations = multi_mnist_cnn.deepnn(l, X, n)
pre_sinkhorn_orig = tf.reshape(representations, [M, n, n])
pre_sinkhorn = tf.tile(pre_sinkhorn_orig, [
n_s, 1, 1])
pre_sinkhorn += util.sample_gumbel([n_s * M, n, n])
with tf.variable_scope('theta'):
regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
regression_candidates = tf.reshape(
regression_candidates, [M, n])
P_hat = sinkhorn_operator(pre_sinkhorn, temp=temp)
prob_median = get_median_probs(P_hat)
prob_median = tf.reshape(prob_median, [n_s, M, n])
point_estimates = tf.reduce_sum(
prob_median * regression_candidates, axis=2)
exp_loss = tf.squared_difference(y, point_estimates)
loss_phi = tf.reduce_mean(exp_loss)
loss_theta = loss_phi
P_hat_eval = sinkhorn_operator(pre_sinkhorn_orig, temp=1e-20)
prob_median_eval = get_median_probs(P_hat_eval)
elif method == 'deterministic_neuralsort':
with tf.variable_scope('phi'):
scores = multi_mnist_cnn.deepnn(l, X, 1)
scores = tf.reshape(scores, [M, n, 1])
P_hat = util.neuralsort(scores, temp)
P_hat_eval = util.neuralsort(scores, 1e-20)
with tf.variable_scope('theta'):
regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
regression_candidates = tf.reshape(
regression_candidates, [M, n])
losses = tf.squared_difference(
regression_candidates, tf.expand_dims(y, 1))
prob_median = get_median_probs(P_hat)
prob_median_eval = get_median_probs(P_hat_eval)
point_estimates = tf.reduce_sum(
prob_median * regression_candidates, axis=1)
exp_loss = tf.squared_difference(y, point_estimates)
point_estimates_eval = tf.reduce_sum(
prob_median_eval * regression_candidates, axis=1)
    exp_loss_eval = tf.squared_difference(y, point_estimates_eval)
loss_phi = tf.reduce_mean(exp_loss)
loss_theta = tf.reduce_mean(exp_loss_eval)
elif method == 'deterministic_softsort':
with tf.variable_scope('phi'):
scores = multi_mnist_cnn.deepnn(l, X, 1)
scores = tf.reshape(scores, [M, n, 1])
P_hat = util.softsort(scores, temp, power)
P_hat_eval = util.softsort(scores, 1e-20, power)
with tf.variable_scope('theta'):
regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
regression_candidates = tf.reshape(
regression_candidates, [M, n])
losses = tf.squared_difference(
regression_candidates, tf.expand_dims(y, 1))
prob_median = get_median_probs(P_hat)
prob_median_eval = get_median_probs(P_hat_eval)
point_estimates = tf.reduce_sum(
prob_median * regression_candidates, axis=1)
exp_loss = tf.squared_difference(y, point_estimates)
point_estimates_eval = tf.reduce_sum(
prob_median_eval * regression_candidates, axis=1)
    exp_loss_eval = tf.squared_difference(y, point_estimates_eval)
loss_phi = tf.reduce_mean(exp_loss)
loss_theta = tf.reduce_mean(exp_loss_eval)
elif method == 'stochastic_neuralsort':
with tf.variable_scope('phi'):
scores = multi_mnist_cnn.deepnn(l, X, 1)
scores = tf.reshape(scores, [M, n, 1])
scores = tf.tile(scores, [n_s, 1, 1])
scores += util.sample_gumbel([M * n_s, n, 1])
P_hat = util.neuralsort(scores, temp)
P_hat_eval = util.neuralsort(scores, 1e-20)
with tf.variable_scope('theta'):
regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
regression_candidates = tf.reshape(
regression_candidates, [M, n])
res_y = tf.expand_dims(y, 1)
losses = tf.squared_difference(regression_candidates, res_y)
prob_median = get_median_probs(P_hat)
prob_median = tf.reshape(prob_median, [n_s, M, n])
prob_median_eval = get_median_probs(P_hat_eval)
prob_median_eval = tf.reshape(prob_median_eval, [n_s, M, n])
exp_losses = tf.reduce_sum(prob_median * losses, axis=2)
exp_losses_eval = tf.reduce_sum(
prob_median_eval * losses, axis=2)
point_estimates_eval = tf.reduce_mean(tf.reduce_sum(prob_median_eval * regression_candidates, axis=2), axis=0)
loss_phi = tf.reduce_mean(exp_losses)
loss_theta = tf.reduce_mean(exp_losses_eval)
elif method == 'stochastic_softsort':
with tf.variable_scope('phi'):
scores = multi_mnist_cnn.deepnn(l, X, 1)
scores = tf.reshape(scores, [M, n, 1])
scores = tf.tile(scores, [n_s, 1, 1])
scores += util.sample_gumbel([M * n_s, n, 1])
P_hat = util.softsort(scores, temp, power)
P_hat_eval = util.softsort(scores, 1e-20, power)
with tf.variable_scope('theta'):
regression_candidates = multi_mnist_cnn.deepnn(l, X, 1)
regression_candidates = tf.reshape(
regression_candidates, [M, n])
res_y = tf.expand_dims(y, 1)
losses = tf.squared_difference(regression_candidates, res_y)
prob_median = get_median_probs(P_hat)
prob_median = tf.reshape(prob_median, [n_s, M, n])
prob_median_eval = get_median_probs(P_hat_eval)
prob_median_eval = tf.reshape(prob_median_eval, [n_s, M, n])
exp_losses = tf.reduce_sum(prob_median * losses, axis=2)
exp_losses_eval = tf.reduce_sum(
prob_median_eval * losses, axis=2)
point_estimates_eval = tf.reduce_mean(tf.reduce_sum(prob_median_eval * regression_candidates, axis=2), axis=0)
loss_phi = tf.reduce_mean(exp_losses)
loss_theta = tf.reduce_mean(exp_losses_eval)
else:
raise ValueError("No such method.")
num_losses = M * n_s if method in (
    'stochastic_neuralsort', 'stochastic_softsort', 'gumbel_sinkhorn') else M
correctly_identified = tf.reduce_sum(
prob_median_eval * median_scores) / num_losses
phi = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='phi')
theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='theta')
train_phi = tf.train.AdamOptimizer(
initial_rate).minimize(loss_phi, var_list=phi)
if method != 'vanilla':
    train_theta = tf.train.AdamOptimizer(initial_rate).minimize(
        loss_theta, var_list=theta)
train_step = tf.group(train_phi, train_theta)
else:
train_step = train_phi
saver = tf.train.Saver()
sess = tf.Session()
logfile = open('./logs/%s.log' % experiment_id, 'w')
def prnt(*args):
print(*args)
print(*args, file=logfile)
sess.run(tf.global_variables_initializer())
train_sh, validate_sh, test_sh = sess.run([
train_iterator.string_handle(),
val_iterator.string_handle(),
test_iterator.string_handle()
])
TRAIN_PER_EPOCH = mnist_input.TRAIN_SET_SIZE // (l * M)
VAL_PER_EPOCH = mnist_input.VAL_SET_SIZE // (l * M)
TEST_PER_EPOCH = mnist_input.TEST_SET_SIZE // (l * M)
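# Number of minibatches per epoch, derived from the dataset size and the
# l * M digit images drawn per batch.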
best_val = float('inf')
tiebreaker_val = -1
def save_model(epoch):
saver.save(sess, checkpoint_path + 'checkpoint', global_step=epoch)
def load_model():
filename = tf.train.latest_checkpoint(checkpoint_path)
if filename is None:
raise Exception("No model found.")
print("Loaded model %s." % filename)
saver.restore(sess, filename)
def train(epoch):
loss_train = []
for _ in range(TRAIN_PER_EPOCH):
        _, batch_loss = sess.run([train_step, loss_phi],
                                 feed_dict={handle: train_sh})
        loss_train.append(batch_loss)
prnt('Average loss:', sum(loss_train) / len(loss_train))
def test(epoch, val=False):
global best_val
c_is = []
l_vs = []
y_evals = []
point_estimates_eval_evals = []
for _ in range(VAL_PER_EPOCH if val else TEST_PER_EPOCH):
if method.startswith('deterministic'):
c_i, l_v, y_eval, point_estimates_eval_eval =\
sess.run([correctly_identified, loss_phi, y, point_estimates_eval], feed_dict={
handle: validate_sh if val else test_sh, evaluation: True})
elif method.startswith('stochastic'):
c_i, l_v, y_eval, point_estimates_eval_eval =\
sess.run([correctly_identified, loss_phi, res_y, point_estimates_eval], feed_dict={
handle: validate_sh if val else test_sh, evaluation: True})
else:
            raise ValueError('Evaluation is only implemented for the deterministic '
                             'and stochastic methods; the other methods name their '
                             'prediction tensors differently.')
c_is.append(c_i)
l_vs.append(l_v)
y_evals.append(y_eval.reshape(-1))
point_estimates_eval_evals.append(point_estimates_eval_eval.reshape(-1))
y_eval = np.concatenate(y_evals)
point_estimates_eval_eval = np.concatenate(point_estimates_eval_evals)
id_suffix = "_N_%s_%s_TAU_%s_LR_%s_E_%s_REP_%s.txt" % (
str(n), str(method), str(tau), str(initial_rate), str(NUM_EPOCHS), str(repetition))
if not val:
np.savetxt(predictions_path + 'y_eval' + id_suffix, y_eval)
np.savetxt(predictions_path + 'point_estimates_eval_eval' + id_suffix, point_estimates_eval_eval)
c_i = sum(c_is) / len(c_is)
l_v = sum(l_vs) / len(l_vs)
r2 = r2_score(y_eval, point_estimates_eval_eval)
spearman_r = spearmanr(y_eval, point_estimates_eval_eval).correlation
if val:
prnt("Validation set: correctly identified %f, mean squared error %f, R2 %f, spearmanr %f" %
(c_i, l_v, r2, spearman_r))
if l_v < best_val:
best_val = l_v
prnt('Saving...')
save_model(epoch)
else:
prnt("Test set: correctly identified %f, mean squared error %f, R2 %f, spearmanr %f" %
(c_i, l_v, r2, spearman_r))
total_training_time = 0
for epoch in range(1, NUM_EPOCHS + 1):
prnt('Epoch', epoch, '(%s)' % experiment_id)
start_time = time.time()
train(epoch)
end_time = time.time()
total_training_time += (end_time - start_time)
test(epoch, val=True)
logfile.flush()
load_model()
test(epoch, val=False)
training_time_per_epoch = total_training_time / NUM_EPOCHS
print("total_training_time: %f" % total_training_time)
print("training_time_per_epoch: %f" % training_time_per_epoch)
sess.close()
logfile.close()
|
1682338
|
import os
def setup(app):
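    # Sphinx extension entry point: register this directory as the
    # "python_docs_theme" HTML theme.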
current_dir = os.path.abspath(os.path.dirname(__file__))
app.add_html_theme(
'python_docs_theme', current_dir)
return {
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
1682374
|
import pytz
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.db.models import Q
from invoices.models import Invoice
from invoices.tasks import (
send_email,
send_invoice_email,
send_invoice_email_cancel,
create_invoice_history,
)
from invoices import swagger_params
from invoices.serializer import (
InvoiceSerailizer,
InvoiceHistorySerializer,
InvoiceCreateSerializer,
)
from accounts.models import Account
from accounts.serializer import AccountSerializer
from common.models import User, Attachments, Comment
from common.custom_auth import JSONWebTokenAuthentication
from common.serializer import (
UserSerializer,
CommentSerializer,
AttachmentsSerializer,
BillingAddressSerializer,
)
from common.utils import (
COUNTRIES,
CURRENCY_CODES,
)
from teams.serializer import TeamsSerializer
from teams.models import Teams
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from drf_yasg.utils import swagger_auto_schema
import json
INVOICE_STATUS = (
("Draft", "Draft"),
("Sent", "Sent"),
("Paid", "Paid"),
("Pending", "Pending"),
("Cancelled", "Cancel"),
)
class InvoiceListView(APIView, LimitOffsetPagination):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
model = Invoice
def get_context_data(self, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
queryset = self.model.objects.filter(company=self.request.company)
accounts = Account.objects.filter(company=self.request.company)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
queryset = queryset.filter(
Q(created_by=self.request.user) | Q(assigned_to=self.request.user)
).distinct()
accounts = accounts.filter(
Q(created_by=self.request.user) | Q(assigned_to=self.request.user)
).distinct()
if params:
if params.get("invoice_title_or_number"):
queryset = queryset.filter(
Q(invoice_title__icontains=params.get("invoice_title_or_number"))
| Q(invoice_number__icontains=params.get("invoice_title_or_number"))
).distinct()
if params.get("created_by"):
queryset = queryset.filter(created_by=params.get("created_by"))
if params.get("assigned_users"):
queryset = queryset.filter(
assigned_to__in=json.loads(params.get("assigned_users"))
)
if params.get("status"):
queryset = queryset.filter(status=params.get("status"))
if params.get("total_amount"):
queryset = queryset.filter(
total_amount__icontains=params.get("total_amount")
)
context = {}
search = False
if (
params.get("invoice_title_or_number")
or params.get("created_by")
or params.get("assigned_users")
or params.get("status")
or params.get("total_amount")
):
search = True
context["search"] = search
results_invoice = self.paginate_queryset(
queryset.distinct(), self.request, view=self
)
invoices = InvoiceSerailizer(results_invoice, many=True).data
context["per_page"] = 10
context.update(
{
"invoices_count": self.count,
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"page_number": int(self.offset / 10) + 1,
}
)
context["invoices"] = invoices
context["users"] = UserSerializer(
User.objects.filter(is_active=True, company=self.request.company).order_by(
"email"
),
many=True,
).data
context["accounts_list"] = AccountSerializer(accounts, many=True).data
if self.request.user == "ADMIN":
context["teams_list"] = TeamsSerializer(
Teams.objects.filter(company=self.request.company), many=True
).data
context["status"] = INVOICE_STATUS
context["currency"] = CURRENCY_CODES
context["countries"] = COUNTRIES
return context
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_list_get_params
)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return Response(context)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_create_post_params
)
def post(self, request, *args, **kwargs):
params = request.query_params if len(request.data) == 0 else request.data
data = {}
serializer = InvoiceCreateSerializer(data=params, request_obj=request)
from_address_serializer = BillingAddressSerializer(data=params)
to_address_serializer = BillingAddressSerializer(data=params)
if not from_address_serializer.is_valid():
data["from_address_errors"] = from_address_serializer.errors
if not to_address_serializer.is_valid():
data["to_address_errors"] = to_address_serializer.errors
if data:
return Response({"error": True}, data)
if serializer.is_valid():
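            # Amount breakdown: subtotal = billed hours * rate, then a
            # percentage tax is added on top.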
quality_hours = int(params.get("quality_hours"))
rate = float(params.get("rate"))
            subtotal = quality_hours * rate
            tax = subtotal * float(params.get("tax")) / 100
            total_amount = subtotal + tax
from_address_obj = from_address_serializer.save(
address_line=params.get("from_address_line"),
street=params.get("from_street"),
city=params.get("from_city"),
state=params.get("from_state"),
postcode=params.get("from_postcode"),
country=params.get("from_country"),
)
to_address_obj = to_address_serializer.save(
address_line=params.get("to_address_line"),
street=params.get("to_street"),
city=params.get("to_city"),
state=params.get("to_state"),
postcode=params.get("to_postcode"),
country=params.get("to_country"),
)
invoice_obj = serializer.save(
created_by=request.user,
company=request.company,
quantity=params.get("quality_hours"),
total_amount=total_amount,
from_address_id=from_address_obj.id,
to_address_id=to_address_obj.id,
)
if params.get("accounts"):
accounts = json.loads(params.get("accounts"))
for account in accounts:
obj_account = Account.objects.filter(
id=account, company=request.company
)
if obj_account.exists():
invoice_obj.accounts.add(account)
else:
                        invoice_obj.delete()
                        data["accounts"] = "Please enter a valid account"
                        return Response(
                            {"error": True, "errors": data},
                            status=status.HTTP_400_BAD_REQUEST,
                        )
if self.request.user.role == "ADMIN":
if params.get("teams"):
teams = json.loads(params.get("teams"))
for team in teams:
obj_team = Teams.objects.filter(
id=team, company=request.company
)
if obj_team.exists():
invoice_obj.teams.add(team)
else:
                            invoice_obj.delete()
                            data["team"] = "Please enter a valid team"
                            return Response(
                                {"error": True, "errors": data},
                                status=status.HTTP_400_BAD_REQUEST,
                            )
if params.get("assigned_to"):
                assigned_to_users_ids = json.loads(params.get("assigned_to"))
                for user_id in assigned_to_users_ids:
user = User.objects.filter(id=user_id, company=request.company)
if user.exists():
invoice_obj.assigned_to.add(user_id)
else:
                        invoice_obj.delete()
                        data["assigned_to"] = "Please enter a valid user"
                        return Response(
                            {"error": True, "errors": data},
                            status=status.HTTP_400_BAD_REQUEST,
                        )
create_invoice_history(invoice_obj.id, request.user.id, [])
assigned_to_list = list(
invoice_obj.assigned_to.all().values_list("id", flat=True)
)
recipients = assigned_to_list
send_email.delay(
recipients,
invoice_obj.id,
domain=settings.DOMAIN_NAME,
protocol=self.request.scheme,
)
return Response({"error": False, "message": "Invoice Created Successfully"})
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class InvoiceDetailView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
model = Invoice
def get_object(self, pk):
return self.model.objects.filter(id=pk).first()
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_create_post_params
)
def put(self, request, pk, format=None):
params = request.query_params if len(request.data) == 0 else request.data
invoice_obj = self.get_object(pk=pk)
from_address_obj = invoice_obj.from_address
to_address_obj = invoice_obj.to_address
data = {}
if invoice_obj.company != request.company:
return Response(
{"error": True, "errors": "User company doesnot match with header...."},
status=status.HTTP_404_NOT_FOUND,
)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if not (
(self.request.user == invoice_obj.created_by)
or (self.request.user in invoice_obj.assigned_to.all())
):
return Response(
{
"error": True,
"errors": "You don't have Permission to perform this action",
},
status=status.HTTP_401_UNAUTHORIZED,
)
serializer = InvoiceCreateSerializer(
invoice_obj,
data=params,
request_obj=request,
invoice=True,
)
from_address_serializer = BillingAddressSerializer(
data=params, instance=from_address_obj
)
to_address_serializer = BillingAddressSerializer(
data=params, instance=to_address_obj
)
if not from_address_serializer.is_valid():
data["from_address_errors"] = from_address_serializer.errors
if not to_address_serializer.is_valid():
data["to_address_errors"] = to_address_serializer.errors
if data:
return Response({"error": True}, data)
if serializer.is_valid():
invoice_obj = serializer.save()
previous_assigned_to_users = list(
invoice_obj.assigned_to.all().values_list("id", flat=True)
)
from_address_obj = from_address_serializer.save(
address_line=params.get("from_address_line"),
street=params.get("from_street"),
city=params.get("from_city"),
state=params.get("from_state"),
postcode=params.get("from_postcode"),
country=params.get("from_country"),
)
to_address_obj = to_address_serializer.save(
address_line=params.get("to_address_line"),
street=params.get("to_street"),
city=params.get("to_city"),
state=params.get("to_state"),
postcode=params.get("to_postcode"),
country=params.get("to_country"),
)
invoice_obj.from_address = from_address_obj
invoice_obj.to_address = to_address_obj
quality_hours = int(params.get("quality_hours"))
rate = float(params.get("rate"))
            subtotal = quality_hours * rate
            tax = subtotal * float(params.get("tax")) / 100
            invoice_obj.total_amount = subtotal + tax
invoice_obj.save()
invoice_obj.accounts.clear()
if params.get("accounts"):
accounts = json.loads(params.get("accounts"))
for account in accounts:
obj_account = Account.objects.filter(
id=account, company=request.company
)
if obj_account.exists():
invoice_obj.accounts.add(account)
else:
data["accounts"] = "Please enter valid account"
return Response({"error": True}, data)
if self.request.user.role == "ADMIN":
invoice_obj.teams.clear()
if params.get("teams"):
teams = json.loads(params.get("teams"))
for team in teams:
obj_team = Teams.objects.filter(
id=team, company=request.company
)
if obj_team.exists():
invoice_obj.teams.add(team)
else:
data["team"] = "Please enter valid Team"
return Response({"error": True}, data)
invoice_obj.assigned_to.clear()
if params.get("assigned_to"):
                assigned_to_users_ids = json.loads(params.get("assigned_to"))
                for user_id in assigned_to_users_ids:
user = User.objects.filter(id=user_id, company=request.company)
if user.exists():
invoice_obj.assigned_to.add(user_id)
else:
data["assigned_to"] = "Please enter valid User"
return Response({"error": True}, data)
assigned_to_list = list(
invoice_obj.assigned_to.all().values_list("id", flat=True)
)
recipients = list(set(assigned_to_list) - set(previous_assigned_to_users))
send_email.delay(
recipients,
invoice_obj.id,
domain=settings.DOMAIN_NAME,
protocol=self.request.scheme,
)
return Response(
{"error": False, "message": "Invoice Updated Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_delete_params
)
def delete(self, request, pk, format=None):
self.object = self.get_object(pk)
if self.object.company != request.company:
return Response(
{"error": True, "errors": "User company doesnot match with header...."}
)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if self.request.user != self.object.created_by:
return Response(
{
"error": True,
"errors": "You do not have Permission to perform this action",
}
)
if self.object.from_address_id:
self.object.from_address.delete()
if self.object.to_address_id:
self.object.to_address.delete()
self.object.delete()
return Response(
{"error": False, "message": "Invoice Deleted Successfully."},
status=status.HTTP_200_OK,
)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_delete_params
)
def get(self, request, pk, format=None):
self.invoice = self.get_object(pk=pk)
if self.invoice.company != request.company:
return Response(
{"error": True, "errors": "User company doesnot match with header...."},
status=status.HTTP_404_NOT_FOUND,
)
context = {}
context["invoice_obj"] = InvoiceSerailizer(self.invoice).data
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if not (
(self.request.user == self.invoice.created_by)
or (self.request.user in self.invoice.assigned_to.all())
):
return Response(
{
"error": True,
"errors": "You don't have Permission to perform this action",
}
)
        comment_permission = (
            self.request.user == self.invoice.created_by
            or self.request.user.is_superuser
            or self.request.user.role == "ADMIN"
        )
if self.request.user.is_superuser or self.request.user.role == "ADMIN":
users_mention = list(
User.objects.filter(
is_active=True,
company=self.request.company,
).values("username")
)
elif self.request.user != self.invoice.created_by:
if self.invoice.created_by:
users_mention = [{"username": self.invoice.created_by.username}]
else:
users_mention = []
else:
users_mention = []
attachments = Attachments.objects.filter(invoice=self.invoice).order_by("-id")
comments = Comment.objects.filter(invoice=self.invoice).order_by("-id")
context.update(
{
"attachments": AttachmentsSerializer(attachments, many=True).data,
"comments": CommentSerializer(comments, many=True).data,
"invoice_history": InvoiceHistorySerializer(
self.invoice.invoice_history.all(), many=True
).data,
"accounts": AccountSerializer(
self.invoice.accounts.all(), many=True
).data,
"users": UserSerializer(
User.objects.filter(
is_active=True,
company=self.request.company,
).order_by("email"),
many=True,
).data,
"comment_permission": comment_permission,
"users_mention": users_mention,
"status": INVOICE_STATUS,
"currency": CURRENCY_CODES,
"countries": COUNTRIES,
}
)
return Response(context)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_detail_post_params
)
def post(self, request, pk, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
context = {}
self.invoice_obj = Invoice.objects.get(pk=pk)
if self.invoice_obj.company != request.company:
return Response(
{"error": True, "errors": "User company doesnot match with header...."}
)
comment_serializer = CommentSerializer(data=params)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if not (
(self.request.user == self.invoice_obj.created_by)
or (self.request.user in self.invoice_obj.assigned_to.all())
):
return Response(
{
"error": True,
"errors": "You don't have Permission to perform this action",
},
status=status.HTTP_401_UNAUTHORIZED,
)
if comment_serializer.is_valid():
if params.get("comment"):
comment_serializer.save(
invoice_id=self.invoice_obj.id,
commented_by_id=self.request.user.id,
)
if self.request.FILES.get("invoice_attachment"):
attachment = Attachments()
attachment.created_by = self.request.user
attachment.file_name = self.request.FILES.get("invoice_attachment").name
attachment.invoice = self.invoice_obj
attachment.attachment = self.request.FILES.get("invoice_attachment")
attachment.save()
comments = Comment.objects.filter(invoice=self.invoice_obj).order_by("-id")
attachments = Attachments.objects.filter(invoice=self.invoice_obj).order_by(
"-id"
)
context.update(
{
"invoice_obj": InvoiceSerailizer(self.invoice_obj).data,
"attachments": AttachmentsSerializer(attachments, many=True).data,
"comments": CommentSerializer(comments, many=True).data,
}
)
return Response(context)
class InvoiceCommentView(APIView):
model = Comment
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, pk):
return self.model.objects.get(pk=pk)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_comment_edit_params
)
def put(self, request, pk, format=None):
params = request.query_params if len(request.data) == 0 else request.data
obj = self.get_object(pk)
if (
request.user.role == "ADMIN"
or request.user.is_superuser
or request.user == obj.commented_by
):
serializer = CommentSerializer(obj, data=params)
if params.get("comment"):
if serializer.is_valid():
serializer.save()
return Response(
{"error": False, "message": "Comment Submitted"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
else:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
}
)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_delete_params
)
def delete(self, request, pk, format=None):
self.object = self.get_object(pk)
if (
request.user.role == "ADMIN"
or request.user.is_superuser
or request.user == self.object.commented_by
):
self.object.delete()
return Response(
{"error": False, "message": "Comment Deleted Successfully"},
status=status.HTTP_200_OK,
)
else:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action",
}
)
class InvoiceAttachmentView(APIView):
model = Attachments
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Invoices"], manual_parameters=swagger_params.invoice_delete_params
)
def delete(self, request, pk, format=None):
self.object = self.model.objects.get(pk=pk)
if (
request.user.role == "ADMIN"
or request.user.is_superuser
or request.user == self.object.created_by
):
self.object.delete()
return Response(
{"error": False, "message": "Attachment Deleted Successfully"},
status=status.HTTP_200_OK,
)
else:
return Response(
{
"error": True,
"errors": "You don't have permission to perform this action.",
}
)
|
1682378
|
import os
import json
import typing as t
from unittest.mock import patch, MagicMock
import jsonlines
from pyfakefs.fake_filesystem_unittest import TestCase
from starwhale.utils.fs import ensure_dir, ensure_file
from starwhale.consts.env import SWEnv
from starwhale.api._impl.model import _RunConfig, PipelineHandler
from starwhale.api._impl.loader import get_data_loader, S3StorageBackend
from .. import ROOT_DIR
class SimpleHandler(PipelineHandler):
def ppl(self, data: bytes, batch_size: int, **kw: t.Any) -> t.Any:
return [1, 2], 0.1
def cmp(self, _data_loader: t.Any) -> t.Any:
for _data in _data_loader:
print(_data)
return {"summary": {"a": 1}, "kind": "test", "labels": {"1": 1}}
class TestModelPipelineHandler(TestCase):
swds_dir = os.path.join(ROOT_DIR, "data", "dataset", "swds")
def setUp(self) -> None:
self.setUpPyfakefs()
self.root = "/home/starwhale/model_test"
self.status_dir = os.path.join(self.root, "status")
self.log_dir = os.path.join(self.root, "log")
self.result_dir = os.path.join(self.root, "result")
self.config_dir = os.path.join(self.root, "config")
ensure_dir(self.config_dir)
self.fs.add_real_directory(self.swds_dir)
@patch("starwhale.api._impl.loader.boto3")
def test_s3_loader(self, m_resource: MagicMock) -> None:
swds_config = {
"backend": "s3",
"kind": "swds",
"secret": {
"access_key": "username",
"secret_key": "password",
},
"service": {
"endpoint": "127.1.1.1:1123",
"region": "local",
},
"swds": [
{
"bucket": "starwhale",
"key": {
"data": "data1",
"label": "label1",
},
}
],
}
_loader = get_data_loader(swds_config)
assert isinstance(_loader.storage, S3StorageBackend)
def test_set_run_env(self) -> None:
_RunConfig.set_env(
{
"status_dir": "status",
"log_dir": "log",
"result_dir": "result",
"input_config": "input_config",
}
)
assert os.environ.get(SWEnv.input_config) == "input_config"
def test_cmp(self) -> None:
ppl_result_dir = os.path.join(self.root, "ppl")
ensure_dir(ppl_result_dir)
ppl_result_path = os.path.join(ppl_result_dir, "current")
with jsonlines.open(ppl_result_path, mode="w") as _jl:
_jl.write(
{
"index": 0,
"result": [1, 2],
"pr": 0.1,
"batch": 10,
"label": "\\u0007\\u0002\\u0001\\u0000\\u0004\\u0001\\u0004\\t\\u0005\\t",
}
)
config_json_path = os.path.join(self.config_dir, "input.json")
local_ppl_result_config = {
"backend": "fuse",
"kind": "jsonl",
"swds": [
{
"bucket": ppl_result_dir,
"key": {
"data": "current",
},
}
],
}
ensure_file(config_json_path, json.dumps(local_ppl_result_config))
os.environ[SWEnv.status_dir] = self.status_dir
os.environ[SWEnv.log_dir] = self.log_dir
os.environ[SWEnv.result_dir] = self.result_dir
os.environ[SWEnv.input_config] = config_json_path
with SimpleHandler() as _handler:
_handler._starwhale_internal_run_cmp()
status_file_path = os.path.join(self.status_dir, "current")
assert os.path.exists(status_file_path)
assert "success" in open(status_file_path).read()
assert os.path.exists(os.path.join(self.status_dir, "timeline"))
result_file_path = os.path.join(self.result_dir, "current")
assert os.path.exists(result_file_path)
with jsonlines.open(result_file_path) as reader:
lines = [_l for _l in reader]
assert len(lines) == 1
assert lines[0]["summary"] == {"a": 1}
assert lines[0]["kind"] == "test"
def test_ppl(self) -> None:
config_json_path = os.path.join(self.config_dir, "input.json")
local_swds_config = {
"backend": "fuse",
"kind": "swds",
"swds": [
{
"bucket": self.swds_dir,
"key": {
"data": "data_ubyte_0.swds_bin",
"label": "label_ubyte_0.swds_bin",
},
}
],
}
ensure_file(config_json_path, json.dumps(local_swds_config))
os.environ[SWEnv.status_dir] = self.status_dir
os.environ[SWEnv.log_dir] = self.log_dir
os.environ[SWEnv.result_dir] = self.result_dir
os.environ[SWEnv.input_config] = config_json_path
with SimpleHandler() as _handler:
_handler._starwhale_internal_run_ppl()
status_file_path = os.path.join(self.status_dir, "current")
assert os.path.exists(status_file_path)
assert "success" in open(status_file_path).read()
assert os.path.exists(os.path.join(self.status_dir, "timeline"))
result_file_path = os.path.join(self.result_dir, "current")
assert os.path.exists(result_file_path)
with jsonlines.open(result_file_path) as reader:
lines = [_l for _l in reader]
assert len(lines) == 1
assert lines[0]["index"] == 0
assert lines[0]["result"] == [1, 2]
assert "pr" in lines[0]
assert lines[0]["batch"] == 10
|
1682396
|
import numpy as np
import matplotlib.pyplot as plt
phi = -0.8
times = list(range(16))
y1 = [phi**k / (1 - phi**2) for k in times]
y2 = [np.cos(np.pi * k) for k in times]
y3 = [a * b for a, b in zip(y1, y2)]
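# The product gamma(k) * cos(pi k) gives the terms of the AR(1) spectral
# density evaluated at frequency pi (up to the 1/(2*pi) normalization).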
num_rows, num_cols = 3, 1
fig, axes = plt.subplots(num_rows, num_cols, figsize=(10, 8))
plt.subplots_adjust(hspace=0.25)
# Autocovariance when phi = -0.8
ax = axes[0]
ax.plot(times, y1, 'bo-', alpha=0.6, label=r'$\gamma(k)$')
ax.legend(loc='upper right')
ax.set_xlim(0, 15)
ax.set_yticks((-2, 0, 2))
ax.hlines(0, 0, 15, linestyle='--', alpha=0.5)
# Cycles at frequency pi
ax = axes[1]
ax.plot(times, y2, 'bo-', alpha=0.6, label=r'$\cos(\pi k)$')
ax.legend(loc='upper right')
ax.set_xlim(0, 15)
ax.set_yticks((-1, 0, 1))
ax.hlines(0, 0, 15, linestyle='--', alpha=0.5)
# Product
ax = axes[2]
ax.stem(times, y3, label=r'$\gamma(k) \cos(\pi k)$')
ax.legend(loc='upper right')
ax.set_xlim((0, 15))
ax.set_ylim(-3, 3)
ax.set_yticks((-1, 0, 1, 2, 3))
ax.hlines(0, 0, 15, linestyle='--', alpha=0.5)
plt.show()
|
1682397
|
import os
from celery.utils.log import get_task_logger
from sqlalchemy import MetaData
from ether_sql.globals import get_current_session
from ether_sql.tasks.worker import app
logger = get_task_logger(__name__)
@app.task()
def export_to_csv(directory='.'):
"""
Export the data in the psql to a csv
:param session ether_sql_session: ether_sql session
:param str directory: Directory where the data should be exported
"""
current_session = get_current_session()
    # create the directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
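    # reflect the schema so every table in the connected database gets exported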
metadata = MetaData(current_session.db_engine)
metadata.reflect()
conn = current_session.db_engine.raw_connection()
cursor = conn.cursor()
    for _table_name in metadata.tables:
        with open('{}/{}.csv'.format(directory, _table_name), 'wb') as dbcopy_to:
            copy_sql = 'COPY {} TO STDOUT WITH CSV HEADER'.format(_table_name)
            cursor.copy_expert(copy_sql, dbcopy_to)
        logger.debug('exported table {}'.format(_table_name))
conn.close()
|
1682442
|
from .utils import Serializable
class CommandLineBinding(Serializable):
"""
The binding behavior when building the command line depends on the data type of the value.
If there is a mismatch between the type described by the input schema and the effective value,
such as resulting from an expression evaluation, an implementation must use the data type of the effective value.
Documentation: https://www.commonwl.org/v1.0/CommandLineTool.html#CommandLineBinding
"""
def __init__(self, load_contents=None, position=None, prefix=None, separate=None,
item_separator=None, value_from=None, shell_quote=None):
"""
        :param load_contents: Read up to the first 64 KiB of text from the file and
place it in the "contents" field of the file object
:type load_contents: BOOLEAN
:param position: The sorting key
:type position: INT
:param prefix: Command line prefix to add before the value
:type prefix: STRING
        :param separate: If true, add the prefix and the value as separate
            command line arguments; if false, concatenate them
        :type separate: BOOLEAN
:param item_separator: Join the array elements into a single string separated by this item
:type item_separator: STRING
:param value_from: Use this as the value
:type value_from: STRING
:param shell_quote: Value is quoted on the command line
:type shell_quote: BOOLEAN
"""
self.loadContents = load_contents
self.position = position
self.prefix = prefix
self.separate = separate
self.itemSeparator = item_separator
self.valueFrom = value_from
self.shellQuote = shell_quote
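# A minimal usage sketch (values are hypothetical): bind an input so it is
# rendered as "--input <value>" and sorted at position 2 on the command line:
#   binding = CommandLineBinding(position=2, prefix='--input', separate=True)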
|
1682446
|
import numpy as np
import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn.functional as F
import point_utils_cuda
from pytorch3d.loss import chamfer_distance
from pytorch3d.ops import knn_points, knn_gather
from scipy.spatial.transform import Rotation
import random
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
'''
ctx:
xyz: [B,N,3]
npoint: int
'''
assert xyz.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
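        # temp holds each point's squared distance to the nearest already
        # selected point; it starts large so any point may be chosen first.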
point_utils_cuda.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
return output
@staticmethod
    def backward(ctx, grad_out=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class WeightedFurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, weights: torch.Tensor, npoint: int) -> torch.Tensor:
'''
ctx:
xyz: [B,N,3]
weights: [B,N]
npoint: int
'''
assert xyz.is_contiguous()
assert weights.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
        point_utils_cuda.weighted_furthest_point_sampling_wrapper(B, N, npoint, xyz, weights, temp, output)
return output
@staticmethod
    def backward(ctx, grad_out=None):
return None, None
weighted_furthest_point_sample = WeightedFurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
ctx
features: [B,C,N]
idx: [B,npoint]
'''
assert features.is_contiguous()
assert idx.is_contiguous()
B, npoint = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, npoint)
point_utils_cuda.gather_points_wrapper(B, C, N, npoint, features, idx, output)
ctx.for_backwards = (idx, C, N)
return output
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
B, npoint = idx.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
point_utils_cuda.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
return grad_features, None
gather_operation = GatherOperation.apply
def generate_rand_rotm(x_lim=5.0, y_lim=5.0, z_lim=180.0):
'''
Input:
x_lim
y_lim
z_lim
return:
rotm: [3,3]
'''
rand_z = np.random.uniform(low=-z_lim, high=z_lim)
rand_y = np.random.uniform(low=-y_lim, high=y_lim)
rand_x = np.random.uniform(low=-x_lim, high=x_lim)
rand_eul = np.array([rand_z, rand_y, rand_x])
r = Rotation.from_euler('zyx', rand_eul, degrees=True)
rotm = r.as_matrix()
return rotm
def generate_rand_trans(x_lim=10.0, y_lim=1.0, z_lim=0.1):
'''
Input:
x_lim
y_lim
z_lim
return:
trans [3]
'''
rand_x = np.random.uniform(low=-x_lim, high=x_lim)
rand_y = np.random.uniform(low=-y_lim, high=y_lim)
rand_z = np.random.uniform(low=-z_lim, high=z_lim)
rand_trans = np.array([rand_x, rand_y, rand_z])
return rand_trans
def apply_transform(pts, trans):
R = trans[:3, :3]
T = trans[:3, 3]
pts = pts @ R.T + T
return pts
def calc_error_np(pred_R, pred_t, gt_R, gt_t):
tmp = (np.trace(pred_R.transpose().dot(gt_R))-1)/2
tmp = np.clip(tmp, -1.0, 1.0)
L_rot = np.arccos(tmp)
L_rot = 180 * L_rot / np.pi
L_trans = np.linalg.norm(pred_t - gt_t)
return L_rot, L_trans
def set_seed(seed):
'''
Set random seed for torch, numpy and python
'''
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark=False
torch.backends.cudnn.deterministic=True
|
1682449
|
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import VGG
from mmcv.runner import BaseModule, Sequential
from ..builder import BACKBONES
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
input_size (int): width and height of input, from {300, 512}.
depth (int): Depth of vgg, from {11, 13, 16, 19}.
out_indices (Sequence[int]): Output from which stages.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Example:
>>> self = SSDVGG(input_size=300, depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
input_size,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20.,
pretrained=None,
init_cfg=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
assert input_size in (300, 512)
self.input_size = input_size
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
self.inplanes = 1024
self.extra = self._make_extra_layers(self.extra_setting[input_size])
self.l2_norm = L2Norm(
self.features[out_feature_indices[0] - 1].out_channels,
l2_norm_scale)
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
self.init_cfg = [dict(type='Pretrained', checkpoint=pretrained)]
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if init_cfg is None:
self.init_cfg += [
dict(
type='Xavier',
distribution='uniform',
override=dict(name='extra')),
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
for i, layer in enumerate(self.extra):
x = F.relu(layer(x), inplace=True)
if i % 2 == 1:
outs.append(x)
outs[0] = self.l2_norm(outs[0])
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def _make_extra_layers(self, outplanes):
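        # In extra_setting, 'S' flags that the next conv layer downsamples
        # with stride 2; plain integers are output channel counts. Kernel
        # sizes alternate between 1 and 3.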
layers = []
kernel_sizes = (1, 3)
num_layers = 0
outplane = None
for i in range(len(outplanes)):
if self.inplanes == 'S':
self.inplanes = outplane
continue
k = kernel_sizes[num_layers % 2]
if outplanes[i] == 'S':
outplane = outplanes[i + 1]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=2, padding=1)
else:
outplane = outplanes[i]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=1, padding=0)
layers.append(conv)
self.inplanes = outplanes[i]
num_layers += 1
if self.input_size == 512:
layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
return Sequential(*layers)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
# normalization layer convert to FP32 in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
|
1682478
|
study = 'ds102'
with open(study + '/condition_key.txt') as f:
    ckey = f.readlines()
with open(study + '/task_key.txt') as f:
    tkey = f.readlines()
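# Print an HTML table of tasks and their conditions (the commented block at
# the end of this file shows the reference table markup this mimics).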
print '<table border="1">'
print '<caption><EM>Description of tasks and conditions</EM></caption>'
for task in range(len(tkey)):
print '<tr><th colspan="2">%s'%tkey[task].strip().replace(' ',': ',1)
for c in ckey:
if c.strip().split(' ')[0]==tkey[task].strip().split(' ')[0]:
print '<tr><td>%s<td>%s'%(c.strip().split(' ')[1],' '.join(c.strip().split(' ')[2:]))
print '</table>'
## <TABLE border="1"
## summary="This table gives some statistics about fruit
## flies: average height and weight, and percentage
## with red eyes (for both males and females).">
## <CAPTION><EM>A test table with merged cells</EM></CAPTION>
## <TR><TH rowspan="2"><TH colspan="2">Average
## <TH rowspan="2">Red<BR>eyes
## <TR><TH>height<TH>weight
## <TR><TH>Males<TD>1.9<TD>0.003<TD>40%
## <TR><TH>Females<TD>1.7<TD>0.002<TD>43%
## </TABLE>
|
1682541
|
import random
from typing import Iterable
from unittest import TestCase
import gym
import numpy as np
from envs.connect_four_env import ResultType, Player
from gym_connect_four import ConnectFourEnv, RandomPlayer
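# Board fixtures: 1 marks player one's discs, -1 player two's, 0 empty cells.
# Row 0 is the top of the board, so a 0 in the top row means the column is open.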
BOARD_VALIDATION = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, -1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, -1, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_ROW = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, -1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, -1, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_COLUMN = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, 0, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_DIAGONAL = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, 0, 1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_WIN_BDIAGONAL = np.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, -1, 1, 0, -1, 0],
[0, 0, 1, 1, 0, 1, 1],
[-1, 1, -1, -1, 0, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_0123 = np.array([[0, 0, 0, 0, -1, 1, -1],
[0, 0, 0, 1, 1, -1, 1],
[0, 0, -1, 1, 1, -1, -1],
[0, 0, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_2 = np.array([[1, 1, 0, -1, -1, 1, -1],
[1, 1, -1, 1, 1, -1, 1],
[1, 1, -1, 1, 1, -1, -1],
[1, 1, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_6 = np.array([[1, 1, 1, 1, -1, 1, 0],
[1, 1, -1, 1, 1, -1, 1],
[1, 1, -1, 1, 1, -1, -1],
[1, 1, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
BOARD_AVAILABLE_NONE = np.array([[1, 1, 1, 1, -1, 1, 1],
[1, 1, -1, 1, 1, -1, 1],
[1, 1, -1, 1, 1, -1, -1],
[1, 1, 1, 1, 1, 1, 1],
[-1, 1, -1, -1, -1, -1, -1],
[1, 1, -1, 1, -1, 1, 1]])
class DeterministicPlayer(Player):
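    """Plays a scripted sequence of moves; when a scripted column is full,
    it advances (modulo the action space) to the next available column."""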
def __init__(self, env: 'ConnectFourEnv', moves: Iterable[int], name='DeterministicPlayer'):
super().__init__(env, name)
self._moves = moves
self.reset()
def reset(self):
self._moves_itr = iter(self._moves)
self.action_log = []
self.reward_log = []
self.done_log = []
self.states = []
self.l_states = []
self.l_new_states = []
def get_next_action(self, state: np.ndarray) -> int:
self.states.append(state)
next_move = next(self._moves_itr)
valid_moves = self.env.available_moves()
while next_move not in valid_moves:
next_move += 1
next_move %= self.env.action_space.n
return next_move
def learn(self, state, action, state_next, reward, done) -> None:
self.action_log.append(action)
self.reward_log.append(reward)
self.done_log.append(done)
self.l_states.append(state)
self.l_new_states.append(state_next)
class TestConnectFourEnv(TestCase):
def setUp(self) -> None:
self.env = gym.make('ConnectFour-v0')
def test_is_valid_action(self):
self.env.reset(BOARD_VALIDATION)
self.assertTrue(self.env.is_valid_action(0))
self.assertFalse(self.env.is_valid_action(3))
def test_is_win_state(self):
self.env.reset(BOARD_WIN_ROW)
self.assertTrue(self.env.is_win_state())
self.env.reset(BOARD_WIN_COLUMN)
self.assertTrue(self.env.is_win_state())
self.env.reset(BOARD_WIN_DIAGONAL)
self.assertTrue(self.env.is_win_state())
self.env.reset(BOARD_WIN_BDIAGONAL)
self.assertTrue(self.env.is_win_state())
def test_available_moves(self):
self.env.reset(BOARD_AVAILABLE_0123)
self.assertEqual(set(self.env.available_moves()), {0, 1, 2, 3})
self.env.reset(BOARD_AVAILABLE_2)
self.assertEqual(set(self.env.available_moves()), {2})
self.env.reset(BOARD_AVAILABLE_6)
self.assertEqual(set(self.env.available_moves()), {6})
self.env.reset(BOARD_AVAILABLE_NONE)
        self.assertEqual(set(self.env.available_moves()), set())
def test_run_win_p1(self):
env = self.env
act_space = env.action_space.n
moves1 = [i % act_space for i in range(100)]
moves2 = [2 * i % act_space for i in range(1, 100)]
p1 = DeterministicPlayer(env=env, moves=moves1, name="P1")
p2 = DeterministicPlayer(env=env, moves=moves2, name="P2")
res = env.run(p1, p2, None)
self.assertEqual(ResultType.WIN1.value, res.value)
self.assertEqual(moves1[:11], p1.action_log)
self.assertEqual(moves2[:10], p2.action_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 10 + [ConnectFourEnv.WIN_REWARD], p1.reward_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 9 + [ConnectFourEnv.LOSS_REWARD], p2.reward_log)
self.assertEqual([False] * 10 + [True], p1.done_log)
self.assertEqual([False] * 9 + [True], p2.done_log)
np.testing.assert_array_equal(p1.l_states[1], p1.states[1])
np.testing.assert_array_equal(p1.l_new_states[0], p1.states[1])
np.testing.assert_array_equal(p1.l_states[-1], p1.states[-1])
np.testing.assert_array_equal(p1.l_new_states[-3], p1.states[-2])
np.testing.assert_array_equal(p1.l_states[-2], p1.states[-2])
np.testing.assert_array_equal(p2.l_new_states[0], p2.states[1])
np.testing.assert_array_equal(p2.l_states[1], p2.states[1])
np.testing.assert_array_equal(p2.l_states[-1], p2.states[-1])
np.testing.assert_array_equal(p2.l_new_states[-3], p2.states[-2])
np.testing.assert_array_equal(p2.l_states[-2], p2.states[-2])
def test_run_win_p2(self):
env = self.env
act_space = env.action_space.n
moves1 = [2 * i % act_space for i in range(100)]
moves2 = [(2 * i + 1) % act_space for i in range(0, 100)]
p1 = DeterministicPlayer(env=env, moves=moves1, name="P1")
p2 = DeterministicPlayer(env=env, moves=moves2, name="P2")
res = env.run(p1, p2, None)
self.assertEqual(ResultType.WIN2.value, res.value)
self.assertListEqual(moves1[:11], p1.action_log)
self.assertListEqual(moves2[:11], p2.action_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 10 + [ConnectFourEnv.LOSS_REWARD], p1.reward_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 10 + [ConnectFourEnv.WIN_REWARD], p2.reward_log)
self.assertEqual([False] * 10 + [True], p1.done_log)
self.assertEqual([False] * 10 + [True], p2.done_log)
def test_run_draw(self):
random.seed(0)
env = self.env
act_space = env.action_space.n
moves2 = [(2 * i + 1) % act_space for i in range(0, 100)]
p1 = RandomPlayer(env=env, name="P1", seed=88)
p2 = DeterministicPlayer(env=env, moves=moves2, name="P2")
res = env.run(p1, p2, None)
self.assertEqual(ResultType.DRAW.value, res.value)
self.assertEqual([1, 3, 5, 0, 2, 4, 6, 1, 3, 5, 0, 2, 4, 6, 1, 3, 5, 0, 3, 4, 4], p2.action_log)
self.assertEqual([ConnectFourEnv.DEF_REWARD] * 20 + [ConnectFourEnv.DRAW_REWARD], p2.reward_log)
self.assertEqual([False] * 20 + [True], p2.done_log)
def test_reset(self):
env = self.env
env.run(RandomPlayer(env=env, seed=0), RandomPlayer(env=env, seed=1), None)
sum_steps = np.sum(np.sum(np.absolute(env.board)))
self.assertEqual(17, sum_steps)
env.reset()
sum_steps = np.sum(np.sum(np.absolute(env.board)))
self.assertEqual(0, sum_steps)
|
1682563
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.displayName = "JuK"
self.patchToApply["18.08.1"] = [("juk-18.08.1-20181029.diff", 1)]
self.description = "JuK is a simple music player and helps manage your music collection"
self.webpage = "https://juk.kde.org/"
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.buildDependencies["kde/frameworks/extra-cmake-modules"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/qt5/qtsvg"] = None
self.runtimeDependencies["qt-libs/phonon"] = None
self.runtimeDependencies["libs/taglib"] = None
self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = None
self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = None
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = None
self.runtimeDependencies["kde/frameworks/tier1/kwidgetsaddons"] = None
self.runtimeDependencies["kde/frameworks/tier1/kwindowsystem"] = None
self.runtimeDependencies["kde/frameworks/tier2/kcompletion"] = None
self.runtimeDependencies["kde/frameworks/tier2/kcrash"] = None
self.runtimeDependencies["kde/frameworks/tier2/kdoctools"] = None
self.runtimeDependencies["kde/frameworks/tier2/kjobwidgets"] = None
self.runtimeDependencies["kde/frameworks/tier3/kglobalaccel"] = None
self.runtimeDependencies["kde/frameworks/tier3/kiconthemes"] = None
self.runtimeDependencies["kde/frameworks/tier3/kio"] = None
self.runtimeDependencies["kde/frameworks/tier3/knotifications"] = None
self.runtimeDependencies["kde/frameworks/tier3/ktextwidgets"] = None
self.runtimeDependencies["kde/frameworks/tier3/kwallet"] = None
self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
|
1682617
|
import cas
from cas.common.config import DefaultValidatingDraft7Validator
from cas.common.models import BuildEnvironment, BuildResult, BuildSubsystem
from cas.common.cache import FileCache
from cas.common.assets.models import (
Asset,
AssetBuildContext,
BaseDriver,
SerialDriver,
BatchedDriver,
)
import os
import json
import logging
import pathlib
import importlib
import multiprocessing
from multiprocessing.synchronize import Lock
from typing import Mapping, Sequence, Callable, Any
from pathlib import Path
_schema_path = Path(cas.__file__).parent.absolute().joinpath("schemas")
# set up our process shared logger
lock: Lock = None
logger: logging.Logger = None
def _async_mod_init(_lock: Lock):
global lock, logger
lock = _lock
logger = multiprocessing.get_logger()
def _run_async_serial(
context: AssetBuildContext, driver: SerialDriver, asset: Asset
) -> bool:
relpath = os.path.relpath(asset.path, driver.env.root)
context.logger = logger
if not context.logger:
context.logger = logging.getLogger(driver.__class__.__module__)
with lock:
context.logger.info(f"Compiling {str(relpath)}")
success = driver.compile(context, asset)
if not success:
with lock:
context.logger.error(f" Failed compile {str(relpath)}")
return success
def _run_async_batched(
context: AssetBuildContext, driver: BatchedDriver, assets: Sequence[Asset]
) -> bool:
context.logger = logger
with lock:
for asset in assets:
relpath = os.path.relpath(asset.path, driver.env.root)
context.logger.info(f"Compiling {str(relpath)}")
return driver.compile_all(context, assets)
class AssetSubsystem(BuildSubsystem):
def __init__(self, env: BuildEnvironment, config: Mapping[str, Any]):
super().__init__(env, config)
self._drivers = {}
self._validators = {}
self._args = self.env.config.args
self._dry_run = self._args.dry_run
self._file_cache = FileCache(self.env.cache, self._cache["files"])
def _get_asset_driver(self, name: str) -> BaseDriver:
driver = self._drivers.get(name)
if driver is not None:
return driver
mod = importlib.import_module(f"cas.common.assets.drivers.{name}")
if mod is None:
raise Exception(f"Invalid type {name}")
self._logger.debug(f"loaded '{name}' driver")
driver = mod._driver(self.env)
self._drivers[name] = driver
return driver
def _load_asset_context(self, config: Mapping[str, Any]) -> AssetBuildContext:
# validate the schema
if config.type not in self._validators:
driver_path = _schema_path.joinpath("drivers", f"{config.type}.json")
if not driver_path.exists():
raise Exception(
f"Unable to find schema for asset driver '{config.type}'"
)
with open(driver_path, "r") as f:
self._validators[config.type] = DefaultValidatingDraft7Validator(
json.load(f)
)
if config.get("options") is not None:
self._validators[config.type].validate(config.options._data)
srcpath = Path(config.src)
if not srcpath.exists():
raise Exception(f'The asset source folder "{srcpath}" does not exist.')
# find everything by the patterns
patterns = []
if isinstance(config.files, str):
patterns.append(config.files)
elif isinstance(config.files, Sequence):
patterns += config.files
else:
raise NotImplementedError()
files = []
for pattern in patterns:
for path in pathlib.Path(srcpath).absolute().rglob(pattern):
if not path.is_file():
continue
files.append(path)
# create context and add assets
context = AssetBuildContext(config)
for f in files:
context.assets.append(Asset(f, {}))
return context
def _build_assets(
self,
contexts: Sequence[AssetBuildContext],
callback: Callable[[Callable[[Mapping[str, Any]], bool], Sequence[Any]], None],
):
for context in contexts:
if len(context.assets) <= 0:
self._logger.warning(
f"no files found for a context with type {context.config.type}"
)
continue
if isinstance(context.driver, BatchedDriver):
callback(_run_async_batched, (context, context.driver, context.assets))
elif isinstance(context.driver, SerialDriver):
for asset in context.assets:
callback(_run_async_serial, (context, context.driver, asset))
else:
raise Exception("Unknown driver type")
def _build_assets_sync(self, contexts: Sequence[AssetBuildContext]) -> bool:
"""
Builds assets synchronously.
"""
jobs = []
lock = multiprocessing.Lock()
_async_mod_init(lock)
def callback(func: Callable[[Mapping[str, Any]], bool], params: Sequence[Any]):
jobs.append(func(*params))
self._build_assets(contexts, callback)
return all(job for job in jobs)
def _build_assets_async(self, contexts: Sequence[AssetBuildContext]) -> bool:
"""
Builds assets asynchronously.
"""
jobs = []
lock = multiprocessing.Lock()
pool = multiprocessing.Pool(
self._args.threads, initializer=_async_mod_init, initargs=(lock,)
)
def callback(func: Callable[[Mapping[str, Any]], bool], params: Sequence[Any]):
jobs.append(pool.apply_async(func, params))
try:
self._build_assets(contexts, callback)
pool.close()
except KeyboardInterrupt:
pool.terminate()
pool.join()
return all(job.get() for job in jobs)
def _run_asset_build(self, clean: bool = False) -> bool:
contexts = []
for entry in self.config.assets:
contexts.append(self._load_asset_context(entry))
hash_inputs = {}
hash_outputs = {}
total_build = 0
# prebuild
for context in contexts:
assets = context.assets.copy()
context.assets = []
context.driver = self._get_asset_driver(context.config.type)
for asset in assets:
result = context.driver.precompile(context, asset)
if not result:
self._logger.error("Asset dependency error!")
return False
# clean outputs if requested
if clean is True:
for f in result.outputs:
if not f.exists():
continue
f.unlink()
continue
# check hashes
invalidated = False
for f in result.inputs:
f = f.resolve()
if not os.path.exists(f):
self._logger.error(
f"Required dependency '{f}' could not be located!"
)
return False
if not self._file_cache.validate(f):
invalidated = True
for f in result.outputs:
f = f.resolve()
if not self._file_cache.validate(f):
invalidated = True
aid = asset.get_id()
hash_inputs[aid] = result.inputs
hash_outputs[aid] = result.outputs
if invalidated:
total_build += 1
context.assets.append(asset)
if clean is True:
self._logger.info("assets cleaned")
return True
self._logger.info(
f"{len(hash_inputs)} input files, {len(hash_outputs)} output files"
)
self._logger.info(f"{total_build} files total will be rebuilt")
if self._dry_run or total_build == 0:
return True
# build
threaded = self._args.threads > 1
if threaded:
self._logger.info(
f"running multithreaded build with {self._args.threads} threads"
)
else:
self._logger.info("running singlethreaded build")
# sort out drivers that should run synchronously regardless of threading
sync_assets = [
context
for context in contexts
if not context.driver.threadable() or not threaded
]
async_assets = [
context for context in contexts if context.driver.threadable() and threaded
]
multiprocessing.log_to_stderr(logging.INFO)
if not self._build_assets_sync(sync_assets):
self._logger.error("Build failed")
return False
if not self._build_assets_async(async_assets):
self._logger.error("Build failed")
return False
self._logger.info("recalculating asset hashes...")
for context in contexts:
for asset in context.assets:
# save updated hashes
aid = asset.get_id()
for f in hash_inputs[aid]:
self._file_cache.put(f)
for f in hash_outputs[aid]:
self._file_cache.put(f)
self.env.cache.save()
return True
def build(self, force: bool = False) -> BuildResult:
# wipe the cache if we're forcing a rebuild
if force:
self._logger.info("rebuild forced, clearing cache")
self._file_cache.clear()
return BuildResult(self._run_asset_build())
def clean(self) -> bool:
return self._run_asset_build(True)
_subsystem = AssetSubsystem
|
1682648
|
from ._xxhash import (
xxh32,
xxh32_digest,
xxh32_intdigest,
xxh32_hexdigest,
xxh64,
xxh64_digest,
xxh64_intdigest,
xxh64_hexdigest,
xxh3_64,
xxh3_64_digest,
xxh3_64_intdigest,
xxh3_64_hexdigest,
xxh3_128,
xxh3_128_digest,
xxh3_128_intdigest,
xxh3_128_hexdigest,
XXHASH_VERSION,
)
VERSION = "2.0.2"
xxh128 = xxh3_128
xxh128_hexdigest = xxh3_128_hexdigest
xxh128_intdigest = xxh3_128_intdigest
xxh128_digest = xxh3_128_digest
__all__ = [
"xxh32",
"xxh32_digest",
"xxh32_intdigest",
"xxh32_hexdigest",
"xxh64",
"xxh64_digest",
"xxh64_intdigest",
"xxh64_hexdigest",
"xxh3_64",
"xxh3_64_digest",
"xxh3_64_intdigest",
"xxh3_64_hexdigest",
"xxh3_128",
"xxh3_128_digest",
"xxh3_128_intdigest",
"xxh3_128_hexdigest",
"xxh128",
"xxh128_digest",
"xxh128_intdigest",
"xxh128_hexdigest",
"VERSION",
"XXHASH_VERSION",
]
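# A minimal usage sketch (not part of the original module; assumes the compiled
# _xxhash extension is importable): the one-shot helpers agree with the
# incremental hasher objects re-exported above.
if __name__ == "__main__":
    assert xxh64_hexdigest(b"xxhash") == xxh64(b"xxhash").hexdigest()
    assert xxh128_intdigest(b"xxhash") == xxh3_128(b"xxhash").intdigest()
    print("python-xxhash", VERSION, "wrapping libxxhash", XXHASH_VERSION)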
|
1682654
|
from unittest.mock import patch, Mock
from urllib.parse import urlencode
import django.test
from django.urls import reverse
from rest_framework import status
@django.test.override_settings(OIDC_PIK_CLIENT_ID="TEST_CLIENT_ID")
@patch("social_core.backends.open_id_connect.OpenIdConnectAuth.oidc_config",
Mock(return_value={
'end_session_endpoint': 'http://op/openid/end-session/'}))
def test_logout(api_client):
    api_client.session['id_token'] = '<PASSWORD>'
resp = api_client.get(reverse('admin:logout'))
assert resp.status_code == status.HTTP_302_FOUND
assert resp['Location'] == 'http://op/openid/end-session/?{}'.format(
urlencode({'post_logout_redirect_uri': 'http://testserver/logout/'}))
assert api_client.session.get('id_token') is None
|
1682662
|
import pytest
import numpy as np
import levitate
# Tests created with these air properties
from levitate.materials import air
air.c = 343
air.rho = 1.2
array = levitate.arrays.RectangularArray(shape=(4, 5))
pos_0 = np.array([0.1, 0.2, 0.3])
pos_1 = np.array([-0.15, 1.27, 0.001])
pos_both = np.stack((pos_0, pos_1), axis=1)
phases = array.focus_phases((pos_0 + pos_1) / 2) + array.signature(stype='twin')
amps = levitate.utils.complex(phases)
spat_ders = array.pressure_derivs(pos_both, orders=3)
ind_ders = np.einsum('i, ji...->ji...', amps, spat_ders)
sum_ders = np.sum(ind_ders, axis=1)
sph_harm = array.spherical_harmonics(pos_both, orders=6)
sph_harm_ind = np.einsum('i, ji...->ji...', amps, sph_harm)
sph_harm_sum = np.sum(sph_harm_ind, axis=1)
requirements = {
'pressure_derivs_summed': sum_ders, 'pressure_derivs_individual': ind_ders,
'spherical_harmonics_summed': sph_harm_sum, 'spherical_harmonics_individual': sph_harm_ind
}
# Defines the fields to use for testing.
# Note that the field implementations themselves are tested elsewhere.
has_jacobians_fields = [
levitate.fields.GorkovPotential,
levitate.fields.GorkovGradient,
levitate.fields.GorkovLaplacian,
]
no_jacobians_fields = [
levitate.fields.RadiationForceGradient,
lambda arr: levitate.fields.SphericalHarmonicsForce(arr, orders=5, radius=1e-3),
]
values_fields = has_jacobians_fields + no_jacobians_fields
@pytest.mark.parametrize("func", values_fields)
def test_Field(func):
field = func(array)
calc_values = field.values
val_0 = field(amps, pos_0)
val_1 = field(amps, pos_1)
val_both = field(amps, pos_both)
np.testing.assert_allclose(val_0, calc_values(**{key: requirements[key][..., 0] for key in field.values_require}))
np.testing.assert_allclose(val_1, calc_values(**{key: requirements[key][..., 1] for key in field.values_require}))
np.testing.assert_allclose(val_both, np.stack([val_0, val_1], axis=field.ndim))
@pytest.mark.parametrize("pos", [pos_0, pos_1, pos_both])
@pytest.mark.parametrize("func", values_fields)
def test_FieldPoint(func, pos):
field = func(array)
np.testing.assert_allclose((field@pos)(amps), field(amps, pos))
@pytest.mark.parametrize("weight", [1, 1e-3, 1e3])
@pytest.mark.parametrize("func", has_jabobians_fields)
def test_CostField(func, weight):
field = func(array)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = field * weight
calc_values, calc_jacobians = field.values, field.jacobians
raw_0 = calc_values(**{key: requirements[key][..., 0] for key in field.values_require})
raw_1 = calc_values(**{key: requirements[key][..., 1] for key in field.values_require})
raw_both = calc_values(**{key: requirements[key] for key in field.values_require})
val_0 = np.einsum(field._sum_str, weight, raw_0)
val_1 = np.einsum(field._sum_str, weight, raw_1)
val_both = np.einsum(field._sum_str, weight, raw_both)
raw_0 = calc_jacobians(**{key: requirements[key][..., 0] for key in field.jacobians_require})
raw_1 = calc_jacobians(**{key: requirements[key][..., 1] for key in field.jacobians_require})
raw_both = calc_jacobians(**{key: requirements[key] for key in field.jacobians_require})
jac_0 = np.einsum(field._sum_str, weight, raw_0)
jac_1 = np.einsum(field._sum_str, weight, raw_1)
jac_both = np.einsum(field._sum_str, weight, raw_both)
field_val_0, field_jac_0 = field(amps, pos_0)
field_val_1, field_jac_1 = field(amps, pos_1)
field_val_both, field_jac_both = field(amps, pos_both)
np.testing.assert_allclose(val_0, field_val_0)
np.testing.assert_allclose(val_1, field_val_1)
np.testing.assert_allclose(val_both, field_val_both)
np.testing.assert_allclose(jac_0, field_jac_0)
np.testing.assert_allclose(jac_1, field_jac_1)
np.testing.assert_allclose(jac_both, field_jac_both)
@pytest.mark.parametrize("weight", [1, 10])
@pytest.mark.parametrize("pos", [pos_0, pos_1])
@pytest.mark.parametrize("func", has_jabobians_fields)
def test_CostFieldPoint(func, weight, pos):
field = func(array)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = field * weight
val, jac = (field@pos)(amps)
val_ub, jac_ub = field(amps, pos)
np.testing.assert_allclose(val, val_ub)
np.testing.assert_allclose(jac, jac_ub)
@pytest.mark.parametrize("pos", [pos_0, pos_both])
@pytest.mark.parametrize("func", values_fields)
@pytest.mark.parametrize("target_scale", [1, 1e-3, 1e3])
def test_SquaredField(func, target_scale, pos):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
value = np.abs(field(amps, pos) - np.asarray(target).reshape(target.shape + (pos.ndim - 1) * (1,)))**2
np.testing.assert_allclose((field - target)(amps, pos), value)
@pytest.mark.parametrize("pos", [pos_0, pos_both])
@pytest.mark.parametrize("func", values_fields)
@pytest.mark.parametrize("target_scale", [1, 10])
def test_SquaredFieldPoint(func, target_scale, pos):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
field = field - target
np.testing.assert_allclose((field@pos)(amps), field(amps, pos))
@pytest.mark.parametrize("func", has_jabobians_fields)
@pytest.mark.parametrize("weight", [1, 6])
@pytest.mark.parametrize("target_scale", [1, 1e-4])
def test_SquaredCostField(func, target_scale, weight):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = (field - target) * weight
calc_values, calc_jacobians = field.values, field.jacobians
raw_0 = calc_values(**{key: requirements[key][..., 0] for key in field.values_require})
raw_1 = calc_values(**{key: requirements[key][..., 1] for key in field.values_require})
raw_both = calc_values(**{key: requirements[key] for key in field.values_require})
val_0 = np.einsum(field._sum_str, weight, raw_0)
val_1 = np.einsum(field._sum_str, weight, raw_1)
val_both = np.einsum(field._sum_str, weight, raw_both)
raw_0 = calc_jacobians(**{key: requirements[key][..., 0] for key in field.jacobians_require})
raw_1 = calc_jacobians(**{key: requirements[key][..., 1] for key in field.jacobians_require})
raw_both = calc_jacobians(**{key: requirements[key] for key in field.jacobians_require})
jac_0 = np.einsum(field._sum_str, weight, raw_0)
jac_1 = np.einsum(field._sum_str, weight, raw_1)
jac_both = np.einsum(field._sum_str, weight, raw_both)
field_val_0, field_jac_0 = field(amps, pos_0)
field_val_1, field_jac_1 = field(amps, pos_1)
field_val_both, field_jac_both = field(amps, pos_both)
np.testing.assert_allclose(val_0, field_val_0)
np.testing.assert_allclose(val_1, field_val_1)
np.testing.assert_allclose(val_both, field_val_both)
np.testing.assert_allclose(jac_0, field_jac_0)
np.testing.assert_allclose(jac_1, field_jac_1)
np.testing.assert_allclose(jac_both, field_jac_both)
@pytest.mark.parametrize("func", has_jabobians_fields)
@pytest.mark.parametrize("weight", [1, 200])
@pytest.mark.parametrize("target_scale", [1, 30])
@pytest.mark.parametrize("pos", [pos_0, pos_both])
def test_SquaredCostFieldPoint(func, weight, target_scale, pos):
field = func(array)
target = np.random.uniform(-target_scale, target_scale, (1,) * field.ndim)
weight = np.random.uniform(-weight, weight, (1,) * field.ndim)
field = (field - target) * weight
# field = (func(array) - target) * weight
val, jac = (field@pos)(amps)
val_ub, jac_ub = field(amps, pos)
np.testing.assert_allclose(val, val_ub)
np.testing.assert_allclose(jac, jac_ub)
@pytest.mark.parametrize("func0", values_fields)
@pytest.mark.parametrize("func1", values_fields)
def test_MultiField(func0, func1):
field_0 = func0(array)
field_1 = func1(array)
val0 = field_0(amps, pos_both)
val1 = field_1(amps, pos_both)
val_both = (field_0 + field_1)(amps, pos_both)
np.testing.assert_allclose(val0, val_both[0])
np.testing.assert_allclose(val1, val_both[1])
@pytest.mark.parametrize("func0", values_fields)
@pytest.mark.parametrize("func1", values_fields)
def test_MultiFieldPoint(func0, func1):
field = func0(array) + func1(array)
val_field = field(amps, pos_both)
val_bound = (field@pos_both)(amps)
np.testing.assert_allclose(val_field[0], val_bound[0])
np.testing.assert_allclose(val_field[1], val_bound[1])
@pytest.mark.parametrize("func0", has_jabobians_fields)
@pytest.mark.parametrize("func1", has_jabobians_fields)
@pytest.mark.parametrize("pos", [pos_0])
@pytest.mark.parametrize("weight0", [1, 1e-3, 1e3])
@pytest.mark.parametrize("weight1", [1, 1e-3, 1e3])
def test_MultiCostField(func0, func1, pos, weight0, weight1):
field_0 = func0(array)
weight0 = np.random.uniform(-weight0, weight0, (1,) * field_0.ndim)
field_0 = field_0 * weight0
field_1 = func1(array)
weight1 = np.random.uniform(-weight1, weight1, (1,) * field_1.ndim)
field_1 = field_1 * weight1
val0, jac0 = field_0(amps, pos)
val1, jac1 = field_1(amps, pos)
val_both, jac_both = (field_0 + field_1)(amps, pos)
np.testing.assert_allclose(val0 + val1, val_both)
np.testing.assert_allclose(jac0 + jac1, jac_both)
@pytest.mark.parametrize("func0", has_jabobians_fields)
@pytest.mark.parametrize("func1", has_jabobians_fields)
@pytest.mark.parametrize("pos", [pos_0, pos_1])
@pytest.mark.parametrize("weight0", [1, 8])
@pytest.mark.parametrize("weight1", [1, 1e-5])
def test_MultiCostFieldPoint(func0, func1, pos, weight0, weight1):
field_0 = func0(array)
weight0 = np.random.uniform(-weight0, weight0, (1,) * field_0.ndim)
field_0 = field_0 * weight0 @ pos
field_1 = func1(array)
weight1 = np.random.uniform(-weight1, weight1, (1,) * field_1.ndim)
field_1 = field_1 * weight1 @ pos
val0, jac0 = field_0(amps)
val1, jac1 = field_1(amps)
val_both, jac_both = (field_0 + field_1)(amps)
np.testing.assert_allclose(val0 + val1, val_both)
np.testing.assert_allclose(jac0 + jac1, jac_both)
@pytest.mark.parametrize("func0", values_fields)
@pytest.mark.parametrize("func1", values_fields)
@pytest.mark.parametrize("pos0", [pos_0, pos_1])
@pytest.mark.parametrize("pos1", [pos_0, pos_1])
def test_MultiFieldMultiPoint(func0, func1, pos0, pos1):
field_0 = func0(array)@pos0
field_1 = func1(array)@pos1
field_both = field_0 + field_1
val0 = field_0(amps)
val1 = field_1(amps)
val_both = field_both(amps)
np.testing.assert_allclose(val0, val_both[0])
np.testing.assert_allclose(val1, val_both[1])
@pytest.mark.parametrize("func0", has_jabobians_fields)
@pytest.mark.parametrize("func1", has_jabobians_fields)
@pytest.mark.parametrize("pos0", [pos_0, pos_1])
@pytest.mark.parametrize("pos1", [pos_0, pos_1])
@pytest.mark.parametrize("weight0", [1, 1e-3])
@pytest.mark.parametrize("weight1", [1, 1e3])
def test_MultiCostFieldMultiPoint(func0, func1, pos0, pos1, weight0, weight1):
field_0 = func0(array)
weight0 = np.random.uniform(-weight0, weight0, (1,) * field_0.ndim)
field_0 = field_0 * weight0 @ pos0
field_1 = func1(array)
weight1 = np.random.uniform(-weight1, weight1, (1,) * field_1.ndim)
field_1 = field_1 * weight1 @ pos1
field_both = field_0 + field_1
val0, jac0 = field_0(amps)
val1, jac1 = field_1(amps)
val_both, jac_both = field_both(amps)
np.testing.assert_allclose(val0 + val1, val_both)
np.testing.assert_allclose(jac0 + jac1, jac_both)
|
1682671
|
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.locations.models import LocationType, make_location
class SiteCodeTest(TestCase):
domain = 'test-site-code'
@classmethod
def setUpClass(cls):
super(SiteCodeTest, cls).setUpClass()
cls.project = create_domain(cls.domain)
LocationType(domain=cls.domain, name='type').save()
@classmethod
def tearDownClass(cls):
cls.project.delete()
super(SiteCodeTest, cls).tearDownClass()
def testSimpleName(self):
location = make_location(
name="Some Location",
domain=self.domain,
location_type="type"
)
location.save()
self.assertEqual(location.site_code, 'some_location')
def testOtherCharacters(self):
location = make_location(
name="Somé$ #Location (Old)",
domain=self.domain,
location_type="type"
)
location.save()
self.assertEqual(location.site_code, 'some_location_old')
def testDoesntDuplicate(self):
location = make_location(
name="Location",
domain=self.domain,
location_type="type"
)
location.save()
self.assertEqual(location.site_code, 'location')
location = make_location(
name="Location",
domain=self.domain,
location_type="type"
)
location.save()
self.assertEqual(location.site_code, 'location1')
|
1682673
|
import tensorflow as tf
class DM_pignistic(tf.keras.layers.Layer):
def __init__(self, num_class):
super(DM_pignistic, self).__init__()
self.num_class=num_class
    def call(self, inputs):
        # inputs: (batch, num_class + 1) mass vectors; the last column is the
        # mass assigned to the whole frame of discernment (uncertainty).
        average_pignistic = inputs[:, -1] / self.num_class
        average_pignistic = tf.expand_dims(average_pignistic, -1)
        # Pignistic transform: share the uncertainty mass equally among the
        # classes, then drop the trailing uncertainty column.
        pignistic_prob = inputs[:, :] + average_pignistic
        pignistic_prob = pignistic_prob[:, 0:-1]
        return pignistic_prob
class DM(tf.keras.layers.Layer):
def __init__(self, nu, num_class):
super(DM, self).__init__()
self.nu = nu
self.num_class=num_class
def call(self, inputs):
        # Redistribute a (1 - nu) fraction of the uncertainty mass (last
        # column) to every output, then drop the uncertainty column.
        upper = tf.expand_dims((1 - self.nu) * inputs[:, -1], -1)
        upper = tf.tile(upper, [1, self.num_class + 1])
        outputs = tf.add(inputs, upper, name=None)[:, 0:-1]
        return outputs
|
1682686
|
import os
import shutil
import subprocess
import sys
from pathlib import Path
URL_PREFIX = "aurin://"
ILLEGAL_PKG_NAME_CONTENTS = (".", "..", "/")
temp_dir = Path("/var", "tmp", "aurin")
def error_out(message: str):
print(f"ERROR: {message}", file=sys.stderr)
sys.exit(1)
def notify(icon: str, title: str, message: str):
# Respect the user's settings by querying the variable that is already set.
# If not, use the value computed by the script
xdg_runtime_dir = os.environ.get("XDG_RUNTIME_DIR", f"/run/user/{os.getuid()}")
subprocess.check_call(
[
"notify-send",
"-i", icon,
title,
message
],
env={
"XDG_RUNTIME_DIR": xdg_runtime_dir
}
)
if len(sys.argv) < 2:
    error_out("Invalid number of arguments, you must provide a package name")
input_pkg_name = sys.argv[1]
# Only accept input values that are an actual valid URI
if not input_pkg_name.startswith(URL_PREFIX):
error_out(f"Input URL {input_pkg_name} does not start with {URL_PREFIX}")
# Use slicing to extract the package name.
pkg_name = input_pkg_name[len(URL_PREFIX):]
# Deny certain characters to avoid path pollution that can enable path traversal and such.
for thing in ILLEGAL_PKG_NAME_CONTENTS:
if thing in pkg_name:
error_out(f"Illegal character pattern {thing} in package name {pkg_name}")
# Make sure the temporary directory for aurin always exists
temp_dir.mkdir(parents=True, exist_ok=True)
build_root = temp_dir / pkg_name
subprocess.check_call([
"git", "clone",
f"https://aur.archlinux.org/{pkg_name}.git",
str(build_root)
])
subprocess.check_call(
[
"bash",
"/opt/aurin/installpkg.sh",
str(build_root)
],
cwd=build_root
)
os.chdir(os.environ.get("HOME", "/"))
shutil.rmtree(build_root, ignore_errors=True)
notify("/opt/aurin/aurin48.png", "Install Successful", f"{pkg_name} has been installed!")
|
1682703
|
import random
import torch
from torch.utils.data.sampler import Sampler
# Adapted from
# https://github.com/pytorch/pytorch/pull/3062/files
class RandomCycleIter(object):
def __init__(self, data):
self.data_list = list(data)
self.length = len(self.data_list)
self.i = self.length - 1
def __iter__(self):
return self
def __next__(self):
self.i += 1
if self.i == self.length:
self.i = 0
random.shuffle(self.data_list)
return self.data_list[self.i]
next = __next__ # Py2
def multi_data_generator(data_iters, index_data, n, size):
i = 0
while i < n:
index = i % size
d = index_data[index]
yield d, next(data_iters[d])
i += 1
class MSampler(object):
def __init__(self, batch_sizes, sizes, num_samples=None, num_iters=None):
self.batch_size = sum(batch_sizes)
self.index_data = {}
size, c = 0, -1
for i in range(self.batch_size):
if i == size:
c += 1
size += batch_sizes[c]
self.index_data[i] = c
        self.num_samples = num_samples or (num_iters * self.batch_size if num_iters else sum(sizes))
self.data_iters = [RandomCycleIter(range(n)) for n in sizes]
def __iter__(self):
return multi_data_generator(
self.data_iters, self.index_data,
self.num_samples, self.batch_size)
def __len__(self):
return self.num_samples
def single_data_generator(data_iter, n):
i = 0
while i < n:
yield next(data_iter)
i += 1
class CycleSampler(Sampler):
def __init__(self, size, num_samples=None, num_epochs=0):
self.num_samples = num_samples or size*num_epochs
self.data_iter = RandomCycleIter(range(size))
def __iter__(self):
return single_data_generator(self.data_iter, self.num_samples)
def __len__(self):
return self.num_samples
import numpy as np
class RandomSampler(object):
def __init__(self, data_source, state=None, seed=None):
self.data_source = data_source
        self.rng = np.random.RandomState(seed)
def __iter__(self):
return iter(torch.randperm(len(self.data_source)).long())
def __len__(self):
return len(self.data_source)
def get_state(self):
return self.rng.get_state()
def set_state(self, state):
self.rng.set_state(state)
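# A minimal usage sketch (not part of the original module): draw mixed batches
# that take 2 indices per batch from a 10-sample dataset and 1 from a
# 5-sample dataset, yielding (dataset_index, sample_index) pairs.
if __name__ == "__main__":
    sampler = MSampler(batch_sizes=[2, 1], sizes=[10, 5], num_iters=4)
    for dataset_idx, sample_idx in sampler:
        print(dataset_idx, sample_idx)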
|
1682745
|
import argparse
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.nn.functional as F
import yaml
_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'utils')
sys.path.append(_path)
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
ncentroids = 10
from tqdm import tqdm
from data import create_dataset
from utils.utils import get_logger
from models.adaptation_model import CustomModel
from metrics import runningScore, averageMeter
from loss import get_loss_function
from tensorboardX import SummaryWriter
def train(cfg, writer, logger):
torch.manual_seed(cfg.get('seed', 1337))
torch.cuda.manual_seed(cfg.get('seed', 1337))
np.random.seed(cfg.get('seed', 1337))
random.seed(cfg.get('seed', 1337))
## create dataset
default_gpu = cfg['model']['default_gpu']
device = torch.device("cuda:{}".format(default_gpu) if torch.cuda.is_available() else 'cpu')
datasets = create_dataset(cfg, writer, logger) # source_train\ target_train\ source_valid\ target_valid + _loader
model = CustomModel(cfg, writer, logger)
# Setup Metrics
running_metrics_val = runningScore(cfg['data']['target']['n_class'])
source_running_metrics_val = runningScore(cfg['data']['target']['n_class'])
val_loss_meter = averageMeter()
source_val_loss_meter = averageMeter()
time_meter = averageMeter()
loss_fn = get_loss_function(cfg)
flag_train = True
epoches = cfg['training']['epoches']
source_train_loader = datasets.source_train_loader
target_train_loader = datasets.target_train_loader
active_train_loader = datasets.active_train_loader
logger.info('source train batchsize is {}'.format(source_train_loader.args.get('batch_size')))
print('source train batchsize is {}'.format(source_train_loader.args.get('batch_size')))
logger.info('target train batchsize is {}'.format(target_train_loader.batch_size))
print('target train batchsize is {}'.format(target_train_loader.batch_size))
logger.info('active train batchsize is {}'.format(active_train_loader.args.get('batch_size')))
print('active train batchsize is {}'.format(active_train_loader.args.get('batch_size')))
val_loader = None
if cfg.get('valset') == 'gta5':
val_loader = datasets.source_valid_loader
logger.info('valset is gta5')
print('valset is gta5')
else:
val_loader = datasets.target_valid_loader
logger.info('valset is cityscapes')
print('valset is cityscapes')
logger.info('val batchsize is {}'.format(val_loader.batch_size))
print('val batchsize is {}'.format(val_loader.batch_size))
# load CAU
CAU_full = torch.load('anchors/cluster_centroids_full_target_{}.pkl'.format(ncentroids))
CAU_full = CAU_full.reshape(ncentroids, 19, 256)
model.centroids = CAU_full
# begin training
model.iter = 0
for epoch in range(epoches):
if not flag_train:
break
if model.iter > cfg['training']['train_iters']:
break
for (target_image, target_label, target_img_name) in datasets.target_train_loader:
model.iter += 1
i = model.iter
if i > cfg['training']['train_iters']:
break
source_batchsize = cfg['data']['source']['batch_size']
images, labels, source_img_name = datasets.source_train_loader.next()
active_images, active_labels, _ = datasets.active_train_loader.next()
start_ts = time.time()
images = images.to(device)
labels = labels.to(device)
target_image = target_image.to(device)
target_label = target_label.to(device)
active_images = active_images.to(device)
active_labels = active_labels.to(device)
model.scheduler_step()
model.train(logger=logger)
            if cfg['training'].get('freeze_bn') is True:
model.freeze_bn_apply()
model.optimizer_zerograd()
loss, loss_cls_L2, loss_pseudo, loss_active = model.step_active_stage2(images, labels, target_image,
target_label,
active_images, active_labels)
if loss_cls_L2 > 10:
logger.info('loss_cls_l2 abnormal!!')
time_meter.update(time.time() - start_ts)
if (i + 1) % cfg['training']['print_interval'] == 0:
unchanged_cls_num = 0
fmt_str = "Epoches [{:d}/{:d}] Iter [{:d}/{:d}] Loss: {:.4f} Loss_cls_L2: {:.4f} Loss_pseudo: {" \
":.4f} Loss active: {:.4f} Time/Image: {:.4f} "
print_str = fmt_str.format(
epoch + 1,
epoches,
i + 1,
cfg['training']['train_iters'],
loss.item(),
loss_cls_L2,
loss_pseudo,
loss_active,
time_meter.avg / cfg['data']['source']['batch_size'])
print(print_str)
logger.info(print_str)
logger.info('unchanged number of objective class vector: {}'.format(unchanged_cls_num))
writer.add_scalar('loss/train_loss', loss.item(), i + 1)
writer.add_scalar('loss/train_cls_L2Loss', loss_cls_L2, i + 1)
writer.add_scalar('loss/train_pseudoLoss', loss_pseudo, i + 1)
writer.add_scalar('loss/train_activeLoss', loss_active, i + 1)
time_meter.reset()
# evaluation
if (i + 1) % cfg['training']['val_interval'] == 0 or \
(i + 1) == cfg['training']['train_iters']:
validation(
model, logger, writer, datasets, device, running_metrics_val, val_loss_meter, loss_fn,
source_val_loss_meter, source_running_metrics_val, iters=model.iter
)
torch.cuda.empty_cache()
logger.info('Best iou until now is {}'.format(model.best_iou))
if (i + 1) == cfg['training']['train_iters']:
                    flag_train = False
break
def validation(model, logger, writer, datasets, device, running_metrics_val, val_loss_meter, loss_fn,
source_val_loss_meter, source_running_metrics_val, iters):
_k = -1
for v in model.optimizers:
_k += 1
for param_group in v.param_groups:
_learning_rate = param_group.get('lr')
logger.info("learning rate is {} for {} net".format(_learning_rate, model.nets[_k].__class__.__name__))
model.eval(logger=logger)
torch.cuda.empty_cache()
with torch.no_grad():
validate(
datasets.target_valid_loader, device, model, running_metrics_val,
val_loss_meter, loss_fn
)
writer.add_scalar('loss/val_loss', val_loss_meter.avg, iters + 1)
logger.info("Iter %d Loss: %.4f" % (iters + 1, val_loss_meter.avg))
writer.add_scalar('loss/source_val_loss', source_val_loss_meter.avg, iters + 1)
logger.info("Iter %d Source Loss: %.4f" % (iters + 1, source_val_loss_meter.avg))
score, class_iou = running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
logger.info('{}: {}'.format(k, v))
writer.add_scalar('val_metrics/{}'.format(k), v, iters + 1)
for k, v in class_iou.items():
logger.info('{}: {}'.format(k, v))
writer.add_scalar('val_metrics/cls_{}'.format(k), v, iters + 1)
val_loss_meter.reset()
running_metrics_val.reset()
source_val_loss_meter.reset()
source_running_metrics_val.reset()
torch.cuda.empty_cache()
state = {}
_k = -1
for net in model.nets:
_k += 1
new_state = {
"model_state": net.state_dict(),
"optimizer_state": model.optimizers[_k].state_dict(),
"scheduler_state": model.schedulers[_k].state_dict(),
}
state[net.__class__.__name__] = new_state
state['iter'] = iters + 1
state['best_iou'] = score["Mean IoU : \t"]
save_path = os.path.join(writer.file_writer.get_logdir(),
"from_{}_to_{}_on_{}_current_model.pkl".format(
cfg['data']['source']['name'],
cfg['data']['target']['name'],
cfg['model']['arch'], ))
torch.save(state, save_path)
if score["Mean IoU : \t"] >= model.best_iou:
torch.cuda.empty_cache()
model.best_iou = score["Mean IoU : \t"]
state = {}
_k = -1
for net in model.nets:
_k += 1
new_state = {
"model_state": net.state_dict(),
"optimizer_state": model.optimizers[_k].state_dict(),
"scheduler_state": model.schedulers[_k].state_dict(),
}
state[net.__class__.__name__] = new_state
state['iter'] = iters + 1
state['best_iou'] = model.best_iou
save_path = os.path.join(writer.file_writer.get_logdir(),
"from_{}_to_{}_on_{}_best_model.pkl".format(
cfg['data']['source']['name'],
cfg['data']['target']['name'],
cfg['model']['arch'], ))
torch.save(state, save_path)
return score["Mean IoU : \t"]
def validate(valid_loader, device, model, running_metrics_val, val_loss_meter, loss_fn):
for (images_val, labels_val, filename) in tqdm(valid_loader):
images_val = images_val.to(device)
labels_val = labels_val.to(device)
_, _, feat_cls, outs = model.forward(images_val)
outputs = F.interpolate(outs, size=images_val.size()[2:], mode='bilinear', align_corners=True)
val_loss = loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="config")
parser.add_argument(
"--config",
nargs="?",
type=str,
default='configs/active_from_gta_to_city_stage2.yml',
help="Configuration file to use"
)
args = parser.parse_args()
with open(args.config) as fp:
cfg = yaml.load(fp)
run_id = random.randint(1, 100000)
logdir = os.path.join('runs', os.path.basename(args.config)[:-4], str(run_id))
writer = SummaryWriter(log_dir=logdir)
print('RUNDIR: {}'.format(logdir))
shutil.copy(args.config, logdir)
logger = get_logger(logdir)
logger.info('Let the games begin')
train(cfg, writer, logger)
|
1682754
|
class DiachronicVisualizer(object):
@staticmethod
def visualize(display_df):
raise NotImplementedError()
|
1682757
|
from copy import deepcopy
from functools import lru_cache
import json
from typing import Any, Dict
# from electionguard.serializable import read_json_file
__all__ = ["get_ballot", "get_election_description"]
_DATA_DIRECTORY = "tests/integration/data"
def get_ballot(ballot_id: str) -> Any:
ballot = deepcopy(_get_ballot_template())
ballot["object_id"] = ballot_id
return ballot
def get_election_description() -> Any:
return deepcopy(_get_election_description_template())
def _get_ballot_template() -> Dict:
return _read_json_file(f"{_DATA_DIRECTORY}/ballot.json")
@lru_cache
def _get_election_description_template() -> Dict:
return _read_json_file(f"{_DATA_DIRECTORY}/election_description.json")
@lru_cache
def _read_json_file(path: str) -> Dict:
with open(path, "r") as file:
return json.load(file)
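# A minimal usage sketch (not part of the original helpers; assumes the JSON
# fixtures under tests/integration/data exist): every caller gets its own copy
# of the cached template, so mutating one ballot cannot leak into another.
if __name__ == "__main__":
    first, second = get_ballot("ballot-1"), get_ballot("ballot-2")
    assert first["object_id"] == "ballot-1" and second["object_id"] == "ballot-2"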
|
1682806
|
import redis
import utils.config_handler
def create_redis_client():
REDIS_CONFIG = utils.config_handler.load_json_config()['redis_server']
redis_client = redis.Redis(host=REDIS_CONFIG['host'], port=int(REDIS_CONFIG['port']))
return redis_client
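# A minimal usage sketch (not part of the original module; assumes the JSON
# config provides a 'redis_server' section with 'host' and 'port'):
if __name__ == "__main__":
    client = create_redis_client()
    print(client.ping())  # True if the server is reachable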
|
1682810
|
from time import sleep
import tellopy
from tellopy._internal.utils import *
prev_flight_data = None
def handler(event, sender, data, **args):
global prev_flight_data
drone = sender
if event is drone.EVENT_CONNECTED:
print('connected')
drone.start_video()
drone.set_exposure(0)
drone.set_video_encoder_rate(4)
elif event is drone.EVENT_FLIGHT_DATA:
if prev_flight_data != str(data):
print(data)
prev_flight_data = str(data)
elif event is drone.EVENT_TIME:
print('event="%s" data=%d' % (event.getname(), data[0] + data[1] << 8))
elif event is drone.EVENT_VIDEO_FRAME:
pass
else:
print('event="%s" data=%s' % (event.getname(), str(data)))
def test():
drone = tellopy.Tello()
try:
# drone.set_loglevel(d.LOG_ALL)
drone.subscribe(drone.EVENT_CONNECTED, handler)
# drone.subscribe(drone.EVENT_WIFI, handler)
# drone.subscribe(drone.EVENT_LIGHT, handler)
drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
# drone.subscribe(drone.EVENT_LOG, handler)
drone.subscribe(drone.EVENT_TIME, handler)
drone.subscribe(drone.EVENT_VIDEO_FRAME, handler)
drone.connect()
# drone.takeoff()
# time.sleep(5)
drone.down(50)
sleep(3)
drone.up(50)
sleep(3)
drone.down(0)
sleep(2)
drone.land()
sleep(5)
except Exception as ex:
print(ex)
show_exception(ex)
finally:
drone.quit()
print('end.')
if __name__ == '__main__':
test()
|
1682836
|
import numpy as np
import xarray as xr
from wavespectra.core.attributes import attrs, set_spec_attributes
def spread(dp_matrix, dspr_matrix, dirs):
"""Generic spreading function.
Args:
        dp_matrix: matrix of peak wave directions (degrees).
        dspr_matrix: matrix of directional spreading coefficients.
        dirs: direction bins (degrees).
    Returns:
        G1: normalised directional spreading matrix.
    Note:
        Function defined such that \\int{G1 d\\theta}=1
"""
adirs = np.array(dirs).reshape((1, -1))
pidirs = np.deg2rad(270.0 - np.array(adirs))
st1 = np.sin(0.5 * np.deg2rad(270.0 - dp_matrix))
ct1 = np.cos(0.5 * np.deg2rad(270.0 - dp_matrix))
a = np.maximum(np.cos(0.5 * pidirs) * ct1 + np.sin(0.5 * pidirs) * st1, 0.0)
G1 = a ** (2.0 * dspr_matrix)
G1 /= np.expand_dims(G1.sum(axis=-1) * abs(dirs[1] - dirs[0]), axis=-1)
return G1
def arrange_inputs(*args):
"""Check all inputs are same shape and add frequency and direction dims."""
argout = []
shape0 = np.array(args[0]).shape
for arg in args:
argm = np.array(arg)
if argm.shape == () and shape0 != (): # Broadcast scalar across matrix
argm = arg * np.ones(shape0)
elif argm.shape != shape0:
raise Exception("Input shapes must be the same")
argout.append(argm[..., np.newaxis, np.newaxis])
return argout
def make_dataset(spec, freqs, dirs, coordinates=[]):
"""Package spectral matrix to xarray.
Args:
        spec: spectral energy matrix.
        freqs: frequency bins (Hz).
        dirs: direction bins (degrees).
        coordinates: list of extra (name, values) coordinate pairs.
    Returns:
        dset: SpecDset object.
"""
coords = tuple(coordinates) + ((attrs.FREQNAME, freqs), (attrs.DIRNAME, dirs))
dimensions = tuple([c[0] for c in coords])
dset = xr.DataArray(
data=spec, coords=coords, dims=dimensions, name=attrs.SPECNAME
).to_dataset()
set_spec_attributes(dset)
return dset
def check_coordinates(param, coordinates):
"""Check coordinates are consistent with parameter.
Args:
        param: parameter matrix.
        coordinates: list of (name, values) coordinate pairs.
"""
pshape = np.array(param).shape
if len(pshape) != len(coordinates):
raise Exception("Incorrect number of coordinates for parameter")
for idim, dim in enumerate(pshape):
if dim != len(coordinates[idim][1]):
raise Exception(
"Dimension of coordinate %s at position %d does not match parameter"
% (coordinates[idim][0], dim)
)
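# A quick self-check sketch (not part of the original module): the spreading
# function integrates to one over the direction bins, per its docstring.
if __name__ == "__main__":
    dirs = np.arange(0.0, 360.0, 10.0)
    G1 = spread(np.array(225.0), np.array(20.0), dirs)
    assert np.isclose((G1 * 10.0).sum(), 1.0)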
|
1682974
|
import pytest
from nylas.client.restful_models import Folder, Thread, Message
@pytest.mark.usefixtures("mock_folder")
def test_get_change_folder(api_client):
folder = api_client.folders.get("anuep8pe5ug3xrupchwzba2o8")
assert folder is not None
assert isinstance(folder, Folder)
assert folder.display_name == "My Folder"
folder.display_name = "My New Folder"
folder.save()
assert folder.display_name == "My New Folder"
@pytest.mark.usefixtures("mock_folder", "mock_threads")
def test_folder_threads(api_client):
folder = api_client.folders.get("anuep8pe5ug3xrupchwzba2o8")
assert folder.threads
assert all(isinstance(thread, Thread) for thread in folder.threads)
@pytest.mark.usefixtures("mock_folder", "mock_messages")
def test_folder_messages(api_client):
folder = api_client.folders.get("anuep8pe5ug3xrupchwzba2o8")
assert folder.messages
assert all(isinstance(message, Message) for message in folder.messages)
|
1682989
|
import logging
import pytest
import sys
import os
import subprocess
import uuid
from contextlib import contextmanager
logger = logging.getLogger(__name__)
@contextmanager
def conda_env(env_name):
# Set env name for shell script
os.environ["JOB_COMPATIBILITY_TEST_TEMP_ENV"] = env_name
# Delete conda env if it already exists
try:
yield
finally:
# Clean up created conda env upon test exit to prevent leaking
del os.environ["JOB_COMPATIBILITY_TEST_TEMP_ENV"]
subprocess.run(
f"conda env remove -y --name {env_name}", shell=True, stdout=subprocess.PIPE
)
def _compatibility_script_path(file_name: str) -> str:
return os.path.join(
os.path.dirname(__file__), "backwards_compatibility_scripts", file_name
)
class TestBackwardsCompatibility:
    # TODO (architkulkarni): Reenable test after #22368 is merged, and make the
    # backwards compatibility script install the commit from #22368.
@pytest.mark.skip("#22368 breaks backwards compatibility of the package REST API.")
def test_cli(self):
"""
1) Create a new conda environment with ray version X installed
inherits same env as current conda envionment except ray version
2) Start head node and dashboard with ray version X
3) Use current commit's CLI code to do sample job submission flow
4) Deactivate the new conda environment and back to original place
"""
# Shell script creates and cleans up tmp conda environment regardless
# of the outcome
env_name = f"jobs-backwards-compatibility-{uuid.uuid4().hex}"
with conda_env(env_name):
shell_cmd = f"{_compatibility_script_path('test_backwards_compatibility.sh')}" # noqa: E501
try:
subprocess.check_output(shell_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.error(str(e))
logger.error(e.stdout.decode())
raise e
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
1683011
|
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet.gluon.data import dataset
from mxnet.gluon.data import dataloader
import collections
import os
class ImageWithMaskDataset(dataset.Dataset):
"""
A dataset for loading images (with masks) stored as `xyz.jpg` and `xyz_mask.png`.
Parameters
----------
root : str
Path to root directory.
num_classes : int
The number of classes in your data set.
transform : callable, default None
A function that takes data and label and transforms them:
::
transform = lambda data, label: (data.astype(np.float32)/255, label)
"""
def __init__(self, root, num_classes, transform=None):
self._root = os.path.expanduser(root)
self._transform = transform
self._exts = ['.jpg', '.jpeg', '.png']
self._list_images(self._root)
self.num_classes = num_classes
def _list_images(self, root):
images = collections.defaultdict(dict)
for filename in sorted(os.listdir(root)):
name, ext = os.path.splitext(filename)
mask_flag = name.endswith("_mask")
if ext.lower() not in self._exts:
continue
if not mask_flag:
images[name]["base"] = filename
else:
name = name[:-5] # to remove '_mask'
images[name]["mask"] = filename
self._image_list = list(images.values())
def one_hot(self, Y):
one_hot_mask = nd.zeros(
(Y.shape[0],) + (self.num_classes,) + Y.shape[1:])
for c in range(self.num_classes):
one_hot_mask[:, c, :, :] = (Y == c)
return one_hot_mask
def preprocess(self, data, label):
gray_data = nd.sum_axis(nd.array([[[[0.3]], [[0.59]], [[0.11]]]]) * data, 1, keepdims=True)
gray_label = nd.sum_axis(nd.array([[[[1]], [[1]], [[1]]]]) * label, 1)
one_hot_label = self.one_hot(gray_label)
return gray_data, one_hot_label
def __getitem__(self, idx):
assert 'base' in self._image_list[idx], "Couldn't find base image for: " + \
self._image_list[idx]["mask"]
base_filepath = os.path.join(self._root, self._image_list[idx]["base"])
base = nd.transpose(mx.image.imread(base_filepath, 0), (2, 0, 1)).astype(np.float32)
assert 'mask' in self._image_list[idx], "Couldn't find mask image for: " + \
self._image_list[idx]["base"]
mask_filepath = os.path.join(self._root, self._image_list[idx]["mask"])
mask = nd.transpose(mx.image.imread(mask_filepath, 0), (2, 0, 1)).astype(np.float32)
one_hot_mask = nd.zeros((self.num_classes,) + mask.shape[1:], dtype=np.float32)
for c in range(self.num_classes):
one_hot_mask[c, :, :] = (mask == c)[0]
if self._transform is not None:
return self._transform(base, one_hot_mask)
else:
return base, one_hot_mask
def __len__(self):
return len(self._image_list)
def DataLoaderGenerator(data_loader):
"""
A generator wrapper for loading images (with masks) from a 'ImageWithMaskDataset' dataset.
Parameters
----------
data_loader : 'Dataset' instance
Instance of Gluon 'Dataset' object from which image / mask pairs are yielded.
"""
for data, label in data_loader:
data_desc = mx.io.DataDesc(name='data', shape=data.shape, dtype=np.float32)
label_desc = mx.io.DataDesc(name='label', shape=label.shape, dtype=np.float32)
batch = mx.io.DataBatch(
data=[data],
label=[label],
provide_data=[data_desc],
provide_label=[label_desc])
yield batch
class DataLoaderIter(mx.io.DataIter):
"""
An iterator wrapper for loading images (with masks) from an 'ImageWithMaskDataset' dataset.
Allows for MXNet Module API to train using Gluon data loaders.
Parameters
----------
root : str
Root directory containg image / mask pairs stored as `xyz.jpg` and `xyz_mask.png`.
num_classes : int
Number of classes in data set.
batch_size : int
Size of batch.
shuffle : Bool
Whether or not to shuffle data.
num_workers : int
Number of sub-processes to spawn for loading data. Default 0 means none.
"""
def __init__(self, root, num_classes, batch_size, shuffle=False, num_workers=0):
self.batch_size = batch_size
self.dataset = ImageWithMaskDataset(root=root, num_classes=num_classes)
if mx.__version__ == "0.11.0":
self.dataloader = mx.gluon.data.DataLoader(
self.dataset, batch_size=batch_size, shuffle=shuffle, last_batch='rollover')
else:
self.dataloader = mx.gluon.data.DataLoader(
self.dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
last_batch='rollover')
self.dataloader_generator = DataLoaderGenerator(self.dataloader)
def __iter__(self):
return self
def reset(self):
self.dataloader_generator = DataLoaderGenerator(self.dataloader)
def __next__(self):
return self.next()
@property
def provide_data(self):
return [
mx.io.DataDesc(name='data', shape=(self.batch_size,) + self.dataset[0][0].shape, dtype=np.float32)
]
@property
def provide_label(self):
return [
mx.io.DataDesc(name='label', shape=(self.batch_size,) + self.dataset[0][1].shape, dtype=np.float32)
]
def next(self):
return next(self.dataloader_generator)
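# A minimal usage sketch (not part of the original module; the data root and
# class count are assumptions): pull one Module-API batch from the iterator.
if __name__ == "__main__":
    data_iter = DataLoaderIter(root="./data", num_classes=2, batch_size=4, shuffle=True)
    batch = next(data_iter)
    print(batch.data[0].shape, batch.label[0].shape)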
|
1683012
|
from distutils.core import setup, Extension
CFLAGS=[]
cPolymagic = Extension("cPolymagic", sources = ["gpc.c", "polymagic.m"], extra_compile_args=CFLAGS)
setup (name = "polymagic",
version = "0.1",
author = "<NAME>",
description = "Additional utility functions for NSBezierPath using GPC.",
ext_modules = [cPolymagic])
|
1683015
|
def tail_swap(arr):
fmt = '{}:{}'.format
(head, tail), (head_2, tail_2) = (a.split(':') for a in arr)
return [fmt(head, tail_2), fmt(head_2, tail)]
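# Example check (not part of the original solution): heads stay, tails swap.
if __name__ == "__main__":
    assert tail_swap(['abc:123', 'cde:456']) == ['abc:456', 'cde:123']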
|
1683026
|
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.typedef import (
TypeDef, generic_new_descr, GetSetProperty)
from pypy.interpreter.gateway import interp2app, unwrap_spec
from rpython.rlib.rStringIO import RStringIO
from rpython.rlib.rarithmetic import r_longlong
from rpython.rlib.objectmodel import import_from_mixin
from pypy.module._io.interp_bufferedio import W_BufferedIOBase
from pypy.module._io.interp_iobase import convert_size
import sys
class W_BytesIO(W_BufferedIOBase):
import_from_mixin(RStringIO)
def __init__(self, space):
W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False)
self.init()
def descr_init(self, space, w_initial_bytes=None):
self.init()
if not space.is_none(w_initial_bytes):
self.write_w(space, w_initial_bytes)
self.seek(0)
def _check_closed(self, space, message=None):
if self.is_closed():
if message is None:
message = "I/O operation on closed file"
raise OperationError(space.w_ValueError, space.newtext(message))
def read_w(self, space, w_size=None):
self._check_closed(space)
size = convert_size(space, w_size)
return space.newbytes(self.read(size))
def read1_w(self, space, w_size):
return self.read_w(space, w_size)
def readline_w(self, space, w_limit=None):
self._check_closed(space)
limit = convert_size(space, w_limit)
return space.newbytes(self.readline(limit))
def readinto_w(self, space, w_buffer):
self._check_closed(space)
rwbuffer = space.writebuf_w(w_buffer)
size = rwbuffer.getlength()
output = self.read(size)
self.output_slice(space, rwbuffer, 0, output)
return space.newint(len(output))
def write_w(self, space, w_data):
self._check_closed(space)
buf = space.buffer_w(w_data, space.BUF_CONTIG_RO).as_str()
length = len(buf)
if length <= 0:
return space.newint(0)
self.write(buf)
return space.newint(length)
def truncate_w(self, space, w_size=None):
self._check_closed(space)
pos = self.tell()
if space.is_none(w_size):
size = pos
else:
size = space.r_longlong_w(w_size)
if size < 0:
raise oefmt(space.w_ValueError, "negative size value")
self.truncate(size)
if size == pos:
self.seek(0, 2)
else:
self.seek(pos)
return space.newint(size)
def getvalue_w(self, space):
self._check_closed(space)
return space.newbytes(self.getvalue())
def tell_w(self, space):
self._check_closed(space)
return space.newint(self.tell())
@unwrap_spec(pos=r_longlong, whence=int)
def seek_w(self, space, pos, whence=0):
self._check_closed(space)
if whence == 0:
if pos < 0:
raise oefmt(space.w_ValueError, "negative seek value")
elif whence == 1:
if pos > sys.maxint - self.tell():
raise oefmt(space.w_OverflowError, "new position too large")
elif whence == 2:
if pos > sys.maxint - self.getsize():
raise oefmt(space.w_OverflowError, "new position too large")
else:
raise oefmt(space.w_ValueError,
"whence must be between 0 and 2, not %d", whence)
self.seek(pos, whence)
return space.newint(self.tell())
def readable_w(self, space):
self._check_closed(space)
return space.w_True
def writable_w(self, space):
self._check_closed(space)
return space.w_True
def seekable_w(self, space):
self._check_closed(space)
return space.w_True
def close_w(self, space):
self.close()
def closed_get_w(self, space):
return space.newbool(self.is_closed())
def getstate_w(self, space):
self._check_closed(space)
return space.newtuple([
space.newbytes(self.getvalue()),
space.newint(self.tell()),
self.getdict(space)])
def setstate_w(self, space, w_state):
self._check_closed(space)
if space.len_w(w_state) != 3:
raise oefmt(space.w_TypeError,
"%T.__setstate__ argument should be 3-tuple, got %T",
self, w_state)
w_content, w_pos, w_dict = space.unpackiterable(w_state, 3)
self.truncate(0)
self.write_w(space, w_content)
pos = space.int_w(w_pos)
if pos < 0:
raise oefmt(space.w_ValueError,
"position value cannot be negative")
self.seek(pos)
if not space.is_w(w_dict, space.w_None):
space.call_method(self.getdict(space), "update", w_dict)
W_BytesIO.typedef = TypeDef(
'_io.BytesIO', W_BufferedIOBase.typedef,
__new__ = generic_new_descr(W_BytesIO),
__init__ = interp2app(W_BytesIO.descr_init),
read = interp2app(W_BytesIO.read_w),
read1 = interp2app(W_BytesIO.read1_w),
readline = interp2app(W_BytesIO.readline_w),
readinto = interp2app(W_BytesIO.readinto_w),
write = interp2app(W_BytesIO.write_w),
truncate = interp2app(W_BytesIO.truncate_w),
getvalue = interp2app(W_BytesIO.getvalue_w),
seek = interp2app(W_BytesIO.seek_w),
tell = interp2app(W_BytesIO.tell_w),
readable = interp2app(W_BytesIO.readable_w),
writable = interp2app(W_BytesIO.writable_w),
seekable = interp2app(W_BytesIO.seekable_w),
close = interp2app(W_BytesIO.close_w),
closed = GetSetProperty(W_BytesIO.closed_get_w),
__getstate__ = interp2app(W_BytesIO.getstate_w),
__setstate__ = interp2app(W_BytesIO.setstate_w),
)
|
1683031
|
import infra.basetest
class TestSysLinuxBase(infra.basetest.BRTest):
x86_toolchain_config = \
"""
BR2_x86_i686=y
BR2_TOOLCHAIN_EXTERNAL=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM=y
BR2_TOOLCHAIN_EXTERNAL_DOWNLOAD=y
BR2_TOOLCHAIN_EXTERNAL_URL="http://toolchains.bootlin.com/downloads/releases/toolchains/x86-i686/tarballs/x86-i686--glibc--bleeding-edge-2018.11-1.tar.bz2"
BR2_TOOLCHAIN_EXTERNAL_GCC_8=y
BR2_TOOLCHAIN_EXTERNAL_HEADERS_4_14=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM_GLIBC=y
BR2_TOOLCHAIN_EXTERNAL_CXX=y
"""
x86_64_toolchain_config = \
"""
BR2_x86_64=y
BR2_x86_corei7=y
BR2_TOOLCHAIN_EXTERNAL=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM=y
BR2_TOOLCHAIN_EXTERNAL_DOWNLOAD=y
BR2_TOOLCHAIN_EXTERNAL_URL="http://toolchains.bootlin.com/downloads/releases/toolchains/x86-64-core-i7/tarballs/x86-64-core-i7--glibc--stable-2018.11-1.tar.bz2"
BR2_TOOLCHAIN_EXTERNAL_GCC_7=y
BR2_TOOLCHAIN_EXTERNAL_HEADERS_4_1=y
BR2_TOOLCHAIN_EXTERNAL_CXX=y
BR2_TOOLCHAIN_EXTERNAL_HAS_SSP=y
BR2_TOOLCHAIN_EXTERNAL_CUSTOM_GLIBC=y
"""
syslinux_legacy_config = \
"""
BR2_TARGET_SYSLINUX=y
BR2_TARGET_SYSLINUX_ISOLINUX=y
BR2_TARGET_SYSLINUX_PXELINUX=y
BR2_TARGET_SYSLINUX_MBR=y
"""
syslinux_efi_config = \
"""
BR2_TARGET_SYSLINUX=y
BR2_TARGET_SYSLINUX_EFI=y
"""
class TestSysLinuxX86LegacyBios(TestSysLinuxBase):
config = \
TestSysLinuxBase.x86_toolchain_config + \
infra.basetest.MINIMAL_CONFIG + \
TestSysLinuxBase.syslinux_legacy_config
def test_run(self):
pass
class TestSysLinuxX86EFI(TestSysLinuxBase):
config = \
TestSysLinuxBase.x86_toolchain_config + \
infra.basetest.MINIMAL_CONFIG + \
TestSysLinuxBase.syslinux_efi_config
def test_run(self):
pass
class TestSysLinuxX86_64LegacyBios(TestSysLinuxBase):
config = \
TestSysLinuxBase.x86_64_toolchain_config + \
infra.basetest.MINIMAL_CONFIG + \
TestSysLinuxBase.syslinux_legacy_config
def test_run(self):
pass
class TestSysLinuxX86_64EFI(TestSysLinuxBase):
config = \
TestSysLinuxBase.x86_64_toolchain_config + \
infra.basetest.MINIMAL_CONFIG + \
TestSysLinuxBase.syslinux_efi_config
def test_run(self):
pass
|
1683037
|
import tensorflow as tf
import tensorflow.contrib as tf_contrib
from tensorflow.python.util import nest
from las.ops import lstm_cell
from las.ops import pyramidal_bilstm
from utils import TrainingSigmoidHelper, ScheduledSigmoidHelper, DenseBinfDecoder,\
TPUScheduledEmbeddingTrainingHelper, decoders_factory
__all__ = [
'listener',
'speller',
]
"""Reference: https://github.com/tensorflow/nmt/blob/master/nmt/gnmt_model.py"""
class AttentionMultiCell(tf.nn.rnn_cell.MultiRNNCell):
"""A MultiCell with attention style."""
def __init__(self, attention_cell, cells, use_new_attention=False):
"""Creates a AttentionMultiCell.
Args:
attention_cell: An instance of AttentionWrapper.
cells: A list of RNNCell wrapped with AttentionInputWrapper.
use_new_attention: Whether to use the attention generated from current
step bottom layer's output. Default is False.
"""
cells = [attention_cell] + cells
self.use_new_attention = use_new_attention
super(AttentionMultiCell, self).__init__(
cells, state_is_tuple=True)
def __call__(self, inputs, state, scope=None):
"""Run the cell with bottom layer's attention copied to all upper layers."""
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
with tf.variable_scope(scope or "multi_rnn_cell"):
new_states = []
with tf.variable_scope("cell_0_attention"):
attention_cell = self._cells[0]
attention_state = state[0]
cur_inp, new_attention_state = attention_cell(
inputs, attention_state)
new_states.append(new_attention_state)
for i in range(1, len(self._cells)):
with tf.variable_scope("cell_%d" % i):
cell = self._cells[i]
cur_state = state[i]
if self.use_new_attention:
cur_inp = tf.concat(
[cur_inp, new_attention_state.attention], -1)
else:
cur_inp = tf.concat(
[cur_inp, attention_state.attention], -1)
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
return cur_inp, tuple(new_states)
class CustomAttention(tf_contrib.seq2seq.LuongAttention):
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="CustomAttention"):
super(CustomAttention, self).__init__(
num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
scale=scale,
probability_fn=probability_fn,
score_mask_value=score_mask_value,
dtype=dtype,
name=name)
self._query_layer = tf.layers.Dense(
num_units, name='query_layer', use_bias=False, dtype=dtype)
self._keys = tf.nn.relu(self.keys)
def __call__(self, query, state):
processed_query = tf.nn.relu(self.query_layer(query))
return super(CustomAttention, self).__call__(processed_query, state)
def listener(encoder_inputs,
source_sequence_length,
mode,
hparams):
if hparams.use_pyramidal:
return pyramidal_bilstm(encoder_inputs, source_sequence_length, mode, hparams)
else:
forward_cell_list, backward_cell_list = [], []
for layer in range(hparams.num_layers):
with tf.variable_scope('fw_cell_{}'.format(layer)):
cell = lstm_cell(hparams.num_units, hparams.dropout, mode)
forward_cell_list.append(cell)
if not hparams.unidirectional:
with tf.variable_scope('bw_cell_{}'.format(layer)):
cell = lstm_cell(hparams.num_units, hparams.dropout, mode)
backward_cell_list.append(cell)
forward_cell = tf.nn.rnn_cell.MultiRNNCell(forward_cell_list)
if not hparams.unidirectional:
backward_cell = tf.nn.rnn_cell.MultiRNNCell(backward_cell_list)
encoder_outputs, encoder_state = tf.nn.bidirectional_dynamic_rnn(
forward_cell,
backward_cell,
encoder_inputs,
sequence_length=source_sequence_length,
dtype=tf.float32)
encoder_outputs = tf.concat(encoder_outputs, -1)
else:
encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
forward_cell,
encoder_inputs,
sequence_length=source_sequence_length,
dtype=tf.float32)
return (encoder_outputs, source_sequence_length), encoder_state
def attend(encoder_outputs,
source_sequence_length,
mode,
hparams):
memory = encoder_outputs
att_kwargs = {}
if hparams.attention_type == 'luong':
attention_fn = tf_contrib.seq2seq.LuongAttention
elif hparams.attention_type == 'bahdanau':
attention_fn = tf_contrib.seq2seq.BahdanauAttention
elif hparams.attention_type == 'luong_monotonic':
attention_fn = tf_contrib.seq2seq.LuongMonotonicAttention
elif hparams.attention_type == 'bahdanau_monotonic':
attention_fn = tf_contrib.seq2seq.BahdanauMonotonicAttention
if mode == tf.estimator.ModeKeys.TRAIN:
att_kwargs['sigmoid_noise'] = 1.0
else:
att_kwargs['mode'] = 'hard'
elif hparams.attention_type == 'custom':
attention_fn = CustomAttention
attention_mechanism = attention_fn(
hparams.num_units, memory, source_sequence_length, **att_kwargs)
cell_list = []
for layer in range(hparams.num_layers):
        with tf.variable_scope('decoder_cell_{}'.format(layer)):
cell = lstm_cell(hparams.num_units, hparams.dropout, mode)
cell_list.append(cell)
alignment_history = (mode != tf.estimator.ModeKeys.TRAIN)
if hparams.binf_projection and not hparams.binf_sampling:
attention_layer_size = hparams.binf_count * 2
else:
attention_layer_size = hparams.attention_layer_size
if hparams.bottom_only:
attention_cell = cell_list.pop(0)
attention_cell = tf_contrib.seq2seq.AttentionWrapper(
attention_cell, attention_mechanism,
attention_layer_size=attention_layer_size,
alignment_history=alignment_history)
decoder_cell = AttentionMultiCell(attention_cell, cell_list)
else:
decoder_cell = tf.nn.rnn_cell.MultiRNNCell(cell_list)
decoder_cell = tf_contrib.seq2seq.AttentionWrapper(
decoder_cell, attention_mechanism,
attention_layer_size=attention_layer_size,
alignment_history=alignment_history)
return decoder_cell
def speller(encoder_outputs,
encoder_state,
decoder_inputs,
source_sequence_length,
target_sequence_length,
mode,
hparams,
binary_outputs=False,
binf_embedding=None,
transparent_projection=False):
batch_size = tf.shape(encoder_outputs)[0]
beam_width = hparams.beam_width
if mode == tf.estimator.ModeKeys.PREDICT and beam_width > 0:
encoder_outputs = tf_contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
source_sequence_length = tf_contrib.seq2seq.tile_batch(
source_sequence_length, multiplier=beam_width)
encoder_state = tf_contrib.seq2seq.tile_batch(
encoder_state, multiplier=beam_width)
batch_size = batch_size * beam_width
def embedding_fn(ids):
# pass callable object to avoid OOM when using one-hot encoding
if hparams.embedding_size != 0:
target_embedding = tf.get_variable(
'target_embedding', [
hparams.target_vocab_size,
hparams.embedding_size],
dtype=tf.float32, initializer=tf_contrib.layers.xavier_initializer())
return tf.nn.embedding_lookup(target_embedding, ids)
elif binary_outputs:
if binf_embedding is None or mode == tf.estimator.ModeKeys.TRAIN:
return tf.cast(ids, tf.float32)
else:
return tf.nn.embedding_lookup(tf.transpose(binf_embedding), ids)
elif hparams.binf_projection:
return tf.nn.embedding_lookup(tf.transpose(binf_embedding), ids)
else:
return tf.one_hot(ids, hparams.target_vocab_size)
decoder_cell = attend(
encoder_outputs, source_sequence_length, mode, hparams)
projection_layer = DenseBinfDecoder(
hparams.target_vocab_size if not binary_outputs else hparams.binf_count,
binf_to_ipa=binf_embedding if mode != tf.estimator.ModeKeys.TRAIN or hparams.binf_projection else None,
use_bias=True, name='projection_layer',
inner_projection_layer=not hparams.binf_projection,
concat_cell_outputs=hparams.binf_projection and mode == tf.estimator.ModeKeys.TRAIN,
kernel_initializer=tf.random_uniform_initializer(minval=-0.075, maxval=0.075))
def get_initial_state():
if hparams.pass_hidden_state and hparams.bottom_only:
return tuple(
zs.clone(cell_state=es)
if isinstance(zs, tf_contrib.seq2seq.AttentionWrapperState) else es
for zs, es in zip(
decoder_cell.zero_state(batch_size, tf.float32), encoder_state))
else:
return decoder_cell.zero_state(batch_size, tf.float32)
initial_state = get_initial_state()
maximum_iterations = hparams.max_symbols if hparams.max_symbols > 0 else None
if mode != tf.estimator.ModeKeys.TRAIN:
max_source_length = tf.reduce_max(source_sequence_length)
maximum_iterations = tf.to_int32(tf.round(tf.to_float(
max_source_length) * hparams.decoding_length_factor))
if mode == tf.estimator.ModeKeys.TRAIN:
decoder_inputs = embedding_fn(decoder_inputs)
if hparams.sampling_probability > 0.0:
if binary_outputs:
helper = ScheduledSigmoidHelper(decoder_inputs, target_sequence_length,
embedding_fn, hparams.sampling_probability, binf_to_ipa=binf_embedding)
else:
helper = TPUScheduledEmbeddingTrainingHelper(
decoder_inputs, target_sequence_length,
embedding_fn, hparams.sampling_probability,
outputs_count=binf_embedding.shape[-1] if hparams.binf_projection else None)
else:
if binary_outputs:
helper = TrainingSigmoidHelper(decoder_inputs, target_sequence_length,
binf_to_ipa=binf_embedding)
else:
helper = tf_contrib.seq2seq.TrainingHelper(
decoder_inputs, target_sequence_length)
decoder = tf_contrib.seq2seq.BasicDecoder(
decoder_cell, helper, initial_state, output_layer=projection_layer)
elif mode == tf.estimator.ModeKeys.PREDICT and beam_width > 0:
if decoder_inputs is not None:
start_tokens = decoder_inputs[:, 0]
decoder_inputs_batch = tf_contrib.seq2seq.tile_batch(
decoder_inputs, multiplier=beam_width)
initial_state = get_partial_targets_state(initial_state, decoder_cell, projection_layer,
embedding_fn(decoder_inputs_batch), batch_size, hparams)
with tf.variable_scope('init_partial_targets'):
zerostate = get_initial_state()
initial_state = initial_state._replace(alignment_history=zerostate.alignment_history, time=zerostate.time)
else:
start_tokens = tf.fill(
[tf.div(batch_size, beam_width)], hparams.sos_id)
decoder = tf_contrib.seq2seq.BeamSearchDecoder(
cell=decoder_cell,
embedding=embedding_fn,
start_tokens=start_tokens,
end_token=hparams.eos_id,
initial_state=initial_state,
beam_width=beam_width,
output_layer=projection_layer)
elif binary_outputs and binf_embedding is None:
start_inputs = tf.concat(
(tf.zeros([batch_size, hparams.binf_count - 2]),
tf.ones([batch_size, 1]),
tf.zeros([batch_size, 1])),
axis=1
)
helper = tf_contrib.seq2seq.InferenceHelper(
sample_fn=lambda x: tf.round(tf.sigmoid(x)),
            # sample_fn=lambda x: tf.sigmoid(x),  # TODO: experiment with non-binarized outputs
sample_shape=[hparams.binf_count],
sample_dtype=tf.float32,
next_inputs_fn=embedding_fn,
start_inputs=start_inputs,
end_fn=lambda x: tf.greater(x[..., -1], 0.5))
decoder = tf_contrib.seq2seq.BasicDecoder(
decoder_cell, helper, initial_state, output_layer=projection_layer)
else:
start_tokens = tf.fill([batch_size], hparams.sos_id)
helper = tf_contrib.seq2seq.GreedyEmbeddingHelper(
embedding_fn, start_tokens, hparams.eos_id)
decoder = decoders_factory('basic_transparent' if transparent_projection else 'basic')(
decoder_cell, helper, initial_state, output_layer=projection_layer)
decoder_outputs, final_context_state, final_sequence_length = tf_contrib.seq2seq.dynamic_decode(
decoder, maximum_iterations=maximum_iterations)
return decoder_outputs, final_context_state, final_sequence_length
def get_partial_targets_state(initial_state, decoder_cell, projection_layer,
decoder_inputs, batch_size, hparams):
with tf.name_scope('partial_targets_state'):
seq_length = tf.shape(decoder_inputs)[1]
helper = tf_contrib.seq2seq.TrainingHelper(
decoder_inputs, tf.tile([seq_length], [batch_size]))
decoder = tf_contrib.seq2seq.BasicDecoder(
decoder_cell, helper, initial_state, output_layer=projection_layer)
_, final_context_state, _ = tf_contrib.seq2seq.dynamic_decode(
decoder, maximum_iterations=seq_length)
return final_context_state
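# --- Usage sketch (illustrative only): how listener and speller fit together.
# The hparams fields are inferred from the attribute accesses above, not a
# confirmed schema.
# (encoder_outputs, src_len), encoder_state = listener(features, feat_len, mode, hparams)
# outputs, final_state, out_len = speller(encoder_outputs, encoder_state,
#                                         targets, src_len, tgt_len, mode, hparams)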
|
1683078
|
import torch
import torchvision
from torch import nn
from ops.basic_ops import ConsensusModule
from ops.transforms import *
from torch.nn.init import normal_, constant_
import torch.nn.functional as F
from efficientnet_pytorch import EfficientNet
from ops.net_flops_table import feat_dim_dict
from torch.distributions import Categorical
def init_hidden(batch_size, cell_size):
init_cell = torch.Tensor(batch_size, cell_size).zero_()
if torch.cuda.is_available():
init_cell = init_cell.cuda()
return init_cell
class TSN_Ada(nn.Module):
def __init__(self, num_class, num_segments,
base_model='resnet101', consensus_type='avg', before_softmax=True, dropout=0.8,
crop_num=1, partial_bn=True, pretrain='imagenet', fc_lr5=False, args=None):
super(TSN_Ada, self).__init__()
self.num_segments = num_segments
self.reshape = True
self.before_softmax = before_softmax
self.dropout = dropout
self.crop_num = crop_num
self.consensus_type = consensus_type
self.pretrain = pretrain
self.fc_lr5 = fc_lr5
# TODO(yue)
self.args = args
self.rescale_to = args.rescale_to
if self.args.ada_reso_skip:
base_model = self.args.backbone_list[0] if len(self.args.backbone_list) >= 1 else None
self.base_model_name = base_model
self.num_class = num_class
self.multi_models = False
self.time_steps = self.num_segments
if self.args.ada_reso_skip:
self.reso_dim = self._get_resolution_dimension()
self.skip_dim = len(self.args.skip_list)
self.action_dim = self._get_action_dimension()
self._prepare_policy_net()
self._extends_to_multi_models()
self._prepare_base_model(base_model)
self._prepare_fc(num_class)
self.consensus = ConsensusModule(consensus_type, args=self.args)
if not self.before_softmax:
self.softmax = nn.Softmax()
self._enable_pbn = partial_bn
if partial_bn:
self.partialBN(True)
def _extends_to_multi_models(self):
if len(self.args.backbone_list) >= 1:
self.multi_models = True
self.base_model_list = nn.ModuleList()
self.new_fc_list = nn.ModuleList()
def _prep_a_net(self, model_name, shall_pretrain):
if "efficientnet" in model_name:
if shall_pretrain:
model = EfficientNet.from_pretrained(model_name)
else:
                model = EfficientNet.from_name(model_name)
model.last_layer_name = "_fc"
else:
model = getattr(torchvision.models, model_name)(shall_pretrain)
if "resnet" in model_name:
model.last_layer_name = 'fc'
elif "mobilenet_v2" in model_name:
model.last_layer_name = 'classifier'
return model
def _get_resolution_dimension(self):
reso_dim = 0
for i in range(len(self.args.backbone_list)):
reso_dim += self.args.ada_crop_list[i]
if self.args.policy_also_backbone:
reso_dim += 1
return reso_dim
def _get_action_dimension(self):
action_dim = self.reso_dim + self.skip_dim
return action_dim
def _prepare_policy_net(self):
shall_pretrain = not self.args.policy_from_scratch
self.lite_backbone = self._prep_a_net(self.args.policy_backbone, shall_pretrain)
self.policy_feat_dim = feat_dim_dict[self.args.policy_backbone]
self.rnn = nn.LSTMCell(input_size=self.policy_feat_dim, hidden_size=self.args.hidden_dim, bias=True)
def _prepare_base_model(self, base_model):
self.input_size = 224
self.input_mean = [0.485, 0.456, 0.406]
self.input_std = [0.229, 0.224, 0.225]
if self.args.ada_reso_skip:
shall_pretrain = len(self.args.model_paths) == 0 or self.args.model_paths[0].lower() != 'none'
for bbi, backbone_name in enumerate(self.args.backbone_list):
model = self._prep_a_net(backbone_name, shall_pretrain)
self.base_model_list.append(model)
else:
self.base_model = self._prep_a_net(base_model, self.pretrain == 'imagenet')
def _prepare_fc(self, num_class):
def make_a_linear(input_dim, output_dim):
linear_model = nn.Linear(input_dim, output_dim)
normal_(linear_model.weight, 0, 0.001)
constant_(linear_model.bias, 0)
return linear_model
i_do_need_a_policy_network = True
if self.args.ada_reso_skip and i_do_need_a_policy_network:
setattr(self.lite_backbone, self.lite_backbone.last_layer_name, nn.Dropout(p=self.dropout))
feed_dim = self.args.hidden_dim if not self.args.frame_independent else self.policy_feat_dim
self.linear = make_a_linear(feed_dim, self.action_dim)
self.lite_fc = make_a_linear(feed_dim, num_class)
if self.multi_models:
multi_fc_list = [None]
for bbi, base_model in enumerate(self.base_model_list):
for fc_i, exit_index in enumerate(multi_fc_list):
last_layer_name = base_model.last_layer_name
feature_dim = getattr(base_model, last_layer_name).in_features
new_fc = make_a_linear(feature_dim, num_class)
self.new_fc_list.append(new_fc)
setattr(base_model, last_layer_name, nn.Dropout(p=self.dropout))
elif self.base_model_name is not None:
if "mobilenet_v2" == self.base_model_name:
feature_dim = getattr(self.base_model, self.base_model.last_layer_name)[1].in_features
else:
feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features
setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))
self.new_fc = make_a_linear(feature_dim, num_class)
def train(self, mode=True):
"""
Override the default train() to freeze the BN parameters
:return:
"""
super(TSN_Ada, self).train(mode)
if self._enable_pbn and mode:
print("Freezing BatchNorm2D except the first one.")
if self.args.ada_reso_skip:
models = [self.lite_backbone]
if self.multi_models:
models = models + self.base_model_list
else:
models = [self.base_model]
for the_model in models:
count = 0
bn_scale = 1
for m in the_model.modules():
if isinstance(m, nn.BatchNorm2d): # TODO(yue)
count += 1
if count >= (2 * bn_scale if self._enable_pbn else bn_scale):
m.eval()
# shutdown update in frozen mode
m.weight.requires_grad = False
m.bias.requires_grad = False
def partialBN(self, enable):
self._enable_pbn = enable
def get_optim_policies(self):
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
custom_ops = []
conv_cnt = 0
bn_cnt = 0
for m in self.modules():
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d):
ps = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(ps[0])
if len(ps) == 2:
first_conv_bias.append(ps[1])
else:
normal_weight.append(ps[0])
if len(ps) == 2:
normal_bias.append(ps[1])
elif isinstance(m, torch.nn.Linear):
ps = list(m.parameters())
if self.fc_lr5:
lr5_weight.append(ps[0])
else:
normal_weight.append(ps[0])
if len(ps) == 2:
if self.fc_lr5:
lr10_bias.append(ps[1])
else:
normal_bias.append(ps[1])
elif isinstance(m, torch.nn.BatchNorm2d):
bn_cnt += 1
# later BN's are frozen
if not self._enable_pbn or bn_cnt == 1:
bn.extend(list(m.parameters()))
elif isinstance(m, torch.nn.LSTMCell):
ps = list(m.parameters())
normal_weight.append(ps[0])
normal_weight.append(ps[1])
normal_bias.append(ps[2])
normal_bias.append(ps[3])
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError("New atomic module type: {}. Need to give it a learning policy".format(type(m)))
return [
{'params': first_conv_weight, 'lr_mult': 1, 'decay_mult': 1,
'name': "first_conv_weight"},
{'params': first_conv_bias, 'lr_mult': 2, 'decay_mult': 0,
'name': "first_conv_bias"},
{'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,
'name': "normal_weight"},
{'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,
'name': "normal_bias"},
{'params': bn, 'lr_mult': 1, 'decay_mult': 0,
'name': "BN scale/shift"},
{'params': custom_ops, 'lr_mult': 1, 'decay_mult': 1,
'name': "custom_ops"},
# for fc
{'params': lr5_weight, 'lr_mult': 5, 'decay_mult': 1,
'name': "lr5_weight"},
{'params': lr10_bias, 'lr_mult': 10, 'decay_mult': 0,
'name': "lr10_bias"},
]
def backbone(self, input_data, the_base_model, new_fc, signal=-1, indices_list=[], boost=False, b_t_c=False,
**kwargs):
_b, _tc, _h, _w = input_data.shape # TODO(yue) input (B, T*C, H, W)
_t, _c = _tc // 3, 3
if b_t_c:
input_b_t_c = input_data.view(_b, _t, _c, _h, _w)
else:
input_2d = input_data.view(_b * _t, _c, _h, _w)
if b_t_c:
feat = the_base_model(input_b_t_c, signal=signal, **kwargs)
else:
feat = the_base_model(input_2d)
_base_out = None
if b_t_c:
if new_fc is not None:
_base_out = new_fc(feat.view(_b * _t, -1)).view(_b, _t, -1)
else:
if new_fc is not None:
_base_out = new_fc(feat).view(_b, _t, -1)
feat = feat.view(_b, _t, -1)
return feat, _base_out
def get_lite_j_and_r(self, input_list, online_policy, tau):
feat_lite, _ = self.backbone(input_list[self.args.policy_input_offset], self.lite_backbone, None)
r_list = []
lite_j_list = []
batch_size = feat_lite.shape[0]
hx = init_hidden(batch_size, self.args.hidden_dim)
cx = init_hidden(batch_size, self.args.hidden_dim)
remain_skip_vector = torch.zeros(batch_size, 1)
old_hx = None
old_r_t = None
if self.args.use_reinforce:
log_prob_r_list = []
prob_r_list = []
for t in range(self.time_steps):
if self.args.frame_independent:
feat_t = feat_lite[:, t]
else:
hx, cx = self.rnn(feat_lite[:, t], (hx, cx))
feat_t = hx
if self.args.use_reinforce:
p_t = F.softmax(self.linear(feat_t), dim=1).clamp(min=1e-8)
else:
p_t = torch.log(F.softmax(self.linear(feat_t), dim=1).clamp(min=1e-8))
j_t = self.lite_fc(feat_t)
lite_j_list.append(j_t) # TODO as pred
# TODO (yue) need a simple case to illustrate this
if online_policy:
if self.args.use_reinforce:
m = Categorical(p_t)
prob_r_list.append(p_t)
r_t_idx = m.sample()
r_t = torch.eye(self.action_dim)[r_t_idx].cuda()
log_prob_r_t = m.log_prob(r_t_idx)
log_prob_r_list.append(log_prob_r_t)
else:
r_t = torch.cat(
[F.gumbel_softmax(p_t[b_i:b_i + 1], tau, True) for b_i in range(p_t.shape[0])])
# TODO update states and r_t
if old_hx is not None:
take_bool = remain_skip_vector > 0.5
take_old = torch.tensor(take_bool, dtype=torch.float).cuda()
take_curr = torch.tensor(~take_bool, dtype=torch.float).cuda()
hx = old_hx * take_old + hx * take_curr
r_t = old_r_t * take_old + r_t * take_curr
# TODO update skipping_vector
for batch_i in range(batch_size):
for skip_i in range(self.action_dim - self.reso_dim):
# TODO(yue) first condition to avoid valuing skip vector forever
if remain_skip_vector[batch_i][0] < 0.5 and r_t[batch_i][self.reso_dim + skip_i] > 0.5:
remain_skip_vector[batch_i][0] = self.args.skip_list[skip_i]
old_hx = hx
old_r_t = r_t
r_list.append(r_t) # TODO as decision
remain_skip_vector = (remain_skip_vector - 1).clamp(0)
if online_policy:
if self.args.use_reinforce:
return lite_j_list, torch.stack(r_list, dim=1), torch.stack(log_prob_r_list, dim=1)
else:
return lite_j_list, torch.stack(r_list, dim=1)
else:
return lite_j_list, None
def using_online_policy(self):
if any([self.args.offline_lstm_all, self.args.offline_lstm_last]):
return False
elif any([self.args.random_policy, self.args.all_policy]):
return False
elif self.args.real_scsampler:
return False
else:
return True
def input_fusion(self, input_data, r):
# TODO data: B * TC * H * W
# TODO r : B * T * T
_b, _tc, _h, _w = input_data.shape
_c = _tc // self.args.num_segments
fuse_data_list = []
for bi in range(_b):
if self.args.identity_prior:
prior = torch.eye(self.args.num_segments).to(input_data.device)
else:
prior = 0
if self.args.lower_mask:
mask = torch.tril(torch.ones(self.args.num_segments, self.args.num_segments)).to(input_data.device)
else:
mask = 1
real_r = (r[bi] + prior) * mask
if self.args.direct_lower_mask:
real_r = torch.tril(real_r)
if self.args.row_normalization:
real_r = real_r / (real_r.sum(dim=1, keepdim=True).clamp_min(1e-6))
fused_data = torch.matmul(real_r, input_data[bi].view(self.args.num_segments, _c * _h * _w))
fuse_data_list.append(fused_data)
return torch.stack(fuse_data_list, dim=0).view(_b, _tc, _h, _w)
def get_feat_and_pred(self, input_list, r_all, **kwargs):
feat_out_list = []
base_out_list = []
ind_list = []
for bb_i, the_backbone in enumerate(self.base_model_list):
feat_out, base_out = self.backbone(input_list[bb_i], the_backbone, self.new_fc_list[bb_i])
feat_out_list.append(feat_out)
base_out_list.append(base_out)
return feat_out_list, base_out_list, ind_list
def late_fusion(self, base_out_list, in_matrix, out_matrix):
return base_out_list
def forward(self, *argv, **kwargs):
if not self.args.ada_reso_skip: # TODO simple TSN
_, base_out = self.backbone(kwargs["input"][0], self.base_model, self.new_fc,
signal=self.args.default_signal)
output = self.consensus(base_out)
return output.squeeze(1)
input_list = kwargs["input"]
batch_size = input_list[0].shape[0] # TODO(yue) input[0] B*(TC)*H*W
if self.args.use_reinforce:
lite_j_list, r_all, r_log_prob = self.get_lite_j_and_r(input_list, self.using_online_policy(),
kwargs["tau"])
else:
lite_j_list, r_all = self.get_lite_j_and_r(input_list, self.using_online_policy(), kwargs["tau"])
if self.multi_models:
if "tau" not in kwargs:
kwargs["tau"] = None
feat_out_list, base_out_list, ind_list = self.get_feat_and_pred(input_list, r_all, tau=kwargs["tau"])
else:
feat_out_list, base_out_list, ind_list = [], [], []
if self.args.policy_also_backbone:
base_out_list.append(torch.stack(lite_j_list, dim=1))
if self.args.offline_lstm_last: # TODO(yue) no policy - use policy net as backbone - just LSTM(last)
return lite_j_list[-1].squeeze(1), None, None, None
elif self.args.offline_lstm_all: # TODO(yue) no policy - use policy net as backbone - just LSTM(average)
return torch.stack(lite_j_list).mean(dim=0).squeeze(1), None, None, None
elif self.args.real_scsampler:
real_pred = base_out_list[0]
lite_pred = torch.stack(lite_j_list, dim=1)
output, ind = self.consensus(real_pred, lite_pred)
return output.squeeze(1), ind, real_pred, lite_pred
else:
if self.args.random_policy: # TODO(yue) random policy
r_all = torch.zeros(batch_size, self.time_steps, self.action_dim).cuda()
for i_bs in range(batch_size):
for i_t in range(self.time_steps):
r_all[i_bs, i_t, torch.randint(self.action_dim, [1])] = 1.0
elif self.args.all_policy: # TODO(yue) all policy: take all
r_all = torch.ones(batch_size, self.time_steps, self.action_dim).cuda()
output = self.combine_logits(r_all, base_out_list, ind_list)
if self.args.save_meta and self.args.save_all_preds:
return output.squeeze(1), r_all, torch.stack(base_out_list, dim=1)
else:
if self.args.use_reinforce:
return output.squeeze(1), r_all, r_log_prob, torch.stack(base_out_list, dim=1)
else:
return output.squeeze(1), r_all, None, torch.stack(base_out_list, dim=1)
def combine_logits(self, r, base_out_list, ind_list):
# TODO r N, T, K
# TODO base_out_list < K * (N, T, C)
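        # In words: the first reso_dim channels of r one-hot-select a backbone per
        # timestep; predictions are summed over time and backbones, then divided
        # by the number of frames actually evaluated (t_tensor).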
pred_tensor = torch.stack(base_out_list, dim=2)
r_tensor = r[:, :, :self.reso_dim].unsqueeze(-1)
t_tensor = torch.sum(r[:, :, :self.reso_dim], dim=[1, 2]).unsqueeze(-1).clamp(1) # TODO sum T, K to count frame
return (pred_tensor * r_tensor).sum(dim=[1, 2]) / t_tensor
@property
def crop_size(self):
return self.input_size
@property
def scale_size(self):
return self.input_size * 256 // 224
def get_augmentation(self, flip=True):
if flip:
return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66]),
GroupRandomHorizontalFlip(is_flow=False)])
else:
print('#' * 20, 'NO FLIP!!!')
return torchvision.transforms.Compose([GroupMultiScaleCrop(self.input_size, [1, .875, .75, .66])])
|
1683082
|
import torch
import torch.nn as nn
import onqg.dataset.Constants as Constants
from onqg.models.modules.Layers import GGNNEncoderLayer
class GGNNEncoder(nn.Module):
"""Combine GGNN (Gated Graph Neural Network) and GAT (Graph Attention Network)
Input: (1) nodes - [batch_size, node_num, d_model]
(2) edges - ([batch_size, node_num * node_num], [batch_size, node_num * node_num]) 1st-inlink, 2nd-outlink
(3) mask - ([batch_size, node_num, node_num], [batch_size, node_num, node_num]) 1st-inlink, 2nd-outlink
(4) node_feats - list of [batch_size, node_num]
"""
def __init__(self, n_edge_type, d_model, n_layer, alpha, d_feat_vec,
feat_vocab, layer_attn, dropout, attn_dropout):
self.name = 'graph'
super(GGNNEncoder, self).__init__()
self.layer_attn = layer_attn
self.hidden_size = d_model
        self.d_model = d_model
        # `activate` is applied to the input node embeddings in forward(); it was
        # missing from the original snippet, so a ReLU is assumed here (an
        # assumption, not confirmed by the source).
        self.activate = nn.ReLU()
###=== node features ===###
self.feature = True if feat_vocab else False
if self.feature:
self.feat_embs = nn.ModuleList([
nn.Embedding(n_f_vocab, d_feat_vec, padding_idx=Constants.PAD) for n_f_vocab in feat_vocab
])
self.feature_transform = nn.Linear(self.hidden_size + d_feat_vec * len(feat_vocab), self.hidden_size)
###=== edge embedding ===###
# self.edge_in_emb = nn.Embedding(n_edge_type, self.hidden_size * d_model, padding_idx=Constants.PAD)
# self.edge_out_emb = nn.Embedding(n_edge_type, self.hidden_size * d_model, padding_idx=Constants.PAD)
# self.edge_bias = edge_bias
# if edge_bias:
# self.edge_in_emb_bias = nn.Embedding(n_edge_type, d_model, padding_idx=Constants.PAD)
# self.edge_out_emb_bias = nn.Embedding(n_edge_type, d_model, padding_idx=Constants.PAD)
###=== graph encode layers===###
self.layer_stack = nn.ModuleList([
GGNNEncoderLayer(self.hidden_size, d_model, alpha, feature=self.feature,
dropout=dropout, attn_dropout=attn_dropout) for _ in range(n_layer)
])
###=== gated output ===###
self.gate = nn.Linear(2 * d_model, d_model, bias=False)
@classmethod
def from_opt(cls, opt):
return cls(opt['n_edge_type'], opt['d_model'], opt['n_layer'], opt['alpha'],
opt['d_feat_vec'], opt['feat_vocab'], opt['layer_attn'],
opt['dropout'], opt['attn_dropout'])
def forward(self, inputs):
nodes, mask = inputs['nodes'], inputs['mask']
node_feats, node_type = inputs['feat_seqs'], inputs['type']
nodes = self.activate(nodes)
node_output = nodes # batch_size x node_num x d_model
###=== get embeddings ===###
feat_hidden = None
if self.feature:
feat_hidden = [feat_emb(node_feat) for node_feat, feat_emb in zip(node_feats, self.feat_embs)]
feat_hidden = torch.cat(feat_hidden, dim=2) # batch_size x node_num x (hidden_size - d_model)
node_output = self.feature_transform(torch.cat((node_output, feat_hidden), dim=-1))
# # batch_size x (node_num * node_num) x hidden_size x d_model
# edge_in_hidden = self.edge_in_emb(edges[0]).view(nodes.size(0), -1, self.hidden_size, nodes.size(2))
# edge_out_hidden = self.edge_out_emb(edges[1]).view(nodes.size(0), -1, self.hidden_size, nodes.size(2))
# edge_hidden = (edge_in_hidden, edge_out_hidden)
# if self.edge_bias:
# # batch_size x (node_num * node_num) x d_model
# edge_in_hidden_bias, edge_out_hidden_bias = self.edge_in_emb_bias(edges[0]), self.edge_out_emb_bias(edges[1])
# edge_hidden_bias = (edge_in_hidden_bias, edge_out_hidden_bias) if self.edge_bias else None
##=== forward ===###
node_outputs = []
for enc_layer in self.layer_stack:
node_output = enc_layer(node_output, mask, node_type, feat_hidden=feat_hidden)
node_outputs.append(node_output)
node_outputs[-1] = node_output
hidden = [layer_output.transpose(0, 1)[0] for layer_output in node_outputs]
if self.layer_attn:
node_output = node_outputs
return node_output, hidden
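# --- Usage sketch (illustrative; the opt keys mirror from_opt() above, but the
# concrete values are assumptions) ---
# encoder = GGNNEncoder.from_opt({
#     'n_edge_type': 8, 'd_model': 256, 'n_layer': 3, 'alpha': 0.2,
#     'd_feat_vec': 32, 'feat_vocab': [20, 20], 'layer_attn': False,
#     'dropout': 0.1, 'attn_dropout': 0.1})
# node_output, hidden = encoder({'nodes': nodes, 'mask': mask,
#                                'feat_seqs': feats, 'type': node_type})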
|
1683097
|
import glob
import os
from invoke import task
from invoke.exceptions import Exit
from .libs.common.color import color_message
def get_package_path(glob_pattern):
package_paths = glob.glob(glob_pattern)
if len(package_paths) > 1:
raise Exit(code=1, message=color_message(f"Too many files matching {glob_pattern}: {package_paths}", "red"))
elif len(package_paths) == 0:
raise Exit(code=1, message=color_message(f"Couldn't find any file matching {glob_pattern}", "red"))
return package_paths[0]
@task
def compare_size(_, new_package, stable_package, package_type, last_stable, threshold):
mb = 1000000
new_package_size = os.path.getsize(get_package_path(new_package))
stable_package_size = os.path.getsize(get_package_path(stable_package))
threshold = int(threshold)
diff = new_package_size - stable_package_size
# For printing purposes
new_package_size_mb = new_package_size / mb
stable_package_size_mb = stable_package_size / mb
threshold_mb = threshold / mb
diff_mb = diff / mb
if diff > threshold:
print(
color_message(
f"""{package_type} size increase is too large:
New package size is {new_package_size_mb:.2f}MB
Stable package ({last_stable}) size is {stable_package_size_mb:.2f}MB
Diff is {diff_mb:.2f}MB > {threshold_mb:.2f}MB (max allowed diff)""",
"red",
)
)
raise Exit(code=1)
print(
f"""{package_type} size increase is OK:
New package size is {new_package_size_mb:.2f}MB
Stable package ({last_stable}) size is {stable_package_size_mb:.2f}MB
Diff is {diff_mb:.2f}MB (max allowed diff: {threshold_mb:.2f}MB)"""
)
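# Example invocation (hypothetical globs and threshold; invoke turns the
# underscore in compare_size into a hyphen):
#   inv compare-size --new-package 'omnibus/pkg/new/*.deb' \
#       --stable-package '/tmp/stable/*.deb' \
#       --package-type deb --last-stable 7.32.0 --threshold 70000000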
|
1683101
|
import json
import psycopg2
import traceback
from colorama import Fore
from toolset.utils.output_helper import log
from toolset.databases.abstract_database import AbstractDatabase
class Database(AbstractDatabase):
@classmethod
def get_connection(cls, config):
db = psycopg2.connect(
host=config.database_host,
port="5432",
user="benchmarkdbuser",
password="<PASSWORD>",
database="hello_world")
cursor = db.cursor()
cursor.execute("CREATE EXTENSION IF NOT EXISTS pg_stat_statements")
return db
@classmethod
def get_current_world_table(cls, config):
results_json = []
try:
db = cls.get_connection(config)
cursor = db.cursor()
cursor.execute("SELECT * FROM \"World\"")
results = cursor.fetchall()
results_json.append(json.loads(json.dumps(dict(results))))
cursor = db.cursor()
cursor.execute("SELECT * FROM \"world\"")
results = cursor.fetchall()
results_json.append(json.loads(json.dumps(dict(results))))
db.close()
except Exception:
tb = traceback.format_exc()
log("ERROR: Unable to load current Postgres World table.",
color=Fore.RED)
log(tb)
return results_json
@classmethod
def test_connection(cls, config):
try:
db = cls.get_connection(config)
cursor = db.cursor()
cursor.execute("SELECT 1")
cursor.fetchall()
db.close()
return True
        except Exception:
return False
@classmethod
def get_queries(cls, config):
return cls.__exec_and_fetchone(config, "SELECT SUM(calls) FROM pg_stat_statements WHERE query ~* '[[:<:]]%s[[:>:]]'" % cls.tbl_name)
@classmethod
def get_rows(cls, config):
return cls.__exec_and_fetchone(config, "SELECT SUM(rows) FROM pg_stat_statements WHERE query ~* '[[:<:]]%s[[:>:]]' AND query ~* 'select'" % cls.tbl_name)
@classmethod
def get_rows_updated(cls, config):
return cls.__exec_and_fetchone(config, "SELECT SUM(rows) FROM pg_stat_statements WHERE query ~* '[[:<:]]%s[[:>:]]' AND query ~* 'update'" % cls.tbl_name)
@classmethod
def reset_cache(cls, config):
# To fix: DISCARD ALL cannot run inside a transaction block
# cursor = self.db.cursor()
# cursor.execute("END;DISCARD ALL;")
# self.db.commit()
return
@classmethod
def __exec_and_fetchone(cls, config, query):
db = cls.get_connection(config)
cursor = db.cursor()
cursor.execute(query)
record = cursor.fetchone()
return record[0]
|
1683119
|
import json
import logging
import os
from smda.Disassembler import Disassembler
def detectBackend():
backend = ""
version = ""
try:
import idaapi
import idautils
backend = "IDA"
version = idaapi.IDA_SDK_VERSION
    except ImportError:
pass
return (backend, version)
if __name__ == "__main__":
BACKEND, VERSION = detectBackend()
if BACKEND == "IDA":
from smda.ida.IdaInterface import IdaInterface
ida_interface = IdaInterface()
binary = ida_interface.getBinary()
base_addr = ida_interface.getBaseAddr()
DISASSEMBLER = Disassembler(backend=BACKEND)
REPORT = DISASSEMBLER.disassembleBuffer(binary, base_addr)
output_path = ida_interface.getIdbDir()
output_filepath = output_path + "ConvertedFromIdb.smda"
with open(output_filepath, "w") as fout:
json.dump(REPORT.toDict(), fout, indent=1, sort_keys=True)
print("Output saved to: %s" % output_filepath)
else:
raise Exception("No supported backend found.")
|
1683126
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import torchvision.models as models
def mse_loss(output, target):
loss_ = F.mse_loss(output, target)
return loss_
def smooth_l1_loss(pred, target):
loss_ = F.smooth_l1_loss(pred, target)
return loss_
def gradient_loss(pred, gradient, target):
diff_ = (pred - target)
loss_ = torch.mean((diff_**2) * gradient)
return loss_
def CrossEntropyLoss2d(inputs, targets, weight=None):
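    """Per-pixel cross-entropy over (n, c, h, w) logits; positions with
    target < 0 are masked out (treated as an ignore index)."""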
n, c, h, w = inputs.size()
log_p = F.log_softmax(inputs, dim=1)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c) #(n*h*w, c)
log_p = log_p[targets.view(n*h*w, 1).repeat(1, c) >= 0]
log_p = log_p.view(-1, c)
mask = targets >= 0
targets = targets[mask]
loss = F.nll_loss(log_p, targets, weight=weight)
return loss
class VGG16ContentModel(nn.Module):
def __init__(self, **kwargs):
super(VGG16ContentModel, self).__init__()
pretrained_model = getattr(models, kwargs['model'])(pretrained=True).features[:10]
self.weights = kwargs['weights']
# get certain layers to construct content-loss module
        self.conv1 = pretrained_model[:2]    # conv1_1 + relu
        self.conv2 = pretrained_model[2:5]   # conv1_2 + relu + maxpool 1
        self.conv3 = pretrained_model[5:7]   # conv2_1 + relu
        self.conv4 = pretrained_model[7:10]  # conv2_2 + relu + maxpool 2
def forward(self, pred, gt):
content_pred_conv1 = self.conv1(pred)
content_gt_conv1 = self.conv1(gt)
content_pred_conv2 = self.conv2(content_pred_conv1)
content_gt_conv2 = self.conv2(content_gt_conv1)
content_pred_conv3 = self.conv3(content_pred_conv2)
content_gt_conv3 = self.conv3(content_gt_conv2)
content_pred_conv4 = self.conv4(content_pred_conv3)
content_gt_conv4 = self.conv4(content_gt_conv3)
loss = self.weights[0] * F.mse_loss(content_pred_conv1, content_gt_conv1) +\
self.weights[1] * F.mse_loss(content_pred_conv2, content_gt_conv2) +\
self.weights[2] * F.mse_loss(content_pred_conv3, content_gt_conv3) +\
self.weights[3] * F.mse_loss(content_pred_conv4, content_gt_conv4)
return loss
|
1683130
|
from pypy.objspace.std import floatobject as fobj
from pypy.objspace.std.objspace import FailedToImplement
import py
class TestW_FloatObject:
def _unwrap_nonimpl(self, func, *args, **kwds):
""" make sure that the expected exception occurs, and unwrap it """
try:
res = func(*args, **kwds)
raise Exception, "should have failed but returned '%s'!" %repr(res)
except FailedToImplement, arg:
return arg.w_type
def test_pow_fff(self):
x = 10.0
y = 2.0
z = 13.0
f1 = fobj.W_FloatObject(x)
f2 = fobj.W_FloatObject(y)
f3 = fobj.W_FloatObject(z)
assert self.space.w_TypeError == (
self._unwrap_nonimpl(fobj.pow__Float_Float_ANY,
self.space, f1, f2, f3))
def test_pow_ffn(self):
x = 10.0
y = 2.0
f1 = fobj.W_FloatObject(x)
f2 = fobj.W_FloatObject(y)
v = fobj.pow__Float_Float_ANY(self.space, f1, f2, self.space.w_None)
assert v.floatval == x ** y
f1 = fobj.W_FloatObject(-1.23)
f2 = fobj.W_FloatObject(-4.56)
assert self.space.w_ValueError == (
self._unwrap_nonimpl(fobj.pow__Float_Float_ANY,
self.space, f1, f2,
self.space.w_None))
x = -10
y = 2.0
f1 = fobj.W_FloatObject(x)
f2 = fobj.W_FloatObject(y)
v = fobj.pow__Float_Float_ANY(self.space, f1, f2, self.space.w_None)
assert v.floatval == x**y
class AppTestAppFloatTest:
def test_negatives(self):
assert -1.1 < 0
assert -0.1 < 0
def test_float_callable(self):
assert 0.125 == float(0.125)
def test_float_int(self):
assert 42.0 == float(42)
def test_float_hash(self):
# these are taken from standard Python, which produces
# the same but for -1.
import math
assert hash(42.0) == 42
assert hash(42.125) == 1413677056
assert hash(math.ldexp(0.125, 1000)) in (
32, # answer on 32-bit machines
137438953472) # answer on 64-bit machines
# testing special overflow values
assert hash(1e200 * 1e200) == 314159
assert hash(-1e200 * 1e200) == -271828
def test_int_float(self):
assert int(42.1234) == 42
assert int(4e10) == 40000000000L
def test_float_string(self):
assert 42 == float("42")
assert 42.25 == float("42.25")
def test_float_unicode(self):
        # U+00A0 (no-break space) and U+2000 (en quad) are Unicode whitespace
assert 42.75 == float(unichr(0x00A0)+unicode("42.75")+unichr(0x2000))
def test_float_long(self):
assert 42.0 == float(42L)
assert 10000000000.0 == float(10000000000L)
raises(OverflowError, float, 10**400)
def test_round(self):
assert 1.0 == round(1.0)
assert 1.0 == round(1.1)
assert 2.0 == round(1.9)
assert 2.0 == round(1.5)
assert -2.0 == round(-1.5)
assert -2.0 == round(-1.5)
assert -2.0 == round(-1.5, 0)
assert -2.0 == round(-1.5, 0)
assert 22.2 == round(22.222222, 1)
assert 20.0 == round(22.22222, -1)
assert 0.0 == round(22.22222, -2)
def test_special_float_method(self):
class a:
def __float__(self):
self.ar = True
return None
inst = a()
raises(TypeError, float, inst)
assert inst.ar
class b:
pass
raises((AttributeError, TypeError), float, b())
def test_getnewargs(self):
assert 0.0 .__getnewargs__() == (0.0,)
def test_pow(self):
def pw(x, y):
return x ** y
def espeq(x, y):
            return not abs(x-y) > 1e-05
raises(ZeroDivisionError, pw, 0.0, -1)
assert pw(0, 0.5) == 0.0
assert espeq(pw(4.0, 0.5), 2.0)
assert pw(4.0, 0) == 1.0
assert pw(-4.0, 0) == 1.0
raises(ValueError, pw, -1.0, 0.5)
assert pw(-1.0, 2.0) == 1.0
assert pw(-1.0, 3.0) == -1.0
assert pw(-1.0, 1e200) == 1.0
def test_pow_neg_base(self):
def pw(x, y):
return x ** y
assert pw(-2.0, 2.0) == 4
def test_float_cmp(self):
assert 12.5 == 12.5
assert 12.5 != -3.2
assert 12.5 < 123.4
assert .25 <= .25
assert -5744.23 <= -51.2
assert 4.3 > 2.3
assert 0.01 >= -0.01
# float+long
verylonglong = 10L**400
infinite = 1e200*1e200
assert 12.0 == 12L
assert 1e300 == long(1e300)
assert 12.1 != 12L
assert infinite != 123456789L
assert 12.9 < 13L
assert -infinite < -13L
assert 12.9 <= 13L
assert 13.0 <= 13L
assert 13.01 > 13L
assert 13.0 >= 13L
assert 13.01 >= 13L
assert infinite > verylonglong
assert infinite >= verylonglong
assert 1234.56 < verylonglong
assert 1234.56 <= verylonglong
# long+float
assert 12L == 12.0
assert long(1e300) == 1e300
assert 12L != 12.1
assert 123456789L != infinite
assert 13L > 12.9
assert -13L > -infinite
assert 13L >= 12.9
assert 13L >= 13.0
assert 13L < 13.01
assert 13L <= 13.0
assert 13L <= 13.01
assert verylonglong < infinite
assert verylonglong <= infinite
assert verylonglong > 1234.56
assert verylonglong >= 1234.56
|
1683150
|
from datadog_checks.base import ConfigurationError, OpenMetricsBaseCheck
class JfrogPlatformCheck(OpenMetricsBaseCheck):
"""
Collect metrics from JFrog
"""
def __init__(self, name, init_config, instances=None):
instance = instances[0]
instancetype = instance.get('instance_type')
endpoint = instance.get('prometheus_url')
if endpoint is None:
raise ConfigurationError("Unable to find prometheus_url in config file.")
if instancetype == 'artifactory':
instance.update(
{
'prometheus_url': endpoint,
'namespace': 'jfrog.artifactory',
'metrics': ['sys*', 'jfrt*', 'app*'],
'send_distribution_counts_as_monotonic': instance.get(
'send_distribution_counts_as_monotonic', True
),
'send_distribution_sums_as_monotonic': instance.get('send_distribution_sums_as_monotonic', True),
}
)
elif instancetype == 'xray':
instance.update(
{
'prometheus_url': endpoint,
'namespace': 'jfrog.xray',
'metrics': ['app*', 'db*', 'go*', 'queue*', 'sys*', 'jfxr*'],
'send_distribution_counts_as_monotonic': instance.get(
'send_distribution_counts_as_monotonic', True
),
'send_distribution_sums_as_monotonic': instance.get('send_distribution_sums_as_monotonic', True),
}
)
else:
raise ConfigurationError("Unable to recognize instance_type.")
super(JfrogPlatformCheck, self).__init__(name, init_config, [instance])
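# --- Example instance config (conf.yaml); the keys are the ones read above,
# the endpoint values are illustrative only ---
# instances:
#   - prometheus_url: http://localhost:8082/artifactory/api/v1/metrics
#     instance_type: artifactory
#   - prometheus_url: http://localhost:8000/xray/api/v1/metrics
#     instance_type: xray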
|
1683170
|
import torch
import torch.autograd
from torch import nn
from torch.autograd import Variable
def pairwise(data):
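    """Squared Euclidean distances between all row pairs of `data`:
    an (n_obs, n_obs) matrix with dkl2[k, l] = ||x_k - x_l||^2."""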
n_obs, dim = data.size()
xk = data.unsqueeze(0).expand(n_obs, n_obs, dim)
xl = data.unsqueeze(1).expand(n_obs, n_obs, dim)
dkl2 = ((xk - xl)**2.0).sum(2).squeeze()
return dkl2
class VTSNE(nn.Module):
def __init__(self, n_points, n_topics, **args):
super(VTSNE, self).__init__()
if args['pt_ver'] == '0.3':
self.device = 'null'
elif args['pt_ver'] == '0.4':
self.device = args['device']
# Logit of datapoint-to-topic weight
self.logits_mu = nn.Embedding(n_points, n_topics)
self.logits_lv = nn.Embedding(n_points, n_topics)
self.n_points = n_points
self.n_topics = n_topics
@property
def logits(self):
return self.logits_mu
def reparametrize(self, mu, logvar):
# From VAE example
# https://github.com/pytorch/examples/blob/master/vae/main.py
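        # z = mu + eps * std with std = exp(0.5 * logvar);
        # kld = -0.5 * sum(1 + logvar - mu^2 - exp(logvar)), as computed below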
std = logvar.mul(0.5).exp_()
eps = torch.FloatTensor(std.size()).normal_()
if self.device == 'null':
eps = Variable(eps.cuda(), requires_grad=True)
else:
eps = eps.to(self.device)
z = eps.mul(std).add_(mu)
kld = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
kld = torch.sum(kld).mul_(-0.5)
return z, kld
def sample_logits(self, i=None):
if i is None:
return self.reparametrize(self.logits_mu.weight, self.logits_lv.weight)
else:
return self.reparametrize(self.logits_mu(i), self.logits_lv(i))
def forward(self, pij, i, j):
pij = pij.float()
# Get for all points
x, loss_kldrp = self.sample_logits() # x is z, [1083 x 2]
# Compute squared pairwise distances
dkl2 = pairwise(x)
# Compute partition function
n_diagonal = dkl2.size()[0]
part = (1 + dkl2).pow(-1.0).sum() - n_diagonal
# Compute the numerator
xi, _ = self.sample_logits(i) # xi, 4096 x 2
xj, _ = self.sample_logits(j)
num = ((1. + (xi - xj)**2.0).sum(1)).pow(-1.0).squeeze()
qij = num / part.expand_as(num)
# Compute KLD(pij || qij)
loss_kld = pij * (torch.log(pij) - torch.log(qij))
# Compute sum of all variational terms
return loss_kld.sum() + loss_kldrp.sum() * 1e-7
def __call__(self, *args):
return self.forward(*args)
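# --- Minimal training sketch (illustrative; pij/i/j come from a precomputed
# neighbour distribution p_ij, which this file does not construct) ---
# model = VTSNE(n_points=1083, n_topics=2, pt_ver='0.4', device='cuda')
# optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# loss = model(pij_batch, i_batch, j_batch)
# optimizer.zero_grad(); loss.backward(); optimizer.step()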
|
1683206
|
from flask import url_for, json
from behave import when, then
@when('the user makes a request to the get planners endpoint')
def the_user_makes_a_request_to_get_planners(context):
with context.app.test_request_context():
url = url_for("planners.list_planners")
res = context.client.get(url)
context.last_response = res
@then(u'the api responds with {n_planners} planners')
def the_api_responds_with_n_planners(context, n_planners):
json_response = json.loads(context.last_response.get_data())
assert "planners" in json_response
assert int(n_planners) == len(json_response.get("planners"))
|
1683210
|
from django.conf.urls import patterns, include, url
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from browser.views import *
from http_handler.settings import WEBSITE
from registration.backends.default.views import ActivationView
from registration.forms import MurmurPasswordResetForm
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
website_context = {'website' : WEBSITE}
# shared URL patterns
urlpatterns = patterns('',
url(r'^$', 'browser.views.index'),
url(r'^lamson_status', 'browser.views.lamson_status'),
url(r'^settings', 'browser.views.settings'),
url(r'^404', 'browser.views.error'),
url(r'^thread$', 'browser.views.thread'),
url(r'^create_new_group', 'browser.views.create_group_view'),
url(r'^create_group', 'browser.views.create_group'),
url(r'^delete_group', 'browser.views.delete_group'),
url(r'^groups/(?P<group_name>[\w-]+)/edit_group_info', 'browser.views.edit_group_info_view'),
url(r'^edit_group_info', 'browser.views.edit_group_info'),
url(r'^group_info', 'browser.views.group_info'),
url(r'^groups/(?P<group_name>[\w-]+)$', 'browser.views.group_page'),
url(r'^groups/(?P<group_name>[\w-]+)/add_members', 'browser.views.add_members_view'),
url(r'^add_members', 'browser.views.add_members'),
url(r'^edit_members', 'browser.views.edit_members'),
url(r'^unsubscribe_group', 'browser.views.unsubscribe_group'),
url(r'^subscribe_group', 'browser.views.subscribe_group'),
url(r'^my_group_list', 'browser.views.my_group_list'),
url(r'^edit_group_settings', 'browser.views.edit_group_settings'),
url(r'^group_settings', 'browser.views.get_group_settings'),
url(r'^groups/(?P<group_name>[\w-]+)/edit_my_settings', 'browser.views.my_group_settings_view'),
url(r'^gmail_setup/', include('gmail_setup.urls', namespace="oauth2")),
#override the registration default urls - bug with django 1.6
url(r'^accounts/password/change/$',
murmur_acct,
{'acct_func': auth_views.password_change, 'template_name': 'registration/password_change_form.html'},
name='password_change',
),
url(r'^accounts/password/change/done/$',
murmur_acct,
{'acct_func': auth_views.password_change_done, 'template_name': 'registration/password_change_done.html'},
name='password_change_done',
),
url(r'^accounts/password/reset/$',
auth_views.password_reset,
{'password_reset_form' : MurmurPasswordResetForm,
'extra_context' : website_context},
name='password_reset'),
url(r'^accounts/password/reset/done/$',
auth_views.password_reset_done,
{'extra_context' : website_context},
name='password_reset_done'),
url(r'^accounts/password/reset/complete/$',
auth_views.password_reset_complete,
{'extra_context' : website_context},
name='password_reset_complete'),
url(r'^accounts/password/reset/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
{'extra_context' : website_context},
name='password_reset_confirm'),
url(r'^attachment/(?P<hash_filename>[0-9A-Za-z_]+)', 'browser.views.serve_attachment'),
url(r'^accounts/activate/complete/$',
TemplateView.as_view(template_name='registration/activation_complete.html'),
website_context,
name='registration_activation_complete',
),
url(r'^accounts/activate/(?P<activation_key>\w+)/$',
ActivationView.as_view(),
name='registration_activate',
),
url(r'^accounts/register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
website_context,
name='registration_complete',
),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^subscribe/confirm/(?P<token>.+)$', 'browser.views.subscribe_confirm'),
url(r'^activate_group', 'browser.views.activate_group'),
url(r'^deactivate_group', 'browser.views.deactivate_group'),
)
# murmur-only patterns
if WEBSITE == 'murmur':
new_patterns = [
url(r'^about', 'browser.views.about'),
url(r'^posts$', 'browser.views.posts'),
url(r'^unsubscribe_get', 'browser.views.unsubscribe_get'),
url(r'^subscribe_get', 'browser.views.subscribe_get'),
url(r'^post_list', 'browser.views.post_list'),
url(r'^pub_group_list', 'browser.views.pub_group_list'),
url(r'^group_list', 'browser.views.group_list'),
url(r'^groups/(?P<group_name>[\w-]+)/add_list', 'browser.views.add_list_view'),
url(r'^groups/(?P<group_name>[\w-]+)/create_post', 'browser.views.my_group_create_post_view'),
url(r'^my_groups', 'browser.views.my_groups'),
url(r'^list_my_groups', 'browser.views.list_my_groups'),
url(r'^load_post', 'browser.views.load_post'),
url(r'^list_posts', 'browser.views.list_posts'),
url(r'^refresh_posts', 'browser.views.refresh_posts'),
url(r'^insert_post', 'browser.views.insert_post'),
url(r'^insert_reply', 'browser.views.insert_reply'),
url(r'^upvote_get', 'browser.views.upvote_get'),
url(r'^unupvote_get', 'browser.views.unupvote_get'),
url(r'^upvote', 'browser.views.upvote'),
url(r'^unupvote', 'browser.views.unupvote'),
url(r'^follow_tag_get', 'browser.views.follow_tag_get'),
url(r'^unfollow_tag_get', 'browser.views.unfollow_tag_get'),
url(r'^mute_tag_get', 'browser.views.mute_tag_get'),
url(r'^unmute_tag_get', 'browser.views.unmute_tag_get'),
url(r'^follow_tag', 'browser.views.follow_tag'),
url(r'^unfollow_tag', 'browser.views.unfollow_tag'),
url(r'^mute_tag', 'browser.views.mute_tag'),
url(r'^unmute_tag', 'browser.views.unmute_tag'),
url(r'^follow_thread', 'browser.views.follow_thread'),
url(r'^unfollow_thread', 'browser.views.unfollow_thread'),
url(r'^mute_thread', 'browser.views.mute_thread'),
url(r'^unmute_thread', 'browser.views.unmute_thread'),
url(r'^follow', 'browser.views.follow_thread_get'),
url(r'^unfollow', 'browser.views.unfollow_thread_get'),
url(r'^mute', 'browser.views.mute_thread_get'),
url(r'^unmute', 'browser.views.unmute_thread_get'),
url(r'^add_list', 'browser.views.add_list'),
url(r'^delete_list', 'browser.views.delete_list'),
url(r'^adjust_list_can_post', 'browser.views.adjust_list_can_post'),
url(r'^adjust_list_can_receive', 'browser.views.adjust_list_can_receive'),
]
urlpatterns.extend(new_patterns)
# squadbox-only patterns
elif WEBSITE == 'squadbox':
new_patterns = [
url(r'^mod_queue/(?P<group_name>[\w-]+)', 'browser.views.mod_queue'),
# url(r'^approve_get', 'browser.views.approve_get'),
# url(r'^reject_get', 'browser.views.reject_get'),
url(r'^approve_post', 'browser.views.approve_post'),
url(r'^reject_post', 'browser.views.reject_post'),
url(r'^delete_posts', 'browser.views.delete_posts'),
url(r'^delete_post', 'browser.views.delete_post'),
url(r'^whitelist_get', 'browser.views.whitelist_get'),
url(r'^whitelist', 'browser.views.whitelist'),
url(r'^groups/(?P<group_name>[\w-]+)/add_whitelist', 'browser.views.add_whitelist_view'),
url(r'^unblacklist_unwhitelist', 'browser.views.unblacklist_unwhitelist'),
url(r'^blacklist_get', 'browser.views.blacklist_get'),
url(r'^blacklist', 'browser.views.blacklist'),
url(r'^groups/(?P<group_name>[\w-]+)/add_blacklist', 'browser.views.add_blacklist_view'),
url(r'^groups/(?P<group_name>[\w-]+)/rejected', 'browser.views.rejected'),
url(r'^rejected_thread$', 'browser.views.rejected_thread'),
url(r'^moderate_user_for_thread_get', 'browser.views.moderate_user_for_thread_get'),
url(r'^resources', 'browser.views.resources'),
url(r'^email_filters', 'browser.views.email_filters'),
]
urlpatterns.extend(new_patterns)
|
1683230
|
import convokit
import numpy as np
import matplotlib.pyplot as plt
print("Loading corpus")
corpus = convokit.Corpus(filename=convokit.download("reddit-corpus-small"))
print("Computing hypergraph features")
hc = convokit.HyperConvo()
hc.fit_transform(corpus)
print("Computing low-dimensional embeddings")
te = convokit.ThreadEmbedder(n_components=7)
te.fit_transform(corpus)
ce = convokit.CommunityEmbedder(community_key="subreddit", method="tsne")
ce.fit_transform(corpus)
pts = corpus.meta["communityEmbedder"]["pts"]
labels = corpus.meta["communityEmbedder"]["labels"]
xs, ys = zip(*pts)
plt.scatter(xs, ys)
for i, txt in enumerate(labels):
plt.annotate(txt, (xs[i], ys[i]))
plt.savefig("tsne")
plt.show()
|
1683232
|
from threading import Event
from time import sleep
from epics import PV, ca
from py4syn.epics.StandardDevice import StandardDevice
class Shutter(StandardDevice):
#CALLBACK FUNCTION FOR THE SHUTTER STATUS PV
#def onStatusChange(self, **kw):
#self._open = not self._open
#CONSTRUCTOR OF SHUTTER CLASS
def __init__(self, pvStatusName="", pvControlName="", pvHutchName="", mnemonic="", invert=False):
StandardDevice.__init__(self, mnemonic)
self.delay = 0.01
self.invert = invert
self.pvStatus = PV(pvStatusName)
self.pvControl = PV(pvControlName)
self.pvHutch = PV(pvHutchName)
#IF POSSIBLE, OPEN THE SHUTTER AND WAIT UNTIL THE SHUTTER IS REALLY OPEN
def open(self):
if not self.isHutchReady():
raise Exception('Error: ','Hutch Not Ready')
try:
if not self.isOpen():
self.pvControl.put(1, wait=True)
while not self.isOpen():
sleep(self.delay)
else:
print('Warning: ','Shutter already open')
except Exception as e:
print(e.args[0],e.args[1])
    #IF POSSIBLE, CLOSE THE SHUTTER AND WAIT UNTIL THE SHUTTER IS REALLY CLOSED
def close(self):
try:
if self.isOpen():
self.pvControl.put(1, wait=True)
while self.isOpen():
sleep(self.delay)
else:
print('Warning: ','Shutter already closed')
except Exception as e:
print(e.args[0],e.args[1])
def isHutchReady(self):
if(self.invert):
return 1 - self.pvHutch.get()
else:
return self.pvHutch.get()
def isOpen(self):
if(self.invert):
return 1 - self.pvStatus.get()
else:
return self.pvStatus.get()
class ToggleShutter(StandardDevice):
def __init__(self, mnemonic, shutter, shutterReadback):
super().__init__(mnemonic)
self.read = PV(shutterReadback)
self.toggle = PV(shutter)
self._open = self.read.get()
self.changed = Event()
self.read.add_callback(self.onReadChange)
def isOpen(self):
return self._open
def onReadChange(self, value, **kwargs):
self._open = value
self.changed.set()
def wait(self, timeout=3):
ca.flush_io()
self.changed.wait(timeout)
def change(self, open, wait=False):
if self.isOpen() == open:
self.changed.set()
return
self.changed.clear()
self.toggle.put(1)
ca.flush_io()
if wait:
self.wait()
def open(self, wait=False):
self.change(open=True, wait=wait)
def close(self, wait=False):
self.change(open=False, wait=wait)
class SimpleShutter(StandardDevice):
SHUTTER_OPEN = 0
SHUTTER_CLOSE = 1
def __init__(self, mnemonic, shutter, invert=False):
super().__init__(mnemonic)
self.shutter = PV(shutter)
self.invert = invert
def isOpen(self):
if (self.invert):
return (1 - self.shutter.get()) == self.SHUTTER_OPEN
else:
return self.shutter.get() == self.SHUTTER_OPEN
def wait(self, timeout=3):
pass
def open(self, wait=False):
if (self.invert):
self.shutter.put((1 - self.SHUTTER_OPEN), wait=wait)
else:
self.shutter.put(self.SHUTTER_OPEN, wait=wait)
def close(self, wait=False):
if (self.invert):
self.shutter.put((1 - self.SHUTTER_CLOSE), wait=wait)
else:
self.shutter.put(self.SHUTTER_CLOSE, wait=wait)
class NullShutter(StandardDevice):
def __init__(self, mnemonic):
super().__init__(mnemonic)
self.o = False
def isOpen(self):
return self.o
def wait(self, timeout=3):
pass
def open(self, wait=False):
self.o = True
def close(self, wait=False):
self.o = False
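# --- Usage sketch (the PV name below is a placeholder, not a real beamline PV) ---
# shutter = SimpleShutter('shutter1', 'SOL:S:Control', invert=False)
# shutter.open(wait=True)
# assert shutter.isOpen()
# shutter.close(wait=True)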
|
1683253
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable as V
from torch import autograd
import numpy as np
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
#### SELF ARGS ####
self.dropout = args.dropout
# Model optimizer
self.optimizer = None
#### MODEL PARAMS ####
self.fc1 = nn.Linear(784, 400)
self.fc1_drop = nn.Dropout(0.5) if self.dropout else nn.Dropout(0)
self.fc2 = nn.Linear(400, 400)
self.fc2_drop = nn.Dropout(0.5) if self.dropout else nn.Dropout(0)
# self.fc3 = nn.Linear(400, 400)
# self.fc3_drop = nn.Dropout(0.5) if self.dropout else nn.Dropout(0)
self.fc_final = nn.Linear(400, 10)
# Init Matrix which will get Fisher Matrix
self.Fisher = {}
# Self Params
self.params = [param for param in self.parameters()]
def forward(self, x):
# Flatten input
x = x.view(-1, 784)
# Keep it for dropout
# FIRST FC
x_relu = F.relu(self.fc1(x))
x = self.fc1_drop(x_relu)
# SECOND FC
x_relu = F.relu(self.fc2(x))
x = self.fc2_drop(x_relu)
# # THIRD FC
# x_relu = F.relu(self.fc3(x))
# x = self.fc3_drop(x_relu)
# Classification
x = self.fc_final(x)
return x
def estimate_fisher(self, dataset, sample_size, batch_size=64):
# Get loglikelihoods from data
self.F_accum = []
for v, _ in enumerate(self.params):
self.F_accum.append(np.zeros(list(self.params[v].size())))
data_loader = dataset
loglikelihoods = []
for x, y in data_loader:
#print(x.size(), y.size())
x = x.view(batch_size, -1)
x = V(x).cuda() if self._is_on_cuda() else V(x)
y = V(y).cuda() if self._is_on_cuda() else V(y)
loglikelihoods.append(F.log_softmax(self(x), dim=1)[range(batch_size), y.data])
if len(loglikelihoods) >= sample_size // batch_size:
break
#loglikelihood = torch.cat(loglikelihoods).mean(0)
loglikelihood = torch.cat(loglikelihoods).mean(0)
loglikelihood_grads = autograd.grad(loglikelihood, self.parameters(),retain_graph=True)
#print("FINISHED GRADING", len(loglikelihood_grads))
for v in range(len(self.F_accum)):
#print(len(self.F_accum))
            # accumulate squared gradients (the original discarded the result of torch.add)
            self.F_accum[v] += torch.pow(loglikelihood_grads[v], 2).data.cpu().numpy()
for v in range(len(self.F_accum)):
self.F_accum[v] /= sample_size
parameter_names = [
n.replace('.', '__') for n, p in self.named_parameters()
]
#print("RETURNING", len(parameter_names))
return {n: g for n, g in zip(parameter_names, self.F_accum)}
def consolidate(self, fisher):
for n, p in self.named_parameters():
n = n.replace('.', '__')
self.register_buffer('{}_estimated_mean'.format(n), p.data.clone())
#print(dir(fisher[n].data))
            # fisher values are numpy arrays (see estimate_fisher), so convert them
            # to tensors before registering them as buffers
            self.register_buffer('{}_estimated_fisher'.format(n),
                                 torch.from_numpy(fisher[n]).float())
def ewc_loss(self, lamda, cuda=False):
try:
losses = []
for n, p in self.named_parameters():
# retrieve the consolidated mean and fisher information.
n = n.replace('.', '__')
mean = getattr(self, '{}_estimated_mean'.format(n))
fisher = getattr(self, '{}_estimated_fisher'.format(n))
# wrap mean and fisher in Vs.
mean = V(mean)
fisher = V(fisher.data)
# calculate a ewc loss. (assumes the parameter's prior as
# gaussian distribution with the estimated mean and the
# estimated cramer-rao lower bound variance, which is
# equivalent to the inverse of fisher information)
losses.append((fisher * (p-mean)**2).sum())
return (lamda/2)*sum(losses)
except AttributeError:
# ewc loss is 0 if there's no consolidated parameters.
return (
V(torch.zeros(1)).cuda() if cuda else
V(torch.zeros(1))
)
def _is_on_cuda(self):
return next(self.parameters()).is_cuda
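# --- EWC usage sketch (illustrative; assumes a standard classification loop) ---
# fisher = model.estimate_fisher(old_task_loader, sample_size=1024, batch_size=64)
# model.consolidate(fisher)
# loss = F.cross_entropy(model(x), y) + model.ewc_loss(lamda=40.0)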
|
1683255
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class HTMLVisualizer():
def __init__(self, fn_html):
self.fn_html = fn_html
self.content = '<table>'
self.content += '<style> table, th, td {border: 1px solid black;} </style>'
def add_header(self, elements):
self.content += '<tr>'
for element in elements:
self.content += '<th>{}</th>'.format(element)
self.content += '</tr>'
def add_rows(self, rows):
for row in rows:
self.add_row(row)
def add_row(self, elements):
self.content += '<tr>'
# a list of cells
for element in elements:
self.content += '<td>'
# fill a cell
for key, val in element.items():
if key == 'text':
self.content += val
elif key == 'image':
self.content += '<img src="{}" style="max-height:256px;max-width:256px;">'.format(val)
elif key == 'audio':
self.content += '<audio controls><source src="{}"></audio>'.format(val)
elif key == 'video':
self.content += '<video src="{}" controls="controls" style="max-height:256px;max-width:256px;">'.format(val)
self.content += '</td>'
self.content += '</tr>'
def write_html(self):
self.content += '</table>'
with open(self.fn_html, 'w') as f:
f.write(self.content)
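# A minimal usage sketch; the file name and cell contents are illustrative.
if __name__ == '__main__':
    vis = HTMLVisualizer('index.html')
    vis.add_header(['Name', 'Preview'])
    vis.add_rows([[{'text': 'sample'}, {'image': 'sample.png'}]])
    vis.write_html()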
|
1683276
|
from django.test import TestCase
from rest_framework.test import APIClient
from backend.models import UserModel, RoleModel, TenantModel, AwsEnvironmentModel, Command, Document, Parameter
from backend.models.resource.ec2 import Ec2
from datetime import datetime
from unittest import mock
@mock.patch("backend.views.resource_view_set.ControlResourceUseCase")
class InstanceViewSetTestCase(TestCase):
api_path_in_tenant = '/api/tenants/{}/aws-environments/{}/resources/{}'
api_path = '/api/tenants/{}/aws-environments/{}' \
'/regions/ap-northeast-1/services/ec2/resources/i-123456789012/'
@staticmethod
def _create_aws_env_model(name, aws_account_id, tenant):
now = datetime.now()
aws = AwsEnvironmentModel.objects.create(
name=name,
aws_account_id=aws_account_id,
aws_role="test_role",
aws_external_id="test_external_id",
tenant=tenant,
created_at=now,
updated_at=now
)
aws.save()
return aws
@staticmethod
def _create_role_model(id, role_name):
now = datetime.now()
return RoleModel.objects.create(
id=id,
role_name=role_name,
created_at=now,
updated_at=now
)
@staticmethod
def _create_tenant_model(tenant_name):
now = datetime.now()
return TenantModel.objects.create(
tenant_name=tenant_name,
created_at=now,
updated_at=now
)
@staticmethod
def _create_user_model(email, name, password, tenant, role):
now = datetime.now()
user_model = UserModel(
email=email,
name=name,
password=password,
tenant=tenant,
role=role,
created_at=now,
updated_at=now,
)
user_model.save()
return user_model
@classmethod
def setUpClass(cls):
super(InstanceViewSetTestCase, cls).setUpClass()
        # Create a MASTER user belonging to Company1
role_model = cls._create_role_model(2, "test_role")
tenant_model1 = cls._create_tenant_model("test_tenant_users_in_tenant_1")
        # Create an AWS environment belonging to Company1
aws1 = cls._create_aws_env_model("test_name1", "test_aws1", tenant_model1)
user1 = cls._create_user_model(
email="test_email",
name="test_name",
password="<PASSWORD>",
tenant=tenant_model1,
role=role_model,
)
user1.aws_environments.add(aws1)
        # Create a USER-role user belonging to Company1
role_model_user = cls._create_role_model(3, "test_role")
user2 = cls._create_user_model(
email="test_email_USER",
name="test_name",
password="<PASSWORD>",
tenant=tenant_model1,
role=role_model_user,
)
user2.aws_environments.add(aws1)
        # Create a user belonging to Company2
tenant_model2 = cls._create_tenant_model("test_tenant_users_in_tenant_2")
cls._create_user_model(
email="test_email2",
name="test_name2",
password="<PASSWORD>",
tenant=tenant_model2,
role=role_model,
)
        # Create an AWS environment belonging to Company2
cls._create_aws_env_model("test_name2", "test_aws2", tenant_model2)
    # Verify that the API cannot be used without logging in
def test_not_login(self, use_case):
client = APIClient()
        # Execute the code under test
response = client.get(self.api_path_in_tenant.format(1, 1, "?region=test"), format='json')
self.assertEqual(response.status_code, 401)
    # Normal case
def test_get_resource(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="<PASSWORD>")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
fetch_resources = use_case.return_value.fetch_resources
fetch_resources.return_value = {}
        # Execute the code under test
response = client.get(
path=self.api_path_in_tenant.format(tenant_id, aws_id, "?region=test"),
format='json')
fetch_resources.assert_called_once()
self.assertEqual(response.status_code, 200)
    # When the tenant does not exist
def test_no_tenant(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="<PASSWORD>")
client.force_authenticate(user=user_model)
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
fetch_resources = use_case.return_value.fetch_resources
fetch_resources.return_value = {}
        # Execute the code under test
response = client.get(
path=self.api_path_in_tenant.format(100, aws_id, "?region=test"),
format='json')
fetch_resources.assert_not_called()
self.assertEqual(response.status_code, 404)
    # When the AWS environment does not exist
def test_no_aws_env(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="<PASSWORD>")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
fetch_resources = use_case.return_value.fetch_resources
fetch_resources.return_value = {}
        # Execute the code under test
response = client.get(
path=self.api_path_in_tenant.format(tenant_id, 100, "?region=test"),
format='json')
fetch_resources.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource start: normal case
def test_start_resource(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
start_resource = use_case.return_value.start_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, aws_id) + "start/",
format='json')
start_resource.assert_called_once()
self.assertEqual(response.status_code, 200)
    # Resource start: when the tenant does not exist
def test_start_resource_no_tenant(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
start_resource = use_case.return_value.start_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(100, aws_id) + "start/",
format='json')
start_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource start: when the AWS environment does not exist
def test_start_resource_no_aws(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
start_resource = use_case.return_value.start_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, 100) + "start/",
format='json')
start_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource reboot: normal case
def test_reboot_resource(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
reboot_resource = use_case.return_value.reboot_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, aws_id) + "reboot/",
format='json')
reboot_resource.assert_called_once()
self.assertEqual(response.status_code, 200)
    # Resource reboot: when the tenant does not exist
def test_reboot_resource_no_tenant(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
reboot_resource = use_case.return_value.reboot_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(100, aws_id) + "reboot/",
format='json')
reboot_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource reboot: when the AWS environment does not exist
def test_reboot_resource_no_aws(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
reboot_resource = use_case.return_value.reboot_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, 100) + "reboot/",
format='json')
reboot_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource stop: normal case
def test_stop_resource(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
stop_resource = use_case.return_value.stop_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, aws_id) + "stop/",
format='json')
stop_resource.assert_called_once()
self.assertEqual(response.status_code, 200)
    # Resource stop: when the tenant does not exist
def test_stop_resource_no_tenant(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
stop_resource = use_case.return_value.stop_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(100, aws_id) + "stop/",
format='json')
stop_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource stop: when the AWS environment does not exist
def test_stop_resource_no_aws(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
stop_resource = use_case.return_value.stop_resource
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, 100) + "stop/",
format='json')
stop_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource detail retrieval: normal case
def test_retrieve_resource(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
describe_resource = use_case.return_value.describe_resource
mock_resource = mock.Mock()
mock_resource.serialize.return_value = "TEST"
describe_resource.return_value = mock_resource
        # Execute the code under test
response = client.get(
path=self.api_path.format(tenant_id, aws_id),
format='json')
describe_resource.assert_called_once()
mock_resource.serialize.assert_called_once()
self.assertEqual(response.status_code, 200)
    # Resource detail retrieval: when the tenant does not exist
def test_retrieve_resource_no_tenant(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
describe_resource = use_case.return_value.describe_resource
        # Execute the code under test
response = client.get(
path=self.api_path.format(100, aws_id),
format='json')
describe_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Resource detail retrieval: when the AWS environment does not exist
def test_retrieve_resource_no_aws(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
describe_resource = use_case.return_value.describe_resource
        # Execute the code under test
response = client.get(
path=self.api_path.format(tenant_id, 100),
format='json')
describe_resource.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Command execution: normal case
def test_run_command(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
run_command = use_case.return_value.run_command
run_command.return_value = Command(
Document("document_name", [Parameter(key="param", value="value")]),
Ec2("ap-northeast-1", "i-123456789012")
)
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, aws_id) + "run_command/",
data=dict(
name="document_name",
parameters=[dict(key="param", value="value")]
),
format='json')
run_command.assert_called_once()
self.assertEqual(response.status_code, 200)
    # Command execution: when the tenant does not exist
def test_run_command_no_tenant(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
run_command = use_case.return_value.run_command
run_command.return_value = Command(
Document("document_name", [Parameter(key="param", value="value")]),
Ec2("ap-northeast-1", "i-123456789012")
)
        # Execute the code under test
response = client.post(
path=self.api_path.format(100, aws_id) + "run_command/",
data=dict(
name="document_name",
parameters=[dict(key="param", value="value")]
),
format='json')
run_command.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Command execution: when the AWS environment does not exist
def test_run_command_no_aws(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
run_command = use_case.return_value.run_command
run_command.return_value = Command(
Document("document_name", [Parameter(key="param", value="value")]),
Ec2("ap-northeast-1", "i-123456789012")
)
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, 100) + "run_command/",
data=dict(
name="document_name",
parameters=[dict(key="param", value="value")]
),
format='json')
run_command.assert_not_called()
self.assertEqual(response.status_code, 404)
    # Command execution: when the specified service is not EC2
def test_run_command_not_ec2(self, use_case: mock.Mock):
client = APIClient()
user_model = UserModel.objects.get(email="test_email")
client.force_authenticate(user=user_model)
        # Get the ID of Company1
tenant_id = TenantModel.objects.get(tenant_name="test_tenant_users_in_tenant_1").id
        # Get the ID of the AWS environment
aws_id = AwsEnvironmentModel.objects.get(aws_account_id="test_aws1").id
run_command = use_case.return_value.run_command
run_command.return_value = Command(
Document("document_name", [Parameter(key="param", value="value")]),
Ec2("ap-northeast-1", "i-123456789012")
)
        # Execute the code under test
response = client.post(
path=self.api_path.format(tenant_id, aws_id).replace("ec2", "rds") + "run_command/",
data=dict(
name="document_name",
parameters=[dict(key="param", value="value")]
),
format='json')
run_command.assert_not_called()
self.assertEqual(response.status_code, 400)
|
1683321
|
import sys
import os
from datetime import datetime
from typing import List
sys.path.append(os.path.join('..', '..'))
import torch
import numpy as np
from torch.utils.data import Subset
from data import CUB200
from lens.utils.datasets import ImageToConceptDataset, ImageToConceptAndTaskDataset
from lens.utils import metrics
from lens.utils.base import set_seed, ClassifierNotTrainedError
from lens.utils.data import get_transform, get_splits_train_val_test, get_splits_for_fsc, show_batch
from lens.concept_extractor import cnn_models
from lens.concept_extractor.concept_extractor import CNNConceptExtractor
from lens.models.robust_cnn_classifier import RobustCNNClassifier
def concept_extractor_cub(dataset_root="..//data//CUB_200_2011", result_folder=".", epochs=200, seeds=None,
device=torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu"),
metric=metrics.F1Score(), cnn_model=cnn_models.RESNET18, pretrained=True, multi_label=False,
transfer_learning=False, show_image=True, data_augmentation=True, few_shot=False,
reduced=False, l_r=0.003, batch_size=128, binary_loss=True, robust=False,
save_predictions=False) \
-> List[RobustCNNClassifier]:
dataset_name = CUB200
if seeds is None:
seeds = [0]
if binary_loss:
loss = torch.nn.BCEWithLogitsLoss()
else:
loss = torch.nn.CrossEntropyLoss()
models = []
for seed in seeds:
set_seed(seed)
train_transform = get_transform(dataset=dataset_name, data_augmentation=data_augmentation,
inception=cnn_model == cnn_models.INCEPTION)
test_transform = get_transform(dataset=dataset_name, data_augmentation=False,
inception=cnn_model == cnn_models.INCEPTION)
if multi_label:
dataset = ImageToConceptAndTaskDataset(dataset_root, train_transform, dataset_name=dataset_name)
else:
dataset = ImageToConceptDataset(dataset_root, train_transform, dataset_name=dataset_name)
if reduced:
bill_attributes = 8
dataset.attributes = dataset.attributes[:, :bill_attributes]
dataset.attribute_names = dataset.attribute_names[:bill_attributes]
dataset.n_attributes = bill_attributes
if few_shot:
train_set, val_set = get_splits_for_fsc(dataset, train_split=0.8, test_transform=test_transform)
test_idx = train_set.indices + val_set.indices
test_set = Subset(val_set.dataset, test_idx)
else:
train_set, val_set, test_set = get_splits_train_val_test(dataset, test_transform=test_transform)
print("Concept_extractor: " + "Number of attributes", dataset.n_attributes)
if show_image:
show_batch(train_set, train_set.dataset.attribute_names)
show_batch(val_set, val_set.dataset.attribute_names)
show_batch(test_set, test_set.dataset.attribute_names)
name = f"model_{cnn_model}_robust_{robust}_prtr_{pretrained}_trlr_{transfer_learning}_da_{data_augmentation}" \
f"_bl_{binary_loss}_fs_{few_shot}_mlb_{multi_label}_dataset_{dataset_name}_r_{reduced}" \
f"_lr_{l_r}_epochs_{epochs}_seed_{seed}.pth"
name_model = os.path.join(result_folder, name)
print("Concept_extractor: " + name_model)
n_classes = dataset.n_attributes
main_classes = dataset.classes
attributes_names = [a for a in dataset.attribute_names if a not in main_classes]
if robust:
model = RobustCNNClassifier(n_classes, main_classes, attributes_names, cnn_model, loss, transfer_learning,
pretrained, name_model, device)
else:
model = CNNConceptExtractor(n_classes, cnn_model, loss, transfer_learning, pretrained, name_model, device)
try:
model.load(device)
except ClassifierNotTrainedError:
            # No saved checkpoint found: train from scratch (this can take a while)
results = model.fit(train_set=train_set, val_set=val_set, epochs=epochs, num_workers=8, l_r=l_r,
lr_scheduler=True, device=device, metric=metric, batch_size=batch_size)
results.to_csv(os.path.join(result_folder, "results_" + name) + ".csv")
if save_predictions:
with torch.no_grad():
model.eval()
preds, labels = model.predict(dataset, num_workers=8, batch_size=batch_size//8, device=device)
val = model.evaluate(dataset, metric=metric, device=device, outputs=preds, labels=labels)
if multi_label:
pred_path = os.path.join(dataset_root, f"{dataset_name}_multi_label_predictions.npy")
else:
pred_path = os.path.join(dataset_root, f"{dataset_name}_predictions.npy")
np.save(pred_path, preds.cpu().numpy())
print("Concept_extractor: " + "Performance on the whole dataset:", val)
models.append(model)
return models
if __name__ == '__main__':
concept_extractor_cub()
|
1683438
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from beast.util import Execute
from beast.util import String
def describe(**kwds):
return String.single_line(Execute.execute('git describe --tags', **kwds))
|
1683447
|
from ctypes import c_int, c_char_p, POINTER
from llvmlite.binding import ffi
def link_modules(dst, src):
with ffi.OutputString() as outerr:
err = ffi.lib.LLVMPY_LinkModules(dst, src, outerr)
# The underlying module was destroyed
src.detach()
if err:
raise RuntimeError(str(outerr))
ffi.lib.LLVMPY_LinkModules.argtypes = [
ffi.LLVMModuleRef,
ffi.LLVMModuleRef,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_LinkModules.restype = c_int
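# A minimal sketch of exercising link_modules via llvmlite's public
# parse_assembly API (an assumption about the surrounding package); note that
# src is consumed by the call and must not be reused afterwards.
if __name__ == '__main__':
    import llvmlite.binding as llvm
    llvm.initialize()
    llvm.initialize_native_target()
    llvm.initialize_native_asmprinter()
    dst = llvm.parse_assembly("define i32 @one() {\nret i32 1\n}")
    src = llvm.parse_assembly("define i32 @two() {\nret i32 2\n}")
    link_modules(dst, src)
    print(dst)  # the linked module now contains both @one and @two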
|
1683524
|
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image, ImageOps
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_ramne.png', **options):
super().__init__(name='ラムネ', body=body, pantie_position=[412, 835], **options)
self.mask = io.imread('./mask/mask_ramne.png')
self.sign_position = [844, 666]
        try:
            self.add_sign = self.options['add_sign']
        except KeyError:
            self.add_sign = self.ask(question='Add immoral sign?', default=False)
if self.add_sign:
            try:
                sign = Image.open(self.options['fsign'])
            except (KeyError, IOError):
                sign = Image.open('./material/anna_sign.png')
left = ImageOps.mirror(sign)
margin = 25
self.sign = Image.new("RGBA", (sign.size[0] * 2 + margin, sign.size[1]))
self.sign.paste(sign, (sign.size[0] + int(margin/2), 0))
self.sign.paste(left, (0, 0))
def convert(self, image):
pantie = np.array(image)
# Rear to front
patch = np.copy(pantie[-110:-5, 548:, :])[::-1, ::-1, :]
[pr, pc, d] = patch.shape
pantie[105:105 + pr, :pc, :] = patch
pantie = pantie[:-100, :, :]
pantie = np.pad(pantie, [(100, 0), (0, 0), (0, 0)], mode='constant')
pantie = perspective_transform(pantie, np.matrix('1, 0.01, 0; 0, 1, 0; -0.0008,0,1'))
# Affine transform
[r, c, d] = pantie.shape
src_cols = np.linspace(0, c, 10)
src_rows = np.linspace(0, r, 10)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]
shifter_row = np.zeros(src.shape[0])
shifter_col = np.zeros(src.shape[0])
shifter_row = (np.sin(np.linspace(0, 1 * np.pi, src.shape[0]) - np.pi / 4) * 40)
shifter_col = -np.sin(np.linspace(0, 1 * np.pi, src.shape[0]) + np.pi / 8) * 20
shifter_row[shifter_row < 0] = 0
shifter_row = np.convolve(shifter_row, np.ones(10) / 10, mode='valid')
shifter_row = skt.resize(shifter_row, (100, 1), anti_aliasing=True, mode='reflect')[:, 0]
shifter_col = np.convolve(shifter_col, np.ones(10) / 10, mode='valid')
shifter_col = skt.resize(shifter_col, (100, 1), anti_aliasing=True, mode='reflect')[:, 0]
dst_rows = src[:, 1] + shifter_row
dst_cols = src[:, 0] + shifter_col
dst = np.vstack([dst_cols, dst_rows]).T
affin = skt.PiecewiseAffineTransform()
affin.estimate(src, dst)
pantie = skt.warp(pantie, affin)
# Mirroring
pantie = pantie[25:290, 19:430, :]
        pantie = skt.resize(pantie, (int(pantie.shape[0] * 1.47), int(pantie.shape[1] * 1.49)), anti_aliasing=True, mode='reflect')
pantie = np.bitwise_and(np.uint8(pantie[7:, :, :] * 255), self.mask)
[r, c, d] = pantie.shape
npantie = np.zeros((r, c * 2, d), dtype=np.uint8)
npantie[:, c:, :] = pantie
npantie[:, :c, :] = pantie[:, ::-1, :]
return Image.fromarray(npantie)
def patch(self, image, transparent=False):
image = self.convert(image)
if transparent:
patched = Image.new("RGBA", self.body_size)
else:
patched = self.body.copy()
if self.add_sign:
self.paste(patched, self.sign, self.sign_position)
patched = self.paste(patched, image, self.pantie_position)
return patched
|
1683537
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import det_curve
from sklearn.metrics import plot_det_curve
@pytest.fixture(scope="module")
def data():
return load_iris(return_X_y=True)
@pytest.fixture(scope="module")
def data_binary(data):
X, y = data
return X[y < 2], y[y < 2]
@pytest.mark.parametrize(
"response_method", ["predict_proba", "decision_function"]
)
@pytest.mark.parametrize("with_sample_weight", [True, False])
@pytest.mark.parametrize("with_strings", [True, False])
def test_plot_det_curve(
pyplot,
response_method,
data_binary,
with_sample_weight,
with_strings
):
X, y = data_binary
pos_label = None
if with_strings:
y = np.array(["c", "b"])[y]
pos_label = "c"
if with_sample_weight:
rng = np.random.RandomState(42)
sample_weight = rng.randint(1, 4, size=(X.shape[0]))
else:
sample_weight = None
lr = LogisticRegression()
lr.fit(X, y)
viz = plot_det_curve(
lr, X, y, alpha=0.8, sample_weight=sample_weight,
)
y_pred = getattr(lr, response_method)(X)
if y_pred.ndim == 2:
y_pred = y_pred[:, 1]
fpr, fnr, _ = det_curve(
y, y_pred, sample_weight=sample_weight, pos_label=pos_label,
)
assert_allclose(viz.fpr, fpr)
assert_allclose(viz.fnr, fnr)
assert viz.estimator_name == "LogisticRegression"
    # cannot fail thanks to the pyplot fixture
    import matplotlib as mpl  # noqa
assert isinstance(viz.line_, mpl.lines.Line2D)
assert viz.line_.get_alpha() == 0.8
assert isinstance(viz.ax_, mpl.axes.Axes)
assert isinstance(viz.figure_, mpl.figure.Figure)
assert viz.line_.get_label() == "LogisticRegression"
expected_pos_label = 1 if pos_label is None else pos_label
expected_ylabel = (
f"False Negative Rate (Positive label: {expected_pos_label})"
)
expected_xlabel = (
f"False Positive Rate (Positive label: {expected_pos_label})"
)
assert viz.ax_.get_ylabel() == expected_ylabel
assert viz.ax_.get_xlabel() == expected_xlabel
|
1683540
|
import cv2
import myface.face as face
import myface.utils.utils as utils
from myface.classes.test import Face_test
import numpy as np
MODEL_PATH = '../model/'
# openCv video capture : webcam or video
video_capture = cv2.VideoCapture(0)
# video_capture = cv2.VideoCapture('/Users/zane/Movies/video/ET/ET.mp4')
# Load Trained model
model_name = 'A1'
trained_model = utils.load_model(MODEL_PATH, model_name)
Test = Face_test(trained_model)
PROCESS_FRAME_RATE = 2
SCALE_FRAME = 2
frame_cnt = 0
encoded_faces = []
recognize_result = {}
TOLERANCE = 0.55
while True:
    ret, frame = video_capture.read()
    if not ret:
        # Stop when the stream ends or the camera read fails
        break
if frame_cnt == 0:
# begin process frame
small_frame = cv2.resize(
frame, (0, 0), fx=1 / SCALE_FRAME, fy=1 / SCALE_FRAME)
# find all faces and encode
detect_result = face.detect_face_and_encode(small_frame)
encoded_faces = detect_result['encoded_faces']
# recognize all faces
recognize_result = Test.predict_with_encode_faces(
encoded_faces, TOLERANCE)
# print out recognized result
# print('!!!',recognize_result)
# count the frames
frame_cnt = frame_cnt + 1 if frame_cnt < PROCESS_FRAME_RATE - 1 else 0
# display the results
for rect, name in zip(detect_result['detected_faces'], recognize_result):
top = rect.top() * SCALE_FRAME
bottom = rect.bottom() * SCALE_FRAME
left = rect.left() * SCALE_FRAME
right = rect.right() * SCALE_FRAME
label = ''
if name['posibility'].size:
target_index = np.argmin(name['posibility'])
target_label = name['label'][target_index]
target_distance = name['posibility'][target_index]
label = str(target_label) + ' : ' + str(target_distance.round(2))
else:
label = 'Unknown'
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom),
(right, bottom + 35), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, label, (left + 6, bottom + 25),
font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
1683548
|
class Symbol():
def __init__(self, id, type_specific, data_type, valor):
self.id = id
self.type_specific = type_specific
self.data_type = data_type
self.valor = valor
|
1683576
|
from flask_admin.base import MenuLink
"""
By specifying a category that doesn't exist, a new tab will appear in the webserver, with the menu link objects
specified to it.
"""
pandora_plugin = \
MenuLink(
category='Plugins',
name='Pandora-Plugin',
url='https://github.com/airflow-plugins/pandora-plugin'
)
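# A minimal sketch of how such a link is typically exposed through Airflow's
# legacy 1.x plugin manager; the plugin class name here is illustrative.
from airflow.plugins_manager import AirflowPlugin
class PandoraPlugin(AirflowPlugin):
    name = 'pandora_plugin'
    menu_links = [pandora_plugin]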
|
1683589
|
from django.db import models
from db.models import MySQLInst
class QuerySqlLog(models.Model):
    """
    SQL query log table
    """
    operator = models.CharField(max_length=64, blank=True, null=True, verbose_name=u"operator")
    mysqlinst = models.ForeignKey(MySQLInst, blank=True, null=True, on_delete=models.SET_NULL, verbose_name=u"queried instance", related_name="querysqllog_mysqlinst")
    dbname = models.CharField(max_length=64, blank=True, null=True, verbose_name=u"queried database")
    sql = models.TextField(blank=True, verbose_name=u"query SQL")
    create_time = models.DateTimeField(auto_now_add=True, verbose_name=u"created at")
    update_time = models.DateTimeField(blank=True, auto_now=True, verbose_name=u"updated at")
    comment = models.CharField(max_length=64, blank=True, verbose_name=u"remark")
    class Meta:
        verbose_name = u"SQL query log table"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.operator
|
1683606
|
from os import path
import tempfile, textwrap, webbrowser
import sublime, sublime_plugin
class MermaidViewCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
title = path.splitext(path.basename(view.file_name() or 'untitled'))[0]
name = '{0}-{1}-mermaid-view.html'.format(title, view.id())
pathname = path.join(tempfile.gettempdir(), name)
selection = view.sel()
region = selection[0] if selection[0].size() else sublime.Region(0, view.size())
mermaid = view.substr(region)
with open(pathname, mode='w', encoding='utf-8') as f:
f.write(self.html({
'mermaid': mermaid,
'settings': sublime.load_settings('mermaid.sublime-settings'),
'title': title
}))
url = 'file://{}'.format(pathname.replace(' ', '%20').replace('(', '%28').replace(')', '%29'))
webbrowser.get().open_new_tab(url)
def html(self, parameters):
parameters['mermaid'] = textwrap.indent(parameters['mermaid'], ' ' * 10)
parameters['quiet_links_style'] = ''
if parameters['settings'].get('quiet_graph_links'):
parameters['quiet_links_style'] = textwrap.indent("""
svg .edgePath:not(:hover) path.arrowheadPath { fill-opacity: 0.3; }
svg .edgePath:not(:hover) .path { stroke-opacity: 0.3; }
""", ' ' * 4)
parameters['theme'] = parameters['settings'].get('theme')
return textwrap.dedent("""
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Mermaid Viewer: %(title)s</title>
<style>
.info > * {
box-sizing: border-box;
display: inline-block;
font-size: 1rem;
margin: 0;
padding: 0.5rem;
text-decoration: none;
white-space: nowrap;
}
%(quiet_links_style)s
</style>
</head>
<body style="
font-family:'system-ui',sans-serif;
margin:0; overflow:hidden; text-align:center;
">
<div class="info" style="
background:rgba(255,255,255, 0.8); border:1px solid; font-size:0;
box-sizing:border-box; height:71px; margin:1rem; position:fixed;
">
<h1 style="border-bottom:1px solid; display:block;">
<span style="font-weight:normal;">Mermaid Viewer ·</span>
%(title)s
</h1>
<a
href="data:image/svg+xml;base64," download="%(title)s.svg"
style="border-right:1px solid; width:50%%;"
>
Save as SVG
</a>
<a
href="http://svgtopng.com" target="_blank"
style="width:50%%;"
>
Save as PNG
</a>
</div>
<div style="
box-sizing:border-box; height:100vh; overflow:auto;
padding-top:calc(71px + 1rem);
">
<div class="mermaid">
%(mermaid)s
</div>
</div>
<script src="https://unpkg.com/mermaid/dist/mermaid.min.js"></script>
<script>
mermaid.initialize({
flowchart: { useMaxWidth: false },
logLevel: 4,
theme: '%(theme)s',
});
setTimeout(() => {
document.querySelector('a[download]').href +=
btoa(document.querySelector('svg').outerHTML);
}, 1000);
</script>
</body>
</html>
""" % parameters)
|
1683678
|
from Objects.Item import Item
from Objects.Projectile import Projectile
from Engine.World import CreateObject, Object
class Taser(Item):
defName = "Taser"
defSprite = "taser"
def InteractWith(self, object):
if object is None:
return False
if not object.position or object.position == self.position:
return False
self.CreateProjectile(object.position - self.position)
return True
def ClickTile(self, tile):
if tile is None:
return False
if not tile.pos:
return False
self.CreateProjectile(tile.pos.xy() - self.position)
return True
def CreateProjectile(self, direction):
projectile = CreateObject("Objects.Projectile.Projectile", self.tile)
projectile.SetShotDirection(direction)
|
1683684
|
import numpy as np
from dyneusr import DyNeuGraph
from dyneusr.datasets import make_trefoil
from kmapper import KeplerMapper
from sklearn.decomposition import PCA
# Generate synthetic dataset
import tadasets
X = tadasets.sphere(n=500, r=1)
# Sort by first column
inds = np.argsort(X[:, 0])
X = X[inds].copy()
y = np.arange(X.shape[0])
# Generate shape graph using KeplerMapper
mapper = KeplerMapper(verbose=1)
lens = mapper.fit_transform(X, projection=PCA(2))
graph = mapper.map(lens, X, nr_cubes=6, overlap_perc=0.5)
# Visualize the shape graph using DyNeuSR's DyNeuGraph
dG = DyNeuGraph(G=graph, y=y)
dG.visualize('dyneusr4D_sphere.html', template='4D', static=True, show=True)
|
1683695
|
def dedupe_lines(filename):
    # Drop duplicate lines while preserving their original order
    lines = []
    with open(filename, 'r') as f:
        for line in f.readlines():
            if line not in lines:
                lines.append(line)
    with open(filename, 'w') as f:
        f.writelines(lines)
dedupe_lines('urls.txt')
dedupe_lines('archive.txt')
|
1683707
|
import time
from telethon import version
from uniborg.util import beastx_cmd, sudo_cmd
from beastx import ALIVE_NAME, CMD_HELP, Lastupdate
from beastx.Configs import Config
from beastx.modules import currentversion
from beastx import beast
from telethon.tl.functions.users import GetFullUserRequest
from . import OWNER_NAME,OWNER_ID
# Functions
def get_readable_time(seconds: int) -> str:
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
    while count < 4:
        count += 1
        if count < 3:
            carry, unit_value = divmod(seconds, 60)
        else:
            carry, unit_value = divmod(seconds, 24)
        if seconds == 0 and carry == 0:
            break
        time_list.append(int(unit_value))
        seconds = int(carry)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
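# For example, get_readable_time(3725) returns '1h:2m:5s'.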
uptime = get_readable_time((time.time() - Lastupdate))
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "firebot"
PM_IMG = Config.ALIVE_IMAGE
pm_caption = " __**✧✧ BEAST IS UP AND RUNNING SUCCESSFULLY ✧✧**__\n\n"
pm_caption += f"**━━━━━━━|━━━━━|━━━━━━**\n\n"
pm_caption += f"◉ᴍᴀsᴛᴇʀ◉:**[{OWNER_NAME}](tg://user?id={OWNER_ID})**\n\n"#[{OWNER_NAME}](tg://user?id={OWNER_ID})
pm_caption += f"┏━━━━━━━ɪɴғᴏ━━━━━━━━\n"
pm_caption += f"┣•➳➠ `ᴛᴇʟᴇᴛʜᴏɴ:` `{version.__version__}` \n"
pm_caption += f"┣•➳➠ `ᴠᴇʀsɪᴏɴ:` `{currentversion}`\n"
pm_caption += f"┣•➳➠ `ᴜᴘᴛɪᴍᴇ:` `{uptime}`\n"
pm_caption += f"┗━━━━━━━━━━━━━━━━━━━\n"
pm_caption += f" ||•|| sᴇᴄᴜʀɪᴛʏ ʙʏ Beast-x ||•||\n"
@beast.on(beastx_cmd(pattern=r"alive"))
@beast.on(sudo_cmd(pattern=r"alive", allow_sudo=True))
async def chris(alive):
    """ For .alive command, check if the bot is running. """
    await alive.get_chat()
    await beast.send_file(alive.chat_id, PM_IMG, caption=pm_caption)
    await alive.delete()
CMD_HELP.update(
{
"alive": "**ALive**\
\n\n**Syntax : **`.alive`\
\n**Usage :** Check if firebot UserBot is Alive"
}
)
|
1683727
|
import imghdr
import os
import tensorflow as tf
def is_image_valid(filepath):
return imghdr.what(filepath) is not None
def get_image_paths(image_dir):
image_paths = []
for root, directories, filenames in os.walk(image_dir):
image_paths += [os.path.join(root, filename) for filename in filenames]
image_paths = [filepath for filepath in image_paths if is_image_valid(filepath)]
return image_paths
def inputs(image_dir, batch_size, min_queue_examples, input_height, input_width):
def read_images(image_paths):
filename_queue = tf.train.string_input_producer(image_paths)
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
image = tf.image.decode_image(value)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image.set_shape([None, None, 3])
return image
image_paths = get_image_paths(image_dir)
images = read_images(image_paths)
images = tf.image.crop_to_bounding_box(images, 30, 0, 178, 178)
# images = tf.image.random_flip_left_right(images)
images = tf.image.resize_images(images, [input_height, input_width])
total_image_count = len(image_paths)
input_batch = tf.train.shuffle_batch([images],
batch_size=batch_size,
num_threads=16,
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples)
return input_batch, total_image_count
if __name__ == '__main__':
pass
|
1683772
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from Validation.MuonGEMHits.MuonGEMCommonParameters_cfi import GEMValidationCommonParameters
gemDigiHarvesting = DQMEDHarvester("MuonGEMDigisHarvestor",
GEMValidationCommonParameters,
regionIds = cms.untracked.vint32(-1, 1),
stationIds = cms.untracked.vint32(1),
layerIds = cms.untracked.vint32(1, 2, 3, 4, 5, 6),
)
MuonGEMDigisPostProcessors = cms.Sequence(gemDigiHarvesting)
from Configuration.Eras.Modifier_phase2_common_cff import phase2_common
phase2_common.toModify( gemDigiHarvesting, stationIds = (1, 2) )
|
1683782
|
import re
#____________________________________________________________________________
def read_file(file_name):
with open(file_name, 'r') as file:
ret = {}
for line in file.readlines():
if not (line.startswith('*') or line.startswith('C') or line.startswith('c')):
line = line.rstrip('\n\r')
if len(line) > 0:
ll = re.split('= |=\t|!|\t', line)
key = ll[0]
val = ll[1].strip()
if val.startswith('\"'):
val = val.strip('\"')
else:
val = float(val)
ret[key] = val
return ret
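# For illustration, read_file parses simple "key = value" card files such as:
#   * lines starting with '*', 'C' or 'c' are treated as comments and skipped
#   NEVENTS = 1000        ! anything after '!' is dropped
#   TAG     = "signal"    ! quoted values stay strings, bare values become floats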
#____________________________________________________________________________
def UTC_now():
'''
Return int of unix time (in UTC) to nearest second
'''
import calendar
from datetime import datetime
d = datetime.utcnow()
unixtime = calendar.timegm(d.utctimetuple())
return unixtime
#____________________________________________________________________________
def strings_file(file_name):
li = []
with open(file_name, 'r') as file:
for line in file.readlines():
li.append(line)
return li
#____________________________________________________________________________
def printcol(text, fgcol='white', style='normal', bgcol='none'):
'''
Returns input text with some colour and style formatting
'''
fgcols = {
'dgrey': 2,
'ddgrey': 8,
'black': 30,
'dred': 31,
'dgreen': 32,
'dyellow': 33,
'dblue': 34,
'dpink' : 35,
'dcyan': 36,
'pgrey': 37,
'white': 38,
'grey': 90,
'red': 91,
'green': 92,
'yellow': 93,
'blue': 94,
'pink' : 95,
'cyan': 96,
}
bgcols = {
'none': 40,
'red': 41,
'green': 42,
'yellow': 43,
'blue': 44,
'pink' : 45,
'cyan': 46,
'grey': 47,
}
styles = {
'normal': 0,
'bold': 1,
'faded': 2,
'underlined': 4,
'flashing': 5,
'fgbgrev': 7,
'invisible': 8,
}
st = styles[style]
fg = fgcols[fgcol]
bg = bgcols[bgcol]
    fmt = ';'.join([str(st), str(fg), str(bg)])
    colstring = '\x1b[%sm%s\x1b[0m' % (fmt, text)
return colstring
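# For example, printcol('WARNING', fgcol='yellow', style='bold') returns
# '\x1b[1;93;40mWARNING\x1b[0m', i.e. bold yellow text on the default background.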
#____________________________________________________________________________
def plot_multiscatter(dataframe, xvariables, xlabel, yvariable, ylabel, save_name):
"""Wrapper for matplotlib scatter
Plot multiple scatter plots on the same figure
Will also save with normal, log and symlog
"""
import matplotlib.pyplot as plt
maximum = find_set_maximum(dataframe, xvariables)
minimum = find_set_minimum(dataframe, xvariables)
for xvar in xvariables:
plt.scatter(dataframe[xvar], dataframe[yvariable],label=xvar)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
plt.xlim(xmin=minimum, xmax=maximum)
plt.savefig(save_name)
    # assumes there is only one "." in save_name
save_name_symlog = save_name.replace('.', '_symlog.')
plt.xscale('symlog')
plt.savefig(save_name_symlog)
plt.xscale('log')
save_name_log = save_name_symlog.replace('_symlog','_log')
plt.savefig(save_name_log)
# clear plot
plt.clf()
#____________________________________________________________________________
def plot_scatter(dataframe, xvariable, xlabel, yvariable, ylabel, save_name, do_log=False):
'''
Wrapper for matplotlib scatter
Plot and save a simple scatter plot of xvariable against yvariable
Will label the axes as xlabel, ylabel
Args:
dataframe: pandas DataFrame.
xvariable: Name of xvariable to be plotted, must be inside dataframe
yvariable: Name of yvariable to be plotted, must be inside dataframe
xlabel: string, x-axis label
ylabel: string, y-axis label
save_name: string, path of where to save the plot, must have file extension (e.g. .pdf)
do_log: bool, have x-axis log scale (default false)
'''
import matplotlib.pyplot as plt
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.scatter(dataframe[xvariable], dataframe[yvariable])
if do_log:
plt.xscale('log')
plt.savefig(save_name)
plt.clf()
#_____________________________________________________________________________
def find_set_minimum(df, columns):
'''
Find minimum of a set of columns from a pandas dataframe
'''
import numpy as np
return np.amin( df.loc[:, columns].min(axis=1) )
#_____________________________________________________________________________
def find_set_maximum(df, columns):
'''
Find maximum of a set of columns from a pandas dataframe
'''
import numpy as np
return np.amax( df.loc[:, columns].max(axis=1) )
|
1683872
|
import plotly.graph_objs as go
#import plotly.plotly as py
import plotly.offline as offline
from selenium import webdriver
# from matplotlib import rc
# #==========================
# # use these lines for latex
# #==========================
# rc('text',usetex=True)
# font = {'family' : 'serif',
# 'weight' : 'bold',
# 'size' : 18}
# rc('font', **font)
def draw_a_carpet(A,B,Z,X,Y,save_file,valid,cstr,xstr,ystr):
minval = round(min(Z),-1)
maxval = round(max(Z),-1)
wtstep = (maxval-minval)/10
print('range to plot is ',minval,maxval)
#define the font
font1 =dict(
family='Helvetica',
size=18
)
#draw color contour
trace1 = go.Contourcarpet(
a = A,
b = B,
z = Z,
autocontour = False,
contours = dict(
start = minval,
end = maxval,
size = wtstep
),
line = dict(
width = 0.5,
smoothing = 0
)
, colorbar = dict(
# len = 0.4,
x = 0.9,
title=cstr,
tickfont=font1,
titlefont=dict(size=16)
)
)
#define carpet location
trace2 = go.Carpet(
a = A,
b = B,
x = X,
y = Y,
aaxis = dict(
# tickprefix = 'DL = ',
smoothing = 0,
nticks = 4,
type = 'linear',
showticklabels='none',
# categoryorder='category '
),
baxis = dict(
# tickprefix = 'sigma=',
# smoothing = 0,
# tickmode = 'array',
# tickvals = [min(B),max(B)],
# tickfont = font1,
showticklabels='none',
# nticks = 3,
)
)
#=========================================================
# highlight valid designs
#=========================================================
v = [i for i, x in enumerate(valid) if x == 1]
x = X[v]
y = Y[v]
trace3 = go.Scatter(
mode = 'markers',
x = x,
y = y,
opacity = 0.2,
marker = dict(symbol='circle-dot',size = 10,line = dict(color = 'black', width = 2),color='white'),
showlegend = False )
#=========================================================
# highlight min and max
#=========================================================
Zlist = list(Z[v])
highest = max(Zlist)
lowest = min(Zlist)
ihigh = [ list(Z).index(highest) ]
ilow = [ list(Z).index(lowest) ]
print(ihigh,ilow,highest,lowest)
trace4 = go.Scatter(
mode = 'markers',
x = X[ilow],
y = Y[ilow],
marker = dict(symbol='circle-dot',size = 12,line = dict(color = 'green', width = 4),color='white'), showlegend = False )
trace5 = go.Scatter(
mode = 'markers',
x = X[ihigh],
y = Y[ihigh],
marker = dict(symbol='circle-dot',size = 12,line = dict(color = 'red', width = 4),color='white'), showlegend = False )
#=========================================================
# call plotly interface
#=========================================================
data = [trace1, trace2,trace3,trace4,trace5]
#data = [trace1]#, trace2]
layout = go.Layout(
margin = dict(
t = 30,
r = 60,
b = 60,
l = 70
),
yaxis = dict(
# range = [0.0,0.18],
title = ystr,
titlefont = font1,
tickfont = font1
),
xaxis = dict(
# range = [min(trace2.x)-2,max(trace2.x)+2],
title = xstr,
titlefont = font1,
tickfont = font1
)
)
# try:
# fig = go.Figure(data = data, layout = layout)
# py.image.save_as(fig, filename=save_file+'.png')
offline.plot({'data':data,'layout':layout},image='svg', auto_open=False, image_width=1000, image_height=500,show_link=False)
try:
driver = webdriver.PhantomJS(executable_path="/usr/local/bin/phantomjs")
except:
driver = webdriver.PhantomJS(executable_path="/usr/bin/phantomjs")
driver.set_window_size(1000, 500)
driver.get('temp-plot.html')
driver.save_screenshot(save_file + '.png')
print('saving file',save_file+'.png')
# except:
# py.sign_in('nenanth', 'eLGDoeHWkICx2mqrdpUu') # Replace the username, and API key with your credentials.
# fig = go.Figure(data = data, layout = layout)
# py.image.save_as(fig, filename=save_file+'.png')
return None
|
1683917
|
import argparse
import os
import torch
from compressai.models import MeanScaleHyperprior
from compressai.zoo.pretrained import load_pretrained
from torchdistill.common import yaml_util
from torchdistill.common.main_util import load_ckpt
from torchdistill.common.module_util import count_params
from torchdistill.models.custom.bottleneck.classification.resnet import CustomResNet
from torchdistill.models.official import OFFICIAL_MODEL_DICT, get_image_classification_model, \
get_object_detection_model, get_semantic_segmentation_model
from torchdistill.models.registry import get_model
from compression.registry import get_compression_model
from custom.classifier import InputCompressionClassifier, get_custom_model as get_custom_classifier
from custom.detector import InputCompressionDetector, get_custom_model as get_custom_detector
from custom.model import BottleneckResNet
from custom.segmenter import InputCompressionSegmenter, get_custom_model as get_custom_segmenter
from custom.util import check_if_module_exits, load_bottleneck_model_ckpt
def get_argparser():
parser = argparse.ArgumentParser(description='Check model size')
parser.add_argument('--classifier', help='classifier config file path')
parser.add_argument('--detector', help='detector config file path')
parser.add_argument('--segmenter', help='segmenter config file path')
parser.add_argument('--model_name', help='model name available in torchvision')
return parser
def get_model_config(config_file_path):
config = yaml_util.load_yaml_file(os.path.expanduser(config_file_path))
models_config = config['models']
model_config = models_config.get('student_model', None)
if model_config is None:
model_config = models_config['model']
return model_config
def check_model_size(model, model_name):
if not isinstance(model, (list, tuple)):
model = [model]
num_params = sum(count_params(m) for m in model)
print('{}: {} parameters'.format(model_name, num_params))
def load_classifier(model_config, distributed=False, sync_bn=False):
if 'compressor' not in model_config:
model = get_image_classification_model(model_config, distributed, sync_bn)
if model is None:
repo_or_dir = model_config.get('repo_or_dir', None)
model = get_model(model_config['name'], repo_or_dir, **model_config['params'])
model_ckpt_file_path = model_config['ckpt']
if not os.path.isfile(model_ckpt_file_path) and 'start_ckpt' in model_config:
model_ckpt_file_path = model_config['start_ckpt']
if load_bottleneck_model_ckpt(model, model_ckpt_file_path):
return model
load_ckpt(model_ckpt_file_path, model=model, strict=False)
return model
# Define compressor
compressor_config = model_config['compressor']
compressor = get_compression_model(compressor_config['name'], **compressor_config['params'])
compressor_ckpt_file_path = compressor_config['ckpt']
if os.path.isfile(compressor_ckpt_file_path):
print('Loading compressor parameters')
state_dict = torch.load(compressor_ckpt_file_path)
# Old parameter keys do not work with recent version of compressai
state_dict = load_pretrained(state_dict)
compressor.load_state_dict(state_dict)
print('Updating compression model')
compressor.update()
# Define classifier
classifier_config = model_config['classifier']
classifier = get_image_classification_model(classifier_config, distributed, sync_bn)
if classifier is None:
repo_or_dir = classifier_config.get('repo_or_dir', None)
classifier = get_model(classifier_config['name'], repo_or_dir, **classifier_config['params'])
classifier_ckpt_file_path = classifier_config['ckpt']
load_ckpt(classifier_ckpt_file_path, model=classifier, strict=True)
custom_model = get_custom_classifier(model_config['name'], compressor, classifier, **model_config['params'])
return custom_model
def check_classifier_size(classifier, model_name):
if isinstance(classifier, InputCompressionClassifier) and isinstance(classifier.compressor, MeanScaleHyperprior):
check_model_size([classifier.compressor.g_a, classifier.compressor.h_a, classifier.compressor.h_s],
'Encoder in {}'.format(model_name))
check_model_size([classifier.compressor.entropy_bottleneck, classifier.compressor.gaussian_conditional],
'Entropy bottleneck + Gaussian conditional in {}'.format(model_name))
check_model_size([classifier.compressor.h_s, classifier.compressor.g_s],
'Decoder in {}'.format(model_name))
elif isinstance(classifier, InputCompressionClassifier):
check_model_size(classifier.compressor.encoder, 'Encoder in {}'.format(model_name))
check_model_size(classifier.compressor.entropy_bottleneck,
'Entropy bottleneck in {}'.format(model_name))
check_model_size(classifier.compressor.decoder, 'Decoder in {}'.format(model_name))
elif isinstance(classifier, CustomResNet):
check_model_size(classifier.bottleneck.encoder, 'Encoder in {}'.format(model_name))
elif isinstance(classifier, BottleneckResNet):
check_model_size(classifier.backbone.bottleneck_layer.encoder, 'Encoder in {}'.format(model_name))
check_model_size(classifier.backbone.bottleneck_layer.entropy_bottleneck,
'Entropy bottleneck in {}'.format(model_name))
# Total model size
check_model_size(classifier, model_name)
def load_detector(model_config):
if 'compressor' not in model_config:
model = get_object_detection_model(model_config)
if model is None:
repo_or_dir = model_config.get('repo_or_dir', None)
model = get_model(model_config['name'], repo_or_dir, **model_config['params'])
model_ckpt_file_path = model_config['ckpt']
if load_bottleneck_model_ckpt(model, model_ckpt_file_path):
return model
load_ckpt(model_ckpt_file_path, model=model, strict=True)
return model
# Define compressor
compressor_config = model_config['compressor']
compressor = get_compression_model(compressor_config['name'], **compressor_config['params']) \
if compressor_config is not None else None
if compressor is not None:
compressor_ckpt_file_path = compressor_config['ckpt']
if os.path.isfile(compressor_ckpt_file_path):
print('Loading compressor parameters')
state_dict = torch.load(compressor_ckpt_file_path)
# Old parameter keys do not work with recent version of compressai
state_dict = load_pretrained(state_dict)
compressor.load_state_dict(state_dict)
print('Updating compression model')
compressor.update()
# Define detector
detector_config = model_config['detector']
detector = get_object_detection_model(detector_config)
if detector is None:
repo_or_dir = detector_config.get('repo_or_dir', None)
detector = get_model(detector_config['name'], repo_or_dir, **detector_config['params'])
detector_ckpt_file_path = detector_config['ckpt']
load_ckpt(detector_ckpt_file_path, model=detector, strict=True)
custom_model = get_custom_detector(model_config['name'], compressor, detector, **model_config['params'])
return custom_model
def check_detector_size(detector, model_name):
if isinstance(detector, InputCompressionDetector)\
and isinstance(detector.detector.transform.compressor, MeanScaleHyperprior):
transform = detector.detector.transform
check_model_size([transform.compressor.g_a, transform.compressor.h_a,
transform.compressor.h_s], 'Encoder in {}'.format(model_name))
check_model_size([transform.compressor.entropy_bottleneck,
transform.compressor.gaussian_conditional],
'Entropy bottleneck + Gaussian conditional in {}'.format(model_name))
check_model_size([transform.compressor.h_s, transform.compressor.g_s],
'Decoder in {}'.format(model_name))
elif isinstance(detector, InputCompressionDetector):
transform = detector.detector.transform
check_model_size(transform.compressor.encoder, 'Encoder in {}'.format(model_name))
check_model_size(transform.compressor.entropy_bottleneck,
'Entropy bottleneck in {}'.format(model_name))
check_model_size(transform.compressor.decoder, 'Decoder in {}'.format(model_name))
elif check_if_module_exits(detector, 'backbone.body.layer1.encoder'):
check_model_size([detector.backbone.body.conv1, detector.backbone.body.bn1,
detector.backbone.body.layer1.encoder], 'Encoder in {}'.format(model_name))
elif check_if_module_exits(detector, 'backbone.body.bottleneck_layer'):
check_model_size(detector.backbone.body.bottleneck_layer.encoder, 'Encoder in {}'.format(model_name))
check_model_size(detector.backbone.body.bottleneck_layer.entropy_bottleneck,
'Entropy bottleneck in {}'.format(model_name))
# Total model size
check_model_size(detector, model_name)
def load_segmenter(model_config):
if 'compressor' not in model_config:
model = get_semantic_segmentation_model(model_config)
if model is None:
repo_or_dir = model_config.get('repo_or_dir', None)
model = get_model(model_config['name'], repo_or_dir, **model_config['params'])
model_ckpt_file_path = model_config['ckpt']
if load_bottleneck_model_ckpt(model, model_ckpt_file_path):
return model
load_ckpt(model_ckpt_file_path, model=model, strict=True)
return model
# Define compressor
compressor_config = model_config['compressor']
compressor = get_compression_model(compressor_config['name'], **compressor_config['params'])
compressor_ckpt_file_path = compressor_config['ckpt']
if os.path.isfile(compressor_ckpt_file_path):
print('Loading compressor parameters')
state_dict = torch.load(compressor_ckpt_file_path)
# Old parameter keys do not work with recent version of compressai
state_dict = load_pretrained(state_dict)
compressor.load_state_dict(state_dict)
print('Updating compression model')
compressor.update()
# Define segmenter
segmenter_config = model_config['segmenter']
segmenter = get_semantic_segmentation_model(segmenter_config)
if segmenter is None:
repo_or_dir = segmenter_config.get('repo_or_dir', None)
segmenter = get_model(segmenter_config['name'], repo_or_dir, **segmenter_config['params'])
segmenter_ckpt_file_path = segmenter_config['ckpt']
load_ckpt(segmenter_ckpt_file_path, model=segmenter, strict=True)
custom_model = get_custom_segmenter(model_config['name'], compressor, segmenter, **model_config['params'])
return custom_model
def check_segmenter_size(segmenter, model_name):
if isinstance(segmenter, InputCompressionSegmenter)\
and isinstance(segmenter.compressor, MeanScaleHyperprior):
check_model_size([segmenter.compressor.g_a, segmenter.compressor.h_a,
segmenter.compressor.h_s], 'Encoder in {}'.format(model_name))
check_model_size([segmenter.compressor.entropy_bottleneck,
segmenter.compressor.gaussian_conditional],
'Entropy bottleneck + Gaussian conditional in {}'.format(model_name))
check_model_size([segmenter.compressor.h_s, segmenter.compressor.g_s],
'Decoder in {}'.format(model_name))
elif isinstance(segmenter, InputCompressionSegmenter):
check_model_size(segmenter.compressor.encoder, 'Encoder in {}'.format(model_name))
check_model_size(segmenter.compressor.entropy_bottleneck,
'Entropy bottleneck in {}'.format(model_name))
check_model_size(segmenter.compressor.decoder, 'Decoder in {}'.format(model_name))
elif check_if_module_exits(segmenter, 'backbone.layer1.encoder'):
check_model_size([segmenter.backbone.conv1, segmenter.backbone.bn1,
segmenter.backbone.layer1.encoder], 'Encoder in {}'.format(model_name))
elif check_if_module_exits(segmenter, 'backbone.bottleneck_layer'):
check_model_size(segmenter.backbone.bottleneck_layer.encoder, 'Encoder in {}'.format(model_name))
check_model_size(segmenter.backbone.bottleneck_layer.entropy_bottleneck,
'Entropy bottleneck in {}'.format(model_name))
# Total model size
check_model_size(segmenter, model_name)
def main(args):
torchvision_model_name = args.model_name
classifier_config_file_path = args.classifier
detector_config_file_path = args.detector
segmenter_config_file_path = args.segmenter
if torchvision_model_name is not None:
model = OFFICIAL_MODEL_DICT[torchvision_model_name](pretrained=False)
check_model_size(model, torchvision_model_name)
if classifier_config_file_path is not None:
model_config = get_model_config(classifier_config_file_path)
classifier = load_classifier(model_config)
check_classifier_size(classifier, model_config['name'])
if detector_config_file_path is not None:
model_config = get_model_config(detector_config_file_path)
detector = load_detector(model_config)
check_detector_size(detector, model_config['name'])
if segmenter_config_file_path is not None:
model_config = get_model_config(segmenter_config_file_path)
segmenter = load_segmenter(model_config)
check_segmenter_size(segmenter, model_config['name'])
if __name__ == '__main__':
argparser = get_argparser()
main(argparser.parse_args())
|
1683943
|
from unittest import TestCase
import json
from fac.utils import JSONDict, JSONList
class TestJSONDict(TestCase):
def setUp(self):
self.orig = {'foo': 42, 'bar': True, 'baz': [1, 2], 'qux': {'lok': 3}}
self.d = JSONDict(self.orig)
def test_json(self):
self.assertDictEqual(json.loads(str(self.d)), self.orig)
def test_read(self):
self.assertEqual(self.d.foo, 42)
self.assertEqual(self.d.bar, True)
self.assertEqual(self.d.baz, [1, 2])
self.assertEqual(self.d.qux, {'lok': 3})
self.assertEqual(self.d.qux.lok, 3)
def test_write(self):
self.d.foo = 43
self.assertEqual(self.d.foo, 43)
self.d.qux.lok = 45
self.assertEqual(self.d.qux.lok, 45)
self.d.other = "hey"
self.assertEqual(self.d.other, "hey")
def test_no_attr(self):
with self.assertRaises(AttributeError):
self.d.nope
self.assertTrue(self.d.qux)
with self.assertRaises(AttributeError):
self.d.qux.nope
class TestJSONList(TestCase):
def setUp(self):
self.orig = ["foo", "bar", 42, True]
self.l = JSONList(self.orig)
self.d = JSONDict({'foo': [{'bar': 0}, {'bar': 1}]})
def test_json(self):
self.assertListEqual(json.loads(str(self.l)), self.orig)
def test_read(self):
self.assertEqual(self.d.foo[0], {'bar': 0})
self.assertEqual(self.d.foo[1], {'bar': 1})
self.assertEqual(self.d.foo[0].bar, 0)
self.assertEqual(self.d.foo[1].bar, 1)
def test_write(self):
self.d.foo[0].bar = 42
self.assertEqual(self.d.foo[0].bar, 42)
self.d.foo[0] = {'baz': 1}
self.assertEqual(self.d.foo[0].baz, 1)
self.d.foo[0].baz = 42
self.assertEqual(self.d.foo[0].baz, 42)
self.d.foo.append({'baz': 10})
self.assertEqual(self.d.foo[2].baz, 10)
self.d.foo.insert(0, {'qux': 20})
self.assertEqual(self.d.foo[0].qux, 20)
|
1683989
|
from keras import backend as K
from keras.engine import Layer
class FusionLayer(Layer):
def call(self, inputs, mask=None):
imgs, embs = inputs
reshaped_shape = imgs.shape[:3].concatenate(embs.shape[1])
embs = K.repeat(embs, imgs.shape[1] * imgs.shape[2])
embs = K.reshape(embs, reshaped_shape)
return K.concatenate([imgs, embs], axis=3)
def compute_output_shape(self, input_shapes):
# Must have 2 tensors as input
assert input_shapes and len(input_shapes) == 2
imgs_shape, embs_shape = input_shapes
# The batch size of the two tensors must match
assert imgs_shape[0] == embs_shape[0]
# (batch_size, width, height, embedding_len + depth)
return imgs_shape[:3] + (imgs_shape[3] + embs_shape[1],)
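# A minimal sanity check of the shape bookkeeping above; the image and
# embedding shapes here are illustrative, not taken from any particular model.
if __name__ == '__main__':
    layer = FusionLayer()
    out_shape = layer.compute_output_shape([(None, 28, 28, 256), (None, 1001)])
    print(out_shape)  # (None, 28, 28, 1257): depth 256 + embedding length 1001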
|
1684006
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from Pruning.utils import prune_rate, arg_nonzero_min
def weight_prune(model, pruning_perc):
'''
Prune pruning_perc% weights globally (not layer-wise)
arXiv: 1606.09274
'''
all_weights = []
for p in model.parameters():
if len(p.data.size()) != 1:
all_weights += list(torch.abs(p).cpu().data.numpy().flatten())
threshold = np.percentile(np.array(all_weights), pruning_perc)
# generate mask
masks = []
for p in model.parameters():
if len(p.data.size()) != 1:
pruned_inds = torch.abs(p).data > threshold
masks.append(pruned_inds.float())
return masks
def per_layer_weight_prune(model, pruning_perc):
'''
Work in progress.
pruning_perc[0] : prune rate of other weights
pruning_perc[1] : prune rate of point weights
'''
other_weights = []
point_weights = []
for name, p in model.named_parameters():
if len(p.data.size()) != 1:
if 'layer' in name:
if 'conv1' in name or 'conv3' in name:
point_weights += list(torch.abs(p).cpu().data.numpy().flatten())
else:
other_weights += list(torch.abs(p).cpu().data.numpy().flatten())
else:
other_weights += list(torch.abs(p).cpu().data.numpy().flatten())
threshold_other = np.percentile(np.array(other_weights), pruning_perc[0])
threshold_point = np.percentile(np.array(point_weights), pruning_perc[1])
num_of_params = [len(other_weights), len(point_weights)]
# generate mask
masks = []
for name, p in model.named_parameters():
if len(p.data.size()) != 1:
if 'layer' in name:
if 'conv1' in name or 'conv3' in name:
pruned_inds = torch.abs(p).data > threshold_point
masks.append(pruned_inds.float())
else:
pruned_inds = torch.abs(p).data > threshold_other
masks.append(pruned_inds.float())
else:
pruned_inds = torch.abs(p).data > threshold_other
masks.append(pruned_inds.float())
return masks, num_of_params
def prune_one_filter(model, masks):
'''
Prune the single least ``important'' feature map, ranked by the scaled
l2-norm of its kernel weights
arXiv:1611.06440
'''
NO_MASKS = False
# construct masks if there is not yet
if not masks:
masks = []
NO_MASKS = True
values = []
for p in model.parameters():
if len(p.data.size()) == 4: # nasty way of selecting conv layer
p_np = p.data.cpu().numpy()
# construct masks if there is not
if NO_MASKS:
masks.append(np.ones(p_np.shape).astype('float32'))
# find the scaled l2 norm for each filter this layer
value_this_layer = np.square(p_np).sum(axis=1).sum(axis=1)\
.sum(axis=1)/(p_np.shape[1]*p_np.shape[2]*p_np.shape[3])
# normalization (important)
value_this_layer = value_this_layer / \
np.sqrt(np.square(value_this_layer).sum())
min_value, min_ind = arg_nonzero_min(list(value_this_layer))
values.append([min_value, min_ind])
assert len(masks) == len(values), "something wrong here"
values = np.array(values)
# set mask corresponding to the filter to prune
to_prune_layer_ind = np.argmin(values[:, 0])
to_prune_filter_ind = int(values[to_prune_layer_ind, 1])
masks[to_prune_layer_ind][to_prune_filter_ind] = 0.
print('Prune filter #{} in layer #{}'.format(
to_prune_filter_ind,
to_prune_layer_ind))
return masks
def filter_prune(model, pruning_perc):
'''
Prune filters one by one until reach pruning_perc
(not iterative pruning)
'''
masks = []
current_pruning_perc = 0.
while current_pruning_perc < pruning_perc:
masks = prune_one_filter(model, masks)
model.set_masks(masks)
current_pruning_perc = prune_rate(model, verbose=False)
print('{:.2f} pruned'.format(current_pruning_perc))
return masks
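# A minimal usage sketch on a toy network; real use pairs these masks with the
# repo's MaskedConv-style models via model.set_masks(masks), which a plain
# nn.Sequential does not provide.
if __name__ == '__main__':
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 4, 3))
    masks = weight_prune(net, 60.0)  # mask the 60% smallest-magnitude weights
    print([m.shape for m in masks])  # one mask per non-bias parameter tensor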
|
1684076
|
class Instruccion:
'Abstract base class for instructions'
def __init__(self, type, line, column):
self.type = type
self.line = line
self.column = column
def execute(self):
# subclasses are expected to set self.val before execute() is called
return self.val
def __repr__(self):
return str(self.__dict__)
|
1684093
|
class Solution:
# @param {integer[]} nums
# @return {string}
def largestNumber(self, nums):
nums = sorted([str(num) for num in nums])
result = ""
while nums:
num = self.pickone(nums, None)
result += num
nums.remove(num)
if result[0] == '0':
result = '0'
return result
def pickone(self, nums, memo):
if len(nums) == 1:
return nums[0]
candidates = []
c = nums[-1][0]
idx = len(nums)-1
while idx >= 0 and nums[idx][0] == c:
candidates.append(nums[idx])
idx -= 1
if len(candidates) == 1:
return candidates[0]
else:
if not memo:
memo = c
candidates = sorted([candidate[1:] for candidate in candidates])
if '' in candidates:
while '' in candidates:
candidates.remove('')
if not candidates:
return c
best = self.pickone(candidates, memo)
if memo > best:
return c
else:
return c + best
else:
return c + self.pickone(candidates, memo)
solution = Solution()
print(solution.largestNumber([4993, 9779, 9200]))
|
1684100
|
import torch.nn as nn
def my_loss(classifier, regression, points, mode):
#classifier is the predicted class
#regression is an array of predicted coordinates
#points is an array of ground truth coordinates
#mode is the ground truth class
alpha = 0.5
MSE = nn.MSELoss()
MSEl = MSE(regression, points)
cross_entropy = nn.CrossEntropyLoss()
ce = cross_entropy(classifier, mode)
loss = ce*alpha + MSEl*(1-alpha)
return loss, MSEl, ce
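# A minimal sketch exercising my_loss with random tensors; the batch size,
# class count, and coordinate dimension below are illustrative.
if __name__ == '__main__':
    import torch
    classifier = torch.randn(8, 4)    # predicted logits over 4 classes
    mode = torch.randint(0, 4, (8,))  # ground-truth class indices
    regression = torch.randn(8, 2)    # predicted (x, y) coordinates
    points = torch.randn(8, 2)        # ground-truth coordinates
    loss, mse, ce = my_loss(classifier, regression, points, mode)
    print(loss.item(), mse.item(), ce.item())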
|
1684103
|
import random
from multiprocessing import cpu_count
from transformers import (ConstantLRSchedule, WarmupLinearSchedule, WarmupConstantSchedule)
from modeling.modeling_rn import *
from utils.optimization_utils import OPTIMIZER_CLASSES
from utils.parser_utils import *
from utils.relpath_utils import *
def get_node_feature_encoder(encoder_name):
return encoder_name.replace('-cased', '-uncased')
def cal_2hop_rel_emb(rel_emb):
n_rel = rel_emb.shape[0]
u, v = np.meshgrid(np.arange(n_rel), np.arange(n_rel))
expanded = rel_emb[v.reshape(-1)] + rel_emb[u.reshape(-1)]
return np.concatenate([rel_emb, expanded], 0)
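# Worked example (illustrative): with n_rel base relations, cal_2hop_rel_emb
# stacks the n_rel originals with all n_rel**2 ordered pair sums, e.g.
# cal_2hop_rel_emb(np.eye(2)) has shape (2 + 2**2, 2) == (6, 2).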
def evaluate_accuracy(eval_set, model):
n_samples, n_correct = 0, 0
model.eval()
with torch.no_grad():
for qids, labels, *input_data in eval_set:
logits, _ = model(*input_data)
n_correct += (logits.argmax(1) == labels).sum().item()
n_samples += labels.size(0)
return n_correct / n_samples
def main():
parser = get_parser()
args, _ = parser.parse_known_args()
parser.add_argument('--mode', default='train', choices=['train', 'eval', 'pred'], help='run training or evaluation')
parser.add_argument('--save_dir', default=f'./saved_models/rn/', help='model output directory')
# for finding relation paths
parser.add_argument('--cpnet_vocab_path', default='./data/cpnet/concept.txt')
parser.add_argument('--cpnet_graph_path', default='./data/cpnet/conceptnet.en.pruned.graph')
parser.add_argument('-p', '--nprocs', type=int, default=cpu_count(), help='number of processes to use')
# data
parser.add_argument('--train_rel_paths', default=f'./data/{args.dataset}/paths/train.relpath.2hop.jsonl')
parser.add_argument('--dev_rel_paths', default=f'./data/{args.dataset}/paths/dev.relpath.2hop.jsonl')
parser.add_argument('--test_rel_paths', default=f'./data/{args.dataset}/paths/test.relpath.2hop.jsonl')
parser.add_argument('--train_adj', default=f'./data/{args.dataset}/graph/train.graph.adj.pk')
parser.add_argument('--dev_adj', default=f'./data/{args.dataset}/graph/dev.graph.adj.pk')
parser.add_argument('--test_adj', default=f'./data/{args.dataset}/graph/test.graph.adj.pk')
parser.add_argument('--train_node_features', default=f'./data/{args.dataset}/features/train.{get_node_feature_encoder(args.encoder)}.features.pk')
parser.add_argument('--dev_node_features', default=f'./data/{args.dataset}/features/dev.{get_node_feature_encoder(args.encoder)}.features.pk')
parser.add_argument('--test_node_features', default=f'./data/{args.dataset}/features/test.{get_node_feature_encoder(args.encoder)}.features.pk')
parser.add_argument('--train_concepts', default=f'./data/{args.dataset}/grounded/train.grounded.jsonl')
parser.add_argument('--dev_concepts', default=f'./data/{args.dataset}/grounded/dev.grounded.jsonl')
parser.add_argument('--test_concepts', default=f'./data/{args.dataset}/grounded/test.grounded.jsonl')
parser.add_argument('--node_feature_type', choices=['full', 'cls', 'mention'])
parser.add_argument('--use_cache', default=True, type=bool_flag, nargs='?', const=True, help='use cached data to accelerate data loading')
parser.add_argument('--max_tuple_num', default=200, type=int)
# model architecture
parser.add_argument('--ablation', default=None, choices=['None', 'no_kg', 'no_2hop', 'no_1hop', 'no_qa', 'no_rel',
'mrloss', 'fixrel', 'fakerel', 'no_factor_mul', 'no_2hop_qa',
'randomrel', 'encode_qas', 'multihead_pool', 'att_pool'], nargs='?', const=None, help='run ablation test')
parser.add_argument('--att_head_num', default=2, type=int, help='number of attention heads')
parser.add_argument('--mlp_dim', default=128, type=int, help='number of MLP hidden units')
parser.add_argument('--mlp_layer_num', default=2, type=int, help='number of MLP layers')
parser.add_argument('--fc_dim', default=128, type=int, help='number of FC hidden units')
parser.add_argument('--fc_layer_num', default=0, type=int, help='number of FC layers')
parser.add_argument('--freeze_ent_emb', default=True, type=bool_flag, nargs='?', const=True, help='freeze entity embedding layer')
parser.add_argument('--init_range', default=0.02, type=float, help='stddev when initializing with normal distribution')
parser.add_argument('--emb_scale', default=1.0, type=float, help='scale pretrained embeddings')
# regularization
parser.add_argument('--dropoutm', type=float, default=0.3, help='dropout for mlp hidden units (0 = no dropout)')
# optimization
parser.add_argument('-dlr', '--decoder_lr', default=3e-4, type=float, help='learning rate')
parser.add_argument('-mbs', '--mini_batch_size', default=1, type=int)
parser.add_argument('-ebs', '--eval_batch_size', default=4, type=int)
parser.add_argument('--unfreeze_epoch', default=0, type=int)
parser.add_argument('--refreeze_epoch', default=10000, type=int)
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='show this help message and exit')
args = parser.parse_args()
if args.debug:
parser.set_defaults(batch_size=1, log_interval=1, eval_interval=5)
# set ablation defaults
elif args.ablation == 'mrloss':
parser.set_defaults(loss='margin_rank')
args = parser.parse_args()
find_relational_paths(args.cpnet_vocab_path, args.cpnet_graph_path, args.train_concepts, args.train_rel_paths, args.nprocs, args.use_cache)
find_relational_paths(args.cpnet_vocab_path, args.cpnet_graph_path, args.dev_concepts, args.dev_rel_paths, args.nprocs, args.use_cache)
if args.test_statements is not None:
find_relational_paths(args.cpnet_vocab_path, args.cpnet_graph_path, args.test_concepts, args.test_rel_paths, args.nprocs, args.use_cache)
if args.mode == 'train':
train(args)
elif args.mode == 'eval':
eval(args)
elif args.mode == 'pred':
pred(args)
else:
raise ValueError('Invalid mode')
def train(args):
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available() and args.cuda:
torch.cuda.manual_seed(args.seed)
config_path = os.path.join(args.save_dir, 'config.json')
model_path = os.path.join(args.save_dir, 'model.pt')
log_path = os.path.join(args.save_dir, 'log.csv')
export_config(args, config_path)
check_path(model_path)
with open(log_path, 'w') as fout:
fout.write('step,train_acc,dev_acc\n')
###################################################################################################
# Load data #
###################################################################################################
if 'lm' in args.ent_emb:
print('Using contextualized embeddings for concepts')
use_contextualized, cp_emb = True, None
else:
use_contextualized = False
cp_emb = [np.load(path) for path in args.ent_emb_paths]
cp_emb = torch.tensor(np.concatenate(cp_emb, 1))
concept_num, concept_dim = cp_emb.size(0), cp_emb.size(1)
rel_emb = np.load(args.rel_emb_path)
rel_emb = np.concatenate((rel_emb, -rel_emb), 0)
rel_emb = cal_2hop_rel_emb(rel_emb)
rel_emb = torch.tensor(rel_emb)
relation_num, relation_dim = rel_emb.size(0), rel_emb.size(1)
# print('| num_concepts: {} | num_relations: {} |'.format(concept_num, relation_num))
device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
dataset = LMRelationNetDataLoader(args.train_statements, args.train_rel_paths,
args.dev_statements, args.dev_rel_paths,
args.test_statements, args.test_rel_paths,
batch_size=args.batch_size, eval_batch_size=args.eval_batch_size, device=device,
model_name=args.encoder,
max_tuple_num=args.max_tuple_num, max_seq_length=args.max_seq_len,
is_inhouse=args.inhouse, inhouse_train_qids_path=args.inhouse_train_qids,
use_contextualized=use_contextualized,
train_adj_path=args.train_adj, dev_adj_path=args.dev_adj, test_adj_path=args.test_adj,
train_node_features_path=args.train_node_features, dev_node_features_path=args.dev_node_features,
test_node_features_path=args.test_node_features, node_feature_type=args.node_feature_type,
format=args.format)
###################################################################################################
# Build model #
###################################################################################################
lstm_config = get_lstm_config_from_args(args)
model = LMRelationNet(model_name=args.encoder, concept_num=concept_num, concept_dim=relation_dim,
relation_num=relation_num, relation_dim=relation_dim,
concept_in_dim=(dataset.get_node_feature_dim() if use_contextualized else concept_dim),
hidden_size=args.mlp_dim, num_hidden_layers=args.mlp_layer_num, num_attention_heads=args.att_head_num,
fc_size=args.fc_dim, num_fc_layers=args.fc_layer_num, dropout=args.dropoutm,
pretrained_concept_emb=cp_emb, pretrained_relation_emb=rel_emb, freeze_ent_emb=args.freeze_ent_emb,
init_range=args.init_range, ablation=args.ablation, use_contextualized=use_contextualized,
emb_scale=args.emb_scale, encoder_config=lstm_config)
try:
model.to(device)
except RuntimeError as e:
print(e)
print('best dev acc: 0.0 (at epoch 0)')
print('final test acc: 0.0')
print()
return
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
grouped_parameters = [
{'params': [p for n, p in model.encoder.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay, 'lr': args.encoder_lr},
{'params': [p for n, p in model.encoder.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.encoder_lr},
{'params': [p for n, p in model.decoder.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay, 'lr': args.decoder_lr},
{'params': [p for n, p in model.decoder.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.decoder_lr},
]
optimizer = OPTIMIZER_CLASSES[args.optim](grouped_parameters)
if args.lr_schedule == 'fixed':
scheduler = ConstantLRSchedule(optimizer)
elif args.lr_schedule == 'warmup_constant':
scheduler = WarmupConstantSchedule(optimizer, warmup_steps=args.warmup_steps)
elif args.lr_schedule == 'warmup_linear':
max_steps = int(args.n_epochs * (dataset.train_size() / args.batch_size))
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=max_steps)
print('parameters:')
for name, param in model.decoder.named_parameters():
if param.requires_grad:
print('\t{:45}\ttrainable\t{}'.format(name, param.size()))
else:
print('\t{:45}\tfixed\t{}'.format(name, param.size()))
num_params = sum(p.numel() for p in model.decoder.parameters() if p.requires_grad)
print('\ttotal:', num_params)
if args.loss == 'margin_rank':
loss_func = nn.MarginRankingLoss(margin=0.1, reduction='mean')
elif args.loss == 'cross_entropy':
loss_func = nn.CrossEntropyLoss(reduction='mean')
###################################################################################################
# Training #
###################################################################################################
print()
print('-' * 71)
global_step, best_dev_epoch = 0, 0
best_dev_acc, final_test_acc, total_loss = 0.0, 0.0, 0.0
start_time = time.time()
model.train()
freeze_net(model.encoder)
try:
rel_grad = []
linear_grad = []
for epoch_id in range(args.n_epochs):
if epoch_id == args.unfreeze_epoch:
print('encoder unfreezed')
unfreeze_net(model.encoder)
if epoch_id == args.refreeze_epoch:
print('encoder refreezed')
freeze_net(model.encoder)
model.train()
for qids, labels, *input_data in dataset.train():
optimizer.zero_grad()
bs = labels.size(0)
for a in range(0, bs, args.mini_batch_size):
b = min(a + args.mini_batch_size, bs)
logits, _ = model(*[x[a:b] for x in input_data], layer_id=args.encoder_layer)
if args.loss == 'margin_rank':
num_choice = logits.size(1)
flat_logits = logits.view(-1)
correct_mask = F.one_hot(labels, num_classes=num_choice).view(-1) # of length batch_size*num_choice
correct_logits = flat_logits[correct_mask == 1].contiguous().view(-1, 1).expand(-1, num_choice - 1).contiguous().view(-1) # of length batch_size*(num_choice-1)
wrong_logits = flat_logits[correct_mask == 0] # of length batch_size*(num_choice-1)
y = wrong_logits.new_ones((wrong_logits.size(0),))
loss = loss_func(correct_logits, wrong_logits, y) # margin ranking loss
elif args.loss == 'cross_entropy':
loss = loss_func(logits, labels[a:b])
loss = loss * (b - a) / bs
loss.backward()
total_loss += loss.item()
if args.max_grad_norm > 0:
nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
rel_grad.append(model.decoder.rel_emb.weight.grad.abs().mean().item())
linear_grad.append(model.decoder.mlp.layers[8].weight.grad.abs().mean().item())
optimizer.step()
scheduler.step()  # step the LR scheduler after the optimizer update
if (global_step + 1) % args.log_interval == 0:
total_loss /= args.log_interval
ms_per_batch = 1000 * (time.time() - start_time) / args.log_interval
print('| step {:5} | lr: {:9.7f} | loss {:7.4f} | ms/batch {:7.2f} |'.format(global_step, scheduler.get_lr()[0], total_loss, ms_per_batch))
# print('| rel_grad: {:1.2e} | linear_grad: {:1.2e} |'.format(sum(rel_grad) / len(rel_grad), sum(linear_grad) / len(linear_grad)))
total_loss = 0
rel_grad = []
linear_grad = []
start_time = time.time()
global_step += 1
model.eval()
dev_acc = evaluate_accuracy(dataset.dev(), model)
test_acc = evaluate_accuracy(dataset.test(), model) if args.test_statements else 0.0
print('-' * 71)
print('| epoch {:5} | dev_acc {:7.4f} | test_acc {:7.4f} |'.format(epoch_id, dev_acc, test_acc))
print('-' * 71)
with open(log_path, 'a') as fout:
fout.write('{},{},{}\n'.format(global_step, dev_acc, test_acc))
if dev_acc >= best_dev_acc:
best_dev_acc = dev_acc
final_test_acc = test_acc
best_dev_epoch = epoch_id
torch.save([model, args], model_path)
print(f'model saved to {model_path}')
model.train()
start_time = time.time()
if epoch_id > args.unfreeze_epoch and epoch_id - best_dev_epoch >= args.max_epochs_before_stop:
break
except (KeyboardInterrupt, RuntimeError) as e:
print(e)
print()
print('training ends in {} steps'.format(global_step))
print('best dev acc: {:.4f} (at epoch {})'.format(best_dev_acc, best_dev_epoch))
print('final test acc: {:.4f}'.format(final_test_acc))
print()
def eval(args):
raise NotImplementedError()
def pred(args):
raise NotImplementedError()
if __name__ == '__main__':
main()
|
1684132
|
import json
from com.huawei.iotplatform.client.invokeapi.Authentication import Authentication
from com.huawei.iotplatform.client.invokeapi.BatchProcess import BatchProcess
from com.huawei.iotplatform.client.dto.AuthOutDTO import AuthOutDTO
from com.huawei.iotplatform.client.dto.BatchTaskCreateInDTO import BatchTaskCreateInDTO
from com.huawei.iotplatform.client.dto.BatchTaskCreateOutDTO import BatchTaskCreateOutDTO
from com.huawei.iotplatform.client.dto.QueryTaskDetailsInDTO import QueryTaskDetailsInDTO
from com.huawei.iotplatform.constant.Constant import Constant
class BatchProcessTest(object):
def createBatchTaskInfo(self):
btcInDTO = BatchTaskCreateInDTO()
btcInDTO.appId = "3RQ9UnhymV409MfKPuiin75XroQa"
btcInDTO.timeout = 100
btcInDTO.taskName = "a1"
btcInDTO.taskType = "DeviceCmd"
return btcInDTO
def queryTaskDetailsInfo(self):
qtdInDTO = QueryTaskDetailsInDTO()
qtdInDTO.taskID = "5bf8f8567dd2d86eab87edd9"
return qtdInDTO
if __name__ == "__main__":
bpTest = BatchProcessTest()
authentication = Authentication()
batchProcess = BatchProcess()
# get accessToken at first
result = authentication.getAuthToken(Constant().clientInfo())
authOutDTO = AuthOutDTO()
authOutDTO.setAccessToken(json.loads(result)['accessToken'])
accessToken = authOutDTO.getAccessToken()
# create a task begin
bc = batchProcess.createBatchTask(bpTest.createBatchTaskInfo(), accessToken)
print("====== create a task begin ======")
print("result:", bc + "\n")
# get taskID
btcOutDTO = BatchTaskCreateOutDTO()
btcOutDTO.setTaskID(json.loads(bc)['taskID'])
taskID = btcOutDTO.getTaskID()
# taskID = "11"
print("taskID==", taskID+ "\n")
# query a specified task
bq = batchProcess.queryOneTask(taskID, None, None, accessToken)
print("====== query a specified task ======")
print("result:", bq + "\n")
# query a specified task detail
bq = batchProcess.queryTaskDetails(bpTest.queryTaskDetailsInfo(), accessToken)
print("====== query a specified task detail ======")
print("result:", bq + "\n")
|
1684133
|
from datetime import datetime
class DBHelper:
def __init__(self):
pass
def initDB(self):
'''
Initialize the database; mainly creates tables and performs other setup work
'''
pass
def writeBars(self, bars:list, period="day"):
'''
Write candlestick bars to the database\n
@bars list of bars\n
@period bar period
'''
pass
def writeFactors(self, factors:dict):
'''
Write ex-rights adjustment factors to the database\n
@factors adjustment factors
'''
pass
class BaseDataHelper:
def __init__(self):
self.isAuthed = False
pass
def __check__(self):
if not self.isAuthed:
raise Exception("This module has not authorized yet!")
def auth(self, **kwargs):
'''
Authenticate this module
'''
pass
def dmpCodeListToFile(self, filename:str, hasIndex:bool=True, hasStock:bool=True):
'''
Dump the instrument code list to a file\n
@filename output file name, in json format\n
@hasIndex whether to include indices\n
@hasStock whether to include stocks\n
'''
pass
def dmpAdjFactorsToFile(self, codes:list, filename:str):
'''
Dump ex-rights adjustment factors to a file\n
@codes list of stock codes, e.g. ["SSE.600000","SZSE.000001"]\n
@filename output file name, in json format
'''
pass
def dmpBarsToFile(self, folder:str, codes:list, start_date:datetime=None, end_date:datetime=None, period:str="day"):
'''
Dump candlestick bars to csv files under the given folder, named like SSE.600000_d.csv\n
@folder output folder\n
@codes list of stock codes, e.g. ["SSE.600000","SZSE.000001"]\n
@start_date start date (datetime); None defaults to 1990-01-01\n
@end_date end date (datetime); None defaults to the current date\n
@period bar period; supports day, min1, min5\n
'''
pass
def dmpAdjFactorsToDB(self, dbHelper:DBHelper, codes:list):
'''
Dump ex-rights adjustment factors to the database\n
@codes list of stock codes, e.g. ["SSE.600000","SZSE.000001"]\n
@dbHelper database helper module
'''
pass
def dmpBarsToDB(self, dbHelper:DBHelper, codes:list, start_date:datetime=None, end_date:datetime=None, period:str="day"):
'''
Dump candlestick bars to the database\n
@dbHelper database helper module\n
@codes list of stock codes, e.g. ["SSE.600000","SZSE.000001"]\n
@start_date start date (datetime); None defaults to 1990-01-01\n
@end_date end date (datetime); None defaults to the current date\n
@period bar period; supports day, min1, min5\n
'''
pass
|
1684287
|
from flask import Blueprint, g, jsonify
from rowboat.models.guild import Guild
from rowboat.util.decos import authed
users = Blueprint('users', __name__, url_prefix='/api/users')
@users.route('/@me')
@authed
def users_me():
return jsonify(g.user.serialize(us=True))
@users.route('/@me/guilds')
@authed
def users_me_guilds():
if g.user.admin:
guilds = list(Guild.select().where(
(Guild.enabled == True)
))
else:
guilds = list(Guild.select(
Guild,
Guild.config['web'][str(g.user.user_id)].alias('role')
).where(
(~(Guild.config['web'][str(g.user.user_id)] >> None)) &
(Guild.enabled == True)
))
return jsonify([
guild.serialize() for guild in guilds
])
|
1684300
|
from checkmate.lib.models import (Issue,
IssueOccurrence,
IssueCategory,
Diff,
Snapshot,
DiffIssueOccurrence,
ProjectIssueClass,
FileRevision)
from .user import AccessToken, User, UserRole
from .project import Project
from .task import Task
from .issue_class import IssueClass
from .tag import Tag
|
1684321
|
import numba
import torch
import numpy as np
from .common import check_numpy_to_torch
def limit_period(val, offset=0.5, period=np.pi):
val, is_numpy = check_numpy_to_torch(val)
ans = val - torch.floor(val / period + offset) * period
return ans.numpy() if is_numpy else ans
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), rotation angle about the z-axis; a positive angle rotates x towards y
Returns:
points_rot: (B, N, 3 + C), the rotated points
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
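# Quick sanity check (illustrative): rotating the point (1, 0, 0) by pi/2
# about z should land on the y-axis, up to floating-point error:
# rotate_points_along_z(torch.tensor([[[1.0, 0.0, 0.0]]]),
#                       torch.tensor([np.pi / 2]))  # ~[[[0., 1., 0.]]]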
def camera_to_lidar(points, r_rect, velo2cam):
pts = np.concatenate(
[points[:, :3], np.ones([points.shape[0], 1])], axis=1)
pts = pts @ np.linalg.inv((r_rect @ velo2cam).T)
points[:, :3] = pts[:, :3]
return points
def lidar_to_camera(points, r_rect, velo2cam):
pts = np.concatenate(
[points[:, :3], np.ones([points.shape[0], 1])], axis=1)
pts = pts @ (r_rect @ velo2cam).T
points[:, :3] = pts[:, :3]
return points
def box_camera_to_lidar(data, r_rect, velo2cam):
xyz = data[:, 0:3]
l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
xyz_lidar = data[:, 0:3]
w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
r = data[:, 6:7]
xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)
return np.concatenate([xyz, l, h, w, r], axis=1)
def projection_matrix_to_CRT_kitti(P):
"""
Decompose the projection matrix P into camera intrinsic and extrinsic parameters via QR decomposition
Input:
P: projection matrix, 3x4
Output:
K: intrinsic matrix, 3x3
R: rotation matrix, 3x3
T: translation vector, 3x1
"""
# P = K @ [R|T]
# K is upper triangular matrix, so we need to inverse CR and use QR
# stable for all kitti camera projection matrix
CR = P[0:3, 0:3]
CT = P[0:3, 3]
RinvCinv = np.linalg.inv(CR)
Rinv, Cinv = np.linalg.qr(RinvCinv)
K = np.linalg.inv(Cinv)
R = np.linalg.inv(Rinv)
T = Cinv @ CT
return K, R, T
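# Illustrative check: for P = K [I|0] (identity rotation, zero translation)
# the decomposition recovers K, R = I, and T = 0, since the QR factors of an
# upper-triangular matrix with positive diagonal are trivial:
# K = np.array([[721.5, 0., 609.6], [0., 721.5, 172.9], [0., 0., 1.]])
# P = np.hstack([K, np.zeros((3, 1))])
# K2, R, T = projection_matrix_to_CRT_kitti(P)  # K2 ~ K, R ~ I, T ~ 0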
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
fku = C[0, 0]
fkv = -C[1, 1]
u0v0 = C[0:2, 2]
z_points = np.array([near_clip] * 4 + [far_clip] *
4, dtype=C.dtype)[:, np.newaxis]
b = bbox_image
box_corners = np.array(
[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
)
near_box_corners = (box_corners - u0v0) / np.array(
[fku / near_clip, -fkv / near_clip], dtype=C.dtype
)
far_box_corners = (box_corners - u0v0) / np.array(
[fku / far_clip, -fkv / far_clip], dtype=C.dtype
)
ret_xy = np.concatenate(
[near_box_corners, far_box_corners], axis=0) # [8, 2]
ret_xyz = np.concatenate([ret_xy, z_points], axis=1)
return ret_xyz
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3], must from corner functions in this module
num_boxes = corners.shape[0]
surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
corner_idxes = np.array(
[0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
).reshape(6, 4)
for i in range(num_boxes):
for j in range(6):
for k in range(4):
surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
return surfaces
def points_in_convex_polygon_3d_jit(points, polygon_surfaces, num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. All surfaces' normal vectors must point toward the interior.
max_num_points_of_surface must be at least 3.
num_surfaces: [num_polygon] array. Indicates how many surfaces
each polygon contains.
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
if num_surfaces is None:
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jitv2(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
return _points_in_convex_polygon_3d_jit(
points, polygon_surfaces, normal_vec, d, num_surfaces
)
@numba.njit
def surface_equ_3d_jitv2(surfaces):
# polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3]
num_polygon = surfaces.shape[0]
max_num_surfaces = surfaces.shape[1]
normal_vec = np.zeros((num_polygon, max_num_surfaces, 3), dtype=surfaces.dtype)
d = np.zeros((num_polygon, max_num_surfaces), dtype=surfaces.dtype)
# scratch 3-vectors; their entries are overwritten inside the loop below
sv0 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
sv1 = surfaces[0, 0, 0] - surfaces[0, 0, 1]
for i in range(num_polygon):
for j in range(max_num_surfaces):
sv0[0] = surfaces[i, j, 0, 0] - surfaces[i, j, 1, 0]
sv0[1] = surfaces[i, j, 0, 1] - surfaces[i, j, 1, 1]
sv0[2] = surfaces[i, j, 0, 2] - surfaces[i, j, 1, 2]
sv1[0] = surfaces[i, j, 1, 0] - surfaces[i, j, 2, 0]
sv1[1] = surfaces[i, j, 1, 1] - surfaces[i, j, 2, 1]
sv1[2] = surfaces[i, j, 1, 2] - surfaces[i, j, 2, 2]
normal_vec[i, j, 0] = sv0[1] * sv1[2] - sv0[2] * sv1[1]
normal_vec[i, j, 1] = sv0[2] * sv1[0] - sv0[0] * sv1[2]
normal_vec[i, j, 2] = sv0[0] * sv1[1] - sv0[1] * sv1[0]
d[i, j] = (
-surfaces[i, j, 0, 0] * normal_vec[i, j, 0]
- surfaces[i, j, 0, 1] * normal_vec[i, j, 1]
- surfaces[i, j, 0, 2] * normal_vec[i, j, 2]
)
return normal_vec, d
@numba.njit
def _points_in_convex_polygon_3d_jit(
points, polygon_surfaces, normal_vec, d, num_surfaces=None
):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. All surfaces' normal vectors must point toward the interior.
max_num_points_of_surface must be at least 3.
num_surfaces: [num_polygon] array. Indicates how many surfaces
each polygon contains.
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
ret = np.ones((num_points, num_polygons), dtype=np.bool_)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = (
points[i, 0] * normal_vec[j, k, 0]
+ points[i, 1] * normal_vec[j, k, 1]
+ points[i, 2] * normal_vec[j, k, 2]
+ d[j, k]
)
if sign >= 0:
ret[i, j] = False
break
return ret
def mask_points_by_range(points, limit_range):
mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \
& (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
return mask
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
# 5x faster than remove_outside_points_v1(2ms vs 10ms)
C, R, T = projection_matrix_to_CRT_kitti(P2)
image_bbox = [0, 0, image_shape[1], image_shape[0]]
frustum = get_frustum(image_bbox, C)
frustum -= T
frustum = np.linalg.inv(R) @ frustum.T
frustum = camera_to_lidar(frustum.T, rect, Trv2c)
frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
points = points[indices.reshape([-1])]
return points
|
1684344
|
import numpy as np
class BinaryTree:
root = None
node_index = None
def __init__(self, index, value):
self.root = BinaryTreeNode(index, value)
self.node_index = {index: self.root}
def add_left_descendant(self, index, value, parent_index):
parent = self.node_index[parent_index]
new_node = BinaryTreeNode(index, value, parent)
self.node_index[index] = new_node
parent.add_left_descendant(new_node)
def has_left_descendant_at_node(self, index):
return self.node_index[index].has_left_descendant()
def add_right_descendant(self, index, value, parent_index):
parent = self.node_index[parent_index]
new_node = BinaryTreeNode(index, value, parent)
self.node_index[index] = new_node
parent.add_right_descendant(new_node)
def has_right_descendant_at_node(self, index):
return self.node_index[index].has_right_descendant()
def set_word(self, index, word):
self.node_index[index].set_word(word)
def print_tree(self):
self.root.recursive_print()
def get_sentence(self):
sentence = ' '.join([n.word for n in self.node_index.values() if n.word != '_'])
return sentence
def get_words(self):
return [n.word for n in self.node_index.values()]
def convert_to_ptb_format(self):
return self.root.convert_to_ptb()
def get_all_sequences_and_masks(self, root_only=False):
"""
Get the sequences, masks, and values associated with all subtrees in this tree
:param root_only: if True, only return the values for the whole tree, not the subtrees
:return: a list of (words, left_mask, right_mask, value) tuples
"""
seqs_and_masks = []
if root_only:
words, left_mask, right_mask, value = self.convert_to_sequence_and_masks(self.root)
seqs_and_masks.append((words, left_mask, right_mask, value))
else:
nodes = self.node_index.values()
for node in nodes:
words, left_mask, right_mask, value = self.convert_to_sequence_and_masks(node)
seqs_and_masks.append((words, left_mask, right_mask, value))
return seqs_and_masks
def convert_to_sequence_and_masks(self, head_node):
"""
Convert a subtree into a sequence of words, corresponding masks, and the value of the root
:param head_node: the node to treat as the root of the (sub)tree
:return words: list of words in tree order
:return left_mask, right_mask: masks denoting the tree structure
:return value: the sentiment value of the root of this (sub)tree
"""
sequence = head_node.get_children_in_sequence()
sequence.reverse()
sequence_map = {s: s_i for s_i, s in enumerate(sequence)}
n_elements = len(sequence)
left_mask = np.zeros([n_elements, n_elements], dtype=np.int32)
right_mask = np.zeros([n_elements, n_elements], dtype=np.int32)
for s_i, n_i in enumerate(sequence):
node = self.node_index[n_i]
if node.has_left_descendant():
left_mask[s_i, sequence_map[node.left_descendant.index]] = 1
if node.has_right_descendant():
right_mask[s_i, sequence_map[node.right_descendant.index]] = 1
words = [self.node_index[n_i].word for n_i in sequence]
value = int(self.node_index[sequence[-1]].value)
return words, left_mask, right_mask, value
class BinaryTreeNode:
word = None
index = None
value = None
parent = None
left_descendant = None
right_descendant = None
def __init__(self, index, value, parent=None):
self.value = value
self.index = index
self.word = '_'
if parent is not None:
self.parent = parent
def __str__(self):
return '(%s %s %s)' % (self.value, self.word, self.index)
def set_word(self, word):
self.word = word
def add_left_descendant(self, new_node):
self.left_descendant = new_node
def has_left_descendant(self):
return self.left_descendant is not None
def add_right_descendant(self, new_node):
self.right_descendant = new_node
def has_right_descendant(self):
return self.right_descendant is not None
def recursive_print(self, depth=0):
print(' ' * depth, self.value, self.word, self.index)
if self.left_descendant is not None:
self.left_descendant.recursive_print(depth+1)
if self.right_descendant is not None:
self.right_descendant.recursive_print(depth+1)
def convert_to_ptb(self):
ptb_string = '(' + self.value
if self.word != '_':
ptb_string += ' ' + self.word
if self.left_descendant is not None:
ptb_string += ' ' + self.left_descendant.convert_to_ptb()
if self.right_descendant is not None:
ptb_string += ' ' + self.right_descendant.convert_to_ptb()
ptb_string += ')'
return ptb_string
def get_leaf_nodes(self):
leaves = []
if self.left_descendant is None and self.right_descendant is None:
leaves.append(self.word)
else:
if self.left_descendant is not None:
leaves.extend(self.left_descendant.get_leaf_nodes())
if self.right_descendant is not None:
leaves.extend(self.right_descendant.get_leaf_nodes())
return leaves
def get_children_in_sequence(self):
sequence = []
if self.left_descendant is None and self.right_descendant is None:
sequence.append(self.index)
else:
if self.left_descendant is not None:
sequence.extend(self.left_descendant.get_children_in_sequence())
if self.right_descendant is not None:
sequence.extend(self.right_descendant.get_children_in_sequence())
sequence = [self.index] + sequence
return sequence
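# A minimal construction sketch: a three-node tree for the phrase "not bad"
# with illustrative sentiment values.
if __name__ == '__main__':
    tree = BinaryTree(0, '3')
    tree.add_left_descendant(1, '1', 0)
    tree.add_right_descendant(2, '2', 0)
    tree.set_word(1, 'not')
    tree.set_word(2, 'bad')
    print(tree.get_sentence())           # not bad
    print(tree.convert_to_ptb_format())  # (3 (1 not) (2 bad))
    words, left_mask, right_mask, value = tree.convert_to_sequence_and_masks(tree.root)
    print(words, value)                  # ['bad', 'not', '_'] 3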
|
1684374
|
import logging
import pandas as pd
import glob
import os
import sys
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import requests
from zipfile import ZipFile
import urllib
import numpy as np
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers[:]: logger.removeHandler(handler)  # iterate over a copy; removing while iterating skips handlers
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# using preexisting table for this dataset
dataset_name = 'foo_062_rw0_fishery_production' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
'''
# insert the url used to download the data from the source website
url_list = ['http://www.fao.org/fishery/static/Data/GlobalProduction_2021.1.2.zip', 'http://www.fao.org/fishery/static/Data/Aquaculture_2021.1.2.zip', 'http://www.fao.org/fishery/static/Data/Capture_2021.1.2.zip'] #check
# construct the file paths to raw data files
raw_data_file = [os.path.join(data_dir,os.path.basename(url)) for url in url_list]
raw_data_file_unzipped = [file.split('.')[0] for file in raw_data_file]
for url, file in zip(url_list, raw_data_file):
# download the data from the source
r = requests.get(url)
with open(file, 'wb') as f:
f.write(r.content)
for file, unzipped in zip(raw_data_file, raw_data_file_unzipped):
# unzip source data
zip_ref = ZipFile(file, 'r')
zip_ref.extractall(unzipped)
zip_ref.close()
'''
Process the data
'''
# create a list to store the processed dataframes
processed_df = []
for file in raw_data_file_unzipped:
# read the dataset as a pandas dataframe
csv_data = glob.glob(os.path.join(file,'*QUANTITY.csv'))[0]
df_data = pd.read_csv(csv_data,encoding='latin-1')
# read the country code list as a pandas dataframe
csv_countries = glob.glob(os.path.join(file,'*COUNTRY_GROUPS.csv'))[0]
countries_df = pd.read_csv(csv_countries, encoding='latin-1')
# rename the UN Code column in the country code list to match the column in the dataset
countries_df.rename(columns={'UN_Code':'COUNTRY.UN_CODE'}, inplace=True)
# merge the dataframes so each country code in the dataset is matched with an ISO code and its full name
df = pd.merge(df_data,countries_df[['COUNTRY.UN_CODE','ISO3_Code','Name_En']], on='COUNTRY.UN_CODE', how='left')
# add a column to reflect the type of production measured by the value column for the dataset (ex GlobalProduction, Aquaculture, or Capture) and the variable (quantity)
data_type = os.path.basename(file).split('_')[0]
df['type'] = data_type + '_quantity'
# convert the data type of the value column to float
df['VALUE'] = df['VALUE'].astype(float)
# add the processed dataframe to the list
processed_df.append(df)
# There is additional data in the Aquaculture dataset on value
# Process this data following the procedure above
if data_type == 'Aquaculture':
# read the dataset as a pandas dataframe
csv_aqua_value = glob.glob(os.path.join(file,'*VALUE.csv'))[0]
df_aqua_value = pd.read_csv(csv_aqua_value,encoding='latin-1')
# merge the dataframes so each country code in the dataset is matched with an ISO code and its full name
df = pd.merge(df_aqua_value,countries_df[['COUNTRY.UN_CODE','ISO3_Code','Name_En']], on='COUNTRY.UN_CODE', how='left')
# add a column to reflect the type of production measured by the value column for the dataset (ex GlobalProduction, Aquaculture, or Capture) and the variable (value)
data_type = os.path.basename(file).split('_')[0]
df['type'] = data_type + '_value'
# convert the data type of the value column to float
df['VALUE'] = df['VALUE'].astype(float)
# add the processed dataframe to the list
processed_df.append(df)
# join the three datasets
df = pd.concat(processed_df)
# rename the period column to year
df.rename(columns={'PERIOD':'year'}, inplace=True)
# pivot the table from long to wide form
# to sum the values for each type of production of a country in a given year
table = pd.pivot_table(df, values='VALUE', index=['ISO3_Code', 'year','MEASURE'], columns=['type'], aggfunc=np.sum)
# turn all column names to lowercase
table.columns = [x.lower() for x in table.columns]
# convert Year column to datetime object
df['datetime'] = pd.to_datetime(df.year, format='%Y')
# save processed dataset to csv
processed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv')
table.to_csv(processed_data_file, index=True)
'''
Upload processed data to Carto
'''
logger.info('Uploading processed data to Carto.')
util_carto.upload_to_carto(processed_data_file, 'LINK', tags = ['ow'])
'''
Upload original data and processed data to Amazon S3 storage
'''
# initialize AWS variables
aws_bucket = 'wri-public-data'
s3_prefix = 'resourcewatch/'
logger.info('Uploading original data to S3.')
# Upload raw data file to S3
# Copy the raw data into a zipped file to upload to S3
raw_data_dir = os.path.join(data_dir, dataset_name+'.zip')
with ZipFile(raw_data_dir,'w') as zipped:
for file in raw_data_file:
zipped.write(file, os.path.basename(file))
# Upload raw data file to S3
uploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix + os.path.basename(raw_data_dir))
logger.info('Uploading processed data to S3.')
# Copy the processed data into a zipped file to upload to S3
processed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')
with ZipFile(processed_data_dir,'w') as zipped:
zipped.write(processed_data_file, os.path.basename(processed_data_file))
# Upload processed data file to S3
uploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix + os.path.basename(processed_data_dir))
|
1684405
|
from src import network
bbj = network.BBJ("192.168.1.137", 7066)
def geterr(obj):
"""
Returns false if there are no errors in a network response,
else a tuple of (code integer, description string)
"""
error = obj.get("error")
if not error:
return False
return (error["code"], error["description"])
def register_prompt(user, initial=True):
if initial:
print("Register for BBJ as {}?".format(user))
reply = input("(y[es], d[ifferent name], q[uit])> ").lower()
if reply.startswith("d"):
return register_prompt(input("(Username)> "))
elif reply.startswith("q"):
exit("bye!")
def getpass(ok):
p1 = input(
"(Choose a password)> " if ok else \
"(Those didn't match. Try again)> ")
p2 = input("(Now type it one more time)> ")
return p1 if p1 == p2 else getpass(False)
# this method will sha256 it for us
bbj.setuser(user, getpass(True))
response = bbj("user_register", quip="", bio="")
error = geterr(response)
if error:
exit("Registration error: " + error[1])
return response
def login(user, ok=True):
if not bbj("is_registered", target_user=user):
register_prompt(user)
else:
bbj.setuser(user, input(
"(Password)> " if ok else \
"(Invalid password, try again)> "))
if not bbj("check_auth"):
return login(user, ok=False)
return bbj("user_get", target_user=user)
# user = input("(BBJ Username)> ")
# if not bbj("is_registered", target_user=user):
login(input("(Username)> "))
import urwid
f = urwid.Frame(
urwid.ListBox(
urwid.SimpleFocusListWalker(
[urwid.Text(i["body"]) for i in bbj("thread_index")["threads"]]
)
)
)
t = urwid.Overlay(
f, urwid.SolidFill('!'),
align='center',
width=('relative', 80),
height=('relative', 80),
valign='middle'
)
loop = urwid.MainLoop(t)
|
1684414
|
import torch
import torch.autograd as autograd
def gradient_penalty(fake_data, real_data, discriminator):
alpha = torch.cuda.FloatTensor(fake_data.shape[0], 1, 1, 1).uniform_(0, 1).expand(fake_data.shape)
interpolates = alpha * fake_data + (1 - alpha) * real_data
interpolates.requires_grad = True
disc_interpolates, _ = discriminator(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def consistency_term(real_data, discriminator, Mtag=0):
d1, d_1 = discriminator(real_data)
d2, d_2 = discriminator(real_data)
# why is max needed when the norm is positive?
consistency_term = (d1 - d2).norm(2, dim=1) + 0.1 * (d_1 - d_2).norm(2, dim=1) - Mtag
return consistency_term.mean()
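# A minimal WGAN-GP usage sketch; gradient_penalty allocates CUDA tensors, so
# a GPU is required. The toy critic returning (score, features) is hypothetical.
if __name__ == '__main__':
    import torch.nn as nn

    class ToyCritic(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 1, kernel_size=4, stride=4)

        def forward(self, x):
            feats = self.conv(x).view(x.size(0), -1)
            return feats.mean(dim=1), feats  # (critic score, features)

    if torch.cuda.is_available():
        critic = ToyCritic().cuda()
        real = torch.randn(4, 3, 32, 32).cuda()
        fake = torch.randn(4, 3, 32, 32).cuda()
        print(gradient_penalty(fake, real, critic).item())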
|
1684437
|
import networkx as nx
from networkx.utils.decorators import py_random_state, not_implemented_for
__all__ = ["randomized_partitioning", "one_exchange"]
@not_implemented_for("directed", "multigraph")
@py_random_state(1)
def randomized_partitioning(G, seed=None, p=0.5, weight=None):
"""Compute a random partitioning of the graph nodes and its cut value.
A partitioning is calculated by observing each node
and deciding to add it to the partition with probability `p`,
returning a random cut and its corresponding value (the
sum of weights of edges connecting different partitions).
Parameters
----------
G : NetworkX graph
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
p : scalar
Probability for each node to be part of the first partition.
Should be in [0,1]
weight : object
Edge attribute key to use as weight. If not specified, edges
have weight one.
Returns
-------
cut_size : scalar
Value of the random cut.
partition : pair of node sets
A partitioning of the nodes that defines the cut.
"""
cut = {node for node in G.nodes() if seed.random() < p}
cut_size = nx.algorithms.cut_size(G, cut, weight=weight)
partition = (cut, G.nodes - cut)
return cut_size, partition
def _swap_node_partition(cut, node):
return cut - {node} if node in cut else cut.union({node})
@not_implemented_for("directed", "multigraph")
@py_random_state(2)
def one_exchange(G, initial_cut=None, seed=None, weight=None):
"""Compute a partitioning of the graphs nodes and the corresponding cut value.
Use a greedy one exchange strategy to find a locally maximal cut
and its value, it works by finding the best node (one that gives
the highest gain to the cut value) to add to the current cut
and repeats this process until no improvement can be made.
Parameters
----------
G : networkx Graph
Graph to find a maximum cut for.
initial_cut : set
Cut to use as a starting point. If not supplied the algorithm
starts with an empty cut.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
weight : object
Edge attribute key to use as weight. If not specified, edges
have weight one.
Returns
-------
cut_value : scalar
Value of the locally maximal cut found.
partition : pair of node sets
A partitioning of the nodes that defines the cut.
"""
if initial_cut is None:
initial_cut = set()
cut = set(initial_cut)
current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight)
while True:
nodes = list(G.nodes())
# Shuffling the nodes ensures random tie-breaks in the following call to max
seed.shuffle(nodes)
best_node_to_swap = max(
nodes,
key=lambda v: nx.algorithms.cut_size(
G, _swap_node_partition(cut, v), weight=weight
),
default=None,
)
potential_cut = _swap_node_partition(cut, best_node_to_swap)
potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight)
if potential_cut_size > current_cut_size:
cut = potential_cut
current_cut_size = potential_cut_size
else:
break
partition = (cut, G.nodes - cut)
return current_cut_size, partition
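# Illustrative usage on a small graph; the greedy local search should never
# return a cut smaller than the random one it starts from.
if __name__ == '__main__':
    G = nx.barbell_graph(4, 0)
    rand_size, rand_partition = randomized_partitioning(G, seed=42)
    greedy_size, greedy_partition = one_exchange(G, initial_cut=rand_partition[0], seed=42)
    print(rand_size, greedy_size)
    assert greedy_size >= rand_size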
|