| id | content |
|---|---|
1631140
|
from twisted.web import proxy
class TerminalResource(proxy.ReverseProxyResource):
def __init__(self):
proxy.ReverseProxyResource.__init__(self, "127.0.0.1", 4200, '')
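# A minimal serving sketch (added; assumes the usual twisted.web setup,
# not part of the original file): expose the proxied terminal on :8080.
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.web.server import Site
    reactor.listenTCP(8080, Site(TerminalResource()))
    reactor.run()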
|
1631208
|
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtCore import Qt
import sys
import numpy as np  # used below (np.argmax); missing from the original imports
from typing import List
from neural_network import *
from snake import Snake
class NeuralNetworkViz(QtWidgets.QWidget):
def __init__(self, parent, snake: Snake):
super().__init__(parent)
self.snake = snake
self.horizontal_distance_between_layers = 50
self.vertical_distance_between_nodes = 10
self.num_neurons_in_largest_layer = max(self.snake.network.layer_nodes)
# self.setFixedSize(600,800)
self.neuron_locations = {}
self.show()
def paintEvent(self, event: QtGui.QPaintEvent) -> None:
painter = QtGui.QPainter()
painter.begin(self)
self.show_network(painter)
painter.end()
def update(self) -> None:
self.repaint()
def show_network(self, painter: QtGui.QPainter):
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        painter.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)
        painter.setRenderHint(QtGui.QPainter.TextAntialiasing)
        painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
vertical_space = 8
radius = 8
height = self.frameGeometry().height()
width = self.frameGeometry().width()
layer_nodes = self.snake.network.layer_nodes
default_offset = 30
h_offset = default_offset
inputs = self.snake.vision_as_array
        out = self.snake.network.feed_forward(inputs)  # @TODO: shouldn't need this
max_out = np.argmax(out)
# Draw nodes
for layer, num_nodes in enumerate(layer_nodes):
v_offset = (height - ((2*radius + vertical_space) * num_nodes))/2
activations = None
if layer > 0:
activations = self.snake.network.params['A' + str(layer)]
for node in range(num_nodes):
x_loc = h_offset
y_loc = node * (radius*2 + vertical_space) + v_offset
t = (layer, node)
if t not in self.neuron_locations:
self.neuron_locations[t] = (x_loc, y_loc + radius)
painter.setBrush(QtGui.QBrush(Qt.white, Qt.NoBrush))
# Input layer
if layer == 0:
# Is there a value being fed in
if inputs[node, 0] > 0:
painter.setBrush(QtGui.QBrush(Qt.green))
else:
painter.setBrush(QtGui.QBrush(Qt.white))
# Hidden layers
elif layer > 0 and layer < len(layer_nodes) - 1:
                    try:
                        saturation = max(min(activations[node, 0], 1.0), 0.0)
                    except Exception:
                        # Dump the network parameters to aid debugging, then abort.
                        print(self.snake.network.params)
                        sys.exit(-1)
painter.setBrush(QtGui.QBrush(QtGui.QColor.fromHslF(125/239, saturation, 120/240)))
# Output layer
elif layer == len(layer_nodes) - 1:
text = ('U', 'D', 'L', 'R')[node]
painter.drawText(h_offset + 30, node * (radius*2 + vertical_space) + v_offset + 1.5*radius, text)
if node == max_out:
painter.setBrush(QtGui.QBrush(Qt.green))
else:
painter.setBrush(QtGui.QBrush(Qt.white))
painter.drawEllipse(x_loc, y_loc, radius*2, radius*2)
h_offset += 150
# Reset horizontal offset for the weights
h_offset = default_offset
# Draw weights
# For each layer starting at 1
for l in range(1, len(layer_nodes)):
weights = self.snake.network.params['W' + str(l)]
prev_nodes = weights.shape[1]
curr_nodes = weights.shape[0]
# For each node from the previous layer
for prev_node in range(prev_nodes):
# For all current nodes, check to see what the weights are
for curr_node in range(curr_nodes):
# If there is a positive weight, make the line blue
if weights[curr_node, prev_node] > 0:
painter.setPen(QtGui.QPen(Qt.blue))
# If there is a negative (impeding) weight, make the line red
else:
painter.setPen(QtGui.QPen(Qt.red))
# Grab locations of the nodes
start = self.neuron_locations[(l-1, prev_node)]
end = self.neuron_locations[(l, curr_node)]
# Offset start[0] by diameter of circle so that the line starts on the right of the circle
painter.drawLine(start[0] + radius*2, start[1], end[0], end[1])
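# Hedged usage sketch (added; assumes a Snake whose network is already
# initialized, as elsewhere in this project):
#   app = QtWidgets.QApplication(sys.argv)
#   window = QtWidgets.QMainWindow()
#   viz = NeuralNetworkViz(window, snake)
#   window.show()
#   sys.exit(app.exec_())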
|
1631236
|
from loguru import logger
from tests.conftest import MIN_MS1_INTENSITY, check_non_empty_MS2, check_mzML, OUT_DIR, BEER_CHEMS, BEER_MIN_BOUND, \
BEER_MAX_BOUND
from vimms.Common import POSITIVE
from vimms.Controller import TopNController
from vimms.Environment import Environment
from vimms.MassSpec import IndependentMassSpectrometer, TaskManager
class TestSimulatedMassSpec:
"""
Tests the Top-N controller that does standard DDA Top-N fragmentation scans with the simulated mass spec class.
"""
def test_mass_spec(self):
logger.info('Testing mass spec using the Top-N controller and QC beer chemicals')
isolation_width = 1
N = 10
rt_tol = 15
mz_tol = 10
ionisation_mode = POSITIVE
task_manager = TaskManager(buffer_size=3)
mass_spec = IndependentMassSpectrometer(ionisation_mode, BEER_CHEMS, task_manager=task_manager)
controller = TopNController(ionisation_mode, N, isolation_width, mz_tol, rt_tol, MIN_MS1_INTENSITY)
# create an environment to run both the mass spec and controller
env = Environment(mass_spec, controller, BEER_MIN_BOUND, BEER_MAX_BOUND, progress_bar=True)
# run_environment(env)
env.run()
# check that there is at least one non-empty MS2 scan
check_non_empty_MS2(controller)
# write simulated output to mzML file
filename = 'test_mass_spec.mzML'
check_mzML(env, OUT_DIR, filename)
|
1631286
|
import shortuuid
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from urllib.parse import urljoin
from app.files import RandomFileName
from app.models import TimestampedModel, models
from app.tasks import send_mail
class Languages(models.TextChoices):
RU = 'RU', _('Russian')
EN = 'EN', _('English')
class DiplomaQuerySet(models.QuerySet):
def for_viewset(self):
return self.select_related('study', 'study__student', 'study__course')
def for_user(self, user):
return self.filter(study__student=user)
class Diploma(TimestampedModel):
objects = models.Manager.from_queryset(DiplomaQuerySet)()
study = models.ForeignKey('studying.Study', on_delete=models.CASCADE)
slug = models.CharField(max_length=32, db_index=True, unique=True, default=shortuuid.uuid)
language = models.CharField(max_length=3, choices=Languages.choices, db_index=True)
image = models.ImageField(upload_to=RandomFileName('diplomas'))
class Meta:
constraints = [
models.UniqueConstraint(fields=['study', 'language'], name='unique_study'),
]
indexes = [
models.Index(fields=['study', 'language']),
]
ordering = ['-id']
permissions = [
('access_all_diplomas', _('May access diplomas of all students')),
]
verbose_name = _('Diploma')
verbose_name_plural = _('Diplomas')
def __str__(self) -> str:
return f'{self.study}: {self.language}'
def get_other_languages(self) -> DiplomaQuerySet:
return self.__class__.objects.filter(study=self.study).exclude(pk=self.pk)
def get_absolute_url(self) -> str:
return urljoin(settings.DIPLOMA_FRONTEND_URL, f'/{self.slug}/')
def send_to_student(self):
send_mail.delay(
to=self.study.student.email,
template_id='new-diploma',
ctx=dict(
course_name=self.study.course.full_name,
diploma_url=self.get_absolute_url(),
),
disable_antispam=True,
)
class DiplomaTemplate(TimestampedModel):
course = models.ForeignKey('products.Course', on_delete=models.CASCADE)
slug = models.CharField(max_length=32, help_text=_('Check out https://is.gd/eutOYr for available templates'))
language = models.CharField(max_length=3, choices=Languages.choices, db_index=True)
homework_accepted = models.BooleanField(_('This template is for students that have completed the homework'), default=False)
class Meta:
verbose_name = _('Diploma template')
verbose_name_plural = _('Diploma templates')
constraints = [
models.UniqueConstraint(fields=['course', 'language', 'homework_accepted'], name='single diploma per course option'),
]
indexes = [
models.Index(fields=['course', 'language', 'homework_accepted']),
]
|
1631300
|
import datetime
import os
from fastfec import FastFEC
def test_filing_1550126_line_callback(filing_1550126):
"""
Test the FastFEC line-by-line callback functionality, in addition
to the package's ability to parse summary, contribution and
disbursement rows.
"""
with open(filing_1550126, "rb") as filing:
with FastFEC() as fastfec:
parsed = list(fastfec.parse(filing))
assert len(parsed) == 25
# Test the summary data parse
summary_form, summary_data = parsed[1]
assert len(summary_data) == 93
assert summary_form == "F3A"
assert summary_data["filer_committee_id_number"] == "C00772335"
assert summary_data["committee_name"] == "<NAME> for US Congress"
assert summary_data["election_date"] == ""
assert summary_data["coverage_from_date"] == datetime.date(2021, 7, 1)
assert summary_data["col_a_total_contributions_no_loans"] == 4239.0
assert summary_data["col_b_total_disbursements"] == 9229.09
# Test the contribution data parse
contribution_form, contribution_data = parsed[2]
assert len(contribution_data) == 45
assert contribution_form == "SA11AI"
assert contribution_data["filer_committee_id_number"] == "C00772335"
assert contribution_data["transaction_id"] == "SA11AI.4265"
assert contribution_data["entity_type"] == "IND"
assert contribution_data["contributor_last_name"] == "barbariniweil"
assert contribution_data["contributor_first_name"] == "dale"
assert contribution_data["contribution_date"] == datetime.date(2021, 8, 5)
assert contribution_data["contribution_amount"] == 1000.0
assert contribution_data["reference_code"] is None
# Test the disbursement data parse
disbursement_form, disbursement_data = parsed[8]
assert len(disbursement_data) == 44
assert disbursement_form == "SB17"
assert disbursement_data["expenditure_date"] == datetime.date(2021, 9, 10)
assert disbursement_data["expenditure_amount"] == 2000.0
assert disbursement_data["entity_type"] == "IND"
assert disbursement_data["payee_state"] == "FL"
assert disbursement_data["election_code"] == "P2022"
assert disbursement_data["payee_street_1"] == "1111 Lake Ter"
def test_filing_1550548_parse_as_files(tmpdir, filing_1550548):
"""
Test that the FastFEC `parse_as_files` method outputs the correct files
and that they are the correct length.
"""
with open(filing_1550548, "rb") as filing:
with FastFEC() as fastfec:
fastfec.parse_as_files(filing, tmpdir)
        # sorted() rather than list.sort() -- .sort() returns None, so the
        # original comparison was always vacuously true.
        assert sorted(os.listdir(tmpdir)) == sorted([
            "SB23.csv",
            "header.csv",
            "SB21B.csv",
            "F3XA.csv",
            "SA11AI.csv",
        ])
with open(os.path.join(tmpdir, "header.csv")) as filing:
assert len(filing.readlines()) == 2
with open(os.path.join(tmpdir, "F3XA.csv")) as filing:
assert len(filing.readlines()) == 2
with open(os.path.join(tmpdir, "SA11AI.csv")) as filing:
assert len(filing.readlines()) == 77
with open(os.path.join(tmpdir, "SB21B.csv")) as filing:
assert len(filing.readlines()) == 8
with open(os.path.join(tmpdir, "SB23.csv")) as filing:
assert len(filing.readlines()) == 36
|
1631325
|
from tx2.calc import frequent_words_in_cluster, frequent_words_by_class_in_cluster
def test_frequent_words_in_cluster(dummy_df):
freq_words = frequent_words_in_cluster(dummy_df.text)
assert freq_words == [("row", 4), ("testing", 4), ("awesome", 2)]
def test_frequent_words_by_class_in_cluster(dummy_df, dummy_encodings, dummy_clusters):
cluster_text = dummy_df.text[dummy_clusters["0"]]
cluster_text_labels = dummy_df.target[dummy_clusters["0"]]
freq_words = frequent_words_in_cluster(cluster_text)
freq_words_by_class = frequent_words_by_class_in_cluster(
freq_words, dummy_encodings, cluster_text, cluster_text_labels
)
expected_output = {
"testing": {"total": 2, "0": 2, "1": 0},
"row": {"total": 2, "0": 2, "1": 0},
}
assert freq_words_by_class == expected_output
|
1631327
|
input = """
arc(a,b,2).
arc(a,c,3).
arc(b,d,3).
arc(c,d,2).
arc(d,a,2).
% maxint is deliberately set too large to detect wrong minima.
#maxint=20.
path(X,Y,C) :- arc(X,Y,C).
path(X,Y,C) :- arc(X,Z,C1), path(Z,Y,C2), C = C1 + C2.
minpath(X,Y,C) :- path(X,Y,C), C = #min{ C1: path(X,Y,C1) }.
"""
output = """
{arc(a,b,2), arc(a,c,3), arc(b,d,3), arc(c,d,2), arc(d,a,2), minpath(a,a,7), minpath(a,b,2), minpath(a,c,3), minpath(a,d,5), minpath(b,a,5), minpath(b,b,7), minpath(b,c,8), minpath(b,d,3), minpath(c,a,4), minpath(c,b,6), minpath(c,c,7), minpath(c,d,2), minpath(d,a,2), minpath(d,b,4), minpath(d,c,5), minpath(d,d,7), path(a,a,14), path(a,a,7), path(a,b,16), path(a,b,2), path(a,b,9), path(a,c,10), path(a,c,17), path(a,c,3), path(a,d,12), path(a,d,19), path(a,d,5), path(b,a,12), path(b,a,19), path(b,a,5), path(b,b,14), path(b,b,7), path(b,c,15), path(b,c,8), path(b,d,10), path(b,d,17), path(b,d,3), path(c,a,11), path(c,a,18), path(c,a,4), path(c,b,13), path(c,b,20), path(c,b,6), path(c,c,14), path(c,c,7), path(c,d,16), path(c,d,2), path(c,d,9), path(d,a,16), path(d,a,2), path(d,a,9), path(d,b,11), path(d,b,18), path(d,b,4), path(d,c,12), path(d,c,19), path(d,c,5), path(d,d,14), path(d,d,7)}
"""
|
1631383
|
def processing(mode, text, key):
key_ints = [ord(i) for i in key]
text_ints = [ord(i) for i in text]
finished_text = ""
for i in range(len(text_ints)):
adder = key_ints[i % len(key)]
if mode == 1:
adder *= -1
char = (text_ints[i] - 32 + adder) % 95
finished_text += chr(char + 32)
return finished_text
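# Round-trip sketch (added for illustration): with the same key, mode 1
# undoes mode 0, since the shifts cancel mod 95 for printable ASCII input.
assert processing(1, processing(0, "Hello, World!", "key"), "key") == "Hello, World!"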
def assembly(mode):
text = str(input("[+] Enter your text - "))
key = str(input("[+] Enter your key - "))
finished_text = processing(mode, text, key)
print("\n »» The result by Vigenere algorithm. ««")
print(finished_text)
def main():
print("[x] Vigenere cryptography algorithm. [x]")
print(" • 0. Encryption mode.\n • 1. Decryption mode.")
mode = int(input("[?] Select program mode - "))
assembly(mode)
if __name__ == '__main__':
main()
|
1631400
|
import notebookgenerator
def init():
print('Generating notebooks.')
notebookgenerator.main()
def clean(app, *args):
print('Remove rst notebook files.')
#notebookgenerator.clean()
def setup(app):
init()
app.connect('build-finished', clean)
|
1631401
|
from . import fcn8_vgg16  # referenced below; missing from the original imports
from . import unet2d
from . import segmentation_models_pytorch as smp
def get_base(base_name, exp_dict, n_classes):
    if base_name == "fcn8_vgg16":
        base = fcn8_vgg16.FCN8VGG16(n_classes=n_classes)
    elif base_name == "unet2d":
        base = unet2d.UNet(n_channels=1, n_classes=n_classes)
    elif base_name == 'pspnet':
        base = smp.PSPNet(encoder_name=exp_dict['model']['encoder'],
                          in_channels=exp_dict['num_channels'],
                          encoder_weights=None,  # ignore error. it still works.
                          classes=n_classes)
    else:
        raise ValueError('unknown base_name: {}'.format(base_name))
    return base
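# Hedged usage sketch (added; the exp_dict shape is inferred from the
# lookups above, not documented elsewhere in this file):
#   exp_dict = {'model': {'base': 'pspnet', 'encoder': 'resnet34'},
#               'num_channels': 1}
#   base = get_base('pspnet', exp_dict, n_classes=2)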
|
1631408
|
import pandas as pd
import numpy as np
import scipy
from scipy.stats import laplace

def estimate_precision(max, min):
    diff = 1 / max
    precision = (diff - min) / (max - min)
    return precision

def match_vals(row, cumsum, precision):
    cdf = float(cumsum[cumsum.index == row['relative_time']])
    # cdf plus
    val_plus = row['relative_time'] + precision
    if val_plus >= 1:
        cdf_plus = 1.0
    else:
        cdf_plus = float(cumsum[cumsum.index <= val_plus].max())
    # cdf minus
    val_minus = row['relative_time'] - precision
    if val_minus < 0:
        cdf_minus = 0.0
    else:
        cdf_minus = float(cumsum[cumsum.index <= val_minus].max())
    return [cdf, cdf_plus, cdf_minus]

def epsilon_vectorized_internal(data, delta):
    if data.p_k + delta >= 1:
        # in case p_k + delta >= 1, fall back to a small constant epsilon
        return 0.1
    # r = 1 because of normalization
    return -np.log(data.p_k / (1.0 - data.p_k) * (1.0 / (delta + data.p_k) - 1.0))

def add_noise(data, max, min):
    sens_time = 1
    noise = laplace.rvs(loc=0, scale=sens_time / data['eps'], size=1)[0]
    if noise + data['relative_time_original'] < 0:
        noise = -data['relative_time_original']
    # noise = abs(noise)
    noise = noise * (max - min) + min
    return noise

def estimate_epsilon_risk_for_start_timestamp(data, delta):
    start_time = data[data.prev_state == 0]
    min_time = start_time['time:timestamp'].min()
    start_time['time_diff'] = start_time['time:timestamp'] - min_time
    # Days resolution
    start_time['relative_time'] = start_time['time_diff'].astype('timedelta64[D]')
    result = estimate_epsilon(start_time.relative_time, delta)
    # data['eps_days'] = result['eps']
    # data['time_diff_days'] = data['time_diff'] + pd.to_timedelta(result['noise'], unit='D')
    # df[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']]
    # data['eps'] = result['eps']
    # data['p_k'] = result['p_k']
    # data['relative_time_original'] = result['relative_time_original']
    # data['relative_time_max'] = result['relative_time_max']
    # data['relative_time_min'] = result['relative_time_min']
    data.update(result[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']])
    # data.iloc[result.index, ['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']] = result[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']]
    return data

def estimate_epsilon(vals, delta):
    # normalization
    min = vals.min()
    max = vals.max()
    precision = estimate_precision(max, min)
    norm_vals = (vals - min) / (max - min)
    norm_vals = norm_vals.round(5)
    norm_vals = norm_vals.sort_values()
    x, counts = np.unique(norm_vals, return_counts=True)
    counts = pd.Series(data=counts, index=x)
    cumsum = counts.cumsum()
    cumsum = cumsum / cumsum.iloc[-1]
    df = norm_vals.to_frame()
    df.columns = ['relative_time']
    temp = df.apply(match_vals, cumsum=cumsum, precision=precision, axis=1)
    temp = temp.to_frame()
    t2 = pd.DataFrame.from_records(temp[0])
    t2.index = temp.index
    df['cdf'] = t2[0]
    df['cdf_plus'] = t2[1]
    df['cdf_minus'] = t2[2]
    df['p_k'] = df['cdf_plus'] - df['cdf_minus']
    df['eps'] = df.apply(epsilon_vectorized_internal, delta=delta, axis=1)
    df['relative_time_original'] = df['relative_time'] * (max - min) + min
    df['noise'] = df.apply(add_noise, max=max, min=min, axis=1)
    df['time_diff'] = df['noise'] + df['relative_time_original']
    df['relative_time_max'] = max
    df['relative_time_min'] = min
    return df[['eps', 'p_k', 'relative_time_original', 'relative_time_max', 'relative_time_min']]
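# Minimal usage sketch (added; synthetic values, and it assumes the older
# pandas API this module was written against):
if __name__ == '__main__':
    series = pd.Series([0.0, 1.0, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0], name='relative_time')
    print(estimate_epsilon(series, delta=0.1))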
|
1631419
|
from talon.voice import Context
from . import browser
from ...misc import audio
context = Context(
"netflix", func=browser.url_matches_func("https://www.netflix.com/.*")
)
context.keymap(
{"full screen": [lambda m: audio.set_volume(100), browser.send_to_page("f")]}
)
|
1631423
|
import jax
from jax.config import config; config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jax.experimental import loops
import psi4
from .energy_utils import nuclear_repulsion, partial_tei_transformation, tei_transformation, cartesian_product
from .hartree_fock import restricted_hartree_fock
def restricted_mp2(geom, basis_name, xyz_path, nuclear_charges, charge, options, deriv_order=0):
nelectrons = int(jnp.sum(nuclear_charges)) - charge
ndocc = nelectrons // 2
E_scf, C, eps, G = restricted_hartree_fock(geom, basis_name, xyz_path, nuclear_charges, charge, options, deriv_order=deriv_order, return_aux_data=True)
nvirt = G.shape[0] - ndocc
nbf = G.shape[0]
G = partial_tei_transformation(G, C[:,:ndocc],C[:,ndocc:],C[:,:ndocc],C[:,ndocc:])
# Create tensor dim (occ,vir,occ,vir) of all possible orbital energy denominators
eps_occ, eps_vir = eps[:ndocc], eps[ndocc:]
e_denom = jnp.reciprocal(eps_occ.reshape(-1, 1, 1, 1) - eps_vir.reshape(-1, 1, 1) + eps_occ.reshape(-1, 1) - eps_vir)
# Tensor contraction algo
#mp2_correlation = jnp.einsum('iajb,iajb,iajb->', G, G, e_denom) +\
# jnp.einsum('iajb,iajb,iajb->', G - jnp.transpose(G, (0,3,2,1)), G, e_denom)
#mp2_total_energy = mp2_correlation + E_scf
#return E_scf + mp2_correlation
# Loop algo (lower memory, but tei transform is the memory bottleneck)
# Create all combinations of four loop variables to make XLA compilation easier
indices = cartesian_product(jnp.arange(ndocc),jnp.arange(ndocc),jnp.arange(nvirt),jnp.arange(nvirt))
with loops.Scope() as s:
s.mp2_correlation = 0.
for idx in s.range(indices.shape[0]):
i,j,a,b = indices[idx]
s.mp2_correlation += G[i, a, j, b] * (2 * G[i, a, j, b] - G[i, b, j, a]) * e_denom[i,a,j,b]
return E_scf + s.mp2_correlation
|
1631472
|
from tests.common import TestCase
import torch
from torch.autograd import Variable
import torchvision.models
from dsnt.model import ResNetHumanPoseModel
class TestResNetHumanPoseModel(TestCase):
def test_truncate(self):
resnet = torchvision.models.resnet18()
model = ResNetHumanPoseModel(resnet, n_chans=16, truncate=1)
model.cuda()
sz = model.image_specs.size
self.assertEqual(sz, 224)
out_var = model(Variable(torch.randn(1, 3, sz, sz).cuda()))
self.assertEqual(out_var.size(), torch.Size([1, 16, 2]))
hm = model.heatmaps.data
self.assertEqual(hm.size(), torch.Size([1, 16, 14, 14]))
def test_dilate(self):
resnet = torchvision.models.resnet18()
model = ResNetHumanPoseModel(resnet, n_chans=16, dilate=2)
model.cuda()
sz = model.image_specs.size
self.assertEqual(sz, 224)
out_var = model(Variable(torch.randn(1, 3, sz, sz).cuda()))
self.assertEqual(out_var.size(), torch.Size([1, 16, 2]))
hm = model.heatmaps.data
self.assertEqual(hm.size(), torch.Size([1, 16, 28, 28]))
def test_training_step(self):
Tensor = torch.cuda.FloatTensor
resnet = torchvision.models.resnet18()
model = ResNetHumanPoseModel(resnet, n_chans=16, output_strat='dsnt', reg='js')
model.type(Tensor)
old_params = []
for param in model.parameters():
old_params.append(param.data.clone())
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
in_var = Variable(Tensor(1, 3, 224, 224).uniform_(0, 1), requires_grad=False)
target_var = Variable(Tensor(1, 16, 2).uniform_(-1, 1), requires_grad=False)
out_var = model(in_var)
loss = model.forward_loss(out_var, target_var, mask_var=None)
loss.backward()
optimizer.step()
# Check that all parameter groups were updated
for param_var, old_param in zip(model.parameters(), old_params):
self.assertNotEqual(param_var.data, old_param)
|
1631496
|
import discord
from discord.ext import commands
from discord.utils import get
import datetime
from discord import Member
class Joinlog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_member_join(self, member):
        used_invite = None
        # Assumed: self.bot.db is a sqlite-style wrapper; the guild id
        # parameter for the placeholder was missing in the original.
        logch = self.bot.db.execute("SELECT logging_join FROM guild_settings WHERE id = ?", (member.guild.id,))
        channel = discord.utils.get(member.guild.text_channels, name="logs")
        vars = {
            '{user.mention}': member.mention,
            '{user}': str(member),
            '{user.name}': member.name,
        }
        if logch and channel:
            embed = discord.Embed(title="Member Joined", url="https://tenor.com/view/penguin-hello-hi-hey-there-cutie-gif-3950966")
            embed.set_author(name=f"{member}", icon_url=str(
                member.avatar_url_as(static_format='png', size=2048)))
            embed.add_field(name='Account Created',
                            value=f'{datetime.datetime.utcnow() - member.created_at} ago',
                            inline=False)
            if used_invite:
                embed.add_field(name="Invite used:", value=used_invite, inline=False)
            embed.set_footer(text=f"Member Count: {len(member.guild.members)}")
            try:
                await channel.send(embed=embed)
            except Exception:
                pass
def setup(bot):
    bot.add_cog(Joinlog(bot))
bot.logging.info("Event Loaded Joinlog!")
|
1631511
|
from __future__ import with_statement
import itertools
import functools
import logging
import os, sys, traceback
import threading
import time
log = logging.getLogger('util.primitives.synch')
sentinel = object()  # unique marker for "no ids recorded yet"; was undefined
def lock(f):
@functools.wraps(f)
def wrapper1(instance, *args, **kw):
if not hasattr(instance, '_lock'):
try:
instance._lock = threading.RLock()
except AttributeError:
raise NotImplementedError, '%s needs a _lock slot' % instance.__class__.__name__
with instance._lock:
val = f(instance, *args, **kw)
return val
return wrapper1
class RepeatCheck(object):
    '''
    A callable object that returns True if you call it with the same
    object, or the same list of objects, as on the previous call.
    '''
def __init__(self, idfunc = None):
self.ids = sentinel
if idfunc is None: idfunc = id
self.id = idfunc
def __call__(self, *x):
if x == tuple():
# clear
self.ids = sentinel
return
elif len(x) != 1:
raise TypeError('takes one argument')
try:
newids = [self.id(a) for a in x]
except TypeError:
newids = [self.id(a)]
        changed = newids != self.ids
        self.ids = newids
        # True on a repeat (unchanged ids), matching the class docstring
        # and the repeat_guard decorator below.
        return not changed
def repeat_guard(func):
    'Useful for observer callbacks to eliminate redundant updates.'
guard = RepeatCheck()
def wrapper(src, attr, old, new):
if guard(src): return
return func(src, attr, old, new)
return wrapper
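# Illustrative behaviour (added): True only when the same object repeats.
#   rc = RepeatCheck()
#   x = object()
#   rc(x)         # False -- first time x is seen
#   rc(x)         # True  -- x repeated
#   rc(object())  # False -- a different object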
class HangingThreadDaemon(threading.Thread):
'''
Create one, and start() it when you are closing the program.
If the program is not exiting because of non-daemon Threads
sticking around, it will tell you which ones are still running.
'''
ids = itertools.count()
def __init__(self, wait = 3, sysexit = False):
threading.Thread.__init__(self, name="HangingThreadDaemon %d" %
self.ids.next())
self.wait = wait
self.sysexit = sysexit
# the presence of this thread should not prevent normal program shutdown
self.setDaemon(True)
def run(self):
time.sleep(self.wait)
threads = list(threading.enumerate())
if threads:
print 'Remaining non-daemon threads:'
for thread in threads:
if not thread.isDaemon():
print ' ', thread
collect_garbage_and_report()
if self.sysexit:
try:
import common.commandline as cc
cc.where()
except Exception:
traceback.print_exc()
print >>sys.stderr, 'forcing shutdown...'
os._exit(1)
def collect_garbage_and_report():
import gc
garbage_count = gc.collect()
if garbage_count > 0:
log.info("Garbage collected. " + str(garbage_count) + " unreachable objects")
if garbage_count:
log.info("Garbage left (only first 20 listed): %r", gc.garbage[:20])
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
|
1631538
|
import datetime
import pytest
from eth_utils import to_wei
from web3.contract import Contract
@pytest.fixture
def release_agent(chain, team_multisig, token) -> Contract:
"""Create a simple release agent (useful for testing)."""
args = [token.address]
tx = {
"from": team_multisig
}
contract, hash = chain.provider.deploy_contract('SimpleReleaseAgent', deploy_args=args, deploy_transaction=tx)
return contract
@pytest.fixture
def released_token(chain, team_multisig, token, release_agent, customer) -> Contract:
"""Create a Crowdsale token where transfer restrictions have been lifted."""
token.transact({"from": team_multisig}).setReleaseAgent(release_agent.address)
release_agent.transact({"from": team_multisig}).release()
# Make sure customer 1 has some token balance
token.transact({"from": team_multisig}).transfer(customer, 10000)
return token
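# Hedged follow-up sketch (hypothetical test, not in the original module),
# using only the fixtures defined above: after release, transfers succeed.
#   def test_transfer_after_release(released_token, customer, team_multisig):
#       released_token.transact({"from": customer}).transfer(team_multisig, 1000)
#       assert released_token.call().balanceOf(customer) == 9000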
|
1631649
|
from torch._six import container_abcs
from itertools import repeat
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
__all__ = ['InducedNormLinear', 'InducedNormConv2d']
class InducedNormLinear(nn.Module):
def __init__(
self, in_features, out_features, bias=True, coeff=0.97, domain=2, codomain=2, n_iterations=None, atol=None,
rtol=None, zero_init=False, **unused_kwargs
):
del unused_kwargs
super(InducedNormLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.coeff = coeff
self.n_iterations = n_iterations
self.atol = atol
self.rtol = rtol
self.domain = domain
self.codomain = codomain
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters(zero_init)
with torch.no_grad():
domain, codomain = self.compute_domain_codomain()
h, w = self.weight.shape
self.register_buffer('scale', torch.tensor(0.))
self.register_buffer('u', normalize_u(self.weight.new_empty(h).normal_(0, 1), codomain))
self.register_buffer('v', normalize_v(self.weight.new_empty(w).normal_(0, 1), domain))
# Try different random seeds to find the best u and v.
with torch.no_grad():
self.compute_weight(True, n_iterations=200, atol=None, rtol=None)
best_scale = self.scale.clone()
best_u, best_v = self.u.clone(), self.v.clone()
if not (domain == 2 and codomain == 2):
for _ in range(10):
self.register_buffer('u', normalize_u(self.weight.new_empty(h).normal_(0, 1), codomain))
self.register_buffer('v', normalize_v(self.weight.new_empty(w).normal_(0, 1), domain))
self.compute_weight(True, n_iterations=200)
if self.scale > best_scale:
best_u, best_v = self.u.clone(), self.v.clone()
self.u.copy_(best_u)
self.v.copy_(best_v)
def reset_parameters(self, zero_init=False):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if zero_init:
# normalize cannot handle zero weight in some cases.
self.weight.data.div_(1000)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def compute_domain_codomain(self):
if torch.is_tensor(self.domain):
domain = asym_squash(self.domain)
codomain = asym_squash(self.codomain)
else:
domain, codomain = self.domain, self.codomain
return domain, codomain
def compute_one_iter(self):
domain, codomain = self.compute_domain_codomain()
u = self.u.detach()
v = self.v.detach()
weight = self.weight.detach()
u = normalize_u(torch.mv(weight, v), codomain)
v = normalize_v(torch.mv(weight.t(), u), domain)
return torch.dot(u, torch.mv(weight, v))
def compute_weight(self, update=True, n_iterations=None, atol=None, rtol=None):
u = self.u
v = self.v
weight = self.weight
if update:
n_iterations = self.n_iterations if n_iterations is None else n_iterations
atol = self.atol if atol is None else atol
            rtol = self.rtol if rtol is None else rtol
if n_iterations is None and (atol is None or rtol is None):
raise ValueError('Need one of n_iteration or (atol, rtol).')
max_itrs = 200
if n_iterations is not None:
max_itrs = n_iterations
with torch.no_grad():
domain, codomain = self.compute_domain_codomain()
for _ in range(max_itrs):
# Algorithm from http://www.qetlab.com/InducedMatrixNorm.
if n_iterations is None and atol is not None and rtol is not None:
old_v = v.clone()
old_u = u.clone()
u = normalize_u(torch.mv(weight, v), codomain, out=u)
v = normalize_v(torch.mv(weight.t(), u), domain, out=v)
if n_iterations is None and atol is not None and rtol is not None:
err_u = torch.norm(u - old_u) / (u.nelement()**0.5)
err_v = torch.norm(v - old_v) / (v.nelement()**0.5)
tol_u = atol + rtol * torch.max(u)
tol_v = atol + rtol * torch.max(v)
if err_u < tol_u and err_v < tol_v:
break
self.v.copy_(v)
self.u.copy_(u)
u = u.clone()
v = v.clone()
sigma = torch.dot(u, torch.mv(weight, v))
with torch.no_grad():
self.scale.copy_(sigma)
# soft normalization: only when sigma larger than coeff
factor = torch.max(torch.ones(1).to(weight.device), sigma / self.coeff)
weight = weight / factor
return weight
def forward(self, input):
weight = self.compute_weight(update=False)
return F.linear(input, weight, self.bias)
def extra_repr(self):
domain, codomain = self.compute_domain_codomain()
return (
'in_features={}, out_features={}, bias={}'
', coeff={}, domain={:.2f}, codomain={:.2f}, n_iters={}, atol={}, rtol={}, learnable_ord={}'.format(
self.in_features, self.out_features, self.bias is not None, self.coeff, domain, codomain,
self.n_iterations, self.atol, self.rtol, torch.is_tensor(self.domain)
)
)
class InducedNormConv2d(nn.Module):
def __init__(
self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=2, codomain=2,
n_iterations=None, atol=None, rtol=None, **unused_kwargs
):
del unused_kwargs
super(InducedNormConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.coeff = coeff
self.n_iterations = n_iterations
self.domain = domain
self.codomain = codomain
self.atol = atol
self.rtol = rtol
self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
self.register_buffer('initialized', torch.tensor(0))
self.register_buffer('spatial_dims', torch.tensor([1., 1.]))
self.register_buffer('scale', torch.tensor(0.))
self.register_buffer('u', self.weight.new_empty(self.out_channels))
self.register_buffer('v', self.weight.new_empty(self.in_channels))
def compute_domain_codomain(self):
if torch.is_tensor(self.domain):
domain = asym_squash(self.domain)
codomain = asym_squash(self.codomain)
else:
domain, codomain = self.domain, self.codomain
return domain, codomain
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def _initialize_u_v(self):
with torch.no_grad():
domain, codomain = self.compute_domain_codomain()
if self.kernel_size == (1, 1):
self.u.resize_(self.out_channels).normal_(0, 1)
self.u.copy_(normalize_u(self.u, codomain))
self.v.resize_(self.in_channels).normal_(0, 1)
self.v.copy_(normalize_v(self.v, domain))
else:
c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
with torch.no_grad():
num_input_dim = c * h * w
self.v.resize_(num_input_dim).normal_(0, 1)
self.v.copy_(normalize_v(self.v, domain))
# forward call to infer the shape
u = F.conv2d(
self.v.view(1, c, h, w), self.weight, stride=self.stride, padding=self.padding, bias=None
)
num_output_dim = u.shape[0] * u.shape[1] * u.shape[2] * u.shape[3]
# overwrite u with random init
self.u.resize_(num_output_dim).normal_(0, 1)
self.u.copy_(normalize_u(self.u, codomain))
self.initialized.fill_(1)
# Try different random seeds to find the best u and v.
self.compute_weight(True)
best_scale = self.scale.clone()
best_u, best_v = self.u.clone(), self.v.clone()
if not (domain == 2 and codomain == 2):
for _ in range(10):
if self.kernel_size == (1, 1):
self.u.copy_(normalize_u(self.weight.new_empty(self.out_channels).normal_(0, 1), codomain))
self.v.copy_(normalize_v(self.weight.new_empty(self.in_channels).normal_(0, 1), domain))
else:
self.u.copy_(normalize_u(torch.randn(num_output_dim).to(self.weight), codomain))
self.v.copy_(normalize_v(torch.randn(num_input_dim).to(self.weight), domain))
self.compute_weight(True, n_iterations=200)
if self.scale > best_scale:
best_u, best_v = self.u.clone(), self.v.clone()
self.u.copy_(best_u)
self.v.copy_(best_v)
# These two lines are important, see https://pytorch.org/docs/master/_modules/torch/nn/utils/spectral_norm.html#spectral_norm
self.u = self.u.clone(memory_format=torch.contiguous_format)
self.v = self.v.clone(memory_format=torch.contiguous_format)
def compute_one_iter(self):
if not self.initialized:
raise ValueError('Layer needs to be initialized first.')
domain, codomain = self.compute_domain_codomain()
if self.kernel_size == (1, 1):
u = self.u.detach()
v = self.v.detach()
weight = self.weight.detach().view(self.out_channels, self.in_channels)
u = normalize_u(torch.mv(weight, v), codomain)
v = normalize_v(torch.mv(weight.t(), u), domain)
return torch.dot(u, torch.mv(weight, v))
else:
u = self.u.detach()
v = self.v.detach()
weight = self.weight.detach()
c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
out_shape = u_s.shape
u = normalize_u(u_s.view(-1), codomain)
v_s = F.conv_transpose2d(
u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0
)
v = normalize_v(v_s.view(-1), domain)
weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
return torch.dot(u.view(-1), weight_v.view(-1))
def compute_weight(self, update=True, n_iterations=None, atol=None, rtol=None):
if not self.initialized:
self._initialize_u_v()
if self.kernel_size == (1, 1):
return self._compute_weight_1x1(update, n_iterations, atol, rtol)
else:
return self._compute_weight_kxk(update, n_iterations, atol, rtol)
def _compute_weight_1x1(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = self.n_iterations if n_iterations is None else n_iterations
atol = self.atol if atol is None else atol
        rtol = self.rtol if rtol is None else rtol
if n_iterations is None and (atol is None or rtol is None):
raise ValueError('Need one of n_iteration or (atol, rtol).')
max_itrs = 200
if n_iterations is not None:
max_itrs = n_iterations
u = self.u
v = self.v
weight = self.weight.view(self.out_channels, self.in_channels)
if update:
with torch.no_grad():
domain, codomain = self.compute_domain_codomain()
itrs_used = 0
for _ in range(max_itrs):
old_v = v.clone()
old_u = u.clone()
u = normalize_u(torch.mv(weight, v), codomain, out=u)
v = normalize_v(torch.mv(weight.t(), u), domain, out=v)
itrs_used = itrs_used + 1
if n_iterations is None and atol is not None and rtol is not None:
err_u = torch.norm(u - old_u) / (u.nelement()**0.5)
err_v = torch.norm(v - old_v) / (v.nelement()**0.5)
tol_u = atol + rtol * torch.max(u)
tol_v = atol + rtol * torch.max(v)
if err_u < tol_u and err_v < tol_v:
break
if itrs_used > 0:
if domain != 1 and domain != 2:
self.v.copy_(v)
if codomain != 2 and codomain != float('inf'):
self.u.copy_(u)
# These two lines are important, see https://pytorch.org/docs/master/_modules/torch/nn/utils/spectral_norm.html#spectral_norm
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
sigma = torch.dot(u, torch.mv(weight, v))
with torch.no_grad():
self.scale.copy_(sigma)
# soft normalization: only when sigma larger than coeff
factor = torch.max(torch.ones(1).to(weight.device), sigma / self.coeff)
weight = weight / factor
return weight.view(self.out_channels, self.in_channels, 1, 1)
def _compute_weight_kxk(self, update=True, n_iterations=None, atol=None, rtol=None):
n_iterations = self.n_iterations if n_iterations is None else n_iterations
atol = self.atol if atol is None else atol
        rtol = self.rtol if rtol is None else rtol
if n_iterations is None and (atol is None or rtol is None):
raise ValueError('Need one of n_iteration or (atol, rtol).')
max_itrs = 200
if n_iterations is not None:
max_itrs = n_iterations
u = self.u
v = self.v
weight = self.weight
c, h, w = self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item())
if update:
with torch.no_grad():
domain, codomain = self.compute_domain_codomain()
itrs_used = 0
for _ in range(max_itrs):
old_u = u.clone()
old_v = v.clone()
u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
out_shape = u_s.shape
u = normalize_u(u_s.view(-1), codomain, out=u)
v_s = F.conv_transpose2d(
u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0
)
v = normalize_v(v_s.view(-1), domain, out=v)
itrs_used = itrs_used + 1
if n_iterations is None and atol is not None and rtol is not None:
err_u = torch.norm(u - old_u) / (u.nelement()**0.5)
err_v = torch.norm(v - old_v) / (v.nelement()**0.5)
tol_u = atol + rtol * torch.max(u)
tol_v = atol + rtol * torch.max(v)
if err_u < tol_u and err_v < tol_v:
break
if itrs_used > 0:
if domain != 2:
self.v.copy_(v)
if codomain != 2:
self.u.copy_(u)
# These two lines are important, see https://pytorch.org/docs/master/_modules/torch/nn/utils/spectral_norm.html#spectral_norm
v = v.clone(memory_format=torch.contiguous_format)
u = u.clone(memory_format=torch.contiguous_format)
weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
weight_v = weight_v.view(-1)
sigma = torch.dot(u.view(-1), weight_v)
with torch.no_grad():
self.scale.copy_(sigma)
# soft normalization: only when sigma larger than coeff
factor = torch.max(torch.ones(1).to(weight.device), sigma / self.coeff)
weight = weight / factor
return weight
def forward(self, input):
if not self.initialized: self.spatial_dims.copy_(torch.tensor(input.shape[2:4]).to(self.spatial_dims))
weight = self.compute_weight(update=False)
return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1)
def extra_repr(self):
domain, codomain = self.compute_domain_codomain()
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}' ', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.bias is None:
s += ', bias=False'
s += ', coeff={}, domain={:.2f}, codomain={:.2f}, n_iters={}, atol={}, rtol={}, learnable_ord={}'.format(
self.coeff, domain, codomain, self.n_iterations, self.atol, self.rtol, torch.is_tensor(self.domain)
)
return s.format(**self.__dict__)
def projmax_(v):
"""Inplace argmax on absolute value."""
ind = torch.argmax(torch.abs(v))
v.zero_()
v[ind] = 1
return v
def normalize_v(v, domain, out=None):
if not torch.is_tensor(domain) and domain == 2:
v = F.normalize(v, p=2, dim=0, out=out)
elif domain == 1:
v = projmax_(v)
else:
vabs = torch.abs(v)
vph = v / vabs
vph[torch.isnan(vph)] = 1
vabs = vabs / torch.max(vabs)
vabs = vabs**(1 / (domain - 1))
v = vph * vabs / vector_norm(vabs, domain)
return v
def normalize_u(u, codomain, out=None):
if not torch.is_tensor(codomain) and codomain == 2:
u = F.normalize(u, p=2, dim=0, out=out)
elif codomain == float('inf'):
u = projmax_(u)
else:
uabs = torch.abs(u)
uph = u / uabs
uph[torch.isnan(uph)] = 1
uabs = uabs / torch.max(uabs)
uabs = uabs**(codomain - 1)
if codomain == 1:
u = uph * uabs / vector_norm(uabs, float('inf'))
else:
u = uph * uabs / vector_norm(uabs, codomain / (codomain - 1))
return u
def vector_norm(x, p):
x = x.view(-1)
return torch.sum(x**p)**(1 / p)
def leaky_elu(x, a=0.3):
return a * x + (1 - a) * F.elu(x)
def asym_squash(x):
return torch.tanh(-leaky_elu(-x + 0.5493061829986572)) * 2 + 3
# def asym_squash(x):
# return torch.tanh(x) / 2. + 2.
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
if __name__ == '__main__':
p = nn.Parameter(torch.tensor(2.1))
m = InducedNormConv2d(10, 2, 3, 1, 1, atol=1e-3, rtol=1e-3, domain=p, codomain=p)
W = m.compute_weight()
m.compute_one_iter().backward()
print(p.grad)
# m.weight.data.copy_(W)
# W = m.compute_weight().cpu().detach().numpy()
# import numpy as np
# print(
# '{} {} {}'.format(
# np.linalg.norm(W, ord=2, axis=(0, 1)),
# '>' if np.linalg.norm(W, ord=2, axis=(0, 1)) > m.scale else '<',
# m.scale,
# )
# )
|
1631668
|
from pyscalambda.formula import Formula
class Operator(Formula):
pass
class UnaryOperator(Operator):
def __init__(self, operator, value):
"""
:param operator: str
:param value: Formula
"""
super(UnaryOperator, self).__init__()
self.operator = operator
self.value = value
self.children = [self.value]
def _traverse(self):
yield '('
yield self.operator
for t in self.value._traverse():
yield t
yield ')'
class BinaryOperator(Operator):
def __init__(self, operator, left, right):
"""
:type operator: str
:type left: Formula
:type right: Formula
"""
super(BinaryOperator, self).__init__()
self.operator = operator
self.left = left
self.right = right
self.children = [self.left, self.right]
def _traverse(self):
yield '('
for t in self.left._traverse():
yield t
yield self.operator
for t in self.right._traverse():
yield t
yield ')'
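# Illustrative trace (added): with hypothetical leaves whose _traverse()
# yields a single token, BinaryOperator('+', one, UnaryOperator('-', two))
# traverses to the fully parenthesized stream
#   '(', '1', '+', '(', '-', '2', ')', ')'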
|
1631694
|
import unittest
from katas.kyu_6.dbftbs_djqifs import encryptor
class EncryptorTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(encryptor(13, ''), '')
def test_equals_2(self):
self.assertEqual(encryptor(13, 'Caesar Cipher'), 'Pnrfne Pvcure')
def test_equals_3(self):
self.assertEqual(encryptor(-5, 'Hello World!'), 'Czggj Rjmgy!')
def test_equals_4(self):
self.assertEqual(encryptor(27, '<NAME>'), 'Xippqj Hpmecfsh')
|
1631700
|
from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import STYLES, global_style
from goopylib.colours import Colour
from goopylib.util import GraphicsError
from tkinter import Text as tkText
from tkinter import WORD as tkWORD
from tkinter import END as tkEND
from tkinter import Frame as tkFrame
class MultilineEntry(GraphicsObject):
def __init__(self, p, line_length, style=None, fill=None, outline_width=None, relief=None, cursor="arrow",
font_size=None, font_style=None, font_colour=None, number_of_lines=2, align="center", tag=None,
bounds=None, select_colour=None, font_face=None, text=""):
self.widget = None
self.text = text
if style is None:
self.style = global_style
else:
self.style = style
if isinstance(fill, Colour):
self.fill = fill
elif fill in STYLES[self.style].keys():
self.fill = STYLES[self.style][fill]
else:
if "fill" in STYLES[self.style].keys():
self.fill = STYLES[self.style]["fill"]
else:
self.fill = STYLES["default"]["fill"]
if isinstance(select_colour, Colour):
self.select_colour = select_colour
elif select_colour in STYLES[self.style].keys():
self.select_colour = STYLES[self.style][select_colour]
else:
if "select colour" in STYLES[self.style].keys():
self.select_colour = STYLES[self.style]["select colour"]
else:
self.select_colour = STYLES["default"]["select colour"]
if isinstance(font_colour, Colour):
self.font_colour = font_colour
elif font_colour in STYLES[self.style].keys():
self.font_colour = STYLES[self.style][font_colour]
else:
if "font colour" in STYLES[self.style].keys():
self.font_colour = STYLES[self.style]["font colour"]
else:
self.font_colour = STYLES["default"]["font colour"]
if isinstance(outline_width, int):
self.outline_width = outline_width
elif outline_width in STYLES[self.style].keys():
self.outline_width = STYLES[self.style][outline_width]
else:
if "width" in STYLES[self.style].keys():
self.outline_width = STYLES[self.style]["entry width"]
else:
self.outline_width = STYLES["default"]["entry width"]
if font_style in STYLES[self.style].keys():
self.font_style = STYLES[self.style][font_style]
elif isinstance(font_style, str):
self.font_style = font_style
else:
if "font style" in STYLES[self.style].keys():
self.font_style = STYLES[self.style]["font style"]
else:
self.font_style = STYLES["default"]["font style"]
if font_face in STYLES[self.style].keys():
self.font = STYLES[self.style][font_face]
elif isinstance(font_face, str):
self.font = font_face
else:
if "font face" in STYLES[self.style].keys():
self.font = STYLES[self.style]["font face"]
else:
self.font = STYLES["default"]["font face"]
self.relief = relief
self.cursor = cursor
self.font_size = 0
self.initial_font_size = 0
self.set_size(font_size, True)
self.align = align
self.line_length = line_length
self.number_of_lines = number_of_lines
self.anchor = p.clone()
super().__init__((), tag=tag, bounds=bounds)
def _draw(self, canvas, options):
x, y = self.anchor
frm = tkFrame(canvas.master)
self.set_size(self.initial_font_size / canvas.trans.x_scale, False)
self.widget = tkText(frm, bg=self.fill, bd=self.outline_width, relief=self.relief, width=self.line_length,
cursor=self.cursor, exportselection=0, fg=self.font_colour, wrap=tkWORD,
font=(self.font, self.font_size, self.font_style), height=self.number_of_lines,
insertbackground=self.font_colour)
self.widget.pack()
self.set_text(self.text)
x, y = canvas.to_screen(x, y)
return canvas.create_window(x, y, window=frm)
def get_anchor(self):
return self.anchor
def set_size(self, font_size=None, _set_font_size=False):
if isinstance(font_size, int):
self.font_size = font_size
elif font_size in STYLES[self.style].keys():
self.font_size = STYLES[self.style][font_size]
else:
if "font size" in STYLES[self.style].keys():
self.font_size = STYLES[self.style]["font size"]
else:
self.font_size = STYLES["default"]["font size"]
if _set_font_size:
self.initial_font_size = round(self.font_size)
return self
def get_font_size(self):
return self.font_size
def get_font_face(self):
return self.font
def get_font_colour(self):
return self.font_colour
def get_font_style(self):
return self.font_style
def get_selection_colour(self):
return self.select_colour
def get_outline_width(self):
return self.outline_width
def get_fill(self):
return self.fill
def get_text(self):
if self.drawn and self.graphwin.is_open():
return self.widget.get("1.0", "end-1c")
else:
raise GraphicsError("\n\nGraphicsError: get_text() function for the Multiline Entry object can only be "
"used if the object is drawn and the window is open")
def set_fill(self, colour):
self.fill = colour
if self.widget:
self.widget.config(bg=colour)
self._update_layer()
return self
def set_face(self, font_face):
if font_face in STYLES[self.style].keys():
self.font = STYLES[self.style][font_face]
elif isinstance(font_face, str):
self.font = font_face
else:
if "font face" in STYLES[self.style].keys():
self.font = STYLES[self.style]["font face"]
else:
self.font = STYLES["default"]["font face"]
if self.widget:
self.widget.config(font=(self.font, self.font_size, self.font_style))
self._update_layer()
return self
def set_font_size(self, font_size, set_initial_font_size=True):
if isinstance(font_size, int):
self.font_size = font_size
elif font_size in STYLES[self.style].keys():
self.font_size = STYLES[self.style][font_size]
else:
if "font size" in STYLES[self.style].keys():
self.font_size = STYLES[self.style]["font size"]
else:
self.font_size = STYLES["default"]["font size"]
if set_initial_font_size:
self.initial_font_size = round(font_size)
if self.widget:
self.widget.config(font=(self.font, self.font_size, self.font_style))
self._update_layer()
return self
def set_font_style(self, font_style):
if font_style in STYLES[self.style].keys():
self.font_style = STYLES[self.style][font_style]
elif isinstance(font_style, str):
self.font_style = font_style
else:
if "font style" in STYLES[self.style].keys():
self.font_style = STYLES[self.style]["font style"]
else:
self.font_style = STYLES["default"]["font style"]
if self.widget:
self.widget.config(font=(self.font, self.font_size, self.font_style))
self._update_layer()
return self
def set_text_colour(self, colour):
self.font_colour = colour
if self.widget:
self.widget.config(fg=colour)
self._update_layer()
return self
def set_text(self, text):
self.text = str(text)
if self.widget:
self.widget.delete(1.0, tkEND)
self.widget.insert(tkEND, self.text)
return self
|
1631721
|
import sys
from urlparse import urlparse
from urllib import unquote
def compare(a, b):
return (
a.scheme.lower() == b.scheme.lower()
and a.hostname == b.hostname
and (a.port or 80) == (b.port or 80)
and unquote(a.path) == unquote(b.path)
)
test_cases = open(sys.argv[1], "r")
for test in test_cases:
print compare(*map(urlparse, test.strip().split(";")))
test_cases.close()
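# Input format sketch (illustrative): one pair of URLs per line, separated
# by a semicolon, e.g.
#   http://Example.com:80/a%2Fb;http://example.com/a/b
# which compare() reports as equivalent (case-insensitive scheme and host,
# default port 80, percent-decoded paths).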
|
1631722
|
from .core import *
from .flat_estimators import *
from .preprocessing import *
from .validation import *
from .verification import *
from .estimators import *
from .tests import NMME_IMD_ISMR
import warnings
from pathlib import Path
__version__ = "0.5.0"
__licence__ = "MIT"
__author__ = "<NAME> (<EMAIL>)"
|
1631734
|
import unittest
from kafka.tools.protocol.requests import ArgumentError
from kafka.tools.protocol.requests.list_offset_v2 import ListOffsetV2Request
class ListOffsetV2RequestTest(unittest.TestCase):
def test_process_arguments(self):
val = ListOffsetV2Request.process_arguments(['-1', 'false', 'topicname', '4,2', 'nexttopic', '9,3'])
assert val == {'replica_id': -1,
'isolation_level': 0,
'topics': [{'topic': 'topicname', 'partitions': [{'partition': 4, 'timestamp': 2}]},
{'topic': 'nexttopic', 'partitions': [{'partition': 9, 'timestamp': 3}]}]}
def test_process_arguments_nonnumeric(self):
self.assertRaises(ArgumentError, ListOffsetV2Request.process_arguments, ['foo', 'true', 'topicname', '4,2'])
def test_process_arguments_nonbool(self):
self.assertRaises(ArgumentError, ListOffsetV2Request.process_arguments, ['-1', 'notboolean', 'topicname', '4,2'])
def test_process_arguments_notenough(self):
self.assertRaises(ArgumentError, ListOffsetV2Request.process_arguments, ['-1', 'true', 'topicname'])
|
1631744
|
import asposewordscloud
import asposewordscloud.models.requests
from asposewordscloud import WordsApi
from shutil import copyfile

words_api = WordsApi(client_id='####-####-####-####-####', client_secret='##################')
file_name = 'test_doc.docx'
# Calls AcceptAllRevisionsOnline method for document in cloud.
request_document = open(file_name, 'rb')
request = asposewordscloud.models.requests.AcceptAllRevisionsOnlineRequest(document=request_document)
accept_all_revisions_online_result = words_api.accept_all_revisions_online(request)
copyfile(accept_all_revisions_online_result.document, 'test_result.docx')
|
1631848
|
import warnings
from django.test.utils import get_warnings_state, restore_warnings_state
from regressiontests.comment_tests.tests import CommentTestCase
class CommentFeedTests(CommentTestCase):
urls = 'regressiontests.comment_tests.urls'
feed_url = '/rss/comments/'
def test_feed(self):
response = self.client.get(self.feed_url)
self.assertEquals(response.status_code, 200)
self.assertEquals(response['Content-Type'], 'application/rss+xml')
self.assertContains(response, '<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">')
self.assertContains(response, '<title>example.com comments</title>')
self.assertContains(response, '<link>http://example.com/</link>')
self.assertContains(response, '</rss>')
class LegacyCommentFeedTests(CommentFeedTests):
feed_url = '/rss/legacy/comments/'
def setUp(self):
self._warnings_state = get_warnings_state()
warnings.filterwarnings("ignore", category=DeprecationWarning,
module='django.contrib.syndication.views')
warnings.filterwarnings("ignore", category=DeprecationWarning,
module='django.contrib.syndication.feeds')
def tearDown(self):
restore_warnings_state(self._warnings_state)
|
1631860
|
import dash_core_components as dcc
import dash_html_components as html
from app.dashes.components import areasDropdown, collapseExpand, enterprisesDropdown, sitesDropdown, tagsDropdown, timeRangePicker
layout = html.Div(children =
[
html.Div(id = "loadingDiv", className = "text-center", children = [html.Img(src = "/static/images/loading.gif")]),
dcc.Location(id = "url"),
dcc.Interval(id = "interval", n_intervals = 0, disabled = True),
html.Div(id = "dashDiv", style = {"display": "none"}, children =
[
timeRangePicker.layout(),
html.Br(),
collapseExpand.layout(
[
enterprisesDropdown.layout(),
sitesDropdown.layout(),
areasDropdown.layout(),
tagsDropdown.layout()
]),
html.Div(children = [dcc.Graph(id = "graph")])
])
])
|
1631864
|
from __future__ import division
import os,time,cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
def lrelu(x):
return tf.maximum(x*0.2,x)
def identity_initializer():
def _initializer(shape, dtype=tf.float32, partition_info=None):
array = np.zeros(shape, dtype=float)
cx, cy = shape[0]//2, shape[1]//2
for i in range(shape[2]):
array[cx, cy, i, i] = 1
return tf.constant(array, dtype=dtype)
return _initializer
def nm(x):
w0=tf.Variable(1.0,name='w0')
w1=tf.Variable(0.0,name='w1')
return w0*x+w1*slim.batch_norm(x)
def build(input):
net=slim.conv2d(input,32,[3,3],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv1')
net=slim.conv2d(net,32,[3,3],rate=2,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv2')
net=slim.conv2d(net,32,[3,3],rate=4,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv3')
net=slim.conv2d(net,32,[3,3],rate=8,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv4')
net=slim.conv2d(net,32,[3,3],rate=16,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv5')
net=slim.conv2d(net,32,[3,3],rate=32,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv6')
net=slim.conv2d(net,32,[3,3],rate=64,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv7')
net=slim.conv2d(net,32,[3,3],rate=128,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv8')
net=slim.conv2d(net,32,[3,3],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv9')
net=slim.conv2d(net,3,[1,1],rate=1,activation_fn=None,scope='g_conv_last')
return net
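# Note (added): the dilation rates grow exponentially (1, 2, 4, ..., 128),
# so this stack of 3x3 convolutions aggregates context over a receptive
# field of several hundred pixels while keeping the width fixed at 32
# channels, in the style of context aggregation networks.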
def prepare_data():
input_names=[]
hyper_names=[]
output_names=[]
finetune_input_names=[]
finetune_output_names=[]
finetune_hyper_names=[]
val_names=[]
val_hyper_names=[]
for dirname in ['MIT-Adobe_train_480p']:#training images at 480p
for i in range(1,2501):
input_names.append("../data/%s/%06d.png"%(dirname,i))
hyper_names.append("../original_results/L0_smoothing_parameterized/%s/%06d.txt"%(dirname,i))#a single parameter in the txt
output_names.append("../original_results/L0_smoothing_parameterized/%s/%06d.png"%(dirname,i))
for dirname in ['MIT-Adobe_train_random']:#test images at random resolutions
for i in range(1,2501):
finetune_input_names.append("../data/%s/%06d.png"%(dirname,i))
finetune_hyper_names.append("../original_results/L0_smoothing_parameterized/%s/%06d.txt" % (dirname, i))#a single parameter in the txt
finetune_output_names.append("../original_results/L0_smoothing_parameterized/%s/%06d.png"%(dirname,i))
for dirname in ['MIT-Adobe_test_1080p']:#test images at 1080p
for i in range(1,2501):
val_names.append("../data/%s/%06d.png"%(dirname,i))
val_hyper_names.append("../original_results/L0_smoothing_parameterized/%s/%06d.txt"%(dirname,i))#a single parameter in the txt
return input_names,hyper_names,output_names,val_names,val_hyper_names,finetune_input_names,finetune_output_names,finetune_hyper_names
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
os.environ['CUDA_VISIBLE_DEVICES']=str(np.argmax([int(x.split()[2]) for x in open('tmp','r').readlines()]))
os.system('rm tmp')
sess=tf.Session()
is_training=False
input_names,hyper_names,output_names,val_names,val_hyper_names,finetune_input_names,finetune_output_names,finetune_hyper_names=prepare_data()
input=tf.placeholder(tf.float32,shape=[None,None,None,4])
output=tf.placeholder(tf.float32,shape=[None,None,None,3])
network=build(input)
loss=tf.reduce_mean(tf.square(network-output))
opt=tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss,var_list=[var for var in tf.trainable_variables() if var.name.startswith('g_')])
saver=tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
ckpt=tf.train.get_checkpoint_state("result_parameterized")
if ckpt:
print('loaded '+ckpt.model_checkpoint_path)
saver.restore(sess,ckpt.model_checkpoint_path)
if is_training:
    all_loss=np.zeros(3000, dtype=float)#per-image squared error; 0 means not yet visited (renamed to avoid shadowing builtin all)
for epoch in range(1,181):
if epoch==1 or epoch==151:
input_images=[None]*len(input_names)
output_images=[None]*len(input_names)
hyper_parameters=[None]*len(input_names)
if os.path.isdir("result_parameterized/%04d"%epoch):
continue
cnt=0
for id in np.random.permutation(len(input_names)):
st=time.time()
if input_images[id] is None:
input_images[id]=np.expand_dims(np.float32(cv2.imread(input_names[id] if epoch<=150 else finetune_input_names[id],-1)),axis=0)/255.0
output_images[id]=np.expand_dims(np.float32(cv2.imread(output_names[id] if epoch<=150 else finetune_output_names[id],-1)),axis=0)/255.0
hyper_parameters[id]=np.tile(float(open(hyper_names[id] if epoch<=150 else finetune_hyper_names[id],'r').readline()),(1,input_images[id].shape[1],input_images[id].shape[2],1))
_,current=sess.run([opt,loss],feed_dict={input:np.concatenate((input_images[id],hyper_parameters[id]),axis=3),output:output_images[id]})
            all_loss[id]=current*255.0*255.0
            cnt+=1
            print("%d %d %.2f %.2f %.2f %s"%(epoch,cnt,current*255.0*255.0,np.mean(all_loss[np.where(all_loss)]),time.time()-st,os.getcwd().split('/')[-2]))
os.makedirs("result_parameterized/%04d"%epoch)
target=open("result_parameterized/%04d/score.txt"%epoch,'w')
        target.write("%f"%np.mean(all_loss[np.where(all_loss)]))
target.close()
saver.save(sess,"result_parameterized/model.ckpt")
saver.save(sess,"result_parameterized/%04d/model.ckpt"%epoch)
for ind in range(10):
input_image=np.expand_dims(np.float32(cv2.imread(val_names[ind],-1)),axis=0)/255.0
hyper_parameter=np.tile(float(open(val_hyper_names[ind],'r').readline()),(1,input_image.shape[1],input_image.shape[2],1))
st=time.time()
output_image=sess.run(network,feed_dict={input:np.concatenate((input_image,hyper_parameter),axis=3)})
print("%.3f"%(time.time()-st))
output_image=np.minimum(np.maximum(output_image,0.0),1.0)*255.0
cv2.imwrite("result_parameterized/%04d/%06d.png"%(epoch,ind+1),np.uint8(output_image[0,:,:,:]))
if not os.path.isdir("result_parameterized/video"):
os.makedirs("result_parameterized/video")
input_image=np.expand_dims(np.float32(cv2.imread(val_names[884],-1)),axis=0)/255.0
cnt=0
for k in range(2,201):
hyper_parameter=np.tile(k/200.0,(1,input_image.shape[1],input_image.shape[2],1))
output_image=sess.run(network,feed_dict={input:np.concatenate((input_image,hyper_parameter),axis=3)})
output_image=np.minimum(np.maximum(output_image,0.0),1.0)*255.0
cnt+=1
cv2.imwrite("result_parameterized/video/%06d.png"%cnt,np.uint8(output_image[0,:,:,:]))
exit()
if not os.path.isdir("result_parameterized/MIT-Adobe_test_1080p"):
os.makedirs("result_parameterized/MIT-Adobe_test_1080p")
for ind in range(len(val_names)):
input_image=np.expand_dims(np.float32(cv2.imread(val_names[ind],-1)),axis=0)/255.0
hyper_parameter=np.tile(float(open(val_hyper_names[ind], 'r').readline()),(1,input_image.shape[1],input_image.shape[2],1))
st=time.time()
output_image=sess.run(network,feed_dict={input:np.concatenate((input_image,hyper_parameter),axis=3)})
print("%.3f"%(time.time()-st))
output_image=np.minimum(np.maximum(output_image,0.0),1.0)*255.0
cv2.imwrite("result_parameterized/MIT-Adobe_test_1080p/%06d.png"%(ind+1),np.uint8(output_image[0,:,:,:]))
|
1631869
|
import logging
import subprocess
from typing import Mapping
from .action import Action
logger = logging.getLogger(__name__)
class Shell(Action):
"""
Executes a shell command
:param str cmd: The command to execute.
Example:
- (macOS) Open all pdfs on your desktop:
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: '~/Desktop'
filters:
- extension: pdf
actions:
- shell: 'open "{path}"'
"""
def __init__(self, cmd: str) -> None:
self.cmd = cmd
def pipeline(self, args: Mapping) -> None:
full_cmd = self.fill_template_tags(self.cmd, args)
self.print("$ %s" % full_cmd)
if not args["simulate"]:
# we use call instead of run to be compatible with python < 3.5
logger.info('Executing command "%s" in shell.', full_cmd)
subprocess.call(full_cmd, shell=True)
def __str__(self) -> str:
return 'Shell(cmd="%s")' % self.cmd
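# Minimal usage sketch (hypothetical values; assumes fill_template_tags,
# inherited from Action, substitutes "{path}" from the args mapping):
#
#   action = Shell('echo "{path}"')
#   action.pipeline({"path": "/tmp/report.pdf", "simulate": False})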
|
1631871
|
import numpy, os
import tensorflow as tf
from phi.tf.util import *
from phi.control.sequences import *
from phi.control.nets.force.forcenets import forcenet2d_3x_16 as forcenet
def ik(initial_density, target_density, trainable=False):
# conv = conv_function("model", "model/smokeik/sim_000301/checkpoint_00014802/model.ckpt") # 64x64
# conv = conv_function("model", "model/smokeik/sim_000430/checkpoint_00005747/model.ckpt") # 128x128
with tf.variable_scope("ik"):
vec_pot = ik_resnet(initial_density, target_density, trainable=trainable, training=False, reuse=tf.AUTO_REUSE)
with tf.variable_scope("curl"):
velocity = vec_pot.curl()
velocity = velocity.pad(0, 1, "symmetric")
return velocity, vec_pot
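# Predicting a vector potential and taking its curl guarantees the resulting
# velocity field is divergence-free by construction (div(curl(A)) = 0), so the
# network cannot output compressible flow.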
def ik_resnet(initial_density, target_density, training=False, trainable=True, reuse=tf.AUTO_REUSE):
y = tf.concat([initial_density, target_density], axis=-1)
y = tf.pad(y, [[0,0], [0,1+2+4+4], [0,1+2+4+4], [0,0]])
resolutions = [ y ]
for i in range(1,4): # 1/2, 1/4, 1/8
y = tf.layers.conv2d(resolutions[0], 16, 2, strides=2, activation=tf.nn.relu, padding="valid", name="downconv_%d"%i, trainable=trainable, reuse=reuse)
for j, nb_channels in enumerate([16, 16, 16]):
y = residual_block(y, nb_channels, name="downrb_%d_%d" % (i,j), training=training, trainable=trainable, reuse=reuse)
resolutions.insert(0, y)
y = tf.layers.conv2d(y, 16, 2, 1, activation=tf.nn.relu, padding="valid", name="centerconv_1", trainable=trainable, reuse=reuse)
for j, nb_channels in enumerate([16, 16, 16]):
y = residual_block(y, nb_channels, name="centerrb_%d" % j, training=training, trainable=trainable, reuse=reuse)
for i in range(1, len(resolutions)):
y = upsample2x(y)
res_in = resolutions[i][:, 0:y.shape[1], 0:y.shape[2], :]
y = tf.concat([y, res_in], axis=-1)
if i < len(resolutions)-1:
y = tf.pad(y, [[0, 0], [0, 1], [0, 1], [0, 0]], mode="SYMMETRIC")
y = tf.layers.conv2d(y, 16, 2, 1, activation=tf.nn.relu, padding="valid", name="upconv_%d" % i, trainable=trainable, reuse=reuse)
for j, nb_channels in enumerate([16, 16, 16]):
y = residual_block(y, nb_channels, 2, name="uprb_%d_%d" % (i, j), training=training, trainable=trainable, reuse=reuse)
else:
# Last iteration
y = tf.pad(y, [[0,0], [0,1], [0,1], [0,0]], mode="SYMMETRIC")
y = tf.layers.conv2d(y, 1, 2, 1, activation=None, padding="valid", name="upconv_%d"%i, trainable=trainable, reuse=reuse)
return StaggeredGrid(y) # This is the vector potential
def sm_resnet(initial_density, target_density, training=False, trainable=True, reuse=tf.AUTO_REUSE):
y = tf.concat([initial_density, target_density], axis=-1)
downres_padding = sum([2**i for i in range(5)])
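    # 1+2+4+8+16 = 31 pixels of right/bottom padding keep the feature maps
    # cleanly divisible through the five stride-2 downsampling convolutions.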
y = tf.pad(y, [[0,0], [0,downres_padding], [0,downres_padding], [0,0]])
resolutions = [ y ]
for i, filters in enumerate([4, 8, 16, 16, 16]):
y = tf.layers.conv2d(resolutions[0], filters, 2, strides=2, activation=tf.nn.relu, padding="valid", name="downconv_%d"%i, trainable=trainable, reuse=reuse)
for j in range(2):
y = residual_block(y, filters, name="downrb_%d_%d" % (i,j), training=training, trainable=trainable, reuse=reuse)
resolutions.insert(0, y)
for j, nb_channels in enumerate([16, 16, 16]):
y = residual_block(y, nb_channels, name="centerrb_%d" % j, training=training, trainable=trainable, reuse=reuse)
for i, resolution_data in enumerate(resolutions[1:]):
y = upsample2x(y)
res_in = resolution_data[:, 0:y.shape[1], 0:y.shape[2], :]
y = tf.concat([y, res_in], axis=-1)
if i < len(resolutions)-2:
y = tf.pad(y, [[0, 0], [0, 1], [0, 1], [0, 0]], mode="SYMMETRIC")
y = tf.layers.conv2d(y, 16, 2, 1, activation=tf.nn.relu, padding="valid", name="upconv_%d" % i, trainable=trainable, reuse=reuse)
for j, nb_channels in enumerate([16, 16]):
y = residual_block(y, nb_channels, 2, name="uprb_%d_%d" % (i, j), training=training, trainable=trainable, reuse=reuse)
else:
# Last iteration
y = tf.pad(y, [[0,0], [0,1], [0,1], [0,0]], mode="SYMMETRIC")
y = tf.layers.conv2d(y, 1, 2, 1, activation=None, padding="valid", name="upconv_%d"%i, trainable=trainable, reuse=reuse)
return y
class GraphBuilder(PartitioningExecutor):
def __init__(self, sim, true_densities, trainable_n, info, force_inference, ik_trainable=False):
self.sim = sim
self.true_densities = true_densities
self.trainable_n = trainable_n
self.info = info
self.force_inference = force_inference
self.ik_trainable = ik_trainable
def create_frame(self, index, step_count):
frame = PartitioningExecutor.create_frame(self, index, step_count)
frame.true = self.true_densities[index]
frame.pred = []
frame.real = None
frame.force = None
frame.prev_force = None
frame.jerk = None
frame.density = None
frame.velocity = None
frame.prev_velocity = None
if index == 0:
frame.pred = [ frame.true ]
frame.real = frame.true
frame.density = frame.true
elif index == step_count:
frame.pred = [ frame.true ]
frame.density = frame.true
return frame
def run_sm(self, n, initial_density, target_density):
with tf.variable_scope("sm%d" % n):
return sm_resnet(initial_density, target_density, trainable=n in self.trainable_n)
def run_ik(self, initial_density, target_density):
return ik(initial_density, target_density, trainable=self.ik_trainable)
def run_advect(self, velocity, density):
return velocity.advect(density)
def run_force(self, initial_velocity, target_velocity, initial_density, real_target_density):
if self.force_inference == "forcenet":
force, self.forcenet_path = forcenet(initial_density, initial_velocity, target_velocity)
else:
next_velocity = initial_velocity.advect(initial_velocity) + self.sim.buoyancy(real_target_density)
if self.force_inference == "exact":
next_velocity = self.sim.divergence_free(next_velocity)
force = target_velocity - next_velocity
return force
def run_jerk(self, initial_velocity, initial_force, next_force):
advected_force = initial_velocity.advect(initial_force)
return next_force - advected_force
def partition(self, n, initial_frame, target_frame, center_frame):
PartitioningExecutor.partition(self, n, initial_frame, target_frame, center_frame)
center_frame.density = self.run_sm(n, initial_frame.density, target_frame.density)
center_frame.pred.append(center_frame.density)
def execute_step(self, initial_frame, target_frame):
PartitioningExecutor.execute_step(self, initial_frame, target_frame)
initial_frame.velocity, initial_frame.vec_pot = target_frame.prev_velocity, _ = self.run_ik(initial_frame.real, target_frame.pred[-1])
target_frame.real = target_frame.density = self.run_advect(initial_frame.velocity, initial_frame.real)
if initial_frame.prev_velocity is not None:
initial_frame.force = self.run_force(initial_frame.prev_velocity, initial_frame.velocity, initial_frame.real, target_frame.real)
target_frame.prev_force = initial_frame.force
if initial_frame.prev_force is not None:
initial_frame.jerk = self.run_jerk(initial_frame.prev_velocity, initial_frame.prev_force, initial_frame.force)
def load_checkpoints(self, max_n, checkpoint_dict, preload_n):
# Force
if self.force_inference == "forcenet":
self.info("Loading ForceNet checkpoint from %s..." % self.forcenet_path)
self.sim.restore(self.forcenet_path, scope="ForceNet")
# IK
ik_checkpoint = os.path.expanduser(checkpoint_dict["IK"])
self.info("Loading IK checkpoint from %s..." % ik_checkpoint)
self.sim.restore(ik_checkpoint, scope="ik")
# SM
n = 2
while n <= max_n:
if n == max_n and not preload_n: return
checkpoint_path = None
i = n
while not checkpoint_path:
if "SM%d"%i in checkpoint_dict:
checkpoint_path = os.path.expanduser(checkpoint_dict["SM%d"%i])
else:
i //= 2
if i == n:
self.info("Loading SM%d checkpoint from %s..." % (n, checkpoint_path))
self.sim.restore(checkpoint_path, scope="sm%d" % n)
else:
self.info("Loading SM%d weights from SM%d checkpoint from %s..." % (n, i, checkpoint_path))
self.sim.restore_new_scope(checkpoint_path, "sm%d" % i, "sm%d" % n)
n *= 2
def load_all_from(self, max_n, ik_checkpoint, sm_checkpoint, sm_n):
# IK
self.info("Loading IK checkpoint from %s..." % ik_checkpoint)
self.sim.restore(ik_checkpoint, scope="ik")
# SM
n = 2
while n <= max_n:
source_n = sm_n(n) if callable(sm_n) else sm_n
self.info("Loading SM%d weights from SM%d checkpoint from %s..." % (n, source_n, sm_checkpoint))
self.sim.restore_new_scope(sm_checkpoint, "sm%d" % source_n, "sm%d" % n)
n *= 2
def lookup(self, array):
return array
class EagerExecutor(GraphBuilder):
def __init__(self, sim, true_densities, info, force_inference):
GraphBuilder.__init__(self, sim, true_densities, [], info, force_inference)
self.initial_density = self.sim.placeholder(name="initial_density")
self.target_density = self.sim.placeholder(name="target_density")
self.initial_velocity = self.sim.placeholder("velocity", name="initial_velocity")
self.target_velocity = self.sim.placeholder("velocity", name="target_velocity")
info("Building IK graph...")
self.ik_out_velocity, self.ik_out_vec_pot = ik(self.initial_density, self.target_density)
info("Building force graph...")
self.force_out, self.forcenet_path = forcenet(self.initial_density, self.initial_velocity, self.target_velocity)
self.sm_out_by_n = {}
n = 2
while n <= len(true_densities):
self.info("Building SM%d graph..."%n)
self.sm_out_by_n[n] = GraphBuilder.run_sm(self, n, self.initial_density, self.target_density)
n *= 2
self.feed = {}
def lookup(self, array):
if isinstance(array, numpy.ndarray):
return array
else:
return self.feed[array]
def run_ik(self, initial_density, target_density):
self.feed[self.initial_density] = self.lookup(initial_density)
self.feed[self.target_density] = self.lookup(target_density)
result = self.sim.run([self.ik_out_velocity, self.ik_out_vec_pot], feed_dict=self.feed)
return result
def run_sm(self, n, initial_density, target_density):
self.feed[self.initial_density] = self.lookup(initial_density)
self.feed[self.target_density] = self.lookup(target_density)
result = self.sim.run(self.sm_out_by_n[n], feed_dict=self.feed)
return result
def run_advect(self, velocity, density):
if not isinstance(density, numpy.ndarray):
density = self.feed[density]
if not isinstance(velocity.staggered, numpy.ndarray):
velocity = self.feed[velocity]
result = velocity.advect(density)
return result
# def run_force(self, initial_velocity, target_velocity, initial_density, real_target_density):
# self.feed[self.initial_density] = self.lookup(initial_density)
# self.feed[self.initial_velocity] = self.lookup(initial_velocity)
# self.feed[self.target_velocity] = self.lookup(target_velocity)
# return self.sim.run(self.force_out, feed_dict=self.feed)
def set_dict(self, feed_dict):
self.feed = feed_dict
def get_divide_strategy(name):
if name == "adaptive":
return AdaptivePlanSequence
elif name == "binary":
return TreeSequence
else:
raise ValueError("unknown divide strategy: %s" % name)
class MultiShapeEagerExecutor(EagerExecutor):
def __init__(self, sim, true_densities, info, force_inference):
GraphBuilder.__init__(self, sim, true_densities, [], info, force_inference)
self.initial_density = self.sim.placeholder(name="initial_density")
self.target_density = self.sim.placeholder(name="target_density")
self.initial_velocity = self.sim.placeholder("velocity", name="initial_velocity")
self.target_velocity = self.sim.placeholder("velocity", name="target_velocity")
info("Building single-batch IK graph...")
initial_sum = math.expand_dims(math.sum(self.initial_density, axis=0), axis=0)
target_sum = math.expand_dims(math.sum(self.target_density, axis=0), axis=0)
ik_single_batch = ik(initial_sum, target_sum)
ik_single_batch_vel = ik_single_batch[0].staggered
ik_single_batch_vec_pot = ik_single_batch[1].staggered
ik_vel_tiled = math.tile(ik_single_batch_vel, [math.shape(self.initial_density)[0]]+[1]*(len(initial_sum.shape)-1))
ik_vec_pot_tiled = math.tile(ik_single_batch_vec_pot, [math.shape(self.initial_density)[0]]+[1]*(len(initial_sum.shape)-1))
self.ik_out_velocity = StaggeredGrid(ik_vel_tiled)
self.ik_out_vec_pot = StaggeredGrid(ik_vec_pot_tiled)
info("Building force graph...")
self.force_out, self.forcenet_path = forcenet(self.initial_density, self.initial_velocity, self.target_velocity)
self.sm_out_by_n = {}
n = 2
while n <= len(true_densities):
self.info("Building SM%d graph..."%n)
self.sm_out_by_n[n] = GraphBuilder.run_sm(self, n, self.initial_density, self.target_density)
n *= 2
self.feed = {}
def set_dict(self, feed_dict):
self.feed = feed_dict
|
1631878
|
import os
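# NOTE: this module targets Python 2 (print statements, os.popen3); on
# Python 3, replace os.popen3 with subprocess.Popen(..., stdout=PIPE,
# stderr=PIPE).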
class Registry:
def __init__(self, host, username, password):
self.cmd = 'net rpc registry -S %s -U "%s%%%s"' % (host, username, password)
def call(self, cmd, *args):
args = '"' + '" "'.join(args) + '"'
shell = ' '.join((self.cmd, cmd, args))
n, out, err = os.popen3(shell)
return out.read()
def join(self, *args):
return '\\'.join(args)
def enum(self, *base):
base = self.join(*base)
        if base.count('HKEY_USERS') > 2: raise ValueError('refusing to enumerate nested HKEY_USERS path: %s' % base)
lines = self.call('enumerate', base)
data = {}
key = ''
cur = {}
for line in lines.split('\n'):
if not line:
if key: data[key] = cur
key = ''
cur = {}
continue
typ, value = line.strip().split('=', 1)
typ, value = typ.strip(), value.strip()
if typ == 'Keyname':
key = value
elif typ == 'Valuename':
if value == '':
value = '@'
key = value
cur[typ] = value
if key: data[key] = cur
return data
def list(self, *base):
return list(self.enum(*base))
def get(self, key, name):
return self.enum(key)[name]
def set(self, key, name, typ, *values):
return self.call('setvalue', key, name, typ, *values)
def delete(self, key, name):
lines = self.call('deletevalue', key, name)
return lines
def walk(self, base):
tree = {}
keys = self.enum(base)
for key in keys:
if not 'Value' in keys[key]:
                tree[key] = self.walk(self.join(base, key))
return tree
if __name__ == '__main__':
host = ''
username = ''
password = ''
reg = Registry(host, username, password)
for user in reg.enum('HKEY_USERS'):
try:
run = reg.join('HKEY_USERS', user, 'Software\\Microsoft\\Windows\\CurrentVersion\\Run')
l = reg.list(run)
if l:
print l
print reg.get(run, l[0])['Value']
print reg.enum(run)
except KeyError:
continue
|
1631893
|
import logging
from django.utils import timezone
from elasticsearch import Elasticsearch, NotFoundError, RequestError
from zentral.core.exceptions import ImproperlyConfigured
from .base import BaseExporter
logger = logging.getLogger("zentral.contrib.inventory.exporters.es_machine_snapshots")
MAX_EXPORTS_COUNT = 3
ES_ALIAS = "zentral-inventory-export-machine-snapshots"
ES_TEMPLATE_NAME = ES_ALIAS
ES_INDEX_PATTERN = '{}-*'.format(ES_ALIAS)
ES_TEMPLATE = {
'index_patterns': [ES_INDEX_PATTERN],
'settings': {'number_of_shards': 1,
'number_of_replicas': 0},
'mappings': {'date_detection': False,
'dynamic_templates': [{'strings_as_keyword': {'mapping': {'ignore_above': 1024,
'type': 'keyword'},
'match_mapping_type': 'string'}}],
'properties': {'@timestamp': {'type': 'date'},
'tags': {'ignore_above': 1024,
'type': 'keyword'}}}
}
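# Export strategy: each run creates a fresh index matching ES_INDEX_PATTERN
# with an incremented numeric suffix, repoints ES_ALIAS at it in a single
# update_aliases call, and prunes all but the newest MAX_EXPORTS_COUNT indices.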
class InventoryExporter(BaseExporter):
name = "elasticsearch machine snapshots exporter"
def __init__(self, config_g):
super().__init__(config_g)
error_msgs = []
self.es_hosts = config_g["es_hosts"]
if not self.es_hosts:
error_msgs.append("Missing es_hosts")
if not isinstance(self.es_hosts, list):
error_msgs.append("es_hosts must be a list")
if error_msgs:
raise ImproperlyConfigured("{} in {}".format(", ".join(error_msgs), self.name))
def iter_machine_snapshots(self):
for serial_number, machine_snapshots in self.get_ms_query().fetch(paginate=False, for_filtering=True):
for machine_snapshot in machine_snapshots:
yield machine_snapshot
def get_es_client(self):
self._es = Elasticsearch(hosts=self.es_hosts)
self._es_version = [int(i) for i in self._es.info()["version"]["number"].split(".")]
# template
template_body = ES_TEMPLATE
if self._es_version < [7]:
template_body["mappings"] = {"_doc": template_body.pop("mappings")}
self._es.indices.put_template(ES_TEMPLATE_NAME, template_body)
# create index
for i in range(10):
existing_indices = self._es.indices.get(ES_INDEX_PATTERN).keys()
if not len(existing_indices):
next_id = 0
else:
next_id = max(int(index.rsplit("-", 1)[-1]) for index in existing_indices) + 1
index_name = ES_INDEX_PATTERN.replace("*", "{:08d}".format(next_id))
try:
self._es.indices.create(index_name)
except RequestError:
# probably race
pass
else:
# move alias
update_aliases_body = {
"actions": [
{"add": {"index": index_name, "alias": ES_ALIAS}}
]
}
try:
old_indices = self._es.indices.get_alias(ES_ALIAS)
except NotFoundError:
old_indices = []
for old_index in old_indices:
if old_index != index_name:
update_aliases_body["actions"].append(
{"remove": {"index": old_index, "alias": ES_ALIAS}}
)
self._es.indices.update_aliases(update_aliases_body)
return index_name
def index_snapshot(self, index_name, machine_snapshot):
doc_id = "{}.{}".format(machine_snapshot["serial_number"], machine_snapshot["source"]["id"])
self._es.create(index_name, doc_id, machine_snapshot)
def prune_exports(self):
existing_indices = sorted(self._es.indices.get(ES_INDEX_PATTERN).keys(), reverse=True)
for index_name in existing_indices[MAX_EXPORTS_COUNT:]:
self._es.indices.delete(index_name)
logger.info("Removed '%s' index", index_name)
def run(self):
timestamp = timezone.now().isoformat()
index_name = self.get_es_client()
logger.info("Created '%s' index", index_name)
i = 0
for machine_snapshot in self.iter_machine_snapshots():
machine_snapshot["@timestamp"] = timestamp
self.index_snapshot(index_name, machine_snapshot)
i += 1
logger.info("Added %s machine snapshot(s)", i)
self.prune_exports()
|
1631919
|
import glob
from pyPanair.preprocess import read_wgs
if __name__ == '__main__':
print("converting LaWGS to stl")
wgs_list = glob.glob("*.wgs")
for w in wgs_list:
wgs = read_wgs(w)
wgs.create_stl(w.replace(".wgs", ".stl"))
print("success!")
|
1631959
|
import sys
import fileinput
import argparse
import time
import itertools
import pickle
import random
import codecs
from collections import defaultdict
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from scipy.sparse import coo_matrix, hstack, vstack
import numpy as np
from ClassifierStuff import *
from SentenceModel import *
def findEventTrigger(sentenceData,triggerid):
for sentenceid, sentence in enumerate(sentenceData):
if triggerid in sentence.predictedEntityLocs:
return sentenceid,sentence.predictedEntityLocs[triggerid]
raise RuntimeError('Unable to find location of event trigger ID ('+str(triggerid)+') in sentences')
def findArgumentTrigger(sentenceData,triggerid):
for sentenceid, sentence in enumerate(sentenceData):
if triggerid in sentence.knownEntityLocs:
return sentenceid,sentence.knownEntityLocs[triggerid]
raise RuntimeError('Unable to find location of argument trigger ID ('+str(triggerid)+') in sentences')
def findTrigger(sentenceData,triggerid):
for sentenceid, sentence in enumerate(sentenceData):
if triggerid in sentence.predictedEntityLocs:
return sentenceid,sentence.predictedEntityLocs[triggerid]
if triggerid in sentence.knownEntityLocs:
return sentenceid,sentence.knownEntityLocs[triggerid]
raise RuntimeError('Unable to find location of trigger ID ('+str(triggerid)+') in sentences')
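# The three helpers above differ only in which per-sentence entity tables they
# search: predicted event triggers, known argument entities, or both.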
def generateRelationExamples(sentenceAndEventData,targetRelations,targetArguments,sentenceRange,doFiltering):
examples = []
classes = []
relTypes = []
for filename in sentenceAndEventData:
#print filename
(sentenceData,relations,modifiers) = sentenceAndEventData[filename]
positiveRelations = {}
positiveRelationsProcessed = []
for (relName,id1,id2) in relations:
sentenceid1,locs1 = findTrigger(sentenceData,id1)
sentenceid2,locs2 = findTrigger(sentenceData,id2)
type1 = sentenceData[sentenceid1].locsToTriggerTypes[tuple(locs1)]
type2 = sentenceData[sentenceid2].locsToTriggerTypes[tuple(locs2)]
#if sentenceid1 != sentenceid2:
# print "WARNING: Relation split across sentences (%s and %s)" % (id1,id2)
# continue
#sentenceid = sentenceid1
#print "POSITIVE", relName, type1, type2
#key = (relName,type1,type2)
#key = relName
#print relName
if not relName in targetRelations:
continue
key = (sentenceid1,tuple(locs1),sentenceid2,tuple(locs2))
classid = targetRelations[relName]
positiveRelations[key] = classid
#positiveRelations[key] = True
# Now we go through all sentences and create examples for all possible token combinations
# Then check if any are already marked as positive and add to the appropriate list of examples
for sentenceid1 in range(len(sentenceData)):
for sentenceid2 in range(max(sentenceid1-sentenceRange,0),min(sentenceid1+sentenceRange+1,len(sentenceData))):
#print sentenceid1,sentenceid2
sentence1,sentence2 = sentenceData[sentenceid1],sentenceData[sentenceid2]
eventLocsAndTypes1 = [ (sentence1.predictedEntityLocs[id],sentence1.predictedEntityTypes[id]) for id in sentence1.predictedEntityTypes ]
argsLocsAndTypes1 = [ (sentence1.knownEntityLocs[id],sentence1.knownEntityTypes[id]) for id in sentence1.knownEntityTypes ]
possibleLocsAndTypes1 = eventLocsAndTypes1 + argsLocsAndTypes1
eventLocsAndTypes2 = [ (sentence2.predictedEntityLocs[id],sentence2.predictedEntityTypes[id]) for id in sentence2.predictedEntityTypes ]
argsLocsAndTypes2 = [ (sentence2.knownEntityLocs[id],sentence2.knownEntityTypes[id]) for id in sentence2.knownEntityTypes ]
possibleLocsAndTypes2 = eventLocsAndTypes2 + argsLocsAndTypes2
for (locs1,type1),(locs2,type2) in itertools.product(possibleLocsAndTypes1,possibleLocsAndTypes2):
if sentenceid1 == sentenceid2 and locs1 == locs2:
continue
key = (type1,type2)
if doFiltering and not key in targetArguments:
continue
#print "POTENTIAL", type1, type2
key = (sentenceid1,tuple(locs1),sentenceid2,tuple(locs2))
example = Example(filename, sentenceData, arg1_sentenceid=sentenceid1, arg1_locs=locs1, arg2_sentenceid=sentenceid2, arg2_locs=locs2)
examples.append(example)
thisClass = 0
if key in positiveRelations:
thisClass = positiveRelations[key]
#thisClass = 1
positiveRelationsProcessed.append(key)
classes.append(thisClass)
relTypes.append((type1,type2))
#print filename
for key in positiveRelations:
#assert key in allArgTriggerLocsProcessed, 'Unprocessed event trigger found: ' + str(key)
if not key in positiveRelationsProcessed:
print 'WARNING: Unprocessed argument trigger found: %s in file: %s' % (str(key), filename)
#for c,e in zip(classes,examples):
# print c,e
#sys.exit(0)
return classes, examples, relTypes
def createRelationClassifier(sentenceAndEventData,targetRelations,targetArguments,parameters=None,generateClassifier=True,sentenceRange=0,doFiltering=False):
classes,examples,relTypes = generateRelationExamples(sentenceAndEventData,targetRelations,targetArguments,sentenceRange,doFiltering)
assert min(classes) == 0, "Expecting negative cases in relation examples"
assert max(classes) > 0, "Expecting positive cases in relation examples"
#return buildClassifier(classes,examples,parameters)
vectors,vectorizer,featureSelector = buildVectorizer(classes,examples,parameters)
classifier = None
if generateClassifier:
classifier = buildClassifierFromVectors(classes,vectors,parameters)
data = (classes,examples,vectors,relTypes)
return data,vectorizer,featureSelector,classifier
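# Pipeline summary: generate candidate entity pairs -> vectorize features
# (buildVectorizer) -> optional feature selection -> train the classifier
# (buildClassifierFromVectors); both helpers come from the star imports above.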
def saveCOOMatrixToFile(matrix,filename):
with open(filename,'w') as f:
f.write("%%sparse\t%d\t%d\n" % (matrix.shape[0],matrix.shape[1]))
for row,col,data in zip(matrix.row,matrix.col,matrix.data):
line = "%d\t%d\t%f" % (row,col,data)
f.write(line + "\n")
def loadCOOMatrixFromFile(filename):
rows,cols = -1,-1
data = []
matrix = None
with open(filename,'r') as f:
header = True
for line in f:
if header:
_,r,c = line.split("\t")
rows,cols = int(r),int(c)
header = False
else:
x,y,value = line.split("\t")
x,y,value = int(x),int(y),float(value)
data.append((x,y,value))
xs = [ x for x,y,value in data ]
ys = [ y for x,y,value in data ]
vals = [ value for x,y,value in data ]
matrix = coo_matrix((vals,(xs,ys)),shape=(rows,cols))
return matrix
def loadNumpyArrayFromFile(filename):
return np.loadtxt(filename,comments='%')
def loadMatrixFromFile(filename):
with open(filename,'r') as f:
header = f.readline().strip().split("\t")
type,dim1,dim2 = header
#dim1,dim2 = int(dim1),int(dim2)
print filename, type
if type == "%sparse":
return loadCOOMatrixFromFile(filename)
elif type == "%dense":
return loadNumpyArrayFromFile(filename)
else:
raise RuntimeError("Unknown type of matrix: %s" % type)
# It's the main bit. Yay!
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='VERSE Relation Extraction tool')
parser.add_argument('--trainingFile', required=True, type=str, help='Parsed-text file containing the training data')
parser.add_argument('--testingFile', required=True, type=str, help='Parsed-text file containing the test data to predict modifications for')
parser.add_argument('--relationDescriptions', required=True, type=str, help='Description file containing list of relation types with arguments to predict')
parser.add_argument('--parameters', type=str, help='Parameters to use for feature construction, selection and classification')
parser.add_argument('--outFile', type=str, help='Output filename for data with predicted modifications')
args = parser.parse_args()
parameters = {}
if args.parameters:
for arg in args.parameters.split(';'):
name,value = arg.strip().split(":")
parameters[name.strip()] = value.strip()
sentenceRange = 0
if "sentenceRange" in parameters:
sentenceRange = int(parameters["sentenceRange"])
trainFilename = args.trainingFile
with open(trainFilename, 'r') as f:
trainingSentenceAndEventData = pickle.load(f)
print "Loaded " + trainFilename
tmpTargetRelations = set()
for filename,data in trainingSentenceAndEventData.iteritems():
sentenceData = data[0]
relations = data[1]
for (relName,id1,id2) in relations:
sentenceid1,locs1 = findTrigger(sentenceData,id1)
sentenceid2,locs2 = findTrigger(sentenceData,id2)
type1 = sentenceData[sentenceid1].locsToTriggerTypes[tuple(locs1)]
type2 = sentenceData[sentenceid2].locsToTriggerTypes[tuple(locs2)]
tmpTargetRelations.add((relName,type1,type2))
#print tmpTargetRelations
print "#"*30
for relName,type1,type2 in tmpTargetRelations:
print "%s\t%s\t%s" % (relName,type1,type2)
print "#"*30
doFiltering = False
if 'doFiltering' in parameters and parameters['doFiltering'] == 'True':
doFiltering = True
#targetRelations = []
targetRelations,targetArguments = set(),set()
#typeLookup = {}
with open(args.relationDescriptions,'r') as f:
for line in f:
nameAndArgs,type1,type2 = line.strip().split('\t')
# Pull out the name of arguments and sort by the argument names
nameAndArgsSplit = nameAndArgs.split(';')
# Basically don't do anything if we aren't given the argument names
if len(nameAndArgsSplit) == 1:
targetRelations.add(tuple(nameAndArgsSplit))
targetArguments.add((type1,type2))
else: # Or do sort by argument names (if they are provided)
relName,argName1,argName2 = nameAndArgs.split(';')
relArgs = [(argName1,type1),(argName2,type2)]
relArgs = sorted(relArgs)
targetRelations.add((relName,relArgs[0][0],relArgs[1][0]))
targetArguments.add((relArgs[0][1],relArgs[1][1]))
targetRelations = list(targetRelations)
targetRelations = sorted(targetRelations)
targetRelationsToIDs = { arg:i+1 for i,arg in enumerate(targetRelations) }
#print targetRelationsToIDs
print "-"*30
for targetRelation in targetRelations:
print targetRelation
print "-"*30
for targetArgument in targetArguments:
print targetArgument
print "-"*30
relData,argVec,argFS,argClf = createRelationClassifier(trainingSentenceAndEventData,targetRelationsToIDs,targetArguments,parameters,True,sentenceRange,doFiltering)
with open(args.testingFile, 'r') as f:
testingSentenceAndEventData = pickle.load(f)
print "Loaded " + args.testingFile
# Empty the test data of any existing predictions (in case we load the wrong test file)
for filename in testingSentenceAndEventData:
(sentenceData,relations,modifiers) = testingSentenceAndEventData[filename]
# Empty relations
relations = []
testingSentenceAndEventData[filename] = (sentenceData,relations,modifiers)
print "generate Argument Examples..."
_,aExamples,aTypes = generateRelationExamples(testingSentenceAndEventData,targetRelationsToIDs,targetArguments,sentenceRange,doFiltering)
print "vectorize, trim and predict..."
aVectors = argVec.vectorize(aExamples)
if not argFS is None:
aVectors = argFS.transform(aVectors)
aVectors = coo_matrix(aVectors)
aPredictions = argClf.predict(aVectors)
aProbs = argClf.predict_proba(aVectors)
probColumns = { c:i for i,c in enumerate(argClf.classes_) }
#predictedEventID = 1
predictedTriggerID = 1000
predictedEventIDPerFile = Counter()
for i,(p,example) in enumerate(zip(aPredictions,aExamples)):
if p != 0:
relType = targetRelations[p-1]
#eventType = thisRelation[1]
#argTypes = thisRelation[2:]
#assert len(argTypes) == 2, "Only processing binary relations for triggerless events"
#eventType = thisRelation[0]
sentenceFilename = example.filename
sentenceID1,arg1Locs = example.arguments[0]
sentenceID2,arg2Locs = example.arguments[1]
sentence1 = testingSentenceAndEventData[sentenceFilename][0][sentenceID1]
sentence2 = testingSentenceAndEventData[sentenceFilename][0][sentenceID2]
sentence1.invertTriggers()
sentence2.invertTriggers()
arg1ID = sentence1.locsToTriggerIDs[tuple(arg1Locs)]
arg2ID = sentence2.locsToTriggerIDs[tuple(arg2Locs)]
type1ID = sentence1.locsToTriggerTypes[tuple(arg1Locs)]
type2ID = sentence2.locsToTriggerTypes[tuple(arg2Locs)]
#relType = typeLookup[type1ID]
relations = testingSentenceAndEventData[sentenceFilename][1]
prob = aProbs[i,probColumns[p]]
newR = (relType,arg1ID,arg2ID,prob)
#print "ADDING", newR
relations.append(newR)
#print "TEST",sentenceFilename,sentenceID1,sentenceID2,arg1Locs,arg2Locs,relType
with open(args.outFile, 'w') as f:
pickle.dump(testingSentenceAndEventData,f)
print "Complete."
|
1631969
|
from enum import Enum
import re
try:
import requests
except ImportError:
raise RuntimeError('Requests is required for hibp.')
try:
import gevent
from gevent import monkey
from gevent.pool import Pool
except ImportError:
raise RuntimeError('Gevent is required for hibp.')
monkey.patch_all(thread=False, select=False)
# global variables
BASE_URL = "https://haveibeenpwned.com/api/v2/"
HEADERS = {"User-Agent": "hibp-python",}
# enumerate the types of services that are callable
class Services(Enum):
AccountBreach = "accountbreach"
DomainBreach = "domainbreach"
Breach = "breach"
AllBreaches = "allbreaches"
DataClasses = "dataclasses"
# generic HIBP class
class HIBP(object):
'''
Generic HIBP object.
Attributes:
- url -> url to query
- service -> service to query
- param -> parameter to service
- response -> response object to URL request
'''
def __init__(self):
self.url = None
self.service = None
self.param = None
self.response = None
@classmethod
def get_account_breaches(cls,account):
'''
Setup request to retrieve all breaches on a particular account
Args:
- account -> account you want to query. can be email or username to
anything
Returns:
- HIBP object with updated url, service, and param attributes
'''
req = cls()
req.url = BASE_URL + "breachedaccount/{}".format(account)
req.service = Services.AccountBreach
req.param = account
return req
@classmethod
def get_domain_breaches(cls,domain):
'''
Setup request to retrieve all breaches on a particular domain
Args:
- domain -> domain you want to query. must be valid domain,
according to RFC 1035
Returns:
- HIBP object with updated url, service, and param attributes
'''
req = cls()
domain_regex = re.compile(r"[a-zA-Z\d-]{,63}(\.[a-zA-Z\d-]{,63})+")
if not re.match(domain_regex, domain):
raise ValueError("{} is an invalid domain.".format(domain))
req.url = BASE_URL + "breaches?domain={}".format(domain)
req.service = Services.DomainBreach
req.param = domain
return req
@classmethod
def get_breach(cls,name):
'''
Setup request to retrieve a specific breach.
Args:
- name -> name of breach you want to query. To get a list of
all breach names, run HIBP.get_all_breaches()
Returns:
- HIBP object with updated url, service, and param attributes
'''
req = cls()
req.url = BASE_URL + "breach/{}".format(name)
req.service = Services.Breach
req.param = name
return req
@classmethod
def get_all_breaches(cls):
'''
Setup request to retrieve all breaches recorded on HIBP.com so far.
Returns:
- HIBP object with updated url, service, and param attributes
'''
req = cls()
req.url = BASE_URL + "breaches"
req.service = Services.AllBreaches
return req
@classmethod
def get_dataclasses(cls):
'''
Setup request to retrieve all dataclasses on HIBP.
Returns:
- HIBP object with updated url, service, and param attributes
'''
req = cls()
req.url = BASE_URL + "dataclasses"
req.service = Services.DataClasses
return req
def execute(self):
'''
Execute a GET request on HIBP REST API service based on request
object setup with one of the query services above.
Returns:
- If query parameter is pwned:
HIBP object with updated response attribute that contains
parsed JSON object with pwnage data.
- Else:
HIBP object with updated response attribute that contains
string saying that the object has not been pwned.
'''
if self.url is None:
raise ValueError("setup HIBP object with a query service before executing \
request.")
try:
response = requests.get(self.url, headers = HEADERS)
except requests.exceptions.HTTPError:
print("there was an error")
return
if response.status_code == 404 and self.service == Services.AccountBreach:
self.response = "object has not been pwned."
return self
elif response.text == "[]" and self.service == Services.DomainBreach:
self.response = "object has not been pwned."
return self
elif response.status_code == 404 and self.service == Services.Breach:
raise ValueError("invalid breach name {}.".format(self.param))
elif response.status_code == 429 and self.service == Services.AccountBreach:
raise ValueError("Rate limit error {}.".format(self.param))
else:
self.response = response.json()
return self
class AsyncHIBP(object):
'''
Generic AsyncHIBP object. Use this object to do concurrent HIBP requests
on multiple queries via gevent.
Attributes:
- pool_size -> size of gevent pool
- pool -> Gevent pool
- session -> requests session object
- timeout -> timeout, how long we wait for the URL to respond
- url -> url to respond on
- response -> response object to URL request
'''
def __init__(self):
self.pool_size = None
self.pool = Pool(self.pool_size)
self.session = requests.Session()
self.timeout = 10
self.url = None
self.response = None
def send(self,hibp_obj):
'''
Spawns gevent/pool threads that will run the execute method on each
HIBP object.
Attributes:
- hibp_obj -> HIBP object
'''
if self.pool is not None:
return self.pool.spawn(hibp_obj.execute)
return gevent.spawn(hibp_obj.execute)
def map(self,hibp_objs):
'''
Asynchronously map the HIBP execution job to multiple queries.
Attributes:
- hibp_objs - list of HIBP objects
'''
jobs = [self.send(hibp_obj) for hibp_obj in hibp_objs]
gevent.joinall(jobs, timeout=self.timeout)
return hibp_objs
def imap(self,hibp_objs):
'''
Lazily + Asynchronously map the HIBP execution job to multiple queries.
Attributes:
- hibp_objs - list of HIBP objects
'''
for hibp_obj in self.pool.imap_unordered(HIBP.execute, hibp_objs):
yield hibp_obj.response
self.pool.join()
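# Usage sketch (requires network access; haveibeenpwned's current API versions
# require an API key for account lookups, so treat this as illustrative for the
# v2 endpoints above):
#
#   reqs = [HIBP.get_account_breaches("foo@example.com"),
#           HIBP.get_domain_breaches("adobe.com")]
#   for req in AsyncHIBP().map(reqs):
#       print(req.response)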
|
1631972
|
from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
from glob import glob
import os
class SshtConan(ConanFile):
name = "ssht"
license = "GPL-3.0-or-later"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/astro-informatics/ssht"
description = "Fast spin spherical harmonic transforms"
settings = "os", "arch", "compiler", "build_type"
topics = ("physics", "astrophysics", "radio interferometry")
options = {"fPIC": [True, False]}
default_options = {"fPIC": True}
requires = "fftw/3.3.9"
generators = "cmake", "cmake_find_package", "cmake_paths"
exports_sources = ["CMakeLists.txt"]
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.compiler == "Visual Studio":
raise ConanInvalidConfiguration("SSHT requires C99 support for complex numbers.")
del self.settings.compiler.cppstd
del self.settings.compiler.libcxx
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = glob('ssht-*/')[0]
os.rename(extracted_dir, self._source_subfolder)
@property
def cmake(self):
if not hasattr(self, "_cmake"):
self._cmake = CMake(self)
self._cmake.definitions["tests"] = False
self._cmake.definitions["python"] = False
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
self.cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.cmake.install()
def package_info(self):
self.cpp_info.libs = ["ssht"]
|
1631995
|
from notebook_test_case import NotebooksTestCase
import unittest
class RunMostNotebooksTestCase(NotebooksTestCase):
TEST_PATHS = ['./notebooks/',
'./notebooks/solr/tmdb',
'./notebooks/elasticsearch/tmdb',
'./notebooks/elasticsearch/osc-blog']
IGNORED_NBS = ['./notebooks/solr/tmdb/evaluation (Solr).ipynb',
'./notebooks/elasticsearch/tmdb/evaluation.ipynb']
def test_paths(self):
return RunMostNotebooksTestCase.TEST_PATHS
def ignored_nbs(self):
return RunMostNotebooksTestCase.IGNORED_NBS
if __name__ == "__main__":
unittest.main()
|
1632005
|
from mrq.task import Task
from mrq.context import log
import sys
PY3 = sys.version_info > (3,)
class Simple(Task):
def run(self, params):
# Some systems may be configured like this.
if not PY3 and params.get("utf8_sys_stdout"):
            import codecs
            UTF8Writer = codecs.getwriter('utf8')
            sys.stdout = UTF8Writer(sys.stdout)
if params["class_name"] == "unicode":
log.info(u"caf\xe9")
elif params["class_name"] == "string":
log.info("cafe")
elif params["class_name"] == "latin-1":
log.info("caf\xe9")
elif params["class_name"] == "bytes1":
log.info("Mat\xc3\xa9riels d'entra\xc3\xaenement")
return True
|
1632023
|
from opts.viz_opts import VizOpts
from data_process.coco import CocoDataSet
from visualization.visualize import visualize_masks, visualize_keypoints, visualize_heatmap, visualize_paf
from data_process.coco_process_utils import BODY_PARTS
if __name__ == '__main__':
opts = VizOpts().parse()
for split in ['train', 'val']:
coco_dataset = CocoDataSet(opts.data, opts, split)
for i in range(len(coco_dataset)):
img, heatmaps, paf, ignore_mask, keypoints = coco_dataset.get_item_raw(i)
img = (img * 255.).astype('uint8')
visualize_keypoints(img, keypoints, BODY_PARTS)
if opts.vizIgnoreMask:
visualize_masks(img, ignore_mask)
if opts.vizHeatMap:
visualize_heatmap(img, heatmaps)
if opts.vizPaf:
visualize_paf(img, paf)
|
1632120
|
import sys
import numpy as np
import theano.tensor as T
from keras.layers import Input, Conv2D, Activation, Lambda, UpSampling2D, merge
from keras.models import Model
from keras.engine.topology import Layer
from neural_style.utils import floatX
class InstanceNormalization(Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
self.scale = self.add_weight(shape=(input_shape[1],), initializer="uniform", trainable=True)
self.shift = self.add_weight(shape=(input_shape[1],), initializer="zero", trainable=True)
super().build(input_shape)
def call(self, x, mask=None):
hw = T.cast(x.shape[2] * x.shape[3], floatX)
mu = x.sum(axis=-1).sum(axis=-1) / hw
mu_vec = mu.dimshuffle(0, 1, "x", "x")
sig2 = T.square(x - mu_vec).sum(axis=-1).sum(axis=-1) / hw
y = (x - mu_vec) / T.sqrt(sig2.dimshuffle(0, 1, "x", "x") + 1e-5)
return self.scale.dimshuffle("x", 0, "x", "x") * y + self.shift.dimshuffle("x", 0, "x", "x")
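# call() implements instance normalization: each (sample, channel) feature map
# is normalized over its spatial dimensions and then rescaled per channel:
# y = scale * (x - mu) / sqrt(var + 1e-5) + shift.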
class ReflectPadding2D(Layer):
def __init__(self, padding=(1, 1), **kwargs):
self.padding = padding
super().__init__(**kwargs)
def build(self, input_shape):
super().build(input_shape)
def call(self, x, mask=None):
p0, p1 = self.padding[0], self.padding[1]
y = T.zeros((x.shape[0], x.shape[1], x.shape[2]+(2*p0), x.shape[3]+(2*p1)), dtype=floatX)
y = T.set_subtensor(y[:, :, p0:-p0, p1:-p1], x)
y = T.set_subtensor(y[:, :, :p0, p1:-p1], x[:, :, p0:0:-1, :])
        y = T.set_subtensor(y[:, :, -p0:, p1:-p1], x[:, :, -2:-2-p0:-1, :])
y = T.set_subtensor(y[:, :, p0:-p0, :p1], x[:, :, :, p1:0:-1])
y = T.set_subtensor(y[:, :, p0:-p0, -p1:], x[:, :, :, -2:-2-p1:-1])
y = T.set_subtensor(y[:, :, :p0, :p1], x[:, :, p0:0:-1, p1:0:-1])
y = T.set_subtensor(y[:, :, -p0:, :p1], x[:, :, -2:-2-p0:-1, p1:0:-1])
y = T.set_subtensor(y[:, :, :p0, -p1:], x[:, :, p0:0:-1, -2:-2-p1:-1])
y = T.set_subtensor(y[:, :, -p0:, -p1:], x[:, :, -2:-2-p0:-1, -2:-2-p1:-1])
return y
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[1], input_shape[2]+(2*self.padding[0]), input_shape[3]+(2*self.padding[1]))
def conv_layer(in_, nb_filter, filter_length, subsample=1, upsample=1, only_conv=False):
if upsample != 1:
out = UpSampling2D(size=(upsample, upsample))(in_)
else:
out = in_
padding = int(np.floor(filter_length / 2))
out = ReflectPadding2D((padding, padding))(out)
out = Conv2D(nb_filter, filter_length, filter_length, subsample=(subsample, subsample), border_mode="valid")(out)
if not only_conv:
out = InstanceNormalization()(out)
out = Activation("relu")(out)
return out
def residual_block(in_):
out = conv_layer(in_, 128, 3)
out = conv_layer(out, 128, 3, only_conv=True)
return merge([out, in_], mode="sum")
def get_transformer_net(X, weights=None):
input_ = Input(tensor=X, shape=(3, 256, 256))
y = conv_layer(input_, 32, 9)
y = conv_layer(y, 64, 3, subsample=2)
y = conv_layer(y, 128, 3, subsample=2)
y = residual_block(y)
y = residual_block(y)
y = residual_block(y)
y = residual_block(y)
y = residual_block(y)
y = conv_layer(y, 64, 3, upsample=2)
y = conv_layer(y, 32, 3, upsample=2)
y = conv_layer(y, 3, 9, only_conv=True)
y = Activation("tanh")(y)
y = Lambda(lambda x: x * 150, output_shape=(3, None, None))(y)
net = Model(input=input_, output=y)
if weights is not None:
try:
net.load_weights(weights)
except OSError as e:
print(e)
sys.exit(1)
return net
|
1632127
|
import matplotlib.pyplot as plot
import matplotlib.dates as md
from matplotlib.dates import date2num
import datetime
# from pylab import *
from numpy import polyfit
import numpy as np
f = open("deviations.csv")
values = []
timestamps = []
for (i, line) in enumerate(f):
if i >= 1:
lineArray = line.split(",")
date = datetime.datetime.strptime(lineArray[0], '%Y-%m-%d %H:%M:%S')
timestamps.append(date2num(date))
value = lineArray[1].strip()
values.append(value)
if i > 100000:
break
plot.subplots_adjust(bottom=0.2)
plot.xticks( rotation=25 )
ax=plot.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
# countArray = np.arange(0.0, len(timestamps))
floatValues = np.array(list(map(float, values)))
fit = polyfit(timestamps,floatValues,1)
fit_fn = np.poly1d(fit) # fit_fn is now a function which takes in x and returns an estimate for y
# plot(x,y, 'yo', x, fit_fn(x), '--k')
plot.plot(timestamps, floatValues, timestamps, fit_fn(timestamps), '--k')
#plot.plot(timestamps, values)
plot.show()
|
1632184
|
from unittest import TestCase
from propara.evaluation.metrics import Metrics
class TestMetrics(TestCase):
def setUp(self):
self.metrics = Metrics()
self.metrics.tp_increment(1)
self.metrics.fp_increment(1)
self.metrics.tp_increment(1)
self.metrics.fn_increment(1)
self.metrics_highway = Metrics()
self.metrics_highway.set_precision(0.5)
self.metrics_highway.set_recall(1.0)
def test_get_scores(self):
_2_by_3 = 0.6666666666666666
assert self.metrics.get_scores() == (0.5, _2_by_3, _2_by_3, _2_by_3)
assert self.metrics_highway.get_scores() == (-1.0, 0.5, 1.0, _2_by_3)
|
1632186
|
import random
import unittest
import numpy as np
import torch
from code_soup.common.utils import Seeding
class TestSeeding(unittest.TestCase):
"""Test the seed function."""
def test_seed(self):
"""Test that the seed is set."""
random.seed(42)
initial_state = random.getstate()
Seeding.seed(42)
final_state = random.getstate()
self.assertEqual(initial_state, final_state)
self.assertEqual(np.random.get_state()[1][0], 42)
self.assertEqual(torch.get_rng_state().tolist()[0], 42)
|
1632212
|
from sqlalchemy import schema
from sqlalchemy import util
class CompareTable:
def __init__(self, table):
self.table = table
def __eq__(self, other):
if self.table.name != other.name or self.table.schema != other.schema:
return False
for c1, c2 in util.zip_longest(self.table.c, other.c):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
# TODO: compare constraints, indexes
def __ne__(self, other):
return not self.__eq__(other)
class CompareColumn:
def __init__(self, column):
self.column = column
def __eq__(self, other):
return (
self.column.name == other.name
and self.column.nullable == other.nullable
)
# TODO: datatypes etc
def __ne__(self, other):
return not self.__eq__(other)
class CompareIndex:
def __init__(self, index):
self.index = index
def __eq__(self, other):
return (
str(schema.CreateIndex(self.index))
== str(schema.CreateIndex(other))
and self.index.dialect_kwargs == other.dialect_kwargs
)
def __ne__(self, other):
return not self.__eq__(other)
class CompareCheckConstraint:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
return (
isinstance(other, schema.CheckConstraint)
and self.constraint.name == other.name
and (str(self.constraint.sqltext) == str(other.sqltext))
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
def __ne__(self, other):
return not self.__eq__(other)
class CompareForeignKey:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
r1 = (
isinstance(other, schema.ForeignKeyConstraint)
and self.constraint.name == other.name
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
if not r1:
return False
for c1, c2 in util.zip_longest(self.constraint.columns, other.columns):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class ComparePrimaryKey:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
r1 = (
isinstance(other, schema.PrimaryKeyConstraint)
and self.constraint.name == other.name
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
if not r1:
return False
for c1, c2 in util.zip_longest(self.constraint.columns, other.columns):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class CompareUniqueConstraint:
def __init__(self, constraint):
self.constraint = constraint
def __eq__(self, other):
r1 = (
isinstance(other, schema.UniqueConstraint)
and self.constraint.name == other.name
and (other.table.name == self.constraint.table.name)
and other.table.schema == self.constraint.table.schema
)
if not r1:
return False
for c1, c2 in util.zip_longest(self.constraint.columns, other.columns):
if (c1 is None and c2 is not None) or (
c2 is None and c1 is not None
):
return False
if CompareColumn(c1) != c2:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
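# Usage sketch with throwaway metadata (hypothetical tables):
#
#   from sqlalchemy import Column, Integer, MetaData, Table
#   t1 = Table("t", MetaData(), Column("id", Integer, nullable=False))
#   t2 = Table("t", MetaData(), Column("id", Integer, nullable=False))
#   assert CompareTable(t1) == t2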
|
1632227
|
import copy
import ctypes
import gfootball.env as football_env
import torch
import torch.multiprocessing as _mp
from a2c_ppo_acktr.base_factory import get_base
from a2c_ppo_acktr.model import Policy
from create_env import create_atari_mjc_env
from gym.spaces.discrete import Discrete
mp = _mp.get_context('spawn')
Value = mp.Value
def init_shared_var(action_space, observation_space, aug_obs_dim,
num_processes, num_agents, num_actors):
manager = mp.Manager()
shared_list = manager.list([False] * num_processes)
done_list = manager.list([False] * num_processes)
actions = torch.zeros(num_processes, num_agents, 1).long()
action_log_probs = torch.zeros(num_processes, num_agents, 1)
action_logits = torch.zeros(num_processes, num_agents, action_space.n)
values = torch.zeros(num_processes, num_agents, 1)
observations = torch.zeros(num_processes, *observation_space.shape)
aug_observations = torch.zeros(num_processes, num_agents, aug_obs_dim)
    actions.share_memory_()
    action_log_probs.share_memory_()
    values.share_memory_()
    observations.share_memory_()
    aug_observations.share_memory_()
    action_logits.share_memory_()
step_dones = mp.Array(ctypes.c_int32, int(num_processes))
act_in_progs = mp.Array(ctypes.c_int32, int(num_processes))
model_updates = mp.Array(ctypes.c_int32, int(num_actors))
please_load_model = Value('i', 0)
please_load_model_actor = torch.zeros(int(num_actors)).long()
all_episode_scores = manager.list()
return shared_list, done_list, actions, action_log_probs, action_logits, values, observations, aug_observations, \
step_dones, act_in_progs, model_updates, please_load_model, please_load_model_actor, all_episode_scores
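# All tensors above are allocated once in shared memory so actor processes can
# write actions/observations in place while the learner reads them without
# serialization; the mp.Array/Value objects carry cross-process control flags.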
def init_policies(observation_space, action_space, base_kwargs,
num_agents, base):
actor_critics = [Policy(
observation_space.shape[1:],
action_space if num_agents == 1 else Discrete(action_space.nvec[0]),
base=get_base(base),
base_kwargs=base_kwargs) for _ in range(num_agents)]
shared_cpu_actor_critics = [Policy(
observation_space.shape[1:],
action_space if num_agents == 1 else Discrete(action_space.nvec[0]),
base=get_base(base),
base_kwargs=base_kwargs).share_memory() for _ in range(num_agents)]
shared_cpu_actor_critics_env_actor = [Policy(
observation_space.shape[1:],
action_space if num_agents == 1 else Discrete(action_space.nvec[0]),
base=get_base(base),
base_kwargs=base_kwargs).share_memory() for _ in range(num_agents)]
pytorch_total_params = sum(
p.numel() for p in actor_critics[0].parameters() if p.requires_grad)
print('number of params ', pytorch_total_params)
return actor_critics, shared_cpu_actor_critics, shared_cpu_actor_critics_env_actor
def get_policy_arg(hidden_size):
base_kwargs = {'recurrent': False, 'hidden_size': hidden_size}
aug_obs_dim = 0
return base_kwargs, aug_obs_dim
def get_env_info(env_name, state, reward_experiment, num_left_agents,
num_right_agents, representation, render, seed, num_agents):
is_football = '11' in env_name or 'academy' in env_name
if is_football:
env = football_env.create_environment(
representation=representation,
env_name=env_name,
stacked=('stacked' in state),
rewards=reward_experiment,
logdir=None,
render=render and (seed == 0),
dump_frequency=50 if render and seed == 0 else 0)
else:
env = create_atari_mjc_env(env_name)
if num_agents == 1:
from a2c_ppo_acktr.envs import ObsUnsqueezeWrapper
env = ObsUnsqueezeWrapper(env)
env.reset()
num_left_player = env.unwrapped._cached_observation[0]['left_team'].shape[0] if is_football else 1
num_right_player = env.unwrapped._cached_observation[
0]['right_team'].shape[0] if is_football else 0
observation_space = copy.deepcopy(env.observation_space)
action_space = copy.deepcopy(env.action_space)
env.close()
return num_left_player, num_right_player, observation_space, action_space
|
1632290
|
import pytest
from lcs.agents.xncs import Backpropagation, Configuration, Classifier, Effect, ClassifiersList
from lcs.agents.xcs import Condition
class TestBackpropagation:
@pytest.fixture
def cfg(self):
return Configuration(lmc=2, lem=0.2, number_of_actions=4)
def test_init(self, cfg):
bp = Backpropagation(cfg)
assert id(bp.cfg) == id(cfg)
def test_insert(self, cfg):
bp = Backpropagation(cfg)
cl = Classifier(cfg=cfg, condition=Condition("1111"), action=0, time_stamp=0)
ef = Effect("0110")
bp.insert_into_bp(cl, ef)
assert id(bp.classifiers_for_update[0]) == id(cl)
assert id(bp.update_vectors[0]) == id(ef)
assert bp.classifiers_for_update[0] == cl
assert bp.update_vectors[0] == ef
def test_update(self, cfg):
bp = Backpropagation(cfg)
cl = Classifier(cfg=cfg, condition=Condition("1111"), action=0, time_stamp=0)
ef = Effect("0110")
bp.insert_into_bp(cl, ef)
bp.update_bp()
assert cl.effect == ef
bp.insert_into_bp(cl, ef)
bp.update_bp()
assert cl.effect == ef
assert cl.error != cfg.initial_error
    def test_check_and_update(self, cfg):
bp = Backpropagation(cfg)
cl = Classifier(cfg=cfg, condition=Condition("1111"), action=0, time_stamp=0)
ef = Effect("0110")
bp.insert_into_bp(cl, ef)
bp.check_and_update()
assert cl.effect is None
bp.check_and_update()
assert cl.effect is not None
|
1632295
|
from tensor2struct.languages.dsl.common.errors import ParsingError, ExecutionError
from tensor2struct.languages.dsl.common.util import START_SYMBOL, END_SYMBOL
|
1632379
|
from pyvr.renderer import Renderer
from pyvr.actors import VolumeActor
from pyvr.actors import SliceActor
from pyvr.data.volume import load_volume
from pyvr.utils.video import write_video
if __name__ == '__main__':
volume_file = 'original-image.mhd'
volume = load_volume(volume_file)
clim = (-150, 350)
renderer = Renderer()
renderer.set_camera(pos=(0,-1200,0))
renderer.add_actor(VolumeActor(volume, 'bone'))
renderer.add_actor(SliceActor(volume, normal=(1,0,0), clim=clim))
renderer.add_actor(SliceActor(volume, normal=(0,1,0), clim=clim))
renderer.add_actor(SliceActor(volume, normal=(0,0,1), clim=clim))
proj = renderer.render(rotate_angles=list(range(0,360,1)), bg=(1,1,1))
write_video(proj, 'test.mp4')
|
1632396
|
import torchvision.models as models
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append('../PNAS/')
from PNASnet import *
from genotypes import PNASNet
class PNASModel(nn.Module):
def __init__(self, num_channels=3, train_enc=False, load_weight=1):
super(PNASModel, self).__init__()
self.path = '../PNAS/PNASNet-5_Large.pth'
self.pnas = NetworkImageNet(216, 1001, 12, False, PNASNet)
if load_weight:
self.pnas.load_state_dict(torch.load(self.path))
for param in self.pnas.parameters():
param.requires_grad = train_enc
self.padding = nn.ConstantPad2d((0,1,0,1),0)
self.drop_path_prob = 0
self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)
self.deconv_layer0 = nn.Sequential(
nn.Conv2d(in_channels = 4320, out_channels = 512, kernel_size=3, padding=1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer1 = nn.Sequential(
nn.Conv2d(in_channels = 512+2160, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer2 = nn.Sequential(
nn.Conv2d(in_channels = 1080+256, out_channels = 270, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer3 = nn.Sequential(
nn.Conv2d(in_channels = 540, out_channels = 96, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer4 = nn.Sequential(
nn.Conv2d(in_channels = 192, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer5 = nn.Sequential(
nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
nn.Sigmoid()
)
def forward(self, images):
batch_size = images.size(0)
s0 = self.pnas.conv0(images)
s0 = self.pnas.conv0_bn(s0)
out1 = self.padding(s0)
s1 = self.pnas.stem1(s0, s0, self.drop_path_prob)
out2 = s1
s0, s1 = s1, self.pnas.stem2(s0, s1, 0)
for i, cell in enumerate(self.pnas.cells):
s0, s1 = s1, cell(s0, s1, 0)
if i==3:
out3 = s1
if i==7:
out4 = s1
if i==11:
out5 = s1
out5 = self.deconv_layer0(out5)
x = torch.cat((out5,out4), 1)
x = self.deconv_layer1(x)
x = torch.cat((x,out3), 1)
x = self.deconv_layer2(x)
x = torch.cat((x,out2), 1)
x = self.deconv_layer3(x)
x = torch.cat((x,out1), 1)
x = self.deconv_layer4(x)
x = self.deconv_layer5(x)
x = x.squeeze(1)
return x
class DenseModel(nn.Module):
def __init__(self, num_channels=3, train_enc=False, load_weight=1):
super(DenseModel, self).__init__()
self.dense = models.densenet161(pretrained=bool(load_weight)).features
for param in self.dense.parameters():
param.requires_grad = train_enc
self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)
self.conv_layer0 = nn.Sequential(*list(self.dense)[:3])
self.conv_layer1 = nn.Sequential(
self.dense.pool0,
self.dense.denseblock1,
*list(self.dense.transition1)[:3]
)
self.conv_layer2 = nn.Sequential(
self.dense.transition1[3],
self.dense.denseblock2,
*list(self.dense.transition2)[:3]
)
self.conv_layer3 = nn.Sequential(
self.dense.transition2[3],
self.dense.denseblock3,
*list(self.dense.transition3)[:3]
)
self.conv_layer4 = nn.Sequential(
self.dense.transition3[3],
self.dense.denseblock4
)
self.deconv_layer0 = nn.Sequential(
nn.Conv2d(in_channels = 2208, out_channels = 512, kernel_size=3, padding=1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer1 = nn.Sequential(
nn.Conv2d(in_channels = 512+1056, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer2 = nn.Sequential(
nn.Conv2d(in_channels = 384+256, out_channels = 192, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer3 = nn.Sequential(
nn.Conv2d(in_channels = 192+192, out_channels = 96, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer4 = nn.Sequential(
nn.Conv2d(in_channels = 96+96, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer5 = nn.Sequential(
nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
nn.Sigmoid()
)
def forward(self, images):
batch_size = images.size(0)
out1 = self.conv_layer0(images)
out2 = self.conv_layer1(out1)
out3 = self.conv_layer2(out2)
out4 = self.conv_layer3(out3)
out5 = self.conv_layer4(out4)
assert out1.size() == (batch_size, 96, 128, 128)
assert out2.size() == (batch_size, 192, 64, 64)
assert out3.size() == (batch_size, 384, 32, 32)
assert out4.size() == (batch_size, 1056, 16, 16)
assert out5.size() == (batch_size, 2208, 8, 8)
out5 = self.deconv_layer0(out5)
x = torch.cat((out5,out4), 1)
x = self.deconv_layer1(x)
x = torch.cat((x,out3), 1)
x = self.deconv_layer2(x)
x = torch.cat((x,out2), 1)
x = self.deconv_layer3(x)
x = torch.cat((x,out1), 1)
x = self.deconv_layer4(x)
x = self.deconv_layer5(x)
x = x.squeeze(1)
return x
class ResNetModel(nn.Module):
def __init__(self, num_channels=3, train_enc=False, load_weight=1):
super(ResNetModel, self).__init__()
self.num_channels = num_channels
self.resnet = models.resnet50(pretrained=bool(load_weight))
for param in self.resnet.parameters():
param.requires_grad = train_enc
self.conv_layer1 = nn.Sequential(
self.resnet.conv1,
self.resnet.bn1,
self.resnet.relu
)
self.conv_layer2 = nn.Sequential(
self.resnet.maxpool,
self.resnet.layer1
)
self.conv_layer3 = self.resnet.layer2
self.conv_layer4 = self.resnet.layer3
self.conv_layer5 = self.resnet.layer4
self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)
self.deconv_layer0 = nn.Sequential(
nn.Conv2d(in_channels=2048, out_channels=1024, kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer1 = nn.Sequential(
nn.Conv2d(in_channels = 2048, out_channels = 512, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer2 = nn.Sequential(
nn.Conv2d(in_channels = 1024, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer3 = nn.Sequential(
nn.Conv2d(in_channels = 512, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer4 = nn.Sequential(
nn.Conv2d(in_channels = 128, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer5 = nn.Sequential(
nn.Conv2d(in_channels = 64, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels = 64, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
nn.Sigmoid()
)
def forward(self, images):
batch_size = images.size(0)
out1 = self.conv_layer1(images)
out2 = self.conv_layer2(out1)
out3 = self.conv_layer3(out2)
out4 = self.conv_layer4(out3)
out5 = self.conv_layer5(out4)
out5 = self.deconv_layer0(out5)
assert out5.size() == (batch_size, 1024, 16, 16)
x = torch.cat((out5,out4), 1)
assert x.size() == (batch_size, 2048, 16, 16)
x = self.deconv_layer1(x)
assert x.size() == (batch_size, 512, 32, 32)
x = torch.cat((x, out3), 1)
assert x.size() == (batch_size, 1024, 32, 32)
x = self.deconv_layer2(x)
assert x.size() == (batch_size, 256, 64, 64)
x = torch.cat((x, out2), 1)
assert x.size() == (batch_size, 512, 64, 64)
x = self.deconv_layer3(x)
assert x.size() == (batch_size, 64, 128, 128)
x = torch.cat((x, out1), 1)
assert x.size() == (batch_size, 128, 128, 128)
x = self.deconv_layer4(x)
x = self.deconv_layer5(x)
assert x.size() == (batch_size, 1, 256, 256)
x = x.squeeze(1)
assert x.size() == (batch_size, 256, 256)
return x
class VGGModel(nn.Module):
def __init__(self, num_channels=3, train_enc=False, load_weight=1):
super(VGGModel, self).__init__()
self.num_channels = num_channels
self.vgg = models.vgg16(pretrained=bool(load_weight)).features
for param in self.vgg.parameters():
param.requires_grad = train_enc
self.conv_layer1 = self.vgg[:7]
self.conv_layer2 = self.vgg[7:12]
self.conv_layer3 = self.vgg[12:19]
self.conv_layer4 = self.vgg[19:24]
self.conv_layer5 = self.vgg[24:]
self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)
self.deconv_layer1 = nn.Sequential(
nn.Conv2d(in_channels = 1024, out_channels = 512, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer2 = nn.Sequential(
nn.Conv2d(in_channels = 1024, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer3 = nn.Sequential(
nn.Conv2d(in_channels = 512, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer4 = nn.Sequential(
nn.Conv2d(in_channels = 256, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer5 = nn.Sequential(
nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
nn.Sigmoid()
)
def forward(self, images):
batch_size = images.size(0)
out1 = self.conv_layer1(images)
out2 = self.conv_layer2(out1)
out3 = self.conv_layer3(out2)
out4 = self.conv_layer4(out3)
out5 = self.conv_layer5(out4)
out5 = self.linear_upsampling(out5)
assert out5.size() == (batch_size, 512, 16, 16)
x = torch.cat((out5,out4), 1)
assert x.size() == (batch_size, 1024, 16, 16)
x = self.deconv_layer1(x)
assert x.size() == (batch_size, 512, 32, 32)
x = torch.cat((x, out3), 1)
assert x.size() == (batch_size, 1024, 32, 32)
x = self.deconv_layer2(x)
assert x.size() == (batch_size, 256, 64, 64)
x = torch.cat((x, out2), 1)
assert x.size() == (batch_size, 512, 64, 64)
x = self.deconv_layer3(x)
assert x.size() == (batch_size, 128, 128, 128)
x = torch.cat((x, out1), 1)
assert x.size() == (batch_size, 256, 128, 128)
x = self.deconv_layer4(x)
x = self.deconv_layer5(x)
assert x.size() == (batch_size, 1, 256, 256)
x = x.squeeze(1)
assert x.size() == (batch_size, 256, 256)
return x
class MobileNetV2(nn.Module):
def __init__(self, num_channels=3, train_enc=False, load_weight=1):
super(MobileNetV2, self).__init__()
self.mobilenet = torch.hub.load('pytorch/vision:v0.4.0', 'mobilenet_v2', pretrained=True).features
for param in self.mobilenet.parameters():
param.requires_grad = train_enc
self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)
self.conv_layer1 = self.mobilenet[:2]
self.conv_layer2 = self.mobilenet[2:4]
self.conv_layer3 = self.mobilenet[4:7]
self.conv_layer4 = self.mobilenet[7:14]
self.conv_layer5 = self.mobilenet[14:]
self.deconv_layer0 = nn.Sequential(
nn.Conv2d(in_channels = 1280, out_channels = 96, kernel_size=3, padding=1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer1 = nn.Sequential(
nn.Conv2d(in_channels = 96+96, out_channels = 32, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer2 = nn.Sequential(
nn.Conv2d(in_channels = 32+32, out_channels = 24, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer3 = nn.Sequential(
nn.Conv2d(in_channels = 24+24, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer4 = nn.Sequential(
nn.Conv2d(in_channels = 16+16, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
self.linear_upsampling
)
self.deconv_layer5 = nn.Sequential(
nn.Conv2d(in_channels = 16, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels = 16, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
nn.Sigmoid()
)
def forward(self, images):
batch_size = images.size(0)
out1 = self.conv_layer1(images)
out2 = self.conv_layer2(out1)
out3 = self.conv_layer3(out2)
out4 = self.conv_layer4(out3)
out5 = self.conv_layer5(out4)
assert out1.size() == (batch_size, 16, 128, 128)
assert out2.size() == (batch_size, 24, 64, 64)
assert out3.size() == (batch_size, 32, 32, 32)
assert out4.size() == (batch_size, 96, 16, 16)
assert out5.size() == (batch_size, 1280, 8, 8)
out5 = self.deconv_layer0(out5)
x = torch.cat((out5,out4), 1)
x = self.deconv_layer1(x)
x = torch.cat((x,out3), 1)
x = self.deconv_layer2(x)
x = torch.cat((x,out2), 1)
x = self.deconv_layer3(x)
x = torch.cat((x,out1), 1)
x = self.deconv_layer4(x)
x = self.deconv_layer5(x)
x = x.squeeze(1)
return x
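# A quick shape smoke test for the decoders above, run as a script. It is a
# sketch assuming a 256x256 RGB input (matching the assert statements in each
# forward pass); load_weight=0 skips downloading pretrained encoder weights.
if __name__ == '__main__':
    model = VGGModel(load_weight=0)
    saliency = model(torch.randn(1, 3, 256, 256))
    print(saliency.size())  # expected: torch.Size([1, 256, 256])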
|
1632427
|
from typing import Dict
import torch
from torch.optim.optimizer import Optimizer
from pytorch_optimizer.base_optimizer import BaseOptimizer
from pytorch_optimizer.types import CLOSURE, DEFAULTS, PARAMETERS
class SAM(Optimizer, BaseOptimizer):
"""
Reference : https://github.com/davda54/sam
Example :
from pytorch_optimizer import SAM
...
model = YourModel()
base_optimizer = Ranger21
optimizer = SAM(model.parameters(), base_optimizer)
...
for input, output in data:
# first forward-backward pass
# use this loss for any training statistics
loss = loss_function(output, model(input))
loss.backward()
optimizer.first_step(zero_grad=True)
# second forward-backward pass
# make sure to do a full forward pass
loss_function(output, model(input)).backward()
optimizer.second_step(zero_grad=True)
Alternative Example with a single closure-based step function:
from pytorch_optimizer import SAM
...
model = YourModel()
base_optimizer = Ranger21
optimizer = SAM(model.parameters(), base_optimizer)
def closure():
loss = loss_function(output, model(input))
loss.backward()
return loss
...
for input, output in data:
loss = loss_function(output, model(input))
loss.backward()
optimizer.step(closure)
optimizer.zero_grad()
"""
def __init__(
self,
params: PARAMETERS,
base_optimizer,
rho: float = 0.05,
adaptive: bool = False,
**kwargs,
):
"""SAM
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups
:param base_optimizer: Optimizer. base optimizer
:param rho: float. size of the neighborhood for computing the max loss
:param adaptive: bool. element-wise Adaptive SAM
:param kwargs: Dict. parameters for optimizer.
"""
self.rho = rho
self.validate_parameters()
defaults: DEFAULTS = dict(rho=rho, adaptive=adaptive, **kwargs)
super().__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
def validate_parameters(self):
self.validate_rho(self.rho)
@torch.no_grad()
def reset(self):
pass
@torch.no_grad()
def first_step(self, zero_grad: bool = False):
grad_norm = self.grad_norm()
for group in self.param_groups:
scale = group['rho'] / (grad_norm + 1e-12)
for p in group['params']:
if p.grad is None:
continue
self.state[p]['old_p'] = p.clone()
e_w = (torch.pow(p, 2) if group['adaptive'] else 1.0) * p.grad * scale.to(p)
# climb to the local maximum "w + e(w)"
p.add_(e_w)
if zero_grad:
self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad: bool = False):
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
                # get back to "w" from "w + e(w)"; assign through .data so the
                # parameter tensor itself is restored, not just the local name
                p.data = self.state[p]['old_p']
# do the actual "sharpness-aware" update
self.base_optimizer.step()
if zero_grad:
self.zero_grad()
@torch.no_grad()
def step(self, closure: CLOSURE = None):
if closure is None:
raise RuntimeError('[-] Sharpness Aware Minimization (SAM) requires closure')
self.first_step(zero_grad=True)
# the closure should do a full forward-backward pass
with torch.enable_grad():
closure()
self.second_step()
def grad_norm(self) -> torch.Tensor:
# put everything on the same device, in case of model parallelism
shared_device = self.param_groups[0]['params'][0].device
return torch.norm(
torch.stack(
[
((torch.abs(p) if group['adaptive'] else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups
for p in group['params']
if p.grad is not None
]
),
p=2,
)
def load_state_dict(self, state_dict: Dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
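# A minimal runnable sketch of the closure-based usage shown in the docstring
# above. Assumptions: a tiny linear model, SGD as the base optimizer, and
# random data; any torch.optim optimizer class should slot in the same way.
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Linear(4, 1)
    optimizer = SAM(model.parameters(), torch.optim.SGD, rho=0.05, lr=0.1)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    def closure():
        # full forward-backward pass, re-run at the perturbed weights
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()
        return loss
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step(closure)
    optimizer.zero_grad()
    print('one SAM step done; loss was %.4f' % float(loss))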
|
1632456
|
import numpy as np
import unittest
from partitura import EXAMPLE_MUSICXML
from partitura import load_musicxml
from partitura.musicanalysis import estimate_spelling
def compare_spelling(spelling, notes):
comparisons = np.zeros((len(spelling), 3))
for i, (n, s) in enumerate(zip(notes, spelling)):
comparisons[i, 0] = int(n.step == s["step"])
if n.alter is None and s["alter"] == 0:
comparisons[i, 1] = 1
else:
comparisons[i, 1] = int(n.alter == s["alter"])
comparisons[i, 2] = int(n.octave == s["octave"])
return comparisons
class TestPitchSpellingEstimation(unittest.TestCase):
    """
    Test pitch spelling estimation
    """
score = load_musicxml(EXAMPLE_MUSICXML)
def test_part(self):
spelling = estimate_spelling(self.score)
comparisons = compare_spelling(spelling, self.score.notes)
self.assertTrue(np.all(comparisons), "Incorrect spelling")
def test_note_array(self):
spelling = estimate_spelling(self.score.note_array)
comparisons = compare_spelling(spelling, self.score.notes)
self.assertTrue(np.all(comparisons), "Incorrect spelling")
|
1632492
|
from utils.registry import Registry
"""
Feature Extractor.
"""
# Backbone
BACKBONES = Registry()
# FPN
FPN_BODY = Registry()
"""
ROI Head.
"""
# Box Head
ROI_CLS_HEADS = Registry()
ROI_CLS_OUTPUTS = Registry()
ROI_BOX_HEADS = Registry()
ROI_BOX_OUTPUTS = Registry()
# OPLD Head
ROI_OPLD_HEADS = Registry()
ROI_OPLD_OUTPUTS = Registry()
|
1632516
|
import signal
import threading
import sys
def write(msg):
msg = msg + '\n' if not msg.endswith('\n') else msg
sys.stderr.write(msg)
sys.stderr.flush()
class TaskletMetrics(object):
'''This class allows a Tasklet to store any state that a Task may need to
have after the execution of its Tasklets.'''
def __init__(self):
pass
class Tasklet(object):
'''A Tasklet represents a unit of work run in a separate thread.'''
def __init__(self, id, client_params):
self.id = id
self.client_params = client_params
self._metrics = TaskletMetrics()
self._runnable = True
def launch(self):
client = self.client_params.create_client()
self.run(client, self._metrics)
    def run(self, client, metrics):
raise NotImplementedError
def write(self, msg):
msg = '[thread%s] %s' % (self.id, msg)
write(msg)
class TaskState(object):
'''This class represents any state that a task needs to keep between task
phases.'''
def __init__(self):
pass
class Task(object):
'''Represents a task run against the server, which internally delegates its
work to Tasklets run in threads.
Users need to implement a Task to do any setup/teardown of the task and
define the creation of Tasklets.'''
def __init__(self, client_params):
self.client_params = client_params
    def create_tasklets(self, state):
        raise NotImplementedError
    def pre_tasklets(self, client, state):
        raise NotImplementedError
def run_tasklets(self, tasklets):
metrics_list = []
threads = []
        def handle_signal(signum, frame):
write("Got interrupt, stopping all tasklets")
for tasklet in tasklets:
tasklet._runnable = False
signal.signal(signal.SIGINT, handle_signal)
for tasklet in tasklets:
thread = threading.Thread(target=tasklet.launch)
thread.daemon = True
metrics_list.append(tasklet._metrics)
threads.append(thread)
thread.start()
while threads:
for thread in threads:
thread.join(.05)
                if not thread.is_alive():
threads.remove(thread)
return metrics_list
    def post_tasklets(self, client, state, metrics_list):
        raise NotImplementedError
def launch(self):
client = self.client_params.create_client()
state = TaskState()
tasklets = self.create_tasklets(state)
self.pre_tasklets(client, state)
metrics_list = self.run_tasklets(tasklets)
self.post_tasklets(client, state, metrics_list)
def write(self, msg):
write(msg)
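# A minimal sketch of the subclassing contract described in the docstrings
# above. DemoClientParams and its create_client() are assumptions standing in
# for whatever client factory real tasks use.
class DemoClientParams(object):
    def create_client(self):
        return object()  # placeholder client
class DemoTasklet(Tasklet):
    def run(self, client, metrics):
        self.write('doing one unit of work')
class DemoTask(Task):
    def create_tasklets(self, state):
        return [DemoTasklet(i, self.client_params) for i in range(2)]
    def pre_tasklets(self, client, state):
        self.write('setup')
    def post_tasklets(self, client, state, metrics_list):
        self.write('teardown; collected %d metrics' % len(metrics_list))
if __name__ == '__main__':
    DemoTask(DemoClientParams()).launch()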
|
1632530
|
import os, sys, time, os.path, pyperclip, pyautogui, ctypes
from colorama import Fore
from selenium import webdriver
def autologin() :
os.system('cls' if os.name == 'nt' else 'clear')
autologintitle()
print(f"""{y}[{w}+{y}]{w} Enter the token of the account you want to connect to""")
entertoken = str(input(f"""{y}[{b}#{y}]{w} Token: """))
try:
driver = webdriver.Chrome(executable_path=r'Additional_File/15_AutoLogin/chromedriver.exe')
driver.maximize_window()
driver.get('https://discord.com/login')
js = 'function login(token) {setInterval(() => {document.body.appendChild(document.createElement `iframe`).contentWindow.localStorage.token = `"${token}"`}, 50);setTimeout(() => {location.reload();}, 500);}'
time.sleep(3)
driver.execute_script(js + f'login("{entertoken}")')
time.sleep(10)
if driver.current_url == 'https://discord.com/login':
os.system('cls' if os.name == 'nt' else 'clear')
autologintitle()
print(f"""{y}[{Fore.LIGHTRED_EX }!{y}]{w} Connection Failed""")
driver.close()
else:
os.system('cls' if os.name == 'nt' else 'clear')
autologintitle()
print(f"""{y}[{Fore.LIGHTGREEN_EX }!{y}]{w} Connection Established""")
input(f"""{y}[{b}#{y}]{w} Press ENTER to exit""")
main()
    except Exception:
print(f""" {y}[{Fore.LIGHTRED_EX }!{y}]{w} There is a problem with your Token""")
time.sleep(2)
os.system('cls' if os.name == 'nt' else 'clear')
main()
autologin()
|
1632542
|
import pandas as pd
from weedcoco.utils import get_task_types
EXPECTED_FIELDS = ["segmentation", "bounding box"]
class WeedCOCOStats:
def __init__(self, weedcoco):
self.annotations = self.compute_annotation_frame(weedcoco)
self.category_summary = self.compute_summary(
self.annotations, ["agcontext_id", "category_id"]
)
self.agcontext_summary = self.compute_summary(
self.annotations, ["agcontext_id"]
)
@staticmethod
def compute_annotation_frame(weedcoco):
out = [
{
"annotation_id": annotation["id"],
"image_id": annotation["image_id"],
"category_id": annotation["category_id"],
**{task_type: 1 for task_type in get_task_types(annotation)},
}
for annotation in weedcoco["annotations"]
]
out = pd.DataFrame(out)
for field in EXPECTED_FIELDS:
if field not in out:
out[field] = 0
image_to_agcontext = pd.Series(
{image["id"]: image["agcontext_id"] for image in weedcoco["images"]}
)
out["agcontext_id"] = out.image_id.map(image_to_agcontext)
return out
@staticmethod
def compute_summary(annotation_frame, by):
gb = annotation_frame.groupby(by)
def get_sums(field):
return gb[field].sum().astype(int)
return pd.DataFrame(
{
"annotation_count": gb.size(),
"image_count": gb["image_id"].nunique(),
"segmentation_count": get_sums("segmentation"),
"bounding_box_count": get_sums("bounding box"),
}
)
|
1632550
|
from dataclasses import dataclass
from typing import Optional, List, Dict, Tuple, Union
from agate import Table
from dbt.contracts.connection import AdapterResponse
from dbt.adapters.base import AdapterConfig
from dbt.adapters.base.relation import BaseRelation
from dbt.adapters.spark.impl import SparkAdapter, LIST_SCHEMAS_MACRO_NAME
from dbt.adapters.databricks.column import DatabricksColumn
from dbt.adapters.databricks.connections import DatabricksConnectionManager
from dbt.adapters.databricks.relation import DatabricksRelation
from dbt.adapters.databricks.utils import undefined_proof
@dataclass
class DatabricksConfig(AdapterConfig):
file_format: str = "delta"
location_root: Optional[str] = None
partition_by: Optional[Union[List[str], str]] = None
clustered_by: Optional[Union[List[str], str]] = None
buckets: Optional[int] = None
options: Optional[Dict[str, str]] = None
merge_update_columns: Optional[str] = None
tblproperties: Optional[Dict[str, str]] = None
@undefined_proof
class DatabricksAdapter(SparkAdapter):
Relation = DatabricksRelation
Column = DatabricksColumn
ConnectionManager = DatabricksConnectionManager
connections: DatabricksConnectionManager
AdapterSpecificConfigs = DatabricksConfig
def list_schemas(self, database: Optional[str]) -> List[str]:
"""
        Get a list of existing schemas in the database.
If `database` is `None`, fallback to executing `show databases` because
`list_schemas` tries to collect schemas from all catalogs when `database` is `None`.
"""
if database is not None:
results = self.connections.list_schemas(database=database)
else:
results = self.execute_macro(LIST_SCHEMAS_MACRO_NAME, kwargs={"database": database})
return [row[0] for row in results]
def check_schema_exists(self, database: Optional[str], schema: str) -> bool:
"""Check if a schema exists."""
return schema.lower() in set(s.lower() for s in self.list_schemas(database=database))
def execute(
self,
sql: str,
auto_begin: bool = False,
fetch: bool = False,
*,
staging_table: Optional[BaseRelation] = None,
) -> Tuple[AdapterResponse, Table]:
try:
return super().execute(sql=sql, auto_begin=auto_begin, fetch=fetch)
finally:
if staging_table is not None:
self.drop_relation(staging_table)
|
1632595
|
from pathlib import Path
from setuptools import find_packages, setup
requirements = ["black>=18.9b0", "loguru>=0.2.5"]
this_directory = Path(__file__).parent.resolve()
with open(Path(this_directory).joinpath("README.md"), encoding="utf-8") as readme_md:
README = readme_md.read()
exec(open("src/blackbook/version.py", "r").read())
setup(
name="blackbook",
version=__version__,
install_requires=requirements,
author="<NAME>, <NAME>, <NAME>",
author_email=("<EMAIL>"),
packages=find_packages("src"),
package_dir={"": "src"},
url="",
license="The MIT License (MIT)",
description="`Black` for Jupyter notebooks.",
long_description=README,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
entry_points={"console_scripts": "blackbook=blackbook.__main__:main"},
)
|
1632604
|
from django.template import Library
from _1327.information_pages.models import InformationDocument
register = Library()
@register.filter
def can_user_see_author(document, user):
if document.show_author_to == InformationDocument.SHOW_AUTHOR_TO_EVERYONE:
return True
elif document.show_author_to == InformationDocument.SHOW_AUTHOR_TO_LOGGED_IN_USERS:
return user.is_authenticated and not user.is_anonymous
else:
return False
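# A template usage sketch (assumption: the template first loads this tag
# library by its module name via {% load ... %}):
#   {% if document|can_user_see_author:request.user %} ... {% endif %}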
|
1632625
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class AllTests(TranspileTestCase):
def test_all(self):
self.assertCodeExecution("print(all([None, True, False]))")
def test_all_true(self):
self.assertCodeExecution("print(all([1,True,3]))")
def test_all_false(self):
self.assertCodeExecution("print(all([0, '', 0.0]))")
def test_all_empty_list(self):
self.assertCodeExecution("print(all([]))")
def test_all_typeerror(self):
self.assertCodeExecution("""
try:
print(all(None))
except TypeError:
print("Done.")
""")
def test_all_doc(self):
self.assertCodeExecution("""
print(all.__doc__)
""")
def test_all_sequence(self):
self.assertCodeExecution("""
class Sequence:
def __init__(self, value):
self.value = value
def __len__(self):
return len(self.value)
def __getitem__(self, idx):
return self.value[idx]
not_all_values = Sequence([1,2,0,1])
print(all(not_all_values))
all_values = Sequence([1,2,3,1])
print(all(all_values))
no_values = Sequence([0,0,0])
print(all(no_values))
""")
class BuiltinAllFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
function = "all"
|
1632674
|
from collections import OrderedDict
import numpy as np
from ...data import Data
from ...instrument import Instrument
class Ice(Data):
r"""Loads ICE (NCNR) format ascii data file.
"""
def __init__(self):
super(Ice, self).__init__()
def load(self, filename, build_hkl=True, load_instrument=False):
r"""Loads the ICE (NCNR) format ascii data file.
Parameters
----------
filename : str
Path to file to open
build_hkl : bool, optional
Option to build Q = [h, k, l, e, temp]
load_instrument : bool, optional
Option to build Instrument from file header
"""
with open(filename) as f:
file_header = []
for line in f:
if 'Columns' in line:
args = line.split()
col_headers = [head for head in args[1:]]
break
args = np.genfromtxt(filename, usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8),
unpack=True, comments="#", dtype=np.float64)
data = OrderedDict()
for head, col in zip(col_headers, args):
data[head] = col
self._data = data
self.data_keys = {'detector': 'Detector', 'monitor': 'Monitor', 'time': 'Time'}
self._file_header = file_header
if build_hkl:
            self.Q_keys = {'h': 'QX', 'k': 'QY', 'l': 'QZ', 'e': 'E', 'temp': 'Temp'}
if load_instrument:
instrument = Instrument()
self.instrument = instrument
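# A usage sketch (assumption: 'scan.dat' is an ICE-format ascii file whose
# header contains the "Columns" line that load() expects):
# ice = Ice()
# ice.load('scan.dat', build_hkl=True, load_instrument=False)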
|
1632713
|
import json
import botocore
from . import clients, resources, cloudwatch_logging
class ARN:
fields = "arn partition service region account_id resource".split()
_default_region, _default_account_id, _default_iam_username = None, None, None
def __init__(self, arn="arn:aws::::", **kwargs):
self.__dict__.update(dict(zip(self.fields, arn.split(":", 5)), **kwargs))
if "region" not in kwargs and not self.region:
self.region = self.get_region()
if "account_id" not in kwargs and not self.account_id:
self.account_id = self.get_account_id()
@classmethod
def get_region(cls):
if cls._default_region is None:
cls._default_region = botocore.session.Session().get_config_variable("region")
return cls._default_region
@classmethod
def get_account_id(cls):
if cls._default_account_id is None:
cls._default_account_id = clients.sts.get_caller_identity()["Account"]
return cls._default_account_id
def __str__(self):
return ":".join(getattr(self, field) for field in self.fields)
def send_sns_msg(topic_arn, message, attributes=None):
sns_topic = resources.sns.Topic(str(topic_arn))
args = {'Message': json.dumps(message)}
if attributes is not None:
args['MessageAttributes'] = attributes
sns_topic.publish(**args)
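# A minimal parsing sketch (assumption: a fully specified ARN, so neither
# get_region() nor get_account_id() needs to call out to AWS):
if __name__ == "__main__":
    arn = ARN("arn:aws:sns:us-east-1:123456789012:my-topic")
    assert arn.service == "sns" and arn.resource == "my-topic"
    assert str(arn) == "arn:aws:sns:us-east-1:123456789012:my-topic"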
|
1632780
|
from __future__ import print_function
print("needsthis found")
print("data file: ", open('data/datafile.txt', 'r').read())
|
1632798
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from markitup.fields import MarkupField
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50)
body = MarkupField()
def __str__(self):
return self.title
class AbstractParent(models.Model):
content = MarkupField()
class Meta:
abstract = True
class NoRendered(models.Model):
"""
Test that the no_rendered_field keyword arg works.
"""
body = MarkupField(no_rendered_field=True)
class CallableDefault(models.Model):
"""
A callable default on a field triggers hidden widget rendering by Django.
"""
body = MarkupField(default=lambda: '')
|
1632858
|
from collections import deque
from tests.entities import (DataClassIntImmutableDefault,
DataClassMutableDefaultDict,
DataClassMutableDefaultList, DataClassWithDeque,
DataClassWithDict, DataClassWithDictInt,
DataClassWithFrozenSet, DataClassWithList,
DataClassWithListStr, DataClassWithMyCollection,
DataClassWithOptional, DataClassWithOptionalStr,
DataClassWithSet, DataClassWithTuple,
DataClassWithUnionIntNone, MyCollection)
class TestEncoder:
def test_list(self):
assert DataClassWithList([1]).to_json() == '{"xs": [1]}'
def test_list_str(self):
assert DataClassWithListStr(['1']).to_json() == '{"xs": ["1"]}'
def test_dict(self):
assert DataClassWithDict({'1': 'a'}).to_json() == '{"kvs": {"1": "a"}}'
def test_dict_int(self):
assert DataClassWithDictInt({1: 'a'}).to_json() == '{"kvs": {"1": "a"}}'
def test_set(self):
assert DataClassWithSet({1}).to_json() == '{"xs": [1]}'
def test_tuple(self):
assert DataClassWithTuple((1,)).to_json() == '{"xs": [1]}'
def test_frozenset(self):
assert DataClassWithFrozenSet(frozenset([1])).to_json() == '{"xs": [1]}'
def test_deque(self):
assert DataClassWithDeque(deque([1])).to_json() == '{"xs": [1]}'
def test_optional(self):
assert DataClassWithOptional(1).to_json() == '{"x": 1}'
assert DataClassWithOptional(None).to_json() == '{"x": null}'
def test_optional_str(self):
assert DataClassWithOptionalStr('1').to_json() == '{"x": "1"}'
assert DataClassWithOptionalStr(None).to_json() == '{"x": null}'
assert DataClassWithOptionalStr().to_json() == '{"x": null}'
def test_union_int_none(self):
assert DataClassWithUnionIntNone(1).to_json() == '{"x": 1}'
assert DataClassWithUnionIntNone(None).to_json() == '{"x": null}'
def test_my_collection(self):
assert DataClassWithMyCollection(
MyCollection([1])).to_json() == '{"xs": [1]}'
def test_immutable_default(self):
assert DataClassIntImmutableDefault().to_json() == '{"x": 0}'
def test_mutable_default_list(self):
assert DataClassMutableDefaultList().to_json() == '{"xs": []}'
def test_mutable_default_dict(self):
assert DataClassMutableDefaultDict().to_json() == '{"xs": {}}'
class TestDecoder:
def test_list(self):
assert (DataClassWithList.from_json('{"xs": [1]}') ==
DataClassWithList([1]))
def test_list_str(self):
assert (DataClassWithListStr.from_json('{"xs": ["1"]}') ==
DataClassWithListStr(["1"]))
def test_dict(self):
assert (DataClassWithDict.from_json('{"kvs": {"1": "a"}}') ==
DataClassWithDict({'1': 'a'}))
def test_dict_int(self):
assert (DataClassWithDictInt.from_json('{"kvs": {"1": "a"}}') ==
DataClassWithDictInt({1: 'a'}))
def test_set(self):
assert (DataClassWithSet.from_json('{"xs": [1]}') ==
DataClassWithSet({1}))
def test_tuple(self):
assert (DataClassWithTuple.from_json('{"xs": [1]}') ==
DataClassWithTuple((1,)))
def test_frozenset(self):
assert (DataClassWithFrozenSet.from_json('{"xs": [1]}') ==
DataClassWithFrozenSet(frozenset([1])))
def test_deque(self):
assert (DataClassWithDeque.from_json('{"xs": [1]}') ==
DataClassWithDeque(deque([1])))
def test_optional(self):
assert (DataClassWithOptional.from_json('{"x": 1}') ==
DataClassWithOptional(1))
assert (DataClassWithOptional.from_json('{"x": null}') ==
DataClassWithOptional(None))
def test_optional_str(self):
assert (DataClassWithOptionalStr.from_json('{"x": "1"}') ==
DataClassWithOptionalStr("1"))
assert (DataClassWithOptionalStr.from_json('{"x": null}') ==
DataClassWithOptionalStr(None))
assert (DataClassWithOptionalStr.from_json('{}', infer_missing=True) ==
DataClassWithOptionalStr())
def test_my_collection(self):
assert (DataClassWithMyCollection.from_json('{"xs": [1]}') ==
DataClassWithMyCollection(MyCollection([1])))
    def test_immutable_default(self):
        assert (DataClassIntImmutableDefault.from_json('{"x": 0}')
                == DataClassIntImmutableDefault())
def test_mutable_default_list(self):
assert (DataClassMutableDefaultList.from_json('{"xs": []}')
== DataClassMutableDefaultList())
assert (DataClassMutableDefaultList.from_json('{}', infer_missing=True)
== DataClassMutableDefaultList())
def test_mutable_default_dict(self):
assert (DataClassMutableDefaultDict.from_json('{"kvs": {}}')
== DataClassMutableDefaultDict())
assert (DataClassMutableDefaultDict.from_json('{}', infer_missing=True)
== DataClassMutableDefaultDict())
|
1632877
|
import FWCore.ParameterSet.Config as cms
from RecoEcal.EgammaClusterProducers.particleFlowSuperClusterECAL_cfi import *
particleFlowSuperClusterOOTECAL = particleFlowSuperClusterECAL.clone(
PFClusters = "particleFlowClusterOOTECAL",
ESAssociation = "particleFlowClusterOOTECAL",
PFBasicClusterCollectionBarrel = "particleFlowBasicClusterOOTECALBarrel",
PFSuperClusterCollectionBarrel = "particleFlowSuperClusterOOTECALBarrel",
PFBasicClusterCollectionEndcap = "particleFlowBasicClusterOOTECALEndcap",
PFSuperClusterCollectionEndcap = "particleFlowSuperClusterOOTECALEndcap",
PFBasicClusterCollectionPreshower = "particleFlowBasicClusterOOTECALPreshower",
PFSuperClusterCollectionEndcapWithPreshower = "particleFlowSuperClusterOOTECALEndcapWithPreshower",
## modification for Algo
isOOTCollection = True,
barrelRecHits = "ecalRecHit:EcalRecHitsEB",
endcapRecHits = "ecalRecHit:EcalRecHitsEE"
)
from Configuration.Eras.Modifier_run2_miniAOD_80XLegacy_cff import run2_miniAOD_80XLegacy
run2_miniAOD_80XLegacy.toModify(
particleFlowSuperClusterOOTECAL,
barrelRecHits = "reducedEcalRecHitsEB",
endcapRecHits = "reducedEcalRecHitsEE"
)
run2_miniAOD_80XLegacy.toModify(
particleFlowSuperClusterOOTECAL.regressionConfig,
ecalRecHitsEB = "reducedEcalRecHitsEB",
ecalRecHitsEE = "reducedEcalRecHitsEE"
)
|
1632884
|
import dateutil.parser
from maestro_agent.services.maestro_api.run import (
Run,
RunApi,
RunStatus,
)
def test_maestro_run_get(mocker):
run_id = "1-2-3-4"
get_mock = mocker.patch(
"maestro_agent.services.maestro_api.MaestroApiClient.get",
)
RunApi.get(run_id)
get_mock.assert_called_with(
"/api/run/1-2-3-4",
mapper=RunApi.run_json_to_object,
)
get_mock.assert_called_once()
def test_maestro_run_mapped_response():
run_id = "tr_id_1"
run_plan_id = "tp_id_1"
agent_ids = ["sd_id_1"]
custom_data_ids = ["cd_id_1"]
start = 1
end = 10
duration = 10
host = "test.ff.net"
ip = "127.0.0.2"
custom_property = "custom_prop_test"
custom_property_value = "123"
created_at = "2021-05-19T17:31:47.560000"
updated_at = "2021-06-19T17:31:47.560000"
expected = Run(
id=run_id,
run_status=RunStatus.PENDING.value,
run_plan_id=run_plan_id,
agent_ids=agent_ids,
custom_data_ids=custom_data_ids,
hosts=[dict(host=host, ip=ip)],
custom_properties=[dict(name=custom_property, value=custom_property_value)],
load_profile=[dict(start=start, end=end, duration=duration)],
created_at=dateutil.parser.parse(created_at),
updated_at=dateutil.parser.parse(updated_at),
)
actual = RunApi.run_json_to_object(
dict(
id=run_id,
run_status=RunStatus.PENDING.value,
run_plan_id=run_plan_id,
agent_ids=agent_ids,
custom_data_ids=custom_data_ids,
hosts=[dict(host=host, ip=ip)],
custom_properties=[dict(name=custom_property, value=custom_property_value)],
load_profile=[dict(start=start, end=end, duration=duration)],
created_at=created_at,
updated_at=updated_at,
)
)
assert expected.id == actual.id
assert expected.run_status == actual.run_status
assert expected.run_plan_id == actual.run_plan_id
assert expected.agent_ids == actual.agent_ids
assert expected.custom_data_ids == actual.custom_data_ids
assert expected.hosts[0].host == actual.hosts[0].host
assert expected.hosts[0].ip == actual.hosts[0].ip
assert expected.load_profile[0].start == actual.load_profile[0].start
assert expected.load_profile[0].end == actual.load_profile[0].end
assert expected.load_profile[0].duration == actual.load_profile[0].duration
assert expected.created_at == actual.created_at
assert expected.updated_at == actual.updated_at
def test_maestro_run_update(mocker):
run_id = "1-2-3-4"
run_status = "RUNNING"
data = {"run_status": run_status}
put_mock = mocker.patch(
"maestro_agent.services.maestro_api.MaestroApiClient.put",
)
RunApi.update(run_id, run_status)
put_mock.assert_called_with(
"/api/run/1-2-3-4",
data=data,
mapper=RunApi.run_json_to_object,
)
put_mock.assert_called_once()
def test_maestro_run_send_metrics(mocker):
run_id = "1-2-3-4"
metrics = {"test": 1}
post_mock = mocker.patch(
"maestro_agent.services.maestro_api.MaestroApiClient.post",
)
RunApi.send_metrics(run_id, metrics)
post_mock.assert_called_with(
"/api/run_metrics/1-2-3-4",
data={"metrics": metrics},
)
post_mock.assert_called_once()
|
1632903
|
from typing import Dict, Any
from idewavecore.debug import Logger
from idewavecore.network import (
BaseServer,
ServerFactory,
)
from idewavecore.session import Storage
from .ProxyBuilder import ProxyBuilder
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2021, Idewavecore'
class ServerBuilder:
__slots__ = (
'global_storage',
'connections_map'
)
def __init__(self, **kwargs):
self.global_storage: Storage = kwargs.pop('global_storage')
self.connections_map: Dict[str, Any] = kwargs.pop('connections_map')
def build_from(self, config: Dict[str, Any]) -> BaseServer:
connection = config.get('connection')
options = config.get('options')
proxy_settings = config.get('proxy')
Logger.info(f'Building server "{options.get("server_name")}"')
middlewares_entry = config.get('middlewares')
db_connection_name = config.get('db_connection')
params = {
**connection,
**options,
'middlewares_entry': middlewares_entry,
'global_storage': self.global_storage,
'db_connection': self.connections_map.get(db_connection_name),
}
if proxy_settings:
params['proxy'] = ProxyBuilder.build_from(proxy_settings)
connection_type = connection.get('connection_type')
        if not connection_type:
            Logger.error('Connection type is required')
            raise ValueError('Connection type is required')
        return ServerFactory(connection_type).get_server(**params)
|
1633002
|
import numpy as np
import torch
from pgbar import progress_bar
class RayS(object):
def __init__(self, model, epsilon=0.031, order=np.inf):
self.model = model
self.ord = order
self.epsilon = epsilon
self.sgn_t = None
self.d_t = None
self.x_final = None
self.queries = None
def get_xadv(self, x, v, d, lb=0., ub=1.):
if isinstance(d, int):
d = torch.tensor(d).repeat(len(x)).cuda()
out = x + d.view(len(x), 1, 1, 1) * v
out = torch.clamp(out, lb, ub)
return out
def attack_hard_label(self, x, y, target=None, query_limit=10000, seed=None):
""" Attack the original image and return adversarial example
model: (pytorch model)
(x, y): original image
"""
shape = list(x.shape)
dim = np.prod(shape[1:])
if seed is not None:
np.random.seed(seed)
# init variables
self.queries = torch.zeros_like(y).cuda()
self.sgn_t = torch.sign(torch.ones(shape)).cuda()
self.d_t = torch.ones_like(y).float().fill_(float("Inf")).cuda()
working_ind = (self.d_t > self.epsilon).nonzero().flatten()
stop_queries = self.queries.clone()
dist = self.d_t.clone()
self.x_final = self.get_xadv(x, self.sgn_t, self.d_t)
block_level = 0
block_ind = 0
for i in range(query_limit):
block_num = 2 ** block_level
block_size = int(np.ceil(dim / block_num))
start, end = block_ind * block_size, min(dim, (block_ind + 1) * block_size)
valid_mask = (self.queries < query_limit)
attempt = self.sgn_t.clone().view(shape[0], dim)
attempt[valid_mask.nonzero().flatten(), start:end] *= -1.
attempt = attempt.view(shape)
self.binary_search(x, y, target, attempt, valid_mask)
block_ind += 1
if block_ind == 2 ** block_level or end == dim:
block_level += 1
block_ind = 0
dist = torch.norm((self.x_final - x).view(shape[0], -1), self.ord, 1)
stop_queries[working_ind] = self.queries[working_ind]
working_ind = (dist > self.epsilon).nonzero().flatten()
if torch.sum(self.queries >= query_limit) == shape[0]:
print('out of queries')
break
progress_bar(torch.min(self.queries.float()), query_limit,
'd_t: %.4f | adbd: %.4f | queries: %.4f | rob acc: %.4f | iter: %d'
% (torch.mean(self.d_t), torch.mean(dist), torch.mean(self.queries.float()),
len(working_ind) / len(x), i + 1))
stop_queries = torch.clamp(stop_queries, 0, query_limit)
return self.x_final, stop_queries, dist, (dist <= self.epsilon)
# check whether solution is found
def search_succ(self, x, y, target, mask):
self.queries[mask] += 1
if target:
return self.model.predict_label(x[mask]) == target[mask]
else:
return self.model.predict_label(x[mask]) != y[mask]
# binary search for decision boundary along sgn direction
def binary_search(self, x, y, target, sgn, valid_mask, tol=1e-3):
sgn_norm = torch.norm(sgn.view(len(x), -1), 2, 1)
sgn_unit = sgn / sgn_norm.view(len(x), 1, 1, 1)
d_start = torch.zeros_like(y).float().cuda()
d_end = self.d_t.clone()
initial_succ_mask = self.search_succ(self.get_xadv(x, sgn_unit, self.d_t), y, target, valid_mask)
to_search_ind = valid_mask.nonzero().flatten()[initial_succ_mask]
d_end[to_search_ind] = torch.min(self.d_t, sgn_norm)[to_search_ind]
while len(to_search_ind) > 0:
d_mid = (d_start + d_end) / 2.0
search_succ_mask = self.search_succ(self.get_xadv(x, sgn_unit, d_mid), y, target, to_search_ind)
d_end[to_search_ind[search_succ_mask]] = d_mid[to_search_ind[search_succ_mask]]
d_start[to_search_ind[~search_succ_mask]] = d_mid[to_search_ind[~search_succ_mask]]
to_search_ind = to_search_ind[((d_end - d_start)[to_search_ind] > tol)]
to_update_ind = (d_end < self.d_t).nonzero().flatten()
if len(to_update_ind) > 0:
self.d_t[to_update_ind] = d_end[to_update_ind]
self.x_final[to_update_ind] = self.get_xadv(x, sgn_unit, d_end)[to_update_ind]
self.sgn_t[to_update_ind] = sgn[to_update_ind]
def __call__(self, data, label, target=None, query_limit=10000):
return self.attack_hard_label(data, label, target=target, query_limit=query_limit)
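# A usage sketch (assumptions: `model` exposes the predict_label() method
# called above, and inputs live on the GPU, matching the .cuda() calls):
# attacker = RayS(model, epsilon=0.031)
# x_adv, queries, dist, succ = attacker(x.cuda(), y.cuda(), query_limit=10000)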
|
1633009
|
import gevent
import gevent.pool
import gevent.monkey
import requests
import pytest
import tests.fixtures as fxt
gevent.monkey.patch_socket(dns=True)
def test_catch_all_gevented_requests(vts_rec_on, movie_server, http_get):
"""Keep this test at the very end to avoid messing up with the rest of the
tests, since it's monkey patching the network related operations.
Maybe write a custom pytest order enforcer later."""
def _job():
return http_get(movie_server.url)
pool = gevent.pool.Pool()
for x in range(10):
pool.spawn(_job)
pool.join()
assert len(vts_rec_on.cassette) == 10
class BgTask(object):
def __init__(self, url, delayed=True):
self.url = url
self.bg = gevent.Greenlet(self.target)
self.delayed = delayed
def target(self):
while self.delayed:
print("delayed is {}".format(self.delayed))
gevent.sleep(0.2)
return fxt.make_req(requests.Request(method="GET", url=self.url))
def join(self, delayed=False, *args, **kwargs):
self.delayed = delayed
return self.bg.join(*args, **kwargs)
@property
def rv(self):
return self.bg.get()
@pytest.mark.xfail(reason="The rogue background jobs which attempt an http request after teardown has happened are not going through responses")
def test_threading(vts_machine, tmpdir, chpy_custom_server2):
"""using rawer fixture vts_machine to allow to control when teardown is
called"""
# recording mode
domain = chpy_custom_server2
vts_machine.setup(basedir=tmpdir)
assert vts_machine.is_recording
# client function
active = BgTask(
"{}{}".format(domain, "/background"), delayed=False)
fxt.make_req(requests.Request(
method="GET", url="{}/foreground".format(domain)))
active.bg.start()
active.join()
assert active.rv
# now switch to playback mode
vts_machine.setup_playback()
assert vts_machine.is_playing
assert vts_machine.cassette
# request should be served by vts from cassette
fxt.make_req(requests.Request(
method="GET", url="{}/foreground".format(domain)))
dormant = BgTask(
"{}{}".format(domain, "/not-recorded"), delayed=True)
dormant.bg.start() # but is actually slower than this main thread
# client function ends => test ends => teardown is called
vts_machine.teardown()
# dormant bg task gets a chance to execute
dormant.join()
# since the cassette doesn't have a track for /not-recorded a
# ConnectionRefused should be raised
with pytest.raises(Exception):
dormant.rv
|
1633011
|
from django.conf.urls import patterns, url
from django.contrib.admindocs import views
urlpatterns = patterns('',
url('^$',
views.doc_index,
name='django-admindocs-docroot'
),
url('^bookmarklets/$',
views.bookmarklets,
name='django-admindocs-bookmarklets'
),
url('^tags/$',
views.template_tag_index,
name='django-admindocs-tags'
),
url('^filters/$',
views.template_filter_index,
name='django-admindocs-filters'
),
url('^views/$',
views.view_index,
name='django-admindocs-views-index'
),
url('^views/(?P<view>[^/]+)/$',
views.view_detail,
name='django-admindocs-views-detail'
),
url('^models/$',
views.model_index,
name='django-admindocs-models-index'
),
    url(r'^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.model_detail,
name='django-admindocs-models-detail'
),
url('^templates/(?P<template>.*)/$',
views.template_detail,
name='django-admindocs-templates'
),
)
|
1633013
|
from numpy import array
from pybimstab.astar import Astar
grid = array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 1, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0]])
for heuristic in ['manhattan', 'euclidean']:
astar = Astar(grid, startNode=(0, 0), goalNode=(9, 9),
heuristic=heuristic, reverseLeft=True,
reverseUp=True, preferredPath=None)
fig = astar.plot()
|
1633067
|
import lldb
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbtest as lldbtest
import lldbsuite.test.lldbutil as lldbutil
import re
class TestCase(lldbtest.TestBase):
mydir = lldbtest.TestBase.compute_mydir(__file__)
@swiftTest
@skipIf(oslist=['windows', 'linux'])
def test(self):
"""Test step-in to async functions"""
self.build()
src = lldb.SBFileSpec('main.swift')
_, process, _, _ = lldbutil.run_to_source_breakpoint(self, 'await', src)
# When run with debug info enabled builds, this prevents stepping from
# stopping in Swift Concurrency runtime functions.
self.runCmd("settings set target.process.thread.step-avoid-libraries libswift_Concurrency.dylib")
        # All thread actions are done on the currently selected thread. Keep
        # the bound method (not a call) so that thread() always re-fetches the
        # currently selected thread after each step or continue.
        thread = process.GetSelectedThread
num_async_steps = 0
while True:
stop_reason = thread().stop_reason
if stop_reason == lldb.eStopReasonNone:
break
elif stop_reason == lldb.eStopReasonPlanComplete:
# Run until the next `await` breakpoint.
process.Continue()
elif stop_reason == lldb.eStopReasonBreakpoint:
caller_before = thread().frames[0].function.GetDisplayName()
line_before = thread().frames[0].line_entry.line
thread().StepInto()
caller_after = thread().frames[1].function.GetDisplayName()
line_after = thread().frames[0].line_entry.line
# Breakpoints on lines with an `await` may result in more than
# one breakpoint location. Specifically a location before an
# async function is called, and then a location on the resume
# function. In this case, running `step` on these lines will
# move execution forward within the same function, _not_ step
# into a new function.
#
# As this test is for stepping into async functions, when the
# step-in keeps execution on the same or next line -- not a
# different function, then it can be ignored. rdar://76116620
if line_after in (line_before, line_before + 1):
# When stepping stops at breakpoint, don't continue.
if thread().stop_reason != lldb.eStopReasonBreakpoint:
process.Continue()
continue
                # The entry function is missing this prefix denoting resume functions.
prefix = re.compile(r'^\([0-9]+\) await resume partial function for ')
self.assertEqual(prefix.sub('', caller_after),
prefix.sub('', caller_before))
num_async_steps += 1
self.assertGreater(num_async_steps, 0)
|
1633079
|
from .alerts import Alerts
class ToolsNotifications:
def __init__(self, logger, ifttt_alerts):
self._logger = logger
self._ifttt_alerts = ifttt_alerts
self._alerts = Alerts(self._logger)
self._printer_was_printing_above_tool0_low = False # Variable used for tool0 cooling alerts
self._printer_alerted_reached_tool0_target = False # Variable used for tool0 warm alerts
def check_temps(self, settings, printer):
temps = printer.get_current_temperatures()
# self._logger.debug(u"CheckTemps(): %r" % (temps,))
if not temps:
# self._logger.debug(u"No Temperature Data")
return
for k in temps.keys():
# example dictionary from octoprint
# {
# 'bed': {'actual': 0.9, 'target': 0.0, 'offset': 0},
# 'tool0': {'actual': 0.0, 'target': 0.0, 'offset': 0},
# 'tool1': {'actual': 0.0, 'target': 0.0, 'offset': 0}
# }
if k == 'tool0':
tool0_threshold_low = settings.get_int(['tool0_low'])
target_temp = settings.get(['tool0_target_temp'])
else:
continue
# Check if tool0 has cooled down to specified temperature once print is finished
# Remember if we are printing and current tool0 temp is above the low tool0 threshold
if not self._printer_was_printing_above_tool0_low and printer.is_printing() and tool0_threshold_low and \
temps[k]['actual'] > tool0_threshold_low:
self._printer_was_printing_above_tool0_low = True
# If we are not printing and we were printing before with tool0 temp above threshold and tool0 temp is now
# below threshold
if self._printer_was_printing_above_tool0_low and not printer.is_printing() and tool0_threshold_low \
and temps[k]['actual'] < tool0_threshold_low:
self._logger.debug(
"Print done and tool0 temp is now below threshold {0}. Actual {1}.".format(tool0_threshold_low,
temps[k]['actual']))
self._printer_was_printing_above_tool0_low = False
self.__send__tool_notification(settings, "tool0-cooled", tool0_threshold_low)
# Check if tool0 has reached target temp and user wants to receive alerts for this event
if temps[k]['target'] > 0 and target_temp:
diff = temps[k]['actual'] - temps[k]['target']
# If we have not alerted user and printer reached target temp then alert user. Only alert
# when actual is equal to target or passed target by 5. Useful if hotend is too hot after
# print and you want to be alerted when it cooled down to a target temp
if not self._printer_alerted_reached_tool0_target and 0 <= diff < 5:
self._printer_alerted_reached_tool0_target = True
self.__send__tool_notification(settings, "tool0-warmed", temps[k]['target'])
elif temps[k]['target'] == 0:
# There is no target temp so reset alert flag so we can alert again
# once a target temp is set
self._printer_alerted_reached_tool0_target = False
##~~ Private functions - Tool Notifications
def __send__tool_notification(self, settings, event_code, temperature):
# Send IFTTT Notifications
self._ifttt_alerts.fire_event(settings, event_code, temperature)
server_url = settings.get(["server_url"])
if not server_url or not server_url.strip():
# No APNS server has been defined so do nothing
return -1
tokens = settings.get(["tokens"])
if len(tokens) == 0:
# No iOS devices were registered so skip notification
return -2
# For each registered token we will send a push notification
# We do it individually since 'printerID' is included so that
# iOS app can properly render local notification with
# proper printer name
used_tokens = []
last_result = None
for token in tokens:
apns_token = token["apnsToken"]
# Ignore tokens that already received the notification
# This is the case when the same OctoPrint instance is added twice
# on the iOS app. Usually one for local address and one for public address
if apns_token in used_tokens:
continue
# Keep track of tokens that received a notification
used_tokens.append(apns_token)
if 'printerName' in token and token["printerName"] is not None:
# We can send non-silent notifications (the new way) so notifications are rendered even if user
# killed the app
printer_name = token["printerName"]
language_code = token["languageCode"]
url = server_url + '/v1/push_printer'
last_result = self._alerts.send_alert_code(settings, language_code, apns_token, url, printer_name,
event_code, None, None)
return last_result
|
1633136
|
import os
# Candidates for the builddir (relative to the path of this file),
# which we use to find some generated files:
builddir_candidates = [ "build", "b" ]
# Determine actual builddir
builddir = builddir_candidates[0]
for d in builddir_candidates:
    if os.path.exists(d):
        builddir = d
        break
# This file is loosely based upon the file
# cpp/ycm/.ycm_extra_conf.py from the youcompleteme daemon process
# available on github:
# https://github.com/Valloric/ycmd/blob/master/cpp/ycm/.ycm_extra_conf.py
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
flags = [
# Warnings: For a very detailed discussion about this
# see the following stackexchange post:
# https://programmers.stackexchange.com/questions/122608#124574
'-Wall',
'-Wextra',
'-Wnon-virtual-dtor',
'-Woverloaded-virtual',
'-Wold-style-cast',
'-Wcast-align',
'-Wconversion',
'-Wsign-conversion',
'-pedantic',
'-Werror',
# Generate unwind information
'-fexceptions',
# TODO Why is this needed
'-Dbhxx_EXPORTS',
# Compile as c++11
'-std=c++11',
#
# Treat .h header files as c++:
'-x', 'c++',
# Include other libraries and show errors and
# warnings within them
# To suppress errors shown here, use "-isystem"
# instead of "-I"
'-I', 'include',
'-I', builddir + '/include',
'-I', './bridge/cxx/include',
'-I', builddir + '/bridge/cxx/include',
'-I', './bridge/cpp', # TODO Why is this needed
# Explicit clang includes:
'-isystem', '/usr/lib/ycmd/clang_includes',
]
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def FlagsForFile( filename, **kwargs ):
relative_to = os.path.dirname( os.path.abspath( __file__ ) )
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
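# Hedged debugging sketch (illustrative addition): ycmd normally calls
# FlagsForFile() itself; running this file directly only prints the flags
# that would be handed to clang for a hypothetical source file.
if __name__ == "__main__":
    import pprint
    pprint.pprint( FlagsForFile( os.path.join( builddir, 'example.cpp' ) ) )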
1633155
import io
import numpy as np
import tensorflow as tf
from hparams import hparams
from models import create_model
from util import audio, textinput
class Synthesizer:
def load(self, checkpoint_path, model_name='tacotron'):
print('Constructing model: %s' % model_name)
inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
with tf.variable_scope('model') as scope:
self.model = create_model(model_name, hparams)
self.model.initialize(inputs, input_lengths)
print('Loading checkpoint: %s' % checkpoint_path)
self.session = tf.Session()
self.session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(self.session, checkpoint_path)
def synthesize(self, text):
seq = textinput.to_sequence(text,
force_lowercase=hparams.force_lowercase,
expand_abbreviations=hparams.expand_abbreviations)
feed_dict = {
self.model.inputs: [np.asarray(seq, dtype=np.int32)],
self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32)
}
spec = self.session.run(self.model.linear_outputs[0], feed_dict=feed_dict)
out = io.BytesIO()
audio.save_wav(audio.inv_spectrogram(spec.T), out)
return out.getvalue()
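# Hedged usage sketch (illustrative addition): the checkpoint path below is a
# placeholder; any trained Tacotron checkpoint compatible with create_model()
# should work.
if __name__ == '__main__':
    synth = Synthesizer()
    synth.load('logs-tacotron/model.ckpt-100000')  # hypothetical path
    wav_bytes = synth.synthesize('Hello world.')
    with open('output.wav', 'wb') as f:
        f.write(wav_bytes)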
1633166
from Adversary.utils import *
def test_flatten_unique():
l = [[1, 2], [1, 3, 4], [5]]
assert(flatten_unique(l) == [1, 2, 3, 4, 5])
def test_combinations_of_len():
l = [1, 2, 3]
assert(combinations_of_len(l, 2) == [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3)])
def test_fancy_titles():
cols = ['change_case', 'insert_duplicate_characters', 'synonym']
assert(fancy_titles(cols) == ['Change Case', 'Insert Duplicate Characters', 'Synonym'])
1633172
import easypost
easypost.api_key = "API_KEY"
order = easypost.Order.create(
to_address={
"company": "Oakland Dmv Office",
"name": "Customer",
"street1": "5300 Claremont Ave",
"city": "Oakland",
"state": "CA",
"zip": "94618",
"country": "US",
"phone": "800-777-0133",
},
from_address={
"name": "EasyPost",
"company": "EasyPost",
"street1": "164 Townsend St",
"city": "San Francisco",
"state": "CA",
"zip": "94107",
"phone": "415-456-7890",
},
shipments=[
{
"parcel": easypost.Parcel.create(
weight=21.2,
length=12,
width=12,
height=3,
),
"options": {"label_format": "PDF"},
},
{
"parcel": easypost.Parcel.create(
weight=16,
length=8,
width=5,
height=5,
),
"options": {"label_format": "PDF"},
},
],
)
print(order)
order.buy(carrier="USPS", service="Priority")
for shipment in order.shipments:
# Insure the parcel
shipment.insure(amount=100)
print(shipment.postage_label.label_url)
print(shipment.tracking_code)
1633216
def test_else_block():
a = 5
s = "blibli"
if s != "blabla":
print("ok")
'''TEST
els$
@0 else:
status: ok
'''
def test_import_keyword():
import os
'''TEST
imp$
@0 `import `
status: ok
'''
def test_no_import_as():
import os
'''TEST
import nump$
@0 numpy
@! `numpy as np`
status: ok
'''
def test_only_empty_call():
import requests
my_url = "www.google.fr"
'''TEST
requests.ge$
@0 get()
@! get(<url>)
@! get(my_url)
@! get(<my_url>)
status: ok
'''
1633232
from .load_files_from_local_data import load_files_from_local_data
def load_frontend_files_from_local_data(who_is_asking_file):
return load_files_from_local_data(who_is_asking_file, dir_type="frontend")
1633262
import asyncio
import random
import time
import traceback
from src import database, amino_async, configs
from src.utils import service_align, logger, file_logger
with open(configs.DEVICES_PATH, "r") as f:
    DEVICES = f.readlines()
async def login(account: tuple):
client = amino_async.Client()
email = account[0]
password = account[1]
while True:
try:
await client.login(email, password)
return client
except amino_async.utils.exceptions.ActionNotAllowed:
client.device_id = client.headers.device_id = random.choice(DEVICES).strip()
except amino_async.utils.exceptions.VerificationRequired as verify:
logger.error("[" + email + "]: " + str(verify.args[0]["url"]))
await client.session.close()
return False
except Exception as e:
logger.error("[" + email + "]: " + e.args[0]["api:message"])
file_logger.debug(traceback.format_exc())
await client.session.close()
return False
async def login_sid(account: tuple):
email = account[0]
sid = account[2]
is_valid = account[3]
if is_valid == 1:
client = amino_async.Client()
while True:
try:
await client.login_sid(sid)
return client
except amino_async.utils.exceptions.ActionNotAllowed:
client.device_id = client.headers.device_id = random.choice(DEVICES).strip()
except amino_async.utils.exceptions.VerificationRequired as verify:
service_align(email, verify.args[0]["url"], level="error")
await client.session.close()
return False
except Exception as e:
service_align(email, e.args[0]["api:message"], level="error")
file_logger.debug(traceback.format_exc())
await client.session.close()
return False
async def check_accounts():
accounts = database.get_bots()
invalids = []
bads = []
for i in accounts:
sid = i[2]
is_valid = i[3]
valid_time = i[4]
if is_valid == 0:
invalids.append(i)
continue
if sid is None or valid_time is None or is_valid is None:
bads.append(i)
continue
if valid_time <= int(time.time()):
bads.append(i)
continue
    if invalids:
        logger.warning(f"{len(invalids)} non-working accounts")
    if bads:
        logger.warning(f"{len(bads)} accounts queued for SID refresh...")
valid_list = await asyncio.gather(*[asyncio.create_task(update_sid(i)) for i in bads])
for i in valid_list:
database.remove_bot(i.get("email"))
database.set_bots(list(valid_list))
async def update_sid(account: tuple):
email = account[0]
password = account[1]
client = await login(account)
if client:
        service_align(email, "SID refreshed")
await client.session.close()
return {"email": email, "password": password, "sid": client.sid, "isValid": True, "validTime": int(time.time()) + 43200}
else:
return {"email": email, "password": password, "isValid": False}
1633286
from datetime import datetime
import os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.subdag_operator import SubDagOperator
from dsbox.examples.tree_disease_usecase.ml.feature_engineering import join_dataframes
from dsbox.examples.tree_disease_usecase.ml.modeling import fit_write_model, read_predict_model, model_performance
from dsbox.examples.tree_disease_usecase.ml.sub_dags import feature_engineering_sub_dag
from dsbox.operators.data_operator import DataOperator
from dsbox.operators.data_unit import DataInputFileUnit, DataOutputFileUnit, DataInputMultiFileUnit, DataOutputDBUnit
from dsbox.utils import FilenameGenerator
db_url = 'sqlite:///tree_disease.db'
project_path = os.getenv('PROJECT_PATH')
def dummy_function(dataframe):
    # Pass-through: DataOperator requires an operation function, but the
    # export step only moves data from the input unit to the output unit.
    return dataframe
features_selection = ['ADR_SECTEUR', 'ANNEEDEPLANTATION', 'coord_x', 'coord_y',
'ANNEEREALISATIONDIAGNOSTIC', 'ANNEETRAVAUXPRECONISESDIAG',
'GENRE_BOTA', 'ESPECE', 'DIAMETREARBREAUNMETRE', 'FREQUENTATIONCIBLE',
'NOTEDIAGNOSTIC', 'PRIORITEDERENOUVELLEMENT', 'RAISONDEPLANTATION', 'REMARQUES',
'SOUS_CATEGORIE', 'STADEDEDEVELOPPEMENT', 'STADEDEVELOPPEMENTDIAG',
'TRAITEMENTCHENILLES', 'TRAVAUXPRECONISESDIAG', 'TROTTOIR',
'VARIETE', 'VIGUEUR', 'CODE_PARENT']
feature_target = 'Defaut'
prediction_column_name = 'y_prediction'
filename_generator = FilenameGenerator(path=project_path + 'datasets/temp/')
temp_files = []
for i in range(0, 100):
temp_files.append(filename_generator.generate_filename() + '.parquet')
dag = DAG(dag_id='Tree_Disease_Prediction', description='Tree Disease Prediction Example',
schedule_interval='0 12 * * *', start_date=datetime(2017, 3, 20), catchup=False)
input_csv_files_unit = DataInputMultiFileUnit([project_path + 'datasets/input/X_tree_egc_t1.csv',
project_path + 'datasets/input/X_geoloc_egc_t1.csv',
project_path + 'datasets/input/Y_tree_egc_t1.csv'], sep=';')
output_parquet_unit = DataOutputFileUnit(project_path + 'datasets/temp/X_train_raw.parquet',
pandas_write_function_name='to_parquet')
task_concate_train_files = DataOperator(operation_function=join_dataframes,
input_unit=input_csv_files_unit,
output_unit=output_parquet_unit,
dag=dag, task_id='Join_train_data_source_files')
task_feature_engineering_for_train = SubDagOperator(
subdag=feature_engineering_sub_dag(dag.dag_id, 'Feature_engineering_for_train',
model_path=project_path + 'models/',
input_file=project_path + 'datasets/temp/X_train_raw.parquet',
output_file=project_path + 'datasets/temp/X_train_final.parquet',
temp_files=temp_files[0:10],
start_date=dag.start_date,
schedule_interval=dag.schedule_interval),
task_id='Feature_engineering_for_train',
dag=dag,
)
task_concate_train_files.set_downstream(task_feature_engineering_for_train)
input_parquet_raw_file_unit = DataInputFileUnit(project_path + 'datasets/temp/X_train_final.parquet',
pandas_read_function_name='read_parquet')
task_model_learning = DataOperator(operation_function=fit_write_model,
params={'columns_selection': features_selection,
'column_target': feature_target,
'write_path': project_path + 'models/ensemble.model'
},
input_unit=input_parquet_raw_file_unit,
dag=dag, task_id='Model_learning')
task_feature_engineering_for_train.set_downstream(task_model_learning)
input_csv_files_unit = DataInputMultiFileUnit([project_path + 'datasets/input/X_tree_egc_t2.csv',
project_path + 'datasets/input/X_geoloc_egc_t2.csv',
project_path + 'datasets/input/Y_tree_egc_t2.csv'], sep=';')
output_parquet_unit = DataOutputFileUnit(project_path + 'datasets/temp/X_test_raw.parquet',
pandas_write_function_name='to_parquet')
task_concate_test_files = DataOperator(operation_function=join_dataframes,
input_unit=input_csv_files_unit,
output_unit=output_parquet_unit,
dag=dag, task_id='Join_test_data_source_files')
task_feature_engineering_for_test = SubDagOperator(
subdag=feature_engineering_sub_dag(dag.dag_id, 'Feature_engineering_for_test',
model_path=project_path + 'models/',
input_file=project_path + 'datasets/temp/X_test_raw.parquet',
output_file=project_path + 'datasets/temp/X_test_final.parquet',
temp_files=temp_files[10:],
start_date=dag.start_date,
schedule_interval=dag.schedule_interval,
mode='predict'),
task_id='Feature_engineering_for_test',
dag=dag,
)
task_concate_test_files.set_downstream(task_feature_engineering_for_test)
task_feature_engineering_for_train.set_downstream(task_feature_engineering_for_test)
task_model_predict = DataOperator(operation_function=read_predict_model,
params={'columns_selection': features_selection,
'read_path': project_path + 'models/ensemble.model',
'y_pred_column_name': prediction_column_name
},
input_unit=DataInputFileUnit(project_path + 'datasets/temp/X_test_final.parquet',
pandas_read_function_name='read_parquet'),
output_unit=DataOutputFileUnit(project_path + 'datasets/output/X_predict.csv',
pandas_write_function_name='to_csv', sep=';',
index=False),
dag=dag, task_id='Model_prediction')
task_feature_engineering_for_test.set_downstream(task_model_predict)
task_model_learning.set_downstream(task_model_predict)
input_result_file_unit = DataInputFileUnit(project_path + 'datasets/output/X_predict.csv',
pandas_read_function_name='read_csv', sep=';')
task_model_metric = DataOperator(operation_function=model_performance,
params={'y_true_column_name': feature_target,
'y_pred_column_name': prediction_column_name
},
input_unit=input_result_file_unit,
dag=dag, task_id='Model_metrics')
task_model_predict.set_downstream(task_model_metric)
input_csv_predict_files_unit = DataInputMultiFileUnit([project_path + 'datasets/input/X_tree_egc_t2.csv',
project_path + 'datasets/input/X_geoloc_egc_t2.csv',
project_path + 'datasets/input/Y_tree_egc_t2.csv',
project_path + 'datasets/output/X_predict.csv'], sep=';')
output_prediction_parquet_unit = DataOutputFileUnit(project_path + 'datasets/output/X_predict.parquet',
pandas_write_function_name='to_parquet')
task_concate_prediction_files = DataOperator(operation_function=join_dataframes,
params={'lsuffix': '_fe'},
input_unit=input_csv_predict_files_unit,
output_unit=output_prediction_parquet_unit,
dag=dag, task_id='Join_prediction_and_data_source_files')
task_model_predict.set_downstream(task_concate_prediction_files)
input_full_result_file_unit = DataInputFileUnit(project_path + 'datasets/output/X_predict.parquet',
pandas_read_function_name='read_parquet')
output_result_unit = DataOutputDBUnit('predictions', db_url, if_exists='replace', index=False)
task_export_to_sqlite = DataOperator(operation_function=dummy_function,
input_unit=input_full_result_file_unit,
output_unit=output_result_unit,
dag=dag, task_id='Export_result_to_sqlite')
task_concate_prediction_files.set_downstream(task_export_to_sqlite)
task_purge_temp_files = BashOperator(task_id='Purge_temp_files',
bash_command='rm ' + project_path + 'datasets/temp/*',
dag=dag)
task_export_to_sqlite.set_downstream(task_purge_temp_files)
1633348
import torch
# from image_synthesis.data.base_dataset import ConcatDatasetWithIndex as ConcatDataset
from torch.utils.data import ConcatDataset
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.distributed.distributed import is_distributed
def build_dataloader(config, args=None, return_dataset=False):
dataset_cfg = config['dataloader']
train_dataset = []
for ds_cfg in dataset_cfg['train_datasets']:
ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')
ds = instantiate_from_config(ds_cfg)
train_dataset.append(ds)
if len(train_dataset) > 1:
train_dataset = ConcatDataset(train_dataset)
else:
train_dataset = train_dataset[0]
val_dataset = []
for ds_cfg in dataset_cfg['validation_datasets']:
ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')
ds = instantiate_from_config(ds_cfg)
val_dataset.append(ds)
if len(val_dataset) > 1:
val_dataset = ConcatDataset(val_dataset)
else:
val_dataset = val_dataset[0]
if args is not None and args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
train_iters = len(train_sampler) // dataset_cfg['batch_size']
val_iters = len(val_sampler) // dataset_cfg['batch_size']
else:
train_sampler = None
val_sampler = None
train_iters = len(train_dataset) // dataset_cfg['batch_size']
val_iters = len(val_dataset) // dataset_cfg['batch_size']
# if args is not None and not args.debug:
# num_workers = max(2*dataset_cfg['batch_size'], dataset_cfg['num_workers'])
# num_workers = min(64, num_workers)
# else:
# num_workers = dataset_cfg['num_workers']
num_workers = dataset_cfg['num_workers']
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=dataset_cfg['batch_size'],
shuffle=(train_sampler is None),
num_workers=num_workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
persistent_workers=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=dataset_cfg['batch_size'],
shuffle=False, #(val_sampler is None),
num_workers=num_workers,
pin_memory=True,
sampler=val_sampler,
drop_last=True,
persistent_workers=True)
dataload_info = {
'train_loader': train_loader,
'validation_loader': val_loader,
'train_iterations': train_iters,
'validation_iterations': val_iters
}
if return_dataset:
dataload_info['train_dataset'] = train_dataset
dataload_info['validation_dataset'] = val_dataset
return dataload_info
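# Illustrative note (added): build_dataloader expects a config shaped roughly
# like the sketch below. 'target'/'params' follow the instantiate_from_config
# convention used above; the dataset class path here is a placeholder.
# config = {
#     'dataloader': {
#         'data_root': '/path/to/data',
#         'batch_size': 8,
#         'num_workers': 4,
#         'train_datasets': [{'target': 'my_pkg.datasets.MyDataset', 'params': {}}],
#         'validation_datasets': [{'target': 'my_pkg.datasets.MyDataset', 'params': {}}],
#     }
# }
# dataload_info = build_dataloader(config)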
1633358
import os.path
import json
from core import constants, log, json_util
from core.validators import validator_util
def validate(file_path, template):
log.log_subline_bold(f"Reading source file : '{file_path}'.")
source_data = json_util.read_json_data(file_path)
for language in constants.languages.values():
translation_file = template.replace('<lang>', language)
if not os.path.exists(translation_file):
log.log_warning(f"'{translation_file}' doesn't exist.")
continue
log.log_subline(f"Validating '{translation_file}'.")
translation_data = json_util.read_json_data(translation_file)
validate_translation_data(source_data, translation_data,
translation_file)
def validate_translation_data(source_data, translation_data,
translation_file):
for i in range(0, len(translation_data)):
validate_fields(i, source_data, translation_data, translation_file)
def validate_fields(i, source_data, translation_data, translation_file):
# version
validator_util.match_property(i, "version", source_data, translation_data,
translation_file)
    for mi in range(0, len(source_data[i]["messages"])):
        # [i]["messages"][mi]["image"]
        validator_util.match_particular_property(
            i,
            source_data[i]["messages"][mi]["image"],
            translation_data[i]["messages"][mi]["image"],
            translation_file)
        for pi in range(0, len(source_data[i]["messages"][mi]["platforms"])):
            # [i]["messages"][mi]["platforms"][pi]
            validator_util.match_particular_property(
                i,
                source_data[i]["messages"][mi]["platforms"][pi],
                translation_data[i]["messages"][mi]["platforms"][pi],
                translation_file)
1633370
from setuptools import setup, find_packages, Command
version = __import__('pytest_sftpserver').get_version()
class Test(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
raise SystemExit(subprocess.call(['tox']))
with open("README.rst", "r") as readme:
README = readme.read()
setup(
name='pytest-sftpserver',
version=version,
author='<NAME>',
author_email='<EMAIL>',
license='MIT License',
description='py.test plugin to locally test sftp server connections.',
long_description=README,
url='http://github.com/ulope/pytest-sftpserver/',
packages=find_packages(),
package_data={"pytest_sftpserver": ["keys/*.pub", "keys/*.priv"]},
install_requires=[
"paramiko",
"six",
],
tests_require=[
'tox',
],
entry_points={
'pytest11': ['sftpserver = pytest_sftpserver.plugin']
},
cmdclass={
'test': Test
},
zip_safe=False,
keywords='py.test pytest plugin server local sftp localhost',
classifiers=[
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Testing'
]
)
1633410
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_databases(host):
with host.sudo():
# test if databases exist
databases = host.run('mysql -u root -e "show databases"')
assert 'vhost1db' in databases.stdout
assert 'vhost2db' in databases.stdout
assert 'vhost3db' in databases.stdout
assert 'extern1' in databases.stdout
# test users
dbaccess = host.run('mysql -u vhost1db -pvhost1pass vhost1db -e "SELECT user();"')
assert dbaccess.rc == 0
dbaccess = host.run('mysql -u vhost2db -pvhost2pass vhost2db -e "SELECT user();"')
assert dbaccess.rc == 0
dbaccess = host.run('mysql -u vhost3db -pvhost3pass vhost3db -e "SELECT user();"')
assert dbaccess.rc == 0
dbaccess = host.run('mysql -u extern1 -pextpass1 -e "SELECT user();"')
assert dbaccess.rc == 0
# test remote access grant
dbaccess = host.run('mysql -u root -e "show grants for extern1;"')
assert "'extern1'@'%'" in dbaccess.stdout
1633431
class Solution:
digit_char_map = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
if not digits:
return []
result = []
self.dfs(0, digits, "", result)
return result
def dfs(self, index, digits, pattern, result):
        if index == len(digits):
            result.append(pattern)
else:
for char in self.digit_char_map[digits[index]]:
self.dfs(index + 1, digits, pattern + char, result)
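# Hedged usage sketch (illustrative addition): classic phone-keypad expansion.
if __name__ == "__main__":
    print(Solution().letterCombinations("23"))
    # expected: ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']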
1633439
from setuptools import setup, find_packages
setup(
name='pyAHP',
version='0.1.2',
packages=find_packages(),
install_requires=[
'numpy>=1.14.0',
'scipy>=1.0.0'
],
author='<NAME>',
author_email='<EMAIL>',
description='Analytic Hierarchy Process solver',
license='MIT',
url='https://github.com/pyAHP/pyAHP',
keywords='ahp analytic hierarchy process',
python_requires='>=3'
)
1633452
class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self, start):
self.start = start
def detect_middle(self):
        if self.start.next is None:
            return self.start.value
i = self.start
j = self.start
while j.next is not None:
j = j.next.next
if j is not None:
i = i.next
else:
break
if j is not None:
return i.value
return i.value, i.next.value
def main():
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
l = LinkedList(n1)
assert (l.detect_middle() == 3)
main()
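# Hedged usage sketch (illustrative addition): with an even number of nodes,
# detect_middle() returns the values of the two middle nodes as a tuple.
def even_example():
    nodes = [Node(v) for v in range(1, 5)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    assert LinkedList(nodes[0]).detect_middle() == (2, 3)
even_example()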
1633453
import re
import os
import subprocess
from functools import lru_cache
from typing import Dict, List
import pandas as pd
from helpers import (
NOW,
RemoteCommand,
Settings,
create_settings,
nix_build,
spawn,
flamegraph_env,
read_stats,
write_stats,
scone_env
)
from network import Network, NetworkKind, setup_remote_network
from storage import Storage, StorageKind
@lru_cache(maxsize=1)
def sysbench_command(settings: Settings) -> RemoteCommand:
path = nix_build("sysbench")
return settings.remote_command(path)
@lru_cache(maxsize=1)
def nc_command(settings: Settings) -> RemoteCommand:
path = nix_build("netcat-native")
return settings.remote_command(path)
def parse_sysbench(output: str) -> Dict[str, str]:
stats_found = False
section = ""
data = {}
for line in output.split("\n"):
print(line)
if line.startswith("SQL statistics"):
stats_found = True
if stats_found:
col = line.split(":")
if len(col) != 2:
continue
name = col[0].strip()
# remove trailing statistics, e.g.:
# transform
# transactions: 3228 (322.42 per sec.)
# to
# transactions: 3228
value = re.sub(r"\([^)]+\)$", "", col[1]).strip()
if value == "" and name != "queries performed":
section = name
continue
data[f"{section} {name}"] = value
return data
def process_sysbench(output: str, system: str, stats: Dict[str, List]) -> None:
data = parse_sysbench(output)
for k, v in data.items():
stats[k].append(v)
stats["system"].append(system)
class Benchmark:
def __init__(self, settings: Settings) -> None:
self.settings = settings
self.network = Network(settings)
self.storage = Storage(settings)
def run(
self,
attr: str,
system: str,
mnt: str,
stats: Dict[str, List],
extra_env: Dict[str, str] = {},
) -> None:
env = dict(SGXLKL_CWD=mnt)
env.update(flamegraph_env(f"{os.getcwd()}/mysql-{system}"))
env.update(extra_env)
mysql = nix_build(attr)
sysbench = sysbench_command(self.storage.settings)
with spawn(
mysql,
"bin/mysqld",
f"--datadir={mnt}/var/lib/mysql",
"--socket=/tmp/mysql.sock",
extra_env=env,
):
common_flags = [
f"--mysql-host={self.settings.local_dpdk_ip}",
"--mysql-db=root",
"--mysql-user=root",
"--mysql-password=<PASSWORD>",
"--mysql-ssl=on",
"--table-size=500000",
f"{sysbench.nix_path}/share/sysbench/oltp_read_write.lua",
]
while True:
try:
proc = nc_command(self.settings).run(
"bin/nc", ["-z", "-v", self.settings.local_dpdk_ip, "3306"]
)
break
except subprocess.CalledProcessError:
print(".")
pass
sysbench.run("bin/sysbench", common_flags + ["prepare"])
proc = sysbench.run("bin/sysbench", common_flags + ["run"])
process_sysbench(proc.stdout, system, stats)
sysbench.run("bin/sysbench", common_flags + ["cleanup"])
def benchmark_native(benchmark: Benchmark, stats: Dict[str, List]) -> None:
extra_env = benchmark.network.setup(NetworkKind.NATIVE)
mount = benchmark.storage.setup(StorageKind.NATIVE)
extra_env.update(mount.extra_env())
with mount as mnt:
benchmark.run("mysql-native", "native", mnt, stats, extra_env=extra_env)
def benchmark_sgx_lkl(benchmark: Benchmark, stats: Dict[str, List]) -> None:
extra_env = benchmark.network.setup(NetworkKind.TAP)
mount = benchmark.storage.setup(StorageKind.LKL)
extra_env.update(mount.extra_env())
with mount as mnt:
benchmark.run("mysql-sgx-lkl", "sgx-lkl", mnt, stats, extra_env=extra_env)
def benchmark_sgx_io(benchmark: Benchmark, stats: Dict[str, List]) -> None:
extra_env = benchmark.network.setup(NetworkKind.DPDK)
mount = benchmark.storage.setup(StorageKind.SPDK)
extra_env.update(mount.extra_env())
with mount as mnt:
benchmark.run("mysql-sgx-io", "sgx-io", mnt, stats, extra_env=extra_env)
def benchmark_scone(benchmark: Benchmark, stats: Dict[str, List]) -> None:
mount = benchmark.storage.setup(StorageKind.SCONE)
with mount as mnt:
extra_env = scone_env(mnt)
extra_env.update(benchmark.network.setup(NetworkKind.NATIVE))
extra_env.update(mount.extra_env())
benchmark.run("mysql-scone", "scone", mnt, stats, extra_env=extra_env)
def main() -> None:
stats = read_stats("mysql.json")
settings = create_settings()
benchmark = Benchmark(settings)
benchmarks = {
"native": benchmark_native,
"sgx-lkl": benchmark_sgx_lkl,
"sgx-io": benchmark_sgx_io,
"scone": benchmark_scone,
}
setup_remote_network(settings)
system = set(stats["system"])
for name, benchmark_func in benchmarks.items():
if name in system:
print(f"skip {name} benchmark")
continue
benchmark_func(benchmark, stats)
write_stats("mysql.json", stats)
csv = f"mysql-{NOW}.tsv"
print(csv)
throughput_df = pd.DataFrame(stats)
throughput_df.to_csv(csv, index=False, sep="\t")
throughput_df.to_csv("mysql-latest.tsv", index=False, sep="\t")
if __name__ == "__main__":
main()
1633460
import os
import textwrap
from django.utils.translation import ugettext_lazy as _
from orchestra.contrib.orchestration import ServiceController, replace
from . import WebAppServiceMixin
from .. import settings
class uWSGIPythonController(WebAppServiceMixin, ServiceController):
"""
<a href="http://uwsgi-docs.readthedocs.org/en/latest/Emperor.html">Emperor mode</a>
"""
verbose_name = _("Python uWSGI")
default_route_match = "webapp.type.endswith('python')"
doc_settings = (settings, (
'WEBAPPS_UWSGI_BASE_DIR',
'WEBAPPS_PYTHON_MAX_REQUESTS',
'WEBAPPS_PYTHON_DEFAULT_MAX_WORKERS',
'WEBAPPS_PYTHON_DEFAULT_TIMEOUT',
))
def save(self, webapp):
context = self.get_context(webapp)
self.create_webapp_dir(context)
self.set_under_construction(context)
self.save_uwsgi(webapp, context)
def delete(self, webapp):
context = self.get_context(webapp)
self.delete_uwsgi(webapp, context)
self.delete_webapp_dir(context)
def save_uwsgi(self, webapp, context):
self.append("echo '%(uwsgi_config)s' > %(vassal_path)s" % context)
def delete_uwsgi(self, webapp, context):
self.append("rm -f %(vassal_path)s" % context)
def get_uwsgi_ini(self, context):
return textwrap.dedent("""\
# %(banner)s
[uwsgi]
plugins = python{python_version_number}
chdir = {app_path}
module = {app_name}.wsgi
chmod-socket = 660
stats = /run/uwsgi/%(deb-confnamespace)/%(deb-confname)/statsocket
vacuum = true
uid = {user}
gid = {group}
env = HOME={home}
harakiri = {timeout}
max-requests = {max_requests}
cheaper-algo = spare
cheaper = 1
workers = {workers}
cheaper-step = 1
cheaper-overload = 5"""
        ).format(**context)
def update_uwsgi_context(self, webapp, context):
context.update({
            'uwsgi_config': self.get_uwsgi_ini(context),
'uwsgi_dir': settings.WEBAPPS_UWSGI_BASE_DIR,
'vassal_path': os.path.join(settings.WEBAPPS_UWSGI_BASE_DIR,
'vassals/%s' % context['app_name']),
})
return context
def get_context(self, webapp):
        context = super(uWSGIPythonController, self).get_context(webapp)
options = webapp.get_options()
context.update({
'python_version': webapp.type_instance.get_python_version(),
'python_version_number': webapp.type_instance.get_python_version_number(),
'max_requests': settings.WEBAPPS_PYTHON_MAX_REQUESTS,
'workers': options.get('processes', settings.WEBAPPS_PYTHON_DEFAULT_MAX_WORKERS),
'timeout': options.get('timeout', settings.WEBAPPS_PYTHON_DEFAULT_TIMEOUT),
})
self.update_uwsgi_context(webapp, context)
replace(context, "'", '"')
return context
|
1633495
|
import win32api, win32con, win32security, ntsecuritycon
new_privs = (
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_SECURITY_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
(
win32security.LookupPrivilegeValue("", ntsecuritycon.SE_TCB_NAME),
win32con.SE_PRIVILEGE_ENABLED,
),
)
ph = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(
ph, win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES
)
win32security.AdjustTokenPrivileges(th, 0, new_privs)
hkey = win32api.RegOpenKey(
win32con.HKEY_LOCAL_MACHINE, None, 0, win32con.KEY_ALL_ACCESS
)
win32api.RegCreateKey(hkey, "SYSTEM\\NOTMP")
notmpkey = win32api.RegOpenKey(
hkey, "SYSTEM\\notmp", 0, win32con.ACCESS_SYSTEM_SECURITY
)
tmp_sid = win32security.LookupAccountName("", "tmp")[0]
sacl = win32security.ACL()
sacl.AddAuditAccessAce(win32security.ACL_REVISION, win32con.GENERIC_ALL, tmp_sid, 1, 1)
sd = win32security.SECURITY_DESCRIPTOR()
sd.SetSecurityDescriptorSacl(1, sacl, 1)
win32api.RegSetKeySecurity(notmpkey, win32con.SACL_SECURITY_INFORMATION, sd)
|
1633504
|
from .vit_tiny_patch16_224 import model
model.patch_size = 14
model.embed_dim = 1408
model.mlp_ratio = 48 / 11
model.depth = 40
model.num_heads = 16
|
1633509
|
def f_linear(x):
y = x
z = y
return z
def one_branch(x):
if x:
return 1
else:
return 2
def two_branch(x, y):
if x:
y += 1
else:
y += 2
if y:
return 1
else:
return 2
def nested(x, y):
if x:
if y:
return 0
else:
return 1
else:
if y:
return 2
else:
return 3
def exceptions(x, y):
try:
x.attr
x + y
x[y]
read()
except IOError:
pass
#ODASA-5114
def must_be_positive(self, obj, value):
try:
return int(value)
except:
self.error(obj, value)
|
1633524
|
from django.http import Http404
from django.shortcuts import render
from project_first_app.models import Owner, Car, DriverLicense, Ownership
def owner_detail(request, owner_id):
try:
p = Owner.objects.get(pk=owner_id)
except Owner.DoesNotExist:
raise Http404("Owner does not exist")
return render(request, 'owner.html', {'owner': p})
|
1633565
|
from dataclasses import dataclass
from unittest import TestCase
from dataclass_factory import Factory, Schema
@dataclass
class Data:
a: str = ""
b: str = ""
c_: str = ""
_d: str = ""
class TestFactory(TestCase):
def test_only_mapping(self):
factory = Factory(
schemas={
Data: Schema(
only=("b",),
name_mapping={"a": "A"},
only_mapped=True,
),
},
)
data = Data("AA", "BB", "CC")
serial = {"b": "BB"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "b": "BB"}
data2 = Data(b="BB")
self.assertEqual(factory.load(serial, Data), data2)
def test_only_exclude(self):
factory = Factory(
schemas={
Data: Schema(
only=("a", "b"),
exclude=("a",),
),
},
)
data = Data("AA", "BB", "CC")
serial = {"b": "BB"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "b": "BB"}
data2 = Data(b="BB")
self.assertEqual(factory.load(serial, Data), data2)
def test_trailing_mapping(self):
factory = Factory(
schemas={
Data: Schema(
name_mapping={"c_": "c_"},
trim_trailing_underscore=True,
),
},
)
data = Data("AA", "BB", "CC")
serial = {"a": "AA", "b": "BB", "c_": "CC"}
self.assertEqual(factory.dump(data), serial)
self.assertEqual(factory.load(serial, Data), data)
def test_internal_only(self):
factory = Factory(
schemas={
Data: Schema(
only=("_d",),
skip_internal=True,
),
},
)
data = Data("AA", "BB", "CC", "DD")
serial = {"_d": "DD"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "_d": "DD"}
data2 = Data(_d="DD")
self.assertEqual(factory.load(serial, Data), data2)
def test_internal_mapping(self):
factory = Factory(
schemas={
Data: Schema(
name_mapping={"_d": "_d"},
skip_internal=True,
),
},
)
data = Data("AA", "BB", "CC", "DD")
serial = {"a": "AA", "b": "BB", "c": "CC", "_d": "DD"}
self.assertEqual(factory.dump(data), serial)
serial = {"a": "XXX", "_d": "DD"}
data2 = Data(a="XXX", _d="DD")
self.assertEqual(factory.load(serial, Data), data2)
|
1633577
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from torch.optim import lr_scheduler
import contextlib
import math
from medseg.models.segmentation_models.unet import UNet
from medseg.common_utils.basic_operations import check_dir
# Partially based on: https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [p.clone().detach()
for p in parameters if p.requires_grad]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay, (1 + self.num_updates) /
(10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param))
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone()
for param in parameters
if param.requires_grad]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
if len(self.collected_params) > 0:
for c_param, param in zip(self.collected_params, parameters):
if param.requires_grad:
param.data.copy_(c_param.data)
else:
print('did not find any copy, use the original params')
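# Hedged usage sketch (illustrative addition, not part of the original module):
# how ExponentialMovingAverage is typically driven from a training loop.
# The tiny model and optimizer below are stand-ins.
def _ema_usage_demo():
    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
    for _ in range(3):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.update(model.parameters())   # track averages after every step
    ema.store(model.parameters())        # stash the raw weights
    ema.copy_to(model.parameters())      # validate/save with EMA weights
    ema.restore(model.parameters())      # resume training with raw weights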
def cross_entropy_2D(input, target, weight=None, size_average=True):
n, c, h, w = input.size()
log_p = F.log_softmax(input, dim=1)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
if len(target.size()) == 3:
target = target.view(target.numel())
if not weight is None:
# sum(weight) =C, for numerical stability.
weight = torch.softmax(weight, dim=0) * c
loss = F.nll_loss(log_p, target, weight=weight, reduction='sum')
if size_average:
loss /= float(target.numel() + 1e-10)
elif len(target.size()) == 4:
# ce loss=-qlog(p)
reference = F.softmax(target, dim=1) # M,C
reference = reference.transpose(1, 2).transpose(
2, 3).contiguous().view(-1, c) # M,C
if weight is None:
plogq = torch.mean(torch.mean(reference * log_p, dim=1))
else:
# sum(weight) =C
weight = torch.softmax(weight, dim=0) * c
plogq_class_wise = reference * log_p
plogq_sum_class = 0.
for i in range(plogq_class_wise.size(1)):
plogq_sum_class += torch.mean(
plogq_class_wise[:, i] * weight[i])
plogq = plogq_sum_class
loss = -1 * plogq
else:
raise NotImplementedError
return loss
def clip_grad(optimizer):
# https://github.com/rosinality/igebm-pytorch/blob/master/train.py
# clip the gradient of parameters before optimization.
with torch.no_grad():
for group in optimizer.param_groups:
for p in group['params']:
state = optimizer.state[p]
if 'step' not in state or state['step'] < 1:
continue
step = state['step']
exp_avg_sq = state['exp_avg_sq']
_, beta2 = group['betas']
bound = 3 * torch.sqrt(exp_avg_sq / (1 - beta2 ** step)) + 0.1
p.grad.data.copy_(
torch.max(torch.min(p.grad.data, bound), -bound))
def set_model_grad(model, state=False):
assert model
for p in model.parameters(): # reset requires_grad
p.requires_grad = state
def set_grad(module, requires_grad=False):
for p in module.parameters(): # reset requires_grad
p.requires_grad = requires_grad
def make_one_hot(y, num_classes=4):
batch_size, h, w = y.size(0), y.size(1), y.size(2)
flatten_y = y.view(batch_size * h * w, 1)
y_onehot = torch.zeros(batch_size * h * w, num_classes,
dtype=torch.float32, device=y.device)
y_onehot.scatter_(1, flatten_y, 1)
y_onehot = y_onehot.view(batch_size, h, w, num_classes)
y_onehot = y_onehot.permute(0, 3, 1, 2)
y_onehot.requires_grad = False
return y_onehot
def mask_latent_code_channel_wise(latent_code, decoder_function, label, num_classes=2, percentile=1 / 3.0, random=False, loss_type='corr', if_detach=True, if_soft=False):
"""
given a latent code return a perturbed code where top % channels are masked
Args:
latent_code (torch tensor): latent code, z_i or z_s
decoder_function (nn.module): a specific decoder function, which maps the latent code to the output space/image space
label (torch tensor): targeted output, e.g. image or segmentation label
num_classes (int): number of segmentation classes (incl. background), only used when 'label' is a labelmap
percentile (float, optional): percentile of masked codes. Defaults to 1/3.0.
random (bool, optional): if set to true, then randomly draw a threshold from (0,percentile) to mask. Defaults to False.
loss_type (str, optional): name of the loss function. Defaults to 'corr'.
if_detach (bool, optional): if false, will directly apply masking to the original code. Defaults to True.
if_soft (bool, optional): if true, perform soft masking instead of hard masking. Defaults to False.
Returns:
[type]: [description]
"""
use_gpu = True if latent_code.device != torch.device('cpu') else False
code = makeVariable(latent_code, use_gpu=use_gpu,
type='float', requires_grad=True)
feature_channels = code.size(1)
num_images = code.size(0)
if len(label.size()) < len(code.size()):
gt_y = make_one_hot(label, num_classes)
else:
gt_y = label
if loss_type == 'corr':
# self-challenging algorithm uses the correlation/similarity loss
loss = torch.mean(decoder_function(code) * gt_y)
elif loss_type == 'mse':
loss = torch.mean((decoder_function(code) - gt_y)**2)
elif loss_type == 'ce':
logit = decoder_function(code)
loss = cross_entropy_2D(input=logit, target=label,
weight=None, size_average=True)
loss = torch.mean(loss)
gradient = torch.autograd.grad(loss, [code])[0]
gradient_channel_mean = torch.mean(
gradient.view(num_images, feature_channels, -1), dim=2)
# select the threshold at top XX percentile
# random percentile
if random:
percentile = np.random.rand() * percentile
vector_thresh_percent = int(feature_channels * percentile)
vector_thresh_value = torch.sort(gradient_channel_mean, dim=1, descending=True)[
0][:, vector_thresh_percent]
vector_thresh_value = vector_thresh_value.view(
num_images, 1).expand(num_images, feature_channels)
if if_soft:
vector = torch.where(gradient_channel_mean > vector_thresh_value,
0.5 * torch.rand_like(gradient_channel_mean),
torch.ones_like(gradient_channel_mean))
else:
vector = torch.where(gradient_channel_mean > vector_thresh_value,
torch.zeros_like(gradient_channel_mean),
torch.ones_like(gradient_channel_mean))
mask_all = vector.view(num_images, feature_channels, 1, 1)
if not if_detach:
masked_latent_code = latent_code * mask_all
else:
masked_latent_code = code * mask_all
try:
decoder_function.zero_grad()
except:
pass
return masked_latent_code, mask_all
def mask_latent_code_spatial_wise(latent_code, decoder_function, label, num_classes, percentile=1 / 3.0, random=False, loss_type='corr', if_detach=True, if_soft=False):
'''
given a latent code return a perturbed code where top % areas are masked
'''
use_gpu = True if latent_code.device != torch.device('cpu') else False
code = makeVariable(latent_code, use_gpu=use_gpu,
type='float', requires_grad=True)
num_images = code.size(0)
spatial_size = code.size(2) * code.size(3)
H, W = code.size(2), code.size(3)
if len(label.size()) < len(code.size()):
gt_y = make_one_hot(label, num_classes)
else:
gt_y = label
if loss_type == 'corr':
loss = torch.mean(decoder_function(code) * gt_y)
elif loss_type == 'mse':
loss = torch.mean((decoder_function(code) - gt_y)**2)
elif loss_type == 'ce':
logit = decoder_function(code)
loss = cross_entropy_2D(input=logit, target=label,
weight=None, size_average=True)
loss = torch.mean(loss)
gradient = torch.autograd.grad(loss, [code])[0]
# mask gradient with largest response:
spatial_mean = torch.mean(gradient, dim=1, keepdim=True)
spatial_mean = spatial_mean.squeeze().view(num_images, spatial_size)
# select the threshold at top XX percentile
if random:
percentile = np.random.rand() * percentile
vector_thresh_percent = int(spatial_size * percentile)
vector_thresh_value = torch.sort(spatial_mean, dim=1, descending=True)[
0][:, vector_thresh_percent]
vector_thresh_value = vector_thresh_value.view(
num_images, 1).expand(num_images, spatial_size)
if if_soft:
vector = torch.where(spatial_mean > vector_thresh_value,
0.5 * torch.rand_like(spatial_mean),
torch.ones_like(spatial_mean))
else:
vector = torch.where(spatial_mean > vector_thresh_value,
torch.zeros_like(spatial_mean),
torch.ones_like(spatial_mean))
mask_all = vector.view(num_images, 1, H, W)
if not if_detach:
masked_latent_code = latent_code * mask_all
else:
masked_latent_code = code * mask_all
try:
decoder_function.zero_grad()
except:
pass
return masked_latent_code, mask_all
def get_unet_model(model_path, num_classes=2, device=None, model_arch='UNet_16'):
'''
init model and load the trained parameters from the disk.
model path: string. path to the model checkpoint
device: torch device
return pytorch nn.module model
'''
assert check_dir(model_path) == 1, model_path + ' does not exists'
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model_arch == 'UNet_16':
model = UNet(input_channel=1, num_classes=num_classes, feature_scale=4)
elif model_arch == 'UNet_64':
model = UNet(input_channel=1, num_classes=num_classes, feature_scale=1)
else:
raise NotImplementedError
model.load_state_dict(torch.load(model_path))
model = model.to(device)
return model
def filter_unlabelled_predictions(predictions, threshold=0.8):
'''
    given a batch of predictions,
    find the max prob for each pixel; if it exceeds the given threshold return 1,
    else return 0
    return: a batch of confidence maps NCHW, values in {0, 1}
'''
# find the maximum prob for each mask
foreground_predictions = predictions.detach()
max_prob_for_each_image = torch.max(foreground_predictions, dim=1)[0]
max_prob_for_each_image = torch.clamp(
max_prob_for_each_image - threshold, 0, 1)
max_prob_for_each_image[foreground_predictions > 0] = 1
confidence_maps = max_prob_for_each_image.unsqueeze(
1).expand_as(predictions)
return confidence_maps
def sharpen_predictions(predictions, temperature=0.5):
'''
    sharpen the predictions
    predictions: N*C*H*W: probabilistic predictions (in mixmatch, this is an averaged value)
'''
predictions = F.softmax(predictions, dim=1)
calibrated_p = predictions**(1 / temperature)
return calibrated_p / calibrated_p.sum(axis=1, keepdims=True)
def stash_grad(model, grad_dict):
for k, v in model.named_parameters():
if v.grad is not None:
if k in grad_dict.keys():
grad_dict[k] += v.grad.clone()
else:
grad_dict[k] = v.grad.clone()
model.zero_grad()
#print ('gradient stashed')
return grad_dict
def restore_grad(model, grad_dict):
for k, v in model.named_parameters():
if k in grad_dict.keys():
grad = grad_dict[k]
if v.grad is None:
v.grad = grad
else:
v.grad += grad
#print ('gradient restored')
def unit_norm(x, use_p_norm=False):
# ## rescale
abs_max = torch.max(
torch.abs(x.view(x.size(0), -1)), 1, keepdim=True)[0].view(
x.size(0), 1, 1, 1)
x /= 1e-10 + abs_max
# ## normalize
if use_p_norm:
batch_size = x.size(0)
old_size = x.size()
x = x.view(batch_size, -1)
x = F.normalize(x, p=2, dim=1)
x = x.view(old_size)
return x
@contextlib.contextmanager
def _disable_tracking_bn_stats(model):
def switch_attr(model, new_state=None, hist_states=None):
"""[summary]
Args:
model ([torch.nn.Module]): [description]
new_state ([bool], optional): [description]. Defaults to None.
hist_states ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
old_states = {}
for name, module in model.named_modules():
if isinstance(module, torch.nn.BatchNorm2d):
# print('here batch norm')
old_states[name] = module.track_running_stats
if hist_states is not None:
module.track_running_stats = hist_states[name]
# disable optimizing the beta and gamma for feature normalization
if hasattr(module, 'weight'):
module.weight.requires_grad_(hist_states[name])
if hasattr(module, 'bias'):
module.bias.requires_grad_(hist_states[name])
else:
if new_state is not None:
module.track_running_stats = new_state
if hasattr(module, 'weight'):
module.weight.requires_grad_(new_state)
if hasattr(module, 'bias'):
module.bias.requires_grad_(new_state)
return old_states
old_states = switch_attr(model, False)
yield
switch_attr(model, hist_states=old_states)
class SizeEstimator(object):
def __init__(self, model, input_size=(1, 1, 32, 32), bits=32):
'''
Estimates the size of PyTorch models in memory
for a given input size
'''
self.model = model
self.input_size = input_size
self.bits = 32
return
def get_parameter_sizes(self):
'''Get sizes of all parameters in `models`'''
mods = list(self.model.modules())
sizes = []
for i in range(1, len(mods)):
m = mods[i]
p = list(m.parameters())
for j in range(len(p)):
sizes.append(np.array(p[j].size()))
self.param_sizes = sizes
return
def get_output_sizes(self):
'''Run sample input through each layer to get output sizes'''
input_ = Variable(torch.FloatTensor(*self.input_size), volatile=True)
mods = list(self.model.modules())
out_sizes = []
for i in range(1, len(mods)):
m = mods[i]
out = m(input_)
out_sizes.append(np.array(out.size()))
input_ = out
self.out_sizes = out_sizes
return
def calc_param_bits(self):
'''Calculate total number of bits to store `models` parameters'''
total_bits = 0
for i in range(len(self.param_sizes)):
s = self.param_sizes[i]
bits = np.prod(np.array(s)) * self.bits
total_bits += bits
self.param_bits = total_bits
return
def calc_forward_backward_bits(self):
'''Calculate bits to store forward and backward pass'''
total_bits = 0
for i in range(len(self.out_sizes)):
s = self.out_sizes[i]
bits = np.prod(np.array(s)) * self.bits
total_bits += bits
# multiply by 2 for both forward AND backward
self.forward_backward_bits = (total_bits * 2)
return
def calc_input_bits(self):
'''Calculate bits to store input'''
self.input_bits = np.prod(np.array(self.input_size)) * self.bits
return
def estimate_size(self):
'''Estimate models size in memory in megabytes and bits'''
self.get_parameter_sizes()
self.get_output_sizes()
self.calc_param_bits()
self.calc_forward_backward_bits()
self.calc_input_bits()
total = self.param_bits + self.forward_backward_bits + self.input_bits
total_megabytes = (total / 8) / (1024 ** 2)
return total_megabytes, total
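# Hedged usage sketch (illustrative addition): estimating the in-memory
# footprint of a small stand-in network. Note get_output_sizes() still uses
# the deprecated `volatile` flag, so recent PyTorch versions emit a warning.
def _size_estimator_demo():
    net = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU())
    megabytes, bits = SizeEstimator(net, input_size=(1, 1, 32, 32)).estimate_size()
    print('estimated size: %.3f MB (%d bits)' % (megabytes, bits))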
def save_model_to_file(model_name, model, epoch, optimizer, save_path):
state_dict = model.module.state_dict() if isinstance(
model, torch.nn.DataParallel) else model.state_dict()
state = {'model_name': model_name,
'epoch': epoch + 1,
'model_state': state_dict,
'optimizer_state': optimizer.state_dict()
}
torch.save(state, save_path)
def encode_3D(label_map, n_classes, use_gpu=False):
'''
input label as tensor
return onehot label N*D*H*W
:param label: batch_size*target_z*target_h*target_w
:return:label:batch_size*n_classes*target_z*target_h*target_w
'''
# create one-hot vector for label map
label_map = label_map[:, None, :, :, :]
size = label_map.size()
# print (size)
oneHot_size = (size[0], n_classes, size[2], size[3], size[4])
input_label = torch.zeros(torch.Size(oneHot_size)).float()
if use_gpu:
input_label = input_label.cuda()
input_label = input_label.scatter_(1, label_map.long().cuda(), 1.0)
else:
input_label = input_label
input_label = input_label.scatter_(1, label_map.long(), 1.0)
return input_label
def encode_2D(label_map, n_classes, use_gpu=False):
'''
input label as tensor N*H*W
return onehot label N*C*H*W
:return:label:batch_size*n_classes*target_z*target_h*target_w
'''
# create one-hot vector for label map
size = label_map[:, None, :, :].size()
oneHot_size = (size[0], n_classes, size[2], size[3])
input_label = torch.zeros(torch.Size(oneHot_size)).float()
if use_gpu:
input_label = input_label.cuda()
input_label = input_label.scatter_(
1, label_map[:, None, :, :].long().cuda(), 1.0)
else:
input_label = input_label
input_label = input_label.scatter_(
1, label_map[:, None, :, :].long(), 1.0)
return input_label
def lr_poly(base_lr, iter, max_iter, power):
return base_lr * ((1 - float(iter) / max_iter) ** (power))
def adjust_learning_rate(optimizer, i_iter, initial_learning_rate, total_steps, power=0.985):
lr = lr_poly(initial_learning_rate, i_iter, total_steps, power)
print('lr', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
optimizer.param_groups[0]['lr'] = lr
if len(optimizer.param_groups) > 1:
optimizer.param_groups[1]['lr'] = lr * 10
def makeVariable(tensor, use_gpu=True, type='long', requires_grad=True):
    # convert the tensor dtype
tensor = tensor.data
if type == 'long':
tensor = tensor.long()
elif type == 'float':
tensor = tensor.float()
else:
raise NotImplementedError
    # wrap it in a Variable
if use_gpu:
variable = Variable(tensor.cuda(), requires_grad=requires_grad)
else:
variable = Variable(tensor, requires_grad=requires_grad)
return variable
def get_scheduler(optimizer, lr_policy, lr_decay_iters=5, epoch_count=None, niter=None, niter_decay=None):
print('lr_policy = [{}]'.format(lr_policy))
if lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + epoch_count -
niter) / float(niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif lr_policy == 'step':
scheduler = lr_scheduler.StepLR(
optimizer, step_size=lr_decay_iters, gamma=0.5)
elif lr_policy == 'step2':
scheduler = lr_scheduler.StepLR(
optimizer, step_size=lr_decay_iters, gamma=0.1)
elif lr_policy == 'plateau':
print('schedular=plateau')
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.1, threshold=0.01, patience=5)
elif lr_policy == 'plateau2':
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif lr_policy == 'step_warmstart':
def lambda_rule(epoch):
# print(epoch)
if epoch < 5:
lr_l = 0.1
elif 5 <= epoch < 100:
lr_l = 1
elif 100 <= epoch < 200:
lr_l = 0.1
elif 200 <= epoch:
lr_l = 0.01
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif lr_policy == 'step_warmstart2':
def lambda_rule(epoch):
# print(epoch)
if epoch < 5:
lr_l = 0.1
elif 5 <= epoch < 50:
lr_l = 1
elif 50 <= epoch < 100:
lr_l = 0.1
elif 100 <= epoch:
lr_l = 0.01
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', lr_policy)
return scheduler
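# Hedged usage sketch (illustrative addition): building a 'step' scheduler for
# a throwaway optimizer and advancing it a few epochs.
def _scheduler_demo():
    params = [nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.01)
    scheduler = get_scheduler(optimizer, lr_policy='step', lr_decay_iters=5)
    for _ in range(12):
        optimizer.step()
        scheduler.step()
    print('lr after 12 epochs:', optimizer.param_groups[0]['lr'])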
class HookBasedFeatureExtractor(nn.Module):
def __init__(self, submodule, layername, upscale=False):
super(HookBasedFeatureExtractor, self).__init__()
self.submodule = submodule
self.submodule.eval()
self.layername = layername
self.outputs_size = None
self.outputs = None
self.inputs = None
self.inputs_size = None
self.upscale = upscale
def get_input_array(self, m, i, o):
if isinstance(i, tuple):
self.inputs = [i[index].data.clone() for index in range(len(i))]
self.inputs_size = [input.size() for input in self.inputs]
else:
self.inputs = i.data.clone()
            self.inputs_size = self.inputs.size()
print('Input Array Size: ', self.inputs_size)
def get_output_array(self, m, i, o):
if isinstance(o, tuple):
self.outputs = [o[index].data.clone() for index in range(len(o))]
self.outputs_size = [output.size() for output in self.outputs]
else:
self.outputs = o.data.clone()
self.outputs_size = self.outputs.size()
print('Output Array Size: ', self.outputs_size)
def rescale_output_array(self, newsize):
us = nn.Upsample(size=newsize[2:], mode='bilinear')
        if isinstance(self.outputs, list):
            for index in range(len(self.outputs)):
                self.outputs[index] = us(self.outputs[index]).data
        else:
            self.outputs = us(self.outputs).data
def forward(self, x):
target_layer = self.submodule._modules.get(self.layername)
# Collect the output tensor
h_inp = target_layer.register_forward_hook(self.get_input_array)
h_out = target_layer.register_forward_hook(self.get_output_array)
self.submodule(x)
h_inp.remove()
h_out.remove()
# Rescale the feature-map if it's required
if self.upscale:
self.rescale_output_array(x.size())
return self.inputs, self.outputs
1633614
class GeocodioError(Exception):
"""General but unknown error from Geocodio"""
pass
class GeocodioAuthError(GeocodioError):
"""HTTP 403 Access Forbidden, likely due to bad API key"""
pass
class GeocodioDataError(GeocodioError):
"""HTTP 422 Unprocessable Entity, likely poorly formed address"""
pass
class GeocodioServerError(GeocodioError):
"""HTTP 500 Server Error, remote server failure"""
pass
1633628
from .utils import NoCheckpointSetError
from .throttled_request import ThrottledRequestAlreadyFinished
from .throttler import (
    ThrottlerStatusError,
    FullRequestsPoolError,
)
__all__ = ["NoCheckpointSetError",
"ThrottledRequestAlreadyFinished",
"ThrottlerStatusError",
"FullRequestsPoolError"]
1633635
import json
import os
import glob
TRANSLATIONS = {}
FALLBACK_LOCALE_CODE = 'en-US'
def I18n(app):
"""Initializes the I18n engine.
:param app: An object that answers to 'add_template_filter(fn)'.
"""
if TRANSLATIONS:
return
locales_path = os.path.join(
os.path.dirname(__file__), '..', 'public', 'locales', 'json'
)
try:
for file_name in glob.glob(os.path.join(locales_path, '*.json')):
locale_name = os.path.basename(file_name).replace('.json', '')
with open(file_name, 'r', encoding='utf8') as f:
TRANSLATIONS[locale_name] = json.loads(f.read())
app.add_template_filter(trans)
except Exception as e:
print('Error loading localization files.')
print(e)
def trans(symbol, locale='en-US'):
"""Translation filter for templates.
Fetches the current locale from the request.
:param symbol: String to be localized.
:param locale: String representing locale to localize in.
:return: Localized string.
Usage:
{{ 'coursesLabel'|trans(current_locale.code) }}
"Courses"
"""
return translate(symbol, locale)
def translate(symbol, locale='en-US'):
"""Translates a symbol for a given locale.
:param symbol: String to be localized.
:param locale: Locale file to look the localization at.
:return: Localized string.
Usage:
>>> translate('coursesLabel', 'en-US')
"Courses"
"""
locale_dict = TRANSLATIONS.get(locale, None)
if locale_dict is None:
locale_dict = TRANSLATIONS[FALLBACK_LOCALE_CODE]
translated_value = locale_dict.get(symbol, None)
if translated_value is None:
return 'Translation not found for {0} in {1}'.format(symbol, locale)
return translated_value
def is_translation_available(symbol, locale='en-US'):
"""Returns if a translation is available for the symbol in a given locale.
:param symbol: String to be localized.
:param locale: Locale file to look the localization at.
:return: True/False
Usage:
>>> is_translation_available('coursesLabel', 'en-US')
True
>>> is_translation_available('nonExistent', 'en-US')
False
>>> is_translation_available('coursesLabel', 'non-Existent')
False
"""
return bool(TRANSLATIONS.get(locale, {}).get(symbol, False))
1633641
import os
import tornado.ioloop
import tornado.httpserver
import tornado.escape
from tornado.options import define, options
from application.server import Application
# Define command line arguments
define("port", default=3000, help="run on the given port", type=int)
def main():
# tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
port = int(os.environ.get("PORT", options.port))
print("server is running on port {0}".format(port))
http_server.listen(port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
try:
main()
except Exception as ex:
print(ex)