| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
11578268
|
import unittest
import numpy as np
try:
import nifty
except ImportError:
nifty = None
@unittest.skipUnless(nifty, "Need nifty")
class TestOperations(unittest.TestCase):
def _test_op_array(self, op, op_exp, inplace):
shape = 3 * (64,)
block_shape = 3 * (16,)
x = np.random.rand(*shape)
y = np.random.rand(*shape)
exp = op_exp(x, y)
if inplace:
op(x, y, block_shape=block_shape)
self.assertTrue(np.allclose(exp, x))
else:
x_cpy = x.copy()
res = np.zeros_like(x)
res = op(x, y, out=res, block_shape=block_shape)
self.assertTrue(np.allclose(exp, res))
            # make sure x is unchanged
self.assertTrue(np.allclose(x, x_cpy))
def _test_op_scalar(self, op, op_exp, inplace):
shape = 3 * (64,)
block_shape = 3 * (16,)
x = np.random.rand(*shape)
y = np.random.rand()
exp = op_exp(x, y)
if inplace:
op(x, y, block_shape=block_shape)
self.assertTrue(np.allclose(exp, x))
else:
x_cpy = x.copy()
res = np.zeros_like(exp)
res = op(x, y, out=res, block_shape=block_shape)
self.assertTrue(np.allclose(exp, res))
            # make sure x is unchanged
self.assertTrue(np.allclose(x, x_cpy))
def _test_op_broadcast(self, op, op_exp):
shapex = 3 * (64,)
shapey = (1, 64, 64)
block_shape = 3 * (16,)
x = np.random.rand(*shapex)
y = np.random.rand(*shapey)
exp = op_exp(x, y)
op(x, y, block_shape=block_shape)
self.assertTrue(np.allclose(exp, x))
def _test_op_masked(self, op, op_exp):
shape = 3 * (64,)
block_shape = 3 * (16,)
x = np.random.rand(*shape)
y = np.random.rand()
mask = np.random.rand(*shape) > .5
exp = x.copy()
exp[mask] = op_exp(x[mask], y)
op(x, y, block_shape=block_shape, mask=mask)
self.assertTrue(np.allclose(exp, x))
def _test_op_roi(self, op, op_exp):
shape = 3 * (64,)
block_shape = 3 * (16,)
rois = [np.s_[:], np.s_[:32, 32:], np.s_[:47, :], np.s_[:, 13:], np.s_[2:31, 5:59]]
for roi in rois:
x = np.random.rand(*shape)
y = np.random.rand()
res = x.copy()
op(res, y, block_shape=block_shape, roi=roi)
exp = op_exp(x[roi], y)
self.assertTrue(np.allclose(exp, res[roi]))
not_roi = np.ones(res.shape, dtype='bool')
not_roi[roi] = False
self.assertTrue(np.allclose(res[not_roi], x[not_roi]))
def _test_op(self, op1, op2):
self._test_op_array(op1, op2, True)
self._test_op_array(op1, op2, False)
self._test_op_scalar(op1, op2, True)
self._test_op_scalar(op1, op2, False)
self._test_op_broadcast(op1, op2)
self._test_op_masked(op1, op2)
self._test_op_roi(op1, op2)
def test_add(self):
from elf.parallel import add
self._test_op(add, np.add)
def test_subtract(self):
from elf.parallel import subtract
self._test_op(subtract, np.subtract)
def test_multiply(self):
from elf.parallel import multiply
self._test_op(multiply, np.multiply)
def test_divide(self):
from elf.parallel import divide
self._test_op(divide, np.divide)
def test_greater(self):
from elf.parallel import greater
self._test_op(greater, np.greater)
def test_greater_equal(self):
from elf.parallel import greater_equal
self._test_op(greater_equal, np.greater_equal)
def test_less(self):
from elf.parallel import less
self._test_op(less, np.less)
def test_less_equal(self):
from elf.parallel import less_equal
self._test_op(less_equal, np.less_equal)
def test_minimum(self):
from elf.parallel import minimum
self._test_op(minimum, np.minimum)
def test_maximum(self):
from elf.parallel import maximum
self._test_op(maximum, np.maximum)
if __name__ == '__main__':
unittest.main()
|
11578307
|
import numpy as np
class SurvivalAnalysis(object):
""" This class contains methods used in survival analysis.
"""
def c_index(self, risk, T, C):
"""Calculate concordance index to evaluate model prediction.
        The C-index is the fraction of all pairs of subjects whose predicted
        survival times are correctly ordered, among all pairs that can actually
        be ordered, i.e. both subjects are uncensored, or the uncensored time of
        one is smaller than the censored survival time of the other.
Parameters
----------
risk: numpy.ndarray
m sized array of predicted risk (do not confuse with predicted survival time)
T: numpy.ndarray
m sized vector of time of death or last follow up
C: numpy.ndarray
m sized vector of censored status (do not confuse with observed status)
Returns
-------
        A value between 0 and 1: the concordance index.
"""
n_orderable = 0.0
score = 0.0
for i in range(len(T)):
for j in range(i+1,len(T)):
if(C[i] == 0 and C[j] == 0):
n_orderable = n_orderable + 1
if(T[i] > T[j]):
if(risk[j] > risk[i]):
score = score + 1
elif(T[j] > T[i]):
if(risk[i] > risk[j]):
score = score + 1
else:
if(risk[i] == risk[j]):
score = score + 1
elif(C[i] == 1 and C[j] == 0):
if(T[i] >= T[j]):
n_orderable = n_orderable + 1
if(T[i] > T[j]):
if(risk[j] > risk[i]):
score = score + 1
elif(C[j] == 1 and C[i] == 0):
if(T[j] >= T[i]):
n_orderable = n_orderable + 1
if(T[j] > T[i]):
if(risk[i] > risk[j]):
score = score + 1
        # fraction of correctly ordered pairs among all orderable pairs
        return score / n_orderable
def calc_at_risk(self, X, T, O):
"""Calculate the at risk group of all patients.
For every patient i, this function returns the index of the first
patient who died after i, after sorting the patients w.r.t. time of death.
Refer to the definition of Cox proportional hazards log likelihood for
details: https://goo.gl/k4TsEM
Parameters
----------
X: numpy.ndarray
m*n matrix of input data
T: numpy.ndarray
m sized vector of time of death
O: numpy.ndarray
m sized vector of observed status (1 - censoring status)
Returns
-------
X: numpy.ndarray
m*n matrix of input data sorted w.r.t time of death
T: numpy.ndarray
m sized sorted vector of time of death
O: numpy.ndarray
m sized vector of observed status sorted w.r.t time of death
at_risk: numpy.ndarray
m sized vector of starting index of risk groups
"""
tmp = list(T)
T = np.asarray(tmp).astype('float64')
order = np.argsort(T)
sorted_T = T[order]
at_risk = np.asarray([list(sorted_T).index(x) for x in sorted_T]).astype('int32')
T = np.asarray(sorted_T)
O = O[order]
X = X[order]
return X, T, O, at_risk
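# A minimal usage sketch (not part of the original module); the arrays below are
# made-up illustrative data.
if __name__ == '__main__':
    sa = SurvivalAnalysis()
    risk = np.array([0.9, 0.2, 0.5])  # higher predicted risk => shorter survival
    T = np.array([2.0, 10.0, 5.0])    # time of death or last follow-up
    C = np.array([0, 0, 0])           # 0 = uncensored, 1 = censored
    print(sa.c_index(risk, T, C))     # all pairs correctly ordered -> 1.0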
|
11578327
|
import os
from unittest.mock import patch, mock_open
from unittest import mock
import pytest
import cryptowatch
from cryptowatch.auth import read_config
from cryptowatch import rest_endpoint
def test_stream_auth_api_key_missing():
# No API key set, should raise an exception
with pytest.raises(cryptowatch.errors.APIKeyError):
cryptowatch.api_key = ""
cryptowatch.stream.subscriptions = ["markets:*:trades"]
cryptowatch.stream.connect()
def test_allowance_not_mandatory_field(requests_mock):
# 1st request has no allowance key
requests_mock.get(
"{}/markets/{}/{}/ohlc".format(rest_endpoint, "binance", "btcusdt"),
status_code=200,
text="""{
"result": {
"86400": [[1381190400,123.610000,123.610000,123.610000,123.610000,0.100000,0.0],
[1381276800,123.610000,124.190000,123.900000,124.180000,3.991600,0.0]]
}
}""",
)
candles = cryptowatch.markets.get("BINANCE:BTCUSDT", ohlc=True)
    assert candles._allowance is None
# 2nd request has an allowance key
requests_mock.get(
"{}/markets/{}/{}/ohlc".format(rest_endpoint, "binance", "btcusdt"),
status_code=200,
text="""{
"result": {
"86400": [[1381190400,123.610000,123.610000,123.610000,123.610000,0.100000,0.0],
[1381276800,123.610000,124.190000,123.900000,124.180000,3.991600,0.0]]
},
"allowance": {"cost":4239786,"remaining":3862293338,"remainingPaid":0,
"upgrade":"Upgrade for a higher allowance, starting at $15/month for 16 seconds/hour. https://cryptowat.ch/pricing"}
}""",
)
candles = cryptowatch.markets.get("BINANCE:BTCUSDT", ohlc=True)
    assert candles._allowance is not None
    assert candles._allowance.cost is not None
    assert candles._allowance.upgrade is not None
    assert candles._allowance.remaining_paid is not None
    assert candles._allowance.remaining is not None
# 3rd request has an allowance key but no remainingPaid
requests_mock.get(
"{}/markets/{}/{}/ohlc".format(rest_endpoint, "binance", "btcusdt"),
status_code=200,
text="""{
"result": {
"86400": [[1381190400,123.610000,123.610000,123.610000,123.610000,0.100000,0.0],
[1381276800,123.610000,124.190000,123.900000,124.180000,3.991600,0.0]]
},
"allowance": {"cost":4239786,"remaining":3862293338,
"upgrade":"Upgrade for a higher allowance, starting at $15/month for 16 seconds/hour. https://cryptowat.ch/pricing"}
}""",
)
candles = cryptowatch.markets.get("BINANCE:BTCUSDT", ohlc=True)
assert candles._allowance.remaining_paid == 0
    assert candles._allowance is not None
    assert candles._allowance.cost is not None
    assert candles._allowance.upgrade is not None
    assert candles._allowance.remaining is not None
def test_open_config_file(mocker):
# Mock open()
with patch("cryptowatch.auth.open", mock_open()) as config:
# Mock os.environ.get()
with mock.patch.dict("os.environ", {"HOME": "/sweet/home"}):
# This should open() the credential file
read_config()
# Forge credential file path
user_home_dir = os.environ.get("HOME")
filepath = "{}/.cw/credentials.yml".format(user_home_dir)
            # Check the credentials file was opened with the expected path and mode
config.assert_called_once_with(filepath, "r")
|
11578384
|
import numpy as np
import chainer.links as L
from chainer import Variable
from lda2vec import dirichlet_likelihood
def test_concentration():
""" Test that alpha > 1.0 on a dense vector has a higher likelihood
than alpha < 1.0 on a dense vector, and test that a sparse vector
has the opposite character. """
dense = np.random.randn(5, 10).astype('float32')
sparse = np.random.randn(5, 10).astype('float32')
sparse[:, 1:] /= 1e5
weights = Variable(dense)
dhl_dense_10 = dirichlet_likelihood(weights, alpha=10.0).data
dhl_dense_01 = dirichlet_likelihood(weights, alpha=0.1).data
weights = Variable(sparse)
dhl_sparse_10 = dirichlet_likelihood(weights, alpha=10.0).data
dhl_sparse_01 = dirichlet_likelihood(weights, alpha=0.1).data
msg = "Sparse vector has higher likelihood than dense with alpha=0.1"
assert dhl_sparse_01 > dhl_dense_01, msg
msg = "Dense vector has higher likelihood than sparse with alpha=10.0"
assert dhl_dense_10 > dhl_sparse_10, msg
def test_embed():
""" Test that embedding is treated like a Variable"""
embed_dense = L.EmbedID(5, 10)
embed_sparse = L.EmbedID(5, 10)
embed_dense.W.data[:] = np.random.randn(5, 10).astype('float32')
embed_sparse.W.data[:] = np.random.randn(5, 10).astype('float32')
embed_sparse.W.data[:, 1:] /= 1e5
dhl_dense_01 = dirichlet_likelihood(embed_dense, alpha=0.1).data
dhl_sparse_01 = dirichlet_likelihood(embed_sparse, alpha=0.1).data
msg = "Sparse vector has higher likelihood than dense with alpha=0.1"
assert dhl_sparse_01 > dhl_dense_01, msg
|
11578417
|
import pkg_resources
from pikachu.drawing.drawing import Drawer, Options, draw_multiple
from pikachu.math_functions import Vector
import datetime
class MolFileWriter:
"""
    NOTE: This MolFileWriter purely exports the atom coordinates (x and y) found by PIKAChU; these are
    unitless and should not be interpreted as angstroms.
"""
charge_to_value = {-3: 7,
-2: 6,
-1: 5,
0: 0,
1: 3,
2: 2,
3: 1}
bond_to_value = {'single': 1,
'double': 2,
'triple': 3,
'aromatic': 4}
chiral_symbol_to_value = {None: 0,
'/': 1,
'\\': 6}
def __init__(self, structure, filename, drawing_options=None, multiple=False):
self.original_structure = structure
if not drawing_options:
if multiple:
self.drawing = draw_multiple(structure, coords_only=True)
else:
self.drawing = Drawer(structure, coords_only=True)
else:
if multiple:
self.drawing = draw_multiple(structure, coords_only=True, options=drawing_options)
else:
self.drawing = Drawer(structure, coords_only=True, options=drawing_options)
self.drawn_structure = self.drawing.structure
self.filename = filename
self.title = filename.split('.')[0]
self.atom_to_coords = self.get_atom_coords()
self.datetime = datetime.datetime.now()
self.software_version = pkg_resources.get_distribution('pikachu-chem').version
self.atom_count = self.get_atom_count()
self.bond_count, self.drawn_bonds = self.get_bond_count()
def get_atom_coords(self):
atom_to_coords = {}
for atom in self.drawn_structure.graph:
if atom.draw.is_drawn:
atom_to_coords[atom] = atom.draw.position
return atom_to_coords
def get_atom_count(self):
count = 0
for atom in self.drawn_structure.graph:
original_atom = self.original_structure.atoms[atom.nr]
if atom.draw.is_drawn:
count += 1
elif original_atom.type == 'H' and original_atom.has_neighbour('N') and original_atom.get_neighbour('N').pyrrole:
count += 1
return count
def get_bond_count(self):
count = 0
bonds = []
for bond_nr, bond in self.drawn_structure.bonds.items():
original_bond = self.original_structure.bonds[bond_nr]
if (bond.atom_1.draw.is_drawn and bond.atom_2.draw.is_drawn)\
or (original_bond.atom_1.pyrrole and original_bond.atom_2.type == 'H')\
or (original_bond.atom_2.pyrrole and original_bond.atom_1.type == 'H'):
count += 1
bonds.append(bond)
return count, bonds
def write_mol_file(self):
atom_to_line_nr = {}
with open(self.filename, 'w') as molfile:
molfile.write(f'{self.title}\n')
molfile.write(f' PIKAChU {self.software_version} {self.datetime}\n')
molfile.write('\n')
molfile.write(f'{str(self.atom_count).rjust(3)}{str(self.bond_count).rjust(3)} 0 0 1 0 0 0 0 0999 V2000\n')
line_nr = 0
for atom in self.drawn_structure.graph:
original_atom = self.original_structure.atoms[atom.nr]
if atom.draw.is_drawn:
line_nr += 1
atom_to_line_nr[atom] = line_nr
x_string = f'{atom.draw.position.x:.4f}'.rjust(10)
y_string = f'{atom.draw.position.y:.4f}'.rjust(10)
z_string = f' 0.0000'
charge_string = f'{str(self.charge_to_value[atom.charge]).rjust(3)}'
molfile.write(f'{x_string}{y_string}{z_string} {atom.type.ljust(3)} 0{charge_string} 0 0 0 0 0 0 0 0 0 0\n')
                elif original_atom.type == 'H' and original_atom.has_neighbour('N') and original_atom.get_neighbour('N').pyrrole:
                    line_nr += 1
                    atom_to_line_nr[atom] = line_nr
                    position = Vector.add_vectors(atom.get_neighbour('N').draw.position, Vector(0, -15))
                    x_string = f'{position.x:.4f}'.rjust(10)
                    y_string = f'{position.y:.4f}'.rjust(10)
                    z_string = f' 0.0000'
                    # bug fix: charge_string was only set in the branch above, causing a NameError here
                    charge_string = f'{str(self.charge_to_value[atom.charge]).rjust(3)}'
                    molfile.write(
                        f'{x_string}{y_string}{z_string} {atom.type.ljust(3)} 0{charge_string} 0 0 0 0 0 0 0 0 0 0\n')
for bond_nr, bond in self.original_structure.bonds.items():
drawn_bond = self.drawn_structure.bonds[bond_nr]
chiral_val = None
if (drawn_bond.atom_1.draw.is_drawn and drawn_bond.atom_2.draw.is_drawn)\
or (bond.atom_1.pyrrole and bond.atom_2.type == 'H')\
or (bond.atom_2.pyrrole and bond.atom_1.type == 'H'):
                    if drawn_bond in self.drawing.chiral_bonds:
                        # index with drawn_bond, the key whose membership was just checked
                        wedge, atom = self.drawing.chiral_bond_to_orientation[drawn_bond]
if wedge == 'front':
chiral_val = 1
else:
chiral_val = 6
reverse = False
if atom == bond.atom_2:
reverse = True
if chiral_val:
if reverse:
molfile.write(
f'{str(atom_to_line_nr[bond.atom_2]).rjust(3)}{str(atom_to_line_nr[bond.atom_1]).rjust(3)}{str(self.bond_to_value[bond.type]).rjust(3)}{str(chiral_val).rjust(3)} 0 0 0\n')
else:
molfile.write(
f'{str(atom_to_line_nr[bond.atom_1]).rjust(3)}{str(atom_to_line_nr[bond.atom_2]).rjust(3)}{str(self.bond_to_value[bond.type]).rjust(3)}{str(chiral_val).rjust(3)} 0 0 0\n')
elif bond.type == 'double' and not bond.chiral and not bond.atom_1.chiral and not bond.atom_2.chiral:
molfile.write(
f'{str(atom_to_line_nr[bond.atom_1]).rjust(3)}{str(atom_to_line_nr[bond.atom_2]).rjust(3)}{str(self.bond_to_value[bond.type]).rjust(3)} 3 0 0 0\n')
else:
molfile.write(
f'{str(atom_to_line_nr[bond.atom_1]).rjust(3)}{str(atom_to_line_nr[bond.atom_2]).rjust(3)}{str(self.bond_to_value[bond.type]).rjust(3)} 0 0 0 0\n')
            molfile.write('M  END\n')  # the MDL molfile properties-block terminator is "M  END" (two spaces)
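# A minimal usage sketch (not part of the original file), kept commented out.
# `read_smiles` is an assumption here, based on PIKAChU's SMILES reader:
# from pikachu.general import read_smiles
# structure = read_smiles('c1ccccc1')
# MolFileWriter(structure, 'benzene.mol').write_mol_file()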
|
11578456
|
class Solution:
    # LeetCode 489 "Robot Room Cleaner": DFS with backtracking.
    # `move` lists the four directions in counter-clockwise order, matching turnLeft().
    def cleanRoom(self, robot, move=[(-1, 0), (0, -1), (1, 0), (0, 1)]):
        def dfs(i, j, cleaned, ind):
            robot.clean()
            cleaned.add((i, j))
            k = 0
            # try each direction, starting from the robot's current heading
            for x, y in move[ind:] + move[:ind]:
                if (i + x, j + y) not in cleaned and robot.move():
                    dfs(i + x, j + y, cleaned, (ind + k) % 4)
                    # backtrack: turn around, step back, then restore the heading
                    robot.turnLeft()
                    robot.turnLeft()
                    robot.move()
                    robot.turnRight()
                    robot.turnRight()
                # rotate counter-clockwise to face the next direction
                robot.turnLeft()
                k += 1
        dfs(0, 0, set(), 0)
|
11578476
|
from __future__ import division
import numpy as np
class VogelsteinClassifier(object):
"""Oncogene and TSG classifier based on the 20/20 rule.
    Essentially, the 20/20 rule states that oncogenes have at least
    20% recurrent missense mutations, while tumor suppressor genes
    have at least 20% deleterious mutations. This is a simple
    rule-based classifier. To reduce errors for genes with low mutation
    counts, Vogelstein et al. manually curated genes with between
    7 and 18 mutations. This class cannot reproduce their manual
    curation, but it can give an estimate of the accuracy of a naive
    implementation of the 20/20 rule. The 20% threshold is also
    changeable.
    Information on the 20/20 rule from Vogelstein's Science paper:
    http://www.sciencemag.org/content/339/6127/1546.full
    """
def __init__(self,
onco_threshold=.2,
tsg_threshold=.2,
kind='vogelstein',
min_count=0,
tsg_min=7,
onco_min=10,
db_size=404863): # db size is as reported from Cancer Genome Landscapes paper
# check valid percentage
if not 0 < onco_threshold < 1:
raise ValueError("Oncogene threshold is invalid")
if not 0 < tsg_threshold < 1:
raise ValueError("TSG threshold is invalid")
self.kind = kind # either 'vogelstein' or 'min'
# set parameters as reported in Cancer genome landscapes paper
self.db_size = db_size
self.db_tsg_min = tsg_min
self.db_onco_min = onco_min
# assign percentage thresholds
self.onco_threshold = onco_threshold
self.tsg_threshold = tsg_threshold
# set min count to classify gene
self.min_count = min_count
self.tsg_min = tsg_min
self.onco_min = onco_min
# labels to classify genes as
self.onco_label = "oncogene"
self.tsg_label = "tsg"
self.other_label = "other"
    @staticmethod  # the original def was missing `self`; it uses no instance state
    def _subsample_count(recur_ct, del_ct, total_ct, desired_ct):
if total_ct <= desired_ct:
# no need for subsampling
return recur_ct, del_ct, total_ct
else:
# sub-sample to desired number of counts
prng = np.random.RandomState()
ct_array = np.array([recur_ct, del_ct,
total_ct - (recur_ct + del_ct)])
prob = ct_array.astype(float) / ct_array.sum()
multinomial_sample = prng.multinomial(desired_ct, # total counts for multinomial
prob) # probability
return multinomial_sample
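    # For example, subsampling counts (recur=10, del=5, other=85) down to 50 total
    # mutations draws from a multinomial with probabilities (0.10, 0.05, 0.85);
    # a hypothetical illustration, not part of the original module.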
def predict_list(self, input_list,
kind='count',
scale_type=None,
subsample=None):
"""Predict a list of inputs as either oncogene/tsg/other.
**Parameters**
input_list : list of list
list containing a list of recurrent count,
deleterious count or %, and total count or %, in that
order.
kind : str ('count' | 'percent')
whether recur/del are percents or counts
scale_type : str (None | 'linear')
whether to scale count thresholds based on size of database
based on cancer genome landscapes paper
subsample : (None | int)
whether to subsample total mutations to a certain number of
mutations.
"""
# scale count thresholds
all_cts = sum([x[-1] for x in input_list])
if scale_type:
self.tsg_min = self.db_tsg_min * float(all_cts)/self.db_size
self.onco_min = self.db_onco_min * float(all_cts)/self.db_size
else:
self.tsg_min = self.db_tsg_min
self.onco_min = self.db_onco_min
# perform prediction
gene_class_list = []
if kind == 'count':
for recur_ct, del_ct, total_ct in input_list:
tmp_gene_class = self.predict_by_cts(recur_ct,
del_ct,
total_ct)
gene_class_list.append(tmp_gene_class)
else:
for recur_pct, del_pct, total_cts in input_list:
tmp_gene_class = self.predict_by_pct(recur_pct,
del_pct,
total_cts)
gene_class_list.append(tmp_gene_class)
return gene_class_list
def predict_by_cts(self, recurrent, deleterious, total):
"""Predicts oncogene/tsg/other by gene mutation counts."""
if total < self.min_count:
# too few mutations case
return self.other_label
# sufficient number of counts
recur_perc = recurrent / float(total)
del_perc = deleterious / float(total)
gene_class = self.predict_by_pct(recur_perc,
del_perc,
total)
return gene_class
def predict_by_pct(self, recur_pct, del_pct, total):
"""The actual 20/20 rule logic to classify genes."""
# calc counts
recur_ct = recur_pct * total
del_ct = del_pct * total
# 20/20 rule logic
if self.kind == 'vogelstein':
if recur_pct >= self.onco_threshold and recur_ct >= self.onco_min:
if del_pct <= .05:
return self.onco_label
elif del_ct >= self.tsg_min:
return self.tsg_label
else:
return self.other_label
elif del_pct >= self.tsg_threshold and del_ct >= self.tsg_min:
return self.tsg_label
else:
return self.other_label
elif self.kind == 'min':
if total < self.min_count:
# too few mutations case
return self.other_label
# if recur_pct >= self.onco_threshold and (total*recur_pct) >= self.min_count:
elif recur_pct >= self.onco_threshold:
# high number of recurrent missense case
if recur_pct >= del_pct:
return self.onco_label
else:
return self.tsg_label
elif del_pct >= self.tsg_threshold:
# high number of deleterious mutations case
return self.tsg_label
else:
# doesn't classify as oncogene or tsg
return self.other_label
def set_onco_threshold(self, threshold):
"""Setter for percentage threshold for recurrent missense mutations
to call it an oncogene."""
if 0 < threshold < 1:
self.onco_threshold = threshold
def set_tsg_threshold(self, threshold):
"""Setter for percentage threshold for deleterious mutations to
call it a tsg."""
if 0 < threshold < 1:
self.tsg_threshold = threshold
def set_min_count(self, count):
"""Setter for minimum count that can be classified for either a
oncogene or tsg."""
if count > 0:
self.min_count = count
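# A minimal usage sketch (not part of the original module); the counts below are
# made-up (recurrent, deleterious, total) triples.
if __name__ == "__main__":
    clf = VogelsteinClassifier()
    print(clf.predict_list([[30, 2, 100], [5, 40, 100], [1, 1, 100]]))
    # expected: ['oncogene', 'tsg', 'other']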
|
11578487
|
import numpy
import sklearn
import pandas
#import eli5
import dill
import xgboost
import hdbscan
print('All packages imported successfully')
|
11578511
|
from newspaperdemo import app
from newspaperdemo.controllers import article
if __name__ == '__main__':
app.register_blueprint(article.mod)
app.run(debug=True)
|
11578587
|
import os.path
import logging
import kpm.utils
import re
__all__ = ['new_package']
logger = logging.getLogger(__name__)
DIRECTORIES = ['templates']
MANIFEST = """---
package:
name: {name}
author: <author>
version: 1.0.0
description: {app}
license: MIT
# Defaults variables
# i.e:
# variables:
# namespace: kube-system
# replicas: 1
# image: "gcr.io/google_containers/heapster:v0.18.2"
# svc_type: "NodePort"
variables: {{}}
# List the resources
# resources :
# - file: nginx-rc.yaml # Template file, relative to ./templates
# name: nginx # kubernetes resource name
# type: rc # kubernetes resource type (ds,rc,svc,secret....)
# sharded: no # Optional: use the shards to generate this resource
# patch: # Optional: array of 'json-patch'
# - {{op: replace, path: /metadata/labels/app-name, value: 'nginx'}}
resources: []
# - file: {app}-rc.yaml
# name: {app}
# type: rc
# - file: {app}-svc.yaml
# name: {app}
# type: svc
# Shard list (optional)
# shards:
# - name: shard-name # will be appended to the resource name
# variables: {{}} # Optional: apply vars only to this shard
shards: []
# List the dependencies
# Special name '$self' to deploy the current package.
# Useful to sort the dependencies
# i.e:
# deploy:
# - name: postgresql
# - name: $self
deploy:
- name: $self
"""
README = """
{name}
===========
# Install
kpm deploy {name}
"""
def new_package(name, dest=".", with_comments=False):
kpm.utils.check_package_name(name, force_check=True)
_, app = name.split("/")
path = os.path.join(dest, name)
kpm.utils.mkdir_p(path)
    with open(os.path.join(path, 'README.md'), 'w') as readme:
        readme.write(README.format(name=name))
    if with_comments:
        m = MANIFEST
    else:
        m = re.sub(r'(?m)^#.*\n?', '', MANIFEST)
    with open(os.path.join(path, 'manifest.yaml'), 'w') as manifest:
        manifest.write(m.format(app=app, name=name))
for directory in DIRECTORIES:
kpm.utils.mkdir_p(os.path.join(path, directory))
return path
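# A minimal usage sketch (not part of the original module), kept commented out;
# the package name is a made-up example in the expected "<namespace>/<app>" form.
# >>> new_package("myorg/nginx", dest="/tmp")
# '/tmp/myorg/nginx'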
|
11578592
|
from typing import List

class Solution:
    def minDeletionSize(self, A: List[str]) -> int:
return sum(list(column) != sorted(column) for column in zip(*A))
|
11578668
|
from django.shortcuts import render
from rest_framework.response import Response
import json
from ibm_watson import VisualRecognitionV3
from rest_framework.views import status
from django.http import JsonResponse, HttpResponse
from django.conf import settings
def object_detection(request, picture_url):
visual_recognition = VisualRecognitionV3('2018-03-19',iam_apikey = settings.IBM_API_KEY)
classes_result = visual_recognition.classify(url = picture_url).get_result()
return JsonResponse(classes_result)
|
11578670
|
from __future__ import absolute_import, division, print_function
import os
import re
import tensorflow as tf
from absl import app, flags
from albert import AlbertConfig, AlbertModel
from albert_model import pretrain_model
FLAGS = flags.FLAGS
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
flags.DEFINE_enum("model_type","albert_encoder",["albert_encoder","albert"],
"Select model type for weight conversion.\n"
"albert_enoder for finetuning tasks.\n"
"albert for MLM & SOP FineTuning on domain specific data.")
flags.DEFINE_string("config", "/work/ALBERT-TF2.0-master/model_configs/xxlarge/config.json", "tf hub model version to convert 1 or 2.")
flags.DEFINE_string("model_dir", "/work/ALBERT-master_google/export", "tf1.x albert model file")
flags.DEFINE_enum("model","xxlarge",["base", "large", "xlarge", "xxlarge"],"model for converison")
weight_map = {
"bert/embeddings/word_embeddings": "albert_model/word_embeddings/embeddings:0",
"bert/embeddings/token_type_embeddings": "albert_model/embedding_postprocessor/type_embeddings:0",
"bert/embeddings/position_embeddings": "albert_model/embedding_postprocessor/position_embeddings:0",
"bert/embeddings/LayerNorm/beta": "albert_model/embedding_postprocessor/layer_norm/beta:0",
"bert/embeddings/LayerNorm/gamma": "albert_model/embedding_postprocessor/layer_norm/gamma:0",
"bert/encoder/embedding_hidden_mapping_in/kernel": "albert_model/embedding_postprocessor/embedding_hidden_mapping_in/kernel:0",
"bert/encoder/embedding_hidden_mapping_in/bias": "albert_model/embedding_postprocessor/embedding_hidden_mapping_in/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel": "albert_model/encoder/shared_layer/self_attention/query/kernel:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/bias": "albert_model/encoder/shared_layer/self_attention/query/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/self/key/kernel": "albert_model/encoder/shared_layer/self_attention/key/kernel:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/self/key/bias": "albert_model/encoder/shared_layer/self_attention/key/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/self/value/kernel": "albert_model/encoder/shared_layer/self_attention/value/kernel:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/self/value/bias": "albert_model/encoder/shared_layer/self_attention/value/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/output/dense/kernel": "albert_model/encoder/shared_layer/self_attention_output/kernel:0",
"bert/encoder/transformer/group_0/inner_group_0/attention_1/output/dense/bias": "albert_model/encoder/shared_layer/self_attention_output/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/LayerNorm/beta": "albert_model/encoder/shared_layer/self_attention_layer_norm/beta:0",
"bert/encoder/transformer/group_0/inner_group_0/LayerNorm/gamma": "albert_model/encoder/shared_layer/self_attention_layer_norm/gamma:0",
"bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/dense/kernel": "albert_model/encoder/shared_layer/intermediate/kernel:0",
"bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/dense/bias": "albert_model/encoder/shared_layer/intermediate/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/output/dense/kernel": "albert_model/encoder/shared_layer/output/kernel:0",
"bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/output/dense/bias": "albert_model/encoder/shared_layer/output/bias:0",
"bert/encoder/transformer/group_0/inner_group_0/LayerNorm_1/beta": "albert_model/encoder/shared_layer/output_layer_norm/beta:0",
"bert/encoder/transformer/group_0/inner_group_0/LayerNorm_1/gamma": "albert_model/encoder/shared_layer/output_layer_norm/gamma:0",
"bert/pooler/dense/kernel": "albert_model/pooler_transform/kernel:0",
"bert/pooler/dense/bias": "albert_model/pooler_transform/bias:0",
"cls/predictions/transform/dense/kernel": "cls/predictions/transform/dense/kernel:0",
"cls/predictions/transform/dense/bias": "cls/predictions/transform/dense/bias:0",
"cls/predictions/transform/LayerNorm/beta": "cls/predictions/transform/LayerNorm/beta:0",
"cls/predictions/transform/LayerNorm/gamma": "cls/predictions/transform/LayerNorm/gamma:0",
"cls/predictions/output_bias": "cls/predictions/output_bias:0",
'cls/seq_relationship/output_weights': 'cls/seq_relationship/output_weights:0',
'cls/seq_relationship/output_bias': 'cls/seq_relationship/output_bias:0'
}
weight_map = {v: k for k, v in weight_map.items()}
def main(_):
model_path = FLAGS.model_dir
max_seq_length = 256
float_type = tf.float32
input_word_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
input_mask = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
input_type_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
albert_config = AlbertConfig.from_json_file(FLAGS.config)
tags = []
stock_values = {}
input_checkpoint = "/work/ALBERT-master_google/export/"
# with tf.Graph().as_default():
# sm = tf.compat.v2.saved_model.load(FLAGS.model_dir, tags=tags)
# with tf.compat.v1.Session() as sess:
# sess.run(tf.compat.v1.global_variables_initializer())
# stock_values = {v.name.split(":")[0]: v.read_value()
# for v in sm.variables}
# stock_values = sess.run(stock_values)
with tf.Graph().as_default():
# sm = tf.compat.v2.saved_model.load(FLAGS.model_dir, tags=tags)
saver = tf.compat.v1.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
with tf.compat.v1.Session() as sess:
# sess.run(tf.compat.v1.global_variables_initializer())
saver.restore(sess, input_checkpoint)
print("succes!")
# stock_values = {v.name.split(":")[0]: v.read_value()
# for v in sm.variables}
# stock_values = sess.run(stock_values)
# for v in sm.variables:
# print(sess.run({v.name.split(":")[0]:v.read_value()}))
print(tf.compat.v1.global_variables())
for vname in tf.compat.v1.global_variables():
stock_values[vname.name.split(":")[0]] = tf.keras.backend.get_value(vname)
print(vname.name)
#print(stock_values)
loaded_weights = set()
skip_count = 0
weight_value_tuples = []
skipped_weight_value_tuples = []
if FLAGS.model_type == "albert_encoder":
albert_layer = AlbertModel(config=albert_config, float_type=float_type)
        pooled_output, sequence_output, attention_output, embedding_tensor = albert_layer(
            input_word_ids, input_mask, input_type_ids)
albert_model = tf.keras.Model(
inputs=[input_word_ids, input_mask, input_type_ids],
outputs=[pooled_output, sequence_output, attention_output, embedding_tensor])
albert_params = albert_model.weights
param_values = tf.keras.backend.batch_get_value(albert_params)
else:
albert_full_model,_ = pretrain_model(albert_config, max_seq_length, max_predictions_per_seq=25)
albert_layer = albert_full_model.get_layer("albert_model")
albert_params = albert_full_model.weights
param_values = tf.keras.backend.batch_get_value(albert_params)
for ndx, (param_value, param) in enumerate(zip(param_values, albert_params)):
stock_name = weight_map[param.name]
if stock_name in stock_values:
ckpt_value = stock_values[stock_name]
if param_value.shape != ckpt_value.shape:
print("loader: Skipping weight:[{}] as the weight shape:[{}] is not compatible "
"with the checkpoint:[{}] shape:{}".format(param.name, param.shape,
stock_name, ckpt_value.shape))
skipped_weight_value_tuples.append((param, ckpt_value))
continue
weight_value_tuples.append((param, ckpt_value))
loaded_weights.add(stock_name)
else:
print("loader: No value for:[{}], i.e.:[{}] in:[{}]".format(
param.name, stock_name, FLAGS.model_dir))
skip_count += 1
tf.keras.backend.batch_set_value(weight_value_tuples)
print("Done loading {} ALBERT weights from: {} into {} (prefix:{}). "
"Count of weights not found in the checkpoint was: [{}]. "
"Count of weights with mismatched shape: [{}]".format(
len(weight_value_tuples), FLAGS.model_dir, albert_layer, "albert", skip_count, len(skipped_weight_value_tuples)))
print("Unused weights from saved model:",
"\n\t" + "\n\t".join(sorted(set(stock_values.keys()).difference(loaded_weights))))
if FLAGS.model_type == "albert_encoder":
albert_model.save_weights(f"{FLAGS.model_dir}/tf2_model.h5")
else:
albert_full_model.save_weights(f"{FLAGS.model_dir}/tf2_model_full.h5")
if __name__ == "__main__":
flags.mark_flag_as_required("model_dir")
flags.mark_flag_as_required("model")
flags.mark_flag_as_required("model_type")
app.run(main)
|
11578672
|
import numpy as np
import napari
from .utils import *
# Shift, Control, Alt, Meta, Up, Down, Left, Right, PageUp, PageDown, Insert,
# Delete, Home, End, Escape, Backspace, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10,
# F11, F12, Space, Enter, Tab
KEYS = {"focus_next": "]",
"focus_previous": "[",
"hide_others": "Control-Shift-A",
"reslice": "/",
"to_front": "Control-Shift-F",
"reset_view": "Control-Shift-R",
}
# Overwrite napari shortcut
@napari.Viewer.bind_key("Alt-Up", overwrite=True)
def z_up(viewer:"napari.Viewer"):
axes = "".join(viewer.dims.axis_labels)
i = axes.find("z")
if i < 0:
return None
else:
step = list(viewer.dims.current_step)
step[i] = min(step[i]+1, viewer.dims.nsteps[i]-1)
viewer.dims.current_step = step
return None
@napari.Viewer.bind_key("Alt-Down", overwrite=True)
def z_down(viewer:"napari.Viewer"):
axes = "".join(viewer.dims.axis_labels)
i = axes.find("z")
if i < 0:
return None
else:
step = list(viewer.dims.current_step)
step[i] = max(step[i]-1, 0)
viewer.dims.current_step = step
return None
__all__ = list(KEYS.keys())
def bind_key(func):
return napari.Viewer.bind_key(KEYS[func.__name__])(func)
@bind_key
def focus_next(viewer:"napari.Viewer"):
_change_focus(viewer, 1)
return None
@bind_key
def focus_previous(viewer:"napari.Viewer"):
_change_focus(viewer, -1)
return None
def _change_focus(viewer:"napari.Viewer", ind:int):
# assert one Shapes or Points layer is selected
selected_layer = get_a_selected_layer(viewer)
if not isinstance(selected_layer, (napari.layers.Shapes, napari.layers.Points)):
return None
# check if one shape/point is selected
selected_data = list(selected_layer.selected_data)
if len(selected_data) != 1:
return None
selected_data = selected_data[0]
# determine next/previous index/data to select
ndata = len(selected_layer.data)
next_to_select = (selected_data + ind) % ndata
next_data = np.atleast_2d(selected_layer.data[next_to_select])
# update camera
scale = selected_layer.scale
next_center = np.mean(next_data, axis=0) * scale
viewer.dims.current_step = list(next_data[0, :].astype(np.int64))
viewer.camera.center = next_center
zoom = viewer.camera.zoom
viewer.camera.events.zoom() # Here events are emitted and zoom changes automatically.
viewer.camera.zoom = zoom
selected_layer.selected_data = {next_to_select}
selected_layer._set_highlight()
return None
@bind_key
def hide_others(viewer:"napari.Viewer"):
"""
Make selected layers visible and others invisible.
"""
selected = viewer.layers.selection
visibility_old = [layer.visible for layer in viewer.layers]
visibility_new = [layer in selected for layer in viewer.layers]
if visibility_old != visibility_new:
for layer, vis in zip(viewer.layers, visibility_new):
layer.visible = vis
else:
for layer in viewer.layers:
layer.visible = True
@bind_key
def to_front(viewer:"napari.Viewer"):
"""
Let selected layers move to front.
"""
not_selected_index = [i for i, l in enumerate(viewer.layers)
if l not in viewer.layers.selection]
viewer.layers.move_multiple(not_selected_index, 0)
@bind_key
def reset_view(viewer:"napari.Viewer"):
"""
Reset translate/scale parameters to the initial value.
"""
for layer in viewer.layers.selection:
layer.translate -= (layer.translate - layer.metadata["init_translate"])
layer.scale = layer.metadata["init_scale"]
@bind_key
def reslice(viewer:"napari.Viewer"):
"""
2D Reslice with currently selected lines/paths and images.
"""
    if viewer.dims.ndisplay == 3:
        viewer.status = "Cannot reslice in 3D mode."
        return None
imglist = list(iter_selected_layer(viewer, "Image"))
ndim = np.unique([shape_layer.ndim for shape_layer
in iter_selected_layer(viewer, "Shapes")])
if len(ndim) > 1:
viewer.status = "Cannot crop using Shapes layers with different number of dimensions."
else:
ndim = ndim[0]
if ndim == viewer.dims.ndim == 3:
active_plane = [-3, -2, -1]
else:
active_plane = [-2, -1]
if len(imglist) == 0:
imglist = [front_image(viewer)]
paths = []
for shape_layer in iter_selected_layer(viewer, "Shapes"):
for shape, type_ in zip(shape_layer.data, shape_layer.shape_type):
if type_ in ("line", "path"):
paths.append((shape, shape_layer.scale)) # shape = float pixel
out = []
for path, shape_layer_scale in paths:
for layer in imglist:
factor = layer.scale[active_plane]/shape_layer_scale[active_plane]
dr = layer.translate[active_plane] / layer.scale[active_plane]
out_ = layer.data.reslice(path[:,active_plane]/factor - dr)
out.append(out_)
viewer.window._results.append(out)
return None
|
11578751
|
from asynctest import mock
from meltano.core.block.ioblock import IOBlock
from meltano.core.block.parser import generate_job_id, is_command_block
from meltano.core.environment import Environment
class TestParserUtils:
def test_is_command_block(self, tap, dbt):
"""Verify that the is_command_block function returns True when the block is an IOBlock and has a command."""
assert not is_command_block(tap)
assert is_command_block(dbt)
def test_generate_job_id(self):
"""Verify that the job id is generated correctly when an environment is provided."""
block1 = mock.Mock(spec=IOBlock)
block1.string_id = "block1"
block2 = mock.Mock(spec=IOBlock)
block2.string_id = "block2"
project = mock.Mock()
project.active_environment = None
assert not generate_job_id(project, block1, block2)
project.active_environment = Environment(name="test")
assert generate_job_id(project, block1, block2) == "test-block1-block2"
|
11578901
|
import shutil
import uuid
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyspark
from pyspark.sql.functions import col, rand, when
from deltalake import DeltaTable
spark = (
pyspark.sql.SparkSession.builder.appName("deltalake")
.config("spark.jars.packages", "io.delta:delta-core_2.12:0.7.0")
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension")
.config("spark.executor.memory", "24g")
.config("spark.driver.memory", "24g")
.config(
"spark.sql.catalog.spark_catalog",
"org.apache.spark.sql.delta.catalog.DeltaCatalog",
)
.getOrCreate()
)
# np.logspace yields floats; cast to int so spark.range gets an integer row count
for n in np.logspace(3, 8).astype(int):
path = f"tests/data/{str(uuid.uuid4())}/table1"
df = (
spark.range(0, n)
.withColumn("number", rand())
.withColumn("number2", when(col("id") < 500, 0).otherwise(1))
)
df.write.format("delta").mode("append").save(path)
table = DeltaTable(path)
t = time()
df_pandas = table.to_pandas()
t_dt = time() - t
t = time()
df_spark = spark.read.format("delta").load(table.path).toPandas()
t_spark = time() - t
print(f"{n},t_df,{t_dt}\n{n},t_spark,{t_spark}")
with open("performance_tests/results.txt", "a") as f:
print(f"{n},delta-lake-reader,{t_dt}", file=f)
print(f"{n},spark,{t_spark}", file=f)
shutil.rmtree(path)
plt.style.use("fivethirtyeight")
df = pd.read_csv("performance_tests/results.txt")
df.columns = ["n", "type", "t"]
df.groupby(["n", "type"]).sum()["t"].unstack().plot()
plt.yscale("log")
plt.xscale("log")
plt.xlabel("Number of rows in table")
plt.ylabel("Time to load dataframe [s]")
plt.show()
|
11578905
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^$', 'tests.views.someview'),
)
|
11578950
|
print("bienvenido a nuestro cine \nIngrese su edad para consultar el precio del boleto \nSi desea dejar de agregar introduzca -1")
edad = int(input("Edad: "))
while edad >= 0:
if edad >= 0 and edad < 3:
print("El boleto es gratis")
elif edad >=3 and edad < 12:
print("El boleto cuesta $10")
else:
print("El boleto cuesta $15")
edad = int(input("Edad: "))
|
11578997
|
from http import HTTPStatus
import typing
class HTTPError(Exception):
"""Raised when an HTTP error occurs.
You can raise this within a view or an error handler to interrupt
request processing.
# Parameters
status (int or HTTPStatus):
the status code of the error.
detail (any):
extra detail information about the error. The exact rendering is
determined by the configured error handler for `HTTPError`.
# See Also
- [HTTP response status codes (MDN web docs)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status)
"""
__slots__ = ("_status", "detail")
def __init__(
self, status: typing.Union[int, HTTPStatus], detail: typing.Any = ""
):
if isinstance(status, int):
status = HTTPStatus( # pylint: disable=no-value-for-parameter
status
)
else:
assert isinstance(
status, HTTPStatus
), f"Expected int or HTTPStatus, got {type(status)}"
self._status = status
self.detail = detail
@property
def status_code(self) -> int:
"""Return the HTTP error's status code, e.g. `404`."""
return self._status.value
@property
def status_phrase(self) -> str:
"""Return the HTTP error's status phrase, e.g. `"Not Found"`."""
return self._status.phrase
@property
def title(self) -> str:
"""Return the HTTP error's title, e.g. `"404 Not Found"`."""
return f"{self.status_code} {self.status_phrase}"
def as_json(self) -> dict:
data = {"error": self.title, "status": self.status_code}
if self.detail:
data["detail"] = self.detail
return data
def __str__(self):
return self.title
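# A minimal usage sketch (not part of the original module); `find_item` and the
# view signature are hypothetical, for illustration only.
# def product_detail(req, res):
#     item = find_item(req.query_params.get("id"))
#     if item is None:
#         raise HTTPError(404, detail="item not found")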
|
11579020
|
import os
from betamax import Betamax
import pytest
import xbox
from xbox.tests import TestBase
class TestGamerProfile(TestBase):
def test_gamer_profile_init(self):
settings = [{
"id": "AppDisplayName",
"value": "FakeProfile"
}, {
"id": "DisplayPic",
"value": "http://compass.xbox.com/assets/70/52/7052948b-c50d-4850-baff-abbcad07b631.jpg?n=004.jpg"
}, {
"id": "Gamerscore",
"value": "22786"
}, {
"id": "Gamertag",
"value": "FakeProfile"
}, {
"id": "PublicGamerpic",
"value": "http://images-eds.xboxlive.com/image?url=z951ykn43p4FqWbbFvR2Ec.8vbDhj8G2Xe7JngaTToBrrCmIEEXHC9UNrdJ6P7KIFXxmxGDtE9Vkd62rOpb7JcGvME9LzjeruYo3cC50qVYelz5LjucMJtB5xOqvr7WR"
}, {
"id": "XboxOneRep",
"value": "GoodPlayer"
}]
gt = xbox.GamerProfile('fake-xuid', settings, {})
assert gt.gamertag == 'FakeProfile'
assert gt.gamerscore == '22786'
assert gt.gamerpic == 'http://images-eds.xboxlive.com/image?url=z951ykn43p4FqWbbFvR2Ec.8vbDhj8G2Xe7JngaTToBrrCmIEEXHC9UNrdJ6P7KIFXxmxGDtE9Vkd62rOpb7JcGvME9LzjeruYo3cC50qVYelz5LjucMJtB5xOqvr7WR'
assert len(gt.raw_json) == 0
def test_get_by_gamertag(self):
with Betamax(xbox.client.session) as vcr:
match_on = ['uri', 'method', 'headers', 'body']
os.environ['MS_LOGIN'] = '<EMAIL>'
os.environ['MS_PASSWD'] = 'password'
vcr.use_cassette(
'get_profile_by_gamertag_success',
match_on=match_on,
record_mode='never',
)
gt = xbox.GamerProfile.from_gamertag('joealcorn')
assert gt.gamertag == 'JoeAlcorn'
assert gt.gamerpic == 'http://images-eds.xboxlive.com/image?url=z951ykn43p4FqWbbFvR2Ec.8vbDhj8G2Xe7JngaTToBrrCmIEEXHC9UNrdJ6P7KIFXxmxGDtE9Vkd62rOpb7JcGvME9LzjeruYo3cC50qVYelz5LjucMJtB5xOqvr7WR'
assert gt.xuid == '2533274812246958'
assert gt.gamerscore == '22791'
assert gt.xuid in repr(gt)
assert gt.gamertag in repr(gt)
assert gt.raw_json
def test_get_by_xuid(self):
with Betamax(xbox.client.session) as vcr:
match_on = ['uri', 'method', 'headers', 'body']
os.environ['MS_LOGIN'] = '<EMAIL>'
os.environ['MS_PASSWD'] = 'password'
vcr.use_cassette(
'get_profile_by_xuid_success',
match_on=match_on,
record_mode='never',
)
gt = xbox.GamerProfile.from_xuid('2533274812246958')
assert gt.gamertag == 'JoeAlcorn'
assert gt.gamerpic == 'http://images-eds.xboxlive.com/image?url=z951ykn43p4FqWbbFvR2Ec.8vbDhj8G2Xe7JngaTToBrrCmIEEXHC9UNrdJ6P7KIFXxmxGDtE9Vkd62rOpb7JcGvME9LzjeruYo3cC50qVYelz5LjucMJtB5xOqvr7WR'
assert gt.xuid == '2533274812246958'
assert gt.gamerscore == '22791'
assert gt.xuid in repr(gt)
assert gt.gamertag in repr(gt)
assert gt.raw_json
def test_get_by_xuid_failure(self):
with Betamax(xbox.client.session) as vcr:
match_on = ['uri', 'method', 'headers', 'body']
os.environ['MS_LOGIN'] = '<EMAIL>'
os.environ['MS_PASSWD'] = 'password'
vcr.use_cassette(
'get_profile_by_xuid_failure',
match_on=match_on,
record_mode='once',
)
with pytest.raises(xbox.exceptions.GamertagNotFound):
xbox.GamerProfile.from_xuid('0000000000000000')
def test_get_by_gamertag_failure(self):
with Betamax(xbox.client.session) as vcr:
match_on = ['uri', 'method', 'headers', 'body']
os.environ['MS_LOGIN'] = '<EMAIL>'
os.environ['MS_PASSWD'] = 'password'
vcr.use_cassette(
'get_profile_by_gamertag_failure',
match_on=match_on,
record_mode='once',
)
with pytest.raises(xbox.exceptions.GamertagNotFound):
xbox.GamerProfile.from_gamertag('2533274812246958')
|
11579049
|
from icevision.models.fastai.unet import backbones
from icevision.models.fastai.unet.dataloaders import *
from icevision.models.fastai.unet.model import *
from icevision.models.fastai.unet.prediction import *
from icevision.models.fastai.unet.show_results import *
from icevision.soft_dependencies import SoftDependencies
if SoftDependencies.fastai:
from icevision.models.fastai.unet import fastai
if SoftDependencies.pytorch_lightning:
from icevision.models.fastai.unet import lightning
|
11579080
|
import uuid
from datetime import timedelta
from typing import Any, List, Optional, Tuple
import pytest
import temporalio.api.enums.v1
import temporalio.api.workflowservice.v1
import temporalio.common
import temporalio.exceptions
from temporalio.client import (
CancelWorkflowInput,
Client,
Interceptor,
OutboundInterceptor,
QueryWorkflowInput,
RPCError,
RPCStatusCode,
SignalWorkflowInput,
StartWorkflowInput,
TerminateWorkflowInput,
WorkflowContinuedAsNewError,
WorkflowExecutionStatus,
WorkflowFailureError,
WorkflowHandle,
WorkflowQueryRejectedError,
)
from tests.helpers.worker import (
ExternalWorker,
KSAction,
KSContinueAsNewAction,
KSErrorAction,
KSQueryHandlerAction,
KSResultAction,
KSSignalAction,
KSSleepAction,
KSWorkflowParams,
)
async def test_start_id_reuse(client: Client, worker: ExternalWorker):
# Run to return "some result"
id = str(uuid.uuid4())
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[KSAction(result=KSResultAction(value="some result"))]
),
id=id,
task_queue=worker.task_queue,
)
assert "some result" == await handle.result()
# Run again with reject duplicate
with pytest.raises(RPCError) as err:
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[KSAction(result=KSResultAction(value="some result 2"))]
),
id=id,
task_queue=worker.task_queue,
id_reuse_policy=temporalio.common.WorkflowIDReusePolicy.REJECT_DUPLICATE,
)
await handle.result()
assert err.value.status == RPCStatusCode.ALREADY_EXISTS
# Run again allowing duplicate (the default)
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[KSAction(result=KSResultAction(value="some result 3"))]
),
id=id,
task_queue=worker.task_queue,
)
assert "some result 3" == await handle.result()
async def test_start_with_signal(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(action_signal="my-signal"),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
start_signal="my-signal",
start_signal_args=[KSAction(result=KSResultAction(value="some signal arg"))],
)
assert "some signal arg" == await handle.result()
async def test_result_follow_continue_as_new(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[
KSAction(continue_as_new=KSContinueAsNewAction(while_above_zero=1)),
KSAction(result=KSResultAction(run_id=True)),
],
),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
final_run_id = await handle.result()
assert len(final_run_id) > 5 and handle.run_id != final_run_id
# Get a handle and check result without following and confirm
# continue-as-new error
with pytest.raises(WorkflowContinuedAsNewError) as err:
await handle.result(follow_runs=False)
assert err.value.new_execution_run_id == final_run_id
async def test_workflow_failed(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[
KSAction(
error=KSErrorAction(
message="some error", details={"foo": "bar", "baz": 123.45}
)
)
],
),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
with pytest.raises(WorkflowFailureError) as err:
await handle.result()
assert isinstance(err.value.cause, temporalio.exceptions.ApplicationError)
assert str(err.value.cause) == "some error"
assert list(err.value.cause.details)[0] == {"foo": "bar", "baz": 123.45}
async def test_cancel(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(actions=[KSAction(sleep=KSSleepAction(millis=50000))]),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.cancel()
with pytest.raises(WorkflowFailureError) as err:
await handle.result()
assert isinstance(err.value.cause, temporalio.exceptions.CancelledError)
async def test_terminate(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(actions=[KSAction(sleep=KSSleepAction(millis=50000))]),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.terminate("arg1", "arg2", reason="some reason")
with pytest.raises(WorkflowFailureError) as err:
await handle.result()
assert isinstance(err.value.cause, temporalio.exceptions.TerminatedError)
assert str(err.value.cause) == "some reason"
assert list(err.value.cause.details) == ["arg1", "arg2"]
async def test_cancel_not_found(client: Client):
with pytest.raises(RPCError) as err:
await client.get_workflow_handle("does-not-exist").cancel()
assert err.value.status == RPCStatusCode.NOT_FOUND
async def test_describe(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(actions=[KSAction(result=KSResultAction(value="some value"))]),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
assert "some value" == await handle.result()
desc = await handle.describe()
assert desc.status == WorkflowExecutionStatus.COMPLETED
assert (
desc.raw_message.workflow_execution_info.status
== temporalio.api.enums.v1.WorkflowExecutionStatus.WORKFLOW_EXECUTION_STATUS_COMPLETED
)
async def test_query(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[KSAction(query_handler=KSQueryHandlerAction(name="some query"))]
),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.result()
assert "some query arg" == await handle.query("some query", "some query arg")
# Try a query not on the workflow
with pytest.raises(RPCError) as err:
await handle.query("does not exist")
# TODO(cretz): Is this the status we expect all SDKs to report?
assert err.value.status == RPCStatusCode.INVALID_ARGUMENT
async def test_query_rejected(client: Client, worker: ExternalWorker):
# Make a queryable workflow that waits on a signal
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[
KSAction(query_handler=KSQueryHandlerAction(name="some query")),
KSAction(signal=KSSignalAction(name="some signal")),
],
),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
# Confirm we can query w/ a not-open rejection condition since it's still
# open
assert "some query arg" == await handle.query(
"some query",
"some query arg",
reject_condition=temporalio.common.QueryRejectCondition.NOT_OPEN,
)
# But if we signal then wait for result, that same query should fail
await handle.signal("some signal", "some signal arg")
await handle.result()
with pytest.raises(WorkflowQueryRejectedError) as err:
assert "some query arg" == await handle.query(
"some query",
"some query arg",
reject_condition=temporalio.common.QueryRejectCondition.NOT_OPEN,
)
assert err.value.status == WorkflowExecutionStatus.COMPLETED
async def test_signal(client: Client, worker: ExternalWorker):
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(action_signal="some signal"),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.signal(
"some signal",
KSAction(result=KSResultAction(value="some signal arg")),
)
assert "some signal arg" == await handle.result()
async def test_retry_policy(client: Client, worker: ExternalWorker):
# Make the workflow retry 3 times w/ no real backoff
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(actions=[KSAction(error=KSErrorAction(attempt=True))]),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
retry_policy=temporalio.common.RetryPolicy(
initial_interval=timedelta(milliseconds=1),
maximum_attempts=3,
),
)
with pytest.raises(WorkflowFailureError) as err:
await handle.result()
assert isinstance(err.value.cause, temporalio.exceptions.ApplicationError)
assert str(err.value.cause) == "attempt 3"
async def test_single_client_config_change(client: Client, worker: ExternalWorker):
# Make sure normal query works on completed workflow
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[KSAction(query_handler=KSQueryHandlerAction(name="some query"))]
),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.result()
assert "some query arg" == await handle.query("some query", "some query arg")
# Now create a client with the rejection condition changed to not open
config = client.config()
config[
"default_workflow_query_reject_condition"
] = temporalio.common.QueryRejectCondition.NOT_OPEN
reject_client = Client(**config)
with pytest.raises(WorkflowQueryRejectedError):
await reject_client.get_workflow_handle(handle.id).query(
"some query", "some query arg"
)
class TracingClientInterceptor(Interceptor):
def intercept_client(self, next: OutboundInterceptor) -> OutboundInterceptor:
self.traces: List[Tuple[str, Any]] = []
return TracingClientOutboundInterceptor(self, next)
class TracingClientOutboundInterceptor(OutboundInterceptor):
def __init__(
self,
parent: TracingClientInterceptor,
next: OutboundInterceptor,
) -> None:
super().__init__(next)
self._parent = parent
async def start_workflow(
self, input: StartWorkflowInput
) -> WorkflowHandle[Any, Any]:
self._parent.traces.append(("start_workflow", input))
return await super().start_workflow(input)
async def cancel_workflow(self, input: CancelWorkflowInput) -> None:
self._parent.traces.append(("cancel_workflow", input))
return await super().cancel_workflow(input)
async def query_workflow(self, input: QueryWorkflowInput) -> Any:
self._parent.traces.append(("query_workflow", input))
return await super().query_workflow(input)
async def signal_workflow(self, input: SignalWorkflowInput) -> None:
self._parent.traces.append(("signal_workflow", input))
return await super().signal_workflow(input)
async def terminate_workflow(self, input: TerminateWorkflowInput) -> None:
self._parent.traces.append(("terminate_workflow", input))
return await super().terminate_workflow(input)
async def test_interceptor(client: Client, worker: ExternalWorker):
# Create new client from existing client but with a tracing interceptor
interceptor = TracingClientInterceptor()
config = client.config()
config["interceptors"] = [interceptor]
client = Client(**config)
# Do things that would trigger the interceptors
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(
actions=[
KSAction(query_handler=KSQueryHandlerAction(name="some query")),
KSAction(signal=KSSignalAction(name="some signal")),
],
),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.query("some query", "some query arg")
await handle.signal("some signal")
await handle.result()
await handle.cancel()
# Ignore this error
with pytest.raises(RPCError):
await handle.terminate()
# Check trace
assert len(interceptor.traces) == 5
assert interceptor.traces[0][0] == "start_workflow"
assert interceptor.traces[0][1].workflow == "kitchen_sink"
assert interceptor.traces[1][0] == "query_workflow"
assert interceptor.traces[1][1].query == "some query"
assert interceptor.traces[2][0] == "signal_workflow"
assert interceptor.traces[2][1].signal == "some signal"
assert interceptor.traces[3][0] == "cancel_workflow"
assert interceptor.traces[3][1].id == handle.id
assert interceptor.traces[4][0] == "terminate_workflow"
assert interceptor.traces[4][1].id == handle.id
async def test_interceptor_callable(client: Client, worker: ExternalWorker):
# Create new client from existing client but with a tracing interceptor
# callable and only check a simple call
interceptor = TracingClientInterceptor()
config = client.config()
config["interceptors"] = [interceptor.intercept_client]
client = Client(**config)
handle = await client.start_workflow(
"kitchen_sink",
KSWorkflowParams(),
id=str(uuid.uuid4()),
task_queue=worker.task_queue,
)
await handle.result()
# Check trace
assert interceptor.traces[0][0] == "start_workflow"
assert interceptor.traces[0][1].workflow == "kitchen_sink"
async def test_tls_config(tls_client: Optional[Client]):
if not tls_client:
pytest.skip("No TLS client")
resp = await tls_client.service.describe_namespace(
temporalio.api.workflowservice.v1.DescribeNamespaceRequest(
namespace=tls_client.namespace
)
)
assert resp.namespace_info.name == tls_client.namespace
|
11579154
|
from typing import Callable
from web3.contract import Contract
from raiden_contracts.constants import TEST_SETTLE_TIMEOUT_MIN, ChannelEvent
from raiden_contracts.tests.utils import call_and_transact
from raiden_contracts.utils.events import check_channel_opened, check_new_deposit
def test_channel_open_with_deposit_basics(
get_accounts: Callable,
token_network: Contract,
assign_tokens: Callable,
) -> None:
"""Some basic checks that `open_withDeposit` works.
Detailed tests exist for `openChannel` and `setTotalDeposit`, and as `openChannelWithDeposit`
is a simple wrapper, these don't need to be duplicated.
"""
(A, B, C, D) = get_accounts(4)
deposit = 100
# Check channel creation by participant
assign_tokens(A, deposit)
call_and_transact(
token_network.functions.openChannelWithDeposit(A, B, TEST_SETTLE_TIMEOUT_MIN, deposit),
{"from": A},
)
assert token_network.functions.getChannelIdentifier(A, B).call() == 1
assert token_network.functions.getChannelParticipantInfo(1, A, B).call()[0] == 100
assert token_network.functions.getChannelParticipantInfo(1, B, A).call()[0] == 0
# Check channel creation by delegate
assign_tokens(D, deposit)
call_and_transact(
token_network.functions.openChannelWithDeposit(B, C, TEST_SETTLE_TIMEOUT_MIN, deposit),
{"from": D},
)
assert token_network.functions.getChannelIdentifier(B, C).call() == 2
assert token_network.functions.getChannelParticipantInfo(2, B, C).call()[0] == 100
assert token_network.functions.getChannelParticipantInfo(2, C, B).call()[0] == 0
def test_channel_open_with_deposit_events(
get_accounts: Callable,
token_network: Contract,
event_handler: Callable,
assign_tokens: Callable,
) -> None:
"""A successful openChannelWithDeposit() causes an OPENED and DEPOSIT event"""
ev_handler = event_handler(token_network)
(A, B) = get_accounts(2)
deposit = 100
assign_tokens(A, deposit)
txn_hash = call_and_transact(
token_network.functions.openChannelWithDeposit(A, B, TEST_SETTLE_TIMEOUT_MIN, deposit),
{"from": A},
)
channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()
ev_handler.add(
txn_hash,
ChannelEvent.OPENED,
check_channel_opened(channel_identifier, A, B, TEST_SETTLE_TIMEOUT_MIN),
)
ev_handler.add(
txn_hash,
ChannelEvent.DEPOSIT,
check_new_deposit(channel_identifier, A, deposit),
)
ev_handler.check()
|
11579183
|
from pytest_testrail.plugin import pytestrail
@pytestrail.case("C1")
@pytestrail.case("C1")
def test_with_multiple_pytestrail_decorator() -> None:
assert True
|
11579200
|
from gym_malware.envs.malware_env import MalwareEnv
from gym_malware.envs.malware_score_env import MalwareScoreEnv
from gym_malware.envs import utils
|
11579235
|
import shutil
import os.path
from paraview.simple import *
from paraview import smtesting
smtesting.ProcessCommandLineArguments()
canex2 = OpenDataFile(smtesting.DataDir + "/Testing/Data/can.ex2")
# get animation scene
animationScene1 = GetAnimationScene()
# update animation scene based on data timesteps
animationScene1.UpdateAnimationUsingDataTimeSteps()
dname = os.path.join(smtesting.TempDir, "reload_reader")
shutil.rmtree(dname, ignore_errors=True)
# Save 10 timesteps.
fnames = []
for ts in range(10):
fname = os.path.join(dname, "can_%d.vtpc" % ts)
fnames.append(fname)
SaveData(fname, proxy=canex2)
animationScene1.GoToNext()
canvtms = OpenDataFile(fnames)
assert len(canvtms.TimestepValues) == len(fnames)
for ts in range(10,20):
fname = os.path.join(dname, "can_%d.vtpc" % ts)
fnames.append(fname)
SaveData(fname, proxy=canex2)
animationScene1.GoToNext()
ExtendFileSeries(canvtms)
assert len(canvtms.TimestepValues) == len(fnames)
# Remove temp dir if the test passed.
shutil.rmtree(dname, ignore_errors=True)
|
11579249
|
import redis
range=100
factor=32
port=6666
r = redis.StrictRedis(host='localhost', port=port, db=0, password='<PASSWORD>')
# string
rst = r.set('foo', 2) # update old
assert rst
rst = r.set('foo2', 2) # add new
assert rst
rst = r.setex('foo_ex', 7200, 2)
assert rst
# zset
rst = r.zadd('zfoo', 4, 'd')
assert(rst == 1)
rst = r.zrem('zfoo', 'd')
assert(rst == 1)
# list
rst = r.lset('lfoo', 0, 'a')
assert(rst == 1)
rst = r.rpush('lfoo', 'a')
assert(rst == 5)
rst = r.lpush('lfoo', 'b')
assert(rst == 6)
rst = r.lpop('lfoo')
assert(rst == 'b')
rst = r.rpop('lfoo')
assert(rst == 'a')
rst = r.ltrim('lfoo', 0, 2)
assert rst
# set
rst = r.sadd('sfoo', 'f')
assert(rst == 1)
rst = r.srem('sfoo', 'f')
assert(rst == 1)
# hash
rst = r.hset('hfoo', 'b', 2)
assert(rst == 1)
rst = r.hdel('hfoo', 'b')
assert(rst == 1)
# bitmap
rst = r.setbit('bfoo', 0, 0) # update old
assert(rst == 1)
rst = r.setbit('bfoo', 900000, 1) # add new
assert(rst == 0)
# expire cmd
rst = r.expire('foo', 7200)
assert rst
rst = r.expire('zfoo', 7200)
assert rst
# del cmd
rst = r.delete('foo')
assert rst
rst = r.delete('zfoo')
assert rst
|
11579347
|
from unittest import TestCase
import numpy as np
from qilib.data_set import DataArray, DataSet
from scipy.signal import sawtooth
from qtt.measurements.post_processing import ProcessSawtooth1D
class TestProcessSawtooth1D(TestCase):
def test_run_process_has_correct_shape(self):
sample_rate = 21e7
width = 0.9375
resolution = 64
period = resolution / sample_rate
sawtooth_1d_processor = ProcessSawtooth1D()
data_set = self.__dummy_data_set(period, sample_rate, width, resolution)
output_result = sawtooth_1d_processor.run_process(data_set)
image_shape = np.multiply(resolution, width)
data_array = next(iter(output_result.data_arrays.values()))
data_shape = data_array.T.shape
self.assertEqual(image_shape, data_shape)
@staticmethod
def __dummy_time_data(period, sample_rate):
return np.linspace(0, period, int(np.rint(period * sample_rate)))
@staticmethod
def __dummy_scope_data(time_data, sawteeth_count, period, width):
return sawtooth(2 * np.pi * sawteeth_count * time_data / period, width)
@staticmethod
def __dummy_data_array(set_array: DataArray, scope_data: np.ndarray, channel_index: int = 1, trace_number: int = 1):
        identifier = 'ScopeTrace_{:03d}'.format(trace_number)
        label = 'Channel_{}'.format(channel_index)
        return DataArray(identifier, label, preset_data=scope_data, set_arrays=[set_array])
@staticmethod
def __dummy_data_set(period, sample_rate, width, resolution):
time_data = TestProcessSawtooth1D.__dummy_time_data(period, sample_rate)
set_array = DataArray('ScopeTime', 'Time', unit='seconds', is_setpoint=True, preset_data=time_data)
scope_data_1 = TestProcessSawtooth1D.__dummy_scope_data(time_data, resolution, period, width)
data_array_1 = TestProcessSawtooth1D.__dummy_data_array(set_array, scope_data_1, channel_index=1,
trace_number=1)
data_set = DataSet()
data_set.user_data = {'resolution': resolution, 'width': width}
data_set.add_array(data_array_1)
return data_set
|
11579353
|
import unittest
from rl_starterpack import OpenAIGym, RandomAgent, experiment
class TestRandomAgent(unittest.TestCase):
def test_cartpole(self):
env = OpenAIGym(level='CartPole', max_timesteps=100)
agent = RandomAgent(state_space=env.state_space, action_space=env.action_space)
experiment.train(agent=agent, env=env, num_episodes=10)
experiment.evaluate(agent=agent, env=env, num_episodes=10)
agent.close()
env.close()
def test_pendulum(self):
env = OpenAIGym(level='Pendulum', max_timesteps=100)
agent = RandomAgent(state_space=env.state_space, action_space=env.action_space)
experiment.train(agent=agent, env=env, num_episodes=10)
experiment.evaluate(agent=agent, env=env, num_episodes=10)
agent.close()
env.close()
|
11579437
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
dtSummaryClients = DQMEDHarvester("DTSummaryClients")
|
11579465
|
import pandas as pd
import os
import pickle
import logging
from tqdm import tqdm
import sys
from flashtext import KeywordProcessor
import joblib
import multiprocessing
import numpy as np
import urllib.request
import zipfile
import hashlib
import json
from .flags import flags
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)-5.5s] [%(name)-12.12s]: %(message)s')
log = logging.getLogger(__name__)
class Geocode():
def __init__(self, min_population_cutoff=30000, large_city_population_cutoff=200000, location_types=None):
self.kp = None
self.geo_data = None
self.min_population_cutoff = min_population_cutoff
self.large_city_population_cutoff = large_city_population_cutoff
self.geo_data_field_names = ['name', 'official_name', 'country_code', 'longitude', 'latitude', 'geoname_id', 'location_type', 'population']
self.default_location_types = ['city', 'place', 'country', 'admin1', 'admin2', 'admin3', 'admin4', 'admin5', 'admin6', 'admin_other', 'continent', 'region']
self.location_types = self._get_location_types(location_types)
self.argument_hash = self.get_arguments_hash()
def load(self, recompute=False):
if recompute or not (os.path.isfile(self.geonames_pickle_path) and os.path.isfile(self.keyword_processor_pickle_path)):
# geonames data
log.info('Recomputing pickle files...')
if os.path.isfile(self.geonames_pickle_path) and not recompute:
log.info('Pickled geonames file is already present!')
else:
self.create_geonames_pickle()
# keyword processor
if os.path.isfile(self.keyword_processor_pickle_path) and not recompute:
log.info('Pickled keyword processor file is already present!')
else:
self.create_keyword_processor_pickle()
# load data into memory
self.kp = self.get_keyword_processor_pickle()
self.geo_data = self.get_geonames_pickle()
def get_geonames_data(self):
geonames_data_path = self.get_cache_path('allCountries.txt')
if not os.path.isfile(geonames_data_path):
# download file
url = 'https://download.geonames.org/export/dump/allCountries.zip'
log.info(f'Downloading data from {url}')
geonames_data_path_zip = self.get_cache_path('allCountries.zip')
urllib.request.urlretrieve(url, geonames_data_path_zip)
log.info(f'... done')
log.info('Extracting data...')
# extract
with zipfile.ZipFile(geonames_data_path_zip, 'r') as f:
f.extractall(self.data_dir)
log.info('...done')
# remove zip file
os.remove(geonames_data_path_zip)
log.info(f'Reading data from {geonames_data_path}...')
dtypes = {'name': str, 'latitude': float, 'longitude': float, 'country_code': str, 'population': int, 'feature_code': str, 'alternatenames': str, 'geoname_id': str}
geonames_columns = ['geoname_id', 'name', 'asciiname', 'alternatenames', 'latitude', 'longitude', 'feature_class', 'feature_code', 'country_code', 'cc2', 'admin1', 'admin2', 'admin3', 'admin4', 'population', 'elevation', 'dem', 'timezone', 'modification_date']
df = pd.read_csv(geonames_data_path, names=geonames_columns, sep='\t', dtype=dtypes, usecols=dtypes.keys())
# remove data file
os.remove(geonames_data_path)
return df
def get_feature_names_data(self):
feature_code_path = self.get_cache_path('featureCodes_en.txt')
if not os.path.isfile(feature_code_path):
# download file
url = 'https://download.geonames.org/export/dump/featureCodes_en.txt'
log.info(f'Downloading data from {url}')
urllib.request.urlretrieve(url, feature_code_path)
log.info(f'... done')
log.info(f'Reading data from {feature_code_path}...')
df_features = pd.read_csv(feature_code_path, sep='\t', names=['feature_code', 'description-short', 'description-long'])
df_features['feature_code_class'] = ''
df_features.loc[:, ['feature_code_class', 'feature_code']] = df_features.feature_code.str.split('.', expand=True).values
# remove data file
os.remove(feature_code_path)
return df_features
@property
def geonames_pickle_path(self):
cache_path = self.get_cache_path(f'geonames_{self.argument_hash}.pkl')
return cache_path
@property
def keyword_processor_pickle_path(self):
cache_path = self.get_cache_path(f'geonames_keyword_processor_{self.argument_hash}.pkl')
return cache_path
@property
def data_dir(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(current_dir, 'data')
def get_cache_path(self, name):
if not os.path.isdir(self.data_dir):
os.makedirs(self.data_dir)
return os.path.join(self.data_dir, name)
def get_geonames_pickle(self):
with open(self.geonames_pickle_path, 'rb') as f:
df = pickle.load(f)
return df
def get_keyword_processor_pickle(self):
with open(self.keyword_processor_pickle_path, 'rb') as f:
kp = pickle.load(f)
return kp
def create_geonames_pickle(self):
"""Create list of place/country data from geonames data and sort according to priorities"""
def is_ascii(s):
try:
s.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
log.info('Reading geo data...')
df = self.get_geonames_data()
# Global filtering
log.info('Reading feature class data...')
df_features = self.get_feature_names_data()
df = df.merge(df_features, on='feature_code', how='left')
df.loc[df.geoname_id == '3355338', 'country_code'] = 'NA' # strangely, Namibia is missing the country_code
# Apply the following filters:
# - only keep places with feature class A (admin) and P (place), and CONT (continent)
df = df[
(df.feature_code_class.isin(['A', 'P'])) |
(df.feature_code.isin(['CONT', 'RGN']))
]
# - remove everything below min_population_cutoff
df = df[
(df['population'] > self.min_population_cutoff) |
(df.feature_code.isin(['CONT', 'RGN']))
]
# - get rid of items without a country code, usually administrative zones without country codes (e.g. "The Commonwealth")
df = df[
(~df.country_code.isnull()) |
(df.feature_code.isin(['CONT', 'RGN']))
]
# - remove certain administrative regions (such as zones, historical divisions, territories)
df = df[~df.feature_code.isin(['ZN', 'PCLH', 'TERR'])]
# Expansion of altnames
df['official_name'] = df['name']
# - expand alternate names
df.loc[:, 'alternatenames'] = df.alternatenames.str.split(',')
df['is_altname'] = False
_df = df.explode('alternatenames')
_df['name'] = _df['alternatenames']
_df['is_altname'] = True
df = pd.concat([df, _df])
df = df.drop(columns=['alternatenames'])
log.info(f'... read a total of {len(df):,} location names')
# Filtering applied to names and altnames
log.info('Apply filters...')
# - remove all names that are floats/ints
df['is_str'] = df.name.apply(lambda s: isinstance(s, str))
df = df[df['is_str']]
# - only allow 2 character names if 1) name is non-ascii (e.g. Chinese characters) 2) is an alternative name for a country (e.g. UK)
# 3) is a US state or Canadian province
df['is_country'] = df.feature_code.str.startswith('PCL')
df['is_ascii'] = df.name.apply(is_ascii)
# add "US" manually since it's missing in geonames
row_usa = df[df.is_country & (df.name == 'USA')].iloc[0]
row_usa['name'] = 'US'
df = df.append(row_usa)
df = df[
(~df.is_ascii) |
(df.name.str.len() > 2) |
((df.name.str.len() == 2) & (df.country_code == 'US')) |
((df.name.str.len() == 2) & (df.country_code == 'CA')) |
((df.name.str.len() == 2) & (df.is_country))
]
# - altnames need to have at least 4 characters (removes e.g. 3-letter codes)
df = df[~(
(~df.is_country) &
(~df.country_code.isin(['US', 'CA'])) &
(df.is_ascii) &
(df.is_altname) &
(df.name.str.len() < 4)
)]
# - remove altnames of insignificant admin levels and of places that are very small
# set admin level
df['admin_level'] = None
df.loc[df.feature_code.isin(['PCLI', 'PCLD', 'PCLF', 'PCLS', 'PCLIX', 'PCLX', 'PCL']), 'admin_level'] = 0
for admin_level in range(1, 6):
df.loc[df.feature_code.isin([f'ADM{admin_level}', f'ADM{admin_level}H']), 'admin_level'] = admin_level
df = df[
~(
((df.is_altname) & (df.admin_level.isin([3,4,5]))) |
((df.is_altname) & (df.feature_code_class == 'P') & (df.population < 100000))
)
]
# Add flags
df_countries = df[(df.geoname_id.isin([str(v) for v in flags.values()])) & (~df.is_altname)].copy()
for flag, geoname_id in flags.items():
try:
row = df_countries[(df_countries.geoname_id == str(geoname_id))].iloc[0].copy()
except IndexError:
pass
else:
row['name'] = flag
df = df.append(row)
# Sort by priorities and drop duplicate names
# Priorities
# 1) Large cities (population size > large_city_population_cutoff)
# 2) States/provinces (admin_level == 1)
# 3) Countries (admin_level = 0)
# 4) Places
# 5) counties (admin_level > 1)
# 6) continents
# 7) regions
# (within each group we will sort according to population size)
# Assigning priorities
df['priority'] = np.nan
df.loc[(df.feature_code == 'RGN'), 'priority'] = 7
df.loc[(df.feature_code == 'CONT'), 'priority'] = 6
df.loc[(df.feature_code_class == 'A') & (df.admin_level > 1), 'priority'] = 5
df.loc[df.feature_code_class == 'P', 'priority'] = 4
df.loc[(df.feature_code_class == 'A') & (df.admin_level == 0), 'priority'] = 3
df.loc[(df.feature_code_class == 'A') & (df.admin_level == 1), 'priority'] = 2
df.loc[(df.population > self.large_city_population_cutoff) & (df.feature_code_class == 'P') & (~df.is_altname), 'priority'] = 1
# Sorting
log.info('Sorting by priority...')
df.sort_values(by=['priority', 'population'], ascending=[True, False], inplace=True)
# drop name duplicates by keeping only the high priority elements
df['name_lower'] = df['name'].str.lower()
df = df.drop_duplicates('name_lower', keep='first')
# set location_types
df['location_type'] = np.nan
for admin_level in range(1, 6):
df.loc[df.admin_level == admin_level, 'location_type'] = f'admin{admin_level}'
df.loc[df.feature_code == 'ADMD', 'location_type'] = 'admin_other'
df.loc[df.admin_level == 0, 'location_type'] = 'country'
df.loc[(df.population <= self.large_city_population_cutoff) & (df.feature_code_class == 'P'), 'location_type'] = 'place'
df.loc[(df.population > self.large_city_population_cutoff) & (df.feature_code_class == 'P'), 'location_type'] = 'city'
df.loc[df.feature_code == 'CONT', 'location_type'] = 'continent'
df.loc[df.feature_code == 'RGN', 'location_type'] = 'region'
if len(df[df.location_type.isna()]) > 0:
log.warning(f'{len(df[df.location_type.isna()]):,} locations could not be matched to a location_type. These will be ignored.')
# filter by user-defined location types
df = df[df.location_type.isin(self.location_types)]
location_types_counts = dict(df.location_type.value_counts())
log.info(f'Collected a total of {len(df):,} location names. Breakdown by location types:')
for loc_type, loc_type_count in location_types_counts.items():
log.info(f'... type {loc_type}: {loc_type_count:,}')
# Write as pickled list
log.info(f'Writing geonames data to file {self.geonames_pickle_path}...')
df = df[self.geo_data_field_names].values.tolist()
with open(self.geonames_pickle_path, 'wb') as f:
pickle.dump(df, f)
def create_keyword_processor_pickle(self):
"""Builds trie lookup data structure for name lookup. This maps input names to position IDs."""
geo_data = self.get_geonames_pickle()
kp = KeywordProcessor()
log.info('Adding terms to keyword processor (building trie)...')
for i, item in tqdm(enumerate(geo_data), total=len(geo_data)):
idx = str(i)
kp.add_keyword(item[0], idx)
log.info(f'Writing keyword processor pickle to file {self.keyword_processor_pickle_path}...')
with open(self.keyword_processor_pickle_path, 'wb') as f:
pickle.dump(kp, f)
def decode(self, input_text):
matches = self.kp.extract_keywords(input_text)
if len(matches) == 0:
return []
# sort by priorities
matches = sorted(list(set(int(m) for m in matches)))
return [dict(zip(self.geo_data_field_names, self.geo_data[m])) for m in matches]
def decode_parallel(self, input_texts, num_cpus=None):
"""Run decode in parallel"""
def process_chunk(chunk, gc):
gc.load() # load pickles
results = []
for item in tqdm(chunk, total=len(chunk)):
res = gc.decode(item)
results.append(res)
return results
if num_cpus is None:
num_cpus = max(multiprocessing.cpu_count() - 1, 1)
else:
num_cpus = max(num_cpus, 1)
# remove pickles from memory
self.kp = None
self.geo_data = None
log.info(f'Running decode in parallel with {num_cpus} cores')
process_chunk_delayed = joblib.delayed(process_chunk)
result = joblib.Parallel(n_jobs=num_cpus)(process_chunk_delayed(chunk, self) for chunk in tqdm(np.array_split(input_texts, num_cpus)))
return [r for res in result for r in res]
def get_arguments_hash(self):
geocode_args = [str(self.min_population_cutoff), str(self.large_city_population_cutoff)] + self.location_types
return hashlib.sha256(','.join(geocode_args).encode()).hexdigest()[:15]
# private
def _get_location_types(self, location_types):
if location_types is None:
return self.default_location_types
_location_types = []
for _t in location_types:
if _t not in self.default_location_types:
log.warning(f'Location type {_t} will be ignored as it is not part of the default location types ({",".join(self.default_location_types)})')
continue
_location_types.append(_t)
return _location_types
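# Hedged usage sketch (illustrative, not part of the original module): the
# intended flow is load() once, which downloads the geonames dumps and builds
# the pickles on first use, then decode() per input text. The sample sentence
# and printed fields are assumptions based on geo_data_field_names above.
if __name__ == '__main__':
    gc = Geocode(min_population_cutoff=30000)
    gc.load()
    for match in gc.decode('Flight from Zurich to San Francisco'):
        print(match['name'], match['country_code'], match['location_type'])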
|
11579525
|
from string import ascii_lowercase as letters
def shift(seq, num):
    """
    Shift the items of a sequence (list or string) left by num positions and
    return them as a new list.
    """
    seq = [x for x in seq]
    for _ in range(num):
        first = seq[0]
        seq.pop(0)
        seq.append(first)
    return seq
def cipher(string, key):
"""
this function ciphers a string using the caesar's cipher method
string: the message to cipher
key: the number of times to shift the letters in the alphabet
it returns the ciphered string"""
nletters = shift(letters, key)
def cipher_word(string):
msg = []
for i in string:
index = letters.index(i)
i = nletters[index]
msg.append(i)
return "".join(msg)
sentence = string.split(" ")
nsentence = list(map(cipher_word, sentence))
return " ".join(nsentence)
if __name__ == "__main__":
# just a test
print(cipher("xyz", 2))
print(cipher("cat", 5))
|
11579546
|
import threading
import time
from dataclasses import asdict
from datetime import datetime
from typing import Optional, Tuple, List, Dict, Set
from uuid import uuid4 as uuid
from server.queue.fake.log_storage import FakeTaskLogStorage
from server.queue.fake.safe_observer import SafeObserver
from server.queue.framework import TaskQueue, BaseObserver, StatusFilterSpec, TaskLogStorage
from server.queue.model import Task, Request, TaskStatus
from server.queue.request_transformer import RequestTransformer
from server.queue.task_utils import task_status_filter
class FakeTaskQueue(TaskQueue):
def __init__(self, transformer: RequestTransformer, maxtasks=100):
self._transformer = transformer
self._tasks: Dict[str, Task] = {}
self._observers: Set[BaseObserver] = set()
self._lock = threading.RLock()
self._condition = threading.Condition(lock=self._lock)
self._maxtasks = maxtasks
self._log_storage = FakeTaskLogStorage()
@property
def log_storage(self) -> TaskLogStorage:
"""Get fake log storage."""
return self._log_storage
def dispatch(self, request: Request) -> Task:
"""Dispatch a new task."""
with self._lock:
task = self._make_task(request)
# Add task to pending list
self._tasks[task.id] = task
self._evict()
self._condition.notify()
# Notify observers
for observer in self._observers:
observer.on_task_sent(self._clone(task))
return self._clone(task)
def terminate(self, task_id: str):
"""Terminate task by id."""
with self._lock:
# Make sure task exists
task = self._tasks.get(task_id)
if task is None:
return
# Check if task is revocable
revocable = {TaskStatus.RUNNING, TaskStatus.PENDING}
if task.status not in revocable:
return
# Revoke task
task.status = TaskStatus.REVOKED
task.status_updated = datetime.now()
for observer in self._observers:
observer.on_task_revoked(self._clone(task))
def delete(self, task_id: str):
"""Delete task by id."""
with self._lock:
# Make sure task exists
task = self._tasks.get(task_id)
if task is None:
return
# Terminate and delete task
self.terminate(task.id)
del self._tasks[task.id]
self._log_storage.delete_logs(task_id)
for observer in self._observers:
observer.on_task_deleted(task.id)
def get_task(self, task_id: str) -> Optional[Task]:
"""Get task by id."""
with self._lock:
task = self._tasks.get(task_id)
if task is not None:
return self._clone(task)
def list_tasks(self, status: Optional[StatusFilterSpec] = None, offset=0, limit=None) -> Tuple[List, int]:
"""List tasks with the given status."""
with self._lock:
selected_tasks = list(filter(task_status_filter(status), self._tasks.values()))
limit = limit or len(selected_tasks)
return selected_tasks[offset : offset + limit], len(selected_tasks)
def exists(self, task_id: str) -> bool:
"""Check if task with the given id exists."""
with self._lock:
return task_id in self._tasks
def observe(self, observer: BaseObserver):
"""Add observer to the queue notification list."""
with self._lock:
self._observers.add(SafeObserver(wrapped=observer))
def stop_observing(self, observer: BaseObserver):
"""Remove observer from the queue notification list."""
with self._lock:
if observer in self._observers:
self._observers.remove(observer)
def listen(self):
"""Listen for queue events and notify observers.
This is a blocking method, it should be executed in a background thread.
"""
while True:
task = self._select_and_start_task()
self._execute_task(task)
def _select_and_start_task(self) -> Task:
"""Select pending task."""
with self._condition:
selected = self._find_pending_task()
while selected is None:
self._condition.wait()
selected = self._find_pending_task()
selected.status = TaskStatus.RUNNING
selected.status_updated = datetime.now()
for observer in self._observers:
observer.on_task_started(self._clone(selected))
return selected
def _find_pending_task(self) -> Optional[Task]:
"""Try to find available pending task."""
with self._lock:
pending = filter(task_status_filter(TaskStatus.PENDING), self._tasks.values())
return min(pending, default=None, key=lambda task: task.created)
def _execute_task(self, task: Task):
"""Emulate task execution."""
total_seconds = 30
logs = self._log_storage.create_logs(task.id)
for i in range(total_seconds):
logs.append(f"[{datetime.now()} INFO] Extracting signature for file {i} of {total_seconds}\n")
time.sleep(1)
with self._lock:
if task.status is not TaskStatus.RUNNING:
logs.finish()
return
task.progress = float(i + 1) / total_seconds
for observer in self._observers:
observer.on_task_meta_updated(self._clone(task))
with self._lock:
logs.finish()
task.status = TaskStatus.SUCCESS
task.status_updated = datetime.now()
for observer in self._observers:
observer.on_task_succeeded(self._clone(task))
def _clone(self, task: Task) -> Task:
"""Clone task."""
result = Task(**asdict(task))
result.request = self._clone_req(task.request)
return result
def _clone_req(self, request: Request) -> Request:
"""Clone task request."""
return self._transformer.fromdict(request.asdict())
def _make_task(self, request: Request) -> Task:
"""Create a new task instance from the request."""
return Task(
id=str(uuid()),
created=datetime.now(),
status_updated=datetime.now(),
request=self._clone_req(request),
status=TaskStatus.PENDING,
)
def _evict(self):
"""Evict tasks exceeding limits."""
with self._lock:
evict_count = max(0, len(self._tasks) - self._maxtasks)
if evict_count == 0:
return
not_running = set(TaskStatus) - {TaskStatus.RUNNING}
inactive_tasks = list(filter(task_status_filter(not_running), self._tasks.values()))
inactive_tasks.sort(key=lambda task: task.created)
to_be_evicted = inactive_tasks[:evict_count]
for evicted_task in to_be_evicted:
del self._tasks[evicted_task.id]
self._log_storage.delete_logs(evicted_task.id)
for observer in self._observers:
observer.on_task_deleted(evicted_task.id)
|
11579604
|
import pytest
from multiprocessing import Pool
from covidsimulation.cache import get_signature
def get_parameters(arg):
from covidsimulation.regions.br_saopaulo import params as br_saopaulo_params
return br_saopaulo_params
def test_parameters_have_stable_signature():
p = Pool(2)
params = p.map(get_parameters, [0, 1])
assert get_signature(params[0]) == get_signature(params[1])
|
11579625
|
import os
from glob import glob
from typing import List, Tuple
import cv2
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
from tools.correlational_tracker.multi_tracker_with_merge_on_fly import initialize_tracker
from tools.correlational_tracker.multi_tracker_with_merge_on_fly import merge_boxes_using_distance
# from i3d_inception_net.testing.testing_with_volumes_on_the_fly import get_tracked_volumes
from tools.crf.IoU_test import get_bounding_boxes
from tools.video_tubes.remove_noisy_false_positives_by_tracking import get_video_wise_list
network_checkpoint = ''
# Folder containing all the video frames
FRAMES_FOLDER = ''
# Folder containing detection masks
DETECTION_MASKS_FOLDER = ''
# Folder where to output tracker output
TRACKER_OUTPUT_FOLDER = ''
frames_in_volume = 8
keyframe_number = frames_in_volume // 2
patch_dimensions = 100
def generate_patch_coords_from_box(height, width, box, patch_dimensions) -> Tuple[int, int, int, int]:
dimension_on_either_side = patch_dimensions // 2
x1, y1, w, h = box
# get center points
cx = x1 + (w // 2)
cy = y1 + (h // 2)
# get patch coordinates
p_x1 = cx - dimension_on_either_side
p_x1 = p_x1 if p_x1 > 0 else 0
p_y1 = cy - dimension_on_either_side
p_y1 = p_y1 if p_y1 > 0 else 0
p_x2 = cx + dimension_on_either_side
p_x2 = p_x2 if p_x2 < width else width
p_y2 = cy + dimension_on_either_side
p_y2 = p_y2 if p_y2 < height else height
return int(p_y1), int(p_x1), int(p_y2), int(p_x2)
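# Worked example (added note): for a 480x640 frame and box (x1=10, y1=10, w=20, h=20),
# the 100-pixel patch around the box center (20, 20) is clamped at the image border:
# generate_patch_coords_from_box(480, 640, (10, 10, 20, 20), 100) -> (0, 0, 70, 70).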
def get_patches_by_tracker(tracker, frames_list: List[np.ndarray]):
patches = []
for frame in frames_list:
ret, tracker_box = tracker.update(frame)
p_y1, p_x1, p_y2, p_x2 = generate_patch_coords_from_box(frame.shape[0], frame.shape[1],
tracker_box, patch_dimensions)
patches.append(frame[p_y1:p_y2, p_x1:p_x2])
return patches
def get_tracked_volumes(frames_list: List[np.ndarray], masks_list: List[np.ndarray], key_frame_number: int):
key_frame = frames_list[key_frame_number]
key_mask = masks_list[key_frame_number]
key_mask[key_mask > 0] = 255
# from frame 0 to less than key frame
previous_frames = frames_list[0:key_frame_number]
# reverse frames
previous_frames = previous_frames[-1::-1]
# from keyframe+1 to end
next_frames = frames_list[key_frame_number + 1:]
final_volumes = []
# get boxes for key frames
boxes = get_bounding_boxes(key_mask)
for box in boxes:
y1 = box['y1']
y2 = box['y2']
x1 = box['x1']
x2 = box['x2']
w = x2 - x1
h = y2 - y1
if w * h < 4 and (w <= 2 or h <= 2):
print('Box ignored due to small size')
continue
tracker = cv2.TrackerCSRT_create()
try:
tracker.init(key_frame, (x1, y1, w, h))
        except Exception:
print('Box ignored because tracker could not initialize')
continue
key_frame_patch_location = generate_patch_coords_from_box(key_frame.shape[0], key_frame.shape[1],
(x1, y1, w, h), patch_dimensions)
key_frame_patch = key_frame[key_frame_patch_location[0]:key_frame_patch_location[2],
key_frame_patch_location[1]:key_frame_patch_location[3]]
previous_patches = get_patches_by_tracker(tracker, previous_frames)
previous_patches = previous_patches[-1::-1]
next_patches = get_patches_by_tracker(tracker, next_frames)
final_volume = []
final_volume.extend(previous_patches)
final_volume.append(key_frame_patch)
final_volume.extend(next_patches)
final_volumes.append({
'location': key_frame_patch_location,
'volume': final_volume
})
return final_volumes
def get_tracked_mask(tracker, frame):
ret, boxes = tracker.update(frame)
new_mask = np.zeros(frame.shape[0:2], dtype=np.uint8)
# adding tracked boxes to new mask
for box in boxes:
box = [int(round(b)) for b in box]
box_x1, box_y1, box_w, box_h = box
box_x2 = box_x1 + box_w
box_y2 = box_y1 + box_h
new_mask[box_y1:box_y2, box_x1: box_x2] = 255
return new_mask
def track_frames(frames_list: List[str], detection_masks_folder: str, output_folder: str, model,
reverse_track: bool = False):
frames_list = frames_list.copy()
if reverse_track:
frames_list.sort(reverse=True)
description = 'Reverse tracking objects in frames'
else:
frames_list.sort()
description = 'Tracking objects in frames'
tracker = None
for frame_number in tqdm(list(range(0, len(frames_list) - 8)), desc=description, unit='frame'):
frame_path = frames_list[frame_number]
frame_filename = os.path.basename(frame_path)
frame_filename = os.path.splitext(frame_filename)[0] + '.png'
frame = cv2.imread(frame_path)
detection_path = os.path.join(detection_masks_folder, frame_filename)
if tracker is None:
try:
tracker = initialize_tracker(tracker, frame, detection_path)
            except Exception:
print('Code 0: Initialization failed')
continue
else:
# get tracked predictions in frame if tracker is already initialized
try:
tracked_mask = get_tracked_mask(tracker, frame)
            except Exception:
print('Code 1: Tracking failed')
tracker = initialize_tracker(tracker, frame, detection_path)
continue
output_path = os.path.join(output_folder, frame_filename)
# if tracking in reverse order
if reverse_track:
# if previous tracked output exists
if os.path.exists(output_path):
previous_tracked_mask = cv2.imread(output_path)
plt.figure('Previous tracked mask')
plt.imshow(previous_tracked_mask)
plt.figure('tracked mask')
plt.imshow(tracked_mask)
tracked_mask = merge_boxes_using_distance(previous_tracked_mask, tracked_mask, 200)
plt.figure('tracked and previous merged')
# get 8 frames
__frames_list = []
__masks_list = []
for i in range(frames_in_volume):
f_path = frames_list[frame_number + i]
__frames_list.append(cv2.imread(f_path))
f_filename = os.path.basename(f_path)
m_path = os.path.join(detection_masks_folder, f_filename[0:-4] + '.png')
__masks_list.append(cv2.imread(m_path))
__masks_list[keyframe_number] = tracked_mask
try:
tracked_volumes = get_tracked_volumes(__frames_list, __masks_list, keyframe_number)
        except Exception:
            print("Couldn't update tracker")
continue
for tracked_volume in tracked_volumes:
tv_location = tracked_volume['location']
tv_volume = tracked_volume['volume']
for tvi in range(len(tv_volume)):
tv_volume[tvi] = cv2.resize(tv_volume[tvi], (224, 224))
tv_volume = np.array(tv_volume)
segmentation_output, _ = model.predict_segmentation(
input_img_frames_patch_volume=tv_volume.copy(),
threshold=0.2,
)
segmentation_output = cv2.cvtColor(segmentation_output.astype(np.uint8), cv2.COLOR_BGR2GRAY)
p_y1, p_x1, p_y2, p_x2 = tv_location
p_w = p_x2 - p_x1
p_h = p_y2 - p_y1
plt.figure('Tracked mask 2')
plt.imshow(tracked_mask[p_y1:p_y2, p_x1:p_x2])
if np.max(segmentation_output) > 0:
segmentation_output = cv2.resize(segmentation_output, (p_w, p_h), interpolation=cv2.INTER_NEAREST)
tracked_mask[p_y1:p_y2, p_x1:p_x2] = segmentation_output
                plt.figure('segmentation output')
plt.imshow(segmentation_output)
else:
tracked_mask[p_y1:p_y2, p_x1:p_x2] = 0
cv2.imwrite(output_path, tracked_mask)
tracker = initialize_tracker(tracker, frame, detection_path, tracked_mask)
plt.show()
plt.close('all')
def main():
import keras_segmentation
os.makedirs(TRACKER_OUTPUT_FOLDER, exist_ok=True)
frame_paths = glob(os.path.join(FRAMES_FOLDER, '*'))
frame_paths.sort()
model = keras_segmentation.predict.model_from_checkpoint_given_path(network_checkpoint)
video_wise_frame_paths = get_video_wise_list(frame_paths)
for video_name in tqdm(video_wise_frame_paths.keys(), desc='Processing videos'):
video_frames = video_wise_frame_paths[video_name]
video_frames.sort()
# forward tracking
        track_frames(video_frames, DETECTION_MASKS_FOLDER, TRACKER_OUTPUT_FOLDER, model, False)
# reverse tracking
        track_frames(video_frames, DETECTION_MASKS_FOLDER, TRACKER_OUTPUT_FOLDER, model, True)
if __name__ == '__main__':
main()
|
11579653
|
import unittest
from sacrerouge.common.testing.util import sacrerouge_command_exists
class TestDUC2004Subcommand(unittest.TestCase):
def test_command_exists(self):
assert sacrerouge_command_exists(['setup-dataset', 'duc2004'])
|
11579658
|
from unittest import mock
import pytest
from asserts import assert_cli_runner
from meltano.cli import cli
from meltano.core.plugin import PluginType
from meltano.core.project_add_service import PluginAlreadyAddedException
class TestCliRemove:
@pytest.fixture(scope="class")
def tap_gitlab(self, project_add_service):
try:
return project_add_service.add(PluginType.EXTRACTORS, "tap-gitlab")
except PluginAlreadyAddedException as err:
return err.plugin
def test_remove(self, project, tap, cli_runner):
with mock.patch("meltano.cli.remove.remove_plugins") as remove_plugins_mock:
result = cli_runner.invoke(cli, ["remove", tap.type, tap.name])
assert_cli_runner(result)
remove_plugins_mock.assert_called_once_with(project, [tap])
def test_remove_multiple(self, project, tap, tap_gitlab, cli_runner):
with mock.patch("meltano.cli.remove.remove_plugins") as remove_plugins_mock:
result = cli_runner.invoke(
cli, ["remove", "extractors", tap.name, tap_gitlab.name]
)
assert_cli_runner(result)
remove_plugins_mock.assert_called_once_with(project, [tap, tap_gitlab])
def test_remove_type_name(self, project, tap, target, cli_runner):
with mock.patch("meltano.cli.remove.remove_plugins") as remove_plugins_mock:
result = cli_runner.invoke(cli, ["remove", "extractor", tap.name])
assert_cli_runner(result)
remove_plugins_mock.assert_called_with(project, [tap])
result = cli_runner.invoke(cli, ["remove", "loader", target.name])
assert_cli_runner(result)
remove_plugins_mock.assert_called_with(project, [target])
assert remove_plugins_mock.call_count == 2
|
11579675
|
import torch
import numpy as np
import sys
import torch.nn.functional as torch_nn_func
class SineGen(torch.nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num=0,
sine_amp=0.1, noise_std=0.003,
voiced_threshold=0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
device=f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
# each voiced segments
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
# uv = torch.ones(f0.shape)
# uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
# noise: for unvoiced should be similar to sine_amp
# std = self.sine_amp/3 -> max value ~ self.sine_amp
# . for voiced regions is self.noise_std
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
class PulseGen(torch.nn.Module):
""" Definition of Pulse train generator
    There are many ways to implement a pulse generator.
    Here, PulseGen is based on SineGen.
"""
def __init__(self, samp_rate, pulse_amp = 0.1,
noise_std = 0.003, voiced_threshold = 0):
super(PulseGen, self).__init__()
self.pulse_amp = pulse_amp
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.noise_std = noise_std
self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0, \
sine_amp=self.pulse_amp, noise_std=0, \
voiced_threshold=self.voiced_threshold, \
flag_for_pulse=True)
def forward(self, f0):
""" Pulse train generator
pulse_train, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output pulse_train: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
Note: self.l_sine doesn't make sure that the initial phase of
a voiced segment is np.pi, the first pulse in a voiced segment
may not be at the first time step within a voiced segment
"""
with torch.no_grad():
sine_wav, uv, noise = self.l_sinegen(f0)
# sine without additive noise
pure_sine = sine_wav - noise
# step t corresponds to a pulse if
# sine[t] > sine[t+1] & sine[t] > sine[t-1]
# & sine[t-1], sine[t+1], and sine[t] are voiced
# or
# sine[t] is voiced, sine[t-1] is unvoiced
# we use torch.roll to simulate sine[t+1] and sine[t-1]
sine_1 = torch.roll(pure_sine, shifts=1, dims=1)
uv_1 = torch.roll(uv, shifts=1, dims=1)
uv_1[:, 0, :] = 0
sine_2 = torch.roll(pure_sine, shifts=-1, dims=1)
uv_2 = torch.roll(uv, shifts=-1, dims=1)
uv_2[:, -1, :] = 0
loc = (pure_sine > sine_1) * (pure_sine > sine_2) \
* (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \
+ (uv_1 < 1) * (uv > 0)
# pulse train without noise
pulse_train = pure_sine * loc
# additive noise to pulse train
# note that noise from sinegen is zero in voiced regions
pulse_noise = torch.randn_like(pure_sine) * self.noise_std
# with additive noise on pulse, and unvoiced regions
pulse_train += pulse_noise * loc + pulse_noise * (1 - uv)
return pulse_train, sine_wav, uv, pulse_noise
class SignalsConv1d(torch.nn.Module):
""" Filtering input signal with time invariant filter
Note: FIRFilter conducted filtering given fixed FIR weight
SignalsConv1d convolves two signals
Note: this is based on torch.nn.functional.conv1d
"""
def __init__(self):
super(SignalsConv1d, self).__init__()
def forward(self, signal, system_ir):
""" output = forward(signal, system_ir)
signal: (batchsize, length1, dim)
system_ir: (length2, dim)
output: (batchsize, length1, dim)
"""
if signal.shape[-1] != system_ir.shape[-1]:
print("Error: SignalsConv1d expects shape:")
print("signal (batchsize, length1, dim)")
print("system_id (batchsize, length2, dim)")
print("But received signal: {:s}".format(str(signal.shape)))
print(" system_ir: {:s}".format(str(system_ir.shape)))
sys.exit(1)
padding_length = system_ir.shape[0] - 1
groups = signal.shape[-1]
# pad signal on the left
signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1), \
(padding_length, 0))
# prepare system impulse response as (dim, 1, length2)
# also flip the impulse response
ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \
dims=[2])
# convolute
output = torch_nn_func.conv1d(signal_pad, ir, groups=groups)
return output.permute(0, 2, 1)
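# Worked example (added note): convolving a unit impulse with a short impulse
# response reproduces the IR and keeps the output length equal to the input,
# thanks to the left padding above. E.g. a signal of shape (1, 5, 1) with a
# single 1 at t=0 and system_ir = [[1.], [2.], [3.]] yields [1, 2, 3, 0, 0].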
class CyclicNoiseGen_v1(torch.nn.Module):
""" CyclicnoiseGen_v1
Cyclic noise with a single parameter of beta.
Pytorch v1 implementation assumes f_t is also fixed
"""
def __init__(self, samp_rate,
noise_std=0.003, voiced_threshold=0):
super(CyclicNoiseGen_v1, self).__init__()
self.samp_rate = samp_rate
self.noise_std = noise_std
self.voiced_threshold = voiced_threshold
self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0,
noise_std=noise_std,
voiced_threshold=voiced_threshold)
self.l_conv = SignalsConv1d()
def noise_decay(self, beta, f0mean):
""" decayed_noise = noise_decay(beta, f0mean)
decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate)
beta: (dim=1) or (batchsize=1, 1, dim=1)
f0mean (batchsize=1, 1, dim=1)
decayed_noise (batchsize=1, length, dim=1)
"""
with torch.no_grad():
# exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T
# truncate the noise when decayed by -40 dB
length = 4.6 * self.samp_rate / f0mean
length = length.int()
time_idx = torch.arange(0, length, device=beta.device)
time_idx = time_idx.unsqueeze(0).unsqueeze(2)
time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2])
noise = torch.randn(time_idx.shape, device=beta.device)
# due to Pytorch implementation, use f0_mean as the f0 factor
decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate)
return noise * self.noise_std * decay
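    # Worked example (added note): with f0mean = 100 Hz and samp_rate = 24000,
    # noise_decay truncates the decayed noise after 4.6 * 24000 / 100 = 1104
    # samples, i.e. roughly where the envelope has decayed by -40 dB.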
def forward(self, f0s, beta):
""" Producde cyclic-noise
"""
# pulse train
pulse_train, sine_wav, uv, noise = self.l_pulse(f0s)
pure_pulse = pulse_train - noise
# decayed_noise (length, dim=1)
if (uv < 1).all():
# all unvoiced
cyc_noise = torch.zeros_like(sine_wav)
else:
f0mean = f0s[uv > 0].mean()
decayed_noise = self.noise_decay(beta, f0mean)[0, :, :]
# convolute
cyc_noise = self.l_conv(pure_pulse, decayed_noise)
        # add noise in unvoiced segments
cyc_noise = cyc_noise + noise * (1.0 - uv)
return cyc_noise, pulse_train, sine_wav, uv, noise
class SourceModuleCycNoise_v1(torch.nn.Module):
""" SourceModuleCycNoise_v1
SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
noise_std: std of Gaussian noise (default: 0.003)
voiced_threshold: threshold to set U/V given F0 (default: 0)
cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta)
F0_upsampled (batchsize, length, 1)
beta (1)
cyc (batchsize, length, 1)
noise (batchsize, length, 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, noise_std=0.003, voiced_threshod=0):
super(SourceModuleCycNoise_v1, self).__init__()
self.sampling_rate = sampling_rate
self.noise_std = noise_std
self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std,
voiced_threshod)
def forward(self, f0_upsamped, beta):
"""
cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta)
F0_upsampled (batchsize, length, 1)
beta (1)
cyc (batchsize, length, 1)
noise (batchsize, length, 1)
uv (batchsize, length, 1)
"""
# source for harmonic branch
cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta)
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.noise_std / 3
return cyc, noise, uv
class SourceModuleHnNSF(torch.nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
self.l_tanh = torch.nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
if __name__ == '__main__':
source = SourceModuleCycNoise_v1(24000)
x = torch.randn(16, 25600, 1)
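    # Added smoke-test sketch (illustrative, not part of the original script):
    # drive the harmonic-plus-noise source with a constant 220 Hz F0 track and
    # check that the outputs follow the (batchsize, length, 1) convention
    # documented above.
    hn_source = SourceModuleHnNSF(sampling_rate=24000, harmonic_num=8)
    f0 = torch.full((1, 24000, 1), 220.0)
    sine_merge, noise, uv = hn_source(f0)
    assert sine_merge.shape == (1, 24000, 1)
    assert uv.shape == (1, 24000, 1)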
|
11579713
|
import datetime
from django.db.models import Count
from reviews.models import Review
def get_books_read(username):
"""Get the list of books read by a user.
:param: str username for whom the book records should be returned
:return: list of dict of books read and date of posting the review
"""
books = Review.objects.filter(creator__username__contains=username).all()
return [{'title': book_read.book.title, 'completed_on': book_read.date_created} for book_read in books]
def get_books_read_by_month(username):
"""Get the books read by the user on per month basis.
:param: str The username for which the books needs to be returned
:return: dict of month wise books read
"""
current_year = datetime.datetime.now().year
    books = (
        Review.objects.filter(creator__username__contains=username, date_created__year=current_year)
        .values('date_created__month')
        .annotate(book_count=Count('book__title'))
    )
return books
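# Added note (illustrative): get_books_read_by_month returns a values queryset
# of rows shaped like {'date_created__month': 3, 'book_count': 2}, one entry
# per month of the current year that has at least one review by the user.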
|
11579740
|
import torch.nn as nn
import torch
import numpy as np
from ValidationUtils import RunningAverage
from ValidationUtils import MovingAverage
from DataVisualization import DataVisualization
from EarlyStopping import EarlyStopping
from ValidationUtils import Metrics
import logging
class ModelTrainer:
def __init__(self, model, num_epochs=100):
self.num_epochs = num_epochs
self.model = model
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
logging.info(device)
self.device = torch.device(device)
self.model.to(self.device)
# Loss and optimizer
self.criterion = nn.L1Loss()
self.optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
self.folderPath = "Models/"
def GetModel(self):
return self.model
def TrainSingleEpoch(self, training_generator):
self.model.train()
train_loss_x = MovingAverage()
train_loss_y = MovingAverage()
train_loss_z = MovingAverage()
train_loss_phi = MovingAverage()
i = 0
for batch_samples, batch_targets in training_generator:
batch_targets = batch_targets.to(self.device)
batch_samples = batch_samples.to(self.device)
outputs = self.model(batch_samples)
loss_x = self.criterion(outputs[0], (batch_targets[:, 0]).view(-1, 1))
loss_y = self.criterion(outputs[1], (batch_targets[:, 1]).view(-1, 1))
loss_z = self.criterion(outputs[2], (batch_targets[:, 2]).view(-1, 1))
loss_phi = self.criterion(outputs[3], (batch_targets[:, 3]).view(-1, 1))
loss = loss_x + loss_y + loss_z + loss_phi
# Backward and optimize
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
train_loss_x.update(loss_x)
train_loss_y.update(loss_y)
train_loss_z.update(loss_z)
train_loss_phi.update(loss_phi)
if (i + 1) % 100 == 0:
logging.info("Step [{}]: Average train loss {}, {}, {}, {}".format(i+1, train_loss_x.value, train_loss_y.value, train_loss_z.value,
train_loss_phi.value))
i += 1
return train_loss_x.value, train_loss_y.value, train_loss_z.value, train_loss_phi.value
def ValidateSingleEpoch(self, validation_generator):
self.model.eval()
valid_loss = RunningAverage()
valid_loss_x = RunningAverage()
valid_loss_y = RunningAverage()
valid_loss_z = RunningAverage()
valid_loss_phi = RunningAverage()
y_pred = []
gt_labels = []
with torch.no_grad():
for batch_samples, batch_targets in validation_generator:
gt_labels.extend(batch_targets.cpu().numpy())
batch_targets = batch_targets.to(self.device)
batch_samples = batch_samples.to(self.device)
outputs = self.model(batch_samples)
loss_x = self.criterion(outputs[0], (batch_targets[:, 0]).view(-1, 1))
loss_y = self.criterion(outputs[1], (batch_targets[:, 1]).view(-1, 1))
loss_z = self.criterion(outputs[2], (batch_targets[:, 2]).view(-1, 1))
loss_phi = self.criterion(outputs[3], (batch_targets[:, 3]).view(-1, 1))
loss = loss_x + loss_y + loss_z + loss_phi
valid_loss.update(loss)
valid_loss_x.update(loss_x)
valid_loss_y.update(loss_y)
valid_loss_z.update(loss_z)
valid_loss_phi.update(loss_phi)
outputs = torch.stack(outputs, 0)
outputs = torch.squeeze(outputs)
outputs = torch.t(outputs)
y_pred.extend(outputs.cpu().numpy())
logging.info("Average validation loss {}, {}, {}, {}".format(valid_loss_x.value, valid_loss_y.value, valid_loss_z.value,
valid_loss_phi.value))
return valid_loss_x.value, valid_loss_y.value, valid_loss_z.value, valid_loss_phi.value, y_pred, gt_labels
def Train(self, training_generator, validation_generator):
metrics = Metrics()
early_stopping = EarlyStopping(patience=10, verbose=True)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=np.sqrt(0.1),
patience=5, verbose=False,
threshold=0.0001, threshold_mode='rel', cooldown=0,
min_lr=0.1e-6, eps=1e-08)
loss_epoch_m1 = 1e3
for epoch in range(1, self.num_epochs + 1):
logging.info("Starting Epoch {}".format(epoch))
train_loss_x, train_loss_y, train_loss_z, train_loss_phi = self.TrainSingleEpoch(training_generator)
valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, y_pred, gt_labels = self.ValidateSingleEpoch(
validation_generator)
valid_loss = valid_loss_x + valid_loss_y + valid_loss_z + valid_loss_phi
scheduler.step(valid_loss)
gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
y_pred = torch.tensor(y_pred, dtype=torch.float32)
MSE, MAE, r_score = metrics.Update(y_pred, gt_labels,
[train_loss_x, train_loss_y, train_loss_z, train_loss_phi],
[valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])
logging.info('Validation MSE: {}'.format(MSE))
logging.info('Validation MAE: {}'.format(MAE))
logging.info('Validation r_score: {}'.format(r_score))
checkpoint_filename = self.folderPath + self.model.name + '-{:03d}.pkl'.format(epoch)
early_stopping(valid_loss, self.model, epoch, checkpoint_filename)
if early_stopping.early_stop:
logging.info("Early stopping")
break
MSEs = metrics.GetMSE()
MAEs = metrics.GetMAE()
r2_score = metrics.Get()
y_pred_viz = metrics.GetPred()
gt_labels_viz = metrics.GetLabels()
train_losses_x, train_losses_y, train_losses_z, train_losses_phi, valid_losses_x, valid_losses_y, valid_losses_z, valid_losses_phi = metrics.GetLosses()
DataVisualization.desc = "Train_"
DataVisualization.PlotLoss(train_losses_x, train_losses_y, train_losses_z, train_losses_phi , valid_losses_x, valid_losses_y, valid_losses_z, valid_losses_phi)
DataVisualization.PlotMSE(MSEs)
DataVisualization.PlotMAE(MAEs)
DataVisualization.PlotR2Score(r2_score)
DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
DataVisualization.DisplayPlots()
def PerdictSingleSample(self, frame, label):
self.model.eval()
logging.info('GT Values: {}'.format(label.cpu().numpy()))
with torch.no_grad():
frame = frame.to(self.device)
outputs = self.model(frame)
outputs = torch.stack(outputs, 0)
outputs = torch.squeeze(outputs)
outputs = torch.t(outputs)
outputs = outputs.cpu().numpy()
logging.info('Prediction Values: {}'.format(outputs))
return outputs
def InferSingleSample(self, frame):
shape = frame.shape
if len(frame.shape) == 3:
frame = np.reshape(frame, (1, shape[0], shape[1], shape[2]))
frame = np.swapaxes(frame, 1, 3)
frame = np.swapaxes(frame, 2, 3)
frame = frame.astype(np.float32)
frame = torch.from_numpy(frame)
self.model.eval()
with torch.no_grad():
frame = frame.to(self.device)
outputs = self.model(frame)
outputs = torch.stack(outputs, 0)
outputs = torch.squeeze(outputs)
outputs = torch.t(outputs)
outputs = outputs.cpu().numpy()
return outputs
def Predict(self, test_generator):
metrics = Metrics()
valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, y_pred, gt_labels = self.ValidateSingleEpoch(
test_generator)
gt_labels = torch.tensor(gt_labels, dtype=torch.float32)
y_pred = torch.tensor(y_pred, dtype=torch.float32)
MSE, MAE, r2_score = metrics.Update(y_pred, gt_labels,
[0, 0, 0, 0],
[valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi])
y_pred_viz = metrics.GetPred()
gt_labels_viz = metrics.GetLabels()
DataVisualization.desc = "Test_"
DataVisualization.PlotGTandEstimationVsTime(gt_labels_viz, y_pred_viz)
DataVisualization.PlotGTVsEstimation(gt_labels_viz, y_pred_viz)
DataVisualization.DisplayPlots()
logging.info('Test MSE: {}'.format(MSE))
logging.info('Test MAE: {}'.format(MAE))
logging.info('Test r2_score: {}'.format(r2_score))
return y_pred
def Infer(self, live_generator):
valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, y_pred, gt_labels = self.ValidateSingleEpoch(live_generator)
return y_pred
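# Hedged usage sketch (assumptions: `PoseNet` is a hypothetical nn.Module whose forward()
# returns four per-target tensors and which exposes a `.name` attribute, and
# `train_gen`/`val_gen`/`test_gen` are hypothetical DataLoaders yielding
# (samples, targets) batches with 4-dimensional targets):
#
#   model = PoseNet()
#   trainer = ModelTrainer(model, num_epochs=50)
#   trainer.Train(train_gen, val_gen)
#   predictions = trainer.Predict(test_gen)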
|
11579744
|
from ..check import Check
class CheckFlags(Check):
'''Ensure tests don't contain any contradicting or redundant flag combinations.'''
ID = 'FLAGS'
def run(self, name, meta, source):
if meta is None or meta.get('flags') is None:
return
flags = meta['flags']
onlyStrict = 'onlyStrict' in flags
noStrict = 'noStrict' in flags
module = 'module' in flags
raw = 'raw' in flags
canBlockIsFalse = 'CanBlockIsFalse' in flags
canBlockIsTrue = 'CanBlockIsTrue' in flags
if onlyStrict and noStrict:
return '"onlyStrict" and "noStrict" flags are mutually exclusive'
if canBlockIsFalse and canBlockIsTrue:
return '"CanBlockIsFalse" and "CanBlockIsTrue" flags are mutually exclusive'
if raw and onlyStrict:
return 'Raw tests cannot prepend a "use strict" directive'
if raw and noStrict:
return '"raw" flag implies no "use strict" directive should be prepended'
if module and onlyStrict:
return 'Module tests cannot be run in non-strict mode'
if module and noStrict:
return '"module" flag implies the test is run in strict mode'
|
11579778
|
import torch
import torchvision as tv
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
import argparse
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
# input_size=(1*28*28)
self.conv1 = nn.Sequential(
# in_channels, out_channels, kernel_size
nn.Conv2d(1, 6, 5, padding=2),
# input_size=(6*28*28)
nn.ReLU(),
# output_size=(6*14*14)
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(6, 16, 5),
# input_size=(16*10*10)
nn.ReLU(),
# output_size=(16*5*5)
nn.MaxPool2d(2, 2)
)
self.fc1 = nn.Sequential(
nn.Linear(16*5*5, 120),
nn.ReLU()
)
self.fc2 = nn.Sequential(
nn.Linear(120, 84),
nn.ReLU()
)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
EPOCH = 12        # number of training epochs
BATCH_SIZE = 64   # mini-batch size
LR = 0.01         # initial learning rate for SGD
def train_lenet(trainloader, testloader, opt):
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.1)
for epoch in range(EPOCH):
sum_loss = 0.0
for i, data in enumerate(trainloader):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# forward + backward
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
sum_loss += loss.item()
if i % 100 == 99:
print('[epoch %d, iter %d] loss: %.03f'
% (epoch + 1, i + 1, sum_loss / 100))
sum_loss = 0.0
scheduler.step()
with torch.no_grad():
correct = 0
total = 0
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
                correct += (predicted == labels).sum()
            print('Test accuracy after epoch %d: %.2f%%' % (epoch + 1, 100. * correct.item() / total))
torch.save(net.state_dict(), '%s/net_%03d.pth'%(opt.outf, epoch + 1))
if __name__ == "__main__":
from thop import profile
parser = argparse.ArgumentParser()
parser.add_argument('--outf', default='model/',
help='folder to output images and model checkpoints')
opt = parser.parse_args()
transform = transforms.ToTensor()
trainset = tv.datasets.MNIST(
root='data/',
train=True,
download=True,
transform=transform)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=BATCH_SIZE,
shuffle=True)
testset = tv.datasets.MNIST(
root='data/',
train=False,
download=True,
transform=transform)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=BATCH_SIZE,
shuffle=False)
net = LeNet()
input = torch.randn(1, 1, 28, 28)
macs, params = profile(net, inputs=(input, ))
print('macs: {}, params: {}'.format(macs, params))
net = net.to(device)
train_lenet(trainloader, testloader, opt)
|
11579790
|
from pytest import mark
from coinflip import randtests_refimpl as randtests
from .examples import *
@mark.parametrize(example_fields, examples)
def test_examples(randtest, bits, statistic_expect, p_expect, kwargs):
randtest_method = getattr(randtests, randtest)
statistic, p = randtest_method(bits, **kwargs)
assert_statistic(statistic, statistic_expect)
assert_p(p, p_expect)
@mark.parametrize(multi_example_fields, multi_examples)
def test_multi_examples(randtest, bits, expected_statistics, expected_pvalues, kwargs):
randtest_method = getattr(randtests, randtest)
statistics, pvalues = randtest_method(bits, **kwargs)
assert_statistics(statistics, expected_statistics)
assert_pvalues(pvalues, expected_pvalues)
|
11579805
|
import os
import platform
from time import sleep
from moviepy.editor import *
def mp4_conversion(mp4_path, mp3_path):
    try:
        video = VideoFileClip(os.path.join(mp4_path))
    except OSError as e:
        print()
        print(f"** ERROR: the file at {mp4_path} could not be found! **")
        print()
        return
try:
video.audio.write_audiofile(os.path.join(mp3_path))
except NameError as e:
print()
print(f'** ERROR: Could not locate codec at path "{mp3_path}" **')
print()
except ValueError as e:
print()
        print(f'** ERROR: Could not find the codec associated with the path "{mp3_path}" **')
print('** Make sure to add the extension ".mp3" to the file name. **')
print()
os.system('clear')
operating_system = platform.system()
default_save_location = "Desktop/"
file_number = 1
continue_converting = 'y'
#main loop for converting files in case someone wants to do multiple files.
while continue_converting != 'n':
default_file_name = "converted" + str(file_number) + ".mp3"
multiple_files = input("Are you converting multiple files or a single mp4 file? [s/m]: ")
if multiple_files == 's':
#Check the user's operating system in order to save/check save location successfully.
if operating_system == "Linux":
mp3_path = os.path.join(os.path.join(
os.path.expanduser('~')), default_save_location)
mp4_path = mp3_path
else:
mp3_path = os.path.join(os.path.join(
os.environ['USERPROFILE']), default_save_location)
mp4_path = mp3_path
change_mp4_path = input("The program will check your desktop for mp4 files, would you like to change the search location? [y/n]: ")
if change_mp4_path == "y":
mp4_path = input("Enter path to mp4 file you want to convert (include file and extension): ")
else:
mp4_name = input("Enter the name of the mp4 file (including extension): ")
mp4_path += mp4_name
change_save_location = input("Default save location is on the Desktop, would you like to save elsewhere? [y/n]: ")
if change_save_location == "y":
mp3_path = input("Enter custom save path: ")
change_filename = input(f'Default audio file name is {default_file_name} would you like to change this? [y/n]: ')
if change_filename == "y":
mp3_filename = input("Enter new mp3 file name (including extension): ")
mp3_path += mp3_filename
else:
mp3_path += default_file_name
mp4_conversion(mp4_path, mp3_path)
else:
mp4_folder_path = input("Enter path to folder containing mp4 files: ")
mp3_folder_path = input("Enter destination path for mp3 files: ")
mp4_files = []
for (dirpath, dirnames, filenames) in os.walk(mp4_folder_path):
for file in filenames:
if file[-3:] == "mp4":
mp4_files.append(file)
name_files = input(f'The files are named "{default_file_name}" and increment by one by default. Would you like to name them uniquely? [y/n]: ')
if name_files == "n":
for file in mp4_files:
default_file_name = "converted" + str(file_number) + ".mp3"
mp4_conversion(mp4_folder_path + file, mp3_folder_path + default_file_name)
file_number += 1
else:
for file in mp4_files:
name = input("Enter name for this file (include extension): ")
mp4_conversion(mp4_folder_path + file, mp3_folder_path + name)
print()
continue_converting = input("Would you like to convert more mp4 files? [y/n]: ")
#increments the file number so you don't overwrite audio files if you choose to use the default file name and convert multiple files.
file_number += 1
os.system('clear')
print('Goodbye!')
sleep(1)
os.system('clear')
|
11579820
|
import json
from typing import Optional
import zquantum.core.graph
from zquantum.core.graph import save_graph
from zquantum.core.typing import Specs
from zquantum.core.utils import create_object
def _make_sampler(specs: Optional[Specs] = None) -> zquantum.core.graph.Sampler:
if specs is None:
return zquantum.core.graph.constant_sampler(1)
elif isinstance(specs, str):
sampler_dict = json.loads(specs)
return create_object(sampler_dict)()
elif isinstance(specs, dict):
return create_object(specs)()
else:
raise ValueError(f"Invalid specs {specs}")
def generate_random_graph_erdos_renyi(
number_of_nodes: int,
edge_probability: float,
sampler_specs: Optional[Specs] = None,
seed: Optional[int] = None,
):
graph = zquantum.core.graph.generate_random_graph_erdos_renyi(
number_of_nodes,
edge_probability,
_make_sampler(sampler_specs),
seed,
)
save_graph(graph, "graph.json")
def generate_random_regular_graph(
number_of_nodes: int,
degree: int,
sampler_specs: Optional[Specs] = None,
seed: Optional[int] = None,
):
graph = zquantum.core.graph.generate_random_regular_graph(
number_of_nodes,
degree,
_make_sampler(sampler_specs),
seed,
)
save_graph(graph, "graph.json")
def generate_complete_graph(
number_of_nodes: int,
sampler_specs: Optional[Specs] = None,
seed: Optional[int] = None,
):
graph = zquantum.core.graph.generate_random_graph_erdos_renyi(
number_of_nodes,
1.0,
_make_sampler(sampler_specs),
seed,
)
save_graph(graph, "graph.json")
def generate_caveman_graph(
number_of_cliques: int,
size_of_cliques: int,
sampler_specs: Optional[Specs] = None,
seed: Optional[int] = None,
):
graph = zquantum.core.graph.generate_caveman_graph(
number_of_cliques,
size_of_cliques,
_make_sampler(sampler_specs),
seed,
)
save_graph(graph, "graph.json")
def generate_ladder_graph(
length_of_ladder: int,
sampler_specs: Optional[Specs] = None,
seed: Optional[int] = None,
):
graph = zquantum.core.graph.generate_ladder_graph(
length_of_ladder,
_make_sampler(sampler_specs),
seed,
)
save_graph(graph, "graph.json")
def generate_barbell_graph(
number_of_vertices_complete_graph: int,
sampler_specs: Optional[Specs] = None,
seed: Optional[int] = None,
):
graph = zquantum.core.graph.generate_barbell_graph(
number_of_vertices_complete_graph,
_make_sampler(sampler_specs),
seed,
)
save_graph(graph, "graph.json")
def generate_graph_from_specs(graph_specs: str):
graph_specs_dict = json.loads(graph_specs)
graph = zquantum.core.graph.generate_graph_from_specs(graph_specs_dict)
save_graph(graph, "graph.json")
|
11579873
|
class Solution:
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
return int("".join([str((int(i)+1)%2) for i in bin(num)[2:]]),2)
|
11579877
|
import json
import requests
import re
import threading
import time
import urllib2
initial_words = ['hello', 'bob', 'how','are','you','there','twitter','static','check','cracker','apple','pear', 'after', 'finish', 'his', 'just', 'word', 'yet', 'random', 'destroy', 'monster', 'killer', 'gun', 'ravage', 'ramp', 'original', 'wheat', 'happy', 'strive', 'friday', 'monday', 'tuesday', 'help', 'shell', 'edit', 'view', 'window', 'july', 'june', 'august', 'september', 'january', 'february', 'may', 'march', 'april']
wordsByLength = {}
max_word_length = 11
min_word_length = 3
path_to_save = '../app/src/main/assets/words/'
file_prefix = 'words-length-'
lock = threading.Lock()
# Determines if we need to keep adding words to the dict
is_satisfied = False
# Target number of words at each length
target = {}
target.update({
3: 250,
4: 700,
5: 1000,
6: 1000,
7: 1000,
8: 1000,
9: 800,
10: 600,
11: 300
})
class WorkerThread(threading.Thread):
def iterate_through_words(self, s):
s = re.sub('[.!,?:;]', '', s)
s = s.split()
for word in s:
add_to_dict(word.strip())
class WordWorkerThread(threading.Thread):
def run(self):
while not is_satisfied:
try:
req = requests.post('http://watchout4snakes.com/wo4snakes/Random/RandomWord')
add_to_dict(req.text)
except:
pass
class SentenceWorkerThread(WorkerThread):
def run(self):
while not is_satisfied:
try:
req = requests.post('http://watchout4snakes.com/wo4snakes/Random/NewRandomSentence')
self.iterate_through_words(req.text)
except:
pass
class ParagraphWorkerThread(WorkerThread):
def run(self):
while not is_satisfied:
try:
req = requests.post('http://watchout4snakes.com/wo4snakes/Random/RandomParagraph', data={'subject1':'', 'subject2':''})
self.iterate_through_words(req.text)
except:
pass
# Static methods
def add_to_dict(word):
if word.isalpha() and len(word) >= min_word_length and len(word) <= max_word_length:
length = len(word)
lock.acquire()
if word not in wordsByLength[length] and len(wordsByLength[length]) < target[length]:
wordsByLength[length].add(word.lower())
lock.release()
def check_is_satisfied():
for k, v in wordsByLength.iteritems():
if len(v) < target[k]:
return False
return True
def set_up_initial_words():
for i in xrange(min_word_length, max_word_length + 1):
wordsByLength[i] = set()
for word in initial_words:
add_to_dict(word)
def print_summary():
print "Summary"
for k, v in wordsByLength.iteritems():
print '{0} : {1}'.format(k, len(v))
set_up_initial_words()
# Start 8 threads to constantly make url requests and add words to the dict
# The wait time for responses was very high, multithread to make use of wasted time
WordWorkerThread().start()
WordWorkerThread().start()
SentenceWorkerThread().start()
SentenceWorkerThread().start()
SentenceWorkerThread().start()
ParagraphWorkerThread().start()
ParagraphWorkerThread().start()
ParagraphWorkerThread().start()
while not is_satisfied:
time.sleep(5)
if check_is_satisfied():
break
print_summary()
is_satisfied = True
for length, words in wordsByLength.iteritems():
with open('{0}{1}{2}'.format(path_to_save, file_prefix, length), 'w') as file:
for word in words:
file.write('{0}\n'.format(word.encode('utf-8')))
|
11579905
|
import pkg.plant.poison.eggplant
print pkg
print pkg.plant
print pkg.plant.poison
print pkg.plant.poison.eggplant
|
11579916
|
import os, time, json, hashlib, threading
cachedir = os.path.expanduser('~') + '/.cache/scrycall/'
cachelimit = 86400 # 24 hours
# object to handle cache for a particular query
class CacheFile:
def __init__(self, url):
# make sure the cache directory exists
checkcachedir()
# cache file name is a hash of the api query
self.file = hashlib.md5(url.encode()).hexdigest()
self.path = cachedir + self.file
self.exists = os.path.isfile(self.path)
self.writethread = None
# check if cached data has become stale
def isexpired(self):
return time.time() - os.path.getmtime(self.path) > cachelimit
# load json from a saved cache file
def read(self):
with open(self.path, 'r') as cachefile:
return json.load(cachefile)
# spin up a thread to write json to a cache file
def write(self, data):
        if self.writethread is not None:
self.writethread.join()
self.writethread = threading.Thread(target=write_threaded, args=(self.path, data), daemon=True)
self.writethread.start()
# delete the cache file
def delete(self):
os.remove(self.path)
self.exists = False
# make sure the directory to save cache files exists
def checkcachedir():
if not os.path.isdir(cachedir):
os.makedirs(cachedir)
# delete all cache files older than a specified time
def cleancache(expiretime=cachelimit):
now = time.time()
if os.path.isdir(cachedir):
for file in os.listdir(cachedir):
if now - os.path.getmtime(cachedir + file) > expiretime:
os.remove(cachedir + file)
# delete all cache files
def deletecache():
cleancache(0)
# write json to file (intended for new threads)
def write_threaded(path, data):
with open(path, 'w') as cachefile:
json.dump(data, cachefile)
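# Hedged usage sketch (assumption: a hypothetical Scryfall API URL; the cache file name is
# an MD5 hash of the query URL, so repeated queries reuse the same file):
#
#   cache = CacheFile('https://api.scryfall.com/cards/search?q=lightning')
#   if cache.exists and not cache.isexpired():
#       data = cache.read()
#   else:
#       data = {'example': 'payload fetched from the API'}
#       cache.write(data)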
|
11579930
|
import tempfile
import subprocess as sp
import sys
from pathlib import Path
from functools import partial
import pytest
run_cmd = partial(sp.run, encoding="utf-8", stdout=sp.PIPE)
@pytest.fixture(scope="session")
def tmpdir() -> Path:
with tempfile.TemporaryDirectory() as dir_name:
yield Path(dir_name)
@pytest.fixture(scope="session", autouse=True)
def download_model() -> None:
run_cmd([sys.executable, "-m", "pip", "install", "ja-ginza"])
run_cmd([sys.executable, "-m", "pip", "install", "ja-ginza-electra"])
yield
|
11579945
|
class MemorySet(list):
def __init__(self, *args, **kwargs):
"""
        An ordered set that keeps track of the last iteration through it and resumes from that point in the next iteration.
"""
self.last_len = 0
super(MemorySet, self).__init__(*args, **kwargs)
def __iter__(self):
"""
        Start iteration at the first index that was not iterated through last time.
"""
iterator = iter(self[self.last_len:])
self.last_len = len(self)
return iterator
def update(self, more_entries):
"""
Inserts new elements into the MemorySet. Values that are already present in the MemorySet will not be
added again.
:param more_entries: Iterable that can be converted to a set. All elements not already present will be added.
:returns True, if any elements were added, False otherwise.
"""
temp_entries = set(more_entries)
additions = [x for x in temp_entries if x not in self]
if len(additions) == 0:
return False
else:
self.extend(additions)
return True
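# Minimal runnable demonstration of the iteration memory: the second pass only yields
# elements added after the first pass.
if __name__ == '__main__':
    ms = MemorySet([1, 2, 3])
    assert list(ms) == [1, 2, 3]
    ms.update([3, 4, 5])          # 3 is already present and is not re-added
    assert sorted(ms) == [4, 5]   # iteration resumes after the previously seen items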
|
11579964
|
from pylab import *
def get_trunc(particle,cluster,ic):
cluster.neighbor_trunc = int(ceil((cluster.nsigma_trunc-cluster.nsigma_box)/2/\
cluster.nsigma_box))
cluster.trunc_length = cluster.nsigma_trunc*particle.sigma/2+1e-5
# loop through all clusters
ista = cluster.ista[ic]
iend = cluster.iend[ic]
if ista <= iend:
xc = cluster.xc[ic]
yc = cluster.yc[ic]
ix = cluster.ix[ic]
iy = cluster.iy[ic]
jx_min = max(0,ix-cluster.neighbor_trunc)
jx_max = min(cluster.nx-1,ix+cluster.neighbor_trunc)
jy_min = max(0,iy-cluster.neighbor_trunc)
jy_max = min(cluster.ny-1,iy+cluster.neighbor_trunc)
# put all particles in the center box into the corresponding cell structure
jsta = cluster.jsta[ic]
jend = cluster.jend[ic]
clusterx = []
clustery = []
clusterg = []
clusterx.extend(particle.xj[jsta:jend+1])
clustery.extend(particle.yj[jsta:jend+1])
clusterg.extend(particle.gj[jsta:jend+1])
# loop through all neighbor
for jx in range(jx_min,jx_max+1):
for jy in range(jy_min,jy_max+1):
if ix != jx or iy != jy:
jc = jx*cluster.ny+jy
jsta = cluster.jsta[jc]
jend = cluster.jend[jc]
# select from the particles in the neighbor boxes, the ones that belong in the trunc zone
xj = particle.xj[jsta:jend+1]
yj = particle.yj[jsta:jend+1]
gj = particle.gj[jsta:jend+1]
trunc = abs(xj-xc)<cluster.trunc_length
xj = extract(trunc,xj)
yj = extract(trunc,yj)
gj = extract(trunc,gj)
trunc = abs(yj-yc)<cluster.trunc_length
xj = extract(trunc,xj)
yj = extract(trunc,yj)
gj = extract(trunc,gj)
# add all particles in the neighbor boxes into the corresponding cell structure
clusterx.extend(xj)
clustery.extend(yj)
clusterg.extend(gj)
cluster.xjt=clusterx
cluster.yjt=clustery
cluster.gjt=clusterg
cluster.nptruncj = len(clusterx)
else:
cluster.xjt=[]
cluster.yjt=[]
cluster.gjt=[]
cluster.nptruncj = 0
return particle,cluster
|
11579981
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from .a2c_ppo_acktr.utils import init
import numpy as np
import math
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.dim()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
class Gaussian(object):
def __init__(self, mu, rho):
super().__init__()
self.mu = mu.cuda()
self.rho = rho.cuda()
self.normal = torch.distributions.Normal(0,1)
@property
def sigma(self):
return torch.log1p(torch.exp(self.rho))
def sample(self):
epsilon = self.normal.sample(self.mu.size()).cuda()
return self.mu + self.sigma * epsilon
class BayesianLinear(nn.Module):
def __init__(self, in_features, out_features, ratio = 0.5):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features))
fan_in, _ = _calculate_fan_in_and_fan_out(self.weight_mu)
gain = 1 # Var[w] + sigma^2 = 2/fan_in
total_var = 2 / fan_in
noise_var = total_var * ratio
mu_var = total_var - noise_var
noise_std, mu_std = math.sqrt(noise_var), math.sqrt(mu_var)
bound = math.sqrt(3.0) * mu_std
rho_init = np.log(np.exp(noise_std)-1)
nn.init.uniform_(self.weight_mu, -bound, bound)
self.bias = nn.Parameter(torch.Tensor(out_features).uniform_(0,0))
self.weight_rho = nn.Parameter(torch.Tensor(out_features,1).uniform_(rho_init,rho_init))
self.weight = Gaussian(self.weight_mu, self.weight_rho)
def forward(self, input, sample=False):
if sample:
weight = self.weight.sample()
bias = self.bias
else:
weight = self.weight.mu
bias = self.bias
return F.linear(input, weight, bias)
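# Hedged usage sketch (assumption: a CUDA device is available, since Gaussian moves its
# parameters to the GPU at construction time):
#
#   layer = BayesianLinear(in_features=128, out_features=64, ratio=0.5).cuda()
#   x = torch.randn(32, 128).cuda()
#   y_mean = layer(x)                  # deterministic pass through the weight means
#   y_sampled = layer(x, sample=True)  # stochastic pass with sampled weights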
|
11579996
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from models.smp_layers import SimplifiedFastSMPLayer, FastSMPLayer, SMPLayer
from models.utils.layers import GraphExtractor, EdgeCounter, BatchNorm
from models.utils.misc import create_batch_info, map_x_to_u
class SMP(torch.nn.Module):
def __init__(self, num_input_features: int, num_classes: int, num_layers: int, hidden: int, layer_type: str,
hidden_final: int, dropout_prob: float, use_batch_norm: bool, use_x: bool, map_x_to_u: bool,
num_towers: int, simplified: bool):
""" num_input_features: number of node features
layer_type: 'SMP', 'FastSMP' or 'SimplifiedFastSMP'
hidden_final: size of the feature map after pooling
use_x: for ablation study, run a MPNN instead of SMP
map_x_to_u: map the node features to the local context
num_towers: inside each SMP layers, use towers to reduce the number of parameters
simplified: less layers in the feature extractor.
"""
super().__init__()
self.map_x_to_u, self.use_x = map_x_to_u, use_x
self.dropout_prob = dropout_prob
self.use_batch_norm = use_batch_norm
self.edge_counter = EdgeCounter()
self.num_classes = num_classes
self.no_prop = GraphExtractor(in_features=num_input_features, out_features=hidden_final, use_x=use_x)
self.initial_lin = nn.Linear(num_input_features, hidden)
layer_type_dict = {'SMP': SMPLayer, 'FastSMP': FastSMPLayer, 'SimplifiedFastSMP': SimplifiedFastSMPLayer}
conv_layer = layer_type_dict[layer_type]
self.convs = nn.ModuleList()
self.batch_norm_list = nn.ModuleList()
self.feature_extractors = torch.nn.ModuleList([])
for i in range(0, num_layers):
self.convs.append(conv_layer(in_features=hidden, num_towers=num_towers, out_features=hidden, use_x=use_x))
self.batch_norm_list.append(BatchNorm(hidden, use_x))
self.feature_extractors.append(GraphExtractor(in_features=hidden, out_features=hidden_final, use_x=use_x,
simplified=simplified))
# Last layers
self.simplified = simplified
self.after_conv = nn.Linear(hidden_final, hidden_final)
self.final_lin = nn.Linear(hidden_final, num_classes)
def forward(self, data):
""" data.x: (num_nodes, num_features)"""
x, edge_index = data.x, data.edge_index
batch_info = create_batch_info(data, self.edge_counter)
# Create the context matrix
if self.use_x:
assert x is not None
u = x
elif self.map_x_to_u:
u = map_x_to_u(data, batch_info)
else:
u = data.x.new_zeros((data.num_nodes, batch_info['n_colors']))
u.scatter_(1, data.coloring, 1)
u = u[..., None]
# Forward pass
out = self.no_prop(u, batch_info)
u = self.initial_lin(u)
for i, (conv, bn, extractor) in enumerate(zip(self.convs, self.batch_norm_list, self.feature_extractors)):
if self.use_batch_norm and i > 0:
u = bn(u)
u = conv(u, edge_index, batch_info)
global_features = extractor.forward(u, batch_info)
out += global_features / len(self.convs)
# Two layer MLP with dropout and residual connections:
if not self.simplified:
out = torch.relu(self.after_conv(out)) + out
out = F.dropout(out, p=self.dropout_prob, training=self.training)
out = self.final_lin(out)
if self.num_classes > 1:
# Classification
return F.log_softmax(out, dim=-1)
else:
# Regression
assert out.shape[1] == 1
return out[:, 0]
def reset_parameters(self):
for layer in [self.no_prop, self.initial_lin, *self.convs, *self.batch_norm_list, *self.feature_extractors,
self.after_conv, self.final_lin]:
layer.reset_parameters()
def __repr__(self):
return self.__class__.__name__
|
11580011
|
import matplotlib.pyplot as plt
import time
from copy import copy
from itertools import product
import Q_functions, Q_functions_optimized, G_functions
from input_validator import validate_input2
# ----------------------------------------------------------------------------------------------------------------------
# Q FUNCTIONS TESTS
def test_Qs(function, all_function, max_profundity, num_atomics, arity, verbose=False):
"""
    Tests a counting Q function against the corresponding exhaustive generator function
:param function: the numeric function to test
:param all_function: the generator function of all formulae to test it with
:param max_profundity: zero or positive integer, profundity up to which the test will take place
:param num_atomics: positive integer, number of atomics up to which the test will take place
:param arity: positive integer, maximum connective arity up to which the test will take place
:param verbose: if True, will print the results of each test as it takes place. Else, prints only the global result
    :return: str, an error description if a mismatch is found, otherwise "All tests passed"
"""
max_profundity += 1
num_atomics += 1
arity += 1
atomics = ["p"+str(x) for x in range(num_atomics)]
for profundity in range(1, max_profundity):
if verbose:
print(f'\nProfundity {profundity}')
for ats in range(1, num_atomics):
for con in range(2, arity+1):
constants_dict = {con-1: {str(con-1)}}
generator = len(all_function(profundity, set(atomics[:ats]), constants_dict, False))
calculation = function(profundity, set(atomics[:ats]), constants_dict, False)
if generator != calculation:
return f"Error with n={profundity}, P={set(atomics[:ats])}, C={constants_dict}: {generator} vs {calculation}"
if verbose:
print(f"\tn={profundity}, P={set(atomics[:ats])}, C={constants_dict}: {calculation} OK")
constants_dict2 = {x: {str(x)} for x in range(1, con)}
generator2 = len(all_function(profundity, set(atomics[:ats]), constants_dict2, False))
calculation2 = function(profundity, set(atomics[:ats]), constants_dict2, False)
if generator2 != calculation2:
return f"Error with n={profundity}, P={set(atomics[:ats])}, C={constants_dict2}: {generator2} vs {calculation2}"
if verbose:
print(f"\tn={profundity}, P={set(atomics[:ats])}, C={constants_dict2}: {calculation2} OK")
return "All tests passed"
# ----------------------------------------------------------------------------------------------------------------------
# G FUNCTIONS UNIFORMITY TESTS
def generate_distribution_Gs(iterations, G_function, all_function, n, P, C, optimized):
"""
:param iterations: positive integer, number of formulae to be generated
:param G_function: function that will generate the random formulae
:param all_function: function that will generate all the formulae
:param n: positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param optimized: if the G_function is in the optimized module, choose True, False otherwise
:return: dict, keys: formulas, values: number of times generated
"""
all_formulae = all_function(n, P, C, validate=False)
times_generated = {f: 0 for f in all_formulae}
if not optimized:
for x in range(iterations):
f = G_function(n, P, C, validate=False)
times_generated[f] += 1
else:
f_list = G_function(iterations, n, P, C, validate=False)
for f in f_list:
times_generated[f] += 1
return times_generated
def run_uniformity_test(iterations, G_function, all_G_function, n, P, C, draw=True, save=False, optimized=False):
"""
:param iterations: positive integer, number of formulae to be generated
:param G_function: function that will generate the random formulae
:param all_function: function that will generate all the formulae
:param n: positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integer (arities), values: sets of strings (constants of that arity)
:param draw: If True, plots the distribution obtained. Otherwise prints the distribution to the console
    :param save: If True, saves the plot to a file "Figure.png" in the working directory
:param optimized: if the G_function is in the optimized module, choose True, False otherwise
:return: None. Only prints.
"""
distrib = generate_distribution_Gs(iterations, G_function, all_G_function, n, P, C, optimized)
if draw:
#plt.figure(figsize=(4, 5))
plt.bar(range(len(distrib)), list([x / sum(distrib.values()) for x in distrib.values()]), align='center')#, width=0.5)
plt.xticks(range(len(distrib)), list(distrib.keys()), rotation='vertical')
if save:
plt.subplots_adjust(bottom=0.35)
plt.savefig('Figure.png')
plt.show()
else:
for x in distrib:
print(x, distrib[x])
# ----------------------------------------------------------------------------------------------------------------------
# G FUNCTIONS BENCHMARK TEST
def benchmark(G_function, iterations, n, P, C, optimized):
"""
:param G_function: function that will generate the random formulae
:param iterations: positive integer, number of formulae to generate
:param n: positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param optimized: if the G_function is in the optimized module, choose True, False otherwise
    :return: float, time elapsed from beginning to end
"""
start = time.time()
if not optimized:
for x in range(iterations):
G_function(n, P, C, validate=False)
else:
G_function(iterations, n, P, C, validate=False)
return time.time() - start
# ----------------------------------------------------------------------------------------------------------------------
# ACTUALLY GENERATE ALL FORMULAE
def all_G_US(n, P, C, validate=True):
"""
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
:return: set of all formulae with at most n depth and containing (all or some) P
"""
if validate:
validate_input2(n, P, C)
if n == 0:
return P
else:
prev_formulae = all_G_US(n-1, P, C, False)
all_formulae = copy(prev_formulae)
for arity in C:
possible_combinations = set(product(prev_formulae, repeat=arity))
if arity in C:
for logical_constant in C[arity]:
for combination in possible_combinations:
str_combination = str(combination).replace(',)', ')').replace("'", "").replace('"', '')
all_formulae.add(f'{logical_constant}{str_combination}')
return all_formulae
def all_G_ES(n, P, C, validate=True):
"""
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
:return: set of all formulae with exactly n depth and containing (all or some) P
"""
if type(P) == int:
P = {f'p{x}' for x in range(P)}
if validate:
validate_input2(n, P, C)
if n == 0:
return P
else:
return all_G_US(n, P, C, False) - all_G_US(n-1, P, C, False)
def all_G_EA(n, P, C, validate=True):
"""
:param n: zero or positive integer, depth
:param P: set of strings, atomics
:param C: dict, keys: positive integers (arities), values: sets of strings (constants of that arity)
:param validate: if True, validates that the input provided is in the correct form
:return: set of all formulae with exactly n depth and containing all P
"""
if validate:
validate_input2(n, P, C)
if len(P) > max(C) ** n:
return set()
if n == 0 and len(P) == 1:
return P
else:
discount = set()
for p in P:
P_copy = copy(P) - {p}
discount = discount | (all_G_ES(n, P_copy, C, False))
return all_G_ES(n, P, C, False) - discount
'''# For profiling purposes
if __name__ == '__main__':
from G_functions_optimized import G_EA_uniform as G_EA_uniform_optimized
from G_functions import G_EA_uniform
import time
start = time.time()
for x in range(1000):
G_EA_uniform(4, {'p', 'q', 'r', 's'}, {1: {'Β¬'}, 2: {'&'}}, validate=False)
print(time.time() - start)
start = time.time()
G_EA_uniform_optimized(1000, 4, {'p', 'q', 'r', 's'}, {1: {'Β¬'}, 2:{'&'}}, validate=False)
print(time.time() - start)'''
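# Hedged worked example for the enumeration helpers defined above (these depend only on
# this file, not on the imported Q_functions/G_functions modules):
#
#   all_G_US(1, {'p'}, {1: {'Β¬'}}, validate=False)   # -> {'p', 'Β¬(p)'}
#   all_G_ES(1, {'p'}, {1: {'Β¬'}}, validate=False)   # -> {'Β¬(p)'}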
|
11580019
|
from model.utils import *
from dataloader.util_func import *
class BERTCompareRetrieval(nn.Module):
def __init__(self, **args):
super(BERTCompareRetrieval, self).__init__()
model = args['pretrained_model']
self.inner_bsz = args['inner_bsz']
self.num_labels = args['num_labels']
self.model = SABertForSequenceClassification.from_pretrained(model, num_labels=self.num_labels)
# add the [EOS]
self.model.resize_token_embeddings(self.model.config.vocab_size+1)
self.criterion = nn.BCEWithLogitsLoss()
# vocabulary
self.vocab = BertTokenizerFast.from_pretrained(args['tokenizer'])
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
def forward(self, batch, scaler=None, optimizer=None, scheduler=None, grad_clip=1.0):
inpt = batch['ids']
sids = batch['sids']
tids = batch['tids']
label = batch['label'] # list
# shuffle
random_idx = list(range(len(inpt)))
random.shuffle(random_idx)
inpt = [inpt[i] for i in random_idx]
sids = [sids[i] for i in random_idx]
tids = [tids[i] for i in random_idx]
label = [label[i] for i in random_idx]
label = torch.stack(label)
token_acc, acc, tloss, counter = 0, 0, 0, 0
for i in range(0, len(inpt), self.inner_bsz):
sub_ids = pad_sequence(
inpt[i:i+self.inner_bsz],
batch_first=True,
padding_value=self.pad,
)
sub_sids = pad_sequence(
sids[i:i+self.inner_bsz],
batch_first=True,
padding_value=self.pad,
)
sub_tids = pad_sequence(
tids[i:i+self.inner_bsz],
batch_first=True,
padding_value=self.pad,
)
sub_attn_mask = generate_mask(sub_ids)
sub_label = label[i:i+self.inner_bsz]
sub_label = sub_label.to(torch.float)
sub_ids, sub_sids, sub_tids, sub_attn_mask, sub_label = to_cuda(sub_ids, sub_sids, sub_tids, sub_attn_mask, sub_label)
with autocast():
output = self.model(
input_ids=sub_ids,
attention_mask=sub_attn_mask,
token_type_ids=sub_tids,
speaker_ids=sub_sids,
# output_hidden_states=True,
)
                # CLS token classification loss
logits = output.logits.squeeze(dim=1) # [B]
loss = self.criterion(logits, sub_label)
# token-level classification loss
# hidden_states = output.hidden_states[-1] # [B, S, E]
# hidden_states = self.token_level_cls(hidden_states) # [B, S, 2]
# loss += self.token_level_criterion(
# hidden_states.view(-1, 2),
# sub_tlids.view(-1)
# )
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
clip_grad_norm_(self.parameters(), grad_clip)
scaler.step(optimizer)
scaler.update()
scheduler.step()
tloss += loss
acc += torch.sum((torch.sigmoid(logits) > 0.5) == sub_label).item()/len(sub_label)
# hidden_states, sub_tlids = hidden_states.view(-1, 2), sub_tlids.view(-1)
# token_acc += (hidden_states.max(dim=-1)[1] == sub_tlids).to(torch.float).mean().item()
counter += 1
tloss /= counter
return tloss, acc, counter
def predict(self, batch):
inpt = batch['ids']
sids = batch['sids']
tids = batch['tids']
mask = batch['mask']
logits = self.model(
input_ids=inpt,
attention_mask=mask,
token_type_ids=tids,
speaker_ids=sids,
)[0]
logits = torch.sigmoid(logits.squeeze(dim=-1)) # [B]
return logits
class BERTCompareTokenEncoder(nn.Module):
def __init__(self, **args):
super(BERTCompareTokenEncoder, self).__init__()
model = args['pretrained_model']
self.inner_bsz = args['inner_bsz']
self.model = BertSAModel.from_pretrained(model)
self.cls = nn.Sequential(
nn.Dropout(p=args['dropout']) ,
nn.Linear(768, 2)
)
# add the [EOS]
self.model.resize_token_embeddings(self.model.config.vocab_size+1)
self.criterion = nn.CrossEntropyLoss()
# vocabulary
self.vocab = BertTokenizerFast.from_pretrained(args['tokenizer'])
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
def forward(self, batch, scaler=None, optimizer=None, scheduler=None, grad_clip=1.0):
ids = batch['ids']
sids = batch['sids']
tids = batch['tids']
tlids = batch['tlids']
# shuffle
random_idx = list(range(len(ids)))
random.shuffle(random_idx)
ids = [ids[i] for i in random_idx]
sids = [sids[i] for i in random_idx]
tids = [tids[i] for i in random_idx]
tlids = [tlids[i] for i in random_idx]
token_acc, acc, tloss, counter = 0, 0, 0, 0
for i in range(0, len(ids), self.inner_bsz):
sub_ids = pad_sequence(
ids[i:i+self.inner_bsz],
batch_first=True,
padding_value=self.pad,
)
sub_sids = pad_sequence(
sids[i:i+self.inner_bsz],
batch_first=True,
padding_value=self.pad,
)
sub_tids = pad_sequence(
tids[i:i+self.inner_bsz],
batch_first=True,
padding_value=self.pad,
)
sub_tlids = pad_sequence(
tlids[i:i+self.inner_bsz],
batch_first=True,
padding_value=-100,
)
sub_attn_mask = generate_mask(sub_ids)
sub_ids, sub_sids, sub_tids, sub_attn_mask, sub_tlids = to_cuda(sub_ids, sub_sids, sub_tids, sub_attn_mask, sub_tlids)
with autocast():
output = self.model(
input_ids=sub_ids,
attention_mask=sub_attn_mask,
token_type_ids=sub_tids,
speaker_ids=sub_sids,
)[0]
logits = self.cls(output) # [B, S, 2]
loss = self.criterion(logits.view(-1, 2), sub_tlids.view(-1))
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
clip_grad_norm_(self.parameters(), grad_clip)
scaler.step(optimizer)
scaler.update()
scheduler.step()
tloss += loss
# acc
mask = sub_tlids != -100
valid_num = mask.to(torch.float).sum().item()
acc_num = ((logits.max(dim=-1)[1] == sub_tlids) & mask).sum().item()
acc += acc_num / valid_num
counter += 1
tloss /= counter
return tloss, acc, counter
def predict(self, batch):
inpt = batch['ids']
sids = batch['sids']
tids = batch['tids']
mask = batch['mask']
lids = batch['tlids'] # [B, S]
logits = self.model(
input_ids=inpt,
attention_mask=mask,
token_type_ids=tids,
speaker_ids=sids,
)[0] # [B, S, E]
logits = F.softmax(self.cls(logits), dim=-1) # [B, S, 2]
        # gather the special tokens
rest = [[] for _ in range(len(inpt))]
for i in range(len(inpt)):
index = (lids[i] != -100).to(torch.float).nonzero().squeeze(-1).tolist() # [2]
for j in index:
rest[i].append(logits[i, j, 1])
rest = torch.stack([torch.stack(i) for i in rest]) # [B, 2]
# return i->j and j->i
return rest[:, 0], rest[:, 1]
|
11580059
|
from simple_salesforce import Salesforce
from simple_salesforce.exceptions import SalesforceExpiredSession
import pandas as pd
import os
class sf_Manager:
def __init__(self):
# Create a free SalesForce account: https://developer.salesforce.com/signup
self.sf = Salesforce(
username=os.getenv("USERNAME"),
            password=os.getenv("PASSWORD"),
security_token=os.getenv("TOKEN"),
)
def login(self):
# Create a free SalesForce account: https://developer.salesforce.com/signup
self.sf = Salesforce(
username=os.getenv("USERNAME"),
            password=os.getenv("PASSWORD"),
security_token=os.getenv("TOKEN"),
)
return 0
def dict_to_df(self, query_result, date=True):
items = {
val: dict(query_result["records"][val])
for val in range(query_result["totalSize"])
}
df = pd.DataFrame.from_dict(items, orient="index").drop(["attributes"], axis=1)
if date: # date indicates if the df contains datetime column
df["CreatedDate"] = pd.to_datetime(
df["CreatedDate"], format="%Y-%m-%d"
) # convert to datetime
df["CreatedDate"] = df["CreatedDate"].dt.strftime(
"%Y-%m-%d"
) # reset string
return df
def get_leads(self):
try:
desc = self.sf.Lead.describe()
except SalesforceExpiredSession as e:
self.login()
desc = self.sf.Lead.describe()
field_names = [field["name"] for field in desc["fields"]]
soql = "SELECT {} FROM Lead".format(",".join(field_names))
query_result = self.sf.query_all(soql)
leads = self.dict_to_df(query_result)
return leads
def get_opportunities(self):
query_text = "SELECT CreatedDate, Name, StageName, ExpectedRevenue, Amount, LeadSource, IsWon, IsClosed, Type, Probability FROM Opportunity"
try:
query_result = self.sf.query(query_text)
except SalesforceExpiredSession as e:
self.login()
query_result = self.sf.query(query_text)
opportunities = self.dict_to_df(query_result)
return opportunities
def get_cases(self):
query_text = "SELECT CreatedDate, Type, Reason, Status, Origin, Subject, Priority, IsClosed, OwnerId, IsDeleted, AccountId FROM Case"
try:
query_result = self.sf.query(query_text)
except SalesforceExpiredSession as e:
self.login()
query_result = self.sf.query(query_text)
cases = self.dict_to_df(query_result)
return cases
def get_contacts(self):
query_text = "SELECT Id, Salutation, FirstName, LastName FROM Contact"
try:
query_result = self.sf.query(query_text)
except SalesforceExpiredSession as e:
self.login()
query_result = self.sf.query(query_text)
contacts = self.dict_to_df(query_result, False)
return contacts
def get_users(self):
query_text = "SELECT Id,FirstName, LastName FROM User"
try:
query_result = self.sf.query(query_text)
except SalesforceExpiredSession as e:
self.login()
query_result = self.sf.query(query_text)
users = self.dict_to_df(query_result, False)
return users
def get_accounts(self):
query_text = "SELECT Id, Name FROM Account"
try:
query_result = self.sf.query(query_text)
except SalesforceExpiredSession as e:
self.login()
query_result = self.sf.query(query_text)
accounts = self.dict_to_df(query_result, False)
return accounts
def add_lead(self, query):
try:
self.sf.Lead.create(query)
except SalesforceExpiredSession as e:
self.login()
self.sf.Lead.create(query)
return 0
def add_opportunity(self, query):
try:
self.sf.Opportunity.create(query)
except SalesforceExpiredSession as e:
self.login()
self.sf.Opportunity.create(query)
return 0
def add_case(self, query):
try:
self.sf.Case.create(query)
except SalesforceExpiredSession as e:
self.login()
self.sf.Case.create(query)
return 0
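# Hedged usage sketch (assumption: USERNAME, PASSWORD and TOKEN are set in the environment
# for a Salesforce developer account):
#
#   manager = sf_Manager()
#   leads_df = manager.get_leads()
#   opportunities_df = manager.get_opportunities()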
|
11580184
|
import subprocess
from torchpack.utils.logging import logger
import argparse
import time
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str)
parser.add_argument('--name', type=str)
# parser.add_argument('--space', type=str)
parser.add_argument('--nparams', type=int, nargs='+')
args = parser.parse_args()
pres = ['python',
'examples/eval.py',
f'examples/configs/'
f'{args.dataset}/{args.name}/eval/x2/real/opt2/300.yml',
'--jobs=4',
'--run-dir']
with open(f"logs/x2/rand/{args.dataset}.{args.name}."
f"{'-'.join(list(map(str, args.nparams)))}.rand.seed.txt",
'a') as wfid:
for k, space in enumerate([f'u3cu3_s0',
f'seth_s0',
f'barren_s0',
f'farhi_s0',
f'maxwell_s0']):
for seed in range(3):
exp = f'runs/{args.dataset}.{args.name}.train.baseline.' \
f'{space}.rand.param{args.nparams[k]}.seed{seed}'
logger.info(f"running command {pres + [exp]}")
subprocess.call(pres + [exp], stderr=wfid)
|
11580212
|
from __future__ import print_function
import os
import tensorflow as tf
import cv2
import numpy as np
data_dir = 'training_set/'
class_names = [
"Black",
"White",
"Red",
"Green",
"Blue",
"Orange",
"Yellow",
"Purple",
]
if (os.path.isdir(data_dir)):
print('Training set folder was found')
else:
print('There is no traing set folder in root')
n_files = 0
training_set = list()
training_labels = list()
for file in os.listdir(data_dir):
if file.endswith(".jpg"):
img_file = os.path.join(data_dir, file)
label_name = str(file).split('_')
training_set.append(cv2.imread(img_file, 1).reshape(1, 2764800))
training_labels.append(label_name[0])
n_files += 1
def integerize(data):
Y = list()
for i in range(n_files):
a = data[i]
if a == 'Black':
Y.append(0)
elif a == 'White':
Y.append(1)
elif a == 'Red':
Y.append(2)
elif a == 'Green':
Y.append(3)
elif a == 'Blue':
Y.append(4)
elif a == 'Orange':
Y.append(5)
elif a == 'Yellow':
Y.append(6)
elif a == 'Purple':
Y.append(7)
return Y
y = integerize(training_labels)
x = training_set
# Parameters
learning_rate = 0.001
training_epochs = n_files
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
n_input = 2764800 # flattened RGB frame (1280*720*3)
n_classes = 8 # total colour classes
# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("int32", [None])  # integer class labels
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Create model
def multilayer_perceptron(x):
# Hidden fully connected layer with 256 neurons
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
# Hidden fully connected layer with 256 neurons
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
# Output fully connected layer with a neuron for each class
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Construct model
logits = multilayer_perceptron(X)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
batch_x = training_set[epoch]
        batch_y = [y[epoch]]  # wrap in a list to match the [None] label placeholder
_, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
# Compute average loss
avg_cost += c / n_files
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
pred = tf.nn.softmax(logits) # Apply softmax to logits
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.cast(Y, tf.int64))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
|
11580242
|
from __future__ import absolute_import
from django.test import TestCase
from .models import Person
class PropertyTests(TestCase):
def setUp(self):
self.a = Person(first_name='John', last_name='Lennon')
self.a.save()
def test_getter(self):
        self.assertEqual(self.a.full_name, 'John Lennon')
def test_setter(self):
# The "full_name" property hasn't provided a "set" method.
        self.assertRaises(AttributeError, setattr, self.a, 'full_name', 'Paul McCartney')
# But "full_name_2" has, and it can be used to initialise the class.
        a2 = Person(full_name_2='Paul McCartney')
a2.save()
self.assertEqual(a2.first_name, 'Paul')
|
11580258
|
OUT_LOGGER = "ab-out"
OUT_LOGGER_FORMAT = "%(message)s"
TIMESTAMP_FORMAT = "%Y%m%d-%H%M%S%f"
TIMESTAMP_FORMAT_TOGETHER = "%Y%m%d%H%M%S%f"
NO_CACHE_TAG = "no-cache"
# configuration related constants
ANNOTATIONS_KEY = "annotations"
# ansible playbook template in yaml
PLAYBOOK_TEMPLATE = """---
- name: Containerized version of $project
hosts: all
vars:
a_variable: value
# configuration specific for ansible-bender
ansible_bender:
base_image: fedora:latest
target_image:
# command to run by default when invoking the container
cmd: /command.sh
name: $project
working_container:
volumes:
# mount this git repo to the working container at /src
- "{{ playbook_dir }}:/src"
tasks:
- name: install dependencies needed to run project $project
package:
name:
- a_package
- another_package
state: present
"""
|
11580269
|
from requests_mock import Mocker
from howfairis import Checker
from howfairis import Compliance
from howfairis import Repo
from howfairis.readme import Readme
from tests.contracts.checker import Contract
from tests.helpers import list_user_files_from_local_data
def get_checker():
user_files = list_user_files_from_local_data(__file__)
user_config_filename = user_files["/.howfairis-skipreasons.yml"]
repo = Repo("https://github.com/fair-software/repo1")
return Checker(repo, user_config_filename=user_config_filename)
class TestCheckerWithSkipReasonsUserConfig(Contract):
def test_check_checklist(self, mocker: Mocker, capsys):
with mocker:
checker = get_checker()
assert checker.check_checklist() is True
captured = capsys.readouterr()
assert "I don't want checklist checks" in captured.out
def test_check_citation(self, mocker: Mocker, capsys):
with mocker:
checker = get_checker()
assert checker.check_citation() is True
captured = capsys.readouterr()
assert "I don't want citation checks" in captured.out
def test_check_license(self, mocker: Mocker, capsys):
with mocker:
checker = get_checker()
assert checker.check_license() is True
captured = capsys.readouterr()
assert "I don't want license checks" in captured.out
def test_check_registry(self, mocker: Mocker, capsys):
with mocker:
checker = get_checker()
assert checker.check_registry() is True
captured = capsys.readouterr()
assert "I don't want registry checks" in captured.out
def test_check_repository(self, mocker: Mocker, capsys):
with mocker:
checker = get_checker()
assert checker.check_repository() is True
captured = capsys.readouterr()
assert "I don't want repository checks" in captured.out
def test_compliance(self, mocker):
with mocker:
checker = get_checker()
actual_compliance = checker.check_five_recommendations()
expected_compliance = Compliance(True, True, True, True, True)
assert actual_compliance == expected_compliance
def test_has_ascl_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_ascl_badge() is False
def test_has_bintray_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_bintray_badge() is False
def test_has_citation_file(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_citation_file() is False
def test_has_citationcff_file(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_citationcff_file() is False
def test_has_codemeta_file(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_codemeta_file() is False
def test_has_conda_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_conda_badge() is False
def test_has_core_infrastructures_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_core_infrastructures_badge() is False
def test_has_cran_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_cran_badge() is False
def test_has_crates_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_crates_badge() is False
def test_has_license(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_license() is True
def test_has_maven_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_maven_badge() is False
def test_has_npm_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_npm_badge() is False
def test_has_open_repository(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_open_repository() is True
def test_has_pypi_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_pypi_badge() is False
def test_has_rsd_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_rsd_badge() is False
def test_has_zenodo_badge(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_zenodo_badge() is False
def test_has_zenodo_metadata_file(self, mocker):
with mocker:
checker = get_checker()
assert checker.has_zenodo_metadata_file() is False
def test_is_on_github_marketplace(self, mocker):
with mocker:
checker = get_checker()
assert checker.is_on_github_marketplace() is True
def test_readme(self, mocker):
with mocker:
checker = get_checker()
assert isinstance(checker.readme, Readme)
def test_repo(self, mocker):
with mocker:
checker = get_checker()
assert isinstance(checker.repo, Repo)
|
11580295
|
from app import application, flask_db, database
from models import *
from views import *
def create_tables():
# Create table for each model if it does not exist.
database.create_tables([Entry, Tag, EntryTags, FTSEntry], safe=True)
if __name__ == '__main__':
create_tables()
# Run on port 8000 for Sandstorm
application.run(host='0.0.0.0', port=8000)
|
11580318
|
import torch
import random
import torchvision.transforms as transforms
from PIL import Image
def random_distort(
img,
brightness_delta=32/255.,
contrast_delta=0.5,
saturation_delta=0.5,
hue_delta=0.1):
    '''A color-related data augmentation used in SSD.
Args:
img: (PIL.Image) image to be color augmented.
brightness_delta: (float) shift of brightness, range from [1-delta,1+delta].
contrast_delta: (float) shift of contrast, range from [1-delta,1+delta].
saturation_delta: (float) shift of saturation, range from [1-delta,1+delta].
hue_delta: (float) shift of hue, range from [-delta,delta].
Returns:
img: (PIL.Image) color augmented image.
'''
def brightness(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(brightness=delta)(img)
return img
def contrast(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(contrast=delta)(img)
return img
def saturation(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(saturation=delta)(img)
return img
def hue(img, delta):
if random.random() < 0.5:
img = transforms.ColorJitter(hue=delta)(img)
return img
img = brightness(img, brightness_delta)
if random.random() < 0.5:
img = contrast(img, contrast_delta)
img = saturation(img, saturation_delta)
img = hue(img, hue_delta)
else:
img = saturation(img, saturation_delta)
img = hue(img, hue_delta)
img = contrast(img, contrast_delta)
return img
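# Minimal usage sketch: apply the augmentation to a synthetic solid-color
# image (the 64x64 size and fill color are arbitrary illustration choices).
if __name__ == '__main__':
    demo_img = Image.new('RGB', (64, 64), color=(128, 64, 32))
    augmented = random_distort(demo_img)
    print(augmented.size, augmented.mode)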
|
11580360
|
from __future__ import print_function
from __future__ import division
from OpenGL.GL import GL_FRONT, GL_AMBIENT_AND_DIFFUSE, GL_SPECULAR, GL_SHININESS, glMaterialfv, glMaterialf
import numpy as np
import copy
import time; currentMillis = lambda: int(round(time.time() * 1000))
from illuminance.helper_illuminance import get_led_indexes, get_surface_evaluations, are_all_surfaces_hit, get_statistics_on_data, get_tuned_intensities_data, \
write_illumination_result_data_to_file, write_led_set_lambertian_scores_appended_result_file
from visualisations import draw_point, draw_wire_sphere, make_triangle_face, draw_wire_frame_of_obj_from_filename, draw_text
from brightness_control_tuning.tuning_selector import BrightnessControlStrategy, TuningInputDataContainer, TuningOutputDataContainer
from options import *
from service import WebServiceProcessForker, WebBrowserProcessForker, DataServiceDockerManager, \
BaselineDataService, ConfigurationDataService, DataService_Server_SphericalGradient
class EvaluatorGeneric:
"""
    Abstract Base Class (ABC) for LED position optimisation evaluators.
"""
# warned=False
def display(self, triangles, frame, leds_vertices, intensities):
"""
Draw the target shape and light vertex positions.
"""
self.__draw_visualisations(triangles, frame, leds_vertices, intensities)
def evaluate(self, triangles, frame, leds_vertices, intensities):
"""
        Evaluate the standard deviation of the Lambertian illuminance of the target shape, from the loaded LED vertex positions.
"""
surfaces = get_surface_evaluations(triangles, leds_vertices, intensities)
if not are_all_surfaces_hit(surfaces):
print("---FAILED--- to hit all surfaces. Result not written to file.")
# assert are_all_surfaces_hit(surfaces) , "FAILED: Some surfaces were not hit by these LED Light Vertex positions. Result not written to file. Aborting"
else:
header_data, row_data = get_statistics_on_data( surfaces=surfaces, all_leds=frame, leds_vertex_set=leds_vertices, intensities=intensities,
evaluator_shortname=self.shortname , source_filename=self._source_filename, evaluator_class=type(self).__name__ )
write_illumination_result_data_to_file(header_data, row_data, filename_suffix="_"+type(self).__name__, path_prefix=self._path_prefix)
result_dict_data = dict(zip(header_data, row_data))
self.__print_evaluation_results( result_dict_data )
def tune(self, triangles, frame, leds_vertices, intensities ):
"""
        Tune the intensities of the mapped LED vertex positions to balance the standard deviation.
"""
_start_intensities = intensities[:]
surfaces = get_surface_evaluations(triangles, leds_vertices, intensities)
if not are_all_surfaces_hit(surfaces):
print("---FAILED--- to hit all surfaces. Result not written to file.")
else:
input_data = TuningInputDataContainer(surface_tris=triangles , led_vertices=leds_vertices, intensities=intensities)
output_data = BrightnessControlStrategy().selector(input_data)
intensities = output_data.intensities
surfaces = output_data.surface_scores
# Result data:
header_data, row_data = get_statistics_on_data( surfaces=surfaces, all_leds=frame, leds_vertex_set=leds_vertices, intensities=intensities,
evaluator_shortname=self.shortname , source_filename=self._source_filename, evaluator_class=type(self).__name__ )
write_illumination_result_data_to_file(header_data, row_data, filename_suffix="_"+type(self).__name__, path_prefix=self._path_prefix)
result_dict_data = dict(zip(header_data, row_data))
# Tuned Result data file:
header_tuned, row_tuned, filename_suffix = get_tuned_intensities_data(frame, leds_vertices, intensities, result_dict_data)
write_illumination_result_data_to_file(header_tuned, row_tuned, filename_suffix=filename_suffix, path_prefix=self._path_prefix, RowDataAsRows=True)
self.__print_evaluation_results( result_dict_data )
self.__print_tuning_results(intensities, _start_intensities)
_init_web_service = False
_init_web_browser = False
_init_db_service = False
def sequence_runner(self, triangles, frame, leds_vertices, intensities ):
"""
Display demo content and post data to web service:
"""
# Start the Docker Redis DB container:
if not EvaluatorGeneric._init_db_service:
self.ds = DataServiceDockerManager()
EvaluatorGeneric._init_db_service = True
# LED Index Data
led_indexes = get_led_indexes( frame, leds_vertices )
published_intensity_set = zip( led_indexes, intensities )
#Connect to the DBADOs:
baseline_ds = BaselineDataService()
baseline_ds.set_default_intensities( published_intensity_set )
config_ds = ConfigurationDataService()
config_ds.set_config_data()
sequence_ds = DataService_Server_SphericalGradient(copy.deepcopy(leds_vertices), copy.deepcopy(led_indexes), copy.deepcopy(intensities))
sequence_ds.subscribe_to_service()
if not EvaluatorGeneric._init_web_service:
self.ws = WebServiceProcessForker()
EvaluatorGeneric._init_web_service = True
if not EvaluatorGeneric._init_web_browser:
self.wb = WebBrowserProcessForker("http://127.0.0.1:8080")
EvaluatorGeneric._init_web_browser = True
# Display visualisation:
self.__draw_visualisations(triangles, frame, leds_vertices, intensities)
""" ===============================================================================================================================
- Private functions :
===============================================================================================================================
"""
def __draw_visualisations(self, triangles, frame, leds_vertices, intensities):
# dict_properties = getPropertiesFile("../properties/default.properties")
# _frame_objfilename = dict_properties['FrameModel']['frame.objfilename']
# _frame_scale = float(dict_properties['FrameModel']['frame.scale'])
_frame_objfilename = property_to_string(section="FrameModel", key="frame.objfilename")
_frame_scale = property_to_number(section="FrameModel", key="frame.scale", vmin=0, vmax=None, vtype=float)
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (0.0, 0.0, 0.0, 1))
glMaterialfv(GL_FRONT, GL_SPECULAR, (0, 0, 0, .2))
glMaterialf(GL_FRONT, GL_SHININESS, 20)
draw_wire_frame_of_obj_from_filename(_frame_objfilename, scale=float(_frame_scale)*1.04)
# if not EvaluatorGeneric.warned:
# print("Frame has n mounting points available:"+str(len(frame)))
# EvaluatorGeneric.warned=True
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (.2, 0.2, 0.2, 1))
glMaterialfv(GL_FRONT, GL_SPECULAR, (1, 0, 0, .2))
glMaterialf(GL_FRONT, GL_SHININESS, 20)
for j in range(len(frame)):
draw_point( frame[j], size=8 ) if frame[j] is not None else None
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (1.0, 0.0, 0.0, 1))
glMaterialfv(GL_FRONT, GL_SPECULAR, (1, 0, 0, .2))
glMaterialf(GL_FRONT, GL_SHININESS, 20)
for i in range(len(leds_vertices)):
led = leds_vertices[i]
draw_wire_sphere( vertex=led, size=0.35, scale=1 ) if led is not None else None
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (0.2, 0.2, 0.6, 1))
glMaterialfv(GL_FRONT, GL_SPECULAR, (1, 1, 1, .2))
glMaterialf(GL_FRONT, GL_SHININESS, 99)
for tri in triangles:
make_triangle_face( tri )
#draw_text("", 10, 10, DISABLE_LIGHTING=False, translate_point=leds_vertices[0])
def __print_evaluation_results(self, result_dict_data):
"""
        Private function to output result data to STDOUT after the evaluation and tuning operations complete.
"""
print("Evaluator type: " + str(type(self).__name__))
print("Source Filename: " + str(self._source_filename))
print("Finished with standard deviation:" + str( result_dict_data["surface_stdev"] ))
if not result_dict_data["UNIFORM_LED_INTENSITIES"]:
print("Warning: Be aware that the normalised standard deviation (Std/Qty) metric is *unreliable* when adjusting Qty and using non-uniform light intensities (a second dependent variable).")
# -------------------------------------------------------------------------------------------------------------
# Do not change the format of this STDOUT line, tests will fail.
# The test harness depends on this output string's exact content:
# @todo: make this not a hack: Introduce a results object, accessible from test harness.
print("Finished with normalised standard deviation:" + str(result_dict_data["normalised_stdev_n"]))
# -------------------------------------------------------------------------------------------------------------
print("Normalised standard deviation is std(surface lumens)/num_leds")
print("Finished with Evenness (/n):" + str(result_dict_data["normalised_stdev_n"]))
print("Finished with Evenness (/n/mean_intensity):" + str(result_dict_data["normalised_stdev_n_intensity"]))
print("Finished with Relative Standard Deviation/ Coefficient Variation (std/mean):" + str(result_dict_data["coefficient_of_stdev"]))
print("Finished with Median:" + str(result_dict_data["surface_median"]))
print("Finished with IQR:" + str(result_dict_data["surface_iqrange"]))
print("Finished with Relative IQR (IQR/Median):" + str(result_dict_data["coefficient_of_iqr_median"]))
print("Finished with LEDS Selected/Available: " + str( result_dict_data["Qty_Selected_LED_Indexes"] )+ " / " + str( result_dict_data["Qty_Available_LED_Indexes"] ))
def __print_tuning_results(self, intensities, _start_intensities):
"""
        Private function to output result data to STDOUT after the evaluation and tuning operations complete.
"""
print("Count of LEDs with changed output intensities: " + str( [x != y for x,y in zip(intensities, _start_intensities)].count(True) )+" of "+str(len(intensities)))
print("Mean Difference of changed output intensities: " + str( (np.mean(intensities) - np.mean(_start_intensities)) ) + " Prior:" + str(np.mean(_start_intensities))+ " Post:" + str(np.mean(intensities)) )
print("-----------------------------------------------------")
print("(See Output Results File for Tuned Intensity Values)")
print("-----------------------------------------------------")
|
11580385
|
import os
import random
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import time
from mesa import Model
from mesa.datacollection import DataCollector
from mesa.space import MultiGrid
from mesa.time import RandomActivation
from .agent import Human, Wall, FireExit, Furniture, Fire, Door
class FireEvacuation(Model):
MIN_HEALTH = 0.75
MAX_HEALTH = 1
MIN_SPEED = 1
MAX_SPEED = 2
MIN_NERVOUSNESS = 1
MAX_NERVOUSNESS = 10
MIN_EXPERIENCE = 1
MAX_EXPERIENCE = 10
MIN_VISION = 1
# MAX_VISION is simply the size of the grid
def __init__(self, floor_plan_file, human_count, collaboration_percentage, fire_probability, visualise_vision, random_spawn, save_plots):
# Load floorplan
# floorplan = np.genfromtxt(path.join("fire_evacuation/floorplans/", floor_plan_file))
with open(os.path.join("fire_evacuation/floorplans/", floor_plan_file), "rt") as f:
floorplan = np.matrix([line.strip().split() for line in f.readlines()])
# Rotate the floorplan so it's interpreted as seen in the text file
floorplan = np.rot90(floorplan, 3)
# Check what dimension our floorplan is
width, height = np.shape(floorplan)
# Init params
self.width = width
self.height = height
self.human_count = human_count
self.collaboration_percentage = collaboration_percentage
self.visualise_vision = visualise_vision
self.fire_probability = fire_probability
self.fire_started = False # Turns to true when a fire has started
self.save_plots = save_plots
# Set up model objects
self.schedule = RandomActivation(self)
self.grid = MultiGrid(height, width, torus=False)
# Used to start a fire at a random furniture location
self.furniture_list = []
# Used to easily see if a location is a FireExit or Door, since this needs to be done a lot
self.fire_exit_list = []
self.door_list = []
# If random spawn is false, spawn_list will contain the list of possible spawn points according to the floorplan
self.random_spawn = random_spawn
self.spawn_list = []
# Load floorplan objects
for (x, y), value in np.ndenumerate(floorplan):
value = str(value)
floor_object = None
            if value == "W":
                floor_object = Wall((x, y), self)
            elif value == "E":
                floor_object = FireExit((x, y), self)
                self.fire_exit_list.append((x, y))
                self.door_list.append((x, y))  # Add fire exits to doors as well, since, well, they are
            elif value == "F":
                floor_object = Furniture((x, y), self)
                self.furniture_list.append((x, y))
            elif value == "D":
                floor_object = Door((x, y), self)
                self.door_list.append((x, y))
            elif value == "S":
                self.spawn_list.append((x, y))
if floor_object:
self.grid.place_agent(floor_object, (x, y))
self.schedule.add(floor_object)
# Create a graph of traversable routes, used by agents for pathing
self.graph = nx.Graph()
for agents, x, y in self.grid.coord_iter():
pos = (x, y)
# If the location is empty, or a door
if not agents or any(isinstance(agent, Door) for agent in agents):
neighbors = self.grid.get_neighborhood(pos, moore=True, include_center=True, radius=1)
for neighbor in neighbors:
# If there is contents at this location and they are not Doors or FireExits, skip them
if not self.grid.is_cell_empty(neighbor) and neighbor not in self.door_list:
continue
self.graph.add_edge(pos, neighbor)
# Collects statistics from our model run
self.datacollector = DataCollector(
{
"Alive": lambda m: self.count_human_status(m, Human.Status.ALIVE),
"Dead": lambda m: self.count_human_status(m, Human.Status.DEAD),
"Escaped": lambda m: self.count_human_status(m, Human.Status.ESCAPED),
"Incapacitated": lambda m: self.count_human_mobility(m, Human.Mobility.INCAPACITATED),
"Normal": lambda m: self.count_human_mobility(m, Human.Mobility.NORMAL),
"Panic": lambda m: self.count_human_mobility(m, Human.Mobility.PANIC),
"Verbal Collaboration": lambda m: self.count_human_collaboration(m, Human.Action.VERBAL_SUPPORT),
"Physical Collaboration": lambda m: self.count_human_collaboration(m, Human.Action.PHYSICAL_SUPPORT),
"Morale Collaboration": lambda m: self.count_human_collaboration(m, Human.Action.MORALE_SUPPORT)
}
)
# Calculate how many agents will be collaborators
number_collaborators = int(round(self.human_count * (self.collaboration_percentage / 100)))
# Start placing human agents
for i in range(0, self.human_count):
if self.random_spawn: # Place human agents randomly
pos = self.grid.find_empty()
else: # Place human agents at specified spawn locations
pos = random.choice(self.spawn_list)
if pos:
# Create a random human
                health = random.randint(int(self.MIN_HEALTH * 100), int(self.MAX_HEALTH * 100)) / 100
speed = random.randint(self.MIN_SPEED, self.MAX_SPEED)
if number_collaborators > 0:
collaborates = True
number_collaborators -= 1
else:
collaborates = False
# Vision statistics obtained from http://www.who.int/blindness/GLOBALDATAFINALforweb.pdf
vision_distribution = [0.0058, 0.0365, 0.0424, 0.9153]
vision = int(np.random.choice(np.arange(self.MIN_VISION, self.width + 1, (self.width / len(vision_distribution))), p=vision_distribution))
                nervousness_distribution = [0.025, 0.025, 0.1, 0.1, 0.1, 0.3, 0.2, 0.1, 0.025, 0.025]  # Distribution with slightly higher weighting for above-median nervousness
nervousness = int(np.random.choice(range(self.MIN_NERVOUSNESS, self.MAX_NERVOUSNESS + 1), p=nervousness_distribution)) # Random choice starting at 1 and up to and including 10
experience = random.randint(self.MIN_EXPERIENCE, self.MAX_EXPERIENCE)
belief_distribution = [0.9, 0.1] # [Believes, Doesn't Believe]
believes_alarm = np.random.choice([True, False], p=belief_distribution)
human = Human(pos, health=health, speed=speed, vision=vision, collaborates=collaborates, nervousness=nervousness, experience=experience, believes_alarm=believes_alarm, model=self)
self.grid.place_agent(human, pos)
self.schedule.add(human)
else:
print("No tile empty for human placement!")
self.running = True
# Plots line charts of various statistics from a run
def save_figures(self):
DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
OUTPUT_DIR = DIR + "/output"
results = self.datacollector.get_model_vars_dataframe()
dpi = 100
fig, axes = plt.subplots(figsize=(1920 / dpi, 1080 / dpi), dpi=dpi, nrows=1, ncols=3)
status_results = results.loc[:, ['Alive', 'Dead', 'Escaped']]
status_plot = status_results.plot(ax=axes[0])
status_plot.set_title("Human Status")
status_plot.set_xlabel("Simulation Step")
status_plot.set_ylabel("Count")
mobility_results = results.loc[:, ['Incapacitated', 'Normal', 'Panic']]
mobility_plot = mobility_results.plot(ax=axes[1])
mobility_plot.set_title("Human Mobility")
mobility_plot.set_xlabel("Simulation Step")
mobility_plot.set_ylabel("Count")
collaboration_results = results.loc[:, ['Verbal Collaboration', 'Physical Collaboration', 'Morale Collaboration']]
collaboration_plot = collaboration_results.plot(ax=axes[2])
collaboration_plot.set_title("Human Collaboration")
collaboration_plot.set_xlabel("Simulation Step")
collaboration_plot.set_ylabel("Successful Attempts")
collaboration_plot.set_ylim(ymin=0)
timestr = time.strftime("%Y%m%d-%H%M%S")
plt.suptitle("Percentage Collaborating: " + str(self.collaboration_percentage) + "%, Number of Human Agents: " + str(self.human_count), fontsize=16)
plt.savefig(OUTPUT_DIR + "/model_graphs/" + timestr + ".png")
plt.close(fig)
    # Starts a fire at a random piece of furniture with fire_probability chance
def start_fire(self):
rand = random.random()
if rand < self.fire_probability:
fire_furniture = random.choice(self.furniture_list)
fire = Fire(fire_furniture, self)
self.grid.place_agent(fire, fire_furniture)
self.schedule.add(fire)
self.fire_started = True
print("Fire started at:", fire_furniture)
def step(self):
"""
Advance the model by one step.
"""
self.schedule.step()
# If there's no fire yet, attempt to start one
if not self.fire_started:
self.start_fire()
self.datacollector.collect(self)
# If no more agents are alive, stop the model and collect the results
if self.count_human_status(self, Human.Status.ALIVE) == 0:
self.running = False
if self.save_plots:
self.save_figures()
@staticmethod
def count_human_collaboration(model, collaboration_type):
"""
Helper method to count the number of collaborations performed by Human agents in the model
"""
count = 0
for agent in model.schedule.agents:
if isinstance(agent, Human):
if collaboration_type == Human.Action.VERBAL_SUPPORT:
count += agent.get_verbal_collaboration_count()
elif collaboration_type == Human.Action.MORALE_SUPPORT:
count += agent.get_morale_collaboration_count()
elif collaboration_type == Human.Action.PHYSICAL_SUPPORT:
count += agent.get_physical_collaboration_count()
return count
@staticmethod
def count_human_status(model, status):
"""
Helper method to count the status of Human agents in the model
"""
count = 0
for agent in model.schedule.agents:
if isinstance(agent, Human):
if agent.get_status() == status:
count += 1
return count
@staticmethod
def count_human_mobility(model, mobility):
"""
Helper method to count the mobility of Human agents in the model
"""
count = 0
for agent in model.schedule.agents:
if isinstance(agent, Human):
if agent.get_mobility() == mobility:
count += 1
return count
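# Minimal usage sketch ("floorplan_1.txt" is a placeholder -- substitute a
# floorplan file that actually exists under fire_evacuation/floorplans/):
#
#   model = FireEvacuation("floorplan_1.txt", human_count=50,
#                          collaboration_percentage=30, fire_probability=0.1,
#                          visualise_vision=False, random_spawn=True,
#                          save_plots=False)
#   while model.running:
#       model.step()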
|
11580412
|
import win32com.client as wincl
from tkinter import *
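# Note: SAPI.SpVoice via win32com requires Windows with pywin32 installed;
# this script will not run on other platforms.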
def text2Speech():
text = e.get()
speak = wincl.Dispatch("SAPI.SpVoice")
speak.Speak(text)
#window configs
tts = Tk()
tts.wm_title("Text to Speech")
tts.geometry("225x105")
tts.config(background="#708090")
f=Frame(tts,height=280,width=500,bg="#bebebe")
f.grid(row=0,column=0,padx=10,pady=5)
lbl=Label(f,text="Enter your Text here : ")
lbl.grid(row=1,column=0,padx=10,pady=2)
e=Entry(f,width=30)
e.grid(row=2,column=0,padx=10,pady=2)
btn=Button(f,text="Speak",command=text2Speech)
btn.grid(row=3,column=0,padx=20,pady=10)
tts.mainloop()
|
11580417
|
from braintree.search import Search
class IdsSearch:
ids = Search.MultipleValueNodeBuilder("ids")
|
11580422
|
from selenium.webdriver import Chrome
browser = Chrome()
browser.get('http://selenium.dunossauro.live/aula_11_b')
browser.current_window_handle  # id of the current window
wids = browser.window_handles  # ids of all windows
def find_window(url: str):
wids = browser.window_handles
for window in wids:
browser.switch_to.window(window)
if url in browser.current_url:
break
find_window('duckduckgo')
|
11580459
|
from typing import Callable, List, Tuple
from EventManager.Models.RobotRunnerEvents import RobotRunnerEvents
class EventSubscriptionController:
__call_back_register: dict = dict()
@staticmethod
def subscribe_to_single_event(event: RobotRunnerEvents, callback_method: Callable):
EventSubscriptionController.__call_back_register[event] = callback_method
@staticmethod
def subscribe_to_multiple_events(subscriptions: List[Tuple[RobotRunnerEvents, Callable]]):
for sub in subscriptions:
event, callback = sub[0], sub[1]
EventSubscriptionController.subscribe_to_single_event(event, callback)
@staticmethod
    def raise_event(event: RobotRunnerEvents, robot_runner_context=None):
try:
event_callback = EventSubscriptionController.__call_back_register[event]
except KeyError:
return None
if robot_runner_context:
return event_callback(robot_runner_context)
else:
return event_callback()
@staticmethod
def get_event_callback(event: RobotRunnerEvents):
try:
return EventSubscriptionController.__call_back_register[event]
except KeyError:
return None
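# Usage sketch (BEFORE_RUN is a hypothetical member name -- the actual members
# of RobotRunnerEvents are defined elsewhere in this project):
#
#   EventSubscriptionController.subscribe_to_single_event(
#       RobotRunnerEvents.BEFORE_RUN, lambda: print("run starting"))
#   EventSubscriptionController.raise_event(RobotRunnerEvents.BEFORE_RUN)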
|
11580471
|
from pyFHE.mulfft import PolyMul, TwistGen,TwistFFT,TwistIFFT
from pyFHE.utils import dtot32
from pyFHE.key import SecretKey
import numpy as np
N = 1024
Bg = 1024
twist = TwistGen(N)
for i in range(10000):
a = dtot32(np.random.random(N)) * 2 ** -32
b = np.int32(np.random.randint(-Bg // 2, Bg // 2, N)) # choose max = Bg
ab = np.flip(np.array(np.poly1d(np.flip(a)) * np.poly1d(np.flip(b))))
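    # Reduce modulo X^N + 1: writing ab = low + X^N * high, and using
    # X^N = -1 in the quotient ring, the product collapses to low - high
    # (a negacyclic convolution).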
y = (ab[:N] - np.append(ab[N:], np.zeros(2 * N - len(ab)))) % 1
# y = np.flip(np.array((ab/np.poly1d(np.append(1,np.append(np.zeros(N-1),-1))))[0])%1)
a = np.uint32(dtot32(a))
c = PolyMul(a, np.uint32(b), twist)
if np.any((np.abs(c * 2 ** -32) - y) > 2 ** -60):
print(i)
print(y)
print(c * 2 ** -32)
break
|
11580476
|
import concurrent.futures
import logging
import os
import queue
import re
import threading
from pathlib import Path
from typing import Optional, Tuple
import boto3
import botocore
from botocore.endpoint import MAX_POOL_CONNECTIONS
from botocore.exceptions import NoCredentialsError
from .exceptions import DirectoryDoesNotExistError
from .exceptions import NoCredentialsError as S3FetchNoCredentialsError
from .exceptions import PermissionError as S3FetchPermissionError
from .exceptions import RegexError
logging.basicConfig()
class S3Fetch:
def __init__(
self,
s3_uri: str,
region: str = "us-east-1",
debug: bool = False,
download_dir: Optional[str] = None,
regex: Optional[str] = None,
threads: Optional[int] = None,
dry_run: bool = False,
delimiter: str = "/",
quiet: bool = False,
) -> None:
"""S3Fetch
:param s3_uri: S3 URI
:type s3_uri: str
:param region: AWS region the bucket is located in, defaults to "us-east-1"
:type region: str, optional
:param debug: Enable debug output, defaults to False
:type debug: bool, optional
:param download_dir: Directory to download objects to, defaults to None
:type download_dir: Optional[str], optional
:param regex: Regex to use to filter objects, defaults to None
:type regex: Optional[str], optional
:param threads: Number of threads to use, 1 thread used per object, defaults to None
:type threads: Optional[int], optional
:param dry_run: Enable dry run mode, don't actually download anything, defaults to False
:type dry_run: bool, optional
:param delimiter: S3 object path delimiter, defaults to "/"
:type delimiter: str, optional
:param quiet: Don't print anything to stdout, defaults to False
        :type quiet: bool, optional
"""
self._logger = logging.getLogger("s3fetch")
self._logger.setLevel(logging.DEBUG if debug else logging.INFO)
self._bucket, self._prefix = self._parse_and_split_s3_uri(s3_uri, delimiter)
self._debug = debug
self._regex = regex
self._dry_run = dry_run
self._delimiter = delimiter
self._quiet = quiet
if self._dry_run and not self._quiet:
print("Operating in dry run mode. Will not download objects.")
self._download_dir = self._determine_download_dir(download_dir)
self._threads = threads or len(os.sched_getaffinity(0))
self._logger.debug(f"Using {self._threads} threads.")
# https://stackoverflow.com/questions/53765366/urllib3-connectionpool-connection-pool-is-full-discarding-connection
# https://github.com/boto/botocore/issues/619#issuecomment-461859685
# max_pool_connections here is passed to the max_size param of urllib3.HTTPConnectionPool()
connection_pool_connections = max(MAX_POOL_CONNECTIONS, self._threads)
client_config = botocore.config.Config(
max_pool_connections=connection_pool_connections,
)
self.client = boto3.client("s3", region_name=region, config=client_config)
self._object_queue = queue.Queue()
self._failed_downloads = []
self._successful_downloads = 0
self._keyboard_interrupt_exit = threading.Event()
self._print_lock = threading.Lock()
def _parse_and_split_s3_uri(self, s3_uri: str, delimiter: str) -> Tuple[str, str]:
"""Parse and split the S3 URI into bucket and path prefix.
:param s3_uri: S3 URI
:type s3_uri: str
:param delimiter: S3 path delimiter.
:type delimiter: str
:return: Tuple containing the S3 bucket and path prefix.
:rtype: Tuple[str, str]
"""
tmp_path = s3_uri.replace("s3://", "", 1)
try:
bucket, prefix = tmp_path.split(delimiter, maxsplit=1)
except ValueError:
bucket = tmp_path
prefix = ""
self._logger.debug(f"bucket={bucket}, prefix={prefix}")
return bucket, prefix
def _determine_download_dir(self, download_dir: Optional[str]) -> Path:
"""Determine the correct download directory to use.
:param download_dir: Download directory, `None` results in current directory.
:type download_dir: Optional[str]
:raises DirectoryDoesNotExistError: Raised if the specified directory does not exist.
:return: Path object representing the download directory.
:rtype: Path
"""
if not download_dir:
download_directory = Path(os.getcwd())
else:
download_directory = Path(download_dir)
if not download_directory.is_dir():
raise DirectoryDoesNotExistError(
f"The directory '{download_directory}' does not exist."
)
self._logger.debug(f"download_directory={download_directory}")
return Path(download_directory)
def _retrieve_list_of_objects(self) -> None:
"""Retrieve a list of objects in the S3 bucket under the specified path prefix."""
if not self._quiet:
prefix = f"'{self._prefix}'" if self._prefix else "no prefix"
print(f"Listing objects in bucket '{self._bucket}' with prefix {prefix}...")
paginator = self.client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=self._bucket, Prefix=self._prefix):
if "Contents" not in page:
if not self._quiet:
print("No objects found under prefix.")
break
if self._keyboard_interrupt_exit.is_set():
raise KeyboardInterrupt
for key in filter(
self._filter_object,
(obj["Key"] for obj in page["Contents"]),
):
self._object_queue.put_nowait(key)
# Send sentinel value indicating pagination complete.
self._object_queue.put_nowait(None)
def run(self) -> None:
"""Executes listing, filtering and downloading objects from the S3 bucket."""
try:
threading.Thread(target=self._retrieve_list_of_objects).start()
self._download_objects()
self._check_for_failed_downloads()
except NoCredentialsError as e:
raise S3FetchNoCredentialsError(e) from e
def _download_objects(self) -> None:
"""Download objects from the specified S3 bucket and path prefix."""
if not self._quiet:
print("Starting downloads...")
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._threads
) as executor:
futures = {}
while True:
item = self._object_queue.get(block=True)
if item is None: # Check for sentinel value
break
futures[item] = executor.submit(self._download_object, item)
for key, future in futures.items():
try:
future.result()
self._successful_downloads += 1
except KeyboardInterrupt:
if not self._quiet:
print("\nThreads are exiting...")
executor.shutdown(wait=False)
self._keyboard_interrupt_exit.set()
raise
except Exception as e:
self._failed_downloads.append((key, e))
def _check_for_failed_downloads(self) -> None:
"""Print out a list of objects that failed to download (if any)."""
if self._failed_downloads and not self._quiet:
print()
print(f"{len(self._failed_downloads)} objects failed to download.")
for key, reason in self._failed_downloads:
print(f"{key}: {reason}")
def _rollup_prefix(self, key: str) -> Tuple[Optional[str], str]:
# First roll up everything under the prefix to the right most delimiter, leaving us with the object key
# after the rolled up prefix.
# Example for prefix of '/example/obj'
# /example/objects/obj1
# /example/objects/obj2
# Result: objects/obj1 & objects/obj2
# Determine rollup prefix
if self._prefix:
# Get prefix up to last delimiter
try:
rollup_prefix, _ = self._prefix.rsplit(self._delimiter, maxsplit=1)
except ValueError:
rollup_prefix = None
else:
rollup_prefix = None
# Remove prefix from key
if rollup_prefix:
_, tmp_key = key.rsplit(rollup_prefix + self._delimiter, maxsplit=1)
else:
tmp_key = key
# Split object key into directory and filename
try:
directory, filename = tmp_key.rsplit(self._delimiter, maxsplit=1)
except ValueError:
directory = None
filename = tmp_key
return directory, filename
def _download_object(self, key: str) -> None:
"""Download S3 object from the specified bucket.
:param key: S3 object key
:type key: str
        :raises KeyboardInterrupt: Raised if the user cancels the operation with CTRL-C.
:raises S3FetchPermissionError: Raised if a permission error is encountered when writing object to disk.
"""
tmp_dest_directory, tmp_dest_filename = self._rollup_prefix(key)
if tmp_dest_directory:
destination_directory = self._download_dir / Path(tmp_dest_directory)
else:
destination_directory = self._download_dir
if not destination_directory.is_dir():
try:
destination_directory.mkdir(parents=True)
except FileExistsError:
pass
destination_filename = destination_directory / Path(tmp_dest_filename)
if self._keyboard_interrupt_exit.is_set():
raise KeyboardInterrupt
self._logger.debug(f"Downloading s3://{self._bucket}{self._delimiter}{key}")
try:
if not self._dry_run:
self.client.download_file(
Bucket=self._bucket,
Key=key,
Filename=str(destination_filename),
Callback=self._download_callback,
)
except PermissionError as e:
if not self._quiet:
self._tprint(f"{key}...error")
raise S3FetchPermissionError(
f"Permission error when attempting to write object to {destination_filename}"
) from e
else:
if not self._keyboard_interrupt_exit.is_set():
if not self._quiet:
self._tprint(f"{key}...done")
def _download_callback(self, *args, **kwargs):
"""boto3 callback, called whenever boto3 finishes downloading a chunk of an S3 object.
:raises SystemExit: Raised if KeyboardInterrupt is raised in the main thread.
"""
if self._keyboard_interrupt_exit.is_set():
self._logger.debug("Main thread has told us to exit, so exiting.")
raise SystemExit(1)
def _filter_object(self, key: str) -> bool:
"""Filter function for the `filter()` call used to determine if an
object key should be included in the list of objects to download.
:param key: S3 object key.
:type key: str
:returns: True if object key matches regex or no regex provided. False otherwise.
:raises RegexError: Raised if the regular expression is invalid.
"""
# Discard key if it's a 'directory'
if key.endswith(self._delimiter):
return False
if not self._regex:
self._logger.debug("No regex detected.")
return True
try:
rexp = re.compile(rf"{self._regex}")
except re.error as e:
msg = f"Regex error: {repr(e)}"
if self._debug:
raise RegexError(msg) from e
raise RegexError(msg)
if rexp.search(key):
self._logger.debug(f"Object {key} matched regex, added to object list.")
return True
else:
self._logger.debug(f"Object {key} did not match regex, skipped.")
return False
def _tprint(self, msg: str) -> None:
"""Thread safe printing.
:param msg: Text to print to the screen.
:type msg: str
"""
        with self._print_lock:
            print(msg)
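# Usage sketch (placeholder bucket/prefix -- substitute a real S3 URI; assumes
# AWS credentials are resolvable through the usual boto3 credential chain):
#
#   fetcher = S3Fetch("s3://my-bucket/some/prefix/", threads=4)
#   fetcher.run()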
|
11580486
|
import gym
import rospy
import roslaunch
import time
import numpy as np
from gym import utils, spaces
from gym_gazebo.envs import gazebo_env
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty
from sensor_msgs.msg import LaserScan
from gym.utils import seeding
class GazeboRoundTurtlebotLidarEnv(gazebo_env.GazeboEnv):
def __init__(self):
# Launch the simulation with the given launchfile name
gazebo_env.GazeboEnv.__init__(self, "GazeboRoundTurtlebotLidar_v0.launch")
self.vel_pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=5)
self.unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
self.pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
self.reset_proxy = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)
self.action_space = spaces.Discrete(3) #F,L,R
self.reward_range = (-np.inf, np.inf)
self._seed()
def discretize_observation(self,data,new_ranges):
discretized_ranges = []
min_range = 0.2
done = False
        mod = len(data.ranges) // new_ranges  # integer stride between sampled beams
        for i, item in enumerate(data.ranges):
            if i % mod == 0:
                if data.ranges[i] == float('Inf'):
discretized_ranges.append(6)
elif np.isnan(data.ranges[i]):
discretized_ranges.append(0)
else:
discretized_ranges.append(int(data.ranges[i]))
if (min_range > data.ranges[i] > 0):
done = True
return discretized_ranges,done
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
rospy.wait_for_service('/gazebo/unpause_physics')
try:
self.unpause()
except (rospy.ServiceException) as e:
print ("/gazebo/unpause_physics service call failed")
if action == 0: #FORWARD
vel_cmd = Twist()
vel_cmd.linear.x = 0.3
vel_cmd.angular.z = 0.0
self.vel_pub.publish(vel_cmd)
elif action == 1: #LEFT
vel_cmd = Twist()
vel_cmd.linear.x = 0.1
vel_cmd.angular.z = 0.3
self.vel_pub.publish(vel_cmd)
elif action == 2: #RIGHT
vel_cmd = Twist()
vel_cmd.linear.x = 0.1
vel_cmd.angular.z = -0.3
self.vel_pub.publish(vel_cmd)
data = None
while data is None:
try:
data = rospy.wait_for_message('/scan', LaserScan, timeout=5)
except:
pass
rospy.wait_for_service('/gazebo/pause_physics')
try:
#resp_pause = pause.call()
self.pause()
except (rospy.ServiceException) as e:
print ("/gazebo/pause_physics service call failed")
state,done = self.discretize_observation(data,5)
if not done:
if action == 0:
reward = 5
else:
reward = 1
else:
reward = -200
return state, reward, done, {}
def reset(self):
# Resets the state of the environment and returns an initial observation.
rospy.wait_for_service('/gazebo/reset_simulation')
try:
#reset_proxy.call()
self.reset_proxy()
except (rospy.ServiceException) as e:
print ("/gazebo/reset_simulation service call failed")
# Unpause simulation to make observation
rospy.wait_for_service('/gazebo/unpause_physics')
try:
#resp_pause = pause.call()
self.unpause()
except (rospy.ServiceException) as e:
print ("/gazebo/unpause_physics service call failed")
#read laser data
data = None
while data is None:
try:
data = rospy.wait_for_message('/scan', LaserScan, timeout=5)
except:
pass
rospy.wait_for_service('/gazebo/pause_physics')
try:
#resp_pause = pause.call()
self.pause()
except (rospy.ServiceException) as e:
print ("/gazebo/pause_physics service call failed")
        state, done = self.discretize_observation(data, 5)
return state
|
11580510
|
from .client_proxy import ProxyKB
from .client_async import AsyncKB
from .client_sync import SyncKB
from .server import launch
__all__ = (
"SyncKB",
"AsyncKB",
"ProxyKB",
"launch",
)
|
11580564
|
import unittest
import pytest
from tfsnippet.examples.utils import MLConfig
class MLConfigTestCase(unittest.TestCase):
def test_assign(self):
class MyConfig(MLConfig):
a = 123
config = MyConfig()
self.assertEqual(config.a, 123)
config.a = 234
self.assertEqual(config.a, 234)
with pytest.raises(AttributeError, match='Config key \'non_exist\' '
'does not exist'):
config.non_exist = 12345
def test_defaults_and_to_dict(self):
self.assertDictEqual(MLConfig.defaults(), {})
self.assertDictEqual(MLConfig().to_dict(), {})
class MyConfig(MLConfig):
a = 123
b = 456
self.assertDictEqual(MyConfig.defaults(), {'a': 123, 'b': 456})
config = MyConfig()
self.assertDictEqual(config.to_dict(), {'a': 123, 'b': 456})
config.a = 333
self.assertDictEqual(config.to_dict(), {'a': 333, 'b': 456})
class MyConfig2(MyConfig):
a = 234
c = 1234
self.assertDictEqual(MyConfig2.defaults(),
{'a': 234, 'b': 456, 'c': 1234})
config = MyConfig2()
self.assertDictEqual(config.to_dict(), {'a': 234, 'b': 456, 'c': 1234})
config.a = 333
config.c = 444
self.assertDictEqual(config.to_dict(), {'a': 333, 'b': 456, 'c': 444})
|
11580600
|
import pytest
import datetime
from cryptocompy import helper_functions
@pytest.mark.parametrize("to_ts, timestamp", [
("FALSE", False),
(datetime.datetime(2019, 5, 16, 18, 1, 48), 1558022508),
(1558022508, 1558022508),
("1558022508", 1558022508),
])
def test_to_ts_args_to_timestamp(to_ts, timestamp):
assert helper_functions.to_ts_args_to_timestamp(to_ts) == timestamp
|
11580634
|
import numpy
import chainer
from chainer import functions, cuda
from chainer import links
from chainer import reporter
from chainer_pointnet.models.linear_block import LinearBlock
from chainer_pointnet.models.pointnet2.set_abstraction_all_block import \
SetAbstractionGroupAllModule
from chainer_pointnet.models.pointnet2.set_abstraction_block import \
SetAbstractionModule
class PointNet2ClsMSG(chainer.Chain):
"""Classification PointNet++ MSG (Multi Scale Grouping)
Input is (minibatch, K, N, 1), output is (minibatch, out_dim)
Args:
out_dim (int): output dimension, number of class for classification
in_dim: input dimension for each point. default is 3, (x, y, z).
dropout_ratio (float): dropout ratio
use_bn (bool): use batch normalization or not.
compute_accuracy (bool): compute & report accuracy or not
residual (bool): use residual connection or not
"""
def __init__(self, out_dim, in_dim=3, dropout_ratio=0.5,
use_bn=True, compute_accuracy=True, residual=False):
super(PointNet2ClsMSG, self).__init__()
with self.init_scope():
            # initial_idx is set to ensure deterministic behavior of
            # farthest_point_sampling
self.sam11 = SetAbstractionModule(
k=512, num_sample_in_region=16, radius=0.1,
mlp=[32, 32, 64], mlp2=None, initial_idx=0,
residual=residual)
self.sam12 = SetAbstractionModule(
k=512, num_sample_in_region=32, radius=0.2,
mlp=[64, 64, 128], mlp2=None, initial_idx=0,
residual=residual)
self.sam13 = SetAbstractionModule(
k=512, num_sample_in_region=128, radius=0.4,
mlp=[64, 96, 128], mlp2=None, initial_idx=0,
residual=residual)
self.sam21 = SetAbstractionModule(
k=128, num_sample_in_region=32, radius=0.2,
mlp=[64, 64, 128], mlp2=None, initial_idx=0,
residual=residual)
self.sam22 = SetAbstractionModule(
k=128, num_sample_in_region=64, radius=0.4,
mlp=[128, 128, 256], mlp2=None, initial_idx=0,
residual=residual)
self.sam23 = SetAbstractionModule(
k=128, num_sample_in_region=128, radius=0.8,
mlp=[128, 128, 256], mlp2=None, initial_idx=0,
residual=residual)
self.sam3 = SetAbstractionGroupAllModule(
mlp=[256, 512, 1024], mlp2=None,
residual=residual)
self.fc_block4 = LinearBlock(
1024, 512, use_bn=use_bn, dropout_ratio=dropout_ratio,)
self.fc_block5 = LinearBlock(
512, 256, use_bn=use_bn, dropout_ratio=dropout_ratio,)
self.fc6 = links.Linear(256, out_dim)
self.compute_accuracy = compute_accuracy
def calc(self, x):
# x: (minibatch, K, N, 1)
# N - num_point
# K - feature degree (this is 3 for xyz input, 64 for middle layer)
assert x.ndim == 4
assert x.shape[-1] == 1
coord_points = functions.transpose(x[:, :, :, 0], (0, 2, 1))
feature_points = None
cp11, fp11, _ = self.sam11(coord_points, feature_points)
cp12, fp12, _ = self.sam12(coord_points, feature_points)
cp13, fp13, _ = self.sam13(coord_points, feature_points)
# assert numpy.allclose(cuda.to_cpu(cp11.data), cuda.to_cpu(cp12.data))
# assert numpy.allclose(cuda.to_cpu(cp11.data), cuda.to_cpu(cp13.data))
del cp12, cp13
feature_points = functions.concat([fp11, fp12, fp13], axis=2)
        cp21, fp21, _ = self.sam21(cp11, feature_points)
        cp22, fp22, _ = self.sam22(cp11, feature_points)
        cp23, fp23, _ = self.sam23(cp11, feature_points)
# assert numpy.allclose(cuda.to_cpu(cp21.data), cuda.to_cpu(cp22.data))
# assert numpy.allclose(cuda.to_cpu(cp21.data), cuda.to_cpu(cp23.data))
del cp22, cp23
feature_points = functions.concat([fp21, fp22, fp23], axis=2)
coord_points, feature_points = self.sam3(cp21, feature_points)
# coord (bs, k, coord), feature (bs, k, ch')
h = self.fc_block4(feature_points)
h = self.fc_block5(h)
h = self.fc6(h)
return h
def __call__(self, x, t):
h = self.calc(x)
cls_loss = functions.softmax_cross_entropy(h, t)
# reporter.report({'cls_loss': cls_loss}, self)
loss = cls_loss
reporter.report({'loss': loss}, self)
if self.compute_accuracy:
acc = functions.accuracy(h, t)
reporter.report({'accuracy': acc}, self)
return loss
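# Shape sketch: for a minibatch of 16 point clouds, each with 1024 xyz points,
# the expected input x has shape (16, 3, 1024, 1); `calc` returns (16, out_dim)
# class scores and `__call__` returns the softmax cross-entropy loss against
# integer labels t of shape (16,).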
|
11580664
|
import argparse
import os
import sys
import tabulate
import time
import torch
import torch.nn.functional as F
import curves
import data
import models
import utils
import numpy as np
parser = argparse.ArgumentParser(description='DNN curve training')
parser.add_argument('--dir', type=str, default='injection_Baseline50_randomstart/', metavar='DIR',
                    help='training directory (default: injection_Baseline50_randomstart/)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true', default=True,
                    help='switches between validation and test set (default: test)')
parser.add_argument('--transform', type=str, default='VGG', metavar='TRANSFORM',
help='transform name (default: VGG)')
parser.add_argument('--data_path', type=str, default='data/', metavar='PATH',
                    help='path to datasets location (default: data/)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default='PreResNet110', metavar='MODEL', #required=True,
                    help='model name (default: PreResNet110)')
parser.add_argument('--resume', type=str, default=True, metavar='CKPT',
                    help='checkpoint to resume training from (default: True)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                    help='number of epochs to train (default: 100)')
parser.add_argument('--save_freq', type=int, default=50, metavar='N',
help='save frequency (default: 50)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='initial learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4, metavar='WD',
help='weight decay (default: 1e-4)')
parser.add_argument('--ckpt', type=str, default='Para2/checkpoint-100.pt', metavar='CKPT',
                    help='checkpoint to eval (default: Para2/checkpoint-100.pt)')
parser.add_argument('--seed', type=int, default=222, metavar='S', help='random seed (default: 222)')
parser.add_argument('--numS', type=int, default=4, metavar='NS', help='number of S')
parser.add_argument('--numT', type=int, default=1000, metavar='NT', help='number of T')
args = parser.parse_args()
os.makedirs(args.dir, exist_ok=True)
with open(os.path.join(args.dir, 'command.sh'), 'w') as f:
f.write(' '.join(sys.argv))
f.write('\n')
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
loaders, num_classes = data.loaders_part_test(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test
)
architecture = getattr(models, args.model)
model = architecture.base(num_classes=num_classes, **architecture.kwargs)
model.cuda()
#ave_parameters = list(model.parameters())
#print('Resume training from %d' % 0)
#resume = args.ckpt
#checkpoint = torch.load(resume)
#model.load_state_dict(checkpoint['model_state'])
def learning_rate_schedule(base_lr, epoch, total_epochs):
alpha = epoch / total_epochs
if alpha <= 0.5:
factor = 1.0
elif alpha <= 0.9:
factor = 1.0 - (alpha - 0.5) / 0.4 * 0.99
else:
factor = 0.01
return factor * base_lr
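# Worked example with base_lr=0.01 and total_epochs=100: epochs 1-50 keep the
# full rate (0.01); the rate then decays linearly, e.g. epoch 70 gives
# factor = 1 - (0.7 - 0.5) / 0.4 * 0.99 = 0.505, i.e. lr = 0.00505; from
# epoch 90 onwards the factor stays at 0.01, i.e. lr = 0.0001.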
criterion = F.cross_entropy
regularizer = None
optimizer = torch.optim.SGD(
filter(lambda param: param.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd
)
start_epoch = 1
columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_nll', 'te_acc', 'accS', 'accT', 'time']
utils.save_checkpoint(
args.dir,
start_epoch - 1,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
has_bn = utils.check_bn(model)
test_res = {'loss': None, 'accuracy': None, 'nll': None}
D = np.load('files_res.npz')
inputs = D['inputs']
targets = D['targets']
print('Resume training model')
checkpoint = torch.load('./Para13/checkpoint-100.pt')
model.load_state_dict(checkpoint['model_state'])
for epoch in range(start_epoch, args.epochs + 1):
time_ep = time.time()
lr = learning_rate_schedule(args.lr, epoch, args.epochs)
# lr = args.lr
utils.adjust_learning_rate(optimizer, lr)
train_res = utils.train(loaders['train'], model, optimizer, criterion, regularizer)
test_res = utils.test(loaders['test'], model, criterion, regularizer)
test_poison_res = utils.test_poison(inputs, targets, model, args.numS, criterion)
if epoch % args.save_freq == 0:
utils.save_checkpoint(
args.dir,
epoch,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
time_ep = time.time() - time_ep
values = [epoch, lr, train_res['loss'], train_res['accuracy'], test_res['nll'], test_res['accuracy'],
test_poison_res['accuracyS'], test_poison_res['accuracyT'], time_ep]
table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='9.4f')
if epoch % 40 == 1 or epoch == start_epoch:
table = table.split('\n')
table = '\n'.join([table[1]] + table)
else:
table = table.split('\n')[2]
print(table)
if args.epochs % args.save_freq != 0:
utils.save_checkpoint(
args.dir,
args.epochs,
model_state=model.state_dict(),
optimizer_state=optimizer.state_dict()
)
|
11580714
|
from sympy import (residue, Symbol, Function, sin, I, exp, log, pi,
factorial, sqrt, Rational, cot)
from sympy.testing.pytest import XFAIL, raises
from sympy.abc import x, z, a, s
def test_basic1():
assert residue(1/x, x, 0) == 1
assert residue(-2/x, x, 0) == -2
assert residue(81/x, x, 0) == 81
assert residue(1/x**2, x, 0) == 0
assert residue(0, x, 0) == 0
assert residue(5, x, 0) == 0
assert residue(x, x, 0) == 0
assert residue(x**2, x, 0) == 0
def test_basic2():
assert residue(1/x, x, 1) == 0
assert residue(-2/x, x, 1) == 0
assert residue(81/x, x, -1) == 0
assert residue(1/x**2, x, 1) == 0
assert residue(0, x, 1) == 0
assert residue(5, x, 1) == 0
assert residue(x, x, 1) == 0
assert residue(x**2, x, 5) == 0
def test_f():
f = Function("f")
assert residue(f(x)/x**5, x, 0) == f(x).diff(x, 4).subs(x, 0)/24
def test_functions():
assert residue(1/sin(x), x, 0) == 1
assert residue(2/sin(x), x, 0) == 2
assert residue(1/sin(x)**2, x, 0) == 0
assert residue(1/sin(x)**5, x, 0) == Rational(3, 8)
def test_expressions():
assert residue(1/(x + 1), x, 0) == 0
assert residue(1/(x + 1), x, -1) == 1
assert residue(1/(x**2 + 1), x, -1) == 0
assert residue(1/(x**2 + 1), x, I) == -I/2
assert residue(1/(x**2 + 1), x, -I) == I/2
assert residue(1/(x**4 + 1), x, 0) == 0
assert residue(1/(x**4 + 1), x, exp(I*pi/4)).equals(-(Rational(1, 4) + I/4)/sqrt(2))
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/4/a**3
@XFAIL
def test_expressions_failing():
n = Symbol('n', integer=True, positive=True)
assert residue(exp(z)/(z - pi*I/4*a)**n, z, I*pi*a) == \
exp(I*pi*a/4)/factorial(n - 1)
def test_NotImplemented():
raises(NotImplementedError, lambda: residue(exp(1/z), z, 0))
def test_bug():
assert residue(2**(z)*(s + z)*(1 - s - z)/z**2, z, 0) == \
1 + s*log(2) - s**2*log(2) - 2*s
def test_issue_5654():
assert residue(1/(x**2 + a**2)**2, x, a*I) == -I/(4*a**3)
def test_issue_6499():
assert residue(1/(exp(z) - 1), z, 0) == 1
def test_issue_14037():
assert residue(sin(x**50)/x**51, x, 0) == 1
def test_issue_21176():
assert residue(x**2*cot(pi*x)/(x**4 + 1), x, -sqrt(2)/2 - sqrt(2)*I/2) == 0
|
11580754
|
import basix
def to_x(p):
if len(p) == 1:
return 100 * p[0]
if len(p) == 2:
return 100 * p[0]
if len(p) == 3:
return 100 * p[0] + 30 * p[1]
def to_y(p):
if len(p) == 1:
return 120
if len(p) == 2:
return 120 - 100 * p[1]
if len(p) == 3:
return 120 - 100 * p[2] - 40 * p[1]
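# to_x/to_y project reference-cell coordinates onto the 2D SVG canvas: x runs
# left to right and y/z run upwards (SVG's y axis grows downwards, hence the
# "120 - ..." terms); for 3D cells the reference y axis is drawn diagonally
# (30px right, 40px up per unit) to fake depth.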
for shape in ["interval", "triangle", "tetrahedron",
"quadrilateral", "hexahedron", "prism", "pyramid"]:
cell_type = getattr(basix.CellType, shape)
geometry = basix.geometry(cell_type)
topology = basix.topology(cell_type)
yadd = 0
width = 100
if shape == "interval":
yadd = -100
if shape == "hexahedron":
yadd = 40
width = 140
if shape == "pyramid":
width = 140
if shape == "prism":
yadd = 40
svg = ""
if geometry.shape[1] == 1:
svg += (f"<line x1='20' y1='{120 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='40' y1='{115 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='40' y1='{125 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<text x='60' y='{120 + yadd}' fill='#000000' dy='.3em' style='font-family:\"Libertinus Serif Semibold Italic\";font-size:20px'>x</text>\n")
elif geometry.shape[1] == 2:
svg += (f"<line x1='20' y1='{120 + yadd}' x2='20' y2='{90 + yadd}' />\n"
f"<line x1='15' y1='{100 + yadd}' x2='20' y2='{90 + yadd}' />\n"
f"<line x1='25' y1='{100 + yadd}' x2='20' y2='{90 + yadd}' />\n"
f"<line x1='20' y1='{120 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='40' y1='{115 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='40' y1='{125 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<text x='60' y='{120 + yadd}' fill='#000000' dy='.3em' style='font-family:\"Libertinus Serif Semibold Italic\";font-size:20px'>x</text>\n"
f"<text x='20' y='{75 + yadd}' fill='#000000' dy='.3em' style='font-family:\"Libertinus Serif Semibold Italic\";font-size:20px'>y</text>\n")
elif geometry.shape[1] == 3:
svg += (f"<line x1='20' y1='{120 + yadd}' x2='20' y2='{90 + yadd}' />\n"
f"<line x1='15' y1='{100 + yadd}' x2='20' y2='{90 + yadd}' />\n"
f"<line x1='25' y1='{100 + yadd}' x2='20' y2='{90 + yadd}' />\n"
f"<line x1='20' y1='{120 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='40' y1='{115 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='40' y1='{125 + yadd}' x2='50' y2='{120 + yadd}' />\n"
f"<line x1='20' y1='{120 + yadd}' x2='44' y2='{102 + yadd}' />\n"
f"<line x1='33' y1='{104 + yadd}' x2='44' y2='{102 + yadd}' />\n"
f"<line x1='39' y1='{112 + yadd}' x2='44' y2='{102 + yadd}' />\n"
f"<text x='60' y='{120 + yadd}' fill='#000000' dy='.3em' style='font-family:\"Libertinus Serif Semibold Italic\";font-size:20px'>x</text>\n"
f"<text x='52' y='{91 + yadd}' fill='#000000' dy='.3em' style='font-family:\"Libertinus Serif Semibold Italic\";font-size:20px'>y</text>\n"
f"<text x='20' y='{75 + yadd}' fill='#000000' dy='.3em' style='font-family:\"Libertinus Serif Semibold Italic\";font-size:20px'>z</text>\n")
xpos = 100
for dim, es in enumerate(topology):
lines = []
for e in topology[1]:
lines.append(f"<line x1='{xpos + to_x(geometry[e[0]])}' y1='{yadd + to_y(geometry[e[0]])}'"
f" x2='{xpos + to_x(geometry[e[1]])}' y2='{yadd + to_y(geometry[e[1]])}' />\n")
entities = []
for n_e, e in enumerate(es):
points = [geometry[i] for i in e]
mid = sum(points) / len(points)
e_svg = f"<circle cx='{xpos + to_x(mid)}' cy='{yadd + to_y(mid)}' r='12px' />"
e_svg += f"<text x='{xpos + to_x(mid)}' y='{yadd + to_y(mid)}' dy='.3em'"
if n_e >= 10:
e_svg += " style='font-size:10px'"
e_svg += f">{n_e}</text>\n"
entities.append(e_svg)
if shape == "hexahedron" and dim == 1:
svg += "".join([lines[i] for i in [5, 6, 7, 11]])
svg += "".join([entities[i] for i in [5, 6, 7, 11]])
svg += "".join([lines[i] for i in [1, 3, 9, 10]])
svg += "".join([entities[i] for i in [1, 3, 9, 10]])
svg += "".join([lines[i] for i in [0, 2, 4, 8]])
svg += "".join([entities[i] for i in [0, 2, 4, 8]])
elif shape == "hexahedron" and dim == 2:
svg += "".join([lines[i] for i in [5, 6, 7, 11]])
svg += "".join([entities[i] for i in [4]])
svg += "".join([lines[i] for i in [1, 3, 9, 10]])
svg += "".join([entities[i] for i in [0, 2, 3, 5]])
svg += "".join([lines[i] for i in [0, 2, 4, 8]])
svg += "".join([entities[i] for i in [1]])
elif shape == "tetrahedron" and dim == 1:
svg += "".join([lines[i] for i in [0, 2, 4]])
svg += "".join([entities[i] for i in [0, 2, 4]])
svg += "".join([lines[i] for i in [1, 3, 5]])
svg += "".join([entities[i] for i in [1, 3, 5]])
elif shape == "tetrahedron" and dim == 2:
svg += "".join([lines[i] for i in [0, 2, 4]])
svg += "".join([entities[i] for i in [0, 1, 3]])
svg += "".join([lines[i] for i in [1, 3, 5]])
svg += "".join([entities[i] for i in [2]])
elif shape == "prism" and dim == 1:
svg += lines[5]
svg += entities[5]
svg += "".join([lines[i] for i in [1, 2, 7, 8]])
svg += "".join([entities[i] for i in [1, 2, 7, 8]])
svg += "".join([lines[i] for i in [0, 3, 4, 6]])
svg += "".join([entities[i] for i in [0, 3, 4, 6]])
elif shape == "pyramid" and dim == 1:
svg += lines[2]
svg += entities[2]
svg += "".join([lines[i] for i in [1, 3, 6, 7]])
svg += "".join([entities[i] for i in [1, 3, 6, 7]])
svg += "".join([lines[i] for i in [0, 4, 5]])
svg += "".join([entities[i] for i in [0, 4, 5]])
elif shape == "pyramid" and dim == 2:
svg += lines[5]
svg += entities[3]
svg += "".join([lines[i] for i in [1, 3, 6, 7]])
svg += "".join([entities[i] for i in [0, 2, 4]])
svg += "".join([lines[i] for i in [0, 4, 2]])
svg += "".join([entities[i] for i in [1]])
elif shape == "pyramid" and dim == 3:
svg += "".join([lines[i] for i in [1, 3, 5, 6, 7]])
svg += entities[0]
svg += "".join([lines[i] for i in [0, 2, 4]])
else:
svg += "\n".join(lines)
svg += "\n".join(entities)
xpos += width + 70
with open(f"{shape}_numbering.svg", "w") as f:
f.write(f"<svg width='{xpos - 50}' height='{140 + yadd}'>\n")
f.write("<style type=\"text/css\"><![CDATA[\n"
" text { text-anchor:middle; font-size:15px; font-family: \"Lato Bold\" }\n"
" line { stroke-width: 4px; stroke-linecap: round; stroke: #000000 }\n"
" circle { fill: #FFFFFF; stroke: #000000; stroke-width:4px}\n"
" rect { fill: #FFFFFF; stroke: #FFFFFF; stroke-width:4px}\n"
"]]></style>\n")
f.write(f"<rect x='0' y='0' width='{xpos - 50}' height='{140 + yadd}' />\n")
f.write(svg)
f.write("</svg>")
|
11580761
|
import requests
import hashlib
from decouple import config
def send_request(start_char):
"""
The function sends a request to the API with the URL containing
first 5 characters of the hashed password
"""
# Concatenating first 5 characters of hashed password to the URL
url = "https://api.pwnedpasswords.com/range/" + start_char
try:
res = requests.get(url)
# Only status code of 200 includes all password hashes beginning
# with the starting characters alongside its prevalent counts
if res.status_code != 200:
print("\nError fetching results!!!")
return 0
return res
except:
print("\nConnection Error!!!")
return 0
def get_count(res, suffix):
"""
Function to get the count of the number of times the partial hash of the
password appears in the dataset of breached passwords
"""
# The data has a ':' delimiter separating the hashed password and its count
results = (line.split(":") for line in res.text.splitlines())
for hashed, count in results:
        # Checking whether the hash suffix (the remaining 35 characters)
        # matches an entry in the resultant tuple
if hashed == suffix:
return count
return 0
def password_hashing(password):
"""
Function to generate the SHA-1 hash of a UTF-8 encoded password
"""
    sha1pass = hashlib.sha1(password.encode("utf-8")).hexdigest().upper()
    # In order to maintain anonymity, store only the partial hash for searching
    head, tail = sha1pass[:5], sha1pass[5:]
return head, tail
print("\n\n\t------ !!CHECK IF YOUR PASSWORD IS SAFE!! ------")
print("Reading your password from the .env file and fetching results...")
password = config("PASSWORD")
start, end = password_hashing(password)
res = send_request(start)
if res:
num = get_count(res, end)
if num:
        print(
            f"\nYour password was found {num} times in the breach dataset;"
            f" it is recommended to change it ASAP!"
        )
else:
print(
f"\nYour password was not found in the dataset. "
f"You have a safe password!"
)
|
11580764
|
import tensorflow as tf
import numpy as np
import data_loader as dl
class Model:
def __init__(self, train=False, fromCheckpoint=None):
self.IMG_SIZE = 40
self.NUM_LABEL = 10
self.setupModel()
self.dataset = dl.Dataset()
if (train):
self.setupTraining()
self.dataset.loadData(train='train/', test='test/', categories=['sad', 'dead', 'at', 'hash', 'conf', 'empty', 'dot', 'dollar', 'plus', 'dash'])
self.sess = tf.Session()
self.saver = tf.train.Saver()
if fromCheckpoint:
self.saver.restore(self.sess, fromCheckpoint)
else:
init = tf.global_variables_initializer()
self.sess.run(init)
def setupModel(self):
        # INPUT: 40x40 grayscale images
        self.X = tf.placeholder(tf.float32, [None, self.IMG_SIZE, self.IMG_SIZE, 1])
        # 10 labels (one per category)
        self.Y_ = tf.placeholder(tf.float32, [None, self.NUM_LABEL])
        # dropout keep probability
        self.pkeep = tf.placeholder(tf.float32)
###
### WEIGHTS
###
W0 = tf.Variable(tf.truncated_normal([6, 6, 1, 6] ,stddev=0.1))
B0 = tf.Variable(tf.ones([6]) / 10)
W1 = tf.Variable(tf.truncated_normal([5, 5, 6, 12] ,stddev=0.1))
B1 = tf.Variable(tf.ones([12]) / 10)
W2 = tf.Variable(tf.truncated_normal([4, 4, 12, 24] ,stddev=0.1))
B2 = tf.Variable(tf.ones([24]) / 10)
        W3 = tf.Variable(tf.truncated_normal([(self.IMG_SIZE // 4) * (self.IMG_SIZE // 4) * 24, 200], stddev=0.1))
B3 = tf.Variable(tf.ones([200]) / 10)
W4 = tf.Variable(tf.truncated_normal([200, self.NUM_LABEL] ,stddev=0.1))
B4 = tf.Variable(tf.ones([self.NUM_LABEL]) / 10)
###
### LAYERS
###
        # 40x40 input image
        Y0 = tf.nn.conv2d(self.X, W0, strides=[1, 1, 1, 1], padding='SAME')
        Y0d = tf.nn.relu(Y0 + B0)
        # 40x40 layer
        Y1 = tf.nn.conv2d(Y0d, W1, strides=[1, 2, 2, 1], padding='SAME')
        Y1d = tf.nn.relu(Y1 + B1)
        # 20x20 layer
        Y2 = tf.nn.conv2d(Y1d, W2, strides=[1, 2, 2, 1], padding='SAME')
        Y2d = tf.nn.relu(Y2 + B2)
        # 10x10, flattened for the fully connected layer
        Y3 = tf.matmul(tf.reshape(Y2d, [-1, (self.IMG_SIZE // 4) * (self.IMG_SIZE // 4) * 24]), W3)
Y3d = tf.nn.relu(Y3 + B3)
Y3dd = tf.nn.dropout(Y3d, self.pkeep)
# Fully connected + Dropout
self.YLogits = tf.matmul(Y3dd, W4) + B4
self.Y = tf.nn.softmax(self.YLogits)
def setupTraining(self):
self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.YLogits, labels=self.Y_)
self.cross_entropy = tf.reduce_mean(self.cross_entropy) * 100
correct = tf.equal(tf.argmax(self.Y, 1), tf.argmax(self.Y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        self.training_rate = tf.placeholder(tf.float32)
self.tr_max = 0.005
self.tr_min = 0.0001
self.train_step = tf.train.AdamOptimizer(self.training_rate).minimize(self.cross_entropy)
def run_training(self, iterations, checkpointFolder='checkpoints', ident=0):
for i in range(iterations+1):
self.run_training_step(i, checkpointFolder, ident)
def run_training_step(self, i, checkpointFolder, ident):
if i % 10 == 0:
# evaluate
acc, loss = self.sess.run([self.accuracy, self.cross_entropy], feed_dict={self.X: self.dataset.test_images, self.Y_: self.dataset.test_labels, self.pkeep: 1})
print('Step: ' + str(i) + '\tAccuracy: ' + str(acc) + '\tLoss: ' + str(loss))
else:
print('Step: ' + str(i))
batch_x, batch_y = self.dataset.getTrainBatch(200)
# train
tr = self.tr_min + (self.tr_max - self.tr_min) * (np.exp(-i / 2000))
self.sess.run(self.train_step, feed_dict={self.X: batch_x, self.Y_: batch_y, self.training_rate: tr, self.pkeep: 0.75})
# Save
if (i % 50 == 0):
path = checkpointFolder + '/model_' + str(ident) + '_['+ str(i) + '].ckpt'
p = self.saver.save(self.sess, path)
print('Checkpoint saved ['+p+']')
def run_predict(self, images):
self.dataset.loadUnknown(images)
Y = self.sess.run([self.Y], feed_dict={self.X: self.dataset.unknown_images, self.pkeep: 1})
        return Y
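# Hypothetical usage (paths and checkpoint names are illustrative):
#   m = Model(train=True)
#   m.run_training(1000, checkpointFolder='checkpoints', ident=0)
#   preds = Model(fromCheckpoint='checkpoints/model_0_[1000].ckpt').run_predict(images)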
|
11580769
|
import torch.nn as nn
import torch.nn.functional as F
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5836943/pdf/MINF-37-na.pdf
class CharRNN(nn.Module):
def __init__(self, vocab_size, emb_size, max_len=320):
super(CharRNN, self).__init__()
self.max_len = max_len
self.emb = nn.Embedding(vocab_size, emb_size)
self.lstm = nn.LSTM(emb_size, 256, dropout=0.1, num_layers=4)
self.linear = nn.Linear(256, vocab_size)
    # x is a list of variable-length LongTensor sequences; forward packs them itself.
def forward(self, x, with_softmax=False):
        # embed each sequence, pack, run the LSTM, then project to vocab logits
x = [self.emb(x_) for x_ in x]
x = nn.utils.rnn.pack_sequence(x, enforce_sorted=False)
x, _ = self.lstm(x)
x, _ = nn.utils.rnn.pad_packed_sequence(x, padding_value=0, total_length=self.max_len)
x = self.linear(x)
if with_softmax:
return F.softmax(x, dim=-1)
else:
return x
    def sample(self):
        # Sampling is left unimplemented in this snippet.
        return None
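# A minimal smoke test; the vocabulary size, token ids and sequence lengths
# below are illustrative, not taken from any particular dataset.
if __name__ == "__main__":
    import torch
    model = CharRNN(vocab_size=40, emb_size=32)
    batch = [torch.randint(0, 40, (n,)) for n in (12, 7)]  # variable-length sequences
    logits = model(batch)
    print(logits.shape)  # torch.Size([320, 2, 40]) -> (max_len, batch, vocab_size)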
|
11580773
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('cadillac.png')
rows, cols, ch = img.shape  # shape is an attribute, not a method
pts1 = np.float32([[50,50],[200,50],[50,200]])
pts2 = np.float32([[10,100],[200,50],[100,250]])
M = cv2.getAffineTransform(pts1,pts2)
dst = cv2.warpAffine(img,M,(cols,rows))
# OpenCV loads images in BGR order; convert to RGB so matplotlib shows true colors
plt.subplot(121), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)), plt.title('Input')
plt.subplot(122), plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)), plt.title('Output')
plt.show()
|
11580774
|
from itertools import dropwhile
import os
import subprocess
ext2lang = {
'.py': 'Python',
}
def execute(*args):
    popen = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, errors = popen.communicate()
    popen.wait()
    if errors:
        raise RuntimeError('Errors while executing %r: %s' % (
            ' '.join(args),
            errors.decode('utf-8', 'replace'),
        ))
    elif popen.returncode != 0:
        raise RuntimeError('Got returncode %d != 0 when executing %r' % (
            popen.returncode, ' '.join(args),
        ))
    # decode so downstream string handling works under Python 3
    return output.decode('utf-8')
def make_messages(locale, domain, version, inputdir,
localedir='locale',
extensions=('.py',),
):
assert os.path.isdir(localedir), 'no directory %s found' % (localedir,)
assert os.path.isdir(inputdir), 'no directory %s found' % (inputdir,)
languages = []
if locale == 'all':
languages = [lang for lang in os.listdir(localedir) if not lang.startswith('.')]
else:
languages.append(locale)
for locale in languages:
print "processing language", locale
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % domain)
potfile = os.path.join(basedir, '%s.pot' % domain)
if os.path.exists(potfile):
os.unlink(potfile)
all_files = []
for (dirpath, dirnames, filenames) in os.walk(inputdir):
all_files.extend([(dirpath, f) for f in filenames])
all_files.sort()
for dirpath, filename in all_files:
file_base, file_ext = os.path.splitext(filename)
if file_ext not in extensions:
continue
msgs = execute(
'xgettext',
'--default-domain', domain,
'--language', ext2lang[file_ext],
'--from-code', 'UTF-8',
'--output', '-',
os.path.join(dirpath, filename),
)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
            if msgs:
                with open(potfile, 'a') as f:
                    f.write(msgs)
        if os.path.exists(potfile):
            msgs = execute('msguniq', '--to-code', 'UTF-8', potfile)
            with open(potfile, 'w') as f:
                f.write(msgs)
            if os.path.exists(pofile):
                msgs = execute('msgmerge', '--quiet', pofile, potfile)
            with open(pofile, 'w') as f:
                f.write(msgs)
            os.unlink(potfile)
def compile_messages(localedir='locale'):
for dirpath, dirnames, filenames in os.walk(localedir):
for f in filenames:
if f.endswith('.po'):
path = os.path.join(dirpath, f)
mo_filename = os.path.splitext(path)[0] + '.mo'
try:
execute('msgfmt', '--check-format', '--output-file', mo_filename, path)
except RuntimeError as exc:
                    print('Could not compile %r: %s' % (path, exc))
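# Typical invocation (arguments are illustrative):
#   make_messages('all', domain='myapp', version='1.0', inputdir='src')
#   compile_messages()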
|
11580777
|
import os
import re
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='rtorrent_magnet')
pat = re.compile('xt=urn:btih:([^&/]+)')
class PluginRtorrentMagnet:
"""
    Process Magnet URIs into rTorrent-compatible torrent files.
    Magnet URIs will look something like this:
magnet:?xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Name
rTorrent would expect to see something like meta-URL_Escaped_Torrent_Name.torrent
The torrent file must also contain the text:
d10:magnet-uri88:xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Namee
This plugin will check if a download URL is a magnet link, and then create the appropriate torrent file.
Example:
rtorrent_magnet: ~/torrents/
"""
schema = {'type': 'string', 'format': 'path'}
def write_torrent_file(self, task, entry, path):
path = os.path.join(path, 'meta-%s.torrent' % entry['title'])
path = os.path.expanduser(path)
if task.options.test:
logger.info('Would write: {}', path)
else:
logger.info('Writing rTorrent Magnet File: {}', path)
with open(path, 'w') as f:
f.write('d10:magnet-uri%d:%se' % (len(entry['url']), entry['url']))
entry['output'] = path
# Run after download plugin to only pick up entries it did not already handle
@plugin.priority(0)
def on_task_output(self, task, config):
for entry in task.accepted:
if 'output' in entry:
logger.debug(
'Ignoring, {} already has an output file: {}', entry['title'], entry['output']
)
continue
for url in entry.get('urls', [entry['url']]):
if url.startswith('magnet:'):
logger.debug('Magnet URI detected for url {} ({})', url, entry['title'])
if pat.search(url):
self.write_torrent_file(task, entry, entry.get('path', config))
break
else:
logger.warning('Unrecognized Magnet URI Format: {}', url)
@event('plugin.register')
def register_plugin():
plugin.register(PluginRtorrentMagnet, 'rtorrent_magnet', api_ver=2)
|
11580799
|
import re
import numpy as np
# fits_table is assumed to come from the astrometry.net utilities
from astrometry.util.fits import fits_table
def get_dolphot_idc(cam, ccdchip, filt, forward=True):
IDC = fits_table('dolphot-idc-%s.fits' % (cam.lower()))
IDC.order = 4
if forward:
dirn = 'FORWARD'
else:
dirn = 'INVERSE'
row = np.flatnonzero((IDC.direction == dirn) *
(IDC.detchip == ccdchip) *
(IDC.filter == filt))
assert(len(row) == 1)
idc = IDC[row[0]]
return idc
def get_dolphot_shifts(fx, fy, rx, ry, RW, RH, idc, scale):
#fx = fx.copy()
#fy = fy.copy()
#rx = rx.copy()
#ry = ry.copy()
# -convert from FITS to dolphot-convention pixels
fx -= 0.5
fy -= 0.5
rx -= 0.5
ry -= 0.5
# Ref: subtract half ref img size
rx -= RW / 2.
ry -= RH / 2.
rx *= scale
ry *= scale
# Img: subtract IDC xref,yref;
fx -= idc.xref
fy -= idc.yref
#print 'fx,fy 1', fx,fy
# push through IDC.
fx,fy = apply_idc(idc, fx, fy)
#print 'fx,fy 2', fx,fy
# convert back to pixels via scale
fx /= idc.scale
fy /= idc.scale
#print 'fx,fy 3', fx,fy
#print 'rx,ry', rx,ry
#print 'sx,sy', fx-rx, fy-ry
return fx-rx, fy-ry
def apply_idc(idc, x, y):
fx,fy = np.zeros_like(x), np.zeros_like(y)
for i in range(1, idc.order+1):
for j in range(i+1):
xpow = j
ypow = i - j
cx = idc.get('cx%i%i' % (i,j))
cy = idc.get('cy%i%i' % (i,j))
dx = cx * (x)**xpow * (y)**ypow
dy = cy * (x)**xpow * (y)**ypow
fx += dx
fy += dy
return fx,fy
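# apply_idc evaluates the IDC distortion polynomial
#   fx = sum_{i=1..order} sum_{j=0..i} cx_ij * x**j * y**(i-j)
# (and the analogous sum with the cy_ij coefficients for fy).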
def parse_phot_info(txt, fnregex=None):
'''
Returns [ (name, chip, dx, dy, scale), ... ]
'''
if fnregex is None:
fnregex = r'(?P<id>[a-zA-Z0-9]{9,10})_F[\d]{3}W_flt(.chip(?P<chip>[12]))?'
fnre = re.compile(fnregex)
# as per http://docs.python.org/library/re.html
floatre = r'[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?'
alignre = re.compile((r'\s(' + floatre.replace('(','(?:') + ')')*5)
flts = []
for m in fnre.finditer(txt):
        print('got match with id', m.group('id'), 'and chip', m.group('chip'))
if m.group('chip') is not None and len(m.group('chip')):
chip = int(m.group('chip'))
else:
chip = None
flts.append((m.group('id'), chip))
aligns = []
lines = txt.split('\n')
started = False
for x in lines:
if 'Alignment' in x:
started = True
if not started:
continue
if 'Aperture corrections' in x:
break
m = alignre.match(x)
if not m:
continue
#print m.groups()
aligns.append([float(y) for y in m.groups()])
    print('got', len(flts), 'FLT chips and', len(aligns), 'alignments.')
if len(flts) != len(aligns):
return None
rtn = []
for f,a in zip(flts,aligns):
# name, chip, dx, dy, scale
rtn.append((f[0], f[1], a[0], a[1], a[2]))
return rtn
|
11580825
|
import datetime
import locale
dt = datetime.datetime(2018, 1, 1)
print(dt)
# 2018-01-01 00:00:00
print(dt.strftime('%A, %a, %B, %b'))
# Monday, Mon, January, Jan
print(locale.getlocale(locale.LC_TIME))
# (None, None)
locale.setlocale(locale.LC_TIME, 'ja_JP.UTF-8')
print(locale.getlocale(locale.LC_TIME))
# ('ja_JP', 'UTF-8')
print(dt.strftime('%A, %a, %B, %b'))
# ζζζ₯, ζ, 1ζ, 1
locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
print(dt.strftime('%A, %a, %B, %b'))
# Monday, Mon, January, Jan
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
print(dt.strftime('%A, %a, %B, %b'))
# Montag, Mo, Januar, Jan
locale.setlocale(locale.LC_TIME, 'ja_JP.UTF-8')
s = '2018-01-01'
s_dow = datetime.datetime.strptime(s, '%Y-%m-%d').strftime('%A')
print(s_dow)
# ζζζ₯
|
11580837
|
import numpy as np
import sacc
from srd_models import (
add_srci_lensj_ell_cl,
add_lensi_lensi_ell_cl,
add_srci_srcj_ell_cl,
add_lens_tracers,
add_src_tracers
)
sacc_data = sacc.Sacc()
####################################
# first lets add the tracers
add_src_tracers(sacc_data)
add_lens_tracers(sacc_data)
####################################
# now we add the data points in
# the correct order and the cov mat
cov_file = (
'LSST_DESC_SRD_v1_release/forecasting/WL-LSS-CL/cov'
'/Y1_3x2pt_clusterN_clusterWL_cov')
data_file = (
'LSST_DESC_SRD_v1_release/forecasting/WL-LSS-CL/datav'
'/3x2pt_clusterN_clusterWL_Y1_fid')
nggl = 7 # number of ggl power spectra
ngcl = 6 # number of cluster-source galaxy power spectra
nlens = 5 # number of lens bins
nlenscl = 3 # number of cluster redshift bins
nshear = 15 # number of shear tomographic power spectra
ncl = 20 # number of ell-bins
nclgcl = 5 # number of cluster ell-bins
nrich = 5 # number of richness bins
ndata = (
(nshear + nggl + nlens) * ncl +
nlenscl * nrich +
nrich * ngcl * nclgcl)
n2pt = (nshear + nggl + nlens) * ncl
datav = np.genfromtxt(data_file)
covfile = np.genfromtxt(cov_file)
cov = np.zeros((ndata, ndata))
for i in range(covfile.shape[0]):
iind, jind = int(covfile[i, 0]), int(covfile[i, 1])
val = covfile[i, 8] + covfile[i, 9]
cov[iind, jind] = val
cov[jind, iind] = val
cov = cov[:n2pt, :n2pt]
datav = datav[:n2pt]
msks = []
# shear-shear
loc = 0
for i in range(5):
for j in range(i, 5):
msk = add_srci_srcj_ell_cl(sacc_data, i, j, datav[loc:loc+ncl, 1])
msks.append(msk)
loc += ncl
# shear-gal
# i is the source index
# j is the lens index
for j, i in [(0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (2, 4), (3, 4)]:
tr = sacc_data.get_tracer('lens%d' % j)
mean_z = np.sum(tr.z * tr.nz) / np.sum(tr.nz)
msk = add_srci_lensj_ell_cl(sacc_data, i, j, mean_z, datav[loc:loc+ncl, 1])
msks.append(msk)
loc += ncl
# gal-gal
for i in range(5):
    # use the current lens bin index (the previous loop's j would be stale here)
    tr = sacc_data.get_tracer('lens%d' % i)
mean_z = np.sum(tr.z * tr.nz) / np.sum(tr.nz)
msk = add_lensi_lensi_ell_cl(sacc_data, i, mean_z, datav[loc:loc+ncl, 1])
msks.append(msk)
loc += ncl
# make sure the masks are the right size
mask = np.concatenate(msks)
assert mask.shape[0] == datav.shape[0]
# make sure we got everything
assert loc == datav.shape[0]
# deal with the covariances
mask_inds = np.cumsum(mask) - 1
n_keep = np.sum(mask)
masked_cov = np.zeros((n_keep, n_keep))
for _i in range(cov.shape[0]):
for _j in range(cov.shape[1]):
if mask[_i] and mask[_j]:
i = mask_inds[_i]
j = mask_inds[_j]
masked_cov[i, j] = cov[_i, _j]
sacc_data.add_covariance(masked_cov)
sacc_data.save_fits('srd_v1_sacc_data.fits', overwrite=True)
|
11580838
|
import sys
from elasticsearch import Elasticsearch
def get_top_k(query, k=5):
results = es.search(index='python-code', params={"q": query})['hits']['hits'][:k]
for doc in results:
print("Score: ", doc['_score'])
print("Docstring: ", doc['_source']['doc']['docstring'])
print("Code: ", doc['_source']['doc']['code'])
print("URL: ", doc['_source']['doc']['url'])
print("\n\n")
if __name__ == '__main__':
es = Elasticsearch()
query = sys.argv[1]
get_top_k(query)
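# Example invocation (script name is illustrative; the 'python-code' index
# must already be populated):
#   python search.py "parse a csv file"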
|
11580854
|
from helpers import serialize_text, deserialize_text, translate, hash
import yaml
import sys
import os
import re
class DB:
def __init__(self, path):
self.path = path
self.db = {}
def load(self):
if os.path.exists(self.path):
with open(self.path) as input_file:
self.db = yaml.load(input_file.read(), Loader=yaml.BaseLoader)
def get(self, language, key):
return self.db.get(language, {}).get(key)
def set(self, language, key, hv):
        if language not in self.db:
self.db[language] = {}
self.db[language][key] = hv
# we immediately write the DB to disk
self.write()
def write(self):
with open(self.path, "wb") as output_file:
output_file.write(yaml.dump(self.db, encoding='utf-8', allow_unicode=True, indent=2, sort_keys=True))
def deserialize_struct(struct, translated=False):
for k, v in struct.items():
if isinstance(v, str):
if not translated:
# we make a roundtrip to remove translation markup...
struct[k] = deserialize_text(serialize_text(v))
# if there are <tr-snip> tags, we only return the text within them.
if re.match(r".*<tr-snip>", v):
snippets = []
for m in re.finditer(r"<tr-snip>([^<]+?)</tr-snip>", v):
snippets.append(m.group(1).strip())
struct[k] = " ".join(snippets)
else:
deserialize_struct(v, translated=translated)
return process_filters(struct)
filtermap = {
'capitalize' : lambda v : " ".join([vv.capitalize() if i == 0 else vv for (i, vv) in enumerate(v.split(" ")) ]),
}
def process_filters(struct):
filtered_struct = {}
for k, v in struct.items():
kp = k.split("|")
kr = kp[0]
filters = kp[1:]
        if isinstance(v, str):
            for flt in filters:
                if flt not in filtermap:
                    sys.stderr.write(f"Unknown filter: {flt}\n")
                    continue
                v = filtermap[flt](v)
filtered_struct[kr] = v
else:
filtered_struct[k] = process_filters(v)
return filtered_struct
def translate_struct(ref_lang, target_lang, ref_struct, target_struct, db, token, parent_key=None):
for k, v in ref_struct.items():
kp = k.split("|")
kr = kp[0]
full_key = kr if parent_key is None else f'{parent_key}.{kr}'
if isinstance(v, str):
kh = hash(v)
rh = db.get(target_lang, full_key)
if rh == kh:
continue # translation is still up-to-date
print(f"Translating {full_key} from {ref_lang} to {target_lang}...")
try:
if target_lang == ref_lang:
translation = deserialize_text(serialize_text(v))
else:
translation = deserialize_text(translate(serialize_text(v), ref_lang, target_lang, token))
            except Exception as exc:
                print(f"Cannot translate ({exc}), skipping...")
                continue
target_struct[k] = translation
db.set(target_lang, full_key, kh)
else:
if not isinstance(target_struct.get(k), dict):
target_struct[k] = {}
translate_struct(ref_lang, target_lang, v, target_struct[k], db, token, parent_key=full_key)
return target_struct
def update_translations(translations_path, ref_lang, token):
"""
- We load the reference English translations from {ref_lang}.ref.yml
- We load the translation cache from src/translations/{ref_lang}.trans
- We generate translations for all target languages based on the keys in
the reference language translations file.
- We post-process and store the translations.
- We also post-process the reference translations and store them.
"""
ref_translations_path = os.path.join(translations_path, f"{ref_lang}.ref.yml")
ref_translations_generated_path = os.path.join(translations_path, f"{ref_lang}.yml")
db_path = os.path.join(translations_path, f"{ref_lang}.trans")
db = DB(db_path)
db.load()
with open(ref_translations_path) as input_file:
ref_translations = yaml.load(input_file.read(), Loader=yaml.BaseLoader)
for target_lang in TARGET_LANGS:
target_lang_translations_path = os.path.join(translations_path, f"{target_lang}.yml")
if os.path.exists(target_lang_translations_path):
with open(target_lang_translations_path) as input_file:
target_translations = yaml.load(input_file.read(), Loader=yaml.BaseLoader)
else:
target_translations = {}
translate_struct(ref_lang, target_lang, ref_translations, target_translations, db, token)
with open(target_lang_translations_path, "wb") as output_file:
output_file.write(f"# Warning, this file is partially automatically generated!\n".encode("utf-8"))
output_file.write(yaml.dump(deserialize_struct(target_translations, True), encoding='utf-8', allow_unicode=True, indent=2, sort_keys=True))
with open(ref_translations_generated_path, "wb") as output_file:
output_file.write(f"# Warning, this file is automatically generated from '{ref_lang}.ref.yml', edit that file instead!\n".encode("utf-8"))
output_file.write(yaml.dump(deserialize_struct(ref_translations), encoding='utf-8', allow_unicode=True, indent=2, sort_keys=True))
TRANSLATIONS_PATH = os.environ.get('TRANSLATIONS') or os.path.join(os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'src')), 'translations')
TOKEN = os.environ.get('TOKEN')
REF_LANG = "en"
TARGET_LANGS = ["es", "de", "fr", "pt", "it", "nl", "pl", "zh"]
#TARGET_LANGS = ["de", "fr"]
if __name__ == '__main__':
if len(sys.argv) > 1:
TARGET_LANGS = sys.argv[1].split(",")
if not TOKEN:
sys.stderr.write(f"Please provide a DeepL token in the 'TOKEN' environment variable.\n")
sys.exit(-1)
update_translations(TRANSLATIONS_PATH, REF_LANG, TOKEN)
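# Typical invocation (script name is illustrative):
#   TOKEN=<deepl-token> python update_translations.py de,fr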
|
11580859
|
r"""
This file implements the functional equivalence test, which ensures that
functional yield attributes which return the same value as a numerical setting
(or in the case of the AGB yield settings, an un-modified interpolator),
predict numerically similar results.
In practice, this test passes with a percent difference in the predicted
masses of less than a part per million.
"""
from ...core import singlezone
from ...testing import unittest
from .. import agb
from .. import ccsne
from .. import sneia
_OUTTIMES_ = [0.05 * i for i in range(201)]
_CCSN_YIELD_O_ = ccsne.settings['o']
_CCSN_YIELD_FE_ = ccsne.settings['fe']
_CCSN_YIELD_SR_ = ccsne.settings['sr']
_SNIA_YIELD_O_ = sneia.settings['o']
_SNIA_YIELD_FE_ = sneia.settings['fe']
_SNIA_YIELD_SR_ = sneia.settings['sr']
def ccsn_yield_o(z):
r"""
Returns the current CCSN yield setting for oxygen
"""
return _CCSN_YIELD_O_
def ccsn_yield_fe(z):
r"""
Returns the current CCSN yield setting for iron
"""
return _CCSN_YIELD_FE_
def ccsn_yield_sr(z):
r"""
Returns the current CCSN yield setting for strontium
"""
return _CCSN_YIELD_SR_
def snia_yield_o(z):
r"""
Returns the current SN Ia yield setting for oxygen
"""
return _SNIA_YIELD_O_
def snia_yield_fe(z):
r"""
Returns the current SN Ia yield setting for iron
"""
return _SNIA_YIELD_FE_
def snia_yield_sr(z):
r"""
Returns the current SN Ia yield setting for strontium
"""
return _SNIA_YIELD_SR_
class agb_interpolator_mimic(agb.interpolator):
# The AGB yield calculator forces yields to zero if a negative yield is
# calculated for progenitor masses < 1.5 Msun to avoid numerical artifacts.
    # The agb.interpolator object does not do this, so this functionality is
# duplicated here in a subclass for testing purposes.
def __call__(self, mass, metallicity):
y = super().__call__(mass, metallicity)
if mass < 1.5 and y < 0:
return 0
else:
return y
@unittest
def equivalence_test():
r"""
equivalence test for functional yields.
"""
def test():
agb.settings['o'] = "cristallo11"
agb.settings['fe'] = "cristallo11"
agb.settings['sr'] = "cristallo11"
attrs = {
"name": "test",
"elements": ["fe", "sr", "o"],
"dt": 0.05
}
try:
out1 = singlezone.singlezone(**attrs).run(_OUTTIMES_,
overwrite = True, capture = True)
        except Exception:
return None
try:
ccsne.settings['o'] = ccsn_yield_o
ccsne.settings['fe'] = ccsn_yield_fe
ccsne.settings['sr'] = ccsn_yield_sr
sneia.settings['o'] = snia_yield_o
sneia.settings['fe'] = snia_yield_fe
sneia.settings['sr'] = snia_yield_sr
agb.settings['o'] = agb_interpolator_mimic('o')
agb.settings['fe'] = agb_interpolator_mimic('fe')
agb.settings['sr'] = agb_interpolator_mimic('sr')
        except Exception:
return None
try:
out2 = singlezone.singlezone(**attrs).run(_OUTTIMES_,
overwrite = True, capture = True)
        except Exception:
return None
status = True
for i in range(len(out1.history["time"])):
for elem in ["fe", "sr", "o"]:
col = "mass(%s)" % (elem)
if out1.history[col][i]:
percent_diff = abs(
(out1.history[col][i] - out2.history[col][i]) /
out1.history[col][i])
else:
percent_diff = abs(out2.history[col][i])
status &= percent_diff <= 1.e-6
if not status: break
if not status: break
return status
return ["vice.yields edge case : functional equivalence", test]
|
11580916
|
from processes.paying_for_won_item.saga import PayingForWonItem, PayingForWonItemData
from processes.paying_for_won_item.saga_handler import PayingForWonItemHandler
__all__ = ["PayingForWonItem", "PayingForWonItemData", "PayingForWonItemHandler"]
|
11580959
|
import geosoft.gxpy.gx as gx
import geosoft.gxpy.map as gxmap
import geosoft.gxpy.view as gxview
import geosoft.gxpy.group as gxgroup
import geosoft.gxpy.agg as gxagg
import geosoft.gxpy.grid as gxgrd
import geosoft.gxpy.viewer as gxviewer
gxc = gx.GXpy()
# create a map from grid coordinate system and extent
with gxgrd.Grid('Wittichica Creek Residual Total Field.grd') as grd:
grid_file_name = grd.file_name_decorated
# create a map for this grid on A4 media, scale to fit the extent
with gxmap.Map.new('Wittichica residual TMI',
data_area=grd.extent_2d(),
media="A4",
margins=(1, 3.5, 3, 1),
coordinate_system=grd.coordinate_system,
overwrite=True) as gmap:
map_file_name = gmap.file_name
# draw into the views on the map. We are reopening the map as the Aggregate class only works with a closed grid.
with gxmap.Map.open(map_file_name) as gmap:
# work with the data view, draw a line around the data view
with gxview.View.open(gmap, "data") as v:
# add the grid image to the view, with shading, 20 nT contour interval to match default contour lines
with gxagg.Aggregate_image.new(grid_file_name, shade=True, contour=20) as agg:
gxgroup.Aggregate_group.new(v, agg)
# colour legend
gxgroup.legend_color_bar(v, 'TMI_legend',
title='Res TMI\nnT',
location=(1.2,0),
cmap=agg.layer_color_map(0),
cmap2=agg.layer_color_map(1))
# contour the grid
gxgroup.contour(v, 'TMI_contour', grid_file_name)
# map title and creator tag
with gxview.View.open(gmap, "base") as v:
with gxgroup.Draw(v, 'title') as g:
g.text("Tutorial Example\nresidual mag",
reference=gxgroup.REF_BOTTOM_CENTER,
location=(100, 10),
text_def=gxgroup.Text_def(height=3.5,
weight=gxgroup.FONT_WEIGHT_BOLD))
g.text("created by:" + gxc.gid,
location=(1, 1.5),
text_def=gxgroup.Text_def(height=1.2,
italics=True))
# add a map surround to the map
gmap.surround(outer_pen='kt500', inner_pen='kt100', gap=0.1)
# annotate the data view locations
gmap.annotate_data_xy(grid=gxmap.GRID_CROSSES)
gmap.annotate_data_ll(grid=gxmap.GRID_LINES,
grid_pen=gxgroup.Pen(line_color='b'),
text_def=gxgroup.Text_def(color='b',
height=0.15,
italics=True))
# scale bar
gmap.scale_bar(location=(1, 3, 1.5),
text_def=gxgroup.Text_def(height=0.15))
# display the map in a Geosoft viewer
gxviewer.view_document(map_file_name, wait_for_close=False)
# save to a PNG file
gxmap.save_as_image(map_file_name, "wittichica_mag.png", type=gxmap.RASTER_FORMAT_PNG)
|
11580983
|
class Result:
def __init__(
self,
status=None,
full_name=None,
failed_expectations=None,
deprecation_warnings=None,
runnable_id=None,
description=None,
pending_reason=None
):
if failed_expectations is None:
failed_expectations = {}
self._status = status
self._full_name = full_name
self._failed_expectations = failed_expectations
self._deprecation_warnings = deprecation_warnings
self._runnable_id = runnable_id
self._description = description
self._pending_reason = pending_reason
@property
def status(self):
return self._status
@property
def full_name(self):
return self._full_name
@property
def failed_expectations(self):
return self._failed_expectations
@property
def deprecation_warnings(self):
return self._deprecation_warnings
@property
def runnable_id(self):
return self._runnable_id
@property
def description(self):
return self._description
@property
def pending_reason(self):
return self._pending_reason
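# Minimal usage sketch; the field values below are illustrative.
if __name__ == '__main__':
    result = Result(status='passed', full_name='Calculator adds numbers', runnable_id='spec-1')
    assert result.status == 'passed'
    assert result.failed_expectations == {}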
|
11580996
|
def main():
N = int(input())
for _ in range(N):
A,B,C = map(int, input().split())
sol = False
for x in range(-22, 23):
if sol:
break
if x*x <= C:
for y in range(-100, 101):
if sol:
break
if y != x and x*x + y*y <= C:
for z in range(-100, 101):
if sol:
break
if z != x and z != y and x+y+z == A and x*y*z == B and x*x + y*y + z*z == C:
print("{} {} {}".format(x, y, z))
sol = True
if not sol:
print("No solution.")
main()
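# Illustrative I/O (format inferred from the reads above): first line N, then N
# lines "A B C". For "6 6 14" the program prints "1 2 3", since 1+2+3 = 6,
# 1*2*3 = 6 and 1*1 + 2*2 + 3*3 = 14.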
|
11581007
|
from contextlib import suppress
from typing import ClassVar, List
from frozendict import frozendict
from logger import log
from test_infra import consts
from test_infra.assisted_service_api import ClientFactory, InventoryClient
from test_infra.consts import resources
from test_infra.utils import utils
from test_infra.utils.global_variables.env_variables_defaults import \
_EnvVariablesDefaults
_triggers = frozendict(
{
(("platform", consts.Platforms.NONE),): {
"user_managed_networking": True,
"vip_dhcp_allocation": False,
},
(("platform", consts.Platforms.VSPHERE),): {
"user_managed_networking": False,
},
(("masters_count", 1),): {
"workers_count": 0,
"nodes_count": 1,
"high_availability_mode": consts.HighAvailabilityMode.NONE,
"user_managed_networking": True,
"vip_dhcp_allocation": False,
"openshift_version": consts.OpenshiftVersion.VERSION_4_8.value,
"master_memory": resources.DEFAULT_MASTER_SNO_MEMORY,
"master_vcpu": resources.DEFAULT_MASTER_SNO_CPU,
},
(("is_ipv4", False), ("is_ipv6", True),): {
"cluster_networks": consts.DEFAULT_CLUSTER_NETWORKS_IPV6,
"service_networks": consts.DEFAULT_SERVICE_NETWORKS_IPV6,
"vip_dhcp_allocation": False,
"openshift_version": consts.OpenshiftVersion.VERSION_4_8.value,
"network_type": consts.NetworkType.OVNKubernetes
},
(("is_ipv4", True), ("is_ipv6", True),): {
"cluster_networks": consts.DEFAULT_CLUSTER_NETWORKS_IPV4V6,
"service_networks": consts.DEFAULT_SERVICE_NETWORKS_IPV4V6,
"network_type": consts.NetworkType.OVNKubernetes,
}
}
)
class GlobalVariables(_EnvVariablesDefaults):
_triggered: ClassVar[List[str]] = list()
def __post_init__(self):
super().__post_init__()
        client = None
        if not self.is_kube_api:
            with suppress(RuntimeError, TimeoutError):
                client = self.get_api_client()
self._set("openshift_version", utils.get_openshift_version(allow_default=True, client=client))
for conditions, values in _triggers.items():
assert isinstance(conditions, tuple) and isinstance(conditions[0], tuple), f"Key {conditions} must be tuple of tuples"
if all(map(lambda condition: self.is_set(condition[0], condition[1]), conditions)):
self._handle_trigger(conditions, values)
def is_set(self, var, expected):
return getattr(self, var) == expected
def _handle_trigger(self, conditions, values):
for k, v in values.items():
self._set(k, v)
self._triggered.append(conditions)
log.info(f"{conditions} is triggered. Updating global variables: {values}")
def __getattribute__(self, item):
try:
return super().__getattribute__(item)
except BaseException:
return None
def get_api_client(self, offline_token=None, **kwargs) -> InventoryClient:
url = self.remote_service_url
offline_token = offline_token or self.offline_token
if not url:
url = utils.get_local_assisted_service_url(
self.namespace, 'assisted-service', utils.get_env('DEPLOY_TARGET'))
return ClientFactory.create_client(url, offline_token, **kwargs)
|
11581044
|
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as func
from torchsupport.interacting.off_policy_training import OffPolicyTraining
class CRRTraining(OffPolicyTraining):
def __init__(self, policy, value, agent, environment,
beta=1.0, clip=None, tau=5e-3, **kwargs):
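        # Placeholder: the real value network is presumably wired up by the
        # base class via the {"value": value} dict passed below.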
self.value = ...
super().__init__(
policy, agent, environment,
{"value": value}, **kwargs
)
self.beta = beta
self.clip = clip
self.tau = tau
self.target = deepcopy(value)
self.policy_target = deepcopy(policy)
def _update_target(self, target, source):
if self.step_id % 100 == 1:
with torch.no_grad():
tp = target.parameters()
ap = source.parameters()
for t, a in zip(tp, ap):
t -= t
t += a
def update_target(self):
self._update_target(self.target, self.value)
self._update_target(self.policy_target, self.policy)
def action_nll(self, policy, action):
return func.cross_entropy(policy, action, reduction='none')
def policy_loss(self, policy, action, advantage):
weight = torch.exp(advantage / self.beta)
if self.clip is not None:
weight = weight.clamp(0, self.clip)
negative_log_likelihood = self.action_nll(policy, action)
weighted_loss = negative_log_likelihood * weight
return weighted_loss.mean()
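    # The method above implements the exponential-advantage weighting of
    # Critic Regularized Regression (CRR): each action's negative log-likelihood
    # is weighted by min(exp(A(s, a) / beta), clip), so only advantageous
    # actions are behavior-cloned.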
def state_value(self, state, value=None, policy=None):
value = value or self.value
policy = policy or self.policy
action_value = value(state)
policy = policy(state)
pmax = policy.argmax(dim=1)
ind = torch.arange(
action_value.size(0),
dtype=torch.long,
device=action_value.device
)
expected = action_value[ind, pmax]
return expected
def run_policy(self, sample):
initial_state = sample.initial_state
action = sample.action
with torch.no_grad():
action_value = self.value(initial_state)
inds = torch.arange(action.size(0), device=action.device)
action_value = action_value[inds, action]
value = self.state_value(initial_state)
advantage = action_value - value
self.current_losses["mean advantage"] = float(advantage.mean())
policy = self.policy(initial_state)
return policy, action, advantage
def auxiliary_loss(self, value, target):
return func.mse_loss(value.view(-1), target.view(-1))
def run_auxiliary(self, sample):
self.update_target()
initial_state = sample.initial_state
final_state = sample.final_state
action = sample.action
rewards = sample.rewards
action_value = self.value(initial_state)
inds = torch.arange(action.size(0), device=action.device)
action_value = action_value[inds, action]
with torch.no_grad():
state_value = self.state_value(
final_state, value=self.target,
policy=self.policy_target
)
done_mask = 1.0 - sample.done.float()
target = rewards + self.discount * done_mask * state_value
self.current_losses["mean state value"] = float(state_value.mean())
self.current_losses["mean target value"] = float(target.mean())
return action_value, target
|
11581059
|
from itertools import chain
from enum import Enum
class BertSampleProviderTypes(Enum):
    """
    Supported format types.
    """
    # Default formatter
    CLASSIF_M = 0
    # Natural Language Inference samplers
    # paper: https://www.aclweb.org/anthology/N19-1035.pdf
    QA_M = 1
    NLI_M = 2
    QA_B = 3
    NLI_B = 4
class SampleFormattersService(object):
__fmt_names = {
BertSampleProviderTypes.CLASSIF_M: 'c_m',
BertSampleProviderTypes.QA_M: "qa_m",
BertSampleProviderTypes.NLI_M: 'nli_m',
BertSampleProviderTypes.QA_B: "qa_b",
BertSampleProviderTypes.NLI_B: "nli_b"
}
# region private methods
@staticmethod
def __iter_multiple():
yield BertSampleProviderTypes.CLASSIF_M
yield BertSampleProviderTypes.QA_M
yield BertSampleProviderTypes.NLI_M
@staticmethod
def __iter_binary():
yield BertSampleProviderTypes.QA_B
yield BertSampleProviderTypes.NLI_B
@staticmethod
def __iter_all():
return chain(SampleFormattersService.__iter_binary(),
SampleFormattersService.__iter_multiple())
# endregion
@staticmethod
def is_binary(formatter_type):
binary = list(SampleFormattersService.__iter_binary())
return formatter_type in binary
@staticmethod
def is_multiple(formatter_type):
multiple = list(SampleFormattersService.__iter_multiple())
return formatter_type in multiple
@staticmethod
def iter_supported_names(return_values=False):
for fmt_type in SampleFormattersService.__iter_all():
yield SampleFormattersService.__fmt_names[fmt_type] if return_values else fmt_type
@staticmethod
def type_to_name(fmt_type):
return SampleFormattersService.__fmt_names[fmt_type]
@staticmethod
def find_fmt_type_by_name(name):
for fmt_type, fmt_type_name in SampleFormattersService.__fmt_names.items():
if fmt_type_name == name:
return fmt_type
        raise NotImplementedError("Formatting type '{}' is not supported".format(name))
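# Example round-trip over the mapping above:
if __name__ == '__main__':
    assert SampleFormattersService.type_to_name(BertSampleProviderTypes.NLI_M) == 'nli_m'
    assert SampleFormattersService.find_fmt_type_by_name('qa_b') == BertSampleProviderTypes.QA_B
    assert SampleFormattersService.is_binary(BertSampleProviderTypes.QA_B)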
|
11581103
|
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import RendererBase
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import matplotlib.path as path
import numpy as np
import os
import shutil
import tempfile
def test_uses_per_path():
id = transforms.Affine2D()
paths = [path.Path.unit_regular_polygon(i) for i in range(3, 7)]
tforms = [id.rotate(i) for i in range(1, 5)]
offsets = np.arange(20).reshape((10, 2))
facecolors = ['red', 'green']
edgecolors = ['red', 'green']
def check(master_transform, paths, all_transforms,
offsets, facecolors, edgecolors):
rb = RendererBase()
raw_paths = list(rb._iter_collection_raw_paths(
master_transform, paths, all_transforms))
gc = rb.new_gc()
ids = [path_id for xo, yo, path_id, gc0, rgbFace in
rb._iter_collection(gc, master_transform, all_transforms,
range(len(raw_paths)), offsets,
transforms.IdentityTransform(),
facecolors, edgecolors, [], [], [False],
[], 'data')]
uses = rb._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
if raw_paths:
seen = np.bincount(ids, minlength=len(raw_paths))
assert set(seen).issubset([uses - 1, uses])
check(id, paths, tforms, offsets, facecolors, edgecolors)
check(id, paths[0:1], tforms, offsets, facecolors, edgecolors)
check(id, [], tforms, offsets, facecolors, edgecolors)
check(id, paths, tforms[0:1], offsets, facecolors, edgecolors)
check(id, paths, [], offsets, facecolors, edgecolors)
for n in range(0, offsets.shape[0]):
check(id, paths, tforms, offsets[0:n, :], facecolors, edgecolors)
check(id, paths, tforms, offsets, [], edgecolors)
check(id, paths, tforms, offsets, facecolors, [])
check(id, paths, tforms, offsets, [], [])
check(id, paths, tforms, offsets, facecolors[0:1], edgecolors)
def test_get_default_filename():
try:
test_dir = tempfile.mkdtemp()
plt.rcParams['savefig.directory'] = test_dir
fig = plt.figure()
canvas = FigureCanvasBase(fig)
filename = canvas.get_default_filename()
assert filename == 'image.png'
finally:
shutil.rmtree(test_dir)
def test_get_default_filename_already_exists():
# From #3068: Suggest non-existing default filename
try:
test_dir = tempfile.mkdtemp()
plt.rcParams['savefig.directory'] = test_dir
fig = plt.figure()
canvas = FigureCanvasBase(fig)
# create 'image.png' in figure's save dir
open(os.path.join(test_dir, 'image.png'), 'w').close()
filename = canvas.get_default_filename()
assert filename == 'image-1.png'
finally:
shutil.rmtree(test_dir)
|